code | package | path | filename
---|---|---|---|
import aerosandbox.numpy as np
import aerosandbox.tools.units as u
from typing import Dict
def modified_DAPCA_IV_production_cost_analysis(
design_empty_weight: float,
design_maximum_airspeed: float,
n_airplanes_produced: int,
n_engines_per_aircraft: int,
cost_per_engine: float,
cost_avionics_per_airplane: float,
n_pax: int,
cpi_relative_to_2012_dollars: float = 1.275, # updated for 2022
n_flight_test_aircraft: int = 4,
is_cargo_airplane: bool = False,
primary_structure_material: str = "aluminum",
per_passenger_cost_model: str = "general_aviation",
engineering_wrap_rate_2012_dollars: float = 115.,
tooling_wrap_rate_2012_dollars: float = 118.,
quality_control_wrap_rate_2012_dollars: float = 108.,
manufacturing_wrap_rate_2012_dollars: float = 98.,
) -> Dict[str, float]:
"""
Computes the cost of an aircraft in present-day dollars, using the Modified DAPCA IV cost model.
Be sure to adjust `cpi_relative_to_2012_dollars` to the current value in order to accurately model inflation.
The DAPCA IV cost model is a statistical regression of historical aircraft cost data. It provides reasonable
results for most classes of aircraft, including transports, fighters, bombers, and even GA and UAV aircraft with
suitable adjustments.
It was created by the RAND Corporation.
The Modified DAPCA IV cost model is a modification of the DAPCA IV cost model that includes additional cost
estimates for engine cost (as the original DAPCA model assumes that this is known).
See Raymer, Aircraft Design: A Conceptual Approach, 5th Edition, Section 18.4.2 pg. 711 for more information.
Args:
design_empty_weight: The design empty weight of the entire aircraft, in kg.
design_maximum_airspeed: The design maximum airspeed of the aircraft, in m/s.
n_airplanes_produced: The number of airplanes produced or the number to be produced in 5 years, whichever is
less.
n_engines_per_aircraft: The number of engines per aircraft.
cost_per_engine: The cost of each engine, in present-day dollars.
cost_avionics_per_airplane: The cost of avionics per airplane, in present-day dollars.
n_pax: The number of passengers.
cpi_relative_to_2012_dollars: The consumer price index at the present day divided by the consumer price index
in 2012, seasonally-adjusted.
To quickly find this, use data from the St. Louis Federal Reserve. Below is the CPI, normalized to 2012.
https://fred.stlouisfed.org/graph/?g=10PU0
For example, in 2022, one would use 1.275.
n_flight_test_aircraft: The number of flight test aircraft. Typically 2 to 6.
is_cargo_airplane: Whether the airplane is a cargo airplane. If so, the quality control cost is lower.
primary_structure_material: The primary structure material. Options are:
- "aluminum"
- "carbon_fiber"
- "fiberglass"
- "steel"
- "titanium"
per_passenger_cost_model: The per-passenger cost model. Options are:
- "general_aviation": General aviation aircraft, such as Cessna 172s.
- "jet_transport": Jet transport aircraft, such as Boeing 737s.
- "regional_transport": Regional transport aircraft, such as Embraer E175s.
engineering_wrap_rate_2012_dollars: The engineering wrap rate in 2012 dollars.
tooling_wrap_rate_2012_dollars: The tooling wrap rate in 2012 dollars.
quality_control_wrap_rate_2012_dollars: The quality control wrap rate in 2012 dollars.
manufacturing_wrap_rate_2012_dollars: The manufacturing wrap rate in 2012 dollars.
Returns:
A dictionary of costs, to produce all `n_airplanes_produced` airplanes, in present-day dollars.
Keys and values are as follows:
- "engineering_labor": Engineering labor cost.
- "tooling_labor": Tooling labor cost.
- "manufacturing_labor": Manufacturing labor cost.
- "quality_control_labor": Quality control labor cost.
- "development_support": Development support cost. From Raymer: "Includes fabrication of mockups, iron-bird subsystem
simulators, structural test articles, and other test articles."
- "flight_test": Flight test cost. From Raymer: "Includes all costs incurred to demonstrate airworthiness
for civil certification or Mil-Spec compliance except for the costs of the flight-test aircraft
themselves. Costs for the flight-test aircraft are included in the total production-run cost estimation.
Includes planning, instrumentation, flight operations, data reduction, and engineering and manufacturing
support of flight testing."
- "manufacturing_materials": Manufacturing materials cost. From Raymer: "Includes all raw materials and
            purchased hardware and equipment."
        - "aircraft_interiors": Aircraft interiors cost (seats, luggage bins, lavatories, etc.), computed
            per-passenger via `per_passenger_cost_model`.
- "engine_cost": Engine cost.
- "avionics_cost": Avionics cost.
- "total_cost": Total cost.
"""
# Abbreviated constants for readability
W = design_empty_weight # kg
V = design_maximum_airspeed / u.kph # km/hour
Q = n_airplanes_produced
### Estimate labor hours
hours = {}
hours["engineering"] = 5.18 * W ** 0.777 * V ** 0.894 * Q ** 0.163
hours["tooling"] = 7.22 * W ** 0.777 * V ** 0.696 * Q ** 0.263
hours["manufacturing"] = 10.5 * W ** 0.82 * V ** 0.484 * Q ** 0.641
hours["quality_control"] = hours["manufacturing"] * (0.076 if is_cargo_airplane else 0.133)
### Account for materials difficulties
if primary_structure_material == "aluminum":
materials_hourly_multiplier = 1.0
elif primary_structure_material == "carbon_fiber":
materials_hourly_multiplier = (1.1 + 1.8) / 2
elif primary_structure_material == "fiberglass":
materials_hourly_multiplier = (1.1 + 1.2) / 2
elif primary_structure_material == "steel":
materials_hourly_multiplier = (1.5 + 2.0) / 2
elif primary_structure_material == "titanium":
materials_hourly_multiplier = (1.1 + 1.8) / 2
else:
raise ValueError("Invalid value of `primary_structure_material`.")
hours = {
k: v * materials_hourly_multiplier
for k, v in hours.items()
}
### Convert labor hours to labor costs in 2012 dollars
costs_2012_dollars = {}
costs_2012_dollars["engineering_labor"] = hours["engineering"] * engineering_wrap_rate_2012_dollars
costs_2012_dollars["tooling_labor"] = hours["tooling"] * tooling_wrap_rate_2012_dollars
costs_2012_dollars["manufacturing_labor"] = hours["manufacturing"] * manufacturing_wrap_rate_2012_dollars
costs_2012_dollars["quality_control_labor"] = hours["quality_control"] * quality_control_wrap_rate_2012_dollars
costs_2012_dollars["development_support"] = 67.4 * W ** 0.630 * V ** 1.3
costs_2012_dollars["flight_test"] = 1947 * W ** 0.325 * V ** 0.822 * n_flight_test_aircraft ** 1.21
costs_2012_dollars["manufacturing_materials"] = 31.2 * W ** 0.921 * V ** 0.621 * Q ** 0.799
### Add in the per-passenger cost for aircraft interiors:
# Seats, luggage bins, closets, lavatories, insulation, ceilings, floors, walls, etc.
# Costs are from Raymer, Aircraft Design: A Conceptual Approach, 5th edition. Section 18.4.2, page 715.
if per_passenger_cost_model == "general_aviation":
costs_2012_dollars["aircraft_interiors"] = n_airplanes_produced * n_pax * 850
elif per_passenger_cost_model == "jet_transport":
costs_2012_dollars["aircraft_interiors"] = n_airplanes_produced * n_pax * 3500
elif per_passenger_cost_model == "regional_transport":
costs_2012_dollars["aircraft_interiors"] = n_airplanes_produced * n_pax * 1700
else:
        raise ValueError("Invalid value of `per_passenger_cost_model`!")
### Convert all costs to present-day dollars
costs = {
k: v * cpi_relative_to_2012_dollars
for k, v in costs_2012_dollars.items()
}
### Add engine and avionics costs
costs["engine_cost"] = cost_per_engine * n_engines_per_aircraft * n_airplanes_produced
costs["avionics"] = cost_avionics_per_airplane * n_airplanes_produced
### Total all costs and return
costs["total"] = sum(costs.values())
return costs
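

if __name__ == '__main__':
    # Hedged usage sketch (added; not part of the original library file). All numbers below are
    # hypothetical inputs, chosen only to illustrate the function signature and the returned dict.
    example_costs = modified_DAPCA_IV_production_cost_analysis(
        design_empty_weight=40e3,  # kg (hypothetical narrow-body-class value)
        design_maximum_airspeed=250,  # m/s
        n_airplanes_produced=500,
        n_engines_per_aircraft=2,
        cost_per_engine=10e6,  # present-day dollars (assumed)
        cost_avionics_per_airplane=2e6,  # present-day dollars (assumed)
        n_pax=180,
        per_passenger_cost_model="jet_transport",
    )
    for name, cost in example_costs.items():
        print(f"{name:25s}: ${cost:,.0f}")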
| AeroSandbox | /AeroSandbox-4.1.1.tar.gz/AeroSandbox-4.1.1/aerosandbox/library/costs.py | costs.py |
import aerosandbox.numpy as np
def thrust_turbofan(
mass_turbofan: float,
) -> float:
"""
Estimates the maximum rated dry thrust of a turbofan engine. A regression to historical data.
Based on data for both civilian and military turbofans, available in:
`aerosandbox/library/datasets/turbine_engines/data.xlsx`
Applicable to both turbojets and turbofans, and with sizes ranging from micro-turbines (<1 kg) to large transport
aircraft turbofans.
See studies in `/AeroSandbox/studies/TurbofanStudies/make_fit_thrust.py` for model details.
Args:
mass_turbofan: The mass of the turbofan engine. [kg]
Returns:
The maximum (rated takeoff) dry thrust of the turbofan engine. [N]
"""
p = {'a': 12050.719283568596, 'w': 0.9353861810025565}
return (
p["a"] * mass_turbofan ** p["w"]
)
def thrust_specific_fuel_consumption_turbofan(
mass_turbofan: float,
bypass_ratio: float,
) -> float:
"""
Estimates the thrust-specific fuel consumption (TSFC) of a turbofan engine. A regression to historical data.
Based on data for both civilian and military turbofans, available in:
`aerosandbox/library/datasets/turbine_engines/data.xlsx`
Applicable to both turbojets and turbofans, and with sizes ranging from micro-turbines (<1 kg) to large transport
aircraft turbofans.
See studies in `/AeroSandbox/studies/TurbofanStudies/make_fit_tsfc.py` for model details.
"""
p = {'a' : 3.2916082331121034e-05, 'Weight [kg]': -0.07792863839756586, 'BPR': -0.3438158689838915,
'BPR2': 0.29880079602955967}
return (
p["a"]
* mass_turbofan ** p["Weight [kg]"]
* (bypass_ratio + p["BPR2"]) ** p["BPR"]
)
def mass_turbofan(
m_dot_core_corrected,
overall_pressure_ratio,
bypass_ratio,
diameter_fan,
):
"""
Computes the combined mass of a bare turbofan, nacelle, and accessory and pylon weights.
Bare weight depends on m_dot, OPR, and BPR.
Nacelle weight is a function of various areas and fan diameter.
From TASOPT documentation by Mark Drela, available here: http://web.mit.edu/drela/Public/web/tasopt/TASOPT_doc.pdf
Section: "Turbofan Weight Model from Historical Data"
Args:
m_dot_core_corrected: The mass flow of the core only, corrected to standard conditions. [kg/s]
overall_pressure_ratio: The overall pressure ratio (OPR) [-]
bypass_ratio: The bypass ratio (BPR) [-]
diameter_fan: The diameter of the fan. [m]
Returns: The total engine mass. [kg]
"""
kg_to_lbm = 2.20462262
m_to_ft = 1 / 0.3048
##### Compute bare turbofan weight
m_dot_core_corrected_lbm_per_sec = m_dot_core_corrected * kg_to_lbm # Converts from kg/s to lbm/s
### Parameters determined via least-squares fitting by Drela in TASOPT doc.
b_m = 1
b_pi = 1
b_alpha = 1.2
W_0_lbm = 1684.5
W_pi_lbm = 17.7
W_alpha_lbm = 1662.2
W_bare_lbm = (
m_dot_core_corrected_lbm_per_sec / 100
) ** b_m * (
W_0_lbm +
W_pi_lbm * (overall_pressure_ratio / 30) ** b_pi +
W_alpha_lbm * (bypass_ratio / 5) ** b_alpha
)
W_bare = W_bare_lbm / kg_to_lbm
##### Compute nacelle weight
### Nondimensional parameters, given by Drela in TASOPT doc.
r_s_nace = 12
f_inlet = 0.4
f_fan = 0.2
f_exit = 0.4
r_core = 12
### Fan size in imperial units
d_fan_ft = diameter_fan * m_to_ft
d_fan_in = d_fan_ft * 12
### Compute the diameter of the LPC based on fan diameter and BPR.
d_LPC_ft = d_fan_ft * (bypass_ratio) ** -0.5
### Models from Drela in TASOPT
S_nace_sqft = r_s_nace * np.pi * (d_fan_ft / 2) ** 2
A_inlet_sqft = f_inlet * S_nace_sqft
A_fan_sqft = f_fan * S_nace_sqft
A_exit_sqft = f_exit * S_nace_sqft
A_core_sqft = r_core * np.pi * (d_LPC_ft / 2) ** 2
W_inlet_lbm = A_inlet_sqft * (2.5 + 0.0238 * d_fan_in)
W_fan_lbm = A_fan_sqft * 1.9
    W_exit_lbm = A_exit_sqft * (2.5 + 0.0363 * d_fan_in)
W_core_lbm = A_core_sqft * 1.9
W_nace_lbm = W_inlet_lbm + W_fan_lbm + W_exit_lbm + W_core_lbm
W_nace = W_nace_lbm / kg_to_lbm
##### Compute accessory and pylon weights
### Nondimensional parameters, given by Drela in TASOPT doc
f_add = 0.10
f_pylon = 0.10
W_add = f_add * W_bare
W_pylon = f_pylon * (W_bare + W_add + W_nace)
##### Compute the total weight
W_engine = W_bare + W_add + W_nace + W_pylon
return W_engine
def m_dot_corrected_over_m_dot(
temperature_total_2,
pressure_total_2,
):
"""
Computes the ratio `m_dot_corrected / m_dot`, where:
* `m_dot_corrected` is the corrected mass flow rate, where corrected refers to correction to ISO 3977 standard
temperature and pressure conditions (15C, 101325 Pa).
* `m_dot` is the raw mass flow rate, at some other conditions.
Args:
temperature_total_2: The total temperature at the compressor inlet face, at the conditions to be evaluated. [K]
pressure_total_2: The total pressure at the compressor inlet face, at the conditions to be evaluated. [Pa]
Returns:
The ratio `m_dot_corrected / m_dot`.
"""
temperature_standard = 273.15 + 15
pressure_standard = 101325
return (
temperature_total_2 / temperature_standard
) ** 0.5 / (pressure_total_2 / pressure_standard)
if __name__ == '__main__':
import aerosandbox as asb
atmo = asb.Atmosphere(altitude=10668)
op_point = asb.OperatingPoint(atmo, velocity=0.80 * atmo.speed_of_sound())
m_dot_corrected_over_m_dot_ratio = m_dot_corrected_over_m_dot(
temperature_total_2=op_point.total_temperature(),
pressure_total_2=op_point.total_pressure()
)
### CFM56-2 engine test
mass_cfm56_2 = mass_turbofan( # Data here from Wikipedia, cross-referenced to other sources for sanity check.
m_dot_core_corrected=364 / (5.95 + 1),
overall_pressure_ratio=31.2,
bypass_ratio=5.95,
diameter_fan=1.73
) # real mass: (2139 to 2200 kg bare, ~3400 kg installed)
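    # Hedged demo extension (added; not part of the original file): report the quantities computed
    # above. No values are asserted; the real-engine figures quoted in the comment are for eyeballing.
    print(f"m_dot_corrected / m_dot at M = 0.80, 10,668 m: {m_dot_corrected_over_m_dot_ratio:.3f}")
    print(f"CFM56-2 installed mass estimate: {mass_cfm56_2:.0f} kg (vs. roughly 3400 kg installed, per the comment above)")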
| AeroSandbox | /AeroSandbox-4.1.1.tar.gz/AeroSandbox-4.1.1/aerosandbox/library/propulsion_turbofan.py | propulsion_turbofan.py |
def mass_hpa_wing(
span,
chord,
vehicle_mass,
n_ribs, # You should optimize on this, there's a trade between rib weight and LE sheeting weight!
n_wing_sections=1, # defaults to a single-section wing (be careful: can you disassemble/transport this?)
ultimate_load_factor=1.75, # default taken from Daedalus design
type="cantilevered", # "cantilevered", "one-wire", "multi-wire"
t_over_c=0.128, # default from DAE11
include_spar=True,
# Should we include the mass of the spar? Useful if you want to do your own primary structure calculations.
):
"""
Finds the mass of the wing structure of a human powered aircraft (HPA), following Juan Cruz's correlations in
http://journals.sfu.ca/ts/index.php/ts/article/viewFile/760/718
:param span: wing span [m]
:param chord: wing mean chord [m]
:param vehicle_mass: aircraft gross weight [kg]
:param n_ribs: number of ribs in the wing
:param n_wing_sections: number of wing sections or panels (for disassembly?)
:param ultimate_load_factor: ultimate load factor [unitless]
:param type: Type of bracing: "cantilevered", "one-wire", "multi-wire"
:param t_over_c: wing airfoil thickness-to-chord ratio
:param include_spar: Should we include the mass of the spar? Useful if you want to do your own primary structure calculations. [boolean]
:return: Wing structure mass [kg]
"""
### Primary structure
if include_spar:
if type == "cantilevered":
mass_primary_spar = (
(span * 1.17e-1 + span ** 2 * 1.10e-2) *
(1 + (ultimate_load_factor * vehicle_mass / 100 - 2) / 4)
)
elif type == "one-wire":
mass_primary_spar = (
(span * 3.10e-2 + span ** 2 * 7.56e-3) *
(1 + (ultimate_load_factor * vehicle_mass / 100 - 2) / 4)
)
elif type == "multi-wire":
mass_primary_spar = (
(span * 1.35e-1 + span ** 2 * 1.68e-3) *
(1 + (ultimate_load_factor * vehicle_mass / 100 - 2) / 4)
)
else:
raise ValueError("Bad input for 'type'!")
mass_primary = mass_primary_spar * (
11382.3 / 9222.2) # accounts for rear spar, struts, fittings, kevlar x-bracing, and wing-fuselage mounts
else:
mass_primary = 0
### Secondary structure
ratio_of_rib_spacing_to_chord = (span / n_ribs) / chord
n_end_ribs = 2 * n_wing_sections - 2
area = span * chord
# Rib mass
W_wr = n_ribs * (chord ** 2 * t_over_c * 5.50e-2 + chord * 1.91e-3)
# End rib mass
W_wer = n_end_ribs * (chord ** 2 * t_over_c * 6.62e-1 + chord * 6.57e-3)
# LE sheeting mass
W_wLE = 0.456 * (span ** 2 * ratio_of_rib_spacing_to_chord ** (4 / 3) / span)
# TE mass
W_wTE = span * 2.77e-2
# Covering
W_wc = area * 3.08e-2
mass_secondary = W_wr + W_wer + W_wLE + W_wTE + W_wc
return mass_primary + mass_secondary
def mass_wing_spar(
span,
mass_supported,
ultimate_load_factor=1.75, # default taken from Daedalus design
n_booms=1,
):
"""
Finds the mass of the spar for a wing on a single- or multi-boom lightweight aircraft. Model originally designed for solar aircraft.
Data was fit to the range 3 < wing_span < 120 [m] and 5 < supported_mass < 3000 [kg], but validity should extend somewhat beyond that.
Extremely accurate fits within this range; R^2 > 0.995 for all fits.
    Source: AeroSandbox/studies/MultiBoomSparMass_v2
Assumptions:
* Elliptical lift distribution
* Constraint that local wing dihedral/anhedral angle must not exceed 10 degrees anywhere in the ultimate load case.
* If multi-boom, assumes roughly static-aerostructurally-optimal placement of the outer booms and equal boom weights.
:param span: Wing span [m]
    :param mass_supported: Total mass of all fuselages + tails [kg]
    :param ultimate_load_factor: Design load factor. Default taken from Daedalus design.
    :param n_booms: Number of booms on the design. Can be 1, 2, or 3. Assumes optimal placement of the outer booms.
    :return: Spar mass [kg]
"""
if n_booms == 1:
c = 20.7100792220283090
span_exp = 1.6155586404697364
mass_exp = 0.3779456295164249
elif n_booms == 2:
c = 12.3247625359796285
span_exp = 1.5670343007798109
mass_exp = 0.4342199756794465
elif n_booms == 3:
c = 10.0864141678007844
span_exp = 1.5614086940653213
mass_exp = 0.4377206254456823
else:
raise ValueError("Bad value of n_booms!")
mass_eff = mass_supported * ultimate_load_factor
spar_mass = c * (span / 40) ** span_exp * (mass_eff / 300) ** mass_exp
return spar_mass
def mass_hpa_stabilizer(
span,
chord,
dynamic_pressure_at_manuever_speed,
n_ribs, # You should optimize on this, there's a trade between rib weight and LE sheeting weight!
t_over_c=0.128, # default from DAE11
include_spar=True,
# Should we include the mass of the spar? Useful if you want to do your own primary structure calculations.
):
"""
Finds the mass of a stabilizer structure of a human powered aircraft (HPA), following Juan Cruz's correlations in
http://journals.sfu.ca/ts/index.php/ts/article/viewFile/760/718
Note: apply this once to BOTH the rudder and elevator!!!
:param span: stabilizer span [m]
:param chord: stabilizer mean chord [m]
:param dynamic_pressure_at_manuever_speed: dynamic pressure at maneuvering speed [Pa]
:param n_ribs: number of ribs in the wing
:param t_over_c: wing airfoil thickness-to-chord ratio
:param include_spar: Should we include the mass of the spar? Useful if you want to do your own primary structure calculations. [boolean]
:return: Stabilizer structure mass [kg]
"""
### Primary structure
area = span * chord
q = dynamic_pressure_at_manuever_speed
if include_spar:
W_tss = (
(span * 4.15e-2 + span ** 2 * 3.91e-3) *
(1 + ((q * area) / 78.5 - 1) / 2)
)
mass_primary = W_tss
else:
mass_primary = 0
### Secondary structure
ratio_of_rib_spacing_to_chord = (span / n_ribs) / chord
# Rib mass
W_tsr = n_ribs * (chord ** 2 * t_over_c * 1.16e-1 + chord * 4.01e-3)
# Leading edge sheeting
W_tsLE = 0.174 * (area ** 2 * ratio_of_rib_spacing_to_chord ** (4 / 3) / span)
# Covering
W_tsc = area * 1.93e-2
mass_secondary = W_tsr + W_tsLE + W_tsc
### Totaling
correction_factor = ((537.8 / (537.8 - 23.7 - 15.1)) * (623.3 / (623.3 - 63.2 - 8.1))) ** 0.5
# geometric mean of Daedalus elevator and rudder corrections from misc. weight
return (mass_primary + mass_secondary) * correction_factor
def mass_hpa_tail_boom(
length_tail_boom,
dynamic_pressure_at_manuever_speed,
mean_tail_surface_area,
):
"""
Finds the mass of a tail boom structure of a human powered aircraft (HPA), following Juan Cruz's correlations in
http://journals.sfu.ca/ts/index.php/ts/article/viewFile/760/718
    Assumes a tubular tail boom of high-modulus (E > 228 GPa) graphite/epoxy
:param length_tail_boom: length of the tail boom [m]. Calculated as distance from the wing 1/4 chord to the furthest tail surface.
:param dynamic_pressure_at_manuever_speed: dynamic pressure at maneuvering speed [Pa]
:param mean_tail_surface_area: mean of the areas of the tail surfaces (elevator, rudder)
    :return: mass of the tail boom [kg]
"""
l = length_tail_boom
q = dynamic_pressure_at_manuever_speed
area = mean_tail_surface_area
w_tb = (l * 1.14e-1 + l ** 2 * 1.96e-2) * (1 + ((q * area) / 78.5 - 1) / 2)
return w_tb
def mass_surface_balsa_monokote_cf(
chord,
span,
mean_t_over_c=0.08
):
"""
Estimates the mass of a lifting surface constructed with balsa-monokote-carbon-fiber construction techniques.
Warning: Not well validated; spar sizing is a guessed scaling and not based on structural analysis.
:param chord: wing mean chord [m]
:param span: wing span [m]
:param mean_t_over_c: wing thickness-to-chord ratio [unitless]
:return: estimated surface mass [kg]
"""
mean_t = chord * mean_t_over_c
### Balsa wood + Monokote + a 1" dia CF tube spar.
monokote_mass = 0.061 * chord * span * 2 # 0.2 oz/sqft
rib_density = 200 # mass density, in kg/m^3
rib_spacing = 0.1 # one rib every x meters
rib_width = 0.003 # width of an individual rib
ribs_mass = (
(mean_t * chord * rib_width) * # volume of a rib
rib_density * # density of a rib
(span / rib_spacing) # number of ribs
)
spar_mass_1_inch = 0.2113 * span * 1.5 # assuming 1.5x 1" CF tube spar
spar_mass = spar_mass_1_inch * (
mean_t / 0.0254) ** 2 # Rough GUESS for scaling, FIX THIS before using seriously!
return (monokote_mass + ribs_mass + spar_mass) * 1.2 # for glue
def mass_surface_solid(
chord,
span,
density=2700, # kg/m^3, defaults to that of aluminum
mean_t_over_c=0.08
):
"""
Estimates the mass of a lifting surface constructed out of a solid piece of material.
Warning: Not well validated; spar sizing is a guessed scaling and not based on structural analysis.
:param chord: wing mean chord [m]
:param span: wing span [m]
:param mean_t_over_c: wing thickness-to-chord ratio [unitless]
:return: estimated surface mass [kg]
"""
mean_t = chord * mean_t_over_c
volume = chord * span * mean_t
return density * volume
if __name__ == "__main__":
import aerosandbox as asb
import aerosandbox.numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(palette=sns.color_palette("husl"))
# Daedalus wing mass validation
print(
"Daedalus wing, estimated mass: %f" %
mass_hpa_wing(
span=34,
chord=0.902,
vehicle_mass=104.1,
n_ribs=100,
n_wing_sections=5,
type="one-wire"
)
)
print(
"Daedalus wing, actual mass: %f" % 18.9854
)
nr = np.linspace(1, 400, 401)
m = mass_hpa_wing(
span=34,
chord=0.902,
vehicle_mass=104.1,
n_ribs=nr,
n_wing_sections=5,
type="one-wire"
)
plt.plot(nr, m)
plt.ylim([15, 20])
plt.grid(True)
plt.xlabel("Number of ribs")
plt.ylabel("Wing mass [kg]")
plt.title("Daedalus Wing Rib Count Optimization Test")
plt.show()
# Test rib number optimization
opti = asb.Opti()
nr_opt = opti.variable(init_guess=100)
opti.minimize(mass_hpa_wing(
span=34,
chord=0.902,
vehicle_mass=104.1,
n_ribs=nr_opt,
n_wing_sections=5,
type="one-wire"
))
sol = opti.solve()
print(f"Optimal number of ribs: {sol.value(nr_opt)}")
print(
"Daedalus elevator, estimated mass: %f" %
mass_hpa_stabilizer(
span=4.26,
chord=0.6,
dynamic_pressure_at_manuever_speed=1 / 2 * 1.225 * 7 ** 2,
n_ribs=20,
)
)
# Test Cruz and physics-based mass models
span = 34
mass_total = 104.1
mass_wing_primary_cruz = mass_hpa_wing(
span=span,
chord=0.902,
vehicle_mass=mass_total,
n_ribs=sol.value(nr_opt),
n_wing_sections=1,
type="cantilevered"
) - mass_hpa_wing(
span=span,
chord=0.902,
vehicle_mass=mass_total,
n_ribs=sol.value(nr_opt),
n_wing_sections=1,
type="cantilevered",
include_spar=False
)
mass_wing_primary_physics = mass_wing_spar(
span=span,
mass_supported=mass_total
)
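    # Hedged demo extension (added; not part of the original file): print the two primary-structure
    # estimates computed above, so the Cruz correlation and the physics-based spar fit can be compared.
    print(f"Daedalus-like wing primary structure, Cruz correlation:   {mass_wing_primary_cruz:.2f} kg")
    print(f"Daedalus-like wing primary structure, physics-based spar: {mass_wing_primary_physics:.2f} kg")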
| AeroSandbox | /AeroSandbox-4.1.1.tar.gz/AeroSandbox-4.1.1/aerosandbox/library/mass_structural.py | mass_structural.py |
import aerosandbox.numpy as np
from aerosandbox.modeling.splines.hermite import linear_hermite_patch, cubic_hermite_patch
def sears_haack_drag(
radius_max: float,
length: float
) -> float:
"""
Yields the idealized drag area (denoted CDA, or equivalently, D/q) of a Sears-Haack body.
Assumes linearized supersonic (Prandtl-Glauert) flow.
https://en.wikipedia.org/wiki/Sears%E2%80%93Haack_body
Note that drag coefficient and drag area are independent of Mach number for this case (assuming linearized supersonic aero).
Args:
radius_max: The maximum radius of the Sears-Haack body.
length: The length of the Sears-Haack body.
Returns: The drag area (CDA, or D/q) of the body. To get the drag force, multiply by the dynamic pressure.
"""
    # Equivalent to (9 * pi / 2) * (A_max / length) ** 2, with A_max = pi * radius_max ** 2 (the max cross-sectional area).
    CDA = 9 * np.pi ** 3 * radius_max ** 4 / (2 * length ** 2)
return CDA
def sears_haack_drag_from_volume(
volume: float,
length: float
) -> float:
"""
See documentation for sears_haack_drag() in this same file.
Identical, except takes volume as an input rather than max radius.
Also returns a drag area (denoted CDA, or equivalently, D/q).
"""
CDA = 128 * volume ** 2 / (np.pi * length ** 4)
return CDA
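# Cross-check (added note): for a Sears-Haack body, volume = (3 * pi^2 / 16) * radius_max^2 * length.
# Substituting this into sears_haack_drag_from_volume() gives CDA = (9 * pi^3 / 2) * radius_max^4 / length^2,
# so the two functions above should return identical drag areas for a consistent radius/volume pair.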
def mach_crit_Korn(
CL,
t_over_c,
sweep=0,
kappa_A=0.95
):
"""
Wave drag_force coefficient prediction using the low-fidelity Korn Equation method;
derived in "Configuration Aerodynamics" by W.H. Mason, Sect. 7.5.2, pg. 7-18
Args:
CL: Sectional lift coefficient
t_over_c: thickness-to-chord ratio
sweep: sweep angle, in degrees
kappa_A: Airfoil technology factor (0.95 for supercritical section, 0.87 for NACA 6-series)
Returns:
"""
smooth_abs_CL = np.softmax(CL, -CL, hardness=10)
M_dd = kappa_A / np.cosd(sweep) - t_over_c / np.cosd(sweep) ** 2 - smooth_abs_CL / (10 * np.cosd(sweep) ** 3)
M_crit = M_dd - (0.1 / 80) ** (1 / 3)
return M_crit
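# Example (added; hypothetical inputs): mach_crit_Korn(CL=0.5, t_over_c=0.12, sweep=25, kappa_A=0.95)
# gives M_dd of roughly 0.83, and hence M_crit of roughly 0.73 after subtracting the fixed
# (0.1 / 80) ** (1 / 3) ~ 0.108 offset used throughout this module.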
def approximate_CD_wave(
mach,
mach_crit,
CD_wave_at_fully_supersonic,
):
"""
An approximate relation for computing transonic wave drag, based on an object's Mach number.
Considered reasonably valid from Mach 0 up to around Mach 2 or 3-ish.
Methodology is a combination of:
* The methodology described in Raymer, "Aircraft Design: A Conceptual Approach", Section 12.5.10 Transonic Parasite Drag (pg. 449 in Ed. 2)
and
* The methodology described in W.H. Mason's Configuration Aerodynamics, Chapter 7. Transonic Aerodynamics of Airfoils and Wings.
Args:
mach: Mach number at the operating point to be evaluated
mach_crit: Critical mach number, a function of the body geometry
CD_wave_at_fully_supersonic: The wave drag coefficient of the body at the speed that it first goes (
effectively) fully supersonic.
Here, that is taken to mean at the Mach 1.2 case.
This value should probably be derived using something similar to a Sears-Haack relation for the body in
question, with a markup depending on geometry smoothness.
The CD_wave predicted by this function will match this value exactly at M=1.2 and M=1.05.
The peak CD_wave that is predicted is ~1.23 * this value, which occurs at M=1.10.
In the high-Mach limit, this function asymptotes at 0.80 * this value, as empirically stated by Raymer.
However, this model is only approximate and is likely not valid for high-supersonic flows.
Returns: The approximate wave drag coefficient at the specified Mach number.
The reference area is whatever the reference area used in the `CD_wave_at_fully_supersonic` parameter is.
"""
mach_crit_max = 1 - (0.1 / 80) ** (1 / 3)
mach_crit = -np.softmax(
-mach_crit,
-mach_crit_max,
hardness=50
)
### The following approximate relation is derived in W.H. Mason, "Configuration Aerodynamics", Chapter 7. Transonic Aerodynamics of Airfoils and Wings.
### Equation 7-8 on Page 7-19.
### This is in turn based on Lock's proposed empirically-derived shape of the drag rise, from Hilton, W.F., High Speed Aerodynamics, Longmans, Green & Co., London, 1952, pp. 47-49
mach_dd = mach_crit + (0.1 / 80) ** (1 / 3)
### Model drag sections and cutoffs:
return CD_wave_at_fully_supersonic * np.where(
mach < mach_crit,
0,
np.where(
mach < mach_dd,
20 * (mach - mach_crit) ** 4,
np.where(
mach < 1.05,
cubic_hermite_patch(
mach,
x_a=mach_dd,
x_b=1.05,
f_a=20 * (0.1 / 80) ** (4 / 3),
f_b=1,
dfdx_a=0.1,
dfdx_b=8
),
np.where(
mach < 1.2,
cubic_hermite_patch(
mach,
x_a=1.05,
x_b=1.2,
f_a=1,
f_b=1,
dfdx_a=8,
dfdx_b=-4
),
np.blend(
switch=4 * 2 * (mach - 1.2) / (1.2 - 0.8),
value_switch_high=0.8,
value_switch_low=1.2,
)
# 0.8 + 0.2 * np.exp(20 * (1.2 - mach))
)
)
)
)
if __name__ == '__main__':
mc = 0.6
drag = lambda mach: approximate_CD_wave(
mach,
mach_crit=mc,
CD_wave_at_fully_supersonic=1,
)
import matplotlib.pyplot as plt
import aerosandbox.tools.pretty_plots as p
fig, ax = plt.subplots(1, 3, figsize=(10, 5))
mach = np.linspace(0., 2, 10000)
drag = drag(mach)
ddragdm = np.gradient(drag, np.diff(mach)[0])
dddragdm = np.gradient(ddragdm, np.diff(mach)[0])
plt.sca(ax[0])
plt.title("$C_D$")
plt.ylabel("$C_{D, wave} / C_{D, wave, M=1.2}$")
plt.plot(mach, drag)
plt.ylim(-0.05, 1.5)
# plt.ylim(-0.01, 0.05)
plt.sca(ax[1])
plt.title("$d(C_D)/d(M)$")
plt.ylabel(r"$\frac{d(C_{D, wave})}{dM}$")
plt.plot(mach, ddragdm)
plt.ylim(-5, 15)
plt.sca(ax[2])
plt.title("$d^2(C_D)/d(M)^2$")
plt.ylabel(r"$\frac{d^2(C_{D, wave})}{dM^2}$")
plt.plot(mach, dddragdm)
# plt.ylim(-5, 15)
for a in ax:
plt.sca(a)
plt.xlim(0.6, 1.2)
plt.xlabel("Mach [-]")
p.show_plot()
| AeroSandbox | /AeroSandbox-4.1.1.tar.gz/AeroSandbox-4.1.1/aerosandbox/library/aerodynamics/transonic.py | transonic.py |
import aerosandbox.numpy as np
from typing import Union
def CDA_control_linkage(
Re_l: Union[float, np.ndarray],
linkage_length: Union[float, np.ndarray],
is_covered: Union[bool, np.ndarray] = False,
is_top: Union[bool, np.ndarray] = False,
) -> Union[float, np.ndarray]:
"""
    Computes the drag area (CDA) of a typical control linkage as used on a well-manufactured RC airplane.
The drag area (CDA) is defined as: CDA == D / q, where:
- D is the drag force (dimensionalized, e.g., in Newtons)
- q is the freestream dynamic pressure (dimensionalized, e.g., in Pascals)
See study with original data at `AeroSandbox/studies/LinkageDrag`.
Data from:
* Hepperle, Martin. "Drag of Linkages". https://www.mh-aerotools.de/airfoils/linkage.htm
* Summarizes data from "Werner Würz, published in the papers of the ISF-Seminar in December 1989 in Baden, Switzerland."
Args:
Re_l: Reynolds number, with reference length as the length of the linkage.
linkage_length: The length of the linkage. [m]
is_covered: A boolean of whether an aerodynamic fairing is placed around the linkage.
is_top: A boolean of whether the linkage is on the top surface of the wing (True) or the bottom surface (
False). Differences in local boundary layer and inviscid effects cause local velocity changes.
Returns: The drag area [m^2] of the control linkage.
"""
x = dict(
Re_l=Re_l,
linkage_length=linkage_length,
is_covered=is_covered,
is_top=is_top
)
p = {
'CD0' : 7.833083680086374e-05,
'CD1' : 0.0001216877860785463,
'c_length' : 30.572471745477774,
'covered_drag_ratio': 0.7520722978405192,
'top_drag_ratio' : 1.1139040832208857
}
Re = x["Re_l"]
linkage_length = x["linkage_length"]
is_covered = x["is_covered"]
is_top = x["is_top"]
side_drag_multiplier = np.where(
is_top,
p["top_drag_ratio"],
1
)
covered_drag_multiplier = np.where(
is_covered,
p["covered_drag_ratio"],
1
)
linkage_length_multiplier = 1 + p["c_length"] * linkage_length
CDA_raw = (
p["CD1"] / (Re / 1e5) +
p["CD0"]
)
return side_drag_multiplier * covered_drag_multiplier * linkage_length_multiplier * CDA_raw
def CDA_control_surface_gaps(
local_chord: float,
control_surface_span: float,
local_thickness_over_chord: float = 0.12,
control_surface_hinge_x: float = 0.75,
n_side_gaps: int = 2,
side_gap_width: float = None,
hinge_gap_width: float = None,
) -> float:
"""
Computes the drag area (CDA) of the gaps associated with a typical wing control surface.
(E.g., aileron, flap, elevator, rudder).
The drag area (CDA) is defined as: CDA == D / q, where:
- D is the drag force (dimensionalized, e.g., in Newtons)
- q is the freestream dynamic pressure (dimensionalized, e.g., in Pascals)
This drag area consists of two sources:
1. Chordwise gaps at the side edges of the control surface ("side gaps")
2. Spanwise gaps at the hinge line of the control surface ("hinge gap")
Args:
local_chord: The local chord of the wing at the midpoint of the control surface. [meters]
control_surface_span: The span of the control surface. [meters]
local_thickness_over_chord: The local thickness-to-chord ratio of the wing at the midpoint of the control
surface. [nondimensional] For example, this is 0.12 for a NACA0012 airfoil.
control_surface_hinge_x: The x-location of the hinge line of the control surface, as a fraction of the local
chord. [nondimensional] Defaults to x_hinge / c = 0.75, which is typical for an aileron.
n_side_gaps: The number of "side gaps" to count on this control surface when computing drag. Defaults to 2 (
i.e., one inboard gap, one outboard gap), which is the simplest case of a wing with a single partial-span
aileron. However, there may be cases where it is best to reduce this to 1 or 0. For example:
* A wing with a single full-span aileron would have 1 side gap (at the wing root, but not at the tip).
* A wing with a flap and aileron that share a chordwise gap would be best modeled by setting
n_side_gaps = 1 ( so that no double-counting occurs).
side_gap_width: The width of the chordwise gaps at the side edges of the control surface [meters]. If this is
left as the default (None), then a typical value will be computed based on the local chord and control surface
span.
hinge_gap_width: The width of the spanwise gap at the hinge line of the control surface [meters]. If this is
left as the default (None), then a typical value will be computed based on the local chord.
Returns: The drag area [m^2] of the gaps associated with the control surface. This should be added to the "clean"
wing drag to get a more realistic drag estimate.
"""
if side_gap_width is None:
side_gap_width = np.maximum(
np.maximum(
0.002,
0.006 * local_chord
),
control_surface_span * 0.01
)
if hinge_gap_width is None:
hinge_gap_width = 0.03 * local_chord
### Chordwise gaps (at side edges of control surface)
"""
Based on Hoerner, "Fluid Dynamic Drag", 1965, p. 5-13. Figure 26, "Drag of longitudinal wing gaps,
tested on 2412 airfoil at C_L = 0.1 and Re_c = 2 * 10^6"
"""
CDA_side_gaps = n_side_gaps * (side_gap_width * local_chord * local_thickness_over_chord) * 0.50
### Spanwise gaps (at hinge line of control surface)
"""
Based on Hoerner, "Fluid Dynamic Drag", 1965, p. 5-13. Figure 27, "Evaluation of drag due to control gaps"
"""
CDA_hinge_gap = 0.025 * hinge_gap_width * control_surface_span
### Total
return CDA_side_gaps + CDA_hinge_gap
def CDA_protruding_bolt_or_rivet(
diameter: float,
kind: str = "flush_rivet"
):
"""
Computes the drag area (CDA) of a protruding bolt or rivet.
The drag area (CDA) is defined as: CDA == D / q, where:
- D is the drag force (dimensionalized, e.g., in Newtons)
- q is the freestream dynamic pressure (dimensionalized, e.g., in Pascals)
Args:
diameter: The diameter of the bolt or rivet. [meters]
kind: The type of bolt or rivet. Valid options are:
- "flush_rivet"
- "round_rivet"
- "flat_head_bolt"
- "round_head_bolt"
- "cylindrical_bolt"
- "hex_bolt"
Returns: The drag area [m^2] of the bolt or rivet.
"""
S_ref = np.pi * diameter ** 2 / 4
CD_factors = {
"flush_rivet" : 0.002,
"round_rivet" : 0.04,
"flat_head_bolt" : 0.02,
"round_head_bolt" : 0.32,
"cylindrical_bolt": 0.42,
"hex_bolt" : 0.80,
}
try:
CDA = CD_factors[kind] * S_ref
except KeyError:
raise ValueError("Invalid `kind` of bolt or rivet.")
return CDA
def CDA_perpendicular_sheet_metal_joint(
joint_width: float,
sheet_metal_thickness: float,
kind: str = "butt_joint_with_inside_joiner"
):
"""
Computes the drag area (CDA) of a sheet metal joint that is perpendicular to the flow.
(E.g., spanwise on the wing, or circumferential on the fuselage).
The drag area (CDA) is defined as: CDA == D / q, where:
- D is the drag force (dimensionalized, e.g., in Newtons)
- q is the freestream dynamic pressure (dimensionalized, e.g., in Pascals)
Args:
joint_width: The width of the joint (perpendicular to the airflow, e.g., spanwise on a wing). [meters]
sheet_metal_thickness: The thickness of the sheet metal. [meters]
kind: The type of joint. Valid options are:
- "butt_joint_with_inside_joiner"
- "butt_joint_with_inside_weld"
- "butt_joint_with_outside_joiner"
- "butt_joint_with_outside_weld"
- "lap_joint_forward_facing_step"
- "lap_joint_backward_facing_step"
- "lap_joint_forward_facing_step_with_bevel"
- "lap_joint_backward_facing_step_with_bevel"
- "lap_joint_forward_facing_step_with_rounded_bevel"
- "lap_joint_backward_facing_step_with_rounded_bevel"
- "flush_lap_joint_forward_facing_step"
- "flush_lap_joint_backward_facing_step"
Returns: The drag area [m^2] of the sheet metal joint.
"""
S_ref = joint_width * sheet_metal_thickness
CD_factors = {
"butt_joint_with_inside_joiner" : 0.01,
"butt_joint_with_inside_weld" : 0.01,
"butt_joint_with_outside_joiner" : 0.70,
"butt_joint_with_outside_weld" : 0.51,
"lap_joint_forward_facing_step" : 0.40,
"lap_joint_backward_facing_step" : 0.22,
"lap_joint_forward_facing_step_with_bevel" : 0.11,
"lap_joint_backward_facing_step_with_bevel" : 0.24,
"lap joint_forward_facing_step_with_rounded_bevel" : 0.04,
"lap_joint_backward_facing_step_with_rounded_bevel": 0.16,
"flush_lap_joint_forward_facing_step" : 0.13,
"flush_lap_joint_backward_facing_step" : 0.07,
}
try:
CDA = CD_factors[kind] * S_ref
except KeyError:
raise ValueError("Invalid `kind` of sheet metal joint.")
return CDA
# def CDA_pitot_static_tube():
# pass
#
#
# def CDA_landing_gear():
# pass
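

if __name__ == '__main__':
    # Hedged usage sketch (added; not part of the original file). The geometry below is hypothetical,
    # chosen only to show how the component drag-area estimates in this module can be summed.
    CDA_example = (
        2 * CDA_control_linkage(Re_l=2e5, linkage_length=0.10)  # e.g., two exposed aileron linkages
        + CDA_control_surface_gaps(local_chord=0.30, control_surface_span=0.80)
        + 10 * CDA_protruding_bolt_or_rivet(diameter=0.004, kind="round_head_bolt")
    )
    print(f"Example summed component drag area: {float(CDA_example) * 1e4:.1f} cm^2")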
| AeroSandbox | /AeroSandbox-4.1.1.tar.gz/AeroSandbox-4.1.1/aerosandbox/library/aerodynamics/components.py | components.py |
import aerosandbox.numpy as np
def Cd_cylinder(
Re_D: float,
mach: float = 0.,
include_mach_effects=True,
subcritical_only=False
) -> float:
"""
Returns the drag coefficient of a cylinder in crossflow as a function of its Reynolds number and Mach.
Args:
Re_D: Reynolds number, referenced to diameter
mach: Mach number
include_mach_effects: If this is set False, it assumes Mach = 0, which simplifies the computation.
        subcritical_only: If True, uses a fit to purely subcritical (Re < 300k) cylinder flows. Useful, since
            this model is then convex and can be more well-behaved in an optimization context.
    Returns:
        Drag coefficient of the cylinder in crossflow [-]
    """
    # TODO rework this function to use tanh blending, which will mitigate overflows
##### Do the viscous part of the computation
csigc = 5.5766722118597247
csigh = 23.7460859935990563
csub0 = -0.6989492360435040
csub1 = 1.0465189382830078
csub2 = 0.7044228755898569
csub3 = 0.0846501115443938
csup0 = -0.0823564417206403
csupc = 6.8020230357616764
csuph = 9.9999999999999787
csupscl = -0.4570690347113859
x = np.log10(np.abs(Re_D) + 1e-16)
if subcritical_only:
Cd_mach_0 = 10 ** (csub0 * x + csub1) + csub2 + csub3 * x
else:
log10_Cd = (
(np.log10(10 ** (csub0 * x + csub1) + csub2 + csub3 * x))
* (1 - 1 / (1 + np.exp(-csigh * (x - csigc))))
+ (csup0 + csupscl / csuph * np.log(np.exp(csuph * (csupc - x)) + 1))
* (1 / (1 + np.exp(-csigh * (x - csigc))))
)
Cd_mach_0 = 10 ** log10_Cd
##### Do the compressible part of the computation
if include_mach_effects:
m = mach
p = {'a_sub' : 0.03458900259594298,
'a_sup' : -0.7129528087049688,
'cd_sub' : 1.163206940186374,
'cd_sup' : 1.2899213533122527,
's_sub' : 3.436601777569716,
's_sup' : -1.37123096976983,
'trans' : 1.022819211244295,
'trans_str': 19.017600596069848}
Cd_over_Cd_mach_0 = np.blend(
p["trans_str"] * (m - p["trans"]),
p["cd_sup"] + np.exp(p["a_sup"] + p["s_sup"] * (m - p["trans"])),
p["cd_sub"] + np.exp(p["a_sub"] + p["s_sub"] * (m - p["trans"]))
) / 1.1940010047391572
Cd = Cd_mach_0 * Cd_over_Cd_mach_0
else:
Cd = Cd_mach_0
return Cd
def Cf_flat_plate(
Re_L: float,
method="hybrid-sharpe-convex"
) -> float:
"""
Returns the mean skin friction coefficient over a flat plate.
Don't forget to double it (two sides) if you want a drag coefficient.
Args:
Re_L: Reynolds number, normalized to the length of the flat plate.
method: The method of computing the skin friction coefficient. One of:
* "blasius": Uses the Blasius solution. Citing Cengel and Cimbala, "Fluid Mechanics: Fundamentals and
Applications", Table 10-4.
Valid approximately for Re_L <= 5e5.
* "turbulent": Uses turbulent correlations for smooth plates. Citing Cengel and Cimbala,
"Fluid Mechanics: Fundamentals and Applications", Table 10-4.
Valid approximately for 5e5 <= Re_L <= 1e7.
* "hybrid-cengel": Uses turbulent correlations for smooth plates, but accounts for a
non-negligible laminar run at the beginning of the plate. Citing Cengel and Cimbala, "Fluid Mechanics:
                Fundamentals and Applications", Table 10-4.
Valid approximately for 5e5 <= Re_L <= 1e7.
* "hybrid-schlichting": Schlichting's model, that roughly accounts for a non-negligtible laminar run.
Citing "Boundary Layer Theory" 7th Ed., pg. 644
* "hybrid-sharpe-convex": A hybrid model that blends the Blasius and Schlichting models. Convex in
log-log space; however, it may overlook some truly nonconvex behavior near transitional Reynolds numbers.
* "hybrid-sharpe-nonconvex": A hybrid model that blends the Blasius and Cengel models. Nonconvex in
log-log-space; however, it may capture some truly nonconvex behavior near transitional Reynolds numbers.
Returns:
C_f: The skin friction coefficient, normalized to the length of the plate.
You can view all of these functions graphically using
`aerosandbox.library.aerodynamics.test_aerodynamics.test_Cf_flat_plate.py`
"""
Re_L = np.abs(Re_L)
if method == "blasius":
return 1.328 / Re_L ** 0.5
elif method == "turbulent":
return 0.074 / Re_L ** (1 / 5)
elif method == "hybrid-cengel":
return 0.074 / Re_L ** (1 / 5) - 1742 / Re_L
elif method == "hybrid-schlichting":
return 0.02666 * Re_L ** -0.139
elif method == "hybrid-sharpe-convex":
return np.softmax(
Cf_flat_plate(Re_L, method="blasius"),
Cf_flat_plate(Re_L, method="hybrid-schlichting"),
hardness=1e3
)
elif method == "hybrid-sharpe-nonconvex":
return np.softmax(
Cf_flat_plate(Re_L, method="blasius"),
Cf_flat_plate(Re_L, method="hybrid-cengel"),
hardness=1e3
        )
    else:
        raise ValueError("Invalid value of `method`.")
def Cl_flat_plate(alpha, Re_c=None):
"""
Returns the approximate lift coefficient of a flat plate, following thin airfoil theory.
:param alpha: Angle of attack [deg]
:param Re_c: Reynolds number, normalized to the length of the flat plate.
:return: Approximate lift coefficient.
"""
if Re_c is not None:
from warnings import warn
warn("`Re_c` input will be deprecated in a future version.")
alpha_rad = alpha * np.pi / 180
return 2 * np.pi * alpha_rad
def Cd_flat_plate_normal():
"""
    Returns the drag coefficient of a flat plate oriented normal to the flow (i.e., alpha = 90 deg).
Uses results from Tian, Xinliang, Muk Chen Ong, Jianmin Yang, and Dag Myrhaug. “Large-Eddy Simulation of the Flow
Normal to a Flat Plate Including Corner Effects at a High Reynolds Number.” Journal of Fluids and Structures 49 (
August 2014): 149–69. https://doi.org/10.1016/j.jfluidstructs.2014.04.008.
Note: Cd for this case is effectively invariant of Re.
Returns: Drag coefficient
"""
return 2.202
def Cl_2412(alpha, Re_c):
# A curve fit I did to a NACA 2412 airfoil, 2D XFoil data
# Within -2 < alpha < 12 and 10^5 < Re_c < 10^7, has R^2 = 0.9892
return 0.2568 + 0.1206 * alpha - 0.002018 * alpha ** 2
def Cd_profile_2412(alpha, Re_c):
# A curve fit I did to a NACA 2412 airfoil in incompressible flow.
# Within -2 < alpha < 12 and 10^5 < Re_c < 10^7, has R^2 = 0.9713
Re_c = np.maximum(Re_c, 1)
log_Re = np.log(Re_c)
CD0 = -5.249
Re0 = 15.61
Re1 = 15.31
alpha0 = 1.049
alpha1 = -4.715
cx = 0.009528
cxy = -0.00588
cy = 0.04838
log_CD = CD0 + cx * (alpha - alpha0) ** 2 + cy * (log_Re - Re0) ** 2 + cxy * (alpha - alpha1) * (
log_Re - Re1) # basically, a rotated paraboloid in logspace
CD = np.exp(log_CD)
return CD
def Cl_e216(alpha, Re_c):
# A curve fit I did to a Eppler 216 (e216) airfoil, 2D XFoil data. Incompressible flow.
# Within -2 < alpha < 12 and 10^4 < Re_c < 10^6, has R^2 = 0.9994
# Likely valid from -6 < alpha < 12 and 10^4 < Re_c < Inf.
# See: C:\Projects\GitHub\firefly_aerodynamics\Gists and Ideas\XFoil Drag Fitting\e216
Re_c = np.fmax(Re_c, 1)
log10_Re = np.log10(Re_c)
# Coeffs
a1l = 3.0904412662858878e-02
a1t = 9.6452654383488254e-02
a4t = -2.5633334023068302e-05
asl = 6.4175433185427011e-01
atr = 3.6775107602844948e-01
c0l = -2.5909363461176749e-01
c0t = 8.3824440586718862e-01
ctr = 1.1431810545735890e+02
ksl = 5.3416670116733611e-01
rtr = 3.9713338634462829e+01
rtr2 = -3.3634858542657771e+00
xsl = -1.2220899840236835e-01
a = alpha
r = log10_Re
Cl = (c0t + a1t * a + a4t * a ** 4) * 1 / (1 + np.exp(ctr - rtr * r - atr * a - rtr2 * r ** 2)) + (
c0l + a1l * a + asl / (1 + np.exp(-ksl * (a - xsl)))) * (
1 - 1 / (1 + np.exp(ctr - rtr * r - atr * a - rtr2 * r ** 2)))
return Cl
def Cd_profile_e216(alpha, Re_c):
# A curve fit I did to a Eppler 216 (e216) airfoil, 2D XFoil data. Incompressible flow.
# Within -2 < alpha < 12 and 10^4 < Re_c < 10^6, has R^2 = 0.9995
# Likely valid from -6 < alpha < 12 and 10^4 < Re_c < 10^6.
# see: C:\Projects\GitHub\firefly_aerodynamics\Gists and Ideas\XFoil Drag Fitting\e216
Re_c = np.fmax(Re_c, 1)
log10_Re = np.log10(Re_c)
# Coeffs
a1l = 4.7167470806940448e-02
a1t = 7.5663005080888857e-02
a2l = 8.7552076545610764e-04
a4t = 1.1220763679805319e-05
atr = 4.2456038382581129e-01
c0l = -1.4099657419753771e+00
c0t = -2.3855286371940609e+00
ctr = 9.1474872611212135e+01
rtr = 3.0218483612170434e+01
rtr2 = -2.4515094313899279e+00
a = alpha
r = log10_Re
log10_Cd = (c0t + a1t * a + a4t * a ** 4) * 1 / (1 + np.exp(ctr - rtr * r - atr * a - rtr2 * r ** 2)) + (
c0l + a1l * a + a2l * a ** 2) * (1 - 1 / (1 + np.exp(ctr - rtr * r - atr * a - rtr2 * r ** 2)))
Cd = 10 ** log10_Cd
return Cd
def Cd_wave_e216(Cl, mach, sweep=0.):
r"""
A curve fit I did to Eppler 216 airfoil data.
Within -0.4 < CL < 0.75 and 0 < mach < ~0.9, has R^2 = 0.9982.
See: C:\Projects\GitHub\firefly_aerodynamics\MSES Interface\analysis\e216
:param Cl: Lift coefficient
:param mach: Mach number
:param sweep: Sweep angle, in deg
:return: Wave drag coefficient.
"""
mach = np.fmax(mach, 0)
mach_perpendicular = mach * np.cosd(sweep) # Relation from FVA Eq. 8.176
Cl_perpendicular = Cl / np.cosd(sweep) ** 2 # Relation from FVA Eq. 8.177
# Coeffs
c0 = 7.2685945744797997e-01
c1 = -1.5483144040727698e-01
c3 = 2.1305118052118968e-01
c4 = 7.8812272501525316e-01
c5 = 3.3888938102072169e-03
l0 = 1.5298928303149546e+00
l1 = 5.2389999717540392e-01
m = mach_perpendicular
l = Cl_perpendicular
Cd_wave = (np.fmax(m - (c0 + c1 * np.sqrt(c3 + (l - c4) ** 2) + c5 * l), 0) * (l0 + l1 * l)) ** 2
return Cd_wave
def Cl_rae2822(alpha, Re_c):
# A curve fit I did to a RAE2822 airfoil, 2D XFoil data. Incompressible flow.
# Within -2 < alpha < 12 and 10^4 < Re_c < 10^6, has R^2 = 0.9857
# Likely valid from -6 < alpha < 12 and 10^4 < Re_c < 10^6.
# See: C:\Projects\GitHub\firefly_aerodynamics\Gists and Ideas\XFoil Drag Fitting\rae2822
Re_c = np.fmax(Re_c, 1)
log10_Re = np.log10(Re_c)
# Coeffs
a1l = 5.5686866813855172e-02
a1t = 9.7472055628494134e-02
a4l = -7.2145733312046152e-09
a4t = -3.6886704372829236e-06
atr = 8.3723547264375520e-01
atr2 = -8.3128119739031697e-02
c0l = -4.9103908291438701e-02
c0t = 2.3903424824298553e-01
ctr = 1.3082854754897108e+01
rtr = 2.6963082864300731e+00
a = alpha
r = log10_Re
Cl = (c0t + a1t * a + a4t * a ** 4) * 1 / (1 + np.exp(ctr - rtr * r - atr * a - atr2 * a ** 2)) + (
c0l + a1l * a + a4l * a ** 4) * (1 - 1 / (1 + np.exp(ctr - rtr * r - atr * a - atr2 * a ** 2)))
return Cl
def Cd_profile_rae2822(alpha, Re_c):
# A curve fit I did to a RAE2822 airfoil, 2D XFoil data. Incompressible flow.
# Within -2 < alpha < 12 and 10^4 < Re_c < 10^6, has R^2 = 0.9995
# Likely valid from -6 < alpha < 12 and 10^4 < Re_c < Inf.
# see: C:\Projects\GitHub\firefly_aerodynamics\Gists and Ideas\XFoil Drag Fitting\e216
Re_c = np.fmax(Re_c, 1)
log10_Re = np.log10(Re_c)
# Coeffs
at = 8.1034027621509015e+00
c0l = -8.4296746456429639e-01
c0t = -1.3700609138855402e+00
kart = -4.1609994062600880e-01
kat = 5.9510959342452441e-01
krt = -7.1938030052506197e-01
r1l = 1.1548628822014631e-01
r1t = -4.9133662875044504e-01
rt = 5.0070459892411696e+00
a = alpha
r = log10_Re
log10_Cd = (c0t + r1t * (r - 4)) * (
1 / (1 + np.exp(kat * (a - at) + krt * (r - rt) + kart * (a - at) * (r - rt)))) + (
c0l + r1l * (r - 4)) * (
1 - 1 / (1 + np.exp(kat * (a - at) + krt * (r - rt) + kart * (a - at) * (r - rt))))
Cd = 10 ** log10_Cd
return Cd
def Cd_wave_rae2822(Cl, mach, sweep=0.):
r"""
A curve fit I did to RAE2822 airfoil data.
Within -0.4 < CL < 0.75 and 0 < mach < ~0.9, has R^2 = 0.9982.
See: C:\Projects\GitHub\firefly_aerodynamics\MSES Interface\analysis\rae2822
:param Cl: Lift coefficient
:param mach: Mach number
:param sweep: Sweep angle, in deg
:return: Wave drag coefficient.
"""
mach = np.fmax(mach, 0)
mach_perpendicular = mach * np.cosd(sweep) # Relation from FVA Eq. 8.176
Cl_perpendicular = Cl / np.cosd(sweep) ** 2 # Relation from FVA Eq. 8.177
# Coeffs
c2 = 4.5776476424519119e+00
mc0 = 9.5623337929607111e-01
mc1 = 2.0552787101770234e-01
mc2 = 1.1259268018737063e+00
mc3 = 1.9538856688443659e-01
m = mach_perpendicular
l = Cl_perpendicular
Cd_wave = np.fmax(m - (mc0 - mc1 * np.sqrt(mc2 + (l - mc3) ** 2)), 0) ** 2 * c2
return Cd_wave
def Cd_wave_Korn(Cl, t_over_c, mach, sweep=0, kappa_A=0.95):
"""
Wave drag_force coefficient prediction using the low-fidelity Korn Equation method;
derived in "Configuration Aerodynamics" by W.H. Mason, Sect. 7.5.2, pg. 7-18
:param Cl: Sectional lift coefficient
:param t_over_c: thickness-to-chord ratio
:param sweep: sweep angle, in degrees
:param kappa_A: Airfoil technology factor (0.95 for supercritical section, 0.87 for NACA 6-series)
:return: Wave drag coefficient
"""
smooth_abs_Cl = np.softmax(Cl, -Cl, hardness=10)
mach = np.fmax(mach, 0)
Mdd = kappa_A / np.cosd(sweep) - t_over_c / np.cosd(sweep) ** 2 - smooth_abs_Cl / (10 * np.cosd(sweep) ** 3)
Mcrit = Mdd - (0.1 / 80) ** (1 / 3)
Cd_wave = np.where(
mach > Mcrit,
20 * (mach - Mcrit) ** 4,
0
)
return Cd_wave
def fuselage_upsweep_drag_area(
upsweep_angle_rad: float,
fuselage_xsec_area_max: float,
) -> float:
"""
Calculates the drag area (in m^2) of the aft end of a fuselage with a given upsweep angle.
Upsweep is the characteristic shape seen on the aft end of many fuselages in transport aircraft, where the
centerline of the fuselage is angled upwards near the aft end. This is done to reduce the required landing gear
height for adequate takeoff rotation, which in turn reduces mass. This nonzero centerline angle can cause some
separation drag, which is predicted here.
Equation is from Raymer, Aircraft Design: A Conceptual Approach, 5th Ed., Eq. 12.36, pg. 440.
Args:
upsweep_angle_rad: The upsweep angle of the aft end of the fuselage relative to the centerline, in radians.
fuselage_xsec_area_max: The maximum cross-sectional area of the fuselage, in m^2.
Returns: The drag area of the aft end of the fuselage [m^2]. This is equivalent to D/q, where D is the drag force
and q is the dynamic pressure.
"""
return 3.83 * np.abs(upsweep_angle_rad) ** 2.5 * fuselage_xsec_area_max
if __name__ == "__main__":
pass
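    # Hedged sanity checks (added; not part of the original file): evaluate a few of the models above
    # at representative conditions and print the results. No reference values are asserted here.
    print(f"Cd_cylinder(Re_D=1e5):                              {float(Cd_cylinder(1e5)):.3f}")
    print(f"Cf_flat_plate(Re_L=1e6, default hybrid method):     {float(Cf_flat_plate(1e6)):.5f}")
    print(f"Cd_wave_Korn(Cl=0.5, t/c=0.12, mach=0.8, sweep=25): {float(Cd_wave_Korn(0.5, 0.12, 0.8, sweep=25)):.5f}")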
# # Run some checks
# import matplotlib.pyplot as plt
# import matplotlib.style as style
# import plotly.express as px
# import plotly.graph_objects as go
# import dash
#
# style.use("seaborn")
#
# # # E216 checks
# alpha_inputs = np.linspace(-6, 12, 200)
# Re_inputs = np.logspace(4, 6, 200)
# alphas = []
# Res = []
# CLs = []
# CDs = []
# for alpha in alpha_inputs:
# for Re in Re_inputs:
# alphas.append(alpha)
# Res.append(Re)
# CLs.append(Cl_e216(alpha, Re))
# CDs.append(Cd_profile_e216(alpha, Re))
# px.scatter_3d(
# x=alphas,
# y=Res,
# z=CLs,
# size=np.ones_like(alphas),
# color=CLs,
# log_y=True,
# labels={"x": "alphas", "y": "Re", "z": "CL"}
# ).show()
# px.scatter_3d(
# x=alphas,
# y=Res,
# z=CDs,
# size=np.ones_like(alphas),
# color=CDs,
# log_y=True,
# labels={"x": "alphas", "y": "Re", "z": "CD"}
# ).show()
# px.scatter_3d(
# x=alphas,
# y=Res,
# z=np.array(CLs) / np.array(CDs),
# size=np.ones_like(alphas),
# color=np.array(CLs) / np.array(CDs),
# log_y=True,
# labels={"x": "alphas", "y": "Re", "z": "CL/CD"}
# ).show()
#
# # # rae2822 checks
# alpha_inputs = np.linspace(-6, 12)
# Re_inputs = np.logspace(4, 6)
# alphas = []
# Res = []
# CLs = []
# CDs = []
# for alpha in alpha_inputs:
# for Re in Re_inputs:
# alphas.append(alpha)
# Res.append(Re)
# CLs.append(Cl_rae2822(alpha, Re))
# CDs.append(Cd_profile_rae2822(alpha, Re))
# px.scatter_3d(
# x=alphas,
# y=Res,
# z=CLs,
# size=np.ones_like(alphas),
# color=CLs,
# log_y=True,
# labels={"x": "alphas", "y": "Re", "z": "CL"}
# ).show()
# px.scatter_3d(
# x=alphas,
# y=Res,
# z=CDs,
# size=np.ones_like(alphas),
# color=CDs,
# log_y=True,
# labels={"x": "alphas", "y": "Re", "z": "CD"}
# ).show()
# px.scatter_3d(
# x=alphas,
# y=Res,
# z=np.array(CLs) / np.array(CDs),
# size=np.ones_like(alphas),
# color=np.array(CLs) / np.array(CDs),
# log_y=True,
# labels={"x": "alphas", "y": "Re", "z": "CL/CD"}
# ).show()
# # Cd_wave_e216 check
# CL_inputs = np.linspace(-0.4, 1)
# mach_inputs = np.linspace(0.3, 1)
# CLs = []
# machs = []
# CD_waves = []
# for CL in CL_inputs:
# for mach in mach_inputs:
# CLs.append(CL)
# machs.append(mach)
# CD_waves.append(Cd_wave_e216(CL, mach))
# px.scatter_3d(
# x=CLs,
# y=machs,
# z=CD_waves,
# size=np.ones_like(CD_waves),
# color=CD_waves,
# title="E216 Fit",
# labels={"x": "CL", "y": "Mach", "z": "CD_wave"},
# range_z=(0, 200e-4)
# ).show()
# Cd_wave_rae2822 check
# CL_inputs = np.linspace(-0.4, 1)
# mach_inputs = np.linspace(0.3, 1)
# CLs = []
# machs = []
# CD_waves = []
# for CL in CL_inputs:
# for mach in mach_inputs:
# CLs.append(CL)
# machs.append(mach)
# CD_waves.append(Cd_wave_rae2822(CL, mach))
# px.scatter_3d(
# x=CLs,
# y=machs,
# z=CD_waves,
# size=np.ones_like(CD_waves),
# color=CD_waves,
# title="RAE2822 Fit",
# labels={"x": "CL", "y": "Mach", "z": "CD_wave"},
# # range_z=(0, 200e-4)
# ).show()
# # Cd_wave_Korn check
# CL_inputs = np.linspace(-0.4, 1)
# mach_inputs = np.linspace(0.3, 1)
# CLs = []
# machs = []
# CD_waves = []
# for CL in CL_inputs:
# for mach in mach_inputs:
# CLs.append(CL)
# machs.append(mach)
# CD_waves.append(Cd_wave_Korn(CL, t_over_c=0.121, mach=mach, kappa_A=0.95))
# px.scatter_3d(
# x=CLs,
# y=machs,
# z=CD_waves,
# size=np.ones_like(CD_waves),
# color=CD_waves,
# title="Korn Equation",
# labels={"x": "CL", "y": "Mach", "z": "CD_wave"},
# range_z=(0, 200e-4)
# ).show()
# # # Cylinder Drag Check
# Res = np.logspace(-1, 8, 300)
# CDs = Cd_cylinder(Res)
# CDs_s = Cd_cylinder(Res, True)
#
# plt.loglog(Res, CDs, label="Full Model")
# plt.loglog(Res, CDs_s, label="Subcrit. Only Model")
# plt.xlabel("Re")
# plt.ylabel("CD")
# plt.title("Cylinder Drag Checking")
# plt.legend()
# plt.show()
| AeroSandbox | /AeroSandbox-4.1.1.tar.gz/AeroSandbox-4.1.1/aerosandbox/library/aerodynamics/viscous.py | viscous.py |
def mach_number_after_normal_shock(
mach_upstream,
gamma=1.4,
):
"""
Computes the mach number immediately after a normal shock wave.
Args:
mach_upstream: The mach number immediately before the normal shock wave.
gamma: The ratio of specific heats of the fluid. 1.4 for air.
Returns: The mach number immediately after the normal shock wave.
"""
gm1 = gamma - 1
m2 = mach_upstream ** 2
return (
(gm1 * m2 + 2) / (2 * gamma * m2 - gm1)
) ** 0.5
def density_ratio_across_normal_shock(
mach_upstream,
gamma=1.4
):
"""
Computes the ratio of fluid density across a normal shock.
Specifically, returns: rho_after_shock / rho_before_shock
Args:
mach_upstream: The mach number immediately before the normal shock wave.
gamma: The ratio of specific heats of the fluid. 1.4 for air.
Returns: rho_after_shock / rho_before_shock
"""
return (
(gamma + 1) * mach_upstream ** 2
) / (
(gamma - 1) * mach_upstream ** 2 + 2
)
def temperature_ratio_across_normal_shock(
mach_upstream,
gamma=1.4
):
"""
Computes the ratio of fluid temperature across a normal shock.
Specifically, returns: T_after_shock / T_before_shock
Args:
mach_upstream: The mach number immediately before the normal shock wave.
gamma: The ratio of specific heats of the fluid. 1.4 for air.
Returns: T_after_shock / T_before_shock
"""
gm1 = gamma - 1
m2 = mach_upstream ** 2
return (
(2 * gamma * m2 - gm1) * (gm1 * m2 + 2)
) / (
(gamma + 1) ** 2 * m2
)
def pressure_ratio_across_normal_shock(
mach_upstream,
gamma=1.4
):
"""
Computes the ratio of fluid static pressure across a normal shock.
Specifically, returns: P_after_shock / P_before_shock
Args:
mach_upstream: The mach number immediately before the normal shock wave.
gamma: The ratio of specific heats of the fluid. 1.4 for air.
Returns: P_after_shock / P_before_shock
"""
m2 = mach_upstream ** 2
return (
2 * gamma * m2 - (gamma - 1)
) / (
(gamma + 1)
)
def total_pressure_ratio_across_normal_shock(
mach_upstream,
gamma=1.4
):
"""
Computes the ratio of fluid total pressure across a normal shock.
Specifically, returns: Pt_after_shock / Pt_before_shock
Args:
mach_upstream: The mach number immediately before the normal shock wave.
gamma: The ratio of specific heats of the fluid. 1.4 for air.
Returns: Pt_after_shock / Pt_before_shock
"""
return density_ratio_across_normal_shock(
mach_upstream=mach_upstream,
gamma=gamma
) ** (gamma / (gamma - 1)) * (
(gamma + 1) / (2 * gamma * mach_upstream ** 2 - (gamma - 1))
) ** (1 / (gamma - 1))
if __name__ == '__main__':
def q_ratio(mach):
return (
density_ratio_across_normal_shock(mach) *
(
mach_number_after_normal_shock(mach) *
temperature_ratio_across_normal_shock(mach) ** 0.5
) ** 2
)
print("q_ratio(2) =", q_ratio(2))
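# Illustrative sanity check (assumed values, not from the original source): for an upstream Mach
# number of 2 and gamma = 1.4, standard normal-shock tables give roughly M2 ≈ 0.577,
# rho2/rho1 ≈ 2.67, T2/T1 ≈ 1.69, p2/p1 = 4.5, and pt2/pt1 ≈ 0.72. A quick spot-check:
# print(mach_number_after_normal_shock(2))            # ≈ 0.577
# print(density_ratio_across_normal_shock(2))         # ≈ 2.667
# print(temperature_ratio_across_normal_shock(2))     # ≈ 1.688
# print(pressure_ratio_across_normal_shock(2))        # = 4.5
# print(total_pressure_ratio_across_normal_shock(2))  # ≈ 0.721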
|
AeroSandbox
|
/AeroSandbox-4.1.1.tar.gz/AeroSandbox-4.1.1/aerosandbox/library/aerodynamics/normal_shock_relations.py
|
normal_shock_relations.py
|
import aerosandbox.numpy as np
def induced_drag(
lift,
span,
dynamic_pressure,
oswalds_efficiency=1,
):
"""
Computes the induced drag associated with a lifting planar wing.
Args:
lift: Lift force [Newtons]
span: Wing span [meters]
dynamic_pressure: Dynamic pressure [Pascals]
oswalds_efficiency: Oswald's efficiency factor [-]
Returns: Induced drag force [Newtons]
"""
return lift ** 2 / (
dynamic_pressure * np.pi * span ** 2 * oswalds_efficiency
)
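# Example usage (illustrative, assumed values; not from the original source): a 10 m span wing
# producing 5000 N of lift at a dynamic pressure of 500 Pa, with an Oswald efficiency of 0.95,
# gives an induced drag of roughly 170 N.
# D_induced = induced_drag(lift=5000, span=10, dynamic_pressure=500, oswalds_efficiency=0.95)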
def oswalds_efficiency(
taper_ratio: float,
aspect_ratio: float,
sweep: float = 0.,
fuselage_diameter_to_span_ratio: float = 0.,
) -> float:
"""
Computes the Oswald's efficiency factor for a planar, tapered, swept wing.
Based on "Estimating the Oswald Factor from Basic Aircraft Geometrical Parameters"
by M. Nita, D. Scholz; Hamburg Univ. of Applied Sciences, 2012.
Implementation of Section 5 from the above paper.
Only valid for backwards-swept wings; i.e. 0 <= sweep < 90.
Args:
    taper_ratio: Taper ratio of the wing (tip_chord / root_chord) [-]
    aspect_ratio: Aspect ratio of the wing (b^2 / S) [-]
    sweep: Wing quarter-chord sweep angle [deg]
    fuselage_diameter_to_span_ratio: Ratio of the fuselage diameter to the wing span [-]. Used for the
        fuselage wake-contraction correction.
Returns: Oswald's efficiency factor [-]
"""
sweep = np.clip(sweep, 0, 90) # TODO input proper analytic continuation
def f(l): # f(lambda), given as Eq. 36 in the Nita and Scholz paper (see parent docstring).
return (
0.0524 * l ** 4
- 0.15 * l ** 3
+ 0.1659 * l ** 2
- 0.0706 * l
+ 0.0119
)
delta_lambda = -0.357 + 0.45 * np.exp(-0.0375 * sweep)
# Eq. 37 in Nita & Scholz.
# Note: there is a typo in the cited paper; the negative in the exponent was omitted.
# A bit of thinking about this reveals that this omission must be erroneous.
e_theo = 1 / (
1 + f(taper_ratio - delta_lambda) * aspect_ratio
)
fuselage_wake_contraction_correction_factor = 1 - 2 * (fuselage_diameter_to_span_ratio) ** 2
e = e_theo * fuselage_wake_contraction_correction_factor
return e
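# Example usage (illustrative, assumed values): an unswept wing with taper ratio 0.5 and aspect
# ratio 8, with no fuselage correction. Under this purely-inviscid Section-5 model the result
# comes out near unity (≈ 0.98); viscous effects would lower it in practice.
# e = oswalds_efficiency(taper_ratio=0.5, aspect_ratio=8, sweep=0)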
def optimal_taper_ratio(
sweep=0.,
) -> float:
"""
Computes the optimal (minimum-induced-drag) taper ratio for a given quarter-chord sweep angle.
Based on "Estimating the Oswald Factor from Basic Aircraft Geometrical Parameters"
by M. Nita, D. Scholz; Hamburg Univ. of Applied Sciences, 2012.
Only valid for backwards-swept wings; i.e. 0 <= sweep < 90.
Args:
sweep: Wing quarter-chord sweep angle [deg]
Returns: Optimal taper ratio
"""
sweep = np.clip(sweep, 0, 90) # TODO input proper analytic continuation
return 0.45 * np.exp(-0.0375 * sweep)
def CL_over_Cl(
aspect_ratio: float,
mach: float = 0.,
sweep: float = 0.,
Cl_is_compressible: bool = True
) -> float:
"""
Returns the ratio of 3D lift coefficient (with compressibility) to the 2D lift coefficient.
Specifically: CL_3D / CL_2D
Args:
aspect_ratio: The aspect ratio of the wing.
mach: The freestream Mach number.
sweep: The sweep of the wing, in degrees. To be most accurate, this should be the sweep at the locus of
thickest points along the wing.
Cl_is_compressible: This flag indicates whether the 2D airfoil data already has compressibility effects
modeled.
For example:
* If this flag is True, this function returns: CL_3D / CL_2D, where CL_2D is the sectional lift
coefficient based on the local profile at the freestream mach number.
* If this flag is False, this function returns: CL_3D / CL_2D_at_mach_zero, where CL_2D_... is the
sectional lift coefficient based on the local profile at mach zero.
For most accurate results, set this flag to True, and then model profile characteristics separately.
"""
prandtl_glauert_beta_squared_ideal = 1 - mach ** 2
# beta_squared = 1 - mach ** 2
beta_squared = np.softmax(
prandtl_glauert_beta_squared_ideal,
-prandtl_glauert_beta_squared_ideal,
hardness=3.0
)
### Alternate formulations
# CL_ratio = aspect_ratio / (aspect_ratio + 2) # Equivalent to equation in Drela's FVA in incompressible, 2*pi*alpha limit.
# CL_ratio = aspect_ratio / (2 + np.sqrt(4 + aspect_ratio ** 2)) # more theoretically sound at low aspect_ratio
### Formulation from Raymer, Sect. 12.4.1; citing DATCOM.
# Comparison to experiment suggests this is the most accurate.
# Symbolically simplified to remove the PG singularity.
eta = 0.95
CL_ratio = aspect_ratio / (
2 + (
4 + (aspect_ratio ** 2 * beta_squared / eta ** 2) + (np.tand(sweep) * aspect_ratio / eta) ** 2
) ** 0.5
)
if Cl_is_compressible:
CL_ratio = CL_ratio * beta_squared ** 0.5
return CL_ratio
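# Example usage (illustrative, assumed values): lift-curve-slope ratio for an aspect-ratio-8,
# unswept wing at Mach 0.7, assuming the 2D airfoil data already includes compressibility:
# ratio = CL_over_Cl(aspect_ratio=8, mach=0.7, sweep=0)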
def induced_drag_ratio_from_ground_effect(
h_over_b # type: float
):
"""
Gives the ratio of actual induced drag to free-flight induced drag experienced by a wing in ground effect.
Artificially smoothed below around h/b == 0.05 to retain differentiability and practicality.
Source: W. F. Phillips, D. F. Hunsaker, "Lifting-Line Predictions for Induced Drag and Lift in Ground Effect".
Using Equation 5 from the paper, which is modified from a model from Torenbeek:
Torenbeek, E. "Ground Effects", 1982.
:param h_over_b: (Height above ground) divided by (wingspan).
:return: Ratio of induced drag in ground effect to induced drag out of ground effect [unitless]
"""
h_over_b = np.softmax(
h_over_b,
0,
hardness=1 / 0.03
)
return 1 - np.exp(
-4.01 * (2 * h_over_b) ** 0.717
)
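# Example usage (illustrative, assumed value): a wing flying one-tenth of a span above the ground:
# ratio = induced_drag_ratio_from_ground_effect(h_over_b=0.1)  # < 1, i.e., less induced drag in ground effect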
if __name__ == '__main__':
import matplotlib.pyplot as plt
import aerosandbox.tools.pretty_plots as p
fig, ax = plt.subplots()
machs = np.linspace(0, 2, 500)
plt.plot(machs, CL_over_Cl(5, machs, 0))
p.show_plot()
|
AeroSandbox
|
/AeroSandbox-4.1.1.tar.gz/AeroSandbox-4.1.1/aerosandbox/library/aerodynamics/inviscid.py
|
inviscid.py
|
import aerosandbox.numpy as np
from typing import Union, Callable
from scipy.integrate import quad
# Welcome to the unsteady aerodynamics library!
# In here you will find analytical, time-domain models for the
# unsteady lift response of thin airfoils. Here is a quick overview
# of what's been implemented so far:
# 1) Unsteady pitching (Wagner's problem)
# 2) Transverse wing-gust encounters (Kussner's problem)
# 3) Added mass
# 4) Pitching maneuver through a gust (Combination of all 3 models above)
# The models usually take Callable objects as arguments which given the reduced time, return the quantity of
# interest (Velocity profile, angle of attack etc.). For an explanation of reduced time see function calculate_reduced_time.
# In main() you will find some example gusts as well as example pitching profiles.
# You can easily build your own and pass them to the appropriate functions
# to instantly get the lift response! Although not yet implemented, it is possible to
# calculate an optimal unsteady maneuver through any known disturbance.
# If you run this file as is, the lift history of a flat plate pitching through a
# top hat gust will be computed.
def calculate_reduced_time(
time: Union[float, np.ndarray],
velocity: Union[float, np.ndarray],
chord: float
) -> Union[float, np.ndarray]:
"""
Calculates reduced time from time in seconds and velocity history in m/s.
For constant velocity it reduces to s = 2*U*t/c
The reduced time is the number of semichords travelled by the airfoil/aircraft
i.e. 2 / chord * integral from t0 to t of velocity dt
Args:
time (float,np.ndarray) : Time in seconds
velocity (float,np.ndarray): Either a constant velocity or array of velocities at corresponding reduced times
chord (float) : The chord of the airfoil
Returns:
The reduced time as an ndarray or float similar to the input. The first element is 0.
"""
if isinstance(velocity, (float, int)):
return 2 * velocity * time / chord
else:
assert np.size(velocity) == np.size(time), "The velocity history and time must have the same length"
reduced_time = np.zeros_like(time)
for i in range(len(time) - 1):
reduced_time[i + 1] = reduced_time[i] + (velocity[i + 1] + velocity[i]) / 2 * (time[i + 1] - time[i])
return 2 / chord * reduced_time
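# Example usage (illustrative, assumed values): at a constant velocity of 10 m/s with a 2 m chord,
# one second of physical time corresponds to 2 * 10 * 1 / 2 = 10 semichords of reduced time:
# s = calculate_reduced_time(time=1.0, velocity=10.0, chord=2.0)  # = 10.0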
def wagners_function(reduced_time: Union[float, np.ndarray]):
"""
A commonly used approximation to Wagner's function
(Jones, R.T. The Unsteady Lift of a Finite Wing; Technical Report NACA TN-682; NACA: Washington, DC, USA, 1939)
Args:
reduced_time (float,np.ndarray) : Equal to the number of semichords travelled. See function calculate_reduced_time
"""
wagner = (1 - 0.165 * np.exp(-0.0455 * reduced_time) -
0.335 * np.exp(-0.3 * reduced_time)) * np.where(reduced_time >= 0, 1, 0)
return wagner
def kussners_function(reduced_time: Union[float, np.ndarray]):
"""
A commonly used approximation to Kussner's function (Sears and Sparks 1941)
Args:
reduced_time (float,np.ndarray) : This is equal to the number of semichords travelled. See function calculate_reduced_time
"""
kussner = (1 - 0.5 * np.exp(-0.13 * reduced_time) -
0.5 * np.exp(-reduced_time)) * np.where(reduced_time >= 0, 1, 0)
return kussner
def indicial_pitch_response(
reduced_time: Union[float, np.ndarray],
angle_of_attack: float # In degrees
):
"""
Computes the evolution of the lift coefficient in Wagner's problem which can be interpreted as follows
1) An impulsively started flat plate at constant angle of attack
2) An impulsive change in the angle of attack of a flat plate at constant velocity
The model predicts infinite added mass at the first instant due to the infinite acceleration
The delta function term (and therefore added mass) has been omitted in this case.
Reduced_time = 0 corresponds to the instance the airfoil pitches/accelerates
Args:
reduced_time (float,np.ndarray) : Reduced time, equal to the number of semichords travelled. See function reduced_time
angle_of_attack (float) : The angle of attack, in degrees
"""
return 2 * np.pi * np.deg2rad(angle_of_attack) * wagners_function(reduced_time)
def indicial_gust_response(
reduced_time: Union[float, np.ndarray],
gust_velocity: float,
plate_velocity: float,
angle_of_attack: float = 0, # In degrees
chord: float = 1
):
"""
Computes the evolution of the lift coefficient of a flat plate entering
an infinitely long, sharp step gust (Heaviside function) at a constant angle of attack.
Reduced_time = 0 corresponds to the instance the gust is entered
(Leishman, Principles of Helicopter Aerodynamics, S8.10,S8.11)
Args:
reduced_time (float,np.ndarray) : Reduced time, equal to the number of semichords travelled. See function reduced_time
gust_velocity (float) : velocity in m/s of the top hat gust
plate_velocity (float) : velocity of the thin airfoil entering the gust
angle_of_attack (float) : The angle of attack, in degrees
chord (float) : The chord of the plate in meters
"""
angle_of_attack_radians = np.deg2rad(angle_of_attack)
offset = chord / 2 * (1 - np.cos(angle_of_attack_radians))
return (2 * np.pi * np.arctan(gust_velocity / plate_velocity) *
np.cos(angle_of_attack_radians) *
kussners_function(reduced_time - offset))
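# Example usage (illustrative, assumed values): lift-coefficient history of a flat plate at zero
# angle of attack running into a sharp-edged gust whose velocity is 10% of the flight speed:
# s = np.linspace(0, 20, 100)
# CL = indicial_gust_response(s, gust_velocity=1.0, plate_velocity=10.0)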
def calculate_lift_due_to_transverse_gust(
reduced_time: np.ndarray,
gust_velocity_profile: Callable[[float], float],
plate_velocity: float,
angle_of_attack: Union[float, Callable[[float], float]] = 0, # In Degrees
chord: float = 1
):
"""
Calculates the lift (as a function of reduced time) caused by an arbitrary transverse gust profile
by computing duhamel superposition integral of Kussner's problem at a constant angle of attack
Args:
reduced_time (float,np.ndarray) : Reduced time, equal to the number of semichords travelled. See function reduced_time
gust_velocity_profile (Callable[[float],float]) : The transverse velocity profile that the flat plate experiences. Must be a function that takes reduced time and returns a velocity
plate_velocity (float) :The velocity by which the flat plate enters the gust
angle_of_attack (Union[float,Callable[[float],float]]) : The angle of attack, in degrees. Can either be a float for constant angle of attack or a Callable that takes reduced time and returns angle of attack
chord (float) : The chord of the plate in meters
Returns:
lift_coefficient (np.ndarray) : The lift coefficient history of the flat plate
"""
assert type(angle_of_attack) != np.ndarray, "Please provide either a Callable or a float for the angle of attack"
if isinstance(angle_of_attack, float) or isinstance(angle_of_attack, int):
def AoA_function(reduced_time):
return np.deg2rad(angle_of_attack)
else:
def AoA_function(reduced_time):
return np.deg2rad(angle_of_attack(reduced_time))
def dK_ds(reduced_time):
return (0.065 * np.exp(-0.13 * reduced_time) +
0.5 * np.exp(-reduced_time))
def integrand(sigma, s, chord):
offset = chord / 2 * (1 - np.cos(AoA_function(s - sigma)))
return (dK_ds(sigma) *
gust_velocity_profile(s - sigma - offset) *
np.cos(AoA_function(s - sigma)))
lift_coefficient = np.zeros_like(reduced_time)
for i, s in enumerate(reduced_time):
I = quad(integrand, 0, s, args=(s, chord))[0]
lift_coefficient[i] = 2 * np.pi * I / plate_velocity
return lift_coefficient
def calculate_lift_due_to_pitching_profile(
reduced_time: np.ndarray,
angle_of_attack: Union[Callable[[float], float], float] # In degrees
):
"""
Calculates the duhamel superposition integral of Wagner's problem.
Given some arbitrary pitching profile. The lift coefficient as a function
of reduced time of a flat plate can be computed using this function
Args:
reduced_time (float,np.ndarray) : Reduced time, equal to the number of semichords travelled. See function reduced_time
angle_of_attack (Callable[[float],float]) : The angle of attack as a function of reduced time of the flat plate. Must be a Callable that takes reduced time and returns angle of attack
Returns:
lift_coefficient (np.ndarray) : The lift coefficient history of the flat plate
"""
assert (reduced_time >= 0).all(), "Please use positive time. Negative time not supported"
if isinstance(angle_of_attack, float) or isinstance(angle_of_attack, int):
def AoA_function(reduced_time):
return np.deg2rad(angle_of_attack)
else:
def AoA_function(reduced_time):
return np.deg2rad(angle_of_attack(reduced_time))
def dW_ds(reduced_time):
return (0.1005 * np.exp(-0.3 * reduced_time) +
0.00750075 * np.exp(-0.0455 * reduced_time))
def integrand(sigma, s):
    return dW_ds(sigma) * AoA_function(s - sigma)
lift_coefficient = np.zeros_like(reduced_time)
for i, s in enumerate(reduced_time):
I = quad(integrand, 0, s, args=s)[0]
lift_coefficient[i] = 2 * np.pi * (AoA_function(s) *
wagners_function(0) +
I)
return lift_coefficient
def added_mass_due_to_pitching(
reduced_time: np.ndarray,
angle_of_attack: Callable[[float], float] # In degrees
):
"""
This function calculates the lift coefficient due to the added mass of a flat plate
pitching about its midchord while moving at constant velocity.
Args:
reduced_time (np.ndarray) : Reduced time, equal to the number of semichords travelled. See function reduced_time
angle_of_attack (Callable[[float],float]) : The angle of attack as a function of reduced time of the flat plate
Returns:
lift_coefficient (np.ndarray) : The lift coefficient history of the flat plate
"""
AoA = np.array([np.deg2rad(angle_of_attack(s)) for s in reduced_time])
da_ds = np.gradient(AoA, reduced_time)
# TODO: generalize to all unsteady motion
return np.pi / 2 * np.cos(AoA) ** 2 * da_ds
def pitching_through_transverse_gust(
reduced_time: np.ndarray,
gust_velocity_profile: Callable[[float], float],
plate_velocity: float,
angle_of_attack: Union[Callable[[float], float], float], # In degrees
chord: float = 1
):
"""
This function calculates the lift as a function of time of a flat plate pitching
about its midchord through an arbitrary transverse gust. It combines Kussner's gust response with
Wagner's pitch response as well as added mass.
The following physics are accounted for
1) Vorticity shed from the trailing edge due to gust profile
2) Vorticity shed from the trailing edge due to pitching profile
3) Added mass (non-circulatory force) due to pitching about midchord
The following physics are NOT accounted for
1) Any type of flow separation
2) Leading edge vorticity shedding
3) Deflected wake due to gust (flat wake assumption)
Args:
reduced_time (float,np.ndarray) : Reduced time, equal to the number of semichords travelled. See function reduced_time
gust_velocity_profile (Callable[[float],float]) : The transverse velocity profile that the flat plate experiences. Must be a function that takes reduced time and returns a velocity
plate_velocity (float) :The velocity by which the flat plate enters the gust
angle_of_attack (Union[float,Callable[[float],float]]) : The angle of attack, in degrees. Can either be a float for constant angle of attack or a Callable that takes reduced time and returns angle of attack
chord (float) : The chord of the plate in meters
Returns:
lift_coefficient (np.ndarray) : The lift coefficient history of the flat plate
"""
gust_lift = calculate_lift_due_to_transverse_gust(reduced_time, gust_velocity_profile, plate_velocity,
angle_of_attack, chord)
pitch_lift = calculate_lift_due_to_pitching_profile(reduced_time, angle_of_attack)
added_mass_lift = added_mass_due_to_pitching(reduced_time, angle_of_attack)
return gust_lift + pitch_lift + added_mass_lift
def top_hat_gust(reduced_time: float) -> float:
"""
A canonical example gust.
Args:
reduced_time (float)
Returns:
gust_velocity (float)
"""
if 5 <= reduced_time <= 10:
gust_velocity = 1
else:
gust_velocity = 0
return gust_velocity
def sine_squared_gust(reduced_time: float) -> float:
"""
A canonical gust used by the FAA to show 'compliance with the
requirements of Title 14, Code of Federal Regulations (14 CFR) 25.341,
Gust and turbulence loads. Section 25.341 specifies the discrete gust
and continuous turbulence dynamic load conditions that apply to the
airplane and engines.'
Args:
reduced_time (float)
Returns:
gust_velocity (float)
"""
gust_strength = 1
start = 5
finish = 10
gust_width_to_chord_ratio = 5
if start <= reduced_time <= finish:
gust_velocity = (gust_strength *
np.sin((np.pi * reduced_time) /
gust_width_to_chord_ratio) ** 2)
else:
gust_velocity = 0
return gust_velocity
def gaussian_pitch(reduced_time: float) -> float:
"""
A pitch maneuver resembling a Gaussian curve
Args:
reduced_time (float)
Returns:
angle_of_attack (float) : in degrees
"""
return -25 * np.exp(-((reduced_time - 7.5) / 3) ** 2)
def linear_ramp_pitch(reduced_time: float) -> float:
"""
A pitch maneuver resembling a linear ramp
Args:
reduced_time (float)
Returns:
angle_of_attack (float) : in degrees
"""
if reduced_time < 7.5:
angle_of_attack = -3.3 * reduced_time
else:
angle_of_attack = 2 * reduced_time - 40
return angle_of_attack
if __name__ == "__main__":
import matplotlib.pyplot as plt
time = np.linspace(0, 10, 100) # Time in seconds
wing_velocity = 2 # Wing horizontal velocity in m/s
chord = 2
reduced_time = calculate_reduced_time(time, wing_velocity, chord) # Number of semi chords travelled
# Visualize the gust profiles as well as the pitch maneuvers
fig, ax1 = plt.subplots(dpi=300)
ln1 = ax1.plot(reduced_time, np.array([top_hat_gust(s) for s in reduced_time]), label="Top-Hat Gust", lw=3)
ln2 = ax1.plot(reduced_time, np.array([sine_squared_gust(s) for s in reduced_time]), label="Sine-Squared Gust",
lw=3)
ax1.set_xlabel("Reduced time")
ax1.set_ylabel("Velocity (m/s)")
ax2 = ax1.twinx()
ln3 = ax2.plot(reduced_time, np.array([gaussian_pitch(s) for s in reduced_time]), label="Gaussian Pitch", c="red",
ls="--", lw=3)
ax2.set_ylabel("Angle of Attack, degrees")
lns = ln1 + ln2 + ln3
labs = [l.get_label() for l in lns]
ax2.legend(lns, labs, loc="lower right")
plt.title("Gust and pitch example profiles")
total_lift = pitching_through_transverse_gust(reduced_time, top_hat_gust, wing_velocity, gaussian_pitch)
gust_lift = calculate_lift_due_to_transverse_gust(reduced_time, top_hat_gust, wing_velocity, gaussian_pitch)
pitch_lift = calculate_lift_due_to_pitching_profile(reduced_time, gaussian_pitch)
added_mass_lift = added_mass_due_to_pitching(reduced_time, gaussian_pitch)
# Visualize the different sources of lift
plt.figure(dpi=300)
plt.plot(reduced_time, total_lift, label="Total Lift", lw=2)
plt.plot(reduced_time, gust_lift, label="Gust Lift", lw=2)
plt.plot(reduced_time, pitch_lift, label="Pitching Lift", lw=2)
plt.plot(reduced_time, added_mass_lift, label="Added Mass Lift", lw=2)
plt.legend()
plt.xlabel("Reduced time")
plt.ylabel(r"$C_\ell$")
plt.title("Gaussian Pitch Maneuver Through Top-Hat Gust")
|
AeroSandbox
|
/AeroSandbox-4.1.1.tar.gz/AeroSandbox-4.1.1/aerosandbox/library/aerodynamics/unsteady.py
|
unsteady.py
|
import aerosandbox as asb
import aerosandbox.numpy as np
import aerosandbox.tools.units as u
from typing import Dict, Union
# From Torenbeek: "Synthesis of Subsonic Airplane Design", 1976, Delft University Press
# Chapter 8: "Airplane Weight and Balance"
def mass_wing_simple(
wing: asb.Wing,
design_mass_TOGW: float,
ultimate_load_factor: float,
suspended_mass: float,
main_gear_mounted_to_wing: bool = True,
) -> float:
"""
Computes the mass of a wing of an aircraft, according to Torenbeek's "Synthesis of Subsonic
Airplane Design".
This is the simple version of the wing weight model, which is found in:
Section 8.4: Weight Prediction Data and Methods
8.4.1: Airframe Structure
Eq. 8-12
A more detailed version of the wing weight model is available in the `mass_wing()` function in this same module.
Args:
wing: The wing object. Should be an AeroSandbox Wing object.
design_mass_TOGW: The design takeoff gross weight of the entire aircraft [kg].
ultimate_load_factor: The ultimate load factor of the aircraft. 1.5x the limit load factor.
suspended_mass: The mass of the aircraft that is suspended from the wing [kg].
main_gear_mounted_to_wing: Whether the main gear is mounted to the wing structure.
Returns: The total mass of the wing [kg].
"""
k_w = np.blend(
(design_mass_TOGW - 5670) / 2000,
6.67e-3,
4.90e-3
)
span = wing.span() / np.cosd(wing.mean_sweep_angle(x_nondim=0.5))
wing_root_thickness = wing.xsecs[0].airfoil.max_thickness() * wing.xsecs[0].chord
return suspended_mass * (
k_w *
span ** 0.75 *
(1 + (1.905 / span) ** 0.5) *
ultimate_load_factor ** 0.55 *
((span / wing_root_thickness) / (suspended_mass / wing.area())) ** 0.30 *
(1 if main_gear_mounted_to_wing else 0.95)
)
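# Example usage (illustrative sketch; the wing geometry and masses below are assumed, made-up values):
# wing = asb.Wing(
#     name="Main Wing",
#     symmetric=True,
#     xsecs=[
#         asb.WingXSec(xyz_le=[0, 0, 0], chord=2.0, airfoil=asb.Airfoil("naca2412")),
#         asb.WingXSec(xyz_le=[0.5, 8, 0], chord=1.0, airfoil=asb.Airfoil("naca2412")),
#     ]
# )
# m_wing = mass_wing_simple(
#     wing=wing,
#     design_mass_TOGW=10000,          # [kg]
#     ultimate_load_factor=3.8 * 1.5,  # 1.5x limit load factor
#     suspended_mass=9000,             # [kg]
# )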
def mass_wing_high_lift_devices(
wing: asb.Wing,
max_airspeed_for_flaps: float,
flap_deflection_angle: float = 30,
k_f1: float = 1.0,
k_f2: float = 1.0
) -> float:
"""
Estimates the mass of the high-lift devices (trailing-edge flaps) of an airplane wing,
using Torenbeek's method, which accounts for the wing and flap geometry, the flap
deflection angle, and the flap deployment speed.
Args:
wing, an instance of AeroSandbox's Wing class,
max_airspeed_for_flaps, the maximum airspeed at which the flaps can be deployed [m/s]
flap_deflection_angle, the angle to which the flaps can be deflected [deg]. Default value is 30 degrees.
k_f1, configuration factor 1, with values:
= 1.0 for single-slotted; double-slotted, fixed hinge
= 1.15 for double-slotted, 4-bar movement; single-slotted Fowler
= 1.3 for double-slotted Fowler
= 1.45 for triple-slotted Fowler
k_f2, configuration factor 2, with values:
= 1.0 for slotted flaps with fixed vanes
= 1.25 for double-slotted flaps with "variable geometry", i.e., extending
flaps with separately moving vanes or auxiliary flaps
Returns: Mass of the wing's high-lift system only [kg]
"""
# S_flaps represents the total area of the control surfaces (flaps) on the wing.
S_flaps = wing.control_surface_area()
# Wing span
span = wing.span()
# Sweep at 50% chord
sweep_half_chord = wing.mean_sweep_angle(x_nondim=0.5)
cos_sweep_half_chord = np.cosd(sweep_half_chord)
# span_structural is the "structural" wing span, which takes into account the wing's sweep angle.
span_structural = span / cos_sweep_half_chord
# Airfoil thickness over chord ratio at root
root_t_over_c = wing.xsecs[0].airfoil.max_thickness()
# Torenbeek Eq. C-10
k_f = k_f1 * k_f2
mass_trailing_edge_flaps = S_flaps * (
2.706 * k_f *
(S_flaps * span_structural) ** (3 / 16) *
(
(max_airspeed_for_flaps / 100) ** 2 *
np.sind(flap_deflection_angle) *
np.cosd(wing.mean_sweep_angle(x_nondim=1)) /
root_t_over_c
) ** (3 / 4)
)
mass_leading_edge_devices = 0
mass_high_lift_devices = mass_trailing_edge_flaps + mass_leading_edge_devices
return mass_high_lift_devices
def mass_wing_basic_structure(
wing: asb.Wing,
design_mass_TOGW: float,
ultimate_load_factor: float,
suspended_mass: float,
never_exceed_airspeed: float,
main_gear_mounted_to_wing: bool = True,
strut_y_location: float = None,
k_e: float = 0.95,
return_dict: bool = False,
) -> Union[float, Dict[str, float]]:
"""
Computes the mass of the basic structure of the wing of an aircraft, according to
Torenbeek's "Synthesis of Subsonic Airplane Design", 1976, Appendix C: "Prediction
of Wing Structural Weight". This is the basic wing structure without movables like spoilers,
high-lift devices, etc.
Likely more accurate than the Raymer wing weight models.
Args:
wing: The wing object.
design_mass_TOGW: The design takeoff gross weight of the entire aircraft [kg].
ultimate_load_factor: The ultimate load factor of the aircraft [-]. 1.5x the limit load factor.
suspended_mass: The mass of the aircraft that is suspended from the wing [kg]. It should exclude
any wing attachments that are not part of the wing structure.
never_exceed_airspeed: The never-exceed airspeed of the aircraft [m/s]. Used for flutter calculations.
main_gear_mounted_to_wing: Whether the main gear is mounted to the wing structure. Boolean.
strut_y_location: The spanwise-location of the strut (if any), as measured from the wing root [meters]. If None,
it is assumed that there is no strut (i.e., the wing is a cantilever beam).
k_e: represents weight knockdowns due to bending moment relief from engines mounted in front of elastic axis.
see Torenbeek unlabeled equations, between C-3 and C-4.
k_e = 1.0 if engines are not wing-mounted,
k_e = 0.95 (default) for two wing-mounted engines in front of the elastic axis, and
k_e = 0.90 for four wing-mounted engines in front of the elastic axis.
return_dict: Whether to return a dictionary of all the intermediate values, or just the final mass. Defaults
to False, which returns just the final mass [kg].
Returns: If return_dict is False (default), returns a single value: the mass of the basic wing [kg]. If return_dict is
True, returns a dictionary of all the intermediate values.
"""
# Wing span
span = wing.span()
# Sweep at 50% chord
sweep_half_chord = wing.mean_sweep_angle(x_nondim=0.5)
cos_sweep_half_chord = np.cosd(sweep_half_chord)
# Structural wing span
span_structural = span / cos_sweep_half_chord
# Airfoil thickness over chord ratio at root
root_t_over_c = wing.xsecs[0].airfoil.max_thickness()
# Torenbeek Eq. C-2
# `k_no` represents penalties due to skin joints, non-tapered skin, minimum gauge, etc.
k_no = 1 + (1.905 / span_structural) ** 0.5
# Torenbeek Eq. C-3
# `k_lambda` represents penalties due to taper ratio
k_lambda = (1 + wing.taper_ratio()) ** 0.4
# `k_uc` represents weight knockdowns due to undercarriage.
k_uc = 1 if main_gear_mounted_to_wing else 0.95
# Torenbeek Eq. C-4
# `k_st` represents weight excrescence due to structural stiffness against flutter.
k_st = (
1 +
9.06e-4 * (
(span * np.cosd(wing.mean_sweep_angle(x_nondim=0))) ** 3 /
design_mass_TOGW
) * (
never_exceed_airspeed / 100 / root_t_over_c
) ** 2 *
cos_sweep_half_chord
)
# Torenbeek Eq. C-5
# `k_b` represents weight knockdowns due to bending moment relief from strut location.
if strut_y_location is None:
k_b = 1
else:
k_b = 1 - (strut_y_location / (wing.span() / 2)) ** 2
### Use all the above to compute the basic wing structural mass
mass_wing_basic = (
4.58e-3 *
k_no *
k_lambda *
k_e *
k_uc *
k_st *
(
k_b * ultimate_load_factor * (0.8 * suspended_mass + 0.2 * design_mass_TOGW)
) ** 0.55 *
span ** 1.675 *
root_t_over_c ** -0.45 *
cos_sweep_half_chord ** -1.325
)
if return_dict:
return locals()
else:
return mass_wing_basic
def mass_wing_spoilers_and_speedbrakes(
wing: asb.Wing,
mass_basic_wing: float
) -> float:
"""
The function mass_spoilers_and_speedbrakes() estimates the weight of the spoilers and speedbrakes
according to Torenbeek's "Synthesis of Subsonic Airplane Design", 1976, Appendix C: "Prediction
of Wing Structural Weight".
N.B.: Torenbeek's nominal figure of 12.2 kg/m^2 of wing area yields an unrealistically high estimate
(roughly 20-30% of the wing mass) when referenced to the full wing area, so a correction is applied below.
Args:
wing: an instance of AeroSandbox's Wing class.
mass_basic_wing: The basic weight of the wing (without spoilers, speedbrakes, flaps, slats) [kg]
Returns: The mass of the spoilers and speed brakes only [kg]
N.B.: because the 12.2 kg/m^2 figure comes out too high when the wing planform is used as the
reference area, the estimate is instead taken as 1.5% of the basic wing mass.
"""
# mass_spoilers_and_speedbrakes = np.softmax(
# 12.2 * wing.area(),
# 0.015 * mass_basic_wing
# )
mass_spoilers_and_speedbrakes = 0.015 * mass_basic_wing
return mass_spoilers_and_speedbrakes
def mass_wing(
wing: asb.Wing,
design_mass_TOGW: float,
ultimate_load_factor: float,
suspended_mass: float,
never_exceed_airspeed: float,
max_airspeed_for_flaps: float,
main_gear_mounted_to_wing: bool = True,
flap_deflection_angle: float = 30,
strut_y_location: float = None,
return_dict: bool = False,
) -> Union[float, Dict[str, float]]:
"""
Computes the mass of a wing of an aircraft, according to Torenbeek's "Synthesis of Subsonic Airplane Design",
1976, Appendix C: "Prediction of Wing Structural Weight".
Likely more accurate than the Raymer wing weight models.
Args:
wing: The wing object.
design_mass_TOGW: The design takeoff gross weight of the entire aircraft [kg].
ultimate_load_factor: The ultimate load factor of the aircraft. 1.5x the limit load factor.
suspended_mass: The mass of the aircraft that is suspended from the wing [kg].
never_exceed_airspeed: The never-exceed airspeed of the aircraft [m/s]. Used for flutter calculations.
max_airspeed_for_flaps: The maximum airspeed at which the flaps are allowed to be deployed [m/s]. In the
absence of other information, 1.8x stall speed is a good guess.
main_gear_mounted_to_wing: Whether the main gear is mounted to the wing structure.
flap_deflection_angle: The maximum deflection angle of the flaps [deg].
strut_y_location: The spanwise location of the strut (if any), as measured from the wing root [meters]. If None,
    it is assumed that there is no strut (i.e., the wing is a cantilever beam).
return_dict: Whether to return a dictionary of all the intermediate values, or just the final mass. Defaults
to False, which returns just the final mass.
Returns: If return_dict is False (default), returns a single value: the total mass of the wing [kg]. If
return_dict is True, returns a dictionary of all the intermediate values.
"""
# High-lift mass estimation
mass_high_lift_devices = mass_wing_high_lift_devices(
wing=wing,
max_airspeed_for_flaps=max_airspeed_for_flaps,
flap_deflection_angle=flap_deflection_angle,
)
# Basic wing structure mass estimation
mass_basic_wing = mass_wing_basic_structure(
wing=wing,
design_mass_TOGW=design_mass_TOGW,
ultimate_load_factor=ultimate_load_factor,
suspended_mass=suspended_mass,
never_exceed_airspeed=never_exceed_airspeed,
main_gear_mounted_to_wing=main_gear_mounted_to_wing,
strut_y_location=strut_y_location,
)
# spoilers and speedbrake mass estimation
mass_spoilers_speedbrakes = mass_wing_spoilers_and_speedbrakes(
wing=wing,
mass_basic_wing=mass_basic_wing
)
mass_wing_total = (
mass_basic_wing +
1.2 * (mass_high_lift_devices + mass_spoilers_speedbrakes)
)
if return_dict:
return locals()
else:
return mass_wing_total
# def mass_hstab(
# hstab: asb.Wing,
# design_mass_TOGW: float,
# ultimate_load_factor: float,
# suspended_mass: float,
# main_gear_mounted_to_wing: bool = True,
# ) -> float:
#
# k_wt = 0.64
def mass_fuselage_simple(
fuselage: asb.Fuselage,
never_exceed_airspeed: float,
wing_to_tail_distance: float,
):
"""
Computes the mass of the fuselage, using Torenbeek's simple version of the calculation.
Source:
Torenbeek: "Synthesis of Subsonic Airplane Design", 1976
Section 8.4: Weight Prediction Data and Methods
8.4.1: Airframe Structure
Eq. 8-16
Args:
fuselage: The fuselage object. Should be an AeroSandbox Fuselage object.
never_exceed_airspeed: The never-exceed airspeed of the aircraft, in m/s.
wing_to_tail_distance: The distance from the quarter-chord of the wing to the quarter-chord of the tail,
in meters.
Returns: The mass of the fuselage, in kg.
"""
widths = [
xsec.width
for xsec in fuselage.xsecs
]
max_width = np.softmax(
*widths,
softness=np.mean(np.array(widths)) * 0.01
)
heights = [
xsec.height
for xsec in fuselage.xsecs
]
max_height = np.softmax(
*heights,
softness=np.mean(np.array(heights)) * 0.01
)
return (
0.23 *
(
never_exceed_airspeed *
wing_to_tail_distance /
(max_width + max_height)
) ** 0.5 *
fuselage.area_wetted() ** 1.2
)
def mass_fuselage(
        fuselage: asb.Fuselage,
        design_mass_TOGW: float,
        ultimate_load_factor: float,
        never_exceed_airspeed: float,
        wing_to_tail_distance: float,
):
    """
    [Not yet implemented] Computes the mass of the fuselage using Torenbeek's detailed method
    (Appendix D, PDF page 477). Until this is completed, use `mass_fuselage_simple()` above.
    """
    # TODO Torenbeek Appendix D (PDF page 477)
    # Stage 1: Calculate the weight of the fuselage shell, which carries the primary loads and contributes
    # approximately 1/3 to 1/2 of the fuselage weight ("gross shell weight").
    # Partial notes preserved from the work-in-progress implementation (S_g, W_str, and W_fr are
    # not yet defined here):
    #     Torenbeek Eq. D-3:
    #     k_lambda = min(0.56 * fuselage.fineness_ratio(assumed_shape="sears_haack"), ...)
    #     W_sk = 0.05428 * k_lambda * S_g ** 1.07 * never_exceed_airspeed ** 0.743
    #     W_g = W_sk + W_str + W_fr
    raise NotImplementedError(
        "The detailed Torenbeek fuselage mass method is not yet implemented; "
        "use mass_fuselage_simple() instead."
    )
def mass_propeller(
propeller_diameter: float,
propeller_power: float,
n_blades: int,
) -> float:
"""
Computes the mass of a propeller.
From Torenbeek: "Synthesis of Subsonic Airplane Design", 1976, Delft University Press.
Table 8-9 (pg. 286, PDF page 306)
Args:
propeller_diameter: Propeller diameter, in meters.
propeller_power: Propeller power, in watts.
n_blades: Number of propeller blades.
Returns: Propeller mass, in kilograms.
"""
return (
0.108 *
n_blades *
(
(propeller_diameter / u.foot) *
(propeller_power / u.horsepower)
) ** 0.78174
) * u.lbm
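# Example usage (illustrative, assumed values): a 3-bladed, 2 m diameter propeller absorbing
# 150 kW comes out on the order of 40 kg under this regression:
# m_prop = mass_propeller(propeller_diameter=2.0, propeller_power=150e3, n_blades=3)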
|
AeroSandbox
|
/AeroSandbox-4.1.1.tar.gz/AeroSandbox-4.1.1/aerosandbox/library/weights/torenbeek_weights.py
|
torenbeek_weights.py
|
import aerosandbox as asb
import aerosandbox.numpy as np
import aerosandbox.tools.units as u
from .raymer_fudge_factors import advanced_composites
from typing import Union
# From Raymer, Aircraft Design: A Conceptual Approach, 5th Ed.
# Section 15.3.2: Cargo/Transport Weights
def mass_wing(
wing: asb.Wing,
design_mass_TOGW: float,
ultimate_load_factor: float,
use_advanced_composites: bool = False,
) -> float:
"""
Computes the mass of the wing for a cargo/transport aircraft, according to Raymer's Aircraft Design: A Conceptual
Approach.
Note: Torenbeek's wing mass model is likely more accurate; see `mass_wing()` in `torenbeek_weights.py` (same
directory).
Args:
wing: The wing object.
design_mass_TOGW: The design take-off gross weight of the entire airplane [kg].
ultimate_load_factor: Ultimate load factor of the airplane.
use_advanced_composites: Whether to use advanced composites for the wing. If True, the wing mass is modified
accordingly.
Returns:
Wing mass [kg].
"""
airfoil_thicknesses = [
xsec.airfoil.max_thickness()
for xsec in wing.xsecs
]
airfoil_t_over_c = np.min(airfoil_thicknesses)
return (
0.0051 *
(design_mass_TOGW / u.lbm * ultimate_load_factor) ** 0.557 *
(wing.area('planform') / u.foot ** 2) ** 0.649 *
wing.aspect_ratio() ** 0.5 *
airfoil_t_over_c ** -0.4 *
(1 + wing.taper_ratio()) ** 0.1 *
np.cosd(wing.mean_sweep_angle()) ** -1 *
(wing.control_surface_area() / u.foot ** 2) ** 0.1 *
(advanced_composites["wing"] if use_advanced_composites else 1)
) * u.lbm
def mass_hstab(
hstab: asb.Wing,
design_mass_TOGW: float,
ultimate_load_factor: float,
wing_to_hstab_distance: float,
fuselage_width_at_hstab_intersection: float,
aircraft_y_radius_of_gyration: float = None,
use_advanced_composites: bool = False,
) -> float:
"""
Computes the mass of the horizontal stabilizer for a cargo/transport aircraft, according to Raymer's Aircraft
Design: A Conceptual Approach.
Args:
hstab: The horizontal stabilizer object.
design_mass_TOGW: The design take-off gross weight of the entire airplane [kg].
ultimate_load_factor: Ultimate load factor of the airplane.
wing_to_hstab_distance: Distance from the wing's root-quarter-chord-point to the hstab's
root-quarter-chord-point [m].
fuselage_width_at_hstab_intersection: Width of the fuselage at the intersection of the fuselage and the hstab [m].
aircraft_y_radius_of_gyration: Radius of gyration of the aircraft about the y-axis [m]. If None, estimates
this as `0.3 * wing_to_hstab_distance`.
use_advanced_composites: Whether to use advanced composites for the hstab. If True, the hstab mass is modified
accordingly.
Returns:
The mass of the horizontal stabilizer [kg].
"""
if aircraft_y_radius_of_gyration is None:
aircraft_y_radius_of_gyration = 0.3 * wing_to_hstab_distance
area = hstab.area()
### Determine if the hstab is all-moving or not
all_moving = True
for xsec in hstab.xsecs:
for control_surface in xsec.control_surfaces:
if (
(control_surface.trailing_edge and control_surface.hinge_point > 0) or
(not control_surface.trailing_edge and control_surface.hinge_point < 1)
):
all_moving = False
break
return (
0.0379 *
(1.143 if all_moving else 1) *
(1 + fuselage_width_at_hstab_intersection / hstab.span()) ** -0.25 *
(design_mass_TOGW / u.lbm) ** 0.639 *
ultimate_load_factor ** 0.10 *
(area / u.foot ** 2) ** 0.75 *
(wing_to_hstab_distance / u.foot) ** -1 *
(aircraft_y_radius_of_gyration / u.foot) ** 0.704 *
np.cosd(hstab.mean_sweep_angle()) ** -1 *
hstab.aspect_ratio() ** 0.166 *
(1 + hstab.control_surface_area() / area) ** 0.1 *
(advanced_composites["tails"] if use_advanced_composites else 1)
) * u.lbm
def mass_vstab(
vstab: asb.Wing,
design_mass_TOGW: float,
ultimate_load_factor: float,
wing_to_vstab_distance: float,
is_t_tail: bool = False,
aircraft_z_radius_of_gyration: float = None,
use_advanced_composites: bool = False,
) -> float:
"""
Computes the mass of the vertical stabilizer for a cargo/transport aircraft, according to Raymer's Aircraft
Design: A Conceptual Approach.
Args:
vstab: The vertical stabilizer object.
design_mass_TOGW: The design take-off gross weight of the entire airplane [kg].
ultimate_load_factor: Ultimate load factor of the airplane.
wing_to_vstab_distance: Distance from the wing's root-quarter-chord-point to the vstab's
root-quarter-chord-point [m].
is_t_tail: Whether the airplane is a T-tail or not.
aircraft_z_radius_of_gyration: The z-radius of gyration of the entire airplane [m]. If None, estimates this
as `1 * wing_to_vstab_distance`.
use_advanced_composites: Whether to use advanced composites for the vstab. If True, the vstab mass is modified
accordingly.
Returns:
The mass of the vertical stabilizer [kg].
"""
airfoil_thicknesses = [
xsec.airfoil.max_thickness()
for xsec in vstab.xsecs
]
airfoil_t_over_c = np.min(airfoil_thicknesses)
if aircraft_z_radius_of_gyration is None:
aircraft_z_radius_of_gyration = 1 * wing_to_vstab_distance
return (
0.0026 *
(1 + (1 if is_t_tail else 0)) ** 0.225 *
(design_mass_TOGW / u.lbm) ** 0.556 *
ultimate_load_factor ** 0.536 *
(wing_to_vstab_distance / u.foot) ** -0.5 *
(vstab.area('planform') / u.foot ** 2) ** 0.5 *
(aircraft_z_radius_of_gyration / u.foot) ** 0.875 *
np.cosd(vstab.mean_sweep_angle()) ** -1 *
vstab.aspect_ratio() ** 0.35 *
airfoil_t_over_c ** -0.5 *
(advanced_composites["tails"] if use_advanced_composites else 1)
) * u.lbm
def mass_fuselage(
fuselage: asb.Fuselage,
design_mass_TOGW: float,
ultimate_load_factor: float,
L_over_D: float,
main_wing: asb.Wing,
n_cargo_doors: int = 1,
has_aft_clamshell_door: bool = False,
landing_gear_mounted_on_fuselage: bool = False,
use_advanced_composites: bool = False,
) -> float:
"""
Computes the mass of the fuselage for a cargo/transport aircraft, according to Raymer's Aircraft Design: A
Conceptual Approach.
Args:
fuselage: The fuselage object.
design_mass_TOGW: The design take-off gross weight of the entire airplane [kg].
ultimate_load_factor: Ultimate load factor of the airplane.
L_over_D: The lift-to-drag ratio of the airplane in cruise.
main_wing: The main wing object. Can be:
* An instance of an AeroSandbox wing object (`asb.Wing`)
* None, if the airplane has no main wing.
n_cargo_doors: The number of cargo doors on the fuselage.
has_aft_clamshell_door: Whether or not the fuselage has an aft clamshell door.
landing_gear_mounted_on_fuselage: Whether or not the landing gear is mounted on the fuselage.
use_advanced_composites: Whether to use advanced composites for the fuselage. If True, the fuselage mass is
modified accordingly.
Returns:
The mass of the fuselage [kg].
"""
K_door = (1 + (0.06 * n_cargo_doors)) * (1.12 if has_aft_clamshell_door else 1)
K_lg = 1.12 if landing_gear_mounted_on_fuselage else 1
fuselage_structural_length = fuselage.length()
if main_wing is not None:
K_ws = (
0.75 *
(
(1 + 2 * main_wing.taper_ratio()) /
(1 + main_wing.taper_ratio())
) *
(
main_wing.span() / fuselage_structural_length *
np.tand(main_wing.mean_sweep_angle())
)
)
else:
K_ws = 0
return (
0.3280 *
K_door *
K_lg *
(design_mass_TOGW / u.lbm * ultimate_load_factor) ** 0.5 *
(fuselage_structural_length / u.foot) ** 0.25 *
(fuselage.area_wetted() / u.foot ** 2) ** 0.302 *
(1 + K_ws) ** 0.04 *
L_over_D ** 0.10 * # L/D
(advanced_composites["fuselage/nacelle"] if use_advanced_composites else 1)
) * u.lbm
def mass_main_landing_gear(
main_gear_length: float,
landing_speed: float,
design_mass_TOGW: float,
is_kneeling: bool = False,
n_gear: int = 2,
n_wheels: int = 12,
n_shock_struts: int = 4,
use_advanced_composites: bool = False,
) -> float:
"""
Computes the mass of the main landing gear for a cargo/transport aircraft, according to Raymer's Aircraft Design:
A Conceptual Approach.
Args:
main_gear_length: length of the main landing gear [m].
landing_speed: landing speed [m/s].
design_mass_TOGW: The design take-off gross weight of the entire airplane [kg].
is_kneeling: whether the main landing gear is capable of kneeling.
n_gear: number of landing gear.
n_wheels: number of wheels in total on the main landing gear.
n_shock_struts: number of shock struts.
use_advanced_composites: Whether to use advanced composites for the landing gear. If True, the landing gear mass
is modified accordingly.
Returns:
mass of the main landing gear [kg].
"""
K_mp = 1.126 if is_kneeling else 1
ultimate_landing_load_factor = n_gear * 1.5
return (
0.0106 *
K_mp * # non-kneeling LG
(design_mass_TOGW / u.lbm) ** 0.888 *
ultimate_landing_load_factor ** 0.25 *
(main_gear_length / u.inch) ** 0.4 *
n_wheels ** 0.321 *
n_shock_struts ** -0.5 *
(landing_speed / u.knot) ** 0.1 *
(advanced_composites["landing_gear"] if use_advanced_composites else 1)
) * u.lbm
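# Example usage (illustrative, assumed values; a rough sketch, not from the original source):
# m_mlg = mass_main_landing_gear(
#     main_gear_length=1.8,     # [m]
#     landing_speed=70,         # [m/s]
#     design_mass_TOGW=80e3,    # [kg]
#     n_gear=2,
#     n_wheels=8,
#     n_shock_struts=2,
# )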
def mass_nose_landing_gear(
nose_gear_length: float,
design_mass_TOGW: float,
is_kneeling: bool = False,
n_gear: int = 1,
n_wheels: int = 2,
use_advanced_composites: bool = False,
) -> float:
"""
Computes the mass of the nose landing gear for a cargo/transport aircraft, according to Raymer's Aircraft
Design: A Conceptual Approach.
Args:
nose_gear_length: Length of nose landing gear when fully-extended [m].
design_mass_TOGW: The design take-off gross weight of the entire airplane [kg].
is_kneeling: Whether the nose landing gear is capable of kneeling.
n_gear: Number of nose landing gear.
n_wheels: Number of wheels in total on the nose landing gear.
use_advanced_composites: Whether to use advanced composites for the landing gear. If True, the landing gear mass
is modified accordingly.
Returns:
Mass of nose landing gear [kg].
"""
K_np = 1.15 if is_kneeling else 1
ultimate_landing_load_factor = n_gear * 1.5
return (
0.032 *
K_np *
(design_mass_TOGW / u.lbm) ** 0.646 *
ultimate_landing_load_factor ** 0.2 *
(nose_gear_length / u.inch) ** 0.5 *
n_wheels ** 0.45 *
(advanced_composites["landing_gear"] if use_advanced_composites else 1)
) * u.lbm
def mass_nacelles(
nacelle_length: float,
nacelle_width: float,
nacelle_height: float,
ultimate_load_factor: float,
mass_per_engine: float,
n_engines: int,
is_pylon_mounted: bool = False,
engines_have_propellers: bool = False,
engines_have_thrust_reversers: bool = False,
use_advanced_composites: bool = False,
) -> float:
"""
Computes the mass of the nacelles for a cargo/transport aircraft, according to Raymer's Aircraft
Design: A Conceptual Approach. Excludes the engine itself and immediate engine peripherals.
Args:
nacelle_length: length of the nacelle, front to back [m]
nacelle_width: width of the nacelle [m]
nacelle_height: height of the nacelle, top to bottom [m]
ultimate_load_factor: ultimate load factor of the aircraft
mass_per_engine: mass of the engine itself [kg]
n_engines: number of engines
is_pylon_mounted: whether the engine is pylon-mounted or not
engines_have_propellers: whether the engines have propellers or not (e.g., a jet)
engines_have_thrust_reversers: whether the engines have thrust reversers or not
use_advanced_composites: Whether to use advanced composites for the nacelles. If True, the nacelles mass
is modified accordingly.
Returns:
mass of the nacelles [kg]
"""
K_ng = 1.017 if is_pylon_mounted else 1
K_p = 1.4 if engines_have_propellers else 1
K_tr = 1.18 if engines_have_thrust_reversers else 1
mass_per_engine_with_contents = np.softmax(
(2.331 * (mass_per_engine / u.lbm) ** 0.901) * K_p * K_tr * u.lbm,
mass_per_engine,
hardness=10 / mass_per_engine
)
nacelle_wetted_area = (
nacelle_length * nacelle_height * 2 +
nacelle_width * nacelle_height * 2
)
return (
0.6724 *
K_ng *
(nacelle_length / u.foot) ** 0.10 *
(nacelle_width / u.foot) ** 0.294 *
(ultimate_load_factor) ** 0.119 *
(mass_per_engine_with_contents / u.lbm) ** 0.611 *
(n_engines) ** 0.984 *
(nacelle_wetted_area / u.foot ** 2) ** 0.224 *
(advanced_composites["fuselage/nacelle"] if use_advanced_composites else 1)
) * u.lbm  # Raymer's regression gives pounds-mass; convert to kg for consistency with the other models.
def mass_engine_controls(
n_engines: int,
cockpit_to_engine_length: float,
) -> float:
"""
Computes the mass of the engine controls for a cargo/transport aircraft, according to Raymer's Aircraft
Design: A Conceptual Approach.
Args:
n_engines: The number of engines in the aircraft.
cockpit_to_engine_length: The distance from the cockpit to the engine [m].
Returns:
The mass of the engine controls [kg].
"""
return (
5 * n_engines +
0.80 * (cockpit_to_engine_length / u.foot) * n_engines
) * u.lbm
def mass_starter(
n_engines: int,
mass_per_engine: float,
) -> float:
"""
Computes the mass of the engine starter for a cargo/transport aircraft, according to Raymer's Aircraft
Design: A Conceptual Approach.
Args:
n_engines: The number of engines in the aircraft.
mass_per_engine: The mass of the engine [kg].
Returns:
The mass of the engine starter [kg].
"""
return (
49.19 * (
mass_per_engine / u.lbm * n_engines
/ 1000
) ** 0.541
) * u.lbm
def mass_fuel_system(
fuel_volume: float,
n_tanks: int,
fraction_in_integral_tanks: float = 0.5,
) -> float:
"""
Computes the mass of the fuel system (e.g., tanks, pumps, but not the fuel itself) for a cargo/transport
aircraft, according to Raymer's Aircraft Design: A Conceptual Approach.
Args:
fuel_volume: The volume of fuel in the aircraft [m^3].
n_tanks: The number of fuel tanks in the aircraft.
fraction_in_integral_tanks: The fraction of the fuel volume that is in integral tanks, as opposed to
protected tanks.
Returns:
The mass of the fuel system [kg].
"""
fraction_in_protected_tanks = 1 - fraction_in_integral_tanks
return (
2.405 *
(fuel_volume / u.gallon) ** 0.606 *
(1 + fraction_in_integral_tanks) ** -1 *
(1 + fraction_in_protected_tanks) *
n_tanks ** 0.5
) * u.lbm
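# Example usage (illustrative, assumed values): a 2 m^3 fuel load split across three tanks,
# half of it carried in integral (wet-wing) tanks:
# m_fuel_sys = mass_fuel_system(fuel_volume=2.0, n_tanks=3, fraction_in_integral_tanks=0.5)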
def mass_flight_controls(
airplane: asb.Airplane,
aircraft_Iyy: float,
fraction_of_mechanical_controls: float = 0,
) -> float:
"""
Computes the added mass of the flight control surfaces (and any applicable linkages, in the case of mechanical
controls) for a cargo/transport aircraft, according to Raymer's Aircraft Design: A Conceptual Approach.
Args:
airplane: The airplane to calculate the mass of the flight controls for.
aircraft_Iyy: The moment of inertia of the aircraft about the y-axis.
fraction_of_mechanical_controls: The fraction of the flight controls that are mechanical, as opposed to
hydraulic.
Returns:
The mass of the flight controls [kg].
"""
### Compute how many functions the control surfaces are performing (e.g., aileron, elevator, flap, rudder, etc.)
N_functions_performed_by_controls = 0
for wing in airplane.wings:
N_functions_performed_by_controls += len(wing.get_control_surface_names())
### Compute the control surface area
control_surface_area = 0
for wing in airplane.wings:
control_surface_area += wing.control_surface_area()
return (
145.9 *
N_functions_performed_by_controls ** 0.554 * # number of functions performed by controls
(1 + fraction_of_mechanical_controls) ** -1 *
(control_surface_area / u.foot ** 2) ** 0.20 *
(aircraft_Iyy / (u.lbm * u.foot ** 2) * 1e-6) ** 0.07
) * u.lbm
def mass_APU(
mass_APU_uninstalled: float,
):
"""
Computes the mass of the auxiliary power unit (APU) for a cargo/transport aircraft, according to Raymer's Aircraft
Design: A Conceptual Approach.
Args:
mass_APU_uninstalled: The mass of the APU uninstalled [kg].
Returns:
The mass of the APU, as installed [kg].
"""
return 2.2 * mass_APU_uninstalled
def mass_instruments(
fuselage: asb.Fuselage,
main_wing: asb.Wing,
n_engines: int,
n_crew: Union[int, float],
engine_is_reciprocating: bool = False,
engine_is_turboprop: bool = False,
):
"""
Computes the mass of the flight instruments for a cargo/transport aircraft, according to Raymer's Aircraft
Design: A Conceptual Approach.
Args:
fuselage: The fuselage of the airplane.
main_wing: The main wing of the airplane.
n_engines: The number of engines on the airplane.
n_crew: The number of crew members on the airplane. Use 0.5 for a UAV.
engine_is_reciprocating: Whether the engine is reciprocating.
engine_is_turboprop: Whether the engine is a turboprop.
Returns:
The mass of the instruments [kg]
"""
K_r = 1.133 if engine_is_reciprocating else 1
K_tp = 0.793 if engine_is_turboprop else 1
return (
4.509 *
K_r *
K_tp *
n_crew ** 0.541 *
n_engines *
(fuselage.length() / u.foot * main_wing.span() / u.foot) ** 0.5
) * u.lbm
def mass_hydraulics(
airplane: asb.Airplane,
fuselage: asb.Fuselage,
main_wing: asb.Wing,
):
"""
Computes the mass of the hydraulic system for a cargo/transport aircraft, according to Raymer's Aircraft
Design: A Conceptual Approach.
Args:
airplane: The airplane to calculate the mass of the hydraulic system for.
fuselage: The fuselage of the airplane.
main_wing: The main wing of the airplane.
Returns:
The mass of the hydraulic system [kg].
"""
N_functions_performed_by_controls = 0
for wing in airplane.wings:
N_functions_performed_by_controls += len(wing.get_control_surface_names())
return (
0.2673 *
N_functions_performed_by_controls *
(fuselage.length() / u.foot * main_wing.span() / u.foot) ** 0.937
) * u.lbm
def mass_electrical(
system_electrical_power_rating: float,
electrical_routing_distance: float,
n_engines: int,
):
"""
Computes the mass of the electrical system for a cargo/transport aircraft, according to Raymer's Aircraft
Design: A Conceptual Approach.
Args:
system_electrical_power_rating: The total electrical power rating of the aircraft's electrical system [Watts].
Typical values:
* Transport airplane: 40,000 - 60,000 W
* Fighter/bomber airplane: 110,000 - 160,000 W
electrical_routing_distance: The electrical routing distance, generators to avionics to cockpit [meters].
n_engines: The number of engines on the airplane.
Returns:
The mass of the electrical system [kg].
"""
return (
7.291 *
(system_electrical_power_rating / 1e3) ** 0.782 *
(electrical_routing_distance / u.foot) ** 0.346 *
(n_engines) ** 0.10
) * u.lbm
def mass_avionics(
mass_uninstalled_avionics: float,
):
"""
Computes the mass of the avionics for a cargo/transport aircraft, according to Raymer's Aircraft
Design: A Conceptual Approach.
Args:
mass_uninstalled_avionics: The mass of the avionics, before installation [kg].
Returns:
The mass of the avionics, as installed [kg].
"""
return (
1.73 *
(mass_uninstalled_avionics / u.lbm) ** 0.983
) * u.lbm
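# Example usage (illustrative, assumed value): a 500 kg uninstalled avionics suite:
# m_avionics_installed = mass_avionics(mass_uninstalled_avionics=500)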
def mass_furnishings(
n_crew: Union[int, float],
mass_cargo: float,
fuselage: asb.Fuselage,
):
"""
Computes the mass of the furnishings for a cargo/transport aircraft, according to Raymer's Aircraft
Design: A Conceptual Approach. Does not include cargo handling gear or seats.
Args:
n_crew: The number of crew members on the airplane. Use 0.5 for a UAV.
mass_cargo: The mass of the cargo [kg].
fuselage: The fuselage of the airplane.
Returns:
The mass of the furnishings [kg].
"""
return (
0.0577 *
n_crew ** 0.1 *
(mass_cargo / u.lbm) ** 0.393 *
(fuselage.area_wetted() / u.foot ** 2) ** 0.75
) * u.lbm
def mass_air_conditioning(
n_crew: int,
n_pax: int,
volume_pressurized: float,
mass_uninstalled_avionics: float,
):
"""
Computes the mass of the air conditioning system for a cargo/transport aircraft, according to Raymer's Aircraft
Design: A Conceptual Approach.
Args:
n_crew: The number of crew members on the airplane.
n_pax: The number of passengers on the airplane.
volume_pressurized: The volume of the pressurized cabin [meters^3].
mass_uninstalled_avionics: The mass of the avionics, before installation [kg].
Returns:
The mass of the air conditioning system [kg].
"""
return (
62.36 *
(n_crew + n_pax) ** 0.25 *
(volume_pressurized / u.foot ** 3 / 1e3) ** 0.604 *
(mass_uninstalled_avionics / u.lbm) ** 0.10
) * u.lbm
def mass_anti_ice(
design_mass_TOGW: float,
):
"""
Computes the mass of the anti-ice system for a cargo/transport aircraft, according to Raymer's Aircraft
Design: A Conceptual Approach.
Args:
design_mass_TOGW: The design takeoff gross weight of the entire airplane [kg].
Returns:
The mass of the anti-ice system [kg].
"""
return 0.002 * design_mass_TOGW
def mass_handling_gear(
design_mass_TOGW: float,
):
"""
Computes the mass of the handling gear for a cargo/transport aircraft, according to Raymer's Aircraft
Design: A Conceptual Approach.
Args:
design_mass_TOGW: The design takeoff gross weight of the entire airplane [kg].
Returns:
The mass of the handling gear [kg].
"""
return 3e-4 * design_mass_TOGW
def mass_military_cargo_handling_system(
cargo_floor_area: float,
):
"""
Computes the mass of the military cargo handling system for a cargo/transport aircraft, according to Raymer's
Aircraft Design: A Conceptual Approach.
Args:
cargo_floor_area: The floor area of the cargo compartment [meters^2].
Returns:
The mass of the military cargo handling system [kg].
"""
return (
2.4 *
(cargo_floor_area / u.foot ** 2)
) * u.lbm
|
AeroSandbox
|
/AeroSandbox-4.1.1.tar.gz/AeroSandbox-4.1.1/aerosandbox/library/weights/raymer_cargo_transport_weights.py
|
raymer_cargo_transport_weights.py
|
import aerosandbox as asb
import aerosandbox.numpy as np
import aerosandbox.tools.units as u
from .raymer_fudge_factors import advanced_composites
# From Raymer: "Aircraft Design: A Conceptual Approach", 5th Ed.
# Section 15.3.3: General Aviation Weights
def mass_wing(
wing: asb.Wing,
design_mass_TOGW: float,
ultimate_load_factor: float,
mass_fuel_in_wing: float,
cruise_op_point: asb.OperatingPoint,
use_advanced_composites: bool = False,
) -> float:
"""
Computes the mass of a wing of a general aviation aircraft, according to Raymer's Aircraft Design: A Conceptual
Approach.
Note: Torenbeek's wing mass model is likely more accurate; see `mass_wing()` in `torenbeek_weights.py` (same
directory).
Args:
wing: The wing object.
design_mass_TOGW: The design takeoff gross weight of the entire aircraft [kg].
ultimate_load_factor: The ultimate load factor of the aircraft.
mass_fuel_in_wing: The mass of fuel in the wing [kg]. If there is no fuel in the wing, set this to 0.
Note: Model extrapolates strangely for infinitesimally-small-but-nonzero fuel masses; don't let an
optimizer land here.
cruise_op_point: The cruise operating point of the aircraft.
use_advanced_composites: Whether to use advanced composites for the wing. If True, the wing mass is modified
accordingly.
Returns: The mass of the wing [kg].
"""
try:
fuel_is_in_wing = bool(mass_fuel_in_wing > 0)
except RuntimeError:
fuel_is_in_wing = True
if fuel_is_in_wing:
fuel_weight_factor = np.softmax(
(mass_fuel_in_wing / u.lbm) ** 0.0035,
1,
hardness=1000
)
else:
fuel_weight_factor = 1
airfoil_thicknesses = [
xsec.airfoil.max_thickness()
for xsec in wing.xsecs
]
airfoil_t_over_c = np.min(airfoil_thicknesses)
cos_sweep = np.cosd(wing.mean_sweep_angle())
return (
0.036 *
(wing.area('planform') / u.foot ** 2) ** 0.758 *
fuel_weight_factor *
(wing.aspect_ratio() / cos_sweep ** 2) ** 0.6 *
(cruise_op_point.dynamic_pressure() / u.psf) ** 0.006 *
wing.taper_ratio() ** 0.04 *
(100 * airfoil_t_over_c / cos_sweep) ** -0.3 *
(design_mass_TOGW / u.lbm * ultimate_load_factor) ** 0.49 *
(advanced_composites["wing"] if use_advanced_composites else 1)
) * u.lbm
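# A hypothetical usage sketch for the function above (the geometry, loads, and masses below are
# illustrative assumptions, not values from Raymer):
#
# >>> wing = asb.Wing(
# ...     symmetric=True,
# ...     xsecs=[
# ...         asb.WingXSec(xyz_le=[0, 0, 0], chord=1.6, airfoil=asb.Airfoil("naca2412")),
# ...         asb.WingXSec(xyz_le=[0.2, 5.5, 0], chord=1.0, airfoil=asb.Airfoil("naca2412")),
# ...     ],
# ... )
# >>> cruise = asb.OperatingPoint(atmosphere=asb.Atmosphere(altitude=2500), velocity=60.)
# >>> mass_wing(
# ...     wing=wing,
# ...     design_mass_TOGW=1150.,    # kg
# ...     ultimate_load_factor=5.7,  # 3.8 limit load factor * 1.5 factor of safety
# ...     mass_fuel_in_wing=120.,    # kg
# ...     cruise_op_point=cruise,
# ... )  # -> wing mass estimate, in kg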
def mass_hstab(
hstab: asb.Wing,
design_mass_TOGW: float,
ultimate_load_factor: float,
cruise_op_point: asb.OperatingPoint,
use_advanced_composites: bool = False,
) -> float:
"""
Computes the mass of a horizontal stabilizer of a general aviation aircraft, according to Raymer's Aircraft Design:
A Conceptual Approach.
Args:
hstab: The horizontal stabilizer object.
design_mass_TOGW: The design takeoff gross weight of the entire aircraft [kg].
ultimate_load_factor: The ultimate load factor of the aircraft.
cruise_op_point: The cruise operating point of the aircraft.
use_advanced_composites: Whether to use advanced composites for the horizontal stabilizer. If True, the
hstab mass is modified accordingly.
Returns: The mass of the horizontal stabilizer [kg].
"""
airfoil_thicknesses = [
xsec.airfoil.max_thickness()
for xsec in hstab.xsecs
]
airfoil_t_over_c = np.min(airfoil_thicknesses)
cos_sweep = np.cosd(hstab.mean_sweep_angle())
return (
0.016 *
(design_mass_TOGW / u.lbm * ultimate_load_factor) ** 0.414 *
(cruise_op_point.dynamic_pressure() / u.psf) ** 0.168 *
(hstab.area('planform') / u.foot ** 2) ** 0.896 *
(100 * airfoil_t_over_c / cos_sweep) ** -0.12 *
(hstab.aspect_ratio() / cos_sweep ** 2) ** 0.043 *
hstab.taper_ratio() ** -0.02 *
(advanced_composites["tails"] if use_advanced_composites else 1)
) * u.lbm
def mass_vstab(
vstab: asb.Wing,
design_mass_TOGW: float,
ultimate_load_factor: float,
cruise_op_point: asb.OperatingPoint,
is_t_tail: bool = False,
use_advanced_composites: bool = False,
) -> float:
"""
Computes the mass of a vertical stabilizer of a general aviation aircraft, according to Raymer's Aircraft Design:
A Conceptual Approach.
Args:
vstab: The vertical stabilizer object.
design_mass_TOGW: The design takeoff gross weight of the entire aircraft [kg].
ultimate_load_factor: The ultimate load factor of the aircraft.
cruise_op_point: The cruise operating point of the aircraft.
is_t_tail: Whether the aircraft is a T-tail or not.
use_advanced_composites: Whether to use advanced composites for the vertical stabilizer. If True, the vstab
mass is modified accordingly.
Returns: The mass of the vertical stabilizer [kg].
"""
airfoil_thicknesses = [
xsec.airfoil.max_thickness()
for xsec in vstab.xsecs
]
airfoil_t_over_c = np.min(airfoil_thicknesses)
cos_sweep = np.cosd(vstab.mean_sweep_angle())
return (
0.073 *
(1 + (0.2 if is_t_tail else 0)) *
(design_mass_TOGW / u.lbm * ultimate_load_factor) ** 0.376 *
(cruise_op_point.dynamic_pressure() / u.psf) ** 0.122 *
(vstab.area('planform') / u.foot ** 2) ** 0.876 *
(100 * airfoil_t_over_c / cos_sweep) ** -0.49 *
(vstab.aspect_ratio() / cos_sweep ** 2) ** 0.357 *
vstab.taper_ratio() ** 0.039 *
(advanced_composites["tails"] if use_advanced_composites else 1)
) * u.lbm
def mass_fuselage(
fuselage: asb.Fuselage,
design_mass_TOGW: float,
ultimate_load_factor: float,
L_over_D: float,
cruise_op_point: asb.OperatingPoint,
wing_to_tail_distance: float,
pressure_differential: float = 0.0,
use_advanced_composites: bool = False,
) -> float:
"""
Computes the mass of a fuselage of a general aviation aircraft, according to Raymer's Aircraft Design: A Conceptual
Approach.
Args:
fuselage: The fuselage object.
design_mass_TOGW: The design takeoff gross weight of the entire aircraft [kg].
ultimate_load_factor: The ultimate load factor of the aircraft.
L_over_D: The lift-to-drag ratio of the aircraft in cruise.
cruise_op_point: The cruise operating point of the aircraft.
wing_to_tail_distance: The distance between the wing root-quarter-chord-point and the tail
root-quarter-chord-point of the aircraft [m].
pressure_differential: The absolute value of the pressure differential across the fuselage [Pa].
use_advanced_composites: Whether to use advanced composites for the fuselage. If True, the fuselage mass is
modified accordingly.
Returns: The mass of the fuselage [kg].
"""
mass_fuselage_without_pressurization = (
0.052 *
(fuselage.area_wetted() / u.foot ** 2) ** 1.086 *
(design_mass_TOGW / u.lbm * ultimate_load_factor) ** 0.177 *
(wing_to_tail_distance / u.foot) ** -0.051 *
(L_over_D) ** -0.072 *
(cruise_op_point.dynamic_pressure() / u.psf) ** 0.241 *
(advanced_composites["fuselage/nacelle"]
if use_advanced_composites else 1)
) * u.lbm
mass_pressurization_components = (
11.9 *
(
fuselage.volume() / u.foot ** 3 *
pressure_differential / u.psi
) ** 0.271
) * u.lbm
return (
mass_fuselage_without_pressurization +
mass_pressurization_components
)
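# For pressurized cabins, `pressure_differential` is the cabin pressure minus the ambient static
# pressure at the maximum operating altitude. A hypothetical way to estimate it (the altitudes
# below are assumptions, not part of Raymer's method):
#
# >>> cabin_pressure = asb.Atmosphere(altitude=2400).pressure()    # ~8,000 ft cabin altitude
# >>> ambient_pressure = asb.Atmosphere(altitude=7600).pressure()  # ~25,000 ft max altitude
# >>> delta_p = cabin_pressure - ambient_pressure  # [Pa]; pass this as `pressure_differential`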
def mass_main_landing_gear(
main_gear_length: float,
design_mass_TOGW: float,
n_gear: int = 2,
is_retractable: bool = True,
use_advanced_composites: bool = False,
) -> float:
"""
Computes the mass of the main landing gear of a general aviation aircraft, according to Raymer's Aircraft Design:
A Conceptual Approach.
Args:
main_gear_length: The length of the main landing gear [m].
design_mass_TOGW: The design takeoff gross weight of the entire aircraft [kg].
n_gear: The number of main landing gear.
is_retractable: Whether the main landing gear is retractable or not.
use_advanced_composites: Whether to use advanced composites for the main landing gear. If True, the main
landing gear mass is modified accordingly.
Returns: The mass of the main landing gear [kg].
"""
ultimate_landing_load_factor = n_gear * 1.5
return (
0.095 *
(ultimate_landing_load_factor * design_mass_TOGW / u.lbm) ** 0.768 *
(main_gear_length / u.foot / 12) ** 0.409 *
(advanced_composites["landing_gear"] if use_advanced_composites else 1) *
(((5.7 - 1.4 / 2) / 5.7) if not is_retractable else 1) # derived from Raymer Section 15.2 and 15.3.3 together.
) * u.lbm
def mass_nose_landing_gear(
nose_gear_length: float,
design_mass_TOGW: float,
n_gear: int = 1,
is_retractable: bool = True,
use_advanced_composites: bool = False,
) -> float:
"""
Computes the mass of the nose landing gear of a general aviation aircraft, according to Raymer's Aircraft Design:
A Conceptual Approach.
Args:
nose_gear_length: The length of the nose landing gear [m].
design_mass_TOGW: The design takeoff gross weight of the entire aircraft [kg].
n_gear: The number of nose landing gear.
is_retractable: Whether the nose landing gear is retractable or not.
use_advanced_composites: Whether to use advanced composites for the nose landing gear. If True, the nose
landing gear mass is modified accordingly.
Returns: The mass of the nose landing gear [kg].
"""
ultimate_landing_load_factor = n_gear * 1.5
return (
0.125 *
(ultimate_landing_load_factor * design_mass_TOGW / u.lbm) ** 0.566 *
(nose_gear_length / u.foot / 12) ** 0.845 *
(advanced_composites["landing_gear"] if use_advanced_composites else 1) *
(((5.7 - 1.4 / 2) / 5.7) if not is_retractable else 1) # derived from Raymer Section 15.2 and 15.3.3 together.
) * u.lbm
def mass_engines_installed(
n_engines: int,
mass_per_engine: float,
) -> float:
"""
Computes the mass of the engines installed on a general aviation aircraft, according to Raymer's Aircraft Design:
A Conceptual Approach. Includes propellers and engine mounts.
Args:
n_engines: The number of engines installed on the aircraft.
mass_per_engine: The mass of a single engine [kg].
Returns: The mass of the engines installed on the aircraft [kg].
"""
return (
2.575 *
(mass_per_engine / u.lbm) ** 0.922 *
n_engines
) * u.lbm
def mass_fuel_system(
fuel_volume: float,
n_tanks: int,
n_engines: int,
fraction_in_integral_tanks: float = 0.5,
) -> float:
"""
Computes the mass of the fuel system (e.g., tanks, pumps, but not the fuel itself) for a general aviation
aircraft, according to Raymer's Aircraft Design: A Conceptual Approach.
Args:
fuel_volume: The volume of fuel in the aircraft [m^3].
n_tanks: The number of fuel tanks in the aircraft.
n_engines: The number of engines in the aircraft.
fraction_in_integral_tanks: The fraction of the fuel volume that is in integral tanks, as opposed to
protected tanks.
Returns: The mass of the fuel system [kg].
"""
return (
2.49 *
(fuel_volume / u.gallon) ** 0.726 *
(1 + fraction_in_integral_tanks) ** -0.363 *
n_tanks ** 0.242 *
n_engines ** 0.157
) * u.lbm
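# A quick illustrative call (inputs are assumptions): roughly 190 liters of fuel split across
# two integral wing tanks feeding one engine:
#
# >>> mass_fuel_system(
# ...     fuel_volume=0.19,  # m^3
# ...     n_tanks=2,
# ...     n_engines=1,
# ...     fraction_in_integral_tanks=1.0,
# ... )  # -> fuel system mass estimate, in kg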
def mass_flight_controls(
airplane: asb.Airplane,
design_mass_TOGW: float,
ultimate_load_factor: float,
fuselage: asb.Fuselage = None,
main_wing: asb.Wing = None,
) -> float:
"""
Computes the mass of the flight controls for a general aviation aircraft, according to Raymer's Aircraft Design:
A Conceptual Approach.
Args:
airplane: The airplane for which to compute the flight controls mass.
design_mass_TOGW: The design takeoff gross weight of the entire aircraft [kg].
ultimate_load_factor: The ultimate load factor of the aircraft.
fuselage: The fuselage to use for computing the flight controls mass. If fuselage is None, or if there are no
fuselages in the airplane object, the flight controls mass will be computed without a fuselage.
main_wing: The main wing to use for computing the flight controls mass. If main_wing is None, or if there are
no wings in the airplane object, the flight controls mass will be computed without a main wing.
Returns: The mass of the flight controls [kg].
"""
### Handle the fuselage argument and get the fuselage length factor
if fuselage is None:
if len(airplane.fuselages) == 0:
pass
elif len(airplane.fuselages) == 1:
fuselage = airplane.fuselages[0]
else:
raise ValueError('More than one fuselage is present in the airplane. Please specify which fuselage to use '
'for computing flight control system mass.')
if fuselage is not None:
fuselage_length_factor = (fuselage.length() / u.foot) ** 1.536
else:
fuselage_length_factor = 1
### Handle the main wing argument and get the wing span factor
if main_wing is None:
if len(airplane.wings) == 0:
pass
elif len(airplane.wings) == 1:
main_wing = airplane.wings[0]
else:
raise ValueError('More than one wing is present in the airplane. Please specify which wing is the main '
'wing using the `main_wing` argument.')
if main_wing is not None:
wing_span_factor = (main_wing.span() / u.foot) ** 0.371
else:
wing_span_factor = 1
# ### Compute how many functions the control surfaces are performing (e.g., aileron, elevator, flap, rudder, etc.)
# N_functions_performed_by_controls = 0
# for wing in airplane.wings:
# N_functions_performed_by_controls += len(wing.get_control_surface_names())
#
# ### Compute the control surface area
# control_surface_area = 0
# for wing in airplane.wings:
# control_surface_area += wing.control_surface_area()
return (
0.053 *
fuselage_length_factor *
wing_span_factor *
(design_mass_TOGW / u.lbm * ultimate_load_factor * 1e-4) ** 0.80
) * u.lbm
def mass_hydraulics(
fuselage_width: float,
cruise_op_point: asb.OperatingPoint,
) -> float:
"""
Computes the mass of the hydraulics for a general aviation aircraft, according to Raymer's Aircraft Design:
A Conceptual Approach.
Args:
fuselage_width: The width of the fuselage [m].
cruise_op_point: The cruise operating point of the aircraft.
Returns: The mass of the hydraulics [kg].
"""
mach = cruise_op_point.mach()
K_h = 0.16472092991402892 * mach ** 0.8327375101470056
# This is a curve fit to a few points that Raymer gives in his book. The points are:
# {
# 0.1 : 0.013,
# 0.25: 0.05,
# 0.5 : 0.11,
# 0.75: 0.12
# }
# where the first column is the Mach number and the second column is the K_h value.
# These are described as:
#
# "0.05 for low subsonic with hydraulics for brakes and retracts only; 0.11 for medium subsonic with hydraulics
# for flaps; 0.12 for high subsonic with hydraulic flight controls; 0.013 for light plane with hydraulic brakes
# only (and use M=0.1)"
return (
K_h *
(fuselage_width / u.foot) ** 0.8 *
mach ** 0.5
) * u.lbm
def mass_avionics(
mass_uninstalled_avionics: float,
) -> float:
"""
Computes the mass of the avionics for a general aviation aircraft, according to Raymer's Aircraft Design: A
Conceptual Approach.
Args:
mass_uninstalled_avionics: The mass of the avionics, before installation [kg].
Returns: The mass of the avionics, as installed [kg].
"""
return (
2.117 *
(mass_uninstalled_avionics / u.lbm) ** 0.933
) * u.lbm
def mass_electrical(
fuel_system_mass: float,
avionics_mass: float,
) -> float:
"""
Computes the mass of the electrical system for a general aviation aircraft, according to Raymer's Aircraft Design:
A Conceptual Approach.
Args:
fuel_system_mass: The mass of the fuel system [kg].
avionics_mass: The mass of the avionics [kg].
Returns: The mass of the electrical system [kg].
"""
fuel_and_avionics_masses = fuel_system_mass + avionics_mass
return (
12.57 *
(fuel_and_avionics_masses / u.lbm) ** 0.51
) * u.lbm
def mass_air_conditioning_and_anti_ice(
design_mass_TOGW: float,
n_crew: int,
n_pax: int,
mass_avionics: float,
cruise_op_point: asb.OperatingPoint,
):
"""
Computes the mass of the air conditioning and anti-ice system for a general aviation aircraft, according to
Raymer's Aircraft Design: A Conceptual Approach.
Args:
design_mass_TOGW: The design takeoff gross weight of the entire airplane [kg].
n_crew: The number of crew members.
n_pax: The number of passengers.
mass_avionics: The mass of the avionics [kg].
cruise_op_point: The cruise operating point of the aircraft.
Returns: The mass of the air conditioning and anti-ice system [kg].
"""
mach = cruise_op_point.mach()
return (
0.265 *
(design_mass_TOGW / u.lbm) ** 0.52 *
(n_crew + n_pax) ** 0.68 *
(mass_avionics / u.lbm) ** 0.17 *
mach ** 0.08
) * u.lbm
def mass_furnishings(
design_mass_TOGW: float,
):
"""
Computes the mass of the furnishings for a general aviation aircraft, according to Raymer's Aircraft Design: A
Conceptual Approach.
Args:
design_mass_TOGW: The design takeoff gross weight of the entire airplane [kg].
Returns: The mass of the furnishings [kg].
"""
return np.softmax(
0.0582 * design_mass_TOGW - 65 * u.lbm,
0,
softness=10 * u.lbm,
)
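# A minimal end-to-end sketch (not part of the original file): estimating a few systems masses
# for a hypothetical light single-engine airplane. All inputs below are assumptions.
if __name__ == "__main__":
    m_avionics = mass_avionics(mass_uninstalled_avionics=20.)  # 20 kg of uninstalled avionics, assumed
    m_fuel_system = mass_fuel_system(fuel_volume=0.19, n_tanks=2, n_engines=1)
    m_electrical = mass_electrical(fuel_system_mass=m_fuel_system, avionics_mass=m_avionics)
    m_furnishings = mass_furnishings(design_mass_TOGW=1150.)
    print(f"Avionics (installed): {m_avionics:.1f} kg")
    print(f"Fuel system:          {m_fuel_system:.1f} kg")
    print(f"Electrical system:    {m_electrical:.1f} kg")
    print(f"Furnishings:          {m_furnishings:.1f} kg")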
# ==== End of file: /AeroSandbox-4.1.1.tar.gz/AeroSandbox-4.1.1/aerosandbox/library/weights/raymer_general_aviation_weights.py ====
import aerosandbox as asb
import aerosandbox.numpy as np
import aerosandbox.tools.units as u
# From Raymer, Aircraft Design: A Conceptual Approach, 5th Ed.
# Table 15.3: Miscellaneous Weights
mass_passenger = 215 * u.lbm # includes carry-on
def mass_seat(
kind="passenger"
) -> float:
"""
Computes the mass of an individual seat on an airplane.
Args:
kind: The kind of seat. Can be "passenger", "flight_deck", or "troop".
* "passenger" seats are standard commercial airline seats.
* "flight_deck" seats are the seats in the cockpit.
* "troop" seats are the seats in the cargo hold.
Returns: The mass of a single seat, in kg. Don't forget to multiply by the number of seats to get the total mass
of all seats.
"""
if kind == "passenger":
return 32 * u.lbm
elif kind == "flight_deck":
return 60 * u.lbm
elif kind == "troop":
return 11 * u.lbm
else:
raise ValueError("Bad value of `kind`!")
def mass_lavatories(
n_pax,
aircraft_type="short-haul"
) -> float:
"""
Computes the required mass of all lavatories on an airplane.
Args:
n_pax: The number of passengers on the airplane.
aircraft_type: The type of aircraft. Can be "long-haul", "short-haul", or "business-jet".
* "long-haul" aircraft are long-range commercial airliners, like the Boeing 777 or Airbus A350.
* "short-haul" aircraft are short-range commercial airliners, like the Boeing 737 or Airbus A320.
* "business-jet" aircraft are small private jets, like the Cessna Citation X or Gulfstream G650.
Returns: The mass of all lavatories on the airplane, in kg.
"""
if aircraft_type == "long-haul":
return (1.11 * n_pax ** 1.33) * u.lbm
elif aircraft_type == "short-haul":
return (0.31 * n_pax ** 1.33) * u.lbm
elif aircraft_type == "business-jet":
return (3.90 * n_pax ** 1.33) * u.lbm
else:
raise ValueError("Bad value of `aircraft_type`!")
# ==== End of file: /AeroSandbox-4.1.1.tar.gz/AeroSandbox-4.1.1/aerosandbox/library/weights/raymer_miscellaneous.py ====
from aerosandbox.common import AeroSandboxObject
import aerosandbox.numpy as np
from aerosandbox.atmosphere._isa_atmo_functions import pressure_isa, temperature_isa
from aerosandbox.atmosphere._diff_atmo_functions import pressure_differentiable, temperature_differentiable
import aerosandbox.tools.units as u
### Define constants
gas_constant_universal = 8.31432 # J/(mol*K); universal gas constant
molecular_mass_air = 28.9644e-3 # kg/mol; molecular mass of air
gas_constant_air = gas_constant_universal / molecular_mass_air # J/(kg*K); gas constant of air
effective_collision_diameter = 0.365e-9 # m, effective collision diameter of an air molecule
### Define the Atmosphere class
class Atmosphere(AeroSandboxObject):
r"""
All models here are smoothed fits to the 1976 COESA model;
see AeroSandbox\studies\Atmosphere Fitting for details.
"""
def __init__(self,
altitude: float = 0., # meters
method: str = "differentiable"
):
"""
Initialize a new Atmosphere.
Args:
altitude: Flight altitude, in meters. This is assumed to be a geopotential altitude above MSL.
method: Method of atmosphere modeling to use. Either:
* "differentiable" - a C1-continuous fit to the International Standard Atmosphere
* "isa" - the International Standard Atmosphere
"""
self.altitude = altitude
self.method = method
self._valid_altitude_range = (0, 80000)
def __repr__(self) -> str:
try:
altitude_string = f"altitude: {self.altitude:.0f} m ({self.altitude / u.foot:.0f} ft)"
except (ValueError, TypeError):
altitude_string = f"altitude: {self.altitude} m"
return f"Atmosphere ({altitude_string}, method: '{self.method}')"
### The two primary state variables, pressure and temperature, go here!
def pressure(self):
"""
Returns the pressure, in Pascals.
"""
if self.method.lower() == "isa":
return pressure_isa(self.altitude)
elif self.method.lower() == "differentiable":
return pressure_differentiable(self.altitude)
else:
raise ValueError("Bad value of 'type'!")
def temperature(self):
"""
Returns the temperature, in Kelvin.
"""
if self.method.lower() == "isa":
return temperature_isa(self.altitude)
elif self.method.lower() == "differentiable":
return temperature_differentiable(self.altitude)
else:
raise ValueError("Bad value of 'type'!")
### Everything else in this class is a derived quantity; all models of derived quantities go here.
def density(self):
"""
Returns the density, in kg/m^3.
"""
rho = self.pressure() / (self.temperature() * gas_constant_air)
return rho
def speed_of_sound(self):
"""
Returns the speed of sound, in m/s.
"""
temperature = self.temperature()
return (self.ratio_of_specific_heats() * gas_constant_air * temperature) ** 0.5
def dynamic_viscosity(self):
"""
Returns the dynamic viscosity (mu), in kg/(m*s).
Based on Sutherland's Law, citing `https://www.cfd-online.com/Wiki/Sutherland's_law`.
According to Rathakrishnan, E. (2013). Theoretical aerodynamics. John Wiley & Sons.:
This relationship is valid from 0.01 to 100 atm, and between 0 and 3000K.
According to White, F. M., & Corfield, I. (2006). Viscous fluid flow (Vol. 3, pp. 433-434). New York: McGraw-Hill.:
The error is no more than approximately 2% for air between 170K and 1900K.
"""
# Sutherland constants
C1 = 1.458e-6 # kg/(m*s*sqrt(K))
S = 110.4 # K
# Sutherland equation
temperature = self.temperature()
mu = C1 * temperature ** 1.5 / (temperature + S)
return mu
def kinematic_viscosity(self):
"""
Returns the kinematic viscosity (nu), in m^2/s.
Definitional.
"""
return self.dynamic_viscosity() / self.density()
def ratio_of_specific_heats(self):
return 1.4 # TODO model temperature variation
# def thermal_velocity(self):
# """
# Returns the thermal velocity (mean particle speed)
# Returns:
#
# """
#
def mean_free_path(self):
"""
Returns the mean free path of an air molecule, in meters.
To find the collision radius, assumes "a hard-sphere gas that has the same viscosity as the actual gas being considered".
From Vincenti, W. G. and Kruger, C. H. (1965). Introduction to physical gas dynamics. Krieger Publishing Company. p. 414.
"""
return self.dynamic_viscosity() / self.pressure() * np.sqrt(
np.pi * gas_constant_universal * self.temperature() / (2 * molecular_mass_air)
)
def knudsen(self, length):
"""
Computes the Knudsen number for a given length.
"""
return self.mean_free_path() / length
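# A minimal usage sketch (outputs are approximate; both methods agree closely below ~80 km):
#
# >>> atmo = Atmosphere(altitude=11e3)  # near the tropopause, default "differentiable" method
# >>> atmo.pressure()        # -> roughly 2.27e4 Pa
# >>> atmo.density()         # -> roughly 0.36 kg/m^3
# >>> atmo.speed_of_sound()  # -> roughly 295 m/s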
if __name__ == "__main__":
# Make AeroSandbox Atmosphere
altitude = np.linspace(-5e3, 100e3, 1000)
atmo_diff = Atmosphere(altitude=altitude)
atmo_isa = Atmosphere(altitude=altitude, method="isa")
from aerosandbox.tools.pretty_plots import plt, sns, mpl, show_plot, set_ticks
fig, ax = plt.subplots()
plt.plot(
(
(atmo_diff.pressure() - atmo_isa.pressure()) / atmo_isa.pressure()
) * 100,
altitude / 1e3,
)
set_ticks(0.2, 0.1, 20, 10)
plt.xlim(-1, 1)
show_plot(
"AeroSandbox Atmosphere vs. ISA Atmosphere",
"Pressure, Relative Error [%]",
"Altitude [km]"
)
fig, ax = plt.subplots()
plt.plot(
atmo_diff.temperature() - atmo_isa.temperature(),
altitude / 1e3,
)
set_ticks(1, 0.5, 20, 10)
plt.xlim(-5, 5)
show_plot(
"AeroSandbox Atmosphere vs. ISA Atmosphere",
"Temperature, Absolute Error [K]",
"Altitude [km]"
)
# ==== End of file: /AeroSandbox-4.1.1.tar.gz/AeroSandbox-4.1.1/aerosandbox/atmosphere/atmosphere.py ====
import aerosandbox.numpy as np
import pandas as pd
from pathlib import Path
### Define constants
gas_constant_universal = 8.31432 # J/(mol*K); universal gas constant
molecular_mass_air = 28.9644e-3 # kg/mol; molecular mass of air
gas_constant_air = gas_constant_universal / molecular_mass_air # J/(kg*K); gas constant of air
g = 9.81 # m/s^2, gravitational acceleration on earth
### Read ISA table data
isa_table = pd.read_csv(Path(__file__).parent.absolute() / "isa_data/isa_table.csv")
isa_base_altitude = isa_table["Base Altitude [m]"].values
isa_lapse_rate = isa_table["Lapse Rate [K/km]"].values / 1000
isa_base_temperature = isa_table["Base Temperature [C]"].values + 273.15
### Calculate pressure at each ISA level programmatically using the barometric pressure equation with linear temperature.
def barometric_formula(
P_b,
T_b,
L_b,
h,
h_b,
):
"""
The barometric pressure equation, from here: https://en.wikipedia.org/wiki/Barometric_formula
Args:
P_b: Pressure at the base of the layer, in Pa
T_b: Temperature at the base of the layer, in K
L_b: Temperature lapse rate, in K/m
h: Altitude at which to evaluate the pressure, in m
h_b: Altitude of the base of the layer, in m
Returns: The pressure at altitude h, in Pa
"""
T = T_b + L_b * (h - h_b)
T = np.fmax(T, 1) # Keep temperature positive (at least 1 K), no matter the inputs.
if L_b != 0:
return P_b * (T / T_b) ** (-g / (gas_constant_air * L_b))
else:
return P_b * np.exp(
np.clip(
-g * (h - h_b) / (gas_constant_air * T_b),
-500,
500
)
)
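# An illustrative spot-check (value approximate): pressure at 5 km altitude within the
# troposphere layer (sea-level base, -6.5 K/km lapse rate):
#
# >>> barometric_formula(P_b=101325., T_b=288.15, L_b=-0.0065, h=5000., h_b=0.)  # -> roughly 5.4e4 Pa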
isa_pressure = [101325.] # Pascals
for i in range(len(isa_table) - 1):
isa_pressure.append(
barometric_formula(
P_b=isa_pressure[i],
T_b=isa_base_temperature[i],
L_b=isa_lapse_rate[i],
h=isa_base_altitude[i + 1],
h_b=isa_base_altitude[i]
)
)
def pressure_isa(altitude):
"""
Computes the pressure at a given altitude based on the International Standard Atmosphere.
Uses the Barometric formula, as implemented here: https://en.wikipedia.org/wiki/Barometric_formula
Args:
altitude: Geopotential altitude [m]
Returns: Pressure [Pa]
"""
pressure = 0 * altitude # Initialize the pressure to all zeros.
for i in range(len(isa_table)):
pressure = np.where(
altitude > isa_base_altitude[i],
barometric_formula(
P_b=isa_pressure[i],
T_b=isa_base_temperature[i],
L_b=isa_lapse_rate[i],
h=altitude,
h_b=isa_base_altitude[i]
),
pressure
)
### Add lower bound case
pressure = np.where(
altitude <= isa_base_altitude[0],
barometric_formula(
P_b=isa_pressure[0],
T_b=isa_base_temperature[0],
L_b=isa_lapse_rate[0],
h=altitude,
h_b=isa_base_altitude[0]
),
pressure
)
return pressure
def temperature_isa(altitude):
"""
Computes the temperature at a given altitude based on the International Standard Atmosphere.
Args:
altitude: Geopotential altitude [m]
Returns: Temperature [K]
"""
temp = 0 * altitude # Initialize the temperature to all zeros.
for i in range(len(isa_table)):
temp = np.where(
altitude > isa_base_altitude[i],
(altitude - isa_base_altitude[i]) * isa_lapse_rate[i] + isa_base_temperature[i],
temp
)
### Add lower bound case
temp = np.where(
altitude <= isa_base_altitude[0],
(altitude - isa_base_altitude[0]) * isa_lapse_rate[0] + isa_base_temperature[0],
temp
)
return temp
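# Illustrative spot-checks (values approximate):
#
# >>> temperature_isa(11e3)  # -> 216.65 K (top of the troposphere)
# >>> pressure_isa(11e3)     # -> roughly 2.26e4 Pa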
if __name__ == '__main__':
pressure_isa(-50e3)
# ==== End of file: /AeroSandbox-4.1.1.tar.gz/AeroSandbox-4.1.1/aerosandbox/atmosphere/_isa_atmo_functions.py ====
import aerosandbox.numpy as np
from aerosandbox.tools.string_formatting import eng_string
import copy
from typing import Union
universal_gas_constant = 8.31432 # J/(mol*K); universal gas constant
class PerfectGas:
"""
Provides a class for an ideal, calorically perfect gas.
Specifically, this gas:
* Has PV = nRT (ideal)
* Has constant heat capacities C_V, C_P (independent of temperature and pressure).
* Is in thermodynamic equilibrium
* Is not chemically reacting
* Has internal energy and enthalpy purely as functions of temperature
"""
def __init__(self,
pressure: Union[float, np.ndarray] = 101325,
temperature: Union[float, np.ndarray] = 273.15 + 15,
specific_heat_constant_pressure: float = 1006,
specific_heat_constant_volume: float = 717,
molecular_mass: float = 28.9644e-3,
effective_collision_diameter: float = 0.365e-9,
):
"""
Args:
pressure: Pressure of the gas, in Pascals
temperature: Temperature of the gas, in Kelvin
specific_heat_constant_pressure: Specific heat at constant pressure, also known as C_p. In J/kg-K.
specific_heat_constant_volume: Specific heat at constant volume, also known as C_v. In J/kg-K.
molecular_mass: Molecular mass of the gas, in kg/mol
effective_collision_diameter: Effective collision diameter of a molecule, in meters.
"""
self.pressure = pressure
self.temperature = temperature
self.specific_heat_constant_pressure = specific_heat_constant_pressure
self.specific_heat_constant_volume = specific_heat_constant_volume
self.molecular_mass = molecular_mass
self.effective_collision_diameter = effective_collision_diameter
def __repr__(self) -> str:
f = lambda s, u: eng_string(s, unit=u, format="%.6g")
return f"Gas (P = {f(self.pressure, 'Pa')}, T = {self.temperature:.6g} K, ρ = {self.density:.6g} kg/m^3, Pv^gamma = {self.pressure * self.specific_volume ** self.ratio_of_specific_heats: .6g})"
def __eq__(self, other):
if self.__class__ != other.__class__:
return False
return self.__dict__ == other.__dict__
@property
def density(self):
return self.pressure / (self.temperature * self.specific_gas_constant)
@property
def speed_of_sound(self):
return (self.ratio_of_specific_heats * self.specific_gas_constant * self.temperature) ** 0.5
@property
def specific_gas_constant(self):
return universal_gas_constant / self.molecular_mass
@property
def ratio_of_specific_heats(self):
return self.specific_heat_constant_pressure / self.specific_heat_constant_volume
def specific_enthalpy_change(self, start_temperature, end_temperature):
"""
Returns the change in specific enthalpy that would occur from a given temperature change via a thermodynamic
process.
Args:
start_temperature: Starting temperature [K]
end_temperature: Ending temperature [K]
Returns: The change in specific enthalpy, in J/kg.
"""
return self.specific_heat_constant_pressure * (end_temperature - start_temperature)
def specific_internal_energy_change(self, start_temperature, end_temperature):
"""
Returns the change in specific internal energy that would occur from a given temperature change via a
thermodynamic process.
Args:
start_temperature: Starting temperature [K]
end_temperature: Ending temperature [K]
Returns: The change in specific internal energy, in J/kg.
"""
return self.specific_heat_constant_volume * (end_temperature - start_temperature)
@property
def specific_volume(self):
"""
Gives the specific volume, often denoted `v`.
(Note the lowercase; "V" is often the volume of a specific amount of gas, and this presents a potential point
of confusion.)
"""
return 1 / self.density
@property
def specific_enthalpy(self):
"""
Gives the specific enthalpy, often denoted `h`.
Enthalpy here is in units of J/kg.
"""
return self.specific_enthalpy_change(start_temperature=0, end_temperature=self.temperature)
@property
def specific_internal_energy(self):
"""
Gives the specific internal energy, often denoted `u`.
Internal energy here is in units of J/kg.
"""
return self.specific_internal_energy_change(start_temperature=0, end_temperature=self.temperature)
def process(self,
process: str = "isentropic",
new_pressure: float = None,
new_temperature: float = None,
new_density: float = None,
enthalpy_addition_at_constant_pressure: float = None,
enthalpy_addition_at_constant_volume: float = None,
polytropic_n: float = None,
inplace=False
) -> "PerfectGas":
"""
Puts this gas under a thermodynamic process.
Equations here: https://en.wikipedia.org/wiki/Ideal_gas_law
Args:
process: Type of process. One of:
* "isobaric"
* "isochoric"
* "isothermal"
* "isentropic"
* "polytropic"
The `process` must be specified.
You must specify exactly one of the following arguments:
* `new_pressure`: the new pressure after the process [Pa].
* `new_temperature`: the new temperature after the process [K]
* `new_density`: the new density after the process [kg/m^3]
* `enthalpy_addition_at_constant_pressure`: [J/kg]
* `enthalpy_addition_at_constant_volume`: [J/kg]
polytropic_n: If you specified the process type to be "polytropic", you must provide the polytropic index
`n` to be used here. (Reminder: PV^n = constant)
inplace: Specifies whether to return the result in-place or to allocate a new PerfectGas object in memory
for the result.
Returns:
If `inplace` is False (default), returns a new PerfectGas object that represents the gas after the change.
If `inplace` is True, nothing is returned.
"""
pressure_specified = new_pressure is not None
temperature_specified = new_temperature is not None
density_specified = new_density is not None
enthalpy_at_pressure_specified = enthalpy_addition_at_constant_pressure is not None
enthalpy_at_volume_specified = enthalpy_addition_at_constant_volume is not None
number_of_conditions_specified = (
pressure_specified +
temperature_specified +
density_specified +
enthalpy_at_pressure_specified +
enthalpy_at_volume_specified
)
if number_of_conditions_specified != 1:
raise ValueError("You must specify exactly one of the following arguments:\n" + "\n".join([
"\t* `new_pressure`",
"\t* `new_temperature`",
"\t* `new_density`",
"\t* `enthalpy_addition_at_constant_pressure`",
"\t* `enthalpy_addition_at_constant_volume`",
]))
if enthalpy_at_pressure_specified:
new_temperature = self.temperature + enthalpy_addition_at_constant_pressure / self.specific_heat_constant_pressure
elif enthalpy_at_volume_specified:
new_temperature = self.temperature + enthalpy_addition_at_constant_volume / self.specific_heat_constant_volume
if pressure_specified:
P_ratio = new_pressure / self.pressure
elif temperature_specified:
T_ratio = new_temperature / self.temperature
elif density_specified:
V_ratio = 1 / (new_density / self.density)
if process == "isobaric":
new_pressure = self.pressure
if pressure_specified:
raise ValueError("Can't specify pressure change for an isobaric process!")
elif density_specified:
new_temperature = self.temperature * V_ratio
elif temperature_specified:
pass
elif process == "isochoric":
if pressure_specified:
new_temperature = self.temperature * P_ratio
elif density_specified:
raise ValueError("Can't specify density change for an isochoric process!")
elif temperature_specified:
new_pressure = self.pressure * T_ratio
elif process == "isothermal":
new_temperature = self.temperature
if pressure_specified:
pass
elif density_specified:
new_pressure = self.pressure / V_ratio
elif temperature_specified:
raise ValueError("Can't specify temperature change for an isothermal process!")
elif process == "isentropic":
gam = self.ratio_of_specific_heats
if pressure_specified:
new_temperature = self.temperature * P_ratio ** ((gam - 1) / gam)
elif density_specified:
new_pressure = self.pressure * V_ratio ** -gam
new_temperature = self.temperature * V_ratio ** (1 - gam)
elif temperature_specified:
new_pressure = self.pressure * T_ratio ** (gam / (gam - 1))
elif process == "polytropic":
if polytropic_n is None:
raise ValueError("If the process is polytropic, then the polytropic index `n` must be specified.")
n = polytropic_n
if pressure_specified:
new_temperature = self.temperature * P_ratio ** ((n - 1) / n)
elif density_specified:
new_pressure = self.pressure * V_ratio ** -n
new_temperature = self.temperature * V_ratio ** (1 - n)
elif temperature_specified:
new_pressure = self.pressure * T_ratio ** (n / (n - 1))
elif process == "isenthalpic":
raise NotImplementedError()
else:
raise ValueError("Bad value of `process`!")
if inplace:
self.pressure = new_pressure
self.temperature = new_temperature
else:
return PerfectGas(
pressure=new_pressure,
temperature=new_temperature,
specific_heat_constant_pressure=self.specific_heat_constant_pressure,
specific_heat_constant_volume=self.specific_heat_constant_volume,
molecular_mass=self.molecular_mass,
effective_collision_diameter=self.effective_collision_diameter
)
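# A minimal usage sketch (output is approximate, using the default air-like properties):
#
# >>> air = PerfectGas()  # 101325 Pa, 288.15 K
# >>> compressed = air.process("isentropic", new_pressure=5 * 101325)
# >>> compressed.temperature  # -> roughly 457 K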
if __name__ == '__main__':
### Carnot
g = []
g.append(PerfectGas(pressure=100e3, temperature=300))
g.append(g[-1].process("isothermal", new_density=0.5))
g.append(g[-1].process("isentropic", new_density=0.25))
g.append(g[-1].process("isothermal", new_density=0.58))
g.append(g[-1].process("isentropic", new_temperature=300))
for i in range(len(g)):
print(f"After Process {i}: {g[i]}")
# ==== End of file: /AeroSandbox-4.1.1.tar.gz/AeroSandbox-4.1.1/aerosandbox/atmosphere/thermodynamics/gas.py ====
import aerosandbox.numpy as np
def temperature_over_total_temperature(
mach,
gamma=1.4
):
"""
Gives T/T_t, the ratio of static temperature to total temperature.
Args:
mach: Mach number [-]
gamma: The ratio of specific heats. 1.4 for air across most temperature ranges of interest.
"""
return (1 + (gamma - 1) / 2 * mach ** 2) ** -1
def pressure_over_total_pressure(
mach,
gamma=1.4
):
"""
Gives P/P_t, the ratio of static pressure to total pressure.
Args:
mach: Mach number [-]
gamma: The ratio of specific heats. 1.4 for air across most temperature ranges of interest.
"""
return temperature_over_total_temperature(mach=mach, gamma=gamma) ** (gamma / (gamma - 1))
def density_over_total_density(
mach,
gamma=1.4
):
"""
Gives rho/rho_t, the ratio of static density to total (stagnation) density.
Args:
mach: Mach number [-]
gamma: The ratio of specific heats. 1.4 for air across most temperature ranges of interest.
"""
return temperature_over_total_temperature(mach=mach, gamma=gamma) ** (1 / (gamma - 1))
def area_over_choked_area(
mach,
gamma=1.4
):
"""
Gives A/A^* (where A^* is "A-star"), the ratio of cross-sectional flow area to the cross-sectional flow area that would result in choked (M=1) flow.
Applicable to 1D isentropic nozzle flow.
Args:
mach: Mach number [-]
gamma: The ratio of specific heats. 1.4 for air across most temperature ranges of interest.
"""
gp1 = gamma + 1
gm1 = gamma - 1
return (
(gp1 / 2) ** (-gp1 / (2 * gm1)) *
(1 + gm1 / 2 * mach ** 2) ** (gp1 / (2 * gm1)) / mach
)
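# Illustrative spot-checks at Mach 2 (values approximate):
#
# >>> temperature_over_total_temperature(mach=2)  # -> ~0.556
# >>> pressure_over_total_pressure(mach=2)        # -> ~0.128
# >>> area_over_choked_area(mach=2)               # -> ~1.69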
if __name__ == '__main__':
import matplotlib.pyplot as plt
import aerosandbox.tools.pretty_plots as p
mach = np.linspace(0.5, 3, 500)
fig, ax = plt.subplots()
for name, data in {
"$T/T_t$" : temperature_over_total_temperature(mach),
"$P/P_t$" : pressure_over_total_pressure(mach),
"$A/A^*$" : area_over_choked_area(mach),
r"$\rho/\rho_t$": density_over_total_density(mach),
}.items():
plt.plot(mach, data, label=name)
p.show_plot()
# ==== End of file: /AeroSandbox-4.1.1.tar.gz/AeroSandbox-4.1.1/aerosandbox/atmosphere/thermodynamics/isentropic_flow.py ====
from aerosandbox.common import AeroSandboxObject
from aerosandbox import Atmosphere
import aerosandbox.numpy as np
from typing import Tuple, Union, Dict, List
from aerosandbox.tools.string_formatting import trim_string
import inspect
class OperatingPoint(AeroSandboxObject):
def __init__(self,
atmosphere: Atmosphere = Atmosphere(altitude=0),
velocity: float = 1.,
alpha: float = 0.,
beta: float = 0.,
p: float = 0.,
q: float = 0.,
r: float = 0.,
):
"""
An object that represents the instantaneous aerodynamic flight conditions of an aircraft.
Args:
atmosphere: The atmosphere object (of type asb.Atmosphere). Defaults to sea level conditions.
velocity: The flight velocity, expressed as a true airspeed. [m/s]
alpha: The angle of attack. [degrees]
beta: The sideslip angle. (Reminder: convention that a positive beta implies that the oncoming air comes
from the pilot's right-hand side.) [degrees]
p: The roll rate about the x_b axis. [rad/sec]
q: The pitch rate about the y_b axis. [rad/sec]
r: The yaw rate about the z_b axis. [rad/sec]
"""
self.atmosphere = atmosphere
self.velocity = velocity
self.alpha = alpha
self.beta = beta
self.p = p
self.q = q
self.r = r
@property
def state(self) -> Dict[str, Union[float, np.ndarray]]:
"""
Returns the state variables of this OperatingPoint instance as a Dict.
Keys are strings that give the name of the variables.
Values are the variables themselves.
"""
return {
"atmosphere": self.atmosphere,
"velocity" : self.velocity,
"alpha" : self.alpha,
"beta" : self.beta,
"p" : self.p,
"q" : self.q,
"r" : self.r,
}
def get_new_instance_with_state(self,
new_state: Union[
Dict[str, Union[float, np.ndarray]],
List, Tuple, np.ndarray
] = None
):
"""
Creates a new instance of the OperatingPoint class from the given state.
Args:
new_state: The new state to be used for the new instance. Ideally, this is represented as a Dict in identical format to the `state` of an OperatingPoint instance.
Returns: A new instance of this same OperatingPoint class.
"""
### Get a list of all the inputs that the class constructor wants to see
init_signature = inspect.signature(self.__class__.__init__)
init_args = list(init_signature.parameters.keys())[1:] # Ignore 'self'
### Create a new instance, and give the constructor all the inputs it wants to see (based on values in this instance)
new_op_point: __class__ = self.__class__(**{
k: getattr(self, k)
for k in init_args
})
### Overwrite the state variables in the new instance with those from the input
new_op_point._set_state(new_state=new_state)
### Return the new instance
return new_op_point
def _set_state(self,
new_state: Union[
Dict[str, Union[float, np.ndarray]],
List, Tuple, np.ndarray
] = None
):
"""
Force-overwrites all state variables with a new set (either partial or complete) of state variables.
Warning: this is *not* the intended public usage of OperatingPoint instances.
If you want a new state yourself, you should instantiate a new one either:
a) manually, or
b) by using OperatingPoint.get_new_instance_with_state()
Hence, this function is meant for PRIVATE use only - be careful how you use this!
"""
### Set the default parameters
if new_state is None:
new_state = {}
try: # Assume `value` is a dict-like, with keys
for key in new_state.keys(): # Overwrite each of the specified state variables
setattr(self, key, new_state[key])
except AttributeError: # Assume it's an iterable that has been sorted.
self._set_state(
self.pack_state(new_state)) # Pack the iterable into a dict-like, then do the same thing as above.
def unpack_state(self,
dict_like_state: Dict[str, Union[float, np.ndarray]] = None
) -> Tuple[Union[float, np.ndarray]]:
"""
'Unpacks' a Dict-like state into an array-like that represents the state of the OperatingPoint.
Args:
dict_like_state: Takes in a dict-like representation of the state.
Returns: The array representation of the state that you gave.
"""
if dict_like_state is None:
dict_like_state = self.state
return tuple(dict_like_state.values())
def pack_state(self,
array_like_state: Union[List, Tuple, np.ndarray] = None
) -> Dict[str, Union[float, np.ndarray]]:
"""
'Packs' an array into a Dict that represents the state of the OperatingPoint.
Args:
array_like_state: Takes in an iterable that must have the same number of entries as the state vector of the OperatingPoint.
Returns: The Dict representation of the state that you gave.
"""
if array_like_state is None:
return self.state
if not len(self.state.keys()) == len(array_like_state):
raise ValueError(
"There are a differing number of elements in the `state` variable and the `array_like` you're trying to pack!")
return {
k: v
for k, v in zip(
self.state.keys(),
array_like_state
)
}
def __repr__(self) -> str:
title = f"{self.__class__.__name__} instance:"
def makeline(k, v):
name = trim_string(str(k).strip(), length=10).rjust(10)
item = trim_string(str(v).strip(), length=120).ljust(120)
line = f"{name}: {item}"
return line
state_variables_title = "\tState variables:"
state_variables = "\n".join([
"\t\t" + makeline(k, v)
for k, v in self.state.items()
])
return "\n".join([
title,
state_variables_title,
state_variables,
])
def __getitem__(self, index: int) -> "OperatingPoint":
"""
Indexes one item from each attribute of an OperatingPoint instance.
Returns a new OperatingPoint instance.
Args:
index: The index that is being called; e.g.,:
>>> first_dyn = op_point[0]
Returns: A new OperatingPoint instance, where each attribute is subscripted at the given value, if possible.
"""
def get_item_of_attribute(a):
try:
return a[index]
except TypeError as e: # object is not subscriptable
return a
except IndexError as e: # index out of range
raise IndexError("A state variable could not be indexed, since the index is out of range!")
except NotImplementedError as e:
raise TypeError(f"Indices must be integers or slices, not {index.__class__.__name__}")
new_instance = self.get_new_instance_with_state()
for k, v in new_instance.__dict__.items():
setattr(new_instance, k, get_item_of_attribute(v))
return new_instance
def __len__(self):
length = 1
for v in self.state.values():
if np.length(v) == 1:
pass
elif length == 1:
length = np.length(v)
elif length == np.length(v):
pass
else:
raise ValueError("State variables are appear vectorized, but of different lengths!")
return length
def dynamic_pressure(self):
"""
Dynamic pressure of the working fluid
Returns:
float: Dynamic pressure of the working fluid. [Pa]
"""
return 0.5 * self.atmosphere.density() * self.velocity ** 2
def total_pressure(self):
"""
Total (stagnation) pressure of the working fluid.
Assumes a calorically perfect gas (i.e. specific heats do not change across the isentropic deceleration).
Note that `total pressure != static pressure + dynamic pressure`, due to compressibility effects.
Returns: Total pressure of the working fluid. [Pa]
"""
gamma = self.atmosphere.ratio_of_specific_heats()
return self.atmosphere.pressure() * (
1 + (gamma - 1) / 2 * self.mach() ** 2
) ** (
gamma / (gamma - 1)
)
def total_temperature(self):
"""
Total (stagnation) temperature of the working fluid.
Assumes a calorically perfect gas (i.e. specific heats do not change across the isentropic deceleration).
Returns: Total temperature of the working fluid [K]
"""
gamma = self.atmosphere.ratio_of_specific_heats()
# return self.atmosphere.temperature() * (
# self.total_pressure() / self.atmosphere.pressure()
# ) ** (
# (gamma - 1) / gamma
# )
return self.atmosphere.temperature() * (
1 + (gamma - 1) / 2 * self.mach() ** 2
)
def reynolds(self, reference_length):
"""
Computes a Reynolds number with respect to a given reference length.
:param reference_length: A reference length you choose [m]
:return: Reynolds number [unitless]
"""
density = self.atmosphere.density()
viscosity = self.atmosphere.dynamic_viscosity()
return density * self.velocity * reference_length / viscosity
def mach(self):
"""
Returns the Mach number associated with the current flight condition.
"""
return self.velocity / self.atmosphere.speed_of_sound()
def indicated_airspeed(self):
"""
Returns the indicated airspeed associated with the current flight condition, in meters per second.
"""
return np.sqrt(
2 * (self.total_pressure() - self.atmosphere.pressure())
/ Atmosphere(altitude=0, method="isa").density()
)
def equivalent_airspeed(self):
"""
Returns the equivalent airspeed associated with the current flight condition, in meters per second.
"""
return self.velocity * np.sqrt(
self.atmosphere.density() / Atmosphere(altitude=0, method="isa").density()
)
def energy_altitude(self):
"""
Returns the energy altitude associated with the current flight condition, in meters.
The energy altitude is the altitude at which a stationary aircraft would have the same total energy (kinetic
+ gravitational potential) as the aircraft at the current flight condition.
"""
return self.atmosphere.altitude + 1 / (2 * 9.81) * self.velocity ** 2
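# A minimal usage sketch of the derived quantities above (outputs approximate, using the
# default differentiable atmosphere model):
#
# >>> op = OperatingPoint(atmosphere=Atmosphere(altitude=10e3), velocity=240.)
# >>> op.mach()                         # -> ~0.80
# >>> op.dynamic_pressure()             # -> ~1.2e4 Pa
# >>> op.reynolds(reference_length=4.)  # -> ~2.7e7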
def convert_axes(self,
x_from: Union[float, np.ndarray],
y_from: Union[float, np.ndarray],
z_from: Union[float, np.ndarray],
from_axes: str,
to_axes: str,
) -> Tuple[float, float, float]:
"""
Converts a vector [x_from, y_from, z_from], as given in the `from_axes` frame, to an equivalent vector [x_to,
y_to, z_to], as given in the `to_axes` frame.
Both `from_axes` and `to_axes` should be a string, one of:
* "geometry"
* "body"
* "wind"
* "stability"
This whole function is vectorized, both over the vector and the OperatingPoint (e.g., a vector of
`OperatingPoint.alpha` values)
Wind axes rotations are taken from Eq. 6.7 in Sect. 6.2.2 of Drela's Flight Vehicle Aerodynamics textbook,
with axes corrections to go from [D, Y, L] to true wind axes (and same for geometry to body axes).
Args:
x_from: x-component of the vector, in `from_axes` frame.
y_from: y-component of the vector, in `from_axes` frame.
z_from: z-component of the vector, in `from_axes` frame.
from_axes: The axes to convert from.
to_axes: The axes to convert to.
Returns: The x-, y-, and z-components of the vector, in `to_axes` frame. Given as a tuple.
"""
if from_axes == to_axes:
return x_from, y_from, z_from
if from_axes == "geometry":
x_b = -x_from
y_b = y_from
z_b = -z_from
elif from_axes == "body":
x_b = x_from
y_b = y_from
z_b = z_from
elif from_axes == "wind":
sa = np.sind(self.alpha)
ca = np.cosd(self.alpha)
sb = np.sind(self.beta)
cb = np.cosd(self.beta)
x_b = (cb * ca) * x_from + (-sb * ca) * y_from + (-sa) * z_from
y_b = (sb) * x_from + (cb) * y_from # Note: z term is 0; not forgotten.
z_b = (cb * sa) * x_from + (-sb * sa) * y_from + (ca) * z_from
elif from_axes == "stability":
sa = np.sind(self.alpha)
ca = np.cosd(self.alpha)
x_b = ca * x_from - sa * z_from
y_b = y_from
z_b = sa * x_from + ca * z_from
else:
raise ValueError("Bad value of `from_axes`!")
if to_axes == "geometry":
x_to = -x_b
y_to = y_b
z_to = -z_b
elif to_axes == "body":
x_to = x_b
y_to = y_b
z_to = z_b
elif to_axes == "wind":
sa = np.sind(self.alpha)
ca = np.cosd(self.alpha)
sb = np.sind(self.beta)
cb = np.cosd(self.beta)
x_to = (cb * ca) * x_b + (sb) * y_b + (cb * sa) * z_b
y_to = (-sb * ca) * x_b + (cb) * y_b + (-sb * sa) * z_b
z_to = (-sa) * x_b + (ca) * z_b # Note: y term is 0; not forgotten.
elif to_axes == "stability":
sa = np.sind(self.alpha)
ca = np.cosd(self.alpha)
x_to = ca * x_b + sa * z_b
y_to = y_b
z_to = -sa * x_b + ca * z_b
else:
raise ValueError("Bad value of `to_axes`!")
return x_to, y_to, z_to
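# An illustrative spot-check (outputs approximate), computed directly from the rotations above:
# the unit vector [-1, 0, 0] in wind axes, expressed in body axes at 5 degrees angle of attack:
#
# >>> op = OperatingPoint(alpha=5., beta=0.)
# >>> op.convert_axes(-1., 0., 0., from_axes="wind", to_axes="body")
# (-0.996, 0.0, -0.0872)  (approximately)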
def compute_rotation_matrix_wind_to_geometry(self) -> np.ndarray:
"""
Computes the 3x3 rotation matrix that transforms from wind axes to geometry axes.
Returns: a 3x3 rotation matrix.
"""
alpha_rotation = np.rotation_matrix_3D(
angle=np.radians(-self.alpha),
axis="y",
)
beta_rotation = np.rotation_matrix_3D(
angle=np.radians(self.beta),
axis="z",
)
axes_flip = np.rotation_matrix_3D(
angle=np.pi,
axis="y",
)
# Since in geometry axes, X is downstream by convention, while in wind axes, X is upstream by convention.
# Same with Z being up/down respectively.
r = axes_flip @ alpha_rotation @ beta_rotation # where "@" is the matrix multiplication operator
return r
def compute_freestream_direction_geometry_axes(self):
# Computes the freestream direction (direction the wind is GOING TO) in the geometry axes
return self.compute_rotation_matrix_wind_to_geometry() @ np.array([-1, 0, 0])
def compute_freestream_velocity_geometry_axes(self):
# Computes the freestream velocity vector (direction the wind is GOING TO) in geometry axes
return self.compute_freestream_direction_geometry_axes() * self.velocity
def compute_rotation_velocity_geometry_axes(self, points):
# Computes the effective velocity-due-to-rotation at a set of points.
# Input: a Nx3 array of points
# Output: a Nx3 array of effective velocities
angular_velocity_vector_geometry_axes = np.array([
-self.p,
self.q,
-self.r
]) # signs convert from body axes to geometry axes
a = angular_velocity_vector_geometry_axes
b = points
rotation_velocity_geometry_axes = np.stack([
a[1] * b[:, 2] - a[2] * b[:, 1],
a[2] * b[:, 0] - a[0] * b[:, 2],
a[0] * b[:, 1] - a[1] * b[:, 0]
], axis=1)
rotation_velocity_geometry_axes = -rotation_velocity_geometry_axes # negative sign, since we care about the velocity the WING SEES, not the velocity of the wing.
return rotation_velocity_geometry_axes
if __name__ == '__main__':
op_point = OperatingPoint()
# ==== End of file: /AeroSandbox-4.1.1.tar.gz/AeroSandbox-4.1.1/aerosandbox/performance/operating_point.py ====
from typing import Union, List, Dict, Callable, Any, Tuple, Set, Optional
import json
import casadi as cas
import aerosandbox.numpy as np
from aerosandbox.tools import inspect_tools
from sortedcontainers import SortedDict
import copy
class Opti(cas.Opti):
"""
The base class for mathematical optimization. For detailed usage, see the docstrings in its key methods:
* Opti.variable()
* Opti.subject_to()
* Opti.parameter()
* Opti.solve()
Example usage is as follows:
>>> opti = asb.Opti() # Initializes an optimization environment
>>> x = opti.variable(init_guess=5) # Initializes a new variable in that environment
>>> f = x ** 2 # Evaluates a (in this case, nonlinear) function based on a variable
>>> opti.subject_to(x > 3) # Adds a constraint to be enforced
>>> opti.minimize(f) # Sets the objective function as f
>>> sol = opti.solve() # Solves the problem using CasADi and IPOPT backend
>>> print(sol.value(x)) # Prints the value of x at the optimum.
"""
def __init__(self,
variable_categories_to_freeze: Union[List[str], str] = None,
cache_filename: str = None,
load_frozen_variables_from_cache: bool = False,
save_to_cache_on_solve: bool = False,
ignore_violated_parametric_constraints: bool = False,
freeze_style: str = "parameter",
): # TODO document
# Default arguments
if variable_categories_to_freeze is None:
variable_categories_to_freeze = []
# Parent class initialization
super().__init__()
# Initialize class variables
self.variable_categories_to_freeze = variable_categories_to_freeze
self.cache_filename = cache_filename
self.load_frozen_variables_from_cache = load_frozen_variables_from_cache # TODO load and start tracking
self.save_to_cache_on_solve = save_to_cache_on_solve
self.ignore_violated_parametric_constraints = ignore_violated_parametric_constraints
self.freeze_style = freeze_style
# Start tracking variables and categorize them.
self.variables_categorized = {} # category name [str] : list of variables [list]
# Track variable declaration locations, useful for debugging
self._variable_declarations = SortedDict() # first index in super().x : (filename, lineno, code_context, n_vars)
self._constraint_declarations = SortedDict() # first index in super().g : (filename, lineno, code_context, n_cons)
self._variable_index_counter = 0
self._constraint_index_counter = 0
### Primary Methods
def variable(self,
init_guess: Union[float, np.ndarray] = None,
n_vars: int = None,
scale: float = None,
freeze: bool = False,
log_transform: bool = False,
category: str = "Uncategorized",
lower_bound: float = None,
upper_bound: float = None,
_stacklevel: int = 1,
) -> cas.MX:
"""
Initializes a new decision variable (or vector of decision variables). You should pass an initial guess (
`init_guess`) upon defining a new variable. Dimensionality is inferred from this initial guess, but it can be
overridden; see below for syntax.
It is highly, highly recommended that you provide a scale (`scale`) for each variable, especially for
nonconvex problems, although this is not strictly required.
Usage notes:
When using vector variables, individual components of this vector of variables can be accessed via normal
indexing. Example:
>>> opti = asb.Opti()
>>> my_var = opti.variable(n_vars = 5)
>>> opti.subject_to(my_var[3] >= my_var[2]) # This is a valid way of indexing
>>> my_sum = asb.sum(my_var) # This will sum up all elements of `my_var`
Args:
init_guess: Initial guess for the optimal value of the variable being initialized. This is where in the
design space the optimizer will start looking.
This can be either a float or a NumPy ndarray; the dimension of the variable (i.e. scalar,
vector) that is created will be automatically inferred from the shape of the initial guess you
provide here. (Although it can also be overridden using the `n_vars` parameter; see below.)
For scalar variables, your initial guess should be a float:
>>> opti = asb.Opti()
>>> scalar_var = opti.variable(init_guess=5) # Initializes a scalar variable at a value of 5
For vector variables, your initial guess should be either:
* a float, in which case you must pass the length of the vector as `n_vars`, otherwise a scalar
variable will be created:
>>> opti = asb.Opti()
>>> vector_var = opti.variable(init_guess=5, n_vars=10) # Initializes a vector variable of length
>>> # 10, with all 10 elements set to an initial guess of 5.
* a NumPy ndarray, in which case each element will be initialized to the corresponding value in
the given array:
>>> opti = asb.Opti()
>>> vector_var = opti.variable(init_guess=np.linspace(0, 5, 10)) # Initializes a vector variable of
>>> # length 10, with all 10 elements initialized to linearly vary between 0 and 5.
In the case where the variable is to be log-transformed (see `log_transform`), the initial guess
should not be log-transformed as well - just supply the initial guess as usual. (Log-transform of the
initial guess happens under the hood.) The initial guess must, of course, be a positive number in
this case.
n_vars: [Optional] Used to manually override the dimensionality of the variable to create; if not
provided, the dimensionality of the variable is inferred from the initial guess `init_guess`.
The only real case where you need to use this argument would be if you are initializing a vector
variable to a scalar value, but you don't feel like using `init_guess=value * np.ones(n_vars)`.
For example:
>>> opti = asb.Opti()
>>> vector_var = opti.variable(init_guess=5, n_vars=10) # Initializes a vector variable of length
>>> # 10, with all 10 elements set to an initial guess of 5.
scale: [Optional] Approximate scale of the variable.
For example, if you're optimizing the design of an automobile and setting the tire diameter as an
optimization variable, you might choose `scale=0.5`, corresponding to 0.5 meters.
Properly scaling your variables can have a huge impact on solution speed (or even if the optimizer
converges at all). Although most modern second-order optimizers (such as IPOPT, used here) are
theoretically scale-invariant, numerical precision issues due to floating-point arithmetic can make
solving poorly-scaled problems really difficult or impossible. See here for more info:
https://web.casadi.org/blog/nlp-scaling/
If not specified, the code will try to pick a sensible value by defaulting to the `init_guess`.
freeze: [Optional] This boolean tells the optimizer to "freeze" the variable at a specific value. In
order to determine the value at which to freeze the variable, the optimizer will use the following logic:
* If you initialize a new variable with the parameter `freeze=True`: the optimizer will freeze
the variable at the value of initial guess.
>>> opti = Opti()
>>> my_var = opti.variable(init_guess=5, freeze=True) # This will freeze my_var at a value of 5.
* If the Opti instance is associated with a cache file, and you told it to freeze a specific
category(s) of variables that your variable is a member of, and you didn't manually specify to
freeze the variable: the variable will be frozen based on the value in the cache file (and ignore
the `init_guess`). Example:
>>> opti = Opti(cache_filename="my_file.json", variable_categories_to_freeze=["Wheel Sizing"])
>>> # Assume, for example, that `my_file.json` was from a previous run where my_var=10.
>>> my_var = opti.variable(init_guess=5, category="Wheel Sizing")
>>> # This will freeze my_var at a value of 10 (from the cache file, not the init_guess)
* If the Opti instance is associated with a cache file, and you told it to freeze a specific
category(s) of variables that your variable is a member of, but you then manually specified that
the variable should be frozen: the variable will once again be frozen at the value of `init_guess`:
>>> opti = Opti(cache_filename="my_file.json", variable_categories_to_freeze=["Wheel Sizing"])
>>> # Assume, for example, that `my_file.json` was from a previous run where my_var=10.
>>> my_var = opti.variable(init_guess=5, category="Wheel Sizing", freeze=True)
>>> # This will freeze my_var at a value of 5 (`freeze` overrides category loading.)
Motivation for freezing variables:
The ability to freeze variables is exceptionally useful when designing engineering systems. Let's say
we're designing an airplane. In the beginning of the design process, we're doing "clean-sheet" design
- any variable is up for grabs for us to optimize on, because the airplane doesn't exist yet!
However, the farther we get into the design process, the more things get "locked in" - we may have
ordered jigs, settled on a wingspan, chosen an engine, et cetera. So, if something changes later (
let's say that we discover that one of our assumptions was too optimistic halfway through the design
process), we have to make up for that lost margin using only the variables that are still free. To do
this, we would freeze the variables that are already decided on.
By categorizing variables, you can also freeze entire categories of variables. For example,
you can freeze all of the wing design variables for an airplane but leave all of the fuselage
variables free.
This idea of freezing variables can also be used to look at off-design performance - freeze a
design, but change the operating conditions.
log_transform: [Optional] Advanced use only. A flag of whether to internally-log-transform this variable
before passing it to the optimizer. Good for known positive engineering quantities that become nonsensical
if negative (e.g. mass). Log-transforming these variables can also help maintain convexity.
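For example, a minimal sketch (the variable name `mass` is purely illustrative):
>>> opti = asb.Opti()
>>> mass = opti.variable(init_guess=100, log_transform=True)
>>> # `mass` is guaranteed positive, since the underlying decision variable is its logarithm.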
category: [Optional] What category of variables does this belong to? # TODO expand docs
lower_bound: [Optional] If provided, defines a bounds constraint on the new variable that keeps the
variable above a given value.
upper_bound: [Optional] If provided, defines a bounds constraint on the new variable that keeps the
variable below a given value.
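For example, a minimal sketch of the bounded-variable syntax (names and numbers are illustrative):
>>> opti = asb.Opti()
>>> x = opti.variable(init_guess=5, lower_bound=0, upper_bound=10)  # Adds the constraints 0 <= x <= 10.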
_stacklevel: Optional and advanced, purely used for debugging. Allows users to correctly track where
variables are declared in the event that they are subclassing `aerosandbox.Opti`. Modifies the
stacklevel of the declaration tracked, which is then presented using
`aerosandbox.Opti.variable_declaration()`.
Returns:
The variable itself as a symbolic CasADi variable (MX type).
"""
### Set defaults
if init_guess is None:
import warnings
if log_transform:
init_guess = 1
warnings.warn("No initial guess set for Opti.variable(). Defaulting to 1 (log-transformed variable).",
stacklevel=2)
else:
init_guess = 0
warnings.warn("No initial guess set for Opti.variable(). Defaulting to 0.", stacklevel=2)
if n_vars is None: # Infer dimensionality from init_guess if it is not provided
n_vars = np.length(init_guess)
if scale is None: # Infer a scale from init_guess if it is not provided
if log_transform:
scale = 1
else:
scale = np.mean(np.fabs(init_guess)) # Initialize the scale to a heuristic based on the init_guess
if isinstance(scale,
cas.MX) or scale == 0: # If that heuristic leads to a scale of 0, use a scale of 1 instead.
scale = 1
# scale = np.fabs(
# np.where(
# init_guess != 0,
# init_guess,
# 1
# ))
length_init_guess = np.length(init_guess)
if length_init_guess != 1 and length_init_guess != n_vars:
raise ValueError(f"`init_guess` has length {length_init_guess}, but `n_vars` is {n_vars}!")
# Try to convert init_guess to a float or np.ndarray if it is an Opti parameter.
try:
init_guess = self.value(init_guess)
except RuntimeError as e:
if not (
freeze and self.freeze_style == "float"
):
raise TypeError(
"The `init_guess` for a new Opti variable must not be a function of an existing Opti variable."
)
# Validate the inputs
if log_transform:
if np.any(init_guess <= 0):
raise ValueError(
"If you are initializing a log-transformed variable, the initial guess(es) must all be positive.")
if np.any(scale <= 0):
raise ValueError("The 'scale' argument must be a positive number.")
# If the variable is in a category to be frozen, fix the variable at the initial guess.
is_manually_frozen = freeze
if (
category in self.variable_categories_to_freeze or
category == self.variable_categories_to_freeze or
self.variable_categories_to_freeze == "all"
):
freeze = True
# If the variable is to be frozen, return the initial guess. Otherwise, define the variable using CasADi symbolics.
if freeze:
if self.freeze_style == "parameter":
var = self.parameter(n_params=n_vars, value=init_guess)
elif self.freeze_style == "float":
if n_vars == 1:
var = init_guess
else:
var = init_guess * np.ones(n_vars)
else:
raise ValueError("Bad value of `Opti.freeze_style`!")
else:
if not log_transform:
var = scale * super().variable(n_vars)
self.set_initial(var, init_guess)
else:
log_scale = scale / init_guess
log_var = log_scale * super().variable(n_vars)
var = np.exp(log_var)
self.set_initial(log_var, np.log(init_guess))
# Track where this variable was declared in code.
filename, lineno, code_context = inspect_tools.get_caller_source_location(stacklevel=_stacklevel + 1)
self._variable_declarations[self._variable_index_counter] = (
filename,
lineno,
code_context,
n_vars
)
self._variable_index_counter += n_vars
# Track the category of the variable
if category not in self.variables_categorized: # Add a category if it does not exist
self.variables_categorized[category] = []
self.variables_categorized[category].append(var)
try:
var.is_manually_frozen = is_manually_frozen
except AttributeError:
pass
# Apply bounds
if not (freeze and self.ignore_violated_parametric_constraints):
if (not log_transform) or (freeze):
if lower_bound is not None:
self.subject_to(
var / scale >= lower_bound / scale,
_stacklevel=_stacklevel + 1
)
if upper_bound is not None:
self.subject_to(
var / scale <= upper_bound / scale,
_stacklevel=_stacklevel + 1
)
else:
if lower_bound is not None:
self.subject_to(
log_var / log_scale >= np.log(lower_bound) / log_scale,
_stacklevel=_stacklevel + 1
)
if upper_bound is not None:
self.subject_to(
log_var / log_scale <= np.log(upper_bound) / log_scale,
_stacklevel=_stacklevel + 1
)
return var
def subject_to(self,
constraint: Union[cas.MX, bool, List], # TODO add scale
_stacklevel: int = 1,
) -> Union[cas.MX, None, List[cas.MX]]:
"""
Initialize a new equality or inequality constraint(s).
Args:
constraint: A constraint that you want to hold true at the optimum.
Inequality example:
>>> x = opti.variable()
>>> opti.subject_to(x >= 5)
Equality example; also showing that you can directly constrain functions of variables:
>>> x = opti.variable()
>>> f = np.sin(x)
>>> opti.subject_to(f == 0.5)
You can also pass in a list of multiple constraints using list syntax. For example:
>>> x = opti.variable()
>>> opti.subject_to([
>>> x >= 5,
>>> x <= 10
>>> ])
_stacklevel: Optional and advanced, purely used for debugging. Allows users to correctly track where
constraints are declared in the event that they are subclassing `aerosandbox.Opti`. Modifies the
stacklevel of the declaration tracked, which is then presented using
`aerosandbox.Opti.constraint_declaration()`.
Returns:
The dual variable associated with the new constraint. If the `constraint` input is a list, returns
a list of dual variables.
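For example, a minimal sketch of retrieving a dual variable (the actual value depends on the problem):
>>> opti = asb.Opti()
>>> x = opti.variable(init_guess=10)
>>> dual = opti.subject_to(x >= 5)
>>> opti.minimize(x ** 2)
>>> sol = opti.solve()
>>> dual_value = sol(dual)  # The Lagrange multiplier associated with the constraint x >= 5.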
"""
# Determine whether you're dealing with a single (possibly vectorized) constraint or a list of constraints.
# If the latter, recursively apply them.
if type(constraint) in (list, tuple):
return [
self.subject_to(each_constraint, _stacklevel=_stacklevel + 2) # return the dual of each constraint
for each_constraint in constraint
]
# If it's a proper constraint (MX-type and non-parametric),
# pass it into the parent class Opti formulation and be done with it.
if isinstance(constraint, cas.MX) and not self.advanced.is_parametric(constraint):
# constraint = cas.cse(constraint)
super().subject_to(constraint)
dual = self.dual(constraint)
# Track where this constraint was declared in code.
n_cons = np.length(constraint)
filename, lineno, code_context = inspect_tools.get_caller_source_location(stacklevel=_stacklevel + 1)
self._constraint_declarations[self._constraint_index_counter] = (
filename,
lineno,
code_context,
n_cons
)
self._constraint_index_counter += np.length(constraint)
return dual
else: # Constraint is not valid because it is not MX type or is parametric.
try:
constraint_satisfied = np.all(self.value(constraint)) # Determine if the constraint is true
except Exception:
raise TypeError(f"""Opti.subject_to could not determine the truthiness of your constraint, and it
doesn't appear to be a symbolic type or a boolean type. You supplied the following constraint:
{constraint}""")
if isinstance(constraint,
cas.MX) and not constraint_satisfied: # Determine if the constraint is *almost* true
try:
LHS = constraint.dep(0)
RHS = constraint.dep(1)
LHS_value = self.value(LHS)
RHS_value = self.value(RHS)
except Exception:
raise ValueError(
"""Could not evaluate the LHS and RHS of the constraint - are you sure you passed in a comparative expression?""")
constraint_satisfied = np.allclose(LHS_value,
RHS_value) # Call the constraint satisfied if it is *almost* true.
if constraint_satisfied or self.ignore_violated_parametric_constraints:
# If the constraint(s) always evaluates True (e.g. if you enter "5 > 3"), skip it.
# This allows you to toggle frozen variables without causing problems with setting up constraints.
return None # dual of an always-true constraint doesn't make sense to evaluate.
else:
# If any of the constraint(s) are always False (e.g. if you enter "5 < 3"), raise an error.
# This indicates that the problem is infeasible as-written, likely because the user has frozen too
# many decision variables using the Opti.variable(freeze=True) syntax.
raise RuntimeError(f"""The problem is infeasible due to a constraint that always evaluates False.
This can happen if you've frozen too many decision variables, leading to an overconstrained problem.""")
def minimize(self,
f: cas.MX,
) -> None:
# f = cas.cse(f)
super().minimize(f)
def parameter(self,
value: Union[float, np.ndarray] = 0.,
n_params: int = None,
) -> cas.MX:
"""
Initializes a new parameter (or vector of parameters). You must pass a value (`value`) upon defining a new
parameter. Dimensionality is inferred from this value, but it can be overridden; see below for syntax.
Args:
value: Value to set the new parameter to.
This can either be a float or a NumPy ndarray; the dimension of the parameter (i.e. scalar,
vector) that is created will be automatically inferred from the shape of the value you provide here.
(Although it can be overridden using the `n_params` parameter; see below.)
For scalar parameters, your value should be a float:
>>> opti = asb.Opti()
>>> scalar_param = opti.parameter(value=5) # Initializes a scalar parameter and sets its value to 5.
For vector parameters, your value should be either:
* a float, in which case you must pass the length of the vector as `n_params`, otherwise a scalar
parameter will be created:
>>> opti = asb.Opti()
>>> vector_param = opti.parameter(value=5, n_params=10) # Initializes a vector parameter of length
>>> # 10, with all 10 elements set to value of 5.
* a NumPy ndarray, in which case each element will be set to the corresponding value in the given
array:
>>> opti = asb.Opti()
>>> vector_param = opti.parameter(value=np.linspace(0, 5, 10)) # Initializes a vector parameter of
>>> # length 10, with all 10 elements set to a value varying from 0 to 5.
n_params: [Optional] Used to manually override the dimensionality of the parameter to create; if not
provided, the dimensionality of the parameter is inferred from `value`.
The only real case where you need to use this argument would be if you are initializing a vector
parameter to a scalar value, but you don't feel like using `value=my_value * np.ones(n_params)`.
For example:
>>> opti = asb.Opti()
>>> vector_param = opti.parameter(value=5, n_params=10) # Initializes a vector parameter of length
>>> # 10, with all 10 elements set to a value of 5.
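Note that a parameter's value can be changed later without re-declaring it; based on the `set_value()`
usage elsewhere in this class, a minimal sketch would be:
>>> opti = asb.Opti()
>>> p = opti.parameter(value=5)
>>> opti.set_value(p, 10)  # Re-assigns the parameter's value to 10 before (re-)solving.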
Returns:
The parameter itself as a symbolic CasADi variable (MX type).
"""
# Infer dimensionality from value if it is not provided
if n_params is None:
n_params = np.length(value)
# Create the parameter
param = super().parameter(n_params)
# Set the value of the parameter
self.set_value(param, value)
return param
def solve(self,
parameter_mapping: Dict[cas.MX, float] = None,
max_iter: int = 1000,
max_runtime: float = 1e20,
callback: Callable[[int], Any] = None,
verbose: bool = True,
jit: bool = False, # TODO document, add unit tests for jit
detect_simple_bounds: bool = False, # TODO document
options: Dict = None, # TODO document
behavior_on_failure: str = "raise",
) -> "OptiSol":
"""
Solve the optimization problem using CasADi with IPOPT backend.
Args:
parameter_mapping: [Optional] Allows you to specify values for parameters.
Dictionary where the key is the parameter and the value is the value to be set to.
Example: # TODO update syntax for required init_guess
>>> opti = asb.Opti()
>>> x = opti.variable()
>>> p = opti.parameter()
>>> opti.minimize(x ** 2)
>>> opti.subject_to(x >= p)
>>> sol = opti.solve(
>>> {
>>> p: 5 # Sets the value of parameter p to 5, then solves.
>>> }
>>> )
max_iter: [Optional] The maximum number of iterations allowed before giving up.
max_runtime: [Optional] Gives the maximum allowable runtime before giving up.
callback: [Optional] A function to be called at each iteration of the optimization algorithm.
Useful for printing progress or displaying intermediate results.
The callback function `func` should have the syntax `func(iteration_number)`, where iteration_number
is an integer corresponding to the current iteration number. In order to access intermediate
quantities of optimization variables (e.g. for plotting), use the `Opti.debug.value(x)` syntax for
each variable `x`.
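For example, a minimal sketch (assumes `x` is a previously-declared variable of interest):
>>> def my_callback(iteration_number):
>>>     print(f"Iteration {iteration_number}: x = {opti.debug.value(x)}")
>>> sol = opti.solve(callback=my_callback)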
verbose: Controls the verbosity of the solver. If True, IPOPT will print its progress to the console.
jit: Experimental. If True, the optimization problem will be compiled to C++ and then JIT-compiled
using the CasADi JIT compiler. This can lead to significant speedups, but may also lead to
unexpected behavior, and may not work on all platforms.
options: [Optional] A dictionary of options to pass to IPOPT. See the IPOPT documentation for a list of
available options.
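For example, a minimal sketch (`ipopt.tol` is a standard IPOPT option, not an AeroSandbox-specific one):
>>> sol = opti.solve(options={"ipopt.tol": 1e-10})  # Tightens IPOPT's convergence tolerance.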
behavior_on_failure: [Optional] What should we do if the optimization fails? Options are:
* "raise": Raise an exception. This is the default behavior.
* "return_last": Returns the solution from the last iteration, and raise a warning.
NOTE: The returned solution may not be feasible! (It also may not be optimal.)
Returns: An OptiSol object that contains the solved optimization problem. To extract values, use
my_optisol(variable).
Example:
>>> sol = opti.solve()
>>> x_opt = sol(x) # Get the value of variable x at the optimum.
"""
if parameter_mapping is None:
parameter_mapping = {}
### If you're loading frozen variables from cache, do it here:
if self.load_frozen_variables_from_cache:
solution_dict = self.get_solution_dict_from_cache()
for category in self.variable_categories_to_freeze:
category_variables = self.variables_categorized[category]
category_values = solution_dict[category]
if len(category_variables) != len(category_values):
raise RuntimeError("""Problem with loading cached solution: it looks like new variables have been
defined since the cached solution was saved (or variables were defined in a different order).
Because of this, the cache cannot be loaded.
Re-run the original optimization study to regenerate the cached solution.""")
for var, val in zip(category_variables, category_values):
if not var.is_manually_frozen:
parameter_mapping = {
**parameter_mapping,
var: val
}
### Map any parameters to needed values
for k, v in parameter_mapping.items():
if not np.is_casadi_type(k, recursive=False):
raise TypeError(f"All keys in `parameter_mapping` must be CasADi parameters; you gave an object of type \'{type(k).__name__}\'.\n"
f"In general, make sure all keys are the result of calling `opti.parameter()`.")
size_k = np.prod(k.shape)
try:
size_v = np.prod(v.shape)
except AttributeError:
size_v = 1
if size_k != size_v:
raise RuntimeError("""Problem with loading cached solution: it looks like the length of a vectorized
variable has changed since the cached solution was saved (or variables were defined in a different order).
Because of this, the cache cannot be loaded.
Re-run the original optimization study to regenerate the cached solution.""")
self.set_value(k, v)
### Set solver settings.
if options is None:
options = {}
default_options = {
"ipopt.sb" : 'yes', # Hide the IPOPT banner.
"ipopt.max_iter" : max_iter,
"ipopt.max_cpu_time" : max_runtime,
"ipopt.mu_strategy" : "adaptive",
"ipopt.fast_step_computation": "yes",
"detect_simple_bounds" : detect_simple_bounds,
}
if jit:
default_options["jit"] = True
# options["compiler"] = "shell" # Recommended by CasADi devs, but doesn't work on my machine
default_options["jit_options"] = {
"flags": ["-O3"],
# "verbose": True
}
if verbose:
default_options["ipopt.print_level"] = 5 # Verbose, per-iteration printing.
else:
default_options["print_time"] = False # No time printing
default_options["ipopt.print_level"] = 0 # No printing from IPOPT
self.solver('ipopt', {
**default_options,
**options,
})
# Set the callback
if callback is not None:
self.callback(callback)
# Do the actual solve
if behavior_on_failure == "raise":
sol = OptiSol(
opti=self,
cas_optisol=super().solve()
)
elif behavior_on_failure == "return_last":
try:
sol = OptiSol(
opti=self,
cas_optisol=super().solve()
)
except RuntimeError:
import warnings
warnings.warn("Optimization failed. Returning last solution.")
sol = OptiSol(
opti=self,
cas_optisol=self.debug
)
if self.save_to_cache_on_solve:
self.save_solution()
return sol
def solve_sweep(self,
parameter_mapping: Dict[cas.MX, np.ndarray],
update_initial_guesses_between_solves=False,
verbose=True,
solve_kwargs: Dict = None,
return_callable: bool = False,
garbage_collect_between_runs: bool = False,
) -> Union[np.ndarray, Callable[[cas.MX], np.ndarray]]:
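"""
Solves the optimization problem repeatedly while sweeping one or more parameters over arrays of values.
(This summary is inferred from the implementation below; see the code for exact behavior.)
Args:
parameter_mapping: Dictionary where each key is a parameter (from `opti.parameter()`) and each value
is an array of values to sweep over. The value arrays are broadcast against each other, and one
solve is performed (in serial) per element of the broadcasted result.
update_initial_guesses_between_solves: If True, each solve is warm-started from the previous solution
via `set_initial_from_sol()`.
verbose: If True, prints a one-line progress report for each run.
solve_kwargs: Extra keyword arguments passed through to each underlying `Opti.solve()` call.
return_callable: If True, returns a function that maps a quantity `x` to an array of its values
across the sweep (NaN for failed runs). If False, returns an object-dtype array of OptiSol objects
(None for failed runs).
garbage_collect_between_runs: If True, runs `gc.collect()` before each solve to limit memory growth.
Returns: Either an object-dtype array of OptiSol objects or a callable, depending on `return_callable`.
Example, as a minimal sketch (the names `x` and `p` are purely illustrative):
>>> opti = asb.Opti()
>>> x = opti.variable(init_guess=1)
>>> p = opti.parameter(value=1)
>>> opti.minimize((x - p) ** 2)
>>> get_values = opti.solve_sweep({p: np.linspace(0, 10, 11)}, return_callable=True)
>>> x_values = get_values(x)  # Array of the optimal x for each swept value of p.
"""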
# Handle defaults
if solve_kwargs is None:
solve_kwargs = {}
solve_kwargs = {
**dict(
verbose=False,
max_iter=200,
),
**solve_kwargs
}
# Split parameter_mappings up so that it can be passed into run() via np.vectorize
keys: Tuple[cas.MX] = tuple(parameter_mapping.keys())
values: Tuple[np.ndarray[float]] = tuple(parameter_mapping.values())
# Display an output
if verbose:
print("Running optimization sweep in serial...")
n_runs = np.broadcast(*values).size
run_number = 1
def run(*args: Tuple[float]) -> Optional["OptiSol"]:
# Collect garbage before each run, to avoid memory issues.
if garbage_collect_between_runs:
import gc
gc.collect()
# Reconstruct parameter mapping on a run-by-run basis by zipping together keys and this run's values.
parameter_mappings_for_this_run: Dict[cas.MX, float] = {
k: v
for k, v in zip(keys, args)
}
# Pull in run_number so that we can increment this counter
nonlocal run_number
# Display as needed
if verbose:
print(
"|".join(
[
f"Run {run_number}/{n_runs}".ljust(12)
] + [
f"{v:10.5g}"
for v in args
] + [""]
),
end='' # Leave the newline off, since we'll complete the line later with a success or fail print.
)
run_number += 1
import time
start_time = time.time()
try:
sol = self.solve(
parameter_mapping=parameter_mappings_for_this_run,
**solve_kwargs
)
if update_initial_guesses_between_solves:
self.set_initial_from_sol(sol)
if verbose:
stats = sol.stats()
print(f" Solved in {stats['iter_count']} iterations, {time.time() - start_time:.2f} sec.")
return sol
except RuntimeError:
if verbose:
sol = OptiSol(opti=self, cas_optisol=self.debug)
stats = sol.stats()
print(f" Failed in {stats['iter_count']} iterations, {time.time() - start_time:.2f} sec.")
return None
run_vectorized = np.vectorize(
run,
otypes='O' # object output
)
sols = run_vectorized(*values)
if return_callable:
def get_vals(x: cas.MX) -> np.ndarray:
return np.vectorize(
lambda sol: sol.value(x) if sol is not None else np.nan
)(sols)
return get_vals
else:
return sols
### Debugging Methods
def find_variable_declaration(self,
index: int,
use_full_filename: bool = False,
return_string: bool = False,
) -> Union[None, str]:
### Check inputs
if index < 0:
raise ValueError("Indices must be nonnegative.")
if index >= self._variable_index_counter:
raise ValueError(
f"The variable index exceeds the number of declared variables ({self._variable_index_counter})!"
)
index_of_first_element = self._variable_declarations.iloc[self._variable_declarations.bisect_right(index) - 1]
filename, lineno, code_context, n_vars = self._variable_declarations[index_of_first_element]
source = inspect_tools.get_source_code_from_location(
filename=filename,
lineno=lineno,
code_context=code_context,
).strip("\n")
is_scalar = n_vars == 1
title = f"{'Scalar' if is_scalar else 'Vector'} variable"
if not is_scalar:
title += f" (index {index - index_of_first_element} of {n_vars})"
string = "\n".join([
"",
f"{title} defined in `{str(filename) if use_full_filename else filename.name}`, line {lineno}:",
"",
"```",
source,
"```"
])
if return_string:
return string
else:
print(string)
def find_constraint_declaration(self,
index: int,
use_full_filename: bool = False,
return_string: bool = False
) -> Union[None, str]:
### Check inputs
if index < 0:
raise ValueError("Indices must be nonnegative.")
if index >= self._constraint_index_counter:
raise ValueError(
f"The constraint index exceeds the number of declared constraints ({self._constraint_index_counter})!"
)
index_of_first_element = self._constraint_declarations.iloc[
self._constraint_declarations.bisect_right(index) - 1
]
filename, lineno, code_context, n_cons = self._constraint_declarations[index_of_first_element]
source = inspect_tools.get_source_code_from_location(
filename=filename,
lineno=lineno,
code_context=code_context,
).strip("\n")
is_scalar = n_cons == 1
title = f"{'Scalar' if is_scalar else 'Vector'} constraint"
if not is_scalar:
title += f" (index {index - index_of_first_element} of {n_cons})"
string = "\n".join([
"",
f"{title} defined in `{str(filename) if use_full_filename else filename.name}`, line {lineno}:",
"",
"```",
source,
"```"
])
if return_string:
return string
else:
print(string)
### Advanced Methods
def set_initial_from_sol(self,
sol: cas.OptiSol,
initialize_primals=True,
initialize_duals=True,
) -> None:
"""
Sets the initial value of all variables in the Opti object to the solution of another Opti instance. Useful
for warm-starting an Opti instance based on the result of another instance.
Args:
sol: Takes in the solution object. Assumes that `sol` corresponds to exactly the same optimization
problem as this Opti instance, perhaps with different parameter values.
Returns: None (in-place)
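Usage example, as a minimal sketch (warm-starting a re-solve of the same problem):
>>> sol = opti.solve()
>>> opti.set_initial_from_sol(sol)
>>> new_sol = opti.solve()  # This solve now starts from the previous solution.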
"""
if initialize_primals:
self.set_initial(self.x, sol.value(self.x))
if initialize_duals:
self.set_initial(self.lam_g, sol.value(self.lam_g))
def save_solution(self):
if self.cache_filename is None:
raise ValueError("""In order to use the save feature, you need to supply a filepath for the cache upon
initialization of this instance of the Opti stack. For example: Opti(cache_filename = "cache.json")""")
# Write a function that tries to turn an iterable into a JSON-serializable list
def try_to_put_in_list(iterable):
try:
return list(iterable)
except TypeError:
return iterable
# Build up a dictionary of all the variables
solution_dict = {}
for category, category_variables in self.variables_categorized.items():
category_values = [
try_to_put_in_list(self.value(variable))
for variable in category_variables
]
solution_dict[category] = category_values
# Write the dictionary to file
with open(self.cache_filename, "w+") as f:
json.dump(
solution_dict,
fp=f,
indent=4
)
return solution_dict
def get_solution_dict_from_cache(self):
if self.cache_filename is None:
raise ValueError("""In order to use the load feature, you need to supply a filepath for the cache upon
initialization of this instance of the Opti stack. For example: Opti(cache_filename = "cache.json")""")
with open(self.cache_filename, "r") as f:
solution_dict = json.load(fp=f)
# Turn all vectorized variables back into NumPy arrays
for category in solution_dict:
for i, var in enumerate(solution_dict[category]):
solution_dict[category][i] = np.array(var)
return solution_dict
### Methods for Dynamics and Control Problems
def derivative_of(self,
variable: cas.MX,
with_respect_to: Union[np.ndarray, cas.MX],
derivative_init_guess: Union[float, np.ndarray], # TODO add default
derivative_scale: Union[float, np.ndarray] = None,
method: str = "midpoint",
explicit: bool = False, # TODO implement explicit
_stacklevel: int = 1,
) -> cas.MX:
"""
Returns a quantity that is either defined or constrained to be a derivative of an existing variable.
For example:
>>> opti = Opti()
>>> position = opti.variable(init_guess=0, n_vars=100)
>>> time = np.linspace(0, 1, 100)
>>> velocity = opti.derivative_of(position, with_respect_to=time)
>>> acceleration = opti.derivative_of(velocity, with_respect_to=time)
Args:
variable: The variable or quantity that you are taking the derivative of. The "numerator" of the
derivative, in colloquial parlance.
with_respect_to: The variable or quantity that you are taking the derivative with respect to. The
"denominator" of the derivative, in colloquial parlance.
In a typical example case, this `with_respect_to` parameter would be time. Please make sure that the
value of this parameter is monotonically increasing, otherwise you may get nonsensical answers.
derivative_init_guess: Initial guess for the value of the derivative. Should be either a float (in which
case the initial guess will be a vector equal to this value) or a vector of initial guesses with the same
length as `variable`. For more info, look at the docstring of opti.variable()'s `init_guess` parameter.
derivative_scale: Scale factor for the value of the derivative. For more info, look at the docstring of
opti.variable()'s `scale` parameter.
method: The type of integrator to use to define this derivative. Options are:
* "forward euler" - a first-order-accurate forward Euler method
Citation: https://en.wikipedia.org/wiki/Euler_method
* "backwards euler" - a first-order-accurate backwards Euler method
Citation: https://en.wikipedia.org/wiki/Backward_Euler_method
* "midpoint" or "trapezoid" - a second-order-accurate midpoint method
Citation: https://en.wikipedia.org/wiki/Midpoint_method
* "simpson" - Simpson's rule for integration
Citation: https://en.wikipedia.org/wiki/Simpson%27s_rule
* "runge-kutta" or "rk4" - a fourth-order-accurate Runge-Kutta method. I suppose that technically,
"forward euler", "backward euler", and "midpoint" are all (lower-order) Runge-Kutta methods...
Citation: https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods#The_Runge%E2%80%93Kutta_method
* "runge-kutta-3/8" - A modified version of the Runge-Kutta 4 proposed by Kutta in 1901. Also
fourth-order-accurate, but all of the error coefficients are smaller than they are in the standard
Runge-Kutta 4 method. The downside is that more floating point operations are required per timestep,
as the Butcher tableau is more dense (i.e. not banded).
Citation: Kutta, Martin (1901), "Beitrag zur näherungsweisen Integration totaler
Differentialgleichungen", Zeitschrift für Mathematik und Physik, 46: 435–453
explicit: If true, returns an explicit derivative rather than an implicit one. In other words,
this *defines* the output to be a derivative of the input rather than *constraining* the output to be a
derivative of the input.
Explicit derivatives result in smaller, denser systems of equations that are more akin to
shooting-type methods. Implicit derivatives result in larger, sparser systems of equations that are
more akin to collocation methods. Explicit derivatives are better for simple, stable systems with few
states, while implicit derivatives are better for complex, potentially-unstable systems with many
states.
# TODO implement explicit
_stacklevel: Optional and advanced, purely used for debugging. Allows users to correctly track where
constraints are declared in the event that they are subclassing `aerosandbox.Opti`. Modifies the
stacklevel of the declaration tracked, which is then presented using
`aerosandbox.Opti.variable_declaration()` and `aerosandbox.Opti.constraint_declaration()`.
Returns: A vector consisting of the derivative of the parameter `variable` with respect to `with_respect_to`.
"""
### Set defaults
# if with_respect_to is None:
# with_respect_to = np.ones(shape=np.length(variable)) # TODO consider whether we want to even allow this...
# if derivative_init_guess is None:
# raise NotImplementedError() # TODO implement default value for this
### Check inputs
N = np.length(variable)
if not np.length(with_respect_to) == N:
raise ValueError("The inputs `variable` and `with_respect_to` must be vectors of the same length!")
### Clean inputs
method = method.lower()
### Implement the derivative
if not explicit:
derivative = self.variable(
init_guess=derivative_init_guess,
n_vars=N,
scale=derivative_scale,
)
self.constrain_derivative(
derivative=derivative,
variable=variable,
with_respect_to=with_respect_to,
method=method,
_stacklevel=_stacklevel + 1
)
else:
raise NotImplementedError("Haven't yet implemented explicit derivatives! Use implicit ones for now...")
return derivative
def constrain_derivative(self,
derivative: cas.MX,
variable: cas.MX,
with_respect_to: Union[np.ndarray, cas.MX],
method: str = "midpoint",
_stacklevel: int = 1,
) -> None:
"""
Adds a constraint to the optimization problem such that:
d(variable) / d(with_respect_to) == derivative
Can be used directly; also called indirectly by opti.derivative_of() for implicit derivative creation.
Args:
derivative: The derivative that is to be constrained here.
variable: The variable or quantity that you are taking the derivative of. The "numerator" of the
derivative, in colloquial parlance.
with_respect_to: The variable or quantity that you are taking the derivative with respect to. The
"denominator" of the derivative, in colloquial parlance.
In a typical example case, this `with_respect_to` parameter would be time. Please make sure that the
value of this parameter is monotonically increasing, otherwise you may get nonsensical answers.
method: The type of integrator to use to define this derivative. Options are:
* "forward euler" - a first-order-accurate forward Euler method
Citation: https://en.wikipedia.org/wiki/Euler_method
* "backwards euler" - a first-order-accurate backwards Euler method
Citation: https://en.wikipedia.org/wiki/Backward_Euler_method
* "midpoint" or "trapezoid" - a second-order-accurate midpoint method
Citation: https://en.wikipedia.org/wiki/Midpoint_method
* "simpson" - Simpson's rule for integration
Citation: https://en.wikipedia.org/wiki/Simpson%27s_rule
* "runge-kutta" or "rk4" - a fourth-order-accurate Runge-Kutta method. I suppose that technically,
"forward euler", "backward euler", and "midpoint" are all (lower-order) Runge-Kutta methods...
Citation: https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods#The_Runge%E2%80%93Kutta_method
* "runge-kutta-3/8" - A modified version of the Runge-Kutta 4 proposed by Kutta in 1901. Also
fourth-order-accurate, but all of the error coefficients are smaller than they are in the standard
Runge-Kutta 4 method. The downside is that more floating point operations are required per timestep,
as the Butcher tableau is more dense (i.e. not banded).
Citation: Kutta, Martin (1901), "Beitrag zur näherungsweisen Integration totaler
Differentialgleichungen", Zeitschrift für Mathematik und Physik, 46: 435–453
Note that all methods are expressed as integrators rather than differentiators; this prevents
singularities from forming in the limit of timestep approaching zero. (For those coming from the PDE
world, this is analogous to using finite volume methods rather than finite difference methods to allow
shock capturing.)
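For concreteness, the "midpoint" / "trapezoid" option implemented below enforces, element-wise for
each interval i (a sketch of the trapezoidal discretization):
variable[i+1] - variable[i] == (derivative[i] + derivative[i+1]) / 2 * (with_respect_to[i+1] - with_respect_to[i])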
_stacklevel: Optional and advanced, purely used for debugging. Allows users to correctly track where
constraints are declared in the event that they are subclassing `aerosandbox.Opti`. Modifies the
stacklevel of the declaration tracked, which is then presented using
`aerosandbox.Opti.variable_declaration()` and `aerosandbox.Opti.constraint_declaration()`.
Returns: None (adds constraint in-place).
"""
try:
d_var = np.diff(variable)
except ValueError:
d_var = np.diff(np.zeros_like(with_respect_to))
try:
derivative[0]
except (TypeError, IndexError):
derivative = np.full_like(with_respect_to, fill_value=derivative)
d_time = np.diff(with_respect_to) # Calculate the timestep
# TODO scale constraints by variable scale?
# TODO make
if method == "forward euler" or method == "forward" or method == "forwards":
# raise NotImplementedError
self.subject_to(
d_var == derivative[:-1] * d_time,
_stacklevel=_stacklevel + 1
)
elif method == "backward euler" or method == "backward" or method == "backwards":
# raise NotImplementedError
self.subject_to(
d_var == derivative[1:] * d_time,
_stacklevel=_stacklevel + 1
)
elif method == "midpoint" or method == "trapezoid" or method == "trapezoidal":
self.subject_to(
d_var == np.trapz(derivative) * d_time,
_stacklevel=_stacklevel + 1
)
elif method == "simpson":
raise NotImplementedError
elif method == "runge-kutta" or method == "rk4":
raise NotImplementedError
elif method == "runge-kutta-3/8":
raise NotImplementedError
else:
raise ValueError("Bad value of `method`!")
class OptiSol:
def __init__(self,
opti: Opti,
cas_optisol: cas.OptiSol
):
"""
An OptiSol object represents a solution to an optimization problem. This class is a wrapper around CasADi's
`OptiSol` class that provides convenient solution query utilities for various Python data types.
Args:
opti: The `Opti` object that generated this solution.
cas_optisol: The `casadi.OptiSol` object from CasADi's optimization solver.
Returns:
An `OptiSol` object.
Usage:
>>> # Initialize an Opti object.
>>> opti = asb.Opti()
>>>
>>> # Define a scalar variable.
>>> x = opti.variable(init_guess=2.0)
>>>
>>> # Define an objective function.
>>> opti.minimize(x ** 2)
>>>
>>> # Solve the optimization problem. `sol` is now an OptiSol object.
>>> sol = opti.solve()
>>>
>>> # Retrieve the value of the variable x in the solution:
>>> x_value = sol.value(x)
>>>
>>> # Or, to be more concise:
>>> x_value = sol(x)
"""
self.opti = opti
self._sol = cas_optisol
def __call__(self, x: Union[cas.MX, np.ndarray, float, int, List, Tuple, Set, Dict, Any]) -> Any:
"""
A shorthand alias for `sol.value(x)`. See `OptiSol.value()` documentation for details.
Args:
x: A Python data structure to substitute values into, using the solution in this OptiSol object.
Returns:
A copy of `x`, where all symbolic optimization variables (recursively substituted at unlimited depth)
have been converted to float or array values.
"""
return self.value(x)
def _value_scalar(self, x: Union[cas.MX, np.ndarray, float, int]) -> Union[float, np.ndarray]:
"""
Gets the value of a variable at the solution point. For developer use - see following paragraph.
This method is basically a less-powerful version of calling either `sol(x)` or `sol.value(x)` - if you're a
user and not a developer, you almost-certainly want to use one of those methods instead, as those are less
fragile with respect to various input data types. This method exists only as an abstraction to make it easier
for other developers to subclass OptiSol, if they wish to intercept the variable substitution process.
Args:
x:
Returns:
"""
return self._sol.value(x)
def value(self,
x: Union[cas.MX, np.ndarray, float, int, List, Tuple, Set, Dict, Any],
recursive: bool = True,
warn_on_unknown_types: bool = False
) -> Any:
"""
Gets the value of a variable (or a data structure) at the solution point. This solution point is the optimum,
if the optimization process solved successfully.
On a computer science level, this method converts a symbolic optimization variable to a concrete float or
array value. More generally, it converts any Python data structure (along with any of its contents,
recursively, at unlimited depth), replacing any symbolic optimization variables it finds with concrete float
or array values.
Note that, for convenience, you can simply call:
>>> sol(x)
if you prefer. This is equivalent to calling this method with the syntax:
>>> sol.value(x)
(these are aliases of each other)
Args:
x: A Python data structure to substitute values into, using the solution in this OptiSol object.
recursive: If True, the substitution will be performed recursively. Otherwise, only the top-level data
structure will be converted.
warn_on_unknown_types: If True, a warning will be issued whenever a data type is encountered that can
neither be converted nor definitively identified as un-convertible.
Returns:
A copy of `x`, where all symbolic optimization variables (recursively substituted at unlimited depth)
have been converted to float or array values.
Usage:
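A minimal sketch of recursive substitution (the dictionary contents here are purely illustrative):
>>> opti = asb.Opti()
>>> x = opti.variable(init_guess=3)
>>> opti.minimize(x ** 2)
>>> sol = opti.solve()
>>> outputs = {"x": x, "x_squared": x ** 2, "note": "plain strings pass through unchanged"}
>>> sol.value(outputs)  # Returns a dict where the symbolic entries are replaced by floats.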
"""
if not recursive:
return self._value_scalar(x)
# If it's a CasADi type, do the conversion, and call it a day.
if np.is_casadi_type(x, recursive=False):
return self._value_scalar(x)
t = type(x)
# If it's a Python iterable, recursively convert it, and preserve the type as best as possible.
if issubclass(t, list):
return [self.value(i) for i in x]
if issubclass(t, tuple):
return tuple([self.value(i) for i in x])
if issubclass(t, (set, frozenset)):
return {self.value(i) for i in x}
if issubclass(t, dict):
return {
self.value(k): self.value(v)
for k, v in x.items()
}
# Skip certain Python types
if issubclass(t, (
bool, str,
int, float, complex,
range,
type(None),
bytes, bytearray, memoryview
)):
return x
# Skip certain CasADi types
if issubclass(t, (
cas.Opti, cas.OptiSol,
)):
return x
# If it's any other type, try converting its attribute dictionary, if it has one:
try:
new_x = copy.copy(x)
for k, v in x.__dict__.items():
setattr(new_x, k, self.value(v))
return new_x
except (AttributeError, TypeError):
pass
# Try converting it blindly. This will catch most NumPy-array-like types.
try:
return self._value_scalar(x)
except (NotImplementedError, TypeError, ValueError):
pass
# At this point, we're not really sure what type the object is. Raise a warning if directed and return the
# item, then hope for the best.
if warn_on_unknown_types:
import warnings
warnings.warn(f"In solution substitution, could not convert an object of type {t}.\n"
f"Returning it and hoping for the best.", UserWarning)
return x
def stats(self) -> Dict[str, Any]:
return self._sol.stats()
def value_variables(self):
return self._sol.value_variables()
def value_parameters(self):
return self._sol.value_parameters()
def show_infeasibilities(self, tol: float = 1e-3) -> None:
"""
Prints a summary of any violated constraints in the solution.
Args:
tol: The tolerance for violation. If the constraint is violated by less than this amount, it will not be
printed.
Returns: None (prints to console)
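Usage, as a minimal sketch (assumes `sol` came from a solve that ended with violated constraints):
>>> sol.show_infeasibilities(tol=1e-6)  # Prints every constraint violated by more than 1e-6.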
"""
lbg = self(self.opti.lbg)
ubg = self(self.opti.ubg)
g = self(self.opti.g)
constraint_violated = np.logical_or(
g + tol < lbg,
g - tol > ubg
)
lbg_isfinite = np.isfinite(lbg)
ubg_isfinite = np.isfinite(ubg)
for i in np.arange(len(g)):
if constraint_violated[i]:
print("-" * 50)
if lbg_isfinite[i] and ubg_isfinite[i]:
if lbg[i] == ubg[i]:
print(f"{lbg[i]} == {g[i]} (violation: {np.abs(g[i] - lbg[i])})")
else:
print(f"{lbg[i]} < {g[i]} < {ubg[i]} (violation: {np.maximum(lbg[i] - g[i], g[i] - ubg[i])})")
elif lbg_isfinite[i] and not ubg_isfinite[i]:
print(f"{lbg[i]} < {g[i]} (violation: {lbg[i] - g[i]})")
elif not lbg_isfinite[i] and ubg_isfinite[i]:
print(f"{g[i]} < {ubg[i]} (violation: {g[i] - ubg[i]})")
else:
raise ValueError(
"Contact the AeroSandbox developers if you see this message; it should be impossible.")
self.opti.find_constraint_declaration(index=i)
if __name__ == '__main__':
import pytest
# pytest.main()
opti = Opti() # set up an optimization environment
a = opti.parameter(1)
b = opti.parameter(100)
# Define optimization variables
x = opti.variable(init_guess=0)
y = opti.variable(init_guess=0)
# Define objective
f = (a - x) ** 2 + b * (y - x ** 2) ** 2
opti.minimize(f)
# Optimize
sol = opti.solve()
for i in [x, y]:
assert sol.value(i) == pytest.approx(1, abs=1e-4)
| AeroSandbox | /AeroSandbox-4.1.1.tar.gz/AeroSandbox-4.1.1/aerosandbox/optimization/opti.py | opti.py |
import inspect
from typing import List, Union, Tuple, Optional, Set, Any
from pathlib import Path
from aerosandbox.tools.string_formatting import has_balanced_parentheses
import numpy as np
def get_caller_source_location(
stacklevel: int = 1,
) -> (Path, int, str):
"""
Gets the file location where this function itself (`get_caller_source_location()`) is called.
This is not usually useful by itself. However, with the use of the `stacklevel` argument, you can get the call
location at any point arbitrarily high up in the call stack from this function.
This potentially lets you determine the file location where any Python object was declared.
Examples:
Consider the file below (and assume we somehow have this function in scope):
my_file.py:
>>> def my_func():
>>> print(
>>> get_caller_source_location(stacklevel=2)
>>> )
>>>
>>> if_you_can_see_this_it_works = my_func()
This will print out the following:
(/path/to/my_file.py, 5, "if_you_can_see_this_it_works = my_func()\n")
Args:
stacklevel: Choose the level of the stack that you want to retrieve source code at. Higher integers will get
you higher (i.e., more end-user-facing) in the stack. Same behaviour as the `stacklevel` argument in
warnings.warn().
Returns: A tuple of:
(filename, lineno, code_context)
* `filename`: a Path object (see `pathlib.Path` from the standard Python library) of the file where this function was called.
* `lineno`: the line number in the file where this function was called.
* `code_context`: the immediate line of code where this function was called. A string. Note that, in the case of
multiline statements, this may not be a complete Python expression. Includes the trailing newline character ("\n") at the end.
"""
### Go up `stacklevel` frames from the current one to get to the caller frame.
frame = inspect.currentframe()
for _ in range(stacklevel):
frame = frame.f_back
### Extract the frame info (an `inspect.Traceback` type) from the caller frame
frame_info: inspect.Traceback = inspect.getframeinfo(frame)
filename = Path(frame_info.filename)
lineno = frame_info.lineno
if frame_info.code_context is not None:
code_context = "".join(frame_info.code_context)
else:
code_context = ""
return filename, lineno, code_context
def get_source_code_from_location(
filename: Union[Path, str],
lineno: int,
code_context: str = None,
strip_lines: bool = False
) -> str:
"""
Gets the source code of the single statement that begins at the file location specified.
File location must, at a minimum, contain the filename and the line number. Optionally, you can also provide `code_context`.
These should have the format:
* `filename`: a Path object (see `pathlib.Path` from the standard Python library) of the file where this function was called.
* `lineno`: the line number in the file where this function was called.
Optionally, you can also provide `code_context`, which has the format:
* `code_context`: the immediate line of code where this function was called. A string. Note that, in the case of
multiline statements, this may not be a complete Python expression.
To get source code from further up the call stack, use `get_caller_source_code()`, which accepts a `stacklevel` argument.
Args:
filename: a Path object (see `pathlib.Path` from the standard Python library) of the file where this function
was called. Alternatively, a string containing a filename.
lineno: the line number in the file where this function was called. An integer. Should refer to the first
line of a string in question.
code_context: Optional. Should be a string containing the immediate line of code at this location. If
provided, allows short-circuiting (bypassing file I/O) if the line is a complete expression.
strip_lines: A boolean flag about whether or not to strip leading and trailing whitespace off each line of a
multi-line function call. See the built-in string method `str.strip()` for behaviour.
Returns: The source code of the call, as a string. Might be a multi-line string (i.e., contains '\n' characters)
if the call is multi-line. Almost certainly (but not guaranteed due to edge cases) to be a complete Python expression.
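Example, as a minimal sketch (the filename and line number are hypothetical):
>>> get_source_code_from_location(filename="my_module.py", lineno=10)
>>> # Returns the full statement (possibly spanning several lines) that begins on line 10 of my_module.py.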
"""
### If Python's auto-extracted "code context" is a complete statement, then you're done here.
if code_context is not None:
if has_balanced_parentheses(code_context):
return code_context
### Initialize the caller source lines, which is a list of strings that contain the source for the call.
source_lines: List[str] = []
### Read the source lines of code surrounding the call
try:
with open(filename, "r") as f: # Read the file containing the call
for _ in range(lineno - 1): # Skip the first N lines of code, until you get to the call
f.readline() # Unfortunately there's no way around this, since you need to find the "\n" encodings in the file
parenthesis_level = 0 # Track the number of "(" and ")" characters, so you know when the function call is complete
def add_line() -> None:
"""
Adds the subsequent line to the caller source lines (`caller_source_lines`). In-place.
"""
line = f.readline()
if strip_lines:
line = line.strip()
nonlocal parenthesis_level # TODO add "\" support
for char in line:
if char == "(":
parenthesis_level += 1
elif char == ")":
parenthesis_level -= 1
source_lines.append(line)
### Get the first line, which is always part of the function call, and includes the opening parenthesis
add_line()
### Do subsequent lines
while parenthesis_level > 0:
add_line()
except OSError as e:
raise FileNotFoundError(
"\n".join([
"Couldn't retrieve source code at this stack level, because the source code file couldn't be opened for some reason.",
"One common possible reason is that you're referring to an IPython console with a multi-line statement."
])
)
source = "".join(source_lines)
return source
def get_caller_source_code(
stacklevel: int = 1,
strip_lines: bool = False
) -> str:
"""
Gets the source code of wherever this function is called.
You can get source code from further up the call stack by using the `stacklevel` argument.
Args:
stacklevel: Choose the level of the stack that you want to retrieve source code at. Higher integers will get
you higher (i.e., more end-user-facing) in the stack. Same behaviour as the `stacklevel` argument in
warnings.warn().
strip_lines: A boolean flag about whether or not to strip leading and trailing whitespace off each line of a
multi-line function call. See the built-in string method `str.strip()` for behaviour.
Returns: The source code of the call, as a string. Might be a multi-line string (i.e., contains '\n' characters)
if the call is multi-line. Almost certainly (but not guaranteed due to edge cases) to be a complete Python expression.
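Example (mirroring the demo in the `__main__` block at the bottom of this file):
>>> print("Caller source code:\n", get_caller_source_code(stacklevel=1))
>>> # Prints the source of this very print(...) statement, as a string.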
"""
filename, lineno, code_context = get_caller_source_location(
stacklevel=stacklevel + 1
)
return get_source_code_from_location(
filename=filename,
lineno=lineno,
code_context=code_context,
strip_lines=strip_lines
)
def get_function_argument_names_from_source_code(source_code: str) -> List[str]:
"""
Gets the names of the function arguments found in a particular line of source code.
Specifically, it retrieves the names of the arguments in the first function call found in the source code string.
If the source code line is an assignment statement, only the right-hand-side of the line is analyzed.
Also, removes all line breaks ('\n').
Examples function inputs and outputs:
"f(a, b)" -> ['a', 'b']
"f(a,b)" -> ['a', 'b']
"f(\na,\nb)" -> ['a', 'b']
"g = f(a, b)" -> ['a', 'b']
"g.h = f(a, b)" -> ['a', 'b']
"g.h() = f(a, b)" -> ['a', 'b']
"g.h(i=j) = f(a, b)" -> ['a', 'b']
"f(a, b) + g(h)" -> ['a', 'b']
"f(a: int, b: MyType())" -> ['a', 'b']
"f(a, b).g(c, d)" -> ['a', 'b']
"f(a(b), c)" -> ['a(b)', 'c']
"f(a(b,c), d)" -> ['a(b,c)', 'd']
"f({a:b}, c)" -> ['{a:b}', 'c']
"f(a[b], c)" -> ['a[b]', 'c']
"f({a:b, c:d}, e)" -> ['{a:b, c:d}', 'e']
"f({a:b,\nc:d}, e)" -> ['{a:b,c:d}', 'e']
"f(dict(a=b,c=d), e)" -> ['dict(a=b,c=d)', 'e']
"f(a=1, b=2)" -> ['a=1', 'b=2']
"f()" -> ['']
"f(a, [i for i in l])" -> ['a', '[i for i in l]'],
"f(incomplete, " -> raises ValueError
"3 + 5" -> raises ValueError
"" -> raises ValueError
Args:
source_code: A line of Python source code that includes a function call. Can be a multi-line piece of source code (e.g., includes '\n').
Returns: A list of strings containing all of the function arguments. If keyword arguments are found, includes both the key and the value, as-written.
"""
assignment_equals_index = 0
parenthesis_level = 0
for i, char in enumerate(source_code):
if char == "(":
parenthesis_level += 1
elif char == ")":
parenthesis_level -= 1
elif char == "=" and parenthesis_level == 0:
assignment_equals_index = i + 1
break
source_code_rhs = source_code[assignment_equals_index:]
source_code_rhs = source_code_rhs.replace("\n", "")
parenthesis_level = 0
braces_level = 0
for i, char in enumerate(source_code_rhs):
if char == "(":
parenthesis_level += 1
break
if parenthesis_level == 0:
raise ValueError("No function call was found in the source code provided!")
arg_names: List[str] = []
current_arg = ""
in_type_hinting_block = False
while parenthesis_level != 0:
i += 1
if i >= len(source_code_rhs):
raise ValueError("Couldn't match all parentheses, so this doesn't look like valid code!")
char = source_code_rhs[i]
if char == "(":
parenthesis_level += 1
elif char == ")":
parenthesis_level -= 1
elif char == "{":
braces_level += 1
elif char == "}":
braces_level -= 1
if char == "," and parenthesis_level == 1 and braces_level == 0:
arg_names.append(current_arg)
current_arg = ""
in_type_hinting_block = False
elif char == ":" and parenthesis_level == 1 and braces_level == 0:
in_type_hinting_block = True
elif parenthesis_level >= 1 and not in_type_hinting_block:
current_arg += char
arg_names.append(current_arg.strip())
def clean(s: str) -> str:
return s.strip()
arg_names = [
clean(arg) for arg in arg_names
]
return arg_names
def codegen(
x: Any,
indent_str: str = " ",
_required_imports: Optional[Set[str]] = None,
_recursion_depth: int = 0,
) -> Tuple[str, Set[str]]:
"""
Attempts to generate a string of Python code that, when evaluated, would produce the same value as the input.
Also generates the required imports for the code to run.
In other words, in general, the following should evaluate True:
>>> code, imports = codegen(x)
>>> for import_str in imports:
>>> exec(import_str)
>>> eval(code) == x # Should evaluate True
Not guaranteed to work for all inputs, but should work for most common cases.
Args:
x: The object to generate the code of.
indent_str: The string to use for indentation. Defaults to four spaces.
_required_imports: A set of strings containing the names of all required imports. This is an internal
argument that should not be used by the user.
_recursion_depth: The current recursion depth. This is an internal argument that should not be used by the user.
Returns: A tuple containing:
- The string of Python code that, when evaluated, would produce the same value as the input.
- A set of strings that, when executed, would import all of the required imports for the code to run.
Examples:
>>> codegen(5)
('5', set())
>>> codegen([1, 2, 3])
('[1, 2, 3]', set())
>>> codegen(np.array([1, 2, 3]))
('np.array([1, 2, 3])', {'import numpy as np'})
>>> codegen(dict(my_int=4, my_array=np.array([1, 2, 3])))
('{
'my_int': 4,
'my_array': np.array([1, 2, 3]),
}', {'import numpy as np'})
"""
### Set defaults
if _required_imports is None:
_required_imports = set()
import_aliases = {
"aerosandbox" : "asb",
"aerosandbox.numpy": "np",
"numpy" : "np",
}
indent = indent_str * _recursion_depth
next_indent = indent_str * (_recursion_depth + 1)
if isinstance(x, (
bool, str,
int, float, complex,
range,
type(None),
bytes, bytearray, memoryview
)):
code = repr(x)
elif isinstance(x, list):
if len(x) == 0:
code = "[]"
else:
lines = []
lines.append("[")
for xi in x:
item_code, item_required_imports = codegen(xi, _recursion_depth=_recursion_depth + 1)
_required_imports.update(item_required_imports)
lines.append(next_indent + item_code + ",")
lines.append(indent + "]")
code = "\n".join(lines)
elif isinstance(x, tuple):
if len(x) == 0:
code = "()"
else:
lines = []
lines.append("(")
for xi in x:
item_code, item_required_imports = codegen(xi, _recursion_depth=_recursion_depth + 1)
_required_imports.update(item_required_imports)
lines.append(next_indent + item_code + ",")
lines.append(indent + ")")
code = "\n".join(lines)
elif isinstance(x, (set, frozenset)):
if len(x) == 0:
code = "set()"
else:
lines = []
lines.append("{")
for xi in x:
item_code, item_required_imports = codegen(xi, _recursion_depth=_recursion_depth + 1)
_required_imports.update(item_required_imports)
lines.append(next_indent + item_code + ",")
lines.append(indent + "}")
code = "\n".join(lines)
elif isinstance(x, dict):
if len(x) == 0:
code = "{}"
else:
lines = []
lines.append("{")
for k, v in x.items():
k_code, k_required_imports = codegen(k, _recursion_depth=_recursion_depth + 1)
v_code, v_required_imports = codegen(v, _recursion_depth=_recursion_depth + 1)
_required_imports.update(k_required_imports)
_required_imports.update(v_required_imports)
lines.append(next_indent + k_code + ": " + v_code + ",")
lines.append(indent + "}")
code = "\n".join(lines)
elif isinstance(x, np.ndarray):
_required_imports.add("import numpy as np")
code = f"np.{repr(x)}"
else: # At this point, we assume it's a class instance, and could be from any package.
module_name = x.__class__.__module__
package_name = module_name.split(".")[0]
if package_name == "builtins":
pre_string = ""
# elif package_name in import_aliases:
# pre_string = import_aliases[package_name] + "."
else:
_required_imports.add(
f"from {module_name} import {x.__class__.__name__}"
)
lines = []
lines.append(x.__class__.__name__ + "(")
for arg_name in inspect.getfullargspec(x.__init__).args[1:]:
if hasattr(x, arg_name):
arg_value = getattr(x, arg_name)
if inspect.ismethod(arg_value) or inspect.isfunction(arg_value):
continue
arg_code, arg_required_imports = codegen(arg_value, _recursion_depth=_recursion_depth + 1)
_required_imports.update(arg_required_imports)
lines.append(next_indent + arg_name + "=" + arg_code + ",")
lines.append(indent + ")")
code = "\n".join(lines)
return code, _required_imports
#
# if _recursion_depth == 0:
# if len(_required_imports) > 0:
# imports = "\n".join(sorted(_required_imports))
# return imports + "\n\n" + code
#
# else:
# return code
# else:
# return code, _required_imports
if __name__ == '__main__':
def dashes():
"""A quick macro for drawing some dashes, to make the terminal output clearer to distinguish."""
print("\n" + "-" * 50 + "\n")
dashes()
print("Caller location:\n", get_caller_source_location(stacklevel=1))
dashes()
print("Caller source code:\n", get_caller_source_code(stacklevel=1))
dashes()
def my_func():
print(
get_caller_source_code(
stacklevel=2
)
)
print("Caller source code of a function call:")
if_you_can_see_this_it_works = my_func()
dashes()
print("Arguments of f(a, b):")
print(
get_function_argument_names_from_source_code("f(a, b)")
)
location = get_caller_source_location()
dashes()
print("Codegen test:")
def pc(x):
code, imports = codegen(x)
print("\n".join(sorted(imports)))
print(code + "\n" + "-" * 50)
pc(1)
pc([1, 2, 3])
pc([1, 2, [3, 4, 5], 6])
pc({"a": 1, "b": 2})
pc(np.array([1, 2, 3]))
pc(dict(myarray=np.array([1, 2, 3]), yourarray=np.arange(10)))
| AeroSandbox | /AeroSandbox-4.1.1.tar.gz/AeroSandbox-4.1.1/aerosandbox/tools/inspect_tools.py | inspect_tools.py |
# Miscellaneous Tools
**Note: all contents in this folder are miscellaneous and do not interact in any way with the rest of AeroSandbox. If you are trying to learn AeroSandbox, you should completely ignore this folder and move on.**
Some of them may use libraries which are not required as part of a base install of AeroSandbox, so many of these scripts won't work without extra installation on users' ends. Things in this folder may break, without warning - they are just gists stored in a common place for the benefit of a small core group of developers on various aircraft design projects at MIT.
Think of this as a "Gists" folder - random snippets and scripts from scientific computing work go here. Basically, these are a few snippets of "bundled software".
A summary of a few of these:
- `aerosandbox.tools.pretty_plots`: A set of plotting functions for making plots look pretty, and for producing various plots that are useful for engineering design optimization. Built on top of Matplotlib and Seaborn.
- `aerosandbox.tools.string_formatting`: A set of functions for formatting strings in a variety of ways, including LaTeX formatting. Useful for making pretty plot labels.
- `aerosandbox.tools.units`: A set of scalars that represents various units. (Note: AeroSandbox uses base SI units everywhere internally - these are just for user convenience.)
- `aerosandbox.tools.inspect_tools`: This is where some Python black magic happens - basically, it's Python interpreting its own source code. Has functions that will return their own source code (as a string), at any arbitrary level of the call stack. Can take a Python object and generate source code (as a string) that attempts to reconstruct it by parsing its constructor. Has functions that can tell you from what file and line of code they were called from.
- `aerosandbox.tools.webplotdigitizer_reader`: A function that reads in a [WebPlotDigitizer](https://github.com/ankitrohatgi/WebPlotDigitizer) CSV file and returns a dictionary of the data. Useful for reading in data from old, image-based data tables (e.g., wind tunnel charts) and reconstructing it digitally.
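As a quick, minimal sketch of how a couple of these might be used (illustrative only; see each module's docstrings and `__main__` blocks for the authoritative examples):

```python
from aerosandbox.tools.string_formatting import eng_string
from aerosandbox.tools import inspect_tools

print(eng_string(1230.0, unit="N"))  # -> "1.23 kN" (engineering notation with SI prefixes)

# Generate source code (as a string) that attempts to reconstruct a Python object:
code, required_imports = inspect_tools.codegen(dict(a=1, b=[2, 3]))
print(code)
```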
/AeroSandbox-4.1.1.tar.gz/AeroSandbox-4.1.1/aerosandbox/tools/README.md
import hashlib
import aerosandbox.numpy as np
def eng_string(
x: float,
unit: str = None,
format='%.3g',
si=True
) -> str:
'''
Taken from: https://stackoverflow.com/questions/17973278/python-decimal-engineering-notation-for-mili-10e-3-and-micro-10e-6/40691220
Returns float/int value <x> formatted in a simplified engineering format -
using an exponent that is a multiple of 3.
Args:
x: The value to be formatted. Float or int.
unit: A unit of the quantity to be expressed, given as a string. Example: Newtons -> "N"
format: A printf-style string used to format the value before the exponent.
si: if true, use SI suffix for exponent. (k instead of e3, n instead of
e-9, etc.)
Examples:
With format='%.2f':
1.23e-08 -> 12.30e-9
123 -> 123.00
1230.0 -> 1.23e3
-1230000.0 -> -1.23e6
With si=True:
1230.0 -> "1.23k"
-1230000.0 -> "-1.23M"
With unit="N" and si=True:
1230.0 -> "1.23 kN"
-1230000.0 -> "-1.23 MN"
'''
sign = ''
if x < 0:
x = -x
sign = '-'
elif x == 0:
return format % 0
elif np.isnan(x):
return "NaN"
exp = int(np.floor(np.log10(x)))
exp3 = exp - (exp % 3)
x3 = x / (10 ** exp3)
if si and exp3 >= -24 and exp3 <= 24 and exp3 != 0:
exp3_text = 'yzafpnμm kMGTPEZY'[(exp3 + 24) // 3]
elif exp3 == 0:
exp3_text = ''
else:
exp3_text = f'e{exp3}'
if unit is not None:
if si:
exp3_text = " " + exp3_text + unit
else:
exp3_text = exp3_text + " " + unit
return ('%s' + format + '%s') % (sign, x3, exp3_text)
def latex_sci_notation_string(
x: float,
format='%.2e',
) -> str:
"""
Converts a floating-point number to a LaTeX-style formatted string. Does not include the `$$` wrapping to put you in math mode.
    Does not use scientific notation if the exponent would be zero.
Examples:
latex_sci_notation_string(3000) -> '3 \\times 10^{3}'
"""
float_str = format % x
base, exponent = float_str.split("e")
exponent = int(exponent)
if exponent == 0:
return base
else:
return r"{0} \times 10^{{{1}}}".format(base, exponent)
def hash_string(string: str) -> int:
"""
Hashes a string into a quasi-random 32-bit integer! (Based on an MD5 checksum algorithm.)
Usual warnings apply: it's MD5, don't use this for anything intended to be cryptographically secure.
"""
md5 = hashlib.md5(string.encode('utf-8'))
hash_hex = md5.hexdigest()
hash_int = int(hash_hex, 16)
    hash_int32 = hash_int % (2 ** 32)
    return hash_int32
def trim_string(string: str, length: int = 80) -> str:
"""
Trims a string to be less than a given length. If the string would exceed the length, makes it end in ellipses ("…").
Args:
string: The string to be trimmed.
length: The length to trim the string to, including any ellipses that may be added.
Returns: The trimmed string, including ellipses if needed.
"""
if len(string) > length:
return string[:length - 1] + "…"
else:
return string
def has_balanced_parentheses(string: str, left="(", right=")") -> bool:
"""
Determines whether a string has matching parentheses or not.
Examples:
>>> has_balanced_parentheses("3 * (x + (2 ** 5))") -> True
>>> has_balanced_parentheses("3 * (x + (2 ** 5)") -> False
Args:
string: The string to be evaluated.
left: The left parentheses. Can be modified if, for example, you need to check square brackets.
right: The right parentheses. Can be modified if, for example, you need to check square brackets.
Returns: A boolean of whether or not the string has balanced parentheses.
"""
    parenthesis_level = 0
    for char in string:
        if char == left:
            parenthesis_level += 1
        elif char == right:
            parenthesis_level -= 1
            if parenthesis_level < 0:
                # A closing parenthesis appeared before its matching opening parenthesis.
                return False
    return parenthesis_level == 0
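if __name__ == '__main__':
    # A minimal demo of the helpers above (an illustrative sketch; not part of the original module):
    print(eng_string(1.23e-8, unit="F"))        # "12.3 nF"
    print(latex_sci_notation_string(3000))      # "3.00 \times 10^{3}"
    print(trim_string("a" * 100, length=10))    # "aaaaaaaaa…"
    print(has_balanced_parentheses("f(g(x))"))  # True
    print(has_balanced_parentheses("f(g(x)"))   # False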
/AeroSandbox-4.1.1.tar.gz/AeroSandbox-4.1.1/aerosandbox/tools/string_formatting.py
import matplotlib
import mpl_toolkits
import matplotlib.pyplot as plt
import aerosandbox.numpy as np
from typing import Dict, Tuple, Union
preset_view_angles = {
# Given in the form:
# * key is the view name
# * value is a tuple of three floats: (elev, azim, roll)
'XY' : (90, -90, 0),
'XZ' : (0, -90, 0),
'YZ' : (0, 0, 0),
'-XY' : (-90, 90, 0),
'-XZ' : (0, 90, 0),
'-YZ' : (0, 180, 0),
'left_isometric' : (np.arctan2d(1, 2 ** 0.5), -135, 0),
'right_isometric': (np.arctan2d(1, 2 ** 0.5), 135, 0)
}
preset_view_angles['front'] = preset_view_angles["-YZ"]
preset_view_angles['top'] = preset_view_angles["XY"]
preset_view_angles['side'] = preset_view_angles["XZ"]
def figure3d(
nrows: int = 1,
ncols: int = 1,
orthographic: bool = True,
box_aspect: Tuple[float] = None,
adjust_colors: bool = True,
computed_zorder: bool = True,
ax_kwargs: Dict = None,
**fig_kwargs
) -> Tuple[matplotlib.figure.Figure, mpl_toolkits.mplot3d.axes3d.Axes3D]:
"""
    Creates a new 3D figure. Extra keyword arguments (**fig_kwargs) are passed on to matplotlib.pyplot.subplots().
Returns: (fig, ax)
"""
### Set defaults
if ax_kwargs is None:
ax_kwargs = {}
### Collect the keyword arguments to be used for each 3D axis
default_axes_kwargs = dict(
projection='3d',
proj_type='ortho' if orthographic else 'persp',
box_aspect=box_aspect,
computed_zorder=computed_zorder,
)
axes_kwargs = { # Overwrite any of the computed kwargs with user-provided ones, where applicable.
**default_axes_kwargs,
**ax_kwargs,
}
### Generate the 3D axis (or axes)
fig, ax = plt.subplots(
nrows=nrows,
ncols=ncols,
subplot_kw=axes_kwargs,
**fig_kwargs
)
if adjust_colors:
try:
axs = ax.flatten()
except AttributeError:
axs = [ax]
for a in axs:
pane_color = a.get_facecolor()
a.set_facecolor((0, 0, 0, 0)) # Set transparent
a.xaxis.pane.set_facecolor(pane_color)
a.xaxis.pane.set_alpha(1)
a.yaxis.pane.set_facecolor(pane_color)
a.yaxis.pane.set_alpha(1)
a.zaxis.pane.set_facecolor(pane_color)
a.zaxis.pane.set_alpha(1)
return fig, ax
def ax_is_3d(
ax: matplotlib.axes.Axes = None
) -> bool:
"""
Determines if a Matplotlib axis object is 3D or not.
Args:
ax: The axis object. If not given, uses the current active axes.
Returns: A boolean of whether the axis is 3D or not.
"""
if ax is None:
ax = plt.gca()
return hasattr(ax, 'zaxis')
def set_preset_3d_view_angle(
preset_view: str
) -> None:
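    """
    Sets the 3D view angle of the current matplotlib axes to one of the preset views defined in
    `preset_view_angles` above (e.g., 'XY', 'left_isometric', 'front').

    Raises an error if the current axes are not 3D, or if `preset_view` is not a recognized preset name.
    """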
ax = plt.gca()
if not ax_is_3d(ax):
raise Exception("Can't set a 3D view angle on a non-3D plot!")
try:
elev, azim, roll = preset_view_angles[preset_view]
except KeyError:
raise ValueError(
f"Input '{preset_view}' is not a valid preset view. Valid presets are:\n" +
"\n".join([f" * '{k}'" for k in preset_view_angles.keys()])
)
if roll == 0:
# This is to maintain back-compatibility to older Matplotlib versions.
# Older versions of Matplotlib (roughly, <=3.4.0) didn't support the `roll` kwarg.
# Hence, if we don't need to edit the roll, we don't - this extends back-compatibility.
ax.view_init(
elev=elev,
azim=azim,
)
else:
ax.view_init(
elev=elev,
azim=azim,
roll=roll
)
if __name__ == '__main__':
import aerosandbox.numpy as np
import matplotlib.pyplot as plt
import aerosandbox.tools.pretty_plots as p
t = np.linspace(0, 1, 100)
x = np.sin(4 * 2 * np.pi * t)
y = t ** 2
z = 5 * t
fig, ax = p.figure3d()
p.set_preset_3d_view_angle('left_isometric')
ax.plot(
x, y, z, "-"
)
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("z")
p.equal()
p.show_plot()
/AeroSandbox-4.1.1.tar.gz/AeroSandbox-4.1.1/aerosandbox/tools/pretty_plots/threedim.py
import matplotlib.pyplot as plt
from matplotlib import ticker as mt
from typing import Union, List
from aerosandbox.tools.pretty_plots.labellines import labelLines
import aerosandbox.numpy as np
from aerosandbox.tools.pretty_plots.threedim import ax_is_3d
from functools import partial
def show_plot(
title: str = None,
xlabel: str = None,
ylabel: str = None,
dpi: float = None,
savefig: Union[str, List[str]] = None,
savefig_transparent: bool = True,
tight_layout: bool = True,
legend: bool = None,
legend_inline: bool = False,
legend_frame: bool = True,
pretty_grids: bool = True,
set_ticks: bool = True,
show: bool = True,
):
"""
Makes a matplotlib Figure (and all its constituent Axes) look "nice", then displays it.
Arguments control whether various changes (from the default matplotlib settings) are made to the plot.
One argument in particular, `show` (a boolean), controls whether the plot is displayed.
Args:
title: If given, sets the title of the plot. If the Figure has multiple axes, this sets the Figure-level
suptitle instead of setting the individual Axis title.
xlabel: If given, sets the xlabel of the plot on the current Axis. (Equivalent to `plt.xlabel(my_label)`)
ylabel: If given, sets the ylabel of the plot on the current Axis. (Equivalent to `plt.ylabel(my_label)`)
dpi: If given, sets the dpi (display resolution, in Dots Per Inch) of the Figure.
savefig: If given, saves the figure to the given path(s).
* If a string is given, saves the figure to that path.
(E.g., `savefig="my_plot.png"`)
* If a list of strings is given, saves the figure to each of those paths.
(E.g., `savefig=["my_plot.png", "my_plot.pdf"]`)
savefig_transparent: If True, saves the figure with a transparent background. If False, saves the figure with
a white background. Only has an effect if `savefig` is not None.
tight_layout: If True, calls `plt.tight_layout()` to adjust the spacing of individual Axes. If False, skips
this step.
legend: This value can be True, False, or None.
* If True, displays a legend on the current Axis.
* If False, does not add a legend on the current Axis. (However, does not delete any existing legends.)
* If None (default), goes through some logic to determine whether a legend should be displayed. If there
is only one line on the current Axis, no legend is displayed. If there are multiple lines, a legend is (
in general) displayed.
legend_inline: Boolean that controls whether an "inline" legend is displayed.
* If True, displays an "inline" legend, where the labels are next to the lines instead of in a box.
* If False (default), displays a traditional legend.
Only has an effect if `legend=True` (or `legend=None`, and logic determines that a legend should be
displayed).
legend_frame: Boolean that controls whether a frame (rectangular background box) is displayed around the legend.
Default is True.
        pretty_grids: Boolean that controls whether the gridlines are formatted to have linewidths that are
            (subjectively) more readable.
        set_ticks: Boolean that controls whether the tick and grid locations + labels are formatted to be
            (subjectively) more readable. Works with both linear and log scales, and with both 2D and 3D plots.
        show: Boolean that controls whether the plot is displayed after all plot changes are applied. Default is
            True. You may want to set this to False if you want to make additional manual changes to the plot
            before displaying it.
Returns: None (completely in-place function). If `show=True` (default), displays the plot after applying changes.
"""
fig = plt.gcf()
axes = fig.get_axes()
if pretty_grids:
for ax in axes:
if not ax.get_label() == '<colorbar>':
if not ax_is_3d(ax):
if any(line.get_visible() for line in ax.get_xgridlines()):
ax.grid(True, 'major', axis='x', linewidth=1.6)
ax.grid(True, 'minor', axis='x', linewidth=0.7)
if any(line.get_visible() for line in ax.get_ygridlines()):
ax.grid(True, 'major', axis='y', linewidth=1.6)
ax.grid(True, 'minor', axis='y', linewidth=0.7)
else:
for i_ax in [ax.xaxis, ax.yaxis, ax.zaxis]:
i_ax._axinfo["grid"].update(dict(
linewidth=0.7,
))
i_ax.set_tick_params(which="minor", color=(0, 0, 0, 0))
if set_ticks:
for ax in axes:
individual_axes_and_limits = {
ax.xaxis: ax.get_xlim(),
ax.yaxis: ax.get_ylim(),
}
if hasattr(ax, "zaxis"):
individual_axes_and_limits[ax.zaxis] = ax.get_zlim()
for i_ax, lims in individual_axes_and_limits.items():
maj_loc = None
maj_fmt = None
min_loc = None
min_fmt = None
if i_ax.get_scale() == "log":
def linlogfmt(x, pos, ticks=[1.], default="", base=10):
if x < 0:
sign_string = "-"
x = -x
else:
sign_string = ""
exponent = np.floor(np.log(x) / np.log(base))
coeff = x / base ** exponent
### Fix any floating-point error during the floor function
if coeff < 1:
coeff *= base
exponent -= 1
elif coeff >= base:
coeff /= base
exponent += 1
for tick in ticks:
if np.isclose(coeff, tick):
return r"$\mathdefault{%s%g}$" % (
sign_string,
x
)
return default
def logfmt(x, pos, ticks=[1.], default="", base=10):
if x < 0:
sign_string = "-"
x = -x
else:
sign_string = ""
exponent = np.floor(np.log(x) / np.log(base))
coeff = x / base ** exponent
### Fix any floating-point error during the floor function
if coeff < 1:
coeff *= base
exponent -= 1
elif coeff >= base:
coeff /= base
exponent += 1
for tick in ticks:
if tick == 1:
if np.isclose(coeff, 1):
return r"$\mathdefault{%s%s^{%d}}$" % (
sign_string,
base,
exponent
)
else:
if np.isclose(coeff, tick):
# return f"${base:.0f} {{\\times 10^{int(exponent)}}}$"
return r"$\mathdefault{%s%g\times%s^{%d}}$" % (
sign_string,
coeff,
base,
exponent
)
return default
ratio = lims[1] / lims[0]
i_ax.set_tick_params(which="minor", labelsize=8)
if ratio < 10:
maj_loc = mt.MaxNLocator(
nbins=6,
steps=[1, 2, 5, 10],
min_n_ticks=4,
)
maj_fmt = mt.ScalarFormatter()
class LogAutoMinorLocator(mt.AutoMinorLocator):
"""
Dynamically find minor tick positions based on the positions of
major ticks. The scale must be linear with major ticks evenly spaced.
"""
def __call__(self):
majorlocs = self.axis.get_majorticklocs()
try:
majorstep = majorlocs[1] - majorlocs[0]
except IndexError:
# Need at least two major ticks to find minor tick locations
# TODO: Figure out a way to still be able to display minor
# ticks without two major ticks visible. For now, just display
# no ticks at all.
return []
if self.ndivs is None:
majorstep_no_exponent = 10 ** (np.log10(majorstep) % 1)
if np.isclose(majorstep_no_exponent, [1.0, 2.5, 5.0, 10.0]).any():
ndivs = 5
else:
ndivs = 4
else:
ndivs = self.ndivs
minorstep = majorstep / ndivs
vmin, vmax = self.axis.get_view_interval()
if vmin > vmax:
vmin, vmax = vmax, vmin
t0 = majorlocs[0]
tmin = ((vmin - t0) // minorstep + 1) * minorstep
tmax = ((vmax - t0) // minorstep + 1) * minorstep
locs = np.arange(tmin, tmax, minorstep) + t0
return self.raise_if_exceeds(locs)
min_loc = LogAutoMinorLocator()
min_fmt = mt.NullFormatter()
elif ratio < 10 ** 1.5:
maj_loc = mt.LogLocator(subs=np.arange(1, 10))
# if i_ax.axis_name == "x":
# default = r"$^{^|}$"
# elif i_ax.axis_name == "y":
# default = r"–"
# else:
# default = ""
maj_fmt = mt.FuncFormatter(
partial(linlogfmt, ticks=[1, 2, 5])
)
min_loc = mt.LogLocator(numticks=999, subs=np.arange(10, 100) / 10)
min_fmt = mt.NullFormatter()
elif ratio < 10 ** 2.5:
maj_loc = mt.LogLocator()
maj_fmt = mt.FuncFormatter(partial(logfmt, ticks=[1]))
min_loc = mt.LogLocator(numticks=999, subs=np.arange(1, 10))
min_fmt = mt.FuncFormatter(partial(logfmt, ticks=[2, 5]))
elif ratio < 10 ** 8:
maj_loc = mt.LogLocator()
maj_fmt = mt.FuncFormatter(partial(logfmt, ticks=[1]))
min_loc = mt.LogLocator(numticks=999, subs=np.arange(1, 10))
min_fmt = mt.FuncFormatter(partial(logfmt, ticks=[1]))
elif ratio < 10 ** 16:
maj_loc = mt.LogLocator()
maj_fmt = mt.LogFormatterSciNotation()
min_loc = mt.LogLocator(numticks=999, subs=np.arange(1, 10))
min_fmt = mt.NullFormatter()
else:
pass
elif i_ax.get_scale() == "linear":
maj_loc = mt.MaxNLocator(
nbins='auto',
steps=[1, 2, 5, 10],
min_n_ticks=3,
)
min_loc = mt.AutoMinorLocator()
else: # For any other scale, just use the default tick locations
continue
if len(i_ax.get_major_ticks()) != 0: # Unless the user has manually set the ticks to be empty
if maj_loc is not None:
i_ax.set_major_locator(maj_loc)
if min_loc is not None:
i_ax.set_minor_locator(min_loc)
if maj_fmt is not None:
i_ax.set_major_formatter(maj_fmt)
if min_fmt is not None:
i_ax.set_minor_formatter(min_fmt)
### Determine if a legend should be shown
if legend is None:
lines = plt.gca().lines
if len(lines) <= 1:
legend = False
else:
legend = False
for line in lines:
if line.get_label()[0] != "_":
legend = True
break
if xlabel is not None:
plt.xlabel(xlabel)
if ylabel is not None:
plt.ylabel(ylabel)
if title is not None:
if len(axes) <= 1:
plt.title(title)
else:
plt.suptitle(title)
if tight_layout:
plt.tight_layout()
if legend:
if legend_inline: # Display an inline (matplotlib-label-lines) legend instead
labelLines(
lines=plt.gca().get_lines(),
)
else: # Display a traditional legend
plt.legend(frameon=legend_frame)
if dpi is not None:
fig.set_dpi(dpi)
if savefig is not None:
if not isinstance(savefig, (list, tuple, set)):
savefig = [savefig]
for savefig_i in savefig:
plt.savefig(savefig_i, transparent=savefig_transparent)
if show:
plt.show()
def set_ticks(
x_major: Union[float, int] = None,
x_minor: Union[float, int] = None,
y_major: Union[float, int] = None,
y_minor: Union[float, int] = None,
z_major: Union[float, int] = None,
z_minor: Union[float, int] = None,
):
ax = plt.gca()
if x_major is not None:
ax.xaxis.set_major_locator(mt.MultipleLocator(base=x_major))
if x_minor is not None:
ax.xaxis.set_minor_locator(mt.MultipleLocator(base=x_minor))
if y_major is not None:
ax.yaxis.set_major_locator(mt.MultipleLocator(base=y_major))
if y_minor is not None:
ax.yaxis.set_minor_locator(mt.MultipleLocator(base=y_minor))
if z_major is not None:
ax.zaxis.set_major_locator(mt.MultipleLocator(base=z_major))
if z_minor is not None:
ax.zaxis.set_minor_locator(mt.MultipleLocator(base=z_minor))
def equal() -> None:
"""
Sets all axes to be equal. Works for both 2d plots and 3d plots.
Returns: None
"""
ax = plt.gca()
if not ax_is_3d(ax):
ax.set_aspect("equal", adjustable='box')
else:
ax.set_box_aspect((1, 1, 1))
xlim = ax.get_xlim3d()
ylim = ax.get_ylim3d()
zlim = ax.get_zlim3d()
x_range = abs(xlim[1] - xlim[0])
x_middle = np.mean(xlim)
y_range = abs(ylim[1] - ylim[0])
y_middle = np.mean(ylim)
z_range = abs(zlim[1] - zlim[0])
z_middle = np.mean(zlim)
# The plot bounding box is a sphere in the sense of the infinity
# norm, hence I call half the max range the plot radius.
plot_radius = 0.5 * max([x_range, y_range, z_range])
ax.set_xlim3d([x_middle - plot_radius, x_middle + plot_radius])
ax.set_ylim3d([y_middle - plot_radius, y_middle + plot_radius])
ax.set_zlim3d([z_middle - plot_radius, z_middle + plot_radius])
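if __name__ == '__main__':
    # Minimal usage sketch of the helpers above (illustrative only).
    import aerosandbox.numpy as np

    x = np.linspace(0, 10, 500)
    fig, ax = plt.subplots()
    plt.plot(x, np.sin(x), label="sin(x)")
    plt.plot(x, np.cos(x), label="cos(x)")
    set_ticks(x_major=2, x_minor=0.5)  # Manually-specified tick spacing on the x-axis
    show_plot(
        title="Formatting demo",
        xlabel="$x$",
        ylabel="$y$",
        set_ticks=False,  # Keep the manually-specified x-ticks from above
    )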
/AeroSandbox-4.1.1.tar.gz/AeroSandbox-4.1.1/aerosandbox/tools/pretty_plots/formatting.py
from aerosandbox.tools import inspect_tools
from typing import Union, Tuple, List
import aerosandbox.numpy as np
import warnings
def qp(
*args: Tuple[Union[np.ndarray, List]],
backend="plotly",
show=True,
plotly_renderer: Union[str, None] = "browser",
orthographic=True,
stacklevel=1
) -> None:
"""
Quickly plots ("QP") a 1D, 2D, or 3D dataset as a line plot with markers. Useful for exploratory data analysis.
Example:
>>> import aerosandbox.numpy as np
>>>
>>> x = np.linspace(0, 10)
>>> y = x ** 2
>>> z = np.sin(x)
>>> qp(x, y, z)
Args:
*args: The arguments that you want to plot. You can provide 1, 2, or 3 arrays, all of which should be 1D and of the same length.
backend: The backend to use. Current options:
* "plotly"
show: A boolean of whether or not to show the plot.
plotly_renderer: A string of what to use as the Plotly renderer. If you don't want to overwrite a default that you've already set, set this variable to None.
        orthographic: A boolean of whether or not to use an orthographic (rather than perspective) projection when viewing 3D plots.
stacklevel: (Advanced) Choose the level of the stack that you want to retrieve plot labels at. Higher
integers will get you higher (i.e., more end-user-facing) in the stack. Same behaviour as the `stacklevel`
argument in warnings.warn().
Returns: None (in-place)
"""
arg_values = args
n_dimensions = len(arg_values) # dimensionality
##### This section serves to try to retrieve appropriate axis labels for the plot.
### First, set some defaults.
arg_names = []
if n_dimensions >= 1:
arg_names += ["x"]
if n_dimensions >= 2:
arg_names += ["y"]
if n_dimensions >= 3:
arg_names += ["z"]
if n_dimensions >= 4:
arg_names += [
f"Dim. {i}"
for i in range(4, n_dimensions + 1)
]
title = "QuickPlot"
try:
### This is some interesting and tricky code here: retrieves the source code of where qp() was called, as a string.
caller_source_code = inspect_tools.get_caller_source_code(stacklevel=stacklevel + 1)
try:
parsed_arg_names = inspect_tools.get_function_argument_names_from_source_code(caller_source_code)
title = "QuickPlot: " + " vs. ".join(parsed_arg_names)
if len(parsed_arg_names) == n_dimensions:
arg_names = parsed_arg_names
else:
warnings.warn(
f"Couldn't parse QuickPlot call signature (dimension mismatch):\n\n{caller_source_code}",
stacklevel=2,
)
        except ValueError:
warnings.warn(
f"Couldn't parse QuickPlot call signature (invalid source code):\n\n{caller_source_code}",
stacklevel=2,
)
except FileNotFoundError:
warnings.warn(
f"Couldn't parse QuickPlot call signature (missing filepath).",
stacklevel=2,
)
##### Do the plotting:
if backend == "plotly":
import plotly.express as px
import plotly.graph_objects as go
mode = "markers+lines"
marker_dict = dict(
size=5 if n_dimensions != 3 else 2,
line=dict(
width=0
)
)
if n_dimensions == 1:
fig = go.Figure(
data=go.Scatter(
y=arg_values[0],
mode=mode,
marker=marker_dict
)
)
fig.update_layout(
title=title,
xaxis_title="Array index #",
yaxis_title=arg_names[0]
)
elif n_dimensions == 2:
fig = go.Figure(
data=go.Scatter(
x=arg_values[0],
y=arg_values[1],
mode=mode,
marker=marker_dict
)
)
fig.update_layout(
title=title,
xaxis_title=arg_names[0],
yaxis_title=arg_names[1]
)
elif n_dimensions == 3:
fig = go.Figure(
data=go.Scatter3d(
x=arg_values[0],
y=arg_values[1],
z=arg_values[2],
mode=mode,
marker=marker_dict
),
)
fig.update_layout(
title=title,
scene=dict(
xaxis_title=arg_names[0],
yaxis_title=arg_names[1],
zaxis_title=arg_names[2],
)
)
else:
raise ValueError("Too many inputs to plot!")
# fig.data[0].update(mode='markers+lines')
if orthographic:
fig.layout.scene.camera.projection.type = "orthographic"
if show:
fig.show(
renderer=plotly_renderer
)
else:
raise ValueError("Bad value of `backend`!")
if __name__ == '__main__':
import aerosandbox.numpy as np
x = np.linspace(0, 10, 100)
y = x ** 2
z = np.sin(y)
qp(x)
qp(x, y)
qp(x, y, z)
|
AeroSandbox
|
/AeroSandbox-4.1.1.tar.gz/AeroSandbox-4.1.1/aerosandbox/tools/pretty_plots/quickplot.py
|
quickplot.py
|
from typing import Union, Iterable, Tuple, Optional, Callable
import matplotlib.pyplot as plt
import numpy as np
from aerosandbox.tools.statistics import time_series_uncertainty_quantification as tsuq
def plot_with_bootstrapped_uncertainty(
x: np.ndarray,
y: np.ndarray,
ci: Optional[Union[float, Iterable[float], np.ndarray]] = 0.95,
x_stdev: Union[None, float] = 0.,
y_stdev: Union[None, float] = None,
color: Optional[Union[str, Tuple[float]]] = None,
draw_data: bool = True,
label_line: Union[bool, str] = "Best Estimate",
label_ci: bool = True,
label_data: Union[bool, str] = "Raw Data",
line_alpha: float = 0.9,
ci_to_alpha_mapping: Callable[[float], float] = lambda ci: 0.8 * (1 - ci) ** 0.4,
n_bootstraps=2000,
n_fit_points=500,
spline_degree=3,
        normalize: bool = True,
x_log_scale: bool = False,
y_log_scale: bool = False,
):
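    """
    Plots a bootstrapped-spline fit of (x, y) data: a best-estimate curve, shaded confidence-interval bands for
    each value in `ci`, and (optionally) the raw data points. The bootstrapped spline fits themselves are
    computed by `aerosandbox.tools.statistics.time_series_uncertainty_quantification.bootstrap_fits()`.

    Returns a tuple `(x_fit, y_bootstrap_fits)`, where `x_fit` is the array of x-points used for the fitted
    curves and `y_bootstrap_fits` contains one row per bootstrap fit, evaluated at those points.
    """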
x = np.array(x)
y = np.array(y)
### Log-transform the data if desired
if x_log_scale:
x = np.log(x)
if y_log_scale:
y = np.log(y)
### Make sure `ci` is a NumPy array
if ci is None:
ci = []
else:
try:
iter(ci)
except TypeError:
ci = [ci]
ci = np.array(ci)
### Make sure `ci` is sorted
ci = np.sort(ci)
### Make sure `ci` is in bounds
if not (np.all(ci > 0) and np.all(ci < 1)):
raise ValueError("Confidence interval values in `ci` should all be in the range of (0, 1).")
### Do the bootstrap fits
x_fit, y_bootstrap_fits = tsuq.bootstrap_fits(
x=x,
y=y,
x_noise_stdev=x_stdev,
y_noise_stdev=y_stdev,
n_bootstraps=n_bootstraps,
fit_points=n_fit_points,
spline_degree=spline_degree,
normalize=normalize,
)
### Undo the log-transform if desired
if x_log_scale:
x = np.exp(x)
x_fit = np.exp(x_fit)
if y_log_scale:
y = np.exp(y)
y_bootstrap_fits = np.exp(y_bootstrap_fits)
### Plot the best-estimator line
line, = plt.plot(
x_fit,
np.nanquantile(y_bootstrap_fits, q=0.5, axis=0),
color=color,
label=label_line,
zorder=2,
alpha=line_alpha,
)
if color is None:
color = line.get_color()
if x_log_scale:
plt.xscale('log')
if y_log_scale:
plt.yscale('log')
### Plot the confidence intervals
if len(ci) != 0:
### Using the method of equal-tails confidence intervals
lower_quantiles = np.concatenate([[0.5], (1 - ci) / 2])
upper_quantiles = np.concatenate([[0.5], 1 - (1 - ci) / 2])
lower_ci = np.nanquantile(y_bootstrap_fits, q=lower_quantiles, axis=0)
upper_ci = np.nanquantile(y_bootstrap_fits, q=upper_quantiles, axis=0)
for i, ci_val in enumerate(ci):
settings = dict(
color=color,
alpha=ci_to_alpha_mapping(ci_val),
linewidth=0,
zorder=1.5
)
plt.fill_between(
x_fit,
lower_ci[i],
lower_ci[i + 1],
label=f"{ci_val:.0%} CI" if label_ci else None,
**settings
)
plt.fill_between(
x_fit,
upper_ci[i],
upper_ci[i + 1],
**settings
)
### Plot the data
if draw_data:
plt.plot(
x,
y,
".k",
label=label_data,
alpha=0.25,
markersize=5,
markeredgewidth=0,
zorder=1,
)
return x_fit, y_bootstrap_fits
if __name__ == '__main__':
import matplotlib.pyplot as plt
import aerosandbox.tools.pretty_plots as p
np.random.seed(0)
### Generate data
x = np.linspace(0, 20, 1001)
y_true = np.sin(x - 5) # np.sin(x)
y_stdev = 0.5
y_noisy = y_true + y_stdev * np.random.randn(len(x))
### Plot spline regression
fig, ax = plt.subplots(dpi=300)
x_fit, y_bootstrap_fits = plot_with_bootstrapped_uncertainty(
x,
y_noisy,
ci=[0.75, 0.95],
label_line="Best Estimate",
label_data="Data (True Function + Noise)",
)
ax.plot(x, y_true, "k--", label="True Function (Hidden)", alpha=0.8, zorder=1)
plt.legend(ncols=2)
p.show_plot(
"Spline Bootstrapping Test",
r"$x$",
r"$y$",
legend=False
)
### Generate data
x = np.geomspace(10, 1000, 1000)
y_true = 3 * x ** 0.5
y_stdev = 0.1
y_noisy = y_true * y_stdev * np.random.lognormal(size=len(x))
fig, ax = plt.subplots()
x_fit, y_bootstrap_fits = plot_with_bootstrapped_uncertainty(
x,
y_noisy,
ci=[0.75, 0.95],
label_line="Best Estimate",
label_data="Data (True Function + Noise)",
# normalize=False,
x_log_scale=True,
y_log_scale=True,
)
p.show_plot()
/AeroSandbox-4.1.1.tar.gz/AeroSandbox-4.1.1/aerosandbox/tools/pretty_plots/plots/plot_with_bootstrapped_uncertainty.py
import matplotlib.pyplot as plt
import matplotlib as mpl
import aerosandbox.numpy as np
from typing import Tuple, Dict, Union, Callable, List
from scipy import interpolate
def plot_smooth(
*args,
color=None,
label=None,
function_of: str = None,
resample_resolution: int = 500,
drop_nans: bool = False,
**kwargs,
) -> Tuple[np.ndarray, np.ndarray]:
"""
Plots a curve that interpolates a 2D dataset. Same as matplotlib.pyplot.plot(), with the following changes:
* uses B-splines to draw a smooth curve rather than a jagged polyline
* By default, plots in line format `fmt='.-'` rather than `fmt='-'`.
Other than that, almost all matplotlib.pyplot.plot() syntax can be used. See syntax here:
https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.plot.html
Example usage:
>>> import aerosandbox.numpy as np
>>>
>>> t = np.linspace(0, 1, 12) # Parametric variable
>>> x = np.cos(2 * np.pi * t)
>>> y = np.cos(2 * np.pi * t ** 4) - t
>>>
>>> plot_smooth(
>>> x, y, 'o--', color='purple'
>>> )
>>> plt.show()
* Note: a true 2D interpolation is performed - it is not assumed y is a function of x, or vice versa. This can,
in rare cases, cause single-valuedness to not be preserved in cases where it logically should. If this is the
case, you need to perform the interpolation yourself without `plot_smooth()`.
Args:
*args: Same arguments as `matplotlib.pyplot.plot()`.
Notes on standard plot() syntax:
Call signatures:
>>> plot([x], y, [fmt], *, data=None, **kwargs)
>>> plot([x], y, [fmt], [x2], y2, [fmt2], ..., **kwargs)
Examples:
>>> plot(x, y) # plot x and y using default line style and color
>>> plot(x, y, 'bo') # plot x and y using blue circle markers
>>> plot(y) # plot y using x as index array 0..N-1
>>> plot(y, 'r+') # ditto, but with red plusses
color: Specifies the color of any line and/or markers that are plotted (as determined by the `fmt`).
label: Attaches a label to this line. Use `plt.legend()` to display.
        function_of: If "x" or "y", treats the data as a (single-valued) function of that variable and interpolates
            with a monotonicity-preserving PCHIP interpolant, rather than performing a parametric 2D spline fit.
            If None (default), a parametric fit is used.
        resample_resolution: The number of points to use when resampling the interpolated curve.
        drop_nans: Whether to drop any points where either `x` or `y` is NaN before interpolating.
        **kwargs: Same keyword arguments as `matplotlib.pyplot.plot()`.
Returns: A tuple `(x, y)` of the resampled points on the interpolated curve. Both `x` and `y` are 1D ndarrays.
"""
### Parse *args
argslist = list(args)
if len(args) == 3:
x = argslist.pop(0)
y = argslist.pop(0)
fmt = argslist.pop(0)
elif len(args) == 2:
if isinstance(args[1], str):
x = np.arange(np.length(args[0]))
y = argslist.pop(0)
fmt = argslist.pop(0)
else:
x = argslist.pop(0)
y = argslist.pop(0)
fmt = '.-'
elif len(args) == 1:
x = np.arange(np.length(args[0]))
y = argslist.pop(0)
fmt = '.-'
elif len(args) == 0:
raise ValueError("Missing plot data. Use syntax `plot_smooth(x, y, fmt, **kwargs)'.")
else:
raise ValueError("Unrecognized syntax. Use syntax `plot_smooth(x, y, fmt, **kwargs)'.")
if drop_nans:
nanmask = np.logical_not(
np.logical_or(
np.isnan(x),
np.isnan(y)
)
)
x = x[nanmask]
y = y[nanmask]
# At this point, x, y, and fmt are defined.
### Resample points
if function_of is None:
# Compute the relative spacing of points
dx = np.diff(x)
dy = np.diff(y)
x_rng = np.nanmax(x) - np.nanmin(x)
y_rng = np.nanmax(y) - np.nanmin(y)
dx_norm = dx / x_rng
dy_norm = dy / y_rng
ds_norm = np.sqrt(dx_norm ** 2 + dy_norm ** 2)
s_norm = np.concatenate([
[0],
np.nancumsum(ds_norm) / np.nansum(ds_norm)
])
bspline = interpolate.make_interp_spline(
x=s_norm,
y=np.stack(
(x, y), axis=1
)
)
result = bspline(np.linspace(0, 1, resample_resolution))
x_resample = result[:, 0]
y_resample = result[:, 1]
elif function_of == "x":
x_resample = np.linspace(
np.nanmin(x),
np.nanmax(x),
resample_resolution
)
mask = ~np.isnan(x) & ~np.isnan(y)
x = x[mask]
y = y[mask]
order = np.argsort(x)
y_resample = interpolate.PchipInterpolator(
x=x[order],
y=y[order],
)(x_resample)
elif function_of == "y":
y_resample = np.linspace(
np.nanmin(y),
np.nanmax(y),
resample_resolution
)
mask = ~np.isnan(x) & ~np.isnan(y)
x = x[mask]
y = y[mask]
order = np.argsort(y)
x_resample = interpolate.PchipInterpolator(
x=y[order],
y=x[order],
)(y_resample)
### Plot
scatter_kwargs = {
**kwargs,
'linewidth': 0,
}
if color is not None:
scatter_kwargs['color'] = color
line, = plt.plot(
x,
y,
fmt,
*argslist,
**scatter_kwargs
)
if color is None:
color = line.get_color()
line_kwargs = {
'color' : color,
'label' : label,
**kwargs,
'markersize': 0,
}
plt.plot(
x_resample,
y_resample,
fmt,
*argslist,
**line_kwargs
)
return x_resample, y_resample
if __name__ == '__main__':
import aerosandbox.numpy as np
# t = np.linspace(0, 1, 12) # Parametric variable
# x = np.cos(2 * np.pi * t)
# y = np.cos(2 * np.pi * t ** 4) - t
#
# fig, ax = plt.subplots()
# plot_smooth(
# x, y, color='purple'
# )
# plt.show()
fig, ax = plt.subplots()
x = np.linspace(0, 1, 8)
plot_smooth(
x, np.exp(-10 * x**0.5), color='goldenrod',
function_of="x",
# markersize=0,
resample_resolution=2000
)
plt.show()
/AeroSandbox-4.1.1.tar.gz/AeroSandbox-4.1.1/aerosandbox/tools/pretty_plots/plots/plot_smooth.py
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
from typing import Tuple, Dict, Union, Callable, List
from scipy import interpolate
from aerosandbox.tools.string_formatting import eng_string
def contour(
*args,
levels: Union[int, List, np.ndarray] = 31,
colorbar: bool = True,
linelabels: bool = True,
cmap=None,
alpha: float = 0.7,
extend: str = "neither",
linecolor="k",
linewidths: float = 0.5,
extendrect: bool = True,
linelabels_format: Union[str, Callable[[float], str]] = eng_string,
linelabels_fontsize: float = 8,
max_side_length_nondim: float = np.Inf,
colorbar_label: str = None,
x_log_scale: bool = False,
y_log_scale: bool = False,
z_log_scale: bool = False,
mask: np.ndarray = None,
drop_nans: bool = None,
# smooth: Union[bool, int] = False, # TODO implement
contour_kwargs: Dict = None,
contourf_kwargs: Dict = None,
colorbar_kwargs: Dict = None,
linelabels_kwargs: Dict = None,
**kwargs,
):
"""
An analogue for plt.contour and plt.tricontour and friends that produces a much prettier default graph.
Can take inputs with either contour or tricontour syntax.
See syntax here:
https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.contour.html
https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.contourf.html
https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.tricontour.html
https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.tricontourf.html
Args:
X: If dataset is gridded, follow `contour` syntax. Otherwise, follow `tricontour` syntax.
Y: If dataset is gridded, follow `contour` syntax. Otherwise, follow `tricontour` syntax.
Z: If dataset is gridded, follow `contour` syntax. Otherwise, follow `tricontour` syntax.
levels: See contour docs.
colorbar: Should we draw a colorbar?
linelabels: Should we add line labels?
cmap: What colormap should we use?
alpha: What transparency should all plot elements be?
extend: See contour docs.
        linecolor: What color should the contour lines be?
linewidths: See contour docs.
extendrect: See colorbar docs.
linelabels_format: See ax.clabel docs.
linelabels_fontsize: See ax.clabel docs.
contour_kwargs: Additional keyword arguments for contour.
contourf_kwargs: Additional keyword arguments for contourf.
colorbar_kwargs: Additional keyword arguments for colorbar.
linelabels_kwargs: Additional keyword arguments for the line labels (ax.clabel).
**kwargs: Additional keywords, which are passed to both contour and contourf.
Returns: A tuple of (contour, contourf, colorbar) objects.
"""
bad_signature_error = ValueError("Call signature should be one of:\n"
" * `contour(Z, **kwargs)`\n"
" * `contour(X, Y, Z, **kwargs)`\n"
" * `contour(X, Y, Z, levels, **kwargs)`"
)
### Parse *args
if len(args) == 1:
X = None
Y = None
Z = args[0]
elif len(args) == 3:
X = args[0]
Y = args[1]
Z = args[2]
else:
raise bad_signature_error
if X is None:
X = np.arange(Z.shape[1])
if Y is None:
Y = np.arange(Z.shape[0])
is_gridded = not ( # Determine if the data is gridded or not (i.e., contour vs. tricontour)
X.ndim == 1 and
Y.ndim == 1 and
Z.ndim == 1
)
### Check inputs for sanity
for k, v in dict(
X=X,
Y=Y,
Z=Z,
).items():
if np.all(np.isnan(v)):
raise ValueError(
f"All values of '{k}' are NaN!"
)
### Set defaults
if cmap is None:
cmap = mpl.colormaps.get_cmap('viridis')
if contour_kwargs is None:
contour_kwargs = {}
if contourf_kwargs is None:
contourf_kwargs = {}
if colorbar_kwargs is None:
colorbar_kwargs = {}
if linelabels_kwargs is None:
linelabels_kwargs = {}
shared_kwargs = kwargs
if levels is not None:
shared_kwargs["levels"] = levels
if alpha is not None:
shared_kwargs["alpha"] = alpha
if extend is not None:
shared_kwargs["extend"] = extend
if z_log_scale:
if np.any(Z <= 0):
raise ValueError(
"All values of the `Z` input to `contour()` should be nonnegative if `z_log_scale` is True!"
)
Z_ratio = np.nanmax(Z) / np.nanmin(Z)
log10_ceil_z_max = np.ceil(np.log10(np.nanmax(Z)))
log10_floor_z_min = np.floor(np.log10(np.nanmin(Z)))
try:
default_levels = int(levels)
except TypeError:
default_levels = 31
divisions_per_decade = np.ceil(default_levels / np.log10(Z_ratio)).astype(int)
if Z_ratio > 1e8:
locator = mpl.ticker.LogLocator()
else:
locator = mpl.ticker.LogLocator(
subs=np.geomspace(1, 10, divisions_per_decade + 1)[:-1]
)
shared_kwargs = {
"norm" : mpl.colors.LogNorm(),
"locator": locator,
**shared_kwargs
}
colorbar_kwargs = {
"norm": mpl.colors.LogNorm(),
**colorbar_kwargs
}
if colorbar_label is not None:
colorbar_kwargs["label"] = colorbar_label
contour_kwargs = {
"colors" : linecolor,
"linewidths": linewidths,
**shared_kwargs,
**contour_kwargs
}
contourf_kwargs = {
"cmap": cmap,
**shared_kwargs,
**contourf_kwargs
}
colorbar_kwargs = {
"extendrect": extendrect,
**colorbar_kwargs
}
linelabels_kwargs = {
"inline" : 1,
"fontsize": linelabels_fontsize,
"fmt" : linelabels_format,
**linelabels_kwargs
}
if drop_nans is None:
if is_gridded:
drop_nans = False
else:
drop_nans = True
### Now, with all the kwargs merged, prep for the actual plotting.
if mask is not None:
X = X[mask]
Y = Y[mask]
Z = Z[mask]
is_gridded = False
if drop_nans:
nanmask = np.logical_not(
np.logical_or.reduce(
[np.isnan(X), np.isnan(Y), np.isnan(Z)]
)
)
X = X[nanmask]
Y = Y[nanmask]
Z = Z[nanmask]
is_gridded = False
# if smooth:
# if isinstance(smooth, bool):
# smoothing_factor = 3
# else:
# try:
# smoothing_factor = int(smooth)
# except TypeError:
# raise TypeError("`smooth` must be an integer (the smoothing factor) or a boolean!")
### Do the actual plotting
if is_gridded:
cont = plt.contour(X, Y, Z, **contour_kwargs)
contf = plt.contourf(X, Y, Z, **contourf_kwargs)
    else:  ### Otherwise, the data is unstructured (i.e., X, Y, and Z are 1D arrays), so use tricontour
### Create the triangulation
tri = mpl.tri.Triangulation(X, Y)
t = tri.triangles
### Filter out extrapolation that's too large
# See also: https://stackoverflow.com/questions/42426095/matplotlib-contour-contourf-of-concave-non-gridded-data
if x_log_scale:
X_nondim = (
np.log(X[t]) - np.roll(np.log(X[t]), 1, axis=1)
) / (np.nanmax(np.log(X)) - np.nanmin(np.log(X)))
else:
X_nondim = (
X[t] - np.roll(X[t], 1, axis=1)
) / (np.nanmax(X) - np.nanmin(X))
if y_log_scale:
Y_nondim = (
np.log(Y[t]) - np.roll(np.log(Y[t]), 1, axis=1)
) / (np.nanmax(np.log(Y)) - np.nanmin(np.log(Y)))
else:
Y_nondim = (
Y[t] - np.roll(Y[t], 1, axis=1)
) / (np.nanmax(Y) - np.nanmin(Y))
side_length_nondim = np.max(
np.sqrt(
X_nondim ** 2 +
Y_nondim ** 2
),
axis=1
)
if np.all(side_length_nondim > max_side_length_nondim):
raise ValueError(
"All triangles in the triangulation are too large to be plotted!\n"
"Try increasing `max_side_length_nondim`!"
)
tri.set_mask(side_length_nondim > max_side_length_nondim)
cont = plt.tricontour(tri, Z, **contour_kwargs)
contf = plt.tricontourf(tri, Z, **contourf_kwargs)
if x_log_scale:
plt.xscale("log")
if y_log_scale:
plt.yscale("log")
if colorbar:
from matplotlib import cm
cbar = plt.colorbar(
ax=contf.axes,
mappable=cm.ScalarMappable(
norm=contf.norm,
cmap=contf.cmap,
),
**colorbar_kwargs
)
if z_log_scale:
cbar.ax.tick_params(which="minor", labelsize=8)
if Z_ratio >= 10 ** 2.05:
cbar.ax.yaxis.set_major_locator(mpl.ticker.LogLocator())
cbar.ax.yaxis.set_minor_locator(mpl.ticker.LogLocator(subs=np.arange(1, 10)))
cbar.ax.yaxis.set_major_formatter(mpl.ticker.LogFormatterSciNotation())
cbar.ax.yaxis.set_minor_formatter(mpl.ticker.NullFormatter())
elif Z_ratio >= 10 ** 1.5:
cbar.ax.yaxis.set_major_locator(mpl.ticker.LogLocator())
cbar.ax.yaxis.set_minor_locator(mpl.ticker.LogLocator(subs=np.arange(1, 10)))
cbar.ax.yaxis.set_major_formatter(mpl.ticker.LogFormatterSciNotation())
cbar.ax.yaxis.set_minor_formatter(mpl.ticker.LogFormatterSciNotation(
minor_thresholds=(np.inf, np.inf)
))
else:
cbar.ax.yaxis.set_major_locator(mpl.ticker.LogLocator(subs=np.arange(1, 10)))
cbar.ax.yaxis.set_minor_locator(mpl.ticker.LogLocator(subs=np.arange(10, 100) / 10))
cbar.ax.yaxis.set_major_formatter(mpl.ticker.ScalarFormatter())
cbar.ax.yaxis.set_minor_formatter(mpl.ticker.NullFormatter())
else:
cbar = None
if linelabels:
cont.axes.clabel(cont, **linelabels_kwargs)
return cont, contf, cbar
if __name__ == '__main__':
import matplotlib.pyplot as plt
import aerosandbox.tools.pretty_plots as p
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
X, Y = np.meshgrid(x, y)
Z_ratio = 1
Z = 10 ** (
Z_ratio / 2 * np.cos(
2 * np.pi * (X ** 4 + Y ** 4)
)
)
# Z += 0.1 * np.random.randn(*Z.shape)
fig, ax = plt.subplots(figsize=(6, 6))
cmap = plt.get_cmap("rainbow")
cont, contf, cbar = contour(
X,
Y,
np.abs(Z),
drop_nans=True,
# x_log_scale=True,
z_log_scale=True,
cmap=cmap,
levels=20,
colorbar_label="Colorbar label"
)
# plt.clim(0.1, 10)
p.show_plot(
"Title",
"X label",
"Y label"
)
/AeroSandbox-4.1.1.tar.gz/AeroSandbox-4.1.1/aerosandbox/tools/pretty_plots/plots/contour.py
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
from typing import Tuple, Dict, Union, Callable, List
from scipy import interpolate
def plot_color_by_value(
x: np.ndarray,
y: np.ndarray,
*args,
c: np.ndarray,
cmap='turbo',
colorbar: bool = False,
colorbar_label: str = None,
clim: Tuple[float, float] = None,
**kwargs
):
"""
Uses same syntax as matplotlib.pyplot.plot, except that `c` is now an array-like that maps to a specific color
pulled from `cmap`. Makes lines that are multicolored based on this `c` value.
Args:
x: Array of x-points.
y: Array of y-points.
*args: Args that will be passed into matplotlib.pyplot.plot().
Example: ".-" for a dotted line.
c: Array of values that will map to colors. Must be the same length as x and y.
cmap: The colormap to use.
colorbar: Whether or not to display the colormap. [bool]
colorbar_label: The label to add to the colorbar. Only applies if the colorbar is created. [str]
clim: A tuple of (min, max) that assigns bounds to the colormap. Computed from the range of `c` if not given.
**kwargs: Kwargs that will be passed into matplotlib.pyplot.plot()
    Returns: A tuple of (lines, sm, cbar): the list of plotted Line2D segments, the ScalarMappable used for the colormap, and the colorbar (or None if `colorbar=False`).
"""
cmap = mpl.cm.get_cmap(cmap)
x = np.array(x)
y = np.array(y)
c = np.array(c)
cmin = c.min()
cmax = c.max()
if clim is None:
clim = (cmin, cmax)
norm = plt.Normalize(vmin=clim[0], vmax=clim[1], clip=False)
label = kwargs.pop("label", None)
lines = []
for i, (
x1, x2,
y1, y2,
c1, c2,
) in enumerate(zip(
x[:-1], x[1:],
y[:-1], y[1:],
c[:-1], c[1:],
)):
line = plt.plot(
[x1, x2],
[y1, y2],
*args,
color=cmap(
norm(
(c1 + c2) / 2
) if cmin != cmax else 0.5
),
**kwargs
)
lines += line
if label is not None:
line = plt.plot(
[None],
[None],
*args,
color=cmap(0.5),
label=label,
**kwargs
)
lines += line
sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
if colorbar:
if colorbar_label is None:
cbar = plt.colorbar(sm)
else:
cbar = plt.colorbar(sm, label=colorbar_label)
else:
cbar = None
return lines, sm, cbar
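if __name__ == '__main__':
    # Minimal usage sketch (illustrative only): color a sine curve by its local slope.
    t = np.linspace(0, 2 * np.pi, 200)
    fig, ax = plt.subplots()
    plot_color_by_value(
        t,
        np.sin(t),
        ".-",
        c=np.cos(t),  # Color each line segment by the local derivative
        cmap="coolwarm",
        label="sin(t), colored by slope",
    )
    plt.legend()
    plt.show()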
/AeroSandbox-4.1.1.tar.gz/AeroSandbox-4.1.1/aerosandbox/tools/pretty_plots/plots/plot_color_by_value.py
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
from typing import Tuple, Dict, Union, Callable, List
from scipy import interpolate
from aerosandbox.tools.string_formatting import eng_string
import seaborn as sns
def pie(
values: Union[np.ndarray, List[float]],
names: List[str],
colors: Union[np.ndarray, List[str]] = None,
label_format: Callable[[str, float, float], str] = lambda name, value, percentage: name,
sort_by: Union[np.ndarray, List[float], str, None] = None,
startangle: float = 0.,
center_text: str = None,
x_labels: float = 1.25,
y_max_labels: float = 1.3,
arm_length=20,
arm_radius=5,
):
    """
    Draws a "donut-style" pie chart of the given values, with each slice labeled outside the pie and connected to
    its wedge by an arm.

    Args:
        values: The numerical value of each pie slice.
        names: The name of each pie slice, used for labeling.
        colors: The color of each pie slice. If not given, a default palette is used.
        label_format: A function mapping (name, value, percentage) to the label string for each slice.
        sort_by: How to sort the slices: "values", "names", "colors", an array of sort keys, or None (input order).
        startangle: The angle (in degrees) at which the first wedge starts.
        center_text: Optional text to draw in the center of the donut.
        x_labels: The x-location of the labels, as a fraction of the pie radius.
        y_max_labels: The maximum |y|-location of the labels, as a fraction of the pie radius.
        arm_length: The arm length of the label connectors, passed to the annotation's connectionstyle.
        arm_radius: The arm corner radius of the label connectors, passed to the annotation's connectionstyle.

    Returns: None
    """
ax = plt.gca()
n_wedges = len(values)
### Check inputs
    if len(names) != n_wedges:
        raise ValueError("`values` and `names` must have the same length.")
### Sort by
    ### Sort by
    sort_by_error = ValueError(
        'Argument `sort_by` must be one of:\n'
        ' * a string of "values", "names", or "colors"\n'
        ' * an array of numbers corresponding to each pie slice, which will then be used for sorting'
    )
    if sort_by is None:
        sort_by = np.arange(n_wedges)
    elif isinstance(sort_by, str):
        if sort_by == "values":
            sort_by = values
        elif sort_by == "names":
            sort_by = names
        elif sort_by == "colors":
            sort_by = colors  # This may not make sense, depending on how `colors` was specified.
        else:
            raise sort_by_error
    order = np.argsort(sort_by)
    names = np.array(names)[order]
    values = np.array(values)[order]
    if colors is None:
        # Set default colors
        colors = sns.color_palette("husl", n_colors=n_wedges)
    else:
        colors = np.array(colors)[order]
### Compute percentages
values = np.array(values).astype(float)
total = np.sum(values)
percentages = 100 * values / total
wedges, texts = ax.pie(
x=values,
colors=colors,
startangle=startangle,
wedgeprops=dict(
width=0.3
)
)
for w in wedges:
w.theta_mid = (w.theta1 + w.theta2) / 2
w.x_pie = np.cos(np.deg2rad(w.theta_mid))
w.y_pie = np.sin(np.deg2rad(w.theta_mid))
w.is_right = w.x_pie > 0
left_wedges = [w for w in wedges if not w.is_right]
right_wedges = [w for w in wedges if w.is_right]
y_texts_left = y_max_labels * np.linspace(-1, 1, len(left_wedges))
y_texts_right = y_max_labels * np.linspace(-1, 1, len(right_wedges))
if len(left_wedges) == 1:
y_texts_left = [w.y_pie for w in left_wedges]
if len(right_wedges) == 1:
y_texts_right = [w.y_pie for w in right_wedges]
left_wedge_order = np.argsort([w.y_pie for w in left_wedges])
for i, w in enumerate(np.array(left_wedges, "O")[left_wedge_order]):
w.y_text = y_texts_left[i]
right_wedge_order = np.argsort([w.y_pie for w in right_wedges])
for i, w in enumerate(np.array(right_wedges, "O")[right_wedge_order]):
w.y_text = y_texts_right[i]
for i, w in enumerate(wedges):
x_text = x_labels * np.sign(w.x_pie)
ax.annotate(
text=label_format(names[i], values[i], percentages[i]),
xy=(w.x_pie, w.y_pie),
xytext=(x_text, w.y_text),
horizontalalignment="left" if w.is_right else "right",
arrowprops=dict(
arrowstyle="-",
color="k",
connectionstyle=f"arc,angleA={180 if w.is_right else 0},angleB={w.theta_mid},armA={arm_length},armB={arm_length},rad={arm_radius}",
relpos=(
0 if w.is_right else 1,
0.5
)
),
va="center",
)
if center_text is not None:
plt.text(
x=0,
y=0,
s=center_text,
ha="center",
va="center",
fontsize=16,
)
if __name__ == '__main__':
import matplotlib.pyplot as plt
import aerosandbox.tools.pretty_plots as p
data = {
"USA" : 336997624,
"Mexico" : 126705138,
"Canada" : 38115012,
"Guatemala" : 17608483,
"Haiti" : 11447569,
"Cuba" : 11256372,
"Dominican Republic": 11117873,
"Honduras" : 10278345,
"Nicaragua" : 6850540,
"El Salvador" : 6314167,
"Costa Rica" : 5153957,
"Panama" : 4351267,
}
data["Other"] = 597678511 - np.sum(np.array(list(data.values())))
fig, ax = plt.subplots(figsize=(9, 5))
pie(
values=list(data.values()),
names=list(data.keys()),
colors=[
"navy" if s in ["USA"] else "lightgray"
for s in data.keys()
],
label_format=lambda name, value, percentage: f"{name}, {eng_string(value)} ({percentage:.0f}%)",
startangle=40,
center_text="Majority of North\nAmerica's Population\nlives in USA"
)
p.show_plot()
# import pandas as pd
# from io import StringIO
#
# df = pd.read_csv(
# StringIO("""\
# person,slices eaten,gender
# alice,9,woman
# bob,6,man
# charlie,5,man
# dan,8,man
# eve,7,woman
# frank,9,man
# grace,4,woman
# heidi,3,woman
# """)
# )
#
# fig, ax = plt.subplots(figsize=(8, 5))
# pie(
# values=df['slices eaten'],
# names=df['person'],
# colors=['blue' if g == 'man' else 'red' for g in df['gender']],
# label_format=lambda n, v, p: f"{n.capitalize()}, {v:.0g} ({p:.0f}%)",
# # sort_by=df[]
# )
# p.show_plot()
/AeroSandbox-4.1.1.tar.gz/AeroSandbox-4.1.1/aerosandbox/tools/pretty_plots/plots/pie.py
import numpy as np
from scipy import interpolate
class NaturalUnivariateSpline(interpolate.PPoly):
"""
A Natural UnivariateSpline.
Identical to a UnivariateSpline, except that extrapolation outside the data range is constrained to be linear.
Based on: https://bobby.gramacy.com/surrogates/splines.html
Which paraphrases [Hastie, Tibshirani & Friedman (2017)](https://hastie.su.domains/ElemStatLearn/), Chapters 5, 7, & 8.
"""
def __init__(self,
x: np.ndarray,
y: np.ndarray,
w: np.ndarray = None,
k: int = 3,
s: float = None,
ext=None,
bbox=None,
check_finite=None
):
"""
Args:
x: 1-D array of independent input data. Must be increasing; must be strictly increasing if s is 0.
y: 1-D array of dependent input data, of the same length as x.
w: Weights for spline fitting. Must be positive. If w is None, weights are all 1. Default is None.
k: Degree of the smoothing spline. Must be 1 <= k <= 5. k = 3 is a cubic spline. Default is 3.
s: Positive smoothing factor used to choose the number of knots.
Returns:
"""
if s is None:
m = len(x)
s = m - (2 * m) ** 0.5 # Identical default to UnivariateSpline's `s` argument.
### Deprecate and warn
import warnings
if ext is not None:
warnings.warn(
"The `ext` argument is deprecated, as a NaturalUnivariateSpline implies extrapolation.",
DeprecationWarning
)
if bbox is not None:
warnings.warn(
"The `bbox` argument is deprecated, as a NaturalUnivariateSpline implies extrapolation.",
DeprecationWarning
)
if check_finite is not None:
warnings.warn(
"The `check_finite` argument is deprecated.",
DeprecationWarning
)
### Compute the t, c, and k parameters for a UnivariateSpline
tck = interpolate.splrep(
x=x,
y=y,
w=w,
k=k,
s=s,
)
### Construct the spline, without natural extrapolation
spline = interpolate.PPoly.from_spline(
tck=tck
)
### Add spline knots for natural positive extrapolation
spline.extend(
c=np.array(
[[0]] * (k - 2) + [
[spline(spline.x[-1], 1)],
[spline(spline.x[-1])]
]),
x=np.array([np.Inf])
)
### Add spline knots for natural negative extrapolation
spline.extend(
c=np.array(
[[0]] * (k - 1) + [
[spline(spline.x[0], 1)],
[spline(spline.x[0])]
]),
x=np.array([spline.x[0]])
)
### Construct the Natural Univariate Spline
super().__init__(
c=spline.c,
x=spline.x,
extrapolate=True,
)
if __name__ == '__main__':
import matplotlib.pyplot as plt
import aerosandbox.tools.pretty_plots as p
x = np.linspace(0, 10, 20)
y = np.sin(x)
nus = NaturalUnivariateSpline(
x,
y,
k=3,
)
t, c, k = interpolate.splrep(
x,
y,
s=len(x) - (2 * len(x)) ** 0.5,
)
us = interpolate.PPoly.from_spline((t, c, k))
x_plot = np.linspace(-5, 15, 5000)
fig, ax = plt.subplots()
plt.plot(x, y, ".k", label="Data")
plt.plot(x_plot, nus(x_plot), "--", label="Natural Univariate Spline")
plt.plot(x_plot, us(x_plot), "--", label="Univariate Spline")
p.set_ticks(1, 1, 1, 1)
p.show_plot()
/AeroSandbox-4.1.1.tar.gz/AeroSandbox-4.1.1/aerosandbox/tools/pretty_plots/utilities/natural_univariate_spline.py
import warnings
from math import atan2, degrees
import matplotlib.patheffects as path_effects
import numpy as np
from matplotlib.container import ErrorbarContainer
from matplotlib.dates import DateConverter, num2date
from .utils import ensure_float, maximum_bipartite_matching, always_iterable
# Label line with line2D label data
def labelLine(
line,
x,
label=None,
align=True,
drop_label=False,
yoffset=0,
yoffset_logspace=False,
outline_color="auto",
outline_width=8,
**kwargs,
):
"""Label a single matplotlib line at position x
Parameters
----------
line : matplotlib.lines.Line
The line holding the label
x : number
The location in data unit of the label
label : string, optional
The label to set. This is inferred from the line by default
drop_label : bool, optional
If True, the label is consumed by the function so that subsequent
calls to e.g. legend do not use it anymore.
yoffset : double, optional
Space to add to label's y position
yoffset_logspace : bool, optional
If True, then yoffset will be added to the label's y position in
log10 space
outline_color : None | "auto" | color
Colour of the outline. If set to "auto", use the background color.
If set to None, do not draw an outline.
outline_width : number
Width of the outline
kwargs : dict, optional
Optional arguments passed to ax.text
"""
ax = line.axes
xdata = ensure_float(line.get_xdata())
ydata = line.get_ydata()
mask = np.isfinite(ydata)
if mask.sum() == 0:
raise Exception(f"The line {line} only contains nan!")
# Find first segment of xdata containing x
if isinstance(xdata, tuple) and len(xdata) == 2:
i = 0
xa = min(xdata)
xb = max(xdata)
else:
for imatch, (xa, xb) in enumerate(zip(xdata[:-1], xdata[1:])):
if min(xa, xb) <= ensure_float(x) <= max(xa, xb):
i = imatch
break
else:
raise Exception("x label location is outside data range!")
xfa = ensure_float(xa)
xfb = ensure_float(xb)
ya = ydata[i]
yb = ydata[i + 1]
# Handle vertical case
if xfb == xfa:
fraction = 0.5
else:
fraction = (ensure_float(x) - xfa) / (xfb - xfa)
if yoffset_logspace:
y = ya + (yb - ya) * fraction
y *= 10 ** yoffset
else:
y = ya + (yb - ya) * fraction + yoffset
if not (np.isfinite(ya) and np.isfinite(yb)):
warnings.warn(
(
"%s could not be annotated due to `nans` values. "
"Consider using another location via the `x` argument."
)
% line,
UserWarning,
)
return
if not label:
label = line.get_label()
if drop_label:
line.set_label(None)
if align:
# Compute the slope and label rotation
screen_dx, screen_dy = ax.transData.transform(
(xfa, ya)
) - ax.transData.transform((xfb, yb))
rotation = (degrees(atan2(screen_dy, screen_dx)) + 90) % 180 - 90
else:
rotation = 0
# Set a bunch of keyword arguments
if "color" not in kwargs:
kwargs["color"] = line.get_color()
if ("horizontalalignment" not in kwargs) and ("ha" not in kwargs):
kwargs["ha"] = "center"
if ("verticalalignment" not in kwargs) and ("va" not in kwargs):
kwargs["va"] = "center"
if "clip_on" not in kwargs:
kwargs["clip_on"] = True
if "zorder" not in kwargs:
kwargs["zorder"] = 2.5
if outline_color == "auto":
outline_color = ax.get_facecolor()
txt = ax.text(x, y, label, rotation=rotation, **kwargs)
if outline_color is None:
effects = [path_effects.Normal()]
else:
effects = [
path_effects.Stroke(linewidth=outline_width, foreground=outline_color),
path_effects.Normal(),
]
txt.set_path_effects(effects)
return txt
def labelLines(
lines,
align=True,
xvals=None,
drop_label=False,
shrink_factor=0.05,
yoffsets=0,
outline_color="auto",
outline_width=5,
**kwargs,
):
"""Label all lines with their respective legends.
Parameters
----------
lines : list of matplotlib lines
The lines to label
align : boolean, optional
If True, the label will be aligned with the slope of the line
at the location of the label. If False, they will be horizontal.
xvals : (xfirst, xlast) or array of float, optional
The location of the labels. If a tuple, the labels will be
evenly spaced between xfirst and xlast (in the axis units).
drop_label : bool, optional
If True, the label is consumed by the function so that subsequent
calls to e.g. legend do not use it anymore.
shrink_factor : double, optional
Relative distance from the edges to place closest labels. Defaults to 0.05.
yoffsets : number or list, optional.
Distance relative to the line when positioning the labels. If given a number,
the same value is used for all lines.
outline_color : None | "auto" | color
Colour of the outline. If set to "auto", use the background color.
If set to None, do not draw an outline.
outline_width : number
Width of the outline
kwargs : dict, optional
Optional arguments passed to ax.text
"""
ax = lines[0].axes
handles, allLabels = ax.get_legend_handles_labels()
all_lines = []
for h in handles:
if isinstance(h, ErrorbarContainer):
all_lines.append(h.lines[0])
else:
all_lines.append(h)
# In case no x location was provided, we need to use some heuristics
# to generate them.
if xvals is None:
xvals = ax.get_xlim()
xvals_rng = xvals[1] - xvals[0]
shrinkage = xvals_rng * shrink_factor
xvals = (xvals[0] + shrinkage, xvals[1] - shrinkage)
if isinstance(xvals, tuple) and len(xvals) == 2:
xmin, xmax = xvals
xscale = ax.get_xscale()
if xscale == "log":
xvals = np.logspace(np.log10(xmin), np.log10(xmax), len(all_lines) + 2)[
1:-1
]
else:
xvals = np.linspace(xmin, xmax, len(all_lines) + 2)[1:-1]
# Build matrix line -> xvalue
ok_matrix = np.zeros((len(all_lines), len(all_lines)), dtype=bool)
for i, line in enumerate(all_lines):
xdata = ensure_float(line.get_xdata())
minx, maxx = min(xdata), max(xdata)
for j, xv in enumerate(xvals):
ok_matrix[i, j] = minx < xv < maxx
# If some xvals do not fall in their corresponding line,
# find a better matching using maximum bipartite matching.
if not np.all(np.diag(ok_matrix)):
order = maximum_bipartite_matching(ok_matrix)
# The maximum match may miss a few points, let's add them back
imax = order.max()
order[order < 0] = np.arange(imax + 1, len(order))
# Now reorder the xvalues
old_xvals = xvals.copy()
xvals[order] = old_xvals
else:
xvals = list(always_iterable(xvals)) # force the creation of a copy
labLines, labels = [], []
# Take only the lines which have labels other than the default ones
for i, (line, xv) in enumerate(zip(all_lines, xvals)):
label = allLabels[all_lines.index(line)]
labLines.append(line)
labels.append(label)
# Move xlabel if it is outside valid range
xdata = ensure_float(line.get_xdata())
if not (min(xdata) <= xv <= max(xdata)):
warnings.warn(
(
"The value at position %s in `xvals` is outside the range of its "
"associated line (xmin=%s, xmax=%s, xval=%s). Clipping it "
"into the allowed range."
)
% (i, min(xdata), max(xdata), xv),
UserWarning,
)
new_xv = min(xdata) + (max(xdata) - min(xdata)) * 0.9
xvals[i] = new_xv
# Convert float values back to datetime in case of datetime axis
if isinstance(ax.xaxis.converter, DateConverter):
xvals = [num2date(x).replace(tzinfo=ax.xaxis.get_units()) for x in xvals]
txts = []
try:
yoffsets = [float(yoffsets)] * len(all_lines)
except TypeError:
pass
for line, x, yoffset, label in zip(labLines, xvals, yoffsets, labels):
txts.append(
labelLine(
line,
x,
label=label,
align=align,
drop_label=drop_label,
yoffset=yoffset,
outline_color=outline_color,
outline_width=outline_width,
**kwargs,
)
)
return txts
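# --- Illustrative usage sketch (added for this document; not part of the original module) ---
# A minimal example of labeling several curves in-line instead of using a legend.
if __name__ == '__main__':
    import matplotlib.pyplot as plt

    x_demo = np.linspace(0.1, 0.9, 200)
    for k in [1, 2, 3]:
        plt.plot(x_demo, np.sin(2 * np.pi * k * x_demo), label=f"k = {k}")

    # Label each curve on the curve itself, aligned with the local slope, with an automatic background outline.
    labelLines(plt.gca().get_lines(), align=True, outline_color="auto")
    plt.show()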
# ===== End of file: /AeroSandbox-4.1.1.tar.gz/AeroSandbox-4.1.1/aerosandbox/tools/pretty_plots/labellines/core.py =====
from datetime import datetime
import numpy as np
from matplotlib.dates import date2num
def ensure_float(value):
"""Make sure datetime values are properly converted to floats."""
try:
# the last 3 boolean checks are for arrays with datetime64 and with
# a timezone, see these SO posts:
# https://stackoverflow.com/q/60714568/4549682
# https://stackoverflow.com/q/23063362/4549682
# somewhere, the datetime64 with timezone is getting converted to 'O' dtype
if (
isinstance(value, datetime)
or isinstance(value, np.datetime64)
or np.issubdtype(value.dtype, np.datetime64)
or str(value.dtype).startswith("datetime64")
or value.dtype == "O"
):
return date2num(value)
else: # another numpy dtype like float64
return value
except AttributeError: # possibly int or other float/int dtype
return value
# From https://www.geeksforgeeks.org/maximum-bipartite-matching/
class GFG:
def __init__(self, graph):
# residual graph
self.graph = graph
self.ppl = len(graph)
self.jobs = len(graph[0])
# A DFS based recursive function that returns true if a matching for vertex
# u is possible
def bpm(self, u, matchR, seen):
# Try every job one by one
for v in range(self.jobs):
# If applicant u is interested
# in job v and v is not seen
if self.graph[u][v] and not seen[v]:
# Mark v as visited
seen[v] = True
# If job 'v' is not assigned to an applicant OR previously
# assigned applicant for job v (which is matchR[v]) has an
# alternate job available. Since v is marked as visited in the
# above line, matchR[v] in the following recursive call will not
# get job 'v' again
if matchR[v] == -1 or self.bpm(matchR[v], matchR, seen):
matchR[v] = u
return True
return False
# Returns maximum number of matching
def maxBPM(self):
# An array to keep track of the applicants assigned to jobs. The value
# of matchR[i] is the applicant number assigned to job i, the value -1
# indicates nobody is assigned.
matchR = [-1] * self.jobs
# Count of jobs assigned to applicants
result = 0
for i in range(self.ppl):
# Mark all jobs as not seen for next applicant.
seen = [False] * self.jobs
# Find if the applicant 'u' can get a job
if self.bpm(i, matchR, seen):
result += 1
return result, matchR
def maximum_bipartite_matching(graph: np.ndarray) -> np.ndarray:
"""Finds the maximum bipartite matching of a graph
Parameters
----------
graph : np.ndarray
The graph, represented as a boolean matrix
Returns
-------
order : np.ndarray
The order in which to traverse the graph to visit a maximum of nodes
"""
g = GFG(graph)
_, order = g.maxBPM()
return np.asarray(order)
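# Illustrative example (added for clarity): with
#   graph = np.array([[True, False],
#                     [True, True]])
# (rows = lines, columns = candidate x-locations), maximum_bipartite_matching(graph) returns array([0, 1]),
# i.e., column 0 is matched to row 0 and column 1 to row 1.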
def always_iterable(obj, base_type=(str, bytes)):
"""If *obj* is iterable, return an iterator over its items::
>>> obj = (1, 2, 3)
>>> list(always_iterable(obj))
[1, 2, 3]
If *obj* is not iterable, return a one-item iterable containing *obj*::
>>> obj = 1
>>> list(always_iterable(obj))
[1]
If *obj* is ``None``, return an empty iterable:
>>> obj = None
>>> list(always_iterable(None))
[]
By default, binary and text strings are not considered iterable::
>>> obj = 'foo'
>>> list(always_iterable(obj))
['foo']
If *base_type* is set, objects for which ``isinstance(obj, base_type)``
returns ``True`` won't be considered iterable.
>>> obj = {'a': 1}
>>> list(always_iterable(obj)) # Iterate over the dict's keys
['a']
>>> list(always_iterable(obj, base_type=dict)) # Treat dicts as a unit
[{'a': 1}]
Set *base_type* to ``None`` to avoid any special handling and treat objects
Python considers iterable as iterable:
>>> obj = 'foo'
>>> list(always_iterable(obj, base_type=None))
['f', 'o', 'o']
"""
if obj is None:
return iter(())
if (base_type is not None) and isinstance(obj, base_type):
return iter((obj,))
try:
return iter(obj)
except TypeError:
return iter((obj,))
# ===== End of file: /AeroSandbox-4.1.1.tar.gz/AeroSandbox-4.1.1/aerosandbox/tools/pretty_plots/labellines/utils.py =====
import warnings
from typing import Union, Iterable, List, Tuple
from tqdm import tqdm
import aerosandbox.numpy as np
from aerosandbox.tools.pretty_plots.utilities.natural_univariate_spline import NaturalUnivariateSpline as Spline
def estimate_noise_standard_deviation(
data: np.ndarray,
estimator_order: int = None,
) -> float:
"""
Estimates the standard deviation of the random noise in a time-series dataset.
Relies on several assumptions:
- The noise is normally-distributed and independent between samples (i.e. white noise).
- The noise is stationary and homoscedastic (i.e., the noise standard deviation is constant).
- The noise is uncorrelated with the signal.
- The sample rate of the data is significantly higher than the highest-frequency component of the signal. (In
practice, this ratio need not be more than ~5:1, if higher-order estimators are used. At a minimum, however,
this ratio must be greater than 2:1, corresponding to the Nyquist frequency.)
The algorithm used in this function is a highly-optimized version of the math described in this repository,
part of an upcoming paper: https://github.com/peterdsharpe/aircraft-polar-reconstruction-from-flight-test
The repository is currently private, but will be public at some point; if you would like access to it,
please contact Peter Sharpe at [email protected].
Args:
data: A 1D NumPy array of time-series data.
estimator_order: The order of the estimator to use. Higher orders are generally more accurate, up to the
point where sample error starts to dominate. If None, a reasonable estimator order will be chosen automatically.
Returns: An estimate of the standard deviation of the data's noise component.
"""
if len(data) < 2:
raise ValueError("Data must have at least 2 points.")
if estimator_order is None:
estimator_order = min(
max(
1,
int(len(data) ** 0.5)
),
1000
)
##### Noise Variance Reconstruction #####
from scipy.special import gammaln
ln_factorial = lambda x: gammaln(x + 1)
### For speed, pre-compute the log-factorial of integers from 1 to estimator_order
ln_f = ln_factorial(np.arange(estimator_order + 1))
### Create a convolutional kernel to vectorize the summation
coefficients = np.exp(
2 * ln_f[estimator_order] - ln_f - ln_f[::-1] - 0.5 * ln_factorial(2 * estimator_order)
) * (-1) ** np.arange(estimator_order + 1)
coefficients -= np.mean(coefficients) # Remove any bias introduced by floating-point error
sample_stdev = np.convolve(data, coefficients[::-1], 'valid')
return np.mean(sample_stdev ** 2) ** 0.5
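# Illustrative sanity check (added for this document): for a slowly-varying signal plus white Gaussian noise of
# known standard deviation, the estimate should approximately recover that standard deviation. For example:
#   import numpy
#   t = numpy.linspace(0, 1, 10_000)
#   y = numpy.sin(2 * numpy.pi * 5 * t) + 0.1 * numpy.random.randn(t.size)
#   estimate_noise_standard_deviation(y)  # expected to be close to 0.1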
def bootstrap_fits(
x: np.ndarray,
y: np.ndarray,
x_noise_stdev: Union[None, float] = 0.,
y_noise_stdev: Union[None, float] = None,
n_bootstraps: int = 2000,
fit_points: Union[int, Iterable[float], None] = 300,
spline_degree: int = 3,
normalize: bool = None,
) -> Union[Tuple[np.ndarray, np.ndarray], List[Spline]]:
"""
Bootstraps a time-series dataset and fits splines to each bootstrap resample.
Args:
x: The independent variable (e.g., time) of the dataset. A 1D NumPy array.
y: The dependent variable (e.g., altitude) of the dataset. A 1D NumPy array.
        x_noise_stdev: The standard deviation of the noise in the x-data. If None, this is estimated from the
            data using `estimate_noise_standard_deviation()`. Defaults to 0 (i.e., the x-data is assumed noise-free).
        y_noise_stdev: The standard deviation of the noise in the y-data. If None (the default), this is estimated
            from the data using `estimate_noise_standard_deviation()`.
        n_bootstraps: The number of bootstrap resamples to create.
fit_points: An optional variable that determines what to do with the splines after they are fit:
- If an integer, the splines will be evaluated at a linearly-spaced vector of points between the minimum
and maximum x-values of the dataset, with the number of points equal to `fit_points`. This is the default.
- If an iterable of floats (e.g. a 1D NumPy array), the splines will be evaluated at those points.
- If None, the splines won't be evaluated, and instead the splines are returned directly.
spline_degree: The degree of the splines to fit.
normalize: Whether or not to normalize the data before fitting. If True, the data will be normalized to
the range [0, 1] before fitting, and the splines will be un-normalized before being returned. If False,
the data will not be normalized before fitting.
- If None (the default), the data will be normalized if and only if `fit_points` is not None.
Returns: One of the following, depending on the value of `fit_points`:
- If `fit_points` is an integer or array, then this function returns a tuple of NumPy arrays:
- `x_fit`: A 1D NumPy array with the x-values at which the splines were evaluated.
- `y_bootstrap_fits`: A 2D NumPy array of shape (n_bootstraps, len(x_fit)) with the y-values of the
splines evaluated at each bootstrap resample and at each x-value.
- If `fit_points` is None, then this function returns a list of `n_bootstraps` splines, each of which is a
`NaturalUnivariateSpline`, which is a subclass of `scipy.interpolate.UnivariateSpline` with more sensible
extrapolation.
"""
### Set defaults
if normalize is None:
normalize = fit_points is not None
### Discard any NaN points
isnan = np.logical_or(
np.isnan(x),
np.isnan(y),
)
x = x[~isnan]
y = y[~isnan]
### Compute the standard deviation of the noise
if x_noise_stdev is None:
x_noise_stdev = estimate_noise_standard_deviation(x)
print(f"Estimated x-component of noise standard deviation: {x_noise_stdev}")
if y_noise_stdev is None:
y_noise_stdev = estimate_noise_standard_deviation(y)
print(f"Estimated y-component of noise standard deviation: {y_noise_stdev}")
### Sort the data by x-value
sort_indices = np.argsort(x)
x = x[sort_indices]
y = y[sort_indices]
### Prepare for normalization
x_min = np.min(x)
x_max = np.max(x)
x_rng = x_max - x_min
y_min = np.min(y)
y_max = np.max(y)
y_rng = y_max - y_min
if normalize:
x_normalize = lambda x: (x - x_min) / x_rng
y_normalize = lambda y: (y - y_min) / y_rng
# x_unnormalize = lambda x_n: x_n * x_rng + x_min
y_unnormalize = lambda y_n: y_n * y_rng + y_min
x_stdev_normalized = x_noise_stdev / x_rng
y_stdev_normalized = y_noise_stdev / y_rng
else:
x_normalize = lambda x: x
y_normalize = lambda y: y
# x_unnormalize = lambda x_n: x_n
y_unnormalize = lambda y_n: y_n
x_stdev_normalized = x_noise_stdev
y_stdev_normalized = y_noise_stdev
with tqdm(total=n_bootstraps, desc="Bootstrapping", unit=" samples") as progress_bar:
splines = []
n_valid_splines = 0
n_attempted_splines = 0
while n_valid_splines < n_bootstraps:
n_attempted_splines += 1
### Obtain a bootstrap resample
indices = np.random.choice(len(x), size=len(x), replace=True)
x_sample = x[indices] + np.random.normal(scale=x_noise_stdev, size=len(x))
y_sample = y[indices]
order = np.argsort(x_sample)
x_sample = x_sample[order]
y_sample = y_sample[order]
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=UserWarning)
spline = Spline(
x=x_normalize(x_sample),
y=y_normalize(y_sample),
w=np.ones_like(x) / y_stdev_normalized,
s=len(x),
k=spline_degree,
)
if not np.isnan(spline(x_normalize((x_min + x_max) / 2))):
n_valid_splines += 1
progress_bar.update(1)
splines.append(spline)
else:
continue
if fit_points is None:
return splines
else:
        ### Determine which x-points to resample at
        ### (The `fit_points is None` case was already handled by the early return above.)
        if isinstance(fit_points, int):
            x_fit = np.linspace(
                np.min(x),
                np.max(x),
                fit_points
            )
        else:
            x_fit = np.array(fit_points)
### Evaluate the splines at the x-points
y_bootstrap_fits = np.array([
y_unnormalize(spline(x_normalize(x_fit)))
for spline in splines
])
### Throw an error if all of the splines are NaN
if np.all(np.isnan(y_bootstrap_fits)):
raise ValueError("All of the splines are NaN. This is likely due to a poor choice of `spline_degree`.")
return x_fit, y_bootstrap_fits
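# Illustrative use (added for this document; a sketch, assuming `x` and `y` are 1D arrays of noisy measurements):
#   x_fit, y_bootstrap_fits = bootstrap_fits(x, y, n_bootstraps=500)
#   import numpy
#   lo, hi = numpy.nanpercentile(y_bootstrap_fits, [2.5, 97.5], axis=0)
#   # (lo, hi) then approximately bound a 95% confidence band on the underlying trend at each point of x_fit.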
if __name__ == '__main__':
# np.random.seed(1)
# N = 1000
# f_sample_over_f_signal = 1000
#
# t = np.arange(N)
# y = np.sin(2 * np.pi / f_sample_over_f_signal * t) + 0.1 * np.random.randn(len(t))
#
# print(estimate_noise_standard_deviation(y))
d = dict(np.load("raw_data.npz"))
x = d["airspeed"]
y = d["voltage"] * d["current"]
# estimate_noise_standard_deviation(x)
#
# x_fit, y_bootstrap_fits = bootstrap_fits(
# x, y,
# x_stdev=None,
# y_stdev=None,
# n_bootstraps=20,
# spline_degree=5,
# )
import matplotlib.pyplot as plt
import aerosandbox.tools.pretty_plots as p
fig, ax = plt.subplots(figsize=(7, 4))
p.plot_with_bootstrapped_uncertainty(
x, y,
x_stdev=None,
y_stdev=estimate_noise_standard_deviation(y[np.argsort(x)]),
ci=[0.75, 0.95],
color="coral",
n_bootstraps=100,
n_fit_points=200,
# ci_to_alpha_mapping=lambda ci: 0.4,
normalize=False,
spline_degree=3,
)
plt.xlim(x.min(), x.max())
plt.ylim(-10, 800)
p.set_ticks(1, 0.25, 100, 25)
plt.legend(
loc="lower right"
)
p.show_plot(
xlabel="Cruise Airspeed [m/s]",
ylabel="Electrical Power Required [W]",
title="Raw Data",
legend=False,
dpi=300
)
# ===== End of file: /AeroSandbox-4.1.1.tar.gz/AeroSandbox-4.1.1/aerosandbox/tools/statistics/time_series_uncertainty_quantification.py =====
import aerosandbox.numpy as np
import signal
from contextlib import contextmanager
import sys
@contextmanager
def time_limit(seconds):
"""
Allows you to run a block of code with a timeout. This way, you can sweep through points to make a carpet plot
without getting stuck on a particular point that may not terminate in a reasonable amount of time.
Only runs on Linux!
Usage:
Attempt to set x equal to the value of a complicated function. If it takes longer than 5 seconds, skip it.
>>> try:
>>> with time_limit(5):
>>> x = complicated_function()
    >>> except TimeoutError:
>>> x = np.nan
Args:
seconds: Duration of timeout [seconds]
Returns:
"""
def signal_handler(signum, frame):
raise TimeoutError()
try:
signal.signal(signal.SIGALRM, signal_handler)
except AttributeError:
raise OSError("signal.SIGALRM could not be found. This is probably because you're not using Linux.")
signal.alarm(seconds)
try:
yield
finally:
signal.alarm(0)
def remove_nans(array):
"""
Removes NaN values in a 1D array.
Args:
array: a 1D array of data.
Returns: The array with all NaN values stripped.
"""
return array[~np.isnan(array)]
def patch_nans(array): # TODO remove modification on incoming values; only patch nans
"""
Patches NaN values in a 2D array. Can patch holes or entire regions. Uses Laplacian smoothing.
    :param array: The 2D array to patch. Note: the input array is currently also modified in place (see TODO above).
    :return: The patched array, with all NaN entries filled in.
"""
original_nans = np.isnan(array)
nanfrac = lambda array: np.sum(np.isnan(array)) / len(array.flatten())
def item(i, j):
if i < 0 or j < 0: # don't allow wrapping other than what's controlled here
return np.nan
try:
return array[i, j % array.shape[1]] # allow wrapping around day of year
except IndexError:
return np.nan
print_title = lambda name: print(f"{name}\nIter | NaN Fraction")
print_progress = lambda iter: print(f"{iter:4} | {nanfrac(array):.6f}")
# Bridging
print_title("Bridging")
print_progress(0)
iter = 1
last_nanfrac = nanfrac(array)
making_progress = True
while making_progress:
for i in range(array.shape[0]):
for j in range(array.shape[1]):
if not np.isnan(array[i, j]):
continue
pairs = [
[item(i, j - 1), item(i, j + 1)],
[item(i - 1, j), item(i + 1, j)],
[item(i - 1, j + 1), item(i + 1, j - 1)],
[item(i - 1, j - 1), item(i + 1, j + 1)],
]
for pair in pairs:
a = pair[0]
b = pair[1]
if not (np.isnan(a) or np.isnan(b)):
array[i, j] = (a + b) / 2
continue
print_progress(iter)
making_progress = nanfrac(array) != last_nanfrac
last_nanfrac = nanfrac(array)
iter += 1
# Spreading
for neighbors_to_spread in [4, 3, 2, 1]:
print_title(f"Spreading with {neighbors_to_spread} neighbors")
print_progress(0)
iter = 1
last_nanfrac = nanfrac(array)
making_progress = True
while making_progress:
for i in range(array.shape[0]):
for j in range(array.shape[1]):
if not np.isnan(array[i, j]):
continue
neighbors = np.array([
item(i, j - 1), item(i, j + 1),
item(i - 1, j), item(i + 1, j),
item(i - 1, j + 1), item(i + 1, j - 1),
item(i - 1, j - 1), item(i + 1, j + 1),
])
valid_neighbors = neighbors[np.logical_not(np.isnan(neighbors))]
if len(valid_neighbors) > neighbors_to_spread:
array[i, j] = np.mean(valid_neighbors)
print_progress(iter)
making_progress = nanfrac(array) != last_nanfrac
last_nanfrac = nanfrac(array)
iter += 1
if last_nanfrac == 0:
break
assert last_nanfrac == 0, "Could not patch all NaNs!"
# Diffusing
print_title("Diffusing") # TODO Perhaps use skimage gaussian blur kernel or similar instead of "+" stencil?
for iter in range(50):
print(f"{iter + 1:4}")
for i in range(array.shape[0]):
for j in range(array.shape[1]):
if original_nans[i, j]:
neighbors = np.array([
item(i, j - 1),
item(i, j + 1),
item(i - 1, j),
item(i + 1, j),
])
valid_neighbors = neighbors[np.logical_not(np.isnan(neighbors))]
array[i, j] = np.mean(valid_neighbors)
return array
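# Illustrative use (added for this document): patch a rectangular hole of NaNs in a 2D field.
#   import numpy
#   field = numpy.random.randn(20, 30)
#   field[5:9, 10:16] = numpy.nan
#   patched = patch_nans(field)  # note: per the TODO above, the input array is currently also modified in place
#   assert not numpy.any(numpy.isnan(patched))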
if __name__ == '__main__':
import time
import numpy as np
from numpy import linalg
def complicated_function():
print("Starting...")
n = 10000
linalg.solve(np.random.randn(n, n), np.random.randn(n))
print("Finished")
return True
try:
with time_limit(1):
complicated_function()
except TimeoutError:
print("Timed out.")
# ===== End of file: /AeroSandbox-4.1.1.tar.gz/AeroSandbox-4.1.1/aerosandbox/visualization/carpet_plot_utils.py =====
import plotly.graph_objects as go
import aerosandbox.numpy as np
def reflect_over_XZ_plane(input_vector):
"""
Takes in a vector or an array and flips the y-coordinates.
:param input_vector: A vector or list of vectors to flip.
:return: Vector with flipped sign on y-coordinate.
"""
shape = input_vector.shape
if len(shape) == 1:
return input_vector * np.array([1, -1, 1])
elif len(shape) == 2:
if not shape[1] == 3:
raise ValueError("The function expected either a 3-element vector or a Nx3 array!")
return input_vector * np.array([1, -1, 1])
else:
raise ValueError("The function expected either a 3-element vector or a Nx3 array!")
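# Examples (added for clarity):
#   reflect_over_XZ_plane(np.array([1., 2., 3.]))      # -> array([ 1., -2.,  3.])
#   reflect_over_XZ_plane(np.array([[1., 2., 3.],
#                                   [4., 5., 6.]]))    # -> same rows, with the y-column sign-flipped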
class Figure3D:
def __init__(self):
self.fig = go.Figure()
# Vertices of the faces
self.x_face = []
self.y_face = []
self.z_face = []
# Connectivity and color of the faces
self.i_face = []
self.j_face = []
self.k_face = []
self.intensity_face = []
# Vertices of the lines
self.x_line = []
self.y_line = []
self.z_line = []
# Vertices of the streamlines
self.x_streamline = []
self.y_streamline = []
self.z_streamline = []
def add_line(self,
points,
mirror=False,
):
"""
Adds a line (or series of lines) to draw.
:param points: an iterable with an arbitrary number of items. Each item is a 3D point, represented as an iterable of length 3.
:param mirror: Should we also draw a version that's mirrored over the XZ plane? [boolean]
:return: None
E.g. add_line([(0, 0, 0), (1, 0, 0)])
"""
for p in points:
self.x_line.append(float(p[0]))
self.y_line.append(float(p[1]))
self.z_line.append(float(p[2]))
self.x_line.append(None)
self.y_line.append(None)
self.z_line.append(None)
if mirror:
reflected_points = [reflect_over_XZ_plane(point) for point in points]
self.add_line(
points=reflected_points,
mirror=False
)
def add_streamline(self,
points,
mirror=False,
):
"""
        Adds a streamline (or series of streamlines) to draw.
        :param points: an iterable with an arbitrary number of items. Each item is a 3D point, represented as an iterable of length 3.
        :param mirror: Should we also draw a version that's mirrored over the XZ plane? [boolean]
        :return: None
        E.g. add_streamline([(0, 0, 0), (1, 0, 0)])
"""
for p in points:
self.x_streamline.append(float(p[0]))
self.y_streamline.append(float(p[1]))
self.z_streamline.append(float(p[2]))
self.x_streamline.append(None)
self.y_streamline.append(None)
self.z_streamline.append(None)
if mirror:
reflected_points = [reflect_over_XZ_plane(point) for point in points]
self.add_streamline(
points=reflected_points,
mirror=False
)
def add_tri(self,
points,
intensity=0,
outline=False,
mirror=False,
):
"""
Adds a triangular face to draw.
:param points: an iterable with 3 items. Each item is a 3D point, represented as an iterable of length 3.
:param intensity: Intensity associated with this face
:param outline: Do you want to outline this triangle? [boolean]
:param mirror: Should we also draw a version that's mirrored over the XZ plane? [boolean]
:return: None
        E.g. add_tri([(0, 0, 0), (1, 0, 0), (0, 1, 0)])
"""
if not len(points) == 3:
raise ValueError("'points' must have exactly 3 items!")
for p in points:
self.x_face.append(float(p[0]))
self.y_face.append(float(p[1]))
self.z_face.append(float(p[2]))
self.intensity_face.append(intensity)
indices_added = np.arange(len(self.x_face) - 3, len(self.x_face))
self.i_face.append(indices_added[0])
self.j_face.append(indices_added[1])
self.k_face.append(indices_added[2])
if outline:
self.add_line(list(points) + [points[0]])
if mirror:
reflected_points = [reflect_over_XZ_plane(point) for point in points]
self.add_tri(
points=reflected_points,
intensity=intensity,
outline=outline,
mirror=False
)
def add_quad(self,
points,
intensity=0,
outline=True,
mirror=False,
):
"""
Adds a quadrilateral face to draw. All points should be (approximately) coplanar if you want it to look right.
:param points: an iterable with 4 items. Each item is a 3D point, represented as an iterable of length 3. Points should be given in sequential order.
:param intensity: Intensity associated with this face
:param outline: Do you want to outline this quad? [boolean]
:param mirror: Should we also draw a version that's mirrored over the XZ plane? [boolean]
:return: None
        E.g. add_quad([(0, 0, 0), (1, 0, 0), (1, 1, 0), (0, 1, 0)])
"""
if not len(points) == 4:
raise ValueError("'points' must have exactly 4 items!")
for p in points:
self.x_face.append(float(p[0]))
self.y_face.append(float(p[1]))
self.z_face.append(float(p[2]))
self.intensity_face.append(intensity)
indices_added = np.arange(len(self.x_face) - 4, len(self.x_face))
self.i_face.append(indices_added[0])
self.j_face.append(indices_added[1])
self.k_face.append(indices_added[2])
self.i_face.append(indices_added[0])
self.j_face.append(indices_added[2])
self.k_face.append(indices_added[3])
if outline:
self.add_line(list(points) + [points[0]])
if mirror:
reflected_points = [reflect_over_XZ_plane(point) for point in points]
self.add_quad(
points=reflected_points,
intensity=intensity,
outline=outline,
mirror=False
)
def draw(self,
show=True,
title="",
colorbar_title="",
colorscale="viridis",
):
# Draw faces
self.fig.add_trace(
go.Mesh3d(
x=self.x_face,
y=self.y_face,
z=self.z_face,
i=self.i_face,
j=self.j_face,
k=self.k_face,
flatshading=False,
intensity=self.intensity_face,
colorbar=dict(title=colorbar_title),
colorscale=colorscale,
showscale=colorbar_title is not None
),
)
# Draw lines
self.fig.add_trace(
go.Scatter3d(
x=self.x_line,
y=self.y_line,
z=self.z_line,
mode='lines',
name='',
line=dict(color='rgb(0,0,0)', width=3),
showlegend=False,
)
)
# Draw streamlines
self.fig.add_trace(
go.Scatter3d(
x=self.x_streamline,
y=self.y_streamline,
z=self.z_streamline,
mode='lines',
name='',
line=dict(color='rgba(119,0,255,200)', width=1),
showlegend=False,
)
)
self.fig.update_layout(
title=title,
scene=dict(aspectmode='data'),
)
if show:
self.fig.show()
return self.fig
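# Illustrative usage sketch (added for this document): build and draw a single unit quad.
#   fig3d = Figure3D()
#   fig3d.add_quad([(0, 0, 0), (1, 0, 0), (1, 1, 0), (0, 1, 0)], intensity=1.0, outline=True)
#   fig3d.draw(title="Single quad", colorbar_title="Intensity")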
if __name__ == '__main__':
fig = Figure3D()
# ===== End of file: /AeroSandbox-4.1.1.tar.gz/AeroSandbox-4.1.1/aerosandbox/visualization/plotly_Figure3D.py =====
from aerosandbox import AeroSandboxObject
from aerosandbox.geometry.common import *
from typing import List, Dict, Any, Union, Optional, Tuple
import aerosandbox.geometry.mesh_utilities as mesh_utils
from aerosandbox.geometry.wing import Wing
from aerosandbox.geometry.fuselage import Fuselage
from aerosandbox.geometry.propulsor import Propulsor
from aerosandbox.weights.mass_properties import MassProperties
import copy
class Airplane(AeroSandboxObject):
"""
Definition for an airplane.
Anatomy of an Airplane:
An Airplane consists chiefly of a collection of wings and fuselages. These can be accessed with
`Airplane.wings` and `Airplane.fuselages`, which gives a list of those respective components. Each wing is a
Wing object, and each fuselage is a Fuselage object.
"""
def __init__(self,
name: Optional[str] = None,
xyz_ref: Union[np.ndarray, List] = None,
wings: Optional[List[Wing]] = None,
fuselages: Optional[List[Fuselage]] = None,
propulsors: Optional[List[Propulsor]] = None,
s_ref: Optional[float] = None,
c_ref: Optional[float] = None,
b_ref: Optional[float] = None,
analysis_specific_options: Optional[Dict[type, Dict[str, Any]]] = None
):
"""
Defines a new airplane.
Args:
name: Name of the airplane [optional]. It can help when debugging to give the airplane a sensible name.
xyz_ref: An array-like that gives the x-, y-, and z- reference point of the airplane, used when computing
moments and stability derivatives. Generally, this should be the center of gravity.
# In a future version, this will be deprecated and replaced with asb.MassProperties.
wings: A list of Wing objects that are a part of the airplane.
fuselages: A list of Fuselage objects that are a part of the airplane.
propulsors: A list of Propulsor objects that are a part of the airplane.
s_ref: Reference area. If undefined, it's set from the area of the first Wing object. # Note: will be deprecated
c_ref: Reference chord. If undefined, it's set from the mean aerodynamic chord of the first Wing object. # Note: will be deprecated
b_ref: Reference span. If undefined, it's set from the span of the first Wing object. # Note: will be deprecated
analysis_specific_options: Analysis-specific options are additional constants or modeling assumptions
that should be passed on to specific analyses and associated with this specific geometry object.
This should be a dictionary where:
* Keys are specific analysis types (typically a subclass of asb.ExplicitAnalysis or
asb.ImplicitAnalysis), but if you decide to write your own analysis and want to make this key
something else (like a string), that's totally fine - it's just a unique identifier for the
specific analysis you're running.
* Values are a dictionary of key:value pairs, where:
* Keys are strings.
* Values are some value you want to assign.
This is more easily demonstrated / understood with an example:
>>> analysis_specific_options = {
>>> asb.AeroBuildup: dict(
>>> include_wave_drag=True,
>>> )
>>> }
"""
### Set defaults
if name is None:
name = "Untitled"
if xyz_ref is None:
xyz_ref = np.array([0., 0., 0.])
if wings is None:
wings: List[Wing] = []
if fuselages is None:
fuselages: List[Fuselage] = []
if propulsors is None:
propulsors: List[Propulsor] = []
if analysis_specific_options is None:
analysis_specific_options = {}
### Initialize
self.name = name
self.xyz_ref = np.array(xyz_ref)
self.wings = wings
self.fuselages = fuselages
self.propulsors = propulsors
self.analysis_specific_options = analysis_specific_options
### Assign reference values
try:
main_wing = self.wings[0]
except IndexError:
main_wing = None
try:
main_fuse = self.fuselages[0]
except IndexError:
main_fuse = None
if s_ref is not None:
self.s_ref = s_ref
else:
if main_wing is not None:
self.s_ref = main_wing.area()
else:
if main_fuse is not None:
self.s_ref = main_fuse.area_projected()
else:
raise ValueError(
"`s_ref` was not provided, and a value cannot be inferred automatically from wings or fuselages.\n"
"You must set this manually when instantiating your asb.Airplane object.")
if c_ref is not None:
self.c_ref = c_ref
else:
if main_wing is not None:
self.c_ref = main_wing.mean_aerodynamic_chord()
else:
if main_fuse is not None:
self.c_ref = main_fuse.length()
else:
raise ValueError(
"`c_ref` was not provided, and a value cannot be inferred automatically from wings or fuselages.\n"
"You must set this manually when instantiating your asb.Airplane object."
)
if b_ref is not None:
self.b_ref = b_ref
else:
if main_wing is not None:
self.b_ref = main_wing.span(include_centerline_distance=True)
else:
if main_fuse is not None:
self.b_ref = main_fuse.area_projected() / main_fuse.length()
else:
raise ValueError(
"`b_ref` was not provided, and a value cannot be inferred automatically from wings or fuselages.\n"
"You must set this manually when instantiating your asb.Airplane object."
)
def __repr__(self):
n_wings = len(self.wings)
n_fuselages = len(self.fuselages)
return f"Airplane '{self.name}' " \
f"({n_wings} {'wing' if n_wings == 1 else 'wings'}, " \
f"{n_fuselages} {'fuselage' if n_fuselages == 1 else 'fuselages'})"
# TODO def add_wing(wing: 'Wing') -> None
def mesh_body(self,
method="quad",
thin_wings=False,
stack_meshes=True,
):
"""
Returns a surface mesh of the Airplane, in (points, faces) format. For reference on this format,
see the documentation in `aerosandbox.geometry.mesh_utilities`.
Args:
            method: The meshing method, passed on to the constituent Wing.mesh_body() / Fuselage.mesh_body() calls
                (e.g., "quad").
thin_wings: Controls whether wings should be meshed as thin surfaces, rather than full 3D bodies.
stack_meshes: Controls whether the meshes should be merged into a single mesh or not.
* If True, returns a (points, faces) tuple in standard mesh format.
* If False, returns a list of (points, faces) tuples in standard mesh format.
Returns:
"""
if thin_wings:
wing_meshes = [
wing.mesh_thin_surface(
method=method,
)
for wing in self.wings
]
else:
wing_meshes = [
wing.mesh_body(
method=method,
)
for wing in self.wings
]
fuse_meshes = [
fuse.mesh_body(
method=method
)
for fuse in self.fuselages
]
meshes = wing_meshes + fuse_meshes
if stack_meshes:
points, faces = mesh_utils.stack_meshes(*meshes)
return points, faces
else:
return meshes
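    # Illustrative use (added for this document):
    #   points, faces = airplane.mesh_body(method="quad")
    #   # `points` is an (N, 3) array of vertex locations; `faces` contains integer indices into `points`,
    #   # following the (points, faces) convention described in `aerosandbox.geometry.mesh_utilities`.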
def draw(self,
backend: str = "pyvista",
thin_wings: bool = False,
ax=None,
use_preset_view_angle: str = None,
set_background_pane_color: Union[str, Tuple[float, float, float]] = None,
set_background_pane_alpha: float = None,
set_lims: bool = True,
set_equal: bool = True,
set_axis_visibility: bool = None,
show: bool = True,
show_kwargs: Dict = None,
):
"""
Produces an interactive 3D visualization of the airplane.
Args:
backend: The visualization backend to use. Options are:
* "matplotlib" for a Matplotlib backend
* "pyvista" for a PyVista backend
* "plotly" for a Plot.ly backend
* "trimesh" for a trimesh backend
thin_wings: A boolean that determines whether to draw the full airplane (i.e. thickened, 3D bodies), or to use a
thin-surface representation for any Wing objects.
show: A boolean that determines whether to display the object after plotting it. If False, the object is
returned but not displayed. If True, the object is displayed and returned.
Returns: The plotted object, in its associated backend format. Also displays the object if `show` is True.
"""
if show_kwargs is None:
show_kwargs = {}
points, faces = self.mesh_body(method="quad", thin_wings=thin_wings)
if backend == "matplotlib":
import matplotlib.pyplot as plt
import aerosandbox.tools.pretty_plots as p
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
if ax is None:
_, ax = p.figure3d(figsize=(8, 8), computed_zorder=False)
else:
if not p.ax_is_3d(ax):
raise ValueError("`ax` must be a 3D axis.")
plt.sca(ax)
### Set the view angle
if use_preset_view_angle is not None:
p.set_preset_3d_view_angle(use_preset_view_angle)
### Set the background pane color
if set_background_pane_color is not None:
ax.xaxis.pane.set_facecolor(set_background_pane_color)
ax.yaxis.pane.set_facecolor(set_background_pane_color)
ax.zaxis.pane.set_facecolor(set_background_pane_color)
### Set the background pane alpha
if set_background_pane_alpha is not None:
ax.xaxis.pane.set_alpha(set_background_pane_alpha)
ax.yaxis.pane.set_alpha(set_background_pane_alpha)
ax.zaxis.pane.set_alpha(set_background_pane_alpha)
ax.add_collection(
Poly3DCollection(
points[faces], facecolors='lightgray', edgecolors=(0, 0, 0, 0.1),
linewidths=0.5, alpha=0.8, shade=True,
),
)
for prop in self.propulsors:
### Disk
if prop.length == 0:
ax.add_collection(
Poly3DCollection(
np.stack([np.stack(
prop.get_disk_3D_coordinates(),
axis=1
)], axis=0),
facecolors='darkgray', edgecolors=(0, 0, 0, 0.2),
linewidths=0.5, alpha=0.35, shade=True, zorder=4,
)
)
if set_lims:
ax.set_xlim(points[:, 0].min(), points[:, 0].max())
ax.set_ylim(points[:, 1].min(), points[:, 1].max())
ax.set_zlim(points[:, 2].min(), points[:, 2].max())
if set_equal:
p.equal()
if set_axis_visibility is not None:
if set_axis_visibility:
ax.set_axis_on()
else:
ax.set_axis_off()
if show:
p.show_plot()
elif backend == "plotly":
from aerosandbox.visualization.plotly_Figure3D import Figure3D
fig = Figure3D()
for f in faces:
fig.add_quad((
points[f[0]],
points[f[1]],
points[f[2]],
points[f[3]],
), outline=True)
show_kwargs = {
"show": show,
**show_kwargs
}
return fig.draw(**show_kwargs)
elif backend == "pyvista":
import pyvista as pv
fig = pv.PolyData(
*mesh_utils.convert_mesh_to_polydata_format(points, faces)
)
show_kwargs = {
"show_edges": True,
"show_grid" : True,
**show_kwargs,
}
if show:
fig.plot(**show_kwargs)
return fig
elif backend == "trimesh":
import trimesh as tri
fig = tri.Trimesh(points, faces)
if show:
fig.show(**show_kwargs)
return fig
else:
raise ValueError("Bad value of `backend`!")
def draw_wireframe(self,
ax=None,
color="k",
thin_linewidth=0.2,
thick_linewidth=0.5,
fuselage_longeron_theta=None,
use_preset_view_angle: str = None,
set_background_pane_color: Union[str, Tuple[float, float, float]] = None,
set_background_pane_alpha: float = None,
set_lims: bool = True,
set_equal: bool = True,
set_axis_visibility: bool = None,
show: bool = True,
):
"""
Draws a wireframe of the airplane on a Matplotlib 3D axis.
Args:
ax: The axis to draw on. Must be a 3D axis. If None, creates a new axis.
color: The color of the wireframe.
            thin_linewidth: The linewidth of the thin lines (e.g., interior cross-sections and surface contour lines).
            thick_linewidth: The linewidth of the thick lines (e.g., leading/trailing edges and fuselage longerons).
            fuselage_longeron_theta: The azimuthal angles (in radians) at which longitudinal lines ("longerons")
                are drawn along each fuselage. If None, defaults to 8 evenly-spaced angles around the circumference.
        """
### Set defaults
if fuselage_longeron_theta is None:
fuselage_longeron_theta = np.linspace(0, 2 * np.pi, 8 + 1)[:-1]
import matplotlib.pyplot as plt
import aerosandbox.tools.pretty_plots as p
if ax is None:
_, ax = p.figure3d(figsize=(8, 8), computed_zorder=False)
else:
if not p.ax_is_3d(ax):
raise ValueError("`ax` must be a 3D axis.")
plt.sca(ax)
### Set the view angle
if use_preset_view_angle is not None:
p.set_preset_3d_view_angle(use_preset_view_angle)
### Set the background pane color
if set_background_pane_color is not None:
ax.xaxis.pane.set_facecolor(set_background_pane_color)
ax.yaxis.pane.set_facecolor(set_background_pane_color)
ax.zaxis.pane.set_facecolor(set_background_pane_color)
### Set the background pane alpha
if set_background_pane_alpha is not None:
ax.xaxis.pane.set_alpha(set_background_pane_alpha)
ax.yaxis.pane.set_alpha(set_background_pane_alpha)
ax.zaxis.pane.set_alpha(set_background_pane_alpha)
def plot_line(
xyz: np.ndarray,
symmetric: bool = False,
color=color,
linewidth=0.4,
**kwargs
):
if symmetric:
xyz = np.concatenate([
xyz,
np.array([[np.nan] * 3]),
xyz * np.array([[1, -1, 1]])
], axis=0)
ax.plot(
xyz[:, 0],
xyz[:, 1],
xyz[:, 2],
color=color,
linewidth=linewidth,
**kwargs
)
def reshape(x):
return np.array(x).reshape((1, 3))
##### Wings
for wing in self.wings:
try:
if wing.color is not None:
color_to_use = wing.color
else:
color_to_use = color
except AttributeError:
color_to_use = color
### LE and TE lines
for xy in [
(0, 0), # Leading Edge
(1, 0), # Trailing Edge
]:
plot_line(
np.stack(wing.mesh_line(x_nondim=xy[0], z_nondim=xy[1]), axis=0),
symmetric=wing.symmetric,
linewidth=thick_linewidth,
color=color_to_use
)
### Top and Bottom lines
x = 0.4
afs = [xsec.airfoil for xsec in wing.xsecs]
thicknesses = np.array([af.local_thickness(x_over_c=x) for af in afs])
plot_line(
np.stack(wing.mesh_line(x_nondim=x, z_nondim=thicknesses / 2, add_camber=True), axis=0),
symmetric=wing.symmetric,
linewidth=thin_linewidth,
color=color_to_use
)
plot_line(
np.stack(wing.mesh_line(x_nondim=x, z_nondim=-thicknesses / 2, add_camber=True), axis=0),
symmetric=wing.symmetric,
linewidth=thin_linewidth,
color=color_to_use
)
### Airfoils
for i, xsec in enumerate(wing.xsecs):
xg_local, yg_local, zg_local = wing._compute_frame_of_WingXSec(i)
xg_local = reshape(xg_local)
yg_local = reshape(yg_local)
zg_local = reshape(zg_local)
origin = reshape(xsec.xyz_le)
scale = xsec.chord
line_upper = origin + (
xsec.airfoil.upper_coordinates()[:, 0].reshape((-1, 1)) * scale * xg_local +
xsec.airfoil.upper_coordinates()[:, 1].reshape((-1, 1)) * scale * zg_local
)
line_lower = origin + (
xsec.airfoil.lower_coordinates()[:, 0].reshape((-1, 1)) * scale * xg_local +
xsec.airfoil.lower_coordinates()[:, 1].reshape((-1, 1)) * scale * zg_local
)
for line in [line_upper, line_lower]:
plot_line(
line,
symmetric=wing.symmetric,
linewidth=thick_linewidth if i == 0 or i == len(wing.xsecs) - 1 else thin_linewidth,
color=color_to_use
)
##### Fuselages
for fuse in self.fuselages:
try:
if fuse.color is not None:
color_to_use = fuse.color
else:
color_to_use = color
except AttributeError:
color_to_use = color
### Bulkheads
perimeters_xyz = [
xsec.get_3D_coordinates(theta=np.linspace(0, 2 * np.pi, 121))
for xsec in fuse.xsecs
]
for i, perim in enumerate(perimeters_xyz):
plot_line(
np.stack(perim, axis=1),
linewidth=thick_linewidth if i == 0 or i == len(fuse.xsecs) - 1 else thin_linewidth,
color=color_to_use
)
### Centerline
plot_line(
np.stack(
fuse.mesh_line(y_nondim=0, z_nondim=0),
axis=0,
),
linewidth=thin_linewidth,
color=color_to_use
)
### Longerons
for theta in fuselage_longeron_theta:
plot_line(
np.stack([
np.array(xsec.get_3D_coordinates(theta=theta))
for xsec in fuse.xsecs
], axis=0),
linewidth=thick_linewidth,
color=color_to_use
)
##### Propulsors
for prop in self.propulsors:
try:
if prop.color is not None:
color_to_use = prop.color
else:
color_to_use = color
except AttributeError:
color_to_use = color
### Disk
if prop.length == 0:
plot_line(
np.stack(
prop.get_disk_3D_coordinates(),
axis=1
),
color=color_to_use
)
if set_lims:
points, _ = self.mesh_body()
ax.set_xlim(points[:, 0].min(), points[:, 0].max())
ax.set_ylim(points[:, 1].min(), points[:, 1].max())
ax.set_zlim(points[:, 2].min(), points[:, 2].max())
if set_equal:
p.equal()
if set_axis_visibility is not None:
if set_axis_visibility:
ax.set_axis_on()
else:
ax.set_axis_off()
if show:
p.show_plot()
def draw_three_view(self,
style: str = "shaded",
show: bool = True,
):
"""
        Draws a standard 4-panel three-view diagram of the airplane using the Matplotlib backend. Creates a new figure.
Args:
style: Determines what drawing style to use for the three-view. A string, one of:
* "shaded"
* "wireframe"
show: A boolean of whether to show the figure after creating it, or to hold it so that the user can modify the figure further before showing.
Returns:
"""
import matplotlib.pyplot as plt
import aerosandbox.tools.pretty_plots as p
preset_view_angles = np.array([
["XZ", "-YZ"],
["XY", "left_isometric"]
], dtype="O")
fig, axs = p.figure3d(
nrows=preset_view_angles.shape[0],
ncols=preset_view_angles.shape[1],
figsize=(8, 8),
computed_zorder=False,
)
for i in range(axs.shape[0]):
for j in range(axs.shape[1]):
ax = axs[i, j]
preset_view = preset_view_angles[i, j]
if style == "shaded":
self.draw(
backend="matplotlib",
ax=ax,
set_axis_visibility=False if 'isometric' in preset_view else None,
show=False
)
elif style == "wireframe":
if preset_view == "XZ":
fuselage_longeron_theta = [np.pi / 2, 3 * np.pi / 2]
elif preset_view == "XY":
fuselage_longeron_theta = [0, np.pi]
else:
fuselage_longeron_theta = None
self.draw_wireframe(
ax=ax,
set_axis_visibility=False if 'isometric' in preset_view else None,
fuselage_longeron_theta=fuselage_longeron_theta,
show=False
)
p.set_preset_3d_view_angle(preset_view)
xres = np.diff(ax.get_xticks())[0]
yres = np.diff(ax.get_yticks())[0]
zres = np.diff(ax.get_zticks())[0]
p.set_ticks(
xres, xres / 4,
yres, yres / 4,
zres, zres / 4,
)
ax.xaxis.set_tick_params(color='white', which='minor')
ax.yaxis.set_tick_params(color='white', which='minor')
ax.zaxis.set_tick_params(color='white', which='minor')
if preset_view == 'XY' or preset_view == '-XY':
ax.set_zticks([])
if preset_view == 'XZ' or preset_view == '-XZ':
ax.set_yticks([])
if preset_view == 'YZ' or preset_view == '-YZ':
ax.set_xticks([])
axs[1, 0].set_xlabel("$x_g$ [m]")
axs[1, 0].set_ylabel("$y_g$ [m]")
axs[0, 0].set_zlabel("$z_g$ [m]")
axs[0, 0].set_xticklabels([])
axs[0, 1].set_yticklabels([])
axs[0, 1].set_zticklabels([])
plt.subplots_adjust(
left=-0.08,
right=1.08,
bottom=-0.08,
top=1.08,
wspace=-0.38,
hspace=-0.38,
)
if show:
p.show_plot(
tight_layout=False,
)
def is_entirely_symmetric(self):
"""
Returns a boolean describing whether the airplane is geometrically entirely symmetric across the XZ-plane.
:return: [boolean]
"""
for wing in self.wings:
if not wing.is_entirely_symmetric():
return False
# TODO add in logic for fuselages
return True
def aerodynamic_center(self, chord_fraction: float = 0.25):
"""
Computes the approximate location of the aerodynamic center of the wing.
Uses the generalized methodology described here:
https://core.ac.uk/download/pdf/79175663.pdf
Args:
chord_fraction: The position of the aerodynamic center along the MAC, as a fraction of MAC length.
Typically, this value (denoted `h_0` in the literature) is 0.25 for a subsonic wing. However,
wing-fuselage interactions can cause a forward shift to a value more like 0.1 or less. Citing Cook,
Michael V., "Flight Dynamics Principles", 3rd Ed., Sect. 3.5.3 "Controls-fixed static stability". PDF:
https://www.sciencedirect.com/science/article/pii/B9780080982427000031
Returns: The (x, y, z) coordinates of the aerodynamic center of the airplane.
"""
wing_areas = [wing.area(type="projected") for wing in self.wings]
ACs = [wing.aerodynamic_center() for wing in self.wings]
wing_AC_area_products = [
AC * area
for AC, area in zip(
ACs,
wing_areas
)
]
aerodynamic_center = sum(wing_AC_area_products) / sum(wing_areas)
return aerodynamic_center
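    # Note (added for clarity): the computation above is an area-weighted average of the per-wing aerodynamic
    # centers, i.e., x_ac = sum_i(S_i * x_ac_i) / sum_i(S_i), applied componentwise to the (x, y, z) AC
    # locations, where S_i is the projected area of wing i.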
def with_control_deflections(self,
control_surface_deflection_mappings: Dict[str, float]
) -> "Airplane":
"""
Returns a copy of the airplane with the specified control surface deflections applied.
Args:
control_surface_deflection_mappings: A dictionary mapping control surface names to deflections.
* Keys: Control surface names.
* Values: Deflections, in degrees. Downwards-positive, following typical convention.
Returns: A copy of the airplane with the specified control surface deflections applied.
"""
deflected_airplane = copy.deepcopy(self)
for name, deflection in control_surface_deflection_mappings.items():
for wi, wing in enumerate(deflected_airplane.wings):
for xi, xsec in enumerate(wing.xsecs):
for csi, surf in enumerate(xsec.control_surfaces):
if surf.name == name:
surf.deflection = deflection
return deflected_airplane
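    # Illustrative use (added for this document; assumes a control surface named "elevator" exists on the airplane):
    #   deflected_airplane = airplane.with_control_deflections({"elevator": -10.0})  # 10 deg trailing-edge-up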
def generate_cadquery_geometry(self,
minimum_airfoil_TE_thickness: float = 0.001,
fuselage_tol: float = 1e-4,
) -> "Workplane":
"""
Uses the CADQuery library (OpenCASCADE backend) to generate a 3D CAD model of the airplane.
Args:
minimum_airfoil_TE_thickness: The minimum thickness of the trailing edge of the airfoils, as a fraction
of each airfoil's chord. This will be enforced by thickening the trailing edge of the airfoils if
necessary. This is useful for avoiding numerical issues in CAD software that can arise from extremely
thin (i.e., <1e-6 meters) trailing edges.
            fuselage_tol: The geometric tolerance (meters) to use when generating fuselage geometry. Fuselage
                cross-sections whose height or width is smaller than this value are thickened up to this value,
                to avoid degenerate (point-like) cross-sections that can cause the CAD kernel to error out.
Returns: A CADQuery Workplane object containing the CAD geometry of the airplane.
"""
import cadquery as cq
solids = []
for wing in self.wings:
xsec_wires = []
for i, xsec in enumerate(wing.xsecs):
csys = wing._compute_frame_of_WingXSec(i)
af = xsec.airfoil
if af.TE_thickness() < minimum_airfoil_TE_thickness:
af = af.set_TE_thickness(
thickness=minimum_airfoil_TE_thickness
)
LE_index = af.LE_index()
xsec_wires.append(
cq.Workplane(
inPlane=cq.Plane(
origin=tuple(xsec.xyz_le),
xDir=tuple(csys[0]),
normal=tuple(-csys[1])
)
).spline(
listOfXYTuple=[
tuple(xy * xsec.chord)
for xy in af.coordinates[:LE_index, :]
]
).spline(
listOfXYTuple=[
tuple(xy * xsec.chord)
for xy in af.coordinates[LE_index:, :]
]
).close()
)
wire_collection = xsec_wires[0]
for s in xsec_wires[1:]:
wire_collection.ctx.pendingWires.extend(s.ctx.pendingWires)
loft = wire_collection.loft(ruled=True, clean=False)
solids.append(loft)
if wing.symmetric:
loft = loft.mirror(
mirrorPlane='XZ',
union=False
)
solids.append(loft)
for fuse in self.fuselages:
xsec_wires = []
for i, xsec in enumerate(fuse.xsecs):
if xsec.height < fuselage_tol or xsec.width < fuselage_tol: # If the xsec is so small as to effectively be a point
xsec = copy.deepcopy(xsec) # Modify the xsec to be big enough to not error out.
xsec.height = np.maximum(xsec.height, fuselage_tol)
xsec.width = np.maximum(xsec.width, fuselage_tol)
xsec_wires.append(
cq.Workplane(
inPlane=cq.Plane(
origin=tuple(xsec.xyz_c),
xDir=(0, 1, 0),
normal=(-1, 0, 0)
)
).spline(
listOfXYTuple=[
(y - xsec.xyz_c[1], z - xsec.xyz_c[2])
for x, y, z in zip(*xsec.get_3D_coordinates(
theta=np.linspace(
np.pi / 2, np.pi / 2 + 2 * np.pi,
181
)
))
]
).close()
)
wire_collection = xsec_wires[0]
for s in xsec_wires[1:]:
wire_collection.ctx.pendingWires.extend(s.ctx.pendingWires)
loft = wire_collection.loft(ruled=True, clean=False)
solids.append(loft)
solid = solids[0]
for s in solids[1:]:
solid.add(s)
return solid.clean()
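    # Illustrative use (added for this document; requires the optional `cadquery` dependency):
    #   workplane = airplane.generate_cadquery_geometry(minimum_airfoil_TE_thickness=0.002)
    #   # `workplane` is a cadquery.Workplane; see export_cadquery_geometry() below for writing it to a STEP file.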
def export_cadquery_geometry(self,
filename: str,
minimum_airfoil_TE_thickness: float = 0.001
) -> None:
"""
Exports the airplane geometry to a STEP file.
Args:
filename: The filename to export to. Should include the ".step" extension.
minimum_airfoil_TE_thickness: The minimum thickness of the trailing edge of the airfoils, as a fraction
of each airfoil's chord. This will be enforced by thickening the trailing edge of the airfoils if
necessary. This is useful for avoiding numerical issues in CAD software that can arise from extremely
thin (i.e., <1e-6 meters) trailing edges.
Returns: None, but exports the airplane geometry to a STEP file.
"""
solid = self.generate_cadquery_geometry(
minimum_airfoil_TE_thickness=minimum_airfoil_TE_thickness,
)
solid.objects = [
o.scale(1000)
for o in solid.objects
]
from cadquery import exporters
exporters.export(
solid,
fname=filename
)
def export_AVL(self,
filename,
include_fuselages: bool = True
):
# TODO include option for mass file export as well
# Use MassProperties.export_AVL_mass...
from aerosandbox.aerodynamics.aero_3D.avl import AVL
avl = AVL(
airplane=self,
op_point=None,
xyz_ref=self.xyz_ref
)
avl.write_avl(filepath=filename)
def export_XFLR(self,
filename,
mass_props: MassProperties = None,
include_fuselages: bool = False,
mainwing: Wing = None,
elevator: Wing = None,
fin: Wing = None,
):
"""
Exports the airplane geometry to an XFLR5 `.xml` file.
Args:
filename: The filename to export to. Should include the ".xml" extension.
mass_props: The MassProperties object to use when exporting the airplane. If not specified, will default to
a 1 kg point mass at the origin.
- Note: XFLR5 does not natively support user-defined inertia tensors, so we have to synthesize an equivalent
set of point masses to represent the inertia tensor.
include_fuselages: Whether to include fuselages in the export.
mainwing: The main wing of the airplane. If not specified, will default to the first wing in the airplane.
elevator: The elevator of the airplane. If not specified, will default to the second wing in the airplane.
fin: The fin of the airplane. If not specified, will default to the third wing in the airplane.
Returns: None, but exports the airplane geometry to an XFLR5 `.xml` file.
To import the `.xml` file into XFLR5, go to File -> Import -> Import from XML.
"""
### Handle default arguments
if mass_props is None:
mass_props = MassProperties(
mass=1,
x_cg=0,
y_cg=0,
z_cg=0,
)
### Identify which wings are the main wing, elevator, and fin.
wings_specified = [
mainwing is not None,
elevator is not None,
fin is not None,
]
if all(wings_specified):
pass
elif any(wings_specified):
raise ValueError(
"If any wings are specified (`mainwing`, `elevator`, `fin`), then all wings must be specified.")
else:
n_wings = len(self.wings)
if n_wings == 0:
pass
else:
import warnings
warnings.warn(
"No wings were specified (`mainwing`, `elevator`, `fin`). Automatically assigning the first wing "
"to `mainwing`, the second wing to `elevator`, and the third wing to `fin`. If this is not "
"correct, manually specify these with (`mainwing`, `elevator`, and `fin`) arguments."
)
if n_wings == 1:
mainwing = self.wings[0]
elif n_wings == 2:
mainwing = self.wings[0]
elevator = self.wings[1]
elif n_wings == 3:
mainwing = self.wings[0]
elevator = self.wings[1]
fin = self.wings[2]
else:
raise ValueError(
"Could not automatically parse which wings should be assigned to which XFLR5 lifting surfaces, "
"since there are too many. Manually assign these with (`mainwing`, `elevator`, and `fin`) "
"arguments."
)
### Determine where point masses should be in order to yield the specified mass properties.
point_masses = mass_props.generate_possible_set_of_point_masses()
### Handle the fuselage
if include_fuselages:
raise NotImplementedError(
"Fuselage export to XFLR5 is not yet implemented."
)
### Write the XML file.
import xml.etree.ElementTree as ET
base_xml = f"""\
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE explane>
<explane version="1.0">
<Units>
<length_unit_to_meter>1</length_unit_to_meter>
<mass_unit_to_kg>1</mass_unit_to_kg>
</Units>
<Plane>
<Name>{self.name}</Name>
<Description></Description>
<Inertia>
</Inertia>
<has_body>false</has_body>
</Plane>
</explane>
"""
root = ET.fromstring(base_xml)
plane = root.find("Plane")
### Add point masses
inertia = plane.find("Inertia")
for i, point_mass in enumerate(point_masses):
point_mass_xml = ET.SubElement(inertia, "Point_Mass")
for k, v in {
"Tag" : f"pm{i}",
"Mass" : point_mass.mass,
"coordinates": ",".join([str(x) for x in point_mass.xyz_cg]),
}.items():
subelement = ET.SubElement(point_mass_xml, k)
subelement.text = str(v)
### Add the wings
if mainwing is not None:
wing = mainwing
wingxml = ET.SubElement(plane, "wing")
xyz_le_root = wing._compute_xyz_of_WingXSec(index=0, x_nondim=0, z_nondim=0)
for k, v in {
"Name" : wing.name,
"Type" : "MAINWING",
"Position" : ",".join([str(x) for x in xyz_le_root]),
"Tilt_angle": 0.,
"Symetric" : wing.symmetric, # This tag is a typo in XFLR...
"isFin" : "false",
"isSymFin" : "false",
}.items():
subelement = ET.SubElement(wingxml, k)
subelement.text = str(v)
sections = ET.SubElement(wingxml, "Sections")
xyz_le_sects_rel = [
wing._compute_xyz_of_WingXSec(index=i, x_nondim=0, z_nondim=0) - xyz_le_root
for i in range(len(wing.xsecs))
]
for i, xsec in enumerate(wing.xsecs):
sect = ET.SubElement(sections, "Section")
if i == len(wing.xsecs) - 1:
dihedral = 0
else:
dihedral = np.arctan2d(
xyz_le_sects_rel[i + 1][2] - xyz_le_sects_rel[i][2],
xyz_le_sects_rel[i + 1][1] - xyz_le_sects_rel[i][1],
)
for k, v in {
"y_position" : xyz_le_sects_rel[i][1],
"Chord" : xsec.chord,
"xOffset" : xyz_le_sects_rel[i][0],
"Dihedral" : dihedral,
"Twist" : xsec.twist,
"Left_Side_FoilName" : xsec.airfoil.name,
"Right_Side_FoilName": xsec.airfoil.name,
"x_number_of_panels" : 8,
"y_number_of_panels" : 8,
}.items():
subelement = ET.SubElement(sect, k)
subelement.text = str(v)
if elevator is not None:
wing = elevator
wingxml = ET.SubElement(plane, "wing")
xyz_le_root = wing._compute_xyz_of_WingXSec(index=0, x_nondim=0, z_nondim=0)
for k, v in {
"Name" : wing.name,
"Type" : "ELEVATOR",
"Position" : ",".join([str(x) for x in xyz_le_root]),
"Tilt_angle": 0.,
"Symetric" : wing.symmetric, # This tag is a typo in XFLR...
"isFin" : "false",
"isSymFin" : "false",
}.items():
subelement = ET.SubElement(wingxml, k)
subelement.text = str(v)
sections = ET.SubElement(wingxml, "Sections")
xyz_le_sects_rel = [
wing._compute_xyz_of_WingXSec(index=i, x_nondim=0, z_nondim=0) - xyz_le_root
for i in range(len(wing.xsecs))
]
for i, xsec in enumerate(wing.xsecs):
sect = ET.SubElement(sections, "Section")
if i == len(wing.xsecs) - 1:
dihedral = 0
else:
dihedral = np.arctan2d(
xyz_le_sects_rel[i + 1][2] - xyz_le_sects_rel[i][2],
xyz_le_sects_rel[i + 1][1] - xyz_le_sects_rel[i][1],
)
for k, v in {
"y_position" : xyz_le_sects_rel[i][1],
"Chord" : xsec.chord,
"xOffset" : xyz_le_sects_rel[i][0],
"Dihedral" : dihedral,
"Twist" : xsec.twist,
"Left_Side_FoilName" : xsec.airfoil.name,
"Right_Side_FoilName": xsec.airfoil.name,
"x_number_of_panels" : 8,
"y_number_of_panels" : 8,
}.items():
subelement = ET.SubElement(sect, k)
subelement.text = str(v)
if fin is not None:
wing = fin
wingxml = ET.SubElement(plane, "wing")
xyz_le_root = wing._compute_xyz_of_WingXSec(index=0, x_nondim=0, z_nondim=0)
for k, v in {
"Name" : wing.name,
"Type" : "FIN",
"Position" : ",".join([str(x) for x in xyz_le_root]),
"Tilt_angle": 0.,
"Symetric" : "true", # This tag is a typo in XFLR...
"isFin" : "true",
"isSymFin" : wing.symmetric,
}.items():
subelement = ET.SubElement(wingxml, k)
subelement.text = str(v)
sections = ET.SubElement(wingxml, "Sections")
xyz_le_sects_rel = [
wing._compute_xyz_of_WingXSec(index=i, x_nondim=0, z_nondim=0) - xyz_le_root
for i in range(len(wing.xsecs))
]
for i, xsec in enumerate(wing.xsecs):
sect = ET.SubElement(sections, "Section")
if i == len(wing.xsecs) - 1:
dihedral = 0
else:
dihedral = np.arctan2d(
xyz_le_sects_rel[i + 1][1] - xyz_le_sects_rel[i][1],
xyz_le_sects_rel[i + 1][2] - xyz_le_sects_rel[i][2],
)
for k, v in {
"y_position" : xyz_le_sects_rel[i][2],
"Chord" : xsec.chord,
"xOffset" : xyz_le_sects_rel[i][0],
"Dihedral" : dihedral,
"Twist" : xsec.twist,
"Left_Side_FoilName" : xsec.airfoil.name,
"Right_Side_FoilName": xsec.airfoil.name,
"x_number_of_panels" : 8,
"y_number_of_panels" : 8,
}.items():
subelement = ET.SubElement(sect, k)
subelement.text = str(v)
### Indents the XML file properly
def indent(elem, level=0):
i = "\n" + level * " "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
indent(elem, level + 1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
indent(root)
xml_string = ET.tostring(
root,
encoding="UTF-8",
xml_declaration=True
).decode()
with open(filename, "w+") as f:
f.write(xml_string)
return xml_string
if __name__ == '__main__':
import aerosandbox as asb
# import aerosandbox.numpy as np
import aerosandbox.tools.units as u
def ft(feet, inches=0): # Converts feet (and inches) to meters
return feet * u.foot + inches * u.inch
naca2412 = asb.Airfoil("naca2412")
naca0012 = asb.Airfoil("naca0012")
airplane = Airplane(
name="Cessna 152",
wings=[
asb.Wing(
name="Wing",
xsecs=[
asb.WingXSec(
xyz_le=[0, 0, 0],
chord=ft(5, 4),
airfoil=naca2412
),
asb.WingXSec(
xyz_le=[0, ft(7), ft(7) * np.sind(1)],
chord=ft(5, 4),
airfoil=naca2412,
control_surfaces=[
asb.ControlSurface(
name="aileron",
symmetric=False,
hinge_point=0.8,
deflection=0
)
]
),
asb.WingXSec(
xyz_le=[
ft(4, 3 / 4) - ft(3, 8 + 1 / 2),
ft(33, 4) / 2,
ft(33, 4) / 2 * np.sind(1)
],
chord=ft(3, 8 + 1 / 2),
airfoil=naca0012
)
],
symmetric=True
),
asb.Wing(
name="Horizontal Stabilizer",
xsecs=[
asb.WingXSec(
xyz_le=[0, 0, 0],
chord=ft(3, 8),
airfoil=naca0012,
twist=-2,
control_surfaces=[
asb.ControlSurface(
name="elevator",
symmetric=True,
hinge_point=0.75,
deflection=0
)
]
),
asb.WingXSec(
xyz_le=[ft(1), ft(10) / 2, 0],
chord=ft(2, 4 + 3 / 8),
airfoil=naca0012,
twist=-2
)
],
symmetric=True
).translate([ft(13, 3), 0, ft(-2)]),
asb.Wing(
name="Vertical Stabilizer",
xsecs=[
asb.WingXSec(
xyz_le=[ft(-5), 0, 0],
chord=ft(8, 8),
airfoil=naca0012,
),
asb.WingXSec(
xyz_le=[ft(0), 0, ft(1)],
chord=ft(3, 8),
airfoil=naca0012,
control_surfaces=[
asb.ControlSurface(
name="rudder",
hinge_point=0.75,
deflection=0
)
]
),
asb.WingXSec(
xyz_le=[ft(0, 8), 0, ft(5)],
chord=ft(2, 8),
airfoil=naca0012,
),
]
).translate([ft(16, 11) - ft(3, 8), 0, ft(-2)])
],
fuselages=[
asb.Fuselage(
xsecs=[
asb.FuselageXSec(
xyz_c=[0, 0, ft(-1)],
radius=0,
),
asb.FuselageXSec(
xyz_c=[0, 0, ft(-1)],
radius=ft(1.5),
shape=3,
),
asb.FuselageXSec(
xyz_c=[ft(3), 0, ft(-0.85)],
radius=ft(1.7),
shape=7,
),
asb.FuselageXSec(
xyz_c=[ft(5), 0, ft(0)],
radius=ft(2.7),
shape=7,
),
asb.FuselageXSec(
xyz_c=[ft(10, 4), 0, ft(0.3)],
radius=ft(2.3),
shape=7,
),
asb.FuselageXSec(
xyz_c=[ft(21, 11), 0, ft(0.8)],
radius=ft(0.3),
shape=3,
),
]
).translate([ft(-5), 0, ft(-3)])
]
)
airplane.draw_three_view()
# airplane.export_XFLR("test.xml", mass_props=asb.MassProperties(mass=1, Ixx=1, Iyy=1, Izz=1))
# ===== End of file: /AeroSandbox-4.1.1.tar.gz/AeroSandbox-4.1.1/aerosandbox/geometry/airplane.py =====
from aerosandbox.common import AeroSandboxObject
from aerosandbox.geometry.common import *
from typing import List, Dict, Any, Tuple, Union, Optional, Callable
from aerosandbox.geometry.airfoil import Airfoil
from numpy import pi
import aerosandbox.numpy as np
import aerosandbox.geometry.mesh_utilities as mesh_utils
import copy
class Wing(AeroSandboxObject):
"""
Definition for a Wing.
Anatomy of a Wing:
A wing consists chiefly of a collection of cross-sections, or "xsecs". A cross-section is a 2D "slice" of a
wing. These can be accessed with `Wing.xsecs`, which gives a list of xsecs in the Wing. Each xsec is a
WingXSec object, a class that is defined separately.
    You may also see references to wing "sections", which are different from cross-sections (xsecs)! Sections are
the portions of the wing that are in between xsecs. In other words, a wing with N cross-sections (xsecs,
WingXSec objects) will always have N-1 sections. Sections are never explicitly defined, since you can get all
needed information by lofting from the adjacent cross-sections. For example, section 0 (the first one) is a
loft between cross-sections 0 and 1.
Wings are lofted linearly between cross-sections.
If the wing is symmetric across the XZ plane, just define the right half and supply `symmetric=True` in the
constructor.
If the wing is not symmetric across the XZ plane (e.g., a single vertical stabilizer), just define the wing.
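    A minimal construction sketch (the dimensions below are arbitrary placeholders, not from any real airplane):
    >>> import aerosandbox as asb
    >>> wing = asb.Wing(
    >>>     name="Main Wing",
    >>>     symmetric=True,  # The right half defined below is mirrored across the XZ plane.
    >>>     xsecs=[
    >>>         asb.WingXSec(xyz_le=[0, 0, 0], chord=1.0, airfoil=asb.Airfoil("naca2412")),
    >>>         asb.WingXSec(xyz_le=[0.2, 4, 0], chord=0.6, airfoil=asb.Airfoil("naca2412")),
    >>>     ]
    >>> )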
"""
def __init__(self,
name: Optional[str] = None,
xsecs: List['WingXSec'] = None,
symmetric: bool = False,
color: Optional[Union[str, Tuple[float]]] = None,
analysis_specific_options: Optional[Dict[type, Dict[str, Any]]] = None,
**kwargs, # Only to allow for capturing of deprecated arguments, don't use this.
):
"""
Defines a new wing object.
Args:
name: Name of the wing [optional]. It can help when debugging to give each wing a sensible name.
xsecs: A list of wing cross-sections ("xsecs") in the form of WingXSec objects.
symmetric: Is the wing symmetric across the XZ plane?
color: Determines what color to use for this component when drawing the airplane. Optional,
and for visualization purposes only. If left as None, a default color will be chosen at the time of
drawing (usually, black). Can be any color format recognized by MatPlotLib, namely:
* A RGB or RGBA tuple of floats in the interval [0, 1], e.g., (0.1, 0.2, 0.5, 0.3)
* Case-insensitive hex RGB or RGBA string, e.g., '#0f0f0f80'
* String representation of float value in closed interval [0, 1] for grayscale values, e.g.,
'0.8' for light gray
* Single character shorthand notation for basic colors, e.g., 'k' -> black, 'r' -> red
See also: https://matplotlib.org/stable/tutorials/colors/colors.html
analysis_specific_options: Analysis-specific options are additional constants or modeling assumptions
that should be passed on to specific analyses and associated with this specific geometry object.
This should be a dictionary where:
* Keys are specific analysis types (typically a subclass of asb.ExplicitAnalysis or
asb.ImplicitAnalysis), but if you decide to write your own analysis and want to make this key
something else (like a string), that's totally fine - it's just a unique identifier for the
specific analysis you're running.
* Values are a dictionary of key:value pairs, where:
* Keys are strings.
* Values are some value you want to assign.
This is more easily demonstrated / understood with an example:
>>> analysis_specific_options = {
>>> asb.AeroBuildup: dict(
>>> include_wave_drag=True,
>>> )
>>> }
"""
### Set defaults
if name is None:
name = "Untitled"
if xsecs is None:
xsecs: List['WingXSec'] = []
if analysis_specific_options is None:
analysis_specific_options = {}
### Initialize
self.name = name
self.xsecs = xsecs
self.symmetric = symmetric
self.color = color
self.analysis_specific_options = analysis_specific_options
### Handle deprecated parameters
        if 'xyz_le' in kwargs:  # This deprecated argument arrives via **kwargs, so check there rather than locals().
            import warnings
            warnings.warn(
                "The `xyz_le` input for Wing is pending deprecation and will be removed in a future version. Use Wing().translate(xyz) instead.",
                stacklevel=2
            )
            self.xsecs = [
                xsec.translate(kwargs['xyz_le'])
                for xsec in self.xsecs
            ]
def __repr__(self) -> str:
n_xsecs = len(self.xsecs)
symmetry_description = "symmetric" if self.symmetric else "asymmetric"
        return f"Wing '{self.name}' ({n_xsecs} {'xsec' if n_xsecs == 1 else 'xsecs'}, {symmetry_description})"
def translate(self,
xyz: Union[np.ndarray, List[float]]
) -> 'Wing':
"""
Translates the entire Wing by a certain amount.
Args:
            xyz: The amount to translate the Wing, as a 3-element vector (in geometry axes).
        Returns: A new Wing object (a translated copy; the original is not modified).
"""
new_wing = copy.copy(self)
new_wing.xsecs = [
xsec.translate(xyz)
for xsec in new_wing.xsecs
]
return new_wing
def span(self,
type: str = "yz",
include_centerline_distance=False,
_sectional: bool = False,
) -> Union[float, List[float]]:
"""
Computes the span, with options for various ways of measuring this (see `type` argument).
If the wing is symmetric, both left/right sides are included in order to obtain the full span. In the case
where the root cross-section is not coincident with the center plane (e.g., XZ plane), this function's
behavior depends on the `include_centerline_distance` argument.
Args:
type: One of the following options, as a string:
* "xyz": First, computes the quarter-chord point of each WingXSec. Then, connects these with
straight lines. Then, adds up the lengths of these lines.
* "xy" or "top": Same as "xyz", except it projects each line segment onto the XY plane before adding up the
lengths.
* "yz" (default) or "front": Same as "xyz", except it projects each line segment onto the YZ plane (i.e., front view)
before adding up the lengths.
* "xz" or "side": Same as "xyz", except it projects each line segment onto the XZ plane before adding up the
lengths. Rarely needed.
* "x": Same as "xyz", except it only counts the x-components of each line segment when adding up the
lengths.
* "y": Same as "xyz", except it only counts the y-components of each line segment when adding up the
lengths.
* "z": Same as "xyz", except it only counts the z-components of each line segment when adding up the
lengths.
include_centerline_distance: A boolean flag that tells the function what to do if a wing's root is not
coincident with the centerline plane (i.e., XZ plane).
* If True, we first figure out which WingXSec has its quarter-chord point closest to the centerline
plane (i.e., XZ plane). Then, we compute the distance from that quarter-chord point directly to the
centerline plane (along Y). We then add that distance to the span calculation. In other words,
the fictitious span connecting the left and right root cross-sections is included.
* If False, this distance is ignored. In other words, the fictitious span connecting the left and
right root cross-sections is not included. This is the default behavior.
Note: For computation, either the root WingXSec (i.e., index=0) or the tip WingXSec (i.e., index=-1)
is used, whichever is closer to the centerline plane. This will almost-always be the root WingXSec,
but some weird edge cases (e.g., a half-wing defined on the left-hand-side of the airplane,
rather than the conventional right-hand side) will result in the tip WingXSec being used.
_sectional: A boolean. If False, returns the total span. If True, returns a list of spans for each of the
`n-1` lofted sections (between the `n` wing cross-sections in wing.xsec).
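        Example (a sketch; `wing` is assumed to be any Wing with at least two cross-sections):
        >>> b = wing.span()                       # Front-view ("yz") projected span, the default.
        >>> b_3D = wing.span(type="xyz")          # 3D length along the quarter-chord line.
        >>> b_full = wing.span(include_centerline_distance=True)  # Also count any root-to-centerline gap.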
"""
# Check inputs
if include_centerline_distance and _sectional:
raise ValueError("Cannot use `_sectional` with `include_centerline_distance`!")
# Handle overloaded names
if type == "top":
type = "xy"
elif type == "front":
type = "yz"
elif type == "side":
type = "xz"
# Figure out where the quarter-chord points of each WingXSec are
i_range = range(len(self.xsecs))
quarter_chord_locations = [
self._compute_xyz_of_WingXSec(
i,
x_nondim=0.25,
z_nondim=0,
)
for i in i_range
]
# Compute sectional spans
sectional_spans = []
for inner_i, outer_i in zip(i_range, i_range[1:]):
quarter_chord_vector = (
quarter_chord_locations[outer_i] -
quarter_chord_locations[inner_i]
)
if type == "xyz":
section_span = (
quarter_chord_vector[0] ** 2 +
quarter_chord_vector[1] ** 2 +
quarter_chord_vector[2] ** 2
) ** 0.5
elif type == "xy":
section_span = (
quarter_chord_vector[0] ** 2 +
quarter_chord_vector[1] ** 2
) ** 0.5
elif type == "yz":
section_span = (
quarter_chord_vector[1] ** 2 +
quarter_chord_vector[2] ** 2
) ** 0.5
elif type == "xz":
section_span = (
quarter_chord_vector[0] ** 2 +
quarter_chord_vector[2] ** 2
) ** 0.5
elif type == "x":
section_span = quarter_chord_vector[0]
elif type == "y":
section_span = quarter_chord_vector[1]
elif type == "z":
section_span = quarter_chord_vector[2]
else:
raise ValueError("Bad value of 'type'!")
sectional_spans.append(section_span)
if _sectional:
return sectional_spans
half_span = sum(sectional_spans)
if include_centerline_distance and len(self.xsecs) > 0:
            half_span_to_XZ_plane = np.inf
for i in i_range:
half_span_to_XZ_plane = np.minimum(
half_span_to_XZ_plane,
np.abs(quarter_chord_locations[i][1])
)
half_span = half_span + half_span_to_XZ_plane
if self.symmetric:
span = 2 * half_span
else:
span = half_span
return span
def area(self,
type: str = "planform",
include_centerline_distance=False,
_sectional: bool = False,
) -> Union[float, List[float]]:
"""
Computes the wing area, with options for various ways of measuring this (see `type` argument):
If the wing is symmetric, both left/right sides are included in order to obtain the full area. In the case
where the root cross-section is not coincident with the center plane (e.g., XZ plane), this function's
behavior depends on the `include_centerline_distance` argument.
Args:
type: One of the following options, as a string:
* "planform" (default): First, lofts a quadrilateral mean camber surface between each WingXSec. Then,
computes the area of each of these sectional surfaces. Then, sums up all the areas and returns it.
When airplane designers refer to "wing area" (in the absence of any other qualifiers),
this is typically what they mean.
* "wetted": Computes the actual surface area of the wing that is in contact with the air. Will
typically be a little more than double the "planform" area above; intuitively, this is because it
adds both the "top" and "bottom" surface areas. Accounts for airfoil thickness/shape effects.
* "xy" or "projected" or "top": Same as "planform", but each sectional surface is projected onto the XY plane
(i.e., top-down view) before computing the areas. Note that if you try to use this method with a
vertically-oriented wing, like most vertical stabilizers, you will get an area near zero.
* "xz" or "side": Same as "planform", but each sectional surface is projected onto the XZ plane before
computing the areas.
include_centerline_distance: A boolean flag that tells the function what to do if a wing's root chord is
not coincident with the centerline plane (i.e., XZ plane).
* If True, we first figure out which WingXSec is closest to the centerline plane (i.e., XZ plane).
Then, we imagine that this WingXSec is extruded along the Y axis to the centerline plane (assuming a
straight extrusion to produce a rectangular mid-camber surface). In doing so, we use the wing
geometric chord as the extrusion width. We then add the area of this fictitious surface to the area
calculation.
* If False, this function will simply ignore this fictitious wing area. This is the default behavior.
_sectional: A boolean. If False, returns the total area. If True, returns a list of areas for each of the
`n-1` lofted sections (between the `n` wing cross-sections in wing.xsec).
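        Example (a sketch; `wing` is assumed to be any Wing object):
        >>> S = wing.area()                   # Planform area, the usual "wing area".
        >>> S_wet = wing.area(type="wetted")  # Surface area in contact with the air (top + bottom).
        >>> S_top = wing.area(type="xy")      # Top-down projected area.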
"""
# Check inputs
if include_centerline_distance and _sectional:
raise ValueError("`include_centerline_distance` and `_sectional` cannot both be True!")
# Handle overloaded names
if type == "projected" or type == "top":
type = "xy"
elif type == "side":
type = "xz"
# Compute sectional areas. Each method must compute the sectional spans and the effective chords at each
# cross-section to use.
if type == "planform":
sectional_spans = self.span(type="yz", _sectional=True)
xsec_chords = [xsec.chord for xsec in self.xsecs]
elif type == "wetted":
sectional_spans = self.span(type="yz", _sectional=True)
xsec_chords = [
xsec.chord * xsec.airfoil.perimeter()
for xsec in self.xsecs
]
elif type == "xy":
sectional_spans = self.span(type="y", _sectional=True)
xsec_chords = [xsec.chord for xsec in self.xsecs]
elif type == "yz":
raise ValueError("Area of wing projected to the YZ plane is zero.")
# sectional_spans = self.span(type="yz", _sectional=True)
# xsec_chords = [xsec.chord for xsec in self.xsecs]
elif type == "xz":
sectional_spans = self.span(type="z", _sectional=True)
xsec_chords = [xsec.chord for xsec in self.xsecs]
else:
raise ValueError("Bad value of `type`!")
sectional_chords = [
(inner_chord + outer_chord) / 2
for inner_chord, outer_chord in zip(
xsec_chords[1:],
xsec_chords[:-1]
)
]
sectional_areas = [
span * chord
for span, chord in zip(
sectional_spans,
sectional_chords
)
]
if _sectional:
return sectional_areas
half_area = sum(sectional_areas)
if include_centerline_distance and len(self.xsecs) > 0:
            half_span_to_centerline = np.inf
for i in range(len(self.xsecs)):
quarter_chord_location = self._compute_xyz_of_WingXSec(
i,
x_nondim=0.25,
z_nondim=0,
)
half_span_to_centerline = np.minimum(
half_span_to_centerline,
np.abs(quarter_chord_location[1])
)
half_area = half_area + (
half_span_to_centerline * self.mean_geometric_chord()
)
if self.symmetric: # Returns the total area of both the left and right wing halves on mirrored wings.
area = 2 * half_area
else:
area = half_area
return area
def aspect_ratio(self,
type: str = "geometric",
) -> float:
"""
Computes the aspect ratio of the wing, with options for various ways of measuring this.
* geometric: geometric aspect ratio, computed in the typical fashion (b^2 / S).
* effective: Differs from the geometric aspect ratio only in the case of symmetric wings whose root
cross-section is not on the centerline. In these cases, it includes the span and area of the fictitious wing
center when computing aspect ratio.
Args:
type: One of the above options, as a string.
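        Example (a sketch; for a rectangular wing of span 8 and chord 1 whose root lies on the centerline,
        both options give 8 ** 2 / 8 = 8):
        >>> AR = wing.aspect_ratio()                      # Geometric: b^2 / S.
        >>> AR_eff = wing.aspect_ratio(type="effective")  # Includes the fictitious center section, if any.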
"""
if type == "geometric":
return self.span() ** 2 / self.area()
elif type == "effective":
return (
self.span(type="yz", include_centerline_distance=True) ** 2 /
self.area(type="planform", include_centerline_distance=True)
)
else:
raise ValueError("Bad value of `type`!")
def is_entirely_symmetric(self) -> bool:
        # Returns a boolean of whether the wing is totally symmetric (i.e., every xsec has symmetric control surfaces).
        for xsec in self.xsecs:  # To be symmetric, every control surface must be symmetric (or undeflected).
for surf in xsec.control_surfaces:
if not (surf.symmetric or surf.deflection == 0):
return False
if not self.symmetric: # If the wing itself isn't mirrored (e.g., vertical stabilizer), check that it's symmetric
for xsec in self.xsecs:
if not xsec.xyz_le[1] == 0: # Surface has to be right on the centerline
return False
if not xsec.twist == 0: # Surface has to be untwisted
return False
if not np.allclose(xsec.airfoil.local_camber(), 0): # Surface has to have a symmetric airfoil.
return False
return True
def mean_geometric_chord(self) -> float:
"""
Returns the mean geometric chord of the wing (S/b).
:return:
"""
return self.area() / self.span()
def mean_aerodynamic_chord(self) -> float:
"""
Computes the length of the mean aerodynamic chord of the wing.
Uses the generalized methodology described here:
https://core.ac.uk/download/pdf/79175663.pdf
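        For each lofted section with inner (root-side) chord c_r and taper ratio t = c_outer / c_inner, the
        sectional MAC length is (2 / 3) * c_r * (1 + t + t ** 2) / (1 + t); the value returned here is the
        area-weighted average of these sectional values.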
Returns: The length of the mean aerodynamic chord.
"""
sectional_areas = self.area(_sectional=True)
sectional_MAC_lengths = []
for inner_xsec, outer_xsec in zip(self.xsecs[:-1], self.xsecs[1:]):
section_taper_ratio = outer_xsec.chord / inner_xsec.chord
section_MAC_length = (2 / 3) * inner_xsec.chord * (
(1 + section_taper_ratio + section_taper_ratio ** 2) /
(1 + section_taper_ratio)
)
sectional_MAC_lengths.append(section_MAC_length)
sectional_MAC_length_area_products = [
MAC * area
for MAC, area in zip(
sectional_MAC_lengths,
sectional_areas,
)
]
MAC_length = sum(sectional_MAC_length_area_products) / sum(sectional_areas)
return MAC_length
def mean_twist_angle(self) -> float:
r"""
Returns the mean twist angle (in degrees) of the wing, weighted by area.
:return: mean twist angle (in degrees)
"""
sectional_twists = [
(inner_xsec.twist + outer_xsec.twist) / 2
for inner_xsec, outer_xsec in zip(
self.xsecs[1:],
self.xsecs[:-1]
)
]
sectional_areas = self.area(_sectional=True)
sectional_twist_area_products = [
twist * area
for twist, area in zip(
sectional_twists, sectional_areas
)
]
mean_twist = sum(sectional_twist_area_products) / sum(sectional_areas)
return mean_twist
def mean_sweep_angle(self,
x_nondim=0.25
) -> float:
"""
Returns the mean sweep angle (in degrees) of the wing, relative to the x-axis.
Positive sweep is backwards, negative sweep is forward.
This is purely measured from root to tip, with no consideration for the sweep of the individual
cross-sections in between.
Args:
x_nondim: The nondimensional x-coordinate of the cross-section to use for sweep angle computation.
* If you provide 0, it will use the leading edge of the cross-section.
* If you provide 0.25, it will use the quarter-chord point of the cross-section.
* If you provide 1, it will use the trailing edge of the cross-section.
Returns:
The mean sweep angle, in degrees.
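        Example (a sketch; `wing` is assumed to be any Wing object):
        >>> sweep_c4 = wing.mean_sweep_angle()            # Quarter-chord sweep, in degrees.
        >>> sweep_le = wing.mean_sweep_angle(x_nondim=0)  # Leading-edge sweep, in degrees.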
"""
root_quarter_chord = self._compute_xyz_of_WingXSec(
0,
x_nondim=x_nondim,
z_nondim=0
)
tip_quarter_chord = self._compute_xyz_of_WingXSec(
-1,
x_nondim=x_nondim,
z_nondim=0
)
vec = tip_quarter_chord - root_quarter_chord
vec_norm = vec / np.linalg.norm(vec)
sin_sweep = vec_norm[0] # from dot product with x_hat
sweep_deg = np.arcsind(sin_sweep)
return sweep_deg
def mean_dihedral_angle(self,
x_nondim=0.25
) -> float:
"""
Returns the mean dihedral angle (in degrees) of the wing, relative to the XY plane.
Positive dihedral is bending up, negative dihedral is bending down.
This is purely measured from root to tip, with no consideration for the dihedral of the individual
cross-sections in between.
Args:
            x_nondim: The nondimensional x-coordinate of the cross-section to use for dihedral angle computation.
* If you provide 0, it will use the leading edge of the cross-section.
* If you provide 0.25, it will use the quarter-chord point of the cross-section.
* If you provide 1, it will use the trailing edge of the cross-section.
Returns:
The mean dihedral angle, in degrees
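        Example (a sketch):
        >>> dihedral = wing.mean_dihedral_angle()  # Degrees; positive means the tip sits above the root.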
"""
root_quarter_chord = self._compute_xyz_of_WingXSec(
0,
x_nondim=x_nondim,
z_nondim=0
)
tip_quarter_chord = self._compute_xyz_of_WingXSec(
-1,
x_nondim=x_nondim,
z_nondim=0
)
vec = tip_quarter_chord - root_quarter_chord
vec_norm = vec / np.linalg.norm(vec)
return np.arctan2d(
vec_norm[2],
vec_norm[1],
)
def aerodynamic_center(self, chord_fraction: float = 0.25, _sectional=False) -> np.ndarray:
"""
Computes the location of the aerodynamic center of the wing.
Uses the generalized methodology described here:
        https://core.ac.uk/download/pdf/79175663.pdf
        Args:
            chord_fraction: The position of the aerodynamic center along the MAC, as a fraction of MAC length.
Typically, this value (denoted `h_0` in the literature) is 0.25 for a subsonic wing. However,
wing-fuselage interactions can cause a forward shift to a value more like 0.1 or less. Citing Cook,
Michael V., "Flight Dynamics Principles", 3rd Ed., Sect. 3.5.3 "Controls-fixed static stability". PDF:
https://www.sciencedirect.com/science/article/pii/B9780080982427000031
Returns: The (x, y, z) coordinates of the aerodynamic center of the wing.
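        Example (a sketch; with the default `chord_fraction=0.25`, this is the point at 25% of the MAC):
        >>> x_ac, y_ac, z_ac = wing.aerodynamic_center()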
"""
sectional_areas = self.area(_sectional=True)
sectional_ACs = []
for inner_xsec, outer_xsec in zip(self.xsecs[:-1], self.xsecs[1:]):
section_taper_ratio = outer_xsec.chord / inner_xsec.chord
section_MAC_length = (2 / 3) * inner_xsec.chord * (
(1 + section_taper_ratio + section_taper_ratio ** 2) /
(1 + section_taper_ratio)
)
section_MAC_le = (
inner_xsec.xyz_le +
(outer_xsec.xyz_le - inner_xsec.xyz_le) *
(1 + 2 * section_taper_ratio) /
(3 + 3 * section_taper_ratio)
)
section_AC = section_MAC_le + np.array([ # TODO rotate this vector by the local twist angle
chord_fraction * section_MAC_length,
0,
0
])
sectional_ACs.append(section_AC)
if _sectional:
return sectional_ACs
sectional_AC_area_products = [
AC * area
for AC, area in zip(
sectional_ACs,
sectional_areas,
)
]
aerodynamic_center = sum(sectional_AC_area_products) / sum(sectional_areas)
if self.symmetric:
aerodynamic_center[1] = 0
return aerodynamic_center
def taper_ratio(self) -> float:
"""
Gives the taper ratio of the Wing. Strictly speaking, only valid for trapezoidal wings.
Returns:
Taper ratio of the Wing.
"""
return self.xsecs[-1].chord / self.xsecs[0].chord
def volume(self,
_sectional: bool = False
) -> Union[float, List[float]]:
"""
Computes the volume of the Wing.
Args:
_sectional: A boolean. If False, returns the total volume. If True, returns a list of volumes for each of
the `n-1` lofted sections (between the `n` wing cross-sections in wing.xsec).
Returns:
The computed volume.
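        Each section's volume follows the conical-frustum rule, V = (h / 3) * (A_1 + A_2 + sqrt(A_1 * A_2)),
        where h is the sectional (front-view) span and A_1, A_2 are the adjacent cross-sectional areas.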
"""
xsec_areas = [
xsec.xsec_area()
for xsec in self.xsecs
]
separations = self.span(
type="yz",
_sectional=True
)
sectional_volumes = [
separation / 3 * (area_a + area_b + (area_a * area_b + 1e-100) ** 0.5)
for area_a, area_b, separation in zip(
xsec_areas[1:],
xsec_areas[:-1],
separations
)
]
volume = sum(sectional_volumes)
if self.symmetric:
volume *= 2
if _sectional:
return sectional_volumes
else:
return volume
def get_control_surface_names(self) -> List[str]:
"""
Gets the names of all control surfaces on this wing.
Returns:
A list of control surface names.
"""
control_surface_names = []
for xsec in self.xsecs:
for control_surface in xsec.control_surfaces:
control_surface_names.append(control_surface.name)
return control_surface_names
def set_control_surface_deflections(self,
control_surface_mappings: Dict[str, float],
) -> None:
"""
Sets the deflection of all control surfaces on this wing, based on the provided mapping.
Args:
control_surface_mappings: A dictionary mapping control surface names to their deflection angles, in degrees.
Note: control surface names are set in the asb.ControlSurface constructor.
Returns:
None. (in-place)
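        Example (a sketch, assuming a control surface named "aileron" was defined on one of this wing's xsecs):
        >>> wing.set_control_surface_deflections({"aileron": 5.0})  # Sets the aileron deflection to 5 degrees.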
"""
for xsec in self.xsecs:
for control_surface in xsec.control_surfaces:
if control_surface.name in control_surface_mappings.keys():
control_surface.deflection = control_surface_mappings[control_surface.name]
def control_surface_area(self,
by_name: Optional[str] = None,
type: Optional[str] = "planform",
) -> float:
"""
Computes the total area of all control surfaces on this wing, optionally filtered by their name.
Control surfaces are defined on a section-by-section basis, and are defined in the WingXSec constructor using
its `control_surfaces` argument.
Note: If redundant control surfaces are defined (e.g., elevons, as defined by separate ailerons + elevator),
the area will be duplicated.
If the wing is symmetric, control surfaces on both left/right sides are included in order to obtain the full area.
Args:
by_name: If not None, only control surfaces with this name will be included in the area calculation.
Note: control surface names are set in the asb.ControlSurface constructor.
type: One of the following options, as a string:
* "planform" (default): First, lofts a quadrilateral mean camber surface between each WingXSec. Then,
computes the area of each of these sectional surfaces. Then, computes what fraction of this area is
control surface. Then, sums up all the areas and returns it. When airplane designers refer to
"control surface area" (in the absence of any other qualifiers), this is typically what they mean.
* "wetted": Computes the actual surface area of the control surface that is in contact with the air.
Will typically be a little more than double the "planform" area above; intuitively, this is because
it adds both the "top" and "bottom" surface areas. Accounts for airfoil thickness/shape effects.
* "xy" or "projected" or "top": Same as "planform", but each sectional surface is projected onto the XY plane
(i.e., top-down view) before computing the areas. Note that if you try to use this method with a
vertically-oriented wing, like most vertical stabilizers, you will get an area near zero.
* "xz" or "side": Same as "planform", but each sectional surface is projected onto the XZ plane before
computing the areas.
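        Example (a sketch):
        >>> S_ail = wing.control_surface_area(by_name="aileron")  # Area of just the surfaces named "aileron".
        >>> S_all = wing.control_surface_area()                   # Area of all control surfaces on this wing.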
"""
sectional_areas = self.area(
type=type,
include_centerline_distance=False,
_sectional=True
)
control_surface_area = 0.
for xsec, sect_area in zip(self.xsecs[:-1], sectional_areas):
for control_surface in xsec.control_surfaces:
if (by_name is None) or (control_surface.name == by_name):
if control_surface.trailing_edge:
control_surface_chord_fraction = np.maximum(
1 - control_surface.hinge_point,
0
)
else:
control_surface_chord_fraction = np.maximum(
control_surface.hinge_point,
0
)
control_surface_area += control_surface_chord_fraction * sect_area
if self.symmetric:
control_surface_area *= 2
return control_surface_area
def mesh_body(self,
method="quad",
chordwise_resolution: int = 36,
chordwise_spacing_function_per_side: Callable[[float, float, float], np.ndarray] = np.cosspace,
mesh_surface: bool = True,
mesh_tips: bool = True,
mesh_trailing_edge: bool = True,
mesh_symmetric: bool = True,
) -> Tuple[np.ndarray, np.ndarray]:
"""
Meshes the outer mold line surface of the wing.
Uses the `(points, faces)` standard mesh format. For reference on this format, see the documentation in
`aerosandbox.geometry.mesh_utilities`.
Order of faces:
* On the right wing (or, if `Wing.symmetric` is `False`, just the wing itself):
* If `mesh_surface` is `True`:
* First face is nearest the top-side trailing edge of the wing root.
* Proceeds chordwise, along the upper surface of the wing from back to front. Upon reaching the
leading edge, continues along the lower surface of the wing from front to back.
* Then, repeats this process for the next spanwise slice of the wing, and so on.
* If `mesh_trailing_edge` is `True`:
* Continues by meshing the trailing edge of the wing. Meshes the inboard trailing edge first, then
proceeds spanwise to the outboard trailing edge.
* If `mesh_tips` is `True`:
* Continues by meshing the wing tips. Meshes the inboard tip first, then meshes the outboard tip.
                * Within each tip, meshes from the trailing edge towards the leading edge, pairing upper- and
                    lower-surface points.
Args:
method: One of the following options, as a string:
* "tri": Triangular mesh.
* "quad": Quadrilateral mesh.
chordwise_resolution: Number of points to use per wing chord, per wing section.
chordwise_spacing_function_per_side: A function that determines how to space points in the chordwise
direction along the top and bottom surfaces. Common values would be `np.linspace` or `np.cosspace`,
but it can be any function with the call signature `f(a, b, n)` that returns a spaced array of `n` points
between `a` and `b`. [function]
mesh_surface: If True, includes the actual wing surface in the mesh.
mesh_tips: If True, includes the wing tips (both on the inboard-most section and on the outboard-most
section) in the mesh.
mesh_trailing_edge: If True, includes the wing trailing edge in the mesh, if the trailing-edge thickness
is nonzero.
            mesh_symmetric: Has no effect if the wing is not symmetric. If the wing is symmetric, this determines whether
                the generated mesh is also symmetric, or if only one side of the wing (the right side) is meshed.
Returns: Standard unstructured mesh format: A tuple of `points` and `faces`, where:
* `points` is a `n x 3` array of points, where `n` is the number of points in the mesh.
* `faces` is a `m x 3` array of faces if `method` is "tri", or a `m x 4` array of faces if `method` is "quad".
* Each row of `faces` is a list of indices into `points`, which specifies a face.
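        Example (a sketch):
        >>> points, faces = wing.mesh_body(method="tri", chordwise_resolution=24)
        >>> # points: (n, 3) float array of vertices; faces: (m, 3) int array of indices into `points`.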
"""
airfoil_nondim_coordinates = np.array([
xsec.airfoil
.repanel(
n_points_per_side=chordwise_resolution + 1,
spacing_function_per_side=chordwise_spacing_function_per_side,
)
.coordinates
for xsec in self.xsecs
])
x_nondim = airfoil_nondim_coordinates[:, :, 0].T
y_nondim = airfoil_nondim_coordinates[:, :, 1].T
spanwise_strips = []
for x_n, y_n in zip(x_nondim, y_nondim):
spanwise_strips.append(
np.stack(
self.mesh_line(
x_nondim=x_n,
z_nondim=y_n,
add_camber=False,
),
axis=0
)
)
points = np.concatenate(spanwise_strips, axis=0)
faces = []
num_i = (len(self.xsecs) - 1)
num_j = len(spanwise_strips) - 1
def index_of(iloc, jloc):
return iloc + jloc * (num_i + 1)
def add_face(*indices):
entry = list(indices)
if method == "quad":
faces.append(entry)
elif method == "tri":
faces.append([entry[0], entry[1], entry[3]])
faces.append([entry[1], entry[2], entry[3]])
if mesh_surface:
for i in range(num_i):
for j in range(num_j):
add_face(
index_of(i, j),
index_of(i + 1, j),
index_of(i + 1, j + 1),
index_of(i, j + 1),
)
if mesh_tips:
for j in range(num_j // 2):
add_face( # Mesh the root face
index_of(0, num_j - j),
index_of(0, j),
index_of(0, j + 1),
index_of(0, num_j - j - 1),
)
add_face( # Mesh the tip face
index_of(num_i, j),
index_of(num_i, j + 1),
index_of(num_i, num_j - j - 1),
index_of(num_i, num_j - j),
)
if mesh_trailing_edge:
for i in range(num_i):
add_face(
index_of(i + 1, 0),
index_of(i + 1, num_j),
index_of(i, num_j),
index_of(i, 0),
)
faces = np.array(faces)
if mesh_symmetric and self.symmetric:
flipped_points = np.multiply(
points,
np.array([
[1, -1, 1]
])
)
points, faces = mesh_utils.stack_meshes(
(points, faces),
(flipped_points, faces)
)
return points, faces
def mesh_thin_surface(self,
method="tri",
chordwise_resolution: int = 36,
chordwise_spacing_function: Callable[[float, float, float], np.ndarray] = np.cosspace,
add_camber: bool = True,
) -> Tuple[np.ndarray, np.ndarray]:
"""
Meshes the mean camber line of the wing as a thin-sheet body.
Uses the `(points, faces)` standard mesh format. For reference on this format, see the documentation in
`aerosandbox.geometry.mesh_utilities`.
Order of faces:
* On the right wing (or, if `Wing.symmetric` is `False`, just the wing itself):
* First face is the face nearest the leading edge of the wing root.
* Proceeds along a chordwise strip to the trailing edge.
* Then, goes to the subsequent spanwise location and does another chordwise strip, et cetera until
we get to the wing tip.
* On the left wing (applicable only if `Wing.symmetric` is `True`):
* Same order: Starts at the root leading edge, goes in chordwise strips.
Order of vertices within each face:
* On the right wing (or, if `Wing.symmetric` is `False`, just the wing itself):
* Front-left
* Back-left
* Back-right
* Front-right
* On the left wing (applicable only if `Wing.symmetric` is `True`):
* Front-left
* Back-left
* Back-right
* Front-right
Args:
            method: A string, which determines whether to mesh the wing as a series of quadrilaterals or triangles.
                * "quad" meshes the wing as a series of quadrilaterals.
                * "tri" meshes the wing as a series of triangles.
chordwise_resolution: Determines the number of chordwise panels to use in the meshing. [int]
chordwise_spacing_function: Determines how to space the chordwise panels. Can be `np.linspace` or
`np.cosspace`, or any other function of the call signature `f(a, b, n)` that returns a spaced array of
`n` points between `a` and `b`. [function]
add_camber: Controls whether to mesh the thin surface with camber (i.e., mean camber line), or to just
mesh the flat planform. [bool]
Returns: Standard unstructured mesh format: A tuple of `points` and `faces`, where:
* `points` is a `n x 3` array of points, where `n` is the number of points in the mesh.
* `faces` is a `m x 3` array of faces if `method` is "tri", or a `m x 4` array of faces if `method` is "quad".
* Each row of `faces` is a list of indices into `points`, which specifies a face.
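        Example (a sketch; this kind of camber-surface mesh is typically what vortex-lattice-style analyses consume):
        >>> points, faces = wing.mesh_thin_surface(method="quad", add_camber=True)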
"""
x_nondim = chordwise_spacing_function(
0,
1,
chordwise_resolution + 1
)
spanwise_strips = []
for x_n in x_nondim:
spanwise_strips.append(
np.stack(
self.mesh_line(
x_nondim=x_n,
z_nondim=0,
add_camber=add_camber,
),
axis=0
)
)
points = np.concatenate(spanwise_strips)
faces = []
num_i = np.length(spanwise_strips[0]) # spanwise
num_j = np.length(spanwise_strips) # chordwise
def index_of(iloc, jloc):
return iloc + jloc * num_i
def add_face(*indices):
entry = list(indices)
if method == "quad":
faces.append(entry)
elif method == "tri":
faces.append([entry[0], entry[1], entry[3]])
faces.append([entry[1], entry[2], entry[3]])
for i in range(num_i - 1):
for j in range(num_j - 1):
add_face( # On right wing:
index_of(i, j), # Front-left
index_of(i, j + 1), # Back-left
index_of(i + 1, j + 1), # Back-right
index_of(i + 1, j), # Front-right
)
if self.symmetric:
index_offset = np.length(points)
points = np.concatenate([
points,
np.multiply(points, np.array([[1, -1, 1]]))
])
def index_of(iloc, jloc):
return index_offset + iloc + jloc * num_i
for i in range(num_i - 1):
for j in range(num_j - 1):
add_face( # On left wing:
index_of(i + 1, j), # Front-left
index_of(i + 1, j + 1), # Back-left
index_of(i, j + 1), # Back-right
index_of(i, j), # Front-right
)
faces = np.array(faces)
return points, faces
def mesh_line(self,
x_nondim: Union[float, List[float]] = 0.25,
z_nondim: Union[float, List[float]] = 0,
add_camber: bool = True,
) -> List[np.ndarray]:
"""
Meshes a line that goes through each of the WingXSec objects in this wing.
Args:
x_nondim: The nondimensional (chord-normalized) x-coordinate that the line should go through. Can either
be a single value used at all cross-sections, or can be an iterable of values to be used at the
respective cross-sections.
            z_nondim: The nondimensional (chord-normalized) "vertical" coordinate that the line should go through,
                measured in each cross-section's local airfoil axes (this is what standard 2D airfoil axes usually
                call the y-coordinate). Can either be a single value used at all cross-sections, or can be an
                iterable of values to be used at the respective cross-sections.
add_camber: Controls whether the camber of each cross-section's airfoil should be added to the line or
not. Essentially modifies `z_nondim` to be `z_nondim + camber`.
Returns: A list of points, where each point is a 3-element array of the form `[x, y, z]`. Goes from the root
to the tip. Ignores any wing symmetry (e.g., only gives one side).
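        Example (a sketch): the quarter-chord line of the wing, one point per cross-section:
        >>> quarter_chord_points = wing.mesh_line(x_nondim=0.25, add_camber=False)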
"""
points_on_line: List[np.ndarray] = []
try:
if len(x_nondim) != len(self.xsecs):
raise ValueError(
f"If `x_nondim` is an iterable, it should be the same length as `Wing.xsecs` ({len(self.xsecs)})."
)
except TypeError:
pass
try:
if len(z_nondim) != len(self.xsecs):
raise ValueError(
f"If `z_nondim` is an iterable, it should be the same length as `Wing.xsecs` ({len(self.xsecs)})."
)
except TypeError:
pass
for i, xsec in enumerate(self.xsecs):
try:
xsec_x_nondim = x_nondim[i]
except (TypeError, IndexError):
xsec_x_nondim = x_nondim
try:
xsec_z_nondim = z_nondim[i]
except (TypeError, IndexError):
xsec_z_nondim = z_nondim
if add_camber:
                xsec_z_nondim = xsec_z_nondim + xsec.airfoil.local_camber(x_over_c=xsec_x_nondim)  # Use this xsec's own x-location.
points_on_line.append(
self._compute_xyz_of_WingXSec(
i,
x_nondim=xsec_x_nondim,
z_nondim=xsec_z_nondim,
)
)
return points_on_line
def draw(self, *args, **kwargs):
"""
An alias to the more general Airplane.draw() method. See there for documentation.
Args:
*args: Arguments to pass through to Airplane.draw()
**kwargs: Keyword arguments to pass through to Airplane.draw()
Returns: Same return as Airplane.draw()
"""
from aerosandbox.geometry.airplane import Airplane
return Airplane(wings=[self]).draw(*args, **kwargs)
def draw_wireframe(self, *args, **kwargs):
"""
An alias to the more general Airplane.draw_wireframe() method. See there for documentation.
Args:
*args: Arguments to pass through to Airplane.draw_wireframe()
**kwargs: Keyword arguments to pass through to Airplane.draw_wireframe()
Returns: Same return as Airplane.draw_wireframe()
"""
from aerosandbox.geometry.airplane import Airplane
return Airplane(wings=[self]).draw_wireframe(*args, **kwargs)
def draw_three_view(self, *args, **kwargs):
"""
An alias to the more general Airplane.draw_three_view() method. See there for documentation.
Args:
*args: Arguments to pass through to Airplane.draw_three_view()
**kwargs: Keyword arguments to pass through to Airplane.draw_three_view()
Returns: Same return as Airplane.draw_three_view()
"""
from aerosandbox.geometry.airplane import Airplane
return Airplane(wings=[self]).draw_three_view(*args, **kwargs)
def subdivide_sections(self,
ratio: int,
spacing_function: Callable[[float, float, float], np.ndarray] = np.linspace
) -> "Wing":
"""
Generates a new Wing that subdivides the existing sections of this Wing into several smaller ones. Splits
each section into N=`ratio` smaller sub-sections by inserting new cross-sections (xsecs) as needed.
This can allow for finer aerodynamic resolution of sectional properties in certain analyses.
Args:
ratio: The number of new sections to split each old section into.
spacing_function: A function that takes in three arguments: the start, end, and number of points to generate.
The default is `np.linspace`, which generates a linearly-spaced array of points.
Other options include `np.cosspace`, which generates a cosine-spaced array of points.
Returns: A new Wing object with subdivided sections.
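        Example (a sketch): split each existing section into 5 smaller ones for finer sectional resolution:
        >>> fine_wing = wing.subdivide_sections(ratio=5)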
"""
if not (ratio >= 2 and isinstance(ratio, int)):
raise ValueError("`ratio` must be an integer greater than or equal to 2.")
new_xsecs = []
span_fractions_along_section = spacing_function(0, 1, ratio + 1)[:-1]
for xsec_a, xsec_b in zip(self.xsecs[:-1], self.xsecs[1:]):
for s in span_fractions_along_section:
a_weight = 1 - s
b_weight = s
if xsec_a.airfoil == xsec_b.airfoil:
blended_airfoil = xsec_a.airfoil
elif a_weight == 1:
blended_airfoil = xsec_a.airfoil
elif b_weight == 1:
blended_airfoil = xsec_b.airfoil
else:
blended_airfoil = xsec_a.airfoil.blend_with_another_airfoil(
airfoil=xsec_b.airfoil,
blend_fraction=b_weight
)
new_xsecs.append(
WingXSec(
xyz_le=xsec_a.xyz_le * a_weight + xsec_b.xyz_le * b_weight,
chord=xsec_a.chord * a_weight + xsec_b.chord * b_weight,
twist=xsec_a.twist * a_weight + xsec_b.twist * b_weight,
airfoil=blended_airfoil,
control_surfaces=xsec_a.control_surfaces,
analysis_specific_options=xsec_a.analysis_specific_options,
)
)
new_xsecs.append(self.xsecs[-1])
return Wing(
name=self.name,
xsecs=new_xsecs,
symmetric=self.symmetric,
analysis_specific_options=self.analysis_specific_options
)
def _compute_xyz_le_of_WingXSec(self, index: int):
return self.xsecs[index].xyz_le
def _compute_xyz_te_of_WingXSec(self, index: int):
return self._compute_xyz_of_WingXSec(
index,
x_nondim=1,
z_nondim=0,
)
def _compute_xyz_of_WingXSec(self,
index,
x_nondim,
z_nondim,
):
xg_local, yg_local, zg_local = self._compute_frame_of_WingXSec(index)
origin = self.xsecs[index].xyz_le
xsec = self.xsecs[index]
return origin + (
x_nondim * xsec.chord * xg_local +
z_nondim * xsec.chord * zg_local
)
def _compute_frame_of_WingXSec(
self, index: int) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""
Computes the local reference frame associated with a particular cross-section (XSec) of this wing.
Args:
index: Which cross-section (as indexed in Wing.xsecs) should we get the frame of?
Returns:
A tuple of (xg_local, yg_local, zg_local), where each entry refers to the respective (normalized) axis of
the local reference frame of the WingXSec. Given in geometry axes.
"""
def project_to_YZ_plane_and_normalize(vector):
YZ_magnitude = (vector[1] ** 2 + vector[2] ** 2) ** 0.5
return np.array([0, vector[1], vector[2]]) / YZ_magnitude
### Compute the untwisted reference frame
xg_local = np.array([1, 0, 0])
if index == 0:
yg_local = project_to_YZ_plane_and_normalize(
self.xsecs[1].xyz_le - self.xsecs[0].xyz_le
)
z_scale = 1
elif index == len(self.xsecs) - 1 or index == -1:
yg_local = project_to_YZ_plane_and_normalize(
self.xsecs[-1].xyz_le - self.xsecs[-2].xyz_le
)
z_scale = 1
else:
vector_before = project_to_YZ_plane_and_normalize(
self.xsecs[index].xyz_le - self.xsecs[index - 1].xyz_le
)
vector_after = project_to_YZ_plane_and_normalize(
self.xsecs[index + 1].xyz_le - self.xsecs[index].xyz_le
)
span_vector = (vector_before + vector_after) / 2
yg_local = span_vector / np.linalg.norm(span_vector)
cos_vectors = np.linalg.inner(vector_before, vector_after)
z_scale = np.sqrt(2 / (cos_vectors + 1))
zg_local = np.cross(xg_local, yg_local) * z_scale
### Twist the reference frame by the WingXSec twist angle
rot = np.rotation_matrix_3D(
self.xsecs[index].twist * pi / 180,
yg_local
)
xg_local = rot @ xg_local
zg_local = rot @ zg_local
return xg_local, yg_local, zg_local
def _compute_frame_of_section(self, index: int) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""
        Computes the local reference frame associated with a particular section. (Note that sections and
        cross-sections are different! Cross-sections, or xsecs, are the vertices, and sections are the parts in
        between. In other words, a wing with N cross-sections (xsecs) will always have N-1 sections.)
Args:
index: Which section should we get the frame of? If given `i`, this retrieves the frame of the section
between xsecs `i` and `i+1`.
Returns:
A tuple of (xg_local, yg_local, zg_local), where each entry refers to the respective (normalized) axis
of the local reference frame of the section. Given in geometry axes.
"""
in_front = self._compute_xyz_le_of_WingXSec(index)
in_back = self._compute_xyz_te_of_WingXSec(index)
out_front = self._compute_xyz_le_of_WingXSec(index + 1)
out_back = self._compute_xyz_te_of_WingXSec(index + 1)
diag1 = out_back - in_front
diag2 = out_front - in_back
cross = np.cross(diag1, diag2)
zg_local = cross / np.linalg.norm(cross)
quarter_chord_vector = (
0.75 * out_front + 0.25 * out_back
) - (
0.75 * in_front + 0.25 * in_back
)
quarter_chord_vector[0] = 0
yg_local = quarter_chord_vector / np.linalg.norm(quarter_chord_vector)
xg_local = np.cross(yg_local, zg_local)
return xg_local, yg_local, zg_local
class WingXSec(AeroSandboxObject):
"""
Definition for a wing cross-section ("X-section").
"""
def __init__(self,
xyz_le: Union[np.ndarray, List] = None,
chord: float = 1.,
twist: float = 0.,
airfoil: Airfoil = None,
control_surfaces: Optional[List['ControlSurface']] = None,
analysis_specific_options: Optional[Dict[type, Dict[str, Any]]] = None,
**deprecated_kwargs,
):
"""
Defines a new wing cross-section.
Args:
xyz_le: An array-like that represents the xyz-coordinates of the leading edge of the cross-section, in
geometry axes.
chord: Chord of the wing at this cross-section.
twist: Twist angle, in degrees, as defined about the leading edge.
The twist axis is computed with the following procedure:
* The quarter-chord point of this WingXSec and the following one are identified.
* A line is drawn connecting them, and it is normalized to a unit direction vector.
* That direction vector is projected onto the geometry Y-Z plane.
* That direction vector is now the twist axis.
airfoil: Airfoil associated with this cross-section. [aerosandbox.Airfoil]
control_surfaces: A list of control surfaces in the form of ControlSurface objects.
analysis_specific_options: Analysis-specific options are additional constants or modeling assumptions
that should be passed on to specific analyses and associated with this specific geometry object.
This should be a dictionary where:
* Keys are specific analysis types (typically a subclass of asb.ExplicitAnalysis or
asb.ImplicitAnalysis), but if you decide to write your own analysis and want to make this key
something else (like a string), that's totally fine - it's just a unique identifier for the
specific analysis you're running.
* Values are a dictionary of key:value pairs, where:
* Keys are strings.
* Values are some value you want to assign.
This is more easily demonstrated / understood with an example:
>>> analysis_specific_options = {
>>> asb.AeroBuildup: dict(
>>> include_wave_drag=True,
>>> )
>>> }
Note: Control surface definition through WingXSec properties (control_surface_is_symmetric, control_surface_hinge_point, control_surface_deflection)
is deprecated. Control surfaces should be handled according to the following protocol:
1. If control_surfaces is an empty list (default, user does not specify any control surfaces), use deprecated WingXSec control surface definition properties.
This will result in 1 control surface at this xsec.
Usage example:
>>> xsecs = asb.WingXSec(
>>> chord = 2
>>> )
2. If control_surfaces is a list of ControlSurface instances, use ControlSurface properties to define control surfaces. This will result in as many
control surfaces at this xsec as there are entries in the control_surfaces list (an arbitrary number >= 1).
Usage example:
            >>> xsecs = asb.WingXSec(
            >>>     chord = 2,
            >>>     control_surfaces = [
            >>>         ControlSurface(
            >>>             trailing_edge = False
            >>>         )
            >>>     ]
            >>> )
3. If control_surfaces is None, override deprecated control surface definition properties and do not define a control surface at this xsec. This will
result in 0 control surfaces at this xsec.
Usage example:
            >>> xsecs = asb.WingXSec(
            >>>     chord = 2,
            >>>     control_surfaces = None
            >>> )
See avl.py for example of control_surface handling using this protocol.
"""
### Set defaults
if xyz_le is None:
xyz_le = np.array([0., 0., 0.])
if airfoil is None:
import warnings
warnings.warn(
"An airfoil is not specified for WingXSec. Defaulting to NACA 0012.",
stacklevel=2
)
airfoil = Airfoil("naca0012")
if control_surfaces is None:
control_surfaces = []
if analysis_specific_options is None:
analysis_specific_options = {}
self.xyz_le = np.array(xyz_le)
self.chord = chord
self.twist = twist
self.airfoil = airfoil
self.control_surfaces = control_surfaces
self.analysis_specific_options = analysis_specific_options
### Handle deprecated arguments
if 'twist_angle' in deprecated_kwargs.keys():
import warnings
warnings.warn(
"DEPRECATED: 'twist_angle' has been renamed 'twist', and will break in future versions.",
stacklevel=2
)
self.twist = deprecated_kwargs['twist_angle']
        if (
                'control_surface_is_symmetric' in deprecated_kwargs or
                'control_surface_hinge_point' in deprecated_kwargs or
                'control_surface_deflection' in deprecated_kwargs
        ):  # These deprecated arguments arrive via **deprecated_kwargs, so check there rather than locals().
            import warnings
            warnings.warn(
                "DEPRECATED: Define control surfaces using the `control_surfaces` parameter, which takes in a list of asb.ControlSurface objects.",
                stacklevel=2
            )
            control_surface_is_symmetric = deprecated_kwargs.get('control_surface_is_symmetric', True)
            control_surface_hinge_point = deprecated_kwargs.get('control_surface_hinge_point', 0.75)
            control_surface_deflection = deprecated_kwargs.get('control_surface_deflection', 0)
            self.control_surfaces.append(
                ControlSurface(
                    hinge_point=control_surface_hinge_point,
                    symmetric=control_surface_is_symmetric,
                    deflection=control_surface_deflection,
                )
            )
def __repr__(self) -> str:
return f"WingXSec (Airfoil: {self.airfoil.name}, chord: {self.chord}, twist: {self.twist})"
def translate(self,
xyz: Union[np.ndarray, List]
) -> "WingXSec":
"""
Returns a copy of this WingXSec that has been translated by `xyz`.
Args:
xyz: The amount to translate the WingXSec. Given as a 3-element NumPy vector.
Returns: A new WingXSec object.
"""
new_xsec = copy.copy(self)
new_xsec.xyz_le = new_xsec.xyz_le + np.array(xyz)
return new_xsec
def xsec_area(self):
"""
Computes the WingXSec's cross-sectional (xsec) area.
Returns: The (dimensional) cross-sectional area of the WingXSec.
"""
return self.airfoil.area() * self.chord ** 2
class ControlSurface(AeroSandboxObject):
"""
Definition for a control surface, which is attached to a particular WingXSec via WingXSec's `control_surfaces=[]` parameter.
"""
def __init__(self,
name: str = "Untitled",
symmetric: bool = True,
deflection: float = 0.0,
hinge_point: float = 0.75,
trailing_edge: bool = True,
analysis_specific_options: Optional[Dict[type, Dict[str, Any]]] = None,
):
"""
Define a new control surface.
Args:
name: Name of the control surface [optional]. It can help when debugging to give each control surface a
sensible name.
symmetric: Is the control surface symmetric? If False, control surface is anti-symmetric. (e.g.,
True for flaps, False for ailerons.)
hinge_point: The location of the control surface hinge, as a fraction of chord. A float in the range of 0 to 1.
deflection: Control deflection, in degrees. Downwards-positive.
            trailing_edge: Is the control surface on the trailing edge? If False, the control surface is on the
                leading edge (e.g., True for flaps, False for slats). Support for leading-edge control surfaces is
                experimental; be aware that not all modules may treat them correctly.
analysis_specific_options: Analysis-specific options are additional constants or modeling assumptions
that should be passed on to specific analyses and associated with this specific geometry object.
This should be a dictionary where:
* Keys are specific analysis types (typically a subclass of asb.ExplicitAnalysis or
asb.ImplicitAnalysis), but if you decide to write your own analysis and want to make this key
something else (like a string), that's totally fine - it's just a unique identifier for the
specific analysis you're running.
* Values are a dictionary of key:value pairs, where:
* Keys are strings.
* Values are some value you want to assign.
This is more easily demonstrated / understood with an example:
>>> analysis_specific_options = {
>>> asb.AeroBuildup: dict(
>>> include_wave_drag=True,
>>> )
>>> }
"""
### Set defaults
if analysis_specific_options is None:
analysis_specific_options = {}
self.name = name
self.symmetric = symmetric
self.deflection = deflection
self.hinge_point = hinge_point
self.trailing_edge = trailing_edge
self.analysis_specific_options = analysis_specific_options
def __repr__(self) -> str:
keys = [
"name",
"symmetric",
"deflection",
"hinge_point",
]
if not self.trailing_edge:
keys += ["trailing_edge"]
info = ", ".join([
f"{k}={self.__dict__[k]}"
for k in keys
])
return f"ControlSurface ({info})"
if __name__ == '__main__':
wing = Wing(
xsecs=[
WingXSec(
xyz_le=[0, 0, 0],
chord=1,
airfoil=Airfoil("naca4412"),
twist=0,
control_surfaces=[
ControlSurface(
name="Elevator",
trailing_edge=True,
hinge_point=0.75,
deflection=5
)
]
),
WingXSec(
xyz_le=[0.5, 1, 0],
chord=0.5,
airfoil=Airfoil("naca4412"),
twist=0,
),
WingXSec(
xyz_le=[0.7, 1, 0.3],
chord=0.3,
airfoil=Airfoil("naca0012"),
twist=0,
)
]
).translate([1, 0, 0])
# wing.subdivide_sections(5).draw()
# ======== End of file: /AeroSandbox-4.1.1.tar.gz/AeroSandbox-4.1.1/aerosandbox/geometry/wing.py ========
from aerosandbox import AeroSandboxObject
from aerosandbox.geometry.common import *
from typing import List, Dict, Any, Union, Tuple, Optional
import copy
class Propulsor(AeroSandboxObject):
"""
Definition for a Propulsor, which could be a propeller, a rotor, or a jet engine.
Assumes a disk- or cylinder-shaped propulsor.
"""
def __init__(self,
name: Optional[str] = "Untitled",
xyz_c: Union[np.ndarray, List[float]] = None,
xyz_normal: Union[np.ndarray, List[float]] = None,
radius: float = 1.,
length: float = 0.,
color: Optional[Union[str, Tuple[float]]] = None,
analysis_specific_options: Optional[Dict[type, Dict[str, Any]]] = None,
):
"""
Defines a new propulsor object.
TODO add docs
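        A minimal usage sketch (parameter values below are arbitrary placeholders):
        >>> prop = Propulsor(
        >>>     name="Main Propeller",
        >>>     xyz_c=[-0.1, 0, 0],     # Center of the disk, in geometry axes.
        >>>     xyz_normal=[-1, 0, 0],  # Direction the disk faces (the default).
        >>>     radius=0.9,
        >>> )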
"""
### Set defaults
if xyz_c is None:
xyz_c = np.array([0., 0., 0.])
if xyz_normal is None:
xyz_normal = np.array([-1., 0., 0.])
if analysis_specific_options is None:
analysis_specific_options = {}
self.name = name
self.xyz_c = np.array(xyz_c)
self.xyz_normal = np.array(xyz_normal)
self.radius = radius
self.length = length
self.color = color
self.analysis_specific_options = analysis_specific_options
def __repr__(self) -> str:
return f"Propulsor '{self.name}' (xyz_c: {self.xyz_c}, radius: {self.radius})"
def xsec_area(self) -> float:
"""
Returns the cross-sectional area of the propulsor, in m^2.
"""
return np.pi * self.radius ** 2
def xsec_perimeter(self) -> float:
"""
Returns the cross-sectional perimeter of the propulsor, in m.
"""
return 2 * np.pi * self.radius
def volume(self) -> float:
"""
Returns the volume of the propulsor, in m^3.
"""
return self.xsec_area() * self.length
def compute_frame(self) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""
Computes the local coordinate frame of the propulsor, in aircraft geometry axes.
xg_local is aligned with the propulsor's normal vector.
zg_local is roughly aligned with the z-axis of the aircraft geometry axes, but projected onto the propulsor's plane.
yg_local is the cross product of zg_local and xg_local.
Returns: A tuple:
xg_local: The x-axis of the local coordinate frame, in aircraft geometry axes.
yg_local: The y-axis of the local coordinate frame, in aircraft geometry axes.
zg_local: The z-axis of the local coordinate frame, in aircraft geometry axes.
"""
xyz_normal = self.xyz_normal / np.linalg.norm(self.xyz_normal)
xg_local = xyz_normal
zg_local = np.array([0, 0, 1])
        zg_local = zg_local - np.dot(zg_local, xg_local) * xg_local
        zg_local = zg_local / np.linalg.norm(zg_local)  # Re-normalize after the projection, so the frame stays orthonormal.
yg_local = np.cross(zg_local, xg_local)
return xg_local, yg_local, zg_local
def get_disk_3D_coordinates(self,
theta: Union[float, np.ndarray] = None,
l_over_length: Union[float, np.ndarray] = None,
) -> Tuple[Union[float, np.ndarray]]:
### Set defaults
if theta is None:
theta = np.linspace(
0,
2 * np.pi,
60 + 1
)[:-1]
if l_over_length is None:
if self.length == 0:
l_over_length = 0
else:
l_over_length = np.linspace(
0,
1,
4
).reshape((1, -1))
theta = np.array(theta).reshape((-1, 1))
st = np.sin(np.mod(theta, 2 * np.pi))
ct = np.cos(np.mod(theta, 2 * np.pi))
x = l_over_length * self.length
y = ct * self.radius
z = st * self.radius
xg_local, yg_local, zg_local = self.compute_frame()
return (
self.xyz_c[0] + x * xg_local[0] + y * yg_local[0] + z * zg_local[0],
self.xyz_c[1] + x * xg_local[1] + y * yg_local[1] + z * zg_local[1],
self.xyz_c[2] + x * xg_local[2] + y * yg_local[2] + z * zg_local[2],
)
def translate(self,
xyz: Union[np.ndarray, List[float]],
) -> 'Propulsor':
"""
Returns a copy of this propulsor that has been translated by `xyz`.
Args:
xyz: The amount to translate the propulsor, in meters. Given in aircraft geometry axes, as with everything else.
Returns: A copy of this propulsor, translated by `xyz`.
"""
new_propulsor = copy.deepcopy(self)
new_propulsor.xyz_c = new_propulsor.xyz_c + np.array(xyz)
return new_propulsor
if __name__ == '__main__':
p_disk = Propulsor(radius=3)
p_can = Propulsor(length=1)
# ======== End of file: /AeroSandbox-4.1.1.tar.gz/AeroSandbox-4.1.1/aerosandbox/geometry/propulsor.py ========
import aerosandbox.numpy as np
from aerosandbox import AeroSandboxObject
from aerosandbox.geometry.common import *
from typing import List, Dict, Any, Union, Tuple, Optional, Callable
import copy
class Fuselage(AeroSandboxObject):
"""
Definition for a Fuselage or other slender body (pod, fuel tank, etc.).
Anatomy of a Fuselage:
A fuselage consists chiefly of a collection of cross-sections, or "xsecs". A cross-section is a 2D "slice" of
a fuselage. These can be accessed with `Fuselage.xsecs`, which gives a list of xsecs in the Fuselage. Each
xsec is a FuselageXSec object, a class that is defined separately.
You may also see references to fuselage "sections", which are different from cross-sections (xsecs)! Sections
are the portions of the fuselage that are in between xsecs. In other words, a fuselage with N cross-sections
(xsecs, FuselageXSec objects) will always have N-1 sections. Sections are never explicitly defined,
since you can get all needed information by lofting from the adjacent cross-sections. For example,
section 0 (the first one) is a loft between cross-sections 0 and 1.
Fuselages are lofted linearly between cross-sections.
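    A minimal construction sketch (stations and radii below are arbitrary placeholders):
    >>> import aerosandbox as asb
    >>> fuselage = asb.Fuselage(
    >>>     name="Fuselage",
    >>>     xsecs=[
    >>>         asb.FuselageXSec(xyz_c=[0, 0, 0], radius=0),    # Pointed nose
    >>>         asb.FuselageXSec(xyz_c=[1, 0, 0], radius=0.5),  # Widest station
    >>>         asb.FuselageXSec(xyz_c=[4, 0, 0], radius=0.1),  # Tapered tail
    >>>     ]
    >>> )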
"""
def __init__(self,
name: Optional[str] = "Untitled",
xsecs: List['FuselageXSec'] = None,
color: Optional[Union[str, Tuple[float]]] = None,
analysis_specific_options: Optional[Dict[type, Dict[str, Any]]] = None,
**kwargs, # Only to allow for capturing of deprecated arguments, don't use this.
):
"""
Defines a new fuselage object.
Args:
name: Name of the fuselage [optional]. It can help when debugging to give each fuselage a sensible name.
xsecs: A list of fuselage cross-sections ("xsecs") in the form of FuselageXSec objects.
color: Determines what color to use for this component when drawing the airplane. Optional,
and for visualization purposes only. If left as None, a default color will be chosen at the time of
drawing (usually, black). Can be any color format recognized by MatPlotLib, namely:
* A RGB or RGBA tuple of floats in the interval [0, 1], e.g., (0.1, 0.2, 0.5, 0.3)
* Case-insensitive hex RGB or RGBA string, e.g., '#0f0f0f80'
* String representation of float value in closed interval [0, 1] for grayscale values, e.g.,
'0.8' for light gray
* Single character shorthand notation for basic colors, e.g., 'k' -> black, 'r' -> red
See also: https://matplotlib.org/stable/tutorials/colors/colors.html
analysis_specific_options: Analysis-specific options are additional constants or modeling assumptions
that should be passed on to specific analyses and associated with this specific geometry object.
This should be a dictionary where:
* Keys are specific analysis types (typically a subclass of asb.ExplicitAnalysis or
asb.ImplicitAnalysis), but if you decide to write your own analysis and want to make this key
something else (like a string), that's totally fine - it's just a unique identifier for the
specific analysis you're running.
* Values are a dictionary of key:value pairs, where:
* Keys are strings.
* Values are some value you want to assign.
This is more easily demonstrated / understood with an example:
>>> analysis_specific_options = {
>>> asb.AeroBuildup: dict(
>>> include_wave_drag=True,
>>> )
>>> }
"""
### Set defaults
if xsecs is None:
xsecs: List['FuselageXSec'] = []
if analysis_specific_options is None:
analysis_specific_options = {}
### Initialize
self.name = name
self.xsecs = xsecs
self.color = color
self.analysis_specific_options = analysis_specific_options
### Handle deprecated parameters
if 'symmetric' in locals():
raise DeprecationWarning(
"The `symmetric` argument for Fuselage objects is deprecated. Make your fuselages separate instead!")
if 'xyz_le' in locals():
import warnings
warnings.warn(
"The `xyz_le` input for Fuselage is pending deprecation and will be removed in a future version. Use Fuselage().translate(xyz) instead.",
stacklevel=2
)
self.xsecs = [
xsec.translate(xyz_le)
for xsec in self.xsecs
]
    def __repr__(self) -> str:
        n_xsecs = len(self.xsecs)
        return f"Fuselage '{self.name}' ({n_xsecs} {'xsec' if n_xsecs == 1 else 'xsecs'})"
def add_loft(self,
kind: str,
to_xsec: 'FuselageXSec',
from_xsec: 'FuselageXSec' = None,
n_points: int = 5,
spacing: Callable[[float, float, int], np.ndarray] = np.cosspace,
) -> "Fuselage":
raise NotImplementedError # Function under construction!
### Set defaults
if from_xsec is None:
if len(self.xsecs) == 0:
from_xsec = FuselageXSec(
xyz_c=[0, 0, 0],
width=0,
height=0,
shape=2
)
else:
from_xsec = self.xsecs[-1]
### Define a nondimensional coordinate
t = spacing(0, 1, n_points)
if kind == "linear":
new_xsecs = [
FuselageXSec(
xyz_c=from_xsec.xyz_c * (1 - ti) + to_xsec.xyz_c * ti,
width=from_xsec.width * (1 - ti) + to_xsec.width * ti,
height=from_xsec.height * (1 - ti) + to_xsec.height * ti,
shape=from_xsec.shape * (1 - ti) + to_xsec.shape * ti,
analysis_specific_options=from_xsec.analysis_specific_options,
)
for ti in t
]
elif kind == "ellipsoid-nose":
new_xsecs = [
FuselageXSec(
xyz_c=from_xsec.xyz_c * (1 - ti) + to_xsec.xyz_c * ti,
width=from_xsec.width * (1 - ti) + to_xsec.width * ti,
height=from_xsec.height * (1 - ti) + to_xsec.height * ti,
shape=from_xsec.shape * (1 - ti) + to_xsec.shape * ti,
analysis_specific_options=from_xsec.analysis_specific_options,
)
for ti in t
]
self.xsecs.extend(new_xsecs)
def translate(self,
xyz: Union[np.ndarray, List[float]]
) -> "Fuselage":
"""
Translates the entire Fuselage by a certain amount.
Args:
xyz:
Returns: self
"""
new_fuse = copy.copy(self)
new_fuse.xsecs = [
xsec.translate(xyz)
for xsec in new_fuse.xsecs
]
return new_fuse
def area_wetted(self) -> float:
"""
Returns the wetted area of the fuselage.
:return:
"""
area = 0
perimeters = [xsec.xsec_perimeter() for xsec in self.xsecs]
for i in range(len(self.xsecs) - 1):
x_separation = self.xsecs[i + 1].xyz_c[0] - self.xsecs[i].xyz_c[0]
area += (perimeters[i] + perimeters[i + 1]) / 2 * x_separation
return area
def area_projected(self,
type: str = "XY",
) -> float:
"""
Returns the area of the fuselage as projected onto one of the principal planes.
Args:
type: A string, which determines which principal plane to use for projection. One of:
* "XY", in which case the projected area is onto the XY plane (i.e., top-down)
* "XZ", in which case the projected area is onto the XZ plane (i.e., side-view)
Returns: The projected area.
"""
area = 0
for i in range(len(self.xsecs) - 1):
x_separation = self.xsecs[i + 1].xyz_c[0] - self.xsecs[i].xyz_c[0]
if type == "XY":
width_a = self.xsecs[i].width
width_b = self.xsecs[i + 1].width
area += (width_a + width_b) / 2 * x_separation
elif type == "XZ":
height_a = self.xsecs[i].height
height_b = self.xsecs[i + 1].height
area += (height_a + height_b) / 2 * x_separation
else:
raise ValueError("Bad value of `type`!")
return area
def area_base(self) -> float:
"""
Returns the area of the base (i.e. "trailing edge") of the fuselage. Useful for certain types of drag
calculation.
Returns:
"""
return self.xsecs[-1].xsec_area()
def fineness_ratio(
self,
assumed_shape="cylinder",
) -> float:
"""
Approximates the fineness ratio using the volume and length. The fineness ratio of a fuselage is defined as:
FR = length / max_diameter
Args:
assumed_shape: A string, which determines the assumed shape of the fuselage for the approximation. One of:
* "cylinder", in which case the fuselage is assumed to have a cylindrical shape.
* "sears-haack", in which case the fuselage is assumed to have Sears-Haack fuselage shape.
Returns: An approximate value of the fuselage's fineness ratio.
"""
if assumed_shape == "cylinder":
return np.sqrt(
self.length() ** 3 / self.volume() * np.pi / 4
)
elif assumed_shape == "sears-haack":
length = self.length()
r_max = np.sqrt(
self.volume() / length / (3 * np.pi ** 2 / 16)
)
return length / r_max
def length(self) -> float:
"""
Returns the total front-to-back length of the fuselage. Measured as the difference between the x-coordinates
of the leading and trailing cross-sections.
:return:
"""
return np.fabs(self.xsecs[-1].xyz_c[0] - self.xsecs[0].xyz_c[0])
def volume(self,
_sectional: bool = False
) -> Union[float, List[float]]:
"""
Computes the volume of the Fuselage.
Args:
_sectional: A boolean. If False, returns the total volume. If True, returns a list of volumes for each of
the `n-1` lofted sections (between the `n` fuselage cross-sections in fuselage.xsec).
Returns:
The computed volume.
"""
xsec_areas = [
xsec.xsec_area()
for xsec in self.xsecs
]
separations = [
xsec_b.xyz_c[0] - xsec_a.xyz_c[0]
for xsec_a, xsec_b in zip(
self.xsecs[:-1],
self.xsecs[1:]
)
]
sectional_volumes = [
separation / 3 * (area_a + area_b + (area_a * area_b + 1e-100) ** 0.5)
for area_a, area_b, separation in zip(
xsec_areas[1:],
xsec_areas[:-1],
separations
)
]
volume = sum(sectional_volumes)
if _sectional:
return sectional_volumes
else:
return volume
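    # --- Added note (not part of the original file) ---
    # Each sectional volume above uses the conical-frustum formula
    #     V = (h / 3) * (A_a + A_b + sqrt(A_a * A_b)).
    # Two sanity checks: for a constant cross-section (A_a == A_b == A) this
    # reduces to A * h (a prism), and for A_b == 0 it reduces to A * h / 3 (a cone).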
def x_centroid_projected(self,
type: str = "XY",
) -> float:
"""
Returns the x_g coordinate of the centroid of the planform area.
Args:
type: A string, which determines which principal plane to use for projection. One of:
* "XY", in which case the projected area is onto the XY plane (i.e., top-down)
* "XZ", in which case the projected area is onto the XZ plane (i.e., side-view)
Returns: The x_g coordinate of the centroid.
"""
total_x_area_product = 0
total_area = 0
for xsec_a, xsec_b in zip(self.xsecs, self.xsecs[1:]):
x_a = xsec_a.xyz_c[0]
x_b = xsec_b.xyz_c[0]
if type == "XY":
r_a = xsec_a.width / 2
r_b = xsec_b.width / 2
elif type == "XZ":
r_a = xsec_a.height / 2
r_b = xsec_b.height / 2
else:
raise ValueError("Bad value of `type`!")
dx = x_b - x_a
x_c = x_a + (r_a + 2 * r_b) / (3 * (r_a + r_b)) * dx
area = (r_a + r_b) / 2 * dx
total_area += area
total_x_area_product += x_c * area
x_centroid = total_x_area_product / total_area
return x_centroid
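    # --- Added note (not part of the original file) ---
    # The per-section centroid above is the standard trapezoid-centroid formula,
    #     x_c = x_a + dx * (r_a + 2 * r_b) / (3 * (r_a + r_b)).
    # Sanity checks: r_a == r_b (a rectangle) gives x_a + dx / 2, and r_a == 0
    # (a triangle widening toward x_b) gives x_a + 2 * dx / 3.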
def mesh_body(self,
method="quad",
tangential_resolution: int = 36,
) -> Tuple[np.ndarray, np.ndarray]:
"""
Meshes the fuselage as a solid (thickened) body.
Uses the `(points, faces)` standard mesh format. For reference on this format, see the documentation in
`aerosandbox.geometry.mesh_utilities`.
Args:
method: A string, which determines whether to mesh the fuselage as a series of quadrilaterals or triangles.
* "quad" meshes the fuselage as a series of quadrilaterals.
* "tri" meshes the fuselage as a series of triangles.
tangential_resolution: An integer, which determines the number of points to use to mesh each cross-section.
Returns: Standard unstructured mesh format: A tuple of`points` and `faces`, where:
* `points` is a `n x 3` array of points, where `n` is the number of points in the mesh.
* `faces` is a `m x 3` array of faces if `method` is "tri", or a `m x 4` array of faces if `method` is "quad".
* Each row of `faces` is a list of indices into `points`, which specifies a face.
"""
t = np.linspace(0, 2 * np.pi, tangential_resolution + 1)[:-1]
points = np.concatenate([
np.stack(
xsec.get_3D_coordinates(theta=t),
axis=1
)
for xsec in self.xsecs
],
axis=0
)
faces = []
num_i = len(self.xsecs)
num_j = len(t)
def index_of(iloc, jloc):
return iloc * num_j + (jloc % num_j)
def add_face(*indices):
entry = list(indices)
if method == "quad":
faces.append(entry)
elif method == "tri":
faces.append([entry[0], entry[1], entry[3]])
faces.append([entry[1], entry[2], entry[3]])
for i in range(num_i - 1):
for j in range(num_j):
add_face(
index_of(i, j),
index_of(i, j + 1),
index_of(i + 1, j + 1),
index_of(i + 1, j),
)
faces = np.array(faces)
return points, faces
def mesh_line(self,
y_nondim: Union[float, List[float]] = 0.,
z_nondim: Union[float, List[float]] = 0.,
) -> List[np.ndarray]:
"""
Returns points along a line that goes through each of the FuselageXSec objects in this Fuselage.
Args:
y_nondim: The nondimensional (width-normalized) y-coordinate that the line should go through. Can either
be a single value used at all cross-sections, or can be an iterable of values to be used at the
respective cross-sections.
z_nondim: The nondimensional (height-normalized) z-coordinate that the line should go through. Can either
be a single value used at all cross-sections, or can be an iterable of values to be used at the
respective cross-sections.
Returns: A list of points, where each point is a 3-element array of the form `[x, y, z]`. Goes from the nose
to the tail.
"""
points_on_line: List[np.ndarray] = []
try:
if len(y_nondim) != len(self.xsecs):
raise ValueError(
f"If `y_nondim` is an iterable, it should be the same length as `Fuselage.xsecs` ({len(self.xsecs)})."
)
except TypeError:
pass
try:
if len(z_nondim) != len(self.xsecs):
raise ValueError(
f"If `z_nondim` is an iterable, it should be the same length as `Fuselage.xsecs` ({len(self.xsecs)})."
)
except TypeError:
pass
for i, xsec in enumerate(self.xsecs):
origin = xsec.xyz_c
xg_local, yg_local, zg_local = xsec.compute_frame()
try:
xsec_y_nondim = y_nondim[i]
except (TypeError, IndexError):
xsec_y_nondim = y_nondim
try:
xsec_z_nondim = z_nondim[i]
except (TypeError, IndexError):
xsec_z_nondim = z_nondim
xsec_point = origin + (
xsec_y_nondim * (xsec.width / 2) * yg_local +
xsec_z_nondim * (xsec.height / 2) * zg_local
)
points_on_line.append(xsec_point)
return points_on_line
def draw(self, *args, **kwargs):
"""
An alias to the more general Airplane.draw() method. See there for documentation.
Args:
*args: Arguments to pass through to Airplane.draw()
**kwargs: Keyword arguments to pass through to Airplane.draw()
Returns: Same return as Airplane.draw()
"""
from aerosandbox.geometry.airplane import Airplane
return Airplane(fuselages=[self]).draw(*args, **kwargs)
def draw_wireframe(self, *args, **kwargs):
"""
An alias to the more general Airplane.draw_wireframe() method. See there for documentation.
Args:
*args: Arguments to pass through to Airplane.draw_wireframe()
**kwargs: Keyword arguments to pass through to Airplane.draw_wireframe()
Returns: Same return as Airplane.draw_wireframe()
"""
from aerosandbox.geometry.airplane import Airplane
return Airplane(fuselages=[self]).draw_wireframe(*args, **kwargs)
def draw_three_view(self, *args, **kwargs):
"""
An alias to the more general Airplane.draw_three_view() method. See there for documentation.
Args:
*args: Arguments to pass through to Airplane.draw_three_view()
**kwargs: Keyword arguments to pass through to Airplane.draw_three_view()
Returns: Same return as Airplane.draw_three_view()
"""
from aerosandbox.geometry.airplane import Airplane
return Airplane(fuselages=[self]).draw_three_view(*args, **kwargs)
def subdivide_sections(self,
ratio: int,
spacing_function: Callable[[float, float, float], np.ndarray] = np.linspace
) -> "Fuselage":
"""
Generates a new Fuselage that subdivides the existing sections of this Fuselage into several smaller ones. Splits
each section into N=`ratio` smaller subsections by inserting new cross-sections (xsecs) as needed.
This can allow for finer aerodynamic resolution of sectional properties in certain analyses.
Args:
ratio: The number of new sections to split each old section into.
spacing_function: A function that takes in three arguments: the start, end, and number of points to generate.
The default is `np.linspace`, which generates a linearly-spaced array of points.
Other options include `np.cosspace`, which generates a cosine-spaced array of points.
Returns: A new Fuselage object with subdivided sections.
"""
if not (ratio >= 2 and isinstance(ratio, int)):
raise ValueError("`ratio` must be an integer greater than or equal to 2.")
new_xsecs = []
length_fractions_along_section = spacing_function(0, 1, ratio + 1)[:-1]
for xsec_a, xsec_b in zip(self.xsecs[:-1], self.xsecs[1:]):
for s in length_fractions_along_section:
a_weight = 1 - s
b_weight = s
new_xsecs.append(
FuselageXSec(
xyz_c=xsec_a.xyz_c * a_weight + xsec_b.xyz_c * b_weight,
width=xsec_a.width * a_weight + xsec_b.width * b_weight,
height=xsec_a.height * a_weight + xsec_b.height * b_weight,
shape=xsec_a.shape * a_weight + xsec_b.shape * b_weight,
analysis_specific_options=xsec_a.analysis_specific_options,
)
)
new_xsecs.append(self.xsecs[-1])
return Fuselage(
name=self.name,
xsecs=new_xsecs,
analysis_specific_options=self.analysis_specific_options
)
def _compute_frame_of_FuselageXSec(self, index: int) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""
Computes the local frame of a FuselageXSec, given the index of the FuselageXSec in the Fuselage.xsecs list.
Args:
index: The index of the FuselageXSec in the Fuselage.xsecs list.
Returns: A tuple:
xg_local: The x-axis of the local coordinate frame, in aircraft geometry axes.
yg_local: The y-axis of the local coordinate frame, in aircraft geometry axes.
zg_local: The z-axis of the local coordinate frame, in aircraft geometry axes.
"""
import warnings
warnings.warn(
"Fuselage._compute_frame_of_FuselageXSec() is deprecated. "
"Use FuselageXSec.compute_frame() instead.",
DeprecationWarning
)
return self.xsecs[index].compute_frame()
class FuselageXSec(AeroSandboxObject):
"""
Definition for a fuselage cross-section ("X-section").
"""
def __init__(self,
xyz_c: Union[np.ndarray, List[float]] = None,
xyz_normal: Union[np.ndarray, List[float]] = None,
radius: float = None,
width: float = None,
height: float = None,
shape: float = 2.,
analysis_specific_options: Optional[Dict[type, Dict[str, Any]]] = None,
):
"""
Defines a new Fuselage cross-section.
Fuselage cross-sections are essentially a sketch on a 2D plane.
* This plane is defined by a center point (`xyz_c`) and a normal vector (`xyz_normal`).
* The cross-section is a superellipse shape, which is a generalization of a circle and a square.
It is mathematically defined by three parameters, using `y` and `z` as the two axes:
            abs(y / (width / 2)) ^ shape + abs(z / (height / 2)) ^ shape = 1
See also: https://en.wikipedia.org/wiki/Superellipse
There are some notable special cases:
* A circle is a special case of a superellipse, where `shape = 2`.
* A square is a special case of a superellipse, where `shape = Inf` (in practice, set this to some
high value like 1000).
* A diamond is a special case of a superellipse, where `shape = 1`.
Must specify either `radius` or both `width` and `height`. Cannot specify both.
Args:
xyz_c: An array-like that represents the xyz-coordinates of the center of this fuselage cross-section,
in geometry axes.
xyz_normal: An array-like that represents the xyz-coordinates of the normal vector of this fuselage
cross-section, in geometry axes.
radius: Radius of the fuselage cross-section.
width: Width of the fuselage cross-section.
height: Height of the fuselage cross-section.
shape: A parameter that determines what shape the cross-section is. Should be in the range 1 < shape < infinity.
In short, here's how to interpret this value:
* shape=2 is a circle.
* shape=1 is a diamond shape.
* A high value of, say, 10, will get you a square-ish shape.
To be more precise:
* If the `shape` parameter is `s`, then the corresponding shape is the same as a level-set of a L^s norm in R^2.
* Defined another way, if the `shape` parameter is `s`, then the shape is the solution to the equation:
* x^s + y^s = 1 in the first quadrant (x>0, y>0); then mirrored for all four quadrants.
analysis_specific_options: Analysis-specific options are additional constants or modeling assumptions
that should be passed on to specific analyses and associated with this specific geometry object.
This should be a dictionary where:
* Keys are specific analysis types (typically a subclass of asb.ExplicitAnalysis or
asb.ImplicitAnalysis), but if you decide to write your own analysis and want to make this key
something else (like a string), that's totally fine - it's just a unique identifier for the
specific analysis you're running.
* Values are a dictionary of key:value pairs, where:
* Keys are strings.
* Values are some value you want to assign.
This is more easily demonstrated / understood with an example:
>>> analysis_specific_options = {
>>> asb.AeroBuildup: dict(
>>> include_wave_drag=True,
>>> )
>>> }
"""
### Set defaults
if xyz_c is None:
xyz_c = np.array([0., 0., 0.])
if xyz_normal is None:
xyz_normal = np.array([1., 0., 0.]) # points backwards
if analysis_specific_options is None:
analysis_specific_options = {}
### Set width and height
radius_specified = (radius is not None)
width_height_specified = [
(width is not None),
(height is not None)
]
if radius_specified:
if any(width_height_specified):
raise ValueError(
"Cannot specify both `radius` and (`width`, `height`) parameters - must be one or the other."
)
self.width = 2 * radius
self.height = 2 * radius
else:
if not all(width_height_specified):
raise ValueError(
"Must specify either `radius` or both (`width`, `height`) parameters."
)
self.width = width
self.height = height
### Initialize
self.xyz_c = np.array(xyz_c)
self.xyz_normal = np.array(xyz_normal)
self.shape = shape
self.analysis_specific_options = analysis_specific_options
def __repr__(self) -> str:
return f"FuselageXSec (xyz_c: {self.xyz_c}, width: {self.width}, height: {self.height}, shape: {self.shape})"
def xsec_area(self):
"""
Computes the FuselageXSec's cross-sectional (xsec) area.
The computation method is a closed-form approximation for the area of a superellipse. The exact equation for
the area of a superellipse with shape parameter `s` is:
            area = width * height * (gamma(1 + 1/s))^2 / gamma(1 + 2/s)
where gamma() is the gamma function. The gamma function is (relatively) computationally expensive to evaluate
and differentiate, so we replace this area calculation with a closed-form approximation (with essentially no
loss in accuracy):
area = width * height / (s^-1.8717618013591173 + 1)
This approximation has the following properties:
* It is numerically exact for the case of s = 1 (a diamond)
* It is numerically exact for the case of s = 2 (a circle)
* It is correct in the asymptotic limit where s -> infinity (a square)
* In the range of sensible s values (1 < s < infinity), its error is less than 0.6%.
* It always produces a positive area for any physically-meaningful value of s (s > 0). In the range of s
values where s is physically-meaningful but not in a sensible range (0 < s < 1), this equation will
over-predict area.
        The value of the constant seen in this expression (-1.872...) is given by log(4/pi - 1) / log(2), and it is
chosen as such so that the expression is exactly correct in the s=2 (circle) case.
Returns:
"""
area = self.width * self.height / (self.shape ** -1.8717618013591173 + 1)
return area
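    # --- Added note (not part of the original file) ---
    # Worked check of the approximation above, for the circle/ellipse case (shape = 2):
    #     2 ** -1.8717618... ≈ 0.2732, so area ≈ width * height / 1.2732 ≈ (pi / 4) * width * height,
    # which is the exact result for an ellipse with axes `width` and `height`.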
def xsec_perimeter(self):
"""
Computes the FuselageXSec's perimeter. ("Circumference" in the case of a circular cross-section.)
The computation method is a closed-form approximation for the perimeter of a superellipse. The exact equation
for the perimeter of a superellipse is quite long and is not repeated here for brevity; a Google search will
bring it up. More importantly, this exact equation can only be represented as an infinite sum - not
particularly useful for fast computation.
We replace this exact equation with the following closed-form approximation obtained from symbolic regression:
Imagine a superellipse centered on the origin of a 2D plane. Now, imagine that the superellipse is
stretched such that the first quadrant (e.g., x>0, y>0) goes from (1, 0) to (0, h). Assume it has shape
parameter s (where, as a reminder, s=1 is a diamond, s=2 is a circle, s=Inf is a square).
Then, the perimeter of that single quadrant is:
h + (((((s-0.88487077) * h + 0.2588574 / h) ^ exp(s / -0.90069205)) + h) + 0.09919785) ^ (-1.4812293 / s)
See `AeroSandbox/studies/SuperellipseProperties` for details about how this was obtained.
We can extrapolate from here to the general case of a superellipse, as shown in the code below.
This approximation has the following properties:
* For the s=1 case (diamond), the error is +0.2%.
* For the s=2 case (circle), the error is -0.1%.
* In the s -> infinity limit (square), the error is +0.1%.
Returns:
"""
try:
if self.width == 0:
return 2 * self.height
elif self.height == 0:
return 2 * self.width
except RuntimeError: # Will error if width and height are optimization variables, as truthiness is indeterminate
pass
s = self.shape
h = np.maximum(
(self.width + 1e-16) / (self.height + 1e-16),
(self.height + 1e-16) / (self.width + 1e-16)
)
nondim_quadrant_perimeter = (
h + (((((s - 0.88487077) * h + 0.2588574 / h) ** np.exp(s / -0.90069205)) + h) + 0.09919785) ** (
-1.4812293 / s)
)
perimeter = 2 * nondim_quadrant_perimeter * np.minimum(self.width, self.height)
return np.where(
self.width == 0,
2 * self.height,
np.where(
self.height == 0,
2 * self.width,
perimeter
)
)
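    # --- Added note (not part of the original file) ---
    # Worked check of the approximation above, for a circle of radius r
    # (width = height = 2 * r, shape = 2, hence h = 1): the nondimensional quadrant
    # perimeter evaluates to ~1.570 ≈ pi / 2, giving perimeter ≈ 2 * (pi / 2) * (2 * r) = 2 * pi * r.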
def compute_frame(self) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""
Computes the local coordinate frame of the FuselageXSec, in aircraft geometry axes.
xg_local is aligned with the FuselageXSec's normal vector.
zg_local is roughly aligned with the z-axis of the aircraft geometry axes, but projected onto the FuselageXSec's plane.
yg_local is the cross product of zg_local and xg_local.
Returns: A tuple:
xg_local: The x-axis of the local coordinate frame, in aircraft geometry axes.
yg_local: The y-axis of the local coordinate frame, in aircraft geometry axes.
zg_local: The z-axis of the local coordinate frame, in aircraft geometry axes.
"""
xyz_normal = self.xyz_normal / np.linalg.norm(self.xyz_normal)
xg_local = xyz_normal
        zg_local = np.array([0, 0, 1])
        zg_local = zg_local - np.dot(zg_local, xg_local) * xg_local
        zg_local = zg_local / np.linalg.norm(zg_local)  # Re-normalize so the frame stays orthonormal for canted normals
        yg_local = np.cross(zg_local, xg_local)
return xg_local, yg_local, zg_local
def get_3D_coordinates(self,
theta: Union[float, np.ndarray] = None
) -> Tuple[Union[float, np.ndarray]]:
"""
Samples points from the perimeter of this FuselageXSec.
Args:
theta: Coordinate in the tangential-ish direction to sample points at. Given in the 2D FuselageXSec
coordinate system, where:
* y_2D points along the (global) y_g
* z_2D points along the (global) z_g
In other words, a value of:
* theta=0 -> samples points from the right side of the FuselageXSec
* theta=pi/2 -> samples points from the top of the FuselageXSec
* theta=pi -> samples points from the left side of the FuselageXSec
* theta=3pi/2 -> samples points from the bottom of the FuselageXSec
Returns: Points sampled from the perimeter of the FuselageXSec, as a [x, y, z] tuple.
If theta is a float, then each of x, y, and z will be floats.
If theta is an array, then x, y, and z will also be arrays of the same size.
"""
### Set defaults
if theta is None:
theta = np.linspace(
0,
2 * np.pi,
60 + 1
)[:-1]
st = np.sin(np.mod(theta, 2 * np.pi))
ct = np.cos(np.mod(theta, 2 * np.pi))
y = (self.width / 2) * np.abs(ct) ** (2 / self.shape) * np.where(ct > 0, 1, -1)
z = (self.height / 2) * np.abs(st) ** (2 / self.shape) * np.where(st > 0, 1, -1)
xg_local, yg_local, zg_local = self.compute_frame()
return (
self.xyz_c[0] + y * yg_local[0] + z * zg_local[0],
self.xyz_c[1] + y * yg_local[1] + z * zg_local[1],
self.xyz_c[2] + y * yg_local[2] + z * zg_local[2],
)
def equivalent_radius(self,
preserve="area"
) -> float:
"""
Computes an equivalent radius for non-circular cross-sections. This may be necessary when doing analysis that
uses axisymmetric assumptions.
Can either hold area or perimeter fixed, depending on whether cross-sectional area or wetted area is more
important.
Args:
preserve: One of:
* "area": holds the cross-sectional area constant
* "perimeter": holds the cross-sectional perimeter (i.e., the wetted area of the Fuselage) constant
Returns: An equivalent radius value.
"""
if preserve == "area":
return (self.xsec_area() / np.pi + 1e-16) ** 0.5
elif preserve == "perimeter":
return (self.xsec_perimeter() / (2 * np.pi))
else:
raise ValueError("Bad value of `preserve`!")
def translate(self,
xyz: Union[np.ndarray, List[float]]
) -> "FuselageXSec":
"""
Returns a copy of this FuselageXSec that has been translated by `xyz`.
Args:
xyz: The amount to translate the FuselageXSec. Given as a 3-element NumPy vector.
Returns: A copy of this FuselageXSec, translated by `xyz`.
"""
new_xsec = copy.copy(self)
new_xsec.xyz_c = new_xsec.xyz_c + np.array(xyz)
return new_xsec
if __name__ == '__main__':
fuse = Fuselage(
xsecs=[
FuselageXSec(
xyz_c=[0, 0, 1],
radius=0,
),
FuselageXSec(
xyz_c=[1, 0, 1],
width=0.5,
height=0.2,
shape=5
),
FuselageXSec(
xyz_c=[2, 0, 1],
radius=0.2,
)
]
).translate([0, 0, 2])
fuse.draw()
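    # --- Added illustrative sketch (not part of the original file) ---
    # A few of the geometric queries defined above, evaluated on the example
    # fuselage. Nothing below assumes any API beyond what is defined in this file.
    print("Length [m]:         ", fuse.length())
    print("Volume [m^3]:       ", fuse.volume())
    print("Wetted area [m^2]:  ", fuse.area_wetted())
    print("Planform area [m^2]:", fuse.area_projected(type="XY"))
    print("Fineness ratio:     ", fuse.fineness_ratio())
    # Refine the geometry by splitting each of the 2 sections into 3 subsections,
    # which should yield 3 * 2 + 1 = 7 cross-sections in the new Fuselage:
    fuse_fine = fuse.subdivide_sections(ratio=3)
    print("xsecs after subdivision:", len(fuse_fine.xsecs))
    # Mesh the body as triangles; `points` is (N, 3) and `faces` is (M, 3):
    points, faces = fuse.mesh_body(method="tri")
    print("Mesh points:", points.shape, " Mesh faces:", faces.shape)
    # Equivalent radius of the aft cross-section (defined above with radius=0.2), preserving area:
    print("Aft equivalent radius [m]:", fuse.xsecs[-1].equivalent_radius(preserve="area"))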
# ======== end of file: aerosandbox/geometry/fuselage.py (AeroSandbox-4.1.1) ========
import aerosandbox.numpy as np
from typing import Tuple
"""
Documentation of (points, faces) standard format, which is an unstructured mesh format:
Meshes are given here in the common (points, faces) format. In this format, `points` is a Nx3 array, where each row
gives the 3D coordinates of a vertex in the mesh. Entries into this array are floating-point, generally speaking.
`faces` is a Mx3 array in the case of a triangular mesh, or a Mx4 array in the case of a quadrilateral mesh. Each row
in this array represents a face. The entries in each row are integers that correspond to the index of `points` where
the vertex locations of that face are found.
"""
def stack_meshes(
*meshes: Tuple[Tuple[np.ndarray, np.ndarray]]
) -> Tuple[np.ndarray, np.ndarray]:
"""
Takes in a series of tuples (points, faces) and merges them into a single tuple (points, faces). All (points,
faces) tuples are meshes given in standard format.
Args:
*meshes: Any number of mesh tuples in standard (points, faces) format.
Returns: Points and faces of the combined mesh. Standard unstructured mesh format: A tuple of `points` and
`faces`, where:
* `points` is a `n x 3` array of points, where `n` is the number of points in the mesh.
        * `faces` is a `m x 3` array of faces for a triangular mesh, or a `m x 4` array of faces for a quadrilateral mesh.
* Each row of `faces` is a list of indices into `points`, which specifies a face.
"""
if len(meshes) == 1:
return meshes[0]
elif len(meshes) == 2:
points1, faces1 = meshes[0]
points2, faces2 = meshes[1]
faces2 = faces2 + len(points1)
points = np.concatenate((points1, points2))
faces = np.concatenate((faces1, faces2))
return points, faces
else:
points, faces = stack_meshes(
meshes[0],
meshes[1]
)
return stack_meshes(
(points, faces),
*meshes[2:]
)
def convert_mesh_to_polydata_format(
points: np.ndarray,
faces: np.ndarray
) -> Tuple[np.ndarray, np.ndarray]:
"""
PyVista uses a slightly different convention for the standard (points, faces) format as described above. They
    give `faces` as a single 1D vector of length (M*4) for a triangular mesh, or (M*5) in the case of quadrilateral meshing.
Basically, the mesh displayer goes down the `faces` array, and when it sees a number N, it interprets that as the
number of vertices in the following face. Then, the next N entries are interpreted as integer references to the
vertices of the face.
This has the benefit of allowing for mixed tri/quad meshes.
Args:
points: `points` array of the original standard-format mesh
faces: `faces` array of the original standard-format mesh
Returns:
(points, faces), except that `faces` is now in a pyvista.PolyData compatible format.
"""
faces = [
[len(face), *face]
for face in faces
]
faces = np.array(faces)
faces = np.reshape(faces, -1)
return points, faces
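if __name__ == '__main__':
    # --- Added illustrative sketch (not part of the original file) ---
    # Two single-triangle meshes in the (points, faces) format documented above.
    # stack_meshes() must offset the second mesh's face indices by the number of
    # points in the first mesh, so the combined faces reference the right rows.
    points_a = np.array([[0., 0., 0.], [1., 0., 0.], [0., 1., 0.]])
    faces_a = np.array([[0, 1, 2]])
    points_b = np.array([[0., 0., 1.], [1., 0., 1.], [0., 1., 1.]])
    faces_b = np.array([[0, 1, 2]])
    points, faces = stack_meshes((points_a, faces_a), (points_b, faces_b))
    print(points.shape)  # (6, 3)
    print(faces)         # [[0 1 2], [3 4 5]] -- second face re-indexed
    # Convert to the PyVista-style flat format: each face is prefixed by its
    # vertex count, so two triangles flatten to 2 * (1 + 3) = 8 integers.
    _, faces_flat = convert_mesh_to_polydata_format(points, faces)
    print(faces_flat)    # [3 0 1 2 3 3 4 5]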
# ======== end of file: aerosandbox/geometry/mesh_utilities.py (AeroSandbox-4.1.1) ========
import aerosandbox.numpy as np
from matplotlib import path
from aerosandbox.common import AeroSandboxObject
from typing import Union
class Polygon(AeroSandboxObject):
def __init__(self,
coordinates: np.ndarray
):
"""
Creates a polygon object.
Args:
coordinates: An Nx2 NumPy ndarray of [x, y] coordinates for the polygon.
"""
self.coordinates = np.array(coordinates)
def __repr__(self):
return f"Polygon ({self.n_points()} points)"
def __eq__(self, other):
return np.all(self.coordinates == other.coordinates)
def __ne__(self, other):
return not self.__eq__(other)
def x(self) -> np.ndarray:
"""
Returns the x coordinates of the polygon. Equivalent to Polygon.coordinates[:,0].
Returns:
X coordinates as a vector
"""
return self.coordinates[:, 0]
def y(self) -> np.ndarray:
"""
Returns the y coordinates of the polygon. Equivalent to Polygon.coordinates[:,1].
Returns:
Y coordinates as a vector
"""
return self.coordinates[:, 1]
def n_points(self) -> int:
"""
Returns the number of points/vertices/coordinates of the polygon.
"""
try:
return len(self.coordinates)
except TypeError:
try:
return self.coordinates.shape[0]
except AttributeError:
return 0
def scale(self,
scale_x: float = 1.,
scale_y: float = 1.,
) -> 'Polygon':
"""
Scales a Polygon about the origin.
Args:
scale_x: Amount to scale in the x-direction.
scale_y: Amount to scale in the y-direction.
Returns: The scaled Polygon.
"""
x = self.x() * scale_x
y = self.y() * scale_y
return Polygon(
coordinates=np.stack((x, y), axis=1)
)
def translate(self,
translate_x: float = 0.,
translate_y: float = 0.,
) -> 'Polygon':
"""
Translates a Polygon by a given amount.
Args:
translate_x: Amount to translate in the x-direction
translate_y: Amount to translate in the y-direction
Returns: The translated Polygon.
"""
x = self.x() + translate_x
y = self.y() + translate_y
return Polygon(
coordinates=np.stack((x, y), axis=1)
)
def rotate(self,
angle: float,
x_center: float = 0.,
y_center: float = 0.
) -> 'Polygon':
"""
        Rotates a Polygon counterclockwise by the specified amount, in radians.
Rotates about the point (x_center, y_center), which is (0, 0) by default.
Args:
angle: Angle to rotate, counterclockwise, in radians.
x_center: The x-coordinate of the center of rotation.
y_center: The y-coordinate of the center of rotation.
Returns: The rotated Polygon.
"""
### Translate
translation = np.array([x_center, y_center]).reshape((1, 2))
coordinates = self.coordinates - translation
### Rotate
rotation_matrix = np.rotation_matrix_2D(
angle=angle,
)
coordinates = (rotation_matrix @ coordinates.T).T
### Translate
coordinates = coordinates + translation
return Polygon(
coordinates=coordinates
)
def area(self) -> float:
"""
Returns the area of the polygon.
"""
x = self.x()
y = self.y()
x_n = np.roll(x, -1) # x_next, or x_i+1
y_n = np.roll(y, -1) # y_next, or y_i+1
a = x * y_n - x_n * y # a is the area of the triangle bounded by a given point, the next point, and the origin.
A = 0.5 * np.sum(a) # area
return A
def perimeter(self) -> float:
"""
Returns the perimeter of the polygon.
"""
dx = np.diff(self.x())
dy = np.diff(self.y())
ds = (
dx ** 2 +
dy ** 2
) ** 0.5
return np.sum(ds)
def centroid(self) -> np.ndarray:
"""
Returns the centroid of the polygon as a 1D np.ndarray of length 2.
"""
x = self.x()
y = self.y()
x_n = np.roll(x, -1) # x_next, or x_i+1
y_n = np.roll(y, -1) # y_next, or y_i+1
a = x * y_n - x_n * y # a is the area of the triangle bounded by a given point, the next point, and the origin.
A = 0.5 * np.sum(a) # area
x_c = 1 / (6 * A) * np.sum(a * (x + x_n))
y_c = 1 / (6 * A) * np.sum(a * (y + y_n))
centroid = np.array([x_c, y_c])
return centroid
def Ixx(self):
"""
Returns the nondimensionalized Ixx moment of inertia, taken about the centroid.
"""
x = self.x()
y = self.y()
x_n = np.roll(x, -1) # x_next, or x_i+1
y_n = np.roll(y, -1) # y_next, or y_i+1
a = x * y_n - x_n * y # a is the area of the triangle bounded by a given point, the next point, and the origin.
A = 0.5 * np.sum(a) # area
x_c = 1 / (6 * A) * np.sum(a * (x + x_n))
y_c = 1 / (6 * A) * np.sum(a * (y + y_n))
centroid = np.array([x_c, y_c])
Ixx = 1 / 12 * np.sum(a * (y ** 2 + y * y_n + y_n ** 2))
Iuu = Ixx - A * centroid[1] ** 2
return Iuu
def Iyy(self):
"""
Returns the nondimensionalized Iyy moment of inertia, taken about the centroid.
"""
x = self.x()
y = self.y()
x_n = np.roll(x, -1) # x_next, or x_i+1
y_n = np.roll(y, -1) # y_next, or y_i+1
a = x * y_n - x_n * y # a is the area of the triangle bounded by a given point, the next point, and the origin.
A = 0.5 * np.sum(a) # area
x_c = 1 / (6 * A) * np.sum(a * (x + x_n))
y_c = 1 / (6 * A) * np.sum(a * (y + y_n))
centroid = np.array([x_c, y_c])
Iyy = 1 / 12 * np.sum(a * (x ** 2 + x * x_n + x_n ** 2))
Ivv = Iyy - A * centroid[0] ** 2
return Ivv
def Ixy(self):
"""
Returns the nondimensionalized product of inertia, taken about the centroid.
"""
x = self.x()
y = self.y()
x_n = np.roll(x, -1) # x_next, or x_i+1
y_n = np.roll(y, -1) # y_next, or y_i+1
a = x * y_n - x_n * y # a is the area of the triangle bounded by a given point, the next point, and the origin.
A = 0.5 * np.sum(a) # area
x_c = 1 / (6 * A) * np.sum(a * (x + x_n))
y_c = 1 / (6 * A) * np.sum(a * (y + y_n))
centroid = np.array([x_c, y_c])
Ixy = 1 / 24 * np.sum(a * (x * y_n + 2 * x * y + 2 * x_n * y_n + x_n * y))
Iuv = Ixy - A * centroid[0] * centroid[1]
return Iuv
def J(self):
"""
Returns the nondimensionalized polar moment of inertia, taken about the centroid.
"""
x = self.x()
y = self.y()
x_n = np.roll(x, -1) # x_next, or x_i+1
y_n = np.roll(y, -1) # y_next, or y_i+1
a = x * y_n - x_n * y # a is the area of the triangle bounded by a given point, the next point, and the origin.
A = 0.5 * np.sum(a) # area
x_c = 1 / (6 * A) * np.sum(a * (x + x_n))
y_c = 1 / (6 * A) * np.sum(a * (y + y_n))
centroid = np.array([x_c, y_c])
Ixx = 1 / 12 * np.sum(a * (y ** 2 + y * y_n + y_n ** 2))
Iyy = 1 / 12 * np.sum(a * (x ** 2 + x * x_n + x_n ** 2))
J = Ixx + Iyy
return J
def write_sldcrv(self,
filepath: str = None
):
"""
Writes a .sldcrv (SolidWorks curve) file corresponding to this Polygon to a filepath.
Args:
filepath: A filepath (including the filename and .sldcrv extension) [string]
                If None, the file is not written to disk.
        Returns: The contents of the .sldcrv file, as a string. (The file is also written to `filepath`, if one was given.)
"""
string = "\n".join(
[
"%f %f 0" % tuple(coordinate)
for coordinate in self.coordinates
]
)
if filepath is not None:
with open(filepath, "w+") as f:
f.write(string)
return string
def contains_points(self,
x: Union[float, np.ndarray],
y: Union[float, np.ndarray],
) -> Union[float, np.ndarray]:
"""
Returns a boolean array of whether some (x, y) point(s) are contained within the Polygon.
Note: This function is unfortunately not automatic-differentiable.
Args:
x: x-coordinate(s) of the query points.
y: y-coordinate(s) of the query points.
Returns:
A boolean array of the same size as x and y, with values corresponding to whether the points are
inside the Polygon.
"""
x = np.array(x)
y = np.array(y)
try:
input_shape = (x + y).shape
except ValueError as e: # If arrays are not broadcastable
raise ValueError("Inputs x and y could not be broadcast together!") from e
x = x.reshape(-1, 1)
y = y.reshape(-1, 1)
points = np.hstack((x, y))
contained = path.Path(
vertices=self.coordinates
).contains_points(
points
)
contained = np.array(contained).reshape(input_shape)
return contained
def as_shapely_polygon(self):
"""
Returns a Shapely Polygon object representing this polygon.
Shapely is a Python library for 2D geometry operations. While it is more powerful than this class (e.g.,
allows for union/intersection calculation between Polygons), it is not automatic-differentiable.
"""
import shapely
return shapely.Polygon(self.coordinates)
def jaccard_similarity(self,
other: "Polygon"
):
"""
Calculates the Jaccard similarity between this polygon and another polygon.
Note: This function is unfortunately not automatic-differentiable.
Args:
other: The other polygon to compare to.
Returns:
The Jaccard similarity between this polygon and the other polygon.
* 0 if the polygons are completely disjoint
* 1 if the polygons are identical
"""
p1 = self.as_shapely_polygon()
p2 = other.as_shapely_polygon()
intersection = p1.intersection(p2).area
union = p1.area + p2.area - intersection
similarity = intersection / union if union != 0 else 0
return similarity
def draw(self,
set_equal=True,
color=None,
**kwargs
):
"""
Draws the Polygon on the current matplotlib axis.
        Args:
            set_equal: Whether to set the aspect ratio of the plot to be equal.
            color: The color to fill the Polygon with. If None, the next color in the current matplotlib color cycle is used.
            **kwargs: Keyword arguments to pass to the matplotlib.pyplot.fill function.
                See: https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.fill.html
Returns: None (draws on the current matplotlib axis)
"""
import matplotlib.pyplot as plt
if color is None:
color = plt.gca()._get_lines.get_next_color()
plt.fill(
self.x(),
self.y(),
color=color,
alpha=0.5,
**kwargs
)
if set_equal:
plt.gca().set_aspect("equal", adjustable='box')
if __name__ == '__main__':
theta = np.linspace(0, 2 * np.pi, 1000)
r = np.sin(theta) * np.sqrt(np.abs(np.cos(theta))) / (np.sin(theta) + 7 / 5) - 2 * np.sin(theta) + 2
heart = Polygon(np.stack((r * np.cos(theta), r * np.sin(theta)), axis=1))
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
heart.draw()
heart.scale(0.7, 0.7).translate(2, 1).rotate(np.radians(15)).draw()
plt.show()
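    # --- Added illustrative sketch (not part of the original file) ---
    # The area/centroid methods above use the shoelace formula on the polygon
    # vertices. A unit square is an easy check: area = 1, perimeter = 4
    # (note that perimeter() sums np.diff segments, so the closing edge is
    # counted only if the first vertex is repeated), centroid = (0.5, 0.5).
    square = Polygon(np.array([
        [0., 0.],
        [1., 0.],
        [1., 1.],
        [0., 1.],
        [0., 0.],  # repeat the first vertex so perimeter() includes the closing edge
    ]))
    print(square.area())       # ~1.0
    print(square.perimeter())  # ~4.0
    print(square.centroid())   # ~[0.5, 0.5]
    print(square.contains_points(x=[0.5, 2.0], y=[0.5, 0.5]))  # [True, False]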
# ======== end of file: aerosandbox/geometry/polygon.py (AeroSandbox-4.1.1) ========
import aerosandbox.numpy as np
from aerosandbox.geometry.polygon import Polygon
from aerosandbox.geometry.airfoil.airfoil_families import get_NACA_coordinates, get_UIUC_coordinates, get_file_coordinates
from aerosandbox.geometry.airfoil.default_airfoil_aerodynamics import default_CL_function, default_CD_function, \
default_CM_function
from aerosandbox.library.aerodynamics import transonic
from aerosandbox.modeling.splines.hermite import linear_hermite_patch, cubic_hermite_patch
from scipy import interpolate
from typing import Callable, Union, Any, Dict, List
import json
from pathlib import Path
import os
class Airfoil(Polygon):
"""
An airfoil. See constructor docstring for usage details.
"""
def __init__(self,
name: str = "Untitled",
coordinates: Union[None, str, np.ndarray] = None,
**deprecated_keyword_arguments
):
"""
Creates an Airfoil object.
Args:
name: Name of the airfoil [string]. Can also be used to auto-generate coordinates; see docstring for
`coordinates` below.
coordinates: A representation of the coordinates that define the airfoil. Can be one of several types of
input; the following sequence of operations is used to interpret the meaning of the parameter:
If `coordinates` is an Nx2 array of the [x, y] coordinates that define the airfoil, these are used
as-is. Points are expected to be provided in standard airfoil order:
* Points should start on the upper surface at the trailing edge, continue forward over the upper
surface, wrap around the nose, continue aft over the lower surface, and then end at the trailing
edge on the lower surface.
* The trailing edge need not be closed, but many analyses implicitly assume that this gap is small.
* Take care to ensure that the point at the leading edge of the airfoil, usually (0, 0),
is not duplicated.
If `coordinates` is provided as a string, it assumed to be the filepath to a *.dat file containing
the coordinates; we attempt to load coordinates from this.
If the coordinates are not specified and instead left as None, the constructor will attempt to
auto-populate the coordinates based on the `name` parameter provided, in the following order of
priority:
* If `name` is a 4-digit NACA airfoil (e.g. "naca2412"), coordinates will be created based on the
analytical equation.
* If `name` is the name of an airfoil in the UIUC airfoil database (e.g. "s1223", "e216",
"dae11"), coordinates will be loaded from that. Note that the string you provide must be exactly
the name of the associated *.dat file in the UIUC database.
"""
### Handle the airfoil name
self.name = name
### Handle the coordinates
self.coordinates = None
if coordinates is None: # If no coordinates are given
try: # See if it's a NACA airfoil
self.coordinates = get_NACA_coordinates(name=self.name)
except (ValueError, NotImplementedError):
try: # See if it's in the UIUC airfoil database
self.coordinates = get_UIUC_coordinates(name=self.name)
except FileNotFoundError:
pass
except UnicodeDecodeError:
import warnings
warnings.warn(
f"Airfoil {self.name} was found in the UIUC airfoil database, but could not be parsed.\n"
f"Check for any non-Unicode-compatible characters in the file, or specify the airfoil "
f"coordinates yourself.",
)
else:
try: # If coordinates is a string, assume it's a filepath to a .dat file
self.coordinates = get_file_coordinates(filepath=coordinates)
except (OSError, FileNotFoundError, TypeError, UnicodeDecodeError):
try:
shape = coordinates.shape
assert len(shape) == 2
assert shape[0] == 2 or shape[1] == 2
if not shape[1] == 2:
                        coordinates = np.transpose(coordinates)
self.coordinates = coordinates
except AttributeError:
pass
if self.coordinates is None:
import warnings
warnings.warn(
f"Airfoil {self.name} had no coordinates assigned, and could not parse the `coordinates` input!",
UserWarning,
stacklevel=2,
)
### Handle deprecated keyword arguments
if len(deprecated_keyword_arguments) > 0:
import warnings
warnings.warn(
"The `generate_polars`, `CL_function`, `CD_function`, and `CM_function` keyword arguments to the "
"Airfoil constructor will be deprecated in an upcoming release. Their functionality is replaced"
"by `Airfoil.get_aero_from_neuralfoil()`, which is faster and has better properties for optimization.",
PendingDeprecationWarning
)
generate_polars = deprecated_keyword_arguments.get("generate_polars", False)
CL_function = deprecated_keyword_arguments.get("CL_function", None)
CD_function = deprecated_keyword_arguments.get("CD_function", None)
CM_function = deprecated_keyword_arguments.get("CM_function", None)
### Handle getting default polars
if generate_polars:
self.generate_polars()
else:
self.CL_function = default_CL_function
self.CD_function = default_CD_function
self.CM_function = default_CM_function
### Overwrite any default polars with those provided
if CL_function is not None:
self.CL_function = CL_function
if CD_function is not None:
self.CD_function = CD_function
if CM_function is not None:
self.CM_function = CM_function
def __repr__(self) -> str:
return f"Airfoil {self.name} ({self.n_points()} points)"
def __eq__(self, other: "Airfoil") -> bool:
"""
Checks if two airfoils are equal. Two airfoils are equal if they have the same name, coordinates, and
polar functions.
Args:
other: The other airfoil to compare to.
Returns:
True if the two airfoils are equal, False otherwise.
"""
if other is self: # If they're the same object in memory, they're equal
return True
if not type(self) == type(other): # If the types are different, they're not equal
return False
# At this point, we know that the types are the same, so we can compare the attributes
return all([ # If all of these are true, they're equal
self.name == other.name,
np.allclose(self.coordinates, other.coordinates),
])
def to_kulfan_airfoil(self,
n_weights_per_side: int = 8,
N1: float = 0.5,
N2: float = 1.0,
normalize_coordinates: bool = True,
use_leading_edge_modification: bool = True,
) -> "KulfanAirfoil":
from aerosandbox.geometry.airfoil.kulfan_airfoil import KulfanAirfoil
from aerosandbox.geometry.airfoil.airfoil_families import get_kulfan_parameters
parameters = get_kulfan_parameters(
coordinates=self.coordinates,
n_weights_per_side=n_weights_per_side,
N1=N1,
N2=N2,
normalize_coordinates=normalize_coordinates,
use_leading_edge_modification=use_leading_edge_modification,
)
return KulfanAirfoil(
name=self.name,
lower_weights=parameters["lower_weights"],
upper_weights=parameters["upper_weights"],
leading_edge_weight=parameters["leading_edge_weight"],
TE_thickness=parameters["TE_thickness"],
N1=N1,
N2=N2,
)
def generate_polars(self,
alphas=np.linspace(-13, 13, 27),
Res=np.geomspace(1e3, 1e8, 12),
cache_filename: str = None,
xfoil_kwargs: Dict[str, Any] = None,
unstructured_interpolated_model_kwargs: Dict[str, Any] = None,
include_compressibility_effects: bool = True,
transonic_buffet_lift_knockdown: float = 0.3,
make_symmetric_polars: bool = False,
) -> None:
"""
Generates airfoil polar surrogate models (CL, CD, CM functions) from XFoil data and assigns them in-place to
this Airfoil's polar functions.
In other words, when this function is run, the following functions will be added (or overwritten) to the instance:
* Airfoil.CL_function(alpha, Re, mach)
* Airfoil.CD_function(alpha, Re, mach)
* Airfoil.CM_function(alpha, Re, mach)
Where alpha is in degrees.
Warning: In-place operation! Modifies this Airfoil object by setting Airfoil.CL_function, etc. to the new
polars.
Args:
alphas: The range of alphas to sample from XFoil at. Given in degrees.
Res: The range of Reynolds numbers to sample from XFoil at. Dimensionless.
cache_filename: A path-like filename (ideally a "*.json" file) that can be used to cache the XFoil
results, making it much faster to regenerate the results.
* If the file does not exist, XFoil will be run, and a cache file will be created.
* If the file does exist, XFoil will not be run, and the cache file will be read instead.
xfoil_kwargs: Keyword arguments to pass into the AeroSandbox XFoil module. See the aerosandbox.XFoil
constructor for options.
unstructured_interpolated_model_kwargs: Keyword arguments to pass into the UnstructuredInterpolatedModels
that contain the polars themselves. See the aerosandbox.UnstructuredInterpolatedModel constructor for
options.
include_compressibility_effects: Includes compressibility effects in the polars, such as wave drag,
mach tuck, CL effects across normal shocks. Note that accuracy here is dubious in the transonic regime
and above - you should really specify your own CL/CD/CM models
Returns: None (in-place), adds the following functions to the instance:
* Airfoil.CL_function(alpha, Re, mach)
* Airfoil.CD_function(alpha, Re, mach)
* Airfoil.CM_function(alpha, Re, mach)
"""
if self.coordinates is None:
raise ValueError("Cannot generate polars for an airfoil that you don't have the coordinates of!")
### Set defaults
if xfoil_kwargs is None:
xfoil_kwargs = {}
if unstructured_interpolated_model_kwargs is None:
unstructured_interpolated_model_kwargs = {}
xfoil_kwargs = { # See asb.XFoil for the documentation on these.
"verbose" : False,
"max_iter" : 20,
"xfoil_repanel": True,
**xfoil_kwargs
}
unstructured_interpolated_model_kwargs = { # These were tuned heuristically as defaults!
"resampling_interpolator_kwargs": {
"degree" : 0,
# "kernel": "linear",
"kernel" : "multiquadric",
"epsilon" : 3,
"smoothing": 0.01,
# "kernel": "cubic"
},
**unstructured_interpolated_model_kwargs
}
### Retrieve XFoil Polar Data from the cache, if it exists.
data = None
if cache_filename is not None:
try:
with open(cache_filename, "r") as f:
data = {
k: np.array(v)
for k, v in json.load(f).items()
}
except FileNotFoundError:
pass
### Analyze airfoil with XFoil, if needed
if data is None:
### If a cache filename is given, ensure that the directory exists.
if cache_filename is not None:
os.makedirs(os.path.dirname(cache_filename), exist_ok=True)
from aerosandbox.aerodynamics.aero_2D import XFoil
def get_run_data(Re): # Get the data for an XFoil alpha sweep at one specific Re.
run_data = XFoil(
airfoil=self,
Re=Re,
**xfoil_kwargs
).alpha(alphas)
run_data["Re"] = Re * np.ones_like(run_data["alpha"])
return run_data # Data is a dict where keys are figures of merit [str] and values are 1D ndarrays.
from tqdm import tqdm
run_datas = [ # Get a list of dicts, where each dict is the result of an XFoil run at a particular Re.
get_run_data(Re)
for Re in tqdm(
Res,
desc=f"Running XFoil to generate polars for Airfoil '{self.name}':",
)
]
data = { # Merge the dicts into one big database of all runs.
k: np.concatenate(
tuple([run_data[k] for run_data in run_datas])
)
for k in run_datas[0].keys()
}
if make_symmetric_polars: # If the airfoil is known to be symmetric, duplicate all data across alpha.
keys_symmetric_across_alpha = ['CD', 'CDp', 'Re'] # Assumes the rest are antisymmetric
data = {
k: np.concatenate([v, v if k in keys_symmetric_across_alpha else -v])
for k, v in data.items()
}
if cache_filename is not None: # Cache the accumulated data for later use, if it doesn't already exist.
with open(cache_filename, "w+") as f:
json.dump(
{k: v.tolist() for k, v in data.items()},
f,
indent=4
)
### Save the raw data as an instance attribute for later use
self.xfoil_data = data
### Make the interpolators for attached aerodynamics
from aerosandbox.modeling import UnstructuredInterpolatedModel
attached_alphas_to_use = (
alphas[::2] if len(alphas) > 20 else alphas
)
alpha_resample = np.concatenate([
np.linspace(-180, attached_alphas_to_use.min(), 10)[:-1],
attached_alphas_to_use,
np.linspace(attached_alphas_to_use.max(), 180, 10)[1:],
]) # This is the list of points that we're going to resample from the XFoil runs for our InterpolatedModel, using an RBF.
Re_resample = np.concatenate([
Res.min() / 10 ** np.arange(1, 5)[::-1],
Res,
Res.max() * 10 ** np.arange(1, 5),
]) # This is the list of points that we're going to resample from the XFoil runs for our InterpolatedModel, using an RBF.
x_data = {
"alpha": data["alpha"],
"ln_Re": np.log(data["Re"]),
}
x_data_resample = {
"alpha": alpha_resample,
"ln_Re": np.log(Re_resample)
}
CL_attached_interpolator = UnstructuredInterpolatedModel(
x_data=x_data,
y_data=data["CL"],
x_data_resample=x_data_resample,
**unstructured_interpolated_model_kwargs
)
log10_CD_attached_interpolator = UnstructuredInterpolatedModel(
x_data=x_data,
y_data=np.log10(data["CD"]),
x_data_resample=x_data_resample,
**unstructured_interpolated_model_kwargs
)
CM_attached_interpolator = UnstructuredInterpolatedModel(
x_data=x_data,
y_data=data["CM"],
x_data_resample=x_data_resample,
**unstructured_interpolated_model_kwargs
)
### Determine if separated
alpha_stall_positive = np.max(data["alpha"]) # Across all Re
alpha_stall_negative = np.min(data["alpha"]) # Across all Re
def separation_parameter(alpha, Re=0):
"""
Positive if separated, negative if attached.
This will be an input to a tanh() sigmoid blend via asb.numpy.blend(), so a value of 1 means the flow is
~90% separated, and a value of -1 means the flow is ~90% attached.
"""
return 0.5 * np.softmax(
alpha - alpha_stall_positive,
alpha_stall_negative - alpha
)
### Make the interpolators for separated aerodynamics
from aerosandbox.aerodynamics.aero_2D.airfoil_polar_functions import airfoil_coefficients_post_stall
CL_if_separated, CD_if_separated, CM_if_separated = airfoil_coefficients_post_stall(
airfoil=self,
alpha=alpha_resample
)
CD_if_separated = CD_if_separated + np.median(data["CD"])
# The line above effectively ensures that separated CD will never be less than attached CD. Not exactly, but generally close. A good heuristic.
CL_separated_interpolator = UnstructuredInterpolatedModel(
x_data=alpha_resample,
y_data=CL_if_separated
)
log10_CD_separated_interpolator = UnstructuredInterpolatedModel(
x_data=alpha_resample,
y_data=np.log10(CD_if_separated)
)
CM_separated_interpolator = UnstructuredInterpolatedModel(
x_data=alpha_resample,
y_data=CM_if_separated
)
def CL_function(alpha, Re, mach=0):
alpha = np.mod(alpha + 180, 360) - 180 # Keep alpha in the valid range.
CL_attached = CL_attached_interpolator({
"alpha": alpha,
"ln_Re": np.log(Re),
})
CL_separated = CL_separated_interpolator(alpha) # Lift coefficient if separated
CL_mach_0 = np.blend( # Lift coefficient at mach = 0
separation_parameter(alpha, Re),
CL_separated,
CL_attached
)
if include_compressibility_effects:
prandtl_glauert_beta_squared_ideal = 1 - mach ** 2
prandtl_glauert_beta = np.softmax(
prandtl_glauert_beta_squared_ideal,
-prandtl_glauert_beta_squared_ideal,
hardness=2.0 # Empirically tuned to data
) ** 0.5
CL = CL_mach_0 / prandtl_glauert_beta
mach_crit = transonic.mach_crit_Korn(
CL=CL,
t_over_c=self.max_thickness(),
sweep=0,
kappa_A=0.95
)
### Accounts approximately for the lift drop due to buffet.
buffet_factor = np.blend(
40 * (mach - mach_crit - (0.1 / 80) ** (1 / 3) - 0.06) * (mach - 1.1),
1,
transonic_buffet_lift_knockdown
)
### Accounts for the fact that theoretical CL_alpha goes from 2 * pi (subsonic) to 4 (supersonic),
# following linearized supersonic flow on a thin airfoil.
cla_supersonic_ratio_factor = np.blend(
10 * (mach - 1),
4 / (2 * np.pi),
1,
)
return CL * buffet_factor * cla_supersonic_ratio_factor
else:
return CL_mach_0
def CD_function(alpha, Re, mach=0):
alpha = np.mod(alpha + 180, 360) - 180 # Keep alpha in the valid range.
log10_CD_attached = log10_CD_attached_interpolator({
"alpha": alpha,
"ln_Re": np.log(Re),
})
log10_CD_separated = log10_CD_separated_interpolator(alpha)
log10_CD_mach_0 = np.blend(
separation_parameter(alpha, Re),
log10_CD_separated,
log10_CD_attached,
)
if include_compressibility_effects:
CL_attached = CL_attached_interpolator({
"alpha": alpha,
"ln_Re": np.log(Re),
})
CL_separated = CL_separated_interpolator(alpha)
CL_mach_0 = np.blend(
separation_parameter(alpha, Re),
CL_separated,
CL_attached
)
prandtl_glauert_beta_squared_ideal = 1 - mach ** 2
prandtl_glauert_beta = np.softmax(
prandtl_glauert_beta_squared_ideal,
-prandtl_glauert_beta_squared_ideal,
hardness=2.0 # Empirically tuned to data
) ** 0.5
CL = CL_mach_0 / prandtl_glauert_beta
t_over_c = self.max_thickness()
mach_crit = transonic.mach_crit_Korn(
CL=CL,
t_over_c=t_over_c,
sweep=0,
kappa_A=0.92
)
mach_dd = mach_crit + (0.1 / 80) ** (1 / 3)
CD_wave = np.where(
mach < mach_crit,
0,
np.where(
mach < mach_dd,
20 * (mach - mach_crit) ** 4,
np.where(
mach < 0.97,
cubic_hermite_patch(
mach,
x_a=mach_dd,
x_b=0.97,
f_a=20 * (0.1 / 80) ** (4 / 3),
f_b=0.8 * t_over_c,
dfdx_a=0.1,
dfdx_b=0.8 * t_over_c * 8
),
np.where(
mach < 1.1,
cubic_hermite_patch(
mach,
x_a=0.97,
x_b=1.1,
f_a=0.8 * t_over_c,
f_b=0.8 * t_over_c,
dfdx_a=0.8 * t_over_c * 8,
dfdx_b=-0.8 * t_over_c * 8,
),
np.blend(
8 * 2 * (mach - 1.1) / (1.2 - 0.8),
0.8 * 0.8 * t_over_c,
1.2 * 0.8 * t_over_c,
)
)
)
)
)
# CD_wave = transonic.approximate_CD_wave(
# mach=mach,
# mach_crit=mach_crit,
# CD_wave_at_fully_supersonic=0.90 * self.max_thickness()
# )
return 10 ** log10_CD_mach_0 + CD_wave
else:
return 10 ** log10_CD_mach_0
def CM_function(alpha, Re, mach=0):
alpha = np.mod(alpha + 180, 360) - 180 # Keep alpha in the valid range.
CM_attached = CM_attached_interpolator({
"alpha": alpha,
"ln_Re": np.log(Re),
})
CM_separated = CM_separated_interpolator(alpha)
CM_mach_0 = np.blend(
separation_parameter(alpha, Re),
CM_separated,
CM_attached
)
if include_compressibility_effects:
prandtl_glauert_beta_squared_ideal = 1 - mach ** 2
prandtl_glauert_beta = np.softmax(
prandtl_glauert_beta_squared_ideal,
-prandtl_glauert_beta_squared_ideal,
hardness=2.0 # Empirically tuned to data
) ** 0.5
CM = CM_mach_0 / prandtl_glauert_beta
return CM
else:
return CM_mach_0
self.CL_function = CL_function
self.CD_function = CD_function
self.CM_function = CM_function
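    # --- Added illustrative sketch (not part of the original file) ---
    # Typical use of the polar pipeline documented above (hypothetical cache
    # path; requires XFoil to be available on the system):
    #
    #     af = Airfoil("naca2412")
    #     af.generate_polars(cache_filename="cache/naca2412.json")
    #     CL = af.CL_function(alpha=5, Re=1e6, mach=0.0)   # alpha in degrees
    #
    # For new work, the deprecation note in __init__ points to
    # Airfoil.get_aero_from_neuralfoil(alpha=..., Re=..., mach=...) instead,
    # which returns a dictionary of aerodynamic outputs.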
def get_aero_from_neuralfoil(self,
alpha: Union[float, np.ndarray],
Re: Union[float, np.ndarray],
mach: Union[float, np.ndarray] = 0.,
model_size: str = "large",
control_surfaces: List["ControlSurface"] = None,
control_surface_strategy="polar_modification",
include_360_deg_effects: bool = True,
) -> Dict[str, Union[float, np.ndarray]]:
airfoil = self
if control_surface_strategy == "polar_modification":
pass
elif control_surface_strategy == "coordinate_modification":
for surf in control_surfaces:
airfoil = airfoil.add_control_surface(
deflection=surf.deflection,
hinge_point_x=surf.hinge_point,
)
control_surfaces = []
else:
raise ValueError("Invalid `control_surface_strategy`!\n"
"Valid options are \"polar_modification\" or \"coordinate_modification\".")
airfoil_normalization = airfoil.normalize(return_dict=True)
kulfan_airfoil = airfoil_normalization["airfoil"].to_kulfan_airfoil(
n_weights_per_side=8,
N1=0.5,
N2=1.0,
)
return kulfan_airfoil.get_aero_from_neuralfoil(
alpha=alpha + airfoil_normalization["rotation_angle"],
Re=Re / airfoil_normalization["scale_factor"],
mach=mach,
model_size=model_size,
control_surfaces=control_surfaces,
include_360_deg_effects=include_360_deg_effects
)
def plot_polars(self,
alphas: Union[np.ndarray, List[float]] = np.linspace(-20, 20, 500),
Res: Union[np.ndarray, List[float]] = 10 ** np.arange(3, 9),
mach: float = 0.,
show: bool = True,
Re_colors=None,
) -> None:
import matplotlib.pyplot as plt
import aerosandbox.tools.pretty_plots as p
fig, ax = plt.subplots(2, 2, figsize=(8, 7))
plt.sca(ax[0, 0])
plt.title("Lift Coefficient")
plt.xlabel(r"Angle of Attack $\alpha$ [deg]")
plt.ylabel(r"Lift Coefficient $C_L$")
p.set_ticks(5, 1, 0.5, 0.1)
plt.sca(ax[0, 1])
plt.title("Drag Coefficient")
plt.xlabel(r"Angle of Attack $\alpha$ [deg]")
plt.ylabel(r"Drag Coefficient $C_D$")
plt.ylim(bottom=0, top=0.05)
p.set_ticks(5, 1, 0.01, 0.002)
plt.sca(ax[1, 0])
plt.title("Moment Coefficient")
plt.xlabel(r"Angle of Attack $\alpha$ [deg]")
plt.ylabel(r"Moment Coefficient $C_m$")
p.set_ticks(5, 1, 0.05, 0.01)
plt.sca(ax[1, 1])
plt.title("Lift-to-Drag Ratio")
plt.xlabel(r"Angle of Attack $\alpha$ [deg]")
plt.ylabel(r"Lift-to-Drag Ratio $C_L/C_D$")
p.set_ticks(5, 1, 20, 5)
if Re_colors is None:
Re_colors = plt.get_cmap('rainbow')(np.linspace(0, 1, len(Res)))
Re_colors = [
p.adjust_lightness(color, 0.7)
for color in Re_colors
]
for i, Re in enumerate(Res):
kwargs = dict(
alpha=alphas,
Re=Re,
mach=mach
)
plt.sca(ax[0, 0])
plt.plot(
alphas,
self.CL_function(**kwargs),
color=Re_colors[i],
alpha=0.7
)
plt.sca(ax[0, 1])
plt.plot(
alphas,
self.CD_function(**kwargs),
color=Re_colors[i],
alpha=0.7
)
plt.sca(ax[1, 0])
plt.plot(
alphas,
self.CM_function(**kwargs),
color=Re_colors[i],
alpha=0.7
)
plt.sca(ax[1, 1])
plt.plot(
alphas,
self.CL_function(**kwargs) / self.CD_function(**kwargs),
color=Re_colors[i],
alpha=0.7
)
from aerosandbox.tools.string_formatting import eng_string
plt.sca(ax[0, 0])
plt.legend(
title="Reynolds Number",
labels=[eng_string(Re) for Re in Res],
ncol=2,
# Note: `ncol` is old syntax; preserves backwards-compatibility with matplotlib 3.5.x.
# New matplotlib versions use `ncols` instead.
fontsize=8,
loc='lower right'
)
if show:
p.show_plot(
f"Polar Functions for {self.name} Airfoil",
legend=False,
)
def local_camber(self,
x_over_c: Union[float, np.ndarray] = np.linspace(0, 1, 101)
) -> Union[float, np.ndarray]:
"""
Returns the local camber of the airfoil at a given point or points.
Args:
x_over_c: The x/c locations to calculate the camber at [1D array, more generally, an iterable of floats]
Returns:
Local camber of the airfoil (y/c) [1D array].
"""
upper = self.upper_coordinates()[::-1]
lower = self.lower_coordinates()
upper_interpolated = np.interp(
x_over_c,
upper[:, 0],
upper[:, 1],
)
lower_interpolated = np.interp(
x_over_c,
lower[:, 0],
lower[:, 1],
)
return (upper_interpolated + lower_interpolated) / 2
def local_thickness(self,
x_over_c: Union[float, np.ndarray] = np.linspace(0, 1, 101)
) -> Union[float, np.ndarray]:
"""
Returns the local thickness of the airfoil at a given point or points.
Args:
x_over_c: The x/c locations to calculate the thickness at [1D array, more generally, an iterable of floats]
Returns:
Local thickness of the airfoil (y/c) [1D array].
"""
upper = self.upper_coordinates()[::-1]
lower = self.lower_coordinates()
upper_interpolated = np.interp(
x_over_c,
upper[:, 0],
upper[:, 1],
)
lower_interpolated = np.interp(
x_over_c,
lower[:, 0],
lower[:, 1],
)
return upper_interpolated - lower_interpolated
def max_camber(self,
x_over_c_sample: np.ndarray = np.linspace(0, 1, 101)
) -> float:
"""
Returns the maximum camber of the airfoil.
Args:
x_over_c_sample: Where should the airfoil be sampled to determine the max camber?
Returns: The maximum camber, as a fraction of chord.
"""
return np.max(self.local_camber(x_over_c=x_over_c_sample))
def max_thickness(self,
x_over_c_sample: np.ndarray = np.linspace(0, 1, 101)
) -> float:
"""
Returns the maximum thickness of the airfoil.
Args:
x_over_c_sample: Where should the airfoil be sampled to determine the max thickness?
Returns: The maximum thickness, as a fraction of chord.
"""
return np.max(self.local_thickness(x_over_c=x_over_c_sample))
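# Usage sketch for local_camber(), local_thickness(), max_camber(), and max_thickness().
# (Illustrative only; assumes AeroSandbox is importable as `asb`. Expected values follow from the NACA 4-digit definition.)
#
#     >>> import aerosandbox as asb
#     >>> af = asb.Airfoil("naca2412")
#     >>> af.max_camber()     # ~0.02 (2% camber)
#     >>> af.max_thickness()  # ~0.12 (12% thick)
#     >>> af.local_thickness(x_over_c=0.5)  # Thickness at mid-chord, as a fraction of chord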
def draw(self,
draw_mcl=False,
draw_markers=True,
backend="matplotlib",
show=True
) -> None:
"""
Draw the airfoil object.
Args:
draw_mcl: Should we draw the mean camber line (MCL)? [boolean]
draw_markers: Should we draw a marker at each coordinate point? [boolean]
backend: Which backend should we use? "plotly" or "matplotlib"
show: Should we show the plot? [boolean]
Returns: None, unless `backend` is "plotly" and `show` is False, in which case the Plotly figure is returned.
"""
x = np.array(self.x()).reshape(-1)
y = np.array(self.y()).reshape(-1)
if draw_mcl:
x_mcl = np.linspace(np.min(x), np.max(x), len(x))
y_mcl = self.local_camber(x_mcl)
if backend == "matplotlib":
import matplotlib.pyplot as plt
import aerosandbox.tools.pretty_plots as p
color = '#280887'
plt.plot(
x, y,
".-" if draw_markers else "-",
zorder=11, color=color)
plt.fill(x, y, zorder=10, color=color, alpha=0.2)
if draw_mcl:
plt.plot(x_mcl, y_mcl, "-", zorder=4, color=color, alpha=0.4)
plt.axis("equal")
if show:
p.show_plot(
title=f"{self.name} Airfoil",
xlabel=r"$x/c$",
ylabel=r"$y/c$",
)
elif backend == "plotly":
from aerosandbox.visualization.plotly import go
fig = go.Figure()
fig.add_trace(
go.Scatter(
x=x,
y=y,
mode="lines+markers" if draw_markers else "lines",
name="Airfoil",
fill="toself",
line=dict(
color="blue"
)
),
)
if draw_mcl:
fig.add_trace(
go.Scatter(
x=x_mcl,
y=y_mcl,
mode="lines",
name="Mean Camber Line (MCL)",
line=dict(
color="navy"
)
)
)
fig.update_layout(
xaxis_title="x/c",
yaxis_title="y/c",
yaxis=dict(scaleanchor="x", scaleratio=1),
title=f"{self.name} Airfoil"
)
if show:
fig.show()
else:
return fig
def LE_index(self) -> int:
"""
Returns the index of the leading edge point in the airfoil coordinates.
"""
return int(np.argmin(self.x()))
def lower_coordinates(self) -> np.ndarray:
"""
Returns an Nx2 ndarray of [x, y] coordinates that describe the lower surface of the airfoil.
Order is from the leading edge to the trailing edge.
Includes the leading edge point; be careful about duplicates if using this method in conjunction with
Airfoil.upper_coordinates().
"""
return self.coordinates[self.LE_index():, :]
def upper_coordinates(self) -> np.ndarray:
"""
Returns an Nx2 ndarray of [x, y] coordinates that describe the upper surface of the airfoil.
Order is from the trailing edge to the leading edge.
Includes the leading edge point; be careful about duplicates if using this method in conjunction with
Airfoil.lower_coordinates().
"""
return self.coordinates[:self.LE_index() + 1, :]
def TE_thickness(self) -> float:
"""
Returns the thickness of the trailing edge of the airfoil.
"""
x_gap = self.coordinates[0, 0] - self.coordinates[-1, 0]
y_gap = self.coordinates[0, 1] - self.coordinates[-1, 1]
return (
x_gap ** 2 +
y_gap ** 2
) ** 0.5
def TE_angle(self) -> float:
"""
Returns the trailing edge angle of the airfoil, in degrees.
"""
upper_TE_vec = self.coordinates[0, :] - self.coordinates[1, :]
lower_TE_vec = self.coordinates[-1, :] - self.coordinates[-2, :]
return np.arctan2d(
upper_TE_vec[0] * lower_TE_vec[1] - upper_TE_vec[1] * lower_TE_vec[0],
upper_TE_vec[0] * lower_TE_vec[0] + upper_TE_vec[1] * lower_TE_vec[1]  # Dot product of the two TE vectors
)
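# Usage sketch for TE_thickness() and TE_angle().
# (Illustrative only; assumes AeroSandbox is importable as `asb`.)
#
#     >>> import aerosandbox as asb
#     >>> af = asb.Airfoil("naca0012")
#     >>> af.TE_thickness()  # Trailing-edge gap, in units of chord
#     >>> af.TE_angle()      # Trailing-edge wedge angle, in degrees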
# def LE_radius(self) -> float:
# """
# Gives the approximate leading edge radius of the airfoil, in chord-normalized units.
# """ # TODO finish me
def repanel(self,
n_points_per_side: int = 100,
spacing_function_per_side=np.cosspace,
) -> 'Airfoil':
"""
Returns a repaneled copy of the airfoil with cosine-spaced coordinates on the upper and lower surfaces.
Args:
n_points_per_side: Number of points per side (upper and lower) of the airfoil [int]
Notes: The number of points defining the final airfoil will be `n_points_per_side * 2 - 1`,
since one point (the leading edge point) is shared by both the upper and lower surfaces.
spacing_function_per_side: Determines how to space the points on each side of the airfoil. Can be
`np.linspace` or `np.cosspace`, or any other function of the call signature `f(a, b, n)` that returns
a spaced array of `n` points between `a` and `b`. [function]
Returns: A copy of the airfoil with the new coordinates.
"""
old_upper_coordinates = self.upper_coordinates() # Note: includes leading edge point, be careful about duplicates
old_lower_coordinates = self.lower_coordinates() # Note: includes leading edge point, be careful about duplicates
# Find the streamwise distances between coordinates, assuming linear interpolation
upper_distances_between_points = np.linalg.norm(np.diff(old_upper_coordinates, axis=0), axis=1)
lower_distances_between_points = np.linalg.norm(np.diff(old_lower_coordinates, axis=0), axis=1)
upper_distances_from_TE = np.concatenate(([0], np.cumsum(upper_distances_between_points)))
lower_distances_from_LE = np.concatenate(([0], np.cumsum(lower_distances_between_points)))
try:
new_upper_coordinates = interpolate.CubicSpline(
x=upper_distances_from_TE,
y=old_upper_coordinates,
axis=0,
bc_type=(
(2, (0, 0)),
(1, (0, -1)),
)
)(spacing_function_per_side(0, upper_distances_from_TE[-1], n_points_per_side))
new_lower_coordinates = interpolate.CubicSpline(
x=lower_distances_from_LE,
y=old_lower_coordinates,
axis=0,
bc_type=(
(1, (0, -1)),
(2, (0, 0)),
)
)(spacing_function_per_side(0, lower_distances_from_LE[-1], n_points_per_side))
except ValueError as e:
if not (
np.all(np.diff(upper_distances_from_TE) > 0) and
np.all(np.diff(lower_distances_from_LE) > 0)
):
raise ValueError(
"It looks like your Airfoil has a duplicate point. Try removing the duplicate point and "
"re-running Airfoil.repanel()."
)
else:
raise e
return Airfoil(
name=self.name,
coordinates=np.concatenate((new_upper_coordinates, new_lower_coordinates[1:, :]), axis=0),
)
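# Usage sketch for repanel(): resampling an airfoil to a prescribed, cosine-spaced point count.
# (Illustrative only; assumes AeroSandbox is importable as `asb`.)
#
#     >>> import aerosandbox as asb
#     >>> af = asb.Airfoil("dae11")
#     >>> af_fine = af.repanel(n_points_per_side=200)
#     >>> af_fine.coordinates.shape  # (399, 2): 200 points per side, with the LE point shared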
def normalize(
self,
return_dict: bool = False,
) -> Union['Airfoil', Dict[str, Union['Airfoil', float]]]:
"""
Returns a copy of the Airfoil with a new set of `coordinates`, such that:
- The leading edge (LE) is at (0, 0)
- The trailing edge (TE) is at (1, 0)
- The chord length is equal to 1
The trailing-edge (TE) point is defined as the midpoint of the line segment connecting the first and last coordinate points (upper and lower surface TE points, respectively). The TE point is not necessarily one of the original points in the airfoil coordinates (`Airfoil.coordinates`); in general, it will not be one of the points if the TE thickness is nonzero.
The leading-edge (LE) point is defined as the coordinate point with the largest Euclidean distance from the trailing edge. (In other words, if you were to center a circle on the trailing edge and progressively grow it, what's the last coordinate point that it would intersect?) The LE point is always one of the original points in the airfoil coordinates.
The chord is defined as the Euclidean distance between the LE and TE points.
Coordinate modifications to achieve the constraints described above (LE @ origin, TE at (1, 0), and chord of 1) are done by means of a translation and rotation.
Args:
return_dict: Determines the output type of the function.
- If `False` (default), returns a copy of the Airfoil with the new coordinates.
- If `True`, returns a dictionary with keys:
- "airfoil": a copy of the Airfoil with the new coordinates
- "x_translation": the amount by which the airfoil's LE was translated in the x-direction
- "y_translation": the amount by which the airfoil's LE was translated in the y-direction
- "scale_factor": the amount by which the airfoil was scaled (if >1, the airfoil had to get
bigger)
- "rotation_angle": the angle (in degrees) by which the airfoil was rotated about the LE.
Sign convention is that positive angles rotate the airfoil counter-clockwise.
All of these values represent the "required change", e.g.:
- "x_translation" is the amount by which the airfoil's LE had to be translated in the
x-direction to get it to the origin.
- "rotation_angle" is the angle (in degrees) by which the airfoil had to be rotated (CCW).
Returns: Depending on the value of `return_dict`, either:
- A copy of the airfoil with the new coordinates (default), or
- A dictionary with keys "airfoil", "x_translation", "y_translation", "scale_factor", and "rotation_angle".
See the documentation for the `return_dict` argument above for more information.
"""
### Step 1: Translate so that the LE point is at (0, 0).
x_te = (self.x()[0] + self.x()[-1]) / 2
y_te = (self.y()[0] + self.y()[-1]) / 2
distance_to_te = (
(self.x() - x_te) ** 2 +
(self.y() - y_te) ** 2
) ** 0.5
le_index = np.argmax(distance_to_te)
x_translation = -self.x()[le_index]
y_translation = -self.y()[le_index]
newfoil = self.translate(
translate_x=x_translation,
translate_y=y_translation,
)
### Step 2: Scale so that the chord length is 1.
scale_factor = 1 / distance_to_te[le_index]
newfoil = newfoil.scale(
scale_x=scale_factor,
scale_y=scale_factor,
)
### Step 3: Rotate so that the trailing edge is at (1, 0).
x_te = (newfoil.x()[0] + newfoil.x()[-1]) / 2
y_te = (newfoil.y()[0] + newfoil.y()[-1]) / 2
rotation_angle = -np.arctan2(y_te, x_te)
newfoil = newfoil.rotate(
angle=rotation_angle,
)
if not return_dict:
return newfoil
else:
return {
"airfoil" : newfoil,
"x_translation" : x_translation,
"y_translation" : y_translation,
"scale_factor" : scale_factor,
"rotation_angle": np.degrees(rotation_angle),
}
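# Usage sketch for normalize(): recovering a unit-chord airfoil (LE at the origin, TE at (1, 0)) from a
# translated/scaled/rotated one. (Illustrative only; assumes AeroSandbox is importable as `asb`.)
#
#     >>> import aerosandbox as asb
#     >>> import aerosandbox.numpy as np
#     >>> raw = asb.Airfoil("naca4412").scale(2, 2).rotate(np.radians(5)).translate(0.3, -0.1)
#     >>> res = raw.normalize(return_dict=True)
#     >>> res["airfoil"]        # LE at (0, 0), TE at (1, 0), chord of 1
#     >>> res["scale_factor"]   # ~0.5, since the raw airfoil above has a chord of ~2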
def add_control_surface(
self,
deflection: float = 0.,
hinge_point_x: float = 0.75,
modify_coordinates: bool = True,
modify_polars: bool = True,
) -> 'Airfoil':
"""
Returns a version of the airfoil with a trailing-edge control surface added at a given point. Implicitly
repanels the airfoil as part of this operation.
Args:
deflection: Deflection angle [degrees]. Downwards-positive.
hinge_point_x: Chordwise location of the hinge, as a fraction of chord (x/c) [float]
modify_coordinates: Should we modify the airfoil coordinates to reflect the deflection? [boolean]
modify_polars: Should we modify the airfoil polar functions (CL/CD/CM) to reflect the deflection? [boolean]
Returns: an Airfoil object with the new control deflection.
"""
if modify_coordinates:
# Find the hinge point
hinge_point_y = np.where(
deflection > 0,
self.local_camber(hinge_point_x) - self.local_thickness(hinge_point_x) / 2,
self.local_camber(hinge_point_x) + self.local_thickness(hinge_point_x) / 2,
)
# hinge_point_y = self.local_camber(hinge_point_x)
hinge_point = np.reshape(
np.array([hinge_point_x, hinge_point_y]),
(1, 2)
)
def is_behind_hinge(xy: np.ndarray) -> np.ndarray:
return (
(xy[:, 0] - hinge_point_x) * np.cosd(deflection / 2) -
(xy[:, 1] - hinge_point_y) * np.sind(deflection / 2)
> 0
)
orig_u = self.upper_coordinates()
orig_l = self.lower_coordinates()[1:, :]
rotation_matrix = np.rotation_matrix_2D(
angle=-np.radians(deflection),
)
def T(xy):
return np.transpose(xy)
hinge_point_u = np.tile(hinge_point, (np.length(orig_u), 1))
hinge_point_l = np.tile(hinge_point, (np.length(orig_l), 1))
rot_u = T(rotation_matrix @ T(orig_u - hinge_point_u)) + hinge_point_u
rot_l = T(rotation_matrix @ T(orig_l - hinge_point_l)) + hinge_point_l
coordinates_x = np.concatenate([
np.where(
is_behind_hinge(rot_u),
rot_u[:, 0],
orig_u[:, 0]
),
np.where(
is_behind_hinge(rot_l),
rot_l[:, 0],
orig_l[:, 0]
)
])
coordinates_y = np.concatenate([
np.where(
is_behind_hinge(rot_u),
rot_u[:, 1],
orig_u[:, 1]
),
np.where(
is_behind_hinge(rot_l),
rot_l[:, 1],
orig_l[:, 1]
)
])
coordinates = np.stack([
coordinates_x,
coordinates_y
], axis=1)
else:
coordinates = self.coordinates
if modify_polars:
effectiveness = 1 - np.maximum(0, hinge_point_x + 1e-16) ** 2.751428551177291
dalpha = deflection * effectiveness
def CL_function(alpha: float, Re: float, mach: float) -> float:
return self.CL_function(
alpha=alpha + dalpha,
Re=Re,
mach=mach,
)
def CD_function(alpha: float, Re: float, mach: float) -> float:
return self.CD_function(
alpha=alpha + dalpha,
Re=Re,
mach=mach,
)
def CM_function(alpha: float, Re: float, mach: float) -> float:
return self.CM_function(
alpha=alpha + dalpha,
Re=Re,
mach=mach,
)
else:
CL_function = self.CL_function
CD_function = self.CD_function
CM_function = self.CM_function
return Airfoil(
name=self.name,
coordinates=coordinates,
CL_function=CL_function,
CD_function=CD_function,
CM_function=CM_function,
)
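# Usage sketch for add_control_surface(): deflecting a plain flap hinged at 75% chord.
# (Illustrative only; assumes AeroSandbox is importable as `asb`.)
#
#     >>> import aerosandbox as asb
#     >>> af = asb.Airfoil("naca2412")
#     >>> af_flapped = af.add_control_surface(
#     >>>     deflection=10,       # degrees, trailing-edge-down positive
#     >>>     hinge_point_x=0.75,  # hinge at 75% chord
#     >>> )
#     >>> af_flapped.draw()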
def set_TE_thickness(self,
thickness: float = 0.,
) -> 'Airfoil':
"""
Creates a modified copy of the Airfoil that has a specified trailing-edge thickness.
Note that the trailing-edge thickness is given nondimensionally (e.g., as a fraction of chord).
Args:
thickness: The target trailing-edge thickness, given nondimensionally (e.g., as a fraction of chord).
Returns: The modified airfoil.
"""
### Compute existing trailing-edge properties
x_gap = self.coordinates[0, 0] - self.coordinates[-1, 0]
y_gap = self.coordinates[0, 1] - self.coordinates[-1, 1]
s_gap = (
x_gap ** 2 +
y_gap ** 2
) ** 0.5
s_adjustment = (thickness - self.TE_thickness()) / 2
### Determine how much the trailing edge should move by in X and Y.
if s_gap != 0:
x_adjustment = s_adjustment * x_gap / s_gap
y_adjustment = s_adjustment * y_gap / s_gap
else:
x_adjustment = 0
y_adjustment = s_adjustment
### Decompose the existing airfoil coordinates to upper and lower sides, and x and y.
u = self.upper_coordinates()
ux = u[:, 0]
uy = u[:, 1]
le_x = ux[-1]
l = self.lower_coordinates()[1:]
lx = l[:, 0]
ly = l[:, 1]
te_x = (ux[0] + lx[-1]) / 2
### Create modified versions of the upper and lower coordinates
new_u = np.stack(
arrays=[
ux + x_adjustment * (ux - le_x) / (te_x - le_x),
uy + y_adjustment * (ux - le_x) / (te_x - le_x)
],
axis=1
)
new_l = np.stack(
arrays=[
lx - x_adjustment * (lx - le_x) / (te_x - le_x),
ly - y_adjustment * (lx - le_x) / (te_x - le_x)
],
axis=1
)
### If the desired thickness is zero, ensure that it is precisely reached.
if thickness == 0:
new_l[-1] = new_u[0]
### Combine the upper and lower surface coordinates into a single array.
new_coordinates = np.concatenate(
[
new_u,
new_l
],
axis=0
)
### Return a new Airfoil with the desired coordinates.
return Airfoil(
name=self.name,
coordinates=new_coordinates
)
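# Usage sketch for set_TE_thickness(): opening up a finite trailing-edge gap (e.g., for manufacturability).
# (Illustrative only; assumes AeroSandbox is importable as `asb`.)
#
#     >>> import aerosandbox as asb
#     >>> af = asb.Airfoil("dae11")
#     >>> af_blunt = af.set_TE_thickness(thickness=0.002)  # 0.2% of chord
#     >>> af_blunt.TE_thickness()  # ~0.002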
def scale(self,
scale_x: float = 1.,
scale_y: float = 1.,
) -> 'Airfoil':
"""
Scales an Airfoil about the origin.
Args:
scale_x: Amount to scale in the x-direction.
scale_y: Amount to scale in the y-direction.
Returns: The scaled Airfoil.
"""
x = self.x() * scale_x
y = self.y() * scale_y
if scale_y < 0:
x = x[::-1]
y = y[::-1]
return Airfoil(
name=self.name,
coordinates=np.stack((x, y), axis=1)
)
def translate(self,
translate_x: float = 0.,
translate_y: float = 0.,
) -> 'Airfoil':
"""
Translates an Airfoil by a given amount.
Args:
translate_x: Amount to translate in the x-direction
translate_y: Amount to translate in the y-direction
Returns: The translated Airfoil.
"""
x = self.x() + translate_x
y = self.y() + translate_y
return Airfoil(
name=self.name,
coordinates=np.stack((x, y), axis=1)
)
def rotate(self,
angle: float,
x_center: float = 0.,
y_center: float = 0.
) -> 'Airfoil':
"""
Rotates the airfoil counterclockwise by the specified angle, in radians.
Rotates about the point (x_center, y_center), which is (0, 0) by default.
Args:
angle: Angle to rotate, counterclockwise, in radians.
x_center: The x-coordinate of the center of rotation.
y_center: The y-coordinate of the center of rotation.
Returns: The rotated Airfoil.
"""
coordinates = np.copy(self.coordinates)
### Translate
translation = np.array([x_center, y_center])
coordinates -= translation
### Rotate
rotation_matrix = np.rotation_matrix_2D(
angle=angle,
)
coordinates = (rotation_matrix @ coordinates.T).T
### Translate
coordinates += translation
return Airfoil(
name=self.name,
coordinates=coordinates
)
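# Usage sketch for scale(), translate(), and rotate(), which chain since each returns a new Airfoil.
# (Illustrative only; assumes AeroSandbox is importable as `asb`.)
#
#     >>> import aerosandbox as asb
#     >>> import aerosandbox.numpy as np
#     >>> af = asb.Airfoil("naca0012")
#     >>> af_mod = (
#     >>>     af
#     >>>     .scale(scale_x=1.0, scale_y=1.2)   # Thicken by 20%
#     >>>     .rotate(angle=np.radians(-3))      # Rotate 3 degrees clockwise about the origin
#     >>>     .translate(translate_x=0.25)       # Shift aft by a quarter-chord
#     >>> )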
def blend_with_another_airfoil(self,
airfoil: "Airfoil",
blend_fraction: float = 0.5,
n_points_per_side: int = 100,
) -> "Airfoil":
"""
Blends this airfoil with another airfoil. Merges both the coordinates and the aerodynamic functions.
Args:
airfoil: The other airfoil to blend with.
blend_fraction: The fraction of the other airfoil to use when blending. Defaults to 0.5 (50%).
* A blend fraction of 0 will return an identical airfoil to this one (self).
* A blend fraction of 1 will return an identical airfoil to the other one (`airfoil` parameter).
n_points_per_side: The number of points per side to use when blending the coordinates of the two airfoils.
Returns: A new airfoil that is a blend of this airfoil and another one.
"""
foil_a = self.repanel(n_points_per_side=n_points_per_side)
foil_b = airfoil.repanel(n_points_per_side=n_points_per_side)
a_fraction = 1 - blend_fraction
b_fraction = blend_fraction
name = f"{a_fraction * 100:.0f}% {self.name}, {b_fraction * 100:.0f}% {airfoil.name}"
coordinates = (
a_fraction * foil_a.coordinates +
b_fraction * foil_b.coordinates
)
def CL_function(alpha, Re, mach):
return (
a_fraction * foil_a.CL_function(alpha, Re, mach) +
b_fraction * foil_b.CL_function(alpha, Re, mach)
)
def CD_function(alpha, Re, mach):
return (
a_fraction * foil_a.CD_function(alpha, Re, mach) +
b_fraction * foil_b.CD_function(alpha, Re, mach)
)
def CM_function(alpha, Re, mach):
return (
a_fraction * foil_a.CM_function(alpha, Re, mach) +
b_fraction * foil_b.CM_function(alpha, Re, mach)
)
return Airfoil(
name=name,
coordinates=coordinates,
CL_function=CL_function,
CD_function=CD_function,
CM_function=CM_function,
)
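# Usage sketch for blend_with_another_airfoil(): a 50/50 geometric blend of two sections.
# (Illustrative only; assumes AeroSandbox is importable as `asb`.)
#
#     >>> import aerosandbox as asb
#     >>> thin = asb.Airfoil("naca0008")
#     >>> thick = asb.Airfoil("naca0016")
#     >>> mid = thin.blend_with_another_airfoil(thick, blend_fraction=0.5)
#     >>> mid.max_thickness()  # ~0.12, halfway between 8% and 16% thick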
# def normalize(self):
# pass # TODO finish me
def write_dat(self,
filepath: Union[str, Path] = None,
include_name: bool = True,
) -> str:
"""
Writes a .dat file corresponding to this airfoil to a filepath.
Args:
filepath: filepath (including the filename and .dat extension) [string]
If None, this function returns the .dat file as a string.
include_name: Should the name be included in the .dat file? (In a standard *.dat file, it usually is.)
Returns: The contents of the .dat file, as a string. (If `filepath` is given, the file is also written to disk.)
"""
contents = []
if include_name:
contents += [self.name]
contents += ["%f %f" % tuple(coordinate) for coordinate in self.coordinates]
string = "\n".join(contents)
if filepath is not None:
with open(filepath, "w+") as f:
f.write(string)
return string
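# Usage sketch for write_dat(): exporting Selig-style .dat coordinates.
# (Illustrative only; assumes AeroSandbox is importable as `asb`.)
#
#     >>> import aerosandbox as asb
#     >>> af = asb.Airfoil("dae11")
#     >>> dat_text = af.write_dat()           # Returns the .dat contents as a string
#     >>> af.write_dat(filepath="dae11.dat")  # Also writes the file to disk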
# def get_xfoil_data(self,
# a_start=-6, # type: float
# a_end=12, # type: float
# a_step=0.5, # type: float
# a_init=0, # type: float
# Re_start=1e4, # type: float
# Re_end=1e7, # type: float
# n_Res=30, # type: int
# mach=0, # type: float
# max_iter=20, # type: int
# repanel=False, # type: bool
# parallel=True, # type: bool
# verbose=True, # type: bool
# ):
# """ # TODO finish docstring
# Calculates aerodynamic performance data for a particular airfoil with XFoil.
# Does a 2D grid sweep of the alpha-Reynolds space at a particular Mach number.
# Populates two new instance variables:
# * self.xfoil_data_1D: A dict of XFoil data at all calculated operating points (1D arrays, NaNs removed)
# * self.xfoil_data_2D: A dict of XFoil data at all calculated operating points (2D arrays, NaNs present)
# :param a_start: Lower bound of angle of attack [deg]
# :param a_end: Upper bound of angle of attack [deg]
# :param a_step: Angle of attack increment size [deg]
# :param a_init: Angle of attack to initialize runs at. Should solve easily (0 recommended) [deg]
# :param Re_start: Reynolds number to begin sweep at. [unitless]
# :param Re_end: Reynolds number to end sweep at. [unitless]
# :param n_Res: Number of Reynolds numbers to sweep. Points are log-spaced.
# :param mach: Mach number to sweep at.
# :param max_iter: Maximum number of XFoil iterations per op-point.
# :param repanel: Should we internally repanel the airfoil within XFoil before running? [boolean]
# Consider disabling this if you try to do optimization based on this data (for smoothness reasons).
# Otherwise, it's generally a good idea to leave this on.
# :param parallel: Should we run in parallel? Generally results in significant speedup, but might not run
# correctly on some machines. Disable this if it's a problem. [boolean]
# :param verbose: Should we do verbose output? [boolean]
# :return: self (in-place operation that creates self.xfoil_data_1D and self.xfoil_data_2D)
# """
# assert a_init > a_start
# assert a_init < a_end
# assert Re_start < Re_end
# assert n_Res >= 1
# assert mach >= 0
#
# Res = np.logspace(np.log10(Re_start), np.log10(Re_end), n_Res)
#
# def get_xfoil_data_at_Re(Re):
#
# import aerosandbox.numpy as np # needs to be imported here to support parallelization
#
# run_data_upper = self.xfoil_aseq(
# a_start=a_init + a_step,
# a_end=a_end,
# a_step=a_step,
# Re=Re,
# repanel=repanel,
# max_iter=max_iter,
# M=mach,
# reset_bls=True,
# )
# run_data_lower = self.xfoil_aseq(
# a_start=a_init,
# a_end=a_start,
# a_step=-a_step,
# Re=Re,
# repanel=repanel,
# max_iter=max_iter,
# M=mach,
# reset_bls=True,
# )
# run_data = {
# k: np.hstack((
# run_data_lower[k][::-1],
# run_data_upper[k]
# )) for k in run_data_upper.keys()
# }
# return run_data
#
# if verbose:
# print("Running XFoil sweeps on Airfoil %s..." % self.name)
# import time
# start_time = time.time()
#
# if not parallel:
# runs_data = [get_xfoil_data_at_Re(Re) for Re in Res]
# else:
# import multiprocess as mp
# pool = mp.Pool(mp.cpu_count())
# runs_data = pool.map(get_xfoil_data_at_Re, Res)
# pool.close()
#
# if verbose:
# run_time = time.time() - start_time
# print("XFoil Runtime: %.3f sec" % run_time)
#
# xfoil_data_2D = {}
# for k in runs_data[0].keys():
# xfoil_data_2D[k] = np.vstack([
# d[k]
# for d in runs_data
# ])
# xfoil_data_2D["Re"] = np.tile(Res, (
# xfoil_data_2D["alpha"].shape[1],
# 1
# )).T
# np.place(
# arr=xfoil_data_2D["Re"],
# mask=np.isnan(xfoil_data_2D["alpha"]),
# vals=np.nan
# )
# xfoil_data_2D["alpha_indices"] = np.arange(a_start, a_end + a_step / 2, a_step)
# xfoil_data_2D["Re_indices"] = Res
#
# self.xfoil_data_2D = xfoil_data_2D
#
# # 1-dimensionalize it and remove NaNs
# xfoil_data_1D = {
# k: remove_nans(xfoil_data_2D[k].reshape(-1))
# for k in xfoil_data_2D.keys()
# }
# self.xfoil_data_1D = xfoil_data_1D
#
# return self
#
# def has_xfoil_data(self, raise_exception_if_absent=True):
# """
# Runs a quick check to see if this airfoil has XFoil data.
# :param raise_exception_if_absent: Boolean flag to raise an Exception if XFoil data is not found.
# :return: Boolean of whether or not XFoil data is present.
# """
# data_present = (
# hasattr(self, 'xfoil_data_1D') and
# hasattr(self, 'xfoil_data_2D')
# )
# if not data_present and raise_exception_if_absent:
# raise Exception(
# """This Airfoil %s does not yet have XFoil data,
# so you can't run the function you've called.
# To get XFoil data, first call:
# Airfoil.get_xfoil_data()
# which will perform an in-place update that
# provides the data.""" % self.name
# )
# return data_present
#
# def plot_xfoil_data_contours(self):
# self.has_xfoil_data() # Ensure data is present.
# from matplotlib import colors
#
# d = self.xfoil_data_1D # data
#
# fig = plt.figure(figsize=(10, 8), dpi=200)
#
# ax = fig.add_subplot(311)
# coords = self.coordinates
# plt.plot(coords[:, 0], coords[:, 1], '.-', color='#280887')
# plt.xlabel(r"$x/c$")
# plt.ylabel(r"$y/c$")
# plt.title(r"XFoil Data for %s Airfoil" % self.name)
# plt.axis("equal")
#
# with plt.style.context("default"):
# ax = fig.add_subplot(323)
# x = d["Re"]
# y = d["alpha"]
# z = d["Cl"]
# levels = np.linspace(-0.5, 1.5, 21)
# norm = None
# CF = ax.tricontourf(x, y, z, levels=levels, norm=norm, cmap="plasma", extend="both")
# C = ax.tricontour(x, y, z, levels=levels, norm=norm, colors='k', extend="both", linewidths=0.5)
# cbar = plt.colorbar(CF, format='%.2f')
# cbar.set_label(r"$C_l$")
# plt.grid(False)
# plt.xlabel(r"$Re$")
# plt.ylabel(r"$\alpha$")
# plt.title(r"$C_l$ from $Re$, $\alpha$")
# ax.set_xscale('log')
#
# ax = fig.add_subplot(324)
# x = d["Re"]
# y = d["alpha"]
# z = d["Cd"]
# levels = np.logspace(-2.5, -1, 21)
# norm = colors.PowerNorm(gamma=1 / 2, vmin=np.min(levels), vmax=np.max(levels))
# CF = ax.tricontourf(x, y, z, levels=levels, norm=norm, cmap="plasma", extend="both")
# C = ax.tricontour(x, y, z, levels=levels, norm=norm, colors='k', extend="both", linewidths=0.5)
# cbar = plt.colorbar(CF, format='%.3f')
# cbar.set_label(r"$C_d$")
# plt.grid(False)
# plt.xlabel(r"$Re$")
# plt.ylabel(r"$\alpha$")
# plt.title(r"$C_d$ from $Re$, $\alpha$")
# ax.set_xscale('log')
#
# ax = fig.add_subplot(325)
# x = d["Re"]
# y = d["alpha"]
# z = d["Cl"] / d["Cd"]
# x = x[d["alpha"] >= 0]
# y = y[d["alpha"] >= 0]
# z = z[d["alpha"] >= 0]
# levels = np.logspace(1, np.log10(150), 21)
# norm = colors.PowerNorm(gamma=1 / 2, vmin=np.min(levels), vmax=np.max(levels))
# CF = ax.tricontourf(x, y, z, levels=levels, norm=norm, cmap="plasma", extend="both")
# C = ax.tricontour(x, y, z, levels=levels, norm=norm, colors='k', extend="both", linewidths=0.5)
# cbar = plt.colorbar(CF, format='%.1f')
# cbar.set_label(r"$L/D$")
# plt.grid(False)
# plt.xlabel(r"$Re$")
# plt.ylabel(r"$\alpha$")
# plt.title(r"$L/D$ from $Re$, $\alpha$")
# ax.set_xscale('log')
#
# ax = fig.add_subplot(326)
# x = d["Re"]
# y = d["alpha"]
# z = d["Cm"]
# levels = np.linspace(-0.15, 0, 21) # np.logspace(1, np.log10(150), 21)
# norm = None # colors.PowerNorm(gamma=1 / 2, vmin=np.min(levels), vmax=np.max(levels))
# CF = ax.tricontourf(x, y, z, levels=levels, norm=norm, cmap="plasma", extend="both")
# C = ax.tricontour(x, y, z, levels=levels, norm=norm, colors='k', extend="both", linewidths=0.5)
# cbar = plt.colorbar(CF, format='%.2f')
# cbar.set_label(r"$C_m$")
# plt.grid(False)
# plt.xlabel(r"$Re$")
# plt.ylabel(r"$\alpha$")
# plt.title(r"$C_m$ from $Re$, $\alpha$")
# ax.set_xscale('log')
#
# plt.tight_layout()
# plt.show()
#
# return self
#
# def plot_xfoil_data_all_polars(self,
# n_lines_max=20,
# Cd_plot_max=0.04,
# ):
# """
# Plots the existing XFoil data found by running self.get_xfoil_data().
# :param n_lines_max: Maximum number of Reynolds numbers to plot. Useful if you ran a sweep with tons of Reynolds numbers.
# :param Cd_plot_max: Upper limit of Cd to plot [float]
# :return: self (makes plot)
# """
#
# self.has_xfoil_data() # Ensure data is present.
#
# n_lines_max = min(n_lines_max, len(self.xfoil_data_2D["Re_indices"]))
#
# fig, ax = plt.subplots(1, 1, figsize=(7, 6), dpi=200)
# indices = np.array(
# np.round(np.linspace(0, len(self.xfoil_data_2D["Re_indices"]) - 1, n_lines_max)),
# dtype=int
# )
# indices_worth_plotting = [
# np.min(remove_nans(self.xfoil_data_2D["Cd"][index, :])) < Cd_plot_max
# for index in indices
# ]
# indices = indices[indices_worth_plotting]
#
# colors = plt.cm.rainbow(np.linspace(0, 1, len(indices)))[::-1]
# for i, Re in enumerate(self.xfoil_data_2D["Re_indices"][indices]):
# Cds = remove_nans(self.xfoil_data_2D["Cd"][indices[i], :])
# Cls = remove_nans(self.xfoil_data_2D["Cl"][indices[i], :])
# Cd_min = np.min(Cds)
# if Cd_min < Cd_plot_max:
# plt.plot(
# Cds * 1e4,
# Cls,
# label="Re = %s" % eng_string(Re),
# color=colors[i],
# )
# plt.xlim(0, Cd_plot_max * 1e4)
# plt.ylim(0, 2)
# plt.xlabel(r"$C_d \cdot 10^4$")
# plt.ylabel(r"$C_l$")
# plt.title("XFoil Polars for %s Airfoil" % self.name)
# plt.tight_layout()
# plt.legend()
# plt.show()
#
# return self
#
# def plot_xfoil_data_polar(self,
# Res, # type: list
# Cd_plot_max=0.04,
# repanel=False,
# parallel=True,
# max_iter=40,
# verbose=True,
# ):
# """
# Plots CL-CD polar for a single Reynolds number or a variety of Reynolds numbers.
# :param Res: Reynolds number to plot polars at. Either a single float or an iterable (list, 1D ndarray, etc.)
# :param Cd_plot_max: Upper limit of Cd to plot [float]
# :param cl_step: Cl increment for XFoil runs. Trades speed vs. plot resolution. [float]
# :param repanel: Should we repanel the airfoil within XFoil? [boolean]
# :param parallel: Should we run different Res in parallel? [boolean]
# :param max_iter: Maximum number of iterations for XFoil to run. [int]
# :param verbose: Should we print information as we run the sweeps? [boolean]
# :return: self (makes plot)
# """
#
# try: # If it's not an iterable, make it one.
# Res[0]
# except TypeError:
# Res = [Res]
#
# fig, ax = plt.subplots(1, 1, figsize=(7, 6), dpi=200)
# colors = plt.cm.rainbow(np.linspace(0, 1, len(Res)))[::-1]
#
# def get_xfoil_data_at_Re(Re):
#
# xfoil_data = self.xfoil_aseq(
# a_start=0,
# a_end=15,
# a_step=0.25,
# Re=Re,
# M=0,
# reset_bls=True,
# repanel=repanel,
# max_iter=max_iter,
# verbose=False,
# )
# Cd = remove_nans(xfoil_data["Cd"])
# Cl = remove_nans(xfoil_data["Cl"])
# return {"Cl": Cl, "Cd": Cd}
#
# if verbose:
# print("Running XFoil sweeps...")
# import time
# start_time = time.time()
#
# if not parallel:
# runs_data = [get_xfoil_data_at_Re(Re) for Re in Res]
# else:
# import multiprocess as mp
# pool = mp.Pool(mp.cpu_count())
# runs_data = pool.map(get_xfoil_data_at_Re, Res)
# pool.close()
#
# if verbose:
# run_time = time.time() - start_time
# print("XFoil Runtime: %.3f sec" % run_time)
#
# for i, Re in enumerate(Res):
# plt.plot(
# runs_data[i]["Cd"] * 1e4,
# runs_data[i]["Cl"],
# label="Re = %s" % eng_string(Re),
# color=colors[i],
# )
# plt.xlim(0, Cd_plot_max * 1e4)
# plt.ylim(0, 2)
# plt.xlabel(r"$C_d \cdot 10^4$")
# plt.ylabel(r"$C_l$")
# plt.title("XFoil Polars for %s Airfoil" % self.name)
# plt.tight_layout()
# plt.legend()
# plt.show()
#
# return self
if __name__ == '__main__':
af = Airfoil("dae11")
import matplotlib.pyplot as plt
import aerosandbox.tools.pretty_plots as p
fig, ax = plt.subplots(4, 2, figsize=(6.4, 6.4), dpi=200)
alpha = np.linspace(-90, 90, 500)
sizes = ["xxsmall", "xsmall", "small", "medium", "large", "xlarge", "xxlarge", "xxxlarge"]
colors = plt.cm.rainbow(np.linspace(0, 1, len(sizes)))[::-1]
for i, ms in enumerate(sizes):
aero = af.get_aero_from_neuralfoil(
alpha=alpha,
Re=1e6,
mach=0.3,
model_size=ms,
)
kwargs = dict(
alpha=0.5,
color=colors[i],
)
for a, key in zip(ax.T.flatten(), ["CL", "CD", "CM", "Cpmin", "mach_crit", "Top_Xtr", "Bot_Xtr", "Cpmin_0"]):
a.plot(alpha, aero[key], **kwargs)
if key == "CD":
a.set_yscale('log')
a.set_ylabel(key)
p.show_plot()
# af.draw()
# af.generate_polars(
# alphas=np.linspace(-10, 15, 61),
# )
# af.plot_polars(
# Res=np.geomspace(1e4, 1e6, 6)
# )
# ================================================================================
# End of file: aerosandbox/geometry/airfoil/airfoil.py (AeroSandbox-4.1.1)
# ================================================================================
import aerosandbox.numpy as np
from aerosandbox.geometry.airfoil.airfoil import Airfoil
from aerosandbox.geometry.airfoil.airfoil_families import get_kulfan_parameters
from aerosandbox.modeling.splines.hermite import linear_hermite_patch, cubic_hermite_patch
from typing import Union, Dict, List
class KulfanAirfoil(Airfoil):
def __init__(self,
name: str = "Untitled",
lower_weights: np.ndarray = None,
upper_weights: np.ndarray = None,
leading_edge_weight: float = 0.,
TE_thickness: float = 0.,
N1: float = 0.5,
N2: float = 1.0,
):
### Handle the airfoil name
self.name = name
### Check to see if the airfoil is a "known" airfoil, based on its name.
try:
coordinate_airfoil = Airfoil(name)
except UserWarning:
pass
if (
lower_weights is None and
upper_weights is None
): # Try to fall back on parameters from the coordinate airfoil, if it's something from the UIUC database
try:
coordinate_airfoil = Airfoil(name)
if coordinate_airfoil.coordinates is None:
raise UserWarning
parameters = get_kulfan_parameters(
coordinates=coordinate_airfoil.coordinates,
n_weights_per_side=8,
N1=N1,
N2=N2,
normalize_coordinates=True,
use_leading_edge_modification=True,
)
lower_weights = parameters["lower_weights"]
upper_weights = parameters["upper_weights"]
leading_edge_weight = parameters["leading_edge_weight"]
TE_thickness = parameters["TE_thickness"]
except UserWarning:
raise ValueError("You must either:\n"
"\t* Specify both `lower_weights` and `upper_weights`, at minimum"
"\t* Give an airfoil `name` corresponding to an airfoil in the UIUC database, or a NACA airfoil.")
### Handle the Kulfan parameters
self.lower_weights = lower_weights
self.upper_weights = upper_weights
self.leading_edge_weight = leading_edge_weight
self.TE_thickness = TE_thickness
self.N1 = N1
self.N2 = N2
def __repr__(self) -> str:
return f"Airfoil {self.name} (Kulfan / CST parameterization)"
def __eq__(self, other: "KulfanAirfoil") -> bool:
if other is self: # If they're the same object in memory, they're equal
return True
if not type(self) == type(other): # If the types are different, they're not equal
return False
# At this point, we know that the types are the same, so we can compare the attributes
return all([
self.name == other.name,
np.allclose(self.lower_weights, other.lower_weights),
np.allclose(self.upper_weights, other.upper_weights),
np.allclose(self.leading_edge_weight, other.leading_edge_weight),
np.allclose(self.TE_thickness, other.TE_thickness),
np.allclose(self.N1, other.N1),
np.allclose(self.N2, other.N2),
])
@property
def kulfan_parameters(self):
return {
"lower_weights" : self.lower_weights,
"upper_weights" : self.upper_weights,
"leading_edge_weight": self.leading_edge_weight,
"TE_thickness" : self.TE_thickness,
}
@property
def coordinates(self) -> np.ndarray:
return self.to_airfoil().coordinates
@coordinates.setter
def coordinates(self, value):
raise TypeError("The coordinates of a `KulfanAirfoil` can't be modified directly, "
"as they're a function of the Kulfan parameters.\n"
"Instead, you can either modify the Kulfan parameters directly, or use the "
"more general (coordinate-parameterized) `asb.Airfoil` class.")
def to_airfoil(self,
n_coordinates_per_side=200
) -> Airfoil:
x_upper = np.cosspace(1, 0, n_coordinates_per_side)[:-1]
y_upper = self.upper_coordinates(x_upper)
x_lower = np.cosspace(0, 1, n_coordinates_per_side)
y_lower = self.lower_coordinates(x_lower)
return Airfoil(
name=self.name,
coordinates=np.concatenate([
np.stack([x_upper, y_upper], axis=1),
np.stack([x_lower, y_lower], axis=1)
], axis=0)
)
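# Usage sketch for round-tripping between the coordinate and Kulfan (CST) parameterizations.
# (Illustrative only; assumes AeroSandbox is importable as `asb`.)
#
#     >>> import aerosandbox as asb
#     >>> kaf = asb.Airfoil("naca2412").to_kulfan_airfoil()  # Coordinate airfoil -> CST (Kulfan) airfoil
#     >>> af = kaf.to_airfoil()                              # ...and back to a coordinate-parameterized Airfoil
#     >>> kaf.kulfan_parameters                              # The fitted lower/upper weights, LE weight, TE thickness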
def draw(self,
*args,
draw_markers=False,
**kwargs
):
return self.to_airfoil().draw(
*args,
draw_markers=draw_markers,
**kwargs
)
def get_aero_from_neuralfoil(self,
alpha: Union[float, np.ndarray],
Re: Union[float, np.ndarray],
mach: Union[float, np.ndarray] = 0.,
model_size: str = "large",
control_surfaces: List["ControlSurface"] = None,
include_360_deg_effects: bool = True,
) -> Dict[str, Union[float, np.ndarray]]:
### Validate inputs
if (
(np.length(self.lower_weights) != 8) or
(np.length(self.upper_weights) != 8)
):
raise ValueError("NeuralFoil is only trained to handle exactly 8 CST coefficients per side.")
if (
self.N1 != 0.5 or
self.N2 != 1.0
):
raise ValueError("NeuralFoil is only trained to handle airfoils with N1 = 0.5 and N2 = 1.0.")
### Set up inputs
if control_surfaces is None:
control_surfaces = []
alpha = np.mod(alpha + 180, 360) - 180 # Enforce periodicity of alpha
##### Evaluate the control surfaces of the airfoil
effective_d_alpha = 0.
effective_CD_multiplier_from_control_surfaces = 1.
for surf in control_surfaces:
effectiveness = 1 - np.maximum(0, surf.hinge_point + 1e-16) ** 2.751428551177291
# From XFoil-based study at `/AeroSandbox/studies/ControlSurfaceEffectiveness/`
effective_d_alpha += surf.deflection * effectiveness
effective_CD_multiplier_from_control_surfaces *= (
2 + (surf.deflection / 11.5) ** 2 - (1 + (surf.deflection / 11.5) ** 2) ** 0.5
)
# From fit to wind tunnel data from Hoerner, "Fluid Dynamic Drag", 1965. Page 13-13, Figure 32,
# "Variation of section drag coefficient of a horizontal tail surface at constant C_L"
##### Use NeuralFoil to evaluate the incompressible aerodynamics of the airfoil
import neuralfoil as nf
nf_aero = nf.get_aero_from_kulfan_parameters(
kulfan_parameters=dict(
lower_weights=self.lower_weights,
upper_weights=self.upper_weights,
leading_edge_weight=self.leading_edge_weight,
TE_thickness=self.TE_thickness,
),
alpha=alpha + effective_d_alpha,
Re=Re,
model_size=model_size
)
CL = nf_aero["CL"]
CD = nf_aero["CD"] * effective_CD_multiplier_from_control_surfaces
CM = nf_aero["CM"]
Cpmin_0 = nf_aero["Cpmin"]
Top_Xtr = nf_aero["Top_Xtr"]
Bot_Xtr = nf_aero["Bot_Xtr"]
##### Extend aerodynamic data to 360 degrees (post-stall) using wind tunnel behavior here.
if include_360_deg_effects:
from aerosandbox.aerodynamics.aero_2D.airfoil_polar_functions import airfoil_coefficients_post_stall
CL_if_separated, CD_if_separated, CM_if_separated = airfoil_coefficients_post_stall(
airfoil=self,
alpha=alpha
)
import aerosandbox.library.aerodynamics as lib_aero
# These values are set relatively high because NeuralFoil extrapolates quite well past stall
alpha_stall_positive = 20
alpha_stall_negative = -20
# This will be an input to a tanh() sigmoid blend via asb.numpy.blend(), so a value of 1 means the flow is
# ~90% separated, and a value of -1 means the flow is ~90% attached.
is_separated = np.softmax(
alpha - alpha_stall_positive,
alpha_stall_negative - alpha
) / 3
CL = np.blend(
is_separated,
CL_if_separated,
CL
)
CD = np.exp(np.blend(
is_separated,
np.log(CD_if_separated + lib_aero.Cf_flat_plate(Re_L=Re, method="turbulent")),
np.log(CD)
))
CM = np.blend(
is_separated,
CM_if_separated,
CM
)
"""
Separated Cpmin_0 model is a very rough fit to Figure 3 of:
Shademan & Naghib-Lahouti, "Effects of aspect ratio and inclination angle on aerodynamic loads of a flat
plate", Advances in Aerodynamics.
https://www.researchgate.net/publication/342316140_Effects_of_aspect_ratio_and_inclination_angle_on_aerodynamic_loads_of_a_flat_plate
"""
Cpmin_0 = np.blend(
is_separated,
-1 - 0.5 * np.sind(alpha) ** 2,
Cpmin_0
)
Top_Xtr = np.blend(
is_separated,
0.5 - 0.5 * np.tanh(10 * np.sind(alpha)),
Top_Xtr
)
Bot_Xtr = np.blend(
is_separated,
0.5 + 0.5 * np.tanh(10 * np.sind(alpha)),
Bot_Xtr
)
###### Add compressibility effects
### Step 1: compute mach_crit, the critical Mach number
"""
Below is a function that computes the critical Mach number from the incompressible Cp_min.
It's based on a Laitone-rule compressibility correction (similar to Prandtl-Glauert or Karman-Tsien,
but higher order), together with the Cp_sonic relation. When the Laitone-rule Cp equals Cp_sonic, we have reached
the critical Mach number.
This approach does not admit explicit solution for the Cp0 -> M_crit relation, so we instead regress a
relationship out using symbolic regression. In effect, this is a curve fit to synthetic data.
See fits at: /AeroSandbox/studies/MachFitting/CriticalMach/
"""
Cpmin_0 = np.softmin(
Cpmin_0,
0,
softness=0.001
)
mach_crit = (
1.011571026701678
- Cpmin_0
+ 0.6582431351007195 * (-Cpmin_0) ** 0.6724789439840343
) ** -0.5504677038358711
mach_dd = mach_crit + (0.1 / 80) ** (1 / 3) # drag divergence Mach number
# Relation taken from W.H. Mason's Korn Equation
### Step 2: adjust CL, CD, CM, Cpmin by compressibility effects
gamma = 1.4 # Ratio of specific heats, 1.4 for air (mostly diatomic nitrogen and oxygen)
beta_squared_ideal = 1 - mach ** 2
beta = np.softmax(
beta_squared_ideal,
-beta_squared_ideal,
softness=0.5 # Empirically tuned to data
) ** 0.5
CL = CL / beta
# CD = CD / beta
CM = CM / beta
# Prandtl-Glauert
Cpmin = Cpmin_0 / beta
# Karman-Tsien
# Cpmin = Cpmin_0 / (
# beta
# + mach ** 2 / (1 + beta) * (Cpmin_0 / 2)
# )
# Laitone's rule
# Cpmin = Cpmin_0 / (
# beta
# + (mach ** 2) * (1 + (gamma - 1) / 2 * mach ** 2) / (1 + beta) * (Cpmin_0 / 2)
# )
### Step 3: modify CL based on buffet and supersonic considerations
# Accounts approximately for the lift drop due to buffet.
buffet_factor = np.blend(
50 * (mach - (mach_dd + 0.04)), # Tuned to RANS CFD data empirically
np.blend(
(mach - 1) / 0.1,
1,
0.5
),
1,
)
# Accounts for the fact that theoretical CL_alpha goes from 2 * pi (subsonic) to 4 (supersonic),
# following linearized supersonic flow on a thin airfoil.
cla_supersonic_ratio_factor = np.blend(
(mach - 1) / 0.1,
4 / (2 * np.pi),
1,
)
CL = CL * buffet_factor * cla_supersonic_ratio_factor
# Step 4: Account for wave drag
t_over_c = self.max_thickness()
CD_wave = np.where(
mach < mach_crit,
0,
np.where(
mach < mach_dd,
20 * (mach - mach_crit) ** 4,
np.where(
mach < 0.97,
cubic_hermite_patch(
mach,
x_a=mach_dd,
x_b=0.97,
f_a=20 * (0.1 / 80) ** (4 / 3),
f_b=0.8 * t_over_c,
dfdx_a=0.1,
dfdx_b=0.8 * t_over_c * 8
),
np.where(
mach < 1.1,
cubic_hermite_patch(
mach,
x_a=0.97,
x_b=1.1,
f_a=0.8 * t_over_c,
f_b=0.8 * t_over_c,
dfdx_a=0.8 * t_over_c * 8,
dfdx_b=-0.8 * t_over_c * 8,
),
np.blend(
8 * 2 * (mach - 1.1) / (1.2 - 0.8),
0.8 * 0.8 * t_over_c,
1.2 * 0.8 * t_over_c,
)
)
)
)
)
CD = CD + CD_wave
# Step 5: If beyond M_crit or if separated, move the airfoil aerodynamic center back to x/c = 0.5 (Mach tuck)
has_aerodynamic_center_shift = (mach - (mach_dd + 0.06)) / 0.06
if include_360_deg_effects:
has_aerodynamic_center_shift = np.softmax(
is_separated,
has_aerodynamic_center_shift,
softness=0.1
)
CM = CM + np.blend(
has_aerodynamic_center_shift,
-0.25 * np.cosd(alpha) * CL - 0.25 * np.sind(alpha) * CD,
0,
)
return {
"CL" : CL,
"CD" : CD,
"CM" : CM,
"Cpmin" : Cpmin,
"Top_Xtr" : Top_Xtr,
"Bot_Xtr" : Bot_Xtr,
"mach_crit": mach_crit,
"mach_dd" : mach_dd,
"Cpmin_0" : Cpmin_0,
}
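# Usage sketch for get_aero_from_neuralfoil() on a KulfanAirfoil, sweeping Mach to expose the
# compressibility outputs. (Illustrative only; assumes AeroSandbox is importable as `asb` and NeuralFoil is installed.)
#
#     >>> import aerosandbox as asb
#     >>> import aerosandbox.numpy as np
#     >>> kaf = asb.Airfoil("dae11").to_kulfan_airfoil()
#     >>> aero = kaf.get_aero_from_neuralfoil(alpha=2, Re=5e6, mach=np.linspace(0, 0.9, 91))
#     >>> aero["mach_crit"], aero["mach_dd"]  # Critical and drag-divergence Mach numbers
#     >>> aero["CD"]                          # Includes the wave-drag buildup above mach_crit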
def upper_coordinates(self,
x_over_c: Union[float, np.ndarray] = np.linspace(0, 1, 101)[::-1],
) -> np.ndarray:
# Class function
C = (x_over_c) ** self.N1 * (1 - x_over_c) ** self.N2
from scipy.special import comb
def shape_function(w):
# Shape function (Bernstein polynomials)
N = np.length(w) - 1 # Order of Bernstein polynomials
K = comb(N, np.arange(N + 1)) # Bernstein polynomial coefficients
dims = (np.length(w), np.length(x_over_c))
def wide(vector):
return np.tile(np.reshape(vector, (1, dims[1])), (dims[0], 1))
def tall(vector):
return np.tile(np.reshape(vector, (dims[0], 1)), (1, dims[1]))
S_matrix = (
tall(K) * wide(x_over_c) ** tall(np.arange(N + 1)) *
wide(1 - x_over_c) ** tall(N - np.arange(N + 1))
) # Bernstein polynomial coefficients * weight matrix
S_x = np.sum(tall(w) * S_matrix, axis=0)
# Calculate y output
y = C * S_x
return y
y_upper = shape_function(self.upper_weights)
# Add trailing-edge (TE) thickness
y_upper += x_over_c * self.TE_thickness / 2
# Add Kulfan's leading-edge-modification (LEM)
y_upper += self.leading_edge_weight * (x_over_c) * (1 - x_over_c) ** (np.length(self.upper_weights) + 0.5)
return y_upper
def lower_coordinates(self,
x_over_c: Union[float, np.ndarray] = np.linspace(0, 1, 101),
) -> np.ndarray:
# Class function
C = (x_over_c) ** self.N1 * (1 - x_over_c) ** self.N2
from scipy.special import comb
def shape_function(w):
# Shape function (Bernstein polynomials)
N = np.length(w) - 1 # Order of Bernstein polynomials
K = comb(N, np.arange(N + 1)) # Bernstein polynomial coefficients
dims = (np.length(w), np.length(x_over_c))
def wide(vector):
return np.tile(np.reshape(vector, (1, dims[1])), (dims[0], 1))
def tall(vector):
return np.tile(np.reshape(vector, (dims[0], 1)), (1, dims[1]))
S_matrix = (
tall(K) * wide(x_over_c) ** tall(np.arange(N + 1)) *
wide(1 - x_over_c) ** tall(N - np.arange(N + 1))
) # Bernstein polynomial coefficients * weight matrix
S_x = np.sum(tall(w) * S_matrix, axis=0)
# Calculate y output
y = C * S_x
return y
y_lower = shape_function(self.lower_weights)
# Add trailing-edge (TE) thickness
y_lower -= x_over_c * self.TE_thickness / 2
# Add Kulfan's leading-edge-modification (LEM)
y_lower += self.leading_edge_weight * (x_over_c) * (1 - x_over_c) ** (np.length(self.lower_weights) + 0.5)
return y_lower
def local_camber(self,
x_over_c: Union[float, np.ndarray] = np.linspace(0, 1, 101),
) -> Union[float, np.ndarray]:
y_upper = self.upper_coordinates(x_over_c=x_over_c)
y_lower = self.lower_coordinates(x_over_c=x_over_c)
return (y_upper + y_lower) / 2
def local_thickness(self,
x_over_c: Union[float, np.ndarray] = np.linspace(0, 1, 101),
) -> Union[float, np.ndarray]:
y_upper = self.upper_coordinates(x_over_c=x_over_c)
y_lower = self.lower_coordinates(x_over_c=x_over_c)
return (y_upper - y_lower)
def TE_angle(self):
return np.degrees(
np.arctan(self.upper_weights[-1]) -
np.arctan(self.lower_weights[-1]) +
np.arctan(self.TE_thickness)
)
def area(self):
def get_area_of_side(weights):
from scipy.special import beta, comb
N = np.length(weights) - 1
i = np.arange(N + 1)
area_of_each_mode = comb(N, i) * beta(
self.N1 + i + 1,
self.N2 + N - i + 1
)
return np.sum(
area_of_each_mode * weights
)
return (
get_area_of_side(self.upper_weights) -
get_area_of_side(self.lower_weights) +
(self.TE_thickness / 2)
)
def blend_with_another_airfoil(self,
airfoil: Union["KulfanAirfoil", Airfoil],
blend_fraction: float = 0.5,
) -> "KulfanAirfoil":
if not isinstance(airfoil, KulfanAirfoil):
try:
airfoil = airfoil.to_kulfan_airfoil()
except AttributeError:
raise TypeError("The `airfoil` argument should be either a `KulfanAirfoil` or an `Airfoil`.\n"
f"You gave an object of type \"{type(airfoil)}\".")
foil_a = self
foil_b = airfoil
a_fraction = 1 - blend_fraction
b_fraction = blend_fraction
### Determine parameters for the blended airfoil
name = f"{a_fraction * 100:.0f}% {self.name}, {b_fraction * 100:.0f}% {airfoil.name}"
if not all([
foil_a.N1 == foil_b.N1,
foil_a.N2 == foil_b.N2,
]):
raise ValueError("In order to blend two airfoils, they must have the same N1 and N2 parameters.")
return KulfanAirfoil(
name=name,
lower_weights=a_fraction * foil_a.lower_weights + b_fraction * foil_b.lower_weights,
upper_weights=a_fraction * foil_a.upper_weights + b_fraction * foil_b.upper_weights,
leading_edge_weight=a_fraction * foil_a.leading_edge_weight + b_fraction * foil_b.leading_edge_weight,
TE_thickness=a_fraction * foil_a.TE_thickness + b_fraction * foil_b.TE_thickness,
N1=foil_a.N1,
N2=foil_a.N2,
)
# ================================================================================
# End of file: aerosandbox/geometry/airfoil/kulfan_airfoil.py (AeroSandbox-4.1.1)
# ================================================================================
import aerosandbox.numpy as np
from scipy.special import comb
import re
from typing import Union
import os
from typing import List, Optional, Dict
_default_n_points_per_side = 200
def get_NACA_coordinates(
name: str = None,
n_points_per_side: int = _default_n_points_per_side,
max_camber: float = None,
camber_loc: float = None,
thickness: float = None,
) -> np.ndarray:
"""
Returns the coordinates of a 4-series NACA airfoil.
Can EITHER specify `name`, or all three of `max_camber`, `camber_loc`, and `thickness` - not both.
Args:
Either:
* name: Name of the NACA airfoil, as a string (e.g., "naca2412")
Or:
* All three of:
max_camber: Maximum camber of the airfoil, as a fraction of chord (e.g., 0.02)
camber_loc: The location of maximum camber, as a fraction of chord (e.g., 0.40)
thickness: The maximum thickness of the airfoil, as a fraction of chord (e.g., 0.12)
n_points_per_side: Number of points per side of the airfoil (top/bottom).
Returns: The coordinates of the airfoil as a Nx2 ndarray [x, y]
"""
### Validate inputs
name_specified = name is not None
params_specified = [
(max_camber is not None),
(camber_loc is not None),
(thickness is not None)
]
if name_specified:
if any(params_specified):
raise ValueError(
"Cannot specify both `name` and (`max_camber`, `camber_loc`, `thickness`) parameters - must be one or the other.")
name = name.lower().strip()
if not "naca" in name:
raise ValueError("Not a NACA airfoil - name must start with 'naca'!")
nacanumber = name.split("naca")[1]
if not nacanumber.isdigit():
raise ValueError("Couldn't parse the number of the NACA airfoil!")
if not len(nacanumber) == 4:
raise NotImplementedError("Only 4-digit NACA airfoils are currently supported!")
# Parse
max_camber = int(nacanumber[0]) * 0.01
camber_loc = int(nacanumber[1]) * 0.1
thickness = int(nacanumber[2:]) * 0.01
else:
if not all(params_specified):
raise ValueError(
"Must specify either `name` or all three (`max_camber`, `camber_loc`, `thickness`) parameters.")
# Referencing https://en.wikipedia.org/wiki/NACA_airfoil#Equation_for_a_cambered_4-digit_NACA_airfoil
# from here on out
# Make uncambered coordinates
x_t = np.cosspace(0, 1, n_points_per_side) # Generate some cosine-spaced points
y_t = 5 * thickness * (
+ 0.2969 * x_t ** 0.5
- 0.1260 * x_t
- 0.3516 * x_t ** 2
+ 0.2843 * x_t ** 3
- 0.1015 * x_t ** 4  # 0.1015 is the original coefficient; use 0.1036 for a sharp (zero-thickness) TE
)
if camber_loc == 0:
camber_loc = 0.5 # prevents divide by zero errors for things like naca0012's.
# Get camber
y_c = np.where(
x_t <= camber_loc,
max_camber / camber_loc ** 2 * (2 * camber_loc * x_t - x_t ** 2),
max_camber / (1 - camber_loc) ** 2 * ((1 - 2 * camber_loc) + 2 * camber_loc * x_t - x_t ** 2)
)
# Get camber slope
dycdx = np.where(
x_t <= camber_loc,
2 * max_camber / camber_loc ** 2 * (camber_loc - x_t),
2 * max_camber / (1 - camber_loc) ** 2 * (camber_loc - x_t)
)
theta = np.arctan(dycdx)
# Combine everything
x_U = x_t - y_t * np.sin(theta)
x_L = x_t + y_t * np.sin(theta)
y_U = y_c + y_t * np.cos(theta)
y_L = y_c - y_t * np.cos(theta)
# Flip upper surface so it's back to front
x_U, y_U = x_U[::-1], y_U[::-1]
# Trim 1 point from lower surface so there's no overlap
x_L, y_L = x_L[1:], y_L[1:]
x = np.concatenate((x_U, x_L))
y = np.concatenate((y_U, y_L))
return np.stack((x, y), axis=1)
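# Usage sketch for get_NACA_coordinates(): either by name, or by explicit camber/thickness parameters.
# (Illustrative only.)
#
#     >>> coords = get_NACA_coordinates("naca2412")
#     >>> # Equivalently, from parameters:
#     >>> coords = get_NACA_coordinates(max_camber=0.02, camber_loc=0.4, thickness=0.12)
#     >>> coords.shape  # (2 * n_points_per_side - 1, 2)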
def get_kulfan_coordinates(
lower_weights: np.ndarray = -0.2 * np.ones(8),
upper_weights: np.ndarray = 0.2 * np.ones(8),
leading_edge_weight: float = 0.,
TE_thickness: float = 0.,
n_points_per_side: int = _default_n_points_per_side,
N1: float = 0.5,
N2: float = 1.0,
**deprecated_kwargs
) -> np.ndarray:
"""
Given a set of Kulfan parameters, computes the coordinates of the resulting airfoil.
This function is the inverse of `get_kulfan_parameters()`.
Kulfan parameters are a highly-efficient and flexible way to parameterize the shape of an airfoil. The particular
flavor of Kulfan parameterization used in AeroSandbox is the "CST with LEM" method, which is described in various
papers linked below. In total, the Kulfan parameterization consists of:
* A vector of weights corresponding to the lower surface of the airfoil
* A vector of weights corresponding to the upper surface of the airfoil
* A scalar weight corresponding to the strength of a leading-edge camber mode shape of the airfoil (optional)
* The trailing-edge (TE) thickness of the airfoil (optional)
These Kulfan parameters are also referred to as CST (Class/Shape Transformation) parameters.
References on Kulfan (CST) airfoils:
* Kulfan, Brenda "Universal Parametric Geometry Representation Method" (2008). AIAA Journal of Aircraft.
Describes the basic Kulfan (CST) airfoil parameterization.
Mirrors:
* https://arc.aiaa.org/doi/10.2514/1.29958
* https://www.brendakulfan.com/_files/ugd/169bff_6738e0f8d9074610942c53dfaea8e30c.pdf
* https://www.researchgate.net/publication/245430684_Universal_Parametric_Geometry_Representation_Method
* Kulfan, Brenda "Modification of CST Airfoil Representation Methodology" (2020). Unpublished note:
Describes the optional "Leading-Edge Modification" (LEM) addition to the Kulfan (CST) airfoil parameterization.
Mirrors:
* https://www.brendakulfan.com/_files/ugd/169bff_16a868ad06af4fea946d299c6028fb13.pdf
* https://www.researchgate.net/publication/343615711_Modification_of_CST_Airfoil_Representation_Methodology
* Masters, D.A. "Geometric Comparison of Aerofoil Shape Parameterization Methods" (2017). AIAA Journal.
Compares the Kulfan (CST) airfoil parameterization to other airfoil parameterizations. Also has further notes
on the LEM addition.
Mirrors:
* https://arc.aiaa.org/doi/10.2514/1.J054943
* https://research-information.bris.ac.uk/ws/portalfiles/portal/91793513/SP_Journal_RED.pdf
Notes on N1, N2 (shape factor) combinations:
* 0.5, 1: Conventional airfoil
* 0.5, 0.5: Elliptic airfoil
* 1, 1: Biconvex airfoil
* 0.75, 0.75: Sears-Haack body (radius distribution)
* 0.75, 0.25: Low-drag projectile
* 1, 0.001: Cone or wedge airfoil
* 0.001, 0.001: Rectangle, circular duct, or circular rod.
To make a Kulfan (CST) airfoil, use the following syntax:
>>> import aerosandbox as asb
>>> asb.Airfoil("My Airfoil Name", coordinates=asb.get_kulfan_coordinates(*args))
Args:
lower_weights (iterable): The Kulfan weights to use for the lower surface.
upper_weights (iterable): The Kulfan weights to use for the upper surface.
leading_edge_weight (float): The strength of Kulfan's optional leading-edge modification (LEM) camber mode shape.
TE_thickness (float): The trailing-edge thickness to add, in terms of y/c.
n_points_per_side (int): The number of points to discretize with, when generating the coordinates.
N1 (float): The shape factor corresponding to the leading edge of the airfoil. See above for examples.
N2 (float): The shape factor corresponding to the trailing edge of the airfoil. See above for examples.
Returns:
np.ndarray: The coordinates of the airfoil as a Nx2 array.
"""
if len(deprecated_kwargs) > 0:
import warnings
warnings.warn(
"The following arguments are deprecated and will be removed in a future version:\n"
f"{deprecated_kwargs}",
DeprecationWarning
)
if deprecated_kwargs.get("enforce_continuous_LE_radius", False):
lower_weights[0] = -1 * upper_weights[0]
x = np.cosspace(0, 1, n_points_per_side) # Generate some cosine-spaced points
# Class function
C = (x) ** N1 * (1 - x) ** N2
def shape_function(w):
# Shape function (Bernstein polynomials)
N = np.length(w) - 1 # Order of Bernstein polynomials
K = comb(N, np.arange(N + 1)) # Bernstein polynomial coefficients
dims = (np.length(w), np.length(x))
def wide(vector):
return np.tile(np.reshape(vector, (1, dims[1])), (dims[0], 1))
def tall(vector):
return np.tile(np.reshape(vector, (dims[0], 1)), (1, dims[1]))
S_matrix = (
tall(K) * wide(x) ** tall(np.arange(N + 1)) *
wide(1 - x) ** tall(N - np.arange(N + 1))
) # Bernstein polynomial coefficients * weight matrix
S_x = np.sum(tall(w) * S_matrix, axis=0)
# Calculate y output
y = C * S_x
return y
y_lower = shape_function(lower_weights)
y_upper = shape_function(upper_weights)
# Add trailing-edge (TE) thickness
y_lower -= x * TE_thickness / 2
y_upper += x * TE_thickness / 2
# Add Kulfan's leading-edge-modification (LEM)
y_lower += leading_edge_weight * (x) * (1 - x) ** (np.length(lower_weights) + 0.5)
y_upper += leading_edge_weight * (x) * (1 - x) ** (np.length(upper_weights) + 0.5)
x = np.concatenate((x[::-1], x[1:]))
y = np.concatenate((y_upper[::-1], y_lower[1:]))
coordinates = np.stack((x, y), axis=1)
return coordinates
def get_kulfan_parameters(
coordinates: np.ndarray,
n_weights_per_side: int = 8,
N1: float = 0.5,
N2: float = 1.0,
n_points_per_side: int = _default_n_points_per_side,
normalize_coordinates: bool = True,
use_leading_edge_modification: bool = True,
method: str = "least_squares",
) -> Dict[str, Union[np.ndarray, float]]:
"""
Given a set of airfoil coordinates, reconstructs the Kulfan parameters that would recreate that airfoil. Uses a
curve fitting (optimization) process.
This function is the inverse of `get_kulfan_coordinates()`.
Kulfan parameters are a highly-efficient and flexible way to parameterize the shape of an airfoil. The particular
flavor of Kulfan parameterization used in AeroSandbox is the "CST with LEM" method, which is described in various
papers linked below. In total, the Kulfan parameterization consists of:
* A vector of weights corresponding to the lower surface of the airfoil
* A vector of weights corresponding to the upper surface of the airfoil
* A scalar weight corresponding to the strength of a leading-edge camber mode shape of the airfoil (optional)
* The trailing-edge (TE) thickness of the airfoil (optional)
These Kulfan parameters are also referred to as CST (Class/Shape Transformation) parameters.
References on Kulfan (CST) airfoils:
* Kulfan, Brenda "Universal Parametric Geometry Representation Method" (2008). AIAA Journal of Aircraft.
Describes the basic Kulfan (CST) airfoil parameterization.
Mirrors:
* https://arc.aiaa.org/doi/10.2514/1.29958
* https://www.brendakulfan.com/_files/ugd/169bff_6738e0f8d9074610942c53dfaea8e30c.pdf
* https://www.researchgate.net/publication/245430684_Universal_Parametric_Geometry_Representation_Method
* Kulfan, Brenda "Modification of CST Airfoil Representation Methodology" (2020). Unpublished note:
Describes the optional "Leading-Edge Modification" (LEM) addition to the Kulfan (CST) airfoil parameterization.
Mirrors:
* https://www.brendakulfan.com/_files/ugd/169bff_16a868ad06af4fea946d299c6028fb13.pdf
* https://www.researchgate.net/publication/343615711_Modification_of_CST_Airfoil_Representation_Methodology
* Masters, D.A. "Geometric Comparison of Aerofoil Shape Parameterization Methods" (2017). AIAA Journal.
Compares the Kulfan (CST) airfoil parameterization to other airfoil parameterizations. Also has further notes
on the LEM addition.
Mirrors:
* https://arc.aiaa.org/doi/10.2514/1.J054943
* https://research-information.bris.ac.uk/ws/portalfiles/portal/91793513/SP_Journal_RED.pdf
Notes on N1, N2 (shape factor) combinations:
* 0.5, 1: Conventional airfoil
* 0.5, 0.5: Elliptic airfoil
* 1, 1: Biconvex airfoil
* 0.75, 0.75: Sears-Haack body (radius distribution)
* 0.75, 0.25: Low-drag projectile
* 1, 0.001: Cone or wedge airfoil
* 0.001, 0.001: Rectangle, circular duct, or circular rod.
The following demonstrates the reversibility of this function:
>>> import aerosandbox as asb
>>> from aerosandbox.geometry.airfoil.airfoil_families import get_kulfan_parameters
>>>
>>> af = asb.Airfoil("dae11") # A conventional airfoil
>>> params = get_kulfan_parameters(
>>> coordinates=af.coordinates,
>>> )
>>> af_reconstructed = asb.Airfoil(
>>> name="Reconstructed Airfoil",
>>> coordinates=get_kulfan_coordinates(
>>> **params
    >>>     )
    >>> )
Args:
coordinates (np.ndarray): The coordinates of the airfoil as a Nx2 array.
n_weights_per_side (int): The number of Kulfan weights to use per side of the airfoil.
N1 (float): The shape factor corresponding to the leading edge of the airfoil. See above for examples.
N2 (float): The shape factor corresponding to the trailing edge of the airfoil. See above for examples.
n_points_per_side (int): The number of points to discretize with, when formulating the curve-fitting
optimization problem.
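        normalize_coordinates (bool): Whether to normalize the airfoil coordinates (to a unit chord) before fitting.
        use_leading_edge_modification (bool): Whether to fit a leading-edge modification (LEM) weight. Only consulted
            by the "opti" method; the "least_squares" method always includes the LEM column.
        method (str): The fitting method to use; either "least_squares" (the default) or "opti".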
Returns:
A dictionary containing the Kulfan parameters. The keys are:
* "lower_weights" (np.ndarray): The weights corresponding to the lower surface of the airfoil.
* "upper_weights" (np.ndarray): The weights corresponding to the upper surface of the airfoil.
* "TE_thickness" (float): The trailing-edge thickness of the airfoil.
* "leading_edge_weight" (float): The strength of the leading-edge camber mode shape of the airfoil.
These can be passed directly into `get_kulfan_coordinates()` to reconstruct the airfoil.
"""
from aerosandbox.geometry.airfoil import Airfoil
if method == "opti":
target_airfoil = Airfoil(
name="Target Airfoil",
coordinates=coordinates
).repanel(
n_points_per_side=n_points_per_side
)
if normalize_coordinates:
target_airfoil = target_airfoil.normalize()
x = np.cosspace(0, 1, n_points_per_side)
target_thickness = target_airfoil.local_thickness(x_over_c=x)
target_camber = target_airfoil.local_camber(x_over_c=x)
target_y_upper = target_camber + target_thickness / 2
target_y_lower = target_camber - target_thickness / 2
# Class function
C = (x) ** N1 * (1 - x) ** N2
def shape_function(w):
# Shape function (Bernstein polynomials)
N = np.length(w) - 1 # Order of Bernstein polynomials
K = comb(N, np.arange(N + 1)) # Bernstein polynomial coefficients
dims = (np.length(w), np.length(x))
def wide(vector):
return np.tile(vector.reshape((1, dims[1])), (dims[0], 1))
def tall(vector):
return np.tile(vector.reshape((dims[0], 1)), (1, dims[1]))
S_matrix = (
tall(K) * wide(x) ** tall(np.arange(N + 1)) *
wide(1 - x) ** tall(N - np.arange(N + 1))
) # Bernstein polynomial coefficients * weight matrix
S_x = np.sum(tall(w) * S_matrix, axis=0)
# Calculate y output
y = C * S_x
return y
opti = asb.Opti()
lower_weights = opti.variable(init_guess=0, n_vars=n_weights_per_side)
upper_weights = opti.variable(init_guess=0, n_vars=n_weights_per_side)
TE_thickness = opti.variable(init_guess=0, lower_bound=0)
if use_leading_edge_modification:
leading_edge_weight = opti.variable(init_guess=0)
else:
leading_edge_weight = 0
y_lower = shape_function(lower_weights)
y_upper = shape_function(upper_weights)
# Add trailing-edge (TE) thickness
y_lower -= x * TE_thickness / 2
y_upper += x * TE_thickness / 2
# Add Kulfan's leading-edge-modification (LEM)
y_lower += leading_edge_weight * (x) * (1 - x) ** (np.length(lower_weights) + 0.5)
y_upper += leading_edge_weight * (x) * (1 - x) ** (np.length(upper_weights) + 0.5)
opti.minimize(
np.sum((y_lower - target_y_lower) ** 2) +
np.sum((y_upper - target_y_upper) ** 2)
)
sol = opti.solve(
verbose=False
)
return {
"lower_weights" : sol.value(lower_weights),
"upper_weights" : sol.value(upper_weights),
"TE_thickness" : sol.value(TE_thickness),
"leading_edge_weight": sol.value(leading_edge_weight),
}
elif method == "least_squares":
"""
        The goal here is to set up this fitting problem as a least-squares problem (likely an overconstrained one,
        but keeping it general for now). This will then be solved with np.linalg.lstsq(A, b), where A will (likely)
        not be square.
        The columns of the A matrix correspond to our unknowns, which are packed into a 1D vector `x` as:
        * lower_weights, from index 0 to n_weights_per_side - 1
        * upper_weights, from index n_weights_per_side to 2 * n_weights_per_side - 1
        * leading_edge_weight
        * trailing_edge_thickness
See `get_kulfan_coordinates()` for more details on the meaning of these variables.
The rows of the A matrix will correspond to each row of the given airfoil coordinates (i.e., a single vertex
on the airfoil). The idea here is to express each vertex as a linear combination of the unknowns, and then
solve for the unknowns that minimize the error between the given airfoil coordinates and the reconstructed
airfoil coordinates.
"""
if normalize_coordinates:
coordinates = Airfoil(
name="Target Airfoil",
coordinates=coordinates
).normalize().coordinates
n_coordinates = np.length(coordinates)
x = coordinates[:, 0]
y = coordinates[:, 1]
LE_index = np.argmin(x)
is_upper = np.arange(len(x)) <= LE_index
# Class function
C = (x) ** N1 * (1 - x) ** N2
# Shape function (Bernstein polynomials)
N = n_weights_per_side - 1 # Order of Bernstein polynomials
K = comb(N, np.arange(N + 1)) # Bernstein polynomial coefficients
dims = (n_weights_per_side, n_coordinates)
def wide(vector):
return np.tile(vector.reshape((1, dims[1])), (dims[0], 1))
def tall(vector):
return np.tile(vector.reshape((dims[0], 1)), (1, dims[1]))
S_matrix = (
tall(K) * wide(x) ** tall(np.arange(N + 1)) *
wide(1 - x) ** tall(N - np.arange(N + 1))
) # Bernstein polynomial coefficients * weight matrix
leading_edge_weight_row = x * np.maximum(1 - x, 0) ** (n_weights_per_side + 0.5)
trailing_edge_thickness_row = np.where(
is_upper,
x / 2,
-x / 2
)
A = np.concatenate([
np.where(wide(is_upper), 0, wide(C) * S_matrix).T,
np.where(wide(is_upper), wide(C) * S_matrix, 0).T,
np.reshape(leading_edge_weight_row, (n_coordinates, 1)),
np.reshape(trailing_edge_thickness_row, (n_coordinates, 1)),
], axis=1)
b = y
# Solve least-squares problem
x, _, _, _ = np.linalg.lstsq(A, b, rcond=None)
lower_weights = x[:n_weights_per_side]
upper_weights = x[n_weights_per_side:2 * n_weights_per_side]
leading_edge_weight = x[-2]
trailing_edge_thickness = x[-1]
# If you got a negative trailing-edge thickness, then resolve the problem with a TE_thickness = 0 constraint.
if trailing_edge_thickness < 0:
x, _, _, _ = np.linalg.lstsq(A[:, :-1], b, rcond=None)
lower_weights = x[:n_weights_per_side]
upper_weights = x[n_weights_per_side:2 * n_weights_per_side]
leading_edge_weight = x[-1]
trailing_edge_thickness = 0
return {
"lower_weights" : lower_weights,
"upper_weights" : upper_weights,
"TE_thickness" : trailing_edge_thickness,
"leading_edge_weight": leading_edge_weight,
}
else:
raise ValueError(f"Invalid method '{method}'.")
def get_coordinates_from_raw_dat(
raw_text: List[str]
) -> np.ndarray:
"""
    Returns a Nx2 ndarray of airfoil coordinates from the raw text of an airfoil *.dat file.
Args:
raw_text: A list of strings, where each string is one line of the *.dat file. One good way to get this input
is to read the file via the `with open(file, "r") as file:`, `file.readlines()` interface.
Returns: A Nx2 ndarray of airfoil coordinates [x, y].
"""
raw_coordinates = []
def is_number(s: str) -> bool:
# Determines whether a string is representable as a float
try:
float(s)
except ValueError:
return False
return True
def parse_line(line: str) -> Optional[List[float]]:
# Given a single line of a `*.dat` file, tries to parse it into a list of two floats [x, y].
# If not possible, returns None.
line_split = re.split(r'[;|,|\s|\t]', line)
line_items = [s for s in line_split if s != ""]
if len(line_items) == 2 and all([is_number(item) for item in line_items]):
return line_items
else:
return None
for line in raw_text:
parsed_line = parse_line(line)
if parsed_line is not None:
raw_coordinates.append(parsed_line)
if len(raw_coordinates) == 0:
raise ValueError("Could not read any coordinates from the `raw_text` input!")
coordinates = np.array(raw_coordinates, dtype=float)
return coordinates
def get_file_coordinates(
filepath: Union[str, os.PathLike]
):
possible_errors = (FileNotFoundError, UnicodeDecodeError)
if isinstance(filepath, np.ndarray):
raise TypeError("`filepath` should be a string or os.PathLike object.")
try:
with open(filepath, "r") as f:
raw_text = f.readlines()
except possible_errors as e:
try:
with open(f"{filepath}.dat", "r") as f:
raw_text = f.readlines()
except possible_errors as e:
raise FileNotFoundError(
f" Neither '{filepath}' nor '{filepath}.dat' were found and readable."
) from e
try:
return get_coordinates_from_raw_dat(raw_text)
except ValueError:
raise ValueError("File was found, but could not read any coordinates!")
def get_UIUC_coordinates(
name: str = 'dae11'
) -> np.ndarray:
"""
Returns the coordinates of a specified airfoil in the UIUC airfoil database.
Args:
name: Name of the airfoil to retrieve from the UIUC database.
Returns: The coordinates of the airfoil as a Nx2 ndarray [x, y]
"""
from aerosandbox import _asb_root
airfoil_database_root = _asb_root / "geometry" / "airfoil" / "airfoil_database"
try:
with open(airfoil_database_root / name) as f:
raw_text = f.readlines()
except FileNotFoundError as e:
try:
with open(airfoil_database_root / f"{name}.dat") as f:
raw_text = f.readlines()
except FileNotFoundError as e:
raise FileNotFoundError(
f"Neither '{name}' nor '{name}.dat' were found in the UIUC airfoil database."
) from e
return get_coordinates_from_raw_dat(raw_text)
if __name__ == '__main__':
import aerosandbox as asb
import aerosandbox.numpy as np
af = asb.Airfoil("e377").normalize()
af.draw(backend="plotly")
kulfan_params = get_kulfan_parameters(
coordinates=af.coordinates,
n_weights_per_side=8,
)
af_reconstructed = asb.Airfoil(
name="Reconstructed Airfoil",
coordinates=get_kulfan_coordinates(
**kulfan_params
),
)
af_reconstructed.draw(backend="plotly")
print(kulfan_params)
print(af.jaccard_similarity(af_reconstructed))
|
AeroSandbox
|
/AeroSandbox-4.1.1.tar.gz/AeroSandbox-4.1.1/aerosandbox/geometry/airfoil/airfoil_families.py
|
airfoil_families.py
|
from aerosandbox.structures.legacy.beams import *
import copy
### Set up sweep variables
# n_booms = 1
# n_booms = 2
# load_location_fraction = 0.50
n_booms = 3
load_location_fraction = 0.60
res = 15
masses = np.logspace(np.log10(5), np.log10(3000), res)
spans = np.logspace(np.log10(3), np.log10(120), res)
Masses, Spans = np.meshgrid(masses, spans, indexing="ij")
Spar_Masses = np.zeros_like(Masses)
### Set up problem
opti = cas.Opti()
mass = opti.parameter()
span = opti.parameter()
beam = TubeBeam1(
opti=opti,
length=span / 2,
points_per_point_load=100,
diameter_guess=10,
thickness=0.60e-3,
bending=True,
torsion=False
)
lift_force = 9.81 * mass
# load_location = opti.variable()
# opti.set_initial(load_location, 12)
# opti.subject_to([
# load_location > 1,
# load_location < beam.length - 1,
# ])
assert (n_booms == np.array([1, 2, 3])).any()
if n_booms == 2 or n_booms == 3:
load_location = beam.length * load_location_fraction
beam.add_point_load(location=load_location, force=-lift_force / n_booms)
beam.add_elliptical_load(force=lift_force / 2)
beam.setup()
# Constraints (in addition to stress)
opti.subject_to([
# beam.u[-1] < 2, # tip deflection. Source: http://web.mit.edu/drela/Public/web/hpa/hpa_structure.pdf
# beam.u[-1] > -2 # tip deflection. Source: http://web.mit.edu/drela/Public/web/hpa/hpa_structure.pdf
beam.du * 180 / cas.pi < 10, # local dihedral constraint
beam.du * 180 / cas.pi > -10, # local anhedral constraint
cas.diff(beam.nominal_diameter) < 0, # manufacturability
])
# # Zero-curvature constraint (restrict to conical tube spars only)
# opti.subject_to([
# cas.diff(cas.diff(beam.nominal_diameter)) == 0
# ])
opti.minimize(beam.mass)
p_opts = {}
s_opts = {}
s_opts["max_iter"] = 1e6 # If you need to interrupt, just use ctrl+c
# s_opts["mu_strategy"] = "adaptive"
opti.solver('ipopt', p_opts, s_opts)
### Do the sweep
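# Note on the loop below: the inner sweep alternates direction on every other row (a serpentine
# ordering), so consecutive solves correspond to neighboring (mass, span) points. Combined with the
# warm start via `opti.set_initial(sol.value_variables())`, this keeps each IPOPT solve close to the
# previous solution, which speeds up the sweep.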
for i in range(len(masses)):
iterable = range(len(spans))
iterable = iterable[::-1] if i % 2 != 0 else iterable
for j in iterable:
opti.set_value(mass, Masses[i, j])
opti.set_value(span, Spans[i, j])
sol = opti.solve()
opti.set_initial(sol.value_variables())
beam_sol = sol(beam)
Spar_Masses[i, j] = beam_sol.mass * 2
np.save("masses", Masses)
np.save("spans", Spans)
np.save("spar_masses", Spar_Masses)
# Run a sanity check
beam_sol.draw_bending()
|
AeroSandbox
|
/AeroSandbox-4.1.1.tar.gz/AeroSandbox-4.1.1/studies/MultiBoomSparMass_v2/sweep.py
|
sweep.py
|
from aerosandbox.structures.legacy.beams import *
import scipy.io as sio
import copy
### Set up sweep variables
masses = np.linspace(50, 800, 50)
spans = np.linspace(30, 90, 50)
Masses, Spans = np.meshgrid(masses, spans, indexing="ij")
Spar_Masses = np.zeros_like(Masses)
### Set up problem
opti = cas.Opti()
mass = opti.parameter()
span = opti.parameter()
beam = TubeBeam1(
opti=opti,
length=span / 2,
points_per_point_load=50,
diameter_guess=100,
bending=True,
torsion=False
)
lift_force = 9.81 * mass
beam.add_point_load(
location=span / 2 * (2 / 3) + 1,
force=-lift_force / 3
)
beam.add_uniform_load(force=lift_force / 2)
beam.setup()
# Tip deflection constraint
opti.subject_to([
# beam.u[-1] < 2, # Source: http://web.mit.edu/drela/Public/web/hpa/hpa_structure.pdf
# beam.u[-1] > -2 # Source: http://web.mit.edu/drela/Public/web/hpa/hpa_structure.pdf
beam.du * 180 / cas.pi < 10,
beam.du * 180 / cas.pi > -10
])
opti.subject_to([
cas.diff(cas.diff(beam.nominal_diameter)) < 0.002,
cas.diff(cas.diff(beam.nominal_diameter)) > -0.002,
])
opti.minimize(beam.mass)
p_opts = {}
s_opts = {}
s_opts["max_iter"] = 1000 # If you need to interrupt, just use ctrl+c
# s_opts["mu_strategy"] = "adaptive"
# s_opts["watchdog_shortened_iter_trigger"] = 1
# s_opts["expect_infeasible_problem"]="yes"
# s_opts["start_with_resto"] = "yes"
# s_opts["required_infeasibility_reduction"] = 0.001
opti.solver('ipopt', p_opts, s_opts)
### Do the sweep
for i in range(len(masses)):
iterable = range(len(spans))
iterable = iterable[::-1] if i % 2 != 0 else iterable
for j in iterable:
opti.set_value(mass, Masses[i, j])
opti.set_value(span, Spans[i, j])
sol = opti.solve()
opti.set_initial(sol.value_variables())
beam_sol = sol(beam)
Spar_Masses[i, j] = beam_sol.mass
sio.savemat("data_triple_boom.mat",
{
"Masses" : Masses,
"Spans" : Spans,
"Spar_Masses": Spar_Masses
})
|
AeroSandbox
|
/AeroSandbox-4.1.1.tar.gz/AeroSandbox-4.1.1/studies/MultiBoomSparMass_v1/spar_mass_triple_boom.py
|
spar_mass_triple_boom.py
|
from aerosandbox.structures.legacy.beams import *
import scipy.io as sio
import copy
### Set up sweep variables
masses = np.linspace(50, 800, 50)
spans = np.linspace(30, 90, 50)
Masses, Spans = np.meshgrid(masses, spans, indexing="ij")
Spar_Masses = np.zeros_like(Masses)
### Set up problem
opti = cas.Opti()
mass = opti.parameter()
span = opti.parameter()
beam = TubeBeam1(
opti=opti,
length=span / 2,
points_per_point_load=50,
diameter_guess=100,
bending=True,
torsion=False
)
lift_force = 9.81 * mass
beam.add_point_load(
location=span / 2 / 2 + 1,
force=-lift_force / 2
)
beam.add_uniform_load(force=lift_force / 2)
beam.setup()
# Tip deflection constraint
opti.subject_to([
# beam.u[-1] < 2, # Source: http://web.mit.edu/drela/Public/web/hpa/hpa_structure.pdf
# beam.u[-1] > -2 # Source: http://web.mit.edu/drela/Public/web/hpa/hpa_structure.pdf
beam.du * 180 / cas.pi < 10,
beam.du * 180 / cas.pi > -10
])
opti.subject_to([
cas.diff(cas.diff(beam.nominal_diameter)) < 0.002,
cas.diff(cas.diff(beam.nominal_diameter)) > -0.002,
])
opti.minimize(beam.mass)
p_opts = {}
s_opts = {}
s_opts["max_iter"] = 1000 # If you need to interrupt, just use ctrl+c
# s_opts["mu_strategy"] = "adaptive"
# s_opts["watchdog_shortened_iter_trigger"] = 1
# s_opts["expect_infeasible_problem"]="yes"
# s_opts["start_with_resto"] = "yes"
# s_opts["required_infeasibility_reduction"] = 0.001
opti.solver('ipopt', p_opts, s_opts)
### Do the sweep
for i in range(len(masses)):
iterable = range(len(spans))
iterable = iterable[::-1] if i % 2 != 0 else iterable
for j in iterable:
opti.set_value(mass, Masses[i, j])
opti.set_value(span, Spans[i, j])
sol = opti.solve()
opti.set_initial(sol.value_variables())
beam_sol = sol(beam)
Spar_Masses[i, j] = beam_sol.mass
sio.savemat("data_double_boom.mat",
{
"Masses" : Masses,
"Spans" : Spans,
"Spar_Masses": Spar_Masses
})
|
AeroSandbox
|
/AeroSandbox-4.1.1.tar.gz/AeroSandbox-4.1.1/studies/MultiBoomSparMass_v1/spar_mass_double_boom.py
|
spar_mass_double_boom.py
|
import os
import socket
import time
from inspect import signature
from collections.abc import MutableMapping
import traceback
import functools
from contextlib import contextmanager
from utils import is_container, str_to_num
from enum import Enum, IntFlag, auto
COMMAND_SUCCESS_CHAR = '%'
COMMAND_FAULT_CHAR = '#'
COMMAND_INVALID_CHAR = '!'
EOS_CHAR = '\n'
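# Illustrative examples of the controller's one-line ASCII responses that these constants describe
# (parsed by the Message class below): the first character is the status flag, the rest is the payload.
#   "%10.0\n" -> CommandSuccess with data "10.0"
#   "!\n"     -> CommandInvalid (the command was not syntactically correct)
#   "#\n"     -> CommandFault (send_command then queries ~LASTERROR for details)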
class AxisStatus(IntFlag):
Homed = auto()
Profiling = auto()
WaitDone = auto()
CommandValid = auto()
Homing = auto()
Enabling = auto()
JogGenerating = auto()
Jogging = auto()
DrivePending = auto()
DriveAbortPending = auto()
TrajectoryFiltering = auto()
IFOVEnabled = auto()
NotVirtual = auto()
CalEnabled1D = auto()
CalEnabled2D = auto()
MasterSlaveControl = auto()
JoystickControl = auto()
BacklashActive = auto()
GainMappingEnabled = auto()
Stability0 = auto()
MotionBlocked = auto()
MoveDone = auto()
MotionClamped = auto()
GantryAligned = auto()
GantryRealigning = auto()
Stability1 = auto()
ThermoCompEnabled = auto()
class DriveStatus(IntFlag):
Enabled = auto()
CwEOTLimit = auto()
CcwEOTLimit = auto()
HomeLimit = auto()
MarkerInput = auto()
HallAInput = auto()
HallBInput = auto()
HallCInput = auto()
SineEncoderError = auto()
CosineEncoderError = auto()
ESTOPInput = auto()
BrakeOutput = auto()
GalvoPowerCorrection = auto()
NoMotorSupply = auto()
CurrentClamp = auto()
MarkerLatch = auto()
PowerLimiting = auto()
PSOHaltLatch = auto()
HighResMode = auto()
GalvoCalEnabled = auto()
AutofocusActive = auto()
ProgramFlash = auto()
ProgramMXH = auto()
ServoControl = auto()
InPosition = auto()
MoveActive = auto()
AccelPhase = auto()
DecelPhase = auto()
EncoderClipping = auto()
DualLoopActive = auto()
InPosition2 = auto()
class AxisFault(IntFlag):
PositionError = auto()
OverCurrent = auto()
CwEOTLimit = auto()
CcwEOTLimit = auto()
CwSoftLimit = auto()
CcwSoftLimit = auto()
AmplifierFault = auto()
PositionFbk = auto()
VelocityFbk = auto()
HallFault = auto()
MaxVelocity = auto()
EstopFault = auto()
VelocityError = auto()
ProbeFault = auto()
ExternalFault = auto()
MotorTemp = auto()
AmplifierTemp = auto()
EncoderFault = auto()
CommLost = auto()
GantryMisalign = auto()
FbkScalingFault = auto()
MrkSearchFault = auto()
SafeZoneFault = auto()
InPosTimeout = auto()
VoltageClamp = auto()
PowerSupply = auto()
MissedInterrupt = auto()
Internal = auto()
class TaskStatus(Enum):
Unavailable = auto()
Inactive = auto()
Idle = auto()
ProgramReady = auto()
ProgramRunning = auto()
ProgramFeedheld = auto()
ProgramPaused = auto()
ProgramComplete = auto()
Error = auto()
Queue = auto()
class QueueStatus(Enum):
QueueActive = auto()
QueueEmpty = auto()
QueueFull = auto()
QueueStarted = auto()
QueuePaused = auto()
QueueLargeProgram = auto()
class ConnectionError(Exception):
def __init__(self, message):
super().__init__(message)
class CommandFaultError(Exception):
'''
Exception is raised when the controller returns a command fault error
flag.
'''
def __init__(self, message):
super().__init__(message)
class CommandInvalidError(Exception):
'''
Exception is raised when the controller returns a command invalid error
flag.
'''
def __init__(self, message):
super().__init__(message)
class Decorators:
@staticmethod
def accept_multiple_axes(_func=None, *, transform_second_arg=False):
'''
This function is used to decorate a function that accepts an axes
argument.
If the argument is not a container, it is converted to a tuple with a
single entry.
Args:
original_function (function): Function that accepts an axes
argument
Returns:
function: Function with modified axes argument
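        Example (as this decorator is applied elsewhere in this class; see the
        methods of A3200Controller below):
            @Decorators.accept_multiple_axes
            def home(self, axes): ...
            @Decorators.accept_multiple_axes(transform_second_arg=True)
            def moveabs(self, axes, positions, speed=None): ...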
'''
def decorator_accept_multiple_axes(original_function, *args):
@functools.wraps(original_function)
def wrapper_function(self, axes, *args, **kwargs):
if isinstance(axes, AxesDict):
arg = axes.values
axes = axes.axes
if len(args) > 0:
args = (arg, *args)
else:
args = (arg, )
if not is_container(axes):
axes = (axes, )
if len(args) > 0 and transform_second_arg:
if not is_container(args[0]):
args = list(args)
args[0] = (args[0], )
args = tuple(args)
return original_function(self, axes, *args, **kwargs)
return wrapper_function
if _func is None:
return decorator_accept_multiple_axes
else:
return decorator_accept_multiple_axes(_func)
@staticmethod
def validate_axes(original_function):
'''
This function is used to decorate a function that accepts an axes
argument. It validates the axes against a list of valid axes stored in
the A3200Controller Class. Raises a ValueError if any axis is not a
valid.
Args:
original_function (function): Function that accepts an axes
argument
Returns:
function: Function with same parameters if all axes are valid
'''
def wrapper_function(self, axes, *args, **kwargs):
axes_valid = all([str(axis) in A3200Controller.valid_axes
for axis in axes])
if axes_valid:
return original_function(self, axes, *args, **kwargs)
else:
raise ValueError('Invalid axis specified')
return wrapper_function
@staticmethod
def validate_task(original_function):
@functools.wraps(original_function)
def wrapper_function(*args, **kwargs):
arguments = signature(original_function).bind(*args, **kwargs).arguments
if ('task_id' in arguments and
(arguments['task_id'] not in A3200Controller.valid_tasks and
arguments['task_id'] != None)):
print(arguments)
raise ValueError('Invalid task specified')
return original_function(*args, **kwargs)
return wrapper_function
class TcpIpSocketDummy:
'''
Tcp socket dummy Class that emulates the core functionality of a Tcp socket
for testing purposes.
'''
def __init__(self):
pass
def __enter__(self):
self.connect()
return self
def __exit__(self, exc_type, exc_value, tb):
if exc_type is not None:
traceback.print_exception(exc_type, exc_value, tb)
self.disconnect()
def send(self, message, blocking=True):
print(f'Sending message: {message}')
return Message('%Test\n')
def connect(self):
print('connect')
def disconnect(self):
print('disconnect')
class TcpIpSocket(socket.socket):
'''
Class that manages the TCP/IP connection to the socket running on the
local host on port 8000.
'''
def __init__(self):
super().__init__(socket.AF_INET, socket.SOCK_STREAM)
self._server_address = '127.0.0.1'
self._port = 8000
self.settimeout(0.2)
def __enter__(self):
self.connect()
return self
def __exit__(self, exc_type, exc_value, tb):
if exc_type is not None:
traceback.print_exception(exc_type, exc_value, tb)
self.disconnect()
def connect(self):
super().connect((self._server_address, self._port))
def disconnect(self):
self.close()
def send(self, message):
if not message.endswith(EOS_CHAR):
message += EOS_CHAR
super().send(message.encode())
while True:
ret = self._receive()
if ret == None:
continue
else:
break
return ret[-1]
def _receive(self):
try:
ret = self.recv(1024).decode()
messages = [Message(msg) for msg in ret.split('\n') if msg != '']
return messages
except socket.timeout:
return None
class A3200Controller:
valid_axes = ['X', 'Y', 'Z', 'A', 'U']
valid_tasks = (0, 1, 2, 3, 4)
def __init__(self, dummy=False, redirect_output=False):
if dummy:
self._tcp_socket = TcpIpSocketDummy()
else:
self._tcp_socket = TcpIpSocket()
self.redirect_path = 'program.pgm'
self.redirect_exists = False
self.PSO = PSO(self)
def __enter__(self):
self.connect()
return self
def __exit__(self, exc_type, exc_value, tb):
if exc_type is not None:
traceback.print_exception(exc_type, exc_value, tb)
self.disconnect()
def set_redirect_path(self, path):
self.redirect_path = path
with open(self.redirect_path, 'w') as f:
pass
self.send_command = self.redirect_command
@staticmethod
def join_axes(axes, positions=None):
if positions:
return ' '.join([''.join([str(val) for val in axpos])
for axpos in zip(axes, positions)])
else:
return ' '.join([str(axis) for axis in axes])
def connect(self):
self._tcp_socket.connect()
def disconnect(self):
self._tcp_socket.disconnect()
def send_command(self, command):
try:
ret = self._tcp_socket.send(command)
except socket.timeout:
raise ConnectionError('Controller is not connected.')
if ret.status == 'CommandSuccess':
return ret
elif ret.status == 'CommandInvalid':
raise CommandInvalidError('The command is not syntactically '\
'correct')
elif ret.status == 'CommandFault':
error = self.get_last_error()
if error:
raise CommandFaultError(error)
else:
raise CommandFaultError('Command could not execute '\
'successfully')
def redirect_command(self, command):
with open(self.redirect_path, 'a') as file:
file.write(command + '\n')
def get_last_error(self):
ret = self.send_command('~LASTERROR')
if ret != None and ret.status == 'CommandSuccess':
return ret.data
def reset(self):
self.send_command('~RESETCONTROLLER')
@Decorators.validate_task
def switch_task(self, task_id):
self.send_command(f'~TASK {task_id}')
@Decorators.validate_task
def stop_task(self, task_id=None):
command = '~STOPTASK'
command += f' {task_id}' if task_id else ''
self.send_command(command)
@Decorators.validate_task
def init_queue(self, task_id=1):
command = '~INITQUEUE'
command += f' {task_id}' if task_id else ''
self.send_command(command)
@contextmanager
@Decorators.validate_task
def queue_mode(self, task_id=1, block=True):
self.init_queue(task_id=task_id)
self.switch_task(task_id)
try:
yield
finally:
if block:
while True:
state = self.get_queue_state(task_id)
if 'Queue Buffer Empty' in state:
break
time.sleep(0.1)
self.stop_task(task_id=task_id)
def acknowledgeall(self):
self.send_command('ACKNOWLEDGEALL')
def dwell(self, time):
self.send_command(f'DWELL {time}')
@Decorators.accept_multiple_axes
@Decorators.validate_axes
def enable(self, axes):
data = A3200Controller.join_axes(axes)
command = f'ENABLE {data}'
self.send_command(command)
@Decorators.accept_multiple_axes
@Decorators.validate_axes
def disable(self, axes):
data = A3200Controller.join_axes(axes)
command = f'DISABLE {data}'
self.send_command(command)
@Decorators.accept_multiple_axes
@Decorators.validate_axes
def home(self, axes):
data = self.join_axes(axes)
command = f'HOME {data}'
self.send_command(command)
@Decorators.accept_multiple_axes
@Decorators.validate_axes
def set_home_speed(self, axes, speed):
data = A3200Controller.join_axes(axes)
command = f'SETPARM {data} HomeSpeed {speed}'
self.send_command(command)
@Decorators.accept_multiple_axes(transform_second_arg=True)
@Decorators.validate_axes
def moveabs(self, axes, positions, speed=None):
data = A3200Controller.join_axes(axes, positions)
command = f'G90 G1 {data}'
if speed:
command += f' F{speed}'
self.send_command(command)
@Decorators.accept_multiple_axes(transform_second_arg=True)
@Decorators.validate_axes
def moverel(self, axes, distances, speed=None):
data = A3200Controller.join_axes(axes, positions=distances)
command = f'G91 G1 {data}'
if speed:
command += f' F{speed}'
self.send_command(command)
def set_speed(self, speed):
command = f'F{speed}'
self.send_command(command)
@Decorators.accept_multiple_axes
@Decorators.validate_axes
def set_parm(self, axes, parameter, value):
data = A3200Controller.join_axes(axes)
command = f'SETPARM {data} {parameter} {value}'
self.send_command(command)
@Decorators.accept_multiple_axes
@Decorators.validate_axes
def read_parm(self, axes, parameter):
result = AxesDict()
for axis in axes:
command = f'$global[0] = {parameter}.{axis}'
self.send_command(command)
result[axis] = self.send_command('~GETVARIABLE $global[0]').data
return result
def freerun_start(self, axis, speed):
command = f'FREERUN {axis} {speed}'
self.send_command(command)
def freerun_stop(self, axis):
command = f'FREERUN {axis} STOP'
self.send_command(command)
@Decorators.accept_multiple_axes(transform_second_arg=True)
@Decorators.validate_axes
def posoffset_set(self, axes, offsets):
data = A3200Controller.join_axes(axes, positions=offsets)
command = f'POSOFFSET SET {data}'
self.send_command(command)
@Decorators.accept_multiple_axes(transform_second_arg=True)
@Decorators.validate_axes
def posoffset_clear(self, axes):
data = A3200Controller.join_axes(axes)
command = f'POSOFFSET CLEAR {data}'
self.send_command(command)
@Decorators.accept_multiple_axes
@Decorators.validate_axes
def get_feedback(self, axes, parameter):
template = '({}, {})'
command = '~STATUS'
for axis in axes:
command += ' ' + template.format(axis, parameter)
message = self.send_command(command)
data = [str_to_num(value) for value in message.data.split(' ')]
response = AxesDict(zip(axes, data))
return response
def get_positions(self, axes):
data = self.get_feedback(axes, 'PositionFeedback')
return data
def get_drive_status(self, axes):
data = self.get_feedback(axes, 'DriveStatus')
data = {k: DriveStatus(v) for k, v in data.items()}
        return data
def get_axis_status(self, axes):
data = self.get_feedback(axes, 'AxisStatus')
data = {k: AxisStatus(v) for k, v in data.items()}
        return data
def get_axis_fault(self, axes):
data = self.get_feedback(axes, 'AxisFault')
data = {k: AxisFault(v) for k, v in data.items()}
return data
def get_task_state(self):
template = '({}, {})'
command = '~STATUS'
for i in range(5):
command += template.format(i, 'TaskState')
ret = [str_to_num(val) for val in
self.send_command(command).data.split(' ')]
status = [TaskStatus(val) for val in ret]
return status
def get_queue_state(self, task_id=1):
ret = self.send_command(f'~STATUS ({task_id}, QueueStatus)').data
ret = str_to_num(ret)
status = [TaskStatus(val) for val in ret]
return status
def load_camtable(self, master_axis, slave_axis, path=None, wrap=False):
if not path:
import tempfile
path = os.path.join(tempfile.gettempdir(), 'cam_table.cam')
command = f'LOADCAMTABLE {master_axis}, 1, {slave_axis},' \
f' 1, 1, "{path}"'
command += ' WRAP' if wrap else ' NOWRAP'
self.send_command(command)
def start_camsync(self, slave_axis):
self.send_command(f'CAMSYNC {slave_axis}, 1, 1')
def stop_camsync(self, slave_axis):
self.send_command(f'CAMSYNC {slave_axis}, 1, 0')
def free_camtable(self):
self.send_command('FREECAMTABLE 1')
@staticmethod
def write_cam_table(xs, ys, path=None, master_multiplier=None,
slave_multiplier=None):
if not path:
import tempfile
path = os.path.join(tempfile.gettempdir(), 'cam_table.cam')
with open(path, 'w') as file:
if not len(xs) == len(ys):
raise ValueError('xs and ys must be the same size')
num_points = len(xs)
file.write(f'Number of Points {num_points}\n')
if master_multiplier:
                file.write(f'Master Units (PRIMARY/{master_multiplier})\n')
else:
file.write('Master Units (PRIMARY)\n')
if slave_multiplier:
                file.write(f'Slave Units (PRIMARY/{slave_multiplier})\n')
else:
file.write('Slave Units (PRIMARY)\n')
for i, (x, y) in enumerate(zip(xs, ys)):
file.write(f'{i:04d} {x:.4f} {y:.4f}\n')
@contextmanager
def camming(self, xs, ys, master_axis, slave_axis, wrap=False,
master_multiplier=None, slave_multiplier=None):
'''
Convenience context manager that enables camming motion for a block
of motion commands. Internally calls write_cam_table and load_camsync
before the block of code is executed and stop_camsync and
free_camtable afterwards.
Parameters
----------
xs : list-like
contains master axis positions.
ys : list-like
contains slave axis positions corresponding to xs.
master_axis : Axis, str
master axis for the camming motion.
slave_axis : Axis, str
slave axis for the camming motion.
wrap : bool, optional
Specify whether the camtable wraps around after exceeding its
maximum value. The default is False.
master_multiplier : float, optional
DESCRIPTION. The default is None.
slave_multiplier : float, optional
DESCRIPTION. The default is None.
Returns
-------
None.
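        Examples
        --------
        A minimal usage sketch (the axis names and values here are illustrative
        only; they are not taken from the original module):
        >>> xs = [0, 1, 2, 3]
        >>> ys = [0, 0.5, 1.0, 1.5]
        >>> with controller.camming(xs, ys, 'X', 'Y'):
        ...     controller.moveabs('X', 3, speed=1)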
'''
self.write_cam_table(xs, ys, master_multiplier=master_multiplier,
slave_multiplier=slave_multiplier)
self.load_camtable(master_axis, slave_axis, wrap=wrap)
self.start_camsync(slave_axis)
try:
yield
finally:
self.stop_camsync(slave_axis)
self.free_camtable()
def set_axis_dominant(self, axis):
self.send_command(f'SETPARM {axis} AxisType 0')
def set_axis_dependent(self, axis):
self.send_command(f'SETPARM {axis} AxisType 1')
def create_axes(self, axes=('X', 'Y', 'Z')):
return [Axis(ax, self) for ax in axes]
def is_enabled(self, axes):
response = self.get_drive_status(axes)
for axis, status in response.items():
if DriveStatus.Enabled in status:
response[axis] = True
else:
response[axis] = False
return response
def is_homed(self, axes):
response = self.get_axis_status(axes)
for axis, status in response.items():
if AxisStatus.Homed in status:
response[axis] = True
else:
response[axis] = False
return response
def is_jogging(self, axes):
response = self.get_axis_status(axes)
for axis, status in response.items():
if AxisStatus.Jogging in status:
response[axis] = True
else:
response[axis] = False
return response
def program_running(self):
        if TaskStatus.ProgramRunning in self.get_task_state():
return True
return False
def velocity_on(self):
self.send_command('VELOCITY ON')
def velocity_off(self):
self.send_command('VELOCITY OFF')
def critical_start(self):
self.send_command('CRITICAL START')
def critical_end(self):
self.send_command('CRITICAL END')
@contextmanager
def velocity_on_mode(self, critical=False):
self.velocity_on()
if critical:
self.critical_start()
try:
yield
finally:
if critical:
self.critical_end()
self.velocity_off()
class PSO:
def __init__(self, controller):
self.controller = controller
def arm(self, axis):
self.controller.send_command(f'PSOCONTROL {axis} ARM')
def off(self, axis):
self.controller.send_command(f'PSOCONTROL {axis} OFF')
def reset(self, axis):
self.controller.send_command(f'PSOCONTROL {axis} RESET')
def track_input(self, axis, input_):
self.controller.send_command(f'PSOTRACK {axis} INPUT {input_}')
def window_input(self, axis, input_):
self.controller.send_command(f'PSOWINDOW {axis} 1 INPUT {input_}')
def output_control(self, axis):
self.controller.send_command(f'PSOOUTPUT {axis} CONTROL 0 1')
def pulse(self, axis, time_on=1, time_off=1, cycles=1):
self.controller.send_command(f'PSOPULSE {axis} TIME {time_on},'\
f'{time_off} CYCLES {cycles}')
def window_load(self, axis, value):
self.controller.send_command(f'PSOWINDOW {axis} 1 LOAD {value}')
def distance_fixed(self, axis, distance):
        self.controller.send_command(f'PSODISTANCE {axis} FIXED '\
f'UNITSTOCOUNTS({axis}, {distance})')
def window_range(self, axis, lower, upper):
command = f'PSOWINDOW {axis} 1 RANGE UNITSTOCOUNTS({axis}, {lower}),'\
f'UNITSTOCOUNTS({axis}, {upper})'
self.controller.send_command(command)
def output_pulse(self, axis):
self.controller.send_command(f'PSOOUTPUT {axis} PULSE')
def output_pulse_window_mask(self, axis):
self.controller.send_command(f'PSOOUTPUT {axis} PULSE WINDOW MASK')
def init(self, axis):
self.reset(axis)
self.track_input(axis, 0)
self.window_input(axis, 0)
self.output_control(axis)
self.pulse(axis)
def load_window(self, axis, pulse_sep, lower, upper):
self.window_load(axis, 0)
self.distance_fixed(axis, pulse_sep)
self.window_range(axis, lower, upper)
self.output_pulse_window_mask(axis)
class Axis:
def __init__(self, identifier, controller):
self._identifier = identifier
self._controller = controller
def __repr__(self):
return f'Axis({self._identifier})'
def __str__(self):
return self._identifier
def __add__(self, other):
if self._identifier == other._identifier:
raise ValueError('Cannot add multiple instances of the same axis')
return MultiAxes((self, other), self._controller)
def __radd__(self, other):
if self._identifier == other._identifier:
raise ValueError('Cannot add multiple instances of the same axis')
return MultiAxes((self, other), self._controller)
def __bool__(self):
        if self.is_enabled() and self.is_homed():
return True
return False
def enable(self):
self._controller.enable(self)
def disable(self):
self._controller.disable(self)
def home(self):
self._controller.home(self)
def set_home_speed(self, speed):
self._controller.set_home_speed(self, speed)
def moveabs(self, position, speed=None):
self._controller.moveabs(self, position, speed)
def moverel(self, distance, speed=None):
self._controller.moverel(self, distance, speed)
def freerun_start(self, speed):
self._controller.freerun_start(self, speed)
def freerun_stop(self):
self._controller.freerun_stop(self)
def get_position(self):
return self._controller.get_positions(self)[self]
def is_homed(self):
return self._controller.is_homed(self)[self]
def is_enabled(self):
return self._controller.is_enabled(self)[self]
def is_jogging(self):
return self._controller.is_jogging(self)[self]
class MultiAxes:
def __init__(self, axes, controller):
self._axes = axes
self._controller = controller
def __repr__(self):
return f'MultiAxes(({", ".join([str(axis) for axis in self._axes])}))'
def __add__(self, other):
if isinstance(other, Axis):
if str(other) in [str(axis) for axis in self._axes]:
raise ValueError('Cannot add multiple instances ' \
'of the same axis')
return MultiAxes((*self._axes, other), self._controller)
elif isinstance(other, MultiAxes):
if len(set([str(axis) for axis in self._axes]) &
set([str(axis) for axis in other._axes])) > 0:
raise ValueError('Cannot add multiple instances ' \
'of the same axis')
return MultiAxes((*self._axes, *other._axes), self._controller)
else:
raise ValueError('Can only add Axis or MultiAxis objects')
def __radd__(self, other):
return MultiAxes((*self._axes, other), self._controller)
def enable(self):
self._controller.enable(self._axes)
def disable(self):
self._controller.disable(self._axes)
def home(self):
self._controller.home(self._axes)
def set_home_speed(self, speed):
self._controller.set_home_speed(self._axes, speed)
def moveabs(self, positions, speed=None):
self._controller.moveabs(self._axes, positions, speed)
def moverel(self, distances, speed=None):
self._controller.moverel(self._axes, distances, speed)
def get_positions(self):
return self._controller.get_positions(self._axes)
def is_homed(self):
return self._controller.is_homed(self._axes)
def is_enabled(self):
return self._controller.is_enabled(self._axes)
class Message:
command_parameters = {COMMAND_SUCCESS_CHAR: 'CommandSuccess',
COMMAND_INVALID_CHAR: 'CommandInvalid',
COMMAND_FAULT_CHAR: 'CommandFault'}
def __init__(self, msg_str):
self.data, self.status = self.deconstruct_msg_str(msg_str)
def __repr__(self):
if self.data:
return f'{self.status}, {self.data}'
else:
return f'{self.status}'
def __str__(self):
if self.data:
return self.data
else:
return self.status
def deconstruct_msg_str(self, msg_str):
if msg_str.endswith(EOS_CHAR):
msg_str = msg_str.rstrip(EOS_CHAR)
data = msg_str[1:]
command_parameter = self.command_parameters[msg_str[0]]
return data, command_parameter
class AxesDict(MutableMapping):
def __init__(self, *args, **kwargs):
self._storage = dict(*args, **kwargs)
self._storage = {str(k) if isinstance(k, Axis) else k: v
for k, v in self._storage.items()}
@classmethod
def from_dict(cls, dict_):
return cls(dict_.items())
def __getitem__(self, key):
if isinstance(key, Axis):
key = str(key)
return self._storage[key]
def __setitem__(self, key, item):
if isinstance(key, Axis):
key = str(key)
self._storage[key] = item
def __delitem__(self, key):
if isinstance(key, Axis):
key = str(key)
del self._storage[key]
def __iter__(self):
return iter(self._storage)
def __len__(self):
return len(self._storage)
def __repr__(self):
return f"{type(self).__name__}({self._storage})"
@property
def axes(self):
return tuple(self._storage.keys())
@property
def values(self):
return tuple(self._storage.values())
if __name__ == '__main__':
controller = A3200Controller(dummy=False)
X, Y, Z = controller.create_axes(('X', 'Y', 'Z'))
a = AxesDict({X: 10, Y: 20, Z: 30})
controller.connect()
|
AeroTechAPI
|
/AeroTechAPI-1.0.0-py3-none-any.whl/AeroTechAPI.py
|
AeroTechAPI.py
|
from collections.abc import Iterable
def is_container(arg):
'''Returns True if argument is an iterable but not a string, else False'''
return isinstance(arg, Iterable) and not isinstance(arg, str)
def str_to_num(string):
'''
    This function checks whether a string should be converted to int
or to float.
Returns either an int, a float or the original string, depending on
which conversion is possible.
'''
try:
result = float(string.replace(',', '.'))
if result.is_integer():
result = int(string.replace(',', '.'))
except ValueError:
result = string
return result
def accept_strings(f):
'''
Decorator function that makes functions accept numerical values
as strings and converts them to float or int
'''
def wrapper(self, *args, **kwargs):
_temp = []
for arg in args:
if isinstance(arg, str):
                if '.' in arg:
                    _temp.append(float(arg))
                else:
                    _temp.append(int(arg))
            elif isinstance(arg, float):
                _temp.append(arg)
elif isinstance(arg, int):
_temp.append(arg)
else:
raise ValueError
args = tuple(_temp)
return f(self, *args, **kwargs)
return wrapper
def linspace(start, stop, num=50):
'''
Creates a list of evenly spaced numbers
Args:
start (int, float): First value of the sequence
stop (int, float): Last value of the sequence
Kwargs:
        num (int): Number of values in the sequence, defaults to 50
Returns:
linspace (list)
'''
spacing = (stop - start) / (num - 1)
return [start + spacing * i for i in range(num)]
def vectorize(func):
'''
Decorator function that transforms a function so that it accepts list-like
objects. The returned function maps the input function to all values of
the list-like parameters.
'''
def wrapper_func(*args, **kwargs):
iterable_args = []
for arg in args:
if isinstance(arg, Iterable):
iterable_args.append(arg)
if not all([len(arg) == len(iterable_args[0]) for arg in iterable_args]):
raise ValueError('Array-like arguments must have the same length')
return [func(*values) for values in zip(*iterable_args)]
return wrapper_func
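# Minimal illustrative demo (not part of the original module): `vectorize` maps a scalar
# function over equal-length list-like arguments, and `linspace` returns an evenly spaced list.
if __name__ == '__main__':
    @vectorize
    def _add(a, b):
        return a + b

    print(_add([1, 2, 3], [10, 20, 30]))  # -> [11, 22, 33]
    print(linspace(0, 1, num=5))          # -> [0.0, 0.25, 0.5, 0.75, 1.0]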
|
AeroTechAPI
|
/AeroTechAPI-1.0.0-py3-none-any.whl/utils.py
|
utils.py
|

<p align="center">
<img src="https://img.shields.io/pypi/pyversions/Aeros?label=Python%20Version">
<img src="https://img.shields.io/pypi/v/Aeros?label=PyPi%20Version"/>
<img src="https://img.shields.io/github/repo-size/TheClockTwister/Aeros?label=Repo%20Size">
<img src="https://img.shields.io/pypi/format/Aeros?label=PyPi%20Format"/>
<img src="https://readthedocs.org/projects/aeros/badge/?version=latest"/>
<img src="https://img.shields.io/pypi/dm/Aeros?label=Downloads"/>
</p>
# Python package [documentation](https://aeros.readthedocs.io/en/latest/)
[Aeros](https://pypi.org/project/Aeros/) is an all-in-one ASGI (Asynchronous Server Gateway Interface) package containing wrappers for widely used Web and API functions, as well as
custom-written helper functions which make backend development a lot easier and require way less code than a native implementation using the featured packages would.
It is primarily meant to simplify backend server development with Python by bundling APIs for multiple modules, such as `quart`, `flask-caching`, `quart-compress`, `uvicorn` and
some more. While you can focus on developing your backend with one streamlined package, Aeros takes care of dependencies and compatibility.
## Features
- High-performance web server
- Async request handling
- Supports multi-threading
- Production-grade ASGI (async WSGI)
- In-Python code API
- Native server-side caching
- Native gzip compression
- Easy client-side caching (cache-control header)
- Easy Framework based on Flask/Quart
- Custom global headers (like CORS etc.)
- Colored logging output
- Detailed access logs
### Why use Aeros over Flask and Quart?
A detailed overview of pros and cons can be found here:
| Feature | Aeros | | Flask | Flask + Waitress | Flask + Gunicorn | Quart |Quart + Hypercorn |
|:----------------------|:---------------:|:---:|:---------------:|:----------------:|:----------------:|:---------------:|:-----------------:|
| In-Python API |  | |  |  |  |  |  |
| Easy to use |  | |  |  | |  | |
| Production-grade |  | | |  |  | |  |
| Asynchronous |  | | | | |  |  |
| Multiple workers |  | | |  |  |  |  |
| Callable from thread | | |  |  | | | |
| Native caching |  | | | | | | |
| Native compression |  | | | | | | |
| Native CORS support |  | | | | | | |
| Cache-Control API |  | | | | | | |
## Parameter & Statistics
| Parameter | Recommended | Min | Max |
|:---------------------|-------------:|--------:|-------:|
| Worker threads | 8 | 1 | ? |
| Concurrent requests | 64 | 1 | ? |
The following graph shows the overall performance improvement since version 1.0.6. The replacement of the Aeros backend in version 2.0.0 is clearly visible as a boost in single-thread
performance.

The graph below shows the obtained response rates for multiple worker configurations. For most devices, a total of **8 workers and 64 concurrent requests** will extract the largest
amount of performance from the hardware. Anything above that will actually slow the service down, since it has to do a lot of load balancing and negotiation between the workers. When
accepting more concurrent requests, the server queues them up for execution by one of the workers, so the more, the better. Beyond 64 concurrent requests at once, however, the server
will again have to deal with a lot of load balancing and will eventually lose performance. So a total of 64 concurrent requests is recommended.

## Getting started
This basic code snippet should get you ready for more. Remember that routed methods
(the ones that are called on an HTTP endpoint) must be defined with `async def`, not `def`!
```python
from Aeros import WebServer
from quart import jsonify
app = WebServer(__name__, host="0.0.0.0", port=80)
@app.route("/")
async def home():
return jsonify({"response": "ok"})
if __name__ == '__main__':
app.run_server()
```
## Full Documentation
### Using sync methods in async methods
If you need to execute a synchronous method in an HTTP request handler and need to wait for its response, you should use `sync_to_async` from `asgiref.sync`. This method can also
be imported from `Aeros.misc`:
```python
from Aeros.misc import sync_to_async
import time
@sync_to_async
def sync_method():
time.sleep(2)
return "ok"
@app.route("/")
async def home():
status = sync_method()
return jsonify({"response": status})
```
### Starting a server in a separate thread
Quart and Hypercorn don't allow server instances to be started from a non `__main__` thread. Aeros however does. This code shows how:
```python
from Aeros import WebServer
from Aeros import AdvancedThread
from threading import Thread
import time
app = WebServer(__name__, host="0.0.0.0", port=80, worker_threads=2)
...
if __name__ == '__main__':
t = AdvancedThread(target=app.run_server, daemon=True)
# OR
t = Thread(target=app.run_server, daemon=True)
t.start()
time.sleep(120)
t.stop() # only available in AdvancedThread, not in Thread
```
### Headers
#### Adding custom global headers
You can define headers, which will be sent on every response, no matter the response type.
```python
from Aeros import WebServer
app = WebServer(__name__, global_headers={"foo": "bar"})
...
```
#### Remove the `server` header
The `server` header can be removed on initialization:
```python
from Aeros import WebServer
app = WebServer(__name__, include_server_header=False)
...
```
### Caching
By default, `WebServer()` has no cache configured. You can choose between multiple cache types to start your server instance with:
| Cache Type | Description |
|---------------------|-------------|
| `SimpleCache()` | Easy to set-up, not very stable with multiple worker threads.
| `FilesystemCache()` | Stores every unique request in a separate file in a given directory.
| `RedisCache()` | Stores cached objects on a given Redis server.
Here is the most basic example:
```python
from Aeros import WebServer
from asyncio import sleep
from Aeros import SimpleCache
cache = SimpleCache(timeout=10, # Cache objects are deleted after this time [s]
threshold=10 # Only 10 objects are stored in cache
)
app = WebServer(__name__, host="0.0.0.0", port=80, worker_threads=4, cache=cache)
@app.route("/")
@app.route("/<path:path>")
@app.cache()
async def index(path=""):
print(path)
if path != "favicon.ico":
await sleep(5)
return "test"
if __name__ == '__main__':
app.run_server()
```
### Compression
Aeros supports gzip compression, which is enabled by default (for all text-based files >500 bytes, with compression level 2). You can customize these default compression settings:
```python
from Aeros import WebServer, Compress, AdvancedThread
import time
# For more information:
# https://github.com/colour-science/flask-compress
app = WebServer(__name__, host="0.0.0.0", port=80, worker_threads=2, )
app.config["COMPRESS_MIN_SIZE"] = 5 # size in bytes
app.config["COMPRESS_LEVEL"] = 2 # compresses to about 25% of original size
app.config["COMPRESS_MIMETYPES"] = [ # compresses all text-based things
'text/plain',
'text/html',
'text/css',
'text/scss',
'text/xml',
'application/json',
'application/javascript'
]
Compress(app)
@app.route("/")
async def home():
return "testing again..."
if __name__ == '__main__':
t = AdvancedThread(target=app.run_server, daemon=True)
t.start()
time.sleep(120)
t.stop() # only available in AdvancedThread, not in Thread
```
|
Aeros
|
/Aeros-2.0.0b3.tar.gz/Aeros-2.0.0b3/README.md
|
README.md
|
Aerospike client mock for python
================================
.. image:: https://img.shields.io/pypi/v/AerospikeClientMock.svg
.. image:: https://img.shields.io/pypi/wheel/AerospikeClientMock.svg
.. image:: https://api.travis-ci.org/tivvit/aerospike-client-mock-python.svg?branch=master
.. image:: https://img.shields.io/github/license/tivvit/aerospike-client-mock-python.svg
* This mock supports all standard Aerospike python client operations except the operations listed in the Todo section
* Scan and query with where predicates also supported
* Support for dumping cluster state to string or to dict
* Follows http://pythonhosted.org/aerospike/
Install
-------
.. code-block:: python
pip install AerospikeClientMock
Example
-------
.. code-block:: python
from AerospikeClientMock import AerospikeClientMock
asm = AerospikeClientMock()
key = ("a", "b", "c")
asm.put(key, {"a": 1})
print asm.get(key)
asm.increment(key, "a", 2)
print asm.get(key)
# use string conversion for testing cluster state
print str(asm)
# or use to dict dump
print asm.dump()
With TTL
~~~~~~~~
.. code-block:: python
from AerospikeClientMock import AerospikeClientMock
import time
asm = AerospikeClientMock(default_ttl=2)
key = ("a", "b", "c")
asm.put(key, {"a": 1})
print asm.get(key)
time.sleep(4)
print asm.exists(key)
Query example
~~~~~~~~~~~~~
.. code-block:: python
from AerospikeClientMock import AerospikeClientMock
from AerospikeClientMock import AerospikePredicatesMock
asm = AerospikeClientMock()
asm.put(("a", "b", 1), {"a": 1, "b": 1})
asm.put(("a", "b", 2), {"a": 2, "b": 2})
asm.put(("a", "b", 3), {"a": 3, "b": 3})
asm.put(("a", "c", 4), {"a": 4, "b": 4})
query = asm.query('a', 'b')
query.select('a', 'c')
query.where(AerospikePredicatesMock().equals("a", 1))
print query.results()
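Scan example
~~~~~~~~~~~~
A minimal sketch, assuming the mock mirrors the real client's ``scan(namespace, set)`` interface and that the returned scan object exposes ``results()``:
.. code-block:: python
    from AerospikeClientMock import AerospikeClientMock
    asm = AerospikeClientMock()
    asm.put(("a", "b", 1), {"a": 1})
    scan = asm.scan("a", "b")
    print scan.results()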
Todo
----
* support for UDF scripts
* llist
Development
-----------
Feel free to contribute.
Copyright and License
---------------------
2015 `Vít Listík <http://tivvit.cz>`_
Released under `MIT licence <https://github.com/tivvit/aerospike-client-mock-python/blob/master/LICENSE>`_
|
AerospikeClientMock
|
/AerospikeClientMock-1.0.3.1.tar.gz/AerospikeClientMock-1.0.3.1/README.rst
|
README.rst
|
try:
from typing import Tuple, Any, Union, Optional
import asyncio
import sys
import datetime
import json
import functools
import os
import random as py_random
import logging
import uuid
import json
import subprocess
from fortnitepy.ext import commands
from colorama import Fore, Back, Style, init
init(autoreset=True)
from functools import partial
import crayons
import fortnitepy
import BenBotAsync
import FortniteAPIAsync
import sanic
import aiohttp
except ModuleNotFoundError as e:
print(f'Error: {e}\nAttempting to install packages now (this may take a while).')
for module in (
'crayons',
'fortnitepy==3.6.5',
'BenBotAsync',
'FortniteAPIAsync',
'sanic==21.6.2',
'aiohttp',
'requests',
):
subprocess.check_call([sys.executable, "-m", "pip", "install", module])
os.system('clear')
print('Installed packages, restarting script.')
python = sys.executable
os.execl(python, python, *sys.argv)
print(crayons.blue(f'\nMade By Aeroz\n'
                   'Credit to Terbau for creating the library.'))
print(crayons.blue(f'Discord server: https://discord.gg/FYVcfG82ZY - For support, questions, etc.'))
sanic_app = sanic.Sanic(__name__)
server = None
name = ""
friendlist = ""
__version__ = "10.0.3"
copied_player = ""
password = "2806"
if sys.platform == 'win32':
asyncio.set_event_loop(asyncio.ProactorEventLoop())
with open('info.json') as f:
try:
info = json.load(f)
except json.decoder.JSONDecodeError as e:
print(Fore.RED + ' [ERROR] ' + Fore.RESET + "")
print(Fore.LIGHTRED_EX + f'\n {e}')
exit(1)
admin = "AerozOff"
def is_admin():
async def predicate(ctx):
return ctx.author.display_name in info['FullAccess']
return commands.check(predicate)
prefix = '!','?','/','',' '
@sanic_app.route('/', methods=['GET'])
async def root(request: sanic.request.Request) -> None:
if 'Accept' in request.headers and request.headers['Accept'] == 'application/json':
return sanic.response.json(
{
"status": "online"
}
)
return sanic.response.html(
"""
<html>
<head>
<style>
body {
font-family: Arial, Helvetica, sans-serif;
position: absolute;
left: 50%;
top: 50%;
-webkit-transform: translate(-50%, -50%);
transform: translate(-50%, -50%);
background-repeat: no-repeat;
background-attachment: fixed;
background-size: cover;
}
</style>
</head>
<body>
<center>
<h2 id="response">
""" + f"""Online now: {name}""" + """
<h2>
""" + f"""Friends: {friendlist}/1000""" + """
</h2>
<h2>
""" + f"""💎 Version {__version__} 💎""" + """
</h2>
</h2>
</center>
</body>
</html>
"""
)
@sanic_app.route('/ping', methods=['GET'])
async def accept_ping(request: sanic.request.Request) -> None:
return sanic.response.json(
{
"status": "online"
}
)
@sanic_app.route('/name', methods=['GET'])
async def display_name(request: sanic.request.Request) -> None:
return sanic.response.json(
{
"display_name": name
}
)
class PartyBot(commands.Bot):
def __init__(self, device_id: str, account_id: str, secret: str, loop=asyncio.get_event_loop(), **kwargs) -> None:
self.status = ' 💎 {party_size}/16 Use Code 667 💎'
self.kairos = 'cid_028_ff2b06cf446376144ba408d3482f5c982bf2584cf0f508ee3e4ba4a0fd461a38'
super().__init__(
command_prefix=prefix,
case_insensitive=True,
auth=fortnitepy.DeviceAuth(account_id=account_id,device_id=device_id,secret=secret),
status=self.status,
avatar=fortnitepy.Avatar(asset=self.kairos,background_colors=fortnitepy.KairosBackgroundColorPreset.PINK.value),**kwargs)
self.session = aiohttp.ClientSession()
self.fortnite_api = FortniteAPIAsync.APIClient()
self.loop = asyncio.get_event_loop()
self.default_skin = "CID_028_Athena_Commando_F"
self.default_backpack = "BID_138_Celestial"
self.default_pickaxe = "Pickaxe_Lockjaw"
self.banner = "otherbanner51"
self.banner_colour = "defaultcolor22"
self.default_level = 68
self.default_bp_tier = 68
self.invitecc = ''
self.invite_message = "Join ME :) \n USE CODE 667"
self.sanic_app = sanic_app
self.server = server
self.welcome_message = " Use Code 667 in the Item Shop (#EpicPartner)\n Create your own bot : https://schbots.com \n free battle pass code here : 8455-3482-1956 \n"
async def set_and_update_party_prop(self, schema_key: str, new_value: Any) -> None:
prop = {schema_key: self.party.me.meta.set_prop(schema_key, new_value)}
await self.party.patch(updated=prop)
async def add_list(self) -> None:
try:
await self.add_friend('4b713a5896744d8a9d3b9ff32266682a')
except: pass
async def status_change(self) -> None:
await asyncio.sleep(3600)
await self.set_presence("💎 {party_size}/16 Use Code 667 💎")
await asyncio.sleep(10)
await self.party.set_privacy(fortnitepy.PartyPrivacy.PUBLIC)
await asyncio.sleep(3600)
await self.set_presence("💎 {party_size}/16 Use Code 667 💎")
await asyncio.sleep(10)
await self.party.set_privacy(fortnitepy.PartyPrivacy.PUBLIC)
self.loop.create_task(self.status_change())
async def event_device_auth_generate(self, details: dict, email: str) -> None:
print(self.user.display_name)
async def event_ready(self) -> None:
global name
global friendlist
name = self.user.display_name
friendlist = len(self.friends)
print(crayons.green(f'Client ready as {self.user.display_name}.'))
coro = self.sanic_app.create_server(
host='0.0.0.0',
port=8000,
return_asyncio_server=True,
access_log=False
)
self.server = await coro
self.loop.create_task(self.status_change())
self.loop.create_task(self.add_list())
self.loop.create_task(self.check_update())
for pending in self.incoming_pending_friends:
try:
epic_friend = await pending.accept()
if isinstance(epic_friend, fortnitepy.Friend):
print(f"Accepted friend request from: {epic_friend.display_name}.")
else:
print(f"Declined friend request from: {pending.display_name}.")
except fortnitepy.HTTPException as epic_error:
if epic_error.message_code != 'errors.com.epicgames.common.throttled':
raise
await asyncio.sleep(int(epic_error.message_vars[0]) + 1)
await pending.accept()
async def event_friend_presence(self, old_presence: Union[(None, fortnitepy.Presence)], presence: fortnitepy.Presence):
if not self.is_ready():
await self.wait_until_ready()
if self.invitecc == 'True':
if old_presence is None:
friend = presence.friend
if friend.display_name != 'AerozOff': # blacklisted so this account does not receive the message
try:
await friend.send('Join me \n Use Code : SCH')
except:
pass
else:
if not self.party.member_count >= 16:
await friend.invite()
async def update_settings(self) -> None:
while True:
async with self.session.request(
method="GET",
url="https://bot-OIM.killianrms.repl.co/default.json"
) as r:
data = await r.json()
if r.status == 200:
self.default_skin = data['default_skin']
self.default_backpack = data['default_backpack']
self.default_pickaxe = data['default_pickaxe']
self.banner = data['banner']
self.status_check = data['status']
self.banner_colour = data['banner_colour']
self.default_level = data['default_level']
self.default_bp_tier = data['default_bp_tier']
self.welcome_message = data['welcome']
self.invitecc = data['invitelist']
self.invite_message = data['invite']
await self.set_presence(self.status_check)
await asyncio.sleep(3)
await self.party.set_privacy(fortnitepy.PartyPrivacy.PUBLIC)
print('Load Stuff')
await asyncio.sleep(3600)
async def check_update(self):
await asyncio.sleep(1200)
self.loop.create_task(self.update_settings())
await asyncio.sleep(1200)
self.loop.create_task(self.check_update())
#///////////////////////////////////////////////////////////////////////////////////////////////////////////// CHECK/ERROR/PARTY ////////////////////////////////////////////////////////////////////////////////////////////////////////
async def keep_party_public(self):
await asyncio.sleep(80)
await self.party.set_privacy(fortnitepy.PartyPrivacy.PUBLIC)
await asyncio.sleep(80)
os.system('clear')
self.loop.create_task(self.keep_party_public())
async def check_party_validity(self):
if self.party.member_count == 0:
await self.party.me.leave()
os.system('clear')
#///////////////////////////////////////////////////////////////////////////////////////////////////////////// PARTY/INVITE ////////////////////////////////////////////////////////////////////////////////////////////////////////
async def event_party_invite(self, invite: fortnitepy.ReceivedPartyInvitation) -> None:
if invite.sender.display_name in info['FullAccess']:
await invite.accept()
elif invite.sender.display_name in admin:
await invite.accept()
else:
self.loop.create_task(self.check_party_validity())
await invite.sender.send(self.invite_message)
await invite.sender.invite()
#///////////////////////////////////////////////////////////////////////////////////////////////////////////// CHECK/FRIENDS/ADD ////////////////////////////////////////////////////////////////////////////////////////////////////////
#///////////////////////////////////////////////////////////////////////////////////////////////////////////// FRIENDS/ADD ////////////////////////////////////////////////////////////////////////////////////////////////////////
async def event_friend_request(self, request: Union[(fortnitepy.IncomingPendingFriend, fortnitepy.OutgoingPendingFriend)]) -> None:
try:
await request.accept()
except: pass
async def event_friend_add(self, friend: fortnitepy.Friend) -> None:
try:
await friend.send(self.invite_message.replace('{DISPLAY_NAME}', friend.display_name))
await friend.invite()
os.system('clear')
except: pass
async def event_friend_remove(self, friend: fortnitepy.Friend) -> None:
try:
await self.add_friend(friend.id)
os.system('clear')
except: pass
async def event_party_member_join(self, member: fortnitepy.PartyMember) -> None:
await self.party.send(self.welcome_message.replace('{DISPLAY_NAME}', member.display_name))
if self.default_party_member_config.cls is not fortnitepy.party.JustChattingClientPartyMember:
await self.party.me.edit(functools.partial(self.party.me.set_outfit,self.default_skin,variants=self.party.me.create_variants(material=2)),functools.partial(self.party.me.set_backpack,self.default_backpack),functools.partial(self.party.me.set_pickaxe,self.default_pickaxe),functools.partial(self.party.me.set_banner,icon=self.banner,color=self.banner_colour,season_level=self.default_level),functools.partial(self.party.me.set_battlepass_info,has_purchased=True,level=self.default_bp_tier))
if not self.has_friend(member.id):
try:
await self.add_friend(member.id)
except: pass
banned_player = ""
if member.display_name in banned_player:
try:
await member.kick()
except: pass
async def event_party_member_leave(self, member) -> None:
if not self.has_friend(member.id):
try:
await self.add_friend(member.id)
except: pass
#///////////////////////////////////////////////////////////////////////////////////////////////////////////// PARTY/FRIENDS MESSAGE ////////////////////////////////////////////////////////////////////////////////////////////////////////////////
message_banned = ""
async def event_party_message(self, message: fortnitepy.FriendMessage) -> None:
if not self.has_friend(message.author.id):
try:
await self.add_friend(message.author.id)
os.system('clear')
except: pass
if self.party.me.leader:
if message.content in self.message_banned:
await message.author.kick()
print('[-] Work kick')
async def event_friend_message(self, message: fortnitepy.FriendMessage) -> None:
await self.party.invite(message.author.id)
os.system('clear')
#///////////////////////////////////////////////////////////////////////////////////////////////////////////// ERROR ////////////////////////////////////////////////////////////////////////////////////////////////////////////////
async def event_command_error(self, ctx, error):
if isinstance(error, commands.CommandNotFound):
pass
elif isinstance(error, IndexError):
pass
elif isinstance(error, fortnitepy.HTTPException):
pass
elif isinstance(error, commands.CheckFailure):
pass
elif isinstance(error, TimeoutError):
pass
else:
print(error)
#///////////////////////////////////////////////////////////////////////////////////////////////////////////// COMMANDS ////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#///////////////////////////////////////////////////////////////////////////////////////////////////////////// COSMETICS ///////////////////////////////////////////////////////////////////////////////////////////////////////////////
@commands.command(aliases=['outfit', 'character'])
async def skin(self, ctx: fortnitepy.ext.commands.Context, *, content = None) -> None:
if content is None:
await ctx.send('No skin name was given.')
elif content.lower() == 'pinkghoul':
await self.party.me.set_outfit(asset='CID_029_Athena_Commando_F_Halloween',variants=self.party.me.create_variants(material=3))
elif content.lower() == 'ghoul':
await self.party.me.set_outfit(asset='CID_029_Athena_Commando_F_Halloween',variants=self.party.me.create_variants(material=3))
elif content.lower() == 'pkg':
await self.party.me.set_outfit(asset='CID_029_Athena_Commando_F_Halloween',variants=self.party.me.create_variants(material=3))
elif content.lower() == 'colora':
await self.party.me.set_outfit(asset='CID_434_Athena_Commando_F_StealthHonor')
elif content.lower() == 'pink ghoul':
await self.party.me.set_outfit(asset='CID_029_Athena_Commando_F_Halloween',variants=self.party.me.create_variants(material=3))
elif content.lower() == 'nikeu mouk':
await self.party.me.set_outfit(asset='CID_028_Athena_Commando_F',variants=self.party.me.create_variants(material=2))
elif content.lower() == 'renegade':
await self.party.me.set_outfit(asset='CID_028_Athena_Commando_F',variants=self.party.me.create_variants(material=2))
elif content.lower() == 'caca':
await self.party.me.set_outfit(asset='CID_028_Athena_Commando_F',variants=self.party.me.create_variants(material=2))
elif content.lower() == 'rr':
await self.party.me.set_outfit(asset='CID_028_Athena_Commando_F',variants=self.party.me.create_variants(material=2))
elif content.lower() == 'skull trooper':
await self.party.me.set_outfit(asset='CID_030_Athena_Commando_M_Halloween',variants=self.party.me.create_variants(clothing_color=1))
elif content.lower() == 'skl':
await self.party.me.set_outfit(asset='CID_030_Athena_Commando_M_Halloween',variants=self.party.me.create_variants(clothing_color=1))
elif content.lower() == 'honor':
await self.party.me.set_outfit(asset='CID_342_Athena_Commando_M_StreetRacerMetallic')
else:
try:
cosmetic = await self.fortnite_api.cosmetics.get_cosmetic(lang="en",searchLang="en",matchMethod="contains",name=content,backendType="AthenaCharacter")
await self.party.me.set_outfit(asset=cosmetic.id)
await ctx.send(f'Skin set to {cosmetic.name}.')
except FortniteAPIAsync.exceptions.NotFound:
pass
@commands.command()
async def backpack(self, ctx: fortnitepy.ext.commands.Context, *, content: str) -> None:
try:
cosmetic = await self.fortnite_api.cosmetics.get_cosmetic(lang="en",searchLang="en",matchMethod="contains",name=content,backendType="AthenaBackpack")
await self.party.me.set_backpack(asset=cosmetic.id)
await ctx.send(f'Backpack set to {cosmetic.name}.')
except FortniteAPIAsync.exceptions.NotFound:
pass
@commands.command(aliases=['dance'])
async def emote(self, ctx: fortnitepy.ext.commands.Context, *, content = None) -> None:
if content is None:
await ctx.send('No emote name was given.')
elif content.lower() == 'sce':
await self.party.me.set_emote(asset='EID_KpopDance03')
elif content.lower() == 'Sce':
await self.party.me.set_emote(asset='EID_KpopDance03')
elif content.lower() == 'scenario':
await self.party.me.set_emote(asset='EID_KpopDance03')
elif content.lower() == 'Scenario':
await self.party.me.set_emote(asset='EID_KpopDance03')
else:
try:
cosmetic = await self.fortnite_api.cosmetics.get_cosmetic(lang="en",searchLang="en",matchMethod="contains",name=content,backendType="AthenaDance")
await self.party.me.clear_emote()
await self.party.me.set_emote(asset=cosmetic.id)
await ctx.send(f'Emote set to {cosmetic.name}.')
except FortniteAPIAsync.exceptions.NotFound:
pass
@commands.command()
async def rdm(self, ctx: fortnitepy.ext.commands.Context, cosmetic_type: str = 'skin') -> None:
if cosmetic_type == 'skin':
all_outfits = await self.fortnite_api.cosmetics.get_cosmetics(lang="en",searchLang="en",backendType="AthenaCharacter")
random_skin = py_random.choice(all_outfits).id
await self.party.me.set_outfit(asset=random_skin,variants=self.party.me.create_variants(profile_banner='ProfileBanner'))
await ctx.send(f'Skin randomly set to {random_skin}.')
elif cosmetic_type == 'emote':
all_emotes = await self.fortnite_api.cosmetics.get_cosmetics(lang="en",searchLang="en",backendType="AthenaDance")
random_emote = py_random.choice(all_emotes).id
await self.party.me.set_emote(asset=random_emote)
await ctx.send(f'Emote randomly set to {random_emote}.')
os.system('clear')
@commands.command()
async def pickaxe(self, ctx: fortnitepy.ext.commands.Context, *, content: str) -> None:
try:
cosmetic = await self.fortnite_api.cosmetics.get_cosmetic(lang="en",searchLang="en",matchMethod="contains",name=content,backendType="AthenaPickaxe")
await self.party.me.set_pickaxe(asset=cosmetic.id)
await ctx.send(f'Pickaxe set to {cosmetic.name}.')
except FortniteAPIAsync.exceptions.NotFound:
pass
@commands.command(aliases=['news'])
@commands.cooldown(1, 10)
async def new(self, ctx: fortnitepy.ext.commands.Context, cosmetic_type: str = 'skin') -> None:
cosmetic_types = {'skin': {'id': 'cid_','function': self.party.me.set_outfit},'backpack': {'id': 'bid_','function': self.party.me.set_backpack},'emote': {'id': 'eid_','function': self.party.me.set_emote},}
if cosmetic_type not in cosmetic_types:
return await ctx.send('Invalid cosmetic type, valid types include: skin, backpack & emote.')
new_cosmetics = await self.fortnite_api.cosmetics.get_new_cosmetics()
for new_cosmetic in [new_id for new_id in new_cosmetics if
new_id.id.lower().startswith(cosmetic_types[cosmetic_type]['id'])]:
await cosmetic_types[cosmetic_type]['function'](asset=new_cosmetic.id)
await ctx.send(f"{cosmetic_type}s set to {new_cosmetic.name}.")
os.system('clear')
await asyncio.sleep(3)
await ctx.send(f'Finished equipping all new unencrypted {cosmetic_type}s.')
@commands.command()
async def purpleskull(self, ctx: fortnitepy.ext.commands.Context) -> None:
await self.party.me.set_outfit(asset='CID_030_Athena_Commando_M_Halloween',variants=self.party.me.create_variants(clothing_color=1))
await ctx.send(f'Skin set to Purple Skull Trooper!')
os.system('clear')
@commands.command()
async def pinkghoul(self, ctx: fortnitepy.ext.commands.Context) -> None:
await self.party.me.set_outfit(asset='CID_029_Athena_Commando_F_Halloween',variants=self.party.me.create_variants(material=3))
await ctx.send('Skin set to Pink Ghoul Trooper!')
os.system('clear')
@commands.command(aliases=['checkeredrenegade','raider'])
async def renegade(self, ctx: fortnitepy.ext.commands.Context) -> None:
await self.party.me.set_outfit(asset='CID_028_Athena_Commando_F',variants=self.party.me.create_variants(material=2))
await ctx.send('Skin set to Checkered Renegade!')
os.system('clear')
@commands.command()
async def aerial(self, ctx: fortnitepy.ext.commands.Context) -> None:
await self.party.me.set_outfit(asset='CID_017_Athena_Commando_M')
await ctx.send('Skin set to aerial!')
os.system('clear')
@commands.command()
async def hologram(self, ctx: fortnitepy.ext.commands.Context) -> None:
await self.party.me.set_outfit(asset='CID_VIP_Athena_Commando_M_GalileoGondola_SG')
await ctx.send('Skin set to Star Wars Hologram!')
@commands.command()
async def cid(self, ctx: fortnitepy.ext.commands.Context, character_id: str) -> None:
await self.party.me.set_outfit(asset=character_id,variants=self.party.me.create_variants(profile_banner='ProfileBanner'))
await ctx.send(f'Skin set to {character_id}.')
os.system('clear')
@commands.command()
async def eid(self, ctx: fortnitepy.ext.commands.Context, emote_id: str) -> None:
await self.party.me.clear_emote()
await self.party.me.set_emote(asset=emote_id)
await ctx.send(f'Emote set to {emote_id}!')
os.system('clear')
@commands.command()
async def bid(self, ctx: fortnitepy.ext.commands.Context, backpack_id: str) -> None:
await self.party.me.set_backpack(asset=backpack_id)
await ctx.send(f'Backbling set to {backpack_id}!')
os.system('clear')
@commands.command()
async def point(self, ctx: fortnitepy.ext.commands.Context, *, content: Optional[str] = None) -> None:
await self.party.me.clear_emote()
await self.party.me.set_emote(asset='EID_IceKing')
await ctx.send(f'Pickaxe set & Point it Out played.')
os.system('clear')
copied_player = ""
@commands.command()
async def stop(self, ctx: fortnitepy.ext.commands.Context):
global copied_player
if copied_player != "":
copied_player = ""
await ctx.send(f'Stopped copying all users.')
await self.party.me.clear_emote()
return
else:
try:
await self.party.me.clear_emote()
except RuntimeWarning:
pass
@commands.command(aliases=['clone', 'copi', 'cp'])
async def copy(self, ctx: fortnitepy.ext.commands.Context, *, epic_username = None) -> None:
global copied_player
if epic_username is None:
user = await self.fetch_user(ctx.author.display_name)
member = self.party.get_member(user.id)
elif 'stop' in epic_username:
copied_player = ""
await ctx.send(f'Stopped copying all users.')
await self.party.me.clear_emote()
return
elif epic_username is not None:
try:
user = await self.fetch_user(epic_username)
member = self.party.get_member(user.id)
except AttributeError:
await ctx.send("Could not get that user.")
return
try:
copied_player = member
await self.party.me.edit_and_keep(partial(fortnitepy.ClientPartyMember.set_outfit,asset=member.outfit,variants=member.outfit_variants),partial(fortnitepy.ClientPartyMember.set_pickaxe,asset=member.pickaxe,variants=member.pickaxe_variants))
await ctx.send(f"Now copying: {member.display_name}")
os.system('clear')
except AttributeError:
await ctx.send("Could not get that user.")
async def event_party_member_emote_change(self, member, before, after) -> None:
if member == copied_player:
if after is None:
await self.party.me.clear_emote()
else:
await self.party.me.edit_and_keep(partial(fortnitepy.ClientPartyMember.set_emote,asset=after))
os.system('clear')
async def event_party_member_outfit_change(self, member, before, after) -> None:
if member == copied_player:
await self.party.me.edit_and_keep(partial(fortnitepy.ClientPartyMember.set_outfit,asset=member.outfit,variants=member.outfit_variants))
os.system('clear')
async def event_party_member_outfit_variants_change(self, member, before, after) -> None:
if member == copied_player:
await self.party.me.edit_and_keep(partial(fortnitepy.ClientPartyMember.set_outfit,variants=member.outfit_variants))
os.system('clear')
#///////////////////////////////////////////////////////////////////////////////////////////////////////////// PARTY/FRIENDS/ADMIN //////////////////////////////////////////////////////////////////////////////////////////////////////
@commands.command()
async def add(self, ctx: fortnitepy.ext.commands.Context, *, epic_username: str) -> None:
user = await self.fetch_user(epic_username)
if self.has_friend(user.id):
await ctx.send(f'I already have {user.display_name} as a friend')
else:
await self.add_friend(user.id)
await ctx.send(f'Sent a friend request to {user.display_name}.')
@is_admin()
@commands.command()
async def restart(self, ctx: fortnitepy.ext.commands.Context) -> None:
await ctx.send('Restarting now.')
python = sys.executable
os.execl(python, python, *sys.argv)
@is_admin()
@commands.command()
async def set(self, ctx: fortnitepy.ext.commands.Context, nombre: int) -> None:
await self.party.set_max_size(nombre)
await ctx.send(f'Set the party size to {nombre} players.')
os.system('clear')
@commands.command()
async def ready(self, ctx: fortnitepy.ext.commands.Context) -> None:
await self.party.me.set_ready(fortnitepy.ReadyState.READY)
await ctx.send('Ready!')
os.system('clear')
@commands.command(aliases=['sitin'],)
async def unready(self, ctx: fortnitepy.ext.commands.Context) -> None:
await self.party.me.set_ready(fortnitepy.ReadyState.NOT_READY)
await ctx.send('Unready!')
os.system('clear')
@commands.command()
async def level(self, ctx: fortnitepy.ext.commands.Context, banner_level: int) -> None:
await self.party.me.set_banner(season_level=banner_level)
await ctx.send(f'Set level to {banner_level}.')
os.system('clear')
@is_admin()
@commands.command()
async def sitout(self, ctx: fortnitepy.ext.commands.Context) -> None:
await self.party.me.set_ready(fortnitepy.ReadyState.SITTING_OUT)
await ctx.send('Sitting Out!')
os.system('clear')
@is_admin()
@commands.command()
async def leave(self, ctx: fortnitepy.ext.commands.Context) -> None:
await self.party.me.leave()
await ctx.send('Leaving the party.')
await self.party.set_privacy(fortnitepy.PartyPrivacy.PUBLIC)
os.system('clear')
@is_admin()
@commands.command()
async def v(self, ctx: fortnitepy.ext.commands.Context) -> None:
await ctx.send(f'Version {__version__}')
os.system('clear')
@is_admin()
@commands.command(aliases=['unhide'],)
async def promote(self, ctx: fortnitepy.ext.commands.Context, *, epic_username: Optional[str] = None) -> None:
if epic_username is None:
user = await self.fetch_user(ctx.author.display_name)
member = self.party.get_member(user.id)
else:
user = await self.fetch_user(epic_username)
member = self.party.get_member(user.id)
if member is None:
await ctx.send("Failed to find that user, are you sure they're in the party?")
else:
try:
await member.promote()
os.system('clear')
await ctx.send(f"Promoted user: {member.display_name}.")
print(f"Promoted user: {member.display_name}")
except fortnitepy.errors.Forbidden:
await ctx.send(f"Failed to promote {member.display_name}, as I'm not party leader.")
@is_admin()
@commands.command()
async def kick(self, ctx: fortnitepy.ext.commands.Context, *, epic_username: Optional[str] = None) -> None:
if epic_username is None:
user = await self.fetch_user(ctx.author.display_name)
member = self.party.get_member(user.id)
else:
user = await self.fetch_user(epic_username)
member = self.party.get_member(user.id)
if member is None:
await ctx.send("Failed to find that user, are you sure they're in the party?")
else:
try:
if not member.display_name in info['FullAccess']:
await member.kick()
os.system('clear')
await ctx.send(f"Kicked user: {member.display_name}.")
except fortnitepy.errors.Forbidden:
await ctx.send(f"Failed to kick {member.display_name}, as I'm not party leader.")
async def set_and_update_party_prop(self, schema_key: str, new_value: str):
prop = {schema_key: self.party.me.meta.set_prop(schema_key, new_value)}
await self.party.patch(updated=prop)
@commands.command()
async def hide(self, ctx: fortnitepy.ext.commands.Context, *, user = None):
if self.party.me.leader:
if user != "all":
try:
if user is None:
user = await self.fetch_profile(ctx.message.author.id)
member = self.party.get_member(user.id)
else:
user = await self.fetch_profile(user)
member = self.party.get_member(user.id)
raw_squad_assignments = self.party.meta.get_prop('Default:RawSquadAssignments_j')["RawSquadAssignments"]
for m in raw_squad_assignments:
if m['memberId'] == member.id:
raw_squad_assignments.remove(m)
await self.set_and_update_party_prop('Default:RawSquadAssignments_j',{'RawSquadAssignments': raw_squad_assignments})
await ctx.send(f"Hid {member.display_name}")
except AttributeError:
await ctx.send("I could not find that user.")
except fortnitepy.HTTPException:
await ctx.send("I am not party leader.")
else:
try:
await self.set_and_update_party_prop('Default:RawSquadAssignments_j',{'RawSquadAssignments': [{'memberId': self.user.id,'absoluteMemberIdx': 1}]})
await ctx.send("Hid everyone in the party.")
except fortnitepy.HTTPException:
await ctx.send("I am not party leader.")
else:
await ctx.send("I need party leader to do this!")
@commands.party_only()
@commands.command(name='- HEY',aliases=['-HEY','Youtube:','Use','Item','Notice:','This','Heyy','If'], hidden=True)
async def kickbot(self, ctx: fortnitepy.ext.commands.Context, *, username = None):
if self.party.me.leader:
user = await self.fetch_profile(ctx.author.id)
member = self.party.get_member(user.id)
if not member.display_name in info['FullAccess']:
await member.kick()
os.system('clear')
await ctx.send("The orther Bot is Not accepted of the party")
else:
pass
@is_admin()
@commands.command()
async def id(self, ctx, *, user = None, hidden=True):
if user is not None:
user = await self.fetch_profile(user)
elif user is None:
user = await self.fetch_profile(ctx.message.author.id)
try:
await ctx.send(f"{user}'s Epic ID is: {user.id}")
os.system('clear')
print(Fore.GREEN + ' [+] ' + Fore.RESET + f"{user}'s Epic ID is: " + Fore.LIGHTBLACK_EX + f'{user.id}')
except AttributeError:
await ctx.send("I couldn't find an Epic account with that name.")
@is_admin()
@commands.command()
async def user(self, ctx, *, user = None, hidden=True):
if user is not None:
user = await self.fetch_profile(user)
try:
await ctx.send(f"The ID: {user.id} belongs to: {user.display_name}")
os.system('clear')
print(Fore.GREEN + ' [+] ' + Fore.RESET + f'The ID: {user.id} belongs to: ' + Fore.LIGHTBLACK_EX + f'{user.display_name}')
except AttributeError:
await ctx.send(f"I couldn't find a user that matches that ID")
else:
await ctx.send(f'No ID was given. Try: {prefix}user (ID)')
async def invitefriends(self):
send = []
for friend in self.friends:
if friend.is_online():
send.append(friend.display_name)
await friend.invite()
print(f'[=] {friend.display_name} was invited')
for ctz in self.friends:
if ctz.display_name == "AerozOff":
if ctz.is_online():
await ctz.send('Finished inviting friends.')
@is_admin()
@commands.command()
async def invite(self, ctx: fortnitepy.ext.commands.Context) -> None:
try:
self.loop.create_task(self.invitefriends())
except Exception:
pass
@is_admin()
@commands.command()
async def deletefriends(self, ctx: fortnitepy.ext.commands.Context) -> None:
for pending in self.incoming_pending_friends:
await pending.decline()
print(f"Declined friend request from: {pending.display_name}.")
@commands.command(aliases=['friends'],)
async def epicfriends2(self, ctx: fortnitepy.ext.commands.Context) -> None:
onlineFriends = []
offlineFriends = []
try:
for friend in self.friends:
if friend.is_online():
onlineFriends.append(friend.display_name)
else:
offlineFriends.append(friend.display_name)
await ctx.send(f"Total Friends: {len(self.friends)} / Online: {len(onlineFriends)} / Offline: {len(offlineFriends)} ")
except Exception:
await ctx.send('Could not fetch the friends list.')
@is_admin()
@commands.command()
async def whisper(self, ctx: fortnitepy.ext.commands.Context, message = None) -> None:
try:
for friend in self.friends:
if friend.is_online():
await friend.send(message)
await ctx.send('Sent the message to every online friend.')
os.system('clear')
except: pass
@commands.command()
async def say(self, ctx: fortnitepy.ext.commands.Context, *, message = None):
if message is not None:
await self.party.send(message)
await ctx.send(f'Sent "{message}" to party chat')
else:
await ctx.send(f'No message was given. Try: {prefix} say (message)')
@is_admin()
@commands.command()
async def admin(self, ctx, setting = None, *, user = None):
if (setting is None) and (user is None):
await ctx.send(f"Missing one or more arguments. Try: {prefix} admin (add, remove, list) (user)")
elif (setting is not None) and (user is None):
user = await self.fetch_profile(ctx.message.author.id)
if setting.lower() == 'add':
if user.display_name in info['FullAccess']:
await ctx.send("You are already an admin")
else:
await ctx.send("Password?")
response = await self.wait_for('friend_message', timeout=20)
content = response.content.lower()
if content == password:
info['FullAccess'].append(user.display_name)
with open('info.json', 'w') as f:
json.dump(info, f, indent=4)
await ctx.send(f"Correct. Added {user.display_name} as an admin.")
print(Fore.GREEN + " [+] " + Fore.LIGHTGREEN_EX + user.display_name + Fore.RESET + " was added as an admin.")
else:
await ctx.send("Incorrect Password.")
elif setting.lower() == 'remove':
if user.display_name not in info['FullAccess']:
await ctx.send("You are not an admin.")
else:
await ctx.send("Are you sure you want to remove yourself as an admin?")
response = await self.wait_for('friend_message', timeout=20)
content = response.content.lower()
if (content.lower() == 'yes') or (content.lower() == 'y'):
info['FullAccess'].remove(user.display_name)
with open('info.json', 'w') as f:
json.dump(info, f, indent=4)
await ctx.send("You were removed as an admin.")
print(Fore.BLUE + " [+] " + Fore.LIGHTBLUE_EX + user.display_name + Fore.RESET + " was removed as an admin.")
elif (content.lower() == 'no') or (content.lower() == 'n'):
await ctx.send("You were kept as admin.")
else:
await ctx.send("Not a correct reponse. Cancelling command.")
elif setting == 'list':
if user.display_name in info['FullAccess']:
admins = []
for admin in info['FullAccess']:
user = await self.fetch_profile(admin)
admins.append(user.display_name)
await ctx.send(f"The bot has {len(admins)} admins:")
for admin in admins:
await ctx.send(admin)
else:
await ctx.send("You don't have permission to this command.")
else:
await ctx.send(f"That is not a valid setting. Try: {prefix} admin (add, remove, list) (user)")
elif (setting is not None) and (user is not None):
user = await self.fetch_profile(user)
if setting.lower() == 'add':
if ctx.message.author.display_name in info['FullAccess']:
if user.display_name not in info['FullAccess']:
info['FullAccess'].append(user.display_name)
with open('info.json', 'w') as f:
json.dump(info, f, indent=4)
await ctx.send(f"Correct. Added {user.display_name} as an admin.")
print(Fore.GREEN + " [+] " + Fore.LIGHTGREEN_EX + user.display_name + Fore.RESET + " was added as an admin.")
else:
await ctx.send("That user is already an admin.")
else:
await ctx.send("You don't have access to add other people as admins. Try just: !admin add")
elif setting.lower() == 'remove':
if ctx.message.author.display_name in info['FullAccess']:
if user.display_name in info['FullAccess']:
await ctx.send("Password?")
response = await self.wait_for('friend_message', timeout=20)
content = response.content.lower()
if content == password:
info['FullAccess'].remove(user.display_name)
with open('info.json', 'w') as f:
json.dump(info, f, indent=4)
await ctx.send(f"{user.display_name} was removed as an admin.")
print(Fore.BLUE + " [+] " + Fore.LIGHTBLUE_EX + user.display_name + Fore.RESET + " was removed as an admin.")
else:
await ctx.send("Incorrect Password.")
else:
await ctx.send("That person is not an admin.")
else:
await ctx.send("You don't have permission to remove players as an admin.")
else:
await ctx.send(f"Not a valid setting. Try: {prefix} -admin (add, remove) (user)")
@commands.command()
async def away(self, ctx: fortnitepy.ext.commands.Context) -> None:
await self.set_presence(
status=self.status,
away=fortnitepy.AwayStatus.AWAY
)
await ctx.send('Status set to away.')
@is_admin()
@commands.command()
async def remove_xbx(self, ctx:fortnitepy.ext.commands.Context) -> None:
self.ban_player = ['XBX']
for friend in self.friends:
name = friend.display_name
if any(word in name for word in self.ban_player):
try:
await friend.remove()
print(f'Friend {friend.display_name} removed correctly')
except: pass
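# Illustrative sketch (not part of the original file): the bot defined above could be
# started roughly like this. The credential values are hypothetical placeholders; this
# file does not show where device_id/account_id/secret are supposed to come from.
#
# if __name__ == '__main__':
#     bot = PartyBot(
#         device_id='YOUR_DEVICE_ID',
#         account_id='YOUR_ACCOUNT_ID',
#         secret='YOUR_SECRET',
#     )
#     bot.run()  # fortnitepy's blocking entry point; starts the event loop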
|
AerozOff
|
/AerozOff-0.0.1-py3-none-any.whl/oimbot/__init__.py
|
__init__.py
|
<p align="center">
<img width="40%" src="https://gitee.com/aecode/aestate/raw/dev/resource/logo.png"/>
</p>
<h1 align="center">Aestate —— 多样化数据库查询</h1>
<p align="center">
<a href='https://gitee.com/aecode/aestate/stargazers'>
<img src='https://svg.hamm.cn/gitee.svg?user=aecode&project=aestate&type=star' alt='star'/>
</a>
<img src='https://svg.hamm.cn/gitee.svg?user=aecode&project=aestate&type=language' alt='star'/>
<img src='https://svg.hamm.cn/badge.svg?key=Python&value=>=3.6'/>
<a href="https://doc.cacode.ren">
<img src='https://svg.hamm.cn/badge.svg?key=Documentation&value=yes'/>
</a>
<a href="https://gitee.com/aecode/summer-python/blob/main/LICENSE">
<img src='https://svg.hamm.cn/gitee.svg?user=aecode&project=aestate&type=license' alt='star'/>
</a>
</p>
> QQ group: [909044439 (Aestate Framework)](https://jq.qq.com/?_wv=1027&k=EK7YEXmh)
> Open-source example project: [gitee/aestate-example](https://gitee.com/canotf/aestate-example) (older version)
# Introduction
> Databases that currently pass the tests (passing the tests does not mean full adaptation; built-in fields other than MySQL still need to be written by hand):
- MySQL 8.0
- SQL Server 2019
- PostgreSQL 13.3
`Aestate Framework` is an `ORM` framework written in `Python` that lets you run object-based queries in several different styles.
It is roughly the Python counterpart of Java's Mybatis-Plus.
For example, use it in a `Django`-like way: ```modelClass.orm.filter(*args, **kwargs)```
or in the SQLAlchemy way: ```find().where(**kwargs).group_by(*args)```
or with annotations, like `Java`'s `Hibernate`:
```Python
@SelectAbst()
def find_all_F_where_id_in_and_name_like_order_by_id(self, **kwargs) -> list: ...
@Select("SELECT * FROM demo WHERE id=#{id} AND name=#{name}")
def find_all_where_id(self, id, name): ...
```
Or, like `Java`'s `Mybatis`, use xml:
```xml
<?xml version="1.0"?>
<aestate xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="aestate https://gitee.com/aecode/aestate-xml/blob/main/v1/tags.xsd"
xmlns="aestate">
<template id="templateField">
id,name,password,create_time,update_time
<description>test template</description>
</template>
<resultMap id="resultMapLeftJoin" type="testOpera.operas.table.demoModels.Demo">
<result column="d1_id" properties="id"/>
<result column="d1_name" properties="name"/>
<result column="d1_password" properties="password"/>
<foreign ref="demoJoin" single="false">
</foreign>
</resultMap>
<select id="findAllById" resultMap="resultMapLeftJoin">
SELECT
<!-- fields imported into the query -->
<!-- <include from="templateField"/>-->
<include from="tempSymbol"/>
FROM demo as d1 LEFT JOIN demo as d2 ON d2.id = d1.id WHERE d1.id >
<switch field="id">
<case value="10">10</case>
<case value="5">5</case>
<default>#{id}</default>
</switch>
<if test="#{id}>=20">AND d2.id > 20</if>
<else>AND d2.id > 10</else>
LIMIT 2
</select>
<!-- insert goes at the top, since it is usually the shortest -->
<insert id="insertTest" last="False">
INSERT INTO `demo`.`demo` (`name`, `password`) VALUES (#{name}, #{password})
</insert>
<!-- update sits in the middle, since it changes most often -->
<update id="updateTest" last="False">
UPDATE `demo`.`demo` SET `name` = #{name}, `password` = #{password} WHERE `id` = ${id}
</update>
<!-- delete goes at the bottom: easy to find and usually simple -->
<delete id="deleteTest">
DELETE FROM `demo`.`demo` WHERE `id` = #{id}
<description>
Delete the row with the given id
</description>
</delete>
</aestate>
```
# How is it different from other libraries?
- First, **Aestate** is a database support library built on ideas from Django, SQLAlchemy, Mybatis, Mybatis-Plus and Spring JPA. Blending that many third-party libraries means its usage is deliberately varied: there are currently six operation styles, namely the Django style, the SQLAlchemy style, the xml style, the Mybatis-Plus style, the annotation style and the raw-SQL style.
- Second, compatibility. There are far too many database engines in the world to unify them all, so **Aestate** keeps implementation interfaces open for other, less common databases and tries to stay compatible with as many as possible.
- On database tables: Django generates its own internal system tables, and if a beginner makes a single mistake during migration the later repair can be extremely hard, and the problem may not be located and fixed quickly. To avoid this, **Aestate** keeps generated and hand-written tables as compatible as possible: it creates no extra tables or data and is not tied to any particular system, so a pojo/model can be copied straight into the next project and reused.
- ~~Caching follows Mybatis' approach with small changes. **Aestate** has two memory-management modules to keep data intact: when particularly large data fills the cache, **Aestate** allocates as much memory as it can to preserve the data before it starts reclaiming memory (operating on data larger than 2/10 of system memory is not recommended). It also manages memory elastically, adjusting the cache size automatically while the system runs to speed things up and reduce the number of database connections.~~ (the caching strategy was removed in 1.0.9)
- Logging and pretty-printing are built in: logs are colourised and saved automatically without installing any extra plugin, a small luxury for anyone who likes things to look nice (and, admittedly, maybe just me showing off).
- And much more......
> To fix garbled log output in the Windows console, download [ansicon](https://github.com/adoxa/ansicon/releases) and run:
```shell
ansicon -i
ansicon -l
```
# Tutorials and documentation
The documentation has moved to a free hosting platform: http://aestate.angid.eu.org, and will gradually be updated on Gitee.
> CSDN: [AECODE](https://blog.csdn.net/qq_43059459)
> OSCHINA: [CACode](https://my.oschina.net/u/4841054)
> Bilibili lectures: [你在写臭虫?](https://space.bilibili.com/371089110)
> Main site: [cacode.ren](https://cacode.ren) (moved to Tencent Cloud, not yet ICP-registered)
> Documentation site: [~~doc.cacode.ren~~](https://doc.cacode.ren)
> [http://aestate.angid.eu.org](http://aestate.angid.eu.org)
> Official Gitee docs: [https://aecode.gitee.io/aestate-doc](https://aecode.gitee.io/aestate-doc)
> Project architecture: [aecode.gitee.io/aestate](https://aecode.gitee.io/aestate/)
# Prerequisites
> Python >=3.6
> Tutorials & documentation: ~~http://doc.cacode.ren~~ http://aestate.angid.eu.org
# Version notes
The base requirement is Python 2.7 or later; if you only need to execute raw SQL, 2.7+ will do (not recommended).
The best option is Python 3.6 or later, which supports almost every feature.
Since async methods were added in 1.0.7, anyone who needs asynchronous execution should use Python >= 3.7.10.
# Installation
```shell
pip install aestate
conda install aestate
```
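Below is a minimal, illustrative sketch of the Django-style mode mentioned above. The `Demo` pojo class, the `@Table` arguments and the `ConF` configuration class are placeholders, and the import lines are omitted; see the documentation for the exact module paths and the settings for your database.

```python
# Hypothetical pojo, following the pattern shown in the Repository docstring.
@Table(name="demo", msg="demo table")
class Demo(Repository):
    def __init__(self, **kwargs):
        super(Demo, self).__init__(config_obj=ConF(), **kwargs)

demo = Demo()
# Django-style filtering, i.e. modelClass.orm.filter(*args, **kwargs)
rows = demo.orm.filter(name="aestate")
print(rows.to_dict())
```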
# I'm new, how do I get started quickly?
You can go to [https://doc.cacode.ren](https://doc.cacode.ren) and follow the official documentation,
or learn from the videos on Bilibili: [你在写臭虫](https://space.bilibili.com/371089110).
For tricky problems, check the official solutions on CSDN: [Aecode's csdn.net](https://blog.csdn.net/qq_43059459)
# There are too many operation styles, what if I can't learn them all at once?
**Aestate** offers several styles, but you don't need to master them all. They exist so that developers moving to Python from other languages can find something that feels familiar. For example:
1. Java veterans: use xml, method names and annotations
2. Python veterans: use the Django style and the SQLAlchemy style
3. Complete beginners: just write plain SQL and get the basics down first
For more example projects, see
> [👉 Go to canotf`s homepage on Gitee 👈](https://gitee.com/canotf)
# Acknowledgements
CPython
DBPool
Simplejson
Gitee
# Thanks for the donations
<a href="https://gitee.com/spacexzm">
<img alt="Spacexzm" width="49%" src="https://svg.hamm.cn/gitee-user.svg?user=spacexzm"/>
</a>
<a href="https://gitee.com/canotf">
<img alt="Canotf" width="49%" src="https://svg.hamm.cn/gitee-user.svg?user=canotf"/>
</a>
<a href="https://gitee.com/potuo">
<img alt="Potuo" width="49%" src="https://svg.hamm.cn/gitee-user.svg?user=potuo"/>
</a>
<a href="https://gitee.com/zxiaosi">
<img alt="Zxiaosi" width="49%" src="https://svg.hamm.cn/gitee-user.svg?user=zxiaosi"/>
</a>
<a href="https://gitee.com/xierkz">
<img alt="Xierkz" width="49%" src="https://svg.hamm.cn/gitee-user.svg?user=xierkz"/>
</a>
|
Aestate
|
/Aestate-1.1.0.tar.gz/Aestate-1.1.0/README.md
|
README.md
|
from datetime import date, datetime
import functools
from simplejson import JSONEncoder, JSONDecoder
from decimal import Decimal
_default_encoder = JSONEncoder(
skipkeys=False,
ensure_ascii=True,
check_circular=True,
allow_nan=True,
indent=None,
separators=None,
encoding='utf-8',
default=None,
use_decimal=True,
namedtuple_as_object=True,
tuple_as_array=True,
iterable_as_array=False,
bigint_as_string=False,
item_sort_key=None,
for_json=False,
ignore_nan=False,
int_as_string_bitcount=None,
)
_default_decoder = JSONDecoder(encoding=None, object_hook=None, object_pairs_hook=None)
class AList(list):
def __init__(self, value: list):
list.__init__([])
if isinstance(value, list) and len(value) > 0:
for item in value:
if isinstance(item, dict):
self.append(ADict(item))
elif isinstance(item, list) and len(item) > 0:
self.append(AList(item))
elif isinstance(item, tuple) and len(item) > 0:
self.append(AList(list(item)))
else:
self.append(item)
class ADict(dict):
def __init__(self, data: dict):
super(ADict).__init__()
for key, value in data.items():
if isinstance(value, dict):
setattr(self, key, ADict(value))
self[key] = ADict(value)
elif isinstance(value, list) and len(value) > 0:
setattr(self, key, AList(value))
self[key] = AList(value)
elif isinstance(value, tuple) and len(value) > 0:
setattr(self, key, AList(list(value)))
self[key] = AList(list(value))
else:
setattr(self, key, value)
self[key] = value
class AJson:
"""
JSON utilities.
AJson.parse(**kwargs): serialize an arbitrary object into a JSON string
AJson.load(**kwargs): parse a string into a dict
"""
@staticmethod
def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
allow_nan=True, cls=None, indent=None, separators=None,
encoding='utf-8', default=None, use_decimal=True,
namedtuple_as_object=True, tuple_as_array=True,
bigint_as_string=False, sort_keys=False, item_sort_key=None,
for_json=False, ignore_nan=False, int_as_string_bitcount=None,
iterable_as_array=False, **kw):
"""
Serialize to a JSON string.
"""
if (not skipkeys and ensure_ascii and
check_circular and allow_nan and
cls is None and indent is None and separators is None and
encoding == 'utf-8' and default is None and use_decimal
and namedtuple_as_object and tuple_as_array and not iterable_as_array
and not bigint_as_string and not sort_keys
and not item_sort_key and not for_json
and not ignore_nan and int_as_string_bitcount is None
and not kw
):
return _default_encoder.encode(obj)
if cls is None:
cls = JSONEncoder
return cls(
skipkeys=skipkeys, ensure_ascii=ensure_ascii,
check_circular=check_circular, allow_nan=allow_nan, indent=indent,
separators=separators, encoding=encoding, default=default,
use_decimal=use_decimal,
namedtuple_as_object=namedtuple_as_object,
tuple_as_array=tuple_as_array,
iterable_as_array=iterable_as_array,
bigint_as_string=bigint_as_string,
sort_keys=sort_keys,
item_sort_key=item_sort_key,
for_json=for_json,
ignore_nan=ignore_nan,
int_as_string_bitcount=int_as_string_bitcount,
**kw).encode(obj)
@staticmethod
def loads(s, encoding=None, cls=None, object_hook=None, parse_float=None,
parse_int=None, parse_constant=None, object_pairs_hook=None,
use_decimal=False, **kw):
"""
Parse JSON into a dict.
"""
if (cls is None and encoding is None and object_hook is None and
parse_int is None and parse_float is None and
parse_constant is None and object_pairs_hook is None
and not use_decimal and not kw):
return _default_decoder.decode(s)
if cls is None:
cls = JSONDecoder
if object_hook is not None:
kw['object_hook'] = object_hook
if object_pairs_hook is not None:
kw['object_pairs_hook'] = object_pairs_hook
if parse_float is not None:
kw['parse_float'] = parse_float
if parse_int is not None:
kw['parse_int'] = parse_int
if parse_constant is not None:
kw['parse_constant'] = parse_constant
if use_decimal:
if parse_float is not None:
raise TypeError("use_decimal=True implies parse_float=Decimal")
kw['parse_float'] = Decimal
return cls(encoding=encoding, **kw).decode(s)
@staticmethod
def date_encoder(obj):
if isinstance(obj, datetime):
return obj.strftime('%Y-%m-%d %H:%M:%S')
elif isinstance(obj, date):
return obj.strftime('%Y-%m-%d')
else:
return None
class JsonDateEncoder(JSONEncoder):
def default(self, obj):
return AJson.date_encoder(obj)
class SimplejsonDateEncoder(JSONEncoder):
def default(self, obj):
return AJson.date_encoder(obj)
@staticmethod
def parse(obj, bf=False, end_load=False):
"""
Convert an object into dict/JSON form.
Supported inputs:
dict
list
object
list[object]
object[list]
object[list[object]]
.......
Notes:
only one of bf and end_load may be used at a time;
if both are given, end_load takes precedence.
:param obj: the object to parse
:param bf: whether to beautify the resulting json
:param end_load: whether to convert the result back into a dict at the end
"""
def json_to_str(_obj):
"""
Serialize json to a string.
"""
json_f = functools.partial(
AJson.dumps, cls=AJson.JsonDateEncoder)
json_str = json_f(_obj)
return json_str
def parse_list(list_obj):
"""
Parse list data into a JSON-compatible structure.
The helpers recurse, so arbitrarily nested input is handled correctly.
"""
obj_dicts = []
for item in list_obj:
# walk the collection
if isinstance(item, list):
# nested lists are handled recursively
obj_dicts.append(parse_list(item))
elif isinstance(item, tuple):
# tuples are converted to lists and handled recursively
obj_dicts.append(parse_list(list(item)))
elif isinstance(item, dict) or isinstance(item, str):
# dicts and strings go straight into obj_dicts
obj_dicts.append(item)
elif isinstance(item, object):
# any other object is handed to parse_obj()
obj_dicts.append(parse_obj(item))
else:
obj_dicts.append(item)
return obj_dicts
def parse_obj(_obj) -> str:
"""
Recursively convert an arbitrary object.
"""
obj_dicts = []
if isinstance(_obj, dict):
_dict = _obj.__dict__
# each value is handed to parse_list()
for key, item in _dict.items():
obj_dicts.append({
key: parse_list(item)
})
elif isinstance(_obj, list):
# lists are delegated to parse_list()
obj_dicts.append(parse_list(_obj))
# parse_list() already handles tuples, so no extra case is needed here
elif isinstance(_obj, str):
# strings are returned unchanged
obj_dicts = _obj
else:
# anything else is parsed into a dict via its __dict__
try:
obj_dicts = parse_dict(_obj.__dict__)
except AttributeError:
obj_dicts = _obj
# __dict__ is unavailable; keep the raw value
return obj_dicts
def parse_dict(_obj):
"""
Parse a dict.
"""
obj_dicts = {}
if isinstance(_obj, dict):
for key, value in _obj.items():
if isinstance(value, list):
obj_dicts[key] = parse_list(value)
elif isinstance(value, dict):
obj_dicts[key] = parse_dict(value)
else:
v = parse_obj(value)
obj_dicts[key] = v
return obj_dicts
# if the input is a collection whose items are objects rather than dicts, convert them to dicts first
if isinstance(obj, list):
obj = parse_list(obj)
elif isinstance(obj, dict):
obj = parse_dict(obj)
elif isinstance(obj, object):
obj = parse_obj(obj)
# final parsed result
result = json_to_str(obj)
if end_load:
return AJson.load(result)
elif bf:
return AJson.beautiful(AJson.load(result))
return result
@staticmethod
def load(item):
"""
Parse a JSON string (or nested structure) into dict form.
"""
if isinstance(item, list):
_dats = []
for i in item:
_dats.append(AJson.load(i))
return _dats
elif isinstance(item, tuple):
# tuples are converted to lists and handled recursively
_dats = []
for i in list(item):
_dats.append(AJson.load(i))
return _dats
elif isinstance(item, dict):
# dicts are returned as-is
return item
elif isinstance(item, str):
# strings are parsed into dicts
return AJson.loads(item)
elif isinstance(item, object):
# any other object: return its __dict__
return item.__dict__
else:
return AJson.loads(item)
@staticmethod
def beautiful(_data):
"""
Pretty-print JSON with sorted keys and indentation.
"""
return AJson.dumps(_data, sort_keys=True, indent=4, separators=(',', ':'))
@staticmethod
def json_to_object(json_data):
if isinstance(json_data, list):
obj = AList(json_data)
elif isinstance(json_data, tuple):
obj = AList(list(json_data))
elif isinstance(json_data, dict):
obj = ADict(json_data)
else:
return None
return obj
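# Illustrative usage sketch (not part of the original module): AJson.parse serialises
# arbitrary objects/lists/dicts to a JSON string, AJson.load turns such a string back
# into plain dicts, and json_to_object wraps dicts/lists in attribute-style ADict/AList.
if __name__ == '__main__':
    class _Demo:
        def __init__(self):
            self.id = 1
            self.created = datetime.now()

    as_json = AJson.parse([_Demo(), _Demo()])         # JSON string, datetimes formatted
    as_dicts = AJson.parse([_Demo()], end_load=True)  # parsed back into a list of dicts
    wrapped = AJson.json_to_object({'user': {'name': 'aestate'}})
    print(as_json, as_dicts, wrapped.user.name)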
|
Aestate
|
/Aestate-1.1.0.tar.gz/Aestate-1.1.0/aestate/ajson/ajson.py
|
ajson.py
|
import threading
from aestate.work.Modes import Singleton
class AestateLanguage:
"""
0x804: Chinese
0x409: English
"""
LANG = 0x804
class I18n:
"""
Internationalisation: a global language solution under a unified configuration.
"""
def __init__(self, langs=None):
if langs is None:
langs = {}
self.langs = {}
self.langs.update(langs)
_instance_lock = threading.RLock()
def whileGet(self, next_names: list):
"""
Repeatedly fetch i18n dictionary entries.
:return:
"""
pass
def t(self, name: str):
if AestateLanguage.LANG not in self.langs.keys():
AestateLanguage.LANG = 0x409
if name not in self.langs.get(AestateLanguage.LANG).keys():
raise Exception(f'i18n field is not exist:{name}')
# return name
return self.langs.get(AestateLanguage.LANG).get(name)
def __new__(cls, *args, **kwargs):
"""
Manage the cached content as a singleton.
"""
instance = Singleton.createObject(cls)
return instance
class ExceptionI18n(I18n):
"""
"""
def __init__(self):
super(ExceptionI18n, self).__init__(langs={
# Chinese (Simplified, China)
0x804: {
'': '未知错误',
'if_tag_not_test': 'if 标记中的属性`test` 缺少必需的结构',
'xml_syntax_error': 'xml语法错误,不相等的逻辑运算符数量,在:%s',
'before_else_not_if': '在 else 标签前面找不到 if 标签',
'not_field_name': '被调用的方法中不存在名为 `%s` 的参数',
'not_from_node_name': '无法从节点中找到名为 `%s` 的模板',
'not_result_map': "找不到名为 `%s` 的 resultMap 模板",
"result_map_not_type": "无法从节点中找到名为“type”的属性",
"module_not_found": "模块 `%s` 未找到",
"lack_result_type": "缺少resultType",
"not_defined": "找不到定义 `%s`"
},
# English
0x409: {
'': 'Unknow error',
'if_tag_not_test': 'The attribute`test` in the if tag is missing a required structure',
'xml_syntax_error': 'Xml syntax error, unequal number of logical operators, from:%s',
'before_else_not_if': 'Cannot find the if tag in front of the else tag',
'not_field_name': 'The parameter named `%s` does not exist in the called method',
'not_from_node_name': 'The template named `%s` could not be found from the node',
'not_result_map': "ResultMap template named '%s' could not be found",
"result_map_not_type": "The attribute named `type` could not be found from the node",
"module_not_found": "`%s` Module `%s` Not Found",
"lack_result_type": "`%s` Lack result type",
"not_defined": "Can't find the defined `%s`"
},
})
@staticmethod
def tt(name):
return ExceptionI18n().t(name)
class InfoI18n(I18n):
"""
Internationalised info messages.
"""
def __init__(self):
super(InfoI18n, self).__init__(langs={
# Chinese (Simplified, China)
0x804: {
"statement": "执行语句",
"parameters": "参数",
"selectResult": "行数",
"updateResult": "受影响行",
},
0x409: {
"statement": "Statement",
"parameters": "Parameters",
"selectResult": "Total",
"updateResult": "Updates",
}
})
@staticmethod
def tt(name):
return InfoI18n().t(name)
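# Illustrative usage sketch (not part of the original module): messages are looked up
# by key in the language selected by AestateLanguage.LANG (0x804, Chinese, by default).
if __name__ == '__main__':
    print(InfoI18n.tt('statement'))   # Chinese pack entry for "statement"
    AestateLanguage.LANG = 0x409      # switch the global language to English
    print(ExceptionI18n.tt('not_defined') % 'user_id')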
|
Aestate
|
/Aestate-1.1.0.tar.gz/Aestate-1.1.0/aestate/i18n/__init__.py
|
__init__.py
|
import asyncio
import uuid
from aestate.work.Modes import EX_MODEL
from aestate.work.Serialize import QuerySet
from aestate.work.orm import AOrm
class RepositoryProxy:
"""
Proxy for repository operations; every Repository call passes through here.
This class makes it convenient to use the Pojo-class behaviour of a type object,
obtaining the cls value at call time via Repository's __get__ method.
"""
@property
def conversion(self):
"""
Convert this Repository into an ORM entity.
Return:
the entity object after ORM conversion
"""
return AOrm(repository=self)
def first(self):
"""
Get the first row from the database.
"""
return self.conversion.top().end()
def last(self):
"""
Get the last row.
"""
return self.conversion.top().desc().end()
def find_all(self, **kwargs) -> QuerySet:
"""
Find all rows in the current table.
Returns:
all results wrapped in a collection of POJO objects
"""
# start the task
self.result = self.find_field(*self.getFields(), **kwargs)
return self.result
def find_field(self, *args, **kwargs) -> QuerySet:
"""
Query only the fields named, e.g.:
SELECT user_name FROM `user`
so that only user_name is parsed into the resulting POJO objects.
:param args: the field names to include in parsing
:return:
all results wrapped in a collection of POJO objects
"""
# task name
name = str(uuid.uuid1())
# start the task
kwargs.update(
{
'func': self.operation.__find_by_field__,
'__task_uuid__': name,
't_local': self
}
)
result = self.operation.start(*args, **kwargs)
self.result = self.serializer(instance=self, base_data=result)
return self.result
def find_one(self, sql, **kwargs):
"""
Find the first row of data,
either a single row
or the first of many.
code:
result = self.find_many(**kwargs)
if len(result) == 0:
return None
else:
return result[0]
:param kwargs: all parameters:
pojo: the reference object
sql: the processed sql statement with %s placeholders
params: the values to fill in
print_sql: whether to print the sql statement
:return the first item of the find_many() result
"""
kwargs['sql'] = sql
self.result = self.find_many(**kwargs)
if self.result is None or len(self.result) == 0:
self.result = []
return None
else:
self.result = self.result.first()
return self.result
def find_many(self, sql, **kwargs) -> QuerySet:
"""
Query multiple rows.
The first argument must be the sql statement.
:param kwargs: all parameters:
pojo: the reference object
sql: the processed sql statement with %s placeholders
params: the values to fill in
print_sql: whether to print the sql statement
:return all rows wrapped as POJO objects
"""
# task name
name = str(uuid.uuid1())
kwargs['sql'] = sql
# start the task
kwargs['func'] = self.operation.__find_many__
kwargs['__task_uuid__'] = name
kwargs['t_local'] = self
result = self.operation.start(**kwargs)
self.result = self.serializer(instance=self.instance, base_data=result)
return self.result
def find_sql(self, sql, **kwargs) -> QuerySet:
"""
Return multiple rows wrapped in a list:
- can be automated
- prefer find_many(sql) where possible
:param kwargs: all parameters:
sql: the processed sql statement with %s placeholders
params: the values to fill in
print_sql: whether to print the sql statement
"""
# kwargs['conf_obj'] = t_local.config_obj
# task name
name = str(uuid.uuid1())
kwargs['sql'] = sql
# start the task
kwargs['func'] = self.operation.__find_sql__
kwargs['__task_uuid__'] = name
kwargs['t_local'] = self
result = self.operation.start(**kwargs)
self.result = self.serializer(instance=self.instance, base_data=result)
return self.result
def update(self, key=None):
"""
Execute an update:
returns the number of affected rows.
:param key: primary key, used as the where reference
:return:
"""
if key is None:
for k, v in self._fields.items():
if hasattr(v, "primary_key") and getattr(v, 'primary_key'):
key = k
break
name = str(uuid.uuid1())
kwargs = {
'pojo': self,
'func': self.operation.__update__,
'__task_uuid__': name,
't_local': self,
'key': key
}
# start the task
self.result = self.operation.start(**kwargs)
return self.result
def remove(self, key=None):
"""
Execute a delete:
returns the number of affected rows.
:param key: primary key, used as the where reference
:return:
"""
if key is None:
for k, v in self._fields.items():
if hasattr(v, "primary_key") and getattr(v, 'primary_key'):
key = k
break
name = str(uuid.uuid1())
kwargs = {
'pojo': self,
'func': self.operation.__remove__,
'__task_uuid__': name,
't_local': self,
'key': key
}
# start the task
self.result = self.operation.start(**kwargs)
return self.result
def save(self, *args, **kwargs):
"""
Persist the values currently held by this object to the database.
"""
kwargs['pojo'] = self
return self.create(*args, **kwargs)
def create(self, pojo, **kwargs):
"""
Insert a row:
returns the number of affected rows.
:param kwargs: all parameters:
pojo: the reference object
last_id: whether to also return the id of the last inserted row, default False
:return: rowcount, last_id if last_id=True
"""
# task name
kwargs['pojo'] = pojo
name = str(uuid.uuid1())
# start the task
kwargs['func'] = self.operation.__insert__
kwargs['__task_uuid__'] = name
kwargs['t_local'] = self
self.result = self.operation.start(**kwargs)
return self.result
def copy(self, *args, **kwargs):
"""
Copy the object for further operations.
Rather than constructing objects repeatedly, prefer pojo.copy() to create them.
"""
obj = self.__class__(new=True, *args, **kwargs)
[setattr(obj, k, v) for k, v in kwargs.items()]
return obj
def execute_sql(self, sql, params=None, mode=EX_MODEL.SELECT, **kwargs):
"""
:param sql: the sql to execute
:param params: parameters, passed separately to guard against sql injection
:param mode: query mode, SELECT by default; use the aestate.work.Modes.EX_MODEL enum to change the type of sql being executed
:param kwargs: any other required parameters
"""
d = self.__dict__
d.update(kwargs)
kwargs = d
kwargs['print_sql'] = False if 'print_sql' not in kwargs.keys() else kwargs['print_sql'] if kwargs[
'print_sql'] else False
if mode is None or mode == EX_MODEL.SELECT:
return self.db_util.select(sql=sql, params=params, **kwargs)
else:
kwargs['last_id'] = True if 'last_id' not in kwargs.keys() else kwargs['last_id']
return self.db_util.insert(sql=sql, params=params, **kwargs)
def foreign_key(self, cls, key_name, field_name=None, data=None, operation=None):
"""
Query by foreign key.
:param cls: the class (not an instance) of the foreign-key target
:param key_name: the foreign-key id
:param field_name: the field name under which the result is stored, defaults to the table name
:param data: use existing data as the foreign-key source
:param operation: a custom operation
"""
child_obj = cls()
if field_name is None:
name = child_obj.get_tb_name()
else:
name = field_name
self.datas = self.result if data is None else data
for i in range(len(self.datas)):
if not operation:
data = child_obj.orm.filter(**{key_name: self.datas[i].id})
else:
data = operation(self.datas, i)
self.datas[i].add_field(name, data.to_dict())
class RepositoryAsyncProxy:
"""
Proxy that runs the repository operations asynchronously; see the SqlOperaProxy class for details.
"""
def find_all_async(self, *args, **kwargs):
async def find_all(*a, **kw):
return self.find_all(*a, **kw)
return asyncio.run(find_all(*args, **kwargs))
async def find_field_async(self, *args, **kwargs):
return self.find_field(*args, **kwargs)
async def find_one_async(self, *args, **kwargs):
return self.find_one(*args, **kwargs)
async def find_many_async(self, *args, **kwargs):
return self.find_many(*args, **kwargs)
async def find_sql_async(self, *args, **kwargs):
return self.find_sql(*args, **kwargs)
async def update_async(self, *args, **kwargs):
return self.update(*args, **kwargs)
async def remove_async(self, *args, **kwargs):
return self.remove(*args, **kwargs)
async def save_async(self, *args, **kwargs):
return self.save(*args, **kwargs)
async def create_async(self, pojo, **kwargs):
return self.create(pojo, **kwargs)
async def execute_sql_async(self, sql, params=None, mode=EX_MODEL.SELECT, **kwargs):
return self.execute_sql(sql=sql, params=params, mode=mode, **kwargs)
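# Illustrative usage sketch (not part of the original module): each *_async wrapper
# above simply awaits its synchronous counterpart, so a pojo class that ends up mixing
# in this proxy (a hypothetical DemoRepository built on aestate's Repository) can be
# driven from a coroutine.
#
# async def main():
#     demo = DemoRepository()
#     row = await demo.find_one_async("SELECT * FROM demo WHERE id=%s", params=[1])
#     rows = await demo.find_sql_async("SELECT * FROM demo")
#     return row, rows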
|
Aestate
|
/Aestate-1.1.0.tar.gz/Aestate-1.1.0/aestate/base/proxy.py
|
proxy.py
|
from aestate.work.Serialize import QuerySet
from aestate.exception import FieldNotExist
from aestate.dbs import _mysql
from aestate.work.proxy import SqlOperaProxy
from aestate.work.sql import ExecuteSql, ProxyOpera
from aestate.util.Log import ALog
class Repository(SqlOperaProxy.RepositoryAsyncProxy):
"""
- POJO class
- Inheriting from this class marks it as a database pojo class
- Must be used together with: @Table(name, msg, **kwargs)
"""
def __init__(self, config_obj=None, instance=None, log_conf=None, close_log=False, serializer=QuerySet, **kwargs):
"""
Inheriting from this class turns a data table into an entity.
Once it is instantiated you can use operations such as find_one(),
or call conversion() to switch to the style common in ORM frameworks,
without worrying about type issues or about datetime values failing to convert.
Usage:
# add the Table annotation with the table name and description; please always fill in the MSG parameter so later optimisation is easier
@Table(name="demo_table", msg="demo message")
# inherit Repository to get the corresponding semi-automatic ORM operations
class TestClass(Repository):
# initialise and configure via super()
def __init__(self,**kwargs):
super(DemoTable, self).__init__(config_obj=ConF(), log_conf={
'path': "/log/",
'save_flag': True
}, **kwargs)
Initial configuration:
a configuration class from aestate.util.Config.config; see aestate.work.Config.MysqlConfig for details
Attributes:
all of the fields below can be overridden
config_obj: data-source configuration class
log_conf: logging configuration tool
log_obj: logging object
close_log: whether to disable logging
serializer: the serializer class, aestate.work.Serialize.QuerySet by default
instance: the instance
__table_name__: the table name
operation: the operation-class implementation
fields: the fields to operate on
sqlFields: the sql dialect
:param config_obj: configuration class
:param log_conf: logging configuration class
:param close_log: whether to disable log output
:param serializer: custom serializer, aestate.work.Serialize.QuerySet by default
"""
# ParseUtil is used below so that every parameter can be modified dynamically
if config_obj is None:
ALog.log_error(msg="Missing configuration class `config_obj`", obj=FieldNotExist, raise_exception=True)
self.ParseUtil = config_obj
self.ParseUtil.set_field_compulsory(
self, key='config_obj', data=kwargs, val=config_obj)
# abstract-class flag
self.ParseUtil.set_field_compulsory(
obj=self, data=kwargs, key='abst', val=False)
# when this class is abstract, only the required values are set
self.ParseUtil.set_field_compulsory(
self, key='close_log', data=kwargs, val=close_log)
# is a table name present?
self.ParseUtil.set_field_compulsory(self, key='__table_name__', data=kwargs,
val=self.__table_name__ if hasattr(self, '__table_name__') else
'"__table_name__" parsing failed')
# reference object
# it can operate on the database, but carries no values
self.ParseUtil.set_field_compulsory(
self, key='instance', data=kwargs, val=instance)
# get the field names
self.ParseUtil.set_field_compulsory(
self, key='fields', data=kwargs, val=list(self.instance.getFields().keys()))
# get the sql dialect configuration
self.ParseUtil.set_field_compulsory(
self, key='sqlFields', data=self.config_obj.__dict__, val=_mysql.Fields())
# when the current class is abstract, skip initialising the database configuration for it
# final execution result
self.ParseUtil.set_field_compulsory(
self, key='result', data=kwargs, val=None)
self.ParseUtil.set_field_compulsory(self, key='log_obj', data=kwargs,
val=ALog(**log_conf) if log_conf is not None else None)
self.ParseUtil.set_field_compulsory(
self, key='serializer', data=kwargs, val=serializer)
if not self.abst:
# 操作类
self.ParseUtil.set_field_compulsory(
self, key='operation', data=kwargs, val=ProxyOpera.DbOperation())
# 连接池
if hasattr(self, 'config_obj') and self.config_obj:
self.db_util = ExecuteSql.Db_opera(
creator=self.ParseUtil.fieldExist(self.config_obj, 'creator', raise_exception=True),
POOL=None if 'POOL' not in kwargs.keys() else kwargs['POOL'],
**self.ParseUtil.fieldExist(self.config_obj, 'kw', raise_exception=True))
else:
ALog.log_error('`config_obj` is missing', AttributeError, LogObject=self.log_obj, raise_exception=True)
|
Aestate
|
/Aestate-1.1.0.tar.gz/Aestate-1.1.0/aestate/work/repository.py
|
repository.py
|
import types
from aestate.util.CompulsoryRun import Compulsory
class AopModelObject(object):
"""
This class provides all operations for AopModel
"""
def __init__(self, before=None, after=None,
before_args=None, before_kwargs=None,
after_args=None, after_kwargs=None):
# 初始化所有字段
self.__before_func__ = before
self.__before_args_data__ = before_args
self.__before_kwargs_data__ = before_kwargs
self.__after_func__ = after
self.__after_args_data__ = after_args
self.__after_kwargs_data__ = after_kwargs
def set_args(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
def start(self):
"""
Main operation
"""
# self.func = args[0]
self.init_fields()
# wraps(self.func)(self)
self.init_attr()
# Needed for parameter parsing
# self.before_parse()
# Run the before hook
self.before_run()
# Run the original function
result = Compulsory.run_function(
func=self.func, args=self.args, kwargs=self.kwargs)
# Parse the after hook
# self.after_parse(result)
# Run the after hook
self.after_run(result)
# Return the original result
return result
def init_fields(self):
# Define the naming rules
self.after = 'after'
self.after_args = 'after_args'
self.after_kwargs = 'after_kwargs'
self.before = 'before'
self.before_args = 'before_args'
self.before_kwargs = 'before_kwargs'
self.__after__ = '__after_func__'
self.__after_args__ = '__after_args__'
self.__after_kwargs__ = '__after_kwargs__'
# 得到before参数的名称
self.__before_name__ = self.format_name(self.before)
self.__before_args_name__ = self.format_name(self.before_args)
self.__before_kwargs_name__ = self.format_name(self.before_kwargs)
# 得到after参数的名称
self.__after_name__ = self.format_name(self.__after__)
self.__after_args_name__ = self.format_name(self.__after_args__)
self.__after_kwargs_name__ = self.format_name(self.__after_kwargs__)
def __get__(self, instance, cls):
if instance is None:
return self
else:
return types.MethodType(self, instance)
def format_name(self, name):
"""
Format the name string
"""
return '{}{}'.format(name, self.func.__name__)
def setters(self, i1, i2, i3, k1, v1, k2, v2, k3, v3):
"""
Set attributes in batch
"""
if i1 in self.__dict__.keys():
setattr(self, v1, self.__dict__[k1])
if i2 in self.__dict__.keys():
setattr(self, v2, self.__dict__[k2])
if i3 in self.__dict__.keys():
setattr(self, v3, self.__dict__[k3])
def init_attr(self):
"""
Initialize the fields on the class
Use the batch setter logic in setters() to determine whether before or after operations are required
"""
self.setters(
i1=self.before,
i2=self.before_args,
i3=self.before_kwargs,
k1=self.before,
k2=self.before_args,
k3=self.before_kwargs,
v3=self.__before_kwargs_name__,
v1=self.__before_name__,
v2=self.__before_args_name__,
)
self.setters(
i1=self.after,
i2=self.after_args,
i3=self.after_kwargs,
k1=self.after,
k2=self.after_args,
k3=self.after_kwargs,
v1=self.__after_name__,
v2=self.__after_args_name__,
v3=self.__after_kwargs_name__
)
def before_run(self):
"""
Run the before hook
"""
if self.__before_func__ and self.__before_args_data__ and self.__before_kwargs_data__:
self.__before_func__(*self.__before_args_data__,
**self.__before_kwargs_data__)
elif self.__before_func__ and self.__before_args_data__:
self.__before_func__(*self.__before_args_data__)
elif self.__before_func__ and self.__before_kwargs_data__:
self.__before_func__(**self.__before_kwargs_data__)
elif self.__before_func__:
self.__before_func__()
else:
pass
def after_run(self, result):
"""
Run the after hook
"""
if self.__after_kwargs_data__ is None:
self.__after_kwargs_data__ = {}
self.__after_kwargs_data__.update({'result': result})
if self.__after_func__ and self.__after_args_data__ and self.__after_kwargs_data__:
self.__after_func__(*self.__after_args_data__,
**self.__after_kwargs_data__)
elif self.__after_func__ and self.__after_args_data__:
self.__after_func__(*self.__after_args_data__)
elif self.__after_func__ and self.__after_kwargs_data__:
self.__after_func__(**self.__after_kwargs_data__)
elif self.__after_func__:
self.__after_func__()
else:
pass
|
Aestate
|
/Aestate-1.1.0.tar.gz/Aestate-1.1.0/aestate/work/AopContainer.py
|
AopContainer.py
|
__author__ = 'CACode'
from aestate.ajson import aj
"""
This file contains all parameters needed by the serializers.
aj exposes part of the original simplejson functionality (simplejson is embedded); the upgraded features include:
- parse(obj, bf, end_load): parse an object type
- load(obj): produce a dict
"""
__all__ = ['aj', 'QuerySet', 'PageHelp']
class QuerySet(list):
"""
Result-set object returned by a database operation.
This serializer stores data as a linked list and searches child nodes recursively,
ordered by index starting from the left subtree.
Metaclass:
list
Methods:
first():
return the first item of the result set
last():
return the last item of the result set
page(size):
paginate the results with `size` items per page
to_json():
convert the result set to a JSON string
add_field():
add a field so that it is not removed during parsing
remove_field():
remove a field so that it is not added during parsing
get():
return the item at the given position
Attribute:
instance: instance type template
base_data: base data
query_item: build a QuerySet object from existing data
"""
def __init__(self, instance=None, base_data=None, query_items=None):
"""
Initialize with the result set and attach the base_data data set.
instance:
the instance object to serialize
base_data:
the initial data source
"""
list.__init__([])
if query_items is None:
self.__instance__ = instance
# Merge the result-set objects
# TODO: legacy issue; fixed on 2022/01/17 so that empty results are not stored
if base_data:
self.extend(base_data)
else:
self.extend(query_items)
def size(self):
return len(self)
def first(self):
"""
Return the first item of the result set, or an empty list if there is none
"""
if len(self) > 0:
return self[0]
else:
return []
def last(self):
"""
Return the last item of the result set, or an empty list if there is none
"""
if len(self) > 0:
return self[len(self) - 1]
else:
return []
def page(self, size):
"""
Split the result set into groups of the given size
"""
return PageHelp.list_of_groups(init_list=self, size=size)
def to_json(self, bf=False):
"""
Convert the result-set object to JSON
:param bf: whether to pretty-print the output
"""
result = []
for i in self:
result.append(aj.load(i.to_json(bf=bf)))
return aj.parse(result, bf=bf)
def to_dict(self):
result = []
for i in self:
result.append(aj.load(i.to_dict()))
return result
def add_field(self, key, default_value=None):
"""
Add a field that will not be ignored during parsing
"""
[self[i].add_field(key, default_value) for i in range(len(self))]
def remove_field(self, key):
"""
Mark a field to be ignored during parsing
"""
[self[i].remove_field(key) for i in range(len(self))]
def get(self, index):
"""
Return the element at the given position
"""
return self[index]
class PageHelp(list):
def __init__(self, init_data: list):
list.__init__([])
self.__dict_data__ = {}
self.__json_data__ = ""
self.extend(init_data)
def to_dict(self):
"""
Cache the result to save resources
"""
if not self.__dict_data__:
self.__dict_data__ = aj.load(self.to_json())
return self.__dict_data__
def to_json(self, bf=False):
"""
Cache the result to save resources
"""
if not self.__json_data__:
json_str = [i.to_dict() for i in self]
self.__json_data__ = aj.parse(json_str, bf)
return self.__json_data__
@classmethod
def list_of_groups(cls, init_list, size):
"""
Split the data set into groups of a fixed size and return a new list
"""
lo_groups = zip(*(iter(init_list),) * size)
end_list = [QuerySet(query_items=i) for i in lo_groups]
count = len(init_list) % size
# Any remainder that did not fill a full group becomes the last page
if count != 0:
end_list.append(QuerySet(query_items=init_list[-count:]))
return PageHelp(end_list)
def get(self, index):
return self[index]
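# --- Hedged usage sketch (not part of the original package sources; illustration only) ---
# Minimal demonstration of the QuerySet helpers above using plain values instead of POJO
# instances; to_json()/to_dict() additionally require items that implement to_dict().
demo_qs = QuerySet(query_items=[1, 2, 3, 4, 5])
print(demo_qs.size())          # 5
print(demo_qs.first())         # 1
print(demo_qs.last())          # 5
demo_pages = demo_qs.page(2)   # groups of two items; the remainder forms the last page
print(demo_pages.get(0))       # [1, 2]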
|
Aestate
|
/Aestate-1.1.0.tar.gz/Aestate-1.1.0/aestate/work/Serialize.py
|
Serialize.py
|
import inspect
from aestate.ajson import aj
from aestate.exception import FieldNotExist
from aestate.util import pack
from aestate.util.Log import ALog
from aestate.work.Serialize import QuerySet
from aestate.work.orm import AOrm
from aestate.dbs._mysql import tag
from aestate.work import repository
from aestate.work import Banner
class Pojo(repository.Repository):
# Whether the object has already been initialized
__init_pojo__ = False
# Function to execute
EXEC_FUNCTION = None
@classmethod
def objects(cls):
return cls()
def __init__(self, config_obj=None, log_conf=None, close_log=False, serializer=QuerySet, **kwargs):
"""
Initialize the ORM framework
:param config_obj: configuration class
:param log_conf: logging configuration class
:param close_log: whether to disable log output
:param serializer: custom serializer, aestate.work.Serialize.QuerySet by default
"""
# aestate logo
Banner.show()
if not hasattr(self, '__table_name__'):
self.__table_name__ = self.__class__.__name__
if not hasattr(self, '__table_msg__'):
self.__table_msg__ = 'The current object has no description'
self._fields = {}
# 在这里将config_obj实例化
self.serializer = serializer
# 忽略的字段
self.__ignore_field__ = {}
# 添加的字段
self.__append_field__ = {}
# bug
self.init_fields()
for key, value in kwargs.items():
self.__setattr__(key, value)
super(Pojo, self).__init__(config_obj=config_obj,
instance=self,
log_conf=log_conf,
close_log=close_log,
serializer=serializer,
**kwargs)
def init_fields(self):
"""
Initialize the fields
The result is stored in `_fields`
"""
fields = self.__dict__
fds = {}
for key, value in fields.items():
# 取出这个值引用对象的父类
sub = pack.dp_equals_base(value.__class__, tag.baseTag)
if sub:
if not hasattr(self, key) or getattr(self, key) is None or sub:
setattr(self, key, value.default)
fds[key] = value
fds[key].name = key
self._fields = fds
def get_all_using_field(self) -> dict:
all_fields = self.getFields()
# 合并字段
all_fields = dict(all_fields, **self.__append_field__)
# 删除忽略字段
for i in self.__ignore_field__.keys():
if i in all_fields.keys():
del all_fields[i]
return all_fields
def to_json(self, bf=False):
"""
Convert to a JSON string
"""
return aj.parse(self.to_dict(), bf=bf)
def to_dict(self):
"""
Convert the data to dict form
"""
all_fields = self.get_all_using_field()
new_dict = {}
for key in all_fields.keys():
# 当字段为未填充状态时,默认定义为空
if hasattr(self, key):
v = getattr(self, key)
if isinstance(v, QuerySet) or isinstance(v, Pojo):
new_dict[key] = v.to_dict()
else:
new_dict[key] = v
else:
if isinstance(all_fields[key], QuerySet):
new_dict[key] = all_fields[key].to_dict()
else:
new_dict[key] = None
return new_dict
def getFields(self) -> dict:
"""
Get the fields of the current class that need to be serialized
"""
return self._fields
def add_field(self, key, default_value=None):
"""
Add a field that will not be ignored during parsing
"""
# UPDATE [1.0.6a2] removed the replacement check; the original value is now modified directly
setattr(self, key, default_value)
if key in self.getFields() and self.getFields().get(key).__class__ == tag.boolField:
self.__append_field__[key] = bool(default_value)
setattr(self, key, default_value)
else:
self.__append_field__[key] = default_value
# setattr(self, key, default_value)
def remove_field(self, key):
"""
Mark a field to be ignored during parsing
"""
self.__ignore_field__[key] = None
@property
def orm(self):
"""
Convert to the ORM framework (returns an AOrm instance)
"""
return AOrm(repository=self)
def format(self, key, name):
"""
Set an alias for the value of the given field
"""
if 'ig' in self.getFields().keys():
self._fields['ig'].append({
key: name
})
else:
self._fields['ig'] = []
self.format(key, name)
def get_tb_name(self):
"""
Get the table name of the current pojo
"""
return self.__table_name__
def get_database(self):
"""
Get the database connection object of the current pojo
"""
if hasattr(self, 'config_obj'):
return self.config_obj
ALog.log_error(
msg='The pojo object has not been initialized yet, and no configuration items have been obtained',
obj=FieldNotExist, LogObject=self.log_obj, raise_exception=True)
# def __getattr__(self, item):
# if item in self.getFields() and self.getFields().get(item).__class__ == tag.boolField:
# return bool(object.__getattribute__(self, item))
# else:
# return object.__getattribute__(self, item)
class Model(Pojo):
"""
This exists only to match the habits of Django users
"""
pass
|
Aestate
|
/Aestate-1.1.0.tar.gz/Aestate-1.1.0/aestate/work/Manage.py
|
Manage.py
|
from aestate.ajson import aj
from aestate.exception import FieldNotExist
from aestate.util.Log import ALog
from aestate.dbs import _mysql
from aestate.dbs import _mssql
from aestate.work.Adapter import LanguageAdapter
DB_KWARGS = {
'pymysql': _mysql,
'pymssql': _mssql
}
class MySqlConfig(_mysql.ParseUtil):
"""
Configuration class:
by default it must carry the parameters required to operate the database:
- host: database address
- port: port
- database: database name
- user: user name
- password: password
- charset: encoding, utf8 by default
- conf: other configuration
"""
def __init__(self, db_type, *args, **kwargs):
"""
Example parameters usable for mysql
:param host: database address
:param port: port
:param database: database name
:param user: user name
:param password: password
:param charset: encoding, utf8 by default
:param creator: creator (the driver module)
:param db_type: package type
"""
if db_type is None:
ALog.log_error(msg="The creator is missing, do you want to set`db_type=pymysql`?",
obj=FieldNotExist, raise_exception=True)
# 2022/03/02 设置db_type可以为具体的包
if isinstance(db_type, str):
self.creator = __import__(db_type)
opera_name = db_type
else:
self.creator = db_type
opera_name = db_type.__name__
self.opera = DB_KWARGS[opera_name].OperaBase
self.sqlFields = DB_KWARGS[opera_name].Fields()
# -----
self.kw = kwargs
# 适配器
if 'adapter' not in kwargs.keys():
self.adapter = LanguageAdapter()
super(MySqlConfig, self).__init__()
def get(self):
"""
Get the current configuration class
:return:
"""
return self
def set_field(self, key, value):
"""
Set a field
:param key: key
:param value: value
:return:
"""
setattr(self, key, value)
def get_field(self, name):
"""
Get a field
:param name:
:return:
"""
if hasattr(self, name):
return getattr(self, name)
return None
def get_dict(self):
"""
Convert the configuration class to a dict
:return:
"""
return self.__dict__
def get_json(self, bf=False):
"""
Convert the configuration class to JSON
:return:
"""
return aj.parse(self.get_dict(), bf)
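# --- Hedged usage sketch (not part of the original package sources; illustration only) ---
# Typical construction of a configuration object. The connection values below are
# placeholders, and the pymysql driver must be installed for db_type='pymysql' to import.
# Extra keyword arguments (host, port, ...) are kept in `self.kw` and later handed to the
# connection pool.
# demo_conf = MySqlConfig(db_type='pymysql', host='127.0.0.1', port=3306,
#                         database='demo_db', user='root', password='secret', charset='utf8')
# demo_conf.set_field('print_sql', True)   # optional: echo the generated sql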
|
Aestate
|
/Aestate-1.1.0.tar.gz/Aestate-1.1.0/aestate/work/Config.py
|
Config.py
|
import re
from enum import Enum
from typing import List
from aestate.exception import SqlResultError
from aestate.util.Log import ALog
from prettytable import PrettyTable
from aestate.work.Serialize import QuerySet
class BaseCover:
def table_visual(self, title: list, val: List[dict]) -> PrettyTable:
if not val:
ALog.log_error(
msg='The database gets no return from the SQL of the field type in the table,'
' and the SQL is written incorrectly, please check the SQL',
obj=SqlResultError, raise_exception=True)
table = PrettyTable(title)
table.border = True
[table.add_row(list(i.values())) for i in val]
return table
def check(self, res, fields) -> bool:
"""
Check whether the table matches the data in the database
"""
comment = {
'auto_increment': "自增",
'DEFAULT_GENERATED': "自动追加日期",
'DEFAULT_GENERATED on update CURRENT_TIMESTAMP': "自动更新时间为最后一次更改时间"
}
return False
def deal(self, res, fields):
"""
Synchronize the field configuration between the database and the pojo
"""
return True
def res(self, tb_name, db_name, extool) -> dict:
"""
Get the table structure
"""
sql = """SELECT
COLUMN_NAME AS name, -- 名称
DATA_TYPE AS typer, -- 类型
CHARACTER_MAXIMUM_LENGTH AS length, -- 长度
NUMERIC_SCALE AS num_scale, -- 数字小数点
IS_NULLABLE AS is_null, -- 是否允许为空
COLUMN_KEY AS c_key, -- 是否为键
EXTRA as def_val, -- 默认值
COLUMN_COMMENT as comment -- 描述
FROM information_schema.COLUMNS WHERE TABLE_SCHEMA = %s AND TABLE_NAME = %s"""
# 使用传入的sql执行器执行查询表结构的sql
res = extool.select(sql=sql, params=[db_name, tb_name])
# 获得表结构可视化
# table = self.table_visual(list(res[0].keys()), res)
# print(table)
# 对比结构
return res
class AOrm(object):
"""
Pure ORM mode:
you can execute sql in the style find('table').by('args').order_by('args').desc().end()
The benefits are:
better protection against sql injection
no need to write sql statements
simple, easy-to-understand code
quick to pick up
"""
class Mode(Enum):
FIND = 0
INSERT = 1
def __init__(self, repository):
"""
Initialize the ORM
Besides the methods that already exist by default, custom sql dialect fragments can also be concatenated
:param repository: the repository
"""
self.exmode = None
self.args = []
self.params = []
self.sqlFields = None
# self.sqlFields = sqlFields
self.ParseUtil = repository.config_obj
self.serializer = repository.serializer
self.sqlFields = repository.sqlFields
# 创建sql语法
if repository is None:
ALog.log_error(
msg='Repository is null,Place use repository of ORM framework',
obj=AttributeError, LogObject=self.repository.log_obj, raise_exception=True)
self.repository = repository
# self.__table_name__ = '{}{}{}'.format(self.sqlFields.left_subscript, repository.__table_name__,
# self.sqlFields.right_subscript)
self.__table_name__ = repository.__table_name__
self.first_data = False
self.result = []
def top(self):
self.find()
self.first_data = True
return self.limit(1)
def first(self):
"""
Whether to return only the first row of data
"""
self.first_data = True
return self
# ------------------------主键--------------------------
def find(self, *args, **kwargs):
"""
Select
example:
find('all')
find('param1',asses=['p'],h_func=True)
Attributes:
asses: map the corresponding fields to other aliases; use None for fields that need no mapping
h_func: do not convert fields into the `%s` format
poly: whether to append [from table_name] at the end, commonly used when joining tables with the plus operator
Update:
if args is empty, all fields are selected by default
"""
self.exmode = AOrm.Mode.FIND
self.args.append(self.sqlFields.find_str)
# 如果有as字段
alias = None
if 'alias' in kwargs.keys():
alias = kwargs['alias']
# 如果包含方法的字段,则不加密
func_flag = False
if 'h_func' in kwargs.keys():
func_flag = kwargs['h_func']
# 1.1.0.05更新,默认为all
_all = False
if len(args) == 0:
_all = True
# 如果存在all
# 1.1.1.2修复:tuple index out of range
if _all or 'all'.upper() == args[0].upper():
# 如果包含all关键字,则使用解析工具解析成字段参数
if not func_flag:
fields = self.ParseUtil.parse_key(
*self.repository.fields, is_field=True)
else:
fields = self.ParseUtil.parse_key(
*self.repository.fields, is_field=False)
else:
if not func_flag:
fields = self.ParseUtil.parse_key(*args, is_field=True)
else:
fields = self.ParseUtil.parse_key(*args, is_field=False)
# 解决as问题
if alias is not None:
fs = fields.split(',')
if len(fs) != len(alias):
# 匿名参数长度与字段长度不符合
ALog.log_error(obj=TypeError,
msg='The length of the anonymous parameter does not match the length of the field',
raise_exception=True)
for i, v in enumerate(fs):
if alias[i] is not None:
self.args.append('{}{}{}'.format(
v, self.sqlFields.asses_str, alias[i]))
else:
self.args.append(v)
# 逗号
self.args.append(self.sqlFields.comma)
else:
self.args.append(fields)
if alias is not None:
# 去掉末尾的逗号
self.rep_sym()
# 加上from关键字
if 'poly' not in kwargs.keys():
self.con_from()
else:
self.args += kwargs['poly']
return self
def order_by(self, *args, **kwargs):
"""
Order the query by the given fields
example:
find('all').order_by('param')
find('all').order_by('param').end()
find('all').order_by('p1','p2').desc().limit(10,20)
"""
return self.by_opera(field=self.sqlFields.order_by_str, args_list=args, **kwargs)
def group_by(self, *args, **kwargs):
"""
Aggregation (GROUP BY)
example:
select shop_id,count(*) as count from comments group by shop_id having count>1;
"""
return self.by_opera(field=self.sqlFields.group_by_str, args_list=args, **kwargs)
def by_opera(self, field, args_list, **kwargs):
"""
Shared implementation for ORDER BY / GROUP BY style clauses
"""
self.args.append(field)
for i in args_list:
if not kwargs.get('text', False):
self.args.append(self.sqlFields.left_subscript)
self.args.append(i)
if not kwargs.get('text', False):
self.args.append(self.sqlFields.right_subscript)
self.args.append(self.sqlFields.comma)
self.rep_sym(self.sqlFields.comma, self.sqlFields.space)
return self
def filter(self, **kwargs):
return self.find().where(**kwargs).end()
async def filter_async(self, **kwargs):
return self.filter(**kwargs)
def where(self, **kwargs):
"""
WHERE clause
example:
find('ALL').where(param='%s') - the default operator is equality (=)
find('ALL').where(param='==%s')
find('ALL').where(param='>%d')
find('ALL').where(param='<%d')
find('ALL').where(param='<=%d')
find('ALL').where(param='>=%.2f')
find('ALL').where(param='!=%.2f')
Complex syntax:
find('ALL').where(param='+%d/%d==%d')
find('ALL').where(param='-%.2f*%d==12')
find('ALL').where(param='*10-1==12')
find('ALL').where(param='/10+1==12')
"""
self.args.append(self.sqlFields.where_str)
for key, value in kwargs.items():
cp_key = key
customize = False
sym = '='
sps = cp_key.split('__')
rpx_symbol = re.findall(r"([%s]{2})(.*)" % ','.join(self.sqlFields.symbol), str(value))
# UPDATE [1.0.6a2] 使用正则方式替换,减少误判情况造成无法使用特殊运算符拼接sql
# if len(str(value)) > 2 and str(value)[0:2] in self.sqlFields.symbol:
if len(rpx_symbol) > 0:
params = rpx_symbol[0]
sym, value = params[0], params[1]
if sym == '==':
sym = '='
elif sym == '>>':
sym = '>'
elif sym == '<<':
sym = '<'
else:
# 没有找到符号的话就从字段名开始
# 截取最后一段从两段下划线开始的末尾
if not len(sps) == 1:
customize = True
sym = sps[len(sps) - 1]
self.ParseUtil.fieldExist(
self.ParseUtil, 'adapter', raise_exception=True)
cp_key = cp_key[:cp_key.rfind('__' + sym)]
self.ParseUtil.adapter.funcs[sym](self, cp_key, value)
elif not len(sps) == 1:
customize = True
sym = sps[len(sps) - 1]
self.ParseUtil.fieldExist(self.ParseUtil, 'adapter', raise_exception=True)
cp_key = cp_key[:cp_key.rfind('__' + sym)]
self.ParseUtil.adapter.funcs[sym](self, cp_key, value)
if not customize:
self.args.append(
'{}{}{}{}%s'.format(self.sqlFields.left_subscript,
cp_key,
self.sqlFields.right_subscript,
sym))
self.args.append(self.sqlFields.ander_str)
self.params.append(value)
self.rep_sym(self.sqlFields.ander_str)
return self
def on(self, from_where, to_where, symbol='='):
self.args.append(self.sqlFields.space)
self.args.append(self.sqlFields.on_str)
self.args.append(self.sqlFields.space)
self.args.append(from_where)
self.args.append(self.sqlFields.space)
self.args.append(symbol)
self.args.append(self.sqlFields.space)
self.args.append(to_where)
return self
def limit(self, start=0, end=None):
"""
Pagination
:param start: start offset
:param end: end offset
example:
find('all').limit(start=10,end=20)
find('all').limit(end=10)
"""
self.args.append(self.sqlFields.limit_str)
# 死亡空格
if end is None:
limit_param = '{}{}{}'.format(
self.sqlFields.space, start, self.sqlFields.space)
else:
limit_param = '{}{}{}{}{}'.format(self.sqlFields.space, start, self.sqlFields.comma, end,
self.sqlFields.space)
self.args.append(limit_param)
return self
def desc(self):
"""
Descending order
example:
find('all').desc()
find('all').desc().end()
find('all').order_by('param').desc().limit(10,20)
"""
if self.sqlFields.order_by_str not in self.args:
ALog.log_error(
'There is no `order by` field before calling `desc` field,You have an error in your SQL syntax',
AttributeError, LogObject=self.repository.log_obj, raise_exception=True)
self.args.append(self.sqlFields.desc_str)
return self
def set(self, **kwargs):
"""
SET clause
example:
update('table').set('param','value').end()
update('table').set('param1','value1').where('param2=value2').end()
"""
self.args.append(self.sqlFields.set_str)
_size = len(kwargs.keys())
for key, value in kwargs.items():
self.args.append('{}{}{}{}%s'.format(self.sqlFields.left_subscript,
key,
self.sqlFields.right_subscript,
self.sqlFields.eq))
# set是加逗号不是and
self.args.append(self.sqlFields.comma)
self.params.append(value)
self.rep_sym(self.sqlFields.comma)
return self
# ------------------------预设符--------------------------
def ander(self):
"""
AND
example:
update('table').set('param1','value1').and().set('param2','value2')
update('table').set('param1','value1').and().set('param2','value2').end()
update('table').set('param1','value1').and().set('param2','value2').where('param3=value3').end()
"""
self.args.append(self.sqlFields.ander_str)
return self
def __run__(self, need_sql=False, serializer=True) -> QuerySet:
"""
Execute the final task
"""
sql = ''
conf = self.ParseUtil.get_dict()
print_sql = 'print_sql' in conf.keys() and conf['print_sql'] is True
last_id = 'last_id' in conf.keys() and conf['last_id'] is True
sql += ''.join(self.args)
if need_sql:
return sql
if self.exmode == AOrm.Mode.FIND:
self.result = self.repository.db_util.select(
sql=sql,
params=self.params,
print_sql=print_sql,
last_id=last_id,
**self.repository.__dict__
)
_result_objs = []
if self.result is not None:
for i in self.result:
_obj = self.ParseUtil.parse_obj(
data=i, instance=self.repository.instance)
_result_objs.append(_obj)
self.result = _result_objs
else:
self.result = self.repository.db_util.update(
sql=sql,
params=self.params,
print_sql=print_sql,
last_id=last_id
)
# 清空资源,为下一次使用做准备
self.args.clear()
self.params.clear()
if self.first_data:
if (isinstance(self.result, list) or isinstance(self.result, tuple)) and self.result and len(
self.result) > 0:
if not serializer:
return self.result[0]
self.repository.result = self.serializer(instance=self.repository.instance,
base_data=self.result).first()
return self.repository.result
else:
self.repository.result = self.serializer()
return self.repository.result
else:
if not serializer:
return self.result
self.repository.result = self.serializer(
instance=self.repository.instance, base_data=self.result)
return self.repository.result
def con_from(self):
"""
If no FROM clause is present, append the `from __table_name__` keyword at the end
"""
if self.sqlFields.from_str not in self.args:
self.args.append(self.sqlFields.from_str)
# 然后加上表名
self.args.append(self.sqlFields.left_subscript)
self.args.append(self.__table_name__)
self.args.append(self.sqlFields.right_subscript)
def append(self, app_sql):
"""
Append extra sql at the end
"""
self.args.append(app_sql)
return self
def rep_sym(self, sym=',', rep=''):
"""
Replace the given character in the last argument with the replacement character
"""
self.args[len(self.args) -
1] = str(self.args[len(self.args) - 1]).replace(sym, rep)
return self
def end(self, **kwargs):
return self.__run__(**kwargs)
async def end_async(self, **kwargs):
return self.__run__(**kwargs)
def __rshift__(self, other):
"""
Merge the left-hand orm into the right-hand one
"""
new_args = self.args.copy()
new_args.append(' ) ')
other.args.append(' ( ')
other.args += new_args
return other
def __lshift__(self, other):
"""
Merge the right-hand orm into the left-hand one
"""
new_args = other.args.copy()
new_args.append(' ) ')
self.args.append(' ( ')
self.args = self.args + new_args
return self
def alias(self, name):
"""
Set an alias
"""
self.args.append(' AS ')
self.args.append(name)
return self
def left_join(self, sql_orm, name):
"""
left join
"""
self.args.append(self.sqlFields.left_join_str)
self.args.append(sql_orm.__table_name__)
return self.alias(name)
def __str__(self):
sql = ''.join(self.args)
return sql
def check(self):
"""sudo apt install containerd
检查表结构与数据库中是否对应
"""
return self.repository.config_obj.opera(self.repository).check()
def create(self, replace=False):
return self.repository.config_obj.opera(self.repository).create(replace)
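# --- Hedged usage sketch (not part of the original package sources; illustration only) ---
# Assuming `DemoTable` is a Pojo/Repository subclass decorated with @Table and wired to a
# configuration object, the chained calls above compose a statement and end() executes it;
# end(need_sql=True) returns the composed sql string instead of running it.
# demo = DemoTable()
# rows = demo.orm.find('all').where(t_id='<=100').order_by('t_id').desc().limit(0, 20).end()
# sql_only = demo.orm.find('all').where(t_id='<=100').end(need_sql=True)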
|
Aestate
|
/Aestate-1.1.0.tar.gz/Aestate-1.1.0/aestate/work/orm.py
|
orm.py
|
from aestate.exception import FieldNotExist
from aestate.util.Log import ALog
class LanguageAdapter:
"""
Adapter that plugs sql dialects into the ORM framework, giving you full sql freedom.
Configure the sql dialect starting from the configuration table; inherit the SqlLanguage class and implement its abstract methods to enable it.
Implementing this class guarantees that, when custom fields appear in orm operations, every operation runs exactly the way you expect.
"""
funcs = {}
def __init__(self):
if not hasattr(self, 'funcs'):
self.funcs = {}
self.__sp('like', self._like_opera)
self.__sp('in', self._in_opera)
self.__sp('lt', self._lt_opera)
self.__sp('gt', self._gt_opera)
self.__sp('le', self._le_opera)
self.__sp('ge', self._ge_opera)
self.__sp('eq', self._eq_opera)
def add_lan(self, name, func):
self.__sp(name, func)
def _like_opera(self, instance, key, value):
instance.args.append('`' + key + '`')
instance.args.append(' LIKE ')
instance.args.append('%s')
instance.args.append(' AND ')
instance.params.append(value)
def _in_opera(self, instance, key, value):
if isinstance(value, list):
instance.args.append('`' + key + '`')
instance.args.append(' IN ')
value = [str(i) for i in value]
vals = ','.join(value)
instance.args.append(f'( {vals} )')
else:
ALog.log_error(
msg='value type is not list or QuerySet object',
obj=FieldNotExist, raise_exception=True)
def _lt_opera(self, instance, key, value):
instance.args.append('`' + key + '`')
instance.args.append(' < ')
instance.args.append('%s')
instance.args.append(' AND ')
instance.params.append(value)
def _gt_opera(self, instance, key, value):
instance.args.append('`' + key + '`')
instance.args.append(' > ')
instance.args.append('%s')
instance.args.append(' AND ')
instance.params.append(value)
def _le_opera(self, instance, key, value):
instance.args.append('`' + key + '`')
instance.args.append(' <= ')
instance.args.append('%s')
instance.params.append(value)
def _ge_opera(self, instance, key, value):
instance.args.append('`' + key + '`')
instance.args.append(' >= ')
instance.args.append('%s')
instance.params.append(value)
def _eq_opera(self, instance, key, value):
instance.args.append('`' + key + '`')
instance.args.append(' = ')
instance.args.append('%s')
instance.params.append(value)
def __sp(self, key, val):
if key not in self.funcs.keys():
self.funcs[key] = val
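# --- Hedged usage sketch (not part of the original package sources; illustration only) ---
# Registering a custom dialect handler through add_lan(). At query time the orm passes
# itself in as `instance`, so the handler appends to instance.args / instance.params just
# like the built-in handlers above; the operator name 'ne' is an assumed example.
def _ne_opera_demo(instance, key, value):
    instance.args.append('`' + key + '`')
    instance.args.append(' <> ')
    instance.args.append('%s')
    instance.args.append(' AND ')
    instance.params.append(value)

demo_adapter = LanguageAdapter()
demo_adapter.add_lan('ne', _ne_opera_demo)
# a filter such as orm.find('all').where(field__ne=1) would then be dispatched to _ne_opera_demo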
|
Aestate
|
/Aestate-1.1.0.tar.gz/Aestate-1.1.0/aestate/work/Adapter.py
|
Adapter.py
|
import re
from .AopContainer import AopModelObject
from .Modes import EX_MODEL
from .Serialize import QuerySet
import os
import inspect
from .xmlhandler.nodes import ResultMapNode
from .xmlhandler.utils import AestateXml
from ..exception import TagAttributeError, TagHandlerError
from ..i18n import ExceptionI18n
from ..util.Log import ALog
from ..util.sqlOpera import TextUtil
def Table(name, msg="", **kwargs):
"""
Mark the class as a table
:param name: table name
:param msg: table description
:return:
"""
def set_to_field(cls):
setattr(cls, '__table_name__', name)
setattr(cls, '__table_msg__', msg)
for key, value in kwargs.items():
setattr(cls, key, value)
return cls
return set_to_field
def Select(sql: str):
"""
Shortcut query decorator
With this decorator, large amounts of repetitive code are absorbed into the decorator's internal implementation
Usage:
@Select(sql="SELECT * FROM demo_table WHERE t_id<=${not_escaped} AND t_msg like #{escaped}")
Two placeholder forms can be used to insert fields into the sql:
${field}: inserts the text directly, so it cannot effectively prevent sql injection
#{field}: filters the field through %s, which helps prevent sql injection but is not 100% effective;
it relies on the `mogrify` method of the cursor built into the third-party driver you use, so check it before relying on it
:param sql: the sql statement to execute; parameters that need escaping are written as `%s`
"""
def base_func(cls):
def _wrapper_(*args, **kwargs):
lines = list(args)
obj = lines[0]
# 查找参数
sub_sql, new_args = TextUtil.replace_antlr(sql, **kwargs)
result = obj.find_sql(sql=sub_sql, params=new_args)
return QuerySet(obj, result)
return _wrapper_
return base_func
def SelectAbst():
def mysql_rp(n, array, obj) -> str:
_name = array[len(array) - 1] if len(array) > 0 else ""
rule = {
'F': 'FROM',
'find': "SELECT",
'where': 'WHERE',
'eq': "= #{%s}" % _name,
'lt': '< #{%s}' % _name,
'gt': '> #{%s}' % _name,
'le': '<= #{%s}' % _name,
'ge': '>= #{%s}' % _name,
'in': 'in #{%s}' % _name,
'like': 'like #{%s}' % _name,
'all': ','.join([obj.orm.ParseUtil.parse_key(f) for f in obj.fields]),
}
return rule[n] if n in rule.keys() else n
def base_func(func):
def _wrapper_(*args, **kwargs):
lines = list(args)
if len(lines) == 0 and hasattr(func, 'instance') and not func.instance:
ALog.log_error(ExceptionI18n.tt(""))
obj = lines[0]
_name = func.__name__.split("_")
S = []
for i in _name:
d = mysql_rp(i, S, obj)
S.append(d if d != "FROM" else f"FROM {obj.__table_name__}")
sql = ' '.join(S)
# 查找参数
sub_sql, new_args = TextUtil.replace_antlr(sql, **kwargs)
result = obj.find_sql(sql=sub_sql, params=new_args)
return QuerySet(obj, result)
return _wrapper_
return base_func
def AopModel(before=None, after=None,
before_args=None, before_kwargs=None,
after_args=None, after_kwargs=None):
"""
AOP (aspect-oriented) mode:
depend on the AopModel decorator and add @AopModel on a method to weave it in
Advantages:
when @AopModel is used, the inner function tries the callbacks level by level, in the order:
- func(*self.args, **self.kwargs)
- func(*self.args)
- func(**self.kwargs)
- func()
This means that even if you pass the parameters incorrectly, AopModel still follows the rules used by the original method. The most surprising usage is:
<code>
def Before(**kwargs):
print('Before:', kwargs)
# The Before method here has no args parameter, yet args are passed in when @AopModel is used
@AopModel(before=Before,before_args=(0,1,2), before_kwargs={'1': '1'})
def find_title_and_selects(self, **kwargs):
print('function task', kwargs['uid'])
_r = self.orm.find().where(index="<<100").end()
print(_r)
return _r
</code>
The available parameters are:
before: function to execute when cutting in
before_args: arguments for the cut-in call
a list or tuple of data
if you need values from the current pojo, pass them as (pojo.field_name)
the format is extensible, for example to pass a dict
before_kwargs: keyword arguments for the cut-in call -- a dict
after: function to execute before cutting out
after_args: arguments for the cut-out call
a list or tuple of data
if you need values from the current pojo, pass them as ('self.field_name')
the format is extensible, for example to pass a dict: ('self.dict.key')
after_kwargs: keyword arguments for the cut-out call -- a dict
Execution flow:
Before -> original -> After
Notes on Before:
when this parameter is used, any return value of the hook is ignored; if you need the return value, define a `global` field to store it
when parsing is impossible or fails, the operation is skipped with pass
Notes on After:
when this parameter is used, the method's signature must contain at least one keyword argument result=None;
once the original method finishes, its return value is injected into that function under the key `result`
when parsing is impossible or fails, the operation is skipped with pass
Attributes:
before: function to execute when cutting in
after: function to execute before cutting out
before_args: arguments for the cut-in call
a list or tuple of data
if you need values from the current pojo, pass them as (pojo.field_name)
the format is extensible, for example to pass a dict
before_kwargs: keyword arguments for the cut-in call -- a dict
after_args: arguments for the cut-out call
a list or tuple of data
if you need values from the current pojo, pass them as ('self.field_name')
the format is extensible, for example to pass a dict: ('self.dict.key')
after_kwargs: keyword arguments for the cut-out call -- a dict
"""
# 得到对象组
aop_obj = AopModelObject(before, after,
before_args, before_kwargs,
after_args, after_kwargs)
def base_func(func):
aop_obj.func = func
def _wrapper_(*args, **kwargs):
aop_obj.set_args(*args, **kwargs)
return aop_obj.start()
return _wrapper_
return base_func
def ReadXml(filename):
"""读取xml"""
def set_to_field(cls):
file_path = inspect.getfile(cls)
# 分割字符串得到当前路径
file_path = '/'.join(re.split(r'[/|\\]', file_path)[:-1])
path = os.path.join(file_path, filename)
setattr(cls, '_xml_file', path)
xml = AestateXml.read_file(path)
setattr(cls, 'xNode', xml)
setattr(cls, '_xml_file_name', os.path.basename(path))
return cls
return set_to_field
def Item(_id, d=False):
"""
Map an xml item node onto the current method; the _id argument corresponds to the id of the xml node
:param _id: node id
:param d: return the raw data when querying
"""
def replaceNextLine(sql):
sql = str(sql).replace('\n', ' ')
sql = str(sql).replace(' ', ' ')
if ' ' in sql:
return replaceNextLine(sql)
else:
return sql
def base_func(cls):
def _wrapper_(*args, **kwargs):
lines = list(args)
obj = lines[0]
xml = obj.xNode
xml_node = None
# 增删改查的节点加起来得到所有操作节点
node_list = xml.children['select'] \
+ xml.children['insert'] \
+ xml.children['update'] \
+ xml.children['delete']
# 从所有的可操作节点中寻找id符合的节点
for v in node_list:
if 'id' in v.attrs.keys() and v.attrs['id'].text == _id:
xml_node = v
break
if xml_node is not None:
xml_node.params = kwargs
result_text_node = xml_node.text(obj)
else:
result_text_node = None
ALog.log_error(
f"`{_id}` does not exist in the xml node.file:({obj._xml_file_name})", obj=TagAttributeError,
raise_exception=True)
# 美化sql
if result_text_node is None:
ALog.log_error(
f"`The node did not return any sentences.file:({obj._xml_file_name})", obj=TagHandlerError,
raise_exception=True)
return None
run_sql = replaceNextLine(result_text_node.text)
sub_sql, params = TextUtil.replace_antlr(run_sql, **kwargs)
# 返回值ast
if xml_node.node.tagName.lower() == 'select':
result = obj.execute_sql(sql=sub_sql, params=params, mode=EX_MODEL.SELECT, **obj.__dict__)
# 将返回的结果解析
resultTree = ResultMapNode(obj, result_text_node, result)
if d:
return result
return resultTree.apply(xml.children['resultMap'])
else:
# 是否需要返回最后一行id,默认返回
has_last_id = bool(result_text_node.expand_data['last']) \
if 'last' in result_text_node.expand_data.keys() else True
result = obj.execute_sql(sql=sub_sql, params=params, mode=EX_MODEL.UPDATE,
**{'last_id': has_last_id, **obj.__dict__})
return result
return _wrapper_
return base_func
def JsonIgnore(*fields):
def base_func(fn):
def _wrapper_(*args, **kwargs):
_self = args[0]
_self.EXEC_FUNCTION = [*fields]
return _self
return _wrapper_
return base_func
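# --- Hedged usage sketch (not part of the original package sources; illustration only) ---
# Combining the decorators above on a pojo class; `DemoConf` stands in for a real
# MySqlConfig instance and the table/column names are placeholders.
# @Table(name="demo_table", msg="demo message")
# class DemoTable(Pojo):
#     def __init__(self, **kwargs):
#         super(DemoTable, self).__init__(config_obj=DemoConf(), **kwargs)
#
#     @Select("SELECT * FROM demo_table WHERE t_id <= #{max_id}")
#     def select_by_max_id(self, max_id):
#         pass
#
# rows = DemoTable().select_by_max_id(max_id=100)   # returns a QuerySet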
|
Aestate
|
/Aestate-1.1.0.tar.gz/Aestate-1.1.0/aestate/work/Annotation.py
|
Annotation.py
|
from enum import IntEnum
class Singleton:
"""
Helpers that apply the singleton pattern
"""
@staticmethod
def createFactory(cls):
with cls._instance_lock:
if not hasattr(cls, "__instance__"):
cls.__instance__ = cls(cls.modules)
return cls.__instance__
@staticmethod
def createObject(cls):
with cls._instance_lock:
if not hasattr(cls, "__instance__"):
cls.__instance__ = object.__new__(cls)
return cls.__instance__
@staticmethod
def println(cls, text):
with cls._instance_lock:
if not hasattr(cls, "__instance__"):
cls.__instance__ = object.__new__(cls)
print(text)
class Recursion:
"""
Recursion helpers
"""
@staticmethod
def find_key_for_dict(obj_data: dict, target: str):
"""
Recurse through the dict repeatedly until the final value is obtained; return None if nothing is found
"""
def parse_list(listData: list, tag: str):
result_list = []
if listData is not None:
if isinstance(listData, list):
for i in listData:
if isinstance(i, list) or isinstance(i, tuple):
rt = parse_list(list(i), tag)
if rt:
result_list.append(rt)
elif isinstance(i, dict):
result_list.append(parse_dict(i, tag))
else:
result_list.append(parse_obj(i, tag))
elif isinstance(listData, tuple):
result_list.append(parse_list(list(listData), tag))
elif isinstance(listData, dict):
result_list.append(parse_dict(listData, tag))
else:
result_list.append(parse_obj(listData, tag))
else:
return result_list
return result_list
def parse_dict(dictData, tag: str):
result_dict = []
if dictData is not None:
if isinstance(dictData, dict):
if tag in dictData.keys():
result_dict.append(dictData.get(tag))
dictData.pop(tag)
for index, value in dictData.items():
if isinstance(value, list) or isinstance(value, tuple):
result_dict.append(parse_list(list(value), tag))
elif isinstance(value, dict):
result_dict.append(parse_dict(value, tag))
else:
result_dict.append(parse_obj(value, tag))
elif isinstance(dictData, list):
result_dict.append(parse_list(list(dictData), tag))
else:
result_dict.append(parse_obj(dictData, tag))
else:
return None
return result_dict
def parse_obj(objData, tag: str):
def load(da: dict):
obj_item_list = []
obj_item_dict = {}
for i, v in da.items():
if i == tag:
result_obj.append(getattr(objData, i))
continue
if isinstance(v, list):
obj_item_list.append(getattr(objData, i))
elif isinstance(v, dict):
obj_item_dict[i] = v
result_obj.append(parse_list(obj_item_list, tag))
result_obj.append(parse_dict(obj_item_dict, tag))
result_obj = []
if isinstance(objData, dict):
result_obj.append(parse_dict(objData, tag))
elif isinstance(objData, list):
result_obj.append(parse_list(list(objData), tag))
else:
if isinstance(objData, object):
if isinstance(objData, str):
return None
elif isinstance(objData, int):
return None
elif isinstance(objData, float):
return None
else:
if hasattr(objData, tag):
result_obj.append(getattr(objData, tag))
load(da=objData.__dict__)
else:
load(da=objData.__dict__)
return result_obj
return parse_obj(obj_data, target)
class DictTemplate(object):
"""
Dict-object template
"""
def __init__(self, init_data):
self.init_data = init_data
def add(self, key, obj):
setattr(self, key, obj)
class DictToObject(object):
"""
Convert a dict into an object, so you do not have to keep writing square brackets
"""
def __init__(self, dict_data):
baseClass = DictTemplate(dict_data)
self.dict_data = dict_data
self.baseNode = baseClass
self.verification(self.baseNode, self.dict_data)
@staticmethod
def conversion(dict_data: dict):
node = DictToObject(dict_data)
return node.baseNode
def verification(self, node: DictTemplate, value):
"""
Validation routine
"""
node.init_data = value
if isinstance(value, dict):
for key, val in value.items():
if isinstance(val, (dict, list, tuple)):
val = self.verification(DictTemplate(val), val)
node.add(key, val)
elif isinstance(value, list):
list_temp = []
for val in value:
if isinstance(val, (dict, list, tuple)):
val = self.verification(DictTemplate(val), val)
list_temp.append(val)
node.add('', list_temp)
return node
class CaseItem:
def __init__(self, flag, method, *args, **kwargs):
self.flag = flag
self.method = method
self.args = args
self.kwargs = kwargs
def __str__(self):
return str(self.flag)
class CaseOperaBase:
def __init__(self, val, method=None, *args, **kwargs):
self.val = val
self.method = method
self.args = args
self.kwargs = kwargs
def item(self, val, order) -> object: ...
class Case(CaseOperaBase):
def __gt__(self, other):
"""
The left side is greater than the right side
"""
return self
def __ge__(self, other):
return int(self.val) >= int(other)
def __lt__(self, other):
"""
The left side is less than the right side
"""
return int(self.val) < int(other)
def __le__(self, other):
"""
The left side is less than or equal to the right side
"""
return int(self.val) <= int(other)
def __eq__(self, other):
"""
Equal to
"""
return self.val == other
def __ne__(self, other):
"""
Not equal to
"""
return self.val != other
def item(self, val, order):
order.opera[self.val] = CaseItem(self.val == val, self.method, self.args[0], **self.kwargs)
return order
class CaseDefault(CaseOperaBase):
def item(self, val, order):
# order.opera[self.val] = CaseItem(True, val, *self.args, **self.kwargs)
return order.end(self.val)
class Switch:
"""
Makes up for Python's lack of a switch statement
Usage tutorial:
from aestate.util.others import Switch,Case,CaseDefault
base_symbol = lambda x: x + x
val = 3
Option 1:
# case(candidate value, method to run when the condition is met, parameters that method needs)
source = Switch(Case(val)) + \
Case(0, base_symbol, val) + \
Case(1, base_symbol, val) + \
Case(2, base_symbol, val) + \
Case(3, base_symbol, val) + \
Case(4, base_symbol, val) + \
Case(5, base_symbol, val) + \
CaseDefault(lambda: False)
print(ajson.aj.parse(source, bf=True))
Option 2:
source = Switch(Case(val)). \
case(0, base_symbol, val). \
case(1, base_symbol, val). \
case(2, base_symbol, val). \
case(3, base_symbol, val). \
case(4, base_symbol, val). \
case(5, base_symbol, val). \
end(lambda: False)
print(ajson.aj.parse(source, bf=True))
"""
def __init__(self, val):
self.val = val
self.opera = {}
def case(self, item, method, *args, **kwargs):
if item in self.opera.keys():
raise KeyError(f'`{item}` Already exists in the `case`')
self.opera[item] = CaseItem(self.val == item, method, *args, **kwargs)
return self
def end(self, default_method, *args, **kwargs):
"""
Default handler
"""
for k, v in self.opera.items():
if v.flag:
return v.method(*v.args, **v.kwargs)
return default_method(*args, **kwargs)
def __add__(self, other):
return other.item(self.val, self)
class EX_MODEL(IntEnum):
SELECT = 0
UPDATE = 1
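# --- Hedged usage sketch (not part of the original package sources; illustration only) ---
# DictToObject.conversion turns nested dicts into attribute access:
demo_node = DictToObject.conversion({'name': 'aestate', 'meta': {'version': '1.1.0'}})
print(demo_node.name)            # 'aestate'
print(demo_node.meta.version)    # '1.1.0'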
|
Aestate
|
/Aestate-1.1.0.tar.gz/Aestate-1.1.0/aestate/work/Modes.py
|
Modes.py
|
import asyncio
import uuid
from aestate.work.Modes import EX_MODEL
from aestate.work.Serialize import QuerySet
from aestate.work.orm import AOrm
class RepositoryProxy:
"""
Proxies the repository's operations; every Repository call goes through here.
This layer exists to make it easier to use the Pojo class behaviour of type objects.
The cls value at call time is obtained through Repository's __get__ method.
"""
@property
def conversion(self):
"""
Convert this Repository into an ORM entity
Return:
the entity object after ORM conversion
"""
return AOrm(repository=self)
def first(self):
"""
Get the first row in the database
"""
return self.conversion.top().end()
def last(self):
"""
Get the last row
"""
return self.conversion.top().desc().end()
def find_all(self, **kwargs) -> QuerySet:
"""
Find all rows in the current data table
Returns:
all results wrapped into a collection of POJO objects and returned
"""
# Start the task
self.result = self.find_field(*self.getFields(), **kwargs)
return self.result
def find_field(self, *args, **kwargs) -> QuerySet:
"""
Query only the fields with the given names, e.g.:
SELECT user_name FROM `user`
which yields POJO objects parsed around user_name only
:param args: the field names to include in parsing
:return:
all results wrapped into a collection of POJO objects and returned
"""
# Set the task name
name = str(uuid.uuid1())
# Start the task
kwargs.update(
{
'func': self.operation.__find_by_field__,
'__task_uuid__': name,
't_local': self
}
)
result = self.operation.start(*args, **kwargs)
self.result = self.serializer(instance=self, base_data=result)
return self.result
def find_one(self, sql, **kwargs):
"""
Find the first row of data
It can be a single row
or the first of many rows
code:
result = self.find_many(**kwargs)
if len(result) == 0:
return None
else:
return result[0]
:param kwargs: all parameters:
pojo: reference object
sql: the sql statement, already processed and containing %s placeholders
params: values to fill in
print_sql: whether to print the sql statement
:return the first item of the find_many() result
"""
kwargs['sql'] = sql
self.result = self.find_many(**kwargs)
if self.result is None or len(self.result) == 0:
self.result = []
return None
else:
self.result = self.result.first()
return self.result
def find_many(self, sql, **kwargs) -> QuerySet:
"""
Query multiple rows of data
The first argument must be the sql statement
:param kwargs: all parameters:
pojo: reference object
sql: the sql statement, already processed and containing %s placeholders
params: values to fill in
print_sql: whether to print the sql statement
:return all data wrapped into POJO objects and returned
"""
# Set the task name
name = str(uuid.uuid1())
kwargs['sql'] = sql
# Start the task
kwargs['func'] = self.operation.__find_many__
kwargs['__task_uuid__'] = name
kwargs['t_local'] = self
result = self.operation.start(**kwargs)
self.__clear_params__()
self.result = self.serializer(instance=self.instance, base_data=result)
return self.result
def find_sql(self, sql, **kwargs) -> QuerySet:
"""
Return multiple rows wrapped in a list:
- can be used for automated operations
- prefer find_many(sql) where possible
:param kwargs: all parameters:
sql: the sql statement, already processed and containing %s placeholders
params: values to fill in
print_sql: whether to print the sql statement
"""
# kwargs['conf_obj'] = t_local.config_obj
# Set the task name
name = str(uuid.uuid1())
kwargs['sql'] = sql
# Start the task
kwargs['func'] = self.operation.__find_sql__
kwargs['__task_uuid__'] = name
kwargs['t_local'] = self
result = self.operation.start(**kwargs)
self.result = self.serializer(instance=self.instance, base_data=result)
return self.result
def update(self, key=None):
"""
Execute an update operation:
returns the number of affected rows
:param key: primary key, used as the where reference
:return:
"""
if key is None:
for k, v in self._fields.items():
if hasattr(v, "primary_key") and getattr(v, 'primary_key'):
key = k
break
name = str(uuid.uuid1())
kwargs = {
'pojo': self,
'func': self.operation.__update__,
'__task_uuid__': name,
't_local': self,
'key': key
}
# 开启任务
self.result = self.operation.start(**kwargs)
return self.result
def remove(self, key=None):
"""
Execute a delete operation:
returns the number of affected rows
:param key: primary key, used as the where reference
:return:
"""
if key is None:
for k, v in self._fields.items():
if hasattr(v, "primary_key") and getattr(v, 'primary_key'):
key = k
break
name = str(uuid.uuid1())
kwargs = {
'pojo': self,
'func': self.operation.__remove__,
'__task_uuid__': name,
't_local': self,
'key': key
}
# 开启任务
self.result = self.operation.start(**kwargs)
return self.result
def save(self, *args, **kwargs):
"""
Save the values currently stored on the object into the database
"""
kwargs['pojo'] = self
return self.create(*args, **kwargs)
def create(self, pojo, **kwargs):
"""
Insert the attributes:
returns the number of affected rows
:param kwargs: all parameters:
pojo: reference object
last_id: whether to return the id of the last row, False by default
:return: rowcount, plus last_id if last_id=True
"""
# 设置名称
kwargs['pojo'] = pojo
name = str(uuid.uuid1())
# 开启任务
kwargs['func'] = self.operation.__insert__
kwargs['__task_uuid__'] = name
kwargs['t_local'] = self
self.result = self.operation.start(**kwargs)
self.__clear_params__()
return self.result
def copy(self, *args, **kwargs):
"""
Copy the object for further operations
Creating objects repeatedly is discouraged; use pojo.copy() to generate new objects
"""
obj = self.__class__(new=True, *args, **kwargs)
[setattr(obj, k, v) for k, v in kwargs.items()]
return obj
def execute_sql(self, sql, params=None, mode=EX_MODEL.SELECT, **kwargs):
"""
:param sql: the sql to execute
:param params: parameters used to prevent sql injection
:param mode: query mode, SELECT by default; use the aestate.work.Modes.EX_MODEL enum to change the type of sql executed
:param kwargs: any other required parameters
"""
self.__clear_params__()
d = self.__dict__
d.update(kwargs)
kwargs = d
kwargs['print_sql'] = False if 'print_sql' not in kwargs.keys() else kwargs['print_sql'] if kwargs[
'print_sql'] else False
if mode is None or mode == EX_MODEL.SELECT:
self.result = self.db_util.select(sql=sql, params=params, **kwargs)
else:
kwargs['last_id'] = True if 'last_id' not in kwargs.keys() else kwargs['last_id']
self.result = self.db_util.insert(sql=sql, params=params, **kwargs)
self.__clear_params__()
return self.result
def foreign_key(self, cls, key_name, field_name=None, data=None, operation=None):
"""
Query by foreign key
:param cls: the target foreign-key class (note: the class itself, not an instance)
:param key_name: the foreign-key id
:param field_name: the field name to store the result under, the table name by default
:param data: use existing data as the foreign key
:param operation: custom operation
"""
child_obj = cls()
if field_name is None:
name = child_obj.get_tb_name()
else:
name = field_name
self.datas = self.result if data is None else data
for i in range(len(self.datas)):
if not operation:
data = child_obj.orm.filter(**{key_name: self.datas[i].id})
else:
data = operation(self.datas, i)
self.datas[i].add_field(name, data.to_dict())
def __clear_params__(self):
"""
Clear the params parameter
:return:
"""
if hasattr(self, 'params'):
self.params.clear()
class RepositoryAsyncProxy(RepositoryProxy):
"""
Proxy that executes repository operations asynchronously; see the SqlOperaProxy class for details
"""
def find_all_async(self, *args, **kwargs):
# Delegate to the synchronous find_all inside a fresh event loop
async def find_all(*a, **kw):
return self.find_all(*a, **kw)
return asyncio.run(find_all(*args, **kwargs))
async def find_field_async(self, *args, **kwargs):
return self.find_field(*args, **kwargs)
async def find_one_async(self, *args, **kwargs):
return self.find_one(*args, **kwargs)
async def find_many_async(self, *args, **kwargs):
return self.find_many(*args, **kwargs)
async def find_sql_async(self, *args, **kwargs):
return self.find_sql(*args, **kwargs)
async def update_async(self, *args, **kwargs):
return self.update(*args, **kwargs)
async def remove_async(self, *args, **kwargs):
return self.remove(*args, **kwargs)
async def save_async(self, *args, **kwargs):
return self.save(*args, **kwargs)
async def create_async(self, pojo, **kwargs):
return self.create(pojo, **kwargs)
async def execute_sql_async(self, sql, params=None, mode=EX_MODEL.SELECT, **kwargs):
return self.execute_sql(sql=sql, params=params, mode=mode, **kwargs)
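# --- Hedged usage sketch (not part of the original package sources; illustration only) ---
# The *_async wrappers above are coroutines that delegate to their synchronous
# counterparts, so they are awaited from an event loop; `DemoTable` is a placeholder pojo.
# import asyncio
#
# async def demo_query():
#     repo = DemoTable()
#     return await repo.find_one_async(sql="SELECT * FROM demo_table WHERE t_id = %s", params=[1])
#
# asyncio.run(demo_query())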
|
Aestate
|
/Aestate-1.1.0.tar.gz/Aestate-1.1.0/aestate/work/proxy/SqlOperaProxy.py
|
SqlOperaProxy.py
|
from abc import ABC
from xml.dom.minidom import Element
from aestate.exception import TagHandlerError
from aestate.util.Log import ALog
class NodeHandler(ABC):
"""节点事件抽象"""
# 操作
def handleNode(self, target_obj): ...
class IfHandler(NodeHandler):
"""if标签事件"""
def __init__(self, initial_field, field, params, value, symbol):
self.initial_field = initial_field
self.field = field
self.params = params
self.value = value
self.symbol = symbol
def parse_node(self):
pass
def handleNode(self, target_obj):
if self.initial_field != self.field:
# 转换成同类型
if self.params[self.field] is not None:
value = type(self.params[self.field])(self.value)
else:
value = None
if self.symbol == '>=':
success = self.params[self.field] >= value
elif self.symbol == '<=':
success = self.params[self.field] <= value
elif self.symbol == '==':
success = self.params[self.field] == value
elif self.symbol == '!=':
success = self.params[self.field] != value
elif self.symbol == '>':
success = self.params[self.field] > value
elif self.symbol == '<':
success = self.params[self.field] < value
else:
ALog.log_error(
msg=f'The node rule parsing failed and did not conform to the grammatical structure.{self.symbol}',
obj=TagHandlerError, LogObject=target_obj.log_obj, raise_exception=True)
success = False
else:
# UPDATE 1.0.6a2 如果匹配不到key就设置为None
if self.symbol != '!=':
ALog.log_error(
msg=f'`None` cannot judge `value`.wrong compiled field:`{self.field}{self.symbol}{self.value}`',
obj=TagHandlerError, LogObject=target_obj.log_obj, raise_exception=True)
success = False
else:
#
success = self.value == 'None' \
or self.value == 'false' \
or self.value == 'False' \
or self.value == '' \
or self.value == 0
# if self.symbol == '>=':
# success = self.field >= self.value
# elif self.symbol == '<=':
# success = self.field <= self.value
# elif self.symbol == '==':
# success = self.field == self.value
# elif self.symbol == '!=':
# success = self.field != self.value
# elif self.symbol == '>':
# success = self.field > self.value
# elif self.symbol == '<':
# success = self.field < self.value
# else:
# ALog.log_error(
# msg=f'The node rule parsing failed and did not conform to the grammatical structure.{self.symbol}',
# obj=TagHandlerError, LogObject=target_obj.log_obj, raise_exception=True)
# success = False
return success
@staticmethod
def checking_mark(node: Element):
if node.nextSibling:
if node.nextSibling.nodeName == '#text':
return IfHandler.checking_mark(node.nextSibling)
else:
if node.nextSibling.nodeName == 'else':
return True
else:
return False
return False
|
Aestate
|
/Aestate-1.1.0.tar.gz/Aestate-1.1.0/aestate/work/xmlhandler/XMLScriptBuilder.py
|
XMLScriptBuilder.py
|
import importlib
import re
from abc import ABC
from aestate.exception import NotFindTemplateError, TagAttributeError, TagHandlerError, XmlParseError
from aestate.i18n import ExceptionI18n
from aestate.util import others
from aestate.util.Log import ALog
from aestate.work.Serialize import QuerySet
from aestate.work.repository import Repository
from aestate.work.xmlhandler.XMLScriptBuilder import IfHandler
from aestate.work.xmlhandler.base import AestateNode
class AbstractNode(ABC):
"""抽象节点,所有节点的父类"""
# 节点自身的操作
def __init__(self, target_obj, params, aestate_xml_cls, root, value, XML_KEY, XML_IGNORE_NODES):
self.target_obj = target_obj
self.params = params
self.aestate_xml_cls = aestate_xml_cls
self.root = root
self.node = value
self.XML_KEY = XML_KEY
self.XML_IGNORE_NODES = XML_IGNORE_NODES
# 得到节点的值
def apply(self, *args, **kwargs):
...
def parseNode(self, texts: AestateNode, node):
for root_index, root_value in enumerate(node.childNodes):
if root_value.nodeName in self.XML_KEY.keys():
obj = self.XML_KEY[root_value.nodeName](self.target_obj, self.params, self.aestate_xml_cls, self.root,
root_value, self.XML_KEY, self.XML_IGNORE_NODES)
texts = obj.apply(texts=texts)
elif root_value.nodeName in self.XML_IGNORE_NODES:
try:
texts.add(node=root_value, index=root_index)
except Exception as e:
ALog.log_error(
msg=''.join(e.args),
obj=e, LogObject=self.target_obj.log_obj, raise_exception=True)
try:
texts.extend(AestateNode(self.root, root_value))
except Exception as e:
ALog.log_error(
msg=''.join(e.args),
obj=e, LogObject=self.target_obj.log_obj, raise_exception=True)
return texts
class SelectNode(AbstractNode):
def apply(self, *args, **kwargs):
# 取得已有的文本
texts = kwargs['texts']
axc_node = self.aestate_xml_cls(self.root, self.node, self.params)
# 返回值类型
resultType = axc_node.attrs['resultType']
return self.parseNode(texts, self.node)
class UpdateNode(AbstractNode):
class TempTextNode:
def __init__(self, text):
self.text = text
def apply(self, *args, **kwargs):
# 取得已有的文本
texts = kwargs['texts']
axc_node = self.aestate_xml_cls(self.root, self.node, self.params)
# 返回值类型
has_last_id = axc_node.attrs['last'] if 'last' in axc_node.attrs.keys() else self.TempTextNode('True')
texts.expand_data['has_last_id'] = has_last_id.text == 'True'
return self.parseNode(texts, self.node)
class IfNode(AbstractNode):
def conditional_test(self, text, syntax_re_text):
# 移除空集
syntax_using = [x for x in syntax_re_text[0] if x != '']
if len(syntax_using) == 2 or len(syntax_using) > 3:
ALog.log_error(
msg=ExceptionI18n.tt('xml_syntax_error') % text,
obj=TagHandlerError, LogObject=self.target_obj.log_obj, raise_exception=True)
# 左边的匹配字段名,这就意味着变量必须写在左边
initial_field = syntax_using[0]
symbol = syntax_using[1]
value = syntax_using[2]
rfield = re.findall('#{(.*?)}', initial_field)
field = rfield[0] if len(rfield) > 0 and rfield[0] in self.params.keys() else initial_field
# 让事件器来执行
ifhandler = IfHandler(initial_field=initial_field, field=field, params=self.params, value=value,
symbol=symbol)
return ifhandler.handleNode(self.target_obj)
def signal_conditional_test(self, text, field):
"""单个判断值对否"""
rfield = re.findall('#{(.*?)}', field)
if len(rfield) == 1 and rfield[0] not in self.params.keys():
# 这里理应等于false,但是由于存在不等号,所以当没有时他应该为!false,也就是true
return True
return not bool(self.params[rfield[0]])
def apply(self, *args, **kwargs):
texts = kwargs['texts']
axc_node = self.aestate_xml_cls(self.root, self.node, self.params)
if 'test' not in axc_node.attrs.keys():
ALog.log_error(
msg=ExceptionI18n.tt('if_tag_not_test'),
obj=TagAttributeError, LogObject=self.target_obj.log_obj, raise_exception=True)
return
test_syntax = axc_node.attrs['test']
# UPDATE: 1.0.6a2 增加!=
tests = re.split('and|or|&&|\|\|', test_syntax.text)
conditions = re.findall('(and|or|&&|\|\|)', test_syntax.text)
if len(conditions) != len(tests) - 1:
ALog.log_error(
msg=ExceptionI18n.tt('xml_syntax_error') % test_syntax.text,
obj=XmlParseError, LogObject=self.target_obj.log_obj, raise_exception=True)
# UPDATE: 1.0.6a2 增加可以识别多个条件
# 是否可以继续判断
if_next = None
for t in tests:
success = False
# 去除首尾空格寻找匹配的语法
text = t.strip()
# 一种是 字段-符号-值
syntax_re_text = re.findall('(#\{.*?\})([>=|<=|==|<|>|!=]+)(.*)', text)
# 一种是 符号-空格(可有可无)-字段
signal_syntax_re_text = re.findall('!\s*(#\{.*?\})', text)
if len(syntax_re_text) != 0:
success = self.conditional_test(text, syntax_re_text)
elif len(signal_syntax_re_text) == 1:
success = self.signal_conditional_test(text[0], signal_syntax_re_text[0])
else:
# 缺少必要的test标签语法
ALog.log_error(
msg=ExceptionI18n.tt('xml_syntax_error') % test_syntax.text,
obj=TagAttributeError, LogObject=self.target_obj.log_obj, raise_exception=True)
if len(conditions) > 0:
_and = re.search('and|&&', conditions[0])
_or = re.search('or|\|\|', conditions[0])
if if_next is None:
if_next = success
continue
if _and:
if_next = if_next and success
elif _or:
if_next = if_next or success
else:
ALog.log_error(
msg=ExceptionI18n.tt('before_else_not_if'),
obj=TagHandlerError, LogObject=self.target_obj.log_obj, raise_exception=True)
# 分割应该放在末尾,因为条件需要比判断的符号多一个索引
conditions = conditions[1:]
else:
if_next = success
# 如果已经是false
if not if_next:
break
if if_next:
texts = self.parseNode(texts, node=self.node)
if IfHandler.checking_mark(self.node):
# 设置为反的
texts.expand_data['if_next'] = not if_next
return texts
class ElseNode(AbstractNode):
def apply(self, *args, **kwargs):
texts = kwargs['texts']
if 'if_next' not in texts.expand_data.keys():
ALog.log_error(
msg=ExceptionI18n.tt('before_else_not_if'),
obj=TagHandlerError, LogObject=self.target_obj.log_obj, raise_exception=True)
else:
if_next = texts.expand_data['if_next']
if not if_next:
texts.expand_data.pop('if_next')
return texts
else:
texts.expand_data.pop('if_next')
return self.parseNode(texts, self.node)
class SwitchNode(AbstractNode):
def apply(self, *args, **kwargs):
texts = kwargs['texts']
axc_node = self.aestate_xml_cls(self.root, self.node, self.params)
field_name = axc_node.attrs['field'].text
try:
value = self.params[field_name]
except KeyError as ke:
ALog.log_error(
msg=ExceptionI18n.tt('not_field_name') % field_name,
obj=TagHandlerError, LogObject=self.target_obj.log_obj, raise_exception=True)
case_nodes = axc_node.children['case']
check_node = None
for cn in case_nodes:
if cn.attrs['value'].text == value:
check_node = cn.node
break
if check_node is None:
check_node = axc_node.children['default'][0].node
texts = self.parseNode(texts, check_node)
return texts
class IncludeNode(AbstractNode):
def apply(self, *args, **kwargs):
texts = kwargs['texts']
axc_node = self.aestate_xml_cls(self.root, self.node, self.params)
from_node_name = axc_node.attrs['from'].text
sql_nodes = self.target_obj.xNode.children['sql']
target_template = None
for t in sql_nodes:
if t.attrs['id'].text == from_node_name:
target_template = t
break
if target_template is None:
ALog.log_error(
msg=ExceptionI18n.tt('not_from_node_name') % from_node_name,
obj=NotFindTemplateError, LogObject=self.target_obj.log_obj, raise_exception=True)
texts = self.parseNode(texts, target_template.node)
return texts
class ResultABC(ABC):
@staticmethod
def get_type(type_str: str):
t = type_str.split('.')
cls_name = t[len(t) - 1]
package_name = '.'.join(t[:len(t) - 1])
try:
package = importlib.import_module(package_name)
_type = getattr(package, cls_name)
return _type
except Exception:
ALog.log_error(
msg=ExceptionI18n.tt('module_not_found') % package_name,
obj=NotFindTemplateError, raise_exception=True)
@staticmethod
def generate(data: list, structure: dict):
ret = []
if not isinstance(data, list) and data is not None:
data = [data]
if data is not None:
for _data_item in data:
cls = ResultABC.get_type(structure['_type'])
if cls is None:
ALog.log_error(
msg=ExceptionI18n.tt('not_defined') % structure['_type'],
obj=NotFindTemplateError, raise_exception=True)
# check whether the class is a POJO; different classes expose their fields through different methods
is_pojo = others.dp_equals_base(cls, Repository)
obj = cls(abst=True) if is_pojo else cls()
# obj = cls() if others.dp_equals_base(cls, object) else cls
for field, properties in structure.items():
if field != '_type' and field != '_single':
if isinstance(properties, dict):
if is_pojo:
obj.add_field(field, ResultABC.generate(_data_item, properties))
else:
setattr(obj, field, ResultABC.generate(_data_item, properties))
# obj.__append_field__
# setattr(obj, field, ResultABC.generate(_data_item, properties))
else:
# boolean replacement: map tinyint-backed fields to Python bools
if is_pojo:
obj_fields = obj.getFields()
t = str(type(obj_fields[properties] if properties in obj_fields.keys() else object))
fields_type = re.findall("\'.*\.(.*)\'", t)
if 'boolField' in fields_type:
_data_item[field] = bool(_data_item[field])
obj.add_field(properties, _data_item[field])
else:
setattr(obj, properties, _data_item[field])
# setattr(obj, properties, _data_item[field])
if '_single' in structure.keys() and structure['_single'] is True:
ret = obj
else:
ret.append(obj)
else:
if '_single' in structure.keys() and structure['_single'] is True:
ret = None
else:
ret.append(None)
if isinstance(ret, list):
return QuerySet(query_items=ret)
else:
return ret
class ResultMapNode(object):
def __init__(self, target_obj, node, data):
self.target_obj = target_obj
self.node = node
self.data = data
def apply(self, resultMaps):
resultMapTags = self.target_obj.xNode.children['resultMap']
if 'resultMap' in self.node.expand_data.keys():
resultType = self.node.expand_data['resultMap']
resultNode = None
for i in resultMapTags:
if i.attrs['id'].text == resultType:
# no break here: ids may repeat and the last match wins
resultNode = i
if resultNode is None:
ALog.log_error(
msg=ExceptionI18n.tt('not_result_map') % resultType,
obj=NotFindTemplateError, LogObject=self.target_obj.log_obj, raise_exception=True)
structure = ForeignNode.apply(resultNode, resultMaps)
return ResultABC.generate(self.data, structure)
elif 'resultType' in self.node.expand_data.keys():
resultType = self.node.expand_data['resultType']
# resultType carries no column mapping, so the class's own field names are used as the structure
obj = ResultABC.get_type(resultType)
# collect the fields of the class
fields = others.get_static_fields(obj)
structure = {
'_type': resultType,
**{f: f for f in fields}
}
return ResultABC.generate(self.data, structure)
else:
ALog.log_error(
msg=ExceptionI18n.tt('lack_result_type') % self.node.expand_data['id'],
obj=NotFindTemplateError, LogObject=self.target_obj.log_obj, raise_exception=True)
class ForeignNode:
@staticmethod
def apply(resultNode, resultMaps):
"""
:param resultNode: the resultMap node
"""
# check whether this node references another resultMap via `ref`
if 'ref' in resultNode.attrs:
result_map_match = None
for item in resultMaps:
if item.attrs['id'].text == resultNode.attrs['ref'].text:
result_map_match = item
break
if result_map_match is None:
ALog.log_error(
msg=ExceptionI18n.tt('not_result_map') % resultNode.attrs['ref'].text,
obj=TagAttributeError, raise_exception=True)
else:
resultNode = result_map_match
# the structure must declare a return value `type`
if 'type' not in resultNode.attrs.keys():
ALog.log_error(
msg=ExceptionI18n.tt('result_map_not_type'),
obj=TagAttributeError, raise_exception=True)
structure = {'_type': resultNode.attrs['type'].text}
if 'single' in resultNode.attrs.keys():
structure['_single'] = True if resultNode.attrs['single'].text == 'true' else False
if 'result' in resultNode.children.keys():
for i in resultNode.children['result']:
structure[i.attrs['column'].text] = i.attrs['properties'].text
return structure
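# A rough sketch of the intermediate `structure` dict that ForeignNode.apply()
# builds and ResultABC.generate() consumes; the class path and column names
# below are made-up placeholders, not part of the framework:
#
#   structure = {
#       '_type': 'models.UserModel',   # importable class path (hypothetical)
#       '_single': False,              # True -> return one object instead of a QuerySet
#       'user_name': 'username',       # column name in the row -> attribute/field name
#       'create_at': 'createAt',
#   }
#
# ResultABC.generate(rows, structure) imports the class named by '_type',
# instantiates it once per row and copies each mapped column onto the object.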
|
Aestate
|
/Aestate-1.1.0.tar.gz/Aestate-1.1.0/aestate/work/xmlhandler/nodes.py
|
nodes.py
|
import re
import sys
from aestate.exception import BaseSqlError
from aestate.i18n import InfoI18n
from aestate.util.Log import ALog
from dbutils.pooled_db import PooledDB
def parse_kwa(db, **kwargs):
"""
Parse and execute a sql statement.
:param db: the db_util object (a connection taken from the pool)
:param kwargs: all supported options:
last_id: whether the id of the last inserted row is needed, default False
sql: the sql statement, already formatted with %s placeholders
params: the values used to fill the placeholders
print_sql: whether to print the sql statement
many: whether to execute against multiple parameter sets
"""
try:
cursor = db.cursor()
# whether to execute the sql for multiple parameter sets
sql = kwargs['sql']
params = kwargs['params'] if 'params' in kwargs.keys() else None
many_flag = 'many' in kwargs.keys() and kwargs['many']
if ('print_sql' in kwargs.keys() and kwargs['print_sql'] is True) or (kwargs['config_obj'].print_sql is True):
_l = sys._getframe().f_back.f_lineno
# print the sql statement
msg = InfoI18n.tt("statement") + ' ==> ' + (f'{sql} - many=True' if many_flay else sql)
ALog.log(obj=db, line=_l, task_name='ASQL', msg=msg,
LogObject=kwargs['log_obj'] if 'log_obj' in kwargs.keys() else None)
# print the bound parameters
output_params = params if params else ()
parameters = InfoI18n.tt("parameters") + " ==> "
for i in output_params:
parameters += ' {}{},'.format(str(i), re.findall('<class \'(.*)\'>', str(type(i))))
if parameters[-1] == ',':
parameters = parameters[:-1]
ALog.log(obj=db, line=_l, task_name='ASQL', msg=parameters,
LogObject=kwargs['log_obj'] if 'log_obj' in kwargs.keys() else None)
if many_flag:
cursor.executemany(sql,
tuple(params) if params else ())
else:
if params:
cursor.execute(sql, tuple(params))
else:
cursor.execute(sql)
# try:
# CACodeLog.log(obj=db, line=_l, task_name='Print Sql', msg=cursor._executed)
# except:
# CACodeLog.log(obj=db, line=_l, task_name='Print Sql', msg=msg)
return cursor
except Exception as e:
db.rollback()
mysql_err = BaseSqlError(e)
mysql_err.raise_exception()
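# A minimal sketch of the keyword contract parse_kwa() expects; the values are
# placeholders and `config_obj`/`log_obj` normally come from the framework's
# configuration layer:
#
#   cursor = parse_kwa(
#       db=db,                                       # connection taken from the pool
#       sql='SELECT * FROM demo WHERE id = %s',
#       params=[1],
#       print_sql=True,                              # True short-circuits the config_obj lookup
#       many=False,
#   )
#   rows = cursor.fetchall()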
class Db_opera(PooledDB):
def __init__(self, *args, **kwargs):
if 'POOL' not in kwargs or kwargs['POOL'] is None:
self.POOL = self
if 'POOL' in kwargs.keys():
kwargs.pop('POOL')
super(Db_opera, self).__init__(*args, **kwargs)
def get_conn(self):
"""
Get a database connection from the connection pool.
:return:
"""
conn = self.POOL.connection()
return conn
def select(self, **kwargs):
"""
Query multiple rows.
:param kwargs: all supported options:
last_id: whether the id of the last inserted row is needed, default False
sql: the sql statement, already formatted with %s placeholders
params: the values used to fill the placeholders
print_sql: whether to print the sql statement
:return:
"""
db = self.get_conn()
_l = sys._getframe().f_back.f_lineno
try:
cursor = parse_kwa(db=db, **kwargs)
# column names
col = cursor.description
data = cursor.fetchall()
_result = []
for data_index, data_value in enumerate(data):
_messy = {}
for item_index, item_value in enumerate(data_value):
_messy[col[item_index][0]] = item_value
_result.append(_messy)
# caching (currently disabled)
# if scm.status == CacheStatus.OPEN:
# scm.set(sql=sql, value=_result, instance=kwargs['instance'] if 'instance' in kwargs.keys() else None)
msg = InfoI18n.tt("selectResult") + ' ==> ' + (str(len(_result)) if _result is not None else '0')
ALog.log(obj=db, line=_l, task_name='ASQL', msg=msg,
LogObject=kwargs['log_obj'] if 'log_obj' in kwargs.keys() else None)
db.close()
return _result
except Exception as e:
db.rollback()
ALog.log_error(msg=str(e), obj=e,
LogObject=kwargs['log_obj'] if 'log_obj' in kwargs.keys() else None,
raise_exception=True)
finally:
db.close()
def insert(self, many=False, **kwargs):
"""
Execute an insert statement.
:param kwargs: all supported options:
last_id: whether the id of the last inserted row is needed, default False
sql: the sql statement, already formatted with %s placeholders
params: the values used to fill the placeholders
:param many: whether to execute against multiple rows
"""
db = self.get_conn()
_l = sys._getframe().f_back.f_lineno
try:
cursor = parse_kwa(db=db, many=many, **kwargs)
db.commit()
# number of affected rows
rowcount = cursor.rowcount
msg = InfoI18n.tt("updateResult") + ' ==> ' + str(rowcount)
ALog.log(obj=db, line=_l, task_name='ASQL', msg=msg,
LogObject=kwargs['log_obj'] if 'log_obj' in kwargs.keys() else None)
# return the number of affected rows (plus the last insert id if requested)
if kwargs['last_id']:
return rowcount, cursor.lastrowid
else:
return rowcount
except Exception as e:
db.rollback()
ALog.log_error(msg=str(e), obj=e,
LogObject=kwargs['log_obj'] if 'log_obj' in kwargs.keys() else None,
raise_exception=True)
finally:
db.close()
def update(self, **kwargs):
"""
Execute an update statement.
:param kwargs: all supported options:
last_id: whether the id of the last inserted row is needed, default False
sql: the sql statement, already formatted with %s placeholders
params: the values used to fill the placeholders
"""
return self.insert(**kwargs)
def delete(self, **kwargs):
"""
Execute a delete statement.
:param kwargs: all supported options:
last_id: whether the id of the last inserted row is needed, default False
sql: the sql statement, already formatted with %s placeholders
params: the values used to fill the placeholders
"""
return self.insert(**kwargs)
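# A rough usage sketch, assuming a DB-API driver such as pymysql; host,
# credentials and table are placeholders, and in real use the framework also
# passes config/log objects through these kwargs:
#
#   import pymysql
#   db = Db_opera(creator=pymysql, maxconnections=8,
#                 host='127.0.0.1', user='root', password='***', db='demo')
#   rows = db.select(sql='SELECT * FROM demo WHERE id = %s',
#                    params=[1], print_sql=True)
#   affected, new_id = db.insert(sql='INSERT INTO demo(`name`) VALUES(%s)',
#                                params=['foo'], print_sql=True, last_id=True)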
|
Aestate
|
/Aestate-1.1.0.tar.gz/Aestate-1.1.0/aestate/work/sql/ExecuteSql.py
|
ExecuteSql.py
|
from concurrent.futures import ThreadPoolExecutor
pool = ThreadPoolExecutor()
class DbOperation(object):
"""
Important database operations are migrated into this class.
"""
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
def start(self, *args, **kwargs):
"""
Start the task.
Attributes:
func: the function to be invoked
"""
# the function to execute
func = kwargs['func']
# thread-local state
_lock = kwargs['t_local']
name = kwargs['__task_uuid__']
# # set up the task
# _kw = aj.load(aj.parse(_lock))
_kw = _lock.__dict__
kwargs.update(_kw)
_t = pool.submit(lambda x, y: func(*x, **y), args, kwargs)
# _t = threading.Thread(target=func, args=args, kwargs=kwargs, name=name)
# if not _lock.close_log:
# ALog.log(obj=_t, msg='RUNNING', task_name=name, LogObject=log_obj)
result = _t.result()
# return the result
return result[name]
def __find_all__(self, *args, **kwargs):
"""
Task method.
"""
return self.__find_by_field__(*args, **kwargs)
def __find_by_field__(self, *args, **kwargs):
"""
Task method.
"""
fields = kwargs['config_obj'].parse_key(*args, is_field=True, left=kwargs['sqlFields'].left_subscript,
right=kwargs['sqlFields'].right_subscript)
sql_str = kwargs['sqlFields'].find_str + fields + kwargs['sqlFields'].from_str + kwargs['__table_name__']
kwargs['sql'] = sql_str
return self.__find_many__(**kwargs)
def __find_many__(self, *args, **kwargs):
"""
Task method.
"""
# kwargs['conf_obj'] = config_obj
# kwargs = kwargs['ParseUtil'].find_print_sql(**kwargs)
return self.__find_sql__(**kwargs)
def __find_sql__(self, *args, **kwargs):
"""
Task method.
"""
kwargs = kwargs['ParseUtil'].find_print_sql(**kwargs)
_rs = kwargs['db_util'].select(**kwargs)
result = []
if _rs:
for i in _rs:
obj = kwargs['ParseUtil'].parse_obj(i, kwargs['instance'])
result.append(obj)
return {
kwargs['__task_uuid__']: result
}
def __insert__(self, *args, **kwargs):
"""
Task method.
:param pojo: the POJO object
"""
kwargs = kwargs['ParseUtil'].find_print_sql(**kwargs)
kwargs = kwargs['ParseUtil'].find_last_id(**kwargs)
kwargs['ParseUtil'].fieldExist(kwargs, 'pojo', raise_exception=True)
if 'many' in kwargs and kwargs['many']:
# multi-row insert: take the generated sql, then collect the params of every item
for item in kwargs['pojo']:
filed_list = kwargs['config_obj'].parse_insert_pojo(item,
__table_name__=kwargs['__table_name__'],
insert_str=kwargs['sqlFields'].insert_str,
values_str=kwargs['sqlFields'].values_str)
if 'params' not in kwargs.keys() or not isinstance(kwargs['params'], list):
kwargs['params'] = []
kwargs['sql'] = filed_list['sql']
kwargs['params'].append(filed_list['params'])
else:
filed_list = kwargs['config_obj'].parse_insert_pojo(kwargs['pojo'], __table_name__=kwargs['__table_name__'],
insert_str=kwargs['sqlFields'].insert_str,
values_str=kwargs['sqlFields'].values_str)
kwargs.update(filed_list)
return {
kwargs['__task_uuid__']: kwargs['db_util'].insert(**kwargs)
}
def __update__(self, *args, **kwargs):
kwargs = kwargs['ParseUtil'].find_print_sql(**kwargs)
kwargs = kwargs['ParseUtil'].find_last_id(**kwargs)
kwargs['sql'], kwargs['params'] = kwargs['config_obj'].parse_update(kwargs['pojo'], kwargs['key'])
return {
kwargs['__task_uuid__']: kwargs['db_util'].update(**kwargs)
}
def __remove__(self, *args, **kwargs):
kwargs = kwargs['ParseUtil'].find_print_sql(**kwargs)
kwargs = kwargs['ParseUtil'].find_last_id(**kwargs)
kwargs['sql'], kwargs['params'] = kwargs['config_obj'].parse_remove(kwargs['pojo'], kwargs['key'])
return {
kwargs['__task_uuid__']: kwargs['db_util'].update(**kwargs)
}
|
Aestate
|
/Aestate-1.1.0.tar.gz/Aestate-1.1.0/aestate/work/sql/ProxyOpera.py
|
ProxyOpera.py
|
__version__ = '1.1.0'
__description__ = "Aestate framework for Python,You can see:https://gitee.com/aecode/aestate"
__author__ = "CACode"
__author_email__ = "[email protected]"
__url__ = "https://gitee.com/aecode/aestate"
__issues__ = 'https://gitee.com/aecode/aestate/issues'
__license__ = 'Apache License 2.0'
__project_name__ = 'Aestate'
__logo__ = """
:: Aestate Framework :: (version:%s)
+ __ _ _ __ +
+ / / /\ | | | | \ \ +
+ / / / \ ___ ___| |_ __ _| |_ ___ \ \ +
+ | | / /\ \ / _ \/ __| __/ _` | __/ _ \ | | +
+ \ \ / ____ \ __/\__ \ || (_| | || __/ / / +
========\_\=/_/====\_\___||___/\__\__,_|\__\___|=/_/========
""" % __version__
__log_logo__ = """
:: Aestate Framework :: (version:%s)
__ ____ ___ ____ __ ____ ____
/__\ ( ___) / __) (_ _) /__\ (_ _) ( ___)
/(__)\ )__) \__ \ )( /(__)\ )( )__)
(__)(__) (____) (___/ (__) (__)(__) (__) (____)
""" % __version__
import importlib
import os.path
try:
from prettytable import PrettyTable
except ModuleNotFoundError as e:
print("请先安装 [prettytable] 再执行 [-h] 命令,使用 [pip install prettytable]")
class Commands:
def __init__(self, *args):
"""
The @staticmethod markers below only exist to silence IDE warnings; they have no other meaning.
"""
self.args = args
self.c = {
"": (
self.start,
'Show the aestate logo and version number, used to check that aestate installed correctly',
"aestate"
),
"-v": (
self.version,
"显示aestate的版本号",
"aestate -v"
),
# "-create": (
# self.create,
# "将文件内存在pojo对象的类生成到数据库中称为数据库的表"
# "数据库格式化类型参考默认的 [mysql] 格式",
# 'aestate -create [文件名] [数据库类型 (可选)]'
# ),
# "-m": (
# self.make,
# "将数据库中的表同步生成到当前目录下的 [model.py],并默认命名为 [数据库命_表名]",
# 'aestate -m [--n [生成的文件名 (可选) ]] [--nn [生成的类名 (可选)]]'
# ),
# "-enc": (
# self.enc,
# "加密模型",
# 'aestate -enc [密码]'
# ),
# "-dec": (
# self.dec,
# "解密模型",
# 'aestate -dec [被加密后的文件] [密码]'
# ),
# "-check": (
# self.check,
# "检查模型与数据库中的表结构是否一直",
# 'aestate -check [文件名] [数据库名]'
# ),
# "-h": (
# self.help,
# "帮助文档",
# 'aestate -h'
# ),
# "-g": (
# self.generate,
# "在当前目录下生成一个",
# 'aestate -gc 项目名'
# )
}
def generate(self):
code = '''
from aestate.work.command import Generate
from
models:List[Pojo] = []
if __name__ == '__main__':
Generate().start(models)
'''
if not os.path.exists('start.py') or not os.path.isfile('start.py'):
with open('start.py', 'w') as f:
f.write(code)
def start(self):
print(__logo__)
def create(self):
print(__logo__)
try:
file = self.args[2]
db_name = self.args[3]
except IndexError:
raise IndexError("为了保证数据库的sql执行顺利,请填写pojo存在的文件名和数据库名称")
import inspect
temp_module = importlib.import_module(file)
temp_classes = inspect.getmembers(temp_module, inspect.isclass)
for name, class_ in temp_classes:
c = class_()
c.orm.create()
def enc(self):
pass
def dec(self):
pass
def version(self):
print(__version__)
def make(self):
pass
def check(self):
print(__logo__)
try:
file = self.args[2]
db_name = self.args[3]
except IndexError:
raise IndexError("为了保证数据库的sql执行顺利,请填写pojo存在的文件名和数据库名称")
import inspect
temp_module = importlib.import_module(file)
temp_classes = inspect.getmembers(temp_module, inspect.isclass)
for name, class_ in temp_classes:
c = class_()
c.orm.check()
def help(self):
table = PrettyTable(["命令", "使用方法", "描述"])
table.border = True
table.junction_char = '-'
[table.add_row([k, v[2], v[1]]) for k, v in self.c.items()]
print(table)
class Generate:
def start(self):
pass
if __name__ == '__main__':
Generate().start()
|
Aestate
|
/Aestate-1.1.0.tar.gz/Aestate-1.1.0/aestate/work/commands/__init__.py
|
__init__.py
|
import datetime
import os
import sys
import threading
import traceback
from aestate.util.others import write, logTupleToText
from aestate.work.Cache import LogCache
from aestate.work.Modes import Singleton
from aestate.exception import LogStatus
from aestate.util import others
from aestate.work.commands import __log_logo__
class FieldsLength:
DATETIME_FORMAT = 27
INFO_FORMAT = 5
LINE_FORMAT = 5
OPERATION_FORMAT = 14
# HEX_FORMAT = 17
TASK_FORMAT = 7
# CLASS_FORMAT = 70
MSG_FORMAT = 0
class ConsoleColor:
"""
Console color definitions.
"""
class FontColor:
# black
BLACK = 30
# gray
GRAY = 90
# pink
PINK = 31
# red
RED = 35
# green
GREEN = 32
# light green
LIGHT_GREEN = 91
# yellow
YELLOW = 33
# light yellow
LIGHT_YELLOW = 92
# dark yellow
DARK_YELLOW = 93
# purple
PURPLE = 34
# light purple
LIGHT_PURPLE = 96
# cyan
CYAN = 36
# white
WHITE = 37
# color used for success and info messages
SUCCESS_COLOR = GREEN
# color used for failures and errors
ERROR_COLOR = RED
# color used for warnings
WARNING_COLOR = YELLOW
class BackgroundColor:
# black
BLACK = 40
# red
RED = 41
# green
GREEN = 42
# yellow
YELLOW = 43
# blue
BLUE = 44
# fuchsia
FUCHSIA = 45
# cyan
CYAN = 46
# white
WHITE = 47
class ShowType:
# default
DEFAULT = 0
# highlight
HIGHLIGHT = 1
# underline
UNDERSCORE = 4
# blinking
FLASHING = 5
# reverse video
REVERSE = 7
# invisible
INVISIBLE = 8
class ConsoleWrite:
def __init__(self):
self.fontColor = ConsoleColor.FontColor.GREEN
self.showType = ConsoleColor.ShowType.DEFAULT
self.backColor = None
# @staticmethod
# def write(messages, consoleWriteObj=None):
# prefix = "{};".format(consoleWriteObj.showType) if consoleWriteObj.showType is not None else ""
# center = ";".format(consoleWriteObj.backColor) if consoleWriteObj.backColor is not None else ""
# suffix = "{}m{}".format(consoleWriteObj.fontColor, messages)
# out = "\033[{}{}{}\033[0m".format(prefix, center, suffix)
# print(out)
@staticmethod
def format_color(text, color=None):
if color is not None:
prefix = "{};".format(ConsoleColor.ShowType.DEFAULT)
suffix = "{}m{}".format(color, text)
out = "\033[{};{}\033[0m".format(prefix, suffix)
return out
return text
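# A small sketch of how the ANSI formatting above is used; purely illustrative:
#
#   green = ConsoleWrite.format_color("query ok", ConsoleColor.FontColor.GREEN)
#   print(green)                                  # shown in green on ANSI terminals
#   print(ConsoleWrite.format_color("plain"))     # no color given -> returned unchanged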
class ALog(object):
_instance_lock = threading.RLock()
def __init__(self, path, print_flag=False, save_flag=False, max_clear=10):
"""
Initialise the configuration.
:param path: directory where log files are saved
:param print_flag: whether to print log entries, default False
:param save_flag: whether to save log entries to file, default False
:param max_clear: maximum log file size before a new file is started, default 10 (unit: MB)
"""
self.max_clear = max_clear * 1024 * 1000
self.path = path
self.print_flag = print_flag
self.save_flag = save_flag
@staticmethod
def pure_log(msg, **kwargs):
"""
Write a task execution log entry.
:param msg: the message
"""
ALog.log(msg=msg, **kwargs)
@staticmethod
def format_text(field: LogStatus, line, obj, task_name, msg, ned_text=False,
text_color: ConsoleColor.FontColor = None):
"""
Format the string with console colors.
"""
try:
if obj is not None:
write_repr = others.fullname(obj)
else:
write_repr = 'OBJECT IS NULL'
except TypeError:
write_repr = 'OBJECT CAN`T NOT PARSE'
# timestamp
t = datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S.%f')
# plain-text version (no color codes)
pure_text = ' '.join([str(t), str(field.value), str(line), str(hex(id(obj))),
'[{}]'.format(task_name), str(write_repr), f" : {msg}"])
t = ConsoleWrite.format_color(f"{t}".ljust(FieldsLength.DATETIME_FORMAT), ConsoleColor.FontColor.CYAN)
# log level
_field = ConsoleWrite.format_color(f"{field.value}".rjust(FieldsLength.INFO_FORMAT),
ConsoleColor.FontColor.GREEN
if field == LogStatus.Info
else ConsoleColor.FontColor.RED
if field == LogStatus.Error
else ConsoleColor.FontColor.YELLOW
if field == LogStatus.Warn
else ConsoleColor.FontColor.YELLOW)
# caller line number
line = f"{line}".rjust(FieldsLength.LINE_FORMAT)
# id of the executing object (hex)
hex_id = ConsoleWrite.format_color(f" {str(hex(id(obj))).upper()}", ConsoleColor.FontColor.PINK)
# task name
task_name = ConsoleWrite.format_color(f"{task_name}".rjust(FieldsLength.TASK_FORMAT),
ConsoleColor.FontColor.PURPLE)
# class name
write_repr = ConsoleWrite.format_color(write_repr,
ConsoleColor.FontColor.LIGHT_GREEN
if field != LogStatus.Error
else ConsoleColor.FontColor.RED)
# colorize the message
msg = ConsoleWrite.format_color(f" : {msg}", text_color)
# assemble the full line
info = "{}{}{}{}{}{}{}".format(t, _field, line, hex_id, ' [{}] '.format(task_name), write_repr, msg)
if ned_text:
return info, pure_text
return info
@staticmethod
def log(msg, obj=None, line=sys._getframe().f_back.f_lineno, task_name='TEXT', LogObject=None,
field: LogStatus = LogStatus.Info, func=None,
text_color: ConsoleColor.FontColor = None, **kwargs):
"""
Write a task execution log entry.
:param msg: the message
:param obj: the object the log entry refers to (its address is printed)
:param line: the line number of the caller
:param task_name: the task name
:param LogObject: the object the entry is written out to
:param field: the log level
:param func: custom callback invoked after the entry is written
:param text_color: text color
"""
try:
if obj is not None:
write_repr = others.fullname(obj)
else:
write_repr = 'OBJECT IS NULL'
except TypeError:
write_repr = 'OBJECT CAN`T NOT PARSE'
# write_repr = repr if repr and not repr_c else repr_c[0] if repr_c else type(obj)
# format: time  level  caller-line  object-address  task  class  message
t = datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]
con_text = ' '.join([str(t), str(field.value), str(line), str(hex(id(obj))),
'[{}]'.format(task_name), str(write_repr), f" : {msg}"])
info = ALog.format_text(field, line, obj, task_name, msg, text_color=text_color)
# print(info)
def __log_obj_write__(_object):
if _object is not None:
if field == LogStatus.Info:
_object.info(con_text, pure_text=info, line=line, obj=obj)
elif field == LogStatus.Error:
_object.error(con_text, pure_text=info, line=line, obj=obj)
elif field == LogStatus.Warn:
_object.warn(con_text, pure_text=info, line=line, obj=obj)
else:
_object.info(con_text, pure_text=info, line=line, obj=obj)
else:
print(info)
if obj is not None:
if hasattr(obj, 'log_obj'):
__log_obj_write__(obj.log_obj)
else:
__log_obj_write__(LogObject)
else:
__log_obj_write__(LogObject)
if func is not None:
func(con_text)
return info
@staticmethod
def warning(**kwargs):
ALog.log(task_name='WARNING', field=LogStatus.Warn, **kwargs)
@staticmethod
def log_error(msg=None, obj=None, line=sys._getframe().f_back.f_lineno, task_name='ERROR',
LogObject=None, raise_exception=False):
"""
:param msg: description of the error
:param line: line number
:param obj: the executing object; when raising is allowed this must be an Exception class or subclass
:param task_name: unique name of the thread/task
:param LogObject: the log object
:param raise_exception: whether to raise the exception
"""
text = list(msg)
def get_stack():
text.append('\n')
exc_type, exc_value, exc_traceback_obj = sys.exc_info()
extracted_list = traceback.extract_tb(exc_traceback_obj)
for item in traceback.StackSummary.from_list(extracted_list).format():
text.append(item)
if raise_exception:
if isinstance(obj, type):
try:
raise obj(msg)
except obj:
get_stack()
text.append(f'{obj.__name__} :{msg}')
else:
get_stack()
text.append(f'{obj.__class__.__name__} :{msg}')
ALog.log(msg=''.join(text), obj=obj, line=line, task_name=task_name,
LogObject=LogObject if LogObject is not None else None, field=LogStatus.Error,
text_color=ConsoleColor.FontColor.RED)
def template(self, status: LogStatus, *content, **kwargs):
"""
Common template for log entries; error, info and warn all go through here.
"""
# fetch the log cache object
log_cache = LogCache()
# fetch the file name from the cache; creating a new file for every call would be unreasonable
_path = log_cache.get_filename(self.path, self.max_clear, status)
if status == LogStatus.Info:
logo_show = 'info_logo_show'
elif status == LogStatus.Warn:
logo_show = 'warn_logo_show'
elif status == LogStatus.Error:
logo_show = 'error_logo_show'
else:
logo_show = 'info_logo_show'
# if the logo has not been written to this log yet, write it first
ls = getattr(log_cache, logo_show)
if not ls:
setattr(log_cache, logo_show, True)
self.log_util(_path, __log_logo__)
self.log_util(_path, *content)
line = kwargs['line'] if 'line' in kwargs.keys() else sys._getframe().f_back.f_lineno
obj = kwargs['obj'] if 'obj' in kwargs.keys() else self
text = kwargs['pure_text'] \
if 'pure_text' in kwargs.keys() \
else ALog.format_text(status, line, obj, status.value, logTupleToText(False, *content))
print(text)
def info(self, *content, **kwargs):
"""
Info/success log.
:param content: the content
:return:
"""
self.template(LogStatus.Info, *content, **kwargs)
def warn(self, *content, **kwargs):
"""
Warning log.
:param content: the content
:return:
"""
self.template(LogStatus.Warn, *content, **kwargs)
def error(self, *content, **kwargs):
"""
Error log.
:param content: the content
:return:
"""
self.template(LogStatus.Error, *content, **kwargs)
def log_util(self, path_str, *content):
"""
Log writing helper.
:param path_str:
:param content:
:return:
"""
path = self.get_path(path_str)
if self.save_flag:
write(path, *content)
def get_path(self, end_path):
"""
Build the absolute path used by the logger.
:param end_path:
:return:
"""
_STATIC_TXT = os.path.join('', self.path + end_path)
return _STATIC_TXT
def __new__(cls, *args, **kwargs):
instance = Singleton.createObject(cls)
return instance
class logging(object):
@classmethod
def gen(cls, _object) -> ALog:
"""
Get the ALog object from an already instantiated object.
"""
return _object.log_obj
@classmethod
def gen_cls(cls, _class) -> ALog:
"""
Get the ALog object from a class (the class is instantiated first).
"""
return _class().log_obj
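# A rough usage sketch based on the signatures above; the path is a placeholder:
#
#   log = ALog(path='./logs/', print_flag=True, save_flag=True, max_clear=10)
#   log.info('service started')
#   log.warn('cache miss for key', 'user:1')
#   ALog.log(msg='plain console entry', task_name='DEMO')
#
# ALog builds its instance through Singleton.createObject in __new__, so it is
# intended to behave as a singleton, and logging.gen(obj) / logging.gen_cls(cls)
# fetch the ALog attached to a framework object via its `log_obj` attribute.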
|
Aestate
|
/Aestate-1.1.0.tar.gz/Aestate-1.1.0/aestate/util/Log.py
|
Log.py
|
import os
import time
from datetime import datetime
from aestate.conf import BASE_ATTR
try:
import matplotlib.pyplot as plt
except ImportError:
pass
def conversion_types(val):
"""
Convert the type of val so it can be inserted into an array (datetime values are formatted as strings).
"""
if isinstance(val, datetime):
val = val.strftime('%Y-%m-%d %H:%M:%S')
return val
def date_format(time_obj=time, fmt='%Y-%m-%d %H:%M:%S') -> str:
"""
Convert a time object to a formatted string.
:param time_obj:
:param fmt:
:return:
"""
_tm = time_obj.time()
_t = time.localtime(_tm)
return time.strftime(fmt, _t)
def time_to_datetime(t_time):
"""
Convert a timestamp to a datetime.
"""
try:
d_time = datetime.fromtimestamp(t_time)
except OSError as ose:
return None
return d_time
def get_static_fields(cls):
"""
Get the class attributes that are not default/built-in ones.
"""
retD = list(set(dir(cls)).difference(set(BASE_ATTR)))
return retD
def fullname(o):
"""获取对象的类名"""
module = o.__class__.__module__
if module is None or module == str.__class__.__module__:
cls_name = o.__class__.__name__
else:
cls_name = module + '.' + o.__class__.__name__
if cls_name == 'type':
cls_name = o.__base__.__module__ + '.' + o.__base__.__name__
return cls_name
def logTupleToText(next_line=True, *content):
temp = []
if isinstance(content, str):
temp.append(content)
elif isinstance(content, tuple):
for c in content:
if isinstance(c, tuple):
temp.extend(c)
else:
temp.append(str(c))
else:
temp.append(str(content))
if next_line:
temp.append('\n')
return ''.join([str(_) for _ in temp])
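# Quick illustrative examples of the flattening above:
#
#   logTupleToText(False, 'a', 'b')        -> 'ab'
#   logTupleToText(True, 'a', ('b', 'c'))  -> 'abc\n'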
def write(path, *content):
"""
Write content out to a file.
:param path: target path
:param content: the content
:return:
"""
# normalise separators: some callers use '/', others use '\\'
_sep_path = []
s = path.split('/')
[_sep_path.extend(item.split('\\')) for item in s]
_path = ''
for i in _sep_path:
_end = _sep_path[len(_sep_path) - 1]
if i != _end:
_path += str(i) + os.sep
else:
_path += str(i)
if not os.path.exists(_path):
if '.' not in i:
os.makedirs(_path)
_write_content = logTupleToText(True, *content)
with open(os.path.join(_path), mode="a", encoding="UTF-8") as f:
f.write(_write_content)
f.close()
def get_left_length(node):
if not node:
return 0
if not node.left:
return 1
if not node.right:
return 2 + get_left_length(node.right)
return 2 + get_left_length(node.left)
def get_right_length(node):
if not node:
return 0
return 1 + get_right_length(node.right)
def get_height(node):
if not node:
return 0
return 1 + max([get_height(node.left), get_height(node.right)])
def get_node_count(node):
if not node:
return 0
return 1 + get_node_count(node.left) + get_node_count(node.right)
def get_fontsize(count):
if count < 10:
return 30
if count < 20:
return 20
return 16
def show_node(node, ax, height, index, font_size):
if not node:
return
x1, y1 = None, None
if node.left:
x1, y1, index = show_node(node.left, ax, height - 1, index, font_size)
x = 100 * index - 50
y = 100 * height - 50
if x1:
plt.plot((x1, x), (y1, y), linewidth=2.0, color='b')
circle_color = "black" if node.is_black_node() else 'r'
text_color = "beige" if node.is_black_node() else 'black'
ax.add_artist(plt.Circle((x, y), 50, color=circle_color))
ax.add_artist(plt.Text(x, y, node.val, color=text_color, fontsize=font_size, horizontalalignment="center",
verticalalignment="center"))
# print(str(node.val), (height, index))
index += 1
if node.right:
x1, y1, index = show_node(node.right, ax, height - 1, index, font_size)
plt.plot((x1, x), (y1, y), linewidth=2.0, color='b')
return x, y, index
def draw_node_line(node, ax, height, index):
x1, y1 = None, None
if node.left:
x1, y1, index = draw_node_line(node.left, ax, height - 1, index)
x = 100 * index - 50
y = 100 * height - 50
if x1:
plt.plot((x1, x), (y1, y), linewidth=2.0, color='b')
index += 1
if node.right:
x1, y1, index = draw_node_line(node.right, ax, height - 1, index)
plt.plot((x1, x), (y1, y), linewidth=2.0, color='b')
return x, y, index
def show_rb_tree(tree, title):
fig, ax = plt.subplots()
left, right, height = get_left_length(tree), get_right_length(tree), get_height(tree)
# print(left, right, height)
plt.ylim(0, height * 100 + 100)
plt.xlim(0, 100 * get_node_count(tree) + 100)
show_node(tree, ax, height, 1, get_fontsize(get_node_count(tree)))
plt.show()
def save_rb_tree(tree, index):
fig, ax = plt.subplots()
fig.set_facecolor('gray')
left, right, height = get_left_length(tree), get_right_length(tree), get_height(tree)
# print(left, right, height)
h = height * 100 + 100
w = 100 * get_node_count(tree) + 100
if w < 400:
w = 400
h = h * 400 / w
plt.text(w / 2 - 50, h - 40, index, size=30, family="fantasy", color="r",
style="italic", weight="light", bbox=dict(facecolor="r", alpha=0.2))
plt.ylim(0, h)
plt.xlim(0, w)
show_node(tree, ax, height, 1, get_fontsize(get_node_count(tree)))
fig.set_size_inches(10, h / (w / 10))
plt.savefig("rb/rbtree_{}.png".format(index))
def dp_equals_base(cls, base_cls):
"""
Walk the inheritance chain of cls and check whether base_cls is one of its base classes.
:param cls:
:param base_cls:
:return:
"""
# if cls.__class__
if cls is None:
return False
if cls.__base__ != type:
if cls.__base__ == base_cls:
return True
else:
return dp_equals_base(cls.__base__, base_cls)
else:
return False
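if __name__ == '__main__':
    # Minimal self-checks for the helpers above (illustrative only; requires the
    # aestate package to be importable because of the imports at the top).
    assert dp_equals_base(bool, int) is True       # bool derives from int
    assert dp_equals_base(str, int) is False
    assert fullname('abc') == 'str'                # builtins keep the bare class name
    assert logTupleToText(False, 'a', ('b', 'c')) == 'abc'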
|
Aestate
|
/Aestate-1.1.0.tar.gz/Aestate-1.1.0/aestate/util/others.py
|
others.py
|
import datetime
import traceback
from aestate.ajson import aj
class baseTag(object):
def __init__(self,
name=None,
length=None,
d_point=None,
t_type='varchar',
is_null=False,
primary_key=False,
comment="",
auto_field=False,
auto_time=False,
update_auto_time=False,
default=None):
"""
:param name: column name
:param length: column length
:param d_point: number of decimal places
:param t_type: column type
:param is_null: whether NULL is allowed
:param primary_key: whether this is the primary key
:param comment: column comment
:param auto_field: auto-increment key
:param auto_time: default the column to the current time
:param update_auto_time: default to the current time and update it on every change
:param default: default value
"""
# whether the column is updated with the current time on every change
self.update_auto_time = update_auto_time
if update_auto_time:
self.default = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
# whether the column defaults to the current time
self.auto_time = auto_time
if auto_time:
self.default = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
# whether the column auto-increments
self.autoField = auto_field
# column comment
self.comment = comment
# whether this is the primary key
self.primary_key = primary_key
# whether NULL is allowed
self.is_null = is_null
# number of decimal places
self.d_point = d_point
# column name
self.name = name
# column type
self.t_type = t_type
# maximum length
self.length = length
# use the custom default if one was given, otherwise any default set by the rules above, otherwise None
self.default = default if default else self.default if hasattr(self, 'default') else None
# when instantiated through a subclass a `fields` dict is present here;
# copy all custom fields onto the instance
if self.fields:
for key, value in self.fields.items():
setattr(self, key, value)
del self.fields
def get_field(self, name):
"""
Get a field value.
"""
return getattr(self, name)
def set_field(self, name, value):
"""
Set a field value.
"""
setattr(self, name, value)
def get_table(self, bf):
"""
Get the table structure.
"""
if bf:
return aj.parse(self, bf)
return aj.load(aj.parse(self))
class Template(baseTag):
def __init__(self, cls=None, **kwargs):
self.fields = {}
if cls:
kwargs.update(cls.__dict__)
self.fields['cls'] = cls
kwargs.update(update_field(**kwargs))
self.fields.update(kwargs)
super(Template, self).__init__(**kwargs)
class tinyintField(Template):
def __init__(self, **kwargs):
# whether this constructor was reached via boolField
if self.__class__.__name__ == 'boolField' \
and 'default' in kwargs.keys() \
and isinstance(kwargs['default'], bool):
kwargs['default'] = int(kwargs['default'] if 'default' in kwargs.keys() else False)
super(tinyintField, self).__init__(t_type='tinyint', **kwargs)
class intField(Template):
def __init__(self, **kwargs):
super(intField, self).__init__(t_type='int', **kwargs)
class bigintField(Template):
def __init__(self, **kwargs):
super(bigintField, self).__init__(t_type='bigint', **kwargs)
class floatField(Template):
def __init__(self, **kwargs):
super(floatField, self).__init__(t_type='float', **kwargs)
class doubleField(Template):
def __init__(self, **kwargs):
super(doubleField, self).__init__(t_type='double', **kwargs)
class datetimeField(Template):
def __init__(self, **kwargs):
super(datetimeField, self).__init__(t_type='datetime', **kwargs)
class charField(Template):
def __init__(self, **kwargs):
super(charField, self).__init__(t_type='char', **kwargs)
class varcharField(Template):
def __init__(self, **kwargs):
super(varcharField, self).__init__(t_type='varchar', **kwargs)
class textField(Template):
def __init__(self, **kwargs):
super(textField, self).__init__(t_type='text', **kwargs)
class tinytextField(Template):
def __init__(self, **kwargs):
super(tinytextField, self).__init__(t_type='tinytext', **kwargs)
class longtextField(Template):
def __init__(self, **kwargs):
super(longtextField, self).__init__(t_type='longtext', **kwargs)
class boolField(tinyintField):
"""布尔值的字段,必须存在默认值"""
def __init__(self, **kwargs):
kwargs.setdefault('default', False)
super(boolField, self).__init__(**kwargs)
def update_field(**kwargs):
"""
Update the keyword configuration dict.
"""
def no_rep(key, value, **kwargs):
"""
Set the key only when it is not already present.
"""
if key not in kwargs.keys():
kwargs[key] = value
return kwargs
def has_attr(key, **kwargs):
if key in kwargs.keys():
return kwargs[key]
return None
# kwargs.update(no_rep('table_name', has_attr('__table_name__', **kwargs), **kwargs))
kwargs.update(no_rep('name', has_attr('name', **kwargs), **kwargs))
kwargs.update(no_rep('length', has_attr('length', **kwargs), **kwargs))
kwargs.update(no_rep('d_point', has_attr('d_point', **kwargs), **kwargs))
kwargs.update(no_rep('t_type', has_attr('t_type', **kwargs), **kwargs))
kwargs.update(no_rep('is_null', has_attr('is_null', **kwargs), **kwargs))
kwargs.update(no_rep('primary_key', has_attr('primary_key', **kwargs), **kwargs))
kwargs.update(no_rep('comment', has_attr('comment', **kwargs), **kwargs))
kwargs.update(no_rep('auto_field', has_attr('auto_field', **kwargs), **kwargs))
kwargs.update(no_rep('auto_time', has_attr('auto_time', **kwargs), **kwargs))
kwargs.update(no_rep('update_auto_time', has_attr('update_auto_time', **kwargs), **kwargs))
return kwargs
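# A rough sketch of how these field classes are typically declared on a POJO;
# the class and column names are placeholders, not part of the framework:
#
#   class DemoFields:
#       id = intField(name='id', primary_key=True, auto_field=True)
#       title = varcharField(name='title', length=255, is_null=False, comment='title text')
#       score = doubleField(name='score', length=10, d_point=2, default=0)
#       enabled = boolField(name='enabled')              # stored as tinyint, defaults to False
#       create_at = datetimeField(name='create_at', auto_time=True)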
|
Aestate
|
/Aestate-1.1.0.tar.gz/Aestate-1.1.0/aestate/dbs/_mysql/tag.py
|
tag.py
|
import copy
from datetime import datetime
from typing import List, Tuple
from aestate.dbs import base
from aestate.exception import FieldNotExist
from aestate.dbs._mysql import tag
from aestate.util import others
from aestate.util.Log import ALog
import threading
from aestate.work.Modes import Singleton, EX_MODEL
class ParseUtil:
def parse_main(self, *args, to_str=False, symbol='%s', left='`', right='`', **kwargs):
"""
Parse the attributes:
formats the attributes as: ['`a`,', '`b`,', '`c`']
:param to_str: whether to return a single string
:param args: the values
:param symbol: placeholder syntax
:param left: left delimiter
:param right: right delimiter
:return:
"""
is_field = kwargs['is_field'] if 'is_field' in kwargs.keys() else False
fields = []
for value in args:
value = others.conversion_types(value)
if to_str:
if is_field:
fields.append(f'{left}{symbol}{right},' % (str(value)))
else:
fields.append(f'{symbol},' % (str(value)))
else:
fields.append(value if not ParseUtil.is_default(value) else None)
if len(fields) != 0:
fields[len(fields) - 1] = fields[len(fields) - 1].replace(',', '')
field_str = ''
# when converted to a string: "`field1`,`field2`,`field3`"
if to_str:
for field in fields:
field_str += field
return field_str
return fields
else:
return None
def parse_key(self, *args, **kwargs):
"""
When is_field=True the keys are wrapped as `field` / [field].
Parses the key part, e.g. the column list in:
INSERT INTO `demo` (these are the keys) VALUES ('','','','');
:param args:
:return:
"""
if 'is_field' not in kwargs.keys():
kwargs['is_field'] = True
fields = self.parse_main(*args, to_str=True, **kwargs)
return fields
def parse_update(self, pojo, key):
f = pojo.sqlFields
kes = [f.update_str, f.left_subscript, pojo.__table_name__, f.right_subscript, f.set_str]
params = []
for i in pojo.fields:
if i != key and not pojo.getFields()[i].auto_time:
kes.append(f'`{i}`=%s')
kes.append(", ")
if pojo.getFields()[i].update_auto_time:
params.append(datetime.now())
else:
params.append(getattr(pojo, i))
kes = kes[:-1]
kes.append(f.where_str)
kes.append(f'`{key}`=%s')
params.append(getattr(pojo, key))
sql = ''.join(kes)
return sql, params
def parse_remove(self, pojo, key):
f = pojo.sqlFields
kes = [f.delete_str, f.from_str, f.left_subscript, pojo.__table_name__, f.right_subscript, f.where_str]
params = []
kes.append(f'`{key}`=%s')
params.append(getattr(pojo, key))
sql = ''.join(kes)
return sql, params
def parse_value(self, *args, **kwargs):
"""
Parses the value part, e.g. the VALUES list in:
INSERT INTO `demo` (`index`, `title`, `selects`, `success`) VALUES (these are the values);
:param args:
:return:
"""
values = self.parse_main(*args, **kwargs)
return values
def parse_insert(self, keys, values, __table_name__, insert_str, values_str, symbol='%s',
sql_format='%s %s (%s)%s(%s)'):
"""
Override this method to customise how the sql is generated.
keys: all field names that need to be parsed
values: the values of all fields that are used
__table_name__: table name
insert_str: the INSERT keyword string
values_str: the VALUES keyword string
symbol: placeholder format, using `%s` as the anonymous placeholder
"""
fields = self.parse_key(*keys)
values = self.parse_value(*values)
# work out how many placeholders are needed
hides_value = [f'{symbol},' for i in range(len(values))]
# strip the trailing comma
end = hides_value[len(hides_value) - 1]
hides_value[len(hides_value) - 1] = end[0: len(end) - 1]
# final placeholder string
value = ''.join(hides_value)
sql = sql_format % (
insert_str,
str(__table_name__), fields, values_str, value
)
kes = {'sql': sql}
args = []
[args.append(i) for i in values]
kes['params'] = args
return kes
def parse_insert_pojo(self, pojo, __table_name__, insert_str, values_str):
"""
Parse an insert statement, e.g.
INSERT INTO `__table_name__`(`title`,'selects') VALUE ('','')
:param pojo: the POJO object
:param __table_name__: table name
:param insert_str: the INSERT dialect string
:param values_str: the VALUES dialect string
:return:
"""
# get all keys
ParseUtil.fieldExist(pojo, 'fields', raise_exception=True)
# after reading the values, drop empty values together with their fields
cp_value = []
# copy of the field list
keys_copy = []
keys_c, cp_v = ParseUtil.parse_pojo(pojo)
keys_copy += keys_c
cp_value += cp_v
return self.parse_insert(keys_copy, cp_value, __table_name__, insert_str=insert_str,
values_str=values_str)
@staticmethod
def parse_pojo(pojo) -> Tuple[list, list]:
keys = pojo.fields
# after reading the values, drop empty values together with their fields
cp_value = []
# copy of the field list
keys_copy = []
values = [getattr(pojo, v) for v in keys]
for i, j in enumerate(values):
if j is not None and not ParseUtil.is_default(j):
keys_copy.append(keys[i])
cp_value.append(j)
return keys_copy, cp_value
@staticmethod
def parse_obj(data: dict, instance: object) -> object:
"""
Parse a data row into an object.
Note:
the data must come from a DbUtil query
:param data: a single row
:param instance: the object used as the template for parsing
:return: POJO object
"""
# deep-copy the object
part_obj = instance.copy()
for key, value in data.items():
setattr(part_obj, key, value)
return part_obj
@staticmethod
def find_last_id(**kwargs):
"""
Precedence rule:
explicit kwargs > configuration file
Checks whether the configuration asks for the id of the last inserted row.
Only used by the mutating operations, i.e.:
insert,
update,
delete
Attributes:
conf_obj: the configuration object
"""
conf_obj = kwargs['config_obj']
if 'last_id' not in kwargs.keys():
c_dict = conf_obj.get_dict()
if 'last_id' in c_dict.keys():
kwargs['last_id'] = c_dict['last_id']
else:
kwargs['last_id'] = False
return kwargs
@staticmethod
def find_print_sql(**kwargs):
"""
Precedence rule:
explicit kwargs > configuration file
Checks whether the configuration asks for the sql to be printed.
Used by all database operations.
Attributes:
conf_obj: the configuration object
"""
conf_obj = kwargs['config_obj']
if 'print_sql' not in kwargs.keys():
c_dict = conf_obj.get_dict()
if 'print_sql' in c_dict.keys():
kwargs['print_sql'] = c_dict['print_sql']
else:
kwargs['print_sql'] = False
return kwargs
@staticmethod
def case_name(text, rep_text='_', lower=True, upper=False):
"""
Convert camel-case text into a string separated by the given symbol, lower-cased by default.
:param text: the text to convert
:param rep_text: the string inserted before each upper-case letter
:param lower: whether to lower-case the result
:param upper: whether to upper-case the result
"""
lst = []
for index, char in enumerate(text):
if char.isupper() and index != 0:
lst.append(rep_text)
lst.append(char)
# build the converted name
if lower:
return "".join(lst).lower()
elif upper:
return "".join(lst).upper()
else:
return "".join(lst)
@staticmethod
def is_default(__val):
"""
Whether the value still equals its field default.
"""
try:
t_v = __val.__class__.__base__
if t_v in [tag.Template, tag.baseTag]:
return __val.default is None
except SyntaxError:
return False
@staticmethod
def set_field(obj, key, value):
"""
Set a field on the object only if it does not already have one.
For extensibility and decoupling, framework code must use this method
or set_field_compulsory() to provide objects to the operation managers.
"""
if not hasattr(obj, key):
setattr(obj, key, value)
@staticmethod
def set_field_compulsory(obj, key: str, data: dict, val: object) -> None:
"""
If the key exists in data, set that value on obj; otherwise set val.
"""
if key in data.keys():
setattr(obj, key, data[key])
else:
setattr(obj, key, val)
@staticmethod
def fieldExist(obj: object, field: str, el=None, raise_exception=False) -> object:
"""
Get the value of a field from the object; if the field does not exist, return `el` instead.
"""
if isinstance(obj, dict):
if field in obj.keys():
return obj[field]
else:
if raise_exception:
raise ALog.log_error(
msg=f'the key of `{field}` cannot be found in the `{obj.__class__.__name__}`',
obj=FieldNotExist,
raise_exception=True)
else:
return el
else:
if hasattr(obj, field):
return getattr(obj, field)
else:
if raise_exception:
raise ALog.log_error(
msg=f'the key of `{field}` cannot be found in the `{obj.__class__.__name__}`',
obj=FieldNotExist,
raise_exception=True)
else:
return el
@staticmethod
def parse_pojo_many(pojo_many: list) -> List[tuple]:
# after reading the values, drop empty values together with their fields
cp_value = []
for pojo in pojo_many:
keys_c, cp_v = ParseUtil.parse_pojo(pojo)
cp_value.append(tuple(cp_v))
# the actual values
return cp_value
@staticmethod
def insert_to_obj(obj, kwargs):
for key, value in kwargs.items():
ParseUtil.set_field_compulsory(obj=obj, key=key, data=kwargs, val=value)
def get_pojo_sql(self, instance):
"""
Get the sql statement that creates the table for the POJO.
"""
fields = instance.getFields()
print(fields)
# every dialect constant below starts and ends with a space
# whitespace
class Fields:
"""
Default database dialect configuration.
"""
_instance_lock = threading.RLock()
@staticmethod
def parse_field(key: str) -> str:
return f' {key} '
@property
def left_subscript(self):
"""
left quote character
"""
return '`'
@property
def space(self):
"""
space
"""
return ' '
@property
def right_subscript(self):
"""
right quote character
"""
return '`'
@property
def insert_str(self):
"""
INSERT keyword
"""
return self.parse_field('INSERT INTO')
@property
def delete_str(self):
"""
DELETE keyword
"""
return self.parse_field('DELETE')
@property
def update_str(self):
"""
UPDATE keyword
"""
return self.parse_field('UPDATE')
@property
def find_str(self):
return self.parse_field('SELECT')
@property
def where_str(self):
return self.parse_field('WHERE')
@property
def by_str(self):
return self.parse_field('BY')
@property
def order_by_str(self):
return self.parse_field('ORDER BY')
@property
def group_by_str(self):
return self.parse_field('GROUP BY')
@property
def desc_str(self):
return self.parse_field('DESC')
@property
def set_str(self):
return self.parse_field('SET')
@property
def ander_str(self):
return self.parse_field('AND')
@property
def limit_str(self):
return self.parse_field('LIMIT')
@property
def from_str(self):
return self.parse_field('FROM')
@property
def value_str(self):
return self.parse_field('VALUE')
@property
def values_str(self):
return self.parse_field('VALUES')
@property
def asses_str(self):
return self.parse_field('AS')
@property
def left_par(self):
return self.parse_field('(')
@property
def right_par(self):
return self.parse_field(')')
@property
def comma(self):
return self.parse_field(',')
@property
def eq(self):
return self.parse_field('=')
@property
def on_str(self):
return self.parse_field('on')
@property
def left_join_str(self):
return self.parse_field('LEFT JOIN')
@property
def symbol(self):
return '>> << == <= >= != - + / * %'.split(' ')
def __new__(cls, *args, **kwargs):
instance = Singleton.createObject(cls)
return instance
class OperaBase(base.OperaBase):
"""
Basic mysql operations.
"""
class Fields:
# rename a table
RENAME = lambda x, y: f'rename table {x} to {y}'
# drop a table
DELETE_TABLE = lambda x: f'DROP TABLE IF EXISTS {x}'
def extra(self, field) -> Tuple[bool, object]:
ver = {"Field": lambda x: x['Field'] == field.name,
"Type": lambda x: x["Type"] == field.t_type
if field.length is None else
f"{field.t_type}({field.length})"
if field.d_point is None else
f"{field.t_type}({field.length},{field.d_point})",
"Null": {}, "Key": {}, "Default": {}, "Extra": {}}
ft = {
'Field': field.name,
'Type': field.t_type,
'Null': field.is_null,
'Key': field.primary_key,
'Default': field.default,
'Extra': field.comment,
}
target = None
for i in self.R:
if field.name == i['Field']:
target = i
break
if target is None:
return False, f"Check failed:{ft}"
self.R.remove(target)
for k, v in ft.items():
pass
return True, f"{ft}"
def check(self) -> bool:
self.R = self.instance.execute_sql(f"DESC `{self.instance.get_tb_name()}`")
for k, v in self.instance.getFields().items():
f = self.extra(v)
ALog.log(f[1], obj=self, task_name="Check") \
if f[0] else \
ALog.log_error(f[1], obj=self, task_name="Check")
if len(self.R) != 0:
ALog.log_error(f"Extra field:{self.R}", obj=self, task_name="Check")
return False
return True
def create(self, replace=False):
"""
Create the table.
:param replace: whether to replace an existing table
:return:
"""
PRIMARYKEY = None
FIELDS = []
default_parse = lambda x: "AUTO_INCREMENT" \
if x.autoField else 'DEFAULT CURRENT_TIMESTAMP' \
if x.auto_time else 'DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP' \
if x.update_auto_time else f"CHARACTER SET utf8 COLLATE utf8_general_ci DEFAULT '{x.default}'" \
if isinstance(x.default, str) else f"DEFAULT '{x.default}'" \
if isinstance(x.default, int) else ""
type_parse = lambda x: x.t_type \
if x.length is None else "%s(%s,%s)" % (x.t_type, x.length, x.d_point) \
if x.d_point is not None and x.length is not None else "%s(%s)" % (x.t_type, x.length) \
if x.length is not None else x.t_type
for k, v in self.instance._fields.items():
FIELDS.append(
"`%s` %s %s %s," %
(
v.name,
type_parse(v),
default_parse(v),
'NULL' if v.is_null else 'NOT NULL'
)
)
if v.primary_key:
PRIMARYKEY = v.name
sql = "CREATE TABLE IF NOT EXISTS `%s` ( %s %s) ENGINE=InnoDB DEFAULT CHARSET=utf8;" % \
(self.instance.get_tb_name(), ''.join(FIELDS), "PRIMARY KEY (`%s`)" % PRIMARYKEY)
ALog.log(sql)
r = self.instance.execute_sql(sql, mode=EX_MODEL.UPDATE)
return r
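# A small sketch of what ParseUtil.parse_insert() produces; the table and column
# names are placeholders:
#
#   p = ParseUtil()
#   kes = p.parse_insert(['a', 'b'], [1, 2], 'demo',
#                        insert_str='INSERT INTO ', values_str=' VALUES ')
#   # kes -> {'sql': 'INSERT INTO  demo (`a`,`b`) VALUES (%s,%s)', 'params': [1, 2]}
#   # (the doubled space comes from the '%s %s (%s)%s(%s)' format string)
#
# In normal use insert_str/values_str come from the Fields dialect class above
# and the keys/values are taken from a POJO via parse_insert_pojo().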
|
Aestate
|
/Aestate-1.1.0.tar.gz/Aestate-1.1.0/aestate/dbs/_mysql/__init__.py
|
__init__.py
|
import copy
from typing import List, Tuple
from aestate.exception import FieldNotExist
from aestate.dbs._mysql import tag
from aestate.util import others
from aestate.util.Log import ALog
import threading
from aestate.work.Modes import Singleton
class ParseUtil:
def parse_main(self, *args, to_str=False, symbol='%s', left='[', right=']', **kwargs):
"""
Parse the attributes:
formats the attributes as: ['`a`,', '`b`,', '`c`']
:param to_str: whether to return a single string
:param args: the values
:param is_field: whether the values are table fields
:param symbol: placeholder syntax
:param left: left delimiter
:param right: right delimiter
:return:
"""
is_field = kwargs['is_field'] if 'is_field' in kwargs.keys() else False
fields = []
for value in args:
value = others.conversion_types(value)
if to_str:
if is_field:
fields.append(f'{left}{symbol}{right},' % (str(value)))
else:
fields.append(f'{symbol},' % (str(value)))
else:
fields.append(value if not ParseUtil.is_default(value) else None)
if len(fields) != 0:
fields[len(fields) - 1] = fields[len(fields) - 1].replace(',', '')
field_str = ''
if to_str:
for field in fields:
field_str += field
return field_str
return fields
else:
return None
def parse_key(self, *args, **kwargs):
"""
Parses the key part, e.g. the column list in:
INSERT INTO `demo` (these are the keys) VALUES ('','','','');
:param args:
:return:
"""
if 'is_field' not in kwargs.keys():
kwargs['is_field'] = True
fields = self.parse_main(*args, to_str=True, **kwargs)
return fields
def parse_value(self, *args, **kwargs):
"""
Parses the value part, e.g. the VALUES list in:
INSERT INTO `demo` (`index`, `title`, `selects`, `success`) VALUES (these are the values);
:param args:
:return:
"""
values = self.parse_main(*args, **kwargs)
return values
def parse_insert(self, keys, values, __table_name__, insert_str, values_str, symbol='%s',
sql_format='%s %s (%s)%s(%s)'):
"""
Override this method to customise how the sql is generated.
keys: all field names that need to be parsed
values: the values of all fields that are used
__table_name__: table name
insert_str: the INSERT keyword string
values_str: the VALUES keyword string
symbol: placeholder format, using `%s` as the anonymous placeholder
"""
fields = self.parse_key(*keys)
values = self.parse_value(*values)
# 分析需要几个隐藏值
hides_value = [f'{symbol},' for i in range(len(values))]
# 去除末尾的逗号
end = hides_value[len(hides_value) - 1]
hides_value[len(hides_value) - 1] = end[0: len(end) - 1]
# 得到最后隐藏符号的字符串表达格式
value = ''.join(hides_value)
sql = sql_format % (
insert_str,
str(__table_name__), fields, values_str, value
)
kes = {'sql': sql}
args = []
[args.append(i) for i in values]
kes['params'] = args
return kes
def parse_insert_pojo(self, pojo, __table_name__, insert_str, values_str):
"""
Parse an insert statement, e.g.
INSERT INTO `__table_name__`(`title`,'selects') VALUE ('','')
:param pojo: the POJO object
:param __table_name__: table name
:param insert_str: the INSERT dialect string
:param values_str: the VALUES dialect string
:return:
"""
# 得到所有的键
ParseUtil.fieldExist(pojo, 'fields', raise_exception=True)
# 在得到值之后解析是否为空并删除为空的值和对应的字段
cp_value = []
# 复制新的一张字段信息
keys_copy = []
keys_c, cp_v = ParseUtil.parse_pojo(pojo)
keys_copy += keys_c
cp_value += cp_v
return self.parse_insert(keys_copy, cp_value, __table_name__, insert_str=insert_str,
values_str=values_str)
@staticmethod
def parse_pojo(pojo) -> Tuple[list, list]:
keys = pojo.fields
# 在得到值之后解析是否为空并删除为空的值和对应的字段
cp_value = []
# 复制新的一张字段信息
keys_copy = []
values = [getattr(pojo, v) for v in keys]
for i, j in enumerate(values):
if j is not None and not ParseUtil.is_default(j):
keys_copy.append(keys[i])
cp_value.append(j)
return keys_copy, cp_value
@staticmethod
def parse_obj(data: dict, instance: object) -> object:
"""
Parse a data row into an object.
Note:
the data must come from a DbUtil query
:param data: a single row
:param instance: the object used as the template for parsing
:return: POJO object
"""
# 深度复制对象
part_obj = copy.copy(instance)
for key, value in data.items():
setattr(part_obj, key, value)
return part_obj
@staticmethod
def find_last_id(**kwargs):
"""
Precedence rule:
explicit kwargs > configuration file
Checks whether the configuration asks for the id of the last inserted row.
Only used by the mutating operations, i.e.:
insert,
update,
delete
Attributes:
conf_obj: the configuration object
"""
conf_obj = kwargs['config_obj']
if 'last_id' not in kwargs.keys():
c_dict = conf_obj.get_dict()
if 'last_id' in c_dict.keys():
kwargs['last_id'] = c_dict['last_id']
else:
kwargs['last_id'] = False
return kwargs
@staticmethod
def find_print_sql(**kwargs):
"""
Precedence rule:
explicit kwargs > configuration file
Checks whether the configuration asks for the sql to be printed.
Used by all database operations.
Attributes:
conf_obj: the configuration object
"""
conf_obj = kwargs['config_obj']
if 'print_sql' not in kwargs.keys():
c_dict = conf_obj.get_dict()
if 'print_sql' in c_dict.keys():
kwargs['print_sql'] = c_dict['print_sql']
else:
kwargs['print_sql'] = False
return kwargs
@staticmethod
def case_name(text, rep_text='_', lower=True, upper=False):
"""
Convert camel-case text into a string separated by the given symbol, lower-cased by default.
:param text: the text to convert
:param rep_text: the string inserted before each upper-case letter
:param lower: whether to lower-case the result
:param upper: whether to upper-case the result
"""
lst = []
for index, char in enumerate(text):
if char.isupper() and index != 0:
lst.append(rep_text)
lst.append(char)
# 替换名称
if lower:
return "".join(lst).lower()
elif upper:
return "".join(lst).upper()
else:
return "".join(lst)
@staticmethod
def is_default(__val):
"""
Whether the value still equals its field default.
"""
try:
t_v = __val.__class__.__base__
if t_v in [tag.Template, tag.baseTag]:
return __val.default is None
except SyntaxError:
return False
@staticmethod
def set_field(obj, key, value):
"""
Set a field on the object only if it does not already have one.
For extensibility and decoupling, framework code must use this method
or set_field_compulsory() to provide objects to the operation managers.
"""
if not hasattr(obj, key):
setattr(obj, key, value)
@staticmethod
def set_field_compulsory(obj, key: str, data: dict, val: object) -> None:
"""
If the key exists in data, set that value on obj; otherwise set val.
"""
if key in data.keys():
setattr(obj, key, data[key])
else:
setattr(obj, key, val)
@staticmethod
def fieldExist(obj: object, field: str, el=None, raise_exception=False) -> object:
"""
Get the value of a field from the object; if the field does not exist, return `el` instead.
"""
if isinstance(obj, dict):
if field in obj.keys():
return obj[field]
else:
if raise_exception:
ALog.log_error(
msg=f'the key of `{field}` cannot be found in the `{obj.__class__.__name__}`',
obj=FieldNotExist, raise_exception=True)
else:
return el
else:
if hasattr(obj, field):
return getattr(obj, field)
else:
if raise_exception:
raise ALog.log_error(
msg=f'the key of `{field}` cannot be found in the `{obj.__class__.__name__}`',
obj=FieldNotExist,
raise_exception=True)
else:
return el
@staticmethod
def parse_pojo_many(pojo_many: list) -> List[tuple]:
# 在得到值之后解析是否为空并删除为空的值和对应的字段
cp_value = []
for pojo in pojo_many:
keys_c, cp_v = ParseUtil.parse_pojo(pojo)
cp_value.append(tuple(cp_v))
# 真实值
return cp_value
@staticmethod
def insert_to_obj(obj, kwargs):
for key, value in kwargs.items():
ParseUtil.set_field_compulsory(obj=obj, key=key, data=kwargs, val=value)
def get_pojo_sql(self, instance):
"""
Get the sql statement that creates the table for the POJO.
"""
fields = instance.getFields()
print(fields)
# every dialect constant below starts and ends with a space
# whitespace
class Fields:
"""
Default database dialect configuration.
"""
_instance_lock = threading.RLock()
@staticmethod
def parse_field(key: str) -> str:
return f' {key} '
@property
def left_subscript(self):
"""
左角标
"""
return '`'
@property
def space(self):
"""
空格
"""
return ' '
@property
def right_subscript(self):
"""
右角标
"""
return '`'
@property
def insert_str(self):
"""
插入
"""
return self.parse_field('INSERT INTO')
@property
def delete_str(self):
"""
删除
"""
return self.parse_field('DELETE')
@property
def update_str(self):
"""
更新
"""
return self.parse_field('UPDATE')
@property
def find_str(self):
return self.parse_field('SELECT')
@property
def where_str(self):
return self.parse_field('WHERE')
@property
def by_str(self):
return self.parse_field('BY')
@property
def order_by_str(self):
return self.parse_field('ORDER BY')
@property
def group_by_str(self):
return self.parse_field('GROUP BY')
@property
def desc_str(self):
return self.parse_field('DESC')
@property
def set_str(self):
return self.parse_field('SET')
@property
def ander_str(self):
return self.parse_field('AND')
@property
def limit_str(self):
return self.parse_field('LIMIT')
@property
def from_str(self):
return self.parse_field('FROM')
@property
def value_str(self):
return self.parse_field('VALUE')
@property
def values_str(self):
return self.parse_field('VALUES')
@property
def asses_str(self):
return self.parse_field('AS')
@property
def left_par(self):
return self.parse_field('(')
@property
def right_par(self):
return self.parse_field(')')
@property
def comma(self):
return self.parse_field(',')
@property
def eq(self):
return self.parse_field('=')
@property
def on_str(self):
return self.parse_field('on')
@property
def left_join_str(self):
return self.parse_field('LEFT JOIN')
@property
def symbol(self):
return '>> << == <= >= != - + / * %'.split(' ')
def parse_set(self, keys):
"""
Format the SET clause keys.
"""
keys_str = ''
for i in keys:
keys_str += '{}=%s{}'.format(i, self.ander_str)
keys_str = keys_str[0:len(keys_str) - len(self.ander_str)]
return keys_str
def __new__(cls, *args, **kwargs):
instance = Singleton.createObject(cls)
return instance
class OperaBase:
def __init__(self, instance, fields):
self.instance = instance
self.fields = fields
def start(self):
pass
class CreateModel(OperaBase):
"""
Create the database table from the POJO.
"""
def start(self):
__table_name__ = self.instance.__table_name__
class MakeModel(OperaBase):
def start(self):
pass
|
Aestate
|
/Aestate-1.1.0.tar.gz/Aestate-1.1.0/aestate/dbs/_mssql/__init__.py
|
__init__.py
|
========
Aesthete
========
Aesthete is an integrated mathematics environment, incorporating
* Glypher: WYSIWYG computer algebra package over Sympy
* Glancer: matplotlib GUI
* Source: underlying discrete/continuous mapping manager (inc CSV, functions, etc.)
-------
Website
-------
Please see http://launchpad.net/aesthete for further details
------------
Dependencies
------------
* numpy (>=1.4.1)
* scipy (>=0.7.2)
* sympy (>=0.7.1)
* pycairo (>=1.8.8)
* pygobject (>=2.21.5)
* pygtk (>=2.17.0)
* PIL (>=1.1.7)
* matplotlib (>=1.0.1)
* IPython (>=0.10)
* rsvg (>=2.30.0)
* argparse (>=1.1)
* lxml (>=2.3)
Packages for the Sympy 0.7.x series should in turn depend on mpmath; if they do not, you may need to install python-mpmath separately
|
Aesthete
|
/Aesthete-0.4.2.tar.gz/Aesthete-0.4.2/README.txt
|
README.txt
|
import os, math, sys, getopt, string
import sympy
import copy
import glypher.Word as Word
from glypher.Widget import GlyphEntry, GlyphResponder
import random
from aobject.utils import debug_print
from gtk import gdk
import threading
import cairo, gtk, gobject
import matplotlib
import numpy, numpy.fft
import scipy, scipy.interpolate, scipy.optimize
from matplotlib.backends.backend_gtkagg import FigureCanvasGTKAgg as mpl_Canvas
from matplotlib.backends.backend_gtkagg import NavigationToolbar2GTKAgg as mpl_Navbar
try :
import pygtksheet as gtksheet
have_gtksheet = True
except ImportError :
have_gtksheet = False
import pylab
from PIL import Image
import aobject.aobject as aobject
def get_sheet_child_at(sheet, r, c) :
for child in sheet.get_children() :
if child.attached_to_cell and child.row == r and child.col == c :
return child
return None
class AesSpreadsheet(gtk.Frame, aobject.AObject) :
sheet = None
entries = None
current_cell = None
suspend_activate = False
def get_sympy_val(self, r, c) :
if (r,c) in self.entries :
return self.entries[(r,c)].get_sympy()
return None
#self.suspend_activate = True
#t = self.sheet.cell_get_text(r, c)[0]
#self.suspend_activate = False
#return sympy.core.sympify(t)
def do_sheet_activate_signal(self, sheet, r, c) :
if self.suspend_activate :
return
if self.current_cell is not None :
self.do_cell_editor_processed_line(self.cell_editor)
self.current_cell = (r,c)
if (r,c) not in self.entries :
self.add_an_entry(*self.current_cell)
entry = self.entries[(r,c)]
debug_print(entry.get_text())
self.cell_editor.set_xml(entry.get_xml(input_phrase=True))
self.cell_editor.grab_focus()
def do_cell_editor_processed_line(self, ce) :
debug_print(ce.get_xml())
debug_print(self.current_cell)
if self.current_cell is None :
return
if not self.current_cell in self.entries :
self.add_an_entry(*self.current_cell)
responder = self.entries[self.current_cell]
xml = ce.get_xml()
responder.set_xml(xml)
def __init__(self, env=None):
gtk.Frame.__init__(self)
aobject.AObject.__init__(self, "AesSpreadsheet", env, view_object = True)
self.set_aname_nice("Spreadsheet" + (" ("+str(self.get_aname_num())+")" if self.get_aname_num()>1 else ""))
self.entries = {}
vbox = gtk.VBox()
self.sheet = gtksheet.Sheet(2000, 20, "PyGtkSheet")
sheet_scrw = gtk.ScrolledWindow()
sheet_scrw.add(self.sheet)
vbox.pack_start(gtk.Label("PyGtkSheet through Aes"), False, False)
vbox.pack_start(sheet_scrw)
self.cell_editor = GlyphEntry(evaluable=True)
self.cell_editor.main_phrase.set_p('spreadsheet', self)
self.cell_editor.connect("processed-line",
self.do_cell_editor_processed_line)
vbox.pack_start(self.cell_editor, False)
vbox.show_all()
new_win = gtk.Window()
new_win.maximize()
new_win.add(vbox)
new_win.show_all()
entry = self.add_an_entry(1, 1)
entry.caret.insert_entity(Word.make_word('Hi', None))
self.sheet.connect("activate", self.do_sheet_activate_signal)
def add_an_entry(self, r, c) :
test_entry = GlyphResponder(interactive=False,
resize_to_main_phrase=True, evalf=True)
test_entry.response_phrase.set_anchor_point((5, 5))
test_entry.response_phrase.set_anchor(('l', 't'))
test_entry.swap()
test_entry.response_phrase.set_p('spreadsheet', self)
test_entry.input_phrase.set_p('spreadsheet', self)
test_entry.show_all()
test_entry.set_font_size(15.0)
test_entry.connect("button-press-event", lambda but, ev :
self.do_sheet_activate_signal(self.sheet, r, c))
self.sheet.attach(test_entry, r, c, 0, 0, 3, 3)
self.entries[(r,c)] = test_entry
return test_entry
def load_csv(self, filename):
vals = []
with open (filename) as f:
#columns = f.readline().split(',')
vals = numpy.loadtxt(f, delimiter=',', unpack = True)
multicol = True
try : test = vals[0][0]
except : multicol = False
if len(vals) == 0 : return
if not multicol : vals = [vals]
for i in range(0, len(vals)) :
col = vals[i]
for j in range(0, len(col)) :
self.sheet.set_cell_text(j, i, str(vals[i][j]))
def load_series(self, source, series, vals):
pass
#mpl_line, = self.axes.plot(series, vals)
#line = AesMPLLine(self, mpl_line, source = source, logger = self.get_alogger())
#self.lines.append(line)
#self.absorb_properties(line, as_self = False)
#line.change_property("label", source)
#if self.legend : self.axes.legend()
#fmtr = AesFormatter(useOffset = False, useMathText = True)
#self.axes.xaxis.set_major_formatter(fmtr)
#fmtr = AesFormatter(useOffset = False, useMathText = True)
#self.axes.yaxis.set_major_formatter(fmtr)
#self.elevate()
#self.queue_draw()
#PROPERTIES
def get_aesthete_properties(self):
return { }
#BEGIN PROPERTIES FUNCTIONS
#END PROPERTIES FUNCTIONS
def get_method_window(self) :
if not have_gtksheet : return gtk.Label("No PyGtkSheet found")
win = gtk.VBox()
# From Sim
#sim_expa = gtk.Expander("From Sim"); sim_efra = gtk.Frame(); sim_expa.add(sim_efra)
#sim_vbox = gtk.VBox()
#sim_cmbo = gtk.combo_box_new_text()
#self.sim_cmbo = sim_cmbo
#self.od_add_conn = aobject.get_object_dictionary().connect(\
# "add", lambda o, a, r : self.methods_update_sim_cmbo() if r=="Sim" else 0)
#self.od_rem_conn = aobject.get_object_dictionary().connect(\
# "remove", lambda o, a, r : self.methods_update_sim_cmbo() if r=="Sim" else 0)
#self.methods_update_sim_cmbo()
#sim_vbox.pack_start(sim_cmbo)
#sim_time_hbox = gtk.HBox(); sim_time_hbox.pack_start(gtk.Label("Time"))
#sim_time_entr = gtk.Entry(); sim_time_hbox.pack_start(sim_time_entr)
#sim_vbox.pack_start(sim_time_hbox)
#sim_butt = gtk.Button("Load from Sim")
#sim_butt.connect("clicked", lambda o : self.load_from_sim(sim_cmbo.get_active_text(), sim_time_entr.get_text()))
#sim_vbox.pack_start(sim_butt)
#sim_efra.add(sim_vbox)
#win.pack_start(sim_expa)
# Load CSV
expander = gtk.Expander("Import CSV"); ef = gtk.Frame(); expander.add(ef)
csv_vbox = self.methods_make_load_csv()
ef.add(csv_vbox)
expander.show_all()
win.pack_start(expander)
win.show_all()
return win
def methods_make_load_csv(self) :
csv_vbox = gtk.VBox()
plotcsv_hbox = gtk.HBox()
plotcsv_text = gtk.Entry()
plotcsv_butt = gtk.Button("Load")
plotcsv_hbox.pack_start(plotcsv_text); plotcsv_hbox.pack_start(plotcsv_butt)
csv_vbox.pack_start(plotcsv_hbox)
plotcsv_butt.connect("clicked", lambda o : self.load_csv(plotcsv_text.get_text()))
return csv_vbox
#def methods_update_sim_cmbo(self) :
# cmbo = self.sim_cmbo
# mdl = cmbo.get_model()
# lv = aobject.get_object_dictionary().get_objects_by_am("Sim")
# for row in mdl : mdl.remove(row.iter)
# for v in lv : cmbo.append_text(v.get_aname())
#
#def load_from_sim(self, aname, time) :
# if aname == 'None' or aname == '' : return
# sim = aobject.get_object_from_dictionary(aname)
# old_time = sim.get_time()
# sim.set_time(float(time))
# pss = sim.get_point_sets()
# self.check_clear()
# for point_set in sim.get_point_sets() :
# if not point_set['extent'] : continue
# points = point_set['point_set']
# trans = zip(*points)
# self.load_series(sim.get_aname()+":"+point_set['stem'], trans[0], trans[1])
# sim.set_time(old_time)
|
Aesthete
|
/Aesthete-0.4.2.tar.gz/Aesthete-0.4.2/aesthete/aesspreadsheet.py
|
aesspreadsheet.py
|
import os, math, sys, getopt, string
import sympy
import copy
import glypher.Word as Word
from glypher.Widget import GlyphEntry, GlyphResponder
import random
from aobject.utils import debug_print
from gtk import gdk
import threading
import cairo, gtk, gobject
import matplotlib
import numpy, numpy.fft
import scipy, scipy.interpolate, scipy.optimize
from matplotlib.backends.backend_gtkagg import FigureCanvasGTKAgg as mpl_Canvas
from matplotlib.backends.backend_gtkagg import NavigationToolbar2GTKAgg as mpl_Navbar
from third_party.ipython_view import *
import pylab
from PIL import Image
import aobject.aobject as aobject
def format_obj_name (name) :
obj = aobject.get_object_from_dictionary(name)
return obj.get_aname_nice() + ' [' + obj.get_aname_root() + ']'
class AesPythonConsole(gtk.Frame, aobject.AObject) :
ipython_view = None
def __init__(self, env=None):
gtk.Frame.__init__(self)
aobject.AObject.__init__(self, "AesPythonConsole", env, view_object = True)
self.set_aname_nice("Python Console " + (" ("+str(self.get_aname_num())+")" if self.get_aname_num()>1 else ""))
self.ipython_view = IPythonView()
cons_vbox = gtk.VBox()
V = self.ipython_view
# with thanks to IPython cookbook
V.modify_font(pango.FontDescription("Mono 10"))
V.set_wrap_mode(gtk.WRAP_CHAR)
V.updateNamespace({'aobject' : aobject})
V.updateNamespace({'AES' : aobject.get_object_from_dictionary})
V.show()
cons_vbox.pack_start(V)
self.add(cons_vbox)
self.source_action = lambda s :\
V.write('AES(\''+s+'\')')
views_hbox = gtk.HBox()
win = gtk.VBox()
views_hbox.pack_start(win)
win.pack_start(gtk.Label('Objects'), False)
s = gtk.ScrolledWindow()
win.pack_start(s)
dict_lsst = aobject.get_object_dictionary().get_liststore_by_am('aobject')
dict_lsst.set_sort_column_id(1, gtk.SORT_ASCENDING)
dict_trvw = gtk.TreeView(model=dict_lsst)
dict_nice_crtx = gtk.CellRendererText()
dict_nice_tvcl = gtk.TreeViewColumn('Nice Name', dict_nice_crtx)
dict_nice_tvcl.add_attribute(dict_nice_crtx, 'text', 1)
dict_nice_tvcl.set_expand(True)
dict_trvw.append_column(dict_nice_tvcl)
dict_name_crtx = gtk.CellRendererText()
dict_name_tvcl = gtk.TreeViewColumn('Internal Name', dict_name_crtx)
dict_name_tvcl.add_attribute(dict_name_crtx, 'text', 0)
dict_name_tvcl.set_expand(True)
dict_trvw.append_column(dict_name_tvcl)
dict_trvw.connect('row-activated', lambda t, p, c : \
self.ipython_view.write('AES(\''+\
dict_lsst.get_value(dict_lsst.get_iter(p), 0)+'\')'))
s.add(dict_trvw)
s.set_size_request(-1, 200)
win = gtk.VBox()
views_hbox.pack_start(win)
win.pack_start(gtk.Label('Useful Variables'))
s2 = gtk.ScrolledWindow()
win.pack_start(s2)
uv_lsst = aobject.get_object_dictionary().useful_vars
uv_lsst.set_sort_column_id(0, gtk.SORT_ASCENDING)
uv_trvw = gtk.TreeView(model=uv_lsst)
uv_nice_crtx = gtk.CellRendererText()
uv_nice_tvcl = gtk.TreeViewColumn('Object', uv_nice_crtx)
#uv_nice_tvcl.add_attribute(uv_nice_crtx, 'text', 0)
uv_nice_tvcl.set_cell_data_func(uv_nice_crtx, lambda c, r, t, i :
uv_nice_crtx.set_property('text', \
format_obj_name(uv_lsst.get_value(i, 0))))
uv_nice_crtx.set_property('ellipsize', pango.ELLIPSIZE_MIDDLE)
uv_nice_crtx.set_property('width-chars', 30)
uv_nice_tvcl.set_expand(True)
uv_trvw.append_column(uv_nice_tvcl)
uv_desc_crtx = gtk.CellRendererText()
uv_desc_tvcl = gtk.TreeViewColumn('Variable Description', uv_desc_crtx)
uv_desc_tvcl.add_attribute(uv_desc_crtx, 'text', 2)
uv_desc_tvcl.set_expand(True)
uv_trvw.append_column(uv_desc_tvcl)
uv_trvw.connect('row-activated', lambda t, p, c : \
self.ipython_view.write('AES(\''+\
uv_lsst.get_value(uv_lsst.get_iter(p), 0)+'\').'+\
uv_lsst.get_value(uv_lsst.get_iter(p), 1)))
s2.add(uv_trvw)
s2.set_size_request(-1, 200)
views_hbox.show_all()
cons_vbox.pack_start(views_hbox, False)
self.show_all()
#PROPERTIES
def get_aesthete_properties(self):
return { }
#BEGIN PROPERTIES FUNCTIONS
#END PROPERTIES FUNCTIONS
def get_method_window(self) :
win = gtk.VBox()
return win
|
Aesthete
|
/Aesthete-0.4.2.tar.gz/Aesthete-0.4.2/aesthete/aespythonconsole.py
|
aespythonconsole.py
|
import glypher as g
from sympy.matrices import matrices
import copy
import draw
import gutils
from aobject.utils import debug_print
from PhraseGroup import *
ac = gutils.array_close
fc = gutils.float_close
class GlypherResponseReference(GlypherBracketedPhrase) :
toolbox = {'symbol' : '[ ]',
'shortcut' : None,
'alternatives' : None,
'priority' : None,
'category' : 'References'}
def __init__(self, parent, resp_code = None) :
GlypherBracketedPhrase.__init__(self, parent, bracket_shapes=('[',']'), auto=False)
self.mes.append('reference')
self.mes.append('response_reference')
if resp_code is not None :
resp_code = make_word(resp_code, self)
self.get_target('expression').adopt(resp_code)
self.set_recommending(self["expression"])
debug_print(self.IN().is_enterable())
self.set_rgb_colour([0.5, 0, 0])
def process_key(self, keyname, event, caret) :
mask = event.state
if keyname == 'Return' :
r = self.get_target('expression').get_repr()
if self.included() and r is not None :
new_me = g.get_response(r)
new_me = GlypherEntity.xml_copy(self.get_parent(), new_me)
self.suspend_recommending()
self.get_parent().exchange(self.OUT(), new_me)
self.set_recommending(new_me.get_recommending())
else :
            return GlypherPhraseGroup.process_key(self, keyname, event, caret)
return True
def get_sympy(self) :
r = self.get_target('expression').get_repr()
debug_print(r)
if r is not None :
d = g.get_response(r)
if d :
sy = d.get_sympy()
if sy is None :
raise GlypherTargetPhraseError(self["expression"].IN(), "Did not evaluate to simple expression")
return sy
raise GlypherTargetPhraseError(self["expression"].IN(), "Need valid address.")
class GlypherRangeReference(GlypherBracketedPhrase) :
sheet = None
def get_pow_options(self) :
return ('elementwise', 'python')
def __init__(self, parent, resp_code = None) :
GlypherBracketedPhrase.__init__(self, parent,
bracket_shapes=(u'\u27e8',u'\u27e9'),
auto=False)
self.mes.append('reference')
self.mes.append('range_reference')
self["expression"].set_font_size_scaling(0.6)
self["expression"].IN().set_p('align_as_entity', True)
from_phrase = GlypherPhrase(self)
to_phrase = GlypherPhrase(self)
self["expression"].adopt(from_phrase)
self["expression"].append(to_phrase, row=1)
self["expression"].IN().set_enterable(False)
self.add_target(from_phrase, "from")
self.add_target(to_phrase, "to")
self.ignore_targets.append('expression')
self.set_recommending(self["from"].IN())
self.set_rgb_colour([0.5, 0.3, 0])
def get_sympy(self) :
self.sheet = self.get_main_phrase_property('spreadsheet')
debug_print(self.sheet)
if not self.sheet :
return None
r = self["from"].get_sympy()
debug_print(r)
try :
s = self["to"].get_sympy()
except :
s = None
if r is not None :
if s is not None :
r0, c0 = r
r1, c1 = s
mat = []
for i in range(c0, c1+1) :
row = []
for j in range(r0, r1+1) :
row.append(self.sheet.get_sympy_val(j, i))
mat.append(row)
debug_print(mat)
return matrices.Matrix(mat)
else :
d = self.sheet.get_sympy_val(int(r[0]), int(r[1]))
return d
g.phrasegroups['response_reference'] = GlypherResponseReference
g.phrasegroups['range_reference'] = GlypherRangeReference
|
Aesthete
|
/Aesthete-0.4.2.tar.gz/Aesthete-0.4.2/aesthete/glypher/References.py
|
References.py
|
import glypher as g
import gtk
from PhraseGroup import *
import Parser
from sympy.matrices import matrices
def matrix_hadamard_multiply(a, b) :
alst = a.tolist()
blst = b.tolist()
return matrices.Matrix(a.shape[0], a.shape[1],
lambda i, j : alst[i][j]*blst[i][j])
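# Illustrative usage (an assumption added for clarity, not part of the original
# module): the Hadamard product is element-wise, so e.g.
#     matrix_hadamard_multiply(matrices.Matrix([[1, 2]]), matrices.Matrix([[3, 4]]))
# evaluates to Matrix([[3, 8]]).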
class GlypherTableCell(GlypherPhrase) :
def set_auto_copy_paste_contents(self, auto_copy_paste_contents) : self.set_p('auto_copy_paste_contents', auto_copy_paste_contents)
def get_auto_copy_paste_contents(self) : return self.get_p('auto_copy_paste_contents')
def __init__(self, parent, area = (0,0,0,0), auto_copy_paste_contents = False) :
GlypherPhrase.__init__(self, parent)
self.mes.append('table_cell')
self.set_p('align_as_entity', True)
self.set_auto_copy_paste_contents(auto_copy_paste_contents)
def process_button_release(self, event) :
if event.button==1 and self.get_auto_copy_paste_contents() :
self.to_clipboard(auto_paste=True)
return True
return GlypherPhrase.process_button_release(self, event)
class GlypherTable(GlypherPhraseGroup) :
row_colours = None
col_colours = None
cells = None
def get_dims(self) :
return (len(self.rows), len(self.cols))
def set_max_dimensions(self, max_dimensions) : self.set_p('max_dimensions', max_dimensions); self.recalc_bbox()
    def get_max_dimensions(self) : return self.get_p('max_dimensions')
def set_min_dimensions(self, min_dimensions) : self.set_p('min_dimensions', min_dimensions)
def get_min_dimensions(self) : return self.get_p('min_dimensions')
def set_proportional_width(self, proportional_width) : self.set_p('proportional_width', proportional_width)
def get_proportional_width(self) : return self.get_p('proportional_width')
def set_proportional_height(self, proportional_height) : self.set_p('proportional_height', proportional_height)
def get_proportional_height(self) : return self.get_p('proportional_height')
def set_cell_padding(self, cell_padding) : self.set_p('cell_padding', cell_padding)
def get_cell_padding(self) : return self.get_p('cell_padding')
def set_outside_border_p(self, outside_border) : self.set_p('outside_border', outside_border)
def get_outside_border(self) : return self.get_p('outside_border')
def get_xml(self, name=None, top=True, targets=None, full=False) :
root = GlypherPhraseGroup.get_xml(self, name, top, targets, full)
self._xml_table(root)
return root
def _xml_table(self, root) :
dims = self.get_dims()
root.set('rows', str(dims[0]))
root.set('cols', str(dims[1]))
cells = ET.Element('cells')
for i in range(0, dims[0]) :
for j in range(0, dims[1]) :
if j not in self.cells[i] :
continue
r = self.ij(i,j).get_xml(name='cell', top=False, full=False)
r.set('table_row', str(i))
r.set('table_col', str(j))
cells.append(r)
if len(cells) > 0 :
root.append(cells)
def ij(self, i, j) :
"""Return the (i,j)th cell."""
i = int(i)
j = int(j)
if i not in self.rows :
raise IndexError("Table has no row " + str(i))
if j not in self.cols :
raise IndexError("Table has no col " + str(j))
return self.cells[i][j]
def get_cell(self, i, j) :
return self.ij(i, j)
def set_col_width(self, i, w) :
self.col_offsets[i+1] = w
for c in self.get_col(i) :
c.get_entity().line_length = w
c.get_entity().word_wrap()
def set_defaults(self) :
self.set_proportional_height(1.)
self.set_proportional_width(1.)
self.set_p('row_colours', self.row_colours)
self.set_p('col_colours', self.col_colours)
self.set_p('scroll_offset', self.scroll_offset)
self.set_max_dimensions(None)
self.set_min_dimensions(None)
def __init__(self, parent, first_col = None, border = 2.0, cell_padding = 2,
add_first_cell = True, cell_class=GlypherTableCell) :
self.scroll_offset = [0,0]
GlypherPhraseGroup.__init__(self, parent, [])
self.mes.append('table')
self.add_properties({'default_cell_border' : None})
self.cell_class = cell_class
self.set_cell_padding(cell_padding)
self.col_border = {}
self.row_border = {}
self.row_colours = {}
self.col_colours = {}
self.set_outside_border(None)
self.set_p('align_as_entity', True)
self.set_enterable(False)
self.set_defaults()
self.cells = {}
if first_col is not None or add_first_cell :
a = self.add_cell(0,0)
self.set_recommending(a)
if first_col is not None :
a.append(first_col)
def slide(self, h, v) :
if (h!=0 and (self.scroll_offset[0]+h > 0 or \
self.scroll_offset[0]+h+(self.get_proportional_width()-1)*self.get_width() < 0)) or \
(v!=0 and (self.scroll_offset[1]+v > 0 or \
self.scroll_offset[1]+v+(self.get_proportional_height()-1)*self.get_height() < 0)) :
return
self.scroll_offset[0] += h
self.scroll_offset[1] += v
if len(self.entities) > 0 :
ents = self.sort_entities(rev=(h>0))
for ent in ents :
ent.translate(h, v, quiet=True)
self.recalc_bbox()
def process_scroll(self, event) :
m_control = bool(event.state & gtk.gdk.CONTROL_MASK)
if m_control :
self.slide(0.3*self.get_scaled_font_size()*(\
-1 if event.direction == gtk.gdk.SCROLL_UP else 1),
0)
else :
self.slide(0, 0.3*self.get_scaled_font_size()*(-1 if event.direction == gtk.gdk.SCROLL_UP else 1))
return True
def set_row_colour(self, c, rgb) :
for a in self.get_row(c) :
a.get_entity().set_rgb_colour(rgb)
self.row_colours[c] = rgb
def set_col_colour(self, c, rgb) :
for a in self.get_col(c) :
a.get_entity().set_rgb_colour(rgb)
self.col_colours[c] = rgb
default_cell_border = None
def set_default_cell_border(self, attr = None) :
self.set_p('default_cell_border', attr)
self.default_cell_border = attr
def add_cell(self, r, c) :
a = self.cell_class(self)
self.append(a, row=r, col=c)
if c in self.col_colours : a.set_rgb_colour(self.col_colours[c])
if r in self.row_colours : a.set_rgb_colour(self.row_colours[r])
if c not in self.col_border : self.col_border[c] = [None, None]
if r not in self.row_border : self.row_border[r] = [None, None]
a.set_padding_all(self.get_cell_padding())
if c+1 in self.col_offsets and self.col_offsets[c+1] is not None :
a.line_length = self.col_offsets[c+1]
a.set_deletable(2)
if r not in self.cells :
self.cells[r] = {}
self.cells[r][c] = a
return a
def set_outside_border(self, width = 1.0, colour = (0.0, 0.0, 0.0)) :
self.set_outside_border_p({'width':width, 'colour':colour} if width is not None else None)
def set_col_border_left(self, col, width = 1.0, colour = (0.0, 0.0, 0.0)) :
self.col_border[col][0] = {'width':width, 'colour':colour}
def set_col_border_right(self, col, width = 1.0, colour = (0.0, 0.0, 0.0)) :
self.col_border[col][1] = {'width':width, 'colour':colour}
def set_row_border_top(self, row, width = 1.0, colour = (0.0, 0.0, 0.0)) :
self.row_border[row][0] = {'width':width, 'colour':colour}
def set_row_border_bottom(self, row, width = 1.0, colour = (0.0, 0.0, 0.0)) :
self.row_border[row][1] = {'width':width, 'colour':colour}
def get_current_active_cell(self) :
for i in self.rows :
for j in self.cols :
if j in self.cells[i] and self.ij(i, j).child_active :
return i, j
return None
def recalc_bbox(self, quiet = False, enact = True, realign = True,
sub_pos_search_dir = None, compare_rows = False,
in_feed_chain = False, do_reset = True) :
chg = GlypherPhraseGroup.recalc_bbox(self, quiet, enact, realign,
sub_pos_search_dir, compare_rows,
in_feed_chain,
do_reset=do_reset)
if enact :
self.config[0].bbox[0] -= self.scroll_offset[0]
self.config[0].bbox[1] -= self.scroll_offset[1]
self.config[0].bbox[2] -= self.scroll_offset[0]
self.config[0].bbox[3] -= self.scroll_offset[1]
self.set_proportional_height(self.get_height())
self.set_proportional_width(self.get_width())
md = self.get_max_dimensions()
if md :
self.config[0].bbox[2] = min(self.config[0].bbox[0]+md[0], self.config[0].bbox[2])
self.config[0].bbox[3] = min(self.config[0].bbox[1]+md[1], self.config[0].bbox[3])
md = self.get_min_dimensions()
if md :
self.config[0].bbox[2] = max(self.config[0].bbox[0]+md[0], self.config[0].bbox[2])
self.config[0].bbox[3] = max(self.config[0].bbox[1]+md[1], self.config[0].bbox[3])
self.set_proportional_height(self.get_proportional_height()/self.get_height() if self.get_height() != 0 else 0)
self.set_proportional_width(self.get_proportional_width()/self.get_width() if self.get_width() != 0 else 0)
self.recalc_basebox()
self.feed_up(quiet=quiet, in_feed_chain=in_feed_chain)
return chg
def process_key(self, name, event, caret) :
mask = event.state
m_control = bool(mask & gtk.gdk.CONTROL_MASK)
present = self.get_current_active_cell()
if name == 'Right' or name == 'Left' :
self.suspend_recommending()
if name == 'Right' :
new_col = self.col_range()[0]+1
if present is not None :
new_col = present[1]+1
else :
new_col = self.col_range()[1]
if present is not None :
new_col = 0
c = self.add_table_column(new_col)
self.resume_recommending()
if present is not None :
self.set_recommending(self.ij(present[0], c))
elif name == 'Down' or name == 'Up':
self.suspend_recommending()
if name == 'Down' :
new_row = self.row_range()[0]+1
if present is not None :
new_row = present[0]+1
else :
new_row = self.row_range()[1]
if present is not None :
new_row = present[0]
r = self.add_table_row(new_row)
self.resume_recommending()
if present is not None :
self.set_recommending(self.ij(r, present[1]))
else :
return GlypherPhraseGroup.process_key(self, name, event, caret)
return True
def add_table_row(self, r) :
w,u = self.row_range()
for c in self.cols :
self.add_cell(w+1, c)
new_row = self.cells[w+1]
for x in range(r, w+1) :
self.cells[w+1+r-x] = self.cells[w+r-x]
for x in range(r, w+2) :
row_list = self.get_row(w+1+r-x)
for cell_cfg in row_list :
cell_cfg.row += 1
for cell_cfg in self.get_row(w+2) :
cell_cfg.row = r
self.cells[r] = new_row
self.recalc_bbox()
return r
def add_table_column(self, c) :
w,u = self.col_range()
new_col = {}
for r in self.rows :
self.add_cell(r, w+1)
new_col[r] = self.cells[r][w+1]
for x in range(c, w+1) :
self.cells[r][w+1+c-x] = self.cells[r][w+c-x]
for x in range(c, w+2) :
col_list = self.get_col(w+1+c-x)
for cell_cfg in col_list :
cell_cfg.col += 1
for cell_cfg in self.get_col(w+2) :
cell_cfg.col = c
for r in self.rows :
self.cells[r][c] = new_col[r]
self.recalc_bbox()
return c
def draw(self, cr) :
if not self.get_visible() or self.get_blank() : return
cr.save()
cr.new_path()
cr.set_source_rgba(0.0,0.0,0.0,0.2)
wmax = self.config[0].bbox[2]-self.config[0].bbox[0]
wmin = self.config[0].bbox[3]-self.config[0].bbox[1]
if self.get_max_dimensions() is not None :
wmax = min(wmax, self.get_max_dimensions()[0])
wmin = min(wmin, self.get_max_dimensions()[1])
cr.rectangle(self.config[0].bbox[0], self.config[0].bbox[1], wmax, wmin)
cr.clip()
GlypherPhraseGroup.draw(self, cr)
if self.get_outside_border() is not None :
cr.save()
cr.set_line_width(self.get_outside_border()['width'])
cr.set_source_rgba(*self.get_outside_border()['colour'])
cr.rectangle(self.config[0].bbox[0], self.config[0].bbox[1],
wmax, wmin)
cr.stroke()
cr.restore()
cr.save()
for r in self.rows :
if len(self.get_row(r)) == 0 :
continue
b = self.row_border[r][0]
if b is None :
b = self.default_cell_border
if b is not None :
cr.set_line_width(b['width'])
cr.set_source_rgba(*b['colour'])
cr.move_to(self.config[0].bbox[0], self.row_bboxes[r][1])
cr.line_to(self.config[0].bbox[2], self.row_bboxes[r][1])
cr.stroke()
b = self.row_border[r][1]
if b is None :
b = self.default_cell_border
if b is not None :
cr.set_line_width(b['width'])
cr.set_source_rgba(*b['colour'])
cr.move_to(self.config[0].bbox[0], self.row_bboxes[r][3])
cr.line_to(self.config[0].bbox[2], self.row_bboxes[r][3])
cr.stroke()
#for c in self.cols :
# if len(self.get_row(c)) == 0 :
# continue
# b = self.col_border[c][0]
# if b is None :
# b = self.default_cell_border
# if b is not None :
# cr.set_line_width(b['width'])
# cr.set_source_rgba(*b['colour'])
# cr.move_to(self.config[0].bbox[1], self.row_bboxes[r][0])
# cr.line_to(self.config[0].bbox[3], self.row_bboxes[r][0])
# cr.stroke()
# b = self.col_border[r][1]
# if b is not None :
# cr.set_line_width(b['width'])
# cr.set_source_rgba(*b['colour'])
# cr.move_to(self.config[0].bbox[1], self.row_bboxes[r][2])
# cr.line_to(self.config[0].bbox[3], self.row_bboxes[r][2])
# cr.stroke()
for c in self.cols :
if len(self.get_col(c)) == 0 :
continue
b = self.col_border[c][0]
if b is None :
b = self.default_cell_border
if b is not None :
cr.set_line_width(b['width'])
cr.set_source_rgba(*b['colour'])
cr.move_to(self.col_bboxes[c][0], self.config[0].bbox[1])
cr.line_to(self.col_bboxes[c][0], self.config[0].bbox[3])
cr.stroke()
b = self.col_border[c][1]
if b is None :
b = self.default_cell_border
if b is not None :
cr.set_line_width(b['width'])
cr.set_source_rgba(*b['colour'])
cr.move_to(self.col_bboxes[c][2], self.config[0].bbox[1])
cr.line_to(self.col_bboxes[c][2], self.config[0].bbox[3])
cr.stroke()
cr.restore()
cr.restore()
class GlypherMatrixCell(GlypherTableCell) :
_am_zero = False
def __init__(self, parent, area = (0,0,0,0), auto_copy_paste_contents = False) :
GlypherTableCell.__init__(self, parent, area, auto_copy_paste_contents)
self.mes.append('matrix_cell')
def decorate(self, cr) :
self.draw_topbaseline(cr)
if not self.get_visible() : return
if self.child_active :
cr.save()
cr.set_line_width(2.0)
bbox = self.config[0].get_bbox()
cr.rectangle(bbox[0]-2, bbox[1]-2, bbox[2]-bbox[0]+4, bbox[3]-bbox[1]+4)
cr.set_source_rgba(0.0, 0.5, 0.0, 0.2)
if g.stroke_mode :
cr.fill_preserve()
cr.set_source_rgba(0.0, 0.5, 0.0, 1.0)
cr.stroke()
else : cr.fill()
cr.restore()
def draw(self, cr) :
if self.child_active or not (self._am_zero and g.zeros_mode) :
return GlypherPhrase.draw(self, cr)
def child_change(self) :
self._am_zero = (self.to_string() == u'0')
return GlypherPhrase.child_change(self)
def get_sympy(self) :
if len(self.get_entities()) == 0 :
return 0
return GlypherPhrase.get_sympy(self)
class GlypherDictionary(GlypherTable) :
def __init__(self, d, parent, border = 0, cell_padding = 10) :
GlypherTable.__init__(self, parent, first_col=None, border=border, cell_padding=cell_padding, add_first_cell=False)
n = 0
for key in d :
a = self.add_cell(n,0)
a.append(key)
a = self.add_cell(n,1)
a.append(d[key])
n += 1
self.set_col_colour(0, (0.4, 0.4, 0.8))
class GlypherList(GlypherTable) :
def __init__(self, li, parent, wrap=4, border = 0, cell_padding = 10) :
GlypherTable.__init__(self, parent, first_col=None, border=border, cell_padding=cell_padding, add_first_cell=False)
self.set_outside_border( width=1., colour=(0.0,0.0,0.0) )
self.set_default_cell_border( {'width':1., 'colour':(0.8,0.8,0.8)} )
n = 0
row = 0
col = 0
for entry in li :
a = self.add_cell(row, col)
a.append(entry)
n += 1
col += 1
if n % wrap == 0 :
row += 1
col = 0
    def get_sympy(self) :
        return [cell.get_sympy() for cell in self.get_col(0)]
class GlypherMatrix(GlypherPhraseGroup) :
table = None
toolbox = {'symbol' : '[ ]',
'category' : 'Matrices',
'shortcut' : None,
'alternatives' : None,
'priority' : 0 }
def get_pow_options(self) :
return ('python', 'elementwise')
def get_dims(self) :
return self.table.get_dims()
def ij(self, i, j) :
return self.table.ij(i,j)
def get_xml(self, name=None, top=True, targets=None, full=False) :
root = GlypherPhraseGroup.get_xml(self, name, top, targets, full)
self.table._xml_table(root)
return root
@classmethod
def parse_element(cls, parent, root, names, targets, operands, recommending, lead,
add_entities, am = None, top = True, args=None) :
rows = int(root.get('rows'))
cols = int(root.get('cols'))
matrix = GlypherMatrix(parent, rows, cols)
for cell in root.find('cells'):
c = Parser.parse_element(matrix, cell, names, targets, operands,
recommending, lead, add_entities, top=False)
matrix.ij(int(cell.get('table_row')),
int(cell.get('table_col'))).adopt(c)
matrix.recalc_bbox()
return matrix
@classmethod
def from_python_lists(cls, parent, py_lists) :
matrix = cls(parent, rows=len(py_lists), cols=len(py_lists[0]))
for i in range(0, len(py_lists)) :
for j in range(0, len(py_lists[i])) :
matrix.ij(i,j).adopt(py_lists[i][j])
return matrix
def __init__(self, parent, rows = 1, cols = 1) :
GlypherPhraseGroup.__init__(self, parent)
self.mes.append('matrix')
# Set up an outer BracketedPhrase
bracketed_phrase = GlypherBracketedPhrase(parent, auto=False)
bracketed_phrase.set_attachable(False)
bracketed_phrase.set_enterable(False)
bracketed_phrase.set_deletable(3)
self.adopt(bracketed_phrase)
# Make a Table
table = GlypherTable(parent, add_first_cell=False,
cell_class=GlypherMatrixCell)
table.set_cell_padding(3)
#table.set_default_cell_border({'width':1.0,'colour':(0.6,0.8,0.6)})
for i in range(0, rows) :
for j in range(0, cols) :
table.add_cell(i, j)
table.set_p('align_as_entity', True)
table.set_attachable(False)
table.set_deletable(3)
bracketed_phrase.adopt(table)
self.table = table
def get_sympy(self) :
'''Return a sympy matrix.'''
table = self.table
py_mat = [ [cell_cfg.get_entity().get_sympy() for cell_cfg in table.get_row(r)] \
for r in table.rows ]
return matrices.Matrix(py_mat)
g.add_phrasegroup_by_class('matrix', GlypherMatrix)
g.add_phrasegroup_by_class('table', GlypherTable)
g.add_phrasegroup_by_class('table_cell', GlypherTableCell)
g.add_phrasegroup_by_class('matrix_cell', GlypherMatrixCell)
|
Aesthete
|
/Aesthete-0.4.2.tar.gz/Aesthete-0.4.2/aesthete/glypher/Table.py
|
Table.py
|
import gtk
from Spacer import *
def symbol(sp, c) :
togg = gtk.ToggleButton("BBox to Ink")
togg.set_active(sp.get_ink())
togg.connect("toggled", lambda tb : (\
sp.set_ink(tb.get_active()), \
sp.recalc_bbox(), \
tb.set_active(sp.get_ink())))
togg.show_all()
return togg
def space(sp, c) :
hbox = gtk.HBox()
hbox.pack_start(gtk.Label("H"), False)
h_entr = gtk.Entry()
h_entr.set_text(str(sp.get_dims()[0]))
h_entr.connect("activate", lambda e : sp.set_dims((float(e.get_text()), sp.get_dims()[1])))
hbox.pack_start(h_entr)
v_entr = gtk.Entry()
v_entr.set_text(str(sp.get_dims()[1]))
v_entr.connect("activate", lambda e : sp.set_dims((sp.get_dims()[0], float(e.get_text()))))
hbox.pack_start(gtk.Label("V"), False)
hbox.pack_start(v_entr)
hbox.show_all()
return hbox
def phrase(sp, c) :
vbox = gtk.VBox()
hbox = gtk.HBox()
hbox.pack_start(gtk.Label("Row"), False)
row_entr = gtk.Entry()
row_entr.set_text('0')
hbox.pack_start(row_entr)
hbox.pack_start(gtk.Label("Align"), False)
al_entr = gtk.Entry()
al_entr.set_text(sp.row_aligns[0])
hbox.pack_start(al_entr)
vbox.pack_start(hbox, False)
set_row_al_butt = gtk.Button("Set row align")
set_row_al_butt.connect("clicked", lambda o : sp.set_row_align(int(row_entr.get_text()), al_entr.get_text()))
vbox.pack_start(set_row_al_butt, False)
vbox.show_all()
return vbox
def vertical_line(sp, c) :
tie_to_butt = gtk.Button("Tie to sel")
tie_to_butt.connect("clicked",
lambda butt : sp.set_tied_to(
c.get_selected()[0] \
if len(c.get_selected())>0 else \
None))
tie_to_butt.show_all()
return tie_to_butt
def horizontal_line(sp, c) :
tie_to_butt = gtk.Button("Tie to sel")
tie_to_butt.connect("clicked",
lambda butt : sp.set_tied_to(
c.get_selected()[0] \
if len(c.get_selected())>0 else \
None))
tie_to_butt.show_all()
return tie_to_butt
config_widgets = {\
"space" : space,
"symbol" : symbol,
"phrase" : phrase,
"horizontal_line" : horizontal_line,
"vertical_line" : vertical_line
}
def get_config_widget(ty, ent, caret) :
return config_widgets[ty](ent, caret) if ty in config_widgets else None
|
Aesthete
|
/Aesthete-0.4.2.tar.gz/Aesthete-0.4.2/aesthete/glypher/ConfigWidgets.py
|
ConfigWidgets.py
|
from Symbol import *
import traceback
import gtk
from Phrase import *
from Word import make_word
import Parser
class GlypherBox(GlypherPhrase) :
colour = None
anchor = None
attached_to = None
caret = None
def __init__(self, phrase, colour = (0.9, 0.8, 0.6), anchor = None,
attached_to = None, caret = None, global_offset=(0,0)) :
GlypherPhrase.__init__(self, parent=None)
self.mes.append('box')
self.adopt(phrase)
self.colour = colour
self.global_offset = global_offset
if anchor is not None :
self.move_to(*anchor)
            if self.config[0].bbox[0] != anchor[0] or self.config[0].bbox[1] != anchor[1]:
                debug_print(anchor)
                quit()
self.anchor = anchor
elif attached_to is not None :
self.attached_to = attached_to
elif caret is not None :
self.caret = caret
#else :
# raise(RuntimeError("Tried to create Box without specifying location or attached_to"))
self.cast()
debug_print(self.anchor)
def cast(self) :
a = None
if self.attached_to is not None :
x, y = self.attached_to.get_caret_position()
#x -= self.global_offset[0]
#y -= self.global_offset[1]
#x += self.attached_to.get_width()
y += self.attached_to.get_height()
debug_print(self.attached_to.format_me())
a = (x+10, y)
elif self.caret is not None :
a = self.caret.position
debug_print(a)
debug_print(self.anchor)
if a is not None and a != self.anchor:
self.move_to(*a)
debug_print(self.config[0].bbox)
self.anchor = a
return
def draw(self, cr) :
self.cast()
bb = self.config[0].get_bbox()
c = self.colour
draw.draw_box(cr, c, bb)
debug_print(self.config[0].bbox)
debug_print(self.anchor)
GlypherPhrase.draw(self, cr)
class GlypherWidgetBox(GlypherBox) :
gw = None
widget = None
def destroy(self) :
self.caret.boxes.remove(self)
self.caret.return_focus()
self.widget.get_parent().remove(self.widget)
del self
def __init__(self, widget, widget_parent, caret = None, attached_to = None, box_colour = (0.9, 0.8, 0.6)) :
self.widget = widget
self.caret = caret
self.gw = GlypherWidget(None, widget, widget_parent, self,
caret.glypher.position)
faded = map(lambda c: 1-(1-c)*0.2, box_colour)
self.gw.ebox.modify_bg(gtk.STATE_NORMAL, gtk.gdk.Color(*faded))
GlypherBox.__init__(self, self.gw, caret=caret, attached_to=attached_to,
colour=box_colour, global_offset=caret.position)
self.mes.append('widget_box')
self.widget.grab_focus()
class GlypherLabelBox(GlypherWidgetBox) :
labl = None
def __init__(self, text, widget_parent, caret = None, attached_to = None, box_colour = (0.9, 0.8, 0.6)) :
self.labl = gtk.Label(text)
self.labl.set_line_wrap(True)
self.labl.set_size_request(200, -1)
self.labl.set_alignment(1.0, 1.0)
GlypherWidgetBox.__init__(self, self.labl, widget_parent, caret=caret, attached_to=attached_to, box_colour=box_colour)
class GlypherSymbolShortener(GlypherWidgetBox) :
sym_entry = None
def __init__(self, widget_parent, caret, box_colour = (0.9, 0.8, 0.6)) :
hbox = gtk.HBox(False, 4)
hbox.pack_start(gtk.Label("Symbol"), False)
sym_entry = gtk.Entry(); sym_entry.set_size_request(30, -1)
self.sym_entry = sym_entry
hbox.pack_start(sym_entry)
hbox.pack_start(gtk.Label("Trigger text"), False)
trigger_entry = gtk.Entry()
hbox.pack_start(trigger_entry)
GlypherWidgetBox.__init__(self, hbox, widget_parent, caret=caret, box_colour=box_colour)
sym_entry.grab_focus()
trigger_entry.connect('activate', self.do_trigger_entry_activate)
trigger_entry.connect('key-press-event', \
lambda w, e : self.destroy() if gtk.gdk.keyval_name(e.keyval) == 'Escape' else 0)
sym_entry.connect('key-press-event', \
lambda w, e : self.destroy() if gtk.gdk.keyval_name(e.keyval) == 'Escape' else 0)
def do_trigger_entry_activate(self, entry) :
ue = unicode(entry.get_text())
if ue == '' and ue in g.combinations :
del g.combinations[ue]
else :
g.combinations[ue] = unicode(self.sym_entry.get_text())
l = make_word(ue, self.caret.phrased_to)
self.caret.insert_entity(l)
self.destroy()
class GlypherEntry(GlypherWidgetBox) :
entry = None
gw = None
caret = None
def __init__(self, widget_parent, caret, box_colour = (0.9, 0.8, 0.6)) :
self.entry = gtk.Entry()
GlypherWidgetBox.__init__(self, self.entry, widget_parent, caret=caret, box_colour=box_colour)
self.mes.append('TeX_entry')
self.caret = caret
self.wrong_colour = (1.0, 0.5, 0.5)
self.entry.connect('activate', self.do_entry_activate)
self.entry.connect('key-press-event', \
lambda w, e : self.destroy() if gtk.gdk.keyval_name(e.keyval) == 'Escape' else 0)
def submit(self) :
t = self.entry.get_text()
debug_print(t)
l = make_word(t, self.caret.phrased_to)
self.caret.insert_entity(l)
return True
def do_entry_activate(self, entry) :
if self.submit() : self.destroy()
else :
self.entry.modify_text(gtk.STATE_NORMAL, gtk.gdk.Color(*self.wrong_colour))
class GlypherTeXEntry(GlypherEntry) :
def __init__(self, widget_parent, caret) :
GlypherEntry.__init__(self, widget_parent, caret, box_colour = (0.9, 0.5, 0.3))
def submit(self) :
t = Parser.get_name_from_latex(self.entry.get_text())
if t is not None :
try :
debug_print(t)
self.caret.insert_named(t)
return True
except RuntimeError :
debug_print(Parser.latex_to_name)
return False
t = Parser.get_shape_from_latex(self.entry.get_text())
if t is not None :
self.caret.insert_shape(t)
return True
return False
class GlypherWidget(GlypherEntity) :
widget = None
ebox = None
def __init__(self, parent, widget, widget_parent, box, global_offset) :
GlypherEntity.__init__(self, parent)
self.add_properties({'local_space' : True})
self.widget = widget
self.box = box
self.offset = global_offset
#widget.grab_focus()
self.ebox = gtk.EventBox()
#widget.modify_bg(0, gtk.gdk.Color(1,1,1))
e = self.ebox
#e.set_size_request(100, 50)
e.set_events(gtk.gdk.ALL_EVENTS_MASK)
e.connect("button-press-event", lambda w, e : debug_print(e))
#sc = e.get_screen()
#e.set_colormap(sc.get_rgba_colormap())
#e.set_app_paintable(True)
e.add(widget)
widget_parent.put(e, 0, 0)
#e.window.set_composited(True)
al = e.get_size_request()
debug_print(al)
m = e.size_request()
self.ref_width = m[0]#al.height
self.ref_height = m[1]#al.width
self.recalc_bbox()
r = self._get_rect(None)
#e.window.move(self.config[0].bbox[0], self.config[0].bbox[1])
e.size_allocate(r)
self.first_move = False
widget_parent.move(e, 0, 0)
debug_print(widget.get_allocation())
#debug_print(widget.window.get_geometry())
#debug_print(widget.window.get_frame_extents())
debug_print(self.config[0].bbox)
e.set_visible(False)
#widget.grab_focus()
def _get_rect(self, cr) :
#x, y = self.get_local_offset()
x, y = (0,0)
#x = self.offset[0]
#y = self.offset[1]
#y += self.ref_height
if cr is not None :
self.box.cast()
x, y = self.box.get_local_offset()
x += self.box.config[0].bbox[0]
y += self.box.config[0].bbox[1]
x, y = cr.user_to_device(x, y)
w, h = (self.ref_width, self.ref_height)
if cr is not None :
w, h = cr.user_to_device_distance(w, h)
#y -= self.ref_height
#return gtk.gdk.Rectangle(int(x), int(y-w), int(h), int(w))
return gtk.gdk.Rectangle(int(x), int(y), int(w), int(h))
def _move_ebox(self, cr=None) :
e = self.ebox
a = e.get_allocation()
e.show_all()
m = e.size_request()
if cr is not None :
m = cr.device_to_user_distance(*m)
self.ref_width = m[0]#al.height
self.ref_height = m[1]#al.width
r = self._rect
debug_print(r)
r1 = self._get_rect(cr)
if cr is not None :
debug_print(r1)
debug_print(self.box.anchor)
debug_print_stack()
if e.allocation.x != r1.x or e.allocation.y != r1.y :
e.get_parent().move(e, r1.x, r1.y)
e.show_all()
self.recalc_bbox()
x = None
y = None
_rect = None
def draw(self, cr) :
#if self._rect != self._get_rect(cr) :
self._rect = self._get_rect(cr)
self._move_ebox(cr)
self.ebox.set_visible(True)
#cr.save()
#e = self.ebox
#a = e.get_allocation()
#if a.x != int(self.config[0].bbox[0]) or \
# a.y != int(self.config[0].bbox[1]) :
#def process_key(self, name, event, caret) :
# if not self.widget.has_focus() : return
# return self.ebox.event(event)
def process_button_release(self, event) :
#self.widget.grab_focus()
#return self.widget.has_focus()
#self.widget.do_button_release_event(self.widget, event)
#return True if self.widget.event(event) else None
return None
def process_button_press(self, event) :
#self.widget.grab_focus()
#return self.widget.has_focus()
#self.widget.do_button_press_event(self.widget, event)
#return True if self.widget.event(event) else None
return None
def process_scroll(self, event) :
self.widget.grab_focus()
return None
#return True if self.widget.event(event) else None
class GlypherAltBox(GlypherBox) :
alts = None
alts_syms = None
alts_phrase = None
anchor = (0,0)
def __init__(self, alts) :
self.alts = alts
self.alts_phrase = GlypherPhrase(None)
GlypherBox.__init__(self, self.alts_phrase)
self.alts_phrase.mes.append('altbox_phrase')
self.cast()
def cast(self) :
n = 0
self.alts_syms = {}
self.alts_phrase.empty()
for alt in self.alts :
if isinstance(alt, GlypherEntity) and alt.included() : raise(RuntimeError, alt.format_me())
for alt in self.alts :
if isinstance(alt, GlypherEntity) :
ns = alt
else :
ns = GlypherSymbol(None, str(alt), ink=True)
self.alts_syms[alt] = ns
self.alts_phrase.append(ns, row=n)
self.alts_phrase.set_row_align(n, 'c')
ns.set_padding_all(4)
n -= 1
self.anchor = (0,0) # (self.alts_phrase.config[0].bbox[0]-20, self.alts_phrase.config[0].bbox[1])
self.translate(-self.config[0].bbox[0], -self.config[0].bbox[1])
def draw(self, cr, anchor, size, rgb_colour, active=None) :
if anchor != self.anchor :
self.anchor = (0,0)
if size != self.alts_phrase.get_font_size() :
self.set_font_size(size)
if rgb_colour != self.alts_phrase.get_rgb_colour() :
self.set_rgb_colour(rgb_colour)
GlypherBox.draw(self, cr)
if active and active in self.alts_syms :
cr.save()
bbp = self.alts_syms[active].config[0].get_bbox()
bbs = self.alts_syms[active].config[0].get_bbox()
cr.set_source_rgba(0.9, 0.8, 0.6)
mp = 0.5*(bbs[1]+bbs[3])
cr.move_to(bbp[0] - 16, mp-4)
cr.line_to(bbp[0] - 10, mp)
cr.line_to(bbp[0] - 16, mp+4)
cr.close_path()
cr.fill_preserve()
cr.set_line_width(2)
cr.set_source_rgb(0.8,0.6,0.2)
cr.stroke()
cr.restore()
class GlypherAlternativesPhrase(GlypherPhrase) :
active_child = None
def __init__(self, parent, area = (0,0,0,0), line_size_coeff = 1.0, font_size_coeff = 1.0, align = ('l','m'), auto_fices = False) :
GlypherPhrase.__init__(self, parent, area, line_size_coeff, font_size_coeff, align, auto_fices)
self.mes.append('alts_phrase')
self.set_enterable(False)
self.set_attachable(True)
self.set_have_alternatives(True)
self.altbox = GlypherAltBox([])
#self.characteristics.append('_in_phrase')
def decorate(self, cr) :
hl_anc = None
# If this is in an unhighlighted highlight group, don't show it, otherwise if the first highlighted group is
# above it, show it
for anc in self.get_ancestors() :
if anc.am('phrasegroup') :
if anc.first_highlighted_pg_over_active : hl_anc = anc; break
#else : hl_anc = None; break
elif anc.highlight_group : hl_anc = None; break
if not hl_anc : return
cr.save()
bb = self.config[0].get_bbox()
cr.move_to(bb[2]-2, bb[1]-3)
cr.line_to(bb[2]+3, bb[1]-3)
cr.line_to(bb[2]+3, bb[1]+2)
cr.close_path()
cr.set_source_rgba(0.0, 1.0, 0.0, 0.5)
cr.fill_preserve()
cr.set_source_rgba(0.0, 0.5, 0.0, 1.0)
cr.stroke()
cr.restore()
def child_change(self) :
GlypherPhrase.child_change(self)
self.cast()
for child in self.entities :
if child != self.active_child and child.get_visible() :
child.hide()
if len(self.entities)>0 :
if not self.active_child :
self.active_child = self.entities[0]
self.active_child.show()
elif not self.active_child.get_visible() :
self.active_child.show()
def cast(self) :
self.altbox.alts = copy.deepcopy(self.entities)
alist = list(self.altbox.alts)
for alt in alist :
alt.set_parent(None)
if alt.included() : raise(RuntimeError, str(alt.format_me()))
self.altbox.cast()
def next_alternative(self) :
alts = self.entities
if self.active_child == None : return
ind = alts.index(self.active_child)
        self.active_child = alts[(ind + 1) % len(alts)]
self.child_change()
def prev_alternative(self) :
alts = self.entities
if self.active_child == None : return
ind = alts.index(self.active_child)
self.active_child = alts[(len(alts) + ind - 1)%len(alts)]
self.child_change()
def draw_alternatives(self, cr) :
if not self.get_visible() : return
altbox = self.altbox
altbox.draw(cr, anchor=(self.config[0].bbox[2], self.config[0].bbox[1]),\
size=self.get_scaled_font_size(), rgb_colour=self.get_rgb_colour(), active=self.active_child)
self.draw(cr)
def set_alternative(self, child) :
if child not in self.entities : return
self.active_child = child
self.child_change()
def set_alternative_by_name(self, name) :
named = filter(lambda e : e.get_name() == name, self.entities)
if len(named) == 0 : return
self.set_alternative(named[0])
ref_alts_phrase = None
def make_alts_phrase () :
global ref_alts_phrase
if ref_alts_phrase is None :
ref_alts_phrase = GlypherAlternativesPhrase(None)
return copy.deepcopy(ref_alts_phrase)
|
Aesthete
|
/Aesthete-0.4.2.tar.gz/Aesthete-0.4.2/aesthete/glypher/Alternatives.py
|
Alternatives.py
|
import gtk
import pango
from aobject.paths import get_share_location
import math
import lxml.etree as ET
from aobject.paths import *
from aobject.utils import *
from Parser import *
from operator import itemgetter
class GradientLabel(gtk.DrawingArea) :
__gsignals__ = { "expose-event": "override" }
def __init__(self, text, down_arrow=False) :
gtk.DrawingArea.__init__(self)
self.add_events(gtk.gdk.BUTTON_PRESS_MASK)
self.add_events(gtk.gdk.BUTTON_RELEASE_MASK)
self.text = text
self.set_size_request(-1, 30)
self.down_arrow = down_arrow
def set_text(self, text) :
self.text = text
self.queue_draw()
def do_expose_event(self, event) :
a = self.get_allocation()
x = a.x
y = a.y
w = a.width
h = a.height
x = 0
y = 0
cr = self.window.cairo_create()
cr.rectangle(event.area.x, event.area.y,
event.area.width, event.area.height)
cr.clip()
colour = (0.7, 0.7, 0.7)
cr.save()
cr.set_source_rgb(*colour)
cr.move_to(x, y)
cr.rel_line_to(0, h)
cr.rel_line_to(w, 0)
cr.rel_line_to(0, -h)
cr.rel_line_to(-w, 0)
cr.close_path()
blush_grad = cairo.LinearGradient(x+w/2,y,x+w/2,y+h)
colour = list(colour)+[1]
blush_grad.add_color_stop_rgba(0, *colour)
colour[3] = 0
blush_grad.add_color_stop_rgba(1, *colour)
cr.set_source(blush_grad)
cr.fill()
cr.select_font_face("Sans", cairo.FONT_SLANT_NORMAL, cairo.FONT_WEIGHT_BOLD)
cr.set_font_size(13)
cr.set_source_rgba(0.5,0.5,0.5,1.0)
txb, tyb, tw, th, txa, tya = cr.text_extents(self.text)
if self.down_arrow :
cr.move_to(x,y+h/2)
cr.show_text(u'\u21D5')
cr.move_to(x+w/2-tw/2,y+h/2)
cr.show_text(self.text)
cr.restore()
# Tool tuple description using "field [to_ignore1 or to_ignore2]"
# (unicode symbol, name, shortcut (or None), properties [omittable or None], entity [omittable or False], priority [omittable or 0])
toolbar_dictionary = {\
'Functions' : ( \
(u'f', 'function', 'Alt+f', None, False, 0), \
(u'\u2111', 'im', None, None, False, 0), \
(u'\u211C', 're', None, None, False, 0), \
(u'Ylm', 'Ylm', None, None, False, 0), \
(u'\u0393', 'gamma', None, None, False, 0), \
(u'logn', 'logn', None, None, False, 0), \
),
'Sets' : (
(u'(,)', 'interval', None, None, False, 0),
(u'\u2205', 'empty_set', None, None, False, 0),
(u'\u2102', 'complexC', None, None, False, 0), \
(u'\u211A', 'rationalQ', None, None, False, 0), \
(u'\u211D', 'realR', None, None, False, 0), \
),
'Calculus' : ( \
(u'\u222B', 'integral', 'Alt+i', None, False, 0), \
(u'd/d', 'derivative', 'Alt+d', None, False, 0) \
),
'Utilities' : ( \
(u'\u2637', 'table', 'Alt+T', None, False, 0),
)
}
# Needs later version of sympy (and Interpret impl)
# (u'{,}', 'finite_set', None),
# 'Bessel' : ( \
# (u'J\u03BD', 'bessel_j', None) \
# ),
expanded_toolbar_dictionary = {\
'Utilities' : ( \
(u'\u2318', 'phrasegroup', None, {'enterable':True}, False, 0), \
(u'\u2311', 'phrase', None, None, False, 0), \
(u'()', 'bracketed_phrase', None, None, False, 0), \
),
'Spacers' : ( \
(u'\u2423', 'space', None, {'attachable':True,'blank':False,'show_decorated':True}, True, 0), \
(u'\u2015', 'horizontal_line', None, None, False, 0),
(u'|', 'vertical_line', None, None, False, 0),
)
}
def add_toolbar_dictionary(d, tdict = toolbar_dictionary) :
for k in d :
tdict[k] = tdict[k] + d[k] \
if k in tdict else \
d[k]
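# Illustrative only (an assumption, not part of the original module): the tuple
# layout documented above can be fed to add_toolbar_dictionary to register an
# extra category, e.g. a hypothetical entry such as
#     add_toolbar_dictionary({'Constants': ((u'\u03c0', 'pi', None, None, False, 0),)})
# where 'pi' would have to be the name of a registered phrasegroup.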
def initialize(tdict, expanded = False) :
for name in g.phrasegroups :
pg = g.phrasegroups[name]
if hasattr(pg, 'toolbox') and pg.toolbox is not None :
sy = pg.toolbox['symbol']
ca = pg.toolbox['category']
sh = pg.toolbox['shortcut']
pr = pg.toolbox['priority']
add_toolbar_dictionary( { ca : ( \
(sy, name, sh.text if sh is not None else None, None, False, int(pr) if pr is not None else 0), ) }, tdict )
for trees in (phrasegroup_trees, user_phrasegroup_trees) :
for name in trees :
tree = trees[name]
sy = tree.find('symbol')
ca = tree.find('category')
sh = tree.find('shortcut')
pr = tree.find('priority')
if sy is not None and ca is not None :
add_toolbar_dictionary( { ca.text : ( \
(sy.text, name, sh.text if sh is not None else None, None, False, int(pr.text) if pr is not None else 0), ) }, tdict )
if expanded :
add_toolbar_dictionary(expanded_toolbar_dictionary, tdict)
def find_category_image(catname) :
catfilename = catname.replace(' ', '_').lower()
catpath = get_share_location() + 'images/icons/glypher/categories/' + catfilename + '.svg'
if os.path.exists(catpath) :
return catpath
return None
class GlyphEntBox(gtk.Table) :
caret = None
grab_entities = True
will_set_glypher_expanded_property = False
c = 3
def __init__(self, caret, tool_table, tdict, grab_entities=True, expanded=False, cols=3) :
self.caret = caret
self.tool_table = tool_table
self.c = cols
self.grab_entities = grab_entities
rs = len(tdict)/self.c + 1
gtk.Table.__init__(self, rs, self.c)
self.modify_bg(gtk.STATE_NORMAL, gtk.gdk.color_parse("white"))
k = 0
for key in sorted(tdict.keys()) :
items = sorted(tdict[key], key=itemgetter(self.order_index), reverse=True)
catfile = find_category_image(key)
if catfile is None :
menu_toob = gtk.Button(items[0][0] + ' ...')
else :
menu_toob = gtk.Button()
pixbuf = gtk.gdk.pixbuf_new_from_file_at_size(catfile, 24, 24)
menu_imag = gtk.Image()
menu_imag.set_from_pixbuf(pixbuf)
menu_toob.set_image(menu_imag)
menu_toob.set_tooltip_text(key)
menu_toob.set_relief(gtk.RELIEF_NONE)
menu_toob.set_size_request(50, -1)
n = 0
menu_items = []
for it in items :
it_meni = gtk.Button(it[0])
it_meni.set_relief(gtk.RELIEF_NONE)
it_meni.get_child().modify_font(pango.FontDescription("9"))
#if n == 0 : menu_toob.active_item = it_meni
#menu_menu.attach(it_meni, n, n+1, 0, 1)
self._make_it(it_meni, it)
it_meni.connect('clicked', self.do_mi_clicked)
menu_items.append(it_meni)
n += 1
r = k / self.c
c = k % self.c
self.attach(menu_toob, c, c+1, r, r+1)
menu_toob.connect('clicked', self.do_m_clicked, key, menu_items)
k += 1
def _make_it(self, it_meni, it) :
pass
def do_mi_clicked(self, it_meni) :
pass
def do_m_clicked(self, menu_toob, cat, menu_items) :
self.tool_table.show_items(cat, menu_items)
#it_meni = menu_toob.active_item
#self.do_mi_clicked(it_meni, menu_toob)
class GlyphPGBox(GlyphEntBox) :
order_index = 5
will_set_glypher_expanded_property = True
def __init__(self, caret, tool_table, grab_entities=True, expanded=False, cols=4) :
tdict = toolbar_dictionary.copy()
initialize(tdict, expanded=expanded)
GlyphEntBox.__init__(self, caret, tool_table, tdict, grab_entities, expanded, cols)
def do_mi_clicked(self, it_meni) :
gn = it_meni.glypher_name
if gn[2] :
debug_print(gn)
self.caret.insert_named_entity(gn[0], properties=gn[1])
else :
self.caret.insert_phrasegroup(gn[0], properties=gn[1], grab_entities=self.grab_entities)
self.caret.glypher.grab_focus()
def _make_it(self, it_meni, it) :
        it_meni.set_tooltip_text(it[1] + ' ' + ('[No shortcut]' if it[2] is None else it[2]) + ' ' + str(it[5]))
it_meni.glypher_name = (it[1], it[3] if len(it) > 3 else None, len(it) > 4 and it[4])
# Based on the Comprehensive LaTeX Symbol List
# Scott Pakin
#mirror.ctan.org/info/symbols/comprehensive/symbols-a4.pdf
charmap_dictionary = {\
# Table 130
'CHiNa2e Number Sets' : ( \
(u'\u2102', '\\Complex',0), \
(u'\u2124', '\\Integer',0), \
(u'\u2115', '\\Natural',0), \
(u'\u211A', '\\Rational',0), \
(u'\u211D', '\\Real',0), \
),
# Table 137
'Hebrew' : ( \
(u'\u2135', '\\aleph',0), \
(u'\u2136', '\\beth',0), \
(u'\u2137', '\\gimel',0), \
(u'\u2138', '\\daleth',0), \
),
# Table 139
'Letter-like symbols' : ( \
(u'\u2762', '\\bot',0), \
(u'\u2200', '\\forall',0), \
(u'\u2118', '\\wp',0), \
),
# Table 201
'Miscellaneous LaTeX2e Math Symbols (a)' : ( \
(u'\u2220', '\\angle',0), \
(u'\u25A1', '\\Box',0), \
(u'\u2205', '\\emptyset',0), \
(u'\u221E', '\\infty',0), \
(u'\u2127', '\\mho',0), \
(u'\u2207', '\\nabla',0), \
(u'\u00AC', '\\neg',0), \
(u'\u221A', '\\surd',0), \
(u'\u25B3', '\\triangle',0), \
),
# Table 201
'Miscellaneous LaTeX2e Math Symbols (b)' : ( \
(u'\u2663', '\\clubsuit',0), \
(u'\u25C7', '\\Diamond',0), \
(u'\u2662', '\\diamondsuit',0), \
(u'\u266D', '\\flat',0), \
(u'\u2661', '\\heartsuit',0), \
(u'\u266E', '\\natural',0), \
(u'\u266F', '\\sharp',0), \
(u'\u2660', '\\spadesuit',0), \
),
}
for table in charmap_dictionary :
table = charmap_dictionary[table]
for sym in table :
latex_to_shape[sym[1]] = sym[0]
def add_charmap_dictionary(d, tdict = charmap_dictionary) :
for k in d :
tdict[k] = tdict[k] + d[k] \
if k in tdict else \
d[k]
class GlyphFormulaMap(GlyphEntBox) :
order_index = 3
def __init__(self, caret, tool_table, grab_entities=True, expanded=False, cols=4) :
fdict = {}
for name in formula_trees :
tree = formula_trees[name]
title = tree.getroot().get('title')
if title is None :
title = name
sy = tree.find('symbol')
ca = tree.find('category')
pr = tree.find('priority')
if sy is not None and ca is not None :
add_toolbar_dictionary( { ca.text : ( \
(sy.text, name, title,
int(pr.text) if pr is not None else 0), ) }, fdict )
GlyphEntBox.__init__(self, caret, tool_table, fdict, grab_entities, expanded, cols)
def do_mi_clicked(self, it_meni) :
gn = it_meni.glypher_name
self.caret.insert_formula(gn[0])
self.caret.glypher.grab_focus()
def _make_it(self, it_meni, it) :
it_meni.set_tooltip_text(it[2])
it_meni.glypher_name = (it[1],)
class GlyphCharMap(GlyphEntBox) :
order_index = 2
def __init__(self, caret, tool_table, grab_entities=True, expanded=False, cols=4) :
tdict = charmap_dictionary.copy()
GlyphEntBox.__init__(self, caret, tool_table, tdict, grab_entities, expanded, cols)
def _make_it(self, it_meni, it) :
it_meni.set_tooltip_text(it[1])
def do_mi_clicked(self, it_meni) :
self.caret.insert_shape(it_meni.get_label())
class GlyphToolTable(gtk.VBox) :
rows = 4
cols = 4
def __init__(self) :
gtk.VBox.__init__(self)
self.table = gtk.Table(rows=self.rows, columns=self.cols, homogeneous=True)
self.gradient_label = GradientLabel('')
self.table.set_size_request(self.rows*50, self.cols*24)
self.pack_start(self.gradient_label, False)
self.pack_start(self.table)
self.show_all()
def show_items(self, cat, items) :
self.gradient_label.set_text(cat)
for child in self.table.get_children() :
self.table.remove(child)
cols = self.cols
for i in range(0, len(items)) :
self.table.attach(items[i], i%cols, i%cols+1, i/cols, i/cols+1)
self.show_all()
debug_print(items)
class GlyphToolbox(gtk.VBox) :
def __init__(self, caret, grab_entities=False, expanded=False, cols=None,
hidden=False) :
self.tool_table = GlyphToolTable()
gtk.VBox.__init__(self)
if cols is not None :
params = { 'cols' : cols }
else :
params = {}
self.entbox = GlyphPGBox(caret, self.tool_table, grab_entities,
expanded=expanded, **params)
ue_gradient_label = GradientLabel('Useful Entities', down_arrow=True)
ue_gradient_label.connect("button-release-event",
self.toggle_show,
self.entbox)
ue_gradient_label.show_all()
self.pack_start(ue_gradient_label, False)
self.pack_start(self.entbox)
self.pack_start(gtk.HSeparator())
uf_gradient_label = GradientLabel('Formulae', down_arrow=True)
self.formulamap = GlyphFormulaMap(caret, self.tool_table, grab_entities,
expanded=expanded, **params)
uf_gradient_label.connect("button-release-event", self.toggle_show,
self.formulamap)
self.pack_start(uf_gradient_label, False)
self.pack_start(self.formulamap)
self.pack_start(gtk.HSeparator())
uc_gradient_label = GradientLabel('Unicode Characters', down_arrow=True)
self.charmap = GlyphCharMap(caret, self.tool_table, grab_entities,
expanded=expanded, **params)
uc_gradient_label.connect("button-release-event", self.toggle_show, self.charmap)
self.pack_start(uc_gradient_label, False)
self.pack_start(self.charmap)
self.pack_start(self.tool_table)
self.show_all()
self.formulamap.hide()
self.charmap.hide()
if hidden :
self.entbox.hide()
def toggle_show(self, label, event, box) :
box.set_visible(not box.get_visible())
|
Aesthete
|
/Aesthete-0.4.2.tar.gz/Aesthete-0.4.2/aesthete/glypher/Toolbox.py
|
Toolbox.py
|
import glypher as g
import traceback
import Renderers
from Entity import *
class GlypherSymbol(GlypherEntity) :
title = "GlypherSymbol"
info_text = '''
    Usually a single unicode character, this is the basic graphical unit.
<i>Not to be confused with sympy Symbols</i>, which correspond more closely to
<b>Words</b>
'''
mid = None
shape = u'\u25A3'
ink = False
def alt_to_ent(self, a) :
return GlypherSymbol(self.get_parent(), a)
def get_italic(self) : return self.get_p('italic')
def set_italic(self, italic, quiet=False) :
self.set_p('italic', italic)
self.set_redraw_required()
if not quiet : self.recalc_bbox()
def get_shape(self) : return self.shape #def get_shape(self) : return self.get_p('shape')
def set_shape(self, shape) :
self.shape = unicode(shape) #def set_shape(self, shape) : self.set_p('shape', shape)
self.set_redraw_required()
def get_ink(self) : return self.ink #def get_ink(self) : return self.get_p('ink')
def set_ink(self, ink) :
self.ink = ink #def set_ink(self, ink) : self.set_p('ink', ink)
self.set_redraw_required()
def get_mid(self) : return self.mid
def set_mid(self, mid) :
self.mid = mid
self.set_redraw_required()
rendering = None
def check_combination(self, shape, go_up=True) :
cmb = self.to_string() + unicode(shape)
chg = cmb in g.combinations
if chg :
self.set_shape(g.combinations[cmb])
self.recalc_bbox()
if self.included() :
self.get_parent().child_change()
return chg
def get_xml(self, name = None, top = True, targets = None, full = False) :
root = GlypherEntity.get_xml(self, name, top)
root.set('shape', self.get_shape())
root.set('ink', str(self.get_ink()))
return root
def __init__(self, parent, shape, area = (0,0,0,0), code = None, text = None, align = ('c','m'), italic = None, bild = False, ink = False) :
GlypherEntity.__init__(self, parent)
self.add_properties({'align' : ('c', 'm'), 'italic' : g.get_default_italic_for_shape(shape),
'mid' : 0.5, 'have_alternatives' : True})
self.topline = area[1]
self.baseline = area[3]
self.set_align(align)
self.set_ink(ink)
if italic is not None :
self.set_italic(italic, True)
self.set_shape(shape)
self.mes.append('symbol')
self.set_ref_width(area[2] - area[0])
self.set_ref_height(area[3] - area[1])
self.recalc_basebox()
self.config[0].reset()
pos = (self.config[0].bbox[0], self.config[0].bbox[3])
self.old_bbox = [pos[0], pos[1], pos[0], pos[1]]
self.recalc_bbox()
def to_string(self, mode = "string") :
if not self.get_visible() : return unicode("")
elif self.get_blank() : return unicode(" ")
try :
uc = unicode(self.get_shape())
except UnicodeDecodeError :
raise(RuntimeError('Could not change ' + self.get_shape() + ' to unicode'))
return uc
def recalc_basebox(self, config = None, only_recalc_self = False) :
b = self.config[0].get_bbox()
mid = self.get_mid()
if mid is None : mid = (b[3]-b[1])*0.5
self.config[0].basebox = (b[0], (b[0]+b[2])*0.5, b[2], b[1], b[3]-mid, b[3])
def get_ink_extent(self) :
if self.get_ink() :
return None
(rh,rw,mid,delta) = self.rendering.draw(self.get_font_size()*10,\
ink=True, italic=self.get_italic(), bold=self.get_ip('bold'), font_name=self.get_ip('font_name'))
c = self.get_font_size_combined_coeff()/10.
return ( self.config[0].basebox[0]+c*delta[0],
self.config[0].basebox[2]+c*delta[1],
self.config[0].basebox[3]+c*delta[2],
self.config[0].basebox[5]+c*delta[3] )
def get_caret_position(self, inside=False) :
return GlypherEntity.get_caret_position(self, inside=inside, pos=[self.config[0].bbox[2], self.config[0].bbox[3]])
def cast(self, shape, size, pos, code = None, align = ('l','b'), italic = None, bild = False, ink = False) :
self.set_italic(g.get_default_italic_for_shape(shape) if italic is None else italic, quiet=True)
self.set_shape(shape)
self.set_align(align)
self.set_ink(ink)
self.rendering = Renderers.find_rendering(shape, code, italic=self.get_italic(), bold=self.get_ip('bold'))
(rh,rw,mid,delta) = self.rendering.draw(self.get_font_size()*10,\
ink=ink, italic=self.get_italic(), bold=self.get_ip('bold'), font_name=self.get_ip('font_name'))
#(rh,rw,mid) = self.rendering.draw(size, ink=ink)
rh *= self.get_font_size_combined_coeff()/10.
rw *= self.get_font_size_combined_coeff()/10.
mid *= self.get_font_size_combined_coeff()/10.
self.set_mid(mid)
p = self.padding
#rh += p[1]+p[3]
#rw += p[0]+p[2]
self.set_ref_width(rw)
self.set_ref_height(rh)
#if align[0] == 'l' :
# self.ref_bbox[0] = pos[0]
# self.ref_bbox[2] = pos[0] + rw
#elif align[0] == 'c' :
# self.ref_bbox[0] = pos[0] - rw/2
# self.ref_bbox[2] = pos[0] + rw/2
#else :
# self.ref_bbox[0] = pos[0] - rw
# self.ref_bbox[2] = pos[0]
#if align[1] == 'b' :
# self.ref_bbox[3] = pos[1]
# self.ref_bbox[1] = pos[1] - rh
#elif align[1] == 'm' :
# self.ref_bbox[1] = pos[1] - rh/2
# self.ref_bbox[3] = pos[1] + rh/2
#else :
# self.ref_bbox[1] = pos[1]
# self.ref_bbox[3] = pos[1] + rh
#self.ref_bbox = list(self.bbox)
def recalc_bbox(self, quiet=False, old_bbox=None) :
if self.get_shape() is None : return
self.cast(self.get_shape(), self.get_scaled_font_size(), self.get_anchor(), \
None, self.get_align(), self.get_italic(), None, self.get_ink())
#traceback.print_stack()
GlypherEntity.recalc_bbox(self, quiet=quiet)
# Sort out zero-width symbols (we really should be able to handle these)
def draw(self, cr) :
if self.draw_offset != (0,0) :
cr.save()
cr.translate(*self.draw_offset)
self.draw_topbaseline(cr, (0,1,1))
if not self.get_visible() or self.get_blank() : return
#if self.get_scaled_font_size() != self.size : #TODO: find a more efficient way of doing this
# self.size = self.get_scaled_font_size()
#self.cast(self.shape, self.get_scaled_font_size(), self.loc, self.text, None, self.align, self.italic)
#debug_print(self.format_me())
if g.show_rectangles :
colour = (0.8,0.8,0.8)
elif self.in_selected() :
colour = (0.8, 0.2, 0.2)
else :
colour = self.get_rgb_colour()
if not self.rendering :
self.cast(self.get_shape(), self.get_scaled_font_size(), self.get_anchor(), \
None, self.get_align(), self.get_italic(), None, self.get_ink())
cr.save()
cr.translate(self.config[0].bbox[0]+self.padding[0], self.config[0].bbox[1]+self.padding[1])
cr.scale(self.get_font_size_combined_coeff()/10.,
self.get_font_size_combined_coeff()/10.)
self.rendering.draw(self.get_font_size()*10, cr, self.config[0].bbox[0]+self.padding[0], self.config[0].bbox[1]+self.padding[1],\
colour=colour,\
ink=self.get_ink(), italic=self.get_italic(), bold=self.get_ip('bold'), font_name=self.get_ip('font_name'))
cr.restore()
if g.show_rectangles :
cr.save()
cr.set_source_rgba(0.5,0.5,1.0,0.2)
cr.rectangle(self.config[0].bbox[0], self.config[0].bbox[1], self.config[0].bbox[2]-self.config[0].bbox[0], self.config[0].bbox[3]-self.config[0].bbox[1])
cr.stroke()
cr.set_source_rgba(0.5, 0.0, 1.0, 0.4)
cr.move_to(self.config[0].basebox[0]-5, self.config[0].basebox[4])
cr.line_to(self.config[0].basebox[2]+5, self.config[0].basebox[4])
for i in (0,2) :
cr.move_to(self.config[0].basebox[i], self.config[0].basebox[4]-2)
cr.line_to(self.config[0].basebox[i], self.config[0].basebox[4]+2)
cr.move_to(self.config[0].basebox[0]-5, self.config[0].basebox[3])
cr.line_to(self.config[0].basebox[2]+5, self.config[0].basebox[3])
cr.move_to(self.config[0].basebox[0]-5, self.config[0].basebox[5])
cr.line_to(self.config[0].basebox[2]+5, self.config[0].basebox[5])
cr.stroke()
cr.restore()
if self.draw_offset != (0,0) :
#cr.translate(-self.draw_offset[0], -self.draw_offset[1])
cr.restore()
def change_alternative(self, dir) :
alts = g.find_alternatives(self.get_shape())
if alts == None :
if self.included() :
return self.get_parent().change_alternative(dir)
else :
return False
ind = alts[0].index(self.get_shape())
self.set_shape(alts[0][(len(alts[0]) + ind + dir)%len(alts[0])])
self.recalc_bbox()
return True
def draw_alternatives(self, cr) :
if not self.get_visible() : return
a = g.find_alternatives(self.get_shape(), generate_altbox=True)
if a is None : return
alts, altbox = (a[0], a[1])
#cr.save()
#cr.set_line_width(2.0)
#cr.set_source_rgba(1.0,1.0,1.0)
#cr.rectangle(self.config[0].bbox[0]-4, self.config[0].bbox[1]-4, self.config[0].bbox[2]-self.config[0].bbox[0]+8, self.config[0].bbox[3]-self.config[0].bbox[1]+8)
#cr.fill_preserve()
#cr.set_source_rgb(0.7,0.7,0.7)
#cr.stroke()
cr.save()
cr.translate(self.config[0].bbox[2]+self.get_local_offset()[0]+20, self.config[0].bbox[1]+self.get_local_offset()[1])
if alts != None : altbox.draw(cr, anchor=(0,0),\
size=self.get_scaled_font_size(), rgb_colour=self.get_rgb_colour(), active=self.get_shape())
cr.restore()
#self.draw(cr)
#cr.restore()
@classmethod
def parse_element(cls, parent, root, names, targets, operands, recommending, lead,
add_entities, am = None, top = True, args=None) :
shape = root.get('shape')
italic = root.get('italic')
if italic is not None :
italic = italic=='True'
ink = root.get('ink')
if ink is not None :
ink = ink=='True'
new_phrasegroup = cls(parent, shape, italic=italic,
ink=ink)
return new_phrasegroup
g.parse_element_fns['symbol'] = GlypherSymbol.parse_element
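# Illustrative sketch (never invoked): building a detached GlypherSymbol and
# exercising the shape machinery defined above. A None parent is legitimate
# here; other modules construct e.g. GlypherSymbol(None, ',') the same way.
def _symbol_usage_sketch():
    alpha = GlypherSymbol(None, u'\u03b1', italic=True)
    # to_string() returns the unicode shape; change_alternative cycles through
    # whatever row of g.alternatives (alternatives.ucd) contains that shape.
    shape_text = alpha.to_string()
    alpha.change_alternative(1)
    return alpha, shape_text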
|
Aesthete
|
/Aesthete-0.4.2.tar.gz/Aesthete-0.4.2/aesthete/glypher/Symbol.py
|
Symbol.py
|
import re
import sympy
import gtk, gobject
from sympy.core.numbers import Infinity
from sympy.core.numbers import ImaginaryUnit
try:
from sympy.polys.factortools import factor # 0.6.7
except:
from sympy.polys.polytools import factor # 0.7.1
import os
import codecs
import pkg_resources
import sys, traceback
from aobject.utils import debug_print
from aobject import paths
phrasegroups = {}
phrasegroup_alternatives = {}
parse_element_fns = {}
def add_phrasegroup_by_class(name, cls, alt_cat=None) :
phrasegroups[name] = cls
if alt_cat is not None :
if alt_cat not in phrasegroup_alternatives :
phrasegroup_alternatives[alt_cat] = []
phrasegroup_alternatives[alt_cat].append(name)
if 'parse_element' in cls.__dict__ and cls.parse_element is not None :
parse_element_fns[name] = cls.parse_element
GLYPHER_PG_LEAD_ALL = (True,True,True,True,True,True)
GLYPHER_PG_LEAD_MID = (False,True,False,False,True,False)
GLYPHER_PG_LEAD_VERT = (False,False,False,True,True,True)
GLYPHER_PG_LEAD_HORI = (True,True,True,False,False,False)
#from BinaryExpression import *
import random
import string
import Dynamic
sm_tol = 1e-5
bg_tol = 4e-2
have_pyglet = False
var_table = {}
ps_ctr = 0
stp = 72/96.0
default_line_size = 25.0
default_font_size = 10.0
default_rgb_colour = (1.0, 0.0, 0.0)
try:
sympy_ver = pkg_resources.get_distribution("sympy").version # get version of sympy
except:
sympy_ver = '0.6.7' # default to this if pkg_resources non-functional
if sympy_ver == '0.7.1':
mpmath_lib = 'mpmath'
else:
mpmath_lib = 'sympy.mpmath'
libraries = {mpmath_lib : False}
def set_library(name, mode=True) :
if Dynamic.load_library(name, not mode) :
libraries[name] = mode
set_library(mpmath_lib)
bbox_mode = False
stroke_mode = False
math_mode = True
show_rectangles = False
selected_colour = (0.5, 0.7, 0.9, 0.3)
plane_mode = False
def set_plane_mode(p) :
global plane_mode
plane_mode = p
zeros_mode = False
def set_zeros_mode(p) :
global zeros_mode
zeros_mode = p
pow_mode = True
pow_mode_force = None
diff_mode = False
diff_mode_force = None
def set_pow_mode_force(p) :
global pow_mode_force
pow_mode_force = True if p == 1 else \
(False if p == -1 else \
None)
def set_diff_mode_force(d) :
global diff_mode_force
diff_mode_force = True if d == 1 else \
(False if d == -1 else \
None)
show_all_pow_diff = False
def set_show_all_pow_diff(s) :
global show_all_pow_diff
show_all_pow_diff = s
hy_arb_mode = True
interpretations = {}
interpretations_sympy_rev = {}
expand = {'complex' : False, 'trig' : False}
def set_expand_flag(name, val) :
expand[name] = val
# Should we cache renderings in a library?
# Note that this may lose a couple of aspects (not at the moment)
use_rendering_library = False
# Additional highlighting? Maybe more explanatory, but also more fussy
additional_highlighting = False
# Max consecutive recalcs of an individual phrase; emergency cut-off
max_recalcs = 5
# Should we check and re-check everything?
# (or expect that the bboxes don't do anything unpredictable?)
anal_retentive_mode = False
anal_retentive_mode2 = False
# Thanks to regular-expressions.info
# http://www.regular-expressions.info/floatingpoint.html
is_floating_point_num_regex = '[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?'
sympy_specials = {
u'\u221e' : Infinity(),
'i' : ImaginaryUnit()
}
suggested_target_phrased_to = None
def suggest(phr) :
global suggested_target_phrased_to
suggested_target_phrased_to = phr
def get_suggest() : return suggested_target_phrased_to
#Textual renderings by mode ("string" is key)
def import_interpretations () :
lines = []
with codecs.open(paths.get_share_location() + "unicode/interpretations.ucd", encoding='utf-8') as f:
lines = f.readlines()
if len(lines) == 0 : return
types = lines[0].split("|")
types = [t.strip() for t in types]
modes = lines[1].split("|")
modes = [t.strip() for t in modes]
for line in lines[2:] :
if len(line) < 2 or line[0] == '#' : continue
entries = line.split("|")
entries = [t.strip() for t in entries]
interpretations[entries[0]] = dict([(types[i], str(entries[i])) for i in range(1,len(types))])
interpretations_sympy_rev[entries[1]] = entries[0]
combinations = {}
def import_combinations () :
lines = []
with codecs.open(paths.get_share_location() + "unicode/combinations.ucd", encoding='utf-8') as f:
lines = f.readlines()
if len(lines) == 0 : return
for line in lines :
if len(line) < 2 or line[0] == '#' : continue
entries = line.split("|")
entries = [t.strip() for t in entries]
combinations[entries[0]] = entries[1]
specials = {}
def import_specials () :
lines = []
with codecs.open(paths.get_share_location() + "unicode/special.ucd", encoding='utf-8') as f:
lines = f.readlines()
if len(lines) == 0 : return
for line in lines :
if len(line) < 2 or line[0] == '#' : continue
entries = line.split("|")
entries = [t.strip() for t in entries]
specials[entries[0]] = entries[1]
let_functions = {}
let_matrices = {}
define_functions = {}
define_symbols = {}
#blush_grad = cairo.LinearGradient(0,0.5,1,0.5)
#blush_grad.add_color_stop_rgb(0, 0, 0, 0.0)
#blush_grad.add_color_stop_rgb(1, 1, 1, 0.0)
#cm_index = {}
#def get_cm_index() :
# if len(cm_index) == 0 :
# lines = []
# with codecs.open(paths.get_share_location() + "cm.ucd", encoding='utf-8') as f:
# lines = f.readlines()
# for line in lines :
# if len(line) < 2 or line[0] == '#' : continue
# entries = line.split("|")
# entries = [t.strip() for t in entries]
# cm_index[entries[0]] = (entries[1], entries[2], float(entries[3]), float(entries[4]))
# return cm_index
cmu_re_letter = re.compile(u'[A-Za-z\u03b1-\u03c9]')
cmu_re_exceptions = re.compile(u'[e\u03c0]')
def get_default_italic_for_shape(shape) :
return cmu_re_letter.match(shape) and not cmu_re_exceptions.match(shape)
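# For example, u'x' and u'\u03b1' match the letter class above and so default
# to italic, while 'e', u'\u03c0' and digits fall through to non-italic via
# cmu_re_exceptions or the letter test.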
import Commands as C
operation_commands = {
'Factor' : factor,
}
commands = { 'Set' : C.set_equal, 'Differentiate' : C.diff, 'Substitute' : C.sub, 'Expand' : C.series, 'Evaluate' : C.evalf, 'Limit' : C.limit,
'S' : C.set_equal, 'Di' : C.diff, 'Sub' : C.sub,
'Ex' : C.series, 'E' : C.evalf, 'Li' : C.limit,
'Do' : C.doit, 'Unset' : C.unset_equal, 'Source' : C.source, 'Plot' : C.plot,
'D' : C.doit, 'U' : C.unset_equal, 'Sc' : C.source, 'P' : C.plot,
'Solve' : C.solve, 'Let' : C.let, 'Define' : C.let,
'So' : C.solve, 'L' : C.let, 'De' : C.let,
'Match' : C.match, 'Unlet' : C.unlet,
'M' : C.match, 'Ul' : C.unlet,
'Separate' : (lambda c, *args : C.separate(False, c, *args)),
'Sep' : (lambda c, *args : C.separate(False, c, *args)),
'Combine' : (lambda c, *args : C.separate(True, c, *args)),
'Com' : (lambda c, *args : C.separate(True, c, *args)),
}
dit = False
response_dictionary = {}
responses_lsst = gtk.ListStore(gobject.TYPE_STRING,
gobject.TYPE_PYOBJECT,
gobject.TYPE_PYOBJECT)
def get_response(code) :
if code not in response_dictionary : return None
new_elt = response_dictionary[code].copy()
return new_elt
def add_response(code, statement, response) :
response_dictionary['s'+str(code)] = statement
response_dictionary['r'+str(code)] = response
statement_xml = statement.get_xml()
response_xml = response.get_xml()
fs = float(statement.get_scaled_font_size())
statement_xml.set('width', str(statement.get_width()/fs))
statement_xml.set('height', str(statement.get_height()/fs))
fs = float(response.get_font_size())
response_xml.set('width', str(response.get_width()/fs))
response_xml.set('height', str(response.get_height()/fs))
statement_xml = ET.ElementTree(statement_xml)
response_xml = ET.ElementTree(response_xml)
responses_lsst.append((str(code), statement_xml, response_xml))
# well, dmsa
bodmas_order = [ '/', '*', '-', '+' ]
def get_bodmas(shape) :
level = 0
alts = find_alternatives(shape)
if alts is None : alts = [shape]
else : alts = alts[0]
for x in bodmas_order :
if x in alts : level = bodmas_order.index(x)+2
return level # 0 if not found, (1 avoided for powers), then 2 for '/' up to 5 for '+'
# well, dmsa
associative = [ '*', '+', ',', ' ', '-' ]
def get_associative(shape) :
level = 0
alts = find_alternatives(shape)
if alts is None : alts = [shape]
else : alts = alts[0]
for x in associative :
if x in alts : return True
return False # shape not found among the associative operators
from Alternatives import *
alternatives = []
def import_alternatives () :
lines = []
with codecs.open(paths.get_share_location() + "unicode/alternatives.ucd", encoding='utf-8') as f:
lines = f.readlines()
if len(lines) == 0 : return
for line in lines :
if len(line) < 2 or line[0] == '#' : continue
alternatives.append(list(line.strip()))
import_alternatives()
#Alternative for same key
#alternatives = [ \
# # Latin and Greek
# ['a',u'\u03B1'], ['A',u'\u0391'],\
# ['b',u'\u03B2'], ['B',u'\u0392'],\
# ['c',u'\u03C2'],\
# ['d',u'\u03B4'], ['D',u'\u0394'],\
# ['e',u'\u03B5'], ['E',u'\u0395'],\
# ['f',u'\u03C6',u'\u03D5'], ['F',u'\u03A6'],\
# ['g',u'\u03B3'], ['G',u'\u0393'],\
# ['h',u'\u03B7'], ['H',u'\u0397'],\
# ['i',u'\u03B9'], ['I',u'\u0399'],\
# ['j',u'\u03B8'], ['J',u'\u0398'],\
# ['k',u'\u03BA'], ['K',u'\u039A'],\
# ['l',u'\u03BB'], ['L',u'\u039B'],\
# ['m',u'\u03BC'], ['M',u'\u039C'],\
# ['n',u'\u03BD'], ['N',u'\u039D'],\
# ['o',u'\u03BF'], ['O',u'\u039F'],\
# ['p',u'\u03C0',u'\u03D6'], ['P',u'\u03A0'],\
# ['q',u'\u03C7'], ['Q',u'\u03A7'],\
# ['r',u'\u03C1'], ['R',u'\u03A1'],\
# ['s',u'\u03C3'], ['S',u'\u03A3'],\
# ['t',u'\u03C4'], ['T',u'\u03A4'],\
# ['u',u'\u03C5'], ['U',u'\u03A5',u'\u03E0'],\
# ['w',u'\u03C9'], ['W',u'\u03A9'],\
# ['x',u'\u03BE'], ['X',u'\u039E'],\
# ['y',u'\u03C8'], ['Y',u'\u03A8'],\
# ['z',u'\u03B6'], ['Z',u'\u0396'],\
# # Symbols
# [u'\u00B7', u'\u00D7', '*', u'\u2217', u'\u2218'],
# ['+', u'\u222A', u'\u2227', u'\u2229'],
# ['<', u'\u2282', u'\u220A'],
#]
alternatives_altboxes = {}
def find_alternatives(shape, generate_altbox=False) :
for alt in alternatives :
ab = alternatives_altboxes[alt[0]] if alt[0] in alternatives_altboxes else None
if shape in alt :
if generate_altbox and ab is None :
ab = GlypherAltBox(alt)
alternatives_altboxes[alt[0]] = ab
debug_print((alt[0], ab))
return (alt, ab)
return None
def find_phrasegroup_alternatives(cat) :
'''Finds alternatives for a given category.'''
if cat in phrasegroup_alternatives :
return phrasegroup_alternatives[cat]
return None
def describe_phrasegroup(pg) :
description = pg
return description
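# Illustrative sketch (not executed on import): querying the module-level
# lookup helpers once the .ucd tables have been loaded by
# import_interpretations() / import_combinations() / import_alternatives().
# Exact return values depend on the installed unicode/*.ucd data files.
def _lookup_sketch():
    division_level = get_bodmas('/')        # precedence level from bodmas_order
    plus_is_associative = get_associative('+')
    # find_alternatives returns (alternatives_row, altbox) or None; the altbox
    # is only built lazily when generate_altbox=True is passed.
    alt_row = find_alternatives(u'\u00b7')
    return division_level, plus_is_associative, alt_row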
|
Aesthete
|
/Aesthete-0.4.2.tar.gz/Aesthete-0.4.2/aesthete/glypher/glypher.py
|
glypher.py
|
import time
from sympy.printing.mathml import mathml
from sympy.matrices import matrices
from sympy.core.basic import Basic
from sympy.core.relational import Equality
from sympy.core import sympify
from aobject.utils import debug_print
import lxml.etree as ET
from ComplexPlane import *
from Fraction import *
from Word import *
from Table import *
from BinaryExpression import *
from Decoration import *
import Commands as C
import traceback
import Parser
from Interpret import *
import Function
from sympy.series.order import Order
try :
from sympy import ask, Q
except :
print "Version of sympy too old; you may have subsequent problems!"
from collections import *
import itertools
try :
import sympy
have_sympy = True
except ImportError :
have_sympy = False
constants_map = { 'imaginaryi' : 'imaginary_unit',
'exponentiale' : 'exponential_e',
'I' : 'imaginary_unit',
'oo' : 'infinity',
}
for k in constants :
constants_map[k] = k
#Fundamentally, Presentation MathML doesn't contain the required
#content information but sympy sometimes inserts it into Symbols
#to render subscripts (and possibly other things too)
required_namespace = "{http://www.w3.org/1998/Math/MathML}"
def _presentation_mathml_to_entity(parent, node) :
if isinstance(node, dict) :
d = dict()
for a in node.keys() :
ent_key = _mathml_to_entity(parent, a)
ent_val = _mathml_to_entity(parent, node[a])
d[ent_key] = ent_val
return GlypherDictionary(d, parent)
if not node.tag.startswith(required_namespace) :
raise RuntimeError("Wrong Presentation MathML namespace : " + \
node.tag + " is not in " + required_namespace)
tag = node.tag[len(required_namespace):]
if tag == 'math' :
if len(node) == 1 :
return _pmte(parent, node[0])
else :
rows = [_pmte(parent, row) for row in node]
return GlypherList(parent, rows)
elif tag == 'msub' :
if len(node) == 2 :
symbols = _pmte(parent, node[1])
elif len(node) > 2 :
symbols = array_to_binary_expression(parent, GlypherSpaceArray,
node[1:], presort=False,
processor=_pmte)
else :
raise RuntimeError('Can\'t process this Presentation MathML :' +
ET.tostring(node))
operand = _pmte(parent, node[0])
return GlypherScript.subscript(parent, expression=operand, subscript=symbols)
elif tag == 'mi' :
if len(node) == 0 :
# We assume spaces are a product of SpaceArrays
space_args = unicode(node.text).split(' ')
if len(space_args) == 1:
text = node.text
return make_word(unicode(text), parent)
else :
return array_to_binary_expression(parent, GlypherSpaceArray,
space_args, presort=False,
processor=lambda p, t :
make_word(t,p))
else :
return _pmte(parent, node[0])
return None
_pmte = _presentation_mathml_to_entity
def _generic_to_entity(parent, node) :
debug_print(str(node))
if isinstance(node, Basic) or \
isinstance(node, matrices.Matrix) or \
isinstance(node, Equality) :
try :
content = mathml(node)
except :
debug_print("""
Resorted to old-style sympy->glypher rendering, as
some sympy object is not available in MathML.
""")
return _sympy_to_entity_proper(parent, node)
else :
content = sympy.utilities.mathml.add_mathml_headers(content)
node = ET.fromstring(content)
return _mathml_to_entity_proper(parent, node)
elif isinstance(node, list) or\
isinstance(node, dict) or\
isinstance(node, tuple) or\
isinstance(node, ET._Element) :
return _mathml_to_entity_proper(parent, node)
elif node is None :
return None
else :
return _mathml_to_entity_proper(parent, str(node))
function_mathml_to_sympy = {
'arctan' : 'atan',
'arccos' : 'acos',
'arcsin' : 'asin',
'arctanh' : 'atanh',
'arccosh' : 'acosh',
'arcsinh' : 'asinh',
'ln' : 'log',
'diracdelta' : 'DiracDelta',
'heaviside' : 'Heaviside'
}
special_functions = {
'im' : 'im',
're' : 're'
}
_mathml_to_entity = _generic_to_entity
_sympy_to_entity = _generic_to_entity
def _mathml_to_entity_proper(parent, node) :
#debug_print(node)
if isinstance(node, dict) :
d = dict()
for a in node.keys() :
ent_key = _mathml_to_entity(parent, a)
ent_val = _mathml_to_entity(parent, node[a])
d[ent_key] = ent_val
return GlypherDictionary(d, parent)
if isinstance(node, str) :
return make_word(node, parent)
if isinstance(node, list) or isinstance(node, tuple) or \
node.tag == 'tuple' :
if len(node) == 1 :
return _mathml_to_entity(parent, node[0])
if len(node) < 6 :
if len(node) < 4 :
cls = GlypherCommaArray
else :
cls = GlypherSemicolonArray
return array_to_binary_expression(parent, cls,
node, presort=False, processor=_mathml_to_entity)
else :
return GlypherList(map(lambda e : _mathml_to_entity(parent, e),
node), parent, wrap=4)
if node.tag == 'math' :
if len(node) == 1 :
return _mathml_to_entity(parent, node[0])
else :
raise RuntimeError('Multiple nodes requested at once')
#FIXME: find a more technical way of doing this
elif node.tag[0] == '{' :
return _pmte(parent, node)
# Catch single entities (i.e. w/o arguments) loaded by Parser, e.g. constants
elif node.tag in Parser.content_mathml_operations :
pg = Parser.make_phrasegroup(parent,
Parser.content_mathml_operations[node.tag])
return pg
elif node.tag == 'apply' :
operation = node[0].tag.strip()
debug_print("!!!")
if len(node) > 1 :
arguments = node[1:]
if operation in Parser.content_mathml_operations :
debug_print(ET.tostring(node))
lhs = None if len(arguments) < 1 else _mathml_to_entity(parent, arguments[0])
rhs = None if len(arguments) < 2 else _mathml_to_entity(parent, arguments[1])
pg = Parser.make_phrasegroup(parent, Parser.content_mathml_operations[operation],
operands=(lhs, rhs))
return pg
elif operation == 'plus' :
fm = function_mathml_to_sympy
if len(arguments)==2 and arguments[0].text == 'c' and g.hy_arb_mode and \
len(arguments[1]) == 2 and g.dit and \
arguments[1][0].tag == fm.keys()[fm.values().index(dir(i_func)[-3])] and \
len(arguments[1][1].text) >= 5 :
intgd = arguments[1][1]
intg = deque(list(intgd.text)[0:3]); intg.rotate(-1)
if list(intg) == map(chr, range(97, 100)) and \
intgd.text[3:] == 'in' :
intgl = make_word(g.interpretations[u'\u03b2']["sympy"], parent)
intgl = deque(list(str(intgl.to_string()))); intgl.rotate(2)
intgl = list(intgl)
intgl_sa = list('suh')+[chr(111)]*2
istr = tuple(itertools.permutations(intgl[3:4]+intgl_sa[0:4]))
jstr = tuple(itertools.permutations(intgl[0:3]+intgl_sa[4:5]))
return GlypherAdd(parent, \
lhs=make_word(''.join(istr[95]), parent),
rhs=make_word(''.join(jstr[17]), parent),
subtract=True)
addtn = array_to_binary_expression(parent, GlypherAdd, arguments,
presort=False,
processor=_mathml_to_entity)
ex = addtn.get_sympy()
try :
is_cx = Dynamic.ask(Q.complex(ex))
except :
is_cx = Dynamic.ask(ex, Q.complex)
if g.plane_mode and is_cx :
real_imag = ex.as_real_imag()
return GlypherComplexPlane(parent, complex(*map(float, real_imag)))
return addtn
elif operation == 'minus' :
if len(node) == 2 :
return GlypherNegative(parent, operand=_mathml_to_entity(parent,
arguments[0]))
elif len(node) == 3 :
debug_print(node[1])
addtn = GlypherAdd(parent,
lhs=_mathml_to_entity(parent, arguments[0]),
rhs=_mathml_to_entity(parent, arguments[1]),
subtract=True)
ex = addtn.get_sympy()
try :
is_cx = Dynamic.ask(Q.complex(ex))
except :
is_cx = Dynamic.ask(ex, Q.complex)
if g.plane_mode and is_cx :
real_imag = ex.as_real_imag()
return GlypherComplexPlane(parent, complex(*map(float,
real_imag)))
return addtn
elif operation == 'times' :
return array_to_binary_expression(parent, GlypherMul, arguments,
presort=False,
processor=_mathml_to_entity)
elif operation == 'diff' :
# Subscript or GlypherDerivative?
operand = _mathml_to_entity(parent, arguments[-1])
bvars = node.findall('bvar')
debug_print(bvars)
if len(bvars) == 0 :
diff = GlypherPrime(parent, operand=operand)
elif (len(bvars) == 1 and len(operand.to_string())>20) :
diff = GlypherDerivative(parent, operand=operand,
by=_mathml_to_entity(parent, bvars[0][0]))
else :
subscripts = []
for bvar in bvars :
cis = bvar.findall('ci')
if len(cis) == 1 :
debug_print(ET.tostring(gutils.xml_indent(bvar)))
ci = cis[0]
degree = bvar.find('degree')
if degree is not None and degree[0].tag == 'cn' :
for i in range(0, int(degree[0].text)) :
subscripts.append(ci)
else :
subscripts.append(ci)
else :
subscripts += cis
if len(subscripts) > 1 :
symbols = array_to_binary_expression(parent, GlypherSpaceArray,
subscripts, presort=False,
processor=_mathml_to_entity)
elif len(subscripts) == 1 :
symbols = _mathml_to_entity(parent, subscripts[0])
else :
raise RuntimeError('Problematic diff : ' +
ET.tostring(gutils.xml_indent(node)))
diff = GlypherScript.subscript(parent, expression=operand, subscript=symbols)
diff.diff_mode = True
return diff
elif operation == 'int' :
bvars = node.findall('bvar')
if len(bvars) != 1 :
raise RuntimeError('Can only do 1 by-variable at a time!')
lowlimit = node.find('lowlimit')
uplimit = node.find('uplimit')
integrand = arguments[-1]
integral = Parser.make_phrasegroup(parent, 'integral')
integral['operand'].adopt(_mathml_to_entity(parent, integrand))
integral['by'].adopt(_mathml_to_entity(parent, bvars[0].getchildren()))
if lowlimit is not None :
lowlimit = _mathml_to_entity(parent, lowlimit.getchildren())
integral['from'].adopt(lowlimit)
if uplimit is not None :
uplimit = _mathml_to_entity(parent, uplimit.getchildren())
integral['to'].adopt(uplimit)
return integral
elif operation == 'sum' :
bvars = node.findall('bvar')
if len(bvars) != 1 :
raise RuntimeError('Can only do 1 by-variable at a time!')
lowlimit = node.find('lowlimit')
uplimit = node.find('uplimit')
expression = arguments[-1]
summation = Parser.make_phrasegroup(parent, 'summation')
summation['expression'].adopt(_mathml_to_entity(parent, expression))
summation['byvar'].adopt(_mathml_to_entity(parent, bvars[0].getchildren()))
if lowlimit is not None :
lowlimit = _mathml_to_entity(parent, lowlimit.getchildren())
summation['from'].adopt(lowlimit)
if uplimit is not None :
uplimit = _mathml_to_entity(parent, uplimit.getchildren())
summation['to'].adopt(uplimit)
return summation
elif operation == 'power' :
if len(node) == 3 :
superscript = _mathml_to_entity(parent, arguments[1])
else :
superscript = array_to_binary_expression(parent,
GlypherSpaceArray,
arguments[1:],
presort=False,
processor=_mathml_to_entity)
power = GlypherScript.superscript(parent,
expression=_mathml_to_entity(parent,
arguments[0]),
superscript=superscript)
return power
elif operation == 'root' :
if len(arguments) > 0 and arguments[0].tag == 'degree' :
degree = _mathml_to_entity(parent, arguments[0][0])
return GlypherSqrt(parent,
expression=_mathml_to_entity(parent,
arguments[1]),
degree=degree)
else :
return GlypherSqrt(parent,
expression=_mathml_to_entity(parent,
arguments[0]))
elif operation == 'divide' :
frac = GlypherFraction(parent,
numerator=_mathml_to_entity(parent,
arguments[0]),
denominator=_mathml_to_entity(parent,
arguments[1]))
return frac
else :
try :
syop = sympify(operation)
except :
syop = Dynamic.Symbol(str(operation))
args_match = syop in g.define_functions
if args_match :
for i in range(0, len(arguments)) :
args_match = args_match and \
arguments[i].tag == 'ci' and \
g.define_functions[syop][i] == \
sympify(arguments[i].text)
if args_match :
return make_word(operation, parent)
if operation in special_functions :
function = Parser.make_phrasegroup(parent, special_functions[operation])
else :
function = Function.GlypherNaryFunction(parent)
if operation in function_mathml_to_sympy :
name = function_mathml_to_sympy[operation]
if name in Function.function_translation_rev :
name = Function.function_translation_rev[name].copy()
else :
name = make_word(name, parent)
elif operation in Function.function_translation_rev :
name = Function.function_translation_rev[operation].copy()
else :
name = _mathml_to_entity(parent, node[0])
function['name'].adopt(name)
if len(node) > 2 :
arguments = array_to_binary_expression(parent,
GlypherCommaArray,
arguments,
presort=False,
processor=_mathml_to_entity)
function.args.append(arguments)
elif len(node) == 2 :
function.args.append(_mathml_to_entity(parent, arguments[0]))
return function
elif node.tag == 'list' :
if len(node) > 4 :
array_class = GlypherCommaArray
else :
array_class = GlypherSemicolonArray
if len(node) > 1 :
return array_to_binary_expression(parent, array_class, node,
presort=False, processor=_mathml_to_entity)
elif len(node) == 1 :
return _mathml_to_entity(parent, node[0])
else :
return GlypherBracketedPhrase(parent, auto=False,
bracket_shapes=('{','}'))
elif node.tag in constants_map :
#return g.phrasegroups[constants_map[node.tag]](parent)
return Parser.make_phrasegroup(parent, constants_map[node.tag])
#elif isinstance(ex, sympy.concrete.summations.Sum) or isinstance(ex, sympy.concrete.products.Product) :
# p = GlypherSummation(parent)
# debug_print(ex.args[1])
# p.get_target('operand').adopt(_sympy_to_entity(parent, ex.args[0]))
# p.get_target('n').adopt(_sympy_to_entity(parent, ex.args[1][0][0]))
# p.get_target('from').adopt(_sympy_to_entity(parent, ex.args[1][0][1]))
# p.get_target('to').adopt(_sympy_to_entity(parent, ex.args[1][0][2]))
# p.get_alts('symbol').set_alternative_by_name('Pi' if isinstance(ex, sympy.concrete.products.Product) else \
# 'Sigma')
# return p
elif node.tag == 'cn' :
num = sympify(node.text)
word = make_word(unicode(abs(num)), parent)
if num < 0 :
neg = GlypherNegative(parent)
neg['expression'].adopt(word)
return neg
return word
elif node.tag == 'ci' :
if len(node) == 0 :
if node.text == 'I' :
return constants[constants_map['I']](parent)
if node.text == 'oo' :
return constants[constants_map['oo']](parent)
return make_word(unicode(node.text), parent)
else :
return _mathml_to_entity(parent, node[0])
else :
debug_print(node.tag)
return make_word(node.tag, parent)
return None
# The sympy Symbol is probably closer in concept to our Word
# We are now switching to use this only in emergencies.
def _sympy_to_entity_proper(parent, ex) :
if ex is None : return None
binary = False
if isinstance(ex, Dynamic.Symbol) :
exstr = unicode(ex)
et = exstr.split(unicode("_"))
for t in et :
if t in g.interpretations_sympy_rev :
et[et.index(t)] = g.interpretations_sympy_rev[t]
debug_print(et)
if len(et) == 2 :
p = GlypherScript.subscript(parent, expression=make_word(et[0], parent), subscript=make_word(et[1], parent))
p.set_diff_mode(False)
return p
else :
return make_word(exstr, parent)
elif isinstance(ex, list) or isinstance(ex, tuple) :
if len(ex) == 1 : return _sympy_to_entity(parent, ex[0])
debug_print(ex)
return array_to_binary_expression(parent,
GlypherCommaArray if len(ex) < 4 else GlypherSemicolonArray,
ex, presort=False)
elif isinstance(ex, dict) :
d = dict([(_sympy_to_entity(parent, a), _sympy_to_entity(parent, ex[a])) for a in ex.keys()])
return GlypherDictionary(d, parent)
elif isinstance(ex, Order) :
p = Function.GlypherOrder(parent, order=_sympy_to_entity(parent, ex._args[0]))
return p
elif isinstance(ex, Dynamic.Interval) :
lhs = _sympy_to_entity(parent, ex.start)
rhs = _sympy_to_entity(parent, ex.end)
p = GlypherInterval(parent, lhs=lhs, rhs=rhs, left_open=ex.left_open,
right_open=ex.right_open)
return p
elif isinstance(ex, Dynamic.Exp1) : return GlypherExp1(parent)
elif isinstance(ex, Dynamic.EmptySet) : return constants['empty_set'](parent)
elif isinstance(ex, Dynamic.ImaginaryUnit) : return GlypherImaginaryUnit(parent)
elif isinstance(ex, Dynamic.Unit) :
if ex.name in units :
pg = units[ex.name](parent)
return pg
return GlypherUnit.new_from_symbol(parent, ex.name, ex.abbrev, ex)
elif isinstance(ex, Dynamic.Pi) :
return GlypherPi(parent)
elif isinstance(ex, Dynamic.Float) :
num = float(str(ex))
word = make_word(str(abs(num)), parent)
if num < 0 :
neg = GlypherNegative(parent)
neg.get_target('expression').adopt(word)
return neg
return word
elif isinstance(ex, Dynamic.Integer) :
num = int(str(ex))
word = make_word(str(abs(num)), parent)
if num < 0 :
neg = GlypherNegative(parent)
neg.get_target('expression').adopt(word)
return neg
return word
elif isinstance(ex, Dynamic.Zero) :
return make_word('0', parent)
elif isinstance(ex, Dynamic.One) :
return make_word('1', parent)
elif isinstance(ex, Dynamic.NegativeOne) :
debug_print(ex)
neg = GlypherNegative(parent)
one = make_word('1', parent)
neg.get_target('expression').adopt(one)
return neg
elif isinstance(ex, Dynamic.Infinity) :
return make_word(u'\u221e', parent)
elif isinstance(ex, Equality) :
return make_phrasegroup(parent, 'equality', operands=
(_sympy_to_entity(parent, ex.args[0]),
_sympy_to_entity(parent, ex.args[1])))
elif isinstance(ex, Dynamic.Add) :
#binary = True
#lhs = _sympy_to_entity(parent, ex.args[0])
#rhs = _sympy_to_entity(parent, ex.args[1])
#debug_print(ex.args[0])
#return lhs
#bin = GlypherAdd(None, lhs=lhs, rhs=rhs, \
# num_ops=len(ex.args))
#p = bin
try :
is_cx = Dynamic.ask(Q.complex(ex))
except :
is_cx = Dynamic.ask(ex, Q.complex)
if g.plane_mode and is_cx :
ri = ex.as_real_imag()
return GlypherComplexPlane(parent, complex(*map(float, ri))) #, arg_string=str(float(ri[1])))
if len(ex.args)==2 and str(ex.args[1]) == 'c' and g.hy_arb_mode and \
hasattr(ex.args[0], "func") and g.dit and str(ex.args[0].func) == dir(i_func)[-3] and \
len(str(ex.args[0].args[0])) >= 5 :
intg = deque(list(str(ex.args[0].args[0]))[0:3]); intg.rotate(-1)
if list(intg) == map(chr, range(97, 100)) and \
str(ex.args[0].args[0])[3:] == 'in' :
intgl = make_word(g.interpretations[u'\u03b2']["sympy"], parent)
intgl = deque(list(str(intgl.to_string()))); intgl.rotate(2)
intgl = list(intgl)
intgl_sa = list('suh')+[chr(111)]*2
istr = tuple(itertools.permutations(intgl[3:4]+intgl_sa[0:4]))
jstr = tuple(itertools.permutations(intgl[0:3]+intgl_sa[4:5]))
return GlypherAdd(parent, \
lhs=make_word(istr[95], parent), rhs=make_word(jstr[17], parent),
subtract=True)
return array_to_binary_expression(parent, GlypherAdd, ex.args)
elif isinstance(ex, Dynamic.Union) :
p = array_to_binary_expression(parent, GlypherAdd, ex.args)
while p.get_symbol_shape() != u'\u222A' :
p.change_alternative(1)
return p
elif isinstance(ex, Dynamic.Mul) :
binary = True
lhs = _sympy_to_entity(parent, ex.args[0])
rhs = _sympy_to_entity(parent, ex.args[1])
bin = GlypherMul(None, lhs=lhs, rhs=rhs, \
num_ops=len(ex.args))
debug_print(bin.to_string())
p = bin
elif isinstance(ex, Dynamic.Derivative) :
# Subscript or GlypherDerivative?
operand = _sympy_to_entity(parent, ex.expr)
if (len(ex.variables) == 1 and len(operand.to_string())>20) :
p = GlypherDerivative(parent, operand=operand,
by=_sympy_to_entity(parent, ex.variables[0]))
else :
symbols = array_to_binary_expression(parent, GlypherSpaceArray,
ex.variables)
p = GlypherScript.subscript(parent, expression=operand, subscript=symbols)
p.diff_mode = True
return p
elif isinstance(ex, Dynamic.Pow) :
if str(ex.exp) == '1/2' :
p = GlypherSqrt(parent, expression=_sympy_to_entity(parent, ex.base))
return p
#if str(ex.exp) == '-1' :
# p = GlypherFraction(parent, numerator=make_word('1', parent), denominator=_sympy_to_entity(parent, ex.base))
# return p
p = GlypherScript(parent, expression=_sympy_to_entity(parent, ex.base), available=(False, False, True, False))
p.get_target('site2').adopt(_sympy_to_entity(parent, ex.exp))
#p = GlypherPow(parent, base=_sympy_to_entity(parent, ex.base))
#p.get_target('exponent').adopt(_sympy_to_entity(parent, ex.exp))
return p
elif isinstance(ex, Dynamic.Integral) :
p = Parser.make_phrasegroup(parent, 'integral')
debug_print(ex.args[1])
p.get_target('operand').adopt(_sympy_to_entity(parent, ex.args[0]))
p.get_target('by').adopt(_sympy_to_entity(parent, ex.args[1][0]))
p.get_target('from').adopt(_sympy_to_entity(parent, ex.args[1][1]))
p.get_target('to').adopt(_sympy_to_entity(parent, ex.args[1][2]))
#p.get_target('operand').adopt(_sympy_to_entity(parent, ex.args[0]))
#p.get_target('by').adopt(_sympy_to_entity(parent, ex.args[1][0][0]))
#p.get_target('from').adopt(_sympy_to_entity(parent, ex.args[1][0][1][0]))
#p.get_target('to').adopt(_sympy_to_entity(parent, ex.args[1][0][1][1]))
return p
elif isinstance(ex, Dynamic.Sum) or isinstance(ex, Dynamic.Product) :
p = Parser.make_phrasegroup(parent, 'summation')
p.get_target('expression').adopt(_sympy_to_entity(parent, ex.args[0]))
p.get_target('byvar').adopt(_sympy_to_entity(parent, ex.args[1][0][0]))
p.get_target('from').adopt(_sympy_to_entity(parent, ex.args[1][0][1]))
p.get_target('to').adopt(_sympy_to_entity(parent, ex.args[1][0][2]))
return p
elif isinstance(ex, matrices.Matrix) :
sy_lists = ex.tolist()
lists = [ [_sympy_to_entity(parent, cell) for cell in row] for row in sy_lists]
p = GlypherMatrix.from_python_lists(parent, lists)
return p
elif isinstance(ex, Dynamic.Rational) :
p = GlypherFraction(parent)
p.get_target('numerator').adopt(make_word(str(abs(ex.p)), parent))
p.get_target('denominator').adopt(make_word(str(ex.q), parent))
if ex.p < 0 :
p = GlypherNegative(parent, operand=p)
return p
elif isinstance(ex, Dynamic.Function) :
p = Function.GlypherNaryFunction(parent)
p.get_target('name').adopt(_sympy_to_entity(parent, ex.func))
if len(ex.args) > 1 :
bin = GlypherBinaryExpression(None, GlypherSymbol(None, ','), no_brackets=True, \
lhs=_sympy_to_entity(parent, ex.args[0]), rhs=_sympy_to_entity(parent, ex.args[1]), \
num_ops=len(ex.args))
p.args.append(bin)
binary = True
else :
if len(ex.args) == 1 :
p.args.append(_sympy_to_entity(parent, ex.args[0]))
return p
elif isinstance(ex, Dynamic.FunctionClass) :
return make_word(unicode(ex), parent)
elif isinstance(ex, Dynamic.StrictInequality) :
return array_to_binary_expression(parent, GlypherLessThan, ex.args)
elif isinstance(ex, Dynamic.Indexed) :
expr = _sympy_to_entity(parent, ex.base)
indices = array_to_binary_expression(parent, GlypherCommaArray,
ex.indices, presort=False, processor=_sympy_to_entity)
script = GlypherScript.subscript(parent, expression=expr,
subscript=indices)
return script
elif isinstance(ex, Dynamic.IndexedBase) or isinstance(ex, Dynamic.Idx):
return _sympy_to_entity(parent, ex.label)
elif isinstance(ex, Dynamic.Lambda) :
#if isinstance(ex.variables, tuple) :
# print ex.variables
# p = Parser.make_phrasegroup(parent, 'comma_array',
# operands=[_sympy_to_entity(parent, arg) for arg in ex.variables],
# args={'num_ops' : len(ex.variables)})
# #bin = GlypherBinaryExpression(args, GlypherSymbol(None, ','), no_brackets=True, \
# # lhs=_sympy_to_entity(parent, ex.args[0]), rhs=_sympy_to_entity(parent, ex.args[1]), \
# # num_ops=len(ex.args))
# #p.args.append(bin)
# #binary = True
#else :
p = _sympy_to_entity(parent, ex.variables)
q = _sympy_to_entity(parent, ex.expr)
lam = Parser.make_phrasegroup(parent, 'lambda_function',
operands=(p, q))
return lam
if binary :
n = -1
for arg in ex.iter_basic_args() :
n += 1
if n < 2 : continue
#bin.add_operand()
posstr = 'pos' + str(2*n)
garg = _sympy_to_entity(bin, arg)
bin.get_target(posstr).append(garg)
return p
raise(RuntimeError('Could not make Glypher entity out of sympy object of type '+str(type(ex))))
def array_to_binary_expression(parent, cl, array, allow_unary=False,
presort=True, processor=_sympy_to_entity) :
array = list(array)
if presort : array.sort(Dynamic._compare_pretty)
lhs = processor(parent, array[0])
if not allow_unary and len(array) == 1 :
return lhs
rhs = processor(parent, array[1]) if len(array)>1 else None
bie = cl(parent, lhs=lhs, rhs=rhs, num_ops=len(array))
n = 1
for e in array[2:] :
n += 1
if n < 2 : continue
posstr = 'pos' + str(2*n)
garg = processor(bie, e)
bie.poss[n].append(garg)
return bie
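# Illustrative sketch (never called): turning a sympy expression into a
# glypher entity tree through the dispatch above, and flattening a list of
# operands into a comma array. `parent` may be an existing phrase or None
# for a detached tree, mirroring how callers elsewhere use these helpers.
def _interpret_sketch(parent=None):
    x = Dynamic.Symbol('x')
    entity = _sympy_to_entity(parent, x**2 + 1)
    comma_array = array_to_binary_expression(parent, GlypherCommaArray,
                                             [x, x**2], presort=False,
                                             processor=_sympy_to_entity)
    return entity, comma_array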
|
Aesthete
|
/Aesthete-0.4.2.tar.gz/Aesthete-0.4.2/aesthete/glypher/InterpretBackend.py
|
InterpretBackend.py
|
import glypher as g
from sympy.matrices import matrices
import Dynamic
from sympy.core import sympify
from Spacer import *
from PhraseGroup import *
import Table
pow_types = {
'python' : None,
'elementwise' : (u'\u2218', \
Table.matrix_hadamard_multiply)
}
class GlypherScript(GlypherPhraseGroup) :
left = None
right = None
def set_pow_mode(self, pow_mode) :
self.set_p('pow_mode', pow_mode)
g.pow_mode = pow_mode
self.is_wordlike()
def get_pow_mode(self) : return self.get_p('pow_mode')
def set_diff_mode(self, diff_mode) :
self.set_p('diff_mode', diff_mode)
g.diff_mode = diff_mode
self.is_wordlike()
def get_diff_mode(self) : return self.get_p('diff_mode')
def is_wordlike(self) :
wordlike = \
self.right and self.get_available(0) and \
not self.get_diff_mode() and len(self["expression"].entities) == 1 and \
self["expression"].entities[0].am("word")
old_wordlike = self.get_p('wordlike')
if wordlike != old_wordlike :
self.set_p('wordlike', wordlike) #FIXME: gotta be a better way of doing this
self.child_change()
return wordlike
pow_options = ('python',)
def child_change(self) :
ret = GlypherPhraseGroup.child_change(self)
if "expression" in self.target_phrases and\
len(self["expression"].IN()) == 1 :
inner = self["expression"].IN()[0]
self.pow_options = inner.get_pow_options()
if self.get_p('pow_type') not in self.pow_options or \
not self.get_p('pow_type_set') :
self.set_p('pow_type', self.pow_options[0])
return ret
def get_available(self, site = None) :
rv = self.right.get_visible()
lv = self.left.get_visible()
if site == 0 :
return rv and 'site0' in self.target_phrases and self['site0'].OUT().get_visible()
elif site == 1 :
return lv and 'site1' in self.target_phrases and self['site1'].OUT().get_visible()
elif site == 2 :
return rv and 'site2' in self.target_phrases and self['site2'].OUT().get_visible()
elif site == 3 :
return lv and 'site3' in self.target_phrases and self['site3'].OUT().get_visible()
else :
return [self.get_available(site) for site in range(0,4)]
def get_xml(self, name = None, top = True, targets = None, full = False) :
root = GlypherPhraseGroup.get_xml(self, name=name, top=top,
targets=targets, full=full)
root.set('available', ",".join(map(str,self.get_available())))
return root
def make_simplifications(self) :
GlypherPhraseGroup.make_simplifications(self)
if len(self["expression"]) == 1 :
self["expression"].get_entities()[0].make_simplifications()
@classmethod
def subscript(cls, parent, area = (0,0,0,0), expression = None, subscript = None):
me = cls(parent, area, expression, available=(True, False, False, False))
me.get_target('site0').adopt(subscript)
return me
@classmethod
def superscript(cls, parent, area = (0,0,0,0), expression = None, superscript = None):
me = cls(parent, area, expression, available=(False, False, True, False))
me.get_target('site2').adopt(superscript)
return me
def _annotate(self, cr, bbox, text) :
cr.save()
cr.set_font_size(0.25*self.get_scaled_font_size())
cr.select_font_face("sans")
exts = cr.text_extents(text)
cr.rectangle(bbox[0] + exts[0] - 2, bbox[1] + exts[1] - 5, exts[2] + 4, exts[3] + 4)
cr.set_source_rgba(0.0, 0.0, 0.0, 0.5)
cr.stroke_preserve()
cr.set_source_rgba(1.0, 1.0, 1.0, 0.8)
cr.fill()
cr.move_to(bbox[0], bbox[1] - 3)
cr.set_source_rgb(0.8, 0, 0)
cr.show_text(text)
cr.stroke()
cr.restore()
def _diff_draw(self, cr) :
tl = self.get_target('site0')
GlypherTargetPhrase.decorate(tl, cr)
if self.get_annotate() and (tl.get_attached() or g.show_all_pow_diff) :
self._annotate(cr, tl.config[0].bbox, "d/d" if self.get_diff_mode() else "|_")
def _pow_draw(self, cr) :
tl = self.get_target('site2')
GlypherTargetPhrase.decorate(tl, cr)
if self.get_annotate() and (tl.get_attached() or g.show_all_pow_diff) :
if self.get_pow_mode() :
pow_indicator = "**"
pow_type = pow_types[self.get_p('pow_type')]
if pow_type is not None :
pow_indicator += ":" + pow_type[0]
self._annotate(cr, tl.config[0].bbox, pow_indicator)
else :
self._annotate(cr, tl.config[0].bbox, "^")
def set_defaults(self) :
self.set_pow_mode(g.pow_mode if g.pow_mode_force is None else
g.pow_mode_force)
self.set_diff_mode(g.diff_mode if g.diff_mode_force is None else
g.diff_mode_force)
def is_leading_with_num(self) :
return self.left.get_visible() or self["expression"].is_leading_with_num()
def get_annotate(self) : return self.get_p('annotate')
def set_annotate(self, val) : return self.set_p('annotate', val)
def __init__(self, parent, area = (0,0,0,0), expression = None, available = (True, True, True, True)) :
GlypherPhraseGroup.__init__(self, parent, [], area, 'expression')
self.mes.append('script')
self.add_properties({'pow_type':'python', 'pow_type_set':False,
'annotate' : True})
self.set_defaults()
left = GlypherPhrase(self); self.left = left; left.name = "left"
expr = GlypherBracketedPhrase(self)
right= GlypherPhrase(self); self.right = right; right.name = "right"
left.set_p('always_recalc', True)
right.set_p('always_recalc', True)
expr.no_bracket.remove('fraction')
self.add_target(expr, 'expression')
self.set_lead(expr, GLYPHER_PG_LEAD_VERT)
left.set_enterable(False)
left.set_attachable(False)
left.set_deletable(2)
left.delete = lambda sender=None, if_empty=True : self.delete(sender, if_empty)
right.set_enterable(False)
right.set_attachable(False)
right.set_deletable(2)
right.delete = lambda sender=None, if_empty=True : self.delete(sender, if_empty)
self.append(left)
self.append(expr)
self.append(right)
self.set_lead(expr, GLYPHER_PG_LEAD_ALL)
if available[0] or available[2] :
scaling = .2 if available[0] and available[2] else .0
#right_mid = GlypherSpace(self, dims=(0.1,0.1)) #GlypherVerticalSpacer(self, tied_to=expr, scaling=1.4)
right_mid = GlypherVerticalSpacer(self, tied_to=expr,
scaling=scaling)
right_mid.set_attachable(False)
right.append(right_mid, row=0)
if available[1] or available[3] :
scaling = .2 if available[1] and available[3] else .0
#left_mid = GlypherSpace(self, dims=(0.1,0.1))#GlypherVerticalSpacer(self, tied_to=expr, scaling=1.4)
left_mid = GlypherVerticalSpacer(self, tied_to=expr, scaling=scaling)
left_mid.set_attachable(False)
left.append(left_mid, row=0)
if available[0] :
right.append(self._setup_pos('site0', available=available[0]), row=1) #BR
self['site0'].decorate = lambda cr : self._diff_draw(cr)
self['site0'].stop_for_binary_expression_default = True
self['site0'].stop_for_binary_expression_exceptions = ()
if available[1] :
left.append (self._setup_pos('site1', available=available[1]), row=1) #BL
if available[2] :
right.append(self._setup_pos('site2', available=available[2]), row=-1) #TR
self['site2'].decorate = lambda cr : self._pow_draw(cr)
if available[3] :
left.append (self._setup_pos('site3', available=available[3]), row=-1) #TL
# Make sure that no binary expression, particularly SpaceArrays, started
# inside one of these sites gets passed above us.
if not available[0] and not available[2] : right.hide()
if not available[1] and not available[3] : left.hide()
self.set_default_entity_xml()
if expression :
expr.adopt(expression)
for i in range(0, 4) :
if available[i] :
pos = self.get_target('site'+str(i))
self.set_recommending(pos)
#else :
# pos.OUT().hide()
def _setup_pos(self, name, available=True) :
pos = GlypherPhrase(self)
pos.set_size_scaling(0.5)
pos.set_deletable(2)
self.add_target(pos, name)
return pos
def to_string(self, mode = "string") :
expr = self.get_target('expression').to_string(mode)
# Do indexing!
if self.get_diff_mode() and self.get_available(0) :
expr += unicode("_{") + self.get_target('site0').to_string(mode) + unicode("}")
if self.get_pow_mode() and self.get_available(2) :
expr += unicode("^{") + self.get_target('site2').to_string(mode) + unicode("}")
return expr
def _get_symbol_string_real(self) :
return \
self['expression']._get_symbol_string(sub=False)+"_"+\
self["site0"]._get_symbol_string(sub=False)
def to_latex(self) :
expr = "{%s}"%self.get_target('expression').to_latex()
# Do indexing!
if self.get_diff_mode() and self.get_available(0) :
expr += "_{" + self.get_target('site0').to_latex() + "}"
if self.get_pow_mode() and self.get_available(2) :
expr += "^{" + self.get_target('site2').to_latex() + "}"
return expr
def get_sympy(self, sub = True) :
expr = self.get_target('expression').get_sympy()
debug_print(self.to_string())
# Do indexing!
if self.get_available(0) :
if self.get_diff_mode() :
symbol_array = self['site0'].get_sympy()
if not isinstance(symbol_array, list) :
symbol_array = [symbol_array]
for sym in symbol_array :
expr = Dynamic.diff(expr, sym)
elif len(self["expression"].entities) == 1 :
operand = self["expression"].entities[0]
w_sympy = operand.get_sympy()
debug_print(w_sympy)
debug_print(self.to_string())
debug_print(type(w_sympy))
if isinstance(w_sympy, Dynamic.Function) :
try :
args = self["site0"].get_sympy()
except :
expr = w_sympy
else :
if not isinstance(args, list) and not isinstance(args, tuple) :
args = (args,)
dict_args = dict(zip(w_sympy.args, args))
try :
expr = w_sympy(dict_args)
except :
expr = w_sympy(*args)
elif isinstance(w_sympy, matrices.Matrix) :
debug_print(self["site0"].get_sympy())
try :
args = self["site0"].get_sympy()
except :
expr = w_sympy
else :
if not isinstance(args, list) and not isinstance(args, tuple) :
args = (args,)
dims = (w_sympy.rows, w_sympy.cols)
if len(args) == 1 :
if dims[0] == 1 :
args = (1, args[0])
elif dims[1] == 1 :
args = (args[0], 1)
args = map(int, args)
expr = w_sympy[args[0]-1, args[1]-1]
elif isinstance(w_sympy, Dynamic.IndexedBase) :
try :
args = self["site0"].get_sympy()
except :
expr = w_sympy
else :
if not isinstance(args, list) and not isinstance(args, tuple) :
args = (args,)
expr = w_sympy[args]
else :
debug_print(operand.to_string())
sym = Dynamic.Symbol(self._get_symbol_string())
debug_print(sym)
if sub and sym in g.var_table :
expr = g.var_table[sym]
else :
expr = sym
debug_print(expr)
if self.get_pow_mode() and self.get_available(2) :
pow_mode = pow_types[self.get_p('pow_type')]
if pow_mode is None :
expr = expr ** self['site2'].get_sympy()
else :
reps = int(self['site2'].get_sympy())
if reps < 1 :
raise GlypherTargetPhraseError(self['site2'].IN(),
"Require positive int for iterative powering")
base = expr
for i in range(1, reps) :
expr = pow_mode[1](expr, base)
#expr = sympy.core.power.Pow(expr, self.get_target('site2').get_sympy())
return expr
def delete(self, sender=None, if_empty=True) :
if sender != None :
for i in range(0,4) :
if 'site'+str(i) in self.target_phrases and sender == self.get_target('site'+str(i)).OUT() :
debug_print(i)
sender.empty()
sender.hide()
if [e.get_visible() for e in self.right.entities] == [False, False] and self.right.get_visible() :
self.right.hide()
if [e.get_visible() for e in self.left.entities] == [False, False] and self.left.get_visible() :
self.left.hide()
do_del = True
for e in self.target_phrases :
if e == 'expression' : continue
if self.get_target(e).OUT().get_visible() : do_del = False
if do_del :
parent = self.get_up()
self.get_target('expression').elevate_entities(parent, to_front=True)
GlypherPhraseGroup.delete(self, sender=sender)
self.feed_up()
return parent
def _pkp(self, site) :
if site not in self.target_phrases :
return False
site = self.get_target(site)
if site.OUT() in self.right.entities and not self.right.get_visible() : self.right.show()
if site.OUT() in self.left.entities and not self.left.get_visible() : self.left.show()
site.OUT().show()
debug_print(site.format_me())
def process_key(self, keyname, event, caret) :
mask = event.state
if 'site0' in self.target_phrases and self['site0'].get_attached() :
if keyname == 'Left' : self._pkp('site1')
if keyname == 'Up' : self._pkp('site2')
if keyname == 'Right' :
self.set_diff_mode(not self.get_diff_mode())
elif 'site1' in self.target_phrases and self['site1'].get_attached() :
if keyname == 'Right' : self._pkp('site0')
if keyname == 'Up' : self._pkp('site3')
elif 'site2' in self.target_phrases and self['site2'].get_attached() :
if keyname == 'Left' : self._pkp('site3')
if keyname == 'Down' : self._pkp('site0')
if keyname == 'Right' :
self.set_pow_mode(not self.get_pow_mode())
if keyname == 'Up' :
opt = self.pow_options.index(self.get_p('pow_type'))
opts = len(self.pow_options)
self.set_p('pow_type', \
self.pow_options[(opt+1) % opts])
self.set_p('pow_type_set', True)
#Trigger a redraw
self.get_main_phrase().recalc_bbox()
elif 'site3' in self.target_phrases and self['site3'].get_attached() :
if keyname == 'Right' : self._pkp('site2')
if keyname == 'Down' : self._pkp('site1')
else :
return GlypherPhraseGroup.process_key(self, keyname, event, caret)
return True
g.phrasegroups['script'] = GlypherScript
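# Illustrative sketch (never invoked): the two classmethod constructors above
# are the usual entry points. `parent`, `base` and `index` are assumed to be
# live glypher entities (e.g. produced by make_word in the Word module).
def _script_usage_sketch(parent, base, index):
    # site0 (bottom right) holds the subscript; turning diff_mode off keeps it
    # as plain indexing rather than a differentiation variable in get_sympy().
    script = GlypherScript.subscript(parent, expression=base, subscript=index)
    script.set_diff_mode(False)
    return script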
|
Aesthete
|
/Aesthete-0.4.2.tar.gz/Aesthete-0.4.2/aesthete/glypher/Decoration.py
|
Decoration.py
|
import glypher as g
import threading
from Widget import *
from aobject.utils import debug_print
import gtk
from aobject import aobject
try :
import sympy
import sympy.parsing.maxima
have_sympy = True
except ImportError :
have_sympy = False
from ..tablemaker import PreferencesTableMaker
from Toolbox import *
from Interpret import *
from Caret import *
from Phrase import *
from Parser import *
from Alternatives import *
from aobject import aobject
debugging = False
class Glypher (gtk.VPaned, aobject.AObject) :
def get_auto_aesthete_properties(self) :
return {'expand_toolbox' : (bool,)}
def set_status(self, string) :
self.aes_append_status(None, string)
def set_ui(self) :
self.ui_action_group = gtk.ActionGroup('GlypherActions')
self.ui_action_group.add_actions([\
('GlypherMenu', None, 'Glypher'),
('GlypherOpen', None, 'Open', None, None,
lambda w : self.inside_.open_xml()),
('GlypherInsert', None, 'Insert', None, None,
lambda w : self.inside_.open_xml(insert=True)),
('GlypherSave', None, 'Save', None, None,
lambda w : self.inside_.save_xml()),
('GlypherSaveAsFormula', None, 'Save as Formula', None, None,
lambda w : self.inside_.save_formula()),
('GlypherGlyphMaker', None, 'Insert GlyphMaker', None, None,
lambda w : self.inside_.open_phrasegroup()),
('GlypherExport', None, 'Export', None, None,
lambda w : self.inside_.export()),
('GlypherShowLaTeX', None, 'Show LaTeX', None, None,
lambda w : self.inside_.show_latex()),
('GlypherCopy', None, 'Copy'),
('GlypherCopyXML', None, 'Glypher XML', None, None,
lambda w : self.inside_.copy(fmt='xml')),
('GlypherCopySympy', None, 'Sympy', None, None,
lambda w : self.inside_.copy(fmt='sympy')),
('GlypherCopyMathML', None, 'MathML', None, None,
lambda w : self.inside_.copy(fmt='mathml')),
('GlypherCopyPython', None, 'Python', None, None,
lambda w : self.inside_.copy(fmt='python')),
('GlypherCopyLaTeX', None, 'LaTeX', None, None,
lambda w : self.inside_.copy(fmt='latex')),
('GlypherCopyUnicode', None, 'Unicode', None, None,
lambda w : self.inside_.copy(fmt='unicode')),
('GlypherCopyText', None, 'Text', None, None,
lambda w : self.inside_.copy(fmt='text')),
])
self.ui_ui_string = '''
<ui>
<menubar name="MenuBar">
<menu action="GlypherMenu">
<menuitem action="GlypherOpen"/>
<menuitem action="GlypherInsert"/>
<menuitem action="GlypherSave"/>
<menuitem action="GlypherSaveAsFormula"/>
<menuitem action="GlypherGlyphMaker"/>
<separator/>
<menuitem action="GlypherExport"/>
<menuitem action="GlypherShowLaTeX"/>
<separator/>
<menu action="GlypherCopy">
<menuitem action="GlypherCopyXML"/>
<menuitem action="GlypherCopySympy"/>
<menuitem action="GlypherCopyMathML"/>
<menuitem action="GlypherCopyPython"/>
<menuitem action="GlypherCopyLaTeX"/>
<menuitem action="GlypherCopyUnicode"/>
<menuitem action="GlypherCopyText"/>
</menu>
</menu>
</menubar>
</ui>
'''
def __del__(self) :
aobject.AObject.__del__(self)
def _do_treeview_changed(self, treeview, ev) :
'''
STICKY BOTTOM
If we are within 1/5 of a page (scrolled window height) of bottom,
scroll down with new entry
'''
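# Illustrative numbers only: with page_size=200 and upper=1000, auto-scroll
# kicks in once value > 1000 - 1.2*200 = 760, i.e. when the bottom of the
# view is within a fifth of a page of the end of the list.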
sw = treeview.get_parent()
adj = sw.get_vadjustment()
if adj.get_value() > adj.upper - 1.2*adj.page_size :
adj.set_value(adj.upper - adj.page_size)
def _do_caret_moved(self, glyph_entry) :
att = glyph_entry.caret.attached_to
if att is not None :
anc = att.get_ancestors()
anc_strings = []
quote_mark1 = u'<span foreground="#CCCCCC">[</span>'
quote_mark2 = u'<span foreground="#CCCCCC">]</span>'
for a in anc :
if a.am('phrasegroup') :
anc_strings.append(unicode(g.describe_phrasegroup(a.mes[-1])))
if a.am('word') :
anc_strings.append(quote_mark1+a.to_string()+quote_mark2)
anc_strings.reverse()
if att.am('target_phrase') :
anc_strings.append(u'<span foreground="#FF8888">%s</span>' %\
att.get_name())
elif att.am('symbol') :
anc_strings.append(u'<span foreground="#FF8888">%s</span>' %\
att.get_shape())
anc_string = u'\u21f0'.join(anc_strings)
else :
anc_string = '<i>Unattached</i>'
self.where_label.set_markup(u'<b>Caret location</b> : %s' % anc_string)
self.where_label.set_ellipsize(pango.ELLIPSIZE_START)
def __init__(self, env=None) :
gtk.VPaned.__init__(self)
vbox = gtk.VBox()
self.set_property('can-focus', True)
self.connect_after('focus-in-event', lambda w, e : self.inside_.grab_focus())
self.inside_ = GlyphInside_()
aobject.AObject.__init__(self, name_root="Glypher", env=env, view_object=True)
self.pack1(self.inside_, True, True)
scrolled_window = gtk.ScrolledWindow()
self.where_label = gtk.Label()
self.where_label.show()
self.where_label.modify_fg(gtk.STATE_NORMAL, gtk.gdk.Color('#888888'))
self.where_label.set_alignment(0,0.5)
vbox.pack_start(self.where_label, False)
self.inside_.connect('caret-moved', self._do_caret_moved)
self._do_caret_moved(self.inside_)
self.treeview = gtk.TreeView(g.responses_lsst)
self.treeview.set_rules_hint(True)
code_trvc = gtk.TreeViewColumn('Code')
self.treeview.append_column(code_trvc)
code_trvc.set_expand(False)
statement_trvc = gtk.TreeViewColumn('Statement')
self.treeview.append_column(statement_trvc)
response_trvc = gtk.TreeViewColumn('Response')
self.treeview.append_column(response_trvc)
code_crtx = gtk.CellRendererText()
code_trvc.pack_start(code_crtx, True)
statement_cllr = GlyphCellRenderer()
statement_trvc.pack_start(statement_cllr, True)
response_cllr = GlyphCellRenderer()
response_trvc.pack_start(response_cllr, True)
code_trvc.add_attribute(code_crtx, 'text', 0)
statement_trvc.add_attribute(statement_cllr, 'obj', 1)
response_trvc.add_attribute(response_cllr, 'obj', 2)
scrolled_window.add(self.treeview)
self.treeview.connect('size-allocate', self._do_treeview_changed)
scrolled_window.set_size_request(-1, 200)
vbox.pack_start(scrolled_window)
self.pack2(vbox, True, True)
self.show_all()
self.action_panel = self.make_action_panel()
self.inside_.connect('status-update', lambda w, s : self.set_status(s))
if env is not None :
self.inside_.connect('request-plot', env.toplevel.plot_source)
def make_action_panel(self) :
settings_ntbk = gtk.Notebook()
settings_ntbk.set_tab_pos(gtk.POS_LEFT)
settings_ntbk.aes_title = "Glypher Prefs"
# Expand
expand_vbox = gtk.VBox()
expand_vbox.pack_start(gtk.Label('Expand flags'), False)
complex_togb = gtk.ToggleButton("Complex")
complex_togb.set_active(g.expand['complex'])
complex_togb.connect("toggled", lambda b : g.set_expand_flag('complex', b.get_active()))
expand_vbox.pack_start(complex_togb, False)
trig_togb = gtk.ToggleButton("Trig")
trig_togb.set_active(g.expand['trig'])
trig_togb.connect("toggled", lambda b : g.set_expand_flag('trig', b.get_active()))
expand_vbox.pack_start(trig_togb, False)
settings_ntbk.append_page(expand_vbox, gtk.Label("Ex"))
# Complex
complex_vbox = gtk.VBox()
complex_vbox.pack_start(gtk.Label('Complex settings'), False)
plane_togb = gtk.ToggleButton("Complex Plane")
plane_togb.set_active(g.plane_mode)
plane_togb.connect("toggled", lambda b : g.set_plane_mode(b.get_active()))
complex_vbox.pack_start(plane_togb, False)
settings_ntbk.append_page(complex_vbox, gtk.Label("Cx"))
# LA
la_vbox = gtk.VBox()
la_vbox.pack_start(gtk.Label('Linear algebra'), False)
zeros_togb = gtk.ToggleButton("Blank zeros")
zeros_togb.set_active(g.zeros_mode)
zeros_togb.connect("toggled", lambda b : g.set_zeros_mode(b.get_active()))
la_vbox.pack_start(zeros_togb, False)
settings_ntbk.append_page(la_vbox, gtk.Label("Mx"))
# Libraries
libraries_vbox = gtk.VBox()
libraries_vbox.pack_start(gtk.Label('Active libraries'), False)
for lib in g.libraries :
cpts = lib.split('.')
lib_togb = gtk.ToggleButton(cpts[-1])
lib_togb.set_active(g.libraries[lib])
# Bind lib as a default argument so each button toggles its own library
lib_togb.connect("toggled", lambda b, lib=lib : g.set_library(lib, b.get_active()))
libraries_vbox.pack_start(lib_togb, False)
settings_ntbk.append_page(libraries_vbox, gtk.Label("Li"))
# General
general_vbox = gtk.VBox()
general_vbox.pack_start(gtk.Label('General'), False)
show_all_pow_diff_togb = gtk.ToggleButton("Show Pow/Diff")
show_all_pow_diff_togb.set_active(g.show_all_pow_diff)
show_all_pow_diff_togb.connect("toggled", lambda b :
(g.set_show_all_pow_diff(b.get_active()),
self.inside_.queue_draw()))
general_vbox.pack_start(show_all_pow_diff_togb, False)
general_vbox.pack_start(gtk.Label("Pow Mode"), False)
pow_adjt = gtk.Adjustment(0, -1, 1, 1)
pow_hscl = gtk.HScale(pow_adjt)
pow_hscl.set_draw_value(False)
pow_hscl.connect('value-changed', lambda r :
g.set_pow_mode_force(int(round(r.get_value()))))
pow_hbox = gtk.HBox()
pow_hbox.pack_start(gtk.Label('Off'))
pow_hbox.pack_start(pow_hscl)
pow_hbox.pack_start(gtk.Label('On'))
general_vbox.pack_start(pow_hbox)
general_vbox.pack_start(gtk.Label("Diff Mode"), False)
diff_adjt = gtk.Adjustment(0, -1, 1, 1)
diff_hscl = gtk.HScale(diff_adjt)
diff_hscl.set_draw_value(False)
diff_hscl.connect('value-changed', lambda r :
g.set_diff_mode_force(int(round(r.get_value()))))
diff_hbox = gtk.HBox()
diff_hbox.pack_start(gtk.Label('Off'))
diff_hbox.pack_start(diff_hscl)
diff_hbox.pack_start(gtk.Label('On'))
general_vbox.pack_start(diff_hbox)
#general_vbox.pack_start(self.aes_method_toggle_button(\
# 'formulae_set_or_insert',
# label="Formulae",
# preferencable=True,
# onoff=('Set','Insert')))
settings_ntbk.append_page(general_vbox, gtk.Label("Gn"))
settings_ntbk.show_all()
return settings_ntbk
def get_method_window(self) :
vbox = GlyphToolbox(self.inside_.caret, grab_entities=True,
hidden=not self.get_expand_toolbox())
self.toolbox = vbox.entbox
preferences_butt = gtk.Button("Preferences")
preferences_butt.connect("clicked", lambda o :
self.env.action_panel.to_action_panel(self.action_panel))
preferences_butt.show_all()
vbox.pack_start(preferences_butt, False)
vbox.show()
return vbox
class GlyphInside_ (GlyphEntry) :
container = None
caret = None
main_phrase = None
margins = [10, 40, 0, 10]
response_phrase = None
responses = None
line_height = 35.
default_height = 240
default_width = 460
expand_toolbox = True
def do_content_changed(self, o = None) :
self.queue_draw()
_move_from = None
_mp_from = None
def do_button_press_event(self, event) :
nearest = (None, 0)
self.grab_focus()
for m in self.main_phrases :
x, y = self._local_coords_for_main_phrase(m,(event.x, event.y))
d = m.find_distance((x, y))
if fc(d, 0) :
target = m.find_nearest(point=(x,y), fall_through=True, enterable_parent=False)
bp = target[1].process_button_press(event)
if bp is None : return False
debug_print(bp)
if not target[1] or not bp :
debug_print((x,y,event.button))
if (event.button == 1) :
self.caret.go_near((x, y), change=True)
self.queue_draw()
if (event.button == 2) :
self._move_from = (event.x,event.y,m)
self._mp_from = m.get_anchor_point()
return True
elif nearest[0] is None or d < nearest[1] :
nearest = (m, d)
if nearest[0] is not None and event.button == 1 :
# Recompute coordinates local to the nearest phrase (x, y above belong to
# whichever main phrase happened to be iterated last)
x, y = self._local_coords_for_main_phrase(nearest[0], (event.x, event.y))
self.caret.go_near((x, y), change=True)
self.queue_draw()
return True
def do_motion_notify_event(self, event) :
if self._move_from is not None :
m = self._move_from[2]
delta = (event.x-self._move_from[0], event.y-self._move_from[1])
m.move(delta[0] + self._mp_from[0], delta[1] + self._mp_from[1])
self.queue_draw()
def do_button_release_event(self, event) :
for m in self.main_phrases :
x, y = self._local_coords_for_main_phrase(m,(event.x, event.y))
if fc(m.find_distance((x, y)), 0) :
target = m.find_nearest(point=(x,y), fall_through=True, enterable_parent=False)
bp = target[1].process_button_release(event)
debug_print(bp)
if bp is None : return False
if not target[1] or not bp :
if (event.button == 2) :
self._move_from = None
self._mp_from = None
self.queue_draw()
return True
def do_scroll_event(self, event) :
for m in self.main_phrases :
x, y = self._local_coords_for_main_phrase(m,(event.x, event.y))
if fc(m.find_distance((x, y)), 0) :
target = m.find_nearest(point=(x,y), fall_through=True, enterable_parent=False)
if not target[1] or not target[1].process_scroll(event) :
_scaling = 1.2 if event.direction == gtk.gdk.SCROLL_UP else 1/1.2
m.set_size_scaling(_scaling*m.get_ip('font_size_coeff'))
self.queue_draw()
return True
def _resize_to_allocation(self, allocation=None) :
if allocation is not None :
self.default_height = allocation.height
self.default_width = allocation.width
self.response_loc = (40, self.default_height-80, self.default_width-40,
self.default_height-40)
self.main_phrase.line_length = self.response_loc[2]-self.response_loc[0]
self.response_phrase.line_length = self.response_loc[2]-self.response_loc[0]
#self.responses_phrase.set_max_dimensions((self.response_loc[2]-self.response_loc[0], self.response_loc[3]-self.response_loc[1]))
#self.responses_phrase.set_min_dimensions(self.responses_phrase.get_max_dimensions())
#self.responses_phrase.set_col_width(1, 0.5*(self.responses_phrase.get_max_dimensions()[0]-50))
#self.responses_phrase.set_col_width(2, 0.5*(self.responses_phrase.get_max_dimensions()[0]))
#self.responses_phrase.recalc_bbox()
#self.main_phrases_offsets[self.responses_main_phrase] = self.response_loc[0:2]
self.main_phrases_offsets[self.response_phrase] = (self.position[0],
.5*(self.position[0]+40+\
self.response_loc[3]))
def __init__(self, position = (40, 40)):
GlyphEntry.__init__(self, position=position, interactive=True,
evaluable=True, fixed_main_phrase=True,
dec_with_focus=False, corner_art=True)
self.main_phrase.set_by_bbox(False)
self.main_phrase.set_enterable(False)
self.main_phrase.set_attachable(False)
self.reset_main_phrase()
self.caret.enter_phrase(self.main_phrase)
self.main_phrases = [self.main_phrase]
self.main_phrases_offsets = {self.main_phrase : self.position}
self.responses = {}
ps = self.process_main_phrase_signal
self.response_phrase = GlypherMainPhrase(ps, self.line_height, self.line_height, (0.0,0.0,0.0))
#self.responses_main_phrase = GlypherMainPhrase(ps, self.line_height, self.line_height, (0.0,0.0,0.0), anchor=('l','t'))
#self.responses_phrase = GlypherTable(self.responses_main_phrase, first_col=GlypherSymbol(None, " "), cell_padding=5, border=None)
#self.responses_phrase.set_max_dimensions((10*self.responses_phrase.get_scaled_font_size(),
# 6*self.responses_phrase.get_scaled_font_size()))
#self.responses_main_phrase.append(self.responses_phrase)
#self.responses_main_phrase.set_font_size(20)
#self.responses_phrase.set_col_width(0, 50)
#self.responses_phrase.recalc_bbox()
#inpu_word = make_word('Statement', None); inpu_word.set_font_name('sans')
#self.responses_phrase.add_cell(0,1).append(inpu_word); inpu_word.set_bold(True)
#resp_word = make_word('Response', None); resp_word.set_font_name('sans')
#c = self.responses_phrase.add_cell(0,2)
#c.is_caching = True
#c.append(resp_word); resp_word.set_bold(True)
#self.responses_phrase.set_row_border_bottom(0)
#self.responses_phrase.set_col_colour(0, (0.8,0.0,0.0))
#self.responses_phrase.set_col_colour(1, (0.5,0.5,0.5))
#self.responses_phrase.set_col_colour(2, (0.8,0.5,0.5))
#self.responses_phrase.set_row_colour(0, (0.3,0.3,0.3))
self.caret.enter_phrase(self.space_array)
self.main_phrases = [self.main_phrase, self.response_phrase]
#self.main_phrases = [self.main_phrase, self.responses_main_phrase, self.response_phrase]
self.main_phrases_offsets = {self.main_phrase : self.position,
self.response_phrase : None }
self._resize_to_allocation()
def reset_main_phrase(self, space_array=None) :
GlyphEntry.reset_main_phrase(self)
self.caret.remove_boxes()
if space_array is None :
self.space_array = GlypherSpaceArray(self.main_phrase, spacing=0.2,
num_ops=1)
else :
self.space_array = space_array
self.space_array.set_deletable(False)
self.space_array.set_highlight_group(False)
self.main_phrase.append(self.space_array)
self.space_array.set_recommending(self.space_array.get_target('pos0'))
self.caret.try_suggestion()
def show_latex(self) :
d = gtk.Dialog("LaTeX", self.get_toplevel(), gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,\
(gtk.STOCK_OK, gtk.RESPONSE_ACCEPT))
d.set_size_request(300, -1)
e = gtk.TextView()
f = gtk.TextView()
e.get_buffer().set_text(self.main_phrase.to_latex())
f.get_buffer().set_text(self.response_phrase.to_latex())
d.vbox.pack_start(gtk.Label("Statement"))
d.vbox.pack_start(e)
d.vbox.pack_start(gtk.Label("Response"))
d.vbox.pack_start(f)
d.show_all()
resp = d.run()
d.destroy()
#if resp == gtk.RESPONSE_OK :
# debug_print(text)
# self.add_target_butt.phrasegroup.add_target(self.gmg.caret.phrased_to, text, stay_enterable=True)
# self.gmg.caret.try_suggestion()
self.grab_focus()
def open_xml(self, insert = False) :
'''Open (or insert) Glypher XML from file.'''
chooser = gtk.FileChooserDialog(\
title=\
(("Insert" if insert else "Open")+" XML File"),
action=gtk.FILE_CHOOSER_ACTION_OPEN,
buttons=(gtk.STOCK_CANCEL,gtk.RESPONSE_CANCEL,gtk.STOCK_OPEN,gtk.RESPONSE_OK))
chooser.set_current_folder(get_user_location()+'glypher/snippets/')
chooser.set_default_response(gtk.RESPONSE_OK)
resp = chooser.run()
self.grab_focus()
# Treat anything other than OK (cancel, closing the dialog, ...) as a cancel
if resp != gtk.RESPONSE_OK : chooser.destroy(); return
self.filename = chooser.get_filename()
chooser.destroy()
with open(self.filename) as f :
xml_content = f.read()
tree = ET.ElementTree(ET.XML(xml_content))
self.set_xml(tree, insert=insert)
def export(self) :
'''Export Glypher as an image.'''
if self.caret.attached_to is None :
return
root = self.get_xml()
chooser = gtk.FileChooserDialog(\
title="Export File", action=gtk.FILE_CHOOSER_ACTION_SAVE,
buttons=(gtk.STOCK_CANCEL,gtk.RESPONSE_CANCEL,gtk.STOCK_SAVE,gtk.RESPONSE_OK))
extra_hbox = gtk.HBox()
extra_hbox.pack_start(gtk.Label("Decoration"), False)
decoration_chkb = gtk.CheckButton()
extra_hbox.pack_start(decoration_chkb, False)
extra_hbox.pack_start(gtk.Label("Padding"), False)
padding_chkb = gtk.CheckButton()
padding_chkb.set_active(True)
extra_hbox.pack_start(padding_chkb, False)
extra_hbox.pack_start(gtk.Label("Transparent"), False)
transparent_chkb = gtk.CheckButton()
extra_hbox.pack_start(transparent_chkb, False)
extra_hbox.pack_start(gtk.Label("Scale"), False)
scale_entr = gtk.Entry()
scale_entr.set_text("3.")
extra_hbox.pack_start(scale_entr, False)
extra_hbox.show_all()
chooser.set_extra_widget(extra_hbox)
chooser.set_current_folder(get_user_home())
chooser.set_default_response(gtk.RESPONSE_OK)
chooser.set_current_name('.svg')
resp = chooser.run()
self.grab_focus()
if resp == gtk.RESPONSE_OK :
filename = chooser.get_filename()
else :
chooser.destroy()
return
#f = open(filename, 'w')
ent = self.main_phrase
if padding_chkb.get_active() :
padding = 10
else :
padding = 0
debug_print(scale_entr.get_text())
sc = float(scale_entr.get_text())
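# A .png filename is rendered below to a raster ImageSurface (written out via
# write_to_png at the end); any other extension streams straight to an
# SVGSurface.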
if filename.endswith('.png') :
cairo_svg_surface = cairo.ImageSurface(cairo.FORMAT_ARGB32,
int(sc*(int(ent.get_width())+2*padding)),
int(sc*(int(ent.get_height())+2*padding)))
else :
cairo_svg_surface = cairo.SVGSurface(filename,
int(sc*(int(ent.get_width())+2*padding)),
int(sc*(int(ent.get_height())+2*padding)))
cc = cairo.Context(cairo_svg_surface)
cc.scale(sc, sc)
if not transparent_chkb.get_active() :
cc.set_source_rgba(1.0, 1.0, 1.0, 1.0)
cc.rectangle(0, 0,
(int(ent.get_width())+2*padding),
(int(ent.get_height())+2*padding))
cc.fill()
dec = ent.show_decoration()
ent.set_p('is_decorated', decoration_chkb.get_active())
# Move to middle of padded surface
cc.translate(padding-ent.config[0].bbox[0],
padding-ent.config[0].bbox[1])
ent.set_p('is_decorated', dec)
ent.draw(cc)
if filename.endswith('.png') :
cairo_svg_surface.write_to_png(filename)
chooser.destroy()
def _do_validate_formula(self, entr, params) :
ok_butt, id_entr, cat_entr, sym_entr = params
match = re.match('[a-zA-Z0-9_-]+$', id_entr.get_text())
if match is None :
ok_butt.set_sensitive(False)
id_entr.modify_base(gtk.STATE_NORMAL, gtk.gdk.Color('pink'))
elif len(cat_entr.get_text()) == 0 or len(sym_entr.get_text()) == 0 :
ok_butt.set_sensitive(False)
else:
ok_butt.set_sensitive(True)
id_entr.modify_base(gtk.STATE_NORMAL, gtk.gdk.Color('white'))
def save_formula(self) :
'''Save selected Glypher text as a formula.'''
if self.caret.attached_to is None :
return
attached_root = self.caret.attached_to.get_xml()
chooser = gtk.Dialog(\
title="Save as Formula", parent=self.get_toplevel(),
flags=gtk.DIALOG_MODAL|gtk.DIALOG_DESTROY_WITH_PARENT,
buttons=(gtk.STOCK_CANCEL,gtk.RESPONSE_CANCEL,gtk.STOCK_SAVE,gtk.RESPONSE_OK))
chooser.set_default_response(gtk.RESPONSE_OK)
try :
ok_butt = chooser.get_widget_for_response(gtk.RESPONSE_OK)
except :
ok_butt = chooser.get_action_area().get_children()[0]
ok_butt.set_sensitive(False)
vbox = gtk.VBox()
preview_glyi = GlyphImage()
preview_glyi.set_xml(ET.ElementTree(attached_root))
preview_glyi.set_font_size(40)
vbox.pack_start(preview_glyi)
content_table_maker = PreferencesTableMaker()
content_table_maker.append_heading("New formula")
id_entr = gtk.Entry()
content_table_maker.append_row("ID", id_entr,
tip="Unique ASCII name (w/o whitespace)")
title_entr = gtk.Entry()
content_table_maker.append_row("Title", title_entr,
tip="Informative title for user")
sym_entr = gtk.Entry()
content_table_maker.append_row("Symbol", sym_entr,
tip="A one/two character symbol to include in the Toolbox")
info_txvw = gtk.TextView()
info_txvw.set_size_request(400, 100)
content_table_maker.append_row("Description", info_txvw,
tip="A short help text for the user")
cat_entr = gtk.Entry()
content_table_maker.append_row("Category", cat_entr,
tip="A Toolbox formula category to add this to")
wiki_entr = gtk.Entry()
content_table_maker.append_row("Wikipedia", wiki_entr,
tip="Title of Wikipedia page to link to")
params = (ok_butt, id_entr, cat_entr, sym_entr)
id_entr.connect("changed", self._do_validate_formula, params)
cat_entr.connect("changed", self._do_validate_formula, params)
sym_entr.connect("changed", self._do_validate_formula, params)
vbox.pack_start(content_table_maker.make_table())
chooser.get_content_area().add(vbox)
chooser.get_content_area().show_all()
resp = chooser.run()
self.grab_focus()
if resp == gtk.RESPONSE_OK :
name = id_entr.get_text()
title = title_entr.get_text()
cat = cat_entr.get_text()
sym = sym_entr.get_text()
ib = info_txvw.get_buffer()
info = ib.get_text(ib.get_start_iter(), ib.get_end_iter())
wiki = wiki_entr.get_text()
filename = get_user_location()+'glypher/formulae/'+name+'.xml'
chooser.destroy()
else :
chooser.destroy()
return
f = open(filename, 'w')
root = ET.Element("glypher")
root.set("name", name)
content_node = ET.SubElement(root, "content")
content_node.append(attached_root)
root.set("title", title)
cat_node = ET.SubElement(root, "category")
cat_node.text = cat
sym_node = ET.SubElement(root, "symbol")
sym_node.text = sym
wiki_node = ET.SubElement(root, "wiki")
wiki_node.text = wiki
info_node = ET.SubElement(root, "info")
info_node.text = info
gutils.xml_indent(root)
tree = ET.ElementTree(root)
tree.write(f, encoding="utf-8")
f.close()
def save_xml(self) :
'''Save selected Glypher XML to file.'''
if self.caret.attached_to is None :
return
tree = self.get_xml()
chooser = gtk.FileChooserDialog(\
title="Save XML File", action=gtk.FILE_CHOOSER_ACTION_SAVE,
buttons=(gtk.STOCK_CANCEL,gtk.RESPONSE_CANCEL,gtk.STOCK_SAVE,gtk.RESPONSE_OK))
chooser.set_current_folder(get_user_location()+'glypher/snippets/')
chooser.set_default_response(gtk.RESPONSE_OK)
chooser.set_current_name('.xml')
resp = chooser.run()
self.grab_focus()
if resp == gtk.RESPONSE_OK :
filename = chooser.get_filename()
chooser.destroy()
else :
chooser.destroy()
return
f = open(filename, 'w')
gutils.xml_indent(tree.getroot())
tree.write(f, encoding="utf-8")
f.close()
def open_phrasegroup(self) :
chooser = gtk.FileChooserDialog(\
title="Open GlyphMaker XML File", action=gtk.FILE_CHOOSER_ACTION_OPEN,
buttons=(gtk.STOCK_CANCEL,gtk.RESPONSE_CANCEL,gtk.STOCK_OPEN,gtk.RESPONSE_OK))
chooser.set_current_folder(get_user_location())
chooser.set_default_response(gtk.RESPONSE_OK)
resp = chooser.run()
# Treat anything other than OK (cancel, closing the dialog, ...) as a cancel
if resp != gtk.RESPONSE_OK : chooser.destroy(); return
self.filename = chooser.get_filename()
chooser.destroy()
pg = make_phrasegroup_by_filename(self.main_phrase, self.filename, operands = None)
if isinstance(pg, str) :
raise(RuntimeError(pg))
self.caret.insert_entity(pg)
self.grab_focus()
def draw(self, cr, swidth, sheight):
GlyphEntry.draw(self, cr, swidth, sheight)
cr.save()
cr.translate(*self.main_phrases_offsets[self.response_phrase])
if self.draw_corner_art and not self.suspend_corner_art :
bb = list(self.response_phrase.config[0].bbox)
bb[0] -= 10
bb[1] -= 10
bb[2] = self.allocation.width - 30 - bb[0] - self.position[0]
bb[3] = bb[3]-bb[1]
box_colour = (1.0, 1.0, 0.7)
draw.draw_box(cr, box_colour, bb)
self.response_phrase.draw(cr)
cr.restore()
def set_status(self, string) :
self.emit('status-update', string)
def process_line(self) :
self.response_phrase.empty()
self.response_processor = lambda : parse_space_array(self.space_array, self.caret)
input_line, response = GlyphEntry.process_line(self)
if response is None :
self.set_status("[No output]")
elif isinstance(response, str) :
self.response_phrase.append(make_word('Unsuccessful', None))
self.set_status(str(response))
elif response.am('entity') :
self.response_phrase.append(response)
response = self.response_phrase.get_entities()[0]
self.set_status(input_line.to_string() + ' |==| ' + response.to_string())
#input_line = input_line.OUT().copy()
#response = response.get_entities()[0].copy() \
# if response.am('phrase') and len(response.get_entities()) == 1\
# else response.copy()
self.i += 1
i = self.i
resp_thread = threading.Thread(None, self._make_response, None,
args=(i, input_line, response))
resp_thread.start()
return response
i = 0
def _make_response(self, i, input_line, response) :
#xml = input_line.OUT().get_xml(targets={}, top=False, full=False)
#xml = ET.ElementTree(xml)
#input_line = Parser.parse_phrasegroup(self.responses_phrase, xml, top=False)
input_line = GlypherEntity.xml_copy(None, input_line)
response = GlypherEntity.xml_copy(None, response)
self._append_response(i, input_line, response)
def _append_response(self, i, input_line, response) :
#self.responses[i] = (input_line, response)
#c = self.responses_phrase.get_cell(i, 1)
#c.append(input_line)
#c = self.responses_phrase.get_cell(i, 2)
#c.append(response)
g.add_response(i, input_line, response)
def do_key_press_event(self, event):
keyname = gtk.gdk.keyval_name(event.keyval)
m_control = bool(event.state & gtk.gdk.CONTROL_MASK)
m_shift = bool(event.state & gtk.gdk.SHIFT_MASK)
m_alt = bool(event.state & gtk.gdk.MOD1_MASK)
m_super = bool(event.state & gtk.gdk.SUPER_MASK)
if (keyname == 'equal' and m_control) or (keyname == 'plus' and m_control) :
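# Debug evaluation path (both bindings require Ctrl): round-trip the
# statement through the maxima and sympy parsers, show the results in the
# status line, then refill the response phrase from get_sympy().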
phrase = self.main_phrase
sympy_output = ""
if m_control :
output = ""
string = phrase.to_string("maxima")
if have_sympy :
try :
result = sympy.parsing.maxima.parse_maxima(string)
try : num = result.evalf()
except (ValueError, AttributeError) : num = "[Cannot evaluate]"
result = str(result) + " = " + str(num)
except ValueError :
result = "[Cannot parse]"
output += "parse_maxima: {" + string + ":: " + str(result) + "}"
string = phrase.to_string("sympy")
output += " || "
if have_sympy :
try :
sympy_output = sympy.core.sympify(string)
try : num = sympy_output.evalf()
except (ValueError, AttributeError) : num = "[Cannot evaluate]"
result = str(sympy_output) + " = " + str(num)
except ValueError :
result = "[Cannot parse]"
output += "sympify: {" + string + ":: " + str(result) + "}"
else :
output += " [NO SYMPY]"
else :
output = str(phrase) + phrase.to_string("string")
self.set_status(output)
self.response_phrase.empty()
sympy_output = phrase.get_sympy().doit() if keyname == 'equal' else phrase.get_sympy().evalf()
try :
sy = interpret_sympy(self.response_phrase, sympy_output)
self.response_phrase.append(sy)
except GlypherTargetPhraseError as e :
debug_print("Error : " + str(e))
else :
return GlyphEntry.do_key_press_event(self, event)
self.queue_draw()
return True
|
Aesthete
|
/Aesthete-0.4.2.tar.gz/Aesthete-0.4.2/aesthete/glypher/GlypherView.py
|
GlypherView.py
|
from aobject.utils import debug_print
import PhraseGroup
import Dynamic
from Word import *
import Commands as C
import glypher as g
import InterpretBackend
try :
import sympy
from sympy.series.order import Order
from sympy.printing.mathml import mathml
from sympy.utilities.mathml import c2p
have_sympy = True
except ImportError :
have_sympy = False
def parse_command(caret, command, *args) :
'''Take a command as a string and a series of Glypher arguments, and try to
turn it into an executable sympy function (and its arguments).'''
# Try the specifically defined functions
if command in g.commands :
return g.commands[command](caret, *args)
# Try the specifically picked out sympy functions
elif command in g.operation_commands :
return C.operation_command(g.operation_commands[command], caret, *args)
# See if there's a standard function
func = Dynamic.get_sympy_function(command.lower())
if func is not None :
return C.operation_command(func, caret, *args)
# See if there's a loaded library function
func = Dynamic.get_library_function(command.lower())
if func is not None :
return C.operation_command(func, caret, *args)
# Give up
return None
#return "Command not found (are all the necessary dynamic libraries loaded?)"
def parse_space_array(space_array, caret) :
args = space_array.get_args()
if space_array.get_op_count() == 1 :
if len(args) == 0 :
return (None,None)
return (args[0], interpret_sympy(None, args[0].get_sympy()))
elif space_array.get_op_count() == 3 and args[1].to_string() == 'at' :
args = C.get_arg_innards(args)
if isinstance(args, str) :
debug_print(args)
return (None, None)
debug_print(args[0])
subs = C.do_substitutions_from_list(args[0], args[2])
if isinstance(subs, str) :
debug_print(subs)
return (None, None)
return (space_array, interpret_sympy(None, subs))
else :
command = args[0]
if not command.am('word') : return "Expected first token to be command"
response = parse_command(caret, command.to_string(), *args[1:])
if response is None :
debug_print(space_array['pos0'].IN())
raise PhraseGroup.GlypherTargetPhraseError(space_array['pos0'].IN(),
"Command not found (are all the necessary dynamic libraries loaded?)")
return (space_array, response)
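# Roughly, the three input forms handled above are: a lone expression
# (op_count == 1), which is sympified and echoed back; "<expr> at <subs>"
# (op_count == 3 with the literal word 'at'), which performs substitution;
# and "<command> <arg> ...", which is routed through parse_command.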
def interpret_sympy(parent, result) :
"""Return a GlypherEntity representation of a sympy object. If possible,
this is rendered via MathML, but as not all sympy objects successfully
output MathML, we have a fall-back option to direct parsing."""
return InterpretBackend._generic_to_entity(parent, result)
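# NOTE: the early return above delegates everything to
# InterpretBackend._generic_to_entity, so the MathML-based path below is
# currently unreachable and kept only as legacy code.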
if not have_sympy : return
# Check whether this is a dictionary before going on; sympify can't handle
# dictionaries itself.
if isinstance(result, dict) :
try :
d = dict()
for a in result.keys() :
tree_key = mathml(a)
tree_val = mathml(result[a])
d[tree_key] = tree_val
except TypeError :
debug_print("""
Resorted to old-style sympy->glypher rendering, as
some sympy object not available in MathML.
""")
return InterpretBackend._sympy_to_entity(parent, result)
else :
return interpret_mathml(parent, d)
try :
content = mathml(result)
except TypeError :
debug_print("""
Resorted to old-style sympy->glypher rendering, as
some sympy object not available in MathML.
""")
return InterpretBackend._sympy_to_entity(parent, result)
else :
return interpret_mathml(parent, content)
def interpret_mathml(parent, content) :
processor = InterpretBackend._mathml_to_entity
if isinstance(content, dict) :
d = dict()
for a in content.keys() :
tree_key = ET.fromstring(a)
tree_val = ET.fromstring(content[a])
d[tree_key] = tree_val
return processor(parent, d)
debug_print(content)
#FIXME: dirty hack to ensure mml namespace has been declared.
# We should be getting Content MathML, so this is technically wrong, but the
# passed XML from sympy may contain Presentation MathML elements under the
# mml namespace
#content = "".join(("<math xmlns:mml='http://www.w3.org/1998/Math/MathML'>\n",
# content,
# "</math>"))
content = sympy.utilities.mathml.add_mathml_headers(content)
tree = ET.fromstring(content)
return processor(parent, tree)
|
Aesthete
|
/Aesthete-0.4.2.tar.gz/Aesthete-0.4.2/aesthete/glypher/Interpret.py
|
Interpret.py
|
from Entity import *
import gutils
from sympy.core.sympify import SympifyError
ac = gutils.array_close
fc = gutils.float_close
class GlypherMirror(GlypherEntity) :
tied_to = None
def to_string(self, mode = "string") : return self.tied_to.to_string(mode);
def __init__(self, parent = None, tied_to = None) :
GlypherEntity.__init__(self, parent)
self.add_properties({'tied_to': None})
self.set_always_recalc(True)
self.mes.append('mirror')
self.config[0].bbox[0] = 0
self.config[0].bbox[3] = 1
self.set_ref_width(1)
self.set_ref_height(1)
self.set_attachable(False)
self.set_tied_to(tied_to)
self.recalc_bbox()
def recalc_bbox(self, quiet = False) :
self.set_tied_to(self.get_p('tied_to'))
chg1 = self.cast()
chg2 = GlypherEntity.recalc_bbox(self, quiet=quiet)
return chg1 or chg2
def set_tied_to(self, entity) :
self.tied_to = entity
if self.tied_to == self.get_p('tied_to') :
return
self.set_p('tied_to', entity)
debug_print(entity.format_me() if entity is not None else None)
self.recalc_bbox()
def cast(self) :
old_rw = self.get_ref_width()
old_rh = self.get_ref_height()
self.set_ref_width(self.tied_to.get_width())
self.set_ref_height(self.tied_to.get_height())
return not fc(old_rw, self.get_ref_width()) or not fc(old_rh, self.get_ref_height())
def get_xml(self, name = None, top = True, targets = None, full = False) :
root = GlypherEntity.get_xml(self, name, top, full=full)
root.set('tied_to', self.tied_to.get_name())
return root
def draw(self, cr) :
if not self.get_visible() or self.get_blank() :
return
cr.save()
bb = self.config[0].bbox
cr.translate(bb[0]-self.tied_to.config[0].bbox[0],
bb[1]-self.tied_to.config[0].bbox[1])
self.tied_to.draw(cr)
cr.restore()
def make_mirror(parent, original) :
new_mirror = GlypherMirror(parent, original)
original.add_cousin(new_mirror)
return new_mirror
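# Minimal usage sketch: make_mirror(parent, original) yields a GlypherMirror
# that redraws `original` at its own position and resizes itself whenever the
# original's width or height changes.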
|
Aesthete
|
/Aesthete-0.4.2.tar.gz/Aesthete-0.4.2/aesthete/glypher/Mirror.py
|
Mirror.py
|
from PhraseGroup import *
import Parser
from Word import *
from BinaryExpression import *
from Decoration import *
from sympy.core.function import *
from sympy import N
auto_bracket_funcs = \
[ 'cos', 'sin', 'tan',
'cosh', 'sinh', 'tanh',
'1/cos', '1/sin', '1/tan',
'1/cosh', '1/sinh', '1/tanh',
'acos', 'asin', 'atan',
'acosh', 'asinh', 'atanh',
'exp', 'log' ]
function_inverses = { 'tan' : 'atan' , 'cos' : 'acos' , 'sin' : 'asin',
'tanh' : 'atanh' , 'cosh' : 'acosh' , 'sinh' : 'asinh' }
function_inverses_rev = dict((v,k) for k, v in function_inverses.iteritems())
function_translation_rev = {}
function_translation = {}
def function_init() :
global function_translation_rev
global function_translation
gamma_function = make_word(u'\u0393', None)
gamma_function[0].set_italic(False)
function_translation_rev['gamma'] = gamma_function
dirac_delta = make_word(u'\u03b4', None)
dirac_delta[0].set_italic(False)
function_translation_rev['DiracDelta'] = dirac_delta
heaviside = make_word('H', None)
function_translation_rev['Heaviside'] = heaviside
for fn in function_translation_rev :
r = function_translation_rev[fn].get_sympy()
function_translation[r] = fn
# Flip to ^-1 if we find an inverse
inversify = True
class GlypherNaryFunction(GlypherPhraseGroup) :
args = None
argslist = None
spacer = None
func_colour = (0.0, 0.5, 0.0)
def set_sympy_func(self, sympy_func) : self.set_p('sympy_func', sympy_func)
def get_sympy_func(self) : return self.get_p('sympy_func')
def set_resolve(self, resolve) : self.set_p('resolve', resolve)
def get_resolve(self) : return self.get_p('resolve')
def check_brackets(self, name) :
if not self.args or not self.get_resolve() : return
try :
collapsible = (name and name.get_repr() in auto_bracket_funcs)
except (GlypherTargetPhraseError, ValueError, TypeError) :
collapsible = False
#debug_print(str(name.get_sympy()))
self.args.set_auto_bracket(collapsible)
if not collapsible :
self.args.brackets_restore()
else :
self.args.check_collapse()
def __init__(self, parent, area=(0,0,0,0), expression=None,
auto_bracket=False, resolve=True, use_spacer=True) :
GlypherPhraseGroup.__init__(self, parent, [], area, 'name')
self.add_properties({'use_spacer' : True, 'auto_bracket' : False})
self.set_p('use_spacer', use_spacer)
self.set_resolve(resolve)
self.mes.append('function')
name = GlypherPhrase(self)
self.add_target(name, 'name')
self.set_lhs_target('name')
self.append(name)
#name.set_bold(True)
args = GlypherBracketedPhrase(self)
args.set_auto_bracket(auto_bracket)
if auto_bracket :
args.brackets_collapse()
else :
args.brackets_restore()
self.add_target(args, 'args')
self.set_rhs_target('args')
args.set_attachable(False)
self.args = args
args.no_bracket.add('function')
self.check_brackets(expression)
args.set_deletable(2)
if self.get_p('use_spacer') :
spacer = GlypherSpace(self, (0.125,0.3))
self.spacer = spacer
self.append(spacer)
self.append(args)
if not self.args.get_collapsed() and self.get_p('use_spacer') :
spacer.hide()
#args.collapse_condition = lambda : \
# len(arglist.poss[0].get_entities())==1 and\
# (args.should_collapse(arglist.poss[0].get_entities()[0]) or\
# is_short_mul(arglist.poss[0].get_entities()[0]))
if expression :
name.adopt(expression)
self.set_recommending(args.get_target('expression'))
def get_args(self) :
if len(self.args.get_entities()) == 1 :
if self.args.get_entities()[0].am('binary_expression') and (\
self.args.get_entities()[0].get_symbol_shape() == ',' or \
self.args.get_entities()[0].get_symbol_shape() == ';' ) :
arglist = self.args.get_entities()[0]
args = arglist.get_args()
else :
args = [self.args.get_entities()[0]]
else :
args = self.args.get_entities()
return args
def get_sympy_args(self) :
return [arg.get_sympy() for arg in self.get_args()]
def get_sympy(self) :
args = self.get_sympy_args()
try :
if self.get_sympy_func() :
try:
return self.get_sympy_func()(*args)
except TypeError as e : # hack to get mpmath functions working
new_args = []
for arg in args:
new_args.append(N(arg))
return self.get_sympy_func()(*new_args)
else :
f = Function(str(self.get_target('name').to_string()))
return f(*args)
except RuntimeError as e :
debug_print(e)
raise GlypherTargetPhraseError(self, "Could not evaluate function : %s" % str(e))
#check_inverse = re.compile('\(script\|expression=([^|]+)|site0=\|site1=\|site2=-1\|site3=\)')
def child_change(self) :
GlypherPhraseGroup.child_change(self)
if not self.get_resolve() : return
name = self.get_target('name')
try :
name_sympy = name.get_sympy()
except :
name_sympy = None
if name_sympy is not None and \
isinstance(name_sympy, Lambda) :
self.set_sympy_func(name_sympy)
else :
if name_sympy in function_translation :
test = function_translation[name_sympy]
else :
try :
test = name.get_repr()
except :
test = None
else :
if test[0:2] == '1/' and len(name.get_entities())==1 and name.get_entities()[0].am('script') and test[2:] in function_inverses :
test = function_inverses[test[2:]]
#test = sympy.core.sympify(test)
if test is not None :
func = Dynamic.get_sympy_function(test)
if func is None :
func = Dynamic.get_library_function(test)
if func is not None :
self.func_colour = (0.0, 0.5, 0.5)
else :
self.func_colour = (0.0, 0.5, 0.0)
self.set_sympy_func(func)
if self.get_sympy_func() :
name.set_rgb_colour(self.func_colour)
else :
name.set_rgb_colour(None)
self.check_brackets(name)
#new_auto = name.to_string() in auto_bracket_funcs
#if self.args and new_auto != self.args.auto_bracket :
# self.args.auto_bracket = new_auto
# self.args.check_collapse()
if self.args and self.get_p('use_spacer') and self.spacer :
if self.args.get_collapsed() :
self.spacer.show()
else :
self.spacer.hide()
self.make_simplifications()
_simplifying = False
def make_simplifications(self) :
if self._simplifying or not self.get_resolve() : return
self._simplifying = True
if len(self.get_target('name').get_entities()) == 1 :
e = self.get_target('name').get_entities()[0]
est = e.get_repr()
if inversify and e.am('word') and est in function_inverses.values() :
e.orphan()
key = function_inverses_rev[est]
debug_print('found inverse function')
s = GlypherScript(self, available=(False,False,True,False))
s.get_target('expression').append(make_word(key, s))
s.get_target('site2').append(GlypherNegative(s,
operand=make_word('1',
s)))
self.get_target('name').append(s)
self._simplifying = False
def delete(self, sender=None, if_empty=True) :
parent = self.get_up()
if self.get_resolve() and len(self.get_target('name').get_entities()) > 0 :
self.get_target('name').elevate_entities(parent, to_front=True)
self.feed_up()
GlypherPhraseGroup.delete(self, sender=sender)
return parent
class GlypherOrder(GlypherNaryFunction) :
def __init__(self, parent, order = None) :
O_sym = GlypherSymbol(parent, 'O', italic=False)
O_sym.set_name('O_sym')
GlypherNaryFunction.__init__(self, parent, expression=O_sym, resolve=False)
self.mes.append('order')
O_sym.set_rgb_colour((0, 0.5, 0.3))
self.set_default_entity_xml()
self.args.adopt(order)
def get_sympy(self) :
return sympy.series.order.Order(self.get_args()[0].get_sympy())
class GlypherSpecialFunction(GlypherNaryFunction) :
@classmethod
def parse_element(cls, parent, root, names, targets, operands, recommending, lead,
add_entities, am=None, top=True, args=None) :
sympy_code = root.find('sympy')
sympy_func = Dynamic.text_to_func(sympy_code.text.strip())
what = root.tag
name_entity = make_word(root.get('name'), parent)
auto_bracket = root.get('auto_bracket') == 'True'
num_args = root.get('num_args')
if num_args is None :
num_args = 1
else :
num_args = int(num_args)
fn = cls(parent, what, None, name_entity, sympy_func, auto_bracket,
num_args)
return fn
def __init__(self, parent, what, expression, name_entity, sympy_func,
auto_bracket, num_args=1) :
GlypherNaryFunction.__init__(self, parent, area=(0,0,0,0), expression=expression,
auto_bracket=auto_bracket, resolve=False)
self.mes.append('special_function')
self.mes.append(what)
self.ignore_targets.append("name")
xml = ET.ElementTree(name_entity.get_xml())
name_entity = Parser.parse_phrasegroup(self["name"], xml,
top=False)
name_entity.set_rgb_colour((1.0, 0.6, 0.6))
self["name"].adopt(name_entity)
self["name"].set_attachable(False, children_too=True)
self["name"].set_enterable(False, children_too=True)
self.set_lhs_target("args")
self.set_rhs_target(None)
self.set_sympy_func(sympy_func)
if num_args > 1 :
self["args"].adopt(GlypherCommaArray(self, lhs=None, rhs=None,
num_ops=num_args))
self["args"].set_enterable(False)
# sympy_func takes form
# f(i,j,*args)
class GlypherIndexedFunction(GlypherNaryFunction) :
@classmethod
def parse_element(cls, parent, root, names, targets, operands, recommending, lead,
add_entities, am=None, top=True, args=None) :
sympy_code = root.find('sympy')
sympy_func = Dynamic.text_to_func(sympy_code.text.strip())
what = root.tag
name_entity = make_word(root.get('name'), parent)
auto_bracket = root.get('auto_bracket') == 'True'
num_args = root.get('num_args')
if num_args is None :
num_args = 1
else :
num_args = int(num_args)
available = root.get('available')
if available is None :
available = (True,False,True,False)
else :
available = map(lambda e : e=='True', available.split(','))
fn = cls(parent, what, None, name_entity, sympy_func, auto_bracket,
num_args, active_sites=available)
debug_print(available)
return fn
def __init__(self, parent, what, expression, name_entity, sympy_func,
auto_bracket, num_args=1,
active_sites=(True,False,True,False),
use_spacer=False) :
GlypherNaryFunction.__init__(self, parent, area=(0,0,0,0), expression=expression,
auto_bracket=auto_bracket, resolve=False, use_spacer=use_spacer)
self.mes.append('indexed_function')
self.mes.append(what)
self.ignore_targets.append("name")
xml = ET.ElementTree(name_entity.get_xml())
name_entity = Parser.parse_phrasegroup(self["name"], xml,
top=False)
name_entity.set_rgb_colour((1.0, 0.6, 0.6))
script = GlypherScript(parent, area=(0,0,0,0), expression=name_entity,
available=active_sites)
script.set_annotate(False)
self["name"].adopt(script)
self["name"].set_enterable(False)
self["name"].set_attachable(False)
script["expression"].set_attachable(False, children_too=True)
script["expression"].set_enterable(False, children_too=True)
sites = []
for i in range(0,4) :
if active_sites[i] :
sites.append(script["site"+str(i)])
self.add_target(script["site"+str(i)], "index"+str(i))
self.set_lhs_target("args")
self.set_rhs_target(None)
self.set_sympy_func(lambda *args : \
sympy_func(*(tuple(map(lambda s:s.get_sympy(),sites))+args)))
if num_args > 1 :
self["args"].adopt(GlypherCommaArray(self, lhs=None, rhs=None,
num_ops=num_args))
self["args"].set_enterable(False)
# sympy_func takes form
# f(i,j,*args)
class GlypherTrigFunction(GlypherIndexedFunction) :
@staticmethod
def _eval_sympy_func(sympy_func, inv_sympy_func, i, *args) :
if i is None :
return sympy_func(*args)
elif i == -1 :
if inv_sympy_func is None :
raise RuntimeError('Don\'t know inverse')
else :
return inv_sympy_func(*args)
return sympy_func(*args)**i
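# e.g. with sympy_func=sin and inv_sympy_func=asin: i=None gives sin(x),
# i=-1 gives asin(x), and i=2 gives sin(x)**2.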
def __init__(self, parent, what, expression, name_entity, sympy_func,
inv_sympy_func, auto_bracket, num_args=1) :
sympy_func_new = lambda i, *args : \
GlypherTrigFunction._eval_sympy_func(sympy_func, inv_sympy_func, i, *args)
GlypherIndexedFunction.__init__(self, parent, 'trig_function', expression=expression,
name_entity=name_entity, sympy_func=sympy_func_new,
auto_bracket=auto_bracket,
num_args=num_args, active_sites=(False,
False,
True,
False),
use_spacer=False)
self.mes.append(what)
_simplifying = False
def make_simplifications(self) :
GlypherIndexedFunction.make_simplifications(self)
if self._simplifying :
return
self._simplifying = True
if self.included() and self.get_parent().am('target_phrase') :
p = self.get_parent().get_phrasegroup()
if p.mes[-1] == 'script' and p.get_pow_mode() and \
p.get_available() == [False, False, True, False] and \
p.included() :
if len(p["site2"]) == 1 :
s = p["site2"].get_entities()[0]
try :
t = self["index2"].get_sympy()
exe = False
if t is None :
s.orphan()
self["index2"].adopt(s)
exe = True
elif s.am('word') and s.is_num() and s.get_sympy() != -1 :
t *= s.get_sympy()
s.orphan()
self["index2"].adopt(Interpret.interpret_sympy(self, t))
exe = True
if exe :
self.orphan()
p.get_parent().exchange(p, self)
self.set_recommending(self["args"])
except RuntimeError as e:
debug_print(e)
pass
else :
self.orphan()
p.get_parent().exchange(p, self)
self.set_recommending(self["index2"])
self._simplifying = False
@classmethod
def parse_element(cls, parent, root, names, targets, operands, recommending, lead,
add_entities, am=None, top=True, args=None) :
sympy_code = root.find('sympy')
sympy_func = Dynamic.text_to_func(sympy_code.text.strip())
inv_sympy_code = root.find('sympy_inverse')
if inv_sympy_code is None :
inv_sympy_func = None
else :
inv_sympy_func = Dynamic.text_to_func(inv_sympy_code.text.strip())
what = root.tag
name_entity = make_word(root.get('name'), parent)
auto_bracket = root.get('auto_bracket') == 'True'
num_args = root.get('num_args')
if num_args is None :
num_args = 1
else :
num_args = int(num_args)
fn = cls(parent, what, None, name_entity, sympy_func, inv_sympy_func, auto_bracket, num_args)
return fn
#def unicode_function_factory(what, name, sympy_func, auto_bracket=False, num_args=1,
# auto_italicize=True) :
# maker = lambda p : GlypherSpecialFunction(p, what,
# None, make_word(name, p, auto_italicize=auto_italicize), sympy_func,
# auto_bracket=auto_bracket, num_args=num_args)
# g.phrasegroups[what] = maker
#def indexed_function_factory(what, name, sympy_func, auto_bracket=False, num_args=1,
# auto_italicize=True,
# active_sites=(True,False,True,False)) :
# maker = lambda p : GlypherIndexedFunction(p, what,
# None, make_word(name, p, auto_italicize=auto_italicize), sympy_func,
# auto_bracket=auto_bracket, num_args=num_args,
# active_sites=active_sites)
# g.phrasegroups[what] = maker
g.add_phrasegroup_by_class('function', GlypherNaryFunction)
g.add_phrasegroup_by_class('special_function', GlypherSpecialFunction)
g.add_phrasegroup_by_class('indexed_function', GlypherIndexedFunction)
g.add_phrasegroup_by_class('trig_function', GlypherTrigFunction)
g.add_phrasegroup_by_class('order', GlypherOrder)
#indexed_function_factory('Ylm', u'Y',
# sympy.functions.Ylm, num_args=2)
#g.phrasegroups['bessel_j'] = unicode_function_factory(u'J',
# sympy.functions.special.bessel.bessel_j, active_sites=(False,False,True,False))
|
Aesthete
|
/Aesthete-0.4.2.tar.gz/Aesthete-0.4.2/aesthete/glypher/Function.py
|
Function.py
|
import glypher as g
import gtk
import time
import copy
from aobject.utils import debug_print
from PhraseGroup import *
import Dynamic
from sympy import Mul as sympy_mul
from sympy import Pow as sympy_power
from sympy.core.numbers import NegativeOne as sympy_negone
from gutils import float_cmp as fcmp
#nary_sympy_exprs = (u'+',u'-',u',',u' ',u'*', u'\u00B7', u'\u00D7', u';')
class GlypherNegative(GlypherPhraseGroup) :
associative = False
toolbox = { 'symbol' : '-',
'category' : 'Arithmetic',
'shortcut' : None,
'alternatives' : None,
'priority' : None }
def __init__(self, parent, area = (0,0,0,0), operand = None) :
GlypherPhraseGroup.__init__(self, parent, [], area)
self.mes.append('negative')
self.set_bodmas_level(100)
neg = GlypherPhrase(self); sign = GlypherSymbol(neg, '-'); neg.adopt(sign)
neg.set_enterable(False); neg.set_attachable(False)
expr = GlypherBracketedPhrase(self)
#expr = GlypherPhrase(self)
expr.set_collapse_condition(lambda : not (len(expr.get_entities())==1 and expr.get_entities()[0].am('add')))
#expr.set_bodmas_sensitivity(0)
self.append(neg)
self.append(expr)
self.set_lead(expr, GLYPHER_PG_LEAD_ALL)
self.add_target(expr, 'expression')
self.set_rhs_target('expression')
expr.set_deletable(3)
#self.set_expr('expression')
if operand is not None :
self.get_target('expression').adopt(operand)
def get_sympy(self) :
# apparently how sympy renders negative numbers
return sympy_mul(sympy_negone(), self.get_target('expression').get_sympy())
_orphaning = None
def make_simplifications(self) :
"""Absorb an inner suffix into this one."""
if self.included() and self._orphaning is None :
if len(self['expression']) == 1 and \
self['expression'].get_entities()[0].am('negative') and \
len(self['expression'].get_entities()[0]['expression'])==1 :
p = self.get_parent()
p.suspend_recommending()
neg = self['expression'].get_entities()[0]['expression'].get_entities()[0]
self._orphaning = neg
neg.orphan()
p.exchange(self, neg)
self._orphaning = None
p.resume_recommending()
class GlypherCumulativeSuffix(GlypherPhraseGroup) :
suffix_shape = None
def is_wordlike(self) :
if not self.included() :
return self.get_p('wordlike')
wordlike = len(self["operand"]) == 1 and self["operand"][0].is_wordlike()
old_wordlike = self.get_p('wordlike')
if wordlike != old_wordlike :
self.set_p('wordlike', wordlike) #FIXME: gotta be a better way of doing this
self.child_change()
return wordlike
def child_change(self) :
GlypherPhraseGroup.child_change(self)
self.is_wordlike()
def __init__(self, parent, shape, operand = None) :
GlypherPhraseGroup.__init__(self, parent)
self.mes.append('cumulative_suffix')
self.set_bodmas_level(100)
self.suffix_shape = shape
op_phrase = GlypherBracketedPhrase(self)
self.append(op_phrase)
self.add_target(op_phrase, 'operand')
self.set_lhs_target('operand')
op_phrase.set_deletable(3)
suffix_phrase = GlypherPhrase(self)
self.suffix_phrase = suffix_phrase
suffix_phrase.set_enterable(False)
suffix_phrase.set_attachable(False)
self.add_suffix()
self.append(suffix_phrase)
if operand is not None :
self['operand'].adopt(operand)
self.set_recommending(self)
def set_lhs(self, lhs) :
GlypherPhraseGroup.set_lhs(self, lhs)
self.set_recommending(self)
def add_suffix(self) :
new_suffix = GlypherSymbol(self.suffix_phrase,
self.suffix_shape)
self.suffix_phrase.append(new_suffix)
_orphaning = None
def make_simplifications(self) :
"""Absorb an inner suffix into this one."""
if self.included() and self._orphaning is None :
operand_tgt = self["operand"]
# Check whether we have exactly one of us as operand
if len(operand_tgt) == 1 and \
operand_tgt[0].am('cumulative_suffix') and \
operand_tgt[0].suffix_shape == self.suffix_shape :
operand = operand_tgt[0]
# Ensure we don't trigger a loop
self._orphaning = operand
self.suspend_recommending()
# Get the internal entit(ies)
operand.orphan()
operand["operand"].elevate_entities(operand_tgt)
# Check the internal suffix count
n = len(operand.suffix_phrase)
# Add another visual suffix
for i in range(0, n) :
self.add_suffix()
self.resume_recommending()
# Reset _orphaning for next occasion
self._orphaning = None
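# e.g. applying this to (f')' lifts the inner operand f out and appends one
# extra suffix glyph, leaving f''.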
class GlypherPrime(GlypherCumulativeSuffix) :
toolbox = { 'symbol' : '\'',
'category' : 'Calculus',
'shortcut' : None,
'alternatives' : None,
'priority' : None }
def __init__(self, parent, operand = None) :
GlypherCumulativeSuffix.__init__(self, parent, u'\u2032', operand)
self.mes.append('prime')
def get_sympy(self) :
'''Return nth derivative of operand.'''
# Get core expression
ex = self['operand'].get_sympy()
# Ensure this is the type of thing we're looking for
if not hasattr(ex, "func") or len(ex.args) != 1 :
raise GlypherTargetPhraseError(self,
'Need to provide a differentiable function of a single variable.')
# Differentiate as many times as there are primes
by = ex.args[0]
for level in range(0, len(self.suffix_phrase)) :
ex = Dynamic.diff(ex, by)
return ex
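# e.g. two primes on sin(x) differentiate twice with respect to x (assuming
# Dynamic.diff wraps sympy differentiation), giving -sin(x).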
class GlypherBinaryExpression(GlypherPhraseGroup) :
def get_gravity(self) :
return self.get_p('gravity')
def set_gravity(self, up) :
self.set_p('gravity', up)
also_adopt_symbol_shapes = ()
@classmethod
def parse_element(cls, parent, root, names, targets, operands, recommending, lead,
add_entities, am=None, top=True, args=None) :
usym = root.get("operator")
sym = GlypherSymbol(parent, usym)
allow_unary = False
if root.get('allow_unary') :
allow_unary = root.get('allow_unary') == 'True'
no_brackets = False
if root.get('no_brackets') :
no_brackets = root.get('no_brackets') == 'True'
gravity_up = False
if root.get('gravity') :
gravity_up = root.get('gravity') == 'Up'
variable_ops = True
if root.get('variable_ops') :
variable_ops = root.get('variable_ops') == 'True'
use_space = False
if root.get('use_space') :
use_space = root.get('use_space') == 'True'
num_ops = 2
if root.get('num_ops') :
num_ops = int(root.get('num_ops'))
if args is not None :
if "num_ops" in args :
num_ops = int(args["num_ops"])
lhs = None
rhs = None
if len(operands) > 0 :
lhs = operands[0]
if len(operands) > 1 :
rhs = operands[1]
be = cls(parent, sym, lhs=lhs, rhs=rhs,
allow_unary=allow_unary, no_brackets=no_brackets, num_ops=num_ops,
use_space=use_space,
variable_ops=variable_ops)
be.set_gravity(gravity_up)
return be
def get_sympy(self) :
if self.mes[-1] in Parser.binary_expression_names and \
self.ref_symbol.to_string() in \
Parser.binary_expression_properties_for_symbol :
num_ops = self.get_op_count()
properties = Parser.binary_expression_properties_for_symbol[self.ref_symbol.to_string()]
sympy_code = properties['sympy']
pair = {}
res = self.poss[0].IN().get_sympy()
for i in range(1, num_ops) :
pair['B'] = self.poss[i].IN().get_sympy()
if isinstance(res, bool) :
pair['A'] = self.poss[i-1].IN().get_sympy()
else :
pair['A'] = res
old_res = res
res = Dynamic.eval_for_sympy(pair, sympy_code)
if isinstance(old_res, bool) :
res = old_res and res
return res
return GlypherPhraseGroup.get_sympy(self)
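# The loop above left-folds the operands pairwise using the symbol's sympy
# snippet; boolean intermediate results (e.g. chained comparisons a < b < c)
# are and-ed together rather than fed back in as operands.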
def set_symbol_shape(self, symbol_shape) : self.set_p('symbol_shape', symbol_shape)
def get_symbol_shape(self) : return self.get_p('symbol_shape')
def set_associative(self, associative) : self.set_p('associative', associative)
def get_associative(self) : return self.get_p('associative')
def set_op_count(self, op_count) : self.set_p('op_count', op_count)
def get_op_count(self) : return self.get_p('op_count')
def set_use_space(self, use_space) : self.set_p('use_space', use_space)
def get_use_space(self) : return self.get_p('use_space')
def set_allow_unary(self, allow_unary) : self.set_p('allow_unary', allow_unary)
def get_allow_unary(self) : return self.get_p('allow_unary')
syms = None
poss = None
poser = None
p_sort = lambda s,x,y : fcmp(x.config[0].get_bbox()[0], y.config[0].get_bbox()[0])
def make_new_symbol(self) :
#sym = self.ref_symbol.copy()
sym = GlypherSymbol(self, self.get_symbol_shape())
sym.orphan()
return sym
def _set_pos_properties(self, pos) :
pos.set_deletable(2) # Send delete requests for rhs to me
if self.poser is GlypherBODMASBracketedPhrase :
pos.no_bracket.add('pow')
pos.set_bodmas_sensitivity(self.get_bodmas_level())
def process_key(self, name, event, caret) :
mask = event.state
m_control = bool(mask & gtk.gdk.CONTROL_MASK)
if name == 'Right' or name == 'Left' :
current = None
for pos in self.poss :
if self.poss[pos].child_active :
current = pos
break
if current is not None and current > 0 and name == 'Left' :
current -= 1
if current is None :
self.append_operand()
else :
self.add_operand(current)
else :
return GlypherPhraseGroup.process_key(self, name, event, caret)
return True
_suspend_change_check = False
def check_combination(self, shape, go_up=True) :
chg = False
if self._suspend_change_check :
return False
self._suspend_change_check = True
k= len(self.syms)
sym_ind = 0
syms = [self.ref_symbol] + \
[self.syms[self.syms.keys()[i]].get_entities()[sym_ind] for i in range(0, k)]
for sym in syms :
chg = sym.check_combination(shape, go_up=False) or chg
self._suspend_change_check = False
self.set_symbol_shape(self.ref_symbol.to_string())
return chg
def get_xml(self, name = None, top = True, targets = None, full = False) :
root = GlypherPhraseGroup.get_xml(self, name, top, targets, full)
if self.get_p('variable_ops') :
root.set('num_ops', str(self.get_op_count()))
if self.mes[-1] == 'binary_expression' and \
self.ref_symbol.to_string() in \
Parser.binary_expression_properties_for_symbol :
properties = Parser.binary_expression_properties_for_symbol[self.ref_symbol.to_string()]
root.set('type', properties['name'])
return root
_suspend_change_alternative = False
def change_alternative(self, dir = 1) :
if self._suspend_change_alternative :
return False
self._suspend_change_alternative = True
k= len(self.syms)
success = False
sym_ind = 0
syms = [self.ref_symbol] + \
[self.syms[self.syms.keys()[i]].get_entities()[sym_ind] for i in range(0, k)]
for sym in syms :
#sb = sym.get_entities()[0]
success = sym.change_alternative(dir) or success
self.set_symbol_shape(self.ref_symbol.to_string())
if self.ref_symbol.to_string() in Parser.binary_expression_properties_for_symbol :
props = Parser.binary_expression_properties_for_symbol[self.ref_symbol.to_string()]
self.mes[-1] = props['name']
self._suspend_change_alternative = False
if not success and self.included() :
return self.get_parent().change_alternative(dir)
return success
def __init__(self, parent, symbol, area = (0,0,0,0), lhs = None, rhs = None, allow_unary = False, no_brackets = False,
num_ops = 2, use_space = False, default_shortening = True,
variable_ops=False) :
GlypherPhraseGroup.__init__(self, parent, [], area, 'pos0')
self.mes.append('binary_expression')
self.add_properties({'op_count' : 1, 'associative' : False, 'use_space' : False, 'allow_unary' : False,
'symbol_shape' : u'\u00ae', 'use_space' : False,
'bodmas_level' : 0, 'breakable' : False,
'auto_ascend' : True, 'variable_ops' : False,
'gravity' : False})
self.set_use_space(use_space)
self.set_p('variable_ops', variable_ops)
self.syms = {}; self.poss = {}
self.set_allow_unary(allow_unary)
self.poser = GlypherPhrase if no_brackets else GlypherBracketedPhrase
self.default_shortening = default_shortening
self.characteristics.append('_bodmasable')
self.set_symbol_shape(symbol.to_string())
self.ref_symbol = symbol.copy()
self.set_associative(g.get_associative(self.get_symbol_shape()))
bodmas_level = g.get_bodmas(self.get_symbol_shape())
self.set_bodmas_level(bodmas_level)
pos0 = self.poser(self); pos0.name = 'pos0'
self._set_pos_properties(pos0)
self.add_target(pos0, 'pos0')
self.poss[0] = pos0
self.set_lhs_target('pos0')
self.append(pos0, align=('l','m'))
if lhs is not None :
self.get_target('pos0').adopt(lhs)
self.set_recommending(pos0)
if (not allow_unary) or num_ops > 1 :
for i in range(1, num_ops) :
self.append_operand()
self.set_rhs_target('pos2')
if rhs is not None :
self.set_rhs(rhs)
if lhs is None :
self.set_recommending(self.get_target('pos0').IN())
else :
if lhs is None :
self.set_recommending(self.get_target('pos0').IN())
else :
self.set_recommending(self.get_target('pos2').IN())
def get_args(self) :
tgts = [self.get_target('pos'+str(2*p)) for p in self.poss.keys()]
tgts = filter(lambda t : len(t.entities)==1, tgts)
return [tgt.get_entities()[0] for tgt in tgts]
def _get_sympy_args(self) :
return [a.get_sympy() for a in self.get_args()]
def _reorder_pos_arrays(self) :
p_sort = self.p_sort
ns = self.syms.values(); np = self.poss.values()
ns.sort(p_sort)
np.sort(p_sort)
for i in range(0,len(self.poss)) :
posname = 'pos'+str(2*i)
self.poss[i] = np[i]
self.poss[i].set_name(posname)
self.target_phrases[posname] = self.poss[i].IN()
if i > 0 :
self.syms[i] = ns[i-1]
self.syms[i].set_name('sym'+str(2*i-1))
def _move_along_one(self, from_pos) :
offset = self.poss[self.get_op_count()-1].config[0].bbox[2] - self.poss[self.get_op_count()-2].config[0].bbox[2]
for i in range(from_pos, self.get_op_count()) :
self.poss[i].translate(offset, 0, quiet=True)
self.syms[i].translate(offset, 0, quiet=True)
self.recalc_bbox()
self._reorder_pos_arrays()
def add_operand(self, after = None) :
ret = self.append_operand()
if after is not None and after < self.get_op_count()-2 :
old_pos = self.syms[after+1].config[0].bbox[0]
self._move_along_one(after+1)
to_start = self.syms[self.get_op_count()-1].config[0].bbox[0] - old_pos
#debug_print(self.syms[self.op_count-1].config[0].bbox)
#debug_print(self.syms[after].config[0].bbox)
self.syms[self.get_op_count()-1].translate(-to_start, 0, quiet=True)
self.poss[self.get_op_count()-1].translate(-to_start, 0, quiet=True)
self._reorder_pos_arrays()
self.recalc_bbox()
return ret
#offset = self.poss[self.op_count-1].config[0].bbox[2] - self.poss[self.op_count-2].config[0].bbox[2]
#to_start = self.syms[self.op_count-1].config[0].bbox[0] - self.syms[after+1].config[0].bbox[0]
#for i in range(after+1, self.op_count-1) :
# self.poss[i].translate(offset, 0, quiet=True)
# self.syms[i].translate(offset, 0, quiet=True)
#self.syms[self.op_count-1].translate(-to_start, 0, quiet=True)
#self.poss[self.op_count-1].translate(-to_start, 0, quiet=True)
#return ret
#if after != None and after < self.op_count - 1 :
# posold = self.poss[self.op_count-1]
# symold = self.syms[self.op_count-1]
# if posold.am('bracketed_phrase') : posold.suspend_collapse_checks()
# posold.empty()
# symold.empty()
# #debug_print(after)
# #debug_print(self.op_count)
# for m in range(after+1, self.op_count-1) :
# nm = self.op_count-2+after+1 - m
# debug_print(nm)
# posnew = self.poss[nm]
# posnew.suspend_collapse_checks()
# symnew = self.syms[nm]
# #debug_print(posold.entities)
# #debug_print(posold.to_string())
# #posold.elevate_entities(posnew)
# posnew.elevate_entities(posold)
# #posnew.empty()
# #posold.append(GlypherSymbol(None, 'r'))
# symnew.elevate_entities(symold)
# if posold.am('bracketed_phrase') : posold.resume_collapse_checks()
# posold = posnew; symold = symnew
# symbol = self.make_new_symbol()
# sym = self.syms[after+1]
# sym.adopt(symbol)
# sym.set_enterable(False)
# sym.set_attachable(False)
# pphr = self.poss[after+1]
# #self.add_target(pphr, 'pos'+str(2*after+2))
# return pphr
#return ret
_example_sym = None
_example_pos = None
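# append_operand: grows the expression by one operand.  For zero-based operand
# index i it creates an operator phrase 'sym<2i-1>' holding a fresh operator
# symbol (plus an optional spacer) and an operand phrase 'pos<2i>' registered
# as a new target.  Returns the operand phrase, or None when variable_ops is
# disabled and an operator already exists.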
def append_operand(self) :
if len(self.syms) > 0 and \
not self.get_p('variable_ops') :
return None
self.set_op_count(self.get_op_count()+1)
index = 2*(self.get_op_count()-1)
sname = 'sym'+str(index-1)
pname = 'pos'+str(index)
#pds = {}
#pds[sname] = { 'x' : index-1 , 'y' : 0, 'fs' : 1, 'ls' : 1, 'a' : ('l','m'), 'g' : GlypherPhrase }
#pds[pname] = { 'x' : index , 'y' : 0, 'fs' : 1, 'ls' : 1, 'a' : ('l','m'), 'g' : GlypherBODMASBracketedPhrase }
##debug_print(self.phrases)
#for name in pds : self.append_phrase_to_group(name, pds[name])
#if self._example_sym is None :
sphr = GlypherPhrase(self)
symbol = self.make_new_symbol()
sphr.adopt(symbol)
if self.default_shortening :
sphr.hide()
if self.get_use_space() :
space = GlypherSpace(self, (0.02,0))
sphr.append(space)
if not self.default_shortening :
space.hide()
self._example_sym = sphr
sphr.set_enterable(False); sphr.set_attachable(False)
#sphr = self._example_sym.copy()
#if self._example_pos is None :
pphr = self.poser(self)
self._set_pos_properties(pphr)
self._example_pos = pphr
#pphr = self._example_pos.copy()
#if self.op_count == 3 : s = GlypherSymbol(self, 'X'); self.append(s)
#else :
self.syms[self.get_op_count()-1] = sphr
self.poss[self.get_op_count()-1] = pphr
self.add_target(pphr, pname)
self.append(sphr, align=('l','m')); sphr.set_name(sname)
self.append(pphr, align=('l','m')); pphr.set_name(pname)
#debug_print(self.phrases)
#pphr.set_bodmas_sensitivity(self.bodmas_level)
#sphr.set_enterable(False)
#sphr.set_attachable(False)
#pphr.set_deletable(2) # Send delete requests for rhs to me
#get_caret().enter_phrase(self.get_phrase('pos2').expr())
#g.suggest(self.get_phrase(pname).IN())
return pphr
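# remove_operand: deletes operand n and shifts every later operand (and its
# operator symbol) one slot to the left.  Removing operand 0 is special-cased:
# if the following operator is '-', the promoted operand is wrapped in a
# GlypherNegative so the sign is preserved.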
def remove_operand(self, n) :
if n==0 :
posold = self.get_target('pos0')
posold.empty()
posnew = self.get_target('pos2')
symnew = self.syms[1]
if symnew.to_string() == '-' :
neg = GlypherNegative(posold)
posold.adopt(neg)
posnew.elevate_entities(neg)
else :
posnew.elevate_entities(posold)
symnew.empty()
posold = posnew; symold = symnew
n += 1
else :
posold = self.get_target('pos'+str(2*n))
symold = self.syms[n]
symold.empty(); posold.empty()
for m in range(n+1, self.get_op_count()) :
posnew = self.get_target('pos'+str(2*m))
symnew = self.syms[m]
posnew.elevate_entities(posold)
symnew.elevate_entities(symold)
posold = posnew; symold = symnew
self.remove(self.poss[self.get_op_count()-1], override_in=True)
if self.get_op_count() > 1 : self.remove(self.syms[self.get_op_count()-1])
self.set_op_count(self.get_op_count() - 1)
del self.syms[self.get_op_count()]
del self.poss[self.get_op_count()]
del self.target_phrases['pos'+str(2*self.get_op_count())]
_orphaning = None
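# make_simplifications: for an included, associative expression with a
# variable operand count, any operand that is itself a two-operand binary
# expression using the same (or an also-adopted) operator symbol is dissolved
# and its two sides are hoisted in as separate operands of this expression.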
def make_simplifications(self) :
if self.included() :
if not self.get_p('variable_ops') :
return
for n in range(0, self.get_op_count()) :
pos = 'pos' + str(2*n)
if pos not in self.target_phrases : return
ents = self.get_target(pos).get_entities()
if self.get_associative() and \
len(ents) == 1 and ents[0].am('binary_expression') and \
(ents[0].get_symbol_shape() == self.get_symbol_shape() or\
ents[0].get_symbol_shape() in self.also_adopt_symbol_shapes) \
and ents[0].get_op_count() == 2 and ents[0].included() and self._orphaning == None :
self._orphaning = ents[0]; e = ents[0]
lhs_e = e.get_target('pos0')
rhs_e = e.get_target('pos2')
if lhs_e.OUT().am('bracketed_phrase') : lhs_e.OUT().suspend_collapse_checks()
if rhs_e.OUT().am('bracketed_phrase') : rhs_e.OUT().suspend_collapse_checks()
newpos = self.add_operand(n)
lhs_e.elevate_entities(self.get_target(pos), adopt=True)
rhs_e.elevate_entities(newpos, adopt=True)
e.orphan()
self.set_recommending(self.get_target('pos'+str(2*(n+1))).IN())
self._orphaning = None
#debug_print([e.format_me() for e in self.entities])
#debug_print(get_caret().enter_phrase(newpos.expr(), at_start=True))
# TODO: Creating and deleting moves fractionally (1px right)
def delete(self, sender=None, if_empty=True) :
if self.get_op_count() == 1 :
return GlypherPhraseGroup.delete(self, sender=sender)
if sender != None :
tps = self.target_phrases.keys()
for phrn in tps :
if self.get_target(phrn).OUT() == sender :
loc = int(phrn[3:])/2 # use the full numeric suffix so 'pos10' and beyond resolve correctly
self.remove_operand(loc)
if loc > 0 :
self.set_recommending(self.get_target('pos'+str((loc-1)*2)).IN())
if not self.get_allow_unary() : self.check_release_last(sender)
def check_release_last(self, sender=None) :
# correct for multiple
if self.get_op_count() == 1 :
parent = self.get_up()
self.get_target('pos0').elevate_entities(parent, to_front=True)
GlypherPhraseGroup.delete(self, sender=sender)
self.feed_up()
return parent
class GlypherBinaryRowExpression(GlypherBinaryExpression) :
p_sort = lambda s,x,y : fcmp(x.config[0].row, y.config[0].row)
def __init__(self, parent, symbol, area = (0,0,0,0), lhs = None, rhs = None, allow_unary = False, no_brackets = False,
num_ops = 2, use_space = False) :
GlypherBinaryExpression.__init__(self, parent, symbol, area=area, lhs=lhs, rhs=rhs,
allow_unary=allow_unary, no_brackets=no_brackets, num_ops=num_ops,
use_space=use_space, variable_ops=True)
for i in range(1, self.get_op_count()) :
self.add_row(i)
j=i
for s in (self.poss[i], self.syms[i]) :
for c in s.config :
s.config[c].row = j
j-=1
self.recalc_bbox()
def _move_along_one(self, from_pos) :
for i in range(from_pos, self.get_op_count()) :
for s in (self.poss[i], self.syms[i]) :
for c in s.config :
s.config[c].row += 1
self.recalc_bbox()
self._reorder_pos_arrays()
def add_operand(self, after = None) :
ret = self.append_operand()
if after is not None and after < self.get_op_count()-2 :
old_pos = self.syms[after+1].config[0].row
i = self.get_op_count()-1; j=old_pos
for s in (self.poss[i], self.syms[i]) :
for c in s.config :
s.config[c].row = j
j-=1
self._move_along_one(after+1)
self.recalc_bbox()
delta = self.syms[i].config[0].bbox[0] - self.poss[i-1].config[0].bbox[2]
self.syms[i].translate(-delta, 0, quiet=True)
self._reorder_pos_arrays()
self.recalc_bbox()
return ret
def append_operand(self) :
ret = GlypherBinaryExpression.append_operand(self)
self.add_row(self.get_op_count()-1)
i=self.get_op_count()-1; j=i
for s in (self.poss[i], self.syms[i]) :
for c in s.config :
s.config[c].row = j
j-=1
self.recalc_bbox()
delta = self.syms[i].config[0].bbox[0] - self.poss[i-1].config[0].bbox[2]
self.syms[i].translate(-delta, 0, quiet=True)
self.recalc_bbox()
return ret
class GlypherSemicolonArray(GlypherBinaryRowExpression) :
stop_for_binary_expression_exceptions = ('space_array',)
toolbox = { 'symbol' : ';',
'category' : 'General',
'shortcut' : None,
'alternatives' : None,
'priority' : None }
def __init__(self, parent, area = (0,0,0,0), lhs = None, rhs = None, subtract = False, num_ops = 2) :
symbol = GlypherSymbol(None, ';')
GlypherBinaryRowExpression.__init__(self, parent, symbol, area, lhs=lhs, rhs=rhs, num_ops=num_ops,\
allow_unary=False, no_brackets=True)
self.mes.append('semicolon_array')
def get_sympy(self) :
return self._get_sympy_args()
class GlypherAdd(GlypherBinaryExpression) :
toolbox = { 'symbol' : '+',
'category' : 'Arithmetic',
'shortcut' : None,
'alternatives' : None,
'priority' : None }
def __init__(self, parent, area = (0,0,0,0), lhs = None, rhs = None, subtract = False, num_ops = 2) :
symbol = GlypherSymbol(None, '+') if not subtract else GlypherSymbol(None, '-')
GlypherBinaryExpression.__init__(self, parent, symbol, area, lhs=lhs,
rhs=rhs, num_ops=num_ops,
variable_ops=True)
self.mes.append('add')
def _set_pos_properties(self, pos) :
GlypherBinaryExpression._set_pos_properties(self, pos)
if issubclass(self.poser, GlypherBracketedPhrase) :
pos.no_bracket.add('function')
pos.no_bracket.add('script')
pos.no_bracket.add('fraction')
pos.no_bracket.add('mul')
pos.no_bracket.add('negative')
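# _get_sympy_args: returns the sympy value of each operand, multiplying by -1
# wherever the preceding operator symbol is '-', so subtraction becomes
# addition of a negated term.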
def _get_sympy_args(self) :
signed_args = [self.get_target('pos0').get_sympy()]
for i in range(1, self.get_op_count()) :
if self.syms[i].to_string() == '-' :
signed_args.append(\
sympy_mul(\
sympy_negone(),\
self.get_target('pos'+str(2*i)).get_sympy()))
else :
signed_args.append(self.get_target('pos'+str(2*i)).get_sympy())
return signed_args
def get_sympy(self) :
args = self._get_sympy_args()
total = args[0]
shape = self.get_symbol_shape()
if shape == u'\u222A' :
for arg in args[1:] :
total = total.union(arg)
elif shape == u'\u2229' :
for arg in args[1:] :
total = total.intersect(arg)
else :
for arg in args[1:] :
total += arg
return total
#return sympy.core.add.Add(*self._get_sympy_args())
_orphaning = None
def make_simplifications(self) :
GlypherBinaryExpression.make_simplifications(self)
if self._orphaning is not None :
return
for i in range(1, self.get_op_count()) :
sym = 'sym' + str(2*i-1)
pos = 'pos' + str(2*i)
if i not in self.poss : return
ents = self.get_target(pos).get_entities()
syml = self.syms[i].get_entities() if i in self.syms else (1,)
if len(ents) == 1 and ents[0].am('negative') and self._orphaning not in (ents[0], syml[0]):
self._orphaning = ents[0]
e = ents[0]
e.orphan()
e.get_target('expression').elevate_entities(self.get_target(pos), adopt=True)
self._orphaning = syml[0]
s = syml[0]
st = s.to_string()
#debug_print(s.to_string())
s.orphan()
#debug_print(s.to_string())
#debug_print(s.to_string() == '+')
self.syms[i].adopt(GlypherSymbol(self.syms[i],\
'-' if st == '+' else '+'))
self.set_recommending(self.get_target(pos))
self._orphaning = None
for i in range(0, self.get_op_count()) :
sym = 'sym' + str(2*i-1)
pos = 'pos' + str(2*i)
if i not in self.poss : return
ents = self.get_target(pos).get_entities()
syml = self.syms[i+1].get_entities() if i+1 in self.syms else (1,)
if len(ents) == 1 and ents[0].am('add') and \
self._orphaning not in (ents[0], syml[0]) and \
ents[0].get_symbol_shape() == '-' and \
len(ents[0].syms) == 1 :
self._orphaning = ents[0]
e = ents[0]
lhs_e = e.get_target('pos0')
rhs_e = e.get_target('pos2')
if lhs_e.OUT().am('bracketed_phrase') :
lhs_e.OUT().suspend_collapse_checks()
if rhs_e.OUT().am('bracketed_phrase') :
rhs_e.OUT().suspend_collapse_checks()
newpos = self.add_operand(i)
lhs_e.elevate_entities(self.get_target(pos), adopt=True)
rhs_e.elevate_entities(newpos, adopt=True)
e.orphan()
self._orphaning = None
s = self.syms[i+1].get_entities()[0]
self._orphaning = s
st = s.to_string()
s.orphan()
self.syms[i+1].adopt(GlypherSymbol(self.syms[i+1],\
'-' if st == '+' else '+'))
self._orphaning = None
self.set_recommending(self.get_target('pos'+str(2*(i+1))).IN())
class GlypherLessThan(GlypherBinaryExpression) :
toolbox = { 'symbol' : '<',
'category' : 'Arithmetic',
'shortcut' : None,
'alternatives' : None,
'priority' : None }
def __init__(self, parent, lhs=None, rhs=None, num_ops = 2) :
eq_symbol = GlypherSymbol(parent, "<")
GlypherBinaryExpression.__init__(self, parent, symbol=eq_symbol, lhs=lhs, rhs=rhs, no_brackets=True, num_ops=num_ops)
self.mes.append('less_than')
def get_sympy(self) :
if self.get_symbol_shape() == '<' :
rel_holds = True
args = self._get_sympy_args()
for i in range(1, len(args)) :
rel_holds = Dynamic.And(rel_holds, Dynamic.Lt(args[i-1],
args[i]))
elif self.get_symbol_shape() == u'\u220A' :
debug_print("Not evaluable")
rel_holds = None
return rel_holds
#class GlypherEquality(GlypherBinaryExpression) :
# toolbox = { 'symbol' : '=',
# 'category' : 'Arithmetic',
# 'shortcut' : None,
# 'alternatives' : None,
# 'priority' : None }
# def __init__(self, parent, lhs=None, rhs=None, num_ops = 2) :
# eq_symbol = GlypherSymbol(parent, "=")
# GlypherBinaryExpression.__init__(self, parent, symbol=eq_symbol, lhs=lhs, rhs=rhs, no_brackets=True, num_ops=num_ops)
# self.mes.append('equality')
#
# def get_sympy(self) :
# return Dynamic.Eq(self.get_target('pos0').get_sympy(), self.get_target('pos2').get_sympy())
class GlypherCommaArray(GlypherBinaryExpression) :
toolbox = { 'symbol' : ',',
'category' : 'General',
'shortcut' : None,
'alternatives' : None,
'priority' : None }
def __init__(self, parent, lhs=None, rhs=None, num_ops = 2) :
comma_symbol = GlypherSymbol(parent, ",")
GlypherBinaryExpression.__init__(self, parent, symbol=comma_symbol,
lhs=lhs, rhs=rhs, no_brackets=True,
num_ops=num_ops, variable_ops=True)
self.mes.append('comma_array')
def get_sympy(self) :
return self._get_sympy_args()
class GlypherRowVector(GlypherBracketedPhrase) :
be = None
def __init__(self, parent, lhs=None, rhs=None, num_ops = 2) :
self.be = be = GlypherCommaArray(None, lhs, rhs, num_ops) # keep a reference for get_sympy()
GlypherBracketedPhrase.__init__(self, parent, auto=False, expr=be)
self.mes.append('row_vector')
self.set_enterable(False)
self.set_recommending(be.get_recommending())
def get_sympy(self) :
return Dynamic.Matrix(self.be._get_sympy_args())
# NEEDS LATER VERSION OF SYMPY
class GlypherFiniteSet(GlypherBracketedPhrase) :
be = None
def __init__(self, parent, lhs=None, rhs=None) :
self.be = GlypherCommaArray(None, lhs, rhs)
GlypherBracketedPhrase.__init__(self, parent, auto=False, expr=self.be)
self.mes.append('finite_set')
self.set_bracket_shapes(('{','}'))
self.set_enterable(False)
self.set_recommending(self.be.get_recommending())
def get_sympy(self) :
finite_set = Dynamic.FiniteSet(*self.be._get_sympy_args())
return finite_set
class GlypherInterval(GlypherBracketedPhrase) :
toolbox = { 'symbol' : '[]',
'category' : 'Sets',
'shortcut' : None,
'alternatives' : None,
'priority' : None }
be = None
stop_for_binary_expression_exceptions = ('space_array',)
def __init__(self, parent, lhs=None, rhs=None, left_open=True,
right_open=True) :
self.be = GlypherCommaArray(None, lhs, rhs, 2)
GlypherBracketedPhrase.__init__(self, parent, auto=False, expr=self.be)
self.mes.append('interval')
bracket_pair = ( '(' if left_open else '[', ')' if right_open else ']' )
self.set_bracket_shapes(bracket_pair)
self.set_enterable(False)
self.set_recommending(self.be.get_recommending())
def get_lhs(self) :
return self.be.poss[0]
def set_lhs(self, lhs) :
self.be.set_lhs(lhs)
def get_rhs(self) :
return self.be.poss[1]
def set_rhs(self, rhs) :
self.be.set_rhs(rhs)
bracket_pairs = ( ('(',')'), ('[',')'), ('[',']'),('(',']') )
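# change_alternative: steps through the bracket combinations above, cycling
# the interval between (a,b), [a,b), [a,b] and (a,b].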
def change_alternative(self, dir = 1) :
ind = self.bracket_pairs.index(self.get_bracket_shapes())
ind += dir
ind = (ind+len(self.bracket_pairs))%len(self.bracket_pairs)
self.set_bracket_shapes(self.bracket_pairs[ind])
return True
def get_sympy(self) :
l, r = self.be._get_sympy_args()
lo, ro = self.get_bracket_shapes()
lo = lo == '('
ro = ro == ')'
interval = Dynamic.Interval(l, r, lo, ro)
return interval
class GlypherSideFraction(GlypherBinaryExpression) :
toolbox = { 'symbol' : '/',
'category' : 'Arithmetic',
'shortcut' : None,
'alternatives' : None,
'priority' : None }
def __init__(self, parent, lhs=None, rhs=None) :
solidus_symbol = GlypherSymbol(parent, "/")
GlypherBinaryExpression.__init__(self, parent, symbol=solidus_symbol,
lhs=lhs, rhs=rhs, no_brackets=False)
self.mes.append('side_fraction')
def get_sympy(self) :
return sympy_mul(self.get_target('pos0').get_sympy(),\
sympy_power(self.get_target('pos2').get_sympy(), -1))
def _set_pos_properties(self, pos) :
GlypherBinaryExpression._set_pos_properties(self, pos)
if issubclass(self.poser, GlypherBracketedPhrase) :
pos.no_bracket.add('function')
pos.no_bracket.add('script')
class GlypherSpaceArray(GlypherBinaryExpression) :
toolbox = { 'symbol' : '_',
'category' : 'Utilities',
'shortcut' : None,
'alternatives' : None,
'priority' : None }
stop_for_binary_expression_exceptions = ()
def __init__(self, parent, width = 0.5, num_ops = 2, lhs = None, rhs = None, spacing = 0.1) :
space_symbol = GlypherSpace(parent, (spacing,0.4))
#space_symbol = make_word(' ',self)
GlypherBinaryExpression.__init__(self, parent, symbol=space_symbol,
allow_unary=True, no_brackets=True,
num_ops=num_ops, lhs=lhs, rhs=rhs,
variable_ops=True)
self.add_properties({'allow_unary' : True, 'symbol_shape' : ' ', 'no_brackets' : True, 'num_ops' : 2})
self.mes.append('space_array')
def get_sympy(self) :
return self._get_sympy_args()
class GlypherMul(GlypherBinaryExpression) :
toolbox = { 'symbol' : '*',
'category' : 'Arithmetic',
'shortcut' : None,
'alternatives' : None,
'priority' : None }
also_adopt_symbol_shapes = (u'*', u'\u00B7', u'\u00D7', u'')
_shortening = False
def set_shortened(self, shortened) : self.set_p('shortened', shortened)
def get_shortened(self) : return self.get_p('shortened')
def make_new_symbol(self) :
sy = GlypherBinaryExpression.make_new_symbol(self)
if sy.to_string() == u'\u00B7' :
sy.set_rgb_colour((0.8, 0.8, 0.8))
return sy
def change_alternative(self, dir) :
ret = GlypherBinaryExpression.change_alternative(self, dir)
for key in self.syms :
sym = self.syms[key]
if sym.to_string()[0] == u'\u00B7' :
sym.set_rgb_colour((0.8, 0.8, 0.8))
else :
sym.set_rgb_colour(None)
return ret
def __init__(self, parent, area = (0,0,0,0), lhs = None, rhs = None, num_ops = 2) :
symbol = GlypherSymbol(None, u'\u00b7')
GlypherBinaryExpression.__init__(self, parent, symbol, area, lhs, rhs,
num_ops=num_ops, use_space=True,
variable_ops=True)
self.add_properties({'breakable' : False, 'wordlike' : True,
'associative' : True})
self.mes.append('mul')
def _set_pos_properties(self, pos) :
GlypherBinaryExpression._set_pos_properties(self, pos)
if issubclass(self.poser, GlypherBracketedPhrase) :
pos.no_bracket.add('function')
pos.no_bracket.add('script')
pos.no_bracket.add('fraction')
def get_sympy(self) :
args = self._get_sympy_args()
total = args[0]
for arg in args[1:] :
total *= arg
return total
#return sympy.core.mul.Mul(*self._get_sympy_args())
#return sympy.core.mul.Mul(*(a.get_sympy() for a in self._get_sympy_args()))
_orphaning = None
def make_simplifications(self) :
GlypherBinaryExpression.make_simplifications(self)
nents = 0
for i in range(0, self.get_op_count()) :
pos = 'pos' + str(2*i)
if i in self.poss : nents += len(self.get_target(pos).get_entities())
if nents <= 1 : return
if self.included() :
for i in range(0, self.get_op_count()) :
#debug_print(self.op_count)
pos = 'pos' + str(2*i)
if i not in self.poss : return
ents = self.get_target(pos).get_entities()
#debug_print(self.phrases.keys())
if self._orphaning == None and \
len(ents) == 1 and ents[0].am('negative') and\
len(ents[0]['expression'].get_entities()) > 0 :
p = self.get_up()
self._orphaning = p
self.suspend_recommending()
nexpr = ents[0].get_target('expression')
if len(nexpr.get_entities())==1 and \
nexpr.get_entities()[0].am('word') and \
len(nexpr.get_entities()[0].get_entities())==1 and \
nexpr.get_entities()[0].get_entities()[0].am('symbol') and \
nexpr.get_entities()[0].get_entities()[0].to_string() == '1' :
self.remove_operand(i)
else :
e = ents[0]
nt = nexpr.get_entities()[0]
e.orphan()
nexpr.elevate_entities(self.get_target(pos).IN())
if (self.get_parent() != None and self.get_parent().am('negative')) :
q = p.get_parent()
p.orphan()
p.elevate_entities(q, adopt=True)
else :
self.orphan()
n = GlypherNegative(p)
n.get_target('expression').adopt(self)
p.adopt(n)
self.check_release_last()
self._orphaning = None
self.resume_recommending()
#self.set_recommending(t)
if not self._shortening and not self._orphaning :
for i in range(0, self.get_op_count()-1) :
posi = 'pos' + str(2*i)
posi1 = 'pos' + str(2*(i+1))
if i not in self.poss or (i+1) not in self.poss : return
entsi = self.get_target(posi ).get_entities()
entsi1 = self.get_target(posi1).get_entities()
if len(entsi) == 0 or len(entsi1) == 0 : continue
self._shortening = True
self.set_shortened(True)
#if entsi[0].am('word') :
if self.ref_symbol.to_string()[0] == u'\u00B7' and \
should_shorten_mul(entsi[0], entsi1[0]) :
if self.syms[i+1].get_entities()[0].get_visible() :
self.syms[i+1].get_entities()[0].hide()
self.syms[i+1].get_entities()[1].show()
if self.included() : self.get_parent().child_change()
else :
if not self.syms[i+1].get_entities()[0].get_visible() :
self.syms[i+1].get_entities()[0].show()
self.syms[i+1].get_entities()[1].hide()
self.set_shortened(False)
if self.included() : self.get_parent().child_change()
self._shortening = False
self.consistency_check_sub_poses()
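# should_shorten_mul: heuristic deciding whether the multiplication dot
# between two factors may be hidden (implicit multiplication), e.g. between a
# number and a symbolic word, two words/constants/units, bracketed factors or
# scripts.  It never contracts when the right-hand factor opts out of
# auto-contraction or leads with a number, which would merge two numerals.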
def should_shorten_mul(ent1, ent2) :
# never contract if the second term is a number
if not ent2.get_p('auto_contract_premultiplication') or ent2.is_leading_with_num() : return False
word1 = (ent1.am('word') and not ent1.is_num()) or ent1.am('constant') or ent1.am('unit')
word2 = (ent2.am('word') and not ent2.is_num()) or ent2.am('constant') or ent2.am('unit')
shorten = (ent1.am('word') and ent1.is_num()) and not (ent2.am('word') and ent2.is_num()) # num and not-num
shorten = shorten or (word1 and word2) # word and word
shorten = shorten or (ent1.am('script') or ent2.am('script')) # two powers/scripts/etc.
shorten = shorten or (ent1.am('bracketed_phrase') and ent2.am('bracketed_phrase')) # two bracketed phrases
shorten = shorten or (ent1.am('binary_expression') or ent2.am('binary_expression')) # this should be bracketed!
shorten = shorten or (ent1.am('fraction') and not ((ent2.am('word') and ent2.is_num())\
or ent2.am('side_fraction') or ent2.am('fraction'))) # fraction and not-(number or fraction)
shorten = shorten or (ent2.am('unit')) # unit on right
return shorten and len(ent1.get_entities()) > 0
def is_short_mul(test) : return isinstance(test, GlypherMul) and test.get_shortened()
g.add_phrasegroup_by_class('binary_expression', GlypherBinaryExpression)
g.add_phrasegroup_by_class('negative', GlypherNegative)
g.add_phrasegroup_by_class('add', GlypherAdd)
g.add_phrasegroup_by_class('mul', GlypherMul)
g.add_phrasegroup_by_class('comma_array', GlypherCommaArray)
g.add_phrasegroup_by_class('semicolon_array', GlypherSemicolonArray)
g.add_phrasegroup_by_class('space_array', GlypherSpaceArray)
g.add_phrasegroup_by_class('prime', GlypherPrime)
g.add_phrasegroup_by_class('side_fraction', GlypherSideFraction)
g.add_phrasegroup_by_class('interval', GlypherInterval)
g.add_phrasegroup_by_class('finite_set', GlypherFiniteSet)
#g.add_phrasegroup_by_class('equality', GlypherEquality)
|
Aesthete
|
/Aesthete-0.4.2.tar.gz/Aesthete-0.4.2/aesthete/glypher/BinaryExpression.py
|
BinaryExpression.py
|
import uuid
import glypher as g
import Mirror
import math
from types import *
from Toolbox import *
import ConfigWidgets
from aobject.utils import debug_print
import gtk
import gobject
import pango
import re
from aobject.paths import *
from aobject import aobject
try :
import sympy
import sympy.parsing.maxima
have_sympy = True
except ImportError :
have_sympy = False
from Interpret import *
from Caret import *
from Phrase import *
from Parser import *
from Widget import *
debugging = False
phrasegroups_dir = get_user_location() + 'glypher/phrasegroups/'
class GlyphMaker (gtk.Frame, aobject.AObject) :
properties = None
gmg = None
xmlview = None
vbox = None
filename = None
selected_entity = None
getting_shortcut_mode = False
def do_set_padding(self, entry, side) :
if self.property_store.entity is None :
return
new_padding = entry.get_text()
if len(new_padding) == 0 :
new_padding = 0.0
else :
new_padding = float(new_padding)
self.property_store.entity.set_padding(side, new_padding)
self.gmg.queue_draw()
def do_key_press_event(self, button, event) :
if self.getting_shortcut_mode :
if gtk.gdk.keyval_name(event.keyval) == 'Escape' :
self.phrasegroup_keyboard_shortcut.get_child().set_text('[None]')
self.do_keyboard_shortcut(self.phrasegroup_keyboard_shortcut)
elif gtk.gdk.keyval_name(event.keyval) == 'Return' :
self.do_keyboard_shortcut(self.phrasegroup_keyboard_shortcut)
else :
self.phrasegroup_keyboard_shortcut.get_child().set_text(
gtk.accelerator_name(event.keyval, event.state))
self.phrasegroup_keyboard_shortcut.get_child().set_ellipsize(pango.ELLIPSIZE_START)
return True
return False
def do_keyboard_shortcut(self, button) :
if self.getting_shortcut_mode :
self.getting_shortcut_mode = False
button.set_relief(gtk.RELIEF_NORMAL)
self.phrasegroup_name.show()
self.phrasegroup_latex_name.show()
self.phrasegroup_title.show()
else :
self.getting_shortcut_mode = True
button.set_relief(gtk.RELIEF_NONE)
self.phrasegroup_name.hide()
self.phrasegroup_latex_name.hide()
self.phrasegroup_title.hide()
def do_select(self, button) :
self.gmg.caret.set_selected(self.property_store.entity)
def do_inherited_property_view_edited(self, cell, path, new_text) :
r = self.inherited_property_store[path]
r[1] = new_text
debug_print(r[2])
self.property_store.entity.set_ip(r[0], make_val(new_text, r[2]) if new_text != '' else None)
self.property_store.entity.recalc_bbox()
def do_property_view_edited(self, cell, path, new_text) :
r = self.property_store[path]
r[1] = new_text
self.property_store.entity.set_p(r[0], make_val(new_text, r[2]))
self.property_store.entity.recalc_bbox()
def do_ae_toggled(self, toggle, att) :
ent = self.property_store.entity
if ent is None : return
if att : ent.set_attachable(toggle.get_active())
else : ent.set_enterable(toggle.get_active())
def do_rc_spin(self, spin, row) :
ent = self.property_store.entity
v = int(round(spin.get_value()))
if ent is None or not ent.included() : return
if row : ent.parent.add_row(v); ent.config[0].row = v
else : ent.parent.add_col(v); ent.config[0].col = v
ent.parent.recalc_bbox()
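# highlight_codebuffer: scans the buffer for `name` spans between backticks
# and tags each one "name_highlight_matched" (green) if it names a declared
# target, "name_highlight_unmatched" (red) otherwise.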
def highlight_codebuffer(self, codebuffer) :
start = codebuffer.get_start_iter()
codebuffer.remove_all_tags(start, codebuffer.get_end_iter())
mat = start.forward_search('`', gtk.TEXT_SEARCH_VISIBLE_ONLY)
while mat is not None :
opening = mat[1]
mat = mat[1].forward_search('`', gtk.TEXT_SEARCH_VISIBLE_ONLY)
if mat is None :
break
closing = mat[0]
text = opening.get_text(closing)
tag = "name_highlight_unmatched"
for row in self.targets_liss :
if row[0] == text :
tag = "name_highlight_matched"
codebuffer.apply_tag_by_name(tag, opening, closing)
mat = mat[1].forward_search('`', gtk.TEXT_SEARCH_VISIBLE_ONLY)
def new_default_phrasegroup_name(self) :
self.default_phrasegroup_name = 'untitled-' + str(uuid.uuid4())
self.phrasegroup_name.set_text(self.default_phrasegroup_name)
def __init__(self, env = None):
gtk.Frame.__init__(self)
aobject.AObject.__init__(self, "GlyphMaker", env, view_object=True)
self.connect("aesthete-property-change", lambda o, p, v, a : self.queue_draw())
self.vbox = gtk.VBox()
hbox = gtk.HBox(False, 4)
self.vbox.pack_start(hbox, False)
hbox.pack_start(gtk.Label("Name"), False)
self.phrasegroup_name = gtk.Entry()
hbox.pack_start(self.phrasegroup_name)
hbox.pack_start(gtk.Label("Title"), False)
self.phrasegroup_title = gtk.Entry()
hbox.pack_start(self.phrasegroup_title)
hbox.pack_start(gtk.Label("LaTeX Name"), False)
self.phrasegroup_latex_name = gtk.Entry()
self.default_phrasegroup_name = ""
hbox.pack_start(self.phrasegroup_latex_name)
hbox.pack_start(gtk.Label("Keyboard Shortcut"), False)
self.phrasegroup_keyboard_shortcut = gtk.Button('[None]')
self.phrasegroup_keyboard_shortcut.get_child().set_ellipsize(pango.ELLIPSIZE_START)
self.phrasegroup_keyboard_shortcut.connect("clicked",
self.do_keyboard_shortcut)
self.phrasegroup_keyboard_shortcut.connect("key-press-event",
self.do_key_press_event)
hbox.pack_start(self.phrasegroup_keyboard_shortcut)
hbox.show_all()
self.gmg = GlyphBasicGlypher(env=env, evaluable=False)
self.gmg.caret.editor_mode = True
self.gmg.main_phrase.by_first_row = False
self.gmg.main_phrase.move()
self.gmg.main_phrase.set_enterable(True)
self.gmg.main_phrase.set_attachable(True)
self.absorb_properties(self.gmg)
self.vbox.pack_start(self.gmg)
self.log(1, "New GlyphMaker initialized")
self.gmg.connect("content-changed", lambda o : self.update_xmlview())
self.gmg.show_all()
ex = gtk.expander_new_with_mnemonic("_Show Source")
sc = gtk.ScrolledWindow()
self.xmlview = gtk.TextView()
self.xmlview.set_size_request(-1, 300)
self.xmlview.get_buffer().set_text("")
sc.add(self.xmlview)
ex.add(sc)
ex.show_all()
ex.set_expanded(False)
self.vbox.pack_start(ex, False, False)
toolbox = gtk.HBox()
add_vbox = gtk.VBox()
ex.connect("activate", lambda o : toolbox.hide() if not ex.get_expanded() else toolbox.show())
add_vbox.pack_start(GlyphToolbox(self.gmg.caret, grab_entities=False,
expanded=True, cols=10, hidden=True))
code_ntbk = gtk.Notebook()
code_ntbk.set_tab_pos(gtk.POS_LEFT)
code_ntbk.set_property("scrollable", True)
code_ntbk.set_property("enable-popup", True)
cat_hbox = gtk.HBox()
cat_hbox.pack_start(gtk.Label('Symbol'), False)
self.symbol_entr = gtk.Entry()
self.symbol_entr.set_size_request(20, -1)
cat_hbox.pack_start(self.symbol_entr)
self.category_entr = gtk.Entry()
cat_hbox.pack_start(gtk.Label('Category'), False)
cat_hbox.pack_start(self.category_entr)
self.alt_entr = gtk.Entry()
cat_hbox.pack_start(gtk.Label('Alternatives Cat'), False)
cat_hbox.pack_start(self.alt_entr)
cat_hbox.show_all()
add_vbox.pack_start(cat_hbox, False)
self.sympy_view = gtk.TextView()
self.sympy_view.set_wrap_mode(gtk.WRAP_CHAR)
self.sympy_view.get_buffer().create_tag("name_highlight_matched",
foreground="green")
self.sympy_view.get_buffer().create_tag("name_highlight_unmatched",
foreground="red")
self.sympy_view.get_buffer().connect("changed", self.highlight_codebuffer)
code_ntbk.append_page(self.sympy_view, gtk.Label('Sympy'))
self.string_view = gtk.TextView()
self.string_view.set_wrap_mode(gtk.WRAP_CHAR)
self.string_view.get_buffer().connect("changed", self.highlight_codebuffer)
self.string_view.get_buffer().create_tag("name_highlight_matched",
foreground="green")
self.string_view.get_buffer().create_tag("name_highlight_unmatched",
foreground="red")
code_ntbk.append_page(self.string_view, gtk.Label('Text'))
self.latex_view = gtk.TextView()
self.latex_view.set_wrap_mode(gtk.WRAP_CHAR)
code_ntbk.append_page(self.latex_view, gtk.Label('LaTeX'))
self.latex_view.get_buffer().connect("changed", self.highlight_codebuffer)
self.latex_view.get_buffer().create_tag("name_highlight_matched",
foreground="green")
self.latex_view.get_buffer().create_tag("name_highlight_unmatched",
foreground="red")
self.sympy_view.set_size_request(-1, 200)
other = gtk.Table(1, 2)
other.attach(gtk.Label("Wiki"), 0, 1, 0, 1, False, False)
self.wiki_entr = gtk.Entry()
other.attach(self.wiki_entr, 0, 1, 1, 2)
code_ntbk.append_page(other, gtk.Label("Other"))
self.info_view = gtk.TextView()
self.info_view.set_wrap_mode(gtk.WRAP_CHAR)
code_ntbk.append_page(self.info_view, gtk.Label('Info'))
code_ntbk.set_size_request(300, -1)
code_ntbk.show_all()
add_vbox.pack_start(code_ntbk)
mathml_hbox = gtk.HBox()
mathml_hbox.pack_start(gtk.Label("Content MathML operation"), False)
self.phrasegroup_mathml_op = gtk.Entry()
mathml_hbox.pack_start(self.phrasegroup_mathml_op)
mathml_hbox.show_all()
add_vbox.pack_start(mathml_hbox, False)
add_hbox = gtk.HBox()
self.add_target_butt = gtk.Button('Make Phrase into Target')
self.add_target_butt.connect("clicked", self.do_add_target)
add_hbox.pack_start(self.add_target_butt)
self.break_word_butt = gtk.Button('Break Word')
self.break_word_butt.connect("clicked", self.do_break_word)
add_hbox.pack_start(self.break_word_butt)
self.selection_mirror_butt = gtk.Button('Insert Selection Mirror')
self.selection_mirror_butt.connect("clicked", self.do_selection_mirror)
add_hbox.pack_start(self.selection_mirror_butt)
add_hbox.show_all()
add_vbox.pack_start(add_hbox, False)
toolbox.pack_start(add_vbox)
self.targets_liss = gtk.ListStore(gobject.TYPE_STRING)
self.targets_liss.append(('[None]',))
self.targets_liss.connect("row-changed", lambda tl, p, i : \
(self.highlight_codebuffer(self.latex_view.get_buffer()),
self.highlight_codebuffer(self.sympy_view.get_buffer()),
self.highlight_codebuffer(self.string_view.get_buffer())))
sides_hbox = gtk.HBox()
sides_hbox.pack_start(gtk.Label("LHS"), False)
lhs_cell = gtk.CellRendererText()
self.lhs_cmbo = gtk.ComboBox(self.targets_liss)
self.lhs_cmbo.set_active(0)
self.lhs_cmbo.pack_start(lhs_cell)
self.lhs_cmbo.add_attribute(lhs_cell, 'text', 0)
self.lhs_cmbo.connect("changed", self.do_set_side, True)
sides_hbox.pack_start(self.lhs_cmbo)
sides_hbox.pack_start(gtk.Label("RHS"), False)
rhs_cell = gtk.CellRendererText()
self.rhs_cmbo = gtk.ComboBox(self.targets_liss)
self.rhs_cmbo.set_active(0)
self.rhs_cmbo.pack_start(rhs_cell)
self.rhs_cmbo.add_attribute(rhs_cell, 'text', 0)
self.rhs_cmbo.connect("changed", self.do_set_side, False)
sides_hbox.pack_start(self.rhs_cmbo)
sides_hbox.show_all()
add_vbox.pack_start(sides_hbox, False)
add_vbox.show()
self.vbox.pack_start(toolbox, False)
self.vbox.show()
self.property_store = gtk.ListStore(str, str, str)
property_view = gtk.TreeView(self.property_store)
property_view_cren0 = gtk.CellRendererText()
property_view_cren1 = gtk.CellRendererText()
property_view_cren1.set_property('editable', True)
property_view_cren1.connect('edited', self.do_property_view_edited)
property_view_col0 = gtk.TreeViewColumn('Property', property_view_cren0, text=0)
property_view_col1 = gtk.TreeViewColumn('Value', property_view_cren1, text=1)
property_view.append_column(property_view_col0)
property_view.append_column(property_view_col1)
self.gmg.caret.connect("changed-attached-to", lambda o, s, l : self.set_property_store())
sc1 = gtk.ScrolledWindow()
sc1.add(property_view)
ex1 = gtk.expander_new_with_mnemonic("_Properties")
ex1.add(sc1)
self.inherited_property_store = gtk.ListStore(str, str, str)
inherited_property_view = gtk.TreeView(self.inherited_property_store)
inherited_property_view_cren0 = gtk.CellRendererText()
inherited_property_view_cren1 = gtk.CellRendererText()
inherited_property_view_cren1.set_property('editable', True)
inherited_property_view_cren1.connect('edited', self.do_inherited_property_view_edited)
inherited_property_view_col0 = gtk.TreeViewColumn('Property', inherited_property_view_cren0, text=0)
inherited_property_view_col1 = gtk.TreeViewColumn('Value', inherited_property_view_cren1, text=1)
inherited_property_view.append_column(inherited_property_view_col0)
inherited_property_view.append_column(inherited_property_view_col1)
sc2 = gtk.ScrolledWindow()
sc2.add(inherited_property_view)
ex2 = gtk.expander_new_with_mnemonic("_Inherited Properties")
ex2.add(sc2)
ent_vbox = gtk.VBox()
ent_vbox.set_size_request(150, 350)
self.ent_label = gtk.Label()
ent_vbox.pack_start(self.ent_label, False)
fm_hbox = gtk.HBox()
self.fm_mes = gtk.combo_box_new_text()
self.fm_mes.set_title('Mes')
self.fm_mes.set_tooltip_text('Who am I?')
fm_hbox.pack_start(self.fm_mes)
self.fm_mes.connect("changed", self.do_fm_mes_changed)
self.fm_ancs = gtk.combo_box_new_text()
self.fm_ancs.set_title('Ancestors')
self.fm_ancs.set_tooltip_text('Who are my ancestors?')
self.fm_ancs.connect("changed", self.do_fm_ancs_changed)
fm_hbox.pack_start(self.fm_ancs)
ent_vbox.pack_start(fm_hbox, False)
select_entity_butt = gtk.Button("Select")
select_entity_butt.connect("clicked", self.do_select)
ent_vbox.pack_start(select_entity_butt, False)
scaling_hbox = gtk.HBox()
self.scaling_entr = gtk.Entry()
scaling_hbox.pack_start(self.scaling_entr)
scaling_butt = gtk.Button("Set scaling")
scaling_hbox.pack_start(scaling_butt)
scaling_butt.connect("clicked", self.do_scaling_butt_clicked)
ent_vbox.pack_start(scaling_hbox, False)
row_col_hbox = gtk.HBox()
row_col_hbox.pack_start(gtk.Label("Row"), False)
row_adj = gtk.Adjustment(step_incr=1)
self.row_spin = gtk.SpinButton(row_adj)
self.row_spin.connect("value-changed", self.do_rc_spin, True)
row_col_hbox.pack_start(self.row_spin)
row_col_hbox.pack_start(gtk.Label("Col"), False)
col_adj = gtk.Adjustment(step_incr=1)
self.col_spin = gtk.SpinButton(col_adj)
self.col_spin.connect("value-changed", self.do_rc_spin, False)
row_col_hbox.pack_start(self.col_spin)
ent_vbox.pack_start(row_col_hbox, False)
padding_hbox = gtk.HBox()
padding_hbox.pack_start(gtk.Label("Padd."), False)
self.padding_entrs = []
for i in range(0, 4) :
e = gtk.Entry()
self.padding_entrs.append(e)
padding_hbox.pack_start(e)
e.connect("changed", self.do_set_padding, i)
ent_vbox.pack_start(padding_hbox, False)
att_ent_hbox = gtk.HBox()
self.att_chkb = gtk.CheckButton("Attach")
self.att_chkb.connect("toggled", self.do_ae_toggled, True)
att_ent_hbox.pack_start(self.att_chkb, False)
self.ent_chkb = gtk.CheckButton("Enter")
self.ent_chkb.connect("toggled", self.do_ae_toggled, False)
att_ent_hbox.pack_start(self.ent_chkb, False)
ent_vbox.pack_start(att_ent_hbox, False)
self.config_widget_fram = gtk.Frame()
ent_vbox.pack_start(self.config_widget_fram, False)
ent_vbox.pack_start(ex1, False)
ent_vbox.pack_start(ex2, False)
toolbox.pack_start(ent_vbox)
ent_vbox.show_all()
toolbox.show()
self.add(self.vbox)
self.show()
self.vbox.show()
self.gmg.grab_focus()
self.new()
def do_scaling_butt_clicked(self, butt) :
sc = float(self.scaling_entr.get_text())
ent = self.property_store.entity
if ent is None : return
ent.set_size_scaling(sc)
def do_set_side(self, cmbo, lhs=True) :
if not cmbo.get_active_iter() :
return
target = cmbo.get_model().get_value(cmbo.get_active_iter(), 0)
pg = self.add_target_butt.phrasegroup
if target is None or pg is None :
return
if target == '[None]' :
target = None
if lhs :
pg.set_lhs_target(target)
else :
pg.set_rhs_target(target)
def do_add_target(self, butt) :
if self.add_target_butt.phrasegroup is None : return
dialog = gtk.Dialog(title="Target name", parent=self.add_target_butt.get_toplevel(),\
buttons=(gtk.STOCK_OK, gtk.RESPONSE_OK, gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL))
e = gtk.Entry()
dialog.vbox.pack_start(e)
dialog.show_all()
resp = dialog.run()
text = e.get_text()
dialog.destroy()
if resp == gtk.RESPONSE_OK :
debug_print(text)
self.gmg.caret.phrased_to.target_name = text
self.add_target_butt.phrasegroup.add_target(self.gmg.caret.phrased_to, text, stay_enterable=True)
self.targets_liss.append((text,))
self.gmg.caret.try_suggestion()
self.gmg.grab_focus()
def do_break_word(self, butt) :
if self.gmg.caret.phrased_to.am('word') :
self.gmg.caret.exit_phrase()
self.gmg.caret.reset()
self.gmg.grab_focus()
def do_selection_mirror(self, butt) :
selection = self.gmg.caret.get_selected()
mirror = Mirror.make_mirror(self.gmg.caret.phrased_to,
selection[0])
self.gmg.caret.insert_entity(mirror)
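# set_property_store: repopulates the side panel (property tables, target
# list, LHS/RHS combos, row/col spinners, padding entries and ancestry combos)
# for the entity the caret is currently attached to, disabling the controls
# when nothing is attached.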
def set_property_store(self, anc = 0) :
self._is_fm_ancs_changeable = False
self.property_store.clear()
self.scaling_entr.set_text('')
self.inherited_property_store.clear()
self.fm_mes.get_model().clear()
self.fm_ancs.get_model().clear()
self.ent_label.set_text('[None]')
self.ent_label.set_tooltip_text('[None]')
self.property_store.entity = None
ent = self.gmg.caret.attached_to
self.add_target_butt.set_sensitive(False)
tl = self.targets_liss
tl.clear()
self.lhs_cmbo.set_sensitive(False)
self.rhs_cmbo.set_sensitive(False)
self.break_word_butt.set_sensitive(self.gmg.caret.phrased_to.am('word'))
for e in self.padding_entrs :
e.set_text("")
e.set_sensitive(False)
for s in (self.row_spin, self.col_spin) :
a = s.get_adjustment()
s.set_value(0)
a.lower = 0; a.upper = 0
for s in (self.att_chkb, self.ent_chkb) :
s.set_active(False)
if ent is None :
return
pgs = filter(lambda a : a.mes[-1] == 'phrasegroup' or \
a.mes[-1] == self.phrasegroup_name.get_text(),
ent.get_ancestors())
self.add_target_butt.phrasegroup = pgs[0] if len(pgs) > 0 else None
self.add_target_butt.set_sensitive(len(pgs)>0)
pg = self.add_target_butt.phrasegroup
if pg :
tl.append(("[None]",))
for target in pg.target_phrases :
it = tl.append((target,))
if target == pg.lhs_target :
self.lhs_cmbo.set_active_iter(it)
if target == pg.rhs_target :
self.rhs_cmbo.set_active_iter(it)
if pg.lhs_target is None :
self.lhs_cmbo.set_active(0)
if pg.rhs_target is None :
self.rhs_cmbo.set_active(0)
self.lhs_cmbo.set_sensitive(True)
self.rhs_cmbo.set_sensitive(True)
self.property_store.entity = ent.get_ancestors()[anc]
for i in range(0, 4) :
paddstr = str(ent.padding[i])
if paddstr != "0.0" :
self.padding_entrs[i].set_text(paddstr)
self.padding_entrs[i].set_sensitive(True)
fm = ent.format_me()
lab = ent.to_string()
self.ent_label.set_text(lab)
self.ent_label.set_tooltip_text(fm[0])
self.scaling_entr.set_text(str(ent.get_size_scaling()))
self.row_spin.set_value(ent.config[0].row)
self.col_spin.set_value(ent.config[0].col)
self.att_chkb.set_active(ent.get_attachable())
if ent.am('phrase') :
self.ent_chkb.set_active(ent.get_enterable())
a = self.row_spin.get_adjustment()
b = self.col_spin.get_adjustment()
if ent.included() :
a.upper, a.lower = ent.parent.row_range()
b.upper, b.lower = ent.parent.col_range()
a.upper += 1; b.upper += 1
a.lower -= 1; b.lower -= 1
for a in ent.mes : self.fm_mes.append_text(a)
self.fm_mes.set_active(len(ent.mes)-1)
for a in ent.get_ancestors() : self.fm_ancs.append_text(a.mes[-1])
if self.gmg.caret.phrased_to in ent.get_ancestors() :
n = ent.get_ancestors().index(self.gmg.caret.phrased_to)
self.fm_ancs.set_active(n)
else :
self.fm_ancs.set_active(-1)
props = set(ent.default_properties.keys() + ent.properties.keys())
for p in props :
val = ent.get_p(p)
it = self.property_store.append([p, val, type(val).__name__])
for p in ent.inherited_properties :
val = ent.get_ip(p) if p in ent.inherited_properties_overrides else None
it = self.inherited_property_store.append([p, val if val is not None else '', type(val).__name__])
self._is_fm_ancs_changeable = True
_is_fm_ancs_changeable = True
def do_fm_ancs_changed(self, cmbo) :
if self._is_fm_ancs_changeable and cmbo.get_active() >= 0 :
debug_print(cmbo.get_active_text())
self.gmg.caret.change_attached(self.property_store.entity.get_ancestors()[cmbo.get_active()])
def do_fm_mes_changed(self, cmbo) :
ent = self.property_store.entity
if ent is None : return
w = self.config_widget_fram.get_child()
if w is not None : self.config_widget_fram.remove(w)
cw = ConfigWidgets.get_config_widget(cmbo.get_active_text(),
self.property_store.entity,
self.gmg.caret)
if cw is not None : self.config_widget_fram.add(cw)
def new(self) :
self.filename = None
self.gmg.clear()
pg = GlypherPhraseGroup(self.gmg.main_phrase)
pg.edit_mode = True
self.new_default_phrasegroup_name()
self.set_property_store()
self.gmg.caret.insert_entity(pg)
pg.set_recommending(pg)
self.queue_draw()
self.phrasegroup_name.grab_focus()
def get_method_window(self) :
vbox = gtk.VBox()
new_butt = gtk.Button(stock=gtk.STOCK_NEW)
new_butt.connect("clicked", lambda o : self.new())
vbox.pack_start(new_butt)
open_butt = gtk.Button(stock=gtk.STOCK_OPEN)
open_butt.connect("clicked", lambda o : self.open())
vbox.pack_start(open_butt)
save_butt = gtk.Button(stock=gtk.STOCK_SAVE)
save_butt.connect("clicked", lambda o : self.save())
vbox.pack_start(save_butt)
save_as_butt = gtk.Button(stock=gtk.STOCK_SAVE_AS)
save_as_butt.connect("clicked", lambda o : self.save(save_as=True))
vbox.pack_start(save_as_butt)
vbox.show_all()
#config_butt = gtk.Button(self.get_aname_nice()+'...')
#config_butt.set_relief(gtk.RELIEF_HALF)
#self.connect("aesthete-aname-nice-change", lambda o, a, v : self.set_label_for_button(config_butt,v))
#hbox.pack_start(config_butt)
#remove_butt = gtk.Button(); remove_butt.add(gtk.image_new_from_stock(gtk.STOCK_CLEAR, gtk.ICON_SIZE_SMALL_TOOLBAR))
#remove_butt.connect("clicked", lambda o : self.self_remove())
#hbox.pack_start(remove_butt, False)
return vbox
def _do_open_selection_changed(self, fc, pw) :
fn = fc.get_preview_filename()
pw.clear()
if fn is not None and fn != '' :
try :
debug_print(fn)
tree = ET.parse(fn)
pw.set_xml(tree, top=True)
except IOError :
return
def open(self) :
chooser = gtk.FileChooserDialog(\
title="Open GlyphMaker XML File", action=gtk.FILE_CHOOSER_ACTION_OPEN,
buttons=(gtk.STOCK_CANCEL,gtk.RESPONSE_CANCEL,gtk.STOCK_OPEN,gtk.RESPONSE_OK))
chooser.set_current_folder(phrasegroups_dir)
chooser.set_default_response(gtk.RESPONSE_OK)
ge = GlyphEntry(interactive=False)
ge.show_all()
chooser.set_preview_widget(ge)
chooser.connect("selection-changed", self._do_open_selection_changed, ge)
resp = chooser.run()
if resp == gtk.RESPONSE_CANCEL : chooser.destroy(); return
elif resp == gtk.RESPONSE_OK : self.filename = chooser.get_filename()
chooser.destroy()
self.gmg.reset_main_phrase()
pg = make_phrasegroup_by_filename(self.gmg.main_phrase, self.filename, operands = None)
pg.edit_mode = True
root = ET.parse(self.filename).getroot()
sympy_el = root.find("sympy")
if sympy_el is not None :
sympy_text = re.sub('self\[["\']([^"\']*)["\']\]\.get_sympy\(\)',
"`\\1`",
sympy_el.text)
self.sympy_view.get_buffer().set_text(sympy_text)
string_el = root.find("string")
if string_el is not None :
string_text = re.sub(unicode('self\[["\']([^"\']*)["\']\]\.to_string\(\)'),
unicode("`\\1`"),
string_el.text)
self.string_view.get_buffer().set_text(string_text)
latex_el = root.find("latex")
if latex_el is not None :
latex_text = re.sub('self\[["\']([^"\']*)["\']\]\.to_latex\(\)',
"`\\1`",
latex_el.text)
self.latex_view.get_buffer().set_text(latex_text)
info_el = root.find("info")
if info_el is not None :
self.info_view.get_buffer().set_text(info_el.text)
wiki_el = root.find("wiki")
if wiki_el is not None :
self.wiki_entr.set_text(wiki_el.text)
symbol_el = root.find("symbol")
if symbol_el is not None : self.symbol_entr.set_text(symbol_el.text)
category_el = root.find("category")
if category_el is not None : self.category_entr.set_text(category_el.text)
alt_el = root.find("alternatives")
if alt_el is not None : self.alt_entr.set_text(alt_el.text)
self.phrasegroup_name.set_text(root.tag)
ln = root.get('latex_name')
self.phrasegroup_latex_name.set_text(ln if ln is not None else '')
ln = root.get('title')
self.phrasegroup_title.set_text(ln if ln is not None else '')
ln = root.get('mathml')
self.phrasegroup_mathml_op.set_text(ln if ln is not None else '')
ln = root.find('shortcut')
self.phrasegroup_keyboard_shortcut.get_child().set_text(ln.text \
if ln is not None else \
'[None]')
self.phrasegroup_keyboard_shortcut.get_child().set_ellipsize(pango.ELLIPSIZE_START)
if isinstance(pg, str) :
raise(RuntimeError(pg))
self.gmg.caret.change_attached(self.gmg.main_phrase)
self.gmg.caret.insert_entity(pg)
self.gmg.grab_focus()
target_re = None
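# get_xml: serialises the phrasegroup being edited to an XML element,
# translating `name` references in the LaTeX/sympy/text buffers back into
# self["name"].to_latex()/.get_sympy()/.to_string() calls and attaching the
# metadata (symbol, category, shortcut, wiki, ...) as sub-elements or
# attributes.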
def get_xml(self) :
if len(self.gmg.main_phrase.get_entities()) == 0 : return None
root = self.gmg.main_phrase.get_entities()[0].get_xml(full=True,
top=True)
debug_print(self.gmg.main_phrase.get_entities()[0].format_me())
debug_print(ET.tostring(root))
b = self.latex_view.get_buffer()
latex_text = b.get_text(b.get_start_iter(), b.get_end_iter())
debug_print(latex_text)
if len(latex_text) > 0 :
latex_text = re.sub("`([^`]*)`", 'self["\\1"].to_latex()',
latex_text)
latex_element = ET.SubElement(root, 'latex')
latex_element.text = latex_text
b = self.sympy_view.get_buffer()
sympy_text = b.get_text(b.get_start_iter(), b.get_end_iter())
if len(sympy_text) > 0 :
sympy_text = re.sub("`([^`]*)`",
'self["\\1"].get_sympy()',
sympy_text)
sympy_element = ET.SubElement(root, 'sympy')
sympy_element.text = sympy_text
b = self.string_view.get_buffer()
string_text = b.get_text(b.get_start_iter(), b.get_end_iter())
if len(string_text) > 0 :
string_text = re.sub(unicode("`([^`]*)`"),
unicode('self["\\1"].to_string()'),
string_text)
string_element = ET.SubElement(root, 'string')
string_element.text = string_text
b = self.info_view.get_buffer()
info_text = b.get_text(b.get_start_iter(), b.get_end_iter())
if len(info_text) > 0 :
info_element = ET.SubElement(root, 'info')
info_element.text = info_text
t = self.wiki_entr.get_text()
if len(t) > 0 :
ET.SubElement(root, 'wiki').text = t
t = self.symbol_entr.get_text()
if len(t) > 0 :
ET.SubElement(root, 'symbol').text = t
t = self.category_entr.get_text()
if len(t) > 0 :
ET.SubElement(root, 'category').text = t
t = self.alt_entr.get_text()
if len(t) > 0 :
ET.SubElement(root, 'alternatives').text = t
t = self.phrasegroup_name.get_text()
if len(t) > 0 :
root.tag = t
else :
root.tag = self.default_phrasegroup_name
t = self.phrasegroup_latex_name.get_text()
if len(t) > 0 :
root.set('latex_name', t)
t = self.phrasegroup_title.get_text()
if len(t) > 0 :
root.set('title', t)
t = self.phrasegroup_mathml_op.get_text()
if len(t) > 0 :
root.set('mathml', t)
t = self.phrasegroup_keyboard_shortcut.get_child().get_text()
if t != '[None]' :
shortcut = ET.SubElement(root, 'shortcut')
shortcut.text = t
return root
#space_array = self.gmg.main_phrase.entities[0]
#if space_array.get_op_count() > 1 :
# root = space_array.get_xml()
#else :
# if len(space_array.get_target('pos0').get_entities()) == 0 : return None
# root = space_array.get_target('pos0').get_entities()[0].get_xml()
#return root
def save(self, save_as = False) :
root = self.get_xml()
if root is None :
self.gmg.grab_focus()
return
if self.filename is None or save_as :
chooser = gtk.FileChooserDialog(\
title="Save GlyphMaker XML File", action=gtk.FILE_CHOOSER_ACTION_SAVE,
buttons=(gtk.STOCK_CANCEL,gtk.RESPONSE_CANCEL,gtk.STOCK_SAVE,gtk.RESPONSE_OK))
chooser.set_current_folder(phrasegroups_dir)
chooser.set_default_response(gtk.RESPONSE_OK)
chooser.set_current_name(root.tag + '.xml')
resp = chooser.run()
if resp == gtk.RESPONSE_OK :
filename = chooser.get_filename()
debug_print(filename)
chooser.destroy()
if filename is None or filename == '' : return
self.filename = filename
else :
chooser.destroy()
return
gutils.xml_indent(root)
tree = ET.ElementTree(root)
tree.write(self.filename, encoding="utf-8")
self.gmg.grab_focus()
def update_xmlview(self) :
root = self.get_xml()
if root is not None :
gutils.xml_indent(root)
self.xmlview.get_buffer().set_text(ET.tostring(root))
#PROPERTIES
def get_aesthete_properties(self):
return { }
#BEGIN PROPERTIES FUNCTIONS
#END PROPERTIES FUNCTIONS
def __del__(self) :
aobject.AObject.__del__(self)
|
Aesthete
|
/Aesthete-0.4.2.tar.gz/Aesthete-0.4.2/aesthete/glypher/GlyphMaker.py
|
GlyphMaker.py
|
from sympy import *
import sympy
from sympy.series import limits
import glypher as g
from aobject.utils import *
# Sympy 0.6 doesn't bring these in automatically
try :
from sympy.core.numbers import *
from sympy.physics.units import Unit
except ImportError :
pass
from sympy.core.basic import Basic
_compare_pretty = Basic._compare_pretty
current_globals = {}
def rebuild_globals() :
global current_globals
current_globals = {}
dicts = [l.__dict__ for l in loaded_libraries.values()]
dicts += [globals()]
for d in dicts :
current_globals.update(d)
def eval_for_sympy_args(code, *args) :
'''This gives the evaluated code access to the sympy globals by a 'from
sympy import *' in this closed-off module.'''
try:
return eval(code, current_globals, {'args':args})
except TypeError as e : # hack to get mpmath functions working
debug_print(e)
new_args = []
for arg in args:
new_args.append(N(arg))
return eval(code, current_globals, {'args':new_args})
def eval_for_sympy(ent, code) :
'''This gives the evaluated code access to the sympy globals by a 'from
sympy import *' in this closed-off module.'''
return eval(code, current_globals, {'self':ent})
def get_sympy_function(func_name) :
'''Finds a sympy function in the globals from a 'from sympy import *'.'''
if func_name in globals() :
return globals()[func_name]
else :
return None
loaded_libraries = {}
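# load_library: imports (or, with unload=True, forgets) a library registered
# in g.libraries, keeping the module object in loaded_libraries and rebuilding
# the combined globals used for dynamic evaluation.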
def load_library(name, unload = False) :
'''Load (or unload) a library from the dynamic list.'''
if name not in g.libraries :
return False
if unload :
if name in loaded_libraries :
del loaded_libraries[name]
rebuild_globals()
return True
if name not in loaded_libraries :
new_lib = __import__(name, globals(), locals(), ['*'])
loaded_libraries[name] = new_lib
rebuild_globals()
return True
def get_library_function(func_name) :
'''Try the loaded libraries (other than sympy) for the given function.'''
for lib_name in loaded_libraries :
lib = loaded_libraries[lib_name]
if func_name in lib.__dict__ :
return lib.__dict__[func_name]
return None
def text_to_func(text) :
sympy_code = compile(text, '<string>', 'eval')
return lambda *args : eval_for_sympy_args(sympy_code, *args)
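# Illustrative use of text_to_func (names below are examples only): the
# compiled snippet sees its arguments through the 'args' tuple and the
# sympy-populated globals.
#
# f = text_to_func("args[0] + args[1]")
# f(1, 2) # -> 3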
rebuild_globals()
|
Aesthete
|
/Aesthete-0.4.2.tar.gz/Aesthete-0.4.2/aesthete/glypher/Dynamic.py
|
Dynamic.py
|
import os
import string
from aobject.paths import *
import glypher as g
import gtk
from Spacer import *
import Mirror
import Dynamic
import Function
import Word
import Symbol
from glypher import \
GLYPHER_PG_LEAD_ALL, \
GLYPHER_PG_LEAD_VERT, \
GLYPHER_PG_LEAD_HORI
import lxml.etree as ET
GLYPHER_IS_PG = 1
GLYPHER_IS_ENTITY = 2
phrasegroups = g.phrasegroups
entities = {\
'space' : GlypherSpace,
'phrase' : GlypherPhrase,
'horizontal_line' : GlypherHorizontalLine,
'vertical_line' : GlypherVerticalLine,
'symbol' : Symbol.GlypherSymbol,
'word' : Word.GlypherWord,
}
phrasegroup_trees = {}
user_phrasegroup_trees = {}
formula_trees = {}
latex_to_name = {}
latex_to_shape = {}
shortcuts = {}
shortcuts_saved = {}
content_mathml_operations = {}
binary_expression_names = {}
binary_expression_properties_for_symbol = {}
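# load_shortcuts: reads the shortcut XML file and fills the shortcuts dict,
# keyed by (key name, ctrl, shift, mod1/alt, super) tuples, with the name of
# the phrasegroup each accelerator should insert.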
def load_shortcuts(shortcut_file) :
if not os.path.exists(shortcut_file) :
return False
tree = ET.parse(shortcut_file)
for node in tree.getroot() :
p = gtk.accelerator_parse(node.get('shortcut'))
keyname = gtk.gdk.keyval_name(p[0]).lower()
shortcut = (keyname,
bool(p[1] & gtk.gdk.CONTROL_MASK),
bool(p[1] & gtk.gdk.SHIFT_MASK),
bool(p[1] & gtk.gdk.MOD1_MASK),
bool(p[1] & gtk.gdk.SUPER_MASK))
shortcuts[shortcut] = node.get('phrasegroup')
return True
def is_named(name) :
if name in phrasegroup_trees or\
name in user_phrasegroup_trees or\
name in phrasegroups :
return GLYPHER_IS_PG
elif name in entities or name in Word.constants or name in Word.units :
return GLYPHER_IS_ENTITY
return False
def get_name_from_latex (latex) :
'''Find a registered name from a LaTeX-style shortcut.'''
if latex in latex_to_name :
return latex_to_name[latex]
def get_shape_from_latex (latex) :
'''Find a registered shape from a LaTeX-style shortcut.'''
if latex in latex_to_shape :
return latex_to_shape[latex]
return None
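# make_val: casts a string to the named primitive type; any other type name
# returns the string unchanged.  For example, make_val('3', 'int') -> 3 and
# make_val('True', 'bool') -> True.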
def make_val(value, ty) :
return (value=='True') if ty == 'bool' else \
int(value) if ty == 'int' else \
float(value) if ty == 'float' else \
value
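# make_val coerces string attribute values from the XML definitions into
# Python types, e.g. (illustrative):
#     make_val('True', 'bool')   -> True
#     make_val('3', 'int')       -> 3
#     make_val('2.5', 'float')   -> 2.5
#     make_val('pi', 'str')      -> 'pi'   (anything else falls through unchanged)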
def add_formula_tree(name, tree) :
formula_trees[name] = tree
def make_formula(parent, name) :
root = formula_trees[name].getroot()
content = root.find('content')[0]
content = ET.ElementTree(content)
pg = parse_phrasegroup(parent, content, top=False)
do_info_parsing(pg, root, top=True)
return pg
def add_phrasegroup_tree(name, tree, user=True, latex=None, shortcut=True) :
'''
Adds a PG XML tree to the list, for building and inserting on the fly.
Arguments :
    name - name under which to register the phrasegroup (set as the root tag)
    tree - the XML tree
user - whether or not this should be included as a userdir tree (default
True)
latex - LaTeX-style shortcut (e.g. \\frac)
shortcut - whether to store a keyboard shortcut, if found (default
True). Also affects MathML operation storage.
'''
# Make sure the name and type are correct
tree.getroot().tag = name
#tree.getroot().set('type', name)
# Add to the appropriate dictionary
(user_phrasegroup_trees if user else phrasegroup_trees)[name] = tree
# Add LaTeX-style shortcut
if latex is not None :
latex_to_name[latex] = name
# If this is a binary expression, note it for Caret
if tree.getroot().get('type') == 'binary_expression' :
root = tree.getroot()
op = root.get('operator')
binary_expression_names[name] = op
sympy_code = root.find('sympy')
binary_expression_properties_for_symbol[op] = {
'sympy' : compile(sympy_code.text.strip(), '<string>', 'eval'),
'variable_ops' : root.get('variable_ops') == 'True',
'name' : name,
'gravity' : root.get('gravity') == 'Up',
}
if shortcut :
# Add keyboard shortcut
sc = tree.getroot().find('shortcut')
if sc is not None :
p = gtk.accelerator_parse(sc.text)
keyname = gtk.gdk.keyval_name(p[0]).lower()
shortcut = (keyname,
bool(p[1] & gtk.gdk.CONTROL_MASK),
bool(p[1] & gtk.gdk.SHIFT_MASK),
bool(p[1] & gtk.gdk.MOD1_MASK),
bool(p[1] & gtk.gdk.SUPER_MASK))
shortcuts[shortcut] = name
# Try for Content MathML
op = tree.getroot().get('mathml')
if op is not None :
content_mathml_operations[op] = name
alternatives_cat = tree.getroot().find('alternatives')
if alternatives_cat is not None :
if alternatives_cat.text not in g.phrasegroup_alternatives :
g.phrasegroup_alternatives[alternatives_cat.text] = []
g.phrasegroup_alternatives[alternatives_cat.text].append(name)
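# Hedged sketch of a phrasegroup definition, pieced together from the
# attributes and child elements read above and in parse_element (the tag,
# names and values are illustrative only):
#     <my_op type="binary_expression" operator="+" variable_ops="True"
#            gravity="Up" latex_name="\myop" mathml="myop">
#         <shortcut>&lt;Control&gt;m</shortcut>
#         <sympy>args[0] + args[1]</sympy>
#         <alternatives>arithmetic</alternatives>
#     </my_op>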
def try_key_press(event) :
'''Checks whether a given keyname/state pair is allocated to a named
entity.'''
keyname = gtk.gdk.keyval_name(event.keyval).lower()
shortcut = (keyname,
bool(event.state & gtk.gdk.CONTROL_MASK),
bool(event.state & gtk.gdk.SHIFT_MASK),
bool(event.state & gtk.gdk.MOD1_MASK),
bool(event.state & gtk.gdk.SUPER_MASK))
if shortcut in shortcuts_saved :
return shortcuts_saved[shortcut]
if shortcut in shortcuts :
return shortcuts[shortcut]
return None
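# Shortcuts are keyed by a 5-tuple (keyname, ctrl, shift, alt, super); for
# example the accelerator "<Control><Shift>f" is stored under
# ('f', True, True, False, False).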
def make_phrasegroup(parent, name, operands=None, properties=None, args=None) :
if name in entities :
pg = entities[name](parent)
elif name in Word.constants :
pg = Word.constants[name](parent)
elif name in Word.units :
pg = Word.units[name](parent)
else :
if name in phrasegroup_trees :
pg = parse_phrasegroup(parent, phrasegroup_trees[name], operands,
args=args)
elif name in user_phrasegroup_trees :
pg = parse_phrasegroup(parent, user_phrasegroup_trees[name],
operands, args=args)
elif name in phrasegroups :
if args is not None :
pg = phrasegroups[name](parent, **args)
else :
pg = phrasegroups[name](parent)
else :
raise(RuntimeError('Required XML PhraseGroup not loaded! '+name))
if operands is not None and pg.am('phrasegroup') :
missing_left = True
missing_right = True
if pg.lhs_target is not None and len(operands) > 0 and operands[0] is not None :
operands[0].orphan()
pg.set_lhs(operands[0])
missing_left = False
if pg.rhs_target is not None and len(operands) > 1 and operands[1] is not None :
operands[1].orphan()
pg.set_rhs(operands[1])
missing_right = False
if missing_left and pg.lhs_target is not None :
pg.set_recommending(pg[pg.lhs_target])
elif missing_right and pg.rhs_target is not None :
pg.set_recommending(pg[pg.rhs_target])
if properties is not None :
for p in properties : pg.set_p(p, properties[p])
#debug_print(pg.format_me())
#debug_print(pg.format_entities())
#if len(pg.entities) > 0 :
# debug_print(pg.get_entities()[0].format_entities())
return pg
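# Hedged example: build a loaded phrasegroup by name and hand it operands
# ('fraction' is an illustrative name; real names come from the XML defs):
#     pg = make_phrasegroup(parent_phrase, 'fraction',
#                           operands=(numerator, denominator))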
def make_phrasegroup_by_filename(parent, filename, operands = None, properties = None) :
tree = ET.parse(filename)
pg = parse_phrasegroup(parent, tree, operands)
if properties is not None :
for p in properties : pg.set_p(p, properties[p])
return pg
#def parse_phrasegroups() :
# defs_files = os.listdir(get_share_location() + 'defs')
# with open(get_share_location() + "defs/order.txt") as order :
# lines = order.readlines()
# for line in lines :
# filename = get_share_location() + "defs/" + line.strip() + ".xml"
# tree = ET.parse(filename)
# add_phrasegroup_tree(tree.getroot().tag.lower(), tree, user=False, latex=tree.getroot().get('latex_name'))
# for line in defs_files :
# line = line.strip()
# if line[-3:] != "xml" : continue
# filename = get_share_location() + "defs/" + line
# try :
# tree = ET.parse(filename)
# except ET.XMLSyntaxError as e :
# debug_print("Couldn't parse : "+str(filename) + " : " + str(e))
# continue
# name = tree.getroot().tag.lower()
# if name in phrasegroup_trees : continue
# add_phrasegroup_tree(name, tree, user=False, latex=tree.getroot().get('latex_name'))
def parse_phrasegroup(parent, tree, ops = None, top = True, args=None) :
root = tree.getroot()
ty = root.get('type')
operands = None
recommending = None
if root.tag == 'main_phrase' or (ty is not None and ty == 'main_phrase') :
ents = root.find('entities')
if ents is None or len(ents) != 1 :
return None
root = ents[0]
names = {}
targets = {}
operands = {}
recommending = []
lead = []
add_entities = {}
new_phrasegroup = parse_element(parent, root, names, targets, operands,
recommending, lead, add_entities, top=top,
args=args)
for name in add_entities :
pairs = add_entities[name]
for pg, prop in pairs :
pg.set_p(prop, names[name])
for t in targets :
new_phrasegroup.add_target(targets[t], t)
if len(lead) > 0 :
alt = lead[len(lead)-1][1]
        # 'a' selects the all-directions lead (GLYPHER_PG_LEAD_ALL, imported above)
        new_phrasegroup.set_lead(lead[len(lead)-1][0], GLYPHER_PG_LEAD_ALL if alt=='a' else\
                                 GLYPHER_PG_LEAD_HORI if alt=='h' else\
                                 GLYPHER_PG_LEAD_VERT)
if top and new_phrasegroup.am('phrasegroup') :
new_phrasegroup.mes.append(root.tag)
new_phrasegroup.set_default_entity_xml()
if operands is None :
operands = {}
n = 0
if new_phrasegroup.lhs_target is not None :
operands[n] = new_phrasegroup[new_phrasegroup.lhs_target]
n += 1
if new_phrasegroup.rhs_target is not None :
operands[n] = new_phrasegroup[new_phrasegroup.rhs_target]
n += 1
if recommending is None :
recommending = [new_phrasegroup.get_recommending()]
if ops is not None :
for o in operands :
n = int(o)
if len(ops) > n and ops[n] is not None :
ops[n].orphan()
operands[o].adopt(ops[n])
if len(recommending) > 0 :
new_phrasegroup.set_recommending(recommending[len(recommending)-1])
return new_phrasegroup
def parse_element(parent, root, names, targets, operands, recommending, lead,
add_entities, am = None, top = True, args=None) :
new_phrasegroup = am
if new_phrasegroup is None :
tag = root.tag.lower()
ty = root.get('type')
#if ty is not None :
# ty = ty.lower()
default_args = {}
num_ops = root.get('num_ops')
if num_ops is not None :
default_args = {'num_ops' : int(num_ops) }
        # Merge any caller-supplied args over the XML defaults; pass None on
        # if there is nothing to pass.
        if args is not None :
            default_args.update(args)
        args = default_args if len(default_args) > 0 else None
if ty in g.parse_element_fns :
new_phrasegroup = g.parse_element_fns[root.get('type')](new_phrasegroup,
root, names, targets, operands,
recommending, lead, add_entities, top=False,
args=args)
elif tag == 'word' or ty == 'word' :
new_phrasegroup = Word.GlypherWord(parent)
elif tag == 'phrase' or ty == 'phrase' :
new_phrasegroup = GlypherPhrase(parent)
elif tag == 'script' or ty == 'script' :
available = [(site_av=='True') for site_av in \
root.get('available').split(",")]
new_phrasegroup = g.phrasegroups['script'](parent, available=available)
elif tag == 'space' or ty == 'space' :
dims = (float(root.get('width')), float(root.get('height')))
new_phrasegroup = GlypherSpace(parent, dims=dims)
        elif tag == 'verticalspacer' or ty == 'verticalspacer' :
            tied_to = names[root.get('tied_to')]
            scaling = float(root.get('scaling'))
            subtract_other_children = root.get('subtract_other_children') == 'True'
            new_phrasegroup = GlypherVerticalSpacer(parent, tied_to=tied_to,
                                                    scaling=scaling,
                                                    subtract_other_children=subtract_other_children)
elif tag == 'mirror' or ty == 'mirror' :
tied_to = names[root.get('tied_to')]
new_phrasegroup = Mirror.make_mirror(parent, tied_to)
else :
if ty == 'phrase' :
new_phrasegroup = GlypherPhrase(parent)
elif ty in Word.constants :
new_phrasegroup = Word.constants[ty](parent)
elif ty in Word.units :
new_phrasegroup = Word.units[ty](parent)
elif ty in entities :
new_phrasegroup = entities[ty](parent)
elif ty in user_phrasegroup_trees :
new_phrasegroup = make_phrasegroup(parent, ty, args=args)
elif ty in phrasegroups and ty != 'phrasegroup':
if args is not None :
new_phrasegroup = phrasegroups[ty](parent, **args)
else :
new_phrasegroup = phrasegroups[ty](parent)
elif top or ty == 'phrasegroup' or ty is None :
new_phrasegroup = make_phrasegroup(parent, 'phrasegroup', args=args)
if root.get('lhs') is not None :
new_phrasegroup.lhs_target = root.get('lhs')
if root.get('rhs') is not None :
new_phrasegroup.rhs_target = root.get('rhs')
elif ty in phrasegroup_trees :
new_phrasegroup = make_phrasegroup(parent, ty, args=args)
else :
raise(RuntimeError("Couldn't find a class of type "+str(ty)+" for "+str(tag)))
if ty is not None :
new_phrasegroup.set_name(tag)
if root.get('name') is not None : new_phrasegroup.set_name(root.get('name'))
if new_phrasegroup.get_name() is not None : names[new_phrasegroup.get_name()] = new_phrasegroup
els = root.find("entities")
if els is not None :
for e in els :
pg_els = filter(lambda pe : pe.get_name() == e.tag,
new_phrasegroup.entities)
if len(pg_els) > 0 :
el = pg_els[0]
else :
el = parse_element(new_phrasegroup, e, names, targets, operands,
recommending, lead, add_entities, top=False)
if e.get('recommending') == 'True' : recommending.append(el)
if e.get('target') is not None :
targets[e.get('target')] = el
el.name = e.get('target')
if e.get('operand') is not None : operands[e.get('operand')] = el
if e.get('size_scaling') is not None : el.set_size_scaling(float(e.get('size_scaling')))
if e.get('lead') is not None :
lead.append((el, e.get('lead')))
row = int(e.get('row')) if e.get('row') is not None else 0
col = int(e.get('col')) if e.get('col') is not None else 0
el.recalc_bbox()
if el not in pg_els :
new_phrasegroup.append(el, row=row, col=col)
if e.get('padding') is not None :
p = string.split(e.get('padding'), ' ')
for i in range(0,4) :
el.set_padding(i, float(p[i]))
if hasattr(el, 'after_append') :
for a in el.after_append : a()
tgts = root.find("targets")
if tgts is not None :
for t in tgts :
name = t.get('name')
phr = targets[name] if top else new_phrasegroup.get_target(name)
parse_element(new_phrasegroup, t, names, targets, operands,
recommending, lead, add_entities, am=phr, top=False)
props = root.find("properties")
if props is not None:
for p in props :
name = p.get('name')
value = p.get('value')
ty = p.get('type')
if ty == 'tuple' :
entries = p.findall('ti')
tup = []
for e in entries :
tup.append(make_val(e.get('value'), e.get('type')))
val = tuple(tup)
else :
val = make_val(value, ty)
if ty == 'entity' :
if value not in add_entities :
add_entities[val] = []
add_entities[val].append((new_phrasegroup, name))
# We don't want to set this just yet.
continue
new_phrasegroup.set_p(name, val)
props = root.find("inherited_properties_overrides")
if props is not None :
for p in props :
name = p.get('name')
value = p.get('value')
ty = p.get('type')
if ty == 'tuple' :
entries = p.findall('ti')
tup = []
for e in entries : tup.append(make_val(e.get('value'), e.get('type')))
val = tuple(tup)
else :
val = make_val(value, ty)
new_phrasegroup.set_ip(name, val)
scaling = root.get('scaling')
if scaling is not None : new_phrasegroup.set_size_scaling(float(scaling))
acts = root.find('actions')
new_phrasegroup.after_append = []
if acts is not None :
for a in acts :
if a.tag == 'translate' :
                # Bind the action node as a default argument so each deferred
                # call keeps its own node rather than the loop's last one.
                new_phrasegroup.after_append.append(lambda a=a :\
                    new_phrasegroup.translate(float(a.get('h'))*new_phrasegroup.get_scaled_font_size(),\
                                              float(a.get('v'))*new_phrasegroup.get_scaled_font_size(), quiet=True))
rows = root.find('rows')
if rows is not None :
for r in rows :
n = int(r.get('id'))
if r.get('offset') is not None : new_phrasegroup.row_offsets[n] = float(r.get('offset')); new_phrasegroup.recalc_bbox()
if r.get('align') is not None : new_phrasegroup.set_row_align(n, r.get('align'))
sympy_code = root.find("sympy")
#FIXME: it strikes me that this isn't very safe
if sympy_code is not None and new_phrasegroup.get_sympy_code is None :
new_phrasegroup.get_sympy_code = compile(sympy_code.text.strip(), '<string>', 'eval')
do_info_parsing(new_phrasegroup, root, top=top)
return new_phrasegroup
def do_info_parsing(new_phrasegroup, root, top=False) :
string_code = root.find("string")
#FIXME: it strikes me that this isn't very safe
if string_code is not None :
new_phrasegroup.to_string_code = compile(string_code.text.strip(), '<string>', 'eval')
latex_code = root.find("latex")
#FIXME: it strikes me that this isn't very safe
if latex_code is not None :
new_phrasegroup.to_latex_code = compile(latex_code.text.strip(), '<string>', 'eval')
info_text = root.find("info")
if info_text is not None :
if top :
new_phrasegroup.indicate_info = True
new_phrasegroup.set_info_text(info_text.text.strip())
title_text = root.get("title")
if title_text is not None :
new_phrasegroup.set_title(title_text)
wiki_link = root.find("wiki")
if wiki_link is not None :
new_phrasegroup.set_wiki_link(wiki_link.text.strip())
alternatives_cat = root.find("alternatives")
if alternatives_cat is not None :
new_phrasegroup.alternatives_cat = alternatives_cat.text
|
Aesthete
|
/Aesthete-0.4.2.tar.gz/Aesthete-0.4.2/aesthete/glypher/Parser.py
|
Parser.py
|
import cairo
def draw_inverse_blush(cr, length, height, colour=(0,0,0), thickness=5) :
cr.save()
cr.rel_line_to(length, 0)
cr.rel_line_to(0, height)
cr.rel_line_to(-length, 0)
cr.rel_line_to(0, -height)
cr.close_path()
fe = cr.fill_extents()
blush_grad = cairo.LinearGradient(fe[0]+length/2,fe[1],fe[0]+length/2,fe[3])
colour = list(colour)+[0]
blush_grad.add_color_stop_rgba(0.4, *colour); colour[3] = 1
blush_grad.add_color_stop_rgba(0, *colour)
cr.set_source(blush_grad)
cr.fill()
cr.move_to(fe[0],fe[1])
cr.rel_line_to(0, thickness)
cr.move_to(fe[0],fe[1])
cr.rel_line_to(length, 0)
cr.rel_line_to(0, thickness)
cr.set_source_rgba(*colour)
cr.stroke()
cr.restore()
def draw_full_blush(cr, length, height, colour=(0,0,0)) :
pt = cr.get_current_point()
cr.rel_move_to(0, -height)
draw_inverse_blush (cr, length, height, colour, height/2)
cr.move_to(*pt)
draw_blush (cr, length, colour, height/2)
def draw_blush(cr, length, colour=(0,0,0), thickness=5) :
cr.save()
cr.set_source_rgb(*colour)
cr.rel_line_to(length, 0)
cr.rel_line_to(0, -thickness)
cr.rel_move_to(-length, 0)
cr.rel_line_to(0, thickness)
cr.set_line_width(2.0)
fe = cr.fill_extents()
cr.stroke()
cr.move_to(fe[0], fe[1])
cr.rel_line_to(0, thickness)
cr.rel_line_to(length, 0)
cr.rel_line_to(0, -thickness)
cr.rel_line_to(-length, 0)
cr.close_path()
blush_grad = cairo.LinearGradient(fe[0]+length/2,fe[1],fe[0]+length/2,fe[3])
colour = list(colour)+[0]
blush_grad.add_color_stop_rgba(0, *colour); colour[3] = 1
blush_grad.add_color_stop_rgba(1, *colour)
cr.set_source(blush_grad)
cr.fill()
#cr.clip()
#cr.mask(blush_grad)
cr.restore()
# By Helton Moraes (heltonbiker at gmail dot com) c/o http://www.cairographics.org/cookbook/roundedrectangles/
# (Method D)
def trace_rounded(cr, area, radius):
""" draws rectangles with rounded (circular arc) corners """
from math import pi
a,b,c,d=area
#slight change
cr.move_to(a, c + radius)
cr.arc(a + radius, c + radius, radius, 2*(pi/2), 3*(pi/2))
cr.arc(b - radius, c + radius, radius, 3*(pi/2), 4*(pi/2))
cr.arc(b - radius, d - radius, radius, 0*(pi/2), 1*(pi/2)) # ;o)
cr.arc(a + radius, d - radius, radius, 1*(pi/2), 2*(pi/2))
cr.close_path()
def draw_box(cr, colour, bb) :
cr.save()
cr.set_source_rgba(0.3, 0.2, 0, 0.5)
area=(bb[0]-2, bb[2]+10, bb[1]-2, bb[3]+10)
trace_rounded(cr, area, 5)
cr.fill()
cr.set_source_rgba(0.75+0.25*colour[0], 0.75+0.25*colour[1],
0.75+0.25*colour[2], 0.8)
#cr.rectangle(bb[0]-2, bb[1]-2, bb[2]-bb[0]+4, bb[3]-bb[1]+4)
area=(bb[0]-6, bb[2]+6, bb[1]-6, bb[3]+6)
trace_rounded(cr, area, 5)
cr.fill_preserve()
cr.set_line_width(4)
cr.set_source_rgb(*colour)
cr.stroke()
cr.restore()
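# Hedged demo (not part of the original module): render the helpers above to a
# PNG for quick visual inspection. Only pycairo is required; colours, the
# bounding box and the output filename are illustrative.
if __name__ == '__main__' :
    surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 200, 120)
    cr = cairo.Context(surface)
    cr.set_source_rgb(1, 1, 1)
    cr.paint()
    # draw_box takes an RGB colour and a bounding box in (x0, y0, x1, y1) form
    draw_box(cr, (0.2, 0.4, 0.8), (20, 20, 160, 60))
    # draw_blush starts from the current point and shades a strip above it
    cr.move_to(30, 105)
    draw_blush(cr, 140, colour=(0.8, 0.2, 0.2), thickness=6)
    surface.write_to_png('draw_demo.png')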
|
Aesthete
|
/Aesthete-0.4.2.tar.gz/Aesthete-0.4.2/aesthete/glypher/draw.py
|
draw.py
|
import gobject
import gtk
from sympy.parsing import mathematica
import glypher as g
from aobject import aobject
from aobject.utils import debug_print
from BinaryExpression import *
import Parser  # module reference used by Parser.is_named in process_key_press
from Parser import *
from Decoration import *
from Entity import *
from Phrase import *
from PhraseGroup import *
from Renderers import *
from Symbol import *
from Table import *
from Word import *
from Fraction import *
from Function import *
from References import *
from Interpret import *
from Alternatives import *
import sympy  # explicit import for sympy.core.sympify used in paste_text
import sympy.printing as printing
# The keynames listed below all trigger binary
# expressions.
binary_keynames = [
'asterisk',
'semicolon',
'plus',
'minus',
'comma',
'slash',
'space'
]
# The following keynames are translated to their
# corresponding symbols
swap_keynames = { \
'asterisk' : u'\u00B7',\
'equal' : '=',\
'plus' : '+',\
'semicolon' : ';',\
'minus' : '-',\
'percent' : '%',\
'period' : '.',\
'parenleft' : '(',\
'space' : ' ',\
'parenright' : ')',\
'comma' : ',',\
'slash' : '/',\
'less' : '<',
'greater' : '>',
'at' : u'\u2218'
}
class GlypherCaret (gobject.GObject) :
"""Caret to control insertion and entity-wise operations"""
# We add three signals
# changed-phrased-to : phrased_to has changed; send who_am_i and list of
# ancestors or ("None", "None") if phrased_to is None
# changed-attached-to : sim. for attached_to
# content-changed : the caret has done something requiring a redraw
__gsignals__ = {
"changed-phrased-to" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE,
(gobject.TYPE_STRING,gobject.TYPE_STRING)),
"changed-attached-to" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE,
(gobject.TYPE_STRING,gobject.TYPE_STRING)),
"content-changed" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, ())
}
position = None
attached_to = None
phrased_to = None
symbol_active = False
main_phrase = None
clipboard = None
container = None
glypher = None
boxes = None
selected = None
editor_mode = False
def __init__(self, main_phrase, interactive=True, container=None,
glypher=None):
"""
Arguments :
main_phrase - inside which our Caret shall live
        interactive - whether this Caret behaves in an edit-friendly way
container - GTK widget to which GTK widgets may be added as overlays
glypher - GlypherEntry instance which manages this Caret
"""
gobject.GObject.__init__(self)
self.position = [0., 0.]
self.interactive = interactive
self.main_phrase = main_phrase
self.clipboard = []
self.boxes = []
self.selected = []
self.container = container
self.glypher = glypher
self.find_left()
def remove_boxes(self) :
"""Clear any overlay boxes from the drawing queue."""
for b in self.boxes :
if isinstance(b, GlypherWidgetBox) :
self.container.remove(b.gw.ebox)
self.boxes = []
def go_near(self, point, main_phrase=None, change=False) :
"""Attached to closest entity in main_phrase (changes Caret's linked
main_phrase if supplied).
"""
m = self.main_phrase if main_phrase is None else main_phrase
# Avoid attached if we must
if change :
avoid = self.attached_to
else :
avoid = None
target = m.find_nearest\
(point=point, fall_through=True, enterable_parent=True, avoid=avoid)
self.main_phrase = m
self.change_attached(target[1])
def is_inside(self, phr) :
"""Returns bool indicating whether the Caret is, at some level, inside
phrase phr.
"""
return phr in self.phrased_to.get_ancestors()
def process_key_release(self, keyname, event) :
"""
Handles a key release event.
Should only be called by a GlyphEntry.
"""
mask = event.state
m_control = bool(mask & gtk.gdk.CONTROL_MASK)
m_shift = bool(mask & gtk.gdk.SHIFT_MASK)
m_alt = bool(mask & gtk.gdk.MOD1_MASK)
m_super = bool(mask & gtk.gdk.SUPER_MASK)
if keyname == 'Alt_L' or keyname == 'Alt_R' :
self.set_active(False)
# Select something
elif not m_alt and not m_super and \
keyname == 'Shift_L' :
if self.prep_selected and self.attached_to != None and \
self.attached_to.am('phrase') :
if m_control :
self.add_selected(self.attached_to)
else :
self.set_selected(self.attached_to)
            self.prep_selected = False
def find_end(self) :
"""Find the end of the main_phrase."""
# As default if we can't find a suitable location, detach
self.change_attached(None)
self.adopt_phrase(None)
ents = self.main_phrase.entities
# Choose A & P as theoretical end-points and find the first leftward
# valid position
if len(ents) > 0 :
A = ents[len(ents)-1]
else :
A = self.main_phrase
P = self.main_phrase
self.find_in_dir(left=True, A=A, P=P)
def process_key_press(self, keyname, event) :
"""Handles a key press event. Should only be called by a GlyphEntry."""
#FIXME: rewrite this for XML key definitions
mask = event.state
m_control = bool(mask & gtk.gdk.CONTROL_MASK)
m_shift = bool(mask & gtk.gdk.SHIFT_MASK)
m_alt = bool(mask & gtk.gdk.MOD1_MASK)
m_clear = not (m_control or m_shift)
m_ordinary = not m_alt and not m_control
m_super = bool(mask & gtk.gdk.SUPER_MASK)
self.prep_selected = False
# Literal key entry; no parsing, just insert as given
if m_control and m_super :
kv = gtk.gdk.keyval_from_name(keyname)
uc = unichr(gtk.gdk.keyval_to_unicode(kv))
debug_print((uc,kv))
if uc == u'\u0000' :
return
self.insert_shape(uc)
return True
# If the attached entity wants us to give it supered keys,
# instead of dealing with them ourselves, do so
if m_super or (self.attached_to != None and \
self.attached_to.get_p('override_keys')) :
debug_print('entity handling '+keyname)
if self.attached_to != None :
ret = self.attached_to.process_key(keyname, event, self)
elif self.phrased_to != None :
ret = self.phrased_to.process_key(keyname, event, self)
self.try_suggestion()
return True
if m_ordinary :
# First make sure this isn't a combination for something
# If we're attached to the inside of a phrase, it's intuitive to
# keep looking upwards, if we're attached to an entity, it's not.
if self.attached_to and \
self.attached_to.check_combination(\
swap_keynames[keyname] if keyname in swap_keynames else\
keyname,
go_up=self.attached_to==self.phrased_to) :
return True
# Try the loaded named entities
entity_name = try_key_press(event)
if entity_name is not None :
self.insert_named(entity_name)
# Then try directional instructions
elif m_clear and keyname == 'Home' and self.phrased_to is not None :
ancs = self.phrased_to.get_ancestors()
self.enter_phrase(ancs[-1], at_start=True)
elif m_clear and keyname == 'End' and self.phrased_to is not None :
self.find_end()
# Select something
elif not m_alt and not m_super and \
keyname == 'Shift_L' :
self.prep_selected = True
# Clear selection
elif m_clear and keyname == 'Escape' :
self.clear_selection()
self.remove_boxes()
self.glypher.main_phrase.clear_all_errors()
# - (at front of phrase) - negate
elif m_clear and keyname == 'minus' and \
self.attached_to == self.phrased_to :
debug_print(self.attached_to.format_me())
self.insert_named('negative')
# [prime] - do prime differentiation
elif m_clear and keyname == 'apostrophe' :
self.insert_named('prime')
# Ctrl+[altkey] - Word has some characters corresponding to Word-groups
elif m_control and not m_alt and keyname in Word.alternatives_keys :
debug_print(keyname)
alts_index = Word.alternatives_keys[keyname]
                # Ensure this alternatives group has a current default index
alts_current_defaults = Word.alternatives_current_defaults
if alts_index not in alts_current_defaults :
alts_current_defaults[alts_index] = 0
self.insert_named(Word.alternatives[alts_index]\
[alts_current_defaults[alts_index]])
            # Ctrl+p - insert pi
elif m_control and not m_alt and keyname == 'p' :
self.insert_named('pi')
# Alt+( - open a matrix
elif not m_control and m_alt and keyname == 'parenleft' :
self.insert_named('matrix')
# ( - show no-user-parentheses tip
elif m_ordinary and keyname == 'parenleft' :
self.glypher.do_bracket_warning()
elif m_ordinary and keyname == 'question' :
if self.attached_to is not None :
self.glypher.do_info(self.attached_to)
# Ctrl+y - paste
elif m_control and not m_alt and not m_super and keyname == 'y' :
debug_print('x')
self.paste()
# Ctrl+( - insert function (unless named)
elif m_control and keyname == 'parenleft' :
cur = str(self.phrased_to.to_string())
debug_print(cur)
if self.phrased_to.am('word') and Parser.is_named(cur) :
a, b = self.exit_phrase()
debug_print(cur)
if a is not None :
self.change_attached(a.get_parent())
a.orphan()
if b is not None :
self.change_attached(b.get_parent())
b.orphan()
self.insert_named(cur)
self.try_suggestion()
else :
self.insert_named('function')
# ^ - superscript
elif keyname == 'asciicircum' :
self.superscript_mode()
# _ - subscript
elif keyname == 'underscore' :
self.subscript_mode()
# Switch alternatives up/down
elif m_alt and self.symbol_active and keyname == 'Up' :
if self.symbol_active :
self.next_alternative()
self.set_active(True)
else :
self.jump_row(up=True)
elif m_alt and self.symbol_active and keyname == 'Down':
if self.symbol_active :
self.prev_alternative()
self.set_active(True)
else :
self.jump_row(up=False)
# Ctrl+: - open range reference (to spreadsheet)
elif keyname == 'colon' and m_control :
ref = self.insert_named('range_reference')
# Backspace - delete current shape (usu to left)
elif m_clear and keyname == 'BackSpace' :
self.delete_shape()
            # Delete - delete rightward shape
elif m_clear and keyname == 'Delete' :
self.delete_shape_right()
# Ctrl+Right - jump right
elif m_control and not m_alt and keyname == 'Right' :
self.find_right(gobbet_mode=True)
# Right - walk right
elif m_clear and keyname == 'Right' :
self.find_right()
# Ctrl+Left - jump left
elif m_control and not m_alt and keyname == 'Left' :
self.find_left(gobbet_mode=True)
# Left - walk left
elif m_clear and keyname == 'Left' :
self.find_left()
# Down - jump down a row
elif m_clear and keyname == 'Down' :
self.jump_row(up=False)
# Up - jump up a row
elif m_clear and keyname == 'Up' :
self.jump_row(up=True)
# Tab - change attached alternative
elif m_ordinary and keyname == 'Tab':
self.set_active(True)
if self.attached_to :
self.prev_alternative() if m_shift \
else self.next_alternative()
self.set_active(False)
# FIXME : surely one of this and the previous is redundant?
elif m_ordinary and keyname == 'ISO_Left_Tab':
self.set_active(True)
if self.attached_to and self.attached_to.get_have_alternatives() :
self.next_alternative() if m_shift \
else self.prev_alternative()
self.set_active(False)
# Return - down a row
elif m_clear and keyname == 'Return':
self.jump_row(up=False)
# # - insert a shortcut combination
elif keyname == 'numbersign' :
w = GlypherSymbolShortener(widget_parent=self.container, caret=self)
self.boxes.append(w)
debug_print(w.parent)
# \ - insert an entity/phrasegroup by TeX-style name
elif keyname == 'backslash' :
w = GlypherTeXEntry(widget_parent=self.container, caret=self)
self.boxes.append(w)
debug_print(w.parent)
# $ - insert a Word with verbatim text-entry text
elif keyname == 'dollar' :
w = GlypherEntry(widget_parent=self.container, caret=self)
self.boxes.append(w)
debug_print(w.parent)
# : - insert a colon as part of a Word
elif m_ordinary and keyname == 'colon' :
self.insert_shape(':', no_split=m_super)
            # [char] - insert len-1 char (use super to avoid any breaking)
elif m_ordinary and len(keyname) == 1 :
self.insert_shape(keyname, no_split=m_super)
# [binary key] - open a binary (or nary) expression
elif m_ordinary and keyname in binary_keynames :
shape = keyname
if keyname in swap_keynames :
shape = swap_keynames[shape]
self.binary_expression_mode(shape)
# [swap keyname] - insert specific shape for this symbol
elif m_ordinary and keyname in swap_keynames :
self.insert_shape(swap_keynames[keyname])
# Right Alt - show alternatives
elif not m_control and keyname == 'Alt_R' :
self.set_active(True)
# Give up (and admit it)
else :
return False
# Make sure caret in the right position
self.reset()
# Tell caller we found a combo
return True
def return_focus(self) :
"""Bring the GTK focus back to the linked GlyphEntry widget."""
if self.glypher :
self.glypher.grab_focus()
#FIXME: stupid argument style
def grab_an_entity(self, want_rhs=True, leave_lhs=False, orphan=True) :
"""
Detach entries on the left and right (if available) and split a word if
necessary.
Returns a pair representing entities on LHS and RHS of Caret, with None
provided where appropriate.
Keyword arguments :
        want_rhs - should take the RHS? (default True)
        leave_lhs - leave the LHS in place rather than grabbing it? (default
                    False)
        orphan - orphan grabbed entities from their parents (default True)
"""
lhs = None
parent = None
rhs = None
        # Split the situation into one of three manageable possibilities (in
# order of precedence) :
# a) we are attached to a phrase (at the end) and are expected to take
# the LHS,
# b) we are attached to a non-empty phrase (at the beginning) and want
# the RHS,
# c) we are phrased to a word.
if self.attached_to is not None and self.attached_to.am('phrase') and \
self.attached_to != self.phrased_to and not leave_lhs:
lhs = self.attached_to
parent = lhs.get_up()
if orphan :
lhs.orphan()
elif want_rhs and \
self.attached_to is not None and \
self.attached_to.am('phrase') and \
self.attached_to == self.phrased_to and \
len(self.phrased_to.get_entities()) > 0 :
# word exemption allows us to avoid differentiating between the
# identical visual positions of being attached to the front of a
# word and attached to the front of a phrase beginning with a word.
if not self.phrased_to.am('word') :
rhs = self.phrased_to.get_entities()[0]
else :
rhs = self.phrased_to
# Preempt the potential disappearance of this, attached, entity.
# Ultimately breaks if we can't move off this attached_to, i.e.
# attach to anything leftwards
self.find_left()
if orphan :
rhs.orphan()
elif self.phrased_to is not None and self.phrased_to.am('word') :
# Break the word.
lhs, rhs = self.exit_phrase()
# If we didn't find anything on the left and have a redundant
# word-part on the right, use it.
if lhs is None and not want_rhs :
lhs = rhs
if orphan and lhs is not None and not leave_lhs :
lhs.orphan()
if orphan and want_rhs and rhs is not None :
rhs.orphan()
#if lhs and parent is not None :
# self.find_left()
return (lhs, rhs)
def _try_binary_expression(self, name, make_me, symbol = None) :
"""
Actually make the binary expression requested by binary_expression_mode.
"""
# Tells us where we have to end up inside
stopped_by = None
ancs = self.phrased_to.get_ancestors()
found_at_level = -1
# Search upwards for a binary expression of the same type. Stop
# searching as soon as we hit any kind of PhraseGroup that is not
# designated as a stop point for this type of expression.
for i in range(0, len(ancs)) :
ancestor = ancs[i]
if ancestor.am(name) and (symbol is None or \
ancestor.get_symbol_shape() == symbol):
found_at_level = i
bin_exp = ancs[i]
break
elif ancestor.stop_for_binary_expression(name) :
stopped_by = ancestor
break
# If we did not find such an instance of the binary expression, or it is
# using the wrong symbol, we either just make a new one where we stand
# or, if we have been stopped by a higher level PG (e.g. space array)
# and this type of expression gravitates upward (e.g. equality), we use
# the current arg.
# If we did find another instance, find out which argument we're inside
# and add a new one after it.
#FIXME: implement gravitation rather than relying on stop_..._exceptions
if found_at_level == -1 :
if stopped_by is not None and \
symbol in binary_expression_properties_for_symbol and \
binary_expression_properties_for_symbol[symbol]['gravity'] :
outside = self.attached_to != self.phrased_to
for pos in stopped_by.get_entities() :
if pos.child_active :
if len(pos.IN()) > 0 :
debug_print(pos.IN().get_entities()[0].format_me())
self.change_attached(pos.IN().get_entities()[0],
outside=outside)
break
lhs, rhs = self.grab_an_entity(want_rhs=True)
bin_exp = make_me(l=lhs, r=rhs)
self.new_phrase(bin_exp, enter=False)
return bin_exp
else :
# See BinaryExpressions.py for info on BE.poss list; essentially it
# is the outermost phrase of an operand.
pos = ancs[i-1]
for r in bin_exp.poss.iterkeys() :
if bin_exp.poss[r] == pos :
ret = bin_exp.add_operand(after=r)
lhs, rhs = self.grab_an_entity(want_rhs=True,leave_lhs=True)
if rhs :
ret.adopt(rhs)
bin_exp.set_recommending(bin_exp.poss[r])
else :
bin_exp.set_recommending(ret.IN())
return bin_exp
def binary_expression_mode(self, operator_shape) :
"""Create, or insert an additional operand into, a binary expression of
this shape.
Arguments :
operator_shape - string to be used for interstitial symbol.
"""
        # Handle '/' separately: a side fraction is a BinaryExpression (BE)
        # that doesn't take multiple operands.
if operator_shape == '/' :
lhs, rhs = self.grab_an_entity()
bie = GlypherSideFraction(self.phrased_to, lhs=lhs, rhs=rhs)
self.new_phrase(bie)
else :
# if we have a specific LHS/RHS constructor we wish to use, pass it.
symbol_for_try_func = None
if operator_shape == '+' :
name='add'
make_me = lambda l,r : GlypherAdd(self.phrased_to, lhs=l, rhs=r)
elif operator_shape == u'\u00B7' :
name='mul'
make_me = lambda l,r : GlypherMul(self.phrased_to, lhs=l, rhs=r)
elif operator_shape == '-' :
name=None
make_me = lambda l,r : \
GlypherAdd(self.phrased_to, lhs=l, rhs=r, subtract=True)
elif operator_shape == ' ' :
name='space_array'
make_me = lambda l,r : \
GlypherSpaceArray(self.phrased_to, lhs=l, rhs=r)
elif operator_shape == ';' :
name='semicolon_array'
make_me = lambda l,r : \
GlypherSemicolonArray(self.phrased_to, lhs=l, rhs=r)
elif operator_shape == ',' :
name='comma_array'
make_me = lambda l,r : \
GlypherCommaArray(self.phrased_to, lhs=l, rhs=r)
else :
name='binary_expression'
symbol_for_try_func=operator_shape
make_me = lambda l,r : \
GlypherBinaryExpression(self.phrased_to,
GlypherSymbol(self.phrased_to,
operator_shape),
lhs=l, rhs=r)
self._try_binary_expression(name, make_me,
symbol=symbol_for_try_func)
# Find any recommended site
self.try_suggestion()
def try_suggestion(self) :
"""See whether the main_phrase has been given a suggested target for
when control returns to the Caret."""
where_to = self.main_phrase.get_recommending()
        # If we have a recommendation: if it's an enterable phrase, then enter
# it, otherwise, attach to it. If not: move rightward.
if where_to is not None :
if where_to.am('phrase') and where_to.IN().is_enterable() :
self.enter_phrase(where_to.IN(), at_end=True)
else : self.change_attached(where_to)
else : self.find_right()
def superscript_mode(self) :
'''Adds superscript (a specified setup of Script)'''
base, wing = self.grab_an_entity()
superscript = GlypherScript(self.phrased_to, expression=base,
available=(False, False, True, False))
self.new_phrase(superscript, enter=False)
superscript["site2"].adopt(wing)
self.try_suggestion()
def subscript_mode(self) :
'''Adds subscript (a specified setup of Script)'''
base, wing = self.grab_an_entity()
subscript = GlypherScript(self.phrased_to, expression=base,
available=(True, False, False, False))
subscript["site0"].adopt(wing)
self.new_phrase(subscript, enter=False)
self.try_suggestion()
def insert_named(self, name, properties = None) :
"""Insert a PhraseGroup or entity by name.
Arguments :
name - string name of entity
properties - additional properties to be applied on loading (default
None)
"""
n = is_named(name)
if n == GLYPHER_IS_ENTITY :
return self.insert_named_entity(name, properties)
elif n == GLYPHER_IS_PG :
return self.insert_phrasegroup(name, properties)
else :
raise(RuntimeError(\
name + " is neither a (loaded) named entity nor a PhraseGroup"))
def insert_formula(self, name) :
"""Insert a formula by name.
Arguments :
name - string name of formula
"""
formula = make_formula(self.phrased_to, name)
self.insert_entity(formula)
self.try_suggestion()
self.emit('content-changed')
return formula
def insert_phrasegroup(self, name, properties=None, grab_entities=True) :
"""Insert a PhraseGroup by name.
Arguments :
name - string name of PhraseGroup
properties - properties to be applied on loading (default None)
grab_entities - uses grab_an_entity() to fill targets (default True)
"""
lhs = None
rhs = None
if name in binary_expression_names :
make_me = lambda l,r : \
make_phrasegroup(self.phrased_to, name, (l, r),
properties=properties)
phrasegroup = self._try_binary_expression(name, make_me,
symbol=binary_expression_names[name])
else :
if grab_entities :
lhs, rhs = self.grab_an_entity(orphan=False)
phrasegroup = make_phrasegroup(self.phrased_to, name, [lhs, rhs],
properties=properties)
self.insert_entity(phrasegroup)
self.try_suggestion()
self.emit('content-changed')
return phrasegroup
def new_word(self) :
"""Add a new word into the current phrase."""
if not self.phrased_to : return
self.phrased_to.set_active(False)
word = GlypherWord(self.phrased_to)
if len(self.phrased_to) > 0 and not self.editor_mode :
p = self.phrased_to
mult = GlypherMul(p)
p.elevate_entities(mult['pos0'])
mult['pos2'].adopt(word)
p.adopt(mult)
else :
self.phrased_to.IN().append(word, after=self.attached_to)
self.phrased_to = word
self.phrased_to.set_active(True)
self.try_suggestion()
def new_phrase(self, parent, enter = True) :
"""Add a new phrase (parent) into the current phrase."""
# Break a word first if we're in the middle of one. We don't want to add
# a phrase into it.
if self.phrased_to :
self.phrased_to.set_active(False)
if self.phrased_to.am('word') :
lhs, rhs = self.exit_phrase()
if lhs is not None :
self.change_attached(lhs)
# If the new phrase has been given as None, create a vanilla Phrase.
if parent is None :
parent = GlypherPhrase(self.phrased_to)
# Add the phrase
if self.phrased_to :
self.phrased_to.IN().append(parent, after=self.attached_to)
if enter :
self.enter_phrase(parent)
return parent
def exit_phrase(self) :
"""Move up a level, splitting a word if necessary to stay where we
are. Returns whatever is on each side (e.g. (None, GlypherPhrase))."""
if not self.phrased_to or not self.phrased_to.get_up() :
return (None, None)
self.phrased_to.set_active(False)
parent = self.phrased_to.get_up()
# Start by assuming we're at beginning or end
if self.phrased_to==self.attached_to :
sides = (None, self.phrased_to)
else :
sides = (self.phrased_to, None)
# Check whether we are in the middle of a word. We use
# get_caret_position to ensure that we have the global coordinates of
# the word's bbox.
word = self.phrased_to.OUT()
word_left, word_right = word.get_symbol_extents()
word_left = word.get_caret_position(pos=(word_left, 0))[0]
word_right = word.get_caret_position(pos=(word_right, 0))[0]
if self.phrased_to.am('word') and \
self.position[0] < word_right and self.position[0] > word_left :
# Recognize that parent is going to be a level higher now
parent = word.get_up()
# Find position of Caret relative to word before its bbox changes
wpos = self.position[0]-\
(word.config[0].bbox[0]+word.get_local_offset()[0])
# Orphan the word
word.delete()
# Use convenient GlypherWord method to generate two words
words = word.split(wpos)
# Add the words into our parent
map(parent.IN().append, words)
sides = tuple(words)
self.enter_phrase(parent, upward=True, fall_through=False)
return sides
def insert_named_entity(self, name, properties=None) :
"""Add a named entity (not PhraseGroup) to the current phrase.
Arguments :
name - string name of entity
properties - properties to be applied after loading (default None)
"""
ent = make_phrasegroup(self.phrased_to, name, operands=None,
properties=properties)
self.insert_entity(ent)
return ent
def insert_entity(self, entity) :
"""Insert a GlypherEntity at the Caret."""
if entity is None :
return
p = self.phrased_to.IN()
# Make sure we're not putting a non-symbol into a word
if p.am('word') and not entity.am('symbol') :
l = self.exit_phrase()
self.change_attached(l[0])
p = self.phrased_to
op = []
if l[0] is not None :
l[0].orphan()
op.append(l[0])
op.append(entity)
if l[1] is not None :
l[1].orphan()
op.append(l[1])
mult = GlypherMul(p)
p.adopt(mult)
mult.set_lhs(op[0])
mult.set_rhs(op[1])
if len(op) > 2 :
mult.add_operand()
mult['pos4'].adopt(op[2])
self.change_attached(entity)
elif not p.am('word') and len(p) > 0 and not self.editor_mode :
mult = GlypherMul(p)
p.elevate_entities(mult['pos0'])
mult['pos2'].adopt(entity)
p.adopt(mult)
self.change_attached(entity)
else :
self.phrased_to.IN().append(entity, after=self.attached_to)
self.try_suggestion()
self.emit('content-changed')
def insert_shape(self, shape, code = None, no_split = False) :
"""Insert a shape (string, usu. character) at the Caret."""
# Ensure that we have a word to which to add the new symbol.
if not self.phrased_to.am('word') :
self.new_word()
self.reset()
# Create symbol
symbol = GlypherSymbol(self.phrased_to, shape, code=code, text=shape)
self.insert_entity(symbol)
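    # Hedged usage sketch: a host GlyphEntry typically drives the caret like
    #     caret = GlypherCaret(main_phrase, container=gtk_container)
    #     caret.insert_shape('x')            # type a symbol into the current word
    #     caret.insert_named('fraction')     # insert a loaded PhraseGroup by name
    #     caret.binary_expression_mode('+')  # wrap/extend an addition
    # ('fraction' is an illustrative name; real names come from the XML defs.)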
def delete_shape(self) :
"""Delete the entity to which the Caret is attached."""
if self.attached_to == None :
return
# Make a note of the target entity
to_del = self.attached_to
# Find another entity before we remove the current one (assumes
# possible! However, we should always be in some enterable undeletable
# phrase, if a few levels removed)
self.find_left()
to_del_ancs = list(to_del.get_ancestors())
parent = to_del.delete()
# If we took the last symbol out of a word, delete it
#FIXME: could this be handled by deletable=3 and new method in Word?
if to_del.am('symbol') and parent.am('word') and \
len(parent.get_entities())==0 :
parent = parent.delete()
# If we've lost the main_phrase, i.e. been orphaned at some level up
# then continue back through to_del's (previous) ancestors until we find
# one that hasn't been orphaned. If the entity we're attached to has
# been moved, make sure we're phrased in the right place.
if self.attached_to.get_main_phrase() != self.main_phrase :
for anc in to_del_ancs :
if anc.get_main_phrase() == self.main_phrase :
A = anc
break
# Suppose we're attached to the end of ancestor A, unless it doesn't
# have a parent (so, by the selection process, must be
# self.main_phrase).
P = A if A.parent is None else A.parent
self.change_attached(A)
# While we've no idea whether A, P are attachable or enterable,
# resp., we can use find_in_dir to ask "if we were attached to A,
            # and phrased to P, what is the first valid Caret position to
# the left?".
A, P = self.find_in_dir(left=True, A=A, P=P)
else :
# Make sure that, if our attached_to has moved, we are correctly
# phrased by reattaching. If we can no longer attach to it, find
# left to the nearest valid point.
if not self.change_attached(self.attached_to) :
self.find_in_dir(left=True, A=self.attached_to,
P=self.attached_to.get_up())
def delete_shape_right(self) :
"""Run delete_shape for the entity to the Caret's right."""
self.find_right()
self.reset()
self.delete_shape()
def delete_from_shape(self) :
"""Delete from end of main_phrase to current shape."""
cur = self.position
ancs = self.phrased_to.get_ancestors()
self.enter_phrase(ancs[len(ancs)-1], at_end=True)
while (self.position > cur) :
self.delete_shape()
def delete_to_shape(self) :
"""Delete from beginning to current shape."""
start = self.phrased_to.get_caret_position(True)
while (self.position > start) :
self.find_right()
oldpos = self.attached_to
self.find_left()
self.delete_shape()
self.change_attached(oldpos)
self.find_left()
self.reset()
def set_active(self, active) :
"""Set the current active state of the attached entity. Usually relevant
for Alternatives."""
if self.attached_to :
self.attached_to.active = active
self.symbol_active = \
active and self.attached_to and \
self.attached_to.get_have_alternatives()
def change_attached(self, new_att, outside=False) :
"""Change the attached entity to new_att, if possible. Returns True if
successful."""
# Default return value
ret = False
# Make sure that we aren't in an active state
self.set_active(False)
# Inform currently attached entity that its services are no longer
# required
if self.attached_to is not None :
self.attached_to.set_attached(False)
self.attached_to = None
# Make sure we've been handed the outside iff. we aren't phrased to
# new_att (relevant for CompoundPhrases). If we are phrased to it, we
# want the inside, as we'll be attaching at the front.
if new_att is not None :
if self.phrased_to != new_att.IN() or outside :
new_att = new_att.OUT()
else :
new_att = new_att.IN()
if not new_att.is_attachable() :
return False
# Make the change
self.attached_to = new_att
new_att.set_attached(True)
# Update our return variable
ret = True
# Make sure we're in a phrase that will let us attach. If we can't
# go to the outside but we can go inside, do so.
if new_att != self.phrased_to or outside :
if new_att.included() and new_att.get_up().is_enterable() :
self.adopt_phrase(new_att.get_up())
elif new_att.am('phrase') and new_att.is_enterable() :
self.adopt_phrase(new_att)
else :
return False
# Update our Caret position
self.reset()
ancs = self.attached_to.get_ancestors() if self.attached_to else []
self.emit("changed-attached-to", self.attached_to.who_am_i(),
'-'.join([o.mes[len(o.mes)-1] for o in ancs]))
else :
self.attached_to = None
self.emit("changed-attached-to", 'None', 'None')
return ret
def reset (self) :
"""Ensure we're attached to something and update the position."""
# If we've been detached, hope that a recommendation has been left
if self.phrased_to.get_main_phrase() != self.main_phrase :
self.try_suggestion()
if self.phrased_to.get_main_phrase() != self.main_phrase :
debug_print('Lost main_phrase without a clue')
# Scan right from end if we've been orphaned
self.find_in_dir(left=False, A=self.main_phrase, P=self.main_phrase)
if self.attached_to is None :
self.change_attached(None)
# Update properties for current phrased_to (e.g. row_height but none at
# present)
# Set position from attached (dependent on whether we're attaching
# inside, or from phrased_to if, for some reason, we aren't attached but
# are phrased)
if self.attached_to != None :
self.position = self.attached_to.get_caret_position(\
self.attached_to==self.phrased_to)
else :
self.position = self.phrased_to.get_caret_position(True)
def find_left (self, gobbet_mode=False) :
"""Convenience function for find_in_dir."""
self.find_in_dir(True, gobbet_mode=gobbet_mode)
def find_right (self, gobbet_mode=False) :
"""Convenience function for find_in_dir."""
self.find_in_dir(False, gobbet_mode=gobbet_mode)
def enter_phrase(self, phrase, fall_through=True, at_start=False,
at_end=False, upward=False) :
"""
Find somewhere in the given Phrase to attach to, with the given
constraints. Returns True if it finds somewhere to go.
Arguments
phrase - phrase to go into
fall_through - whether we can drop down further levels to get
closest entity
at_start - go near start
at_end - go near end
upward - if needs be, move up levels
"""
# Where in the phrase should we aim for? Get a location, using the caret
# position
bbox = phrase.config[0].bbox
if at_start :
if phrase.is_enterable() :
self.adopt_phrase(phrase)
self.change_attached(phrase)
return True
pos = (bbox[0]-1,0.5*(bbox[1]+bbox[3]))
pos = phrase.get_caret_position(pos=pos)
elif at_end :
pos = (bbox[2]+1,0.5*(bbox[1]+bbox[3]))
pos = phrase.get_caret_position(pos=pos)
else :
pos = self.position
# Entity provides this useful method for finding the closest (nested)
# entity in a phrase
(dist, nearest) = phrase.find_nearest(pos, fall_through,
enterable_parent=True)
# If we didn't find anything at all, try upwards.
if dist == -1 :
if not upward : return False
while not phrase.get_up().is_enterable() : phrase = phrase.get_up()
nearest = phrase
phrase = phrase.get_up()
# If we can fall through, join the nearest Entity. If not, see if we can
# just jump in here.
if fall_through :
if nearest.am('phrase') and nearest.is_enterable() :
self.adopt_phrase(nearest)
else:
self.adopt_phrase(nearest.get_up())
elif phrase.is_enterable() :
self.adopt_phrase(phrase)
else :
return False
# Give ourselves an attached Entity
self.change_attached(nearest)
return True
def find_in_dir (self, left = True, dropdown = True, enact = True, A = None,
P = None, row = 0, gobbet_mode = False) :
"""
Tries to find the first Entity/Phrase pair we can attach to in a given
direction. Returns an attached_to, phrased_to pair; (None, None) if none
can be found.
Arguments :
left - True/False for left/right search
dropdown - whether we must stay in this level
enact - whether this is just a search mission or should we attach at
end
A - supposing that we start search from A; if None uses
currently attached Entity
        P - supposing that we start search in P; if None uses
            currently phrased Phrase
        row - index of the row in P along which to search (default 0)
        gobbet_mode - keep searching until an empty TargetPhrase or
                      PhraseGroup ("gobbet") is reached (default False)
"""
# Define an integer representing direction
d = -1 if left else 1
# Use i as our index through the current Phrase
i = 0
if A is None :
A = self.attached_to
if P is None :
P = self.phrased_to
# If not currently phrased, try starting at the very end of this
# main_phrase
if P is None :
self.find_end()
P = self.phrased_to
# Otherwise, give up.
if P is None :
return (None, None)
        # If we aren't attached to anything, let's hide out at the front of our
# (temporary) phrased_to
if A is None : A = P
# If we're inside the front of P, let the index be -1 and our collection
# of sites be all of those in P. Otherwise, we're at position index(A)
# in the visible Entities of P.
if P == A :
if not left :
i = -1
sites = P.get_row(row, only_visible=True)
else :
cs = A.config
attcfg = cs[len(cs)-1]
sites = P.get_row(attcfg.row, only_visible=True)
i = sites.index(attcfg)
# If we are at the start and we want to go left, or end and go right,
# then we'll need to go up. Otherwise, we will be going down or across.
if (P==A and left) or i+d == len(sites) :
p = P.OUT()
q = p.get_parent()
if q is None :
return (None, None)
# If we're going up, depending on the direction, we want to attach
# to the first or last config in the Entity (usually only one
# exists). We find out where our phrase is in that config and then
# go left/right
m = 0 if left or len(p.config)==1 else 1
sites_above = q.get_row(p.config[-m].row, only_visible=True)
index_above = sites_above.index(p.config[-m])
if left :
if index_above > 0 :
A = sites_above[index_above-1].get_entity()
else :
A = q
else :
A = p
P = q
else :
m = 0 if left else 1
cfg = sites[i+m]
# It may be that we wish to go inwards
if cfg.get_entity().am('phrase') :
new_P= cfg.get_entity().IN()
row = new_P.get_row(0, only_visible=True)
new_A = row[len(row)-1].get_entity().OUT() \
if left and len(row)>0 else new_P
# If our first inward guess doesn't work, try a different row
if not new_P.is_enterable() or not new_A.is_attachable() :
rows = list(new_P.rows)
rows.sort(key=abs)
for rn in rows :
r = new_P.get_row(rn, only_visible=True)
new_A = r[len(r)-1].get_entity().OUT() \
if left and len(r)>0 else new_P
A, P = self.find_in_dir(left=left, dropdown=True,
enact=False, A=new_A, P=new_P,
row=rn)
if P is not None and new_P in P.get_ancestors() :
break
new_A = A
new_P = P
A = new_A
P = new_P
# or maybe we should go to the beginning of P
elif i+d < 0 :
A = P
# or simply attach to an element
else :
newcfg = sites[i+d]
ent = newcfg.get_entity()
A = ent
# Check whether we're attached to a gobbet (empty TargetPhrase or PG)
keep_gobbet_searching = False
if gobbet_mode :
if ((P.am('target_phrase')) or \
A.am('phrase_group')) :
debug_print('found_gobbet')
else :
keep_gobbet_searching = True
# If we haven't arrived somewhere commodious, keep going.
if not A.is_attachable() or not P.is_enterable() \
or keep_gobbet_searching :
A, P = self.find_in_dir(left=left, dropdown=dropdown,\
enact=False, A=A, P=P, gobbet_mode=gobbet_mode)
# Enact if needs be
if enact and None not in (A, P) :
self.adopt_phrase(P)
self.change_attached(A)
return (A, P)
def jump_row(self, up=True) :
"""Move from current attached element in a given vertical direction."""
# Get int representing direction
d = 1 if up else -1
phrase = self.phrased_to
ent = None
phrase_prev = self.attached_to
# Keep looping until something happens...
while ent == None :
while ent == None and phrase != None :
# If we have multiple rows, or any row redirects, try switching
if len(phrase.rows)+len(phrase.get_row_redirects()) > 1 :
# Establish the row possibilities and current row index
if len(phrase.rows) > 1 :
                        # Sort a copy rather than reordering phrase.rows in place
                        rows = list(phrase.rows)
                        rows.sort()
rowi = rows.index(phrase_prev.config[0].row)
else :
rows = [0]
rowi = 0
# Keep going (away from middle row) until we hit the last
# index in either direction.
while (rowi if up else len(rows)-1-rowi) > 0 :
rowi -= d
row = rows[rowi]
# What's the nearest element to the current Caret
# position in this row?
di, s = phrase.find_nearest(self.position,
enterable_parent=True,
attachable=True,
row=row)
if di != -1 :
ent = s
if ent is not None : break
# if we didn't find anything, try the redirects
if ent is None and d in phrase.get_row_redirects() :
ent = phrase.get_row_redirects()[d]
break
                # If there's still no joy in the current phrase, let's move out a
# level and see if we've multiple available rows there
phrase_prev = phrase.OUT()
phrase = phrase.get_up()
# This should never be run as phrase=main_phrase=>ent=main_phrase,
# so phrase should never be None. Unless this is orphaned...
if ent == None :
self.enter_phrase(phrase_prev, at_start=True)
break
elif ent.am('phrase') and\
self.enter_phrase(ent, at_start=(phrase_prev==ent)) :
# if ent is a phrase, either it is phrase (well, phrase_prev
# now) and find_in_dir knows ent.expr() is enterable at the
# start, or it is completely to the left of pos, as with any
            # other entity, so we can safely attach to its end
break
# "if not a phrase that we can enter (at some depth)..."
elif ent.get_up().is_enterable() :
self.change_attached(ent)
                break
# if the parent=phrase_prev is not enterable, we need to keep
# ascending until something is (top phrase will be)
if phrase.get_up() == None : break
ent = None
def adopt_phrase(self, phrase) :
"""
        Sets our phrased_to, without considering attachment (whereas
        enter_phrase finds somewhere for you to go to). Returns True on success.
"""
if phrase != None and not phrase.is_enterable() :
return False
# Inform our current phrased_to that it doesn't need to be active
if self.phrased_to != None :
self.phrased_to.set_active(False)
# If we have a phrase set it as active. Inform our listeners of any
# upshot.
if phrase is not None :
self.phrased_to = phrase.IN()
self.phrased_to.set_active(True)
ancs = self.phrased_to.get_ancestors()
self.emit("changed-phrased-to", self.phrased_to.who_am_i(),
'-'.join([o.mes[len(o.mes)-1] for o in ancs]))
else :
self.phrased_to = None
self.emit("changed-phrased-to", "None", "None")
return True
def prev_alternative(self) :
"""Try switching to previous alternative on current Entity."""
if self.attached_to :
self.attached_to.prev_alternative()
self.try_suggestion()
self.reset()
def next_alternative(self) :
"""Try switching to previous alternative on current Entity."""
if self.attached_to :
self.attached_to.next_alternative()
self.try_suggestion()
self.reset()
def draw(self, cr) :
"""Draw the Caret and any associated boxes onto the Cairo context,
cr."""
self.reset()
# If we're interactive, we need to see a Caret
if self.interactive :
cr.save()
cr.set_line_width(2.0)
rgb_colour = (0.5,0.5,1.0)
# If we're attached to a symbol, nothing, or the front of a phrase,
# all we need is a line. Otherwise, (right emphasized) square
# brackets are required.
if self.attached_to == self.phrased_to or self.attached_to is None \
or self.attached_to.am("symbol") :
cr.move_to(*self.position)
cr.set_source_rgb(rgb_colour[0]*0.8,
rgb_colour[1]*0.8,
rgb_colour[2]*0.8)
if self.attached_to is not None :
cr.rel_line_to(0, -self.attached_to.get_height())
else :
cr.rel_line_to(0, -self.phrased_to.get_height())
cr.stroke()
else :
cr.set_source_rgb(rgb_colour[0]*0.8,
rgb_colour[1]*0.8,
rgb_colour[2]*0.8)
pos = self.attached_to.get_caret_position(\
pos=(self.attached_to.config[0].bbox[2],
self.attached_to.config[0].bbox[3]))
cr.move_to(*pos)
cr.rel_move_to(0, 5)
cr.rel_line_to(5, 0)
cr.rel_line_to(0, -self.attached_to.get_height()-10)
cr.rel_line_to(-5, 0)
cr.stroke()
cr.set_source_rgba(rgb_colour[0]*0.8,
rgb_colour[1]*0.8,
rgb_colour[2]*0.8, 0.3)
pos = self.attached_to.get_caret_position(\
pos=(self.attached_to.config[0].bbox[0],
self.attached_to.config[0].bbox[3]))
cr.move_to(*pos)
cr.rel_move_to(0, 5)
cr.rel_line_to(-5, 0)
cr.rel_line_to(0, -self.attached_to.get_height()-10)
cr.rel_line_to(5, 0)
cr.stroke()
cr.restore()
# Show a help icon if there's info
if self.attached_to is not None and \
self.attached_to.indicate_info and \
self.attached_to.get_info_text() is not None :
qn_loc = (self.position[0]+5,
self.position[1]-5-self.attached_to.get_height())
cr.save()
cr.set_font_size(0.25*self.attached_to.get_scaled_font_size())
cr.select_font_face("sans")
exts = cr.text_extents('?')
cr.rectangle(qn_loc[0] + exts[0] - 2,
qn_loc[1] + exts[1] - 2,
exts[2] + 4, exts[3] + 4)
cr.set_source_rgba(0.0, 0.0, 1.0, 0.5)
cr.stroke_preserve()
cr.set_source_rgba(1.0, 1.0, 0.5, 0.5)
cr.fill()
cr.move_to(qn_loc[0], qn_loc[1])
cr.set_source_rgb(0.8, 0, 0)
cr.show_text('?')
cr.stroke()
cr.restore()
# Draw any Caret-managed boxes (e.g. error messages or floating entries)
for b in self.boxes :
b.draw(cr)
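# Usage sketch (assumption): draw() expects a Cairo context, so a host
# widget's draw/expose handler might call it roughly like this, where
# `widget` and `caret` are hypothetical names:
#   def on_draw(widget, cr) :
#       caret.main_phrase.draw(cr)   # assuming the content draws itself first
#       caret.draw(cr)               # then the caret and its boxes on top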
# FIXME: doesn't quite work with symbols
def copy(self, cut=False, fmt='xml') :
"""Copy/cut the currently attached item."""
if not self.attached_to :
return
item = self.attached_to
# Get sympy, if possible
need_sympy = False
sy = None
if hasattr(item, 'get_sympy') :
sy = item.get_sympy()
# Format by type. Formats that go via Sympy need valid Sympy output;
# for the other formats it is not required.
if fmt == 'xml' :
# Extract relevant XML
item = ET.ElementTree(item.get_xml(\
targets={}, top=False, full=False))
elif fmt == 'mathml' :
if sy is None :
need_sympy = True
else :
item = printing.mathml(sy)
elif fmt == 'python' :
if sy is None :
need_sympy = True
else :
item = printing.python(sy)
elif fmt == 'latex' :
item = item.to_latex()
elif fmt == 'unicode' :
item = item.to_string()
elif fmt == 'text' :
item = item.get_repr()
else :
raise RuntimeError('Unrecognized format')
if need_sympy :
raise RuntimeError("Need Sympy output for this element to copy as %s" % fmt)
if cut :
# Cut the original attached entity; `item` may by now hold a formatted
# copy (an ElementTree or a string) rather than the entity itself.
self.attached_to.orphan()
self.attached_to.set_parent(None)
self.find_left()
return item
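# Examples (sketch; return types follow the branches above, `caret` is a
# hypothetical instance):
#   caret.copy(fmt='xml')    -> an ElementTree of the attached entity
#   caret.copy(fmt='latex')  -> a LaTeX string via to_latex()
#   caret.copy(fmt='python') -> Sympy's python() printing, provided get_sympy()
#                               gave a valid result (otherwise RuntimeError)
#   caret.copy(cut=True)     -> additionally orphans the attached entity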
def paste_text(self, text,
verbatim = False,
alternative = False,
xml = False) :
"""Convert plain text to entities. Defaults to sympify
Arguments:
verbatim - no interpretation
alternative - tries SymPy's Mathematica parser
xml - redirects to paste_xml
"""
# Make sure we have somewhere to put this.
if not self.phrased_to : return
# If verbatim, this should act as if alphanumeric keys spelling out
# [text] had been pressed.
if xml :
tree = ET.ElementTree(ET.XML(text))
self.paste_xml(tree)
elif verbatim :
text = unicode(text)
for t in text :
self.insert_shape(t)
else :
if alternative :
sympy_output = mathematica.mathematica(text)
else :
sympy_output = sympy.core.sympify(text)
if isinstance(sympy_output, str) :
self.aes_append_status(None, "[Couldn't parse] "+sympy_output)
else :
self.phrased_to.append(\
interpret_sympy(self.main_phrase, sympy_output))
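# Examples (sketch; `caret` is a hypothetical instance):
#   caret.paste_text("x**2 + 1")                  # parsed via sympy.sympify
#   caret.paste_text("Sin[x]", alternative=True)  # SymPy's Mathematica parser
#   caret.paste_text("abc", verbatim=True)        # inserted character by character
#   caret.paste_text(xml_string, xml=True)        # handed on to paste_xml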
def paste_xml(self, xml) :
"""Paste an Entity."""
if not self.phrased_to :
return
# Create phrasegroup from XML
pg = parse_phrasegroup(self.phrased_to, xml, top=False)
# Do the pasting
self.phrased_to.append(pg, after=self.attached_to)
# Find somewhere sensible to attach
self.try_suggestion()
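# Sketch: copy() with fmt='xml' and paste_xml() appear designed to round-trip
# an entity between carets (hypothetical instances):
#   tree = source_caret.copy(fmt='xml')
#   target_caret.paste_xml(tree)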
def set_selected(self, entity) :
"""Sets this entity as being _the_ selected entity."""
self.clear_selection()
self.add_selected(entity)
def get_selected(self, include_out_of_tree = False) :
"""Returns the selected list. Assumes you just want the ones that are
still part of the main_phrase unless you say otherwise."""
if include_out_of_tree :
return self.selected
else :
return filter(lambda s : s.get_main_phrase() == self.main_phrase,
self.selected)
def add_selected(self, entity) :
"""Identifies an additional entity as being selected."""
if entity is None :
return
if entity not in self.selected :
self.selected.append(entity)
entity.set_selected(True)
# Force a redraw
self.emit('content-changed')
def clear_selection(self) :
"""Empties the selection list."""
for entity in self.selected :
entity.set_selected(False)
self.selected = []
self.emit('content-changed')
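# Selection API sketch (assumption; `caret` and the entities are hypothetical):
#   caret.set_selected(entity_a)   # clears any old selection, selects entity_a
#   caret.add_selected(entity_b)   # multi-select
#   caret.get_selected()           # only entities still in the main phrase
#   caret.clear_selection()        # deselect everything and trigger a redraw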
| Aesthete | /Aesthete-0.4.2.tar.gz/Aesthete-0.4.2/aesthete/glypher/Caret.py | Caret.py |
from Entity import *
from Phrase import *
class GlypherSpace(GlypherEntity) :
dims = (0,0)
def set_dims(self, dims) : self.dims = dims; self.recalc_bbox()
def get_dims(self) : return self.dims
def recalc_bbox(self, quiet = False) :
self.cast()
#debug_print(self.ref_width)
#debug_print(self.get_scaled_line_size())
return GlypherEntity.recalc_bbox(self, quiet=quiet)
def get_xml(self, name = None, top = True, targets = None, full = False) :
root = GlypherEntity.get_xml(self, name, top, full=full)
root.set('width', str(self.get_dims()[0]))
root.set('height', str(self.get_dims()[1]))
return root
def cast(self) :
self.set_ref_width(self.get_dims()[0] * (self.get_scaled_line_size() if self.as_line_height_scale else 1))
self.set_ref_height(self.get_dims()[1] * (self.get_scaled_line_size() if self.as_line_height_scale else 1))
def to_string(self, mode = "string") : return unicode(" ");
def __init__(self, parent = None, dims=(0.1,0.1), as_line_height_scale = True) :
GlypherEntity.__init__(self, parent)
self.as_line_height_scale = as_line_height_scale
self.config[0].bbox[0] = 0
self.config[0].bbox[1] = 0
self.mes.append('space')
self.blank()
self.set_attachable(False)
#self.horizontal_ignore = True
#self.vertical_ignore = True
self.set_dims(dims)
def draw(self, cr) :
if not self.get_visible() : return
if g.show_rectangles or not self.get_blank() :
cr.save()
cr.set_source_rgba(0,0,0,0.2)
cr.rectangle(self.config[0].bbox[0]+self.padding[0], self.config[0].bbox[1]+self.padding[1],
self.get_ref_width() - self.padding[2], self.get_ref_height()-self.padding[3])
cr.stroke()
cr.restore()
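# Sketch: dims are fractions of the scaled line size by default, or absolute
# units when as_line_height_scale is False (parent phrase is hypothetical):
#   thin_gap = GlypherSpace(parent, dims=(0.1, 0.1))
#   abs_gap = GlypherSpace(parent, dims=(5, 5), as_line_height_scale=False)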
class GlypherVerticalSpacer(GlypherEntity) :
length = None
thickness = None
def set_length(self, length) : self.set_p('length', length)
def get_length(self) : return self.get_p('length')
def set_thickness(self, thickness) : self.set_p('thickness', thickness)
def get_thickness(self) : return self.get_p('thickness')
def set_scaling(self, scaling) : self.set_p('scaling', scaling)
def get_scaling(self) : return self.get_p('scaling')
def get_subtract_other_children(self) : return self.get_p('subtract_other_children')
def set_subtract_other_children(self, subtract_other_children) :
return self.set_p('subtract_other_children', subtract_other_children)
#def children_check(self, parent_change=False, quiet = False) :
# self.recalc_bbox(quiet=quiet)
tied_to = None
def to_string(self, mode = "string") : return unicode(" ");
def __init__(self, parent = None, tied_to = None, scaling = 1.0,
subtract_other_children=False) :
GlypherEntity.__init__(self, parent)
self.add_properties({'tied_to': None,
'subtract_other_children': False})
self.set_always_recalc(True)
self.mes.append('vertical_spacer')
self.config[0].bbox[0] = 0
self.config[0].bbox[1] = 0
self.set_attachable(False)
self.set_scaling(scaling)
self.set_horizontal_ignore(True)
self.set_subtract_other_children(subtract_other_children)
self.set_tied_to(tied_to)
self.recalc_bbox()
def set_tied_to(self, entity) :
self.tied_to = entity
if self.tied_to == self.get_p('tied_to') :
return
self.set_p('tied_to', entity)
self.recalc_bbox()
def recalc_bbox(self, quiet = False) :
self.tied_to = self.get_p('tied_to')
self.cast()
chg = GlypherEntity.recalc_bbox(self, quiet=quiet)
return chg
def cast(self) :
if self.included() and self.tied_to :
rh = self.tied_to.get_basebox_height() * self.get_scaling()
if self.get_subtract_other_children() :
for e in self.get_parent().entities :
if e == self : continue
rh -= e.get_height()
else :
rh = 10
if rh < 0 : rh = 0
self.set_ref_height(rh)
self.set_ref_width(1 + self.padding[0] + self.padding[2])
def draw(self, cr) :
if not self.get_visible() or self.get_blank() or not g.show_rectangles : return
cr.save()
cr.set_source_rgb(*self.get_rgb_colour())
cr.rectangle(self.config[0].bbox[0]+self.padding[0], self.config[0].bbox[1]+self.padding[1],
self.get_ref_width() - self.padding[2], self.get_ref_height()-self.padding[3])
#debug_print(self.config[0].bbox)
cr.fill()
cr.restore()
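# Sketch: a vertical spacer matches the basebox height of the entity it is
# tied to, optionally scaled (names here are hypothetical):
#   spacer = GlypherVerticalSpacer(parent, tied_to=numerator, scaling=0.5)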
class GlypherHorizontalLine(GlypherEntity) :
tied_to = None
def set_length(self, length) : self.set_p('length', length)
def get_length(self) : return self.get_p('length')
def set_thickness(self, thickness) : self.set_p('thickness', thickness)
def get_thickness(self) : return self.get_p('thickness')
#def children_check(self, parent_change=False, quiet = False) :
# self.recalc_bbox(quiet=quiet)
def to_string(self, mode = "string") : return "_";
def __init__(self, parent = None, length = None, thickness = 0.05, tied_to = None, length_calc = None, thickness_too = False) :
GlypherEntity.__init__(self, parent)
self.add_properties({'thickness_too': False, 'tied_to': None})
self.set_always_recalc(True)
self.mes.append('horizontal_line')
self.config[0].bbox[0] = 0
self.config[0].bbox[3] = thickness
self.set_ref_width(10 if length is None else length)
self.set_ref_height(thickness * self.get_scaled_font_size())
self.set_attachable(False)
self.set_length(length)
self.set_thickness(thickness)
self.set_vertical_ignore(True)
self.set_p('thickness_too', thickness_too)
self.length_calc = length_calc
self.set_tied_to(tied_to)
self.recalc_bbox()
def recalc_bbox(self, quiet = False) :
self.set_tied_to(self.get_p('tied_to'))
chg1 = self.cast()
chg2 = GlypherEntity.recalc_bbox(self, quiet=quiet)
return chg1 or chg2
def set_tied_to(self, entity) :
self.tied_to = entity
if self.tied_to == self.get_p('tied_to') :
return
self.set_p('tied_to', entity)
debug_print(entity.format_me() if entity is not None else None)
self.recalc_bbox()
def cast(self) :
#self.ref_bbox[0] = self.bbox[0]
#self.ref_bbox[1] = self.bbox[1]
old_rw = self.get_ref_width()
old_rh = self.get_ref_height()
thickness = self.get_thickness()*self.get_scaled_font_size()
if self.tied_to is not None :
self.set_length(self.tied_to.get_width())
if self.get_p('thickness_too') :
thickness = self.get_thickness()*self.tied_to.get_height()
if self.get_length() is not None :
rw = self.get_length()
elif self.length_calc is not None :
rw = self.length_calc()
else :
rw = 0.3*self.get_scaled_font_size()
rw += self.padding[0] + self.padding[2]
self.set_ref_width(rw)
self.set_ref_height(thickness + self.padding[1] + self.padding[3])
return not fc(old_rw, self.get_ref_width()) or not fc(old_rh, self.get_ref_height())
def draw(self, cr) :
if not self.get_visible() or self.get_blank() : return
cr.save()
cr.set_source_rgb(*self.get_rgb_colour())
cr.rectangle(self.config[0].bbox[0]+self.padding[0], self.config[0].bbox[1]+self.padding[1],
self.get_ref_width() - self.padding[0] - self.padding[2], self.get_ref_height()-self.padding[1]-self.padding[3])
cr.fill()
cr.restore()
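# Sketch: a horizontal rule that tracks another entity's width, e.g. a
# fraction-bar-like use (names here are hypothetical):
#   bar = GlypherHorizontalLine(parent, tied_to=numerator, thickness=0.05)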
# FIXME: GlypherVerticalLine and GlypherHorizontalLine should be one class
class GlypherVerticalLine(GlypherEntity) :
tied_to = None
def set_length(self, length) : self.set_p('length', length)
def get_length(self) : return self.get_p('length')
def set_thickness(self, thickness) : self.set_p('thickness', thickness)
def get_thickness(self) : return self.get_p('thickness')
#def children_check(self, parent_change=False, quiet = False) :
# self.recalc_bbox(quiet=quiet)
def to_string(self, mode = "string") : return "|";
def __init__(self, parent = None, length = None, thickness = 0.05, tied_to = None, length_calc = None, thickness_too = False) :
GlypherEntity.__init__(self, parent)
self.add_properties({'thickness_too': False, 'tied_to': None})
self.set_always_recalc(True)
self.mes.append('vertical_line')
self.config[0].bbox[1] = 0
self.config[0].bbox[2] = thickness
self.set_ref_height(10 if length is None else length)
self.set_ref_width(thickness * self.get_scaled_font_size())
self.set_length(length)
self.set_thickness(thickness)
self.set_horizontal_ignore(True)
self.set_p('thickness_too', thickness_too)
self.length_calc = length_calc
self.set_tied_to(tied_to)
self.recalc_bbox()
def recalc_bbox(self, quiet = False) :
self.set_tied_to(self.get_p('tied_to'))
chg1 = self.cast()
chg2 = GlypherEntity.recalc_bbox(self, quiet=quiet)
return chg1 or chg2
def set_tied_to(self, entity) :
self.tied_to = entity
if self.tied_to == self.get_p('tied_to') :
return
self.set_p('tied_to', entity)
debug_print(entity.format_me() if entity is not None else None)
self.recalc_bbox()
def cast(self) :
#self.ref_bbox[0] = self.bbox[0]
#self.ref_bbox[1] = self.bbox[1]
old_rh = self.get_ref_height()
old_rw = self.get_ref_width()
thickness = self.get_thickness()*self.get_scaled_font_size()
if self.tied_to is not None :
self.set_length(self.tied_to.get_height())
if self.get_p('thickness_too') :
thickness = self.get_thickness()*self.tied_to.get_width()
if self.get_length() is not None :
rw = self.get_length()
elif self.length_calc is not None :
rw = self.length_calc()
else :
rw = 0.3*self.get_scaled_font_size()
rw += self.padding[1] + self.padding[3]
self.set_ref_height(rw)
self.set_ref_width(thickness + self.padding[0] + self.padding[2])
return not fc(old_rh, self.get_ref_height()) or not fc(old_rw,
self.get_ref_width())
def draw(self, cr) :
if not self.get_visible() or self.get_blank() : return
cr.save()
cr.set_source_rgb(*self.get_rgb_colour())
cr.rectangle(self.config[0].bbox[0]+self.padding[0], self.config[0].bbox[1]+self.padding[1],
self.get_ref_width() - self.padding[0] - self.padding[2], self.get_ref_height()-self.padding[1]-self.padding[3])
cr.fill()
cr.restore()
g.phrasegroups['vertical_spacer'] = GlypherVerticalSpacer
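# Registering the class under 'vertical_spacer' presumably lets other code
# (e.g. the XML loader) look the phrasegroup up by name; the line classes
# above are not registered here. Sketch of such a lookup (assumed usage,
# `parent` is hypothetical):
#   cls = g.phrasegroups['vertical_spacer']
#   spacer = cls(parent)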
| Aesthete | /Aesthete-0.4.2.tar.gz/Aesthete-0.4.2/aesthete/glypher/Spacer.py | Spacer.py |