code
string | signature
string | docstring
string | loss_without_docstring
float64 | loss_with_docstring
float64 | factor
float64 |
---|---|---|---|---|---|
repo = require_repo(directory)
plugins = get_value(repo, 'plugins')
if not plugins or not isinstance(plugins, dict):
return None
return plugins.keys() | def list_plugins(directory=None) | Gets a list of the installed themes. | 5.305757 | 5.151694 | 1.029905 |
repo = require_repo(directory)
plugins = get_value(repo, 'plugins', expect_type=dict)
if plugin in plugins:
return False
plugins[plugin] = {}
set_value(repo, 'plugins', plugins)
return True | def add_plugin(plugin, directory=None) | Adds the specified plugin. This returns False if it was already added. | 4.097364 | 3.544657 | 1.155927 |
repo = require_repo(directory)
plugins = get_value(repo, 'plugins')
return plugins.get(plugin) if isinstance(plugins, dict) else None | def get_plugin_settings(plugin, directory=None) | Gets the settings for the specified plugin. | 5.781232 | 5.266206 | 1.097798 |
directory = directory or '.'
host = host or '127.0.0.1'
port = port or 5000
# TODO: admin interface
# TODO: use cache_only to keep from modifying output directly
out_directory = build(directory)
# Serve generated site
os.chdir(out_directory)
Handler = SimpleHTTPServer.SimpleHTTPRequestHandler
httpd = SocketServer.TCPServer((host, port), Handler)
print ' * Serving on http://%s:%s/' % (host, port)
httpd.serve_forever() | def preview(directory=None, host=None, port=None, watch=True) | Runs a local server to preview the working directory of a repository. | 4.183256 | 4.144821 | 1.009273 |
if directory and not os.path.isdir(directory):
raise ValueError('Directory not found: ' + repr(directory))
repo = repo_path(directory)
if not os.path.isdir(repo):
raise RepositoryNotFoundError(directory)
return repo | def require_repo(directory=None) | Checks for a presentation repository and raises an exception if not found. | 3.262365 | 2.807144 | 1.162165 |
repo = repo_path(directory)
if os.path.isdir(repo):
raise RepositoryAlreadyExistsError(directory, repo)
# Initialize repository with default template
shutil.copytree(default_template_path, repo)
message = '"Default presentation content."'
subprocess.call(['git', 'init', '-q', repo])
subprocess.call(['git', 'add', '.'], cwd=repo)
subprocess.call(['git', 'commit', '-q', '-m', message], cwd=repo)
return repo | def init(directory=None) | Initializes a Gitpress presentation repository at the specified directory. | 4.059739 | 3.583848 | 1.132788 |
# Defaults
if includes is None:
includes = []
if excludes is None:
excludes = []
# Transform glob patterns to regular expressions
includes_pattern = r'|'.join([fnmatch.translate(x) for x in includes]) or r'$.'
excludes_pattern = r'|'.join([fnmatch.translate(x) for x in excludes]) or r'$.'
includes_re = re.compile(includes_pattern)
excludes_re = re.compile(excludes_pattern)
def included(root, name):
full_path = os.path.join(root, name)
# Explicitly included files takes priority
if includes_re.match(full_path):
return True
# Ignore special and excluded files
return (not specials_re.match(name)
and not excludes_re.match(full_path))
# Get a filtered list of paths to be built
for root, dirs, files in os.walk(path):
dirs[:] = [d for d in dirs if included(root, d)]
files = [f for f in files if included(root, f)]
for f in files:
yield os.path.relpath(os.path.join(root, f), path) | def iterate_presentation_files(path=None, excludes=None, includes=None) | Iterates the repository presentation files relative to 'path',
not including themes. Note that 'includes' take priority. | 2.320955 | 2.30233 | 1.00809 |
try:
with open(path, 'r') as f:
return json.load(f, object_pairs_hook=OrderedDict)
except IOError as ex:
if ex != errno.ENOENT:
raise
return {} | def read_config_file(path) | Returns the configuration from the specified file. | 2.759351 | 2.701632 | 1.021365 |
return write_config_file(os.path.join(repo_directory, config_file), config) | def write_config(repo_directory, config) | Writes the specified configuration to the presentation repository. | 3.62955 | 4.567416 | 0.794662 |
contents = json.dumps(config, indent=4, separators=(',', ': ')) + '\n'
try:
with open(path, 'w') as f:
f.write(contents)
return True
except IOError as ex:
if ex != errno.ENOENT:
raise
return False | def write_config_file(path, config) | Writes the specified configuration to the specified file. | 2.584683 | 2.549666 | 1.013734 |
config = read_config(repo_directory)
value = config.get(key)
if expect_type and value is not None and not isinstance(value, expect_type):
raise ConfigSchemaError('Expected config variable %s to be type %s, got %s'
% (repr(key), repr(expect_type), repr(type(value))))
return value | def get_value(repo_directory, key, expect_type=None) | Gets the value of the specified key in the config file. | 2.666036 | 2.663738 | 1.000863 |
if value is None:
raise ValueError('Argument "value" must not be None.')
# Read values and do nothing if not making any changes
config = read_config(repo_directory)
old = config.get(key)
if old == value:
return old
# Check schema
if strict and old is not None and not isinstance(old, type(value)):
raise ConfigSchemaError('Expected config variable %s to be type %s, got %s'
% (repr(key), repr(type(value)), repr(type(old))))
# Set new value and save results
config[key] = value
write_config(repo_directory, config)
return old | def set_value(repo_directory, key, value, strict=True) | Sets the value of a particular key in the config file. This has no effect when setting to the same value. | 3.484492 | 3.449072 | 1.010269 |
content_directory = content_directory or '.'
out_directory = os.path.abspath(out_directory or default_out_directory)
repo = require_repo(content_directory)
# Prevent user mistakes
if out_directory == '.':
raise ValueError('Output directory must be different than the source directory: ' + repr(out_directory))
if os.path.basename(os.path.relpath(out_directory, content_directory)) == '..':
raise ValueError('Output directory must not contain the source directory: ' + repr(out_directory))
# TODO: read config
# TODO: use virtualenv
# TODO: init and run plugins
# TODO: process with active theme
# Collect and copy static files
files = presentation_files(repo)
remove_directory(out_directory)
copy_files(files, out_directory, repo)
return out_directory | def build(content_directory=None, out_directory=None) | Builds the site from its content and presentation repository. | 4.681705 | 4.493319 | 1.041926 |
if argv is None:
argv = sys.argv[1:]
usage = '\n\n\n'.join(__doc__.split('\n\n\n')[1:])
version = 'Gitpress ' + __version__
# Parse options
args = docopt(usage, argv=argv, version=version)
# Execute command
try:
return execute(args)
except RepositoryNotFoundError as ex:
error('No Gitpress repository found at', ex.directory) | def main(argv=None) | The entry point of the application. | 4.721552 | 4.641898 | 1.01716 |
def info(*message):
if not args['-q']:
print ' '.join(map(str, message))
if args['init']:
try:
repo = init(args['<directory>'])
info('Initialized Gitpress repository in', repo)
except RepositoryAlreadyExistsError as ex:
info('Gitpress repository already exists in', ex.repo)
return 0
if args['preview']:
directory, address = resolve(args['<directory>'], args['<address>'])
host, port = split_address(address)
if address and not host and not port:
error('Invalid address', repr(address))
return preview(directory, host=host, port=port)
if args['build']:
require_repo(args['<directory>'])
info('Building site', os.path.abspath(args['<directory>'] or '.'))
try:
out_directory = build(args['<directory>'], args['--out'])
except NotADirectoryError as ex:
error(ex)
info('Site built in', os.path.abspath(out_directory))
return 0
if args['themes']:
theme = args['<theme>']
if args['use']:
try:
switched = use_theme(theme)
except ConfigSchemaError as ex:
error('Could not modify config:', ex)
return 1
except ThemeNotFoundError as ex:
error('Theme %s is not currently installed.' % repr(theme))
return 1
info('Switched to theme %s' if switched else 'Already using %s' % repr(theme))
elif args['install']:
# TODO: implement
raise NotImplementedError()
elif args['uninstall']:
# TODO: implement
raise NotImplementedError()
else:
themes = list_themes()
if themes:
info('Installed themes:')
info(' ' + '\n '.join(themes))
else:
info('No themes installed.')
return 0
if args['plugins']:
plugin = args['<plugin>']
if args['add']:
try:
added = add_plugin(plugin)
except ConfigSchemaError as ex:
error('Could not modify config:', ex)
return 1
info(('Added plugin %s' if added else
'Plugin %s has already been added.') % repr(plugin))
elif args['remove']:
settings = get_plugin_settings(plugin)
if not args['-f'] and settings and isinstance(settings, dict):
warning = 'Plugin %s contains settings. Remove?' % repr(plugin)
if not yes_or_no(warning):
return 0
try:
removed = remove_plugin(plugin)
except ConfigSchemaError as ex:
error('Error: Could not modify config:', ex)
info(('Removed plugin %s' if removed else
'Plugin %s has already been removed.') % repr(plugin))
else:
plugins = list_plugins()
info('Installed plugins:\n ' + '\n '.join(plugins) if plugins else
'No plugins installed.')
return 0
return 1 | def execute(args) | Executes the command indicated by the specified parsed arguments. | 2.691783 | 2.716704 | 0.990827 |
if argv is None:
argv = sys.argv[1:]
argv.insert(0, 'preview')
return main(argv) | def gpp(argv=None) | Shortcut function for running the previewing command. | 3.874627 | 2.658748 | 1.457312 |
repo = require_repo(directory)
path = os.path.join(repo, themes_dir)
return os.listdir(path) if os.path.isdir(path) else None | def list_themes(directory=None) | Gets a list of the installed themes. | 4.6133 | 4.283348 | 1.077031 |
repo = require_repo(directory)
if theme not in list_themes(directory):
raise ThemeNotFoundError(theme)
old_theme = set_value(repo, 'theme', theme)
return old_theme != theme | def use_theme(theme, directory=None) | Switches to the specified theme. This returns False if switching to the already active theme. | 6.198123 | 5.568025 | 1.113164 |
if value.value:
_assert_is_type('fill_opacity.value', value.value,
(float, int))
if value.value < 0 or value.value > 1:
raise ValueError(
'fill_opacity must be between 0 and 1') | def fill_opacity(value) | ValueRef : int or float, opacity of the fill (0 to 1) | 3.691695 | 3.429751 | 1.076374 |
if value.value:
_assert_is_type('stroke_width.value', value.value, int)
if value.value < 0:
raise ValueError('stroke width cannot be negative') | def stroke_width(value) | ValueRef : int, width of the stroke in pixels | 5.167988 | 4.787665 | 1.079438 |
if value.value:
_assert_is_type('stroke_opacity.value', value.value,
(float, int))
if value.value < 0 or value.value > 1:
raise ValueError(
'stroke_opacity must be between 0 and 1') | def stroke_opacity(value) | ValueRef : number, opacity of the stroke (0 to 1) | 3.712029 | 3.400649 | 1.091565 |
if value.value:
_assert_is_type('size.value', value.value, int)
if value.value < 0:
raise ValueError('size cannot be negative') | def size(value) | ValueRef : number, area of the mark in pixels
This is the total area of a symbol. For example, a value of 500 and
a ``shape`` of ``'circle'`` would result in circles with an area of
500 square pixels. Only used if ``type`` is ``'symbol'``. | 5.666907 | 6.045386 | 0.937394 |
if value.value:
_assert_is_type('shape.value', value.value, str_types)
if value.value not in PropertySet._valid_shapes:
raise ValueError(value.value + ' is not a valid shape') | def shape(value) | ValueRef : string, type of symbol to use
Possible values are ``'circle'`` (default), ``'square'``,
``'cross'``, ``'diamond'``, ``'triangle-up'``, and
``'triangle-down'``. Only used if ``type`` is ``'symbol'``. | 6.191257 | 6.724831 | 0.920656 |
if value.value:
_assert_is_type('shape.value', value.value, str_types)
if value.value not in PropertySet._valid_methods:
raise ValueError(value.value + ' is not a valid method') | def interpolate(value) | ValueRef : string, line interpolation method to use
Possible values for ``area`` types are `'linear'`,
``'step-before'``, ``'step-after'``, ``'basis'``, ``'basis-open'``,
``'cardinal'``, ``'cardinal-open'``, ``'monotone'``. ``line`` types
have all values for ``area`` as well as ``'basis-closed'``,
``'bundle'``, and ``'cardinal-closed'``.
Only used if ``type`` is ``'area'`` or ``'line'``. | 9.401546 | 9.168451 | 1.025424 |
if value.value:
_assert_is_type('shape.value', value.value, str_types)
if value.value not in PropertySet._valid_align:
raise ValueError(value.value + ' is not a valid alignment') | def align(value) | ValueRef : string, horizontal alignment of mark
Possible values are ``'left'``, ``'right'``, and ``'center'``. Only
used if ``type`` is ``'image'`` or ``'text'``. | 8.06291 | 8.081691 | 0.997676 |
if value.value:
_assert_is_type('shape.value', value.value, str_types)
if value.value not in PropertySet._valid_baseline:
raise ValueError(value.value + ' is not a valid baseline') | def baseline(value) | ValueRef : string, vertical alignment of mark
Possible values are ``'top'``, ``'middle'``, and ``'bottom'``. Only
used if ``type`` is ``'image'`` or ``'text'``. | 7.792835 | 7.802797 | 0.998723 |
valid_transforms = frozenset([
'array', 'copy', 'cross', 'facet', 'filter',
'flatten', 'fold', 'formula', 'slice', 'sort', 'stats',
'truncate', 'unique', 'window', 'zip', 'force', 'geo', 'geopath',
'link', 'pie', 'stack', 'treemap', 'wordcloud'
])
if value not in valid_transforms:
raise ValueError('Transform type must be'
' one of {0}'.format(str(valid_transforms))) | def type(value) | string: property name in which to store the computed transform
value.
The valid transform types are as follows:
'array', 'copy', 'cross', 'facet', 'filter', 'flatten', 'fold',
'formula', 'slice', 'sort', 'stats', 'truncate', 'unique', 'window',
'zip', 'force', 'geo', 'geopath', 'link', 'pie', 'stack', 'treemap',
'wordcloud' | 7.034925 | 1.939279 | 3.627599 |
'''Data type check for automatic import'''
if iter_idx:
return Data.from_mult_iters(idx=iter_idx, **data)
if pd:
if isinstance(data, (pd.Series, pd.DataFrame)):
return Data.from_pandas(data, grouped=grouped, columns=columns,
key_on=key_on)
if isinstance(data, (list, tuple, dict)):
return Data.from_iter(data)
else:
raise ValueError('This data type is not supported by Vincent.') | def data_type(data, grouped=False, columns=None, key_on='idx', iter_idx=None) | Data type check for automatic import | 4.62525 | 3.946024 | 1.172129 |
self.data['table'] = Data.keypairs(
self.raw_data, columns=[self.data_key, column])
domain = [Data.serialize(self.raw_data[column].min()),
Data.serialize(self.raw_data[column].quantile(0.95))]
scale = Scale(name='color', type='quantize', domain=domain,
range=brews[brew])
self.scales['color'] = scale | def rebind(self, column=None, brew='GnBu') | Bind a new column to the data map
Parameters
----------
column: str, default None
Pandas DataFrame column name
brew: str, default None
Color brewer abbreviation. See colors.py | 5.925237 | 6.173779 | 0.959742 |
if len(value) != 2:
raise ValueError('viewport must have 2 dimensions')
for v in value:
_assert_is_type('viewport dimension', v, int)
if v < 0:
raise ValueError('viewport dimensions cannot be negative') | def viewport(value) | 2-element list of ints : Dimensions of the viewport
The viewport is a bounding box containing the visualization. If the
dimensions of the visualization are larger than the viewport, then
the visualization will be scrollable.
If undefined, then the full visualization is shown. | 3.679985 | 4.034875 | 0.912044 |
if isinstance(value, dict):
required_keys = ['top', 'left', 'right', 'bottom']
for key in required_keys:
if key not in value:
error = ('Padding must have keys "{0}".'
.format('", "'.join(required_keys)))
raise ValueError(error)
_assert_is_type('padding: {0}'.format(key), value[key], int)
if value[key] < 0:
raise ValueError('Padding cannot be negative.')
elif isinstance(value, int):
if value < 0:
raise ValueError('Padding cannot be negative.')
else:
if value not in ("auto", "strict"):
raise ValueError('Padding can only be auto or strict.') | def padding(value) | int or dict : Padding around visualization
The padding defines the distance between the edge of the
visualization canvas to the visualization box. It does not count as
part of the visualization width/height. Values cannot be negative.
If a dict, padding must have all keys ``''top'``, ``'left'``,
``'right'``, and ``'bottom'`` with int values. | 2.619602 | 2.507776 | 1.044592 |
for i, entry in enumerate(value):
_assert_is_type('data[{0}]'.format(i), entry, Data) | def data(value) | list or KeyedList of ``Data`` : Data definitions
This defines the data being visualized. See the :class:`Data` class
for details. | 8.582577 | 9.572338 | 0.896602 |
for i, entry in enumerate(value):
_assert_is_type('scales[{0}]'.format(i), entry, Scale) | def scales(value) | list or KeyedList of ``Scale`` : Scale definitions
Scales map the data from the domain of the data to some
visualization space (such as an x-axis). See the :class:`Scale`
class for details. | 7.340902 | 8.663894 | 0.847298 |
for i, entry in enumerate(value):
_assert_is_type('axes[{0}]'.format(i), entry, Axis) | def axes(value) | list or KeyedList of ``Axis`` : Axis definitions
Axes define the locations of the data being mapped by the scales.
See the :class:`Axis` class for details. | 7.187729 | 8.182513 | 0.878426 |
for i, entry in enumerate(value):
_assert_is_type('marks[{0}]'.format(i), entry, Mark) | def marks(value) | list or KeyedList of ``Mark`` : Mark definitions
Marks are the visual objects (such as lines, bars, etc.) that
represent the data in the visualization space. See the :class:`Mark`
class for details. | 7.105653 | 8.001483 | 0.888042 |
for i, entry in enumerate(value):
_assert_is_type('legends[{0}]'.format(i), entry, Legend) | def legends(value) | list or KeyedList of ``Legends`` : Legend definitions
Legends visualize scales, and take one or more scales as their input.
They can be customized via a LegendProperty object. | 7.386427 | 7.934309 | 0.930948 |
keys = self.axes.get_keys()
if keys:
for key in keys:
if key == 'x':
self.axes[key].title = x
elif key == 'y':
self.axes[key].title = y
else:
self.axes.extend([Axis(type='x', title=x),
Axis(type='y', title=y)])
return self | def axis_titles(self, x=None, y=None) | Apply axis titles to the figure.
This is a convenience method for manually modifying the "Axes" mark.
Parameters
----------
x: string, default 'null'
X-axis title
y: string, default 'null'
Y-axis title
Example
-------
>>>vis.axis_titles(y="Data 1", x="Data 2") | 2.938781 | 3.798805 | 0.773607 |
if not getattr(axis, 'properties'):
axis.properties = AxisProperties()
for prop in ['ticks', 'axis', 'major_ticks', 'minor_ticks',
'title', 'labels']:
setattr(axis.properties, prop, PropertySet()) | def _set_axis_properties(self, axis) | Set AxisProperties and PropertySets | 4.816238 | 3.728718 | 1.291661 |
for prop in ['ticks', 'axis', 'major_ticks', 'minor_ticks', 'title',
'labels']:
prop_set = getattr(axis.properties, prop)
if color and prop in ['title', 'labels']:
prop_set.fill = ValueRef(value=color)
elif color and prop in ['axis', 'major_ticks', 'minor_ticks',
'ticks']:
prop_set.stroke = ValueRef(value=color) | def _set_all_axis_color(self, axis, color) | Set axis ticks, title, labels to given color | 3.446602 | 3.277671 | 1.05154 |
if self.axes:
axis = [a for a in self.axes if a.scale == axis][0]
self._set_axis_properties(axis)
self._set_all_axis_color(axis, color)
if title_size:
axis.properties.title.font_size = ValueRef(value=title_size)
if label_angle:
axis.properties.labels.angle = ValueRef(value=label_angle)
if label_align:
axis.properties.labels.align = ValueRef(value=label_align)
if title_offset:
axis.properties.title.dy = ValueRef(value=title_offset)
else:
raise ValueError('This Visualization has no axes!') | def _axis_properties(self, axis, title_size, title_offset, label_angle,
label_align, color) | Assign axis properties | 2.655332 | 2.596656 | 1.022597 |
if self.axes:
for axis in self.axes:
self._set_axis_properties(axis)
self._set_all_axis_color(axis, color)
if title_size:
ref = ValueRef(value=title_size)
axis.properties.title.font_size = ref
else:
raise ValueError('This Visualization has no axes!')
return self | def common_axis_properties(self, color=None, title_size=None) | Set common axis properties such as color
Parameters
----------
color: str, default None
Hex color str, etc | 4.884216 | 5.870546 | 0.831987 |
self._axis_properties('x', title_size, title_offset, label_angle,
label_align, color)
return self | def x_axis_properties(self, title_size=None, title_offset=None,
label_angle=None, label_align=None, color=None) | Change x-axis title font size and label angle
Parameters
----------
title_size: int, default None
Title size, in px
title_offset: int, default None
Pixel offset from given axis
label_angle: int, default None
label angle in degrees
label_align: str, default None
Label alignment
color: str, default None
Hex color | 2.551095 | 4.358789 | 0.585276 |
self._axis_properties('y', title_size, title_offset, label_angle,
label_align, color)
return self | def y_axis_properties(self, title_size=None, title_offset=None,
label_angle=None, label_align=None, color=None) | Change y-axis title font size and label angle
Parameters
----------
title_size: int, default None
Title size, in px
title_offset: int, default None
Pixel offset from given axis
label_angle: int, default None
label angle in degrees
label_align: str, default None
Label alignment
color: str, default None
Hex color | 2.531944 | 4.292426 | 0.589863 |
self.legends.append(Legend(title=title, fill=scale, offset=0,
properties=LegendProperties()))
if text_color:
color_props = PropertySet(fill=ValueRef(value=text_color))
self.legends[0].properties.labels = color_props
self.legends[0].properties.title = color_props
return self | def legend(self, title=None, scale='color', text_color=None) | Convience method for adding a legend to the figure.
Important: This defaults to the color scale that is generated with
Line, Area, Stacked Line, etc charts. For bar charts, the scale ref is
usually 'y'.
Parameters
----------
title: string, default None
Legend Title
scale: string, default 'color'
Scale reference for legend
text_color: str, default None
Title and label color | 4.888274 | 5.362185 | 0.91162 |
if brew:
self.scales['color'].range = brews[brew]
elif range_:
self.scales['color'].range = range_
return self | def colors(self, brew=None, range_=None) | Convenience method for adding color brewer scales to charts with a
color scale, such as stacked or grouped bars.
See the colors here: http://colorbrewer2.org/
Or here: http://bl.ocks.org/mbostock/5577023
This assumes that a 'color' scale exists on your chart.
Parameters
----------
brew: string, default None
Color brewer scheme (BuGn, YlOrRd, etc)
range: list, default None
List of colors. Ex: ['#ac4142', '#d28445', '#f4bf75'] | 4.286402 | 4.656455 | 0.920529 |
super(self.__class__, self).validate()
required_attribs = ('data', 'scales', 'axes', 'marks')
for elem in required_attribs:
attr = getattr(self, elem)
if attr:
# Validate each element of the sets of data, etc
for entry in attr:
entry.validate()
names = [a.name for a in attr]
if len(names) != len(set(names)):
raise ValidationError(elem + ' has duplicate names')
elif require_all:
raise ValidationError(
elem + ' must be defined for valid visualization') | def validate(self, require_all=True, scale='colors') | Validate the visualization contents.
Parameters
----------
require_all : boolean, default True
If True (default), then all fields ``data``, ``scales``,
``axes``, and ``marks`` must be defined. The user is allowed to
disable this if the intent is to define the elements
client-side.
If the contents of the visualization are not valid Vega, then a
:class:`ValidationError` is raised. | 4.64601 | 3.93832 | 1.179693 |
vis_id = str(uuid4()).replace("-", "")
html = % (vis_id, self.to_json(pretty_print=False), vis_id)
return html | def _repr_html_(self) | Build the HTML representation for IPython. | 8.325017 | 7.41633 | 1.122525 |
from IPython.core.display import display, HTML
display(HTML(self._repr_html_())) | def display(self) | Display the visualization inline in the IPython notebook.
This is deprecated, use the following instead::
from IPython.display import display
display(viz) | 4.395799 | 3.73355 | 1.177378 |
super(self.__class__, self).validate(*args)
if not self.name:
raise ValidationError('name is required for Data') | def validate(self, *args) | Validate contents of class | 5.462384 | 5.112624 | 1.068411 |
if isinstance(obj, str_types):
return obj
elif hasattr(obj, 'timetuple'):
return int(time.mktime(obj.timetuple())) * 1000
elif hasattr(obj, 'item'):
return obj.item()
elif hasattr(obj, '__float__'):
if isinstance(obj, int):
return int(obj)
else:
return float(obj)
elif hasattr(obj, '__int__'):
return int(obj)
else:
raise LoadError('cannot serialize index of type '
+ type(obj).__name__) | def serialize(obj) | Convert an object into a JSON-serializable value
This is used by the ``from_pandas`` and ``from_numpy`` functions to
convert data to JSON-serializable types when loading. | 2.759774 | 2.818319 | 0.979227 |
# Note: There's an experimental JSON encoder floating around in
# pandas land that hasn't made it into the main branch. This
# function should be revisited if it ever does.
if not pd:
raise LoadError('pandas could not be imported')
if not hasattr(data, 'index'):
raise ValueError('Please load a Pandas object.')
if name:
vega_data = cls(name=name, **kwargs)
else:
vega_data = cls(name='table', **kwargs)
pd_obj = data.copy()
if columns:
pd_obj = data[columns]
if key_on != 'idx':
pd_obj.index = data[key_on]
if records:
# The worst
vega_data.values = json.loads(pd_obj.to_json(orient='records'))
return vega_data
vega_data.values = []
if isinstance(pd_obj, pd.Series):
data_key = data.name or series_key
for i, v in pd_obj.iteritems():
value = {}
value['idx'] = cls.serialize(i)
value['col'] = data_key
value['val'] = cls.serialize(v)
vega_data.values.append(value)
elif isinstance(pd_obj, pd.DataFrame):
# We have to explicitly convert the column names to strings
# because the json serializer doesn't allow for integer keys.
for i, row in pd_obj.iterrows():
for num, (k, v) in enumerate(row.iteritems()):
value = {}
value['idx'] = cls.serialize(i)
value['col'] = cls.serialize(k)
value['val'] = cls.serialize(v)
if grouped:
value['group'] = num
vega_data.values.append(value)
else:
raise ValueError('cannot load from data type '
+ type(pd_obj).__name__)
return vega_data | def from_pandas(cls, data, columns=None, key_on='idx', name=None,
series_key='data', grouped=False, records=False, **kwargs) | Load values from a pandas ``Series`` or ``DataFrame`` object
Parameters
----------
data : pandas ``Series`` or ``DataFrame``
Pandas object to import data from.
columns: list, default None
DataFrame columns to convert to Data. Keys default to col names.
If columns are given and on_index is False, x-axis data will
default to the first column.
key_on: string, default 'index'
Value to key on for x-axis data. Defaults to index.
name : string, default None
Applies to the ``name`` attribute of the generated class. If
``None`` (default), then the ``name`` attribute of ``pd_obj`` is
used if it exists, or ``'table'`` if it doesn't.
series_key : string, default 'data'
Applies only to ``Series``. If ``None`` (default), then defaults to
data.name. For example, if ``series_key`` is ``'x'``, then the
entries of the ``values`` list
will be ``{'idx': ..., 'col': 'x', 'val': ...}``.
grouped: boolean, default False
Pass true for an extra grouping parameter
records: boolean, defaule False
Requires Pandas 0.12 or greater. Writes the Pandas DataFrame
using the df.to_json(orient='records') formatting.
**kwargs : dict
Additional arguments passed to the :class:`Data` constructor. | 3.233727 | 3.127881 | 1.03384 |
if not np:
raise LoadError('numpy could not be imported')
_assert_is_type('numpy object', np_obj, np.ndarray)
# Integer index if none is provided
index = index or range(np_obj.shape[0])
# Explicitly map dict-keys to strings for JSON serializer.
columns = list(map(str, columns))
index_key = index_key or cls._default_index_key
if len(index) != np_obj.shape[0]:
raise LoadError(
'length of index must be equal to number of rows of array')
elif len(columns) != np_obj.shape[1]:
raise LoadError(
'length of columns must be equal to number of columns of '
'array')
data = cls(name=name, **kwargs)
data.values = [
dict([(index_key, cls.serialize(idx))] +
[(col, x) for col, x in zip(columns, row)])
for idx, row in zip(index, np_obj.tolist())]
return data | def from_numpy(cls, np_obj, name, columns, index=None, index_key=None,
**kwargs) | Load values from a numpy array
Parameters
----------
np_obj : numpy.ndarray
numpy array to load data from
name : string
``name`` field for the data
columns : iterable
Sequence of column names, from left to right. Must have same
length as the number of columns of ``np_obj``.
index : iterable, default None
Sequence of indices from top to bottom. If ``None`` (default),
then the indices are integers starting at 0. Must have same
length as the number of rows of ``np_obj``.
index_key : string, default None
Key to use for the index. If ``None`` (default), ``idx`` is
used.
**kwargs : dict
Additional arguments passed to the :class:`Data` constructor
Notes
-----
The individual elements of ``np_obj``, ``columns``, and ``index``
must return valid values from :func:`Data.serialize`. | 3.328766 | 3.271502 | 1.017504 |
if not name:
name = 'table'
lengths = [len(v) for v in kwargs.values()]
if len(set(lengths)) != 1:
raise ValueError('Iterables must all be same length')
if not idx:
raise ValueError('Must provide iter name index reference')
index = kwargs.pop(idx)
vega_vals = []
for k, v in sorted(kwargs.items()):
for idx, val in zip(index, v):
value = {}
value['idx'] = idx
value['col'] = k
value['val'] = val
vega_vals.append(value)
return cls(name, values=vega_vals) | def from_mult_iters(cls, name=None, idx=None, **kwargs) | Load values from multiple iters
Parameters
----------
name : string, default None
Name of the data set. If None (default), the name will be set to
``'table'``.
idx: string, default None
Iterable to use for the data index
**kwargs : dict of iterables
The ``values`` field will contain dictionaries with keys for
each of the iterables provided. For example,
d = Data.from_iters(idx='x', x=[0, 1, 5], y=(10, 20, 30))
would result in ``d`` having a ``values`` field with
[{'idx': 0, 'col': 'y', 'val': 10},
{'idx': 1, 'col': 'y', 'val': 20}
If the iterables are not the same length, then ValueError is
raised. | 3.644909 | 3.049782 | 1.195138 |
if not name:
name = 'table'
if isinstance(data, (list, tuple)):
data = {x: y for x, y in enumerate(data)}
values = [{'idx': k, 'col': 'data', 'val': v}
for k, v in sorted(data.items())]
return cls(name, values=values) | def from_iter(cls, data, name=None) | Convenience method for loading data from an iterable.
Defaults to numerical indexing for x-axis.
Parameters
----------
data: iterable
An iterable of data (list, tuple, dict of key/val pairs)
name: string, default None
Name of the data set. If None (default), the name will be set to
``'table'``. | 3.996249 | 3.870817 | 1.032405 |
if not name:
name = 'table'
cls.raw_data = data
# Tuples
if isinstance(data, tuple):
values = [{"x": x[0], "y": x[1]} for x in data]
# Lists
elif isinstance(data, list):
values = [{"x": x, "y": y}
for x, y in zip(range(len(data) + 1), data)]
# Dicts
elif isinstance(data, dict) or isinstance(data, pd.Series):
values = [{"x": x, "y": y} for x, y in sorted(data.items())]
# Dataframes
elif isinstance(data, pd.DataFrame):
if len(columns) > 1 and use_index:
raise ValueError('If using index as x-axis, len(columns)'
'cannot be > 1')
if use_index or len(columns) == 1:
values = [{"x": cls.serialize(x[0]),
"y": cls.serialize(x[1][columns[0]])}
for x in data.iterrows()]
else:
values = [{"x": cls.serialize(x[1][columns[0]]),
"y": cls.serialize(x[1][columns[1]])}
for x in data.iterrows()]
# NumPy arrays
elif isinstance(data, np.ndarray):
values = cls._numpy_to_values(data)
else:
raise TypeError('unknown data type %s' % type(data))
return cls(name, values=values) | def keypairs(cls, data, columns=None, use_index=False, name=None) | This will format the data as Key: Value pairs, rather than the
idx/col/val style. This is useful for some transforms, and to
key choropleth map data
Standard Data Types:
List: [0, 10, 20, 30, 40]
Paired Tuples: ((0, 1), (0, 2), (0, 3))
Dict: {'A': 10, 'B': 20, 'C': 30, 'D': 40, 'E': 50}
Plus Pandas DataFrame and Series, and Numpy ndarray
Parameters
----------
data:
List, Tuple, Dict, Pandas Series/DataFrame, Numpy ndarray
columns: list, default None
If passing Pandas DataFrame, you must pass at least one column
name.If one column is passed, x-values will default to the index
values.If two column names are passed, x-values are columns[0],
y-values columns[1].
use_index: boolean, default False
Use the DataFrame index for your x-values | 2.374697 | 2.351368 | 1.009921 |
'''Convert a NumPy array to values attribute'''
def to_list_no_index(xvals, yvals):
return [{"x": x, "y": np.asscalar(y)}
for x, y in zip(xvals, yvals)]
if len(data.shape) == 1 or data.shape[1] == 1:
xvals = range(data.shape[0] + 1)
values = to_list_no_index(xvals, data)
elif len(data.shape) == 2:
if data.shape[1] == 2:
# NumPy arrays and matrices have different iteration rules.
if isinstance(data, np.matrix):
xidx = (0, 0)
yidx = (0, 1)
else:
xidx = 0
yidx = 1
xvals = [np.asscalar(row[xidx]) for row in data]
yvals = [np.asscalar(row[yidx]) for row in data]
values = [{"x": x, "y": y} for x, y in zip(xvals, yvals)]
else:
raise ValueError('arrays with > 2 columns not supported')
else:
raise ValueError('invalid dimensions for ndarray')
return values | def _numpy_to_values(data) | Convert a NumPy array to values attribute | 2.689877 | 2.631855 | 1.022046 |
# TODO: support writing to separate file
return super(self.__class__, self).to_json(validate=validate,
pretty_print=pretty_print) | def to_json(self, validate=False, pretty_print=True, data_path=None) | Convert data to JSON
Parameters
----------
data_path : string
If not None, then data is written to a separate file at the
specified path. Note that the ``url`` attribute if the data must
be set independently for the data to load correctly.
Returns
-------
string
Valid Vega JSON. | 6.386598 | 7.143742 | 0.894013 |
try:
from IPython.core.display import display, HTML
except ImportError:
print("IPython Notebook could not be loaded.")
# Thanks to @jakevdp:
# https://github.com/jakevdp/mpld3/blob/master/mpld3/_display.py#L85
load_lib =
lib_urls = [
"'//cdnjs.cloudflare.com/ajax/libs/d3/3.5.3/d3.min.js'",
("'//cdnjs.cloudflare.com/ajax/libs/d3-geo-projection/0.2.9/"
"d3.geo.projection.min.js'"),
"'//wrobstory.github.io/d3-cloud/d3.layout.cloud.js'",
"'//wrobstory.github.io/vega/vega.v1.3.3.js'"
]
get_lib =
load_js = get_lib
ipy_trigger = "window.dispatchEvent(vincent_event);"
for elem in lib_urls[:-1]:
load_js = load_js % (elem, get_lib)
load_js = load_js % (lib_urls[-1], ipy_trigger)
html = % (load_lib, load_js,)
return display(HTML(html)) | def initialize_notebook() | Initialize the IPython notebook display elements | 4.002928 | 3.98075 | 1.005571 |
if not isinstance(value, value_type):
if type(value_type) is tuple:
types = ', '.join(t.__name__ for t in value_type)
raise ValueError('{0} must be one of ({1})'.format(name, types))
else:
raise ValueError('{0} must be {1}'
.format(name, value_type.__name__)) | def _assert_is_type(name, value, value_type) | Assert that a value must be a given type. | 2.086491 | 1.978051 | 1.054821 |
def grammar_creator(validator, name):
def setter(self, value):
if isinstance(grammar_type, (type, tuple)):
_assert_is_type(validator.__name__, value, grammar_type)
validator(value)
self.grammar[name] = value
def getter(self):
return self.grammar.get(name, None)
def deleter(self):
if name in self.grammar:
del self.grammar[name]
return property(getter, setter, deleter, validator.__doc__)
if isinstance(grammar_type, (type, tuple)):
# If grammar_type is a type, return another decorator.
def grammar_dec(validator):
# Make sure to use the grammar name if it's there.
if grammar_name:
return grammar_creator(validator, grammar_name)
else:
return grammar_creator(validator, validator.__name__)
return grammar_dec
elif isinstance(grammar_name, str_types):
# If grammar_name is a string, use that name and return another
# decorator.
def grammar_dec(validator):
return grammar_creator(validator, grammar_name)
return grammar_dec
else:
# Otherwise we assume that grammar_type is actually the function being
# decorated.
return grammar_creator(grammar_type, grammar_type.__name__) | def grammar(grammar_type=None, grammar_name=None) | Decorator to define properties that map to the ``grammar``
dict. This dict is the canonical representation of the Vega grammar
within Vincent.
This decorator is intended for classes that map to some pre-defined JSON
structure, such as axes, data, marks, scales, etc. It is assumed that this
decorates functions with an instance of ``self.grammar``.
Parameters
----------
grammar_type : type or tuple of types, default None
If the argument to the decorated function is not of the given types,
then a ValueError is raised. No type checking is done if the type is
None (default).
grammar_name : string, default None
An optional name to map to the internal ``grammar`` dict. If None
(default), then the key for the dict is the name of the function
being decorated. If not None, then it will be the name specified
here. This is useful if the expected JSON field name is a Python
keyword or has an un-Pythonic name.
This should decorate a "validator" function that should return no value
but raise an exception if the provided value is not valid Vega grammar. If
the validator throws no exception, then the value is assigned to the
``grammar`` dict.
The validator function should take only one argument - the value to be
validated - so that no ``self`` argument is included; the validator
should not modify the class.
If no arguments are given, then no type-checking is done the property
will be mapped to a field with the name of the decorated function.
The doc string for the property is taken from the validator functions's
doc string. | 2.640766 | 2.497151 | 1.057511 |
for key, val in self.grammar.items():
try:
setattr(self, key, val)
except ValueError as e:
raise ValidationError('invalid contents: ' + e.args[0]) | def validate(self) | Validate the contents of the object.
This calls ``setattr`` for each of the class's grammar properties. It
will catch ``ValueError``s raised by the grammar property's setters
and re-raise them as :class:`ValidationError`. | 5.586792 | 3.480639 | 1.605105 |
if validate:
self.validate()
if pretty_print:
dumps_args = {'indent': 2, 'separators': (',', ': ')}
else:
dumps_args = {}
def encoder(obj):
if hasattr(obj, 'grammar'):
return obj.grammar
if html_out:
template = Template(
str(resource_string('vincent', 'vega_template.html')))
with open(html_path, 'w') as f:
f.write(template.substitute(path=path))
if path:
with open(path, 'w') as f:
json.dump(self.grammar, f, default=encoder, sort_keys=True,
**dumps_args)
else:
return json.dumps(self.grammar, default=encoder, sort_keys=True,
**dumps_args) | def to_json(self, path=None, html_out=False,
html_path='vega_template.html', validate=False,
pretty_print=True) | Convert object to JSON
Parameters
----------
path: string, default None
Path to write JSON out. If there is no path provided, JSON
will be returned as a string to the console.
html_out: boolean, default False
If True, vincent will output an simple HTML scaffold to
visualize the vega json output.
html_path: string, default 'vega_template.html'
Path for the html file (if html_out=True)
validate : boolean
If True, call the object's `validate` method before
serializing. Default is False.
pretty_print : boolean
If True (default), JSON is printed in more-readable form with
indentation and spaces.
Returns
-------
string
JSON serialization of the class's grammar properties. | 2.344094 | 2.203559 | 1.063777 |
if text is None:
return False
mimetype = normalize_mimetype(text)
return mimetype not in [DEFAULT, PLAIN, None] | def useful_mimetype(text) | Check to see if the given mime type is a MIME type
which is useful in terms of how to treat this file. | 6.507739 | 6.524581 | 0.997419 |
extension = decode_path(extension)
if extension is None:
return
if extension.startswith('.'):
extension = extension[1:]
if '.' in extension:
_, extension = os.path.splitext(extension)
extension = slugify(extension, sep='')
if extension is None:
return
if len(extension):
return extension | def normalize_extension(extension) | Normalise a file name extension. | 3.291712 | 3.215186 | 1.023801 |
kwargs.setdefault('headers', DEFAULT_HEADERS)
try:
res = requests.get(url, **kwargs)
res.raise_for_status()
except requests.RequestException as e:
print(e)
else:
html = res.text
tree = Selector(text=html)
return tree | def fetch(url: str, **kwargs) -> Selector | Send HTTP request and parse it as a DOM tree.
Args:
url (str): The url of the site.
Returns:
Selector: allows you to select parts of HTML text using CSS or XPath expressions. | 2.534437 | 2.823326 | 0.897678 |
kwargs.setdefault('headers', DEFAULT_HEADERS)
async with aiohttp.ClientSession(**kwargs) as ses:
async with ses.get(url, **kwargs) as res:
html = await res.text()
tree = Selector(text=html)
return tree | async def async_fetch(url: str, **kwargs) -> Selector | Do the fetch in an async style.
Args:
url (str): The url of the site.
Returns:
Selector: allows you to select parts of HTML text using CSS or XPath expressions. | 2.701533 | 3.326574 | 0.812107 |
kwargs.setdefault('headers', DEFAULT_HEADERS)
html = requests.get(url, **kwargs).content
if b'<base' not in html:
repl = f'<head><base href="{url}">'
html = html.replace(b'<head>', repl.encode('utf-8'))
fd, fname = tempfile.mkstemp('.html')
os.write(fd, html)
os.close(fd)
return webbrowser.open(f'file://{fname}') | def view(url: str, **kwargs) -> bool | View the page whether rendered properly. (ensure the <base> tag to make external links work)
Args:
url (str): The url of the site. | 3.149184 | 2.84606 | 1.106507 |
hrefs = [link.to_text() for link in find_all_links(res.text)]
if search:
hrefs = [href for href in hrefs if search in href]
if pattern:
hrefs = [href for href in hrefs if re.findall(pattern, href)]
return list(set(hrefs)) | def links(res: requests.models.Response,
search: str = None,
pattern: str = None) -> list | Get the links of the page.
Args:
res (requests.models.Response): The response of the page.
search (str, optional): Defaults to None. Search the links you want.
pattern (str, optional): Defaults to None. Search the links use a regex pattern.
Returns:
list: All the links of the page. | 2.506856 | 2.693911 | 0.930564 |
if sort_by:
reverse = order == 'desc'
total = sorted(total, key=itemgetter(sort_by), reverse=reverse)
if no_duplicate:
total = [key for key, _ in groupby(total)]
data = json.dumps(total, ensure_ascii=False)
Path(name).write_text(data, encoding='utf-8') | def save_as_json(total: list,
name='data.json',
sort_by: str = None,
no_duplicate=False,
order='asc') | Save what you crawled as a json file.
Args:
total (list): Total of data you crawled.
name (str, optional): Defaults to 'data.json'. The name of the file.
sort_by (str, optional): Defaults to None. Sort items by a specific key.
no_duplicate (bool, optional): Defaults to False. If True, it will remove duplicated data.
order (str, optional): Defaults to 'asc'. The opposite option is 'desc'. | 2.191902 | 2.520806 | 0.869524 |
argv = docopt(__doc__, version=VERSION)
if argv['genspider']:
name = f"{argv['<name>']}.py"
use_async = argv['--async']
template = 'data_async.tmpl' if use_async else 'data.tmpl'
package_dir = Path(__file__).parent
template_text = package_dir.joinpath('templates', template).read_text()
Path(name).write_text(template_text)
if argv['shell']:
url = argv['<url>'] if argv['<url>'] else input(
'Plz specific a site to crawl\nurl: ')
res = requests.get(url, headers=DEFAULT_HEADERS)
if not res:
exit('Failed to fetch the page.')
tree = Selector(text=res.text)
allvars = {**locals(), **globals()}
try:
from ptpython.repl import embed
print(BANNER)
embed(allvars)
except ImportError:
code.interact(local=allvars, banner=BANNER) | def cli() | Commandline for looter :d | 4.370759 | 4.407304 | 0.991708 |
retval = tuple()
for val in self.VALUES:
retval += (getattr(self, val),)
return retval | def get_value_tuple(self) | Returns a tuple of the color's values (in order). For example,
an LabColor object will return (lab_l, lab_a, lab_b), where each
member of the tuple is the float value for said variable. | 6.593547 | 5.879219 | 1.1215 |
observer = str(observer)
if observer not in color_constants.OBSERVERS:
raise InvalidObserverError(self)
self.observer = observer | def set_observer(self, observer) | Validates and sets the color's observer angle.
.. note:: This only changes the observer angle value. It does no conversion
of the color's coordinates.
:param str observer: One of '2' or '10'. | 6.7325 | 11.376806 | 0.591774 |
illuminant = illuminant.lower()
if illuminant not in color_constants.ILLUMINANTS[self.observer]:
raise InvalidIlluminantError(illuminant)
self.illuminant = illuminant | def set_illuminant(self, illuminant) | Validates and sets the color's illuminant.
.. note:: This only changes the illuminant. It does no conversion
of the color's coordinates. For this, you'll want to refer to
:py:meth:`XYZColor.apply_adaptation <colormath.color_objects.XYZColor.apply_adaptation>`.
.. tip:: Call this after setting your observer.
:param str illuminant: One of the various illuminants. | 3.507856 | 3.961984 | 0.885379 |
try:
if observer is None:
observer = self.observer
illums_observer = color_constants.ILLUMINANTS[observer]
except KeyError:
raise InvalidObserverError(self)
try:
if illuminant is None:
illuminant = self.illuminant
illum_xyz = illums_observer[illuminant]
except (KeyError, AttributeError):
raise InvalidIlluminantError(illuminant)
return {'X': illum_xyz[0], 'Y': illum_xyz[1], 'Z': illum_xyz[2]} | def get_illuminant_xyz(self, observer=None, illuminant=None) | :param str observer: Get the XYZ values for another observer angle. Must
be either '2' or '10'.
:param str illuminant: Get the XYZ values for another illuminant.
:returns: the color's illuminant's XYZ values. | 2.654704 | 2.704164 | 0.98171 |
# This holds the obect's spectral data, and will be passed to
# numpy.array() to create a numpy array (matrix) for the matrix math
# that will be done during the conversion to XYZ.
values = []
# Use the required value list to build this dynamically. Default to
# 0.0, since that ultimately won't affect the outcome due to the math
# involved.
for val in self.VALUES:
values.append(getattr(self, val, 0.0))
# Create and the actual numpy array/matrix from the spectral list.
color_array = numpy.array([values])
return color_array | def get_numpy_array(self) | Dump this color into NumPy array. | 11.971473 | 11.074844 | 1.080961 |
if density_standard is not None:
return density.ansi_density(self, density_standard)
else:
return density.auto_density(self) | def calc_density(self, density_standard=None) | Calculates the density of the SpectralColor. By default, Status T
density is used, and the correct density distribution (Red, Green,
or Blue) is chosen by comparing the Red, Green, and Blue components of
the spectral sample (the values being red in via "filters"). | 4.343236 | 4.671123 | 0.929806 |
logger.debug(" \- Original illuminant: %s", self.illuminant)
logger.debug(" \- Target illuminant: %s", target_illuminant)
# If the XYZ values were taken with a different reference white than the
# native reference white of the target RGB space, a transformation matrix
# must be applied.
if self.illuminant != target_illuminant:
logger.debug(" \* Applying transformation from %s to %s ",
self.illuminant, target_illuminant)
# Sets the adjusted XYZ values, and the new illuminant.
apply_chromatic_adaptation_on_color(
color=self,
targ_illum=target_illuminant,
adaptation=adaptation) | def apply_adaptation(self, target_illuminant, adaptation='bradford') | This applies an adaptation matrix to change the XYZ color's illuminant.
You'll most likely only need this during RGB conversions. | 4.358024 | 4.229686 | 1.030342 |
if not self.is_upscaled:
return min(max(coord, 0.0), 1.0)
else:
return min(max(coord, 1), 255) | def _clamp_rgb_coordinate(self, coord) | Clamps an RGB coordinate, taking into account whether or not the
color is upscaled or not.
:param float coord: The coordinate value.
:rtype: float
:returns: The clamped value. | 3.23777 | 2.681174 | 1.207594 |
# Scale up to 0-255 values.
rgb_r = int(math.floor(0.5 + self.rgb_r * 255))
rgb_g = int(math.floor(0.5 + self.rgb_g * 255))
rgb_b = int(math.floor(0.5 + self.rgb_b * 255))
return rgb_r, rgb_g, rgb_b | def get_upscaled_value_tuple(self) | Scales an RGB color object from decimal 0.0-1.0 to int 0-255. | 2.171046 | 1.908322 | 1.137673 |
rgb_r, rgb_g, rgb_b = self.get_upscaled_value_tuple()
return '#%02x%02x%02x' % (rgb_r, rgb_g, rgb_b) | def get_rgb_hex(self) | Converts the RGB value to a hex value in the form of: #RRGGBB
:rtype: str | 2.948136 | 3.161768 | 0.932432 |
colorstring = hex_str.strip()
if colorstring[0] == '#':
colorstring = colorstring[1:]
if len(colorstring) != 6:
raise ValueError("input #%s is not in #RRGGBB format" % colorstring)
r, g, b = colorstring[:2], colorstring[2:4], colorstring[4:]
r, g, b = [int(n, 16) / 255.0 for n in (r, g, b)]
return cls(r, g, b) | def new_from_rgb_hex(cls, hex_str) | Converts an RGB hex string like #RRGGBB and assigns the values to
this sRGBColor object.
:rtype: sRGBColor | 1.843833 | 1.933924 | 0.953415 |
return numpy.sqrt(
numpy.sum(numpy.power(lab_color_vector - lab_color_matrix, 2), axis=1)) | def delta_e_cie1976(lab_color_vector, lab_color_matrix) | Calculates the Delta E (CIE1976) between `lab_color_vector` and all
colors in `lab_color_matrix`. | 2.597642 | 3.04161 | 0.854035 |
C_1 = numpy.sqrt(numpy.sum(numpy.power(lab_color_vector[1:], 2)))
C_2 = numpy.sqrt(numpy.sum(numpy.power(lab_color_matrix[:, 1:], 2), axis=1))
delta_lab = lab_color_vector - lab_color_matrix
delta_L = delta_lab[:, 0].copy()
delta_C = C_1 - C_2
delta_lab[:, 0] = delta_C
delta_H_sq = numpy.sum(numpy.power(delta_lab, 2) * numpy.array([-1, 1, 1]), axis=1)
# noinspection PyArgumentList
delta_H = numpy.sqrt(delta_H_sq.clip(min=0))
S_L = 1
S_C = 1 + K_1 * C_1
S_H = 1 + K_2 * C_1
LCH = numpy.vstack([delta_L, delta_C, delta_H])
params = numpy.array([[K_L * S_L], [K_C * S_C], [K_H * S_H]])
return numpy.sqrt(numpy.sum(numpy.power(LCH / params, 2), axis=0)) | def delta_e_cie1994(lab_color_vector, lab_color_matrix,
K_L=1, K_C=1, K_H=1, K_1=0.045, K_2=0.015) | Calculates the Delta E (CIE1994) of two colors.
K_l:
0.045 graphic arts
0.048 textiles
K_2:
0.015 graphic arts
0.014 textiles
K_L:
1 default
2 textiles | 2.284748 | 2.320741 | 0.984491 |
L, a, b = lab_color_vector
C_1 = numpy.sqrt(numpy.sum(numpy.power(lab_color_vector[1:], 2)))
C_2 = numpy.sqrt(numpy.sum(numpy.power(lab_color_matrix[:, 1:], 2), axis=1))
delta_lab = lab_color_vector - lab_color_matrix
delta_L = delta_lab[:, 0].copy()
delta_C = C_1 - C_2
delta_lab[:, 0] = delta_C
H_1 = numpy.degrees(numpy.arctan2(b, a))
if H_1 < 0:
H_1 += 360
F = numpy.sqrt(numpy.power(C_1, 4) / (numpy.power(C_1, 4) + 1900.0))
# noinspection PyChainedComparisons
if 164 <= H_1 and H_1 <= 345:
T = 0.56 + abs(0.2 * numpy.cos(numpy.radians(H_1 + 168)))
else:
T = 0.36 + abs(0.4 * numpy.cos(numpy.radians(H_1 + 35)))
if L < 16:
S_L = 0.511
else:
S_L = (0.040975 * L) / (1 + 0.01765 * L)
S_C = ((0.0638 * C_1) / (1 + 0.0131 * C_1)) + 0.638
S_H = S_C * (F * T + 1 - F)
delta_C = C_1 - C_2
delta_H_sq = numpy.sum(numpy.power(delta_lab, 2) * numpy.array([-1, 1, 1]), axis=1)
# noinspection PyArgumentList
delta_H = numpy.sqrt(delta_H_sq.clip(min=0))
LCH = numpy.vstack([delta_L, delta_C, delta_H])
params = numpy.array([[pl * S_L], [pc * S_C], [S_H]])
return numpy.sqrt(numpy.sum(numpy.power(LCH / params, 2), axis=0)) | def delta_e_cmc(lab_color_vector, lab_color_matrix, pl=2, pc=1) | Calculates the Delta E (CIE1994) of two colors.
CMC values
Acceptability: pl=2, pc=1
Perceptability: pl=1, pc=1 | 2.415849 | 2.454437 | 0.984278 |
# Load the spec_XXXnm attributes into a Numpy array.
sample = color.get_numpy_array()
# Matrix multiplication
intermediate = sample * density_standard
# Sum the products.
numerator = intermediate.sum()
# This is the denominator in the density equation.
sum_of_standard_wavelengths = density_standard.sum()
# This is the top level of the density formula.
return -1.0 * log10(numerator / sum_of_standard_wavelengths) | def ansi_density(color, density_standard) | Calculates density for the given SpectralColor using the spectral weighting
function provided. For example, ANSI_STATUS_T_RED. These may be found in
:py:mod:`colormath.density_standards`.
:param SpectralColor color: The SpectralColor object to calculate
density for.
:param numpy.ndarray density_standard: NumPy array of filter of choice
from :py:mod:`colormath.density_standards`.
:rtype: float
:returns: The density value for the given color and density standard. | 8.809732 | 8.963363 | 0.98286 |
blue_density = ansi_density(color, ANSI_STATUS_T_BLUE)
green_density = ansi_density(color, ANSI_STATUS_T_GREEN)
red_density = ansi_density(color, ANSI_STATUS_T_RED)
densities = [blue_density, green_density, red_density]
min_density = min(densities)
max_density = max(densities)
density_range = max_density - min_density
# See comments in density_standards.py for VISUAL_DENSITY_THRESH to
# understand what this is doing.
if density_range <= VISUAL_DENSITY_THRESH:
return ansi_density(color, ISO_VISUAL)
elif blue_density > green_density and blue_density > red_density:
return blue_density
elif green_density > blue_density and green_density > red_density:
return green_density
else:
return red_density | def auto_density(color) | Given a SpectralColor, automatically choose the correct ANSI T filter.
Returns a tuple with a string representation of the filter the
calculated density.
:param SpectralColor color: The SpectralColor object to calculate
density for.
:rtype: float
:returns: The density value, with the filter selected automatically. | 2.772206 | 2.771969 | 1.000086 |
if not color.__class__.__name__ == 'LabColor':
raise ValueError(
"Delta E functions can only be used with two LabColor objects.")
return numpy.array([color.lab_l, color.lab_a, color.lab_b]) | def _get_lab_color1_vector(color) | Converts an LabColor into a NumPy vector.
:param LabColor color:
:rtype: numpy.ndarray | 5.09676 | 5.284231 | 0.964523 |
color1_vector = _get_lab_color1_vector(color1)
color2_matrix = _get_lab_color2_matrix(color2)
delta_e = color_diff_matrix.delta_e_cie1976(color1_vector, color2_matrix)[0]
return numpy.asscalar(delta_e) | def delta_e_cie1976(color1, color2) | Calculates the Delta E (CIE1976) of two colors. | 3.257775 | 3.218294 | 1.012268 |
color1_vector = _get_lab_color1_vector(color1)
color2_matrix = _get_lab_color2_matrix(color2)
delta_e = color_diff_matrix.delta_e_cie1994(
color1_vector, color2_matrix, K_L=K_L, K_C=K_C, K_H=K_H, K_1=K_1, K_2=K_2)[0]
return numpy.asscalar(delta_e) | def delta_e_cie1994(color1, color2, K_L=1, K_C=1, K_H=1, K_1=0.045, K_2=0.015) | Calculates the Delta E (CIE1994) of two colors.
K_l:
0.045 graphic arts
0.048 textiles
K_2:
0.015 graphic arts
0.014 textiles
K_L:
1 default
2 textiles | 2.383739 | 2.575901 | 0.9254 |
color1_vector = _get_lab_color1_vector(color1)
color2_matrix = _get_lab_color2_matrix(color2)
delta_e = color_diff_matrix.delta_e_cie2000(
color1_vector, color2_matrix, Kl=Kl, Kc=Kc, Kh=Kh)[0]
return numpy.asscalar(delta_e) | def delta_e_cie2000(color1, color2, Kl=1, Kc=1, Kh=1) | Calculates the Delta E (CIE2000) of two colors. | 2.865243 | 2.93249 | 0.977068 |
color1_vector = _get_lab_color1_vector(color1)
color2_matrix = _get_lab_color2_matrix(color2)
delta_e = color_diff_matrix.delta_e_cmc(
color1_vector, color2_matrix, pl=pl, pc=pc)[0]
return numpy.asscalar(delta_e) | def delta_e_cmc(color1, color2, pl=2, pc=1) | Calculates the Delta E (CMC) of two colors.
CMC values
Acceptability: pl=2, pc=1
Perceptability: pl=1, pc=1 | 3.141172 | 3.611605 | 0.869744 |
# Get the appropriate transformation matrix, [MsubA].
m_sharp = color_constants.ADAPTATION_MATRICES[adaptation]
# In case the white-points are still input as strings
# Get white-points for illuminant
if isinstance(wp_src, str):
orig_illum = wp_src.lower()
wp_src = color_constants.ILLUMINANTS[observer][orig_illum]
elif hasattr(wp_src, '__iter__'):
wp_src = wp_src
if isinstance(wp_dst, str):
targ_illum = wp_dst.lower()
wp_dst = color_constants.ILLUMINANTS[observer][targ_illum]
elif hasattr(wp_dst, '__iter__'):
wp_dst = wp_dst
# Sharpened cone responses ~ rho gamma beta ~ sharpened r g b
rgb_src = numpy.dot(m_sharp, wp_src)
rgb_dst = numpy.dot(m_sharp, wp_dst)
# Ratio of whitepoint sharpened responses
m_rat = numpy.diag(rgb_dst / rgb_src)
# Final transformation matrix
m_xfm = numpy.dot(numpy.dot(pinv(m_sharp), m_rat), m_sharp)
return m_xfm | def _get_adaptation_matrix(wp_src, wp_dst, observer, adaptation) | Calculate the correct transformation matrix based on origin and target
illuminants. The observer angle must be the same between illuminants.
See colormath.color_constants.ADAPTATION_MATRICES for a list of possible
adaptations.
Detailed conversion documentation is available at:
http://brucelindbloom.com/Eqn_ChromAdapt.html | 4.547472 | 4.22586 | 1.076106 |
# It's silly to have to do this, but some people may want to call this
# function directly, so we'll protect them from messing up upper/lower case.
adaptation = adaptation.lower()
# Get white-points for illuminant
if isinstance(orig_illum, str):
orig_illum = orig_illum.lower()
wp_src = color_constants.ILLUMINANTS[observer][orig_illum]
elif hasattr(orig_illum, '__iter__'):
wp_src = orig_illum
if isinstance(targ_illum, str):
targ_illum = targ_illum.lower()
wp_dst = color_constants.ILLUMINANTS[observer][targ_illum]
elif hasattr(targ_illum, '__iter__'):
wp_dst = targ_illum
logger.debug(" \* Applying adaptation matrix: %s", adaptation)
# Retrieve the appropriate transformation matrix from the constants.
transform_matrix = _get_adaptation_matrix(wp_src, wp_dst,
observer, adaptation)
# Stuff the XYZ values into a NumPy matrix for conversion.
XYZ_matrix = numpy.array((val_x, val_y, val_z))
# Perform the adaptation via matrix multiplication.
result_matrix = numpy.dot(transform_matrix, XYZ_matrix)
# Return individual X, Y, and Z coordinates.
return result_matrix[0], result_matrix[1], result_matrix[2] | def apply_chromatic_adaptation(val_x, val_y, val_z, orig_illum, targ_illum,
observer='2', adaptation='bradford') | Applies a chromatic adaptation matrix to convert XYZ values between
illuminants. It is important to recognize that color transformation results
in color errors, determined by how far the original illuminant is from the
target illuminant. For example, D65 to A could result in very high maximum
deviance.
An informative article with estimate average Delta E values for each
illuminant conversion may be found at:
http://brucelindbloom.com/ChromAdaptEval.html | 3.370268 | 3.49672 | 0.963837 |
xyz_x = color.xyz_x
xyz_y = color.xyz_y
xyz_z = color.xyz_z
orig_illum = color.illuminant
targ_illum = targ_illum.lower()
observer = color.observer
adaptation = adaptation.lower()
# Return individual X, Y, and Z coordinates.
color.xyz_x, color.xyz_y, color.xyz_z = apply_chromatic_adaptation(
xyz_x, xyz_y, xyz_z, orig_illum, targ_illum,
observer=observer, adaptation=adaptation)
color.set_illuminant(targ_illum)
return color | def apply_chromatic_adaptation_on_color(color, targ_illum, adaptation='bradford') | Convenience function to apply an adaptation directly to a Color object. | 2.686963 | 2.665042 | 1.008225 |
print("=== Simple Example: Lab->XYZ ===")
# Instantiate an Lab color object with the given values.
lab = LabColor(0.903, 16.296, -2.22)
# Show a string representation.
print(lab)
# Convert to XYZ.
xyz = convert_color(lab, XYZColor)
print(xyz)
print("=== End Example ===\n") | def example_lab_to_xyz() | This function shows a simple conversion of an Lab color to an XYZ color. | 6.023636 | 5.40557 | 1.114339 |
print("=== Complex Example: LCHab->LCHuv ===")
# Instantiate an LCHab color object with the given values.
lchab = LCHabColor(0.903, 16.447, 352.252)
# Show a string representation.
print(lchab)
# Convert to LCHuv.
lchuv = convert_color(lchab, LCHuvColor)
print(lchuv)
print("=== End Example ===\n") | def example_lchab_to_lchuv() | This function shows very complex chain of conversions in action.
LCHab to LCHuv involves four different calculations, making this the
conversion requiring the most steps. | 4.679805 | 4.370616 | 1.070743 |
print("=== RGB Example: Lab->RGB ===")
# Instantiate an Lab color object with the given values.
lab = LabColor(0.903, 16.296, -2.217)
# Show a string representation.
print(lab)
# Convert to XYZ.
rgb = convert_color(lab, sRGBColor)
print(rgb)
print("=== End Example ===\n") | def example_lab_to_rgb() | Conversions to RGB are a little more complex mathematically. There are also
several kinds of RGB color spaces. When converting from a device-independent
color space to RGB, sRGB is assumed unless otherwise specified with the
target_rgb keyword arg. | 7.060653 | 7.062833 | 0.999691 |
print("=== RGB Example: RGB->XYZ ===")
# Instantiate an Lab color object with the given values.
rgb = sRGBColor(120, 130, 140)
# Show a string representation.
print(rgb)
# Convert RGB to XYZ using a D50 illuminant.
xyz = convert_color(rgb, XYZColor, target_illuminant='D50')
print(xyz)
print("=== End Example ===\n") | def example_rgb_to_xyz() | The reverse is similar. | 5.514726 | 5.35072 | 1.030651 |
print("=== Example: Spectral->XYZ ===")
spc = SpectralColor(
observer='2', illuminant='d50',
spec_380nm=0.0600, spec_390nm=0.0600, spec_400nm=0.0641,
spec_410nm=0.0654, spec_420nm=0.0645, spec_430nm=0.0605,
spec_440nm=0.0562, spec_450nm=0.0543, spec_460nm=0.0537,
spec_470nm=0.0541, spec_480nm=0.0559, spec_490nm=0.0603,
spec_500nm=0.0651, spec_510nm=0.0680, spec_520nm=0.0705,
spec_530nm=0.0736, spec_540nm=0.0772, spec_550nm=0.0809,
spec_560nm=0.0870, spec_570nm=0.0990, spec_580nm=0.1128,
spec_590nm=0.1251, spec_600nm=0.1360, spec_610nm=0.1439,
spec_620nm=0.1511, spec_630nm=0.1590, spec_640nm=0.1688,
spec_650nm=0.1828, spec_660nm=0.1996, spec_670nm=0.2187,
spec_680nm=0.2397, spec_690nm=0.2618, spec_700nm=0.2852,
spec_710nm=0.2500, spec_720nm=0.2400, spec_730nm=0.2300)
xyz = convert_color(spc, XYZColor)
print(xyz)
print("=== End Example ===\n") | def example_spectral_to_xyz() | Instantiate an Lab color object with the given values. Note that the
spectral range can run from 340nm to 830nm. Any omitted values assume a
value of 0.0, which is more or less ignored. For the distribution below,
we are providing an example reading from an X-Rite i1 Pro, which only
measures between 380nm and 730nm. | 1.824052 | 1.789331 | 1.019404 |
print("=== Simple Example: XYZ->IPT ===")
# Instantiate an XYZ color object with the given values.
xyz = XYZColor(0.5, 0.5, 0.5, illuminant='d65')
# Show a string representation.
print(xyz)
# Convert to IPT.
ipt = convert_color(xyz, IPTColor)
print(ipt)
print("=== End Example ===\n") | def example_lab_to_ipt() | This function shows a simple conversion of an XYZ color to an IPT color. | 5.387578 | 4.232827 | 1.272809 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.