patch stringlengths 17–31.2k | y int64 1–1 | oldf stringlengths 0–2.21M | idx int64 1–1 | id int64 4.29k–68.4k | msg stringlengths 8–843 | proj stringclasses 212 values | lang stringclasses 9 values |
---|---|---|---|---|---|---|---|
@@ -139,10 +139,17 @@ class Populator
$dependent_classlikes[$fq_classlike_name_lc] = true;
- $this->populateDataFromTraits($storage, $storage_provider, $dependent_classlikes);
+ foreach ($storage->used_traits as $used_trait_lc => $_) {
+ $this->populateDataFromTrait($storage, $storage_provider, $dependent_classlikes, $used_trait_lc);
+ }
if ($storage->parent_classes) {
- $this->populateDataFromParentClass($storage, $storage_provider, $dependent_classlikes);
+ $this->populateDataFromParentClass(
+ $storage,
+ $storage_provider,
+ $dependent_classlikes,
+ reset($storage->parent_classes)
+ );
}
if (!strpos($fq_classlike_name_lc, '\\') | 1 | <?php
namespace Psalm\Internal\Codebase;
use InvalidArgumentException;
use Psalm\Internal\Analyzer\ClassLikeAnalyzer;
use Psalm\Internal\MethodIdentifier;
use Psalm\Internal\Provider\ClassLikeStorageProvider;
use Psalm\Internal\Provider\FileReferenceProvider;
use Psalm\Internal\Provider\FileStorageProvider;
use Psalm\Issue\CircularReference;
use Psalm\IssueBuffer;
use Psalm\Progress\Progress;
use Psalm\Storage\ClassLikeStorage;
use Psalm\Storage\FileStorage;
use Psalm\Type\Atomic\TTemplateParam;
use Psalm\Type\Union;
use function array_filter;
use function array_intersect_key;
use function array_keys;
use function array_merge;
use function count;
use function in_array;
use function reset;
use function strlen;
use function strpos;
use function strtolower;
/**
* @internal
*
* Populates file and class information so that analysis can work properly
*/
class Populator
{
/**
* @var ClassLikeStorageProvider
*/
private $classlike_storage_provider;
/**
* @var FileStorageProvider
*/
private $file_storage_provider;
/**
* @var array<lowercase-string, list<ClassLikeStorage>>
*/
private $invalid_class_storages = [];
/**
* @var Progress
*/
private $progress;
/**
* @var ClassLikes
*/
private $classlikes;
/**
* @var FileReferenceProvider
*/
private $file_reference_provider;
public function __construct(
ClassLikeStorageProvider $classlike_storage_provider,
FileStorageProvider $file_storage_provider,
ClassLikes $classlikes,
FileReferenceProvider $file_reference_provider,
Progress $progress
) {
$this->classlike_storage_provider = $classlike_storage_provider;
$this->file_storage_provider = $file_storage_provider;
$this->classlikes = $classlikes;
$this->progress = $progress;
$this->file_reference_provider = $file_reference_provider;
}
public function populateCodebase(): void
{
$this->progress->debug('ClassLikeStorage is populating' . "\n");
foreach ($this->classlike_storage_provider->getNew() as $class_storage) {
$this->populateClassLikeStorage($class_storage);
}
$this->progress->debug('ClassLikeStorage is populated' . "\n");
$this->progress->debug('FileStorage is populating' . "\n");
$all_file_storage = $this->file_storage_provider->getNew();
foreach ($all_file_storage as $file_storage) {
$this->populateFileStorage($file_storage);
}
foreach ($this->classlike_storage_provider->getNew() as $class_storage) {
foreach ($class_storage->dependent_classlikes as $dependent_classlike_lc => $_) {
try {
$dependee_storage = $this->classlike_storage_provider->get($dependent_classlike_lc);
} catch (InvalidArgumentException $exception) {
continue;
}
$class_storage->dependent_classlikes += $dependee_storage->dependent_classlikes;
}
}
$this->progress->debug('FileStorage is populated' . "\n");
ClassLikeStorageProvider::populated();
FileStorageProvider::populated();
}
private function populateClassLikeStorage(ClassLikeStorage $storage, array $dependent_classlikes = []): void
{
if ($storage->populated) {
return;
}
$fq_classlike_name_lc = strtolower($storage->name);
if (isset($dependent_classlikes[$fq_classlike_name_lc])) {
if ($storage->location && IssueBuffer::accepts(
new CircularReference(
'Circular reference discovered when loading ' . $storage->name,
$storage->location
)
)) {
// fall through
}
return;
}
$storage_provider = $this->classlike_storage_provider;
$dependent_classlikes[$fq_classlike_name_lc] = true;
$this->populateDataFromTraits($storage, $storage_provider, $dependent_classlikes);
if ($storage->parent_classes) {
$this->populateDataFromParentClass($storage, $storage_provider, $dependent_classlikes);
}
if (!strpos($fq_classlike_name_lc, '\\')
&& !isset($storage->methods['__construct'])
&& isset($storage->methods[$fq_classlike_name_lc])
&& !$storage->is_interface
&& !$storage->is_trait
) {
$storage->methods['__construct'] = $storage->methods[$fq_classlike_name_lc];
}
$this->populateInterfaceDataFromParentInterfaces($storage, $storage_provider, $dependent_classlikes);
$this->populateDataFromImplementedInterfaces($storage, $storage_provider, $dependent_classlikes);
if ($storage->location) {
$file_path = $storage->location->file_path;
foreach ($storage->parent_interfaces as $parent_interface_lc) {
$this->file_reference_provider->addFileInheritanceToClass($file_path, $parent_interface_lc);
}
foreach ($storage->parent_classes as $parent_class_lc => $_) {
$this->file_reference_provider->addFileInheritanceToClass($file_path, $parent_class_lc);
}
foreach ($storage->class_implements as $implemented_interface) {
$this->file_reference_provider->addFileInheritanceToClass(
$file_path,
strtolower($implemented_interface)
);
}
foreach ($storage->used_traits as $used_trait_lc => $_) {
$this->file_reference_provider->addFileInheritanceToClass($file_path, $used_trait_lc);
}
}
if ($storage->mutation_free || $storage->external_mutation_free) {
foreach ($storage->methods as $method) {
if (!$method->is_static && !$method->external_mutation_free) {
$method->mutation_free = $storage->mutation_free;
$method->external_mutation_free = $storage->external_mutation_free;
$method->immutable = $storage->mutation_free;
}
}
if ($storage->mutation_free) {
foreach ($storage->properties as $property) {
if (!$property->is_static) {
$property->readonly = true;
}
}
}
}
if ($storage->specialize_instance) {
foreach ($storage->methods as $method) {
if (!$method->is_static) {
$method->specialize_call = true;
}
}
}
if (!$storage->is_interface && !$storage->is_trait) {
foreach ($storage->methods as $method) {
if (strlen($storage->internal) > strlen($method->internal)) {
$method->internal = $storage->internal;
}
}
foreach ($storage->properties as $property) {
if (strlen($storage->internal) > strlen($property->internal)) {
$property->internal = $storage->internal;
}
}
}
$this->populateOverriddenMethods($storage);
$this->progress->debug('Have populated ' . $storage->name . "\n");
$storage->populated = true;
if (isset($this->invalid_class_storages[$fq_classlike_name_lc])) {
foreach ($this->invalid_class_storages[$fq_classlike_name_lc] as $dependency) {
$dependency->populated = false;
$this->populateClassLikeStorage($dependency, $dependent_classlikes);
}
unset($this->invalid_class_storages[$fq_classlike_name_lc]);
}
}
private function populateOverriddenMethods(
ClassLikeStorage $storage
): void {
$storage->documenting_method_ids = [];
foreach ($storage->methods as $method_name => $method_storage) {
if (isset($storage->overridden_method_ids[$method_name])) {
$overridden_method_ids = $storage->overridden_method_ids[$method_name];
$candidate_overridden_ids = null;
$declaring_class_storages = [];
foreach ($overridden_method_ids as $declaring_method_id) {
$declaring_class = $declaring_method_id->fq_class_name;
$declaring_class_storage
= $declaring_class_storages[$declaring_class]
= $this->classlike_storage_provider->get($declaring_class);
if ($candidate_overridden_ids === null) {
$candidate_overridden_ids
= ($declaring_class_storage->overridden_method_ids[$method_name] ?? [])
+ [$declaring_method_id->fq_class_name => $declaring_method_id];
} else {
$candidate_overridden_ids = array_intersect_key(
$candidate_overridden_ids,
($declaring_class_storage->overridden_method_ids[$method_name] ?? [])
+ [$declaring_method_id->fq_class_name => $declaring_method_id]
);
}
}
foreach ($overridden_method_ids as $declaring_method_id) {
$declaring_class = $declaring_method_id->fq_class_name;
$declaring_method_name = $declaring_method_id->method_name;
$declaring_class_storage = $declaring_class_storages[$declaring_class];
$declaring_method_storage = $declaring_class_storage->methods[$declaring_method_name];
if (($declaring_method_storage->has_docblock_param_types
|| $declaring_method_storage->has_docblock_return_type)
&& !$method_storage->has_docblock_param_types
&& !$method_storage->has_docblock_return_type
&& $method_storage->inherited_return_type !== null
) {
if (!isset($storage->documenting_method_ids[$method_name])
|| (string) $storage->documenting_method_ids[$method_name]
=== (string) $declaring_method_id
) {
$storage->documenting_method_ids[$method_name] = $declaring_method_id;
$method_storage->inherited_return_type = true;
} else {
if (in_array(
$storage->documenting_method_ids[$method_name]->fq_class_name,
$declaring_class_storage->parent_interfaces
)) {
$storage->documenting_method_ids[$method_name] = $declaring_method_id;
$method_storage->inherited_return_type = true;
} else {
$documenting_class_storage = $declaring_class_storages
[$storage->documenting_method_ids[$method_name]->fq_class_name];
if (!in_array(
$declaring_class,
$documenting_class_storage->parent_interfaces
) && $documenting_class_storage->is_interface
) {
unset($storage->documenting_method_ids[$method_name]);
$method_storage->inherited_return_type = null;
}
}
}
}
// tell the declaring class it's overridden downstream
$declaring_method_storage->overridden_downstream = true;
$declaring_method_storage->overridden_somewhere = true;
if ($declaring_method_storage->mutation_free_inferred) {
$declaring_method_storage->mutation_free = false;
$declaring_method_storage->external_mutation_free = false;
$declaring_method_storage->mutation_free_inferred = false;
}
if ($declaring_method_storage->throws
&& (!$method_storage->throws || $method_storage->inheritdoc)
) {
$method_storage->throws += $declaring_method_storage->throws;
}
}
}
}
}
private function populateDataFromTraits(
ClassLikeStorage $storage,
ClassLikeStorageProvider $storage_provider,
array $dependent_classlikes
): void {
foreach ($storage->used_traits as $used_trait_lc => $_) {
try {
$used_trait_lc = strtolower(
$this->classlikes->getUnAliasedName(
$used_trait_lc
)
);
$trait_storage = $storage_provider->get($used_trait_lc);
} catch (InvalidArgumentException $e) {
continue;
}
$this->populateClassLikeStorage($trait_storage, $dependent_classlikes);
$this->inheritMethodsFromParent($storage, $trait_storage);
$this->inheritPropertiesFromParent($storage, $trait_storage);
if ($trait_storage->template_types) {
$storage->template_extended_params[$trait_storage->name] = [];
if (isset($storage->template_extended_offsets[$trait_storage->name])) {
foreach ($storage->template_extended_offsets[$trait_storage->name] as $i => $type) {
$trait_template_type_names = array_keys($trait_storage->template_types);
$mapped_name = $trait_template_type_names[$i] ?? null;
if ($mapped_name) {
$storage->template_extended_params[$trait_storage->name][$mapped_name] = $type;
}
}
if ($trait_storage->template_extended_params) {
foreach ($trait_storage->template_extended_params as $t_storage_class => $type_map) {
foreach ($type_map as $i => $type) {
$storage->template_extended_params[$t_storage_class][$i] = self::extendType(
$type,
$storage
);
}
}
}
} else {
foreach ($trait_storage->template_types as $template_name => $template_type_map) {
foreach ($template_type_map as $template_type) {
$default_param = clone $template_type;
$default_param->from_docblock = false;
$storage->template_extended_params[$trait_storage->name][$template_name]
= $default_param;
}
}
}
} elseif ($trait_storage->template_extended_params) {
$storage->template_extended_params = array_merge(
$storage->template_extended_params ?: [],
$trait_storage->template_extended_params
);
}
$storage->pseudo_property_get_types += $trait_storage->pseudo_property_get_types;
$storage->pseudo_property_set_types += $trait_storage->pseudo_property_set_types;
$storage->pseudo_methods += $trait_storage->pseudo_methods;
$storage->declaring_pseudo_method_ids += $trait_storage->declaring_pseudo_method_ids;
}
}
private static function extendType(
Union $type,
ClassLikeStorage $storage
): Union {
$extended_types = [];
foreach ($type->getAtomicTypes() as $atomic_type) {
if ($atomic_type instanceof TTemplateParam) {
$referenced_type
= $storage->template_extended_params[$atomic_type->defining_class][$atomic_type->param_name]
?? null;
if ($referenced_type) {
foreach ($referenced_type->getAtomicTypes() as $atomic_referenced_type) {
if (!$atomic_referenced_type instanceof TTemplateParam) {
$extended_types[] = $atomic_referenced_type;
} else {
$extended_types[] = $atomic_type;
}
}
} else {
$extended_types[] = $atomic_type;
}
} else {
$extended_types[] = $atomic_type;
}
}
return new Union($extended_types);
}
private function populateDataFromParentClass(
ClassLikeStorage $storage,
ClassLikeStorageProvider $storage_provider,
array $dependent_classlikes
): void {
$parent_storage_class = reset($storage->parent_classes);
$parent_storage_class = strtolower(
$this->classlikes->getUnAliasedName(
$parent_storage_class
)
);
try {
$parent_storage = $storage_provider->get($parent_storage_class);
} catch (InvalidArgumentException $e) {
$this->progress->debug('Populator could not find dependency (' . __LINE__ . ")\n");
$storage->invalid_dependencies[] = $parent_storage_class;
$this->invalid_class_storages[$parent_storage_class][] = $storage;
return;
}
$this->populateClassLikeStorage($parent_storage, $dependent_classlikes);
$storage->parent_classes = array_merge($storage->parent_classes, $parent_storage->parent_classes);
if ($parent_storage->template_types) {
$storage->template_extended_params[$parent_storage->name] = [];
if (isset($storage->template_extended_offsets[$parent_storage->name])) {
foreach ($storage->template_extended_offsets[$parent_storage->name] as $i => $type) {
$parent_template_type_names = array_keys($parent_storage->template_types);
$mapped_name = $parent_template_type_names[$i] ?? null;
if ($mapped_name) {
$storage->template_extended_params[$parent_storage->name][$mapped_name] = $type;
}
}
if ($parent_storage->template_extended_params) {
foreach ($parent_storage->template_extended_params as $t_storage_class => $type_map) {
foreach ($type_map as $i => $type) {
$storage->template_extended_params[$t_storage_class][$i] = self::extendType(
$type,
$storage
);
}
}
}
} else {
foreach ($parent_storage->template_types as $template_name => $template_type_map) {
foreach ($template_type_map as $template_type) {
$default_param = clone $template_type;
$default_param->from_docblock = false;
$storage->template_extended_params[$parent_storage->name][$template_name]
= $default_param;
}
}
if ($parent_storage->template_extended_params) {
$storage->template_extended_params = array_merge(
$storage->template_extended_params,
$parent_storage->template_extended_params
);
}
}
} elseif ($parent_storage->template_extended_params) {
$storage->template_extended_params = array_merge(
$storage->template_extended_params ?: [],
$parent_storage->template_extended_params
);
}
$this->inheritMethodsFromParent($storage, $parent_storage);
$this->inheritPropertiesFromParent($storage, $parent_storage);
$storage->class_implements = array_merge($storage->class_implements, $parent_storage->class_implements);
$storage->invalid_dependencies = array_merge(
$storage->invalid_dependencies,
$parent_storage->invalid_dependencies
);
if ($parent_storage->has_visitor_issues) {
$storage->has_visitor_issues = true;
}
$storage->constants = array_merge(
array_filter(
$parent_storage->constants,
fn($constant) => $constant->visibility === ClassLikeAnalyzer::VISIBILITY_PUBLIC
|| $constant->visibility === ClassLikeAnalyzer::VISIBILITY_PROTECTED
),
$storage->constants
);
if ($parent_storage->preserve_constructor_signature) {
$storage->preserve_constructor_signature = true;
}
if (($parent_storage->namedMixins || $parent_storage->templatedMixins)
&& (!$storage->namedMixins || !$storage->templatedMixins)) {
$storage->mixin_declaring_fqcln = $parent_storage->mixin_declaring_fqcln;
if (!$storage->namedMixins) {
$storage->namedMixins = $parent_storage->namedMixins;
}
if (!$storage->templatedMixins) {
$storage->templatedMixins = $parent_storage->templatedMixins;
}
}
$storage->pseudo_property_get_types += $parent_storage->pseudo_property_get_types;
$storage->pseudo_property_set_types += $parent_storage->pseudo_property_set_types;
$parent_storage->dependent_classlikes[strtolower($storage->name)] = true;
$storage->pseudo_methods += $parent_storage->pseudo_methods;
$storage->declaring_pseudo_method_ids += $parent_storage->declaring_pseudo_method_ids;
}
private function populateInterfaceDataFromParentInterfaces(
ClassLikeStorage $storage,
ClassLikeStorageProvider $storage_provider,
array $dependent_classlikes
): void {
$parent_interfaces = [];
foreach ($storage->direct_interface_parents as $parent_interface_lc => $_) {
try {
$parent_interface_lc = strtolower(
$this->classlikes->getUnAliasedName(
$parent_interface_lc
)
);
$parent_interface_storage = $storage_provider->get($parent_interface_lc);
} catch (InvalidArgumentException $e) {
$this->progress->debug('Populator could not find dependency (' . __LINE__ . ")\n");
$storage->invalid_dependencies[] = $parent_interface_lc;
continue;
}
$this->populateClassLikeStorage($parent_interface_storage, $dependent_classlikes);
// copy over any constants
$storage->constants = array_merge(
array_filter(
$parent_interface_storage->constants,
fn($constant) => $constant->visibility === ClassLikeAnalyzer::VISIBILITY_PUBLIC
),
$storage->constants
);
$storage->invalid_dependencies = array_merge(
$storage->invalid_dependencies,
$parent_interface_storage->invalid_dependencies
);
if ($parent_interface_storage->template_types) {
$storage->template_extended_params[$parent_interface_storage->name] = [];
if (isset($storage->template_extended_offsets[$parent_interface_storage->name])) {
foreach ($storage->template_extended_offsets[$parent_interface_storage->name] as $i => $type) {
$parent_template_type_names = array_keys($parent_interface_storage->template_types);
$mapped_name = $parent_template_type_names[$i] ?? null;
if ($mapped_name) {
$storage->template_extended_params[$parent_interface_storage->name][$mapped_name] = $type;
}
}
if ($parent_interface_storage->template_extended_params) {
foreach ($parent_interface_storage->template_extended_params as $t_storage_class => $type_map) {
foreach ($type_map as $i => $type) {
$storage->template_extended_params[$t_storage_class][$i] = self::extendType(
$type,
$storage
);
}
}
}
} else {
foreach ($parent_interface_storage->template_types as $template_name => $template_type_map) {
foreach ($template_type_map as $template_type) {
$default_param = clone $template_type;
$default_param->from_docblock = false;
$storage->template_extended_params[$parent_interface_storage->name][$template_name]
= $default_param;
}
}
}
} elseif ($parent_interface_storage->template_extended_params) {
$storage->template_extended_params = array_merge(
$storage->template_extended_params ?: [],
$parent_interface_storage->template_extended_params
);
}
$parent_interfaces = array_merge($parent_interfaces, $parent_interface_storage->parent_interfaces);
$this->inheritMethodsFromParent($storage, $parent_interface_storage);
$storage->pseudo_methods += $parent_interface_storage->pseudo_methods;
$storage->declaring_pseudo_method_ids += $parent_interface_storage->declaring_pseudo_method_ids;
}
$storage->parent_interfaces = array_merge($parent_interfaces, $storage->parent_interfaces);
foreach ($storage->parent_interfaces as $parent_interface_lc => $_) {
try {
$parent_interface_lc = strtolower(
$this->classlikes->getUnAliasedName(
$parent_interface_lc
)
);
$parent_interface_storage = $storage_provider->get($parent_interface_lc);
} catch (InvalidArgumentException $e) {
continue;
}
$parent_interface_storage->dependent_classlikes[strtolower($storage->name)] = true;
}
}
private function populateDataFromImplementedInterfaces(
ClassLikeStorage $storage,
ClassLikeStorageProvider $storage_provider,
array $dependent_classlikes
): void {
$extra_interfaces = [];
foreach ($storage->direct_class_interfaces as $implemented_interface_lc => $_) {
try {
$implemented_interface_lc = strtolower(
$this->classlikes->getUnAliasedName(
$implemented_interface_lc
)
);
$implemented_interface_storage = $storage_provider->get($implemented_interface_lc);
} catch (InvalidArgumentException $e) {
$this->progress->debug('Populator could not find dependency (' . __LINE__ . ")\n");
$storage->invalid_dependencies[] = $implemented_interface_lc;
continue;
}
$this->populateClassLikeStorage($implemented_interface_storage, $dependent_classlikes);
// copy over any constants
$storage->constants = array_merge(
array_filter(
$implemented_interface_storage->constants,
fn($constant) => $constant->visibility === ClassLikeAnalyzer::VISIBILITY_PUBLIC
),
$storage->constants
);
$storage->invalid_dependencies = array_merge(
$storage->invalid_dependencies,
$implemented_interface_storage->invalid_dependencies
);
if ($implemented_interface_storage->template_types) {
$storage->template_extended_params[$implemented_interface_storage->name] = [];
if (isset($storage->template_extended_offsets[$implemented_interface_storage->name])) {
foreach ($storage->template_extended_offsets[$implemented_interface_storage->name] as $i => $type) {
$parent_template_type_names = array_keys($implemented_interface_storage->template_types);
$mapped_name = $parent_template_type_names[$i] ?? null;
if ($mapped_name) {
$storage->template_extended_params[$implemented_interface_storage->name][$mapped_name]
= $type;
}
}
if ($implemented_interface_storage->template_extended_params) {
foreach ($implemented_interface_storage->template_extended_params as $e_i => $type_map) {
foreach ($type_map as $i => $type) {
$storage->template_extended_params[$e_i][$i] = self::extendType(
$type,
$storage
);
}
}
}
} else {
foreach ($implemented_interface_storage->template_types as $template_name => $template_type_map) {
foreach ($template_type_map as $template_type) {
$default_param = clone $template_type;
$default_param->from_docblock = false;
$storage->template_extended_params[$implemented_interface_storage->name][$template_name]
= $default_param;
}
}
}
} elseif ($implemented_interface_storage->template_extended_params) {
$storage->template_extended_params = array_merge(
$storage->template_extended_params ?: [],
$implemented_interface_storage->template_extended_params
);
}
$extra_interfaces = array_merge($extra_interfaces, $implemented_interface_storage->parent_interfaces);
}
$storage->class_implements = array_merge($storage->class_implements, $extra_interfaces);
$interface_method_implementers = [];
foreach ($storage->class_implements as $implemented_interface_lc => $_) {
try {
$implemented_interface = strtolower(
$this->classlikes->getUnAliasedName(
$implemented_interface_lc
)
);
$implemented_interface_storage = $storage_provider->get($implemented_interface);
} catch (InvalidArgumentException $e) {
continue;
}
$implemented_interface_storage->dependent_classlikes[strtolower($storage->name)] = true;
foreach ($implemented_interface_storage->methods as $method_name => $method) {
if ($method->visibility === ClassLikeAnalyzer::VISIBILITY_PUBLIC) {
$interface_method_implementers[$method_name][] = new MethodIdentifier(
$implemented_interface_storage->name,
$method_name
);
}
}
}
foreach ($interface_method_implementers as $method_name => $interface_method_ids) {
if (count($interface_method_ids) === 1) {
if (isset($storage->methods[$method_name])) {
$method_storage = $storage->methods[$method_name];
if ($method_storage->signature_return_type
&& !$method_storage->signature_return_type->isVoid()
&& $method_storage->return_type === $method_storage->signature_return_type
) {
$interface_fqcln = $interface_method_ids[0]->fq_class_name;
$interface_storage = $storage_provider->get($interface_fqcln);
if (isset($interface_storage->methods[$method_name])) {
$interface_method_storage = $interface_storage->methods[$method_name];
if ($interface_method_storage->throws
&& (!$method_storage->throws || $method_storage->inheritdoc)
) {
$method_storage->throws += $interface_method_storage->throws;
}
}
}
}
}
foreach ($interface_method_ids as $interface_method_id) {
$storage->overridden_method_ids[$method_name][$interface_method_id->fq_class_name]
= $interface_method_id;
}
}
}
/**
* @param array<string, bool> $dependent_file_paths
*/
private function populateFileStorage(FileStorage $storage, array $dependent_file_paths = []): void
{
if ($storage->populated) {
return;
}
$file_path_lc = strtolower($storage->file_path);
if (isset($dependent_file_paths[$file_path_lc])) {
return;
}
$dependent_file_paths[$file_path_lc] = true;
$all_required_file_paths = $storage->required_file_paths;
foreach ($storage->required_file_paths as $included_file_path => $_) {
try {
$included_file_storage = $this->file_storage_provider->get($included_file_path);
} catch (InvalidArgumentException $e) {
continue;
}
$this->populateFileStorage($included_file_storage, $dependent_file_paths);
$all_required_file_paths = $all_required_file_paths + $included_file_storage->required_file_paths;
}
foreach ($all_required_file_paths as $included_file_path => $_) {
try {
$included_file_storage = $this->file_storage_provider->get($included_file_path);
} catch (InvalidArgumentException $e) {
continue;
}
$storage->declaring_function_ids = array_merge(
$included_file_storage->declaring_function_ids,
$storage->declaring_function_ids
);
$storage->declaring_constants = array_merge(
$included_file_storage->declaring_constants,
$storage->declaring_constants
);
}
foreach ($storage->referenced_classlikes as $fq_class_name) {
try {
$classlike_storage = $this->classlike_storage_provider->get($fq_class_name);
} catch (InvalidArgumentException $e) {
continue;
}
if (!$classlike_storage->location) {
continue;
}
try {
$included_file_storage = $this->file_storage_provider->get($classlike_storage->location->file_path);
} catch (InvalidArgumentException $e) {
continue;
}
foreach ($classlike_storage->used_traits as $used_trait) {
try {
$trait_storage = $this->classlike_storage_provider->get($used_trait);
} catch (InvalidArgumentException $e) {
continue;
}
if (!$trait_storage->location) {
continue;
}
try {
$included_trait_file_storage = $this->file_storage_provider->get(
$trait_storage->location->file_path
);
} catch (InvalidArgumentException $e) {
continue;
}
$storage->declaring_function_ids = array_merge(
$included_trait_file_storage->declaring_function_ids,
$storage->declaring_function_ids
);
}
$storage->declaring_function_ids = array_merge(
$included_file_storage->declaring_function_ids,
$storage->declaring_function_ids
);
}
$storage->required_file_paths = $all_required_file_paths;
foreach ($all_required_file_paths as $required_file_path) {
try {
$required_file_storage = $this->file_storage_provider->get($required_file_path);
} catch (InvalidArgumentException $e) {
continue;
}
$required_file_storage->required_by_file_paths += [$file_path_lc => $storage->file_path];
}
foreach ($storage->required_classes as $required_classlike) {
try {
$classlike_storage = $this->classlike_storage_provider->get($required_classlike);
} catch (InvalidArgumentException $e) {
continue;
}
if (!$classlike_storage->location) {
continue;
}
try {
$required_file_storage = $this->file_storage_provider->get($classlike_storage->location->file_path);
} catch (InvalidArgumentException $e) {
continue;
}
$required_file_storage->required_by_file_paths += [$file_path_lc => $storage->file_path];
}
$storage->populated = true;
}
protected function inheritMethodsFromParent(
ClassLikeStorage $storage,
ClassLikeStorage $parent_storage
): void {
$fq_class_name = $storage->name;
$fq_class_name_lc = strtolower($fq_class_name);
if ($parent_storage->sealed_methods) {
$storage->sealed_methods = true;
}
// register where they appear (can never be in a trait)
foreach ($parent_storage->appearing_method_ids as $method_name_lc => $appearing_method_id) {
$aliased_method_names = [$method_name_lc];
if ($parent_storage->is_trait
&& $storage->trait_alias_map
) {
$aliased_method_names = array_merge(
$aliased_method_names,
array_keys($storage->trait_alias_map, $method_name_lc, true)
);
}
foreach ($aliased_method_names as $aliased_method_name) {
if (isset($storage->appearing_method_ids[$aliased_method_name])) {
continue;
}
$implemented_method_id = new MethodIdentifier(
$fq_class_name,
$aliased_method_name
);
$storage->appearing_method_ids[$aliased_method_name] =
$parent_storage->is_trait ? $implemented_method_id : $appearing_method_id;
$this_method_id = $fq_class_name_lc . '::' . $method_name_lc;
if (isset($storage->methods[$aliased_method_name])) {
$storage->potential_declaring_method_ids[$aliased_method_name] = [$this_method_id => true];
} else {
if (isset($parent_storage->potential_declaring_method_ids[$aliased_method_name])) {
$storage->potential_declaring_method_ids[$aliased_method_name]
= $parent_storage->potential_declaring_method_ids[$aliased_method_name];
}
$storage->potential_declaring_method_ids[$aliased_method_name][$this_method_id] = true;
$parent_method_id = strtolower($parent_storage->name) . '::' . $method_name_lc;
$storage->potential_declaring_method_ids[$aliased_method_name][$parent_method_id] = true;
}
}
}
// register where they're declared
foreach ($parent_storage->inheritable_method_ids as $method_name_lc => $declaring_method_id) {
if ($method_name_lc !== '__construct'
|| $parent_storage->preserve_constructor_signature
) {
if ($parent_storage->is_trait) {
$declaring_class = $declaring_method_id->fq_class_name;
$declaring_class_storage = $this->classlike_storage_provider->get($declaring_class);
if (isset($declaring_class_storage->methods[$method_name_lc])
&& $declaring_class_storage->methods[$method_name_lc]->abstract
) {
$storage->overridden_method_ids[$method_name_lc][$declaring_method_id->fq_class_name]
= $declaring_method_id;
}
} else {
$storage->overridden_method_ids[$method_name_lc][$declaring_method_id->fq_class_name]
= $declaring_method_id;
}
if (isset($parent_storage->overridden_method_ids[$method_name_lc])
&& isset($storage->overridden_method_ids[$method_name_lc])
) {
$storage->overridden_method_ids[$method_name_lc]
+= $parent_storage->overridden_method_ids[$method_name_lc];
}
}
$aliased_method_names = [$method_name_lc];
if ($parent_storage->is_trait
&& $storage->trait_alias_map
) {
$aliased_method_names = array_merge(
$aliased_method_names,
array_keys($storage->trait_alias_map, $method_name_lc, true)
);
}
foreach ($aliased_method_names as $aliased_method_name) {
if (isset($storage->declaring_method_ids[$aliased_method_name])) {
$implementing_method_id = $storage->declaring_method_ids[$aliased_method_name];
$implementing_class_storage = $this->classlike_storage_provider->get(
$implementing_method_id->fq_class_name
);
if (!$implementing_class_storage->methods[$implementing_method_id->method_name]->abstract
|| !empty($storage->methods[$implementing_method_id->method_name]->abstract)
) {
continue;
}
}
$storage->declaring_method_ids[$aliased_method_name] = $declaring_method_id;
$storage->inheritable_method_ids[$aliased_method_name] = $declaring_method_id;
}
}
}
private function inheritPropertiesFromParent(
ClassLikeStorage $storage,
ClassLikeStorage $parent_storage
): void {
if ($parent_storage->sealed_properties) {
$storage->sealed_properties = true;
}
// register where they appear (can never be in a trait)
foreach ($parent_storage->appearing_property_ids as $property_name => $appearing_property_id) {
if (isset($storage->appearing_property_ids[$property_name])) {
continue;
}
if (!$parent_storage->is_trait
&& isset($parent_storage->properties[$property_name])
&& $parent_storage->properties[$property_name]->visibility === ClassLikeAnalyzer::VISIBILITY_PRIVATE
) {
continue;
}
$implemented_property_id = $storage->name . '::$' . $property_name;
$storage->appearing_property_ids[$property_name] =
$parent_storage->is_trait ? $implemented_property_id : $appearing_property_id;
}
// register where they're declared
foreach ($parent_storage->declaring_property_ids as $property_name => $declaring_property_class) {
if (isset($storage->declaring_property_ids[$property_name])) {
continue;
}
if (!$parent_storage->is_trait
&& isset($parent_storage->properties[$property_name])
&& $parent_storage->properties[$property_name]->visibility === ClassLikeAnalyzer::VISIBILITY_PRIVATE
) {
continue;
}
$storage->declaring_property_ids[$property_name] = $declaring_property_class;
}
// register where they're declared
foreach ($parent_storage->inheritable_property_ids as $property_name => $inheritable_property_id) {
if (!$parent_storage->is_trait
&& isset($parent_storage->properties[$property_name])
&& $parent_storage->properties[$property_name]->visibility === ClassLikeAnalyzer::VISIBILITY_PRIVATE
) {
continue;
}
if (!$parent_storage->is_trait) {
$storage->overridden_property_ids[$property_name][] = $inheritable_property_id;
}
$storage->inheritable_property_ids[$property_name] = $inheritable_property_id;
}
}
}
| 1 | 12,069 | was there a reason to not loop over parent classes here? | vimeo-psalm | php |
@@ -1,6 +1,8 @@
-import AuthenticatedRouteMixin from 'ember-simple-auth/mixins/authenticated-route-mixin';
import Ember from 'ember';
+import AuthenticatedRouteMixin from 'ember-simple-auth/mixins/authenticated-route-mixin';
import UserSession from 'hospitalrun/mixins/user-session';
+
+const { computed } = Ember;
/**
* Abstract route for top level modules (eg patients, inventory, users)
*/ | 1 | import AuthenticatedRouteMixin from 'ember-simple-auth/mixins/authenticated-route-mixin';
import Ember from 'ember';
import UserSession from 'hospitalrun/mixins/user-session';
/**
* Abstract route for top level modules (eg patients, inventory, users)
*/
export default Ember.Route.extend(UserSession, AuthenticatedRouteMixin, {
addCapability: null,
additionalModels: null,
allowSearch: true,
currentScreenTitle: null,
moduleName: null,
newButtonText: null,
sectionTitle: null,
subActions: null,
editPath: function() {
var module = this.get('moduleName');
return module + '.edit';
}.property('moduleName'),
deletePath: function() {
var module = this.get('moduleName');
return module + '.delete';
}.property('moduleName'),
newButtonAction: function() {
if (this.currentUserCan(this.get('addCapability'))) {
return 'newItem';
} else {
return null;
}
}.property(),
searchRoute: function() {
var module = this.get('moduleName');
return '/' + module + '/search';
}.property('moduleName'),
actions: {
allItems: function() {
this.transitionTo(this.get('moduleName') + '.index');
},
deleteItem: function(item) {
var deletePath = this.get('deletePath');
this.send('openModal', deletePath, item);
},
editItem: function(item) {
this.transitionTo(this.get('editPath'), item);
},
newItem: function() {
if (this.currentUserCan(this.get('addCapability'))) {
this.transitionTo(this.get('editPath'), 'new');
}
},
/**
* Action to set items in the section header.
* @param details an object containing details to set on the section header.
* The following parameters are supported:
* - currentScreenTitle - The current screen title.
* - newButtonText - The text to display for the "new" button.
* - newButtonAction - The action to fire for the "new" button.
*/
setSectionHeader: function(details) {
var currentController = this.controllerFor(this.get('moduleName'));
currentController.setProperties(details);
}
},
/**
* Make sure the user has permissions to the module; if not reroute to index.
*/
beforeModel: function(transition) {
var moduleName = this.get('moduleName');
if (this.currentUserCan(moduleName)) {
return this._super(transition);
} else {
this.transitionTo('index');
return Ember.RSVP.reject('Not available');
}
},
/**
* Override this function to generate an id for a new record
* @return a promise that will resolved to a generated id;default is null which means that an
* id will be automatically generated via Ember data.
*/
generateId: function() {
return Ember.RSVP.resolve(null);
},
model: function() {
if (!Ember.isEmpty(this.additionalModels)) {
return new Ember.RSVP.Promise(function(resolve, reject) {
var promises = this.additionalModels.map(function(modelMap) {
if (modelMap.findArgs.length === 1) {
return this.store.findAll.apply(this.store, modelMap.findArgs);
} else {
return this.store.find.apply(this.store, modelMap.findArgs);
}
}.bind(this));
Ember.RSVP.allSettled(promises, 'All additional Models for ' + this.get('moduleName')).then(function(array) {
array.forEach(function(item, index) {
if (item.state === 'fulfilled') {
this.set(this.additionalModels[index].name, item.value);
}
}.bind(this));
resolve();
}.bind(this), reject);
}.bind(this), 'Additional Models for' + this.get('moduleName'));
} else {
return Ember.RSVP.resolve();
}
},
renderTemplate: function() {
this.render('section');
},
setupController: function(controller, model) {
var navigationController = this.controllerFor('navigation');
if (this.get('allowSearch') === true) {
navigationController.set('allowSearch', true);
navigationController.set('searchRoute', this.get('searchRoute'));
} else {
navigationController.set('allowSearch', false);
}
var currentController = this.controllerFor(this.get('moduleName'));
var propsToSet = this.getProperties('additionalButtons', 'currentScreenTitle', 'newButtonAction', 'newButtonText', 'sectionTitle', 'subActions');
currentController.setProperties(propsToSet);
if (!Ember.isEmpty(this.additionalModels)) {
this.additionalModels.forEach(function(item) {
controller.set(item.name, this.get(item.name));
}.bind(this));
}
this._super(controller, model);
}
});
| 1 | 13,056 | @billybonks Why move `import Ember from 'ember';` to the top? | HospitalRun-hospitalrun-frontend | js |
@@ -1,7 +1,16 @@
class Approval < ActiveRecord::Base
- include ThreeStateWorkflow
-
- workflow_column :status
+ include WorkflowModel
+ workflow do
+ state :pending do
+ event :make_actionable, transitions_to: :actionable
+ end
+ state :actionable do
+ event :approve, transitions_to: :approved
+ event :reject, transitions_to: :rejected
+ end
+ state :approved
+ state :rejected
+ end
belongs_to :proposal
has_one :cart, through: :proposal | 1 | class Approval < ActiveRecord::Base
include ThreeStateWorkflow
workflow_column :status
belongs_to :proposal
has_one :cart, through: :proposal
belongs_to :user
has_one :api_token, -> { fresh }
has_one :approval_group, through: :cart
has_one :user_role, -> { where(approval_group_id: cart.approval_group.id, user_id: self.user_id) }
delegate :full_name, :email_address, :to => :user, :prefix => true
delegate :approvals, :to => :cart, :prefix => true
acts_as_list scope: :proposal
# TODO validates_uniqueness_of :user_id, scope: cart_id
self.statuses.each do |status|
scope status, -> { where(status: status) }
end
scope :received, -> { approvable.where.not(status: 'pending') }
default_scope { order('position ASC') }
# TODO remove
def cart_id
self.proposal.cart.id
end
# TODO we should probably store this value
def approved_at
if self.approved?
self.updated_at
else
nil
end
end
# Used by the state machine
def on_rejected_entry(new_state, event)
self.proposal.reject!
end
# Used by the state machine
def on_approved_entry(new_state, event)
self.proposal.partial_approve!
Dispatcher.on_approval_approved(self)
end
end
| 1 | 13,117 | Everything's so simple (a `ThreeStateWorkflow`) until it isn't :smirk: | 18F-C2 | rb |
@@ -57,8 +57,9 @@ class OperationTests(ComparisonTestCase):
def test_image_contours(self):
img = Image(np.array([[0, 1, 0], [3, 4, 5.], [6, 7, 8]]))
op_contours = contours(img, levels=[0.5])
- contour = Contours([[(-0.5, 0.416667, 0.5), (-0.25, 0.5, 0.5)],
- [(0.25, 0.5, 0.5), (0.5, 0.45, 0.5)]],
+ contour = Contours([[(-0.5, 0.416667, 0.5), (-0.25, 0.5, 0.5),
+ (np.NaN, np.NaN, 0.5), (0.25, 0.5, 0.5),
+ (0.5, 0.45, 0.5)]],
vdims=img.vdims)
self.assertEqual(op_contours, contour)
| 1 | import numpy as np
from nose.plugins.attrib import attr
from holoviews import (HoloMap, NdOverlay, NdLayout, GridSpace, Image,
Contours, Polygons, Points, Histogram, Curve, Area)
from holoviews.element.comparison import ComparisonTestCase
from holoviews.operation.element import (operation, transform, threshold,
gradient, contours, histogram,
interpolate_curve)
class OperationTests(ComparisonTestCase):
"""
Tests allowable data formats when constructing
the basic Element types.
"""
def test_operation_element(self):
img = Image(np.random.rand(10, 10))
op_img = operation(img, op=lambda x, k: x.clone(x.data*2))
self.assertEqual(op_img, img.clone(img.data*2, group='Operation'))
def test_operation_ndlayout(self):
ndlayout = NdLayout({i: Image(np.random.rand(10, 10)) for i in range(10)})
op_ndlayout = operation(ndlayout, op=lambda x, k: x.clone(x.data*2))
doubled = ndlayout.clone({k: v.clone(v.data*2, group='Operation')
for k, v in ndlayout.items()})
self.assertEqual(op_ndlayout, doubled)
def test_operation_grid(self):
grid = GridSpace({i: Image(np.random.rand(10, 10)) for i in range(10)}, kdims=['X'])
op_grid = operation(grid, op=lambda x, k: x.clone(x.data*2))
doubled = grid.clone({k: v.clone(v.data*2, group='Operation')
for k, v in grid.items()})
self.assertEqual(op_grid, doubled)
def test_operation_holomap(self):
hmap = HoloMap({1: Image(np.random.rand(10, 10))})
op_hmap = operation(hmap, op=lambda x, k: x.clone(x.data*2))
self.assertEqual(op_hmap.last, hmap.last.clone(hmap.last.data*2, group='Operation'))
def test_image_transform(self):
img = Image(np.random.rand(10, 10))
op_img = transform(img, operator=lambda x: x*2)
self.assertEqual(op_img, img.clone(img.data*2, group='Transform'))
def test_image_threshold(self):
img = Image(np.array([[0, 1, 0], [3, 4, 5.]]))
op_img = threshold(img)
self.assertEqual(op_img, img.clone(np.array([[0, 1, 0], [1, 1, 1]]), group='Threshold'))
def test_image_gradient(self):
img = Image(np.array([[0, 1, 0], [3, 4, 5.], [6, 7, 8]]))
op_img = gradient(img)
self.assertEqual(op_img, img.clone(np.array([[3.162278, 3.162278], [3.162278, 3.162278]]), group='Gradient'))
@attr(optional=1) # Requires matplotlib
def test_image_contours(self):
img = Image(np.array([[0, 1, 0], [3, 4, 5.], [6, 7, 8]]))
op_contours = contours(img, levels=[0.5])
contour = Contours([[(-0.5, 0.416667, 0.5), (-0.25, 0.5, 0.5)],
[(0.25, 0.5, 0.5), (0.5, 0.45, 0.5)]],
vdims=img.vdims)
self.assertEqual(op_contours, contour)
@attr(optional=1) # Requires matplotlib
def test_image_contours_filled(self):
img = Image(np.array([[0, 1, 0], [3, 4, 5.], [6, 7, 8]]))
op_contours = contours(img, filled=True, levels=[2, 2.5])
data = [[(0., 0.333333, 2.25), (0.5, 0.3, 2.25), (0.5, 0.25, 2.25), (0., 0.25, 2.25),
(-0.5, 0.08333333, 2.25), (-0.5, 0.16666667, 2.25), (0., 0.33333333, 2.25)]]
polys = Polygons(data, vdims=img.vdims)
self.assertEqual(op_contours, polys)
def test_points_histogram(self):
points = Points([float(i) for i in range(10)])
op_hist = histogram(points, num_bins=3)
# Make sure that the name and label are as desired
op_freq_dim = op_hist.get_dimension('x_frequency')
self.assertEqual(op_freq_dim.label, 'x Frequency')
# Because the operation labels are now different from the
# default Element label, change back before comparing.
op_hist = op_hist.redim(x_frequency='Frequency')
hist = Histogram(([0.1, 0.1, 0.133333], [0, 3, 6, 9]))
self.assertEqual(op_hist, hist)
def test_points_histogram_bin_range(self):
points = Points([float(i) for i in range(10)])
op_hist = histogram(points, num_bins=3, bin_range=(0, 3))
# Make sure that the name and label are as desired
op_freq_dim = op_hist.get_dimension('x_frequency')
self.assertEqual(op_freq_dim.label, 'x Frequency')
# Because the operation labels are now different from the
# default Element label, change back before comparing.
op_hist = op_hist.redim(x_frequency='Frequency')
hist = Histogram(([0.25, 0.25, 0.5], [0., 1., 2., 3.]))
self.assertEqual(op_hist, hist)
def test_points_histogram_not_normed(self):
points = Points([float(i) for i in range(10)])
op_hist = histogram(points, num_bins=3, normed=False)
# Make sure that the name and label are as desired
op_freq_dim = op_hist.get_dimension('x_frequency')
self.assertEqual(op_freq_dim.label, 'x Frequency')
# Because the operation labels are now different from the
# default Element label, change back before comparing.
op_hist = op_hist.redim(x_frequency='Frequency')
hist = Histogram(([3, 3, 4], [0, 3, 6, 9]))
self.assertEqual(op_hist, hist)
def test_points_histogram_weighted(self):
points = Points([float(i) for i in range(10)])
op_hist = histogram(points, num_bins=3, weight_dimension='y')
hist = Histogram(([0.022222, 0.088889, 0.222222], [0, 3, 6, 9]), vdims=['y'])
self.assertEqual(op_hist, hist)
def test_points_histogram_mean_weighted(self):
points = Points([float(i) for i in range(10)])
op_hist = histogram(points, num_bins=3, weight_dimension='y', mean_weighted=True)
hist = Histogram(([1., 4., 7.5], [0, 3, 6, 9]), vdims=['y'])
self.assertEqual(op_hist, hist)
def test_interpolate_curve_pre(self):
interpolated = interpolate_curve(Curve([0, 0.5, 1]), interpolation='steps-pre')
curve = Curve([(0, 0), (0, 0.5), (1, 0.5), (1, 1), (2, 1)])
self.assertEqual(interpolated, curve)
def test_interpolate_curve_mid(self):
interpolated = interpolate_curve(Curve([0, 0.5, 1]), interpolation='steps-mid')
curve = Curve([(0, 0), (0.5, 0), (0.5, 0.5), (1.5, 0.5), (1.5, 1), (2, 1)])
self.assertEqual(interpolated, curve)
def test_interpolate_curve_post(self):
interpolated = interpolate_curve(Curve([0, 0.5, 1]), interpolation='steps-post')
curve = Curve([(0, 0), (1, 0), (1, 0.5), (2, 0.5), (2, 1)])
self.assertEqual(interpolated, curve)
def test_stack_area_overlay(self):
areas = Area([1, 2, 3]) * Area([1, 2, 3])
stacked = Area.stack(areas)
area1 = Area(([0, 1, 2], [1, 2, 3], [0, 0, 0]), vdims=['y', 'Baseline'])
area2 = Area(([0, 1, 2], [2, 4, 6], [1, 2, 3]), vdims=['y', 'Baseline'])
self.assertEqual(stacked, area1 * area2)
def test_stack_area_ndoverlay(self):
areas = NdOverlay([(0, Area([1, 2, 3])), (1, Area([1, 2, 3]))])
stacked = Area.stack(areas)
area1 = Area(([0, 1, 2], [1, 2, 3], [0, 0, 0]), vdims=['y', 'Baseline'])
area2 = Area(([0, 1, 2], [2, 4, 6], [1, 2, 3]), vdims=['y', 'Baseline'])
self.assertEqual(stacked, NdOverlay([(0, area1), (1, area2)]))
def test_pre_and_postprocess_hooks(self):
pre_backup = operation._preprocess_hooks
post_backup = operation._postprocess_hooks
operation._preprocess_hooks = [lambda op, x: {'label': str(x.id)}]
operation._postprocess_hooks = [lambda op, x, **kwargs: x.clone(**kwargs)]
curve = Curve([1, 2, 3])
self.assertEqual(operation(curve).label, str(curve.id))
operation._preprocess_hooks = pre_backup
operation._postprocess_hooks = post_backup
| 1 | 20,555 | I don't quite understand where the NaNs come from... | holoviz-holoviews | py |
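The changed expectation in that patch merges what used to be two separate contour paths into a single path whose sub-paths are separated by a (NaN, NaN, level) vertex, which is likely where the NaNs in the question above come from. Below is a minimal NumPy sketch of that joining convention; the join_subpaths helper is purely illustrative and is not holoviews' actual implementation.

import numpy as np

# Illustrative only: join several contour sub-paths at one level into a
# single vertex array, inserting a (NaN, NaN, level) vertex between
# consecutive sub-paths so they render as disconnected pieces of one path.
def join_subpaths(subpaths, level):
    vertices = []
    for i, path in enumerate(subpaths):
        if i > 0:
            vertices.append((np.nan, np.nan, level))  # path separator
        vertices.extend((x, y, level) for x, y in path)
    return np.array(vertices)

subpaths = [[(-0.5, 0.416667), (-0.25, 0.5)],
            [(0.25, 0.5), (0.5, 0.45)]]
print(join_subpaths(subpaths, 0.5))
# 5 rows, with the middle row equal to [nan, nan, 0.5], matching the new test data
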
@@ -45,8 +45,8 @@ func LoadTestConfig(addr string, allowMultiConnsPerHost bool) *config.Network {
config := config.Config{
NodeType: config.DelegateType,
Network: config.Network{
- Host: host,
- Port: port,
+ Host: host,
+ Port: port,
MsgLogsCleaningInterval: 2 * time.Second,
MsgLogRetention: 10 * time.Second,
HealthCheckInterval: time.Second, | 1 | // Copyright (c) 2018 IoTeX
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package network
import (
"fmt"
"io/ioutil"
"math"
"math/rand"
"net"
"os"
"reflect"
"sort"
"strconv"
"strings"
"testing"
"time"
"github.com/golang/protobuf/proto"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"golang.org/x/net/context"
"gopkg.in/yaml.v2"
"github.com/iotexproject/iotex-core/config"
"github.com/iotexproject/iotex-core/dispatcher"
"github.com/iotexproject/iotex-core/network/node"
"github.com/iotexproject/iotex-core/proto"
"github.com/iotexproject/iotex-core/testutil"
)
func LoadTestConfig(addr string, allowMultiConnsPerHost bool) *config.Network {
host, portStr, err := net.SplitHostPort(addr)
if err != nil {
host = "127.0.0.1"
}
port, err := strconv.Atoi(portStr)
if err != nil {
port = 0
}
config := config.Config{
NodeType: config.DelegateType,
Network: config.Network{
Host: host,
Port: port,
MsgLogsCleaningInterval: 2 * time.Second,
MsgLogRetention: 10 * time.Second,
HealthCheckInterval: time.Second,
SilentInterval: 5 * time.Second,
PeerMaintainerInterval: time.Second,
NumPeersLowerBound: 5,
NumPeersUpperBound: 5,
AllowMultiConnsPerHost: allowMultiConnsPerHost,
RateLimitEnabled: false,
PingInterval: time.Second,
BootstrapNodes: []string{"127.0.0.1:10001", "127.0.0.1:10002"},
MaxMsgSize: 1024 * 1024 * 10,
PeerDiscovery: true,
TTL: 3,
},
}
return &config.Network
}
func LoadTestConfigWithTLSEnabled(addr string, allowMultiConnsPerHost bool) *config.Network {
cfg := LoadTestConfig(addr, allowMultiConnsPerHost)
cfg.TLSEnabled = true
cfg.CACrtPath = "../test/assets/ssl/iotex.io.crt"
cfg.PeerCrtPath = "../test/assets/ssl/127.0.0.1.crt"
cfg.PeerKeyPath = "../test/assets/ssl/127.0.0.1.key"
return cfg
}
type MockDispatcher struct {
}
func (d *MockDispatcher) AddSubscriber(uint32, dispatcher.Subscriber) {}
func (d *MockDispatcher) Start(_ context.Context) error {
return nil
}
func (d *MockDispatcher) Stop(_ context.Context) error {
return nil
}
func (d *MockDispatcher) HandleBroadcast(uint32, proto.Message, chan bool) {
}
func (d *MockDispatcher) HandleTell(uint32, net.Addr, proto.Message, chan bool) {
}
type MockDispatcher1 struct {
MockDispatcher
Count uint32
}
func (d1 *MockDispatcher1) AddSubscriber(uint32, dispatcher.Subscriber) {}
func (d1 *MockDispatcher1) HandleBroadcast(uint32, proto.Message, chan bool) {
d1.Count++
}
func TestOverlay(t *testing.T) {
ctx := context.Background()
if testing.Short() {
t.Skip("Skipping the IotxOverlay test in short mode.")
}
size := 10
dps := []*MockDispatcher1{}
nodes := []*IotxOverlay{}
for i := 0; i < size; i++ {
dp := &MockDispatcher1{}
dps = append(dps, dp)
var config *config.Network
if i == 0 {
config = LoadTestConfig("127.0.0.1:10001", true)
} else if i == 1 {
config = LoadTestConfig("127.0.0.1:10002", true)
} else {
config = LoadTestConfig("", true)
}
node := NewOverlay(config)
node.AttachDispatcher(dp)
err := node.Start(ctx)
assert.NoError(t, err)
nodes = append(nodes, node)
}
defer func() {
for _, node := range nodes {
err := node.Stop(ctx)
assert.NoError(t, err)
}
}()
time.Sleep(10 * time.Second)
for i := 0; i < size; i++ {
assert.True(t, LenSyncMap(nodes[i].PM.Peers) >= nodes[i].PM.NumPeersLowerBound)
}
err := nodes[0].Broadcast(config.Default.Chain.ID, &iproto.ActionPb{})
assert.NoError(t, err)
time.Sleep(5 * time.Second)
for i, dp := range dps {
if i == 0 {
assert.Equal(t, uint32(0), dp.Count)
} else {
assert.Equal(t, uint32(1), dp.Count)
}
}
}
type MockDispatcher2 struct {
MockDispatcher
T *testing.T
Count uint32
}
func (d2 *MockDispatcher2) AddSubscriber(uint32, dispatcher.Subscriber) {}
func (d2 *MockDispatcher2) HandleTell(chainID uint32, sender net.Addr, message proto.Message, done chan bool) {
// Handle Tx Msg
msgType, err := iproto.GetTypeFromProtoMsg(message)
/*
switch (msgType) {
case iproto.MsgTxProtoMsgType:
break
default:
break
}
*/
assert.True(d2.T, strings.HasPrefix(sender.Network(), "tcp"))
assert.True(d2.T, strings.HasPrefix(sender.String(), "127.0.0.1"))
assert.Nil(d2.T, err)
assert.Equal(d2.T, iproto.MsgActionType, msgType)
d2.Count++
}
func TestTell(t *testing.T) {
ctx := context.Background()
dp1 := &MockDispatcher2{T: t}
addr1 := randomAddress()
addr2 := randomAddress()
p1 := NewOverlay(LoadTestConfig(addr1, true))
p1.AttachDispatcher(dp1)
err := p1.Start(ctx)
assert.NoError(t, err)
dp2 := &MockDispatcher2{T: t}
p2 := NewOverlay(LoadTestConfig(addr2, true))
p2.AttachDispatcher(dp2)
err = p2.Start(ctx)
assert.NoError(t, err)
defer func() {
err := p1.Stop(ctx)
assert.NoError(t, err)
err = p2.Stop(ctx)
assert.NoError(t, err)
}()
// P1 tell Tx Msg
err = p1.Tell(config.Default.Chain.ID, &node.Node{Addr: addr2}, &iproto.ActionPb{})
assert.NoError(t, err)
// P2 tell Tx Msg
err = p2.Tell(config.Default.Chain.ID, &node.Node{Addr: addr1}, &iproto.ActionPb{})
assert.NoError(t, err)
err = testutil.WaitUntil(10*time.Millisecond, 5*time.Second, func() (bool, error) {
if dp2.Count != uint32(1) {
return false, nil
}
if dp1.Count != uint32(1) {
return false, nil
}
return true, nil
})
require.Nil(t, err)
}
func TestOneConnPerHost(t *testing.T) {
ctx := context.Background()
dp1 := &MockDispatcher2{T: t}
addr1 := randomAddress()
addr2 := randomAddress()
addr3 := randomAddress()
p1 := NewOverlay(LoadTestConfig(addr1, false))
p1.AttachDispatcher(dp1)
err := p1.Start(ctx)
require.Nil(t, err)
dp2 := &MockDispatcher2{T: t}
p2 := NewOverlay(LoadTestConfig(addr2, false))
p2.AttachDispatcher(dp2)
err = p2.Start(ctx)
assert.NoError(t, err)
dp3 := &MockDispatcher2{T: t}
p3 := NewOverlay(LoadTestConfig(addr3, false))
p3.AttachDispatcher(dp3)
err = p3.Start(ctx)
assert.NoError(t, err)
defer func() {
err := p1.Stop(ctx)
assert.NoError(t, err)
err = p2.Stop(ctx)
assert.NoError(t, err)
err = p3.Stop(ctx)
assert.NoError(t, err)
}()
err = testutil.WaitUntil(10*time.Millisecond, 5*time.Second, func() (bool, error) {
if uint(1) != LenSyncMap(p1.PM.Peers) {
return false, nil
}
if uint(1) != LenSyncMap(p2.PM.Peers) {
return false, nil
}
if uint(1) != LenSyncMap(p3.PM.Peers) {
return false, nil
}
return true, nil
})
require.Nil(t, err)
}
func TestConfigBasedTopology(t *testing.T) {
ctx := context.Background()
addr1 := randomAddress()
addr2 := randomAddress()
addr3 := randomAddress()
addr4 := randomAddress()
addresses := []string{addr1, addr2, addr3, addr4}
topology := Topology{
NeighborList: map[string][]string{
addr1: {addr2, addr3, addr4},
addr2: {addr1, addr3, addr4},
addr3: {addr1, addr2, addr4},
addr4: {addr1, addr2, addr3},
},
}
topologyStr, err := yaml.Marshal(topology)
assert.Nil(t, err)
path := "/tmp/topology_" + strconv.Itoa(rand.Int()) + ".yaml"
err = ioutil.WriteFile(path, topologyStr, 0666)
assert.NoError(t, err)
nodes := make([]*IotxOverlay, 4)
for i := 1; i <= 4; i++ {
config := LoadTestConfig(addresses[i-1], true)
config.PeerDiscovery = false
config.TopologyPath = path
dp := &MockDispatcher{}
node := NewOverlay(config)
node.AttachDispatcher(dp)
err = node.Start(ctx)
assert.NoError(t, err)
nodes[i-1] = node
}
defer func() {
for _, node := range nodes {
err := node.Stop(ctx)
assert.NoError(t, err)
}
if os.Remove(path) != nil {
assert.Fail(t, "Error when deleting the test file")
}
}()
err = testutil.WaitUntil(10*time.Millisecond, 5*time.Second, func() (bool, error) {
for _, node := range nodes {
if uint(3) != LenSyncMap(node.PM.Peers) {
return false, nil
}
addrs := make([]string, 0)
node.PM.Peers.Range(func(key, value interface{}) bool {
addrs = append(addrs, key.(string))
return true
})
sort.Strings(addrs)
sort.Strings(topology.NeighborList[node.RPC.String()])
if !reflect.DeepEqual(topology.NeighborList[node.RPC.String()], addrs) {
return false, nil
}
}
return true, nil
})
require.Nil(t, err)
}
func TestRandomizePeerList(t *testing.T) {
if testing.Short() {
t.Skip("Skipping TestRandomizePeerList in short mode.")
}
ctx := context.Background()
size := 10
var dps []*MockDispatcher1
var nodes []*IotxOverlay
for i := 0; i < size; i++ {
dp := &MockDispatcher1{}
dps = append(dps, dp)
cfg := LoadTestConfig(fmt.Sprintf("127.0.0.1:1000%d", i), true)
require.NotNil(t, cfg)
cfg.NumPeersLowerBound = 4
cfg.NumPeersUpperBound = 4
cfg.PeerForceDisconnectionRoundInterval = 1
node := NewOverlay(cfg)
node.AttachDispatcher(dp)
require.NoError(t, node.Start(ctx))
nodes = append(nodes, node)
}
defer func() {
for _, n := range nodes {
assert.NoError(t, n.Stop(ctx))
}
}()
// Sleep for neighbors to be fully shuffled
time.Sleep(5 * time.Second)
err := nodes[0].Broadcast(config.Default.Chain.ID, &iproto.ActionPb{})
require.Nil(t, err)
time.Sleep(5 * time.Second)
testutil.WaitUntil(100*time.Millisecond, 5*time.Second, func() (bool, error) {
for i := 0; i < size; i++ {
if i == 0 {
return uint32(0) != dps[i].Count, nil
}
return uint32(1) != dps[i].Count, nil
}
return true, nil
})
}
type MockDispatcher3 struct {
MockDispatcher
C chan bool
}
func (d3 *MockDispatcher3) HandleTell(uint32, net.Addr, proto.Message, chan bool) {
d3.C <- true
}
func (d3 *MockDispatcher3) HandleBroadcast(uint32, proto.Message, chan bool) {
d3.C <- true
}
func runBenchmarkOp(tell bool, size int, parallel bool, tls bool, b *testing.B) {
ctx := context.Background()
var cfg1, cfg2 *config.Network
if tls {
cfg1 = LoadTestConfigWithTLSEnabled("127.0.0.1:10001", true)
cfg2 = LoadTestConfigWithTLSEnabled("127.0.0.1:10002", true)
} else {
cfg1 = LoadTestConfig("127.0.0.1:10001", true)
cfg2 = LoadTestConfig("127.0.0.1:10002", true)
}
c1 := make(chan bool)
d1 := &MockDispatcher3{C: c1}
p1 := NewOverlay(cfg1)
p1.AttachDispatcher(d1)
err := p1.Start(ctx)
assert.NoError(b, err)
c2 := make(chan bool)
d2 := &MockDispatcher3{C: c2}
p2 := NewOverlay(cfg2)
p2.AttachDispatcher(d2)
err = p2.Start(ctx)
assert.NoError(b, err)
chainID := config.Default.Chain.ID
defer func() {
err := p1.Stop(ctx)
assert.NoError(b, err)
err = p2.Stop(ctx)
assert.NoError(b, err)
}()
time.Sleep(time.Second)
bytes := make([]byte, size)
for i := 0; i < size; i++ {
bytes[i] = uint8(rand.Intn(math.MaxUint8))
}
b.ResetTimer()
if parallel {
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
if tell {
err := p1.Tell(chainID, &node.Node{Addr: "127.0.0.1:10002"}, &iproto.TestPayload{MsgBody: bytes})
assert.NoError(b, err)
} else {
err := p1.Broadcast(chainID, &iproto.TestPayload{MsgBody: bytes})
assert.NoError(b, err)
}
<-c2
}
})
} else {
for i := 0; i < b.N; i++ {
if tell {
err := p1.Tell(chainID, &node.Node{Addr: "127.0.0.1:10002"}, &iproto.TestPayload{MsgBody: bytes})
assert.NoError(b, err)
} else {
err := p1.Broadcast(chainID, &iproto.TestPayload{MsgBody: bytes})
assert.NoError(b, err)
}
<-c2
}
}
}
func generateBlockConfig() *map[string]int {
return &map[string]int{
"0K payload": 0,
"1K payload": 1024,
"10K payload": 1024 * 10,
"100K payload": 1024 * 100,
"1M payload": 1024 * 1024,
"2M payload": 1024 * 1024 * 2,
"5M payload": 1024 * 1024 * 5,
}
}
func BenchmarkTell(b *testing.B) {
for name, size := range *generateBlockConfig() {
b.Run(name, func(b *testing.B) {
runBenchmarkOp(true, size, false, false, b)
})
}
}
func BenchmarkSecureTell(b *testing.B) {
for name, size := range *generateBlockConfig() {
b.Run(name, func(b *testing.B) {
runBenchmarkOp(true, size, false, true, b)
})
}
}
func BenchmarkParallelTell(b *testing.B) {
for name, size := range *generateBlockConfig() {
b.Run(name, func(b *testing.B) {
runBenchmarkOp(true, size, true, false, b)
})
}
}
func BenchmarkParallelSecureTell(b *testing.B) {
for name, size := range *generateBlockConfig() {
b.Run(name, func(b *testing.B) {
runBenchmarkOp(true, size, true, true, b)
})
}
}
func randomAddress() string {
endPoint := rand.Intn(40000) + 10000
return "127.0.0.1:" + strconv.Itoa(endPoint)
}
/*
func BenchmarkBroadcast(b *testing.B) {
for name, size := range *generateBlockConfig() {
b.Run(name, func(b *testing.B) {
runBenchmarkOp(false, size, false, false, b)
})
}
}
*/
| 1 | 12,162 | File is not `goimports`-ed | iotexproject-iotex-core | go |
@@ -354,6 +354,7 @@ func DefaultConfiguration() *Configuration {
config.Go.PleaseGoTool = "//_please:please_go"
config.Go.EmbedTool = "//_please:please_go_embed"
config.Python.PexTool = "//_please:please_pex"
+ config.Python.WheelResolverTool = "//_please:wheel_resolver"
config.Java.JavacWorker = "//_please:javac_worker"
config.Java.JarCatTool = "//_please:jarcat"
config.Java.JUnitRunner = "//_please:junit_runner" | 1 | // Utilities for reading the Please config files.
package core
import (
"crypto/sha1"
"fmt"
"io"
"os"
"path"
"path/filepath"
"reflect"
"runtime"
"sort"
"strconv"
"strings"
"sync"
"time"
"github.com/google/shlex"
"github.com/peterebden/gcfg"
"github.com/thought-machine/go-flags"
"github.com/thought-machine/please/src/cli"
"github.com/thought-machine/please/src/fs"
)
// OsArch is the os/arch pair, like linux_amd64 etc.
const OsArch = runtime.GOOS + "_" + runtime.GOARCH
// ConfigName is the base name for config files.
const ConfigName string = "plzconfig"
// ConfigFileName is the file name for the typical repo config - this is normally checked in
const ConfigFileName string = ".plzconfig"
// ArchConfigFileName is the architecture-specific config file which overrides the repo one.
// Also normally checked in if needed.
const ArchConfigFileName string = ".plzconfig_" + OsArch
// LocalConfigFileName is the file name for the local repo config - this is not normally checked
// in and is used to override settings on the local machine.
const LocalConfigFileName string = ".plzconfig.local"
// MachineConfigFileName is the file name for the machine-level config - can use this to override
// things for a particular machine (eg. build machine with different caching behaviour).
const MachineConfigFileName = "/etc/please/plzconfig"
// UserConfigFileName is the file name for user-specific config (for all their repos).
const UserConfigFileName = "~/.config/please/plzconfig"
// DefaultPath is the default location please looks for programs in
var DefaultPath = []string{"/usr/local/bin", "/usr/bin", "/bin"}
func readConfigFile(config *Configuration, filename string) error {
log.Debug("Attempting to read config from %s...", filename)
if err := gcfg.ReadFileInto(config, filename); err != nil && os.IsNotExist(err) {
return nil // It's not an error to not have the file at all.
} else if gcfg.FatalOnly(err) != nil {
return err
} else if err != nil {
log.Warning("Error in config file: %s", err)
} else {
log.Debug("Read config from %s", filename)
}
return nil
}
// ReadDefaultConfigFiles reads all the config files from the default locations and
// merges them into a config object.
// The repo root must already have been set before calling this.
func ReadDefaultConfigFiles(profiles []ConfigProfile) (*Configuration, error) {
s := make([]string, len(profiles))
for i, p := range profiles {
s[i] = string(p)
}
return ReadConfigFiles(defaultConfigFiles(), s)
}
// defaultGlobalConfigFiles returns the set of global default config file names.
func defaultGlobalConfigFiles() []string {
configFiles := []string{
MachineConfigFileName,
}
if xdgConfigDirs := os.Getenv("XDG_CONFIG_DIRS"); xdgConfigDirs != "" {
for _, p := range strings.Split(xdgConfigDirs, ":") {
if !strings.HasPrefix(p, "/") {
continue
}
configFiles = append(configFiles, filepath.Join(p, ConfigName))
}
}
// Note: according to the XDG Base Directory Specification,
// this path should only be checked if XDG_CONFIG_HOME env var is not set,
// but it should be kept here for backward compatibility purposes.
configFiles = append(configFiles, fs.ExpandHomePath(UserConfigFileName))
if xdgConfigHome := os.Getenv("XDG_CONFIG_HOME"); xdgConfigHome != "" && strings.HasPrefix(xdgConfigHome, "/") {
configFiles = append(configFiles, filepath.Join(xdgConfigHome, ConfigName))
}
return configFiles
}
// defaultConfigFiles returns the set of default config file names.
func defaultConfigFiles() []string {
return append(
defaultGlobalConfigFiles(),
path.Join(RepoRoot, ConfigFileName),
path.Join(RepoRoot, ArchConfigFileName),
path.Join(RepoRoot, LocalConfigFileName),
)
}
// ReadConfigFiles reads all the config locations, in order, and merges them into a config object.
// Values are filled in by defaults initially and then overridden by each file in turn.
func ReadConfigFiles(filenames []string, profiles []string) (*Configuration, error) {
config := DefaultConfiguration()
for _, filename := range filenames {
if err := readConfigFile(config, filename); err != nil {
return config, err
}
for _, profile := range profiles {
if err := readConfigFile(config, filename+"."+profile); err != nil {
return config, err
}
}
}
// Set default values for slices. These add rather than overwrite, so we can't set
// them upfront as we would with other config values.
if usingBazelWorkspace {
setDefault(&config.Parse.BuildFileName, "BUILD.bazel", "BUILD", "BUILD.plz")
} else {
setDefault(&config.Parse.BuildFileName, "BUILD", "BUILD.plz")
}
setBuildPath(&config.Build.Path, config.Build.PassEnv, config.Build.PassUnsafeEnv)
setDefault(&config.Build.PassUnsafeEnv)
setDefault(&config.Build.PassEnv)
setDefault(&config.Cover.FileExtension, ".go", ".py", ".java", ".tsx", ".ts", ".js", ".cc", ".h", ".c")
setDefault(&config.Cover.ExcludeExtension, ".pb.go", "_pb2.py", ".spec.tsx", ".spec.ts", ".spec.js", ".pb.cc", ".pb.h", "_test.py", "_test.go", "_pb.go", "_bindata.go", "_test_main.cc")
setDefault(&config.Proto.Language, "cc", "py", "java", "go", "js")
setDefault(&config.Parse.BuildDefsDir, "build_defs")
if config.Go.GoRoot != "" {
config.Go.GoTool = filepath.Join(config.Go.GoRoot, "bin", "go")
}
// Default values for these guys depend on config.Java.JavaHome if that's been set.
if config.Java.JavaHome != "" {
defaultPathIfExists(&config.Java.JlinkTool, config.Java.JavaHome, "bin/jlink")
}
if config.Colours == nil {
config.Colours = map[string]string{
"py": "${GREEN}",
"java": "${RED}",
"go": "${YELLOW}",
"js": "${BLUE}",
}
} else {
// You are allowed to just write "yellow" but we map that to a pseudo-variable thing.
for k, v := range config.Colours {
if v[0] != '$' {
config.Colours[k] = "${" + strings.ToUpper(v) + "}"
}
}
}
// In a few versions we will deprecate Cpp.Coverage completely in favour of this more generic scheme.
if !config.Cpp.Coverage {
config.Test.DisableCoverage = append(config.Test.DisableCoverage, "cc")
}
if len(config.Size) == 0 {
config.Size = map[string]*Size{
"small": {
Timeout: cli.Duration(1 * time.Minute),
TimeoutName: "short",
},
"medium": {
Timeout: cli.Duration(5 * time.Minute),
TimeoutName: "moderate",
},
"large": {
Timeout: cli.Duration(15 * time.Minute),
TimeoutName: "long",
},
"enormous": {
TimeoutName: "eternal",
},
}
}
// Dump the timeout names back in so we can look them up later
for _, size := range config.Size {
if size.TimeoutName != "" {
config.Size[size.TimeoutName] = size
}
}
// Resolve the full path to its location.
config.EnsurePleaseLocation()
// If the HTTP proxy config is set and there is no env var overriding it, set it now
// so various other libraries will honour it.
if config.Build.HTTPProxy != "" {
os.Setenv("HTTP_PROXY", config.Build.HTTPProxy.String())
}
// Deal with the various sandbox settings that are moving.
if config.Build.Sandbox {
log.Warning("build.sandbox in config is deprecated, use sandbox.build instead")
config.Sandbox.Build = true
}
if config.Test.Sandbox {
log.Warning("test.sandbox in config is deprecated, use sandbox.test instead")
config.Sandbox.Test = true
}
if config.Build.PleaseSandboxTool != "" {
log.Warning("build.pleasesandboxtool in config is deprecated, use sandbox.tool instead")
config.Sandbox.Tool = config.Build.PleaseSandboxTool
}
// We can only verify options by reflection (we need struct tags) so run them quickly through this.
return config, config.ApplyOverrides(map[string]string{
"build.hashfunction": config.Build.HashFunction,
})
}
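// Illustrative usage sketch (not part of the original file): reading the standard repo files
// together with a hypothetical "ci" profile. Files are read in order with later ones
// overriding earlier ones; a missing file is silently skipped and only fatal parse errors
// abort the read. For each base file, a matching "<file>.ci" is also read afterwards.
//
//	config, err := ReadConfigFiles([]string{
//		MachineConfigFileName,                    // /etc/please/plzconfig
//		fs.ExpandHomePath(UserConfigFileName),    // ~/.config/please/plzconfig
//		path.Join(RepoRoot, ConfigFileName),      // .plzconfig
//		path.Join(RepoRoot, ArchConfigFileName),  // .plzconfig_<os>_<arch>
//		path.Join(RepoRoot, LocalConfigFileName), // .plzconfig.local
//	}, []string{"ci"})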
// setDefault sets a slice of strings in the config if the set one is empty.
func setDefault(conf *[]string, def ...string) {
if len(*conf) == 0 {
*conf = def
}
}
// setBuildPath sets config.Build.Path to the default; if "PATH" is listed in passEnv or passUnsafeEnv, the value is taken from the environment variable instead.
func setBuildPath(conf *[]string, passEnv []string, passUnsafeEnv []string) {
pathVal := DefaultPath
for _, i := range passUnsafeEnv {
if i == "PATH" {
pathVal = strings.Split(os.Getenv("PATH"), ":")
}
}
for _, i := range passEnv {
if i == "PATH" {
pathVal = strings.Split(os.Getenv("PATH"), ":")
}
}
setDefault(conf, pathVal...)
}
// defaultPathIfExists sets a variable to a location in a directory if it's not already set and if the location exists.
func defaultPathIfExists(conf *string, dir, file string) {
if *conf == "" {
location := path.Join(dir, file)
// check that the location is valid
if _, err := os.Stat(location); err == nil {
*conf = location
}
}
}
// DefaultConfiguration returns the default configuration object with no overrides.
// N.B. Slice fields are not populated by this (since it interferes with reading them)
func DefaultConfiguration() *Configuration {
config := Configuration{buildEnvStored: &storedBuildEnv{}}
config.Please.SelfUpdate = true
config.Please.Autoclean = true
config.Please.DownloadLocation = "https://get.please.build"
config.Please.NumOldVersions = 10
config.Please.NumThreads = runtime.NumCPU() + 2
config.Parse.NumThreads = config.Please.NumThreads
config.Parse.GitFunctions = true
config.Build.Arch = cli.NewArch(runtime.GOOS, runtime.GOARCH)
config.Build.Lang = "en_GB.UTF-8" // Not the language of the UI, the language passed to rules.
config.Build.Nonce = "1402" // Arbitrary nonce to invalidate config when needed.
config.Build.Timeout = cli.Duration(10 * time.Minute)
config.Build.Config = "opt" // Optimised builds by default
config.Build.FallbackConfig = "opt" // Optimised builds as a fallback on any target that doesn't have a matching one set
config.Build.Xattrs = true
config.Build.HashFunction = "sha256"
config.BuildConfig = map[string]string{}
config.BuildEnv = map[string]string{}
config.Cache.HTTPWriteable = true
config.Cache.HTTPTimeout = cli.Duration(25 * time.Second)
config.Cache.HTTPConcurrentRequestLimit = 20
config.Cache.HTTPRetry = 4
if dir, err := os.UserCacheDir(); err == nil {
config.Cache.Dir = path.Join(dir, "please")
}
config.Cache.DirCacheHighWaterMark = 10 * cli.GiByte
config.Cache.DirCacheLowWaterMark = 8 * cli.GiByte
config.Cache.DirClean = true
config.Cache.Workers = runtime.NumCPU() + 2 // Mirrors the number of workers in please.go.
config.Test.Timeout = cli.Duration(10 * time.Minute)
config.Display.SystemStats = true
config.Display.MaxWorkers = 40
config.Display.ColourScheme = "dark"
config.Remote.NumExecutors = 20 // kind of arbitrary
config.Remote.Secure = true
config.Remote.VerifyOutputs = true
config.Remote.UploadDirs = true
config.Remote.CacheDuration = cli.Duration(10000 * 24 * time.Hour) // Effectively forever.
config.Go.GoTool = "go"
config.Go.CgoCCTool = "gcc"
config.Python.DefaultInterpreter = "python3"
config.Python.DisableVendorFlags = false
config.Python.TestRunner = "unittest"
config.Python.TestRunnerBootstrap = ""
config.Python.UsePyPI = true
config.Python.InterpreterOptions = ""
config.Python.PipFlags = ""
config.Java.DefaultTestPackage = ""
config.Java.SourceLevel = "8"
config.Java.TargetLevel = "8"
config.Java.ReleaseLevel = ""
config.Java.DefaultMavenRepo = []cli.URL{"https://repo1.maven.org/maven2", "https://jcenter.bintray.com/"}
config.Java.JavacFlags = "-Werror -Xlint:-options" // bootstrap class path warnings are pervasive without this.
config.Java.JlinkTool = "jlink"
config.Java.JavaHome = ""
config.Cpp.CCTool = "gcc"
config.Cpp.CppTool = "g++"
config.Cpp.LdTool = "ld"
config.Cpp.ArTool = "ar"
config.Cpp.DefaultOptCflags = "--std=c99 -O3 -pipe -DNDEBUG -Wall -Werror"
config.Cpp.DefaultDbgCflags = "--std=c99 -g3 -pipe -DDEBUG -Wall -Werror"
config.Cpp.DefaultOptCppflags = "--std=c++11 -O3 -pipe -DNDEBUG -Wall -Werror"
config.Cpp.DefaultDbgCppflags = "--std=c++11 -g3 -pipe -DDEBUG -Wall -Werror"
config.Cpp.Coverage = true
config.Cpp.ClangModules = true
config.Proto.ProtocTool = "protoc"
// We're using the most common names for these; typically gRPC installs the builtin plugins
// as grpc_python_plugin etc.
config.Proto.ProtocGoPlugin = "protoc-gen-go"
config.Proto.GrpcPythonPlugin = "grpc_python_plugin"
config.Proto.GrpcJavaPlugin = "protoc-gen-grpc-java"
config.Proto.GrpcCCPlugin = "grpc_cpp_plugin"
config.Proto.PythonDep = "//third_party/python:protobuf"
config.Proto.JavaDep = "//third_party/java:protobuf"
config.Proto.GoDep = "//third_party/go:protobuf"
config.Proto.JsDep = ""
config.Proto.PythonGrpcDep = "//third_party/python:grpc"
config.Proto.JavaGrpcDep = "//third_party/java:grpc-all"
config.Proto.GoGrpcDep = "//third_party/go:grpc"
config.Remote.Timeout = cli.Duration(2 * time.Minute)
config.Bazel.Compatibility = usingBazelWorkspace
config.Sandbox.Tool = "please_sandbox"
// Please tools
config.Go.FilterTool = "//_please:please_go_filter"
config.Go.PleaseGoTool = "//_please:please_go"
config.Go.EmbedTool = "//_please:please_go_embed"
config.Python.PexTool = "//_please:please_pex"
config.Java.JavacWorker = "//_please:javac_worker"
config.Java.JarCatTool = "//_please:jarcat"
config.Java.JUnitRunner = "//_please:junit_runner"
return &config
}
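// Illustrative note (not part of the original file): any of the defaults above can be
// overridden in a plzconfig file; the lower-cased section and option names correspond to the
// Configuration struct fields below. An assumed minimal example:
//
//	[build]
//	timeout = 20m
//	hashfunction = sha1
//
//	[python]
//	defaultinterpreter = pypy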
// A Configuration contains all the settings that can be configured about Please.
// This is parsed from .plzconfig etc; we also auto-generate help messages from its tags.
type Configuration struct {
Please struct {
Version cli.Version `help:"Defines the version of plz that this repo is supposed to use currently. If it's not present or the version matches the currently running version no special action is taken; otherwise if SelfUpdate is set Please will attempt to download an appropriate version, otherwise it will issue a warning and continue.\n\nNote that if this is not set, you can run plz update to update to the latest version available on the server." var:"PLZ_VERSION"`
VersionChecksum []string `help:"Defines a hex-encoded sha256 checksum that the downloaded version must match. Can be specified multiple times to support different architectures." example:"abcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890"`
Location string `help:"Defines the directory Please is installed into.\nDefaults to ~/.please but you might want it to be somewhere else if you're installing via another method (e.g. the debs and install script still use /opt/please)."`
SelfUpdate bool `help:"Sets whether plz will attempt to update itself when the version set in the config file is different."`
DownloadLocation cli.URL `help:"Defines the location to download Please from when self-updating. Defaults to the Please web server, but you can point it to some location of your own if you prefer to keep traffic within your network or use home-grown versions."`
NumOldVersions int `help:"Number of old versions to keep from autoupdates."`
Autoclean bool `help:"Automatically clean stale versions without prompting"`
NumThreads int `help:"Number of parallel build operations to run.\nIs overridden by the equivalent command-line flag, if that's passed." example:"6"`
Motd []string `help:"Message of the day; is displayed once at the top during builds. If multiple are given, one is randomly chosen."`
DefaultRepo string `help:"Location of the default repository; this is used if plz is invoked when not inside a repo, it changes to that directory then does its thing."`
} `help:"The [please] section in the config contains non-language-specific settings defining how Please should operate."`
Parse struct {
ExperimentalDir []string `help:"Directory containing experimental code. This is subject to some extra restrictions:\n - Code in the experimental dir can override normal visibility constraints\n - Code outside the experimental dir can never depend on code inside it\n - Tests are excluded from general detection." example:"experimental"`
BuildFileName []string `help:"Sets the names that Please uses instead of BUILD for its build files.\nFor clarity the documentation refers to them simply as BUILD files but you could reconfigure them here to be something else.\nOne case where this can be particularly useful is when you have a subdirectory named build on a case-insensitive file system like HFS+." var:"BUILD_FILE_NAMES"`
BlacklistDirs []string `help:"Directories to blacklist when recursively searching for BUILD files (e.g. when using plz build ... or similar).\nThis is generally useful when you have large directories within your repo that don't need to be searched, especially things like node_modules that have come from external package managers."`
PreloadBuildDefs []string `help:"Files to preload by the parser before loading any BUILD files.\nSince this is done before the first package is parsed they must be files in the repository, they cannot be subinclude() paths. Use PreloadSubincludes instead." example:"build_defs/go_bindata.build_defs"`
PreloadSubincludes []string `help:"Subinclude targets to preload by the parser before loading any BUILD files.\nSubincludes can be slow so it's recommended to use PreloadBuildDefs where possible." example:"///pleasings//python:requirements"`
BuildDefsDir []string `help:"Directory to look in when prompted for help topics that aren't known internally." example:"build_defs"`
NumThreads int `help:"Number of parallel parse operations to run.\nIs overridden by the --num_threads command line flag." example:"6"`
GitFunctions bool `help:"Activates built-in functions git_branch, git_commit, git_show and git_state. If disabled they will not be usable at parse time."`
} `help:"The [parse] section in the config contains settings specific to parsing files."`
Display struct {
UpdateTitle bool `help:"Updates the title bar of the shell window Please is running in as the build progresses. This isn't on by default because not everyone's shell is configured to reset it again after and we don't want to alter it forever."`
SystemStats bool `help:"Whether or not to show basic system resource usage in the interactive display. Has no effect without that configured."`
MaxWorkers int `help:"Maximum number of worker rows to display at any one time."`
ColourScheme string `help:"Shell colour scheme mode, dark or light. Defaults to dark"`
} `help:"Please has an animated display mode which shows the currently building targets.\nBy default it will autodetect whether it is using an interactive TTY session and choose whether to use it or not, although you can force it on or off via flags.\n\nThe display is heavily inspired by Buck's SuperConsole."`
Colours map[string]string `help:"Colour code overrides for the targets in interactive output. These colours are map labels on targets to colours e.g. go -> ${YELLOW}."`
Build struct {
Arch cli.Arch `help:"The target architecture to compile for. Defaults to the host architecture."`
Timeout cli.Duration `help:"Default timeout for build actions. Default is ten minutes."`
Path []string `help:"The PATH variable that will be passed to the build processes.\nDefaults to /usr/local/bin:/usr/bin:/bin but of course can be modified if you need to get binaries from other locations." example:"/usr/local/bin:/usr/bin:/bin"`
Config string `help:"The build config to use when one is not chosen on the command line. Defaults to opt." example:"opt | dbg"`
FallbackConfig string `help:"The build config to use when one is chosen and a required target does not have one by the same name. Also defaults to opt." example:"opt | dbg"`
Lang string `help:"Sets the language passed to build rules when building. This can be important for some tools (although hopefully not many) - we've mostly observed it with Sass."`
Sandbox bool `help:"Deprecated, use sandbox.build instead."`
Xattrs bool `help:"True (the default) to attempt to use xattrs to record file metadata. If false Please will fall back to using additional files where needed, which is more compatible but has slightly worse performance."`
PleaseSandboxTool string `help:"Deprecated, use sandbox.tool instead."`
Nonce string `help:"This is an arbitrary string that is added to the hash of every build target. It provides a way to force a rebuild of everything when it's changed.\nWe will bump the default of this whenever we think it's required - although it's been a pretty long time now and we hope that'll continue."`
PassEnv []string `help:"A list of environment variables to pass from the current environment to build rules. For example\n\nPassEnv = HTTP_PROXY\n\nwould copy your HTTP_PROXY environment variable to the build env for any rules."`
PassUnsafeEnv []string `help:"Similar to PassEnv, a list of environment variables to pass from the current environment to build rules. Unlike PassEnv, the environment variable values are not used when calculating build target hashes."`
HTTPProxy cli.URL `help:"A URL to use as a proxy server for downloads. Only applies to internal ones - e.g. self-updates or remote_file rules."`
HashFunction string `help:"The hash function to use internally for build actions." options:"sha1,sha256"`
ExitOnError bool `help:"True to have build actions automatically fail on error (essentially passing -e to the shell they run in)." var:"EXIT_ON_ERROR"`
LinkGeneratedSources bool `help:"If set, supported build definitions will link generated sources back into the source tree. The list of generated files can be generated for the .gitignore through 'plz query print --label gitignore: //...'. Defaults to false." var:"LINK_GEN_SOURCES"`
} `help:"A config section describing general settings related to building targets in Please.\nSince Please is by nature about building things, this only has the most generic properties; most of the more esoteric properties are configured in their own sections."`
BuildConfig map[string]string `help:"A section of arbitrary key-value properties that are made available in the BUILD language. These are often useful for writing custom rules that need some configurable property.\n\n[buildconfig]\nandroid-tools-version = 23.0.2\n\nFor example, the above can be accessed as CONFIG.ANDROID_TOOLS_VERSION."`
BuildEnv map[string]string `help:"A set of extra environment variables to define for build rules. For example:\n\n[buildenv]\nsecret-passphrase = 12345\n\nThis would become SECRET_PASSPHRASE for any rules. These can be useful for passing secrets into custom rules; any variables containing SECRET or PASSWORD won't be logged.\n\nIt's also useful if you'd like internal tools to honour some external variable."`
Cache struct {
Workers int `help:"Number of workers for uploading artifacts to remote caches, which is done asynchronously."`
Dir string `help:"Sets the directory to use for the dir cache.\nThe default is 'please' under the user's cache dir (i.e. ~/.cache/please, ~/Library/Caches/please, etc), if set to the empty string the dir cache will be disabled." example:".plz-cache"`
DirCacheHighWaterMark cli.ByteSize `help:"Starts cleaning the directory cache when it is over this number of bytes.\nCan also be given with human-readable suffixes like 10G, 200MB etc."`
DirCacheLowWaterMark cli.ByteSize `help:"When cleaning the directory cache, it's reduced to at most this size."`
DirClean bool `help:"Controls whether entries in the dir cache are cleaned or not. If disabled the cache will only grow."`
DirCompress bool `help:"Compresses stored artifacts in the dir cache. They are slower to store & retrieve but more compact."`
HTTPURL cli.URL `help:"Base URL of the HTTP cache.\nNot set to anything by default which means the cache will be disabled."`
HTTPWriteable bool `help:"If True this plz instance will write content back to the HTTP cache.\nBy default it runs in read-only mode."`
HTTPTimeout cli.Duration `help:"Timeout for operations contacting the HTTP cache, in seconds."`
HTTPConcurrentRequestLimit int `help:"The maximum amount of concurrent requests that can be open. Default 20."`
HTTPRetry int `help:"The maximum number of retries before a request will give up, if a request is retryable"`
} `help:"Please has several built-in caches that can be configured in its config file.\n\nThe simplest one is the directory cache which by default is written into the .plz-cache directory. This allows for fast retrieval of code that has been built before (for example, when swapping Git branches).\n\nThere is also a remote RPC cache which allows using a centralised server to store artifacts. A typical pattern here is to have your CI system write artifacts into it and give developers read-only access so they can reuse its work.\n\nFinally there's a HTTP cache which is very similar, but a little obsolete now since the RPC cache outperforms it and has some extra features. Otherwise the two have similar semantics and share quite a bit of implementation.\n\nPlease has server implementations for both the RPC and HTTP caches."`
Test struct {
Timeout cli.Duration `help:"Default timeout applied to all tests. Can be overridden on a per-rule basis."`
Sandbox bool `help:"Deprecated, use sandbox.test instead."`
DisableCoverage []string `help:"Disables coverage for tests that have any of these labels specified."`
Upload cli.URL `help:"URL to upload test results to (in XML format)"`
UploadGzipped bool `help:"True to upload the test results gzipped."`
StoreTestOutputOnSuccess bool `help:"True to store stdout and stderr in the test results for successful tests."`
} `help:"A config section describing settings related to testing in general."`
Sandbox struct {
Tool string `help:"The location of the tool to use for sandboxing. This can assume it is being run in a new network, user, and mount namespace on linux. If not set, Please will use 'plz sandbox'."`
Dir []string `help:"Directories to hide within the sandbox"`
Namespace string `help:"Set to 'always', to namespace all actions. Set to 'sandbox' to namespace only when sandboxing the build action. Defaults to 'never', under the assumption the sandbox tool will handle its own namespacing. If set, user namespacing will be enabled for all rules. Mount and network will only be enabled if the rule is to be sandboxed."`
Build bool `help:"True to sandbox individual build actions, which isolates them from network access and some aspects of the filesystem. Currently only works on Linux." var:"BUILD_SANDBOX"`
Test bool `help:"True to sandbox individual tests, which isolates them from network access, IPC and some aspects of the filesystem. Currently only works on Linux." var:"TEST_SANDBOX"`
} `help:"A config section describing settings relating to sandboxing of build actions."`
Remote struct {
URL string `help:"URL for the remote server."`
CASURL string `help:"URL for the CAS service, if it is different to the main one."`
AssetURL string `help:"URL for the remote asset server, if it is different to the main one."`
NumExecutors int `help:"Maximum number of remote executors to use simultaneously."`
Instance string `help:"Remote instance name to request; depending on the server this may be required."`
Name string `help:"A name for this worker instance. This is attached to artifacts uploaded to remote storage." example:"agent-001"`
DisplayURL string `help:"A URL to browse the remote server with (e.g. using buildbarn-browser). Only used when printing hashes."`
TokenFile string `help:"A file containing a token that is attached to outgoing RPCs to authenticate them. This is somewhat bespoke; we are still investigating further options for authentication."`
Timeout cli.Duration `help:"Timeout for connections made to the remote server."`
Secure bool `help:"Whether to use TLS for communication or not."`
VerifyOutputs bool `help:"Whether to verify all outputs are present after a cached remote execution action. Depending on your server implementation, you may require this to ensure files are really present."`
UploadDirs bool `help:"Uploads individual directory blobs after build actions. This might not be necessary with some servers, but if you aren't sure, you should leave it on."`
Shell string `help:"Path to the shell to use to execute actions in. Default looks up bash based on the build.path setting."`
Platform []string `help:"Platform properties to request from remote workers, in the format key=value."`
CacheDuration cli.Duration `help:"Length of time before we re-check locally cached build actions. Default is unlimited."`
BuildID string `help:"ID of the build action that's being run, to attach to remote requests."`
} `help:"Settings related to remote execution & caching using the Google remote execution APIs. This section is still experimental and subject to change."`
Size map[string]*Size `help:"Named sizes of targets; these are the definitions of what can be passed to the 'size' argument."`
Cover struct {
FileExtension []string `help:"Extensions of files to consider for coverage.\nDefaults to .go, .py, .java, .tsx, .ts, .js, .cc, .h, and .c"`
ExcludeExtension []string `help:"Extensions of files to exclude from coverage.\nTypically this is for generated code; the default is to exclude protobuf extensions like .pb.go, _pb2.py, etc."`
ExcludeGlob []string `help:"Exclude glob patterns from coverage.\nTypically this is for generated code and it is useful when there is no other discrimination possible."`
} `help:"Configuration relating to coverage reports."`
Gc struct {
Keep []BuildLabel `help:"Marks targets that gc should always keep. Can include meta-targets such as //test/... and //docs:all."`
KeepLabel []string `help:"Defines a target label to be kept; for example, if you set this to go, no Go targets would ever be considered for deletion." example:"go"`
} `help:"Please supports a form of 'garbage collection', by which it means identifying targets that are not used for anything. By default binary targets and all their transitive dependencies are always considered non-garbage, as are any tests directly on those. The config options here allow tweaking this behaviour to retain more things.\n\nNote that it's a very good idea that your BUILD files are in the standard format when running this."`
Go struct {
GoTool string `help:"The binary to use to invoke Go & its subtools with." var:"GO_TOOL"`
GoRoot string `help:"If set, will set the GOROOT environment variable appropriately during build actions." var:"GOROOT"`
GoPath string `help:"If set, will set the GOPATH environment variable appropriately during build actions." var:"GOPATH"`
ImportPath string `help:"Sets the default Go import path at the root of this repository.\nFor example, in the Please repo, we might set it to github.com/thought-machine/please to allow imports from that package within the repo." var:"GO_IMPORT_PATH"`
CgoCCTool string `help:"Sets the location of CC while building cgo_library and cgo_test rules. Defaults to gcc" var:"CGO_CC_TOOL"`
CgoEnabled string `help:"Sets the CGO_ENABLED which controls whether the cgo build flag is set during cross compilation. Defaults to '0' (disabled)" var:"CGO_ENABLED"`
FilterTool string `help:"Sets the location of the please_go_filter tool that is used to filter source files against build constraints." var:"GO_FILTER_TOOL"`
PleaseGoTool string `help:"Sets the location of the please_go tool that is used to compile and test go code." var:"PLEASE_GO_TOOL"`
EmbedTool string `help:"Sets the location of the please_go_embed tool that is used to parse //go:embed directives." var:"GO_EMBED_TOOL"`
DefaultStatic bool `help:"Sets Go binaries to default to static linking. Note that enabling this may have negative consequences for some code, including Go's DNS lookup code in the net module." var:"GO_DEFAULT_STATIC"`
GoTestRootCompat bool `help:"Changes the behavior of the build rules to be more compatible with go test i.e. please will descend into the package directory to run unit tests as go test does." var:"GO_TEST_ROOT_COMPAT"`
CFlags string `help:"Sets the CFLAGS env var for go rules." var:"GO_C_FLAGS"`
LDFlags string `help:"Sets the LDFLAGS env var for go rules." var:"GO_LD_FLAGS"`
} `help:"Please has built-in support for compiling Go, and of course is written in Go itself.\nSee the config subfields or the Go rules themselves for more information.\n\nNote that Please is a bit more flexible than Go about directory layout - for example, it is possible to have multiple packages in a directory, but it's not a good idea to push this too far since Go's directory layout is inextricably linked with its import paths."`
Python struct {
PipTool string `help:"The tool that is invoked during pip_library rules." var:"PIP_TOOL"`
PipFlags string `help:"Additional flags to pass to pip invocations in pip_library rules." var:"PIP_FLAGS"`
PexTool string `help:"The tool that's invoked to build pexes. Defaults to please_pex in the install directory." var:"PEX_TOOL"`
DefaultInterpreter string `help:"The interpreter used for python_binary and python_test rules when none is specified on the rule itself. Defaults to python3 but you could of course set it to, say, pypy." var:"DEFAULT_PYTHON_INTERPRETER"`
TestRunner string `help:"The test runner used to discover & run Python tests; one of unittest, pytest or behave, or a custom import path to bring your own." var:"PYTHON_TEST_RUNNER"`
TestRunnerBootstrap string `help:"Target providing test-runner library and its transitive dependencies. Injects plz-provided bootstraps if not given." var:"PYTHON_TEST_RUNNER_BOOTSTRAP"`
ModuleDir string `help:"Defines a directory containing modules from which they can be imported at the top level.\nBy default this is empty but by convention we define our pip_library rules in third_party/python and set this appropriately. Hence any of those third-party libraries that try something like import six will have it work as they expect, even though it's actually in a different location within the .pex." var:"PYTHON_MODULE_DIR"`
DefaultPipRepo cli.URL `help:"Defines a location for a pip repo to download wheels from.\nBy default pip_library uses PyPI (although see below on that) but you may well want to use this define another location to upload your own wheels to.\nIs overridden by the repo argument to pip_library." var:"PYTHON_DEFAULT_PIP_REPO"`
WheelRepo cli.URL `help:"Defines a location for a remote repo that python_wheel rules will download from. See python_wheel for more information." var:"PYTHON_WHEEL_REPO"`
UsePyPI bool `help:"Whether or not to use PyPI for pip_library rules or not. Defaults to true, if you disable this you will presumably want to set DefaultPipRepo to use one of your own.\nIs overridden by the use_pypi argument to pip_library." var:"USE_PYPI"`
WheelNameScheme []string `help:"Defines a custom templatized wheel naming scheme. Templatized variables should be surrounded in curly braces, and the available options are: url_base, package_name, version and initial (the first character of package_name). The default search pattern is '{url_base}/{package_name}-{version}-${{OS}}-${{ARCH}}.whl' along with a few common variants." var:"PYTHON_WHEEL_NAME_SCHEME"`
InterpreterOptions string `help:"Options to pass to the python interpreter when writing shebangs for pex executables." var:"PYTHON_INTERPRETER_OPTIONS"`
DisableVendorFlags bool `help:"Disables injection of vendor specific flags for pip while using pip_library. The option can be useful if you are using something like Pyenv, and the passing of additional flags or configuration that are vendor specific, e.g. --system, breaks your build." var:"DISABLE_VENDOR_FLAGS"`
} `help:"Please has built-in support for compiling Python.\nPlease's Python artifacts are pex files, which are essentially self-executable zip files containing all needed dependencies, bar the interpreter itself. This fits our aim of at least semi-static binaries for each language.\nSee https://github.com/pantsbuild/pex for more information.\nNote that due to differences between the environment inside a pex and outside some third-party code may not run unmodified (for example, it cannot simply open() files). It's possible to work around a lot of this, but if it all becomes too much it's possible to mark pexes as not zip-safe which typically resolves most of it at a modest speed penalty."`
Java struct {
JavacTool string `help:"Defines the tool used for the Java compiler. Defaults to javac." var:"JAVAC_TOOL"`
JlinkTool string `help:"Defines the tool used for the Java linker. Defaults to jlink." var:"JLINK_TOOL"`
JavaHome string `help:"Defines the path of the Java Home folder." var:"JAVA_HOME"`
JavacWorker string `help:"Defines the tool used for the Java persistent compiler. This is significantly (approx 4x) faster for large Java trees than invoking javac separately each time. Defaults to javac_worker in the install directory, but can be switched off to fall back to javactool and separate invocation." var:"JAVAC_WORKER"`
JarCatTool string `help:"Defines the tool used to concatenate .jar files which we use to build the output of java_binary, java_test and various other rules. Defaults to jarcat in the Please install directory." var:"JARCAT_TOOL"`
JUnitRunner string `help:"Defines the .jar containing the JUnit runner. This is built into all java_test rules since it's necessary to make JUnit do anything useful.\nDefaults to junit_runner.jar in the Please install directory." var:"JUNIT_RUNNER"`
DefaultTestPackage string `help:"The Java classpath to search for functions annotated with @Test. If not specified the compiled sources will be searched for files named *Test.java." var:"DEFAULT_TEST_PACKAGE"`
ReleaseLevel string `help:"The default Java release level when compiling.\nSourceLevel and TargetLevel are ignored if this is set. Bear in mind that this flag is only supported in Java version 9+." var:"JAVA_RELEASE_LEVEL"`
SourceLevel string `help:"The default Java source level when compiling. Defaults to 8." var:"JAVA_SOURCE_LEVEL"`
TargetLevel string `help:"The default Java bytecode level to target. Defaults to 8." var:"JAVA_TARGET_LEVEL"`
JavacFlags string `help:"Additional flags to pass to javac when compiling libraries." example:"-Xmx1200M" var:"JAVAC_FLAGS"`
JavacTestFlags string `help:"Additional flags to pass to javac when compiling tests." example:"-Xmx1200M" var:"JAVAC_TEST_FLAGS"`
DefaultMavenRepo []cli.URL `help:"Default location to load artifacts from in maven_jar rules. Can be overridden on a per-rule basis." var:"DEFAULT_MAVEN_REPO"`
Toolchain string `help:"A label identifying a java_toolchain." var:"JAVA_TOOLCHAIN"`
} `help:"Please has built-in support for compiling Java.\nIt builds uber-jars for binary and test rules which contain all dependencies and can be easily deployed, and with the help of some of Please's additional tools they are deterministic as well.\n\nWe've only tested support for Java 7 and 8, although it's likely newer versions will work with little or no change."`
Cpp struct {
CCTool string `help:"The tool invoked to compile C code. Defaults to gcc but you might want to set it to clang, for example." var:"CC_TOOL"`
CppTool string `help:"The tool invoked to compile C++ code. Defaults to g++ but you might want to set it to clang++, for example." var:"CPP_TOOL"`
LdTool string `help:"The tool invoked to link object files. Defaults to ld but you could also set it to gold, for example." var:"LD_TOOL"`
ArTool string `help:"The tool invoked to archive static libraries. Defaults to ar." var:"AR_TOOL"`
LinkWithLdTool bool `help:"If true, instructs Please to use the tool set earlier in ldtool to link binaries instead of cctool.\nThis is an esoteric setting that most people don't want; a vanilla ld will not perform all steps necessary here (you'll get lots of missing symbol messages from having no libc etc). Generally best to leave this disabled unless you have very specific requirements." var:"LINK_WITH_LD_TOOL"`
DefaultOptCflags string `help:"Compiler flags passed to all C rules during opt builds; these are typically pretty basic things like what language standard you want to target, warning flags, etc.\nDefaults to --std=c99 -O3 -DNDEBUG -Wall -Wextra -Werror" var:"DEFAULT_OPT_CFLAGS"`
DefaultDbgCflags string `help:"Compiler rules passed to all C rules during dbg builds.\nDefaults to --std=c99 -g3 -DDEBUG -Wall -Wextra -Werror." var:"DEFAULT_DBG_CFLAGS"`
DefaultOptCppflags string `help:"Compiler flags passed to all C++ rules during opt builds; these are typically pretty basic things like what language standard you want to target, warning flags, etc.\nDefaults to --std=c++11 -O3 -DNDEBUG -Wall -Wextra -Werror" var:"DEFAULT_OPT_CPPFLAGS"`
DefaultDbgCppflags string `help:"Compiler rules passed to all C++ rules during dbg builds.\nDefaults to --std=c++11 -g3 -DDEBUG -Wall -Wextra -Werror." var:"DEFAULT_DBG_CPPFLAGS"`
DefaultLdflags string `help:"Linker flags passed to all C++ rules.\nBy default this is empty." var:"DEFAULT_LDFLAGS"`
PkgConfigPath string `help:"Custom PKG_CONFIG_PATH for pkg-config.\nBy default this is empty." var:"PKG_CONFIG_PATH"`
Coverage bool `help:"If true (the default), coverage will be available for C and C++ build rules.\nThis is still a little experimental but should work for GCC. Right now it does not work for Clang (it likely will in Clang 4.0 which will likely support --fprofile-dir) and so this can be useful to disable it.\nIt's also useful in some cases for CI systems etc if you'd prefer to avoid the overhead, since the tests have to be compiled with extra instrumentation and without optimisation." var:"CPP_COVERAGE"`
TestMain BuildLabel `help:"The build target to use for the default main for C++ test rules." example:"///pleasings//cc:unittest_main" var:"CC_TEST_MAIN"`
ClangModules bool `help:"Uses Clang-style arguments for compiling cc_module rules. If disabled gcc-style arguments will be used instead. Experimental, expected to be removed at some point once module compilation methods are more consistent." var:"CC_MODULES_CLANG"`
DsymTool string `help:"Set this to dsymutil or equivalent on macOS to use this tool to generate xcode symbol information for debug builds." var:"DSYM_TOOL"`
} `help:"Please has built-in support for compiling C and C++ code. We don't support every possible nuance of compilation for these languages, but aim to provide something fairly straightforward.\nTypically there is little problem compiling & linking against system libraries although Please has no insight into those libraries and when they change, so cannot rebuild targets appropriately.\n\nThe C and C++ rules are very similar and simply take a different set of tools and flags to facilitate side-by-side usage."`
Proto struct {
ProtocTool string `help:"The binary invoked to compile .proto files. Defaults to protoc." var:"PROTOC_TOOL"`
ProtocGoPlugin string `help:"The binary passed to protoc as a plugin to generate Go code. Defaults to protoc-gen-go.\nWe've found this easier to manage with a go_get rule instead though, so you can also pass a build label here. See the Please repo for an example." var:"PROTOC_GO_PLUGIN"`
GrpcPythonPlugin string `help:"The plugin invoked to compile Python code for grpc_library.\nDefaults to protoc-gen-grpc-python." var:"GRPC_PYTHON_PLUGIN"`
GrpcJavaPlugin string `help:"The plugin invoked to compile Java code for grpc_library.\nDefaults to protoc-gen-grpc-java." var:"GRPC_JAVA_PLUGIN"`
GrpcGoPlugin string `help:"The plugin invoked to compile Go code for grpc_library.\nIf not set, then the protoc plugin will be used instead." var:"GRPC_GO_PLUGIN"`
GrpcCCPlugin string `help:"The plugin invoked to compile C++ code for grpc_library.\nDefaults to grpc_cpp_plugin." var:"GRPC_CC_PLUGIN"`
Language []string `help:"Sets the default set of languages that proto rules are built for.\nChosen from the set of {cc, java, go, py}.\nDefaults to all of them!" var:"PROTO_LANGUAGES"`
PythonDep string `help:"An in-repo dependency that's applied to any Python proto libraries." var:"PROTO_PYTHON_DEP"`
JavaDep string `help:"An in-repo dependency that's applied to any Java proto libraries." var:"PROTO_JAVA_DEP"`
GoDep string `help:"An in-repo dependency that's applied to any Go proto libraries." var:"PROTO_GO_DEP"`
JsDep string `help:"An in-repo dependency that's applied to any Javascript proto libraries." var:"PROTO_JS_DEP"`
PythonGrpcDep string `help:"An in-repo dependency that's applied to any Python gRPC libraries." var:"GRPC_PYTHON_DEP"`
JavaGrpcDep string `help:"An in-repo dependency that's applied to any Java gRPC libraries." var:"GRPC_JAVA_DEP"`
GoGrpcDep string `help:"An in-repo dependency that's applied to any Go gRPC libraries." var:"GRPC_GO_DEP"`
ProtocFlag []string `help:"Flags to pass to protoc i.e. the location of well known types. Can be repeated." var:"PROTOC_FLAGS"`
} `help:"Please has built-in support for compiling protocol buffers, which are a form of codegen to define common data types which can be serialised and communicated between different languages.\nSee https://developers.google.com/protocol-buffers/ for more information.\n\nThere is also support for gRPC, which is an implementation of protobuf's RPC framework. See http://www.grpc.io/ for more information.\n\nNote that you must have the protocol buffers compiler (and gRPC plugins, if needed) installed on your machine to make use of these rules."`
Licences struct {
Accept []string `help:"Licences that are accepted in this repository.\nWhen this is empty licences are ignored. As soon as it's set any licence detected or assigned must be accepted explicitly here.\nThere's no fuzzy matching, so some package managers (especially PyPI and Maven, but shockingly not npm which rather nicely uses SPDX) will generate a lot of slightly different spellings of the same thing, which will all have to be accepted here. We'd rather that than trying to 'cleverly' match them which might result in matching the wrong thing."`
Reject []string `help:"Licences that are explicitly rejected in this repository.\nAn astute observer will notice that this is not very different to just not adding it to the accept section, but it does have the advantage of explicitly documenting things that the team aren't allowed to use."`
} `help:"Please has some limited support for declaring acceptable licences and detecting them from some libraries. You should not rely on this for complete licence compliance, but it can be a useful check to try to ensure that unacceptable licences do not slip in."`
Alias map[string]*Alias `help:"Allows defining alias replacements with more detail than the [aliases] section. Otherwise follows the same process, i.e. performs replacements of command strings."`
Bazel struct {
Compatibility bool `help:"Activates limited Bazel compatibility mode. When this is active several rule arguments are available under different names (e.g. compiler_flags -> copts etc), the WORKSPACE file is interpreted, Makefile-style replacements like $< and $@ are made in genrule commands, etc.\nNote that Skylark is not generally supported and many aspects of compatibility are fairly superficial; it's unlikely this will work for complex setups of either tool." var:"BAZEL_COMPATIBILITY"`
} `help:"Bazel is an open-sourced version of Google's internal build tool. Please draws a lot of inspiration from the original tool although the two have now diverged in various ways.\nNonetheless, if you've used Bazel, you will likely find Please familiar."`
// buildEnvStored is a cached form of BuildEnv.
buildEnvStored *storedBuildEnv
// Profiling can be set to true by a caller to enable CPU profiling in any areas that might
// want to take special effort about it.
Profiling bool
FeatureFlags struct {
JavaBinaryExecutableByDefault bool `help:"Makes java_binary rules self executable by default. Target release version 16." var:"FF_JAVA_SELF_EXEC"`
SingleSHA1Hash bool `help:"Stops combining sha1 with the empty hash when there's a single output (just like SHA256 and the other hash functions do)."`
PackageOutputsStrictness bool `help:"Prevents certain combinations of target outputs within a package that result in nondeterministic behaviour"`
PythonWheelHashing bool `help:"This hashes the internal build rule that downloads the wheel instead" var:"FF_PYTHON_WHEEL_HASHING"`
} `help:"Flags controlling preview features for the next release. Typically these config options gate breaking changes and only have a lifetime of one major release."`
Metrics struct {
PrometheusGatewayURL string `help:"The gateway URL to push prometheus updates to."`
} `help:"Settings for collecting metrics."`
}
// An Alias represents aliases in the config.
type Alias struct {
Cmd string `help:"Command to run for this alias."`
Desc string `help:"Description of this alias"`
Subcommand []string `help:"Known subcommands of this command"`
Flag []string `help:"Known flags of this command"`
PositionalLabels bool `help:"Treats positional arguments after commands as build labels for the purpose of tab completion."`
}
// A Size represents a named size in the config.
type Size struct {
Timeout cli.Duration `help:"Timeout for targets of this size"`
TimeoutName string `help:"Name of the timeout, to be passed to the 'timeout' argument"`
}
type storedBuildEnv struct {
Env, Path []string
Once sync.Once
}
// Hash returns a hash of the parts of this configuration that affect building targets in general.
// Most parts are considered not to (e.g. cache settings) or affect specific targets (e.g. changing
// tool paths which get accounted for on the targets that use them).
func (config *Configuration) Hash() []byte {
h := sha1.New()
// These fields are the ones that need to be in the general hash; other things will be
// picked up by relevant rules (particularly tool paths etc).
// Note that container settings are handled separately.
h.Write([]byte(config.Build.Lang))
h.Write([]byte(config.Build.Nonce))
for _, l := range config.Licences.Reject {
h.Write([]byte(l))
}
for _, env := range config.getBuildEnv(false, false) {
if !strings.HasPrefix(env, "SECRET") {
h.Write([]byte(env))
}
}
return h.Sum(nil)
}
// GetBuildEnv returns the build environment configured for this config object.
func (config *Configuration) GetBuildEnv() []string {
config.buildEnvStored.Once.Do(func() {
config.buildEnvStored.Env = config.getBuildEnv(true, true)
for _, e := range config.buildEnvStored.Env {
if strings.HasPrefix(e, "PATH=") {
config.buildEnvStored.Path = strings.Split(strings.TrimPrefix(e, "PATH="), ":")
}
}
})
return config.buildEnvStored.Env
}
// EnsurePleaseLocation resolves `config.Please.Location` to the full path where Please is installed.
func (config *Configuration) EnsurePleaseLocation() {
defaultPleaseLocation := fs.ExpandHomePath("~/.please")
if config.Please.Location == "" {
// Determine the location based off where we're running from.
if exec, err := fs.Executable(); err != nil {
log.Warning("Can't determine current executable: %s", err)
config.Please.Location = defaultPleaseLocation
} else if strings.HasPrefix(exec, defaultPleaseLocation) {
// Paths within ~/.please are managed by us and have symlinks to subdirectories
// that we don't want to follow.
config.Please.Location = defaultPleaseLocation
} else if deref, err := filepath.EvalSymlinks(exec); err != nil {
log.Warning("Can't dereference %s: %s", exec, err)
config.Please.Location = defaultPleaseLocation
} else {
config.Please.Location = path.Dir(deref)
}
} else {
config.Please.Location = fs.ExpandHomePath(config.Please.Location)
if !filepath.IsAbs(config.Please.Location) {
config.Please.Location = filepath.Join(RepoRoot, config.Please.Location)
}
}
}
// Path returns the slice of strings corresponding to the PATH env var.
func (config *Configuration) Path() []string {
config.GetBuildEnv() // ensure it is initialised
return config.buildEnvStored.Path
}
func (config *Configuration) getBuildEnv(includePath bool, includeUnsafe bool) []string {
env := []string{}
// from the BuildEnv config keyword
for k, v := range config.BuildEnv {
pair := strings.ReplaceAll(strings.ToUpper(k), "-", "_") + "=" + v
env = append(env, pair)
}
// from the user's environment based on the PassUnsafeEnv config keyword
if includeUnsafe {
for _, k := range config.Build.PassUnsafeEnv {
if v, isSet := os.LookupEnv(k); isSet {
if k == "PATH" {
// plz's install location always needs to be on the path.
v = config.Please.Location + ":" + v
includePath = false // skip this in a bit
}
env = append(env, k+"="+v)
}
}
}
// from the user's environment based on the PassEnv config keyword
for _, k := range config.Build.PassEnv {
if v, isSet := os.LookupEnv(k); isSet {
if k == "PATH" {
// plz's install location always needs to be on the path.
v = config.Please.Location + ":" + v
includePath = false // skip this in a bit
}
env = append(env, k+"="+v)
}
}
if includePath {
// Use a restricted PATH; it'd be easier for the user if we pass it through
// but really external environment variables shouldn't affect this.
// The only concession is that ~ is expanded as the user's home directory
// in PATH entries.
env = append(env, "PATH="+strings.Join(append([]string{config.Please.Location}, config.Build.Path...), ":"))
}
sort.Strings(env)
return env
}
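// Small illustrative sketch (not part of the original file) of the normalisation done in
// getBuildEnv above: [buildenv] keys are upper-cased with dashes turned into underscores, so
//
//	[buildenv]
//	secret-passphrase = 12345
//
// becomes the entry "SECRET_PASSPHRASE=12345". If PATH is passed through via build.passenv or
// build.passunsafeenv, the Please install location is prepended to it; otherwise a restricted
// PATH is built from please.location plus build.path.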
// TagsToFields returns a map from the 'var' tag names (the properties exposed on the CONFIG object) to their corresponding config struct fields.
func (config *Configuration) TagsToFields() map[string]reflect.StructField {
tags := make(map[string]reflect.StructField)
v := reflect.ValueOf(config).Elem()
for i := 0; i < v.NumField(); i++ {
if field := v.Field(i); field.Kind() == reflect.Struct {
for j := 0; j < field.NumField(); j++ {
if tag := field.Type().Field(j).Tag.Get("var"); tag != "" {
tags[tag] = field.Type().Field(j)
}
}
}
}
return tags
}
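// Illustrative lookup sketch (not part of the original file): the keys collected above are the
// `var:"..."` tag names on the config fields, i.e. the names exposed to BUILD files via CONFIG.
// For example GoTool carries var:"GO_TOOL", so:
//
//	if field, ok := config.TagsToFields()["GO_TOOL"]; ok {
//		fmt.Println(field.Name) // prints "GoTool"
//	}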
// ApplyOverrides applies a set of overrides to the config.
// The keys of the given map are in dot notation for the config setting, e.g. "build.config".
func (config *Configuration) ApplyOverrides(overrides map[string]string) error {
match := func(s1 string) func(string) bool {
return func(s2 string) bool {
return strings.ToLower(s2) == s1
}
}
elem := reflect.ValueOf(config).Elem()
for k, v := range overrides {
split := strings.Split(strings.ToLower(k), ".")
if len(split) != 2 {
return fmt.Errorf("Bad option format: %s", k)
}
field := elem.FieldByNameFunc(match(split[0]))
if !field.IsValid() {
return fmt.Errorf("Unknown config field: %s", split[0])
} else if field.Kind() == reflect.Map {
field.SetMapIndex(reflect.ValueOf(split[1]), reflect.ValueOf(v))
continue
} else if field.Kind() != reflect.Struct {
return fmt.Errorf("Unsettable config field: %s", split[0])
}
subfield, ok := field.Type().FieldByNameFunc(match(split[1]))
if !ok {
return fmt.Errorf("Unknown config field: %s", split[1])
}
field = field.FieldByNameFunc(match(split[1]))
switch field.Kind() {
case reflect.String:
// verify this is a legit setting for this field
if options := subfield.Tag.Get("options"); options != "" {
if !cli.ContainsString(v, strings.Split(options, ",")) {
return fmt.Errorf("Invalid value %s for field %s; options are %s", v, k, options)
}
}
if field.Type().Name() == "URL" {
field.Set(reflect.ValueOf(cli.URL(v)))
} else {
field.Set(reflect.ValueOf(v))
}
case reflect.Bool:
v = strings.ToLower(v)
// Mimics the set of truthy things gcfg accepts in our config file.
field.SetBool(v == "true" || v == "yes" || v == "on" || v == "1")
case reflect.Int:
i, err := strconv.Atoi(v)
if err != nil {
return fmt.Errorf("Invalid value for an integer field: %s", v)
}
field.Set(reflect.ValueOf(i))
case reflect.Int64:
var d cli.Duration
if err := d.UnmarshalText([]byte(v)); err != nil {
return fmt.Errorf("Invalid value for a duration field: %s", v)
}
field.Set(reflect.ValueOf(d))
case reflect.Slice:
// Comma-separated values are accepted.
if field.Type().Elem().Kind() == reflect.Struct {
// Assume it must be a slice of BuildLabel.
l := []BuildLabel{}
for _, s := range strings.Split(v, ",") {
l = append(l, ParseBuildLabel(s, ""))
}
field.Set(reflect.ValueOf(l))
} else if field.Type().Elem().Name() == "URL" {
urls := []cli.URL{}
for _, s := range strings.Split(v, ",") {
urls = append(urls, cli.URL(s))
}
field.Set(reflect.ValueOf(urls))
} else {
field.Set(reflect.ValueOf(strings.Split(v, ",")))
}
default:
return fmt.Errorf("Can't override config field %s (is %s)", k, field.Kind())
}
}
// Resolve the full path to its location.
config.EnsurePleaseLocation()
return nil
}
// Completions returns a list of possible completions for the given option prefix.
func (config *Configuration) Completions(prefix string) []flags.Completion {
ret := []flags.Completion{}
t := reflect.TypeOf(config).Elem()
for i := 0; i < t.NumField(); i++ {
if field := t.Field(i); field.Type.Kind() == reflect.Struct {
for j := 0; j < field.Type.NumField(); j++ {
subfield := field.Type.Field(j)
if name := strings.ToLower(field.Name + "." + subfield.Name); strings.HasPrefix(name, prefix) {
help := subfield.Tag.Get("help")
if options := subfield.Tag.Get("options"); options != "" {
for _, option := range strings.Split(options, ",") {
ret = append(ret, flags.Completion{Item: name + ":" + option, Description: help})
}
} else {
ret = append(ret, flags.Completion{Item: name + ":", Description: help})
}
}
}
}
}
return ret
}
// UpdateArgsWithAliases applies the aliases in this config to the given set of arguments.
func (config *Configuration) UpdateArgsWithAliases(args []string) []string {
for idx, arg := range args[1:] {
// Please should not touch anything that comes after `--`
if arg == "--" {
break
}
for k, v := range config.Alias {
if arg == k {
// We could insert every token in v into os.Args at this point and then we could have
// aliases defined in terms of other aliases but that seems rather like overkill so just
// stick the replacement in wholesale instead.
// Do not ask about the inner append and the empty slice.
cmd, err := shlex.Split(v.Cmd)
if err != nil {
log.Fatalf("Invalid alias replacement for %s: %s", k, err)
}
return append(append(append([]string{}, args[:idx+1]...), cmd...), args[idx+2:]...)
}
}
}
return args
}
// PrintAliases prints the set of aliases defined in the config.
func (config *Configuration) PrintAliases(w io.Writer) {
aliases := config.Alias
names := make([]string, 0, len(aliases))
maxlen := 0
for alias := range aliases {
names = append(names, alias)
if len(alias) > maxlen {
maxlen = len(alias)
}
}
sort.Strings(names)
w.Write([]byte("\nAvailable commands for this repository:\n"))
tmpl := fmt.Sprintf(" %%-%ds %%s\n", maxlen)
for _, name := range names {
fmt.Fprintf(w, tmpl, name, aliases[name].Desc)
}
}
// IsABuildFile returns true if given filename is a build file name.
func (config *Configuration) IsABuildFile(name string) bool {
for _, buildFileName := range config.Parse.BuildFileName {
if name == buildFileName {
return true
}
}
return false
}
// NumRemoteExecutors returns the number of actual remote executors we'll have
func (config *Configuration) NumRemoteExecutors() int {
if config.Remote.URL == "" {
return 0
}
return config.Remote.NumExecutors
}
// A ConfigProfile is a string that knows how to handle completions given all the possible config file locations.
type ConfigProfile string
// Complete implements command-line flags completion for a ConfigProfile.
func (profile ConfigProfile) Complete(match string) (completions []flags.Completion) {
for _, filename := range defaultConfigFiles() {
matches, _ := filepath.Glob(filename + "." + match + "*")
for _, match := range matches {
if suffix := strings.TrimPrefix(match, filename+"."); suffix != "local" { // .plzconfig.local doesn't count
completions = append(completions, flags.Completion{
Item: suffix,
Description: "Profile defined at " + match,
})
}
}
}
return completions
}
| 1 | 10,193 | I wonder if we should look at moving the Python stuff to a plugin and doing this there rather than adding to core plz at this point? | thought-machine-please | go |
@@ -44,8 +44,7 @@ class Syntax(base.BaseCommand):
ansible = ansible_playbook.AnsiblePlaybook(self.molecule.config.config[
'ansible'])
ansible.add_cli_arg('syntax-check', True)
- ansible.add_cli_arg('inventory-file', 'localhost,')
-
+ ansible.add_cli_arg('inventory_file', 'localhost,')
utilities.print_info("Checking playbooks syntax ...")
return ansible.execute(hide_errors=True) | 1 | # Copyright (c) 2015-2016 Cisco Systems
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from molecule import ansible_galaxy
from molecule import ansible_playbook
from molecule import utilities
from molecule.commands import base
class Syntax(base.BaseCommand):
"""
Performs a syntax check on the current role.
Usage:
syntax
"""
def execute(self, exit=True):
self.molecule._create_templates()
if 'requirements_file' in self.molecule.config.config[
'ansible'] and not self.molecule._state.installed_deps:
galaxy = ansible_galaxy.AnsibleGalaxy(self.molecule.config.config)
galaxy.install()
self.molecule._state.change_state('installed_deps', True)
ansible = ansible_playbook.AnsiblePlaybook(self.molecule.config.config[
'ansible'])
ansible.add_cli_arg('syntax-check', True)
ansible.add_cli_arg('inventory-file', 'localhost,')
utilities.print_info("Checking playbooks syntax ...")
return ansible.execute(hide_errors=True)
| 1 | 6,507 | This is the actual bug fix. I prob should have broken this out into two PRs. | ansible-community-molecule | py |
@@ -235,6 +235,16 @@ namespace Nethermind.Core.Specs
/// </summary>
bool IsEip3198Enabled { get; }
+ /// <summary>
+ /// Difficulty Bomb Delay to Q2/2022
+ /// </summary>
+ bool IsEip3238Enabled { get; }
+
+ /// <summary>
+ /// Reduction in refunds
+ /// </summary>
+ bool IsEip3529Enabled { get; }
+
/// <summary>
/// Should transactions be validated against chainId.
/// </summary> | 1 | // Copyright (c) 2021 Demerzel Solutions Limited
// This file is part of the Nethermind library.
//
// The Nethermind library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The Nethermind library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the Nethermind. If not, see <http://www.gnu.org/licenses/>.
using Nethermind.Int256;
namespace Nethermind.Core.Specs
{
/// <summary>
/// https://github.com/ethereum/EIPs
/// </summary>
public interface IReleaseSpec
{
public string Name { get; }
long MaximumExtraDataSize { get; }
long MaxCodeSize { get; }
long MinGasLimit { get; }
long GasLimitBoundDivisor { get; }
UInt256 BlockReward { get; }
long DifficultyBombDelay { get; }
long DifficultyBoundDivisor { get; }
long? FixedDifficulty { get; }
int MaximumUncleCount { get; }
/// <summary>
/// ---
/// In chainspec - Ethash.Duration
/// </summary>
bool IsTimeAdjustmentPostOlympic { get; }
/// <summary>
/// Homestead contract creation via transaction cost set to 21000 + 32000 (previously 21000)
/// Failing init does not create an empty code contract
/// Difficulty adjustment changed
        /// Transaction signature uniqueness (s-value has to be less than or equal to secp256k1n/2)
/// </summary>
bool IsEip2Enabled { get; }
/// <summary>
/// Homestead DELEGATECALL instruction added
/// </summary>
bool IsEip7Enabled { get; }
/// <summary>
/// Byzantium Change difficulty adjustment to target mean block time including uncles
/// </summary>
bool IsEip100Enabled { get; }
/// <summary>
/// Byzantium REVERT instruction in the Ethereum Virtual Machine
/// ---
/// in chainspec Ethash.Eip100bTransition
/// </summary>
bool IsEip140Enabled { get; }
/// <summary>
/// Tangerine Whistle Gas cost of IO operations increased
/// </summary>
bool IsEip150Enabled { get; }
/// <summary>
/// Spurious Dragon Chain ID in signatures (replay attack protection)
/// </summary>
bool IsEip155Enabled { get; }
/// <summary>
/// Spurious Dragon State clearing
/// </summary>
bool IsEip158Enabled { get; }
/// <summary>
/// Spurious Dragon EXP cost increase
/// </summary>
bool IsEip160Enabled { get; }
/// <summary>
/// Spurious Dragon Code size limit
/// ---
/// in chainspec MaxCodeSizeTransition
/// </summary>
bool IsEip170Enabled { get; }
/// <summary>
/// Byzantium Precompiled contracts for addition and scalar multiplication on the elliptic curve alt_bn128
/// ---
/// in chainspec in builtin accounts
/// </summary>
bool IsEip196Enabled { get; }
/// <summary>
/// Byzantium Precompiled contracts for optimal ate pairing check on the elliptic curve alt_bn128
/// ---
/// in chainspec in builtin accounts
/// </summary>
bool IsEip197Enabled { get; }
/// <summary>
/// Byzantium Precompiled contract for bigint modular exponentiation
/// ---
/// in chainspec in builtin accounts
/// </summary>
bool IsEip198Enabled { get; }
/// <summary>
/// Byzantium New opcodes: RETURNDATASIZE and RETURNDATACOPY
/// </summary>
bool IsEip211Enabled { get; }
/// <summary>
/// Byzantium New opcode STATICCALL
/// </summary>
bool IsEip214Enabled { get; }
/// <summary>
/// Byzantium Difficulty Bomb Delay and Block Reward Reduction
/// ---
/// in chainspec as DifficultyBombDelays
/// </summary>
bool IsEip649Enabled { get; }
/// <summary>
/// Byzantium Embedding transaction return data in receipts
/// </summary>
bool IsEip658Enabled { get; }
/// <summary>
/// Constantinople SHL, SHR, SAR instructions
/// </summary>
bool IsEip145Enabled { get; }
/// <summary>
/// Constantinople Skinny CREATE2
/// </summary>
bool IsEip1014Enabled { get; }
/// <summary>
/// Constantinople EXTCODEHASH instructions
/// </summary>
bool IsEip1052Enabled { get; }
/// <summary>
/// Constantinople Net gas metering for SSTORE operations
/// </summary>
bool IsEip1283Enabled { get; }
/// <summary>
/// Constantinople Difficulty Bomb Delay and Block Reward Adjustment
/// ---
/// in chainspec as DifficultyBombDelays and BlockReward
/// </summary>
bool IsEip1234Enabled { get; }
/// <summary>
/// Istanbul ChainID opcode
/// </summary>
bool IsEip1344Enabled { get; }
/// <summary>
/// Istanbul transaction data gas cost reduction
/// </summary>
bool IsEip2028Enabled { get; }
/// <summary>
/// Istanbul Blake2F precompile
/// </summary>
bool IsEip152Enabled { get; }
/// <summary>
/// Istanbul alt_bn128 gas cost reduction
/// </summary>
bool IsEip1108Enabled { get; }
/// <summary>
/// Istanbul state opcodes gas cost increase
/// </summary>
bool IsEip1884Enabled { get; }
/// <summary>
/// Istanbul net-metered SSTORE
/// </summary>
bool IsEip2200Enabled { get; }
/// <summary>
/// Berlin subroutines -> https://github.com/ethereum/EIPs/issues/2315
/// </summary>
bool IsEip2315Enabled { get; }
/// <summary>
/// Berlin BLS crypto precompiles
/// </summary>
bool IsEip2537Enabled { get; }
/// <summary>
/// Berlin MODEXP precompiles
/// </summary>
bool IsEip2565Enabled { get; }
/// <summary>
/// Berlin gas cost increases for state reading opcodes
/// </summary>
bool IsEip2929Enabled { get; }
/// <summary>
/// Berlin access lists
/// </summary>
bool IsEip2930Enabled { get; }
/// <summary>
/// Should EIP158 be ignored for this account.
/// </summary>
        /// <remarks>This is needed for SystemUser account compatibility with Parity.</remarks>
/// <param name="address"></param>
/// <returns></returns>
bool IsEip158IgnoredAccount(Address address);
/// <summary>
/// Gas target and base fee, and fee burning.
/// </summary>
bool IsEip1559Enabled { get; }
/// <summary>
/// BaseFee opcode
/// </summary>
bool IsEip3198Enabled { get; }
/// <summary>
/// Should transactions be validated against chainId.
/// </summary>
/// <remarks>Backward compatibility for early Kovan blocks.</remarks>
bool ValidateChainId => true;
/// <summary>
/// Should validate ReceiptsRoot.
/// </summary>
/// <remarks>Backward compatibility for early Kovan blocks.</remarks>
bool ValidateReceipts => true;
public long Eip1559TransitionBlock { get; }
// STATE related
public bool ClearEmptyAccountWhenTouched => IsEip158Enabled;
// VM
public bool LimitCodeSize => IsEip170Enabled;
public bool UseHotAndColdStorage => IsEip2929Enabled;
public bool UseTxAccessLists => IsEip2930Enabled;
public bool ModExpEnabled => IsEip198Enabled;
public bool Bn128Enabled => IsEip196Enabled && IsEip197Enabled;
public bool BlakeEnabled => IsEip152Enabled;
public bool Bls381Enabled => IsEip2537Enabled;
public bool ChargeForTopLevelCreate => IsEip2Enabled;
public bool FailOnOutOfGasCodeDeposit => IsEip2Enabled;
public bool UseShanghaiDDosProtection => IsEip150Enabled;
public bool UseExpDDosProtection => IsEip160Enabled;
public bool UseLargeStateDDosProtection => IsEip1884Enabled;
public bool ReturnDataOpcodesEnabled => IsEip211Enabled;
public bool ChainIdOpcodeEnabled => IsEip1344Enabled;
public bool Create2OpcodeEnabled => IsEip1014Enabled;
public bool DelegateCallEnabled => IsEip7Enabled;
public bool StaticCallEnabled => IsEip214Enabled;
public bool ShiftOpcodesEnabled => IsEip145Enabled;
public bool SubroutinesEnabled => IsEip2315Enabled;
public bool RevertOpcodeEnabled => IsEip140Enabled;
public bool ExtCodeHashOpcodeEnabled => IsEip1052Enabled;
public bool SelfBalanceOpcodeEnabled => IsEip1884Enabled;
public bool UseConstantinopleNetGasMetering => IsEip1283Enabled;
public bool UseIstanbulNetGasMetering => IsEip2200Enabled;
public bool UseNetGasMetering => UseConstantinopleNetGasMetering | UseIstanbulNetGasMetering;
public bool UseNetGasMeteringWithAStipendFix => UseIstanbulNetGasMetering;
public bool Use63Over64Rule => UseShanghaiDDosProtection;
public bool BaseFeeEnabled => IsEip3198Enabled;
}
}
| 1 | 25,333 | I am not sure if we need this, we already have DifficultyBombDelays | NethermindEth-nethermind | .cs |
@@ -389,6 +389,9 @@ namespace Nethermind.Blockchain.Synchronization
}
}
}
+
+ delaySource.Dispose();
+ linkedSource.Dispose();
}, token);
}
| 1 | // Copyright (c) 2018 Demerzel Solutions Limited
// This file is part of the Nethermind library.
//
// The Nethermind library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The Nethermind library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the Nethermind. If not, see <http://www.gnu.org/licenses/>.
using System;
using System.Collections.Concurrent;
using System.Collections.Generic;
using System.ComponentModel;
using System.Linq;
using System.Threading;
using System.Threading.Tasks;
using Nethermind.Core;
using Nethermind.Core.Crypto;
using Nethermind.Dirichlet.Numerics;
using Nethermind.Logging;
using Nethermind.Stats;
using Nethermind.Stats.Model;
namespace Nethermind.Blockchain.Synchronization
{
public class EthSyncPeerPool : IEthSyncPeerPool
{
private const decimal _minDiffPercentageForSpeedSwitch = 0.10m;
private const int _minDiffForSpeedSwitch = 10;
private readonly ILogger _logger;
private readonly IBlockTree _blockTree;
private readonly INodeStatsManager _stats;
private readonly ISyncConfig _syncConfig;
private readonly ConcurrentDictionary<PublicKey, PeerInfo> _peers = new ConcurrentDictionary<PublicKey, PeerInfo>();
private ConcurrentDictionary<SyncPeerAllocation, object> _allocations = new ConcurrentDictionary<SyncPeerAllocation, object>();
private const int AllocationsUpgradeInterval = 1000;
private System.Timers.Timer _upgradeTimer;
private readonly BlockingCollection<PeerInfo> _peerRefreshQueue = new BlockingCollection<PeerInfo>();
private Task _refreshLoopTask;
private CancellationTokenSource _refreshLoopCancellation = new CancellationTokenSource();
private readonly ConcurrentDictionary<PublicKey, CancellationTokenSource> _refreshCancelTokens = new ConcurrentDictionary<PublicKey, CancellationTokenSource>();
private TimeSpan _timeBeforeWakingPeerUp = TimeSpan.FromSeconds(3);
public void ReportNoSyncProgress(SyncPeerAllocation allocation)
{
ReportNoSyncProgress(allocation?.Current);
}
public void ReportNoSyncProgress(PeerInfo peerInfo)
{
if (peerInfo == null)
{
return;
}
if (_logger.IsDebug) _logger.Debug($"No sync progress reported with {peerInfo}");
peerInfo.SleepingSince = DateTime.UtcNow;
}
public void ReportInvalid(SyncPeerAllocation allocation)
{
ReportInvalid(allocation?.Current);
}
public void ReportInvalid(PeerInfo peerInfo)
{
if (peerInfo != null)
{
_stats.ReportSyncEvent(peerInfo.SyncPeer.Node, NodeStatsEventType.SyncFailed);
peerInfo.SyncPeer.Disconnect(DisconnectReason.BreachOfProtocol, "SYNC BREACH");
}
}
public EthSyncPeerPool(
IBlockTree blockTree,
INodeStatsManager nodeStatsManager,
ISyncConfig syncConfig,
int peersMaxCount,
ILogManager logManager)
{
_blockTree = blockTree ?? throw new ArgumentNullException(nameof(blockTree));
_stats = nodeStatsManager ?? throw new ArgumentNullException(nameof(nodeStatsManager));
_syncConfig = syncConfig ?? throw new ArgumentNullException(nameof(syncConfig));
PeerMaxCount = peersMaxCount;
_logger = logManager.GetClassLogger() ?? throw new ArgumentNullException(nameof(logManager));
}
private async Task RunRefreshPeerLoop()
{
foreach (PeerInfo peerInfo in _peerRefreshQueue.GetConsumingEnumerable(_refreshLoopCancellation.Token))
{
try
{
if (_logger.IsDebug) _logger.Debug($"Refreshing info for {peerInfo}.");
var initCancelSource = _refreshCancelTokens[peerInfo.SyncPeer.Node.Id] = new CancellationTokenSource();
var linkedSource = CancellationTokenSource.CreateLinkedTokenSource(initCancelSource.Token, _refreshLoopCancellation.Token);
await RefreshPeerInfo(peerInfo, linkedSource.Token).ContinueWith(t =>
{
_refreshCancelTokens.TryRemove(peerInfo.SyncPeer.Node.Id, out _);
if (t.IsFaulted)
{
if (t.Exception != null && t.Exception.InnerExceptions.Any(x => x.InnerException is TimeoutException))
{
if (_logger.IsTrace) _logger.Trace($"Refreshing info for {peerInfo} failed due to timeout: {t.Exception.Message}");
}
else if (_logger.IsDebug) _logger.Debug($"Refreshing info for {peerInfo} failed {t.Exception}");
}
else if (t.IsCanceled)
{
if (_logger.IsTrace) _logger.Trace($"Refresh peer info canceled: {peerInfo.SyncPeer.Node:s}");
}
else
{
UpdateAllocations("REFRESH");
// cases when we want other nodes to resolve the impasse (check Goerli discussion on 5 out of 9 validators)
if (peerInfo.TotalDifficulty == _blockTree.BestSuggestedHeader?.TotalDifficulty && peerInfo.HeadHash != _blockTree.BestSuggestedHeader?.Hash)
{
Block block = _blockTree.FindBlock(_blockTree.BestSuggestedHeader.Hash, BlockTreeLookupOptions.None);
if (block != null) // can be null if fast syncing headers only
{
peerInfo.SyncPeer.SendNewBlock(block);
if (_logger.IsDebug) _logger.Debug($"Sending my best block {block} to {peerInfo}");
}
}
}
if (_logger.IsDebug) _logger.Debug($"Refreshed peer info for {peerInfo}.");
initCancelSource.Dispose();
linkedSource.Dispose();
});
}
catch (Exception e)
{
if (_logger.IsDebug) _logger.Debug($"Failed to refresh {peerInfo} {e}");
}
}
if (_logger.IsInfo) _logger.Info($"Exiting sync peer refresh loop");
}
private bool _isStarted;
public void Start()
{
// _refreshLoopTask = Task.Run(RunRefreshPeerLoop, _refreshLoopCancellation.Token)
_refreshLoopTask = Task.Factory.StartNew(
RunRefreshPeerLoop,
_refreshLoopCancellation.Token,
TaskCreationOptions.LongRunning,
TaskScheduler.Default).Unwrap()
.ContinueWith(t =>
{
if (t.IsFaulted)
{
if (_logger.IsError) _logger.Error("Init peer loop encountered an exception.", t.Exception);
}
else if (t.IsCanceled)
{
if (_logger.IsDebug) _logger.Debug("Init peer loop stopped.");
}
else if (t.IsCompleted)
{
if (_logger.IsError) _logger.Error("Peer loop completed unexpectedly.");
}
});
_isStarted = true;
StartUpgradeTimer();
_blockTree.NewHeadBlock += BlockTreeOnNewHeadBlock;
}
private void BlockTreeOnNewHeadBlock(object sender, BlockEventArgs e)
{
foreach ((SyncPeerAllocation allocation, _) in _allocations)
{
PeerInfo currentPeer = allocation.Current;
if (currentPeer == null)
{
continue;
}
if (currentPeer.TotalDifficulty < (e.Block.TotalDifficulty ?? 0))
{
allocation.Cancel();
}
}
}
private void StartUpgradeTimer()
{
if (_logger.IsDebug) _logger.Debug("Starting eth sync peer upgrade timer");
_upgradeTimer = new System.Timers.Timer(AllocationsUpgradeInterval);
_upgradeTimer.Elapsed += (s, e) =>
{
try
{
_upgradeTimer.Enabled = false;
UpdateAllocations("TIMER");
DropUselessPeers();
}
catch (Exception exception)
{
if (_logger.IsDebug) _logger.Error("Allocations upgrade failure", exception);
}
finally
{
_upgradeTimer.Enabled = true;
}
};
_upgradeTimer.Start();
}
private DateTime _lastUselessDrop = DateTime.UtcNow;
private void DropUselessPeers()
{
if (DateTime.UtcNow - _lastUselessDrop < TimeSpan.FromSeconds(30))
{
// give some time to monitoring nodes
return;
}
if(_logger.IsTrace) _logger.Trace($"Reviewing {PeerCount} peer usefulness");
int peersDropped = 0;
_lastUselessDrop = DateTime.UtcNow;
long ourNumber = _blockTree.BestSuggestedHeader?.Number ?? 0L;
UInt256 ourDifficulty = _blockTree.BestSuggestedHeader?.TotalDifficulty ?? UInt256.Zero;
foreach (PeerInfo peerInfo in AllPeers)
{
if (peerInfo.HeadNumber > ourNumber)
{
// as long as we are behind we can use the stuck peers
continue;
}
if (peerInfo.HeadNumber == 0
&& ourNumber != 0
&& !peerInfo.SyncPeer.ClientId.Contains("Nethermind"))
{
peersDropped++;
peerInfo.SyncPeer.Disconnect(DisconnectReason.UselessPeer, "PEER REVIEW / HEAD 0");
}
else if (peerInfo.HeadNumber == 1920000) // mainnet, stuck Geth nodes
{
peersDropped++;
peerInfo.SyncPeer.Disconnect(DisconnectReason.UselessPeer, "PEER REVIEW / 1920000");
}
else if (peerInfo.HeadNumber == 7280022) // mainnet, stuck Geth nodes
{
peersDropped++;
peerInfo.SyncPeer.Disconnect(DisconnectReason.UselessPeer, "PEER REVIEW / 7280022");
}
else if (peerInfo.HeadNumber > ourNumber + 1024L && peerInfo.TotalDifficulty < ourDifficulty)
{
                    // probably classic nodes that remain connected after we went past the DAO
                    // worth finding a better way to discard them at the right time
peersDropped++;
peerInfo.SyncPeer.Disconnect(DisconnectReason.UselessPeer, "STRAY PEER");
}
}
if (PeerCount == PeerMaxCount)
{
long worstSpeed = long.MaxValue;
PeerInfo worstPeer = null;
foreach (PeerInfo peerInfo in AllPeers)
{
long transferSpeed = _stats.GetOrAdd(peerInfo.SyncPeer.Node).GetAverageTransferSpeed() ?? 0;
if (transferSpeed < worstSpeed)
{
worstPeer = peerInfo;
}
}
peersDropped++;
worstPeer?.SyncPeer.Disconnect(DisconnectReason.TooManyPeers, "PEER REVIEW / LATENCY");
}
if(_logger.IsDebug) _logger.Debug($"Dropped {peersDropped} useless peers");
}
public async Task StopAsync()
{
_isStarted = false;
_refreshLoopCancellation.Cancel();
await (_refreshLoopTask ?? Task.CompletedTask);
}
public void EnsureBest()
{
UpdateAllocations("ENSURE BEST");
}
private ConcurrentDictionary<PeerInfo, int> _peerBadness = new ConcurrentDictionary<PeerInfo, int>();
public void ReportBadPeer(SyncPeerAllocation batchAssignedPeer)
{
if (batchAssignedPeer.CanBeReplaced)
{
throw new InvalidOperationException("Reporting bad peer is only supported for non-dynamic allocations");
}
_peerBadness.AddOrUpdate(batchAssignedPeer.Current, 0, (pi, badness) => badness + 1);
if (_peerBadness[batchAssignedPeer.Current] >= 10)
{
// fast Geth nodes send invalid nodes quite often :/
// so we let them deliver fast and only disconnect them when they really misbehave
batchAssignedPeer.Current.SyncPeer.Disconnect(DisconnectReason.BreachOfProtocol, "bad node data");
}
}
private static int InitTimeout = 10000;
private async Task RefreshPeerInfo(PeerInfo peerInfo, CancellationToken token)
{
if (_logger.IsTrace) _logger.Trace($"Requesting head block info from {peerInfo.SyncPeer.Node:s}");
ISyncPeer syncPeer = peerInfo.SyncPeer;
Task<BlockHeader> getHeadHeaderTask = peerInfo.SyncPeer.GetHeadBlockHeader(peerInfo.HeadHash, token);
CancellationTokenSource delaySource = new CancellationTokenSource();
CancellationTokenSource linkedSource = CancellationTokenSource.CreateLinkedTokenSource(delaySource.Token, token);
Task delayTask = Task.Delay(InitTimeout, linkedSource.Token);
Task firstToComplete = await Task.WhenAny(getHeadHeaderTask, delayTask);
await firstToComplete.ContinueWith(
t =>
{
if (firstToComplete.IsFaulted || firstToComplete == delayTask)
{
if (_logger.IsDebug) _logger.Debug($"InitPeerInfo failed for node: {syncPeer.Node:c}{Environment.NewLine}{t.Exception}");
_stats.ReportSyncEvent(syncPeer.Node, peerInfo.IsInitialized ? NodeStatsEventType.SyncFailed : NodeStatsEventType.SyncInitFailed);
syncPeer.Disconnect(DisconnectReason.DisconnectRequested, "refresh peer info fault");
}
else if (firstToComplete.IsCanceled)
{
if (_logger.IsTrace) _logger.Trace($"InitPeerInfo canceled for node: {syncPeer.Node:c}{Environment.NewLine}{t.Exception}");
_stats.ReportSyncEvent(syncPeer.Node, peerInfo.IsInitialized ? NodeStatsEventType.SyncCancelled : NodeStatsEventType.SyncInitCancelled);
token.ThrowIfCancellationRequested();
}
else
{
delaySource.Cancel();
BlockHeader header = getHeadHeaderTask.Result;
if (header == null)
{
if (_logger.IsDebug) _logger.Debug($"InitPeerInfo failed for node: {syncPeer.Node:c}{Environment.NewLine}{t.Exception}");
_stats.ReportSyncEvent(syncPeer.Node, peerInfo.IsInitialized ? NodeStatsEventType.SyncFailed: NodeStatsEventType.SyncInitFailed);
syncPeer.Disconnect(DisconnectReason.DisconnectRequested, "refresh peer info fault");
return;
}
if (_logger.IsTrace) _logger.Trace($"Received head block info from {syncPeer.Node:c} with head block numer {header.Number}");
if (!peerInfo.IsInitialized)
{
_stats.ReportSyncEvent(syncPeer.Node, NodeStatsEventType.SyncInitCompleted);
}
if (_logger.IsTrace) _logger.Trace($"REFRESH Updating header of {peerInfo} from {peerInfo.HeadNumber} to {header.Number}");
peerInfo.HeadNumber = header.Number;
peerInfo.HeadHash = header.Hash;
BlockHeader parent = _blockTree.FindHeader(header.ParentHash, BlockTreeLookupOptions.None);
if (parent != null)
{
peerInfo.TotalDifficulty = (parent.TotalDifficulty ?? UInt256.Zero) + header.Difficulty;
}
peerInfo.IsInitialized = true;
foreach ((SyncPeerAllocation allocation, object _) in _allocations)
{
if (allocation.Current == peerInfo)
{
allocation.Refresh();
}
}
}
}, token);
}
public IEnumerable<PeerInfo> AllPeers
{
get
{
foreach ((_, PeerInfo peerInfo) in _peers)
{
yield return peerInfo;
}
}
}
public IEnumerable<PeerInfo> UsefulPeers
{
get
{
foreach ((_, PeerInfo peerInfo) in _peers)
{
if (peerInfo.IsAsleep)
{
continue;
}
if (!peerInfo.IsInitialized)
{
continue;
}
if (peerInfo.TotalDifficulty < (_blockTree.BestSuggestedHeader?.TotalDifficulty ?? 0))
{
continue;
}
yield return peerInfo;
}
}
}
public IEnumerable<SyncPeerAllocation> Allocations
{
get
{
foreach ((SyncPeerAllocation allocation, _) in _allocations)
{
yield return allocation;
}
}
}
public int PeerCount => _peers.Count;
public int UsefulPeerCount => UsefulPeers.Count();
public int PeerMaxCount { get; }
public void Refresh(PublicKey publicKey)
{
TryFind(publicKey, out PeerInfo peerInfo);
if (peerInfo != null)
{
_peerRefreshQueue.Add(peerInfo);
}
}
public void AddPeer(ISyncPeer syncPeer)
{
if (_logger.IsDebug) _logger.Debug($"Adding sync peer {syncPeer.Node:c}");
if (!_isStarted)
{
if (_logger.IsDebug) _logger.Debug($"Sync peer pool not started yet - adding peer is blocked: {syncPeer.Node:s}");
return;
}
if (_peers.ContainsKey(syncPeer.Node.Id))
{
if (_logger.IsDebug) _logger.Debug($"Sync peer {syncPeer.Node:c} already in peers collection.");
return;
}
var peerInfo = new PeerInfo(syncPeer);
_peers.TryAdd(syncPeer.Node.Id, peerInfo);
Metrics.SyncPeers = _peers.Count;
if (_logger.IsDebug) _logger.Debug($"Adding {syncPeer.Node:c} to refresh queue");
_peerRefreshQueue.Add(peerInfo);
}
public void RemovePeer(ISyncPeer syncPeer)
{
if (_logger.IsDebug) _logger.Debug($"Removing sync peer {syncPeer.Node:c}");
if (!_isStarted)
{
if (_logger.IsDebug) _logger.Debug($"Sync peer pool not started yet - removing {syncPeer.Node:c} is blocked.");
return;
}
if (!_peers.TryRemove(syncPeer.Node.Id, out var peerInfo))
{
//possible if sync failed - we remove peer and eventually initiate disconnect, which calls remove peer again
return;
}
Metrics.SyncPeers = _peers.Count;
foreach ((SyncPeerAllocation allocation, _) in _allocations)
{
if (allocation.Current?.SyncPeer.Node.Id == syncPeer.Node.Id)
{
if (_logger.IsTrace) _logger.Trace($"Requesting peer cancel with {syncPeer.Node:c} on {allocation}");
allocation.Cancel();
}
}
if (_refreshCancelTokens.TryGetValue(syncPeer.Node.Id, out CancellationTokenSource initCancelTokenSource))
{
initCancelTokenSource?.Cancel();
}
}
private PeerInfo SelectBestPeerForAllocation(SyncPeerAllocation allocation, string reason, bool isLowPriority)
{
if (_logger.IsTrace) _logger.Trace($"[{reason}] Selecting best peer for {allocation}");
(PeerInfo Info, long TransferSpeed) bestPeer = (null, isLowPriority ? long.MaxValue : -1);
foreach ((_, PeerInfo info) in _peers)
{
if (allocation.MinBlocksAhead.HasValue && info.HeadNumber < (_blockTree.BestSuggestedHeader?.Number ?? 0) + allocation.MinBlocksAhead.Value)
{
continue;
}
if (!info.IsInitialized || info.TotalDifficulty <= (_blockTree.BestSuggestedHeader?.TotalDifficulty ?? UInt256.Zero))
{
continue;
}
if (info.IsAllocated && info != allocation.Current)
{
continue;
}
if (info.IsAsleep)
{
if (DateTime.UtcNow - info.SleepingSince < _timeBeforeWakingPeerUp)
{
continue;
}
info.SleepingSince = null;
}
if (info.TotalDifficulty - (_blockTree.BestSuggestedHeader?.TotalDifficulty ?? UInt256.Zero) <= 2 && info.SyncPeer.ClientId.Contains("Parity"))
{
// Parity advertises a better block but never sends it back and then it disconnects after a few conversations like this
// Geth responds all fine here
// note this is only 2 difficulty difference which means that is just for the POA / Clique chains
continue;
}
long averageTransferSpeed = _stats.GetOrAdd(info.SyncPeer.Node).GetAverageTransferSpeed() ?? 0;
if (isLowPriority ? (averageTransferSpeed <= bestPeer.TransferSpeed) : (averageTransferSpeed > bestPeer.TransferSpeed))
{
bestPeer = (info, averageTransferSpeed);
}
}
if (bestPeer.Info == null)
{
if (_logger.IsTrace) _logger.Trace($"[{reason}] No peer found for ETH sync");
}
else
{
if (_logger.IsTrace) _logger.Trace($"[{reason}] Best ETH sync peer: {bestPeer.Info} | BlockHeaderAvSpeed: {bestPeer.TransferSpeed}");
}
return bestPeer.Info;
}
private void ReplaceIfWorthReplacing(SyncPeerAllocation allocation, PeerInfo peerInfo)
{
if (!allocation.CanBeReplaced)
{
return;
}
if (peerInfo == null)
{
return;
}
if (allocation.Current == null)
{
allocation.ReplaceCurrent(peerInfo);
return;
}
if (peerInfo == allocation.Current)
{
if (_logger.IsTrace) _logger.Trace($"{allocation} is already syncing with best peer {peerInfo}");
return;
}
var currentSpeed = _stats.GetOrAdd(allocation.Current?.SyncPeer.Node)?.GetAverageTransferSpeed() ?? 0;
var newSpeed = _stats.GetOrAdd(peerInfo.SyncPeer.Node)?.GetAverageTransferSpeed() ?? 0;
if (newSpeed / (decimal) Math.Max(1L, currentSpeed) > 1m + _minDiffPercentageForSpeedSwitch
&& newSpeed > currentSpeed + _minDiffForSpeedSwitch)
{
if (_logger.IsInfo) _logger.Info($"Sync peer substitution{Environment.NewLine} OUT: {allocation.Current}[{currentSpeed}]{Environment.NewLine} IN : {peerInfo}[{newSpeed}]");
allocation.ReplaceCurrent(peerInfo);
}
else
{
if (_logger.IsTrace) _logger.Trace($"Staying with current peer {allocation.Current}[{currentSpeed}] (ignoring {peerInfo}[{newSpeed}])");
}
}
private void UpdateAllocations(string reason)
{
foreach ((SyncPeerAllocation allocation, _) in _allocations)
{
if (!allocation.CanBeReplaced)
{
continue;
}
PeerInfo bestPeer = SelectBestPeerForAllocation(allocation, reason, false);
if (bestPeer != allocation.Current)
{
ReplaceIfWorthReplacing(allocation, bestPeer);
}
else
{
if (_logger.IsTrace) _logger.Trace($"No better peer to sync with when updating allocations");
}
}
}
public bool TryFind(PublicKey nodeId, out PeerInfo peerInfo)
{
return _peers.TryGetValue(nodeId, out peerInfo);
}
public SyncPeerAllocation Borrow(string description)
{
return Borrow(BorrowOptions.None, description);
}
public SyncPeerAllocation Borrow(BorrowOptions borrowOptions, string description, long? minNumber = null)
{
SyncPeerAllocation allocation = new SyncPeerAllocation(description);
allocation.MinBlocksAhead = minNumber - _blockTree.BestSuggestedHeader?.Number;
if ((borrowOptions & BorrowOptions.DoNotReplace) == BorrowOptions.DoNotReplace)
{
allocation.CanBeReplaced = false;
}
PeerInfo bestPeer = SelectBestPeerForAllocation(allocation, "BORROW", (borrowOptions & BorrowOptions.LowPriority) == BorrowOptions.LowPriority);
if (bestPeer != null)
{
allocation.ReplaceCurrent(bestPeer);
}
_allocations.TryAdd(allocation, null);
return allocation;
}
public void Free(SyncPeerAllocation syncPeerAllocation)
{
if (_logger.IsTrace) _logger.Trace($"Returning {syncPeerAllocation}");
PeerInfo peerInfo = syncPeerAllocation.Current;
if (peerInfo != null && !syncPeerAllocation.CanBeReplaced)
{
_peerBadness.TryRemove(peerInfo, out _);
}
_allocations.TryRemove(syncPeerAllocation, out _);
syncPeerAllocation.Cancel();
if (_allocations.Count > 1024 * 16)
{
_logger.Warn($"Peer allocations leakage - {_allocations.Count}");
}
}
}
} | 1 | 22,862 | move to finally - I can see at least one return that would omit it, also any exception and so on | NethermindEth-nethermind | .cs |
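To illustrate the review comment above: a minimal sketch (not the project's actual change) of disposing both token sources in a finally block inside RefreshPeerInfo, so that an early return or an exception in the continuation cannot skip the cleanup. getHeadHeaderTask, token and InitTimeout are assumed to come from the surrounding method shown above.

            CancellationTokenSource delaySource = new CancellationTokenSource();
            CancellationTokenSource linkedSource = CancellationTokenSource.CreateLinkedTokenSource(delaySource.Token, token);
            try
            {
                Task delayTask = Task.Delay(InitTimeout, linkedSource.Token);
                Task firstToComplete = await Task.WhenAny(getHeadHeaderTask, delayTask);
                await firstToComplete.ContinueWith(t =>
                {
                    // ... existing fault / cancel / success handling from RefreshPeerInfo ...
                }, token);
            }
            finally
            {
                // Runs on every path, including early returns and exceptions above.
                delaySource.Dispose();
                linkedSource.Dispose();
            }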
@@ -122,7 +122,9 @@ func NewProtocol(
if cfg.Consensus.Scheme != config.RollDPoSScheme {
return nil, nil
}
- if !genesisConfig.EnableGravityChainVoting || electionCommittee == nil || genesisConfig.GravityChainStartHeight == 0 {
+
+ if !genesisConfig.EnableGravityChainVoting || genesisConfig.GravityChainStartHeight == 0 ||
+ (electionCommittee == nil && stakingV2 == nil) {
delegates := genesisConfig.Delegates
if uint64(len(delegates)) < genesisConfig.NumDelegates {
return nil, errors.New("invalid delegate address in genesis block") | 1 | // Copyright (c) 2019 IoTeX Foundation
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package poll
import (
"context"
"math/big"
"time"
"github.com/iotexproject/iotex-election/committee"
"github.com/pkg/errors"
"github.com/iotexproject/iotex-core/action/protocol"
"github.com/iotexproject/iotex-core/action/protocol/staking"
"github.com/iotexproject/iotex-core/action/protocol/vote"
"github.com/iotexproject/iotex-core/config"
"github.com/iotexproject/iotex-core/pkg/log"
"github.com/iotexproject/iotex-core/state"
)
const (
protocolID = "poll"
)
// ErrInconsistentHeight is an error indicating that the result of "readFromStateDB" is not consistent with others
var ErrInconsistentHeight = errors.New("data is inconsistent because the state height has been changed")
// ErrNoElectionCommittee is an error that the election committee is not specified
var ErrNoElectionCommittee = errors.New("no election committee specified")
// ErrProposedDelegatesLength is an error that the proposed delegate list length is not right
var ErrProposedDelegatesLength = errors.New("the proposed delegate list length")
// ErrDelegatesNotAsExpected is an error that the delegates are not as expected
var ErrDelegatesNotAsExpected = errors.New("delegates are not as expected")
// ErrDelegatesNotExist is an error that the delegates cannot be prepared
var ErrDelegatesNotExist = errors.New("delegates cannot be found")
// CandidatesByHeight returns the candidates of a given height
type CandidatesByHeight func(protocol.StateReader, uint64) ([]*state.Candidate, error)
// GetCandidates returns the current candidates
type GetCandidates func(protocol.StateReader, bool, ...protocol.StateOption) ([]*state.Candidate, uint64, error)
// GetKickoutList returns current the blacklist
type GetKickoutList func(protocol.StateReader, bool, ...protocol.StateOption) (*vote.Blacklist, uint64, error)
// GetUnproductiveDelegate returns unproductiveDelegate struct which contains a cache of upd info by epochs
type GetUnproductiveDelegate func(protocol.StateReader) (*vote.UnproductiveDelegate, error)
// GetBlockTime defines a function to get block creation time
type GetBlockTime func(uint64) (time.Time, error)
// ProductivityByEpoch returns the number of produced blocks per delegate in an epoch
type ProductivityByEpoch func(context.Context, uint64) (uint64, map[string]uint64, error)
// Protocol defines the protocol of handling votes
type Protocol interface {
protocol.Protocol
protocol.GenesisStateCreator
DelegatesByEpoch(context.Context, uint64) (state.CandidateList, error)
CandidatesByHeight(context.Context, uint64) (state.CandidateList, error)
	// CalculateCandidatesByHeight calculates and returns the candidates at the given chain height
CalculateCandidatesByHeight(context.Context, uint64) (state.CandidateList, error)
}
// FindProtocol finds the registered protocol from registry
func FindProtocol(registry *protocol.Registry) Protocol {
if registry == nil {
return nil
}
p, ok := registry.Find(protocolID)
if !ok {
return nil
}
pp, ok := p.(Protocol)
if !ok {
log.S().Panic("fail to cast poll protocol")
}
return pp
}
// MustGetProtocol return a registered protocol from registry
func MustGetProtocol(registry *protocol.Registry) Protocol {
if registry == nil {
log.S().Panic("registry cannot be nil")
}
p, ok := registry.Find(protocolID)
if !ok {
log.S().Panic("poll protocol is not registered")
}
pp, ok := p.(Protocol)
if !ok {
log.S().Panic("fail to cast poll protocol")
}
return pp
}
// NewProtocol instantiates a poll protocol instance.
func NewProtocol(
cfg config.Config,
readContract ReadContract,
candidatesByHeight CandidatesByHeight,
getCandidates GetCandidates,
kickoutListByEpoch GetKickoutList,
getUnproductiveDelegate GetUnproductiveDelegate,
electionCommittee committee.Committee,
enableV2 bool,
stakingV2 *staking.Protocol,
getBlockTimeFunc GetBlockTime,
sr protocol.StateReader,
productivityByEpoch ProductivityByEpoch,
) (Protocol, error) {
genesisConfig := cfg.Genesis
if cfg.Consensus.Scheme != config.RollDPoSScheme {
return nil, nil
}
if !genesisConfig.EnableGravityChainVoting || electionCommittee == nil || genesisConfig.GravityChainStartHeight == 0 {
delegates := genesisConfig.Delegates
if uint64(len(delegates)) < genesisConfig.NumDelegates {
return nil, errors.New("invalid delegate address in genesis block")
}
return NewLifeLongDelegatesProtocol(delegates), nil
}
var pollProtocol, governance Protocol
var err error
if governance, err = NewGovernanceChainCommitteeProtocol(
candidatesByHeight,
getCandidates,
kickoutListByEpoch,
getUnproductiveDelegate,
electionCommittee,
genesisConfig.GravityChainStartHeight,
getBlockTimeFunc,
genesisConfig.NumCandidateDelegates,
genesisConfig.NumDelegates,
cfg.Chain.PollInitialCandidatesInterval,
sr,
productivityByEpoch,
genesisConfig.ProductivityThreshold,
genesisConfig.KickoutEpochPeriod,
genesisConfig.KickoutIntensityRate,
genesisConfig.UnproductiveDelegateMaxCacheSize,
); err != nil {
return nil, err
}
scoreThreshold, ok := new(big.Int).SetString(cfg.Genesis.ScoreThreshold, 10)
if !ok {
return nil, errors.Errorf("failed to parse score threshold %s", cfg.Genesis.ScoreThreshold)
}
if pollProtocol, err = NewStakingCommittee(
electionCommittee,
governance,
enableV2,
stakingV2,
readContract,
cfg.Genesis.NativeStakingContractAddress,
cfg.Genesis.NativeStakingContractCode,
scoreThreshold,
); err != nil {
return nil, err
}
return pollProtocol, nil
}
| 1 | 21,324 | remove `genesisConfig.GravityChainStartHeight == 0` | iotexproject-iotex-core | go |
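A sketch of how the guard in NewProtocol might read with that clause dropped, as the reviewer suggests (the remaining logic is taken verbatim from the patch and file above):

	if !genesisConfig.EnableGravityChainVoting ||
		(electionCommittee == nil && stakingV2 == nil) {
		delegates := genesisConfig.Delegates
		if uint64(len(delegates)) < genesisConfig.NumDelegates {
			return nil, errors.New("invalid delegate address in genesis block")
		}
		return NewLifeLongDelegatesProtocol(delegates), nil
	}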
@@ -4,6 +4,10 @@ declare(strict_types=1);
use Doctrine\Common\Annotations\AnnotationRegistry;
+$symfonyDumpFunctionPath = 'vendor/symfony/var-dumper/Resources/functions/dump.php';
+
+file_exists(__DIR__ . '/../' . $symfonyDumpFunctionPath) ? require_once __DIR__ . '/../' . $symfonyDumpFunctionPath : require_once __DIR__ . '/../../' . $symfonyDumpFunctionPath;
+
/* @var \Composer\Autoload\ClassLoader $loader */
$loader = file_exists(__DIR__ . '/../vendor/autoload.php') ? require __DIR__ . '/../vendor/autoload.php' : require __DIR__ . '/../../vendor/autoload.php';
| 1 | <?php
declare(strict_types=1);
use Doctrine\Common\Annotations\AnnotationRegistry;
/* @var \Composer\Autoload\ClassLoader $loader */
$loader = file_exists(__DIR__ . '/../vendor/autoload.php') ? require __DIR__ . '/../vendor/autoload.php' : require __DIR__ . '/../../vendor/autoload.php';
AnnotationRegistry::registerLoader([$loader, 'loadClass']);
return $loader;
| 1 | 21,917 | this might kill whole application if `var-dumper` will not be installed. What about two separate file_exists conditions (monorepo/project)? | shopsys-shopsys | php |
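One way to read that suggestion, sketched as two separate file_exists checks (monorepo layout vs. standalone project) so a missing symfony/var-dumper package is simply skipped instead of triggering a fatal require; this is an illustration only, not the project's actual follow-up:

$symfonyDumpFunctionPath = 'vendor/symfony/var-dumper/Resources/functions/dump.php';

if (file_exists(__DIR__ . '/../' . $symfonyDumpFunctionPath)) {
    require_once __DIR__ . '/../' . $symfonyDumpFunctionPath;
} elseif (file_exists(__DIR__ . '/../../' . $symfonyDumpFunctionPath)) {
    require_once __DIR__ . '/../../' . $symfonyDumpFunctionPath;
}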
@@ -163,6 +163,11 @@ var ConfigCommand *cobra.Command = &cobra.Command{
util.Failed("Could not write ddev config file: %v", err)
}
+ _, err = app.CreateSettingsFile()
+ if err != nil {
+ util.Failed("Could not write settings file: %w", err)
+ }
+
// If a provider is specified, prompt about whether to do an import after config.
switch provider {
case ddevapp.DefaultProviderName: | 1 | package cmd
import (
"fmt"
"os"
"strings"
"path/filepath"
"github.com/drud/ddev/pkg/ddevapp"
"github.com/drud/ddev/pkg/output"
"github.com/drud/ddev/pkg/util"
"github.com/spf13/cobra"
)
// docrootRelPath is the relative path to the docroot where index.php is
var docrootRelPath string
// siteName is the name of the site
var siteName string
// pantheonEnvironment is the environment for pantheon, dev/test/prod
var pantheonEnvironment string
// fallbackPantheonEnvironment is our assumption that "dev" will be available in any case
const fallbackPantheonEnvironment = "dev"
// appType is the ddev app type, like drupal7/drupal8/wordpress
var appType string
// showConfigLocation if set causes the command to show the config location.
var showConfigLocation bool
// ConfigCommand represents the `ddev config` command
var ConfigCommand *cobra.Command = &cobra.Command{
Use: "config [provider]",
Short: "Create or modify a ddev project configuration in the current directory",
Run: func(cmd *cobra.Command, args []string) {
appRoot, err := os.Getwd()
if err != nil {
util.Failed("Could not determine current working directory: %v", err)
}
provider := ddevapp.DefaultProviderName
if len(args) > 1 {
output.UserOut.Fatal("Invalid argument detected. Please use 'ddev config' or 'ddev config [provider]' to configure a project.")
}
if len(args) == 1 {
provider = args[0]
}
app, err := ddevapp.NewApp(appRoot, provider)
if err != nil {
util.Failed("Could not create new config: %v", err)
}
// Support the show-config-location flag.
if showConfigLocation {
activeApp, err := ddevapp.GetActiveApp("")
if err != nil {
if strings.Contains(err.Error(), "Have you run 'ddev config'") {
util.Failed("No project configuration currently exists")
} else {
util.Failed("Failed to access project configuration: %v", err)
}
}
if activeApp.ConfigPath != "" && activeApp.ConfigExists() {
rawResult := make(map[string]interface{})
rawResult["configpath"] = activeApp.ConfigPath
rawResult["approot"] = activeApp.AppRoot
friendlyMsg := fmt.Sprintf("The project config location is %s", activeApp.ConfigPath)
output.UserOut.WithField("raw", rawResult).Print(friendlyMsg)
return
}
}
// If they have not given us any flags, we prompt for full info. Otherwise, we assume they're in control.
if siteName == "" && docrootRelPath == "" && pantheonEnvironment == "" && appType == "" {
err = app.PromptForConfig()
if err != nil {
util.Failed("There was a problem configuring your project: %v", err)
}
} else { // In this case we have to validate the provided items, or set to sane defaults
// Let them know if we're replacing the config.yaml
app.WarnIfConfigReplace()
// app.Name gets set to basename if not provided, or set to siteName if provided
if app.Name != "" && siteName == "" { // If we already have a c.Name and no siteName, leave c.Name alone
// Sorry this is empty but it makes the logic clearer.
} else if siteName != "" { // if we have a siteName passed in, use it for c.Name
app.Name = siteName
} else { // No siteName passed, c.Name not set: use c.Name from the directory
// nolint: vetshadow
pwd, err := os.Getwd()
util.CheckErr(err)
app.Name = filepath.Base(pwd)
}
// docrootRelPath must exist
if docrootRelPath != "" {
app.Docroot = docrootRelPath
if _, err = os.Stat(docrootRelPath); os.IsNotExist(err) {
util.Failed("The docroot provided (%v) does not exist", docrootRelPath)
}
} else if !cmd.Flags().Changed("docroot") {
app.Docroot = ddevapp.DiscoverDefaultDocroot(app)
}
// pantheonEnvironment must be appropriate, and can only be used with pantheon provider.
if provider != "pantheon" && pantheonEnvironment != "" {
util.Failed("--pantheon-environment can only be used with pantheon provider, for example 'ddev config pantheon --pantheon-environment=dev --docroot=docroot'")
}
if appType != "" && !ddevapp.IsValidAppType(appType) {
validAppTypes := strings.Join(ddevapp.GetValidAppTypes(), ", ")
util.Failed("apptype must be one of %s", validAppTypes)
}
detectedApptype := app.DetectAppType()
fullPath, pathErr := filepath.Abs(app.Docroot)
if pathErr != nil {
util.Failed("Failed to get absolute path to Docroot %s: %v", app.Docroot, pathErr)
}
if appType == "" || appType == detectedApptype { // Found an app, matches passed-in or no apptype passed
appType = detectedApptype
util.Success("Found a %s codebase at %s", detectedApptype, fullPath)
} else if appType != "" { // apptype was passed, but we found no app at all
util.Warning("You have specified a project type of %s but no project of that type is found in %s", appType, fullPath)
} else if appType != "" && detectedApptype != appType { // apptype was passed, app was found, but not the same type
util.Warning("You have specified a project type of %s but a project of type %s was discovered in %s", appType, detectedApptype, fullPath)
}
app.Type = appType
prov, _ := app.GetProvider()
if provider == "pantheon" {
pantheonProvider := prov.(*ddevapp.PantheonProvider)
if pantheonEnvironment == "" {
pantheonEnvironment = fallbackPantheonEnvironment // assume a basic default if they haven't provided one.
}
pantheonProvider.SetSiteNameAndEnv(pantheonEnvironment)
}
// But pantheon *does* validate "Name"
appTypeErr := prov.Validate()
if appTypeErr != nil {
util.Failed("Failed to validate project name %v and environment %v with provider %v: %v", app.Name, pantheonEnvironment, provider, appTypeErr)
} else {
util.Success("Using project name '%s' and environment '%s'.", app.Name, pantheonEnvironment)
}
err = app.ConfigFileOverrideAction()
if err != nil {
util.Failed("Failed to run ConfigFileOverrideAction: %v", err)
}
}
err = app.WriteConfig()
if err != nil {
util.Failed("Could not write ddev config file: %v", err)
}
// If a provider is specified, prompt about whether to do an import after config.
switch provider {
case ddevapp.DefaultProviderName:
util.Success("Configuration complete. You may now run 'ddev start'.")
default:
util.Success("Configuration complete. You may now run 'ddev start' or 'ddev pull'")
}
},
}
func init() {
validAppTypes := strings.Join(ddevapp.GetValidAppTypes(), ", ")
apptypeUsage := fmt.Sprintf("Provide the project type (one of %s). This is autodetected and this flag is necessary only to override the detection.", validAppTypes)
	projectNameUsage := fmt.Sprintf("Provide the name of the project to configure (normally the same as the last part of the directory name)")
ConfigCommand.Flags().StringVarP(&siteName, "projectname", "", "", projectNameUsage)
ConfigCommand.Flags().StringVarP(&docrootRelPath, "docroot", "", "", "Provide the relative docroot of the project, like 'docroot' or 'htdocs' or 'web', defaults to empty, the current directory")
ConfigCommand.Flags().StringVarP(&pantheonEnvironment, "pantheon-environment", "", "", "Choose the environment for a Pantheon site (dev/test/prod) (Pantheon-only)")
ConfigCommand.Flags().StringVarP(&appType, "projecttype", "", "", apptypeUsage)
// apptype flag is there for backwards compatibility.
ConfigCommand.Flags().StringVarP(&appType, "apptype", "", "", apptypeUsage+" This is the same as --projecttype and is included only for backwards compatibility.")
ConfigCommand.Flags().BoolVarP(&showConfigLocation, "show-config-location", "", false, "Output the location of the config.yaml file if it exists, or error that it doesn't exist.")
ConfigCommand.Flags().StringVarP(&siteName, "sitename", "", "", projectNameUsage+" This is the same as projectname and is included only for backwards compatibility")
err := ConfigCommand.Flags().MarkDeprecated("sitename", "The sitename flag is deprecated in favor of --projectname")
util.CheckErr(err)
err = ConfigCommand.Flags().MarkDeprecated("apptype", "The apptype flag is deprecated in favor of --projecttype")
util.CheckErr(err)
RootCmd.AddCommand(ConfigCommand)
}
| 1 | 12,364 | Hrm. Maybe this isn't the correct spot to run this? Because it errors if the app type doesn't support settings. But we want to check that before running it. | drud-ddev | go |
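A hedged sketch of one possible reaction to that comment: keep the CreateSettingsFile call introduced by the patch, but downgrade a failure to a warning so project types without a settings file do not abort ddev config. The reviewer's preferred fix (checking settings support before the call) needs API not visible in this file, so only calls that already appear here are used; the handling below is an assumption, not ddev's actual behaviour.

	if _, err := app.CreateSettingsFile(); err != nil {
		// Assumed handling: warn instead of failing for app types that have no settings file.
		util.Warning("Could not write settings file: %v", err)
	}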
@@ -1359,7 +1359,7 @@ def get_terminal_width():
else:
return None
-def pretty_routes(rtlst, header, sortBy=0):
+def pretty_list(rtlst, header, sortBy=0):
"""Pretty route list, and add header"""
_l_header = len(header[0])
_space = " " | 1 | ## This file is part of Scapy
## See http://www.secdev.org/projects/scapy for more informations
## Copyright (C) Philippe Biondi <[email protected]>
## This program is published under a GPLv2 license
"""
General utility functions.
"""
from __future__ import absolute_import
from __future__ import print_function
import os, sys, socket, types
import random, time
import gzip, zlib
import re, struct, array
import subprocess
import tempfile
import warnings
import scapy.modules.six as six
from scapy.modules.six.moves import range
warnings.filterwarnings("ignore","tempnam",RuntimeWarning, __name__)
from scapy.config import conf
from scapy.consts import DARWIN, WINDOWS
from scapy.data import MTU
from scapy.compat import *
from scapy.error import log_runtime, log_loading, log_interactive, Scapy_Exception, warning
from scapy.base_classes import BasePacketList
###########
## Tools ##
###########
def get_temp_file(keep=False, autoext=""):
"""Create a temporary file and return its name. When keep is False,
the file is deleted when scapy exits.
"""
fname = tempfile.NamedTemporaryFile(prefix="scapy", suffix=autoext,
delete=False).name
if not keep:
conf.temp_files.append(fname)
return fname
def sane_color(x):
r=""
for i in x:
j = orb(i)
if (j < 32) or (j >= 127):
r=r+conf.color_theme.not_printable(".")
else:
r=r+chr(j)
return r
def sane(x):
r=""
for i in x:
j = orb(i)
if (j < 32) or (j >= 127):
r=r+"."
else:
r=r+chr(j)
return r
def lhex(x):
if type(x) in six.integer_types:
return hex(x)
elif isinstance(x, tuple):
return "(%s)" % ", ".join(map(lhex, x))
elif isinstance(x, list):
return "[%s]" % ", ".join(map(lhex, x))
else:
return x
@conf.commands.register
def hexdump(x, dump=False):
""" Build a tcpdump like hexadecimal view
:param x: a Packet
:param dump: define if the result must be printed or returned in a variable
:returns: a String only when dump=True
"""
s = ""
x = raw(x)
l = len(x)
i = 0
while i < l:
s += "%04x " % i
for j in range(16):
if i+j < l:
s += "%02X" % orb(x[i+j])
else:
s += " "
if j%16 == 7:
s += ""
s += " "
s += sane_color(x[i:i+16])
i += 16
s += "\n"
# remove trailing \n
if s.endswith("\n"):
s = s[:-1]
if dump:
return s
else:
print(s)
@conf.commands.register
def linehexdump(x, onlyasc=0, onlyhex=0, dump=False):
""" Build an equivalent view of hexdump() on a single line
    Note that setting both onlyasc and onlyhex to 1 results in an empty output
:param x: a Packet
:param onlyasc: 1 to display only the ascii view
:param onlyhex: 1 to display only the hexadecimal view
:param dump: print the view if False
:returns: a String only when dump=True
"""
s = ""
x = raw(x)
l = len(x)
if not onlyasc:
for i in range(l):
s += "%02X" % orb(x[i])
if not onlyhex: # separate asc & hex if both are displayed
s += " "
if not onlyhex:
s += sane_color(x)
if dump:
return s
else:
print(s)
@conf.commands.register
def chexdump(x, dump=False):
""" Build a per byte hexadecimal representation
Example:
>>> chexdump(IP())
0x45, 0x00, 0x00, 0x14, 0x00, 0x01, 0x00, 0x00, 0x40, 0x00, 0x7c, 0xe7, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, 0x00, 0x01
:param x: a Packet
:param dump: print the view if False
:returns: a String only if dump=True
"""
x = raw(x)
s = ", ".join("%#04x" % orb(x) for x in x)
if dump:
return s
else:
print(s)
@conf.commands.register
def hexstr(x, onlyasc=0, onlyhex=0):
s = []
if not onlyasc:
s.append(" ".join("%02x" % orb(b) for b in x))
if not onlyhex:
s.append(sane(x))
return " ".join(s)
def repr_hex(s):
""" Convert provided bitstring to a simple string of hex digits """
return "".join("%02x" % orb(x) for x in s)
@conf.commands.register
def hexdiff(x,y):
"""Show differences between 2 binary strings"""
x=str(x)[::-1]
y=str(y)[::-1]
SUBST=1
INSERT=1
d = {(-1, -1): (0, (-1, -1))}
for j in range(len(y)):
d[-1,j] = d[-1,j-1][0]+INSERT, (-1,j-1)
for i in range(len(x)):
d[i,-1] = d[i-1,-1][0]+INSERT, (i-1,-1)
for j in range(len(y)):
for i in range(len(x)):
d[i,j] = min( ( d[i-1,j-1][0]+SUBST*(x[i] != y[j]), (i-1,j-1) ),
( d[i-1,j][0]+INSERT, (i-1,j) ),
( d[i,j-1][0]+INSERT, (i,j-1) ) )
backtrackx = []
backtracky = []
i=len(x)-1
j=len(y)-1
while not (i == j == -1):
i2,j2 = d[i,j][1]
backtrackx.append(x[i2+1:i+1])
backtracky.append(y[j2+1:j+1])
i,j = i2,j2
x = y = i = 0
colorize = { 0: lambda x:x,
-1: conf.color_theme.left,
1: conf.color_theme.right }
dox=1
doy=0
l = len(backtrackx)
while i < l:
separate=0
linex = backtrackx[i:i+16]
liney = backtracky[i:i+16]
xx = sum(len(k) for k in linex)
yy = sum(len(k) for k in liney)
if dox and not xx:
dox = 0
doy = 1
if dox and linex == liney:
doy=1
if dox:
xd = y
j = 0
while not linex[j]:
j += 1
xd -= 1
print(colorize[doy-dox]("%04x" % xd), end=' ')
x += xx
line=linex
else:
print(" ", end=' ')
if doy:
yd = y
j = 0
while not liney[j]:
j += 1
yd -= 1
print(colorize[doy-dox]("%04x" % yd), end=' ')
y += yy
line=liney
else:
print(" ", end=' ')
print(" ", end=' ')
cl = ""
for j in range(16):
if i+j < l:
if line[j]:
col = colorize[(linex[j]!=liney[j])*(doy-dox)]
print(col("%02X" % orb(line[j])), end=' ')
if linex[j]==liney[j]:
cl += sane_color(line[j])
else:
cl += col(sane(line[j]))
else:
print(" ", end=' ')
cl += " "
else:
print(" ", end=' ')
if j == 7:
print("", end=' ')
print(" ",cl)
if doy or not yy:
doy=0
dox=1
i += 16
else:
if yy:
dox=0
doy=1
else:
i += 16
if struct.pack("H",1) == b"\x00\x01": # big endian
def checksum(pkt):
if len(pkt) % 2 == 1:
pkt += b"\0"
s = sum(array.array("H", pkt))
s = (s >> 16) + (s & 0xffff)
s += s >> 16
s = ~s
return s & 0xffff
else:
def checksum(pkt):
if len(pkt) % 2 == 1:
pkt += b"\0"
s = sum(array.array("H", pkt))
s = (s >> 16) + (s & 0xffff)
s += s >> 16
s = ~s
return (((s>>8)&0xff)|s<<8) & 0xffff
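# Illustrative sketch (hypothetical helper, arbitrary payload): a quick sanity
# check of the RFC 1071 property behind checksum() above - appending the computed
# checksum in network byte order makes the checksum of the whole buffer fold to 0.
# struct is already imported at the top of this module.
def _checksum_property_example():
    data = b"\x45\x00\x00\x14\x00\x01\x00\x00"   # arbitrary even-length payload
    c = checksum(data)
    assert 0 <= c <= 0xffff
    assert checksum(data + struct.pack("!H", c)) == 0
    return c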
def _fletcher16(charbuf):
# This is based on the GPLed C implementation in Zebra <http://www.zebra.org/>
c0 = c1 = 0
for char in charbuf:
c0 += orb(char)
c1 += c0
c0 %= 255
c1 %= 255
return (c0,c1)
@conf.commands.register
def fletcher16_checksum(binbuf):
""" Calculates Fletcher-16 checksum of the given buffer.
Note:
If the buffer contains the two checkbytes derived from the Fletcher-16 checksum
the result of this function has to be 0. Otherwise the buffer has been corrupted.
"""
(c0,c1)= _fletcher16(binbuf)
return (c1 << 8) | c0
@conf.commands.register
def fletcher16_checkbytes(binbuf, offset):
""" Calculates the Fletcher-16 checkbytes returned as 2 byte binary-string.
Including the bytes into the buffer (at the position marked by offset) the
global Fletcher-16 checksum of the buffer will be 0. Thus it is easy to verify
the integrity of the buffer on the receiver side.
For details on the algorithm, see RFC 2328 chapter 12.1.7 and RFC 905 Annex B.
"""
# This is based on the GPLed C implementation in Zebra <http://www.zebra.org/>
if len(binbuf) < offset:
raise Exception("Packet too short for checkbytes %d" % len(binbuf))
binbuf = binbuf[:offset] + b"\x00\x00" + binbuf[offset + 2:]
(c0,c1)= _fletcher16(binbuf)
x = ((len(binbuf) - offset - 1) * c0 - c1) % 255
if (x <= 0):
x += 255
y = 510 - c0 - x
if (y > 255):
y -= 255
return chb(x) + chb(y)
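# Illustrative sketch (hypothetical helper, arbitrary buffer and offset):
# demonstrates the property described in the docstrings above - once the two
# checkbytes are patched into the buffer at the given offset, the Fletcher-16
# checksum of the whole buffer verifies to 0.
def _fletcher16_usage_example():
    buf = b"ABCD\x00\x00EFGH"            # two placeholder bytes at offset 4
    checkbytes = fletcher16_checkbytes(buf, 4)
    patched = buf[:4] + checkbytes + buf[6:]
    assert fletcher16_checksum(patched) == 0
    return patched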
def mac2str(mac):
return b"".join(chb(int(x, 16)) for x in mac.split(':'))
def str2mac(s):
if isinstance(s, str):
return ("%02x:"*6)[:-1] % tuple(map(ord, s))
return ("%02x:"*6)[:-1] % tuple(s)
def randstring(l):
"""
Returns a random string of length l (l >= 0)
"""
return b"".join(struct.pack('B', random.randint(0, 255)) for _ in range(l))
def zerofree_randstring(l):
"""
Returns a random string of length l (l >= 0) without zero in it.
"""
return b"".join(struct.pack('B', random.randint(1, 255)) for _ in range(l))
def strxor(s1, s2):
"""
Returns the binary XOR of the 2 provided strings s1 and s2. s1 and s2
must be of same length.
"""
return b"".join(map(lambda x,y:chb(orb(x)^orb(y)), s1, s2))
def strand(s1, s2):
"""
Returns the binary AND of the 2 provided strings s1 and s2. s1 and s2
must be of same length.
"""
return b"".join(map(lambda x,y:chb(orb(x)&orb(y)), s1, s2))
# Workaround bug 643005 : https://sourceforge.net/tracker/?func=detail&atid=105470&aid=643005&group_id=5470
try:
socket.inet_aton("255.255.255.255")
except socket.error:
def inet_aton(x):
if x == "255.255.255.255":
return b"\xff"*4
else:
return socket.inet_aton(x)
else:
inet_aton = socket.inet_aton
inet_ntoa = socket.inet_ntoa
from scapy.pton_ntop import *
def atol(x):
try:
ip = inet_aton(x)
except socket.error:
ip = inet_aton(socket.gethostbyname(x))
return struct.unpack("!I", ip)[0]
def ltoa(x):
return inet_ntoa(struct.pack("!I", x&0xffffffff))
def itom(x):
return (0xffffffff00000000>>x)&0xffffffff
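# Illustrative sketch (hypothetical helper, arbitrary values): atol()/ltoa()
# convert between dotted-quad strings and 32-bit integers, and itom() builds the
# netmask integer for a given prefix length.
def _ipv4_numeric_example():
    assert atol("10.0.0.1") == 0x0A000001
    assert ltoa(0x0A000001) == "10.0.0.1"
    assert itom(24) == 0xFFFFFF00        # /24 netmask as an integer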
class ContextManagerSubprocess(object):
"""
    Context manager that eases checking for an unknown command.
Example:
>>> with ContextManagerSubprocess("my custom message"):
>>> subprocess.Popen(["unknown_command"])
"""
def __init__(self, name):
self.name = name
def __enter__(self):
pass
def __exit__(self, exc_type, exc_value, traceback):
if exc_type == OSError:
msg = "%s: executing %r failed"
log_runtime.error(msg, self.name, conf.prog.wireshark, exc_info=1)
return True # Suppress the exception
class ContextManagerCaptureOutput(object):
"""
    Context manager that intercepts the console's output.
Example:
>>> with ContextManagerCaptureOutput() as cmco:
... print("hey")
... assert cmco.get_output() == "hey"
"""
def __init__(self):
self.result_export_object = ""
try:
import mock
except:
raise ImportError("The mock module needs to be installed !")
def __enter__(self):
import mock
def write(s, decorator=self):
decorator.result_export_object += s
mock_stdout = mock.Mock()
mock_stdout.write = write
self.bck_stdout = sys.stdout
sys.stdout = mock_stdout
return self
def __exit__(self, *exc):
sys.stdout = self.bck_stdout
return False
def get_output(self, eval_bytes=False):
if self.result_export_object.startswith("b'") and eval_bytes:
return plain_str(eval(self.result_export_object))
return self.result_export_object
def do_graph(graph,prog=None,format=None,target=None,type=None,string=None,options=None):
"""do_graph(graph, prog=conf.prog.dot, format="svg",
target="| conf.prog.display", options=None, [string=1]):
string: if not None, simply return the graph string
graph: GraphViz graph description
format: output type (svg, ps, gif, jpg, etc.), passed to dot's "-T" option
    target: filename or redirect. Defaults to a pipe to ImageMagick's display program
prog: which graphviz program to use
options: options to be passed to prog"""
if format is None:
if WINDOWS:
format = "png" # use common format to make sure a viewer is installed
else:
format = "svg"
if string:
return graph
if type is not None:
format=type
if prog is None:
prog = conf.prog.dot
start_viewer=False
if target is None:
if WINDOWS:
target = get_temp_file(autoext="."+format)
start_viewer = True
else:
with ContextManagerSubprocess("do_graph()"):
target = subprocess.Popen([conf.prog.display], stdin=subprocess.PIPE).stdin
if format is not None:
format = "-T%s" % format
if isinstance(target, str):
target = open(os.path.abspath(target), "wb")
proc = subprocess.Popen("\"%s\" %s %s" % (prog, options or "", format or ""),
shell=True, stdin=subprocess.PIPE, stdout=target)
proc.communicate(input=raw(graph))
try:
target.close()
except:
pass
if start_viewer:
# Workaround for file not found error: We wait until tempfile is written.
waiting_start = time.time()
while not os.path.exists(target.name):
time.sleep(0.1)
if time.time() - waiting_start > 3:
warning("Temporary file '%s' could not be written. Graphic will not be displayed.", tempfile)
break
else:
if conf.prog.display == conf.prog._default:
os.startfile(target.name)
else:
with ContextManagerSubprocess("do_graph()"):
subprocess.Popen([conf.prog.display, target.name])
_TEX_TR = {
"{":"{\\tt\\char123}",
"}":"{\\tt\\char125}",
"\\":"{\\tt\\char92}",
"^":"\\^{}",
"$":"\\$",
"#":"\\#",
"~":"\\~",
"_":"\\_",
"&":"\\&",
"%":"\\%",
"|":"{\\tt\\char124}",
"~":"{\\tt\\char126}",
"<":"{\\tt\\char60}",
">":"{\\tt\\char62}",
}
def tex_escape(x):
s = ""
for c in x:
s += _TEX_TR.get(c,c)
return s
def colgen(*lstcol,**kargs):
"""Returns a generator that mixes provided quantities forever
trans: a function to convert the three arguments into a color. lambda x,y,z:(x,y,z) by default"""
if len(lstcol) < 2:
lstcol *= 2
trans = kargs.get("trans", lambda x,y,z: (x,y,z))
while True:
for i in range(len(lstcol)):
for j in range(len(lstcol)):
for k in range(len(lstcol)):
if i != j or j != k or k != i:
yield trans(lstcol[(i+j)%len(lstcol)],lstcol[(j+k)%len(lstcol)],lstcol[(k+i)%len(lstcol)])
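# Illustrative sketch (hypothetical helper, arbitrary inputs): colgen() is an
# infinite generator, so it is consumed lazily; islice() takes just a handful of
# (x, y, z) colour triples here.
def _colgen_usage_example():
    from itertools import islice
    triples = list(islice(colgen("red", "green", "blue"), 5))
    assert all(len(t) == 3 for t in triples)
    return triples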
def incremental_label(label="tag%05i", start=0):
while True:
yield label % start
start += 1
def binrepr(val):
return bin(val)[2:]
def long_converter(s):
return int(s.replace('\n', '').replace(' ', ''), 16)
#########################
#### Enum management ####
#########################
class EnumElement:
_value=None
def __init__(self, key, value):
self._key = key
self._value = value
def __repr__(self):
return "<%s %s[%r]>" % (self.__dict__.get("_name", self.__class__.__name__), self._key, self._value)
def __getattr__(self, attr):
return getattr(self._value, attr)
def __str__(self):
return self._key
def __bytes__(self):
return raw(self.__str__())
def __hash__(self):
return self._value
def __int__(self):
return int(self._value)
def __eq__(self, other):
return self._value == int(other)
    def __ne__(self, other):
return not self.__eq__(other)
class Enum_metaclass(type):
element_class = EnumElement
def __new__(cls, name, bases, dct):
rdict={}
for k,v in six.iteritems(dct):
if isinstance(v, int):
v = cls.element_class(k,v)
dct[k] = v
rdict[v] = k
dct["__rdict__"] = rdict
return super(Enum_metaclass, cls).__new__(cls, name, bases, dct)
def __getitem__(self, attr):
return self.__rdict__[attr]
def __contains__(self, val):
return val in self.__rdict__
def get(self, attr, val=None):
return self.__rdict__.get(attr, val)
def __repr__(self):
return "<%s>" % self.__dict__.get("name", self.__name__)
###################
## Object saving ##
###################
def export_object(obj):
print(bytes_codec(gzip.zlib.compress(six.moves.cPickle.dumps(obj,2),9), "base64"))
def import_object(obj=None):
if obj is None:
obj = sys.stdin.read()
return six.moves.cPickle.loads(gzip.zlib.decompress(base64_bytes(obj.strip())))
def save_object(fname, obj):
"""Pickle a Python object"""
fd = gzip.open(fname, "wb")
six.moves.cPickle.dump(obj, fd)
fd.close()
def load_object(fname):
"""unpickle a Python object"""
return six.moves.cPickle.load(gzip.open(fname,"rb"))
@conf.commands.register
def corrupt_bytes(s, p=0.01, n=None):
"""Corrupt a given percentage or number of bytes from a string"""
s = array.array("B",raw(s))
l = len(s)
if n is None:
n = max(1,int(l*p))
for i in random.sample(range(l), n):
s[i] = (s[i]+random.randint(1,255))%256
return s.tostring()
@conf.commands.register
def corrupt_bits(s, p=0.01, n=None):
"""Flip a given percentage or number of bits from a string"""
s = array.array("B",raw(s))
l = len(s)*8
if n is None:
n = max(1,int(l*p))
for i in random.sample(range(l), n):
s[i // 8] ^= 1 << (i % 8)
return s.tostring()
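# Illustrative sketch (hypothetical helper, arbitrary all-zero payload):
# corrupt_bytes()/corrupt_bits() return a mutated copy of the input; the length
# is preserved and, with n given, exactly that many positions are touched.
def _corruption_example():
    payload = b"\x00" * 32
    flipped = corrupt_bits(payload, n=3)
    assert len(flipped) == len(payload)
    assert flipped != payload
    return flipped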
#############################
## pcap capture file stuff ##
#############################
@conf.commands.register
def wrpcap(filename, pkt, *args, **kargs):
"""Write a list of packets to a pcap file
filename: the name of the file to write packets to, or an open,
writable file-like object. The file descriptor will be
closed at the end of the call, so do not use an object you
do not want to close (e.g., running wrpcap(sys.stdout, [])
in interactive mode will crash Scapy).
gz: set to 1 to save a gzipped capture
linktype: force linktype value
endianness: "<" or ">", force endianness
sync: do not bufferize writes to the capture file
"""
with PcapWriter(filename, *args, **kargs) as fdesc:
fdesc.write(pkt)
@conf.commands.register
def rdpcap(filename, count=-1):
"""Read a pcap or pcapng file and return a packet list
count: read only <count> packets
"""
with PcapReader(filename) as fdesc:
return fdesc.read_all(count=count)
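# Illustrative round-trip sketch (hypothetical helper): write a couple of packets
# to a temporary pcap file and read them back. It assumes scapy.layers.inet is
# importable at call time; the destination addresses are documentation-range
# placeholders.
def _pcap_roundtrip_example():
    from scapy.layers.inet import IP, UDP
    fname = get_temp_file(autoext=".pcap")
    pkts = [IP(dst="198.51.100.1") / UDP(dport=53),
            IP(dst="198.51.100.2") / UDP(dport=123)]
    wrpcap(fname, pkts)
    assert len(rdpcap(fname)) == 2
    return fname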
class PcapReader_metaclass(type):
"""Metaclass for (Raw)Pcap(Ng)Readers"""
def __new__(cls, name, bases, dct):
"""The `alternative` class attribute is declared in the PcapNg
variant, and set here to the Pcap variant.
"""
newcls = super(PcapReader_metaclass, cls).__new__(cls, name, bases, dct)
if 'alternative' in dct:
dct['alternative'].alternative = newcls
return newcls
def __call__(cls, filename):
"""Creates a cls instance, use the `alternative` if that
fails.
"""
i = cls.__new__(cls, cls.__name__, cls.__bases__, cls.__dict__)
filename, fdesc, magic = cls.open(filename)
try:
i.__init__(filename, fdesc, magic)
except Scapy_Exception:
if "alternative" in cls.__dict__:
cls = cls.__dict__["alternative"]
i = cls.__new__(cls, cls.__name__, cls.__bases__, cls.__dict__)
try:
i.__init__(filename, fdesc, magic)
except Scapy_Exception:
raise
try:
i.f.seek(-4, 1)
except:
pass
raise Scapy_Exception("Not a supported capture file")
return i
@staticmethod
def open(filename):
"""Open (if necessary) filename, and read the magic."""
if isinstance(filename, six.string_types):
try:
fdesc = gzip.open(filename,"rb")
magic = fdesc.read(4)
except IOError:
fdesc = open(filename, "rb")
magic = fdesc.read(4)
else:
fdesc = filename
filename = (fdesc.name
if hasattr(fdesc, "name") else
"No name")
magic = fdesc.read(4)
return filename, fdesc, magic
class RawPcapReader(six.with_metaclass(PcapReader_metaclass)):
"""A stateful pcap reader. Each packet is returned as a string"""
def __init__(self, filename, fdesc, magic):
self.filename = filename
self.f = fdesc
if magic == b"\xa1\xb2\xc3\xd4": # big endian
self.endian = ">"
self.nano = False
elif magic == b"\xd4\xc3\xb2\xa1": # little endian
self.endian = "<"
self.nano = False
elif magic == b"\xa1\xb2\x3c\x4d": # big endian, nanosecond-precision
self.endian = ">"
self.nano = True
elif magic == b"\x4d\x3c\xb2\xa1": # little endian, nanosecond-precision
self.endian = "<"
self.nano = True
else:
raise Scapy_Exception(
"Not a pcap capture file (bad magic: %r)" % magic
)
hdr = self.f.read(20)
if len(hdr)<20:
raise Scapy_Exception("Invalid pcap file (too short)")
vermaj, vermin, tz, sig, snaplen, linktype = struct.unpack(
self.endian + "HHIIII", hdr
)
self.linktype = linktype
def __iter__(self):
return self
def next(self):
"""implement the iterator protocol on a set of packets in a pcap file"""
pkt = self.read_packet()
        if pkt is None:
raise StopIteration
return pkt
__next__ = next
def read_packet(self, size=MTU):
"""return a single packet read from the file
returns None when no more packets are available
"""
hdr = self.f.read(16)
if len(hdr) < 16:
return None
sec,usec,caplen,wirelen = struct.unpack(self.endian+"IIII", hdr)
s = self.f.read(caplen)[:size]
return s,(sec,usec,wirelen) # caplen = len(s)
def dispatch(self, callback):
"""call the specified callback routine for each packet read
This is just a convenience function for the main loop
that allows for easy launching of packet processing in a
thread.
"""
for p in self:
callback(p)
def read_all(self,count=-1):
"""return a list of all packets in the pcap file
"""
res=[]
while count != 0:
count -= 1
p = self.read_packet()
if p is None:
break
res.append(p)
return res
def recv(self, size=MTU):
""" Emulate a socket
"""
return self.read_packet(size=size)[0]
def fileno(self):
return self.f.fileno()
def close(self):
return self.f.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tracback):
self.close()
class PcapReader(RawPcapReader):
def __init__(self, filename, fdesc, magic):
RawPcapReader.__init__(self, filename, fdesc, magic)
try:
self.LLcls = conf.l2types[self.linktype]
except KeyError:
warning("PcapReader: unknown LL type [%i]/[%#x]. Using Raw packets" % (self.linktype,self.linktype))
self.LLcls = conf.raw_layer
def read_packet(self, size=MTU):
rp = RawPcapReader.read_packet(self, size=size)
if rp is None:
return None
s,(sec,usec,wirelen) = rp
try:
p = self.LLcls(s)
except KeyboardInterrupt:
raise
except:
if conf.debug_dissector:
raise
p = conf.raw_layer(s)
p.time = sec + (0.000000001 if self.nano else 0.000001) * usec
return p
def read_all(self,count=-1):
res = RawPcapReader.read_all(self, count)
from scapy import plist
return plist.PacketList(res,name = os.path.basename(self.filename))
def recv(self, size=MTU):
return self.read_packet(size=size)
class RawPcapNgReader(RawPcapReader):
"""A stateful pcapng reader. Each packet is returned as a
string.
"""
alternative = RawPcapReader
def __init__(self, filename, fdesc, magic):
self.filename = filename
self.f = fdesc
# A list of (linktype, snaplen, tsresol); will be populated by IDBs.
self.interfaces = []
self.blocktypes = {
1: self.read_block_idb,
2: self.read_block_pkt,
3: self.read_block_spb,
6: self.read_block_epb,
}
if magic != b"\x0a\x0d\x0d\x0a": # PcapNg:
raise Scapy_Exception(
"Not a pcapng capture file (bad magic: %r)" % magic
)
# see https://github.com/pcapng/pcapng
blocklen, magic = self.f.read(4), self.f.read(4)
if magic == b"\x1a\x2b\x3c\x4d":
self.endian = ">"
elif magic == b"\x4d\x3c\x2b\x1a":
self.endian = "<"
else:
raise Scapy_Exception("Not a pcapng capture file (bad magic)")
try:
self.f.seek(0)
except:
pass
def read_packet(self, size=MTU):
"""Read blocks until it reaches either EOF or a packet, and
returns None or (packet, (linktype, sec, usec, wirelen)),
where packet is a string.
"""
while True:
try:
blocktype, blocklen = struct.unpack(self.endian + "2I",
self.f.read(8))
except struct.error:
return None
block = self.f.read(blocklen - 12)
if blocklen % 4:
pad = self.f.read(4 - (blocklen % 4))
warning("PcapNg: bad blocklen %d (MUST be a multiple of 4. "
"Ignored padding %r" % (blocklen, pad))
try:
if (blocklen,) != struct.unpack(self.endian + 'I',
self.f.read(4)):
warning("PcapNg: Invalid pcapng block (bad blocklen)")
except struct.error:
return None
res = self.blocktypes.get(blocktype,
lambda block, size: None)(block, size)
if res is not None:
return res
def read_block_idb(self, block, _):
"""Interface Description Block"""
options = block[16:]
tsresol = 1000000
while len(options) >= 4:
code, length = struct.unpack(self.endian + "HH", options[:4])
# PCAP Next Generation (pcapng) Capture File Format
# 4.2. - Interface Description Block
# http://xml2rfc.tools.ietf.org/cgi-bin/xml2rfc.cgi?url=https://raw.githubusercontent.com/pcapng/pcapng/master/draft-tuexen-opsawg-pcapng.xml&modeAsFormat=html/ascii&type=ascii#rfc.section.4.2
if code == 9 and length == 1 and len(options) >= 5:
tsresol = orb(options[4])
tsresol = (2 if tsresol & 128 else 10) ** (tsresol & 127)
if code == 0:
if length != 0:
warning("PcapNg: invalid option length %d for end-of-option" % length)
break
if length % 4:
length += (4 - (length % 4))
options = options[4 + length:]
self.interfaces.append(struct.unpack(self.endian + "HxxI", block[:8])
+ (tsresol,))
def read_block_epb(self, block, size):
"""Enhanced Packet Block"""
intid, tshigh, tslow, caplen, wirelen = struct.unpack(
self.endian + "5I",
block[:20],
)
return (block[20:20 + caplen][:size],
(self.interfaces[intid][0], self.interfaces[intid][2],
tshigh, tslow, wirelen))
def read_block_spb(self, block, size):
"""Simple Packet Block"""
# "it MUST be assumed that all the Simple Packet Blocks have
# been captured on the interface previously specified in the
# first Interface Description Block."
intid = 0
wirelen, = struct.unpack(self.endian + "I", block[:4])
caplen = min(wirelen, self.interfaces[intid][1])
return (block[4:4 + caplen][:size],
(self.interfaces[intid][0], self.interfaces[intid][2],
None, None, wirelen))
def read_block_pkt(self, block, size):
"""(Obsolete) Packet Block"""
intid, drops, tshigh, tslow, caplen, wirelen = struct.unpack(
self.endian + "HH4I",
block[:20],
)
return (block[20:20 + caplen][:size],
(self.interfaces[intid][0], self.interfaces[intid][2],
tshigh, tslow, wirelen))
class PcapNgReader(RawPcapNgReader):
alternative = PcapReader
def __init__(self, filename, fdesc, magic):
RawPcapNgReader.__init__(self, filename, fdesc, magic)
def read_packet(self, size=MTU):
rp = RawPcapNgReader.read_packet(self, size=size)
if rp is None:
return None
s, (linktype, tsresol, tshigh, tslow, wirelen) = rp
try:
p = conf.l2types[linktype](s)
except KeyboardInterrupt:
raise
except:
if conf.debug_dissector:
raise
p = conf.raw_layer(s)
if tshigh is not None:
p.time = float((tshigh << 32) + tslow) / tsresol
return p
def read_all(self,count=-1):
res = RawPcapNgReader.read_all(self, count)
from scapy import plist
return plist.PacketList(res, name=os.path.basename(self.filename))
def recv(self, size=MTU):
return self.read_packet()
class RawPcapWriter:
"""A stream PCAP writer with more control than wrpcap()"""
def __init__(self, filename, linktype=None, gz=False, endianness="",
append=False, sync=False, nano=False):
"""
filename: the name of the file to write packets to, or an open,
writable file-like object.
linktype: force linktype to a given value. If None, linktype is taken
from the first writer packet
gz: compress the capture on the fly
endianness: force an endianness (little:"<", big:">"). Default is native
append: append packets to the capture file instead of truncating it
sync: do not bufferize writes to the capture file
nano: use nanosecond-precision (requires libpcap >= 1.5.0)
"""
self.linktype = linktype
self.header_present = 0
self.append = append
self.gz = gz
self.endian = endianness
self.sync = sync
self.nano = nano
bufsz=4096
if sync:
bufsz = 0
if isinstance(filename, six.string_types):
self.filename = filename
self.f = [open,gzip.open][gz](filename,append and "ab" or "wb", gz and 9 or bufsz)
else:
self.f = filename
self.filename = (filename.name
if hasattr(filename, "name") else
"No name")
def fileno(self):
return self.f.fileno()
def _write_header(self, pkt):
self.header_present=1
if self.append:
# Even if prone to race conditions, this seems to be
            # the safest way to tell whether the header is already present
# because we have to handle compressed streams that
# are not as flexible as basic files
g = [open,gzip.open][self.gz](self.filename,"rb")
if g.read(16):
return
self.f.write(struct.pack(self.endian+"IHHIIII", 0xa1b23c4d if self.nano else 0xa1b2c3d4,
2, 4, 0, 0, MTU, self.linktype))
self.f.flush()
def write(self, pkt):
"""accepts either a single packet or a list of packets to be
written to the dumpfile
"""
if isinstance(pkt, str):
if not self.header_present:
self._write_header(pkt)
self._write_packet(pkt)
else:
pkt = pkt.__iter__()
if not self.header_present:
try:
p = next(pkt)
except StopIteration:
self._write_header(b"")
return
self._write_header(p)
self._write_packet(p)
for p in pkt:
self._write_packet(p)
def _write_packet(self, packet, sec=None, usec=None, caplen=None, wirelen=None):
"""writes a single packet to the pcap file
"""
if isinstance(packet, tuple):
for pkt in packet:
self._write_packet(pkt, sec=sec, usec=usec, caplen=caplen,
wirelen=wirelen)
return
if caplen is None:
caplen = len(packet)
if wirelen is None:
wirelen = caplen
if sec is None or usec is None:
t=time.time()
it = int(t)
if sec is None:
sec = it
if usec is None:
usec = int(round((t - it) * (1000000000 if self.nano else 1000000)))
self.f.write(struct.pack(self.endian+"IIII", sec, usec, caplen, wirelen))
self.f.write(packet)
if self.sync:
self.f.flush()
def flush(self):
return self.f.flush()
def close(self):
return self.f.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tracback):
self.flush()
self.close()
class PcapWriter(RawPcapWriter):
"""A stream PCAP writer with more control than wrpcap()"""
def _write_header(self, pkt):
if isinstance(pkt, tuple) and pkt:
pkt = pkt[0]
        if self.linktype is None:
try:
self.linktype = conf.l2types[pkt.__class__]
except KeyError:
warning("PcapWriter: unknown LL type for %s. Using type 1 (Ethernet)", pkt.__class__.__name__)
self.linktype = 1
RawPcapWriter._write_header(self, pkt)
def _write_packet(self, packet):
if isinstance(packet, tuple):
for pkt in packet:
self._write_packet(pkt)
return
sec = int(packet.time)
usec = int(round((packet.time - sec) * (1000000000 if self.nano else 1000000)))
s = raw(packet)
caplen = len(s)
RawPcapWriter._write_packet(self, s, sec, usec, caplen, caplen)
re_extract_hexcap = re.compile("^((0x)?[0-9a-fA-F]{2,}[ :\t]{,3}|) *(([0-9a-fA-F]{2} {,2}){,16})")
@conf.commands.register
def import_hexcap():
p = ""
try:
while True:
l = input().strip()
try:
p += re_extract_hexcap.match(l).groups()[2]
except:
warning("Parsing error during hexcap")
continue
except EOFError:
pass
p = p.replace(" ","")
return p.decode("hex")
@conf.commands.register
def wireshark(pktlist):
"""Run wireshark on a list of packets"""
f = get_temp_file()
wrpcap(f, pktlist)
with ContextManagerSubprocess("wireshark()"):
subprocess.Popen([conf.prog.wireshark, "-r", f])
@conf.commands.register
def tcpdump(pktlist, dump=False, getfd=False, args=None,
prog=None, getproc=False, quiet=False):
"""Run tcpdump or tshark on a list of packets
pktlist: a Packet instance, a PacketList instance or a list of Packet
instances. Can also be a filename (as a string) or an open
file-like object that must be a file format readable by
tshark (Pcap, PcapNg, etc.)
dump: when set to True, returns a string instead of displaying it.
getfd: when set to True, returns a file-like object to read data
from tcpdump or tshark from.
getproc: when set to True, the subprocess.Popen object is returned
args: arguments (as a list) to pass to tshark (example for tshark:
args=["-T", "json"]). Defaults to ["-n"].
prog: program to use (defaults to tcpdump, will work with tshark)
quiet: when set to True, the process stderr is discarded
Examples:
>>> tcpdump([IP()/TCP(), IP()/UDP()])
reading from file -, link-type RAW (Raw IP)
16:46:00.474515 IP 127.0.0.1.20 > 127.0.0.1.80: Flags [S], seq 0, win 8192, length 0
16:46:00.475019 IP 127.0.0.1.53 > 127.0.0.1.53: [|domain]
>>> tcpdump([IP()/TCP(), IP()/UDP()], prog=conf.prog.tshark)
1 0.000000 127.0.0.1 -> 127.0.0.1 TCP 40 20->80 [SYN] Seq=0 Win=8192 Len=0
2 0.000459 127.0.0.1 -> 127.0.0.1 UDP 28 53->53 Len=0
To get a JSON representation of a tshark-parsed PacketList(), one can:
>>> import json, pprint
>>> json_data = json.load(tcpdump(IP(src="217.25.178.5", dst="45.33.32.156"),
... prog=conf.prog.tshark, args=["-T", "json"],
... getfd=True))
>>> pprint.pprint(json_data)
[{u'_index': u'packets-2016-12-23',
u'_score': None,
u'_source': {u'layers': {u'frame': {u'frame.cap_len': u'20',
u'frame.encap_type': u'7',
[...]
u'frame.time_relative': u'0.000000000'},
u'ip': {u'ip.addr': u'45.33.32.156',
u'ip.checksum': u'0x0000a20d',
[...]
u'ip.ttl': u'64',
u'ip.version': u'4'},
u'raw': u'Raw packet data'}},
u'_type': u'pcap_file'}]
>>> json_data[0]['_source']['layers']['ip']['ip.ttl']
u'64'
"""
getfd = getfd or getproc
if prog is None:
prog = [conf.prog.tcpdump]
elif isinstance(prog, six.string_types):
prog = [prog]
if pktlist is None:
with ContextManagerSubprocess("tcpdump()"):
proc = subprocess.Popen(
prog + (args if args is not None else []),
stdout=subprocess.PIPE if dump or getfd else None,
stderr=open(os.devnull) if quiet else None,
)
elif isinstance(pktlist, six.string_types):
with ContextManagerSubprocess("tcpdump()"):
proc = subprocess.Popen(
prog + ["-r", pktlist] + (args if args is not None else []),
stdout=subprocess.PIPE if dump or getfd else None,
stderr=open(os.devnull) if quiet else None,
)
elif DARWIN:
# Tcpdump cannot read from stdin, see
# <http://apple.stackexchange.com/questions/152682/>
tmpfile = tempfile.NamedTemporaryFile(delete=False)
try:
tmpfile.writelines(iter(lambda: pktlist.read(1048576), b""))
except AttributeError:
wrpcap(tmpfile, pktlist)
else:
tmpfile.close()
with ContextManagerSubprocess("tcpdump()"):
proc = subprocess.Popen(
prog + ["-r", tmpfile.name] + (args if args is not None else []),
stdout=subprocess.PIPE if dump or getfd else None,
stderr=open(os.devnull) if quiet else None,
)
conf.temp_files.append(tmpfile.name)
else:
with ContextManagerSubprocess("tcpdump()"):
proc = subprocess.Popen(
prog + ["-r", "-"] + (args if args is not None else []),
stdin=subprocess.PIPE,
stdout=subprocess.PIPE if dump or getfd else None,
stderr=open(os.devnull) if quiet else None,
)
try:
proc.stdin.writelines(iter(lambda: pktlist.read(1048576), b""))
except AttributeError:
wrpcap(proc.stdin, pktlist)
else:
proc.stdin.close()
if dump:
return b"".join(iter(lambda: proc.stdout.read(1048576), b""))
if getproc:
return proc
if getfd:
return proc.stdout
proc.wait()
@conf.commands.register
def hexedit(x):
x = str(x)
f = get_temp_file()
open(f,"wb").write(x)
with ContextManagerSubprocess("hexedit()"):
subprocess.call([conf.prog.hexedit, f])
x = open(f).read()
os.unlink(f)
return x
def get_terminal_width():
"""Get terminal width if in a window"""
if WINDOWS:
from ctypes import windll, create_string_buffer
# http://code.activestate.com/recipes/440694-determine-size-of-console-window-on-windows/
h = windll.kernel32.GetStdHandle(-12)
csbi = create_string_buffer(22)
res = windll.kernel32.GetConsoleScreenBufferInfo(h, csbi)
if res:
import struct
(bufx, bufy, curx, cury, wattr,
left, top, right, bottom, maxx, maxy) = struct.unpack("hhhhHhhhhhh", csbi.raw)
sizex = right - left + 1
#sizey = bottom - top + 1
return sizex
else:
return None
else:
sizex = 0
try:
import struct, fcntl, termios
s = struct.pack('HHHH', 0, 0, 0, 0)
x = fcntl.ioctl(1, termios.TIOCGWINSZ, s)
sizex = struct.unpack('HHHH', x)[1]
except IOError:
pass
if not sizex:
try:
sizex = int(os.environ['COLUMNS'])
except:
pass
if sizex:
return sizex
else:
return None
def pretty_routes(rtlst, header, sortBy=0):
"""Pretty route list, and add header"""
_l_header = len(header[0])
_space = " "
# Sort correctly
rtlst.sort(key=lambda x: x[sortBy])
# Append tag
rtlst = header + rtlst
# Detect column's width
colwidth = [max([len(y) for y in x]) for x in zip(*rtlst)]
# Make text fit in box (if exist)
    # TODO: find a better and more precise way of doing this. It currently works but is very complicated
width = get_terminal_width()
if width:
if sum(colwidth) > width:
# Needs to be cropped
_med = (width // _l_header) - (1 if WINDOWS else 0) # Windows has a fat window border
# Crop biggest until size is correct
for i in range(1, len(colwidth)): # Should use while, but this is safer
if (sum(colwidth)+6) <= width:
break
_max = max(colwidth)
colwidth = [_med if x == _max else x for x in colwidth]
def _crop(x, width):
_r = x[:width]
if _r != x:
_r = x[:width-3]
return _r + "..."
return _r
rtlst = [tuple([_crop(rtlst[j][i], colwidth[i]) for i in range(0, len(rtlst[j]))]) for j in range(0, len(rtlst))]
# Recalculate column's width
colwidth = [max([len(y) for y in x]) for x in zip(*rtlst)]
fmt = _space.join(["%%-%ds"%x for x in colwidth])
rt = "\n".join([fmt % x for x in rtlst])
return rt
def __make_table(yfmtfunc, fmtfunc, endline, list, fxyz, sortx=None, sorty=None, seplinefunc=None):
vx = {}
vy = {}
vz = {}
vxf = {}
vyf = {}
l = 0
for e in list:
xx, yy, zz = [str(s) for s in fxyz(e)]
l = max(len(yy),l)
vx[xx] = max(vx.get(xx,0), len(xx), len(zz))
vy[yy] = None
vz[(xx,yy)] = zz
vxk = sorted(vx.keys())
vyk = sorted(vy.keys())
if sortx:
vxk.sort(sortx)
else:
try:
vxk.sort(lambda x,y:int(x)-int(y))
except:
try:
vxk.sort(lambda x,y: cmp(atol(x),atol(y)))
except:
vxk.sort()
if sorty:
vyk.sort(sorty)
else:
try:
vyk.sort(lambda x,y:int(x)-int(y))
except:
try:
vyk.sort(lambda x,y: cmp(atol(x),atol(y)))
except:
vyk.sort()
if seplinefunc:
sepline = seplinefunc(l, [vx[x] for x in vxk])
print(sepline)
fmt = yfmtfunc(l)
print(fmt % "", end=' ')
for x in vxk:
vxf[x] = fmtfunc(vx[x])
print(vxf[x] % x, end=' ')
print(endline)
if seplinefunc:
print(sepline)
for y in vyk:
print(fmt % y, end=' ')
for x in vxk:
print(vxf[x] % vz.get((x,y), "-"), end=' ')
print(endline)
if seplinefunc:
print(sepline)
def make_table(*args, **kargs):
__make_table(lambda l:"%%-%is" % l, lambda l:"%%-%is" % l, "", *args, **kargs)
def make_lined_table(*args, **kargs):
__make_table(lambda l:"%%-%is |" % l, lambda l:"%%-%is |" % l, "",
seplinefunc=lambda a,x:"+".join('-'*(y+2) for y in [a-1]+x+[-2]),
*args, **kargs)
def make_tex_table(*args, **kargs):
__make_table(lambda l: "%s", lambda l: "& %s", "\\\\", seplinefunc=lambda a,x:"\\hline", *args, **kargs)
###############################################
### WHOIS CLIENT (not available on windows) ###
###############################################
def whois(ip_address):
"""Whois client for Python"""
whois_ip = str(ip_address)
try:
query = socket.gethostbyname(whois_ip)
except:
query = whois_ip
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(("whois.ripe.net", 43))
s.send(query.encode("utf8") + b"\r\n")
answer = b""
while True:
d = s.recv(4096)
answer += d
if not d:
break
s.close()
ignore_tag = b"remarks:"
# ignore all lines starting with the ignore_tag
lines = [ line for line in answer.split(b"\n") if not line or (line and not line.startswith(ignore_tag))]
# remove empty lines at the bottom
for i in range(1, len(lines)):
if not lines[-i].strip():
del lines[-i]
else:
break
return b"\n".join(lines[3:])
| 1 | 11,478 | Why are you changing this name ? | secdev-scapy | py |
@@ -579,6 +579,12 @@ def in_docker():
"""
if OVERRIDE_IN_DOCKER:
return True
+
+ # check things from Dockerfile
+ # TODO: is it enough? can we remove all the checks bellow?
+ if __file__.startswith("/opt/code/localstack/") and os.environ.get("USER") == "localstack":
+ return True
+
if os.path.exists("/.dockerenv"):
return True
if not os.path.exists("/proc/1/cgroup"): | 1 | import logging
import os
import platform
import re
import socket
import subprocess
import tempfile
import time
from typing import Any, Dict, List, Mapping, Tuple
import six
from boto3 import Session
from localstack.constants import (
AWS_REGION_US_EAST_1,
DEFAULT_BUCKET_MARKER_LOCAL,
DEFAULT_DEVELOP_PORT,
DEFAULT_LAMBDA_CONTAINER_REGISTRY,
DEFAULT_PORT_EDGE,
DEFAULT_SERVICE_PORTS,
FALSE_STRINGS,
INSTALL_DIR_INFRA,
LOCALHOST,
LOCALHOST_IP,
LOG_LEVELS,
TRACE_LOG_LEVELS,
TRUE_STRINGS,
)
# keep track of start time, for performance debugging
load_start_time = time.time()
class Directories:
"""
Holds the different directories available to localstack. Some directories are shared between the host and the
localstack container, some live only on the host and some only in the container.
Attributes:
static_libs: container only; binaries and libraries statically packaged with the image
var_libs: shared; binaries and libraries+data computed at runtime: lazy-loaded binaries, ssl cert, ...
cache: shared; ephemeral data that has to persist across localstack runs and reboots
tmp: shared; ephemeral data that has to persist across localstack runs but not reboots
functions: shared; volume to communicate between host<->lambda containers
data: shared; holds localstack state, pods, ...
config: host only; pre-defined configuration values, cached credentials, machine id, ...
init: shared; user-defined provisioning scripts executed in the container when it starts
logs: shared; log files produced by localstack
"""
static_libs: str
var_libs: str
cache: str
tmp: str
functions: str
data: str
config: str
init: str
logs: str
# these are the folders mounted into the container by default when the CLI is used
default_bind_mounts = ["var_libs", "cache", "tmp", "data", "init", "logs"]
def __init__(
self,
static_libs: str = None,
var_libs: str = None,
cache: str = None,
tmp: str = None,
functions: str = None,
data: str = None,
config: str = None,
init: str = None,
logs: str = None,
) -> None:
super().__init__()
self.static_libs = static_libs
self.var_libs = var_libs
self.cache = cache
self.tmp = tmp
self.functions = functions
self.data = data
self.config = config
self.init = init
self.logs = logs
@staticmethod
def from_config():
"""Returns Localstack directory paths from the config/environment variables defined by the config."""
return Directories(
static_libs=INSTALL_DIR_INFRA,
var_libs=TMP_FOLDER, # TODO: add variable
cache=CACHE_DIR,
tmp=TMP_FOLDER, # TODO: should inherit from root value for /var/lib/localstack (e.g., MOUNT_ROOT)
functions=HOST_TMP_FOLDER, # TODO: rename variable/consider a volume
data=DATA_DIR,
config=CONFIG_DIR,
init=None, # TODO: introduce environment variable
logs=TMP_FOLDER, # TODO: add variable
)
@staticmethod
def for_container() -> "Directories":
"""
Returns Localstack directory paths as they are defined within the container. Everything shared and writable
lives in /var/lib/localstack or /tmp/localstack.
:returns: Directories object
"""
# only set CONTAINER_VAR_LIBS_FOLDER/CONTAINER_CACHE_FOLDER inside the container to redirect var_libs/cache to
# another directory to avoid override by host mount
var_libs = (
os.environ.get("CONTAINER_VAR_LIBS_FOLDER", "").strip()
or "/var/lib/localstack/var_libs"
)
cache = os.environ.get("CONTAINER_CACHE_FOLDER", "").strip() or "/var/lib/localstack/cache"
return Directories(
static_libs=INSTALL_DIR_INFRA,
var_libs=var_libs,
cache=cache,
tmp=TMP_FOLDER, # TODO: move to /var/lib/localstack/tmp - or /tmp/localstack
functions=HOST_TMP_FOLDER, # TODO: move to /var/lib/localstack/tmp
data=DATA_DIR, # TODO: move to /var/lib/localstack/data
config=None, # config directory is host-only
logs="/var/lib/localstack/logs",
init="/docker-entrypoint-initaws.d",
)
def mkdirs(self):
for folder in [
self.static_libs,
self.var_libs,
self.cache,
self.tmp,
self.functions,
self.data,
self.config,
self.init,
self.logs,
]:
if folder and not os.path.exists(folder):
try:
os.makedirs(folder)
except Exception:
# this can happen due to a race condition when starting
# multiple processes in parallel. Should be safe to ignore
pass
def __str__(self):
return str(self.__dict__)
def eval_log_type(env_var_name):
"""get the log type from environment variable"""
ls_log = os.environ.get(env_var_name, "").lower().strip()
return ls_log if ls_log in LOG_LEVELS else False
def is_env_true(env_var_name):
"""Whether the given environment variable has a truthy value."""
return os.environ.get(env_var_name, "").lower().strip() in TRUE_STRINGS
def is_env_not_false(env_var_name):
"""Whether the given environment variable is empty or has a truthy value."""
return os.environ.get(env_var_name, "").lower().strip() not in FALSE_STRINGS
def load_environment(profile: str = None):
"""Loads the environment variables from ~/.localstack/{profile}.env
:param profile: the profile to load (defaults to "default")
"""
if not profile:
profile = "default"
path = os.path.join(CONFIG_DIR, f"{profile}.env")
if not os.path.exists(path):
return
import dotenv
dotenv.load_dotenv(path, override=False)
# the configuration profile to load
CONFIG_PROFILE = os.environ.get("CONFIG_PROFILE", "").strip()
# host configuration directory
CONFIG_DIR = os.environ.get("CONFIG_DIR", os.path.expanduser("~/.localstack"))
# keep this on top to populate environment
try:
load_environment(CONFIG_PROFILE)
except ImportError:
# dotenv may not be available in lambdas or other environments where config is loaded
pass
# java options to Lambda
LAMBDA_JAVA_OPTS = os.environ.get("LAMBDA_JAVA_OPTS", "").strip()
# limit at which kinesalite will start throwing exceptions
KINESIS_SHARD_LIMIT = os.environ.get("KINESIS_SHARD_LIMIT", "").strip() or "100"
# delay in kinesalite response when making changes to streams
KINESIS_LATENCY = os.environ.get("KINESIS_LATENCY", "").strip() or "500"
# Kinesis provider - either "kinesis-mock" or "kinesalite"
KINESIS_PROVIDER = os.environ.get("KINESIS_PROVIDER") or "kinesis-mock"
# default AWS region
if "DEFAULT_REGION" not in os.environ:
os.environ["DEFAULT_REGION"] = os.environ.get("AWS_DEFAULT_REGION") or AWS_REGION_US_EAST_1
DEFAULT_REGION = os.environ["DEFAULT_REGION"]
# Whether or not to handle lambda event sources as synchronous invocations
SYNCHRONOUS_SNS_EVENTS = is_env_true("SYNCHRONOUS_SNS_EVENTS")
SYNCHRONOUS_SQS_EVENTS = is_env_true("SYNCHRONOUS_SQS_EVENTS")
SYNCHRONOUS_API_GATEWAY_EVENTS = is_env_not_false("SYNCHRONOUS_API_GATEWAY_EVENTS")
SYNCHRONOUS_KINESIS_EVENTS = is_env_not_false("SYNCHRONOUS_KINESIS_EVENTS")
SYNCHRONOUS_DYNAMODB_EVENTS = is_env_not_false("SYNCHRONOUS_DYNAMODB_EVENTS")
# randomly inject faults to Kinesis
KINESIS_ERROR_PROBABILITY = float(os.environ.get("KINESIS_ERROR_PROBABILITY", "").strip() or 0.0)
# randomly inject faults to DynamoDB
DYNAMODB_ERROR_PROBABILITY = float(os.environ.get("DYNAMODB_ERROR_PROBABILITY", "").strip() or 0.0)
DYNAMODB_READ_ERROR_PROBABILITY = float(
os.environ.get("DYNAMODB_READ_ERROR_PROBABILITY", "").strip() or 0.0
)
DYNAMODB_WRITE_ERROR_PROBABILITY = float(
os.environ.get("DYNAMODB_WRITE_ERROR_PROBABILITY", "").strip() or 0.0
)
# JVM heap size for DynamoDB
DYNAMODB_HEAP_SIZE = os.environ.get("DYNAMODB_HEAP_SIZE", "").strip() or "256m"
# expose services on a specific host externally
HOSTNAME_EXTERNAL = os.environ.get("HOSTNAME_EXTERNAL", "").strip() or LOCALHOST
# expose SQS on a specific port externally
SQS_PORT_EXTERNAL = int(os.environ.get("SQS_PORT_EXTERNAL") or 0)
# name of the host under which the LocalStack services are available
LOCALSTACK_HOSTNAME = os.environ.get("LOCALSTACK_HOSTNAME", "").strip() or LOCALHOST
# host under which the LocalStack services are available from Lambda Docker containers
HOSTNAME_FROM_LAMBDA = os.environ.get("HOSTNAME_FROM_LAMBDA", "").strip()
# whether to remotely copy the lambda code or locally mount a volume
LAMBDA_REMOTE_DOCKER = is_env_true("LAMBDA_REMOTE_DOCKER")
# Marker name to indicate that a bucket represents the local file system. This is used for testing
# Serverless applications where we mount the Lambda code directly into the container from the host OS.
BUCKET_MARKER_LOCAL = (
os.environ.get("BUCKET_MARKER_LOCAL", "").strip() or DEFAULT_BUCKET_MARKER_LOCAL
)
# network that the docker lambda container will be joining
LAMBDA_DOCKER_NETWORK = os.environ.get("LAMBDA_DOCKER_NETWORK", "").strip()
# custom DNS server that the docker lambda container will use
LAMBDA_DOCKER_DNS = os.environ.get("LAMBDA_DOCKER_DNS", "").strip()
# additional flags passed to Lambda Docker run/create commands
LAMBDA_DOCKER_FLAGS = os.environ.get("LAMBDA_DOCKER_FLAGS", "").strip()
# default container registry for lambda execution images
LAMBDA_CONTAINER_REGISTRY = (
os.environ.get("LAMBDA_CONTAINER_REGISTRY", "").strip() or DEFAULT_LAMBDA_CONTAINER_REGISTRY
)
# whether to remove containers after Lambdas finished executing
LAMBDA_REMOVE_CONTAINERS = (
os.environ.get("LAMBDA_REMOVE_CONTAINERS", "").lower().strip() not in FALSE_STRINGS
)
# directory for persisting data
DATA_DIR = os.environ.get("DATA_DIR", "").strip()
# folder for temporary files and data
TMP_FOLDER = os.path.join(tempfile.gettempdir(), "localstack")
# fix for Mac OS, to be able to mount /var/folders in Docker
if TMP_FOLDER.startswith("/var/folders/") and os.path.exists("/private%s" % TMP_FOLDER):
TMP_FOLDER = "/private%s" % TMP_FOLDER
# temporary folder of the host (required when running in Docker). Fall back to local tmp folder if not set
HOST_TMP_FOLDER = os.environ.get("HOST_TMP_FOLDER", TMP_FOLDER)
# ephemeral cache dir that persists over reboots
CACHE_DIR = os.environ.get("CACHE_DIR", os.path.join(TMP_FOLDER, "cache")).strip()
# whether to enable verbose debug logging
LS_LOG = eval_log_type("LS_LOG")
DEBUG = is_env_true("DEBUG") or LS_LOG in TRACE_LOG_LEVELS
# whether to enable debugpy
DEVELOP = is_env_true("DEVELOP")
# PORT FOR DEBUGGER
DEVELOP_PORT = int(os.environ.get("DEVELOP_PORT", "").strip() or DEFAULT_DEVELOP_PORT)
# whether to make debugpy wait for a debugger client
WAIT_FOR_DEBUGGER = is_env_true("WAIT_FOR_DEBUGGER")
# whether to use SSL encryption for the services
# TODO: this is deprecated and should be removed (edge port supports HTTP/HTTPS multiplexing)
USE_SSL = is_env_true("USE_SSL")
# whether to use the legacy single-region mode, defined via DEFAULT_REGION
USE_SINGLE_REGION = is_env_true("USE_SINGLE_REGION")
# whether to run in TF compatibility mode for TF integration tests
# (e.g., returning verbatim ports for ELB resources, rather than edge port 4566, etc.)
TF_COMPAT_MODE = is_env_true("TF_COMPAT_MODE")
# default encoding used to convert strings to byte arrays (mainly for Python 3 compatibility)
DEFAULT_ENCODING = "utf-8"
# path to local Docker UNIX domain socket
DOCKER_SOCK = os.environ.get("DOCKER_SOCK", "").strip() or "/var/run/docker.sock"
# additional flags to pass to "docker run" when starting the stack in Docker
DOCKER_FLAGS = os.environ.get("DOCKER_FLAGS", "").strip()
# command used to run Docker containers (e.g., set to "sudo docker" to run as sudo)
DOCKER_CMD = os.environ.get("DOCKER_CMD", "").strip() or "docker"
# use the command line docker client instead of the new sdk version, might get removed in the future
LEGACY_DOCKER_CLIENT = is_env_true("LEGACY_DOCKER_CLIENT")
# whether to forward edge requests in-memory (instead of via proxy servers listening on backend ports)
# TODO: this will likely become the default and may get removed in the future
FORWARD_EDGE_INMEM = True
# Default bind address for the edge service
EDGE_BIND_HOST = os.environ.get("EDGE_BIND_HOST", "").strip() or "127.0.0.1"
# port number for the edge service, the main entry point for all API invocations
EDGE_PORT = int(os.environ.get("EDGE_PORT") or 0) or DEFAULT_PORT_EDGE
# fallback port for non-SSL HTTP edge service (in case HTTPS edge service cannot be used)
EDGE_PORT_HTTP = int(os.environ.get("EDGE_PORT_HTTP") or 0)
# optional target URL to forward all edge requests to
EDGE_FORWARD_URL = os.environ.get("EDGE_FORWARD_URL", "").strip()
# IP of the docker bridge used to enable access between containers
DOCKER_BRIDGE_IP = os.environ.get("DOCKER_BRIDGE_IP", "").strip()
# whether to enable API-based updates of configuration variables at runtime
ENABLE_CONFIG_UPDATES = is_env_true("ENABLE_CONFIG_UPDATES")
# CORS settings
DISABLE_CORS_CHECKS = is_env_true("DISABLE_CORS_CHECKS")
DISABLE_CUSTOM_CORS_S3 = is_env_true("DISABLE_CUSTOM_CORS_S3")
DISABLE_CUSTOM_CORS_APIGATEWAY = is_env_true("DISABLE_CUSTOM_CORS_APIGATEWAY")
EXTRA_CORS_ALLOWED_HEADERS = os.environ.get("EXTRA_CORS_ALLOWED_HEADERS", "").strip()
EXTRA_CORS_EXPOSE_HEADERS = os.environ.get("EXTRA_CORS_EXPOSE_HEADERS", "").strip()
EXTRA_CORS_ALLOWED_ORIGINS = os.environ.get("EXTRA_CORS_ALLOWED_ORIGINS", "").strip()
# whether to disable publishing events to the API
DISABLE_EVENTS = is_env_true("DISABLE_EVENTS")
DEBUG_ANALYTICS = is_env_true("DEBUG_ANALYTICS")
# whether to eagerly start services
EAGER_SERVICE_LOADING = is_env_true("EAGER_SERVICE_LOADING")
# Whether to skip downloading additional infrastructure components (e.g., custom Elasticsearch versions)
SKIP_INFRA_DOWNLOADS = os.environ.get("SKIP_INFRA_DOWNLOADS", "").strip()
# whether to enable legacy record&replay persistence mechanism (default true, but will be disabled in a future release!)
LEGACY_PERSISTENCE = is_env_not_false("LEGACY_PERSISTENCE")
# default port for the Step Functions service
LOCAL_PORT_STEPFUNCTIONS = int(os.environ.get("LOCAL_PORT_STEPFUNCTIONS") or 8083)
# Stepfunctions lambda endpoint override
STEPFUNCTIONS_LAMBDA_ENDPOINT = os.environ.get("STEPFUNCTIONS_LAMBDA_ENDPOINT", "").strip()
# path prefix for windows volume mounting
WINDOWS_DOCKER_MOUNT_PREFIX = os.environ.get("WINDOWS_DOCKER_MOUNT_PREFIX", "/host_mnt")
# name of the main Docker container
MAIN_CONTAINER_NAME = os.environ.get("MAIN_CONTAINER_NAME", "").strip() or "localstack_main"
# the latest commit id of the repository when the docker image was created
LOCALSTACK_BUILD_GIT_HASH = os.environ.get("LOCALSTACK_BUILD_GIT_HASH", "").strip() or None
# the date on which the docker image was created
LOCALSTACK_BUILD_DATE = os.environ.get("LOCALSTACK_BUILD_DATE", "").strip() or None
# whether to skip S3 presign URL signature validation (TODO: currently enabled, until all issues are resolved)
S3_SKIP_SIGNATURE_VALIDATION = is_env_not_false("S3_SKIP_SIGNATURE_VALIDATION")
# whether to skip waiting for the infrastructure to shut down, or exit immediately
FORCE_SHUTDOWN = is_env_not_false("FORCE_SHUTDOWN")
# whether the in_docker check should always return true
OVERRIDE_IN_DOCKER = is_env_true("OVERRIDE_IN_DOCKER")
# whether to return mocked success responses for still unimplemented API methods
MOCK_UNIMPLEMENTED = is_env_true("MOCK_UNIMPLEMENTED")
def has_docker():
try:
with open(os.devnull, "w") as devnull:
subprocess.check_output("docker ps", stderr=devnull, shell=True)
return True
except Exception:
return False
def is_linux():
return platform.system() == "Linux"
# whether to use Lambda functions in a Docker container
LAMBDA_EXECUTOR = os.environ.get("LAMBDA_EXECUTOR", "").strip()
if not LAMBDA_EXECUTOR:
LAMBDA_EXECUTOR = "docker"
if not has_docker():
LAMBDA_EXECUTOR = "local"
# Fallback URL to use when a non-existing Lambda is invoked. If this matches
# `dynamodb://<table_name>`, then the invocation is recorded in the corresponding
# DynamoDB table. If this matches `http(s)://...`, then the Lambda invocation is
# forwarded as a POST request to that URL.
LAMBDA_FALLBACK_URL = os.environ.get("LAMBDA_FALLBACK_URL", "").strip()
# Forward URL used to forward any Lambda invocations to an external
# endpoint (can be useful for advanced test setups)
LAMBDA_FORWARD_URL = os.environ.get("LAMBDA_FORWARD_URL", "").strip()
# Maximum time in seconds to wait while extracting Lambda code.
# By default, it is 25 seconds, to limit the execution time
# and avoid client/network timeout issues
LAMBDA_CODE_EXTRACT_TIME = int(os.environ.get("LAMBDA_CODE_EXTRACT_TIME") or 25)
# A comma-delimited string of stream names and its corresponding shard count to
# initialize during startup.
# For example: "my-first-stream:1,my-other-stream:2,my-last-stream:1"
KINESIS_INITIALIZE_STREAMS = os.environ.get("KINESIS_INITIALIZE_STREAMS", "").strip()
# URL to a custom elasticsearch backend cluster. If this is set to a valid URL, then localstack will not create
# elasticsearch cluster instances, but instead forward all domains to the given backend.
ES_CUSTOM_BACKEND = os.environ.get("ES_CUSTOM_BACKEND", "").strip()
# Strategy used when creating elasticsearch domain endpoints routed through the edge proxy
# valid values: domain | path | off
ES_ENDPOINT_STRATEGY = os.environ.get("ES_ENDPOINT_STRATEGY", "").strip() or "domain"
# Whether to start one cluster per domain (default), or multiplex domains to a single cluster
ES_MULTI_CLUSTER = is_env_not_false("ES_MULTI_CLUSTER")
# Equivalent to HTTP_PROXY, but only applicable for external connections
OUTBOUND_HTTP_PROXY = os.environ.get("OUTBOUND_HTTP_PROXY", "")
# Equivalent to HTTPS_PROXY, but only applicable for external connections
OUTBOUND_HTTPS_PROXY = os.environ.get("OUTBOUND_HTTPS_PROXY", "")
# list of environment variable names used for configuration.
# Make sure to keep this in sync with the above!
# Note: do *not* include DATA_DIR in this list, as it is treated separately
CONFIG_ENV_VARS = [
"SERVICES",
"HOSTNAME",
"HOSTNAME_EXTERNAL",
"LOCALSTACK_HOSTNAME",
"LAMBDA_FALLBACK_URL",
"LAMBDA_EXECUTOR",
"LAMBDA_REMOTE_DOCKER",
"LAMBDA_DOCKER_NETWORK",
"LAMBDA_REMOVE_CONTAINERS",
"USE_SSL",
"USE_SINGLE_REGION",
"DEBUG",
"KINESIS_ERROR_PROBABILITY",
"DYNAMODB_ERROR_PROBABILITY",
"DYNAMODB_READ_ERROR_PROBABILITY",
"DYNAMODB_WRITE_ERROR_PROBABILITY",
"ES_CUSTOM_BACKEND",
"ES_ENDPOINT_STRATEGY",
"ES_MULTI_CLUSTER",
"DOCKER_BRIDGE_IP",
"DEFAULT_REGION",
"LAMBDA_JAVA_OPTS",
"LOCALSTACK_API_KEY",
"LAMBDA_CONTAINER_REGISTRY",
"TEST_AWS_ACCOUNT_ID",
"DISABLE_EVENTS",
"EDGE_PORT",
"LS_LOG",
"EDGE_PORT_HTTP",
"EDGE_FORWARD_URL",
"SKIP_INFRA_DOWNLOADS",
"STEPFUNCTIONS_LAMBDA_ENDPOINT",
"WINDOWS_DOCKER_MOUNT_PREFIX",
"HOSTNAME_FROM_LAMBDA",
"LOG_LICENSE_ISSUES",
"SYNCHRONOUS_API_GATEWAY_EVENTS",
"SYNCHRONOUS_KINESIS_EVENTS",
"BUCKET_MARKER_LOCAL",
"SYNCHRONOUS_SNS_EVENTS",
"SYNCHRONOUS_SQS_EVENTS",
"SYNCHRONOUS_DYNAMODB_EVENTS",
"DYNAMODB_HEAP_SIZE",
"MAIN_CONTAINER_NAME",
"LAMBDA_DOCKER_DNS",
"PERSISTENCE_SINGLE_FILE",
"S3_SKIP_SIGNATURE_VALIDATION",
"DEVELOP",
"DEVELOP_PORT",
"WAIT_FOR_DEBUGGER",
"KINESIS_INITIALIZE_STREAMS",
"TF_COMPAT_MODE",
"LAMBDA_DOCKER_FLAGS",
"LAMBDA_FORWARD_URL",
"LAMBDA_CODE_EXTRACT_TIME",
"THUNDRA_APIKEY",
"THUNDRA_AGENT_JAVA_VERSION",
"THUNDRA_AGENT_NODE_VERSION",
"THUNDRA_AGENT_PYTHON_VERSION",
"DISABLE_CORS_CHECKS",
"DISABLE_CUSTOM_CORS_S3",
"DISABLE_CUSTOM_CORS_APIGATEWAY",
"EXTRA_CORS_ALLOWED_HEADERS",
"EXTRA_CORS_EXPOSE_HEADERS",
"EXTRA_CORS_ALLOWED_ORIGINS",
"ENABLE_CONFIG_UPDATES",
"LOCALSTACK_HTTP_PROXY",
"LOCALSTACK_HTTPS_PROXY",
"REQUESTS_CA_BUNDLE",
"LEGACY_DOCKER_CLIENT",
"EAGER_SERVICE_LOADING",
"LAMBDA_STAY_OPEN_MODE",
]
for key, value in six.iteritems(DEFAULT_SERVICE_PORTS):
clean_key = key.upper().replace("-", "_")
CONFIG_ENV_VARS += [
clean_key + "_BACKEND",
clean_key + "_PORT",
clean_key + "_PORT_EXTERNAL",
]
def collect_config_items() -> List[Tuple[str, Any]]:
"""Returns a list of key-value tuples of LocalStack configuration values."""
none = object() # sentinel object
# collect which keys to print
keys = list()
keys.extend(CONFIG_ENV_VARS)
keys.append("DATA_DIR")
keys.sort()
values = globals()
result = list()
for k in keys:
v = values.get(k, none)
if v is none:
continue
result.append((k, v))
result.sort()
return result
def ping(host):
"""Returns True if host responds to a ping request"""
is_windows = platform.system().lower() == "windows"
ping_opts = "-n 1" if is_windows else "-c 1"
args = "ping %s %s" % (ping_opts, host)
return (
subprocess.call(args, shell=not is_windows, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
== 0
)
def in_docker():
"""
Returns True if running in a docker container, else False
Ref. https://docs.docker.com/config/containers/runmetrics/#control-groups
"""
if OVERRIDE_IN_DOCKER:
return True
if os.path.exists("/.dockerenv"):
return True
if not os.path.exists("/proc/1/cgroup"):
return False
try:
if any(
[
os.path.exists("/sys/fs/cgroup/memory/docker/"),
any(
[
"docker-" in file_names
for file_names in os.listdir("/sys/fs/cgroup/memory/system.slice")
]
),
os.path.exists("/sys/fs/cgroup/docker/"),
any(
[
"docker-" in file_names
for file_names in os.listdir("/sys/fs/cgroup/system.slice/")
]
),
]
):
return False
except Exception:
pass
with open("/proc/1/cgroup", "rt") as ifh:
content = ifh.read()
if "docker" in content:
return True
os_hostname = socket.gethostname()
if os_hostname and os_hostname in content:
return True
return False
is_in_docker = in_docker()
is_in_linux = is_linux()
# determine IP of Docker bridge
if not DOCKER_BRIDGE_IP:
DOCKER_BRIDGE_IP = "172.17.0.1"
if is_in_docker:
candidates = (DOCKER_BRIDGE_IP, "172.18.0.1")
for ip in candidates:
if ping(ip):
DOCKER_BRIDGE_IP = ip
break
# determine route to Docker host from container
try:
DOCKER_HOST_FROM_CONTAINER = DOCKER_BRIDGE_IP
if not is_in_docker and not is_in_linux:
# If we're running outside docker, and would like the Lambda containers to be able
# to access services running on the local machine, set DOCKER_HOST_FROM_CONTAINER accordingly
if LOCALSTACK_HOSTNAME == LOCALHOST:
DOCKER_HOST_FROM_CONTAINER = "host.docker.internal"
# update LOCALSTACK_HOSTNAME if host.docker.internal is available
if is_in_docker:
DOCKER_HOST_FROM_CONTAINER = socket.gethostbyname("host.docker.internal")
if LOCALSTACK_HOSTNAME == DOCKER_BRIDGE_IP:
LOCALSTACK_HOSTNAME = DOCKER_HOST_FROM_CONTAINER
except socket.error:
pass
# make sure we default to LAMBDA_REMOTE_DOCKER=true if running in Docker
if is_in_docker and not os.environ.get("LAMBDA_REMOTE_DOCKER", "").strip():
LAMBDA_REMOTE_DOCKER = True
# whether lambdas should use stay open mode if executed in "docker-reuse" executor
LAMBDA_STAY_OPEN_MODE = is_in_docker and is_env_not_false("LAMBDA_STAY_OPEN_MODE")
# set variables no_proxy, i.e., run internal service calls directly
no_proxy = ",".join(set((LOCALSTACK_HOSTNAME, LOCALHOST, LOCALHOST_IP, "[::1]")))
if os.environ.get("no_proxy"):
os.environ["no_proxy"] += "," + no_proxy
elif os.environ.get("NO_PROXY"):
os.environ["NO_PROXY"] += "," + no_proxy
else:
os.environ["no_proxy"] = no_proxy
# additional CLI commands, can be set by plugins
CLI_COMMANDS = {}
# set of valid regions
VALID_PARTITIONS = set(Session().get_available_partitions())
VALID_REGIONS = set()
for partition in VALID_PARTITIONS:
for region in Session().get_available_regions("sns", partition):
VALID_REGIONS.add(region)
def parse_service_ports() -> Dict[str, int]:
"""Parses the environment variable $SERVICES with a comma-separated list of services
and (optional) ports they should run on: 'service1:port1,service2,service3:port3'"""
service_ports = os.environ.get("SERVICES", "").strip()
if not service_ports:
return DEFAULT_SERVICE_PORTS
result = {}
for service_port in re.split(r"\s*,\s*", service_ports):
parts = re.split(r"[:=]", service_port)
service = parts[0]
key_upper = service.upper().replace("-", "_")
port_env_name = "%s_PORT" % key_upper
# (1) set default port number
port_number = DEFAULT_SERVICE_PORTS.get(service)
# (2) set port number from <SERVICE>_PORT environment, if present
if os.environ.get(port_env_name):
port_number = os.environ.get(port_env_name)
# (3) set port number from <service>:<port> portion in $SERVICES, if present
if len(parts) > 1:
port_number = int(parts[-1])
# (4) try to parse as int, fall back to 0 (invalid port)
try:
port_number = int(port_number)
except Exception:
port_number = 0
result[service] = port_number
return result
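# Illustrative sketch (hypothetical helper): with a hypothetical
# SERVICES="kinesis:4567,sqs", parse_service_ports() yields 4567 for kinesis and
# the default port for sqs. Wrapped in a function (restoring the environment
# afterwards) so nothing runs at import time.
def _parse_service_ports_example():
    previous = os.environ.get("SERVICES")
    os.environ["SERVICES"] = "kinesis:4567,sqs"
    try:
        ports = parse_service_ports()
        assert ports["kinesis"] == 4567
        assert "sqs" in ports
        return ports
    finally:
        if previous is None:
            os.environ.pop("SERVICES", None)
        else:
            os.environ["SERVICES"] = previous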
# TODO: we need to investigate the performance impact of this
def populate_configs(service_ports=None):
global SERVICE_PORTS, CONFIG_ENV_VARS
SERVICE_PORTS = service_ports or parse_service_ports()
globs = globals()
protocol = get_protocol()
# define service ports and URLs as environment variables
for key, value in six.iteritems(DEFAULT_SERVICE_PORTS):
key_upper = key.upper().replace("-", "_")
# define PORT_* variables with actual service ports as per configuration
port_var_name = "PORT_%s" % key_upper
port_number = service_port(key)
globs[port_var_name] = port_number
url = "%s://%s:%s" % (protocol, LOCALSTACK_HOSTNAME, port_number)
# define TEST_*_URL variables with mock service endpoints
url_key = "TEST_%s_URL" % key_upper
# allow overwriting TEST_*_URL from user-defined environment variables
existing = os.environ.get(url_key)
url = existing or url
# set global variable
globs[url_key] = url
# expose HOST_*_URL variables as environment variables
os.environ[url_key] = url
# expose LOCALSTACK_HOSTNAME as env. variable
os.environ["LOCALSTACK_HOSTNAME"] = LOCALSTACK_HOSTNAME
# create variable aliases prefixed with LOCALSTACK_ (except LOCALSTACK_HOSTNAME)
CONFIG_ENV_VARS += [
"LOCALSTACK_" + v for v in CONFIG_ENV_VARS if not v.startswith("LOCALSTACK_")
]
CONFIG_ENV_VARS = list(set(CONFIG_ENV_VARS))
def service_port(service_key):
if FORWARD_EDGE_INMEM:
if service_key == "elasticsearch":
# TODO Elasticsearch domains are a special case - we do not want to route them through
# the edge service, as that would require too many route mappings. In the future, we
# should integrate them with the port range for external services (4510-4530)
return SERVICE_PORTS.get(service_key, 0)
return get_edge_port_http()
return SERVICE_PORTS.get(service_key, 0)
def get_protocol():
return "https" if USE_SSL else "http"
def external_service_url(service_key, host=None):
host = host or HOSTNAME_EXTERNAL
return "%s://%s:%s" % (get_protocol(), host, service_port(service_key))
def get_edge_port_http():
return EDGE_PORT_HTTP or EDGE_PORT
def get_edge_url(localstack_hostname=None, protocol=None):
port = get_edge_port_http()
protocol = protocol or get_protocol()
localstack_hostname = localstack_hostname or LOCALSTACK_HOSTNAME
return "%s://%s:%s" % (protocol, localstack_hostname, port)
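# Illustrative sketch (not part of the original file): with USE_SSL disabled,
# get_edge_url() would yield roughly "http://localhost:4566" under default settings
# (both the hostname and the edge port are assumptions here), and
# get_edge_url(protocol="https") would only change the scheme.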
# initialize config values
populate_configs()
# set log levels
if DEBUG:
logging.getLogger("").setLevel(logging.DEBUG)
logging.getLogger("localstack").setLevel(logging.DEBUG)
if LS_LOG in TRACE_LOG_LEVELS:
load_end_time = time.time()
LOG = logging.getLogger(__name__)
LOG.debug(
"Initializing the configuration took %s ms" % int((load_end_time - load_start_time) * 1000)
)
class ServiceProviderConfig(Mapping[str, str]):
_provider_config: Dict[str, str]
default_value: str
def __init__(self, default_value: str):
self._provider_config = dict()
self.default_value = default_value
def get_provider(self, service: str) -> str:
return self._provider_config.get(service, self.default_value)
def set_provider_if_not_exists(self, service: str, provider: str) -> None:
if service not in self._provider_config:
self._provider_config[service] = provider
def set_provider(self, service: str, provider: str):
self._provider_config[service] = provider
def bulk_set_provider_if_not_exists(self, services: List[str], provider: str):
for service in services:
self.set_provider_if_not_exists(service, provider)
def __getitem__(self, item):
return self.get_provider(item)
def __setitem__(self, key, value):
self.set_provider(key, value)
def __len__(self):
return len(self._provider_config)
def __iter__(self):
return self._provider_config.__iter__()
SERVICE_PROVIDER_CONFIG = ServiceProviderConfig("default")
for key, value in os.environ.items():
if key.startswith("PROVIDER_OVERRIDE_"):
        # slice off the exact prefix (str.lstrip would strip characters, not a prefix)
        SERVICE_PROVIDER_CONFIG.set_provider(key[len("PROVIDER_OVERRIDE_") :].lower(), value)
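# Illustrative sketch (not part of the original file): an environment variable such as
# PROVIDER_OVERRIDE_SQS=custom would register the provider "custom" for the "sqs"
# service, instead of the "default" fallback configured above.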
# initialize directories
if is_in_docker:
dirs = Directories.for_container()
else:
dirs = Directories.from_config()
dirs.mkdirs()
# TODO: remove deprecation warning with next release
for path in [dirs.config, os.path.join(dirs.tmp, ".localstack")]:
if path and os.path.isfile(path):
print(
f"warning: the config file .localstack is deprecated and no longer used, "
f"please remove it by running rm {path}"
)
| 1 | 13,992 | as we discussed, maybe it would be better to - [ ] set OVERRIDE_IN_DOCKER=1 in the Dockerfile - [ ] replace the if statement with `os.path.exists("/run/.containerenv")` as @dfangl suggested | localstack-localstack | py |
@@ -455,6 +455,11 @@ def is_ancestor_name(
return False
+def is_being_called(node: astroid.node_classes.NodeNG) -> bool:
+ """return True if node is the function being called in a Call node"""
+ return isinstance(node.parent, astroid.Call) and node.parent.func is node
+
+
def assign_parent(node: astroid.node_classes.NodeNG) -> astroid.node_classes.NodeNG:
"""return the higher parent which is not an AssignName, Tuple or List node"""
while node and isinstance(node, (astroid.AssignName, astroid.Tuple, astroid.List)): | 1 | # Copyright (c) 2006-2007, 2009-2014 LOGILAB S.A. (Paris, FRANCE) <[email protected]>
# Copyright (c) 2009 Mads Kiilerich <[email protected]>
# Copyright (c) 2010 Daniel Harding <[email protected]>
# Copyright (c) 2012-2014 Google, Inc.
# Copyright (c) 2012 FELD Boris <[email protected]>
# Copyright (c) 2013-2020 Claudiu Popa <[email protected]>
# Copyright (c) 2014 Brett Cannon <[email protected]>
# Copyright (c) 2014 Ricardo Gemignani <[email protected]>
# Copyright (c) 2014 Arun Persaud <[email protected]>
# Copyright (c) 2015 Dmitry Pribysh <[email protected]>
# Copyright (c) 2015 Florian Bruhin <[email protected]>
# Copyright (c) 2015 Radu Ciorba <[email protected]>
# Copyright (c) 2015 Ionel Cristian Maries <[email protected]>
# Copyright (c) 2016, 2018-2019 Ashley Whetter <[email protected]>
# Copyright (c) 2016-2017 Łukasz Rogalski <[email protected]>
# Copyright (c) 2016-2017 Moises Lopez <[email protected]>
# Copyright (c) 2016 Brian C. Lane <[email protected]>
# Copyright (c) 2017-2018, 2020 hippo91 <[email protected]>
# Copyright (c) 2017 ttenhoeve-aa <[email protected]>
# Copyright (c) 2018 Alan Chan <[email protected]>
# Copyright (c) 2018 Sushobhit <[email protected]>
# Copyright (c) 2018 Yury Gribov <[email protected]>
# Copyright (c) 2018 Caio Carrara <[email protected]>
# Copyright (c) 2018 ssolanki <[email protected]>
# Copyright (c) 2018 Bryce Guinta <[email protected]>
# Copyright (c) 2018 Bryce Guinta <[email protected]>
# Copyright (c) 2018 Ville Skyttä <[email protected]>
# Copyright (c) 2018 Brian Shaginaw <[email protected]>
# Copyright (c) 2019-2021 Pierre Sassoulas <[email protected]>
# Copyright (c) 2019 Matthijs Blom <[email protected]>
# Copyright (c) 2019 Djailla <[email protected]>
# Copyright (c) 2019 Hugo van Kemenade <[email protected]>
# Copyright (c) 2019 Nathan Marrow <[email protected]>
# Copyright (c) 2019 Svet <[email protected]>
# Copyright (c) 2019 Pascal Corpet <[email protected]>
# Copyright (c) 2020 Batuhan Taskaya <[email protected]>
# Copyright (c) 2020 Luigi <[email protected]>
# Copyright (c) 2020 ethan-leba <[email protected]>
# Copyright (c) 2020 Damien Baty <[email protected]>
# Copyright (c) 2020 Andrew Simmons <[email protected]>
# Copyright (c) 2020 Ram Rachum <[email protected]>
# Copyright (c) 2020 Slavfox <[email protected]>
# Copyright (c) 2020 Anthony Sottile <[email protected]>
# Copyright (c) 2021 Marc Mueller <[email protected]>
# Copyright (c) 2021 Matus Valo <[email protected]>
# Copyright (c) 2021 Lorena B <[email protected]>
# Copyright (c) 2021 yushao2 <[email protected]>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/main/LICENSE
"""some functions that may be useful for various checkers
"""
import builtins
import itertools
import numbers
import re
import string
from functools import lru_cache, partial
from typing import (
Any,
Callable,
Dict,
Iterable,
List,
Match,
Optional,
Set,
Tuple,
Union,
)
import _string
import astroid
import astroid.objects
from pylint.constants import BUILTINS
COMP_NODE_TYPES = (
astroid.ListComp,
astroid.SetComp,
astroid.DictComp,
astroid.GeneratorExp,
)
EXCEPTIONS_MODULE = "builtins"
ABC_MODULES = {"abc", "_py_abc"}
ABC_METHODS = {
"abc.abstractproperty",
"abc.abstractmethod",
"abc.abstractclassmethod",
"abc.abstractstaticmethod",
}
TYPING_PROTOCOLS = frozenset(
{"typing.Protocol", "typing_extensions.Protocol", ".Protocol"}
)
ITER_METHOD = "__iter__"
AITER_METHOD = "__aiter__"
NEXT_METHOD = "__next__"
GETITEM_METHOD = "__getitem__"
CLASS_GETITEM_METHOD = "__class_getitem__"
SETITEM_METHOD = "__setitem__"
DELITEM_METHOD = "__delitem__"
CONTAINS_METHOD = "__contains__"
KEYS_METHOD = "keys"
# Dictionary which maps the number of expected parameters a
# special method can have to a set of special methods.
# The following keys are used to denote the parameters restrictions:
#
# * None: variable number of parameters
# * number: exactly that number of parameters
# * tuple: these are the odd ones. Basically it means that the function
# can work with any number of arguments from that tuple,
# although it's best to implement it in order to accept
# all of them.
_SPECIAL_METHODS_PARAMS = {
None: ("__new__", "__init__", "__call__"),
0: (
"__del__",
"__repr__",
"__str__",
"__bytes__",
"__hash__",
"__bool__",
"__dir__",
"__len__",
"__length_hint__",
"__iter__",
"__reversed__",
"__neg__",
"__pos__",
"__abs__",
"__invert__",
"__complex__",
"__int__",
"__float__",
"__index__",
"__trunc__",
"__floor__",
"__ceil__",
"__enter__",
"__aenter__",
"__getnewargs_ex__",
"__getnewargs__",
"__getstate__",
"__reduce__",
"__copy__",
"__unicode__",
"__nonzero__",
"__await__",
"__aiter__",
"__anext__",
"__fspath__",
),
1: (
"__format__",
"__lt__",
"__le__",
"__eq__",
"__ne__",
"__gt__",
"__ge__",
"__getattr__",
"__getattribute__",
"__delattr__",
"__delete__",
"__instancecheck__",
"__subclasscheck__",
"__getitem__",
"__missing__",
"__delitem__",
"__contains__",
"__add__",
"__sub__",
"__mul__",
"__truediv__",
"__floordiv__",
"__rfloordiv__",
"__mod__",
"__divmod__",
"__lshift__",
"__rshift__",
"__and__",
"__xor__",
"__or__",
"__radd__",
"__rsub__",
"__rmul__",
"__rtruediv__",
"__rmod__",
"__rdivmod__",
"__rpow__",
"__rlshift__",
"__rrshift__",
"__rand__",
"__rxor__",
"__ror__",
"__iadd__",
"__isub__",
"__imul__",
"__itruediv__",
"__ifloordiv__",
"__imod__",
"__ilshift__",
"__irshift__",
"__iand__",
"__ixor__",
"__ior__",
"__ipow__",
"__setstate__",
"__reduce_ex__",
"__deepcopy__",
"__cmp__",
"__matmul__",
"__rmatmul__",
"__imatmul__",
"__div__",
),
2: ("__setattr__", "__get__", "__set__", "__setitem__", "__set_name__"),
3: ("__exit__", "__aexit__"),
(0, 1): ("__round__",),
(1, 2): ("__pow__",),
}
SPECIAL_METHODS_PARAMS = {
name: params
for params, methods in _SPECIAL_METHODS_PARAMS.items()
for name in methods # type: ignore
}
PYMETHODS = set(SPECIAL_METHODS_PARAMS)
SUBSCRIPTABLE_CLASSES_PEP585 = frozenset(
(
"builtins.tuple",
"builtins.list",
"builtins.dict",
"builtins.set",
"builtins.frozenset",
"builtins.type",
"collections.deque",
"collections.defaultdict",
"collections.OrderedDict",
"collections.Counter",
"collections.ChainMap",
"_collections_abc.Awaitable",
"_collections_abc.Coroutine",
"_collections_abc.AsyncIterable",
"_collections_abc.AsyncIterator",
"_collections_abc.AsyncGenerator",
"_collections_abc.Iterable",
"_collections_abc.Iterator",
"_collections_abc.Generator",
"_collections_abc.Reversible",
"_collections_abc.Container",
"_collections_abc.Collection",
"_collections_abc.Callable",
"_collections_abc.Set",
"_collections_abc.MutableSet",
"_collections_abc.Mapping",
"_collections_abc.MutableMapping",
"_collections_abc.Sequence",
"_collections_abc.MutableSequence",
"_collections_abc.ByteString",
"_collections_abc.MappingView",
"_collections_abc.KeysView",
"_collections_abc.ItemsView",
"_collections_abc.ValuesView",
"contextlib.AbstractContextManager",
"contextlib.AbstractAsyncContextManager",
"re.Pattern",
"re.Match",
)
)
class NoSuchArgumentError(Exception):
pass
class InferredTypeError(Exception):
pass
def is_inside_lambda(node: astroid.node_classes.NodeNG) -> bool:
"""Return true if given node is inside lambda"""
parent = node.parent
while parent is not None:
if isinstance(parent, astroid.Lambda):
return True
parent = parent.parent
return False
def get_all_elements(
node: astroid.node_classes.NodeNG,
) -> Iterable[astroid.node_classes.NodeNG]:
"""Recursively returns all atoms in nested lists and tuples."""
if isinstance(node, (astroid.Tuple, astroid.List)):
for child in node.elts:
yield from get_all_elements(child)
else:
yield node
def is_super(node: astroid.node_classes.NodeNG) -> bool:
"""return True if the node is referencing the "super" builtin function"""
if getattr(node, "name", None) == "super" and node.root().name == BUILTINS:
return True
return False
def is_error(node: astroid.scoped_nodes.FunctionDef) -> bool:
"""Return true if the given function node only raises an exception"""
return len(node.body) == 1 and isinstance(node.body[0], astroid.Raise)
builtins = builtins.__dict__.copy() # type: ignore
SPECIAL_BUILTINS = ("__builtins__",) # '__path__', '__file__')
def is_builtin_object(node: astroid.node_classes.NodeNG) -> bool:
"""Returns True if the given node is an object from the __builtin__ module."""
return node and node.root().name == BUILTINS
def is_builtin(name: str) -> bool:
"""return true if <name> could be considered as a builtin defined by python"""
return name in builtins or name in SPECIAL_BUILTINS # type: ignore
def is_defined_in_scope(
var_node: astroid.node_classes.NodeNG,
varname: str,
scope: astroid.node_classes.NodeNG,
) -> bool:
if isinstance(scope, astroid.If):
for node in scope.body:
if (
isinstance(node, astroid.Assign)
and any(
isinstance(target, astroid.AssignName) and target.name == varname
for target in node.targets
)
) or (isinstance(node, astroid.Nonlocal) and varname in node.names):
return True
elif isinstance(scope, (COMP_NODE_TYPES, astroid.For)):
for ass_node in scope.nodes_of_class(astroid.AssignName):
if ass_node.name == varname:
return True
elif isinstance(scope, astroid.With):
for expr, ids in scope.items:
if expr.parent_of(var_node):
break
if ids and isinstance(ids, astroid.AssignName) and ids.name == varname:
return True
elif isinstance(scope, (astroid.Lambda, astroid.FunctionDef)):
if scope.args.is_argument(varname):
# If the name is found inside a default value
# of a function, then let the search continue
# in the parent's tree.
if scope.args.parent_of(var_node):
try:
scope.args.default_value(varname)
scope = scope.parent
is_defined_in_scope(var_node, varname, scope)
except astroid.NoDefault:
pass
return True
if getattr(scope, "name", None) == varname:
return True
elif isinstance(scope, astroid.ExceptHandler):
if isinstance(scope.name, astroid.AssignName):
ass_node = scope.name
if ass_node.name == varname:
return True
return False
def is_defined_before(var_node: astroid.Name) -> bool:
    """Check if the given variable node is defined before its current use.
Verify that the variable node is defined by a parent node
(list, set, dict, or generator comprehension, lambda)
or in a previous sibling node on the same line
(statement_defining ; statement_using).
"""
varname = var_node.name
_node = var_node.parent
while _node:
if is_defined_in_scope(var_node, varname, _node):
return True
_node = _node.parent
# possibly multiple statements on the same line using semi colon separator
stmt = var_node.statement()
_node = stmt.previous_sibling()
lineno = stmt.fromlineno
while _node and _node.fromlineno == lineno:
for assign_node in _node.nodes_of_class(astroid.AssignName):
if assign_node.name == varname:
return True
for imp_node in _node.nodes_of_class((astroid.ImportFrom, astroid.Import)):
if varname in [name[1] or name[0] for name in imp_node.names]:
return True
_node = _node.previous_sibling()
return False
def is_default_argument(
node: astroid.node_classes.NodeNG,
scope: Optional[astroid.node_classes.NodeNG] = None,
) -> bool:
"""return true if the given Name node is used in function or lambda
default argument's value
"""
if not scope:
scope = node.scope()
if isinstance(scope, (astroid.FunctionDef, astroid.Lambda)):
for default_node in scope.args.defaults:
for default_name_node in default_node.nodes_of_class(astroid.Name):
if default_name_node is node:
return True
return False
def is_func_decorator(node: astroid.node_classes.NodeNG) -> bool:
"""return true if the name is used in function decorator"""
parent = node.parent
while parent is not None:
if isinstance(parent, astroid.Decorators):
return True
if parent.is_statement or isinstance(
parent,
(
astroid.Lambda,
astroid.scoped_nodes.ComprehensionScope,
astroid.scoped_nodes.ListComp,
),
):
break
parent = parent.parent
return False
def is_ancestor_name(
frame: astroid.ClassDef, node: astroid.node_classes.NodeNG
) -> bool:
"""return True if `frame` is an astroid.Class node with `node` in the
subtree of its bases attribute
"""
if not isinstance(frame, astroid.ClassDef):
return False
for base in frame.bases:
if node in base.nodes_of_class(astroid.Name):
return True
return False
def assign_parent(node: astroid.node_classes.NodeNG) -> astroid.node_classes.NodeNG:
"""return the higher parent which is not an AssignName, Tuple or List node"""
while node and isinstance(node, (astroid.AssignName, astroid.Tuple, astroid.List)):
node = node.parent
return node
def overrides_a_method(class_node: astroid.ClassDef, name: str) -> bool:
"""return True if <name> is a method overridden from an ancestor"""
for ancestor in class_node.ancestors():
if name in ancestor and isinstance(ancestor[name], astroid.FunctionDef):
return True
return False
def check_messages(*messages: str) -> Callable:
"""decorator to store messages that are handled by a checker method"""
def store_messages(func):
func.checks_msgs = messages
return func
return store_messages
class IncompleteFormatString(Exception):
"""A format string ended in the middle of a format specifier."""
class UnsupportedFormatCharacter(Exception):
"""A format character in a format string is not one of the supported
format characters."""
def __init__(self, index):
Exception.__init__(self, index)
self.index = index
def parse_format_string(
format_string: str,
) -> Tuple[Set[str], int, Dict[str, str], List[str]]:
    """Parses a format string, returning a tuple of
    (keys, num_args, key_types, pos_types), where keys is the set of mapping keys
    in the format string, num_args is the number of positional arguments required
    by the format string, and key_types/pos_types record the conversion types of
    the mapping keys and positional arguments. Raises
    IncompleteFormatString or UnsupportedFormatCharacter if a
    parse error occurs."""
keys = set()
key_types = {}
pos_types = []
num_args = 0
def next_char(i):
i += 1
if i == len(format_string):
raise IncompleteFormatString
return (i, format_string[i])
i = 0
while i < len(format_string):
char = format_string[i]
if char == "%":
i, char = next_char(i)
# Parse the mapping key (optional).
key = None
if char == "(":
depth = 1
i, char = next_char(i)
key_start = i
while depth != 0:
if char == "(":
depth += 1
elif char == ")":
depth -= 1
i, char = next_char(i)
key_end = i - 1
key = format_string[key_start:key_end]
# Parse the conversion flags (optional).
while char in "#0- +":
i, char = next_char(i)
# Parse the minimum field width (optional).
if char == "*":
num_args += 1
i, char = next_char(i)
else:
while char in string.digits:
i, char = next_char(i)
# Parse the precision (optional).
if char == ".":
i, char = next_char(i)
if char == "*":
num_args += 1
i, char = next_char(i)
else:
while char in string.digits:
i, char = next_char(i)
# Parse the length modifier (optional).
if char in "hlL":
i, char = next_char(i)
# Parse the conversion type (mandatory).
flags = "diouxXeEfFgGcrs%a"
if char not in flags:
raise UnsupportedFormatCharacter(i)
if key:
keys.add(key)
key_types[key] = char
elif char != "%":
num_args += 1
pos_types.append(char)
i += 1
return keys, num_args, key_types, pos_types
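# Illustrative example (not part of the original file):
#   >>> parse_format_string("%(key)s and %d")
#   ({'key'}, 1, {'key': 's'}, ['d'])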
def split_format_field_names(format_string) -> Tuple[str, Iterable[Tuple[bool, str]]]:
try:
return _string.formatter_field_name_split(format_string)
except ValueError as e:
raise IncompleteFormatString() from e
def collect_string_fields(format_string) -> Iterable[Optional[str]]:
"""Given a format string, return an iterator
of all the valid format fields. It handles nested fields
as well.
"""
formatter = string.Formatter()
try:
parseiterator = formatter.parse(format_string)
for result in parseiterator:
if all(item is None for item in result[1:]):
# not a replacement format
continue
name = result[1]
nested = result[2]
yield name
if nested:
yield from collect_string_fields(nested)
except ValueError as exc:
# Probably the format string is invalid.
if exc.args[0].startswith("cannot switch from manual"):
# On Jython, parsing a string with both manual
# and automatic positions will fail with a ValueError,
# while on CPython it will simply return the fields,
# the validation being done in the interpreter (?).
# We're just returning two mixed fields in order
# to trigger the format-combined-specification check.
yield ""
yield "1"
return
raise IncompleteFormatString(format_string) from exc
def parse_format_method_string(
format_string: str,
) -> Tuple[List[Tuple[str, List[Tuple[bool, str]]]], int, int]:
"""
Parses a PEP 3101 format string, returning a tuple of
(keyword_arguments, implicit_pos_args_cnt, explicit_pos_args),
    where keyword_arguments is the list of keyword arguments (with their sub-field
    accessors) used in the format string, implicit_pos_args_cnt is the number of
    implicit (auto-numbered) positional arguments, and explicit_pos_args is the
    number of explicitly numbered positional arguments.
"""
keyword_arguments = []
implicit_pos_args_cnt = 0
explicit_pos_args = set()
for name in collect_string_fields(format_string):
if name and str(name).isdigit():
explicit_pos_args.add(str(name))
elif name:
keyname, fielditerator = split_format_field_names(name)
if isinstance(keyname, numbers.Number):
explicit_pos_args.add(str(keyname))
try:
keyword_arguments.append((keyname, list(fielditerator)))
except ValueError as e:
raise IncompleteFormatString() from e
else:
implicit_pos_args_cnt += 1
return keyword_arguments, implicit_pos_args_cnt, len(explicit_pos_args)
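# Illustrative example (not part of the original file):
#   >>> parse_format_method_string("{a} {b} {1}")
#   ([('a', []), ('b', [])], 0, 1)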
def is_attr_protected(attrname: str) -> bool:
    """return True if attribute name is protected (starts with an underscore
    and is not a dunder name), False otherwise.
"""
return (
attrname[0] == "_"
and attrname != "_"
and not (attrname.startswith("__") and attrname.endswith("__"))
)
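# Illustrative example (not part of the original file): is_attr_protected("_cache") and
# is_attr_protected("__cache") are True, while dunder names such as "__init__" and
# public names such as "name" are not considered protected.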
def node_frame_class(node: astroid.node_classes.NodeNG) -> Optional[astroid.ClassDef]:
"""Return the class that is wrapping the given node
The function returns a class for a method node (or a staticmethod or a
classmethod), otherwise it returns `None`.
"""
klass = node.frame()
nodes_to_check = (
astroid.node_classes.NodeNG,
astroid.UnboundMethod,
astroid.BaseInstance,
)
while (
klass
and isinstance(klass, nodes_to_check)
and not isinstance(klass, astroid.ClassDef)
):
if klass.parent is None:
klass = None
else:
klass = klass.parent.frame()
return klass
def is_attr_private(attrname: str) -> Optional[Match[str]]:
"""Check that attribute name is private (at least two leading underscores,
at most one trailing underscore)
"""
regex = re.compile("^_{2,}.*[^_]+_?$")
return regex.match(attrname)
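# Illustrative example (not part of the original file): is_attr_private("__secret")
# returns a match object, whereas dunder names such as "__len__" return None because
# they end in two underscores.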
def get_argument_from_call(
call_node: astroid.Call, position: int = None, keyword: str = None
) -> astroid.Name:
"""Returns the specified argument from a function call.
:param astroid.Call call_node: Node representing a function call to check.
:param int position: position of the argument.
:param str keyword: the keyword of the argument.
:returns: The node representing the argument, None if the argument is not found.
:rtype: astroid.Name
:raises ValueError: if both position and keyword are None.
:raises NoSuchArgumentError: if no argument at the provided position or with
the provided keyword.
"""
if position is None and keyword is None:
raise ValueError("Must specify at least one of: position or keyword.")
if position is not None:
try:
return call_node.args[position]
except IndexError:
pass
if keyword and call_node.keywords:
for arg in call_node.keywords:
if arg.arg == keyword:
return arg.value
raise NoSuchArgumentError
def inherit_from_std_ex(node: astroid.node_classes.NodeNG) -> bool:
"""
    Return true if the given class node is a subclass of
exceptions.Exception.
"""
ancestors = node.ancestors() if hasattr(node, "ancestors") else []
for ancestor in itertools.chain([node], ancestors):
if (
ancestor.name in ("Exception", "BaseException")
and ancestor.root().name == EXCEPTIONS_MODULE
):
return True
return False
def error_of_type(handler: astroid.ExceptHandler, error_type) -> bool:
"""
Check if the given exception handler catches
the given error_type.
The *handler* parameter is a node, representing an ExceptHandler node.
The *error_type* can be an exception, such as AttributeError,
the name of an exception, or it can be a tuple of errors.
The function will return True if the handler catches any of the
given errors.
"""
def stringify_error(error):
if not isinstance(error, str):
return error.__name__
return error
if not isinstance(error_type, tuple):
error_type = (error_type,) # type: ignore
expected_errors = {stringify_error(error) for error in error_type} # type: ignore
if not handler.type:
return False
return handler.catch(expected_errors)
def decorated_with_property(node: astroid.FunctionDef) -> bool:
"""Detect if the given function node is decorated with a property."""
if not node.decorators:
return False
for decorator in node.decorators.nodes:
try:
if _is_property_decorator(decorator):
return True
except astroid.InferenceError:
pass
return False
def _is_property_kind(node, *kinds):
if not isinstance(node, (astroid.UnboundMethod, astroid.FunctionDef)):
return False
if node.decorators:
for decorator in node.decorators.nodes:
if isinstance(decorator, astroid.Attribute) and decorator.attrname in kinds:
return True
return False
def is_property_setter(node: astroid.FunctionDef) -> bool:
"""Check if the given node is a property setter"""
return _is_property_kind(node, "setter")
def is_property_deleter(node: astroid.FunctionDef) -> bool:
"""Check if the given node is a property deleter"""
return _is_property_kind(node, "deleter")
def is_property_setter_or_deleter(node: astroid.FunctionDef) -> bool:
"""Check if the given node is either a property setter or a deleter"""
return _is_property_kind(node, "setter", "deleter")
def _is_property_decorator(decorator: astroid.Name) -> bool:
for inferred in decorator.infer():
if isinstance(inferred, astroid.ClassDef):
if inferred.qname() in ("builtins.property", "functools.cached_property"):
return True
for ancestor in inferred.ancestors():
if ancestor.name == "property" and ancestor.root().name == BUILTINS:
return True
elif isinstance(inferred, astroid.FunctionDef):
# If decorator is function, check if it has exactly one return
# and the return is itself a function decorated with property
returns: List[astroid.Return] = list(
inferred._get_return_nodes_skip_functions()
)
if len(returns) == 1 and isinstance(
returns[0].value, (astroid.Name, astroid.Attribute)
):
inferred = safe_infer(returns[0].value)
if (
inferred
and isinstance(inferred, astroid.objects.Property)
and isinstance(inferred.function, astroid.FunctionDef)
):
return decorated_with_property(inferred.function)
return False
def decorated_with(
func: Union[astroid.FunctionDef, astroid.BoundMethod, astroid.UnboundMethod],
qnames: Iterable[str],
) -> bool:
"""Determine if the `func` node has a decorator with the qualified name `qname`."""
decorators = func.decorators.nodes if func.decorators else []
for decorator_node in decorators:
if isinstance(decorator_node, astroid.Call):
# We only want to infer the function name
decorator_node = decorator_node.func
try:
if any(
i.name in qnames or i.qname() in qnames
for i in decorator_node.infer()
if i is not None and i != astroid.Uninferable
):
return True
except astroid.InferenceError:
continue
return False
@lru_cache(maxsize=1024)
def unimplemented_abstract_methods(
node: astroid.ClassDef, is_abstract_cb: astroid.FunctionDef = None
) -> Dict[str, astroid.node_classes.NodeNG]:
"""
Get the unimplemented abstract methods for the given *node*.
A method can be considered abstract if the callback *is_abstract_cb*
    returns a ``True`` value. The check defaults to verifying that
    a method is decorated with one of the ``abc`` abstract method decorators.
The function will work only for new-style classes. For old-style
classes, it will simply return an empty dictionary.
For the rest of them, it will return a dictionary of abstract method
names and their inferred objects.
"""
if is_abstract_cb is None:
is_abstract_cb = partial(decorated_with, qnames=ABC_METHODS)
visited: Dict[str, astroid.node_classes.NodeNG] = {}
try:
mro = reversed(node.mro())
except NotImplementedError:
# Old style class, it will not have a mro.
return {}
except astroid.ResolveError:
        # Probably inconsistent hierarchy, don't try
# to figure this out here.
return {}
for ancestor in mro:
for obj in ancestor.values():
inferred = obj
if isinstance(obj, astroid.AssignName):
inferred = safe_infer(obj)
if not inferred:
# Might be an abstract function,
# but since we don't have enough information
# in order to take this decision, we're taking
# the *safe* decision instead.
if obj.name in visited:
del visited[obj.name]
continue
if not isinstance(inferred, astroid.FunctionDef):
if obj.name in visited:
del visited[obj.name]
if isinstance(inferred, astroid.FunctionDef):
# It's critical to use the original name,
# since after inferring, an object can be something
# else than expected, as in the case of the
# following assignment.
#
# class A:
# def keys(self): pass
# __iter__ = keys
abstract = is_abstract_cb(inferred)
if abstract:
visited[obj.name] = inferred
elif not abstract and obj.name in visited:
del visited[obj.name]
return visited
def find_try_except_wrapper_node(
node: astroid.node_classes.NodeNG,
) -> Optional[Union[astroid.ExceptHandler, astroid.TryExcept]]:
"""Return the ExceptHandler or the TryExcept node in which the node is."""
current = node
ignores = (astroid.ExceptHandler, astroid.TryExcept)
while current and not isinstance(current.parent, ignores):
current = current.parent
if current and isinstance(current.parent, ignores):
return current.parent
return None
def find_except_wrapper_node_in_scope(
node: astroid.node_classes.NodeNG,
) -> Optional[Union[astroid.ExceptHandler, astroid.TryExcept]]:
"""Return the ExceptHandler in which the node is, without going out of scope."""
current = node
while current.parent is not None:
current = current.parent
if isinstance(current, astroid.scoped_nodes.LocalsDictNodeNG):
# If we're inside a function/class definition, we don't want to keep checking
# higher ancestors for `except` clauses, because if these exist, it means our
# function/class was defined in an `except` clause, rather than the current code
# actually running in an `except` clause.
return None
if isinstance(current, astroid.ExceptHandler):
return current
return None
def is_from_fallback_block(node: astroid.node_classes.NodeNG) -> bool:
"""Check if the given node is from a fallback import block."""
context = find_try_except_wrapper_node(node)
if not context:
return False
if isinstance(context, astroid.ExceptHandler):
other_body = context.parent.body
handlers = context.parent.handlers
else:
other_body = itertools.chain.from_iterable(
handler.body for handler in context.handlers
)
handlers = context.handlers
has_fallback_imports = any(
isinstance(import_node, (astroid.ImportFrom, astroid.Import))
for import_node in other_body
)
ignores_import_error = _except_handlers_ignores_exception(handlers, ImportError)
return ignores_import_error or has_fallback_imports
def _except_handlers_ignores_exception(
handlers: astroid.ExceptHandler, exception
) -> bool:
func = partial(error_of_type, error_type=(exception,))
return any(func(handler) for handler in handlers)
def get_exception_handlers(
node: astroid.node_classes.NodeNG, exception=Exception
) -> Optional[List[astroid.ExceptHandler]]:
"""Return the collections of handlers handling the exception in arguments.
Args:
node (astroid.NodeNG): A node that is potentially wrapped in a try except.
exception (builtin.Exception or str): exception or name of the exception.
Returns:
list: the collection of handlers that are handling the exception or None.
"""
context = find_try_except_wrapper_node(node)
if isinstance(context, astroid.TryExcept):
return [
handler for handler in context.handlers if error_of_type(handler, exception)
]
return []
def is_node_inside_try_except(node: astroid.Raise) -> bool:
"""Check if the node is directly under a Try/Except statement.
(but not under an ExceptHandler!)
Args:
node (astroid.Raise): the node raising the exception.
Returns:
bool: True if the node is inside a try/except statement, False otherwise.
"""
context = find_try_except_wrapper_node(node)
return isinstance(context, astroid.TryExcept)
def node_ignores_exception(
node: astroid.node_classes.NodeNG, exception=Exception
) -> bool:
"""Check if the node is in a TryExcept which handles the given exception.
If the exception is not given, the function is going to look for bare
excepts.
"""
managing_handlers = get_exception_handlers(node, exception)
if not managing_handlers:
return False
return any(managing_handlers)
def class_is_abstract(node: astroid.ClassDef) -> bool:
"""return true if the given class node should be considered as an abstract
class
"""
# Only check for explicit metaclass=ABCMeta on this specific class
meta = node.declared_metaclass()
if meta is not None:
if meta.name == "ABCMeta" and meta.root().name in ABC_MODULES:
return True
for ancestor in node.ancestors():
if ancestor.name == "ABC" and ancestor.root().name in ABC_MODULES:
# abc.ABC inheritance
return True
for method in node.methods():
if method.parent.frame() is node:
if method.is_abstract(pass_is_abstract=False):
return True
return False
def _supports_protocol_method(value: astroid.node_classes.NodeNG, attr: str) -> bool:
try:
attributes = value.getattr(attr)
except astroid.NotFoundError:
return False
first = attributes[0]
if isinstance(first, astroid.AssignName):
if isinstance(first.parent.value, astroid.Const):
return False
return True
def is_comprehension(node: astroid.node_classes.NodeNG) -> bool:
comprehensions = (
astroid.ListComp,
astroid.SetComp,
astroid.DictComp,
astroid.GeneratorExp,
)
return isinstance(node, comprehensions)
def _supports_mapping_protocol(value: astroid.node_classes.NodeNG) -> bool:
return _supports_protocol_method(
value, GETITEM_METHOD
) and _supports_protocol_method(value, KEYS_METHOD)
def _supports_membership_test_protocol(value: astroid.node_classes.NodeNG) -> bool:
return _supports_protocol_method(value, CONTAINS_METHOD)
def _supports_iteration_protocol(value: astroid.node_classes.NodeNG) -> bool:
return _supports_protocol_method(value, ITER_METHOD) or _supports_protocol_method(
value, GETITEM_METHOD
)
def _supports_async_iteration_protocol(value: astroid.node_classes.NodeNG) -> bool:
return _supports_protocol_method(value, AITER_METHOD)
def _supports_getitem_protocol(value: astroid.node_classes.NodeNG) -> bool:
return _supports_protocol_method(value, GETITEM_METHOD)
def _supports_setitem_protocol(value: astroid.node_classes.NodeNG) -> bool:
return _supports_protocol_method(value, SETITEM_METHOD)
def _supports_delitem_protocol(value: astroid.node_classes.NodeNG) -> bool:
return _supports_protocol_method(value, DELITEM_METHOD)
def _is_abstract_class_name(name: str) -> bool:
lname = name.lower()
is_mixin = lname.endswith("mixin")
is_abstract = lname.startswith("abstract")
is_base = lname.startswith("base") or lname.endswith("base")
return is_mixin or is_abstract or is_base
def is_inside_abstract_class(node: astroid.node_classes.NodeNG) -> bool:
while node is not None:
if isinstance(node, astroid.ClassDef):
if class_is_abstract(node):
return True
name = getattr(node, "name", None)
if name is not None and _is_abstract_class_name(name):
return True
node = node.parent
return False
def _supports_protocol(
value: astroid.node_classes.NodeNG, protocol_callback: astroid.FunctionDef
) -> bool:
if isinstance(value, astroid.ClassDef):
if not has_known_bases(value):
return True
# classobj can only be iterable if it has an iterable metaclass
meta = value.metaclass()
if meta is not None:
if protocol_callback(meta):
return True
if isinstance(value, astroid.BaseInstance):
if not has_known_bases(value):
return True
if value.has_dynamic_getattr():
return True
if protocol_callback(value):
return True
if (
isinstance(value, astroid.bases.Proxy)
and isinstance(value._proxied, astroid.BaseInstance)
and has_known_bases(value._proxied)
):
value = value._proxied
return protocol_callback(value)
return False
def is_iterable(value: astroid.node_classes.NodeNG, check_async: bool = False) -> bool:
if check_async:
protocol_check = _supports_async_iteration_protocol
else:
protocol_check = _supports_iteration_protocol
return _supports_protocol(value, protocol_check)
def is_mapping(value: astroid.node_classes.NodeNG) -> bool:
return _supports_protocol(value, _supports_mapping_protocol)
def supports_membership_test(value: astroid.node_classes.NodeNG) -> bool:
supported = _supports_protocol(value, _supports_membership_test_protocol)
return supported or is_iterable(value)
def supports_getitem(
value: astroid.node_classes.NodeNG, node: astroid.node_classes.NodeNG
) -> bool:
if isinstance(value, astroid.ClassDef):
if _supports_protocol_method(value, CLASS_GETITEM_METHOD):
return True
if is_class_subscriptable_pep585_with_postponed_evaluation_enabled(value, node):
return True
return _supports_protocol(value, _supports_getitem_protocol)
def supports_setitem(value: astroid.node_classes.NodeNG, *_: Any) -> bool:
return _supports_protocol(value, _supports_setitem_protocol)
def supports_delitem(value: astroid.node_classes.NodeNG, *_: Any) -> bool:
return _supports_protocol(value, _supports_delitem_protocol)
def _get_python_type_of_node(node):
pytype = getattr(node, "pytype", None)
if callable(pytype):
return pytype()
return None
@lru_cache(maxsize=1024)
def safe_infer(
node: astroid.node_classes.NodeNG, context=None
) -> Optional[astroid.node_classes.NodeNG]:
"""Return the inferred value for the given node.
Return None if inference failed or if there is some ambiguity (more than
one node has been inferred of different types).
"""
inferred_types = set()
try:
infer_gen = node.infer(context=context)
value = next(infer_gen)
except astroid.InferenceError:
return None
if value is not astroid.Uninferable:
inferred_types.add(_get_python_type_of_node(value))
try:
for inferred in infer_gen:
inferred_type = _get_python_type_of_node(inferred)
if inferred_type not in inferred_types:
return None # If there is ambiguity on the inferred node.
except astroid.InferenceError:
return None # There is some kind of ambiguity
except StopIteration:
return value
return value if len(inferred_types) <= 1 else None
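# Illustrative example (not part of the original file), assuming astroid's public
# extract_node() helper: safe_infer(astroid.extract_node("1 + 1")) returns an
# astroid.Const node whose .value is 2, while an expression that infers to values of
# several different types makes safe_infer return None.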
def has_known_bases(klass: astroid.ClassDef, context=None) -> bool:
"""Return true if all base classes of a class could be inferred."""
try:
return klass._all_bases_known
except AttributeError:
pass
for base in klass.bases:
result = safe_infer(base, context=context)
if (
not isinstance(result, astroid.ClassDef)
or result is klass
or not has_known_bases(result, context=context)
):
klass._all_bases_known = False
return False
klass._all_bases_known = True
return True
def is_none(node: astroid.node_classes.NodeNG) -> bool:
return (
node is None
or (isinstance(node, astroid.Const) and node.value is None)
or (isinstance(node, astroid.Name) and node.name == "None")
)
def node_type(node: astroid.node_classes.NodeNG) -> Optional[type]:
"""Return the inferred type for `node`
If there is more than one possible type, or if inferred type is Uninferable or None,
return None
"""
# check there is only one possible type for the assign node. Else we
# don't handle it for now
types = set()
try:
for var_type in node.infer():
if var_type == astroid.Uninferable or is_none(var_type):
continue
types.add(var_type)
if len(types) > 1:
return None
except astroid.InferenceError:
return None
return types.pop() if types else None
def is_registered_in_singledispatch_function(node: astroid.FunctionDef) -> bool:
"""Check if the given function node is a singledispatch function."""
singledispatch_qnames = (
"functools.singledispatch",
"singledispatch.singledispatch",
)
if not isinstance(node, astroid.FunctionDef):
return False
decorators = node.decorators.nodes if node.decorators else []
for decorator in decorators:
# func.register are function calls
if not isinstance(decorator, astroid.Call):
continue
func = decorator.func
if not isinstance(func, astroid.Attribute) or func.attrname != "register":
continue
try:
func_def = next(func.expr.infer())
except astroid.InferenceError:
continue
if isinstance(func_def, astroid.FunctionDef):
# pylint: disable=redundant-keyword-arg; some flow inference goes wrong here
return decorated_with(func_def, singledispatch_qnames)
return False
def get_node_last_lineno(node: astroid.node_classes.NodeNG) -> int:
"""
Get the last lineno of the given node. For a simple statement this will just be node.lineno,
but for a node that has child statements (e.g. a method) this will be the lineno of the last
child statement recursively.
"""
# 'finalbody' is always the last clause in a try statement, if present
if getattr(node, "finalbody", False):
return get_node_last_lineno(node.finalbody[-1])
# For if, while, and for statements 'orelse' is always the last clause.
# For try statements 'orelse' is the last in the absence of a 'finalbody'
if getattr(node, "orelse", False):
return get_node_last_lineno(node.orelse[-1])
# try statements have the 'handlers' last if there is no 'orelse' or 'finalbody'
if getattr(node, "handlers", False):
return get_node_last_lineno(node.handlers[-1])
# All compound statements have a 'body'
if getattr(node, "body", False):
return get_node_last_lineno(node.body[-1])
# Not a compound statement
return node.lineno
def is_postponed_evaluation_enabled(node: astroid.node_classes.NodeNG) -> bool:
"""Check if the postponed evaluation of annotations is enabled"""
module = node.root()
return "annotations" in module.future_imports
def is_class_subscriptable_pep585_with_postponed_evaluation_enabled(
value: astroid.ClassDef, node: astroid.node_classes.NodeNG
) -> bool:
"""Check if class is subscriptable with PEP 585 and
postponed evaluation enabled.
"""
return (
is_postponed_evaluation_enabled(node)
and value.qname() in SUBSCRIPTABLE_CLASSES_PEP585
and is_node_in_type_annotation_context(node)
)
def is_node_in_type_annotation_context(node: astroid.node_classes.NodeNG) -> bool:
"""Check if node is in type annotation context.
Check for 'AnnAssign', function 'Arguments',
    or part of a function return type annotation.
"""
# pylint: disable=too-many-boolean-expressions
current_node, parent_node = node, node.parent
while True:
if (
isinstance(parent_node, astroid.AnnAssign)
and parent_node.annotation == current_node
or isinstance(parent_node, astroid.Arguments)
and current_node
in (
*parent_node.annotations,
*parent_node.posonlyargs_annotations,
*parent_node.kwonlyargs_annotations,
parent_node.varargannotation,
parent_node.kwargannotation,
)
or isinstance(parent_node, astroid.FunctionDef)
and parent_node.returns == current_node
):
return True
current_node, parent_node = parent_node, parent_node.parent
if isinstance(parent_node, astroid.Module):
return False
def is_subclass_of(child: astroid.ClassDef, parent: astroid.ClassDef) -> bool:
"""
Check if first node is a subclass of second node.
:param child: Node to check for subclass.
:param parent: Node to check for superclass.
:returns: True if child is derived from parent. False otherwise.
"""
if not all(isinstance(node, astroid.ClassDef) for node in (child, parent)):
return False
for ancestor in child.ancestors():
try:
if astroid.helpers.is_subtype(ancestor, parent):
return True
except astroid.exceptions._NonDeducibleTypeHierarchy:
continue
return False
@lru_cache(maxsize=1024)
def is_overload_stub(node: astroid.node_classes.NodeNG) -> bool:
    """Check if a node is a function stub decorated with typing.overload.
:param node: Node to check.
:returns: True if node is an overload function stub. False otherwise.
"""
decorators = getattr(node, "decorators", None)
return bool(decorators and decorated_with(node, ["typing.overload", "overload"]))
def is_protocol_class(cls: astroid.node_classes.NodeNG) -> bool:
"""Check if the given node represents a protocol class
:param cls: The node to check
:returns: True if the node is a typing protocol class, false otherwise.
"""
if not isinstance(cls, astroid.ClassDef):
return False
# Use .ancestors() since not all protocol classes can have
# their mro deduced.
return any(parent.qname() in TYPING_PROTOCOLS for parent in cls.ancestors())
def is_call_of_name(node: astroid.node_classes.NodeNG, name: str) -> bool:
"""Checks if node is a function call with the given name"""
return (
isinstance(node, astroid.Call)
and isinstance(node.func, astroid.Name)
and node.func.name == name
)
def is_test_condition(
node: astroid.node_classes.NodeNG,
parent: Optional[astroid.node_classes.NodeNG] = None,
) -> bool:
"""Returns true if the given node is being tested for truthiness"""
parent = parent or node.parent
if isinstance(parent, (astroid.While, astroid.If, astroid.IfExp, astroid.Assert)):
return node is parent.test or parent.test.parent_of(node)
if isinstance(parent, astroid.Comprehension):
return node in parent.ifs
return is_call_of_name(parent, "bool") and parent.parent_of(node)
def is_classdef_type(node: astroid.ClassDef) -> bool:
"""Test if ClassDef node is Type."""
if node.name == "type":
return True
for base in node.bases:
if isinstance(base, astroid.Name) and base.name == "type":
return True
return False
def is_attribute_typed_annotation(
node: Union[astroid.ClassDef, astroid.Instance], attr_name: str
) -> bool:
"""Test if attribute is typed annotation in current node
or any base nodes.
"""
attribute = node.locals.get(attr_name, [None])[0]
if (
attribute
and isinstance(attribute, astroid.AssignName)
and isinstance(attribute.parent, astroid.AnnAssign)
):
return True
for base in node.bases:
inferred = safe_infer(base)
if (
inferred
and isinstance(inferred, astroid.ClassDef)
and is_attribute_typed_annotation(inferred, attr_name)
):
return True
return False
def is_assign_name_annotated_with(node: astroid.AssignName, typing_name: str) -> bool:
"""Test if AssignName node has `typing_name` annotation.
Especially useful to check for `typing._SpecialForm` instances
like: `Union`, `Optional`, `Literal`, `ClassVar`, `Final`.
"""
if not isinstance(node.parent, astroid.AnnAssign):
return False
annotation = node.parent.annotation
if isinstance(annotation, astroid.Subscript):
annotation = annotation.value
if (
isinstance(annotation, astroid.Name)
and annotation.name == typing_name
or isinstance(annotation, astroid.Attribute)
and annotation.attrname == typing_name
):
return True
return False
def get_iterating_dictionary_name(
node: Union[astroid.For, astroid.Comprehension]
) -> Optional[str]:
    """Get the name of the dictionary whose keys are being iterated over on
    an `astroid.For` or `astroid.Comprehension` node.
    If the iterated object is neither the keys method of a dictionary
    nor a dictionary itself, this returns None.
"""
# Is it a proper keys call?
if (
isinstance(node.iter, astroid.Call)
and isinstance(node.iter.func, astroid.Attribute)
and node.iter.func.attrname == "keys"
):
inferred = safe_infer(node.iter.func)
if not isinstance(inferred, astroid.BoundMethod):
return None
return node.iter.as_string().rpartition(".keys")[0]
# Is it a dictionary?
if isinstance(node.iter, (astroid.Name, astroid.Attribute)):
inferred = safe_infer(node.iter)
if not isinstance(inferred, astroid.Dict):
return None
return node.iter.as_string()
return None
def get_subscript_const_value(node: astroid.Subscript) -> astroid.Const:
"""
Returns the value 'subscript.slice' of a Subscript node.
:param node: Subscript Node to extract value from
:returns: Const Node containing subscript value
:raises InferredTypeError: if the subscript node cannot be inferred as a Const
"""
inferred = safe_infer(node.slice)
if not isinstance(inferred, astroid.Const):
raise InferredTypeError(
"Subscript.slice cannot be inferred as an astroid.Const"
)
return inferred
def get_import_name(
importnode: Union[astroid.Import, astroid.ImportFrom], modname: str
) -> str:
"""Get a prepared module name from the given import node
In the case of relative imports, this will return the
absolute qualified module name, which might be useful
for debugging. Otherwise, the initial module name
is returned unchanged.
:param importnode: node representing import statement.
:param modname: module name from import statement.
:returns: absolute qualified module name of the module
used in import.
"""
if isinstance(importnode, astroid.ImportFrom):
if importnode.level:
root = importnode.root()
if isinstance(root, astroid.Module):
modname = root.relative_to_absolute_name(
modname, level=importnode.level
)
return modname
def is_node_in_guarded_import_block(node: astroid.NodeNG) -> bool:
    """Return True if node is part of a guarded `if` block,
    i.e. one checking `sys.version_info` or `typing.TYPE_CHECKING`.
"""
return isinstance(node.parent, astroid.If) and (
node.parent.is_sys_guard() or node.parent.is_typing_guard()
)
| 1 | 15,159 | I like this style of doing things better than having super big conditional without explanation. Even though I know this won't be re-used it's nice to have a function if only for clarity. | PyCQA-pylint | py |
@@ -22,7 +22,10 @@ module RSpec
end
def example_group_finished(_notification)
- @group_level -= 1
+ if @group_level > 0
+ @group_level -= 1
+ end
+ @group_level
end
def example_passed(passed) | 1 | RSpec::Support.require_rspec_core "formatters/base_text_formatter"
RSpec::Support.require_rspec_core "formatters/console_codes"
module RSpec
module Core
module Formatters
# @private
class DocumentationFormatter < BaseTextFormatter
Formatters.register self, :example_group_started, :example_group_finished,
:example_passed, :example_pending, :example_failed
def initialize(output)
super
@group_level = 0
end
def example_group_started(notification)
output.puts if @group_level == 0
output.puts "#{current_indentation}#{notification.group.description.strip}"
@group_level += 1
end
def example_group_finished(_notification)
@group_level -= 1
end
def example_passed(passed)
output.puts passed_output(passed.example)
end
def example_pending(pending)
output.puts pending_output(pending.example,
pending.example.execution_result.pending_message)
end
def example_failed(failure)
output.puts failure_output(failure.example)
end
private
def passed_output(example)
ConsoleCodes.wrap("#{current_indentation}#{example.description.strip}", :success)
end
def pending_output(example, message)
ConsoleCodes.wrap("#{current_indentation}#{example.description.strip} " \
"(PENDING: #{message})",
:pending)
end
def failure_output(example)
ConsoleCodes.wrap("#{current_indentation}#{example.description.strip} " \
"(FAILED - #{next_failure_index})",
:failure)
end
def next_failure_index
@next_failure_index ||= 0
@next_failure_index += 1
end
def current_indentation
' ' * @group_level
end
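        # Illustrative note (not part of the original file): with @group_level == 2 the
        # indentation unit is repeated twice, so each nested example group is printed
        # one level deeper than its parent group.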
end
end
end
end
| 1 | 16,635 | Our preference here is to either have inline statements e.g. `@group_level -= 1 if @group_level > 0` but stylistically here I think `return @group_level if @group_level == 0` might be nicer. As it stands however our rubocop is picking this up as a fail :( | rspec-rspec-core | rb |
@@ -20,6 +20,16 @@
package org.apache.iceberg.spark.actions;
import org.apache.iceberg.actions.TestRemoveOrphanFilesAction;
+import org.junit.Ignore;
public class TestRemoveOrphanFilesAction3 extends TestRemoveOrphanFilesAction {
+
+ /**
+   * Todo: It's failing for Spark3, so it has to be fixed in the parent class.
+   * Ignoring it for now, as Spark3 is still not supported.
+ *
+ */
+ @Ignore
+ public void testOlderThanTimestampWithPartitionWithWhitSpace() {
+ }
} | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.spark.actions;
import org.apache.iceberg.actions.TestRemoveOrphanFilesAction;
public class TestRemoveOrphanFilesAction3 extends TestRemoveOrphanFilesAction {
}
| 1 | 24,647 | Spark3 Is supported in OSS Iceberg, Do you mean this particular function is not supported in Spark3? If so I think we should have a more clear reason why | apache-iceberg | java |
@@ -399,3 +399,11 @@ def get_partition_set_execution_param_data(recon_repo, partition_set_name, parti
return ExternalPartitionExecutionErrorData(
serializable_error_info_from_exc_info(sys.exc_info())
)
+
+
+def get_notebook_data(notebook_path):
+ check.str_param(notebook_path, "notebook_path")
+
+ with open(os.path.abspath(notebook_path), "rb") as f:
+ content = f.read()
+ return content | 1 | """Workhorse functions for individual API requests."""
import os
import sys
import pendulum
from dagster import check
from dagster.core.definitions import ScheduleEvaluationContext
from dagster.core.definitions.reconstructable import (
ReconstructablePipeline,
ReconstructableRepository,
)
from dagster.core.definitions.sensor import SensorEvaluationContext
from dagster.core.errors import (
DagsterExecutionInterruptedError,
DagsterInvalidSubsetError,
DagsterRunNotFoundError,
PartitionExecutionError,
ScheduleExecutionError,
SensorExecutionError,
user_code_error_boundary,
)
from dagster.core.events import EngineEventData
from dagster.core.execution.api import create_execution_plan, execute_run_iterator
from dagster.core.host_representation import external_pipeline_data_from_def
from dagster.core.host_representation.external_data import (
ExternalPartitionConfigData,
ExternalPartitionExecutionErrorData,
ExternalPartitionExecutionParamData,
ExternalPartitionNamesData,
ExternalPartitionSetExecutionParamData,
ExternalPartitionTagsData,
ExternalPipelineSubsetResult,
ExternalScheduleExecutionErrorData,
ExternalSensorExecutionErrorData,
)
from dagster.core.instance import DagsterInstance
from dagster.core.snap.execution_plan_snapshot import (
ExecutionPlanSnapshotErrorData,
snapshot_from_execution_plan,
)
from dagster.core.storage.pipeline_run import PipelineRun
from dagster.grpc.types import ExecutionPlanSnapshotArgs
from dagster.serdes import deserialize_json_to_dagster_namedtuple
from dagster.serdes.ipc import IPCErrorMessage
from dagster.utils import start_termination_thread
from dagster.utils.error import serializable_error_info_from_exc_info
from dagster.utils.interrupts import capture_interrupts
from .types import ExecuteExternalPipelineArgs
class RunInSubprocessComplete:
"""Sentinel passed over multiprocessing Queue when subprocess is complete"""
class StartRunInSubprocessSuccessful:
"""Sentinel passed over multiprocessing Queue when launch is successful in subprocess."""
def _report_run_failed_if_not_finished(instance, pipeline_run_id):
check.inst_param(instance, "instance", DagsterInstance)
pipeline_run = instance.get_run_by_id(pipeline_run_id)
if pipeline_run and (not pipeline_run.is_finished):
yield instance.report_run_failed(pipeline_run)
def core_execute_run(recon_pipeline, pipeline_run, instance):
check.inst_param(recon_pipeline, "recon_pipeline", ReconstructablePipeline)
check.inst_param(pipeline_run, "pipeline_run", PipelineRun)
check.inst_param(instance, "instance", DagsterInstance)
# try to load the pipeline definition early
try:
recon_pipeline.get_definition()
except Exception: # pylint: disable=broad-except
yield instance.report_engine_event(
"Could not load pipeline definition.",
pipeline_run,
EngineEventData.engine_error(serializable_error_info_from_exc_info(sys.exc_info())),
)
yield from _report_run_failed_if_not_finished(instance, pipeline_run.run_id)
return
try:
yield from execute_run_iterator(recon_pipeline, pipeline_run, instance)
except (KeyboardInterrupt, DagsterExecutionInterruptedError):
yield from _report_run_failed_if_not_finished(instance, pipeline_run.run_id)
yield instance.report_engine_event(
message="Pipeline execution terminated by interrupt",
pipeline_run=pipeline_run,
)
except Exception: # pylint: disable=broad-except
yield instance.report_engine_event(
"An exception was thrown during execution that is likely a framework error, "
"rather than an error in user code.",
pipeline_run,
EngineEventData.engine_error(serializable_error_info_from_exc_info(sys.exc_info())),
)
yield from _report_run_failed_if_not_finished(instance, pipeline_run.run_id)
def _run_in_subprocess(
serialized_execute_run_args,
recon_pipeline,
termination_event,
subprocess_status_handler,
run_event_handler,
):
start_termination_thread(termination_event)
try:
execute_run_args = deserialize_json_to_dagster_namedtuple(serialized_execute_run_args)
check.inst_param(execute_run_args, "execute_run_args", ExecuteExternalPipelineArgs)
instance = DagsterInstance.from_ref(execute_run_args.instance_ref)
pipeline_run = instance.get_run_by_id(execute_run_args.pipeline_run_id)
if not pipeline_run:
raise DagsterRunNotFoundError(
"gRPC server could not load run {run_id} in order to execute it. Make sure that the gRPC server has access to your run storage.".format(
run_id=execute_run_args.pipeline_run_id
),
invalid_run_id=execute_run_args.pipeline_run_id,
)
pid = os.getpid()
except: # pylint: disable=bare-except
serializable_error_info = serializable_error_info_from_exc_info(sys.exc_info())
event = IPCErrorMessage(
serializable_error_info=serializable_error_info,
message="Error during RPC setup for executing run: {message}".format(
message=serializable_error_info.message
),
)
subprocess_status_handler(event)
subprocess_status_handler(RunInSubprocessComplete())
if instance:
instance.dispose()
return
subprocess_status_handler(StartRunInSubprocessSuccessful())
run_event_handler(
instance.report_engine_event(
"Started process for pipeline (pid: {pid}).".format(pid=pid),
pipeline_run,
EngineEventData.in_process(pid, marker_end="cli_api_subprocess_init"),
)
)
# This is so nasty but seemingly unavoidable
# https://amir.rachum.com/blog/2017/03/03/generator-cleanup/
closed = False
try:
for event in core_execute_run(recon_pipeline, pipeline_run, instance):
run_event_handler(event)
except GeneratorExit:
closed = True
raise
finally:
if not closed:
run_event_handler(
instance.report_engine_event(
"Process for pipeline exited (pid: {pid}).".format(pid=pid),
pipeline_run,
)
)
subprocess_status_handler(RunInSubprocessComplete())
instance.dispose()
def start_run_in_subprocess(
serialized_execute_run_args, recon_pipeline, event_queue, termination_event
):
with capture_interrupts():
_run_in_subprocess(
serialized_execute_run_args,
recon_pipeline,
termination_event,
subprocess_status_handler=event_queue.put,
run_event_handler=lambda x: None,
)
def get_external_pipeline_subset_result(recon_pipeline, solid_selection):
check.inst_param(recon_pipeline, "recon_pipeline", ReconstructablePipeline)
if solid_selection:
try:
sub_pipeline = recon_pipeline.subset_for_execution(solid_selection)
definition = sub_pipeline.get_definition()
except DagsterInvalidSubsetError:
return ExternalPipelineSubsetResult(
success=False, error=serializable_error_info_from_exc_info(sys.exc_info())
)
else:
definition = recon_pipeline.get_definition()
external_pipeline_data = external_pipeline_data_from_def(definition)
return ExternalPipelineSubsetResult(success=True, external_pipeline_data=external_pipeline_data)
def get_external_schedule_execution(
recon_repo,
instance_ref,
schedule_name,
scheduled_execution_timestamp,
scheduled_execution_timezone,
):
check.inst_param(
recon_repo,
"recon_repo",
ReconstructableRepository,
)
definition = recon_repo.get_definition()
schedule_def = definition.get_schedule_def(schedule_name)
scheduled_execution_time = (
pendulum.from_timestamp(
scheduled_execution_timestamp,
tz=scheduled_execution_timezone,
)
if scheduled_execution_timestamp
else None
)
with ScheduleEvaluationContext(instance_ref, scheduled_execution_time) as schedule_context:
try:
with user_code_error_boundary(
ScheduleExecutionError,
lambda: "Error occurred during the execution function for schedule "
"{schedule_name}".format(schedule_name=schedule_def.name),
):
return schedule_def.evaluate_tick(schedule_context)
except ScheduleExecutionError:
return ExternalScheduleExecutionErrorData(
serializable_error_info_from_exc_info(sys.exc_info())
)
def get_external_sensor_execution(
recon_repo, instance_ref, sensor_name, last_completion_timestamp, last_run_key, cursor
):
check.inst_param(
recon_repo,
"recon_repo",
ReconstructableRepository,
)
definition = recon_repo.get_definition()
sensor_def = definition.get_sensor_def(sensor_name)
with SensorEvaluationContext(
instance_ref,
last_completion_time=last_completion_timestamp,
last_run_key=last_run_key,
cursor=cursor,
repository_name=recon_repo.get_definition().name,
) as sensor_context:
try:
with user_code_error_boundary(
SensorExecutionError,
lambda: "Error occurred during the execution of evaluation_fn for sensor "
"{sensor_name}".format(sensor_name=sensor_def.name),
):
return sensor_def.evaluate_tick(sensor_context)
except SensorExecutionError:
return ExternalSensorExecutionErrorData(
serializable_error_info_from_exc_info(sys.exc_info())
)
def get_partition_config(recon_repo, partition_set_name, partition_name):
definition = recon_repo.get_definition()
partition_set_def = definition.get_partition_set_def(partition_set_name)
partition = partition_set_def.get_partition(partition_name)
try:
with user_code_error_boundary(
PartitionExecutionError,
lambda: "Error occurred during the evaluation of the `run_config_for_partition` "
"function for partition set {partition_set_name}".format(
partition_set_name=partition_set_def.name
),
):
run_config = partition_set_def.run_config_for_partition(partition)
return ExternalPartitionConfigData(name=partition.name, run_config=run_config)
except PartitionExecutionError:
return ExternalPartitionExecutionErrorData(
serializable_error_info_from_exc_info(sys.exc_info())
)
def get_partition_names(recon_repo, partition_set_name):
definition = recon_repo.get_definition()
partition_set_def = definition.get_partition_set_def(partition_set_name)
try:
with user_code_error_boundary(
PartitionExecutionError,
lambda: "Error occurred during the execution of the partition generation function for "
"partition set {partition_set_name}".format(partition_set_name=partition_set_def.name),
):
return ExternalPartitionNamesData(
partition_names=partition_set_def.get_partition_names()
)
except PartitionExecutionError:
return ExternalPartitionExecutionErrorData(
serializable_error_info_from_exc_info(sys.exc_info())
)
def get_partition_tags(recon_repo, partition_set_name, partition_name):
definition = recon_repo.get_definition()
partition_set_def = definition.get_partition_set_def(partition_set_name)
partition = partition_set_def.get_partition(partition_name)
try:
with user_code_error_boundary(
PartitionExecutionError,
lambda: "Error occurred during the evaluation of the `tags_for_partition` function for "
"partition set {partition_set_name}".format(partition_set_name=partition_set_def.name),
):
tags = partition_set_def.tags_for_partition(partition)
return ExternalPartitionTagsData(name=partition.name, tags=tags)
except PartitionExecutionError:
return ExternalPartitionExecutionErrorData(
serializable_error_info_from_exc_info(sys.exc_info())
)
def get_external_execution_plan_snapshot(recon_pipeline, args):
check.inst_param(recon_pipeline, "recon_pipeline", ReconstructablePipeline)
check.inst_param(args, "args", ExecutionPlanSnapshotArgs)
try:
pipeline = (
recon_pipeline.subset_for_execution(args.solid_selection)
if args.solid_selection
else recon_pipeline
)
return snapshot_from_execution_plan(
create_execution_plan(
pipeline=pipeline,
run_config=args.run_config,
mode=args.mode,
step_keys_to_execute=args.step_keys_to_execute,
known_state=args.known_state,
),
args.pipeline_snapshot_id,
)
except: # pylint: disable=bare-except
return ExecutionPlanSnapshotErrorData(
error=serializable_error_info_from_exc_info(sys.exc_info())
)
def get_partition_set_execution_param_data(recon_repo, partition_set_name, partition_names):
repo_definition = recon_repo.get_definition()
partition_set_def = repo_definition.get_partition_set_def(partition_set_name)
try:
with user_code_error_boundary(
PartitionExecutionError,
lambda: "Error occurred during the partition generation for partition set "
"{partition_set_name}".format(partition_set_name=partition_set_def.name),
):
all_partitions = partition_set_def.get_partitions()
partitions = [
partition for partition in all_partitions if partition.name in partition_names
]
partition_data = []
for partition in partitions:
def _error_message_fn(partition_set_name, partition_name):
return lambda: (
"Error occurred during the partition config and tag generation for "
"partition set {partition_set_name}::{partition_name}".format(
partition_set_name=partition_set_name, partition_name=partition_name
)
)
with user_code_error_boundary(
PartitionExecutionError, _error_message_fn(partition_set_def.name, partition.name)
):
run_config = partition_set_def.run_config_for_partition(partition)
tags = partition_set_def.tags_for_partition(partition)
partition_data.append(
ExternalPartitionExecutionParamData(
name=partition.name,
tags=tags,
run_config=run_config,
)
)
return ExternalPartitionSetExecutionParamData(partition_data=partition_data)
except PartitionExecutionError:
return ExternalPartitionExecutionErrorData(
serializable_error_info_from_exc_info(sys.exc_info())
)
| 1 | 14,705 | something about the arbitrary file access part here makes me nervous. Can we keep the `if not path.endswith(".ipynb")` check from before here as well? Just imagining a weird attack scenario where somebody somehow uses this to get access to confidential files or something | dagster-io-dagster | py
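A minimal sketch of the kind of guard the reviewer is asking to keep. This is illustrative only, assuming a hypothetical helper: the function name, the allow-listed suffix, and the path handling below are not the actual Dagster gRPC server code.

import os

NOTEBOOK_SUFFIX = ".ipynb"  # hypothetical allow-list; the real check would live in the gRPC handler

def read_notebook(path):
    # refuse anything that is not a notebook, so an arbitrary path cannot be served
    if not path.endswith(NOTEBOOK_SUFFIX):
        raise ValueError("refusing to read non-notebook path: {path}".format(path=path))
    # resolve symlinks and ".." segments before opening, so a crafted path cannot
    # wander into confidential files elsewhere on the host
    real_path = os.path.realpath(path)
    with open(real_path, "rb") as f:
        return f.read()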
@@ -51,6 +51,14 @@ module Blacklight
deprecation_deprecate :solr_search_params_logic=
end
+ def search_builder_class
+ Blacklight::Solr::SearchBuilder
+ end
+
+ def search_builder processor_chain = search_params_logic
+ search_builder_class.new(processor_chain, self)
+ end
+
# @returns a params hash for searching solr.
# The CatalogController #index action uses this. | 1 | module Blacklight
##
# This module contains methods that are specified by SearchHelper.search_params_logic
# They transform user parameters into parameters that are sent as a request to Solr when
# RequestBuilders#solr_search_params is called.
#
module RequestBuilders
extend ActiveSupport::Concern
extend Deprecation
self.deprecation_horizon = 'blacklight 6.0'
included do
# We want to install a class-level place to keep
# search_params_logic method names. Compare to before_filter,
# similar design. Since we're a module, we have to add it in here.
# There are too many different semantic choices in ruby 'class variables',
# we choose this one for now, supplied by Rails.
class_attribute :search_params_logic
alias_method :solr_search_params_logic, :search_params_logic
deprecation_deprecate :solr_search_params_logic
alias_method :solr_search_params_logic=, :search_params_logic=
deprecation_deprecate :solr_search_params_logic=
# Set defaults. Each symbol identifies a _method_ that must be in
# this class, taking two parameters (solr_parameters, user_parameters)
# Can be changed in local apps or by plugins, eg:
# CatalogController.include ModuleDefiningNewMethod
# CatalogController.search_params_logic += [:new_method]
# CatalogController.search_params_logic.delete(:we_dont_want)
self.search_params_logic = [:default_solr_parameters, :add_query_to_solr, :add_facet_fq_to_solr, :add_facetting_to_solr, :add_solr_fields_to_query, :add_paging_to_solr, :add_sorting_to_solr, :add_group_config_to_solr ]
if self.respond_to?(:helper_method)
helper_method(:facet_limit_for)
end
end
module ClassMethods
extend Deprecation
self.deprecation_horizon = 'blacklight 6.0'
def solr_search_params_logic
search_params_logic
end
deprecation_deprecate :solr_search_params_logic
def solr_search_params_logic= logic
self.search_params_logic= logic
end
deprecation_deprecate :solr_search_params_logic=
end
# @returns a params hash for searching solr.
# The CatalogController #index action uses this.
# Solr parameters can come from a number of places. From lowest
# precedence to highest:
# 1. General defaults in blacklight config (are trumped by)
# 2. defaults for the particular search field identified by params[:search_field] (are trumped by)
# 3. certain parameters directly on input HTTP query params
# * not just any parameter is grabbed willy nilly, only certain ones are allowed by HTTP input)
# * for legacy reasons, qt in http query does not over-ride qt in search field definition default.
# 4. extra parameters passed in as argument.
#
# spellcheck.q will be supplied with the [:q] value unless specifically
# specified otherwise.
#
# Incoming parameter :f is mapped to :fq solr parameter.
def solr_search_params(user_params = params || {}, processor_chain = search_params_logic)
Deprecation.warn(RequestBuilders, "solr_search_params is deprecated and will be removed in blacklight-6.0. Use SearchBuilder#processed_parameters instead.")
Blacklight::SearchBuilder.new(user_params, processor_chain, self).processed_parameters
end
##
# @param [Hash] user_params a hash of user submitted parameters
# @param [Array] processor_chain a list of processor methods to run
# @param [Hash] extra_params an optional hash of parameters that should be
# added to the query post processing
def build_solr_query(user_params, processor_chain, extra_params=nil)
Deprecation.warn(RequestBuilders, "build_solr_query is deprecated and will be removed in blacklight-6.0. Use SearchBuilder#query instead")
Blacklight::SearchBuilder.new(user_params, processor_chain, self).query(extra_params)
end
##
# Retrieve the results for a list of document ids
def solr_document_ids_params(ids = [])
solr_documents_by_field_values_params blacklight_config.document_model.unique_key, ids
end
##
# Retrieve the results for a list of document ids
# @deprecated
def solr_documents_by_field_values_params(field, values)
q = if Array(values).empty?
"{!lucene}NOT *:*"
else
"{!lucene}#{field}:(#{ Array(values).map { |x| solr_param_quote(x) }.join(" OR ")})"
end
{ q: q, spellcheck: 'false', fl: "*" }
end
##
# Retrieve a facet's paginated values.
def solr_facet_params(facet_field, user_params=params || {}, extra_controller_params={})
input = user_params.deep_merge(extra_controller_params)
facet_config = blacklight_config.facet_fields[facet_field]
solr_params = {}
# Now override with our specific things for fetching facet values
solr_params[:"facet.field"] = with_ex_local_param((facet_config.ex if facet_config.respond_to?(:ex)), facet_field)
limit = if respond_to?(:facet_list_limit)
facet_list_limit.to_s.to_i
elsif solr_params["facet.limit"]
solr_params["facet.limit"].to_i
else
20
end
# Need to set as f.facet_field.facet.* to make sure we
# override any field-specific default in the solr request handler.
solr_params[:"f.#{facet_field}.facet.limit"] = limit + 1
solr_params[:"f.#{facet_field}.facet.offset"] = ( input.fetch(Blacklight::Solr::FacetPaginator.request_keys[:page] , 1).to_i - 1 ) * ( limit )
solr_params[:"f.#{facet_field}.facet.sort"] = input[ Blacklight::Solr::FacetPaginator.request_keys[:sort] ] if input[ Blacklight::Solr::FacetPaginator.request_keys[:sort] ]
solr_params[:rows] = 0
solr_params
end
##
# Opensearch autocomplete parameters for plucking a field's value from the results
def solr_opensearch_params(field=nil)
if field.nil?
Deprecation.warn(Blacklight::RequestBuilders, "Calling Blacklight::RequestBuilders#solr_opensearch_params without a field name is deprecated and will be required in Blacklight 6.0.")
end
solr_params = {}
solr_params[:rows] ||= 10
solr_params[:fl] = field || blacklight_config.view_config('opensearch').title_field
solr_params
end
##
# Pagination parameters for selecting the previous and next documents
# out of a result set.
def previous_and_next_document_params(index, window = 1)
solr_params = {}
if index > 0
solr_params[:start] = index - window # get one before
solr_params[:rows] = 2*window + 1 # and one after
else
solr_params[:start] = 0 # there is no previous doc
solr_params[:rows] = 2*window # but there should be one after
end
solr_params[:fl] = '*'
solr_params[:facet] = false
solr_params
end
####
# Start with general defaults from BL config. Need to use custom
# merge to dup values, to avoid later mutating the original by mistake.
def default_solr_parameters(solr_parameters, user_params)
blacklight_config.default_solr_params.each do |key, value|
solr_parameters[key] = value.dup rescue value
end
end
##
# Take the user-entered query, and put it in the solr params,
# including config's "search field" params for current search field.
# also include setting spellcheck.q.
def add_query_to_solr(solr_parameters, user_parameters)
###
# Merge in search field configured values, if present, over-writing general
# defaults
###
# legacy behavior of user param :qt is passed through, but over-ridden
# by actual search field config if present. We might want to remove
# this legacy behavior at some point. It does not seem to be currently
# rspec'd.
solr_parameters[:qt] = user_parameters[:qt] if user_parameters[:qt]
search_field_def = blacklight_config.search_fields[user_parameters[:search_field]]
if (search_field_def)
solr_parameters[:qt] = search_field_def.qt
solr_parameters.merge!( search_field_def.solr_parameters) if search_field_def.solr_parameters
end
##
# Create Solr 'q' including the user-entered q, prefixed by any
# solr LocalParams in config, using solr LocalParams syntax.
# http://wiki.apache.org/solr/LocalParams
##
if (search_field_def && hash = search_field_def.solr_local_parameters)
local_params = hash.collect do |key, val|
key.to_s + "=" + solr_param_quote(val, :quote => "'")
end.join(" ")
solr_parameters[:q] = "{!#{local_params}}#{user_parameters[:q]}"
else
solr_parameters[:q] = user_parameters[:q] if user_parameters[:q]
end
##
# Set Solr spellcheck.q to be original user-entered query, without
# our local params, otherwise it'll try and spellcheck the local
# params! Unless spellcheck.q has already been set by someone,
# respect that.
#
# TODO: Change calling code to expect this as a symbol instead of
# a string, for consistency? :'spellcheck.q' is a symbol. Right now
# rspec tests for a string, and can't tell if other code may
# insist on a string.
solr_parameters["spellcheck.q"] = user_parameters[:q] unless solr_parameters["spellcheck.q"]
end
##
# Add any existing facet limits, stored in app-level HTTP query
# as :f, to solr as appropriate :fq query.
def add_facet_fq_to_solr(solr_parameters, user_params)
# convert a String value into an Array
if solr_parameters[:fq].is_a? String
solr_parameters[:fq] = [solr_parameters[:fq]]
end
# :fq, map from :f.
if ( user_params[:f])
f_request_params = user_params[:f]
f_request_params.each_pair do |facet_field, value_list|
Array(value_list).each do |value|
next if value.blank? # skip empty strings
solr_parameters.append_filter_query facet_value_to_fq_string(facet_field, value)
end
end
end
end
##
# Add appropriate Solr facetting directives in, including
# taking account of our facet paging/'more'. This is not
# about solr 'fq', this is about solr facet.* params.
def add_facetting_to_solr(solr_parameters, user_params)
# While not used by BL core behavior, legacy behavior seemed to be
# to accept incoming params as "facet.field" or "facets", and add them
# on to any existing facet.field sent to Solr. Legacy behavior seemed
# to be accepting these incoming params as arrays (in Rails URL with []
# on end), or single values. At least one of these is used by
# Stanford for "faux hieararchial facets".
if user_params.has_key?("facet.field") || user_params.has_key?("facets")
solr_parameters[:"facet.field"].concat( [user_params["facet.field"], user_params["facets"]].flatten.compact ).uniq!
end
blacklight_config.facet_fields.select { |field_name,facet|
facet.include_in_request || (facet.include_in_request.nil? && blacklight_config.add_facet_fields_to_solr_request)
}.each do |field_name, facet|
solr_parameters[:facet] ||= true
case
when facet.pivot
solr_parameters.append_facet_pivot with_ex_local_param(facet.ex, facet.pivot.join(","))
when facet.query
solr_parameters.append_facet_query facet.query.map { |k, x| with_ex_local_param(facet.ex, x[:fq]) }
else
solr_parameters.append_facet_fields with_ex_local_param(facet.ex, facet.field)
end
if facet.sort
solr_parameters[:"f.#{facet.field}.facet.sort"] = facet.sort
end
if facet.solr_params
facet.solr_params.each do |k, v|
solr_parameters[:"f.#{facet.field}.#{k}"] = v
end
end
# Support facet paging and 'more'
# links, by sending a facet.limit one more than what we
# want to page at, according to configured facet limits.
solr_parameters[:"f.#{facet.field}.facet.limit"] = (facet_limit_for(field_name) + 1) if facet_limit_for(field_name)
end
end
def add_solr_fields_to_query solr_parameters, user_parameters
blacklight_config.show_fields.select(&method(:should_add_to_solr)).each do |field_name, field|
if field.solr_params
field.solr_params.each do |k, v|
solr_parameters[:"f.#{field.field}.#{k}"] = v
end
end
end
blacklight_config.index_fields.select(&method(:should_add_to_solr)).each do |field_name, field|
if field.highlight
solr_parameters[:hl] = true
solr_parameters.append_highlight_field field.field
end
if field.solr_params
field.solr_params.each do |k, v|
solr_parameters[:"f.#{field.field}.#{k}"] = v
end
end
end
end
###
# copy paging params from BL app over to solr, changing
# app level per_page and page to Solr rows and start.
def add_paging_to_solr(solr_params, user_params)
# user-provided parameters should override any default row
solr_params[:rows] = user_params[:rows].to_i unless user_params[:rows].blank?
solr_params[:rows] = user_params[:per_page].to_i unless user_params[:per_page].blank?
# configuration defaults should only set a default value, not override a value set elsewhere (e.g. search field parameters)
solr_params[:rows] ||= blacklight_config.default_per_page unless blacklight_config.default_per_page.blank?
solr_params[:rows] ||= blacklight_config.per_page.first unless blacklight_config.per_page.blank?
# set a reasonable default
Rails.logger.info "Solr :rows parameter not set (by the user, configuration, or default solr parameters); using 10 rows by default"
solr_params[:rows] ||= 10
# ensure we don't exceed the max page size
solr_params[:rows] = blacklight_config.max_per_page if solr_params[:rows].to_i > blacklight_config.max_per_page
unless user_params[:page].blank?
solr_params[:start] = solr_params[:rows].to_i * (user_params[:page].to_i - 1)
solr_params[:start] = 0 if solr_params[:start].to_i < 0
end
end
###
# copy sorting params from BL app over to solr
def add_sorting_to_solr(solr_parameters, user_params)
if user_params[:sort].blank? and sort_field = blacklight_config.default_sort_field
# no sort param provided, use default
solr_parameters[:sort] = sort_field.sort unless sort_field.sort.blank?
elsif sort_field = blacklight_config.sort_fields[user_params[:sort]]
# check for sort field key
solr_parameters[:sort] = sort_field.sort unless sort_field.sort.blank?
else
# just pass the key through
solr_parameters[:sort] = user_params[:sort]
end
end
# Remove the group parameter if we've faceted on the group field (e.g. for the full results for a group)
def add_group_config_to_solr solr_parameters, user_parameters
if user_parameters[:f] and user_parameters[:f][grouped_key_for_results]
solr_parameters[:group] = false
end
end
def with_ex_local_param(ex, value)
if ex
"{!ex=#{ex}}#{value}"
else
value
end
end
DEFAULT_FACET_LIMIT = 10
# Look up facet limit for given facet_field. Will look at config, and
# if config is 'true' will look up from Solr @response if available. If
# no limit is available, returns nil. Used from #add_facetting_to_solr
# to supply f.fieldname.facet.limit values in solr request (no @response
# available), and used in display (with @response available) to create
# a facet paginator with the right limit.
def facet_limit_for(facet_field)
facet = blacklight_config.facet_fields[facet_field]
return if facet.blank?
if facet.limit and @response and @response.facet_by_field_name(facet_field)
limit = @response.facet_by_field_name(facet_field).limit
if limit.nil? # we didn't get or set a limit, so infer one.
facet.limit if facet.limit != true
elsif limit == -1 # limit -1 is solr-speak for unlimited
nil
else
limit.to_i - 1 # we added 1 to find out if we needed to paginate
end
elsif facet.limit
facet.limit == true ? DEFAULT_FACET_LIMIT : facet.limit
end
end
##
# A helper method used for generating solr LocalParams, put quotes
# around the term unless it's a bare-word. Escape internal quotes
# if needed.
def solr_param_quote(val, options = {})
options[:quote] ||= '"'
unless val =~ /^[a-zA-Z0-9$_\-\^]+$/
val = options[:quote] +
# Yes, we need crazy escaping here, to deal with regexp esc too!
val.gsub("'", "\\\\\'").gsub('"', "\\\\\"") +
options[:quote]
end
return val
end
private
def should_add_to_solr field_name, field
field.include_in_request || (field.include_in_request.nil? && blacklight_config.add_field_configuration_to_solr_request)
end
##
# Convert a facet/value pair into a solr fq parameter
def facet_value_to_fq_string(facet_field, value)
facet_config = blacklight_config.facet_fields[facet_field]
local_params = []
local_params << "tag=#{facet_config.tag}" if facet_config and facet_config.tag
prefix = ""
prefix = "{!#{local_params.join(" ")}}" unless local_params.empty?
fq = case
when (facet_config and facet_config.query)
facet_config.query[value][:fq]
when (facet_config and facet_config.date)
# in solr 3.2+, this could be replaced by a !term query
"#{prefix}#{facet_field}:#{RSolr.escape(value)}"
when (value.is_a?(DateTime) or value.is_a?(Date) or value.is_a?(Time))
"#{prefix}#{facet_field}:#{RSolr.escape(value.to_time.utc.strftime("%Y-%m-%dT%H:%M:%SZ"))}"
when (value.is_a?(TrueClass) or value.is_a?(FalseClass) or value == 'true' or value == 'false'),
(value.is_a?(Integer) or (value.to_i.to_s == value if value.respond_to? :to_i)),
(value.is_a?(Float) or (value.to_f.to_s == value if value.respond_to? :to_f))
"#{prefix}#{facet_field}:#{RSolr.escape(value.to_s)}"
when value.is_a?(Range)
"#{prefix}#{facet_field}:[#{value.first} TO #{value.last}]"
else
"{!raw f=#{facet_field}#{(" " + local_params.join(" ")) unless local_params.empty?}}#{value}"
end
end
end
end
| 1 | 5,710 | I wonder if we should push this and the search_params_logic into the blacklight config. | projectblacklight-blacklight | rb |
@@ -38,6 +38,7 @@ func TestClient(t *testing.T) {
cfg := config.Default
cfg.API.Port = testutil.RandomPort()
ctx := context.Background()
+ cfg.Genesis.KamchatkaBlockHeight = 10
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish() | 1 | package client
import (
"context"
"fmt"
"math/big"
"testing"
"github.com/golang/mock/gomock"
"github.com/golang/protobuf/ptypes"
"github.com/stretchr/testify/require"
"google.golang.org/protobuf/proto"
"github.com/iotexproject/go-pkgs/hash"
"github.com/iotexproject/iotex-proto/golang/iotextypes"
"github.com/iotexproject/iotex-core/action"
"github.com/iotexproject/iotex-core/action/protocol"
"github.com/iotexproject/iotex-core/api"
"github.com/iotexproject/iotex-core/blockchain/block"
"github.com/iotexproject/iotex-core/blockindex"
"github.com/iotexproject/iotex-core/config"
"github.com/iotexproject/iotex-core/db"
"github.com/iotexproject/iotex-core/state"
"github.com/iotexproject/iotex-core/test/identityset"
"github.com/iotexproject/iotex-core/test/mock/mock_actpool"
"github.com/iotexproject/iotex-core/test/mock/mock_blockchain"
"github.com/iotexproject/iotex-core/test/mock/mock_factory"
"github.com/iotexproject/iotex-core/testutil"
)
func TestClient(t *testing.T) {
require := require.New(t)
a := identityset.Address(28).String()
priKeyA := identityset.PrivateKey(28)
b := identityset.Address(29).String()
cfg := config.Default
cfg.API.Port = testutil.RandomPort()
ctx := context.Background()
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
chainID := uint32(1)
tx, err := action.NewTransfer(uint64(1), big.NewInt(10), b, nil, uint64(0), big.NewInt(0))
require.NoError(err)
bd := &action.EnvelopeBuilder{}
elp := bd.SetNonce(1).SetAction(tx).Build()
selp, err := action.Sign(elp, priKeyA)
require.NoError(err)
bc := mock_blockchain.NewMockBlockchain(mockCtrl)
sf := mock_factory.NewMockFactory(mockCtrl)
ap := mock_actpool.NewMockActPool(mockCtrl)
sf.EXPECT().State(gomock.Any(), gomock.Any()).Do(func(accountState *state.Account, _ protocol.StateOption) {
*accountState = state.EmptyAccount()
})
sf.EXPECT().Height().Return(uint64(10), nil).AnyTimes()
bc.EXPECT().ChainID().Return(chainID).AnyTimes()
bc.EXPECT().AddSubscriber(gomock.Any()).Return(nil).AnyTimes()
bh := &iotextypes.BlockHeader{Core: &iotextypes.BlockHeaderCore{
Version: chainID,
Height: 10,
Timestamp: ptypes.TimestampNow(),
PrevBlockHash: []byte(""),
TxRoot: []byte(""),
DeltaStateDigest: []byte(""),
ReceiptRoot: []byte(""),
}, ProducerPubkey: identityset.PrivateKey(27).PublicKey().Bytes()}
blh := block.Header{}
require.NoError(blh.LoadFromBlockHeaderProto(bh))
bc.EXPECT().BlockHeaderByHeight(gomock.Any()).Return(&blh, nil).AnyTimes()
ap.EXPECT().GetPendingNonce(gomock.Any()).Return(uint64(1), nil).AnyTimes()
ap.EXPECT().Add(gomock.Any(), gomock.Any()).Return(nil).AnyTimes()
newOption := api.WithBroadcastOutbound(func(_ context.Context, _ uint32, _ proto.Message) error {
return nil
})
indexer, err := blockindex.NewIndexer(db.NewMemKVStore(), hash.ZeroHash256)
require.NoError(err)
bfIndexer, err := blockindex.NewBloomfilterIndexer(db.NewMemKVStore(), cfg.Indexer)
require.NoError(err)
apiServer, err := api.NewServer(cfg, bc, nil, sf, nil, indexer, bfIndexer, ap, nil, newOption)
require.NoError(err)
require.NoError(apiServer.Start())
// test New()
serverAddr := fmt.Sprintf("127.0.0.1:%d", cfg.API.Port)
cli, err := New(serverAddr, true)
require.NoError(err)
// test GetAccount()
response, err := cli.GetAccount(ctx, a)
require.NotNil(response)
require.NoError(err)
// test SendAction
require.NoError(cli.SendAction(ctx, selp))
}
| 1 | 23,677 | don't need this line, since it is not tested here? | iotexproject-iotex-core | go |
@@ -197,6 +197,7 @@ class EmrConnection(AWSQueryConnection):
steps=[],
bootstrap_actions=[],
instance_groups=None,
+ ami_version="1.0",
additional_info=None):
"""
Runs a job flow | 1 | # Copyright (c) 2010 Spotify AB
# Copyright (c) 2010-2011 Yelp
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Represents a connection to the EMR service
"""
import types
import boto
import boto.utils
from boto.ec2.regioninfo import RegionInfo
from boto.emr.emrobject import JobFlow, RunJobFlowResponse
from boto.emr.emrobject import AddInstanceGroupsResponse, ModifyInstanceGroupsResponse
from boto.emr.step import JarStep
from boto.connection import AWSQueryConnection
from boto.exception import EmrResponseError
class EmrConnection(AWSQueryConnection):
APIVersion = boto.config.get('Boto', 'emr_version', '2009-03-31')
DefaultRegionName = boto.config.get('Boto', 'emr_region_name', 'us-east-1')
DefaultRegionEndpoint = boto.config.get('Boto', 'emr_region_endpoint',
'elasticmapreduce.amazonaws.com')
ResponseError = EmrResponseError
# Constants for AWS Console debugging
DebuggingJar = 's3n://us-east-1.elasticmapreduce/libs/script-runner/script-runner.jar'
DebuggingArgs = 's3n://us-east-1.elasticmapreduce/libs/state-pusher/0.1/fetch'
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure=True, port=None, proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None, debug=0,
https_connection_factory=None, region=None, path='/'):
if not region:
region = RegionInfo(self, self.DefaultRegionName, self.DefaultRegionEndpoint)
self.region = region
AWSQueryConnection.__init__(self, aws_access_key_id,
aws_secret_access_key,
is_secure, port, proxy, proxy_port,
proxy_user, proxy_pass,
self.region.endpoint, debug,
https_connection_factory, path)
def _required_auth_capability(self):
return ['emr']
def describe_jobflow(self, jobflow_id):
"""
Describes a single Elastic MapReduce job flow
:type jobflow_id: str
:param jobflow_id: The job flow id of interest
"""
jobflows = self.describe_jobflows(jobflow_ids=[jobflow_id])
if jobflows:
return jobflows[0]
def describe_jobflows(self, states=None, jobflow_ids=None,
created_after=None, created_before=None):
"""
Retrieve all the Elastic MapReduce job flows on your account
:type states: list
:param states: A list of strings with job flow states wanted
:type jobflow_ids: list
:param jobflow_ids: A list of job flow IDs
:type created_after: datetime
:param created_after: Bound on job flow creation time
:type created_before: datetime
:param created_before: Bound on job flow creation time
"""
params = {}
if states:
self.build_list_params(params, states, 'JobFlowStates.member')
if jobflow_ids:
self.build_list_params(params, jobflow_ids, 'JobFlowIds.member')
if created_after:
params['CreatedAfter'] = created_after.strftime(
boto.utils.ISO8601)
if created_before:
params['CreatedBefore'] = created_before.strftime(
boto.utils.ISO8601)
return self.get_list('DescribeJobFlows', params, [('member', JobFlow)])
def terminate_jobflow(self, jobflow_id):
"""
Terminate an Elastic MapReduce job flow
:type jobflow_id: str
:param jobflow_id: A jobflow id
"""
self.terminate_jobflows([jobflow_id])
def terminate_jobflows(self, jobflow_ids):
"""
Terminate an Elastic MapReduce job flow
:type jobflow_ids: list
:param jobflow_ids: A list of job flow IDs
"""
params = {}
self.build_list_params(params, jobflow_ids, 'JobFlowIds.member')
return self.get_status('TerminateJobFlows', params, verb='POST')
def add_jobflow_steps(self, jobflow_id, steps):
"""
Adds steps to a jobflow
:type jobflow_id: str
:param jobflow_id: The job flow id
:type steps: list(boto.emr.Step)
:param steps: A list of steps to add to the job
"""
if type(steps) != types.ListType:
steps = [steps]
params = {}
params['JobFlowId'] = jobflow_id
# Step args
step_args = [self._build_step_args(step) for step in steps]
params.update(self._build_step_list(step_args))
return self.get_object(
'AddJobFlowSteps', params, RunJobFlowResponse, verb='POST')
def add_instance_groups(self, jobflow_id, instance_groups):
"""
Adds instance groups to a running cluster.
:type jobflow_id: str
:param jobflow_id: The id of the jobflow which will take the new instance groups
:type instance_groups: list(boto.emr.InstanceGroup)
:param instance_groups: A list of instance groups to add to the job
"""
if type(instance_groups) != types.ListType:
instance_groups = [instance_groups]
params = {}
params['JobFlowId'] = jobflow_id
params.update(self._build_instance_group_list_args(instance_groups))
return self.get_object('AddInstanceGroups', params, AddInstanceGroupsResponse, verb='POST')
def modify_instance_groups(self, instance_group_ids, new_sizes):
"""
Modify the number of nodes and configuration settings in an instance group.
:type instance_group_ids: list(str)
:param instance_group_ids: A list of the ID's of the instance groups to be modified
:type new_sizes: list(int)
:param new_sizes: A list of the new sizes for each instance group
"""
if type(instance_group_ids) != types.ListType:
instance_group_ids = [instance_group_ids]
if type(new_sizes) != types.ListType:
new_sizes = [new_sizes]
instance_groups = zip(instance_group_ids, new_sizes)
params = {}
for k, ig in enumerate(instance_groups):
#could be wrong - the example amazon gives uses InstanceRequestCount,
#while the api documentation says InstanceCount
params['InstanceGroups.member.%d.InstanceGroupId' % (k+1) ] = ig[0]
params['InstanceGroups.member.%d.InstanceCount' % (k+1) ] = ig[1]
return self.get_object('ModifyInstanceGroups', params, ModifyInstanceGroupsResponse, verb='POST')
def run_jobflow(self, name, log_uri, ec2_keyname=None, availability_zone=None,
master_instance_type='m1.small',
slave_instance_type='m1.small', num_instances=1,
action_on_failure='TERMINATE_JOB_FLOW', keep_alive=False,
enable_debugging=False,
hadoop_version='0.20',
steps=[],
bootstrap_actions=[],
instance_groups=None,
additional_info=None):
"""
Runs a job flow
:type name: str
:param name: Name of the job flow
:type log_uri: str
:param log_uri: URI of the S3 bucket to place logs
:type ec2_keyname: str
:param ec2_keyname: EC2 key used for the instances
:type availability_zone: str
:param availability_zone: EC2 availability zone of the cluster
:type master_instance_type: str
:param master_instance_type: EC2 instance type of the master
:type slave_instance_type: str
:param slave_instance_type: EC2 instance type of the slave nodes
:type num_instances: int
:param num_instances: Number of instances in the Hadoop cluster
:type action_on_failure: str
:param action_on_failure: Action to take if a step terminates
:type keep_alive: bool
:param keep_alive: Denotes whether the cluster should stay alive upon completion
:type enable_debugging: bool
:param enable_debugging: Denotes whether AWS console debugging should be enabled.
:type steps: list(boto.emr.Step)
:param steps: List of steps to add with the job
:type bootstrap_actions: list(boto.emr.BootstrapAction)
:param bootstrap_actions: List of bootstrap actions that run before Hadoop starts.
:type instance_groups: list(boto.emr.InstanceGroup)
:param instance_groups: Optional list of instance groups to use when creating
this job. NB: When provided, this argument supersedes
num_instances and master/slave_instance_type.
:type additional_info: JSON str
:param additional_info: A JSON string for selecting additional features
:rtype: str
:return: The jobflow id
"""
params = {}
if action_on_failure:
params['ActionOnFailure'] = action_on_failure
params['Name'] = name
params['LogUri'] = log_uri
# Common instance args
common_params = self._build_instance_common_args(ec2_keyname,
availability_zone,
keep_alive, hadoop_version)
params.update(common_params)
# NB: according to the AWS API's error message, we must
# "configure instances either using instance count, master and
# slave instance type or instance groups but not both."
#
# Thus we switch here on the truthiness of instance_groups.
if not instance_groups:
# Instance args (the common case)
instance_params = self._build_instance_count_and_type_args(
master_instance_type,
slave_instance_type,
num_instances)
params.update(instance_params)
else:
# Instance group args (for spot instances or a heterogeneous cluster)
list_args = self._build_instance_group_list_args(instance_groups)
instance_params = dict(
('Instances.%s' % k, v) for k, v in list_args.iteritems()
)
params.update(instance_params)
# Debugging step from EMR API docs
if enable_debugging:
debugging_step = JarStep(name='Setup Hadoop Debugging',
action_on_failure='TERMINATE_JOB_FLOW',
main_class=None,
jar=self.DebuggingJar,
step_args=self.DebuggingArgs)
steps.insert(0, debugging_step)
# Step args
if steps:
step_args = [self._build_step_args(step) for step in steps]
params.update(self._build_step_list(step_args))
if bootstrap_actions:
bootstrap_action_args = [self._build_bootstrap_action_args(bootstrap_action) for bootstrap_action in bootstrap_actions]
params.update(self._build_bootstrap_action_list(bootstrap_action_args))
if additional_info is not None:
params['AdditionalInfo'] = additional_info
response = self.get_object(
'RunJobFlow', params, RunJobFlowResponse, verb='POST')
return response.jobflowid
def set_termination_protection(self, jobflow_id, termination_protection_status):
"""
Set termination protection on specified Elastic MapReduce job flows
:type jobflow_ids: list or str
:param jobflow_ids: A list of job flow IDs
:type termination_protection_status: bool
:param termination_protection_status: Termination protection status
"""
assert termination_protection_status in (True, False)
params = {}
params['TerminationProtected'] = (termination_protection_status and "true") or "false"
self.build_list_params(params, [jobflow_id], 'JobFlowIds.member')
return self.get_status('SetTerminationProtection', params, verb='POST')
def _build_bootstrap_action_args(self, bootstrap_action):
bootstrap_action_params = {}
bootstrap_action_params['ScriptBootstrapAction.Path'] = bootstrap_action.path
try:
bootstrap_action_params['Name'] = bootstrap_action.name
except AttributeError:
pass
args = bootstrap_action.args()
if args:
self.build_list_params(bootstrap_action_params, args, 'ScriptBootstrapAction.Args.member')
return bootstrap_action_params
def _build_step_args(self, step):
step_params = {}
step_params['ActionOnFailure'] = step.action_on_failure
step_params['HadoopJarStep.Jar'] = step.jar()
main_class = step.main_class()
if main_class:
step_params['HadoopJarStep.MainClass'] = main_class
args = step.args()
if args:
self.build_list_params(step_params, args, 'HadoopJarStep.Args.member')
step_params['Name'] = step.name
return step_params
def _build_bootstrap_action_list(self, bootstrap_actions):
if type(bootstrap_actions) != types.ListType:
bootstrap_actions = [bootstrap_actions]
params = {}
for i, bootstrap_action in enumerate(bootstrap_actions):
for key, value in bootstrap_action.iteritems():
params['BootstrapActions.member.%s.%s' % (i + 1, key)] = value
return params
def _build_step_list(self, steps):
if type(steps) != types.ListType:
steps = [steps]
params = {}
for i, step in enumerate(steps):
for key, value in step.iteritems():
params['Steps.member.%s.%s' % (i+1, key)] = value
return params
def _build_instance_common_args(self, ec2_keyname, availability_zone,
keep_alive, hadoop_version):
"""
Takes a number of parameters used when starting a jobflow (as
specified in run_jobflow() above). Returns a comparable dict for
use in making a RunJobFlow request.
"""
params = {
'Instances.KeepJobFlowAliveWhenNoSteps' : str(keep_alive).lower(),
'Instances.HadoopVersion' : hadoop_version
}
if ec2_keyname:
params['Instances.Ec2KeyName'] = ec2_keyname
if availability_zone:
params['Instances.Placement.AvailabilityZone'] = availability_zone
return params
def _build_instance_count_and_type_args(self, master_instance_type,
slave_instance_type, num_instances):
"""
Takes a master instance type (string), a slave instance type
(string), and a number of instances. Returns a comparable dict
for use in making a RunJobFlow request.
"""
params = {
'Instances.MasterInstanceType' : master_instance_type,
'Instances.SlaveInstanceType' : slave_instance_type,
'Instances.InstanceCount' : num_instances,
}
return params
def _build_instance_group_args(self, instance_group):
"""
Takes an InstanceGroup; returns a dict that, when its keys are
properly prefixed, can be used for describing InstanceGroups in
RunJobFlow or AddInstanceGroups requests.
"""
params = {
'InstanceCount' : instance_group.num_instances,
'InstanceRole' : instance_group.role,
'InstanceType' : instance_group.type,
'Name' : instance_group.name,
'Market' : instance_group.market
}
if instance_group.market == 'SPOT':
params['BidPrice'] = instance_group.bidprice
return params
def _build_instance_group_list_args(self, instance_groups):
"""
Takes a list of InstanceGroups, or a single InstanceGroup. Returns
a comparable dict for use in making a RunJobFlow or AddInstanceGroups
request.
"""
if type(instance_groups) != types.ListType:
instance_groups = [instance_groups]
params = {}
for i, instance_group in enumerate(instance_groups):
ig_dict = self._build_instance_group_args(instance_group)
for key, value in ig_dict.iteritems():
params['InstanceGroups.member.%d.%s' % (i+1, key)] = value
return params
| 1 | 8,010 | I wonder if adding this new param in the middle of the param list might screw up some existing code that is depending on the old parameter order. Might be better to add it to the end of the parameter list? | boto-boto | py |
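A standalone sketch (not boto code; all names are illustrative) of why the reviewer worries about inserting `ami_version` mid-signature: any caller that passes the later arguments positionally gets them silently reassigned, whereas appending the new parameter at the end keeps old positional call sites working.

def run_v1(name, steps=(), additional_info=None):
    return {"name": name, "steps": steps, "additional_info": additional_info}

# new keyword argument inserted before additional_info, as in the patch above
def run_v2_middle(name, steps=(), ami_version="1.0", additional_info=None):
    return {"name": name, "steps": steps, "ami_version": ami_version,
            "additional_info": additional_info}

# same argument appended after everything else instead
def run_v2_end(name, steps=(), additional_info=None, ami_version="1.0"):
    return {"name": name, "steps": steps, "ami_version": ami_version,
            "additional_info": additional_info}

old_call = ("flow", ["step-1"], "extra")             # an old all-positional call site
print(run_v1(*old_call)["additional_info"])          # 'extra'
print(run_v2_end(*old_call)["additional_info"])      # 'extra'  -- still correct
print(run_v2_middle(*old_call)["additional_info"])   # None -- 'extra' silently became ami_version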
@@ -178,7 +178,8 @@ public final class BaselineErrorProne implements Plugin<Project> {
errorProneOptions.setEnabled(true);
errorProneOptions.setDisableWarningsInGeneratedCode(true);
- errorProneOptions.setExcludedPaths(project.getBuildDir().getAbsolutePath() + "/.*");
+ errorProneOptions.setExcludedPaths(
+ String.format("%s/(build|src/generated.*)/.*", project.getProjectDir().getAbsolutePath()));
errorProneOptions.check("EqualsHashCode", CheckSeverity.ERROR);
errorProneOptions.check("EqualsIncompatibleType", CheckSeverity.ERROR);
errorProneOptions.check("StreamResourceLeak", CheckSeverity.ERROR); | 1 | /*
* (c) Copyright 2017 Palantir Technologies Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.palantir.baseline.plugins;
import com.google.common.base.Joiner;
import com.google.common.base.Splitter;
import com.google.common.collect.ImmutableList;
import com.palantir.baseline.extensions.BaselineErrorProneExtension;
import com.palantir.baseline.tasks.RefasterCompileTask;
import java.io.File;
import java.util.AbstractList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import net.ltgt.gradle.errorprone.CheckSeverity;
import net.ltgt.gradle.errorprone.ErrorProneOptions;
import net.ltgt.gradle.errorprone.ErrorPronePlugin;
import org.gradle.api.JavaVersion;
import org.gradle.api.Plugin;
import org.gradle.api.Project;
import org.gradle.api.artifacts.Configuration;
import org.gradle.api.file.FileCollection;
import org.gradle.api.file.RegularFile;
import org.gradle.api.logging.Logger;
import org.gradle.api.logging.Logging;
import org.gradle.api.plugins.ExtensionAware;
import org.gradle.api.provider.Provider;
import org.gradle.api.tasks.compile.JavaCompile;
import org.gradle.api.tasks.javadoc.Javadoc;
import org.gradle.api.tasks.testing.Test;
public final class BaselineErrorProne implements Plugin<Project> {
private static final Logger log = Logging.getLogger(BaselineErrorProne.class);
public static final String EXTENSION_NAME = "baselineErrorProne";
private static final String ERROR_PRONE_JAVAC_VERSION = "9+181-r4173-1";
private static final String PROP_ERROR_PRONE_APPLY = "errorProneApply";
private static final String PROP_REFASTER_APPLY = "refasterApply";
@Override
public void apply(Project project) {
project.getPluginManager().withPlugin("java", unused -> {
applyToJavaProject(project);
});
}
private static void applyToJavaProject(Project project) {
BaselineErrorProneExtension errorProneExtension = project.getExtensions()
.create(EXTENSION_NAME, BaselineErrorProneExtension.class, project);
project.getPluginManager().apply(ErrorPronePlugin.class);
String version = Optional.ofNullable(BaselineErrorProne.class.getPackage().getImplementationVersion())
.orElseGet(() -> {
log.warn("Baseline is using 'latest.release' - beware this compromises build reproducibility");
return "latest.release";
});
Configuration refasterConfiguration = project.getConfigurations().create("refaster", conf -> {
conf.defaultDependencies(deps -> {
deps.add(project.getDependencies().create(
"com.palantir.baseline:baseline-refaster-rules:" + version + ":sources"));
});
});
Configuration refasterCompilerConfiguration = project.getConfigurations()
.create("refasterCompiler", configuration -> configuration.extendsFrom(refasterConfiguration));
project.getDependencies().add(
ErrorPronePlugin.CONFIGURATION_NAME,
"com.palantir.baseline:baseline-error-prone:" + version);
project.getDependencies().add(
"refasterCompiler",
"com.palantir.baseline:baseline-refaster-javac-plugin:" + version);
Provider<File> refasterRulesFile = project.getLayout().getBuildDirectory()
.file("refaster/rules.refaster")
.map(RegularFile::getAsFile);
RefasterCompileTask compileRefaster =
project.getTasks().create("compileRefaster", RefasterCompileTask.class, task -> {
task.setSource(refasterConfiguration);
task.getRefasterSources().set(refasterConfiguration);
task.setClasspath(refasterCompilerConfiguration);
task.getRefasterRulesFile().set(refasterRulesFile);
});
project.getTasks().withType(JavaCompile.class).configureEach(javaCompile -> {
((ExtensionAware) javaCompile.getOptions()).getExtensions()
.configure(ErrorProneOptions.class, errorProneOptions -> {
configureErrorProneOptions(
project,
refasterRulesFile,
compileRefaster,
errorProneExtension,
javaCompile,
errorProneOptions);
});
});
// To allow refactoring of deprecated methods, even when -Xlint:deprecation is specified, we need to remove
// these compiler flags after all configuration has happened.
project.afterEvaluate(unused -> project.getTasks().withType(JavaCompile.class)
.configureEach(javaCompile -> {
if (javaCompile.equals(compileRefaster)) {
return;
}
if (isRefactoring(project)) {
javaCompile.getOptions().setWarnings(false);
javaCompile.getOptions().setDeprecation(false);
javaCompile.getOptions().setCompilerArgs(javaCompile.getOptions().getCompilerArgs()
.stream()
.filter(arg -> !arg.equals("-Werror"))
.filter(arg -> !arg.equals("-deprecation"))
.filter(arg -> !arg.equals("-Xlint:deprecation"))
.collect(Collectors.toList()));
}
}));
project.getPluginManager().withPlugin("java-gradle-plugin", appliedPlugin -> {
project.getTasks().withType(JavaCompile.class).configureEach(javaCompile ->
((ExtensionAware) javaCompile.getOptions()).getExtensions()
.configure(ErrorProneOptions.class, errorProneOptions -> {
errorProneOptions.check("Slf4jLogsafeArgs", CheckSeverity.OFF);
errorProneOptions.check("PreferSafeLoggableExceptions", CheckSeverity.OFF);
errorProneOptions.check("PreferSafeLoggingPreconditions", CheckSeverity.OFF);
errorProneOptions.check("PreconditionsConstantMessage", CheckSeverity.OFF);
}));
});
// In case of java 8 we need to add errorprone javac compiler to bootstrap classpath of tasks that perform
// compilation or code analysis. ErrorProneJavacPluginPlugin handles JavaCompile cases via errorproneJavac
// configuration and we do similar thing for Test and Javadoc type tasks
if (!JavaVersion.current().isJava9Compatible()) {
project.getDependencies().add(ErrorPronePlugin.JAVAC_CONFIGURATION_NAME,
"com.google.errorprone:javac:" + ERROR_PRONE_JAVAC_VERSION);
project.getConfigurations()
.named(ErrorPronePlugin.JAVAC_CONFIGURATION_NAME)
.configure(conf -> {
List<File> bootstrapClasspath = Splitter.on(File.pathSeparator)
.splitToList(System.getProperty("sun.boot.class.path"))
.stream()
.map(File::new)
.collect(Collectors.toList());
FileCollection errorProneFiles = conf.plus(project.files(bootstrapClasspath));
project.getTasks().withType(Test.class)
.configureEach(test -> test.setBootstrapClasspath(errorProneFiles));
project.getTasks().withType(Javadoc.class)
.configureEach(javadoc -> javadoc.getOptions()
.setBootClasspath(new LazyConfigurationList(errorProneFiles)));
});
}
}
@SuppressWarnings("UnstableApiUsage")
private static void configureErrorProneOptions(
Project project,
Provider<File> refasterRulesFile,
RefasterCompileTask compileRefaster,
BaselineErrorProneExtension errorProneExtension,
JavaCompile javaCompile,
ErrorProneOptions errorProneOptions) {
JavaVersion jdkVersion = JavaVersion.toVersion(javaCompile.getToolChain().getVersion());
errorProneOptions.setEnabled(true);
errorProneOptions.setDisableWarningsInGeneratedCode(true);
errorProneOptions.setExcludedPaths(project.getBuildDir().getAbsolutePath() + "/.*");
errorProneOptions.check("EqualsHashCode", CheckSeverity.ERROR);
errorProneOptions.check("EqualsIncompatibleType", CheckSeverity.ERROR);
errorProneOptions.check("StreamResourceLeak", CheckSeverity.ERROR);
errorProneOptions.check("InputStreamSlowMultibyteRead", CheckSeverity.ERROR);
errorProneOptions.check("JavaDurationGetSecondsGetNano", CheckSeverity.ERROR);
errorProneOptions.check("URLEqualsHashCode", CheckSeverity.ERROR);
if (jdkVersion.compareTo(JavaVersion.toVersion("12.0.1")) >= 0) {
// Errorprone isn't officially compatible with Java12, but in practice everything
// works apart from this one check: https://github.com/google/error-prone/issues/1106
errorProneOptions.check("Finally", CheckSeverity.OFF);
}
if (jdkVersion.compareTo(JavaVersion.toVersion("13.0.0")) >= 0) {
// Errorprone isn't officially compatible with Java13 either
// https://github.com/google/error-prone/issues/1106
errorProneOptions.check("TypeParameterUnusedInFormals", CheckSeverity.OFF);
}
if (javaCompile.equals(compileRefaster)) {
// Don't apply refaster to itself...
return;
}
if (isRefactoring(project)) {
// Don't attempt to cache since it won't capture the source files that might be modified
javaCompile.getOutputs().cacheIf(t -> false);
if (isRefasterRefactoring(project)) {
javaCompile.dependsOn(compileRefaster);
errorProneOptions.getErrorproneArgumentProviders().add(() -> {
String file = refasterRulesFile.get().getAbsolutePath();
return new File(file).exists()
? ImmutableList.of(
"-XepPatchChecks:refaster:" + file,
"-XepPatchLocation:IN_PLACE")
: Collections.emptyList();
});
}
if (isErrorProneRefactoring(project)) {
// TODO(gatesn): Is there a way to discover error-prone checks?
// Maybe service-load from a ClassLoader configured with annotation processor path?
// https://github.com/google/error-prone/pull/947
errorProneOptions.getErrorproneArgumentProviders().add(() -> {
// Don't apply checks that have been explicitly disabled
Stream<String> errorProneChecks = getNotDisabledErrorproneChecks(
errorProneExtension, javaCompile, errorProneOptions);
return ImmutableList.of(
"-XepPatchChecks:" + Joiner.on(',').join(errorProneChecks.iterator()),
"-XepPatchLocation:IN_PLACE");
});
}
}
}
private static Stream<String> getNotDisabledErrorproneChecks(
BaselineErrorProneExtension errorProneExtension,
JavaCompile javaCompile,
ErrorProneOptions errorProneOptions) {
return errorProneExtension.getPatchChecks().get().stream().filter(check -> {
if (checkExplicitlyDisabled(errorProneOptions, check)) {
log.info(
"Task {}: not applying errorprone check {} because it has severity OFF in errorProneOptions",
javaCompile.getPath(),
check);
return false;
}
return true;
});
}
private static boolean isRefactoring(Project project) {
return isRefasterRefactoring(project) || isErrorProneRefactoring(project);
}
private static boolean isRefasterRefactoring(Project project) {
return project.hasProperty(PROP_REFASTER_APPLY);
}
private static boolean isErrorProneRefactoring(Project project) {
return project.hasProperty(PROP_ERROR_PRONE_APPLY);
}
private static boolean checkExplicitlyDisabled(ErrorProneOptions errorProneOptions, String check) {
Map<String, CheckSeverity> checks = errorProneOptions.getChecks();
return checks.get(check) == CheckSeverity.OFF
|| errorProneOptions.getErrorproneArgs().contains(String.format("-Xep:%s:OFF", check));
}
private static final class LazyConfigurationList extends AbstractList<File> {
private final FileCollection files;
private List<File> fileList;
private LazyConfigurationList(FileCollection files) {
this.files = files;
}
@Override
public File get(int index) {
if (fileList == null) {
fileList = ImmutableList.copyOf(files.getFiles());
}
return fileList.get(index);
}
@Override
public int size() {
if (fileList == null) {
fileList = ImmutableList.copyOf(files.getFiles());
}
return fileList.size();
}
}
}
| 1 | 7,505 | I wonder if this absolute path is gonna affect the build-cache relocatability of these compile tasks?? | palantir-gradle-baseline | java |
@@ -18,10 +18,13 @@ class CommandOperationV2 extends OperationBase {
this.readPreference = ReadPreference.resolve(parent, this.options);
this.readConcern = resolveReadConcern(parent, this.options);
this.writeConcern = resolveWriteConcern(parent, this.options);
+ this.session = options && options.session;
+ this.inTransaction = this.session && this.session.inTransaction();
+ if (this.inTransaction) this.writeConcern = undefined;
this.explain = false;
if (operationOptions && typeof operationOptions.fullResponse === 'boolean') {
- this.fullResponse = true;
+ this.fullResponse = operationOptions.fullResponse;
}
// TODO: A lot of our code depends on having the read preference in the options. This should | 1 | 'use strict';
const { Aspect, OperationBase } = require('./operation');
const ReadConcern = require('../read_concern');
const WriteConcern = require('../write_concern');
const { maxWireVersion } = require('../utils');
const ReadPreference = require('../read_preference');
const { commandSupportsReadConcern } = require('../sessions');
const { MongoError } = require('../error');
const SUPPORTS_WRITE_CONCERN_AND_COLLATION = 5;
class CommandOperationV2 extends OperationBase {
constructor(parent, options, operationOptions) {
super(options);
this.ns = parent.s.namespace.withCollection('$cmd');
this.readPreference = ReadPreference.resolve(parent, this.options);
this.readConcern = resolveReadConcern(parent, this.options);
this.writeConcern = resolveWriteConcern(parent, this.options);
this.explain = false;
if (operationOptions && typeof operationOptions.fullResponse === 'boolean') {
this.fullResponse = true;
}
// TODO: A lot of our code depends on having the read preference in the options. This should
// go away, but also requires massive test rewrites.
this.options.readPreference = this.readPreference;
// TODO(NODE-2056): make logger another "inheritable" property
if (parent.s.logger) {
this.logger = parent.s.logger;
} else if (parent.s.db && parent.s.db.logger) {
this.logger = parent.s.db.logger;
}
}
executeCommand(server, cmd, callback) {
// TODO: consider making this a non-enumerable property
this.server = server;
const options = this.options;
const serverWireVersion = maxWireVersion(server);
const inTransaction = this.session && this.session.inTransaction();
if (this.readConcern && commandSupportsReadConcern(cmd) && !inTransaction) {
Object.assign(cmd, { readConcern: this.readConcern });
}
if (options.collation && serverWireVersion < SUPPORTS_WRITE_CONCERN_AND_COLLATION) {
callback(
new MongoError(
`Server ${server.name}, which reports wire version ${serverWireVersion}, does not support collation`
)
);
return;
}
if (serverWireVersion >= SUPPORTS_WRITE_CONCERN_AND_COLLATION) {
if (this.writeConcern && this.hasAspect(Aspect.WRITE_OPERATION)) {
Object.assign(cmd, { writeConcern: this.writeConcern });
}
if (options.collation && typeof options.collation === 'object') {
Object.assign(cmd, { collation: options.collation });
}
}
if (typeof options.maxTimeMS === 'number') {
cmd.maxTimeMS = options.maxTimeMS;
}
if (typeof options.comment === 'string') {
cmd.comment = options.comment;
}
if (this.logger && this.logger.isDebug()) {
this.logger.debug(`executing command ${JSON.stringify(cmd)} against ${this.ns}`);
}
server.command(this.ns.toString(), cmd, this.options, (err, result) => {
if (err) {
callback(err, null);
return;
}
if (this.fullResponse) {
callback(null, result);
return;
}
callback(null, result.result);
});
}
}
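// A minimal usage sketch (added for illustration; PingOperation is hypothetical and not part of
// this module). A concrete operation only assembles the command document; executeCommand then
// merges read concern, write concern, collation and maxTimeMS before calling server.command:
//
//   class PingOperation extends CommandOperationV2 {
//     execute(server, callback) {
//       this.executeCommand(server, { ping: 1 }, callback);
//     }
//   }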
function resolveWriteConcern(parent, options) {
return WriteConcern.fromOptions(options) || parent.writeConcern;
}
function resolveReadConcern(parent, options) {
return ReadConcern.fromOptions(options) || parent.readConcern;
}
module.exports = CommandOperationV2;
| 1 | 17,757 | possible bug if `fullResponse` is false | mongodb-node-mongodb-native | js |
@@ -19,9 +19,11 @@
"""A HintManager to draw hints over links."""
-import math
-import functools
import collections
+import functools
+import math
+import re
+from string import ascii_lowercase
from PyQt5.QtCore import (pyqtSignal, pyqtSlot, QObject, QEvent, Qt, QUrl,
                          QTimer)
| 1 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2016 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""A HintManager to draw hints over links."""
import math
import functools
import collections
from PyQt5.QtCore import (pyqtSignal, pyqtSlot, QObject, QEvent, Qt, QUrl,
QTimer)
from PyQt5.QtGui import QMouseEvent, QClipboard
from PyQt5.QtWidgets import QApplication
from PyQt5.QtWebKit import QWebElement
from PyQt5.QtWebKitWidgets import QWebPage
from qutebrowser.config import config
from qutebrowser.keyinput import modeman, modeparsers
from qutebrowser.browser import webelem
from qutebrowser.commands import userscripts, cmdexc, cmdutils, runners
from qutebrowser.utils import usertypes, log, qtutils, message, objreg
from qutebrowser.misc import guiprocess
ElemTuple = collections.namedtuple('ElemTuple', ['elem', 'label'])
Target = usertypes.enum('Target', ['normal', 'tab', 'tab_fg', 'tab_bg',
'window', 'yank', 'yank_primary', 'run',
'fill', 'hover', 'download', 'userscript',
'spawn'])
@pyqtSlot(usertypes.KeyMode)
def on_mode_entered(mode, win_id):
"""Stop hinting when insert mode was entered."""
if mode == usertypes.KeyMode.insert:
modeman.maybe_leave(win_id, usertypes.KeyMode.hint, 'insert mode')
class HintContext:
"""Context namespace used for hinting.
Attributes:
frames: The QWebFrames to use.
destroyed_frames: id()'s of QWebFrames which have been destroyed.
(Workaround for https://github.com/The-Compiler/qutebrowser/issues/152)
elems: A mapping from key strings to (elem, label) namedtuples.
baseurl: The URL of the current page.
target: What to do with the opened links.
normal/tab/tab_fg/tab_bg/window: Get passed to BrowserTab.
yank/yank_primary: Yank to clipboard/primary selection.
run: Run a command.
fill: Fill commandline with link.
download: Download the link.
userscript: Call a custom userscript.
spawn: Spawn a simple command.
to_follow: The link to follow when enter is pressed.
args: Custom arguments for userscript/spawn
rapid: Whether to do rapid hinting.
mainframe: The main QWebFrame where we started hinting in.
group: The group of web elements to hint.
"""
def __init__(self):
self.elems = {}
self.target = None
self.baseurl = None
self.to_follow = None
self.rapid = False
self.frames = []
self.destroyed_frames = []
self.args = []
self.mainframe = None
self.group = None
def get_args(self, urlstr):
"""Get the arguments, with {hint-url} replaced by the given URL."""
args = []
for arg in self.args:
arg = arg.replace('{hint-url}', urlstr)
args.append(arg)
return args
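        # Example (comment added for illustration, not in the original source): with
        # self.args == ['open', '-t', '{hint-url}'] and urlstr == 'https://example.com/',
        # get_args returns ['open', '-t', 'https://example.com/'].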
class HintManager(QObject):
"""Manage drawing hints over links or other elements.
Class attributes:
HINT_TEXTS: Text displayed for different hinting modes.
Attributes:
_context: The HintContext for the current invocation.
_win_id: The window ID this HintManager is associated with.
_tab_id: The tab ID this HintManager is associated with.
Signals:
mouse_event: Mouse event to be posted in the web view.
arg: A QMouseEvent
start_hinting: Emitted when hinting starts, before a link is clicked.
arg: The ClickTarget to use.
stop_hinting: Emitted after a link was clicked.
"""
HINT_TEXTS = {
Target.normal: "Follow hint",
Target.tab: "Follow hint in new tab",
Target.tab_fg: "Follow hint in foreground tab",
Target.tab_bg: "Follow hint in background tab",
Target.window: "Follow hint in new window",
Target.yank: "Yank hint to clipboard",
Target.yank_primary: "Yank hint to primary selection",
Target.run: "Run a command on a hint",
Target.fill: "Set hint in commandline",
Target.hover: "Hover over a hint",
Target.download: "Download hint",
Target.userscript: "Call userscript via hint",
Target.spawn: "Spawn command via hint",
}
mouse_event = pyqtSignal('QMouseEvent')
start_hinting = pyqtSignal(usertypes.ClickTarget)
stop_hinting = pyqtSignal()
def __init__(self, win_id, tab_id, parent=None):
"""Constructor."""
super().__init__(parent)
self._win_id = win_id
self._tab_id = tab_id
self._context = None
mode_manager = objreg.get('mode-manager', scope='window',
window=win_id)
mode_manager.left.connect(self.on_mode_left)
def _get_text(self):
"""Get a hint text based on the current context."""
text = self.HINT_TEXTS[self._context.target]
if self._context.rapid:
text += ' (rapid mode)'
text += '...'
return text
def _cleanup(self):
"""Clean up after hinting."""
for elem in self._context.elems.values():
try:
elem.label.removeFromDocument()
except webelem.IsNullError:
pass
for f in self._context.frames:
log.hints.debug("Disconnecting frame {}".format(f))
if id(f) in self._context.destroyed_frames:
# WORKAROUND for
# https://github.com/The-Compiler/qutebrowser/issues/152
log.hints.debug("Frame has been destroyed, ignoring.")
continue
try:
f.contentsSizeChanged.disconnect(self.on_contents_size_changed)
except TypeError:
# It seems we can get this here:
# TypeError: disconnect() failed between
# 'contentsSizeChanged' and 'on_contents_size_changed'
# See # https://github.com/The-Compiler/qutebrowser/issues/263
pass
log.hints.debug("Disconnected.")
text = self._get_text()
message_bridge = objreg.get('message-bridge', scope='window',
window=self._win_id)
message_bridge.maybe_reset_text(text)
self._context = None
def _hint_strings(self, elems):
"""Calculate the hint strings for elems.
Inspired by Vimium.
Args:
elems: The elements to get hint strings for.
Return:
A list of hint strings, in the same order as the elements.
"""
if config.get('hints', 'mode') == 'number':
chars = '0123456789'
else:
chars = config.get('hints', 'chars')
min_chars = config.get('hints', 'min-chars')
if config.get('hints', 'scatter'):
return self._hint_scattered(min_chars, chars, elems)
else:
return self._hint_linear(min_chars, chars, elems)
def _hint_scattered(self, min_chars, chars, elems):
"""Produce scattered hint labels with variable length (like Vimium).
Args:
min_chars: The minimum length of labels.
chars: The alphabet to use for labels.
elems: The elements to generate labels for.
"""
# Determine how many digits the link hints will require in the worst
        # case. Usually we do not need all of these digits for every single
        # link hint, so we can show shorter hints for a few of the links.
needed = max(min_chars, math.ceil(math.log(len(elems), len(chars))))
# Short hints are the number of hints we can possibly show which are
# (needed - 1) digits in length.
if needed > min_chars:
short_count = math.floor((len(chars) ** needed - len(elems)) /
len(chars))
else:
short_count = 0
long_count = len(elems) - short_count
strings = []
if needed > 1:
for i in range(short_count):
strings.append(self._number_to_hint_str(i, chars, needed - 1))
start = short_count * len(chars)
for i in range(start, start + long_count):
strings.append(self._number_to_hint_str(i, chars, needed))
return self._shuffle_hints(strings, len(chars))
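        # Worked example (added for illustration): with chars == 'asdf', min_chars == 1
        # and 10 elements, needed == ceil(log(10, 4)) == 2 and
        # short_count == (4**2 - 10) // 4 == 1, so one single-letter hint is produced and
        # the nine remaining hints are two letters long, numbered from 4 upwards; that
        # offset keeps the short hint from being a prefix of any longer hint.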
def _hint_linear(self, min_chars, chars, elems):
"""Produce linear hint labels with constant length (like dwb).
Args:
min_chars: The minimum length of labels.
chars: The alphabet to use for labels.
elems: The elements to generate labels for.
"""
strings = []
needed = max(min_chars, math.ceil(math.log(len(elems), len(chars))))
for i in range(len(elems)):
strings.append(self._number_to_hint_str(i, chars, needed))
return strings
def _shuffle_hints(self, hints, length):
"""Shuffle the given set of hints so that they're scattered.
Hints starting with the same character will be spread evenly throughout
the array.
Inspired by Vimium.
Args:
hints: A list of hint strings.
length: Length of the available charset.
Return:
A list of shuffled hint strings.
"""
buckets = [[] for i in range(length)]
for i, hint in enumerate(hints):
buckets[i % len(buckets)].append(hint)
result = []
for bucket in buckets:
result += bucket
return result
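        # Example (added for illustration): _shuffle_hints(['aa', 'ab', 'ba', 'bb'], 2)
        # distributes the hints over two buckets and returns ['aa', 'ba', 'ab', 'bb'],
        # so hints sharing a first character no longer sit next to each other.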
def _number_to_hint_str(self, number, chars, digits=0):
"""Convert a number like "8" into a hint string like "JK".
This is used to sequentially generate all of the hint text.
The hint string will be "padded with zeroes" to ensure its length is >=
digits.
Inspired by Vimium.
Args:
number: The hint number.
chars: The charset to use.
digits: The minimum output length.
Return:
A hint string.
"""
base = len(chars)
hintstr = []
remainder = 0
while True:
remainder = number % base
hintstr.insert(0, chars[remainder])
number -= remainder
number //= base
if number <= 0:
break
# Pad the hint string we're returning so that it matches digits.
for _ in range(0, digits - len(hintstr)):
hintstr.insert(0, chars[0])
return ''.join(hintstr)
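        # Worked example (added for illustration): with chars == 'ab' the conversion is
        # plain base-2 with 'a' as 0 and 'b' as 1, so _number_to_hint_str(5, 'ab')
        # returns 'bab' (binary 101) and _number_to_hint_str(5, 'ab', 5) returns
        # 'aabab' once padded to five "digits".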
def _is_hidden(self, elem):
"""Check if the element is hidden via display=none."""
display = elem.styleProperty('display', QWebElement.InlineStyle)
return display == 'none'
def _show_elem(self, elem):
"""Show a given element."""
elem.setStyleProperty('display', 'inline !important')
def _hide_elem(self, elem):
"""Hide a given element."""
elem.setStyleProperty('display', 'none !important')
def _set_style_properties(self, elem, label):
"""Set the hint CSS on the element given.
Args:
elem: The QWebElement to set the style attributes for.
label: The label QWebElement.
"""
attrs = [
('display', 'inline !important'),
('z-index', '{} !important'.format(int(2 ** 32 / 2 - 1))),
('pointer-events', 'none !important'),
('position', 'absolute !important'),
('color', config.get('colors', 'hints.fg') + ' !important'),
('background', config.get('colors', 'hints.bg') + ' !important'),
('font', config.get('fonts', 'hints') + ' !important'),
('border', config.get('hints', 'border') + ' !important'),
('opacity', str(config.get('hints', 'opacity')) + ' !important'),
]
# Make text uppercase if set in config
if (config.get('hints', 'uppercase') and
config.get('hints', 'mode') == 'letter'):
attrs.append(('text-transform', 'uppercase !important'))
else:
attrs.append(('text-transform', 'none !important'))
for k, v in attrs:
label.setStyleProperty(k, v)
self._set_style_position(elem, label)
def _set_style_position(self, elem, label):
"""Set the CSS position of the label element.
Args:
elem: The QWebElement to set the style attributes for.
label: The label QWebElement.
"""
rect = elem.geometry()
left = rect.x()
top = rect.y()
zoom = elem.webFrame().zoomFactor()
if not config.get('ui', 'zoom-text-only'):
left /= zoom
top /= zoom
log.hints.vdebug("Drawing label '{!r}' at {}/{} for element '{!r}', "
"zoom level {}".format(label, left, top, elem, zoom))
label.setStyleProperty('left', '{}px !important'.format(left))
label.setStyleProperty('top', '{}px !important'.format(top))
def _draw_label(self, elem, string):
"""Draw a hint label over an element.
Args:
elem: The QWebElement to use.
string: The hint string to print.
Return:
The newly created label element
"""
doc = elem.webFrame().documentElement()
# It seems impossible to create an empty QWebElement for which isNull()
# is false so we can work with it.
# As a workaround, we use appendInside() with markup as argument, and
# then use lastChild() to get a reference to it.
# See: http://stackoverflow.com/q/7364852/2085149
body = doc.findFirst('body')
if not body.isNull():
parent = body
else:
parent = doc
parent.appendInside('<span></span>')
label = webelem.WebElementWrapper(parent.lastChild())
label['class'] = 'qutehint'
self._set_style_properties(elem, label)
label.setPlainText(string)
return label
def _show_url_error(self):
"""Show an error because no link was found."""
message.error(self._win_id, "No suitable link found for this element.",
immediately=True)
def _click(self, elem, context):
"""Click an element.
Args:
elem: The QWebElement to click.
context: The HintContext to use.
"""
target_mapping = {
Target.normal: usertypes.ClickTarget.normal,
Target.tab_fg: usertypes.ClickTarget.tab,
Target.tab_bg: usertypes.ClickTarget.tab_bg,
Target.window: usertypes.ClickTarget.window,
Target.hover: usertypes.ClickTarget.normal,
}
if config.get('tabs', 'background-tabs'):
target_mapping[Target.tab] = usertypes.ClickTarget.tab_bg
else:
target_mapping[Target.tab] = usertypes.ClickTarget.tab
# FIXME Instead of clicking the center, we could have nicer heuristics.
# e.g. parse (-webkit-)border-radius correctly and click text fields at
# the bottom right, and everything else on the top left or so.
# https://github.com/The-Compiler/qutebrowser/issues/70
pos = elem.rect_on_view().center()
action = "Hovering" if context.target == Target.hover else "Clicking"
log.hints.debug("{} on '{}' at {}/{}".format(
action, elem, pos.x(), pos.y()))
self.start_hinting.emit(target_mapping[context.target])
if context.target in [Target.tab, Target.tab_fg, Target.tab_bg,
Target.window]:
modifiers = Qt.ControlModifier
else:
modifiers = Qt.NoModifier
events = [
QMouseEvent(QEvent.MouseMove, pos, Qt.NoButton, Qt.NoButton,
Qt.NoModifier),
]
if context.target != Target.hover:
events += [
QMouseEvent(QEvent.MouseButtonPress, pos, Qt.LeftButton,
Qt.LeftButton, modifiers),
QMouseEvent(QEvent.MouseButtonRelease, pos, Qt.LeftButton,
Qt.NoButton, modifiers),
]
for evt in events:
self.mouse_event.emit(evt)
if elem.is_text_input() and elem.is_editable():
QTimer.singleShot(0, functools.partial(
elem.webFrame().page().triggerAction,
QWebPage.MoveToEndOfDocument))
QTimer.singleShot(0, self.stop_hinting.emit)
def _yank(self, url, context):
"""Yank an element to the clipboard or primary selection.
Args:
url: The URL to open as a QUrl.
context: The HintContext to use.
"""
sel = context.target == Target.yank_primary
mode = QClipboard.Selection if sel else QClipboard.Clipboard
urlstr = url.toString(QUrl.FullyEncoded | QUrl.RemovePassword)
QApplication.clipboard().setText(urlstr, mode)
message.info(self._win_id, "URL yanked to {}".format(
"primary selection" if sel else "clipboard"))
def _run_cmd(self, url, context):
"""Run the command based on a hint URL.
Args:
url: The URL to open as a QUrl.
context: The HintContext to use.
"""
urlstr = url.toString(QUrl.FullyEncoded)
args = context.get_args(urlstr)
commandrunner = runners.CommandRunner(self._win_id)
commandrunner.run_safely(' '.join(args))
def _preset_cmd_text(self, url, context):
"""Preset a commandline text based on a hint URL.
Args:
url: The URL to open as a QUrl.
context: The HintContext to use.
"""
urlstr = url.toDisplayString(QUrl.FullyEncoded)
args = context.get_args(urlstr)
text = ' '.join(args)
if text[0] not in modeparsers.STARTCHARS:
message.error(self._win_id,
"Invalid command text '{}'.".format(text),
immediately=True)
else:
message.set_cmd_text(self._win_id, text)
def _download(self, elem, context):
"""Download a hint URL.
Args:
elem: The QWebElement to download.
            context: The HintContext to use.
"""
url = self._resolve_url(elem, context.baseurl)
if url is None:
self._show_url_error()
return
if context.rapid:
prompt = False
else:
prompt = None
download_manager = objreg.get('download-manager', scope='window',
window=self._win_id)
download_manager.get(url, page=elem.webFrame().page(),
prompt_download_directory=prompt)
def _call_userscript(self, elem, context):
"""Call a userscript from a hint.
Args:
elem: The QWebElement to use in the userscript.
context: The HintContext to use.
"""
cmd = context.args[0]
args = context.args[1:]
frame = context.mainframe
env = {
'QUTE_MODE': 'hints',
'QUTE_SELECTED_TEXT': str(elem),
'QUTE_SELECTED_HTML': elem.toOuterXml(),
}
url = self._resolve_url(elem, context.baseurl)
if url is not None:
env['QUTE_URL'] = url.toString(QUrl.FullyEncoded)
env.update(userscripts.store_source(frame))
userscripts.run(cmd, *args, win_id=self._win_id, env=env)
def _spawn(self, url, context):
"""Spawn a simple command from a hint.
Args:
url: The URL to open as a QUrl.
context: The HintContext to use.
"""
urlstr = url.toString(QUrl.FullyEncoded | QUrl.RemovePassword)
args = context.get_args(urlstr)
cmd, *args = args
proc = guiprocess.GUIProcess(self._win_id, what='command', parent=self)
proc.start(cmd, args)
def _resolve_url(self, elem, baseurl):
"""Resolve a URL and check if we want to keep it.
Args:
elem: The QWebElement to get the URL of.
baseurl: The baseurl of the current tab.
Return:
A QUrl with the absolute URL, or None.
"""
for attr in ('href', 'src'):
if attr in elem:
text = elem[attr]
break
else:
return None
url = QUrl(text)
if not url.isValid():
return None
if url.isRelative():
url = baseurl.resolved(url)
qtutils.ensure_valid(url)
return url
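        # Illustration (added; the URLs are hypothetical): for an element with
        # href='/foo' on a page whose baseurl is QUrl('http://example.com/bar'),
        # QUrl('/foo') is relative, so baseurl.resolved(url) yields
        # QUrl('http://example.com/foo').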
def _find_prevnext(self, frame, prev=False):
"""Find a prev/next element in frame."""
# First check for <link rel="prev(ious)|next">
elems = frame.findAllElements(
webelem.SELECTORS[webelem.Group.links])
        rel_values = ('prev', 'previous') if prev else ('next',)
for e in elems:
e = webelem.WebElementWrapper(e)
try:
rel_attr = e['rel']
except KeyError:
continue
if rel_attr in rel_values:
log.hints.debug("Found '{}' with rel={}".format(
e.debug_text(), rel_attr))
return e
# Then check for regular links/buttons.
elems = frame.findAllElements(
webelem.SELECTORS[webelem.Group.prevnext])
elems = [webelem.WebElementWrapper(e) for e in elems]
filterfunc = webelem.FILTERS[webelem.Group.prevnext]
elems = [e for e in elems if filterfunc(e)]
option = 'prev-regexes' if prev else 'next-regexes'
if not elems:
return None
for regex in config.get('hints', option):
log.hints.vdebug("== Checking regex '{}'.".format(regex.pattern))
for e in elems:
text = str(e)
if not text:
continue
if regex.search(text):
log.hints.debug("Regex '{}' matched on '{}'.".format(
regex.pattern, text))
return e
else:
log.hints.vdebug("No match on '{}'!".format(text))
return None
def _connect_frame_signals(self):
"""Connect the contentsSizeChanged signals to all frames."""
for f in self._context.frames:
log.hints.debug("Connecting frame {}".format(f))
f.contentsSizeChanged.connect(self.on_contents_size_changed)
def _check_args(self, target, *args):
"""Check the arguments passed to start() and raise if they're wrong.
Args:
target: A Target enum member.
args: Arguments for userscript/download
"""
if not isinstance(target, Target):
raise TypeError("Target {} is no Target member!".format(target))
if target in (Target.userscript, Target.spawn, Target.run,
Target.fill):
if not args:
raise cmdexc.CommandError(
"'args' is required with target userscript/spawn/run/"
"fill.")
else:
if args:
raise cmdexc.CommandError(
"'args' is only allowed with target userscript/spawn.")
def _init_elements(self):
"""Initialize the elements and labels based on the context set."""
elems = []
for f in self._context.frames:
elems += f.findAllElements(webelem.SELECTORS[self._context.group])
elems = [e for e in elems
if webelem.is_visible(e, self._context.mainframe)]
# We wrap the elements late for performance reasons, as wrapping 1000s
# of elements (with ~50 methods each) just takes too much time...
elems = [webelem.WebElementWrapper(e) for e in elems]
filterfunc = webelem.FILTERS.get(self._context.group, lambda e: True)
elems = [e for e in elems if filterfunc(e)]
if not elems:
raise cmdexc.CommandError("No elements found.")
strings = self._hint_strings(elems)
for e, string in zip(elems, strings):
label = self._draw_label(e, string)
self._context.elems[string] = ElemTuple(e, label)
keyparsers = objreg.get('keyparsers', scope='window',
window=self._win_id)
keyparser = keyparsers[usertypes.KeyMode.hint]
keyparser.update_bindings(strings)
def follow_prevnext(self, frame, baseurl, prev=False, tab=False,
background=False, window=False):
"""Click a "previous"/"next" element on the page.
Args:
frame: The frame where the element is in.
baseurl: The base URL of the current tab.
prev: True to open a "previous" link, False to open a "next" link.
tab: True to open in a new tab, False for the current tab.
background: True to open in a background tab.
window: True to open in a new window, False for the current one.
"""
from qutebrowser.mainwindow import mainwindow
elem = self._find_prevnext(frame, prev)
if elem is None:
raise cmdexc.CommandError("No {} links found!".format(
"prev" if prev else "forward"))
url = self._resolve_url(elem, baseurl)
if url is None:
raise cmdexc.CommandError("No {} links found!".format(
"prev" if prev else "forward"))
qtutils.ensure_valid(url)
if window:
new_window = mainwindow.MainWindow()
new_window.show()
tabbed_browser = objreg.get('tabbed-browser', scope='window',
window=new_window.win_id)
tabbed_browser.tabopen(url, background=False)
elif tab:
tabbed_browser = objreg.get('tabbed-browser', scope='window',
window=self._win_id)
tabbed_browser.tabopen(url, background=background)
else:
webview = objreg.get('webview', scope='tab', window=self._win_id,
tab=self._tab_id)
webview.openurl(url)
@cmdutils.register(instance='hintmanager', scope='tab', name='hint',
win_id='win_id')
def start(self, rapid=False, group=webelem.Group.all, target=Target.normal,
*args: {'nargs': '*'}, win_id):
"""Start hinting.
Args:
rapid: Whether to do rapid hinting. This is only possible with
targets `tab` (with background-tabs=true), `tab-bg`,
`window`, `run`, `hover`, `userscript` and `spawn`.
group: The hinting mode to use.
- `all`: All clickable elements.
- `links`: Only links.
- `images`: Only images.
target: What to do with the selected element.
- `normal`: Open the link in the current tab.
- `tab`: Open the link in a new tab (honoring the
background-tabs setting).
- `tab-fg`: Open the link in a new foreground tab.
- `tab-bg`: Open the link in a new background tab.
- `window`: Open the link in a new window.
- `hover` : Hover over the link.
- `yank`: Yank the link to the clipboard.
- `yank-primary`: Yank the link to the primary selection.
- `run`: Run the argument as command.
- `fill`: Fill the commandline with the command given as
argument.
- `download`: Download the link.
- `userscript`: Call a userscript with `$QUTE_URL` set to the
link.
- `spawn`: Spawn a command.
*args: Arguments for spawn/userscript/run/fill.
- With `spawn`: The executable and arguments to spawn.
`{hint-url}` will get replaced by the selected
URL.
- With `userscript`: The userscript to execute. Either store
the userscript in
`~/.local/share/qutebrowser/userscripts`
(or `$XDG_DATA_DIR`), or use an absolute
path.
- With `fill`: The command to fill the statusbar with.
`{hint-url}` will get replaced by the selected
URL.
- With `run`: Same as `fill`.
"""
tabbed_browser = objreg.get('tabbed-browser', scope='window',
window=self._win_id)
widget = tabbed_browser.currentWidget()
if widget is None:
raise cmdexc.CommandError("No WebView available yet!")
mainframe = widget.page().mainFrame()
if mainframe is None:
raise cmdexc.CommandError("No frame focused!")
mode_manager = objreg.get('mode-manager', scope='window',
window=self._win_id)
if mode_manager.mode == usertypes.KeyMode.hint:
modeman.leave(win_id, usertypes.KeyMode.hint, 're-hinting')
if rapid:
if target in [Target.tab_bg, Target.window, Target.run,
Target.hover, Target.userscript, Target.spawn,
Target.download]:
pass
elif (target == Target.tab and
config.get('tabs', 'background-tabs')):
pass
else:
name = target.name.replace('_', '-')
raise cmdexc.CommandError("Rapid hinting makes no sense with "
"target {}!".format(name))
self._check_args(target, *args)
self._context = HintContext()
self._context.target = target
self._context.rapid = rapid
try:
self._context.baseurl = tabbed_browser.current_url()
except qtutils.QtValueError:
raise cmdexc.CommandError("No URL set for this page yet!")
self._context.frames = webelem.get_child_frames(mainframe)
for frame in self._context.frames:
# WORKAROUND for
# https://github.com/The-Compiler/qutebrowser/issues/152
frame.destroyed.connect(functools.partial(
self._context.destroyed_frames.append, id(frame)))
self._context.args = args
self._context.mainframe = mainframe
self._context.group = group
self._init_elements()
message_bridge = objreg.get('message-bridge', scope='window',
window=self._win_id)
message_bridge.set_text(self._get_text())
self._connect_frame_signals()
modeman.enter(self._win_id, usertypes.KeyMode.hint,
'HintManager.start')
def handle_partial_key(self, keystr):
"""Handle a new partial keypress."""
log.hints.debug("Handling new keystring: '{}'".format(keystr))
for (string, elems) in self._context.elems.items():
try:
if string.startswith(keystr):
matched = string[:len(keystr)]
rest = string[len(keystr):]
match_color = config.get('colors', 'hints.fg.match')
elems.label.setInnerXml(
'<font color="{}">{}</font>{}'.format(
match_color, matched, rest))
if self._is_hidden(elems.label):
# hidden element which matches again -> show it
self._show_elem(elems.label)
else:
# element doesn't match anymore -> hide it
self._hide_elem(elems.label)
except webelem.IsNullError:
pass
def filter_hints(self, filterstr):
"""Filter displayed hints according to a text.
Args:
filterstr: The string to filter with, or None to show all.
"""
for elems in self._context.elems.values():
try:
if (filterstr is None or
filterstr.casefold() in str(elems.elem).casefold()):
if self._is_hidden(elems.label):
# hidden element which matches again -> show it
self._show_elem(elems.label)
else:
# element doesn't match anymore -> hide it
self._hide_elem(elems.label)
except webelem.IsNullError:
pass
visible = {}
for k, e in self._context.elems.items():
try:
if not self._is_hidden(e.label):
visible[k] = e
except webelem.IsNullError:
pass
if not visible:
# Whoops, filtered all hints
modeman.leave(self._win_id, usertypes.KeyMode.hint, 'all filtered')
elif len(visible) == 1 and config.get('hints', 'auto-follow'):
# unpacking gets us the first (and only) key in the dict.
self.fire(*visible)
def fire(self, keystr, force=False):
"""Fire a completed hint.
Args:
keystr: The keychain string to follow.
force: When True, follow even when auto-follow is false.
"""
if not (force or config.get('hints', 'auto-follow')):
self.handle_partial_key(keystr)
self._context.to_follow = keystr
return
# Handlers which take a QWebElement
elem_handlers = {
Target.normal: self._click,
Target.tab: self._click,
Target.tab_fg: self._click,
Target.tab_bg: self._click,
Target.window: self._click,
Target.hover: self._click,
# _download needs a QWebElement to get the frame.
Target.download: self._download,
Target.userscript: self._call_userscript,
}
# Handlers which take a QUrl
url_handlers = {
Target.yank: self._yank,
Target.yank_primary: self._yank,
Target.run: self._run_cmd,
Target.fill: self._preset_cmd_text,
Target.spawn: self._spawn,
}
elem = self._context.elems[keystr].elem
if elem.webFrame() is None:
message.error(self._win_id, "This element has no webframe.",
immediately=True)
return
if self._context.target in elem_handlers:
handler = functools.partial(
elem_handlers[self._context.target], elem, self._context)
elif self._context.target in url_handlers:
url = self._resolve_url(elem, self._context.baseurl)
if url is None:
self._show_url_error()
return
handler = functools.partial(
url_handlers[self._context.target], url, self._context)
else:
raise ValueError("No suitable handler found!")
if not self._context.rapid:
modeman.maybe_leave(self._win_id, usertypes.KeyMode.hint,
'followed')
else:
# Show all hints again
self.filter_hints(None)
# Undo keystring highlighting
for (string, elems) in self._context.elems.items():
elems.label.setInnerXml(string)
handler()
@cmdutils.register(instance='hintmanager', scope='tab', hide=True,
modes=[usertypes.KeyMode.hint])
def follow_hint(self, keystring=None):
"""Follow a hint.
Args:
keystring: The hint to follow, or None.
"""
if keystring is None:
if self._context.to_follow is None:
raise cmdexc.CommandError("No hint to follow")
else:
keystring = self._context.to_follow
elif keystring not in self._context.elems:
raise cmdexc.CommandError("No hint {}!".format(keystring))
self.fire(keystring, force=True)
@pyqtSlot('QSize')
def on_contents_size_changed(self, _size):
"""Reposition hints if contents size changed."""
log.hints.debug("Contents size changed...!")
for elems in self._context.elems.values():
try:
if elems.elem.webFrame() is None:
# This sometimes happens for some reason...
elems.label.removeFromDocument()
continue
self._set_style_position(elems.elem, elems.label)
except webelem.IsNullError:
pass
@pyqtSlot(usertypes.KeyMode)
def on_mode_left(self, mode):
"""Stop hinting when hinting mode was left."""
if mode != usertypes.KeyMode.hint or self._context is None:
# We have one HintManager per tab, so when this gets called,
# self._context might be None, because the current tab is not
# hinting.
return
self._cleanup()
| 1 | 14,065 | nitpick: Please only `import string` here and use `string.ascii_lowercase` - I really like to see what module stuff is coming from without looking at the imports. | qutebrowser-qutebrowser | py |
@@ -2822,10 +2822,10 @@ exports.shouldCorrectlyLogoutFromTheDatabaseWithPromises = {
db.close();
test.done();
- }).catch(function(err) { })
- }).catch(function(err) { })
- }).catch(function(err) { })
- }).catch(function(err) { })
+ }).catch(function(err) { console.dir(err )})
+ }).catch(function(err) { console.dir(err )})
+ }).catch(function(err) { console.dir(err )})
+ }).catch(function(err) { console.dir(err )})
});
// END
} | 1 | "use strict";
var f = require('util').format;
/**************************************************************************
*
* COLLECTION TESTS
*
*************************************************************************/
/**
* Call toArray on an aggregation cursor using a Promise
*
* @example-class Collection
* @example-method aggregate
* @ignore
*/
exports.aggregationExample2WithPromises = {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: { requires: { promises:true, mongodb:">2.1.0", topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
// Some docs for insertion
var docs = [{
title : "this is my title", author : "bob", posted : new Date() ,
pageViews : 5, tags : [ "fun" , "good" , "fun" ], other : { foo : 5 },
comments : [
{ author :"joe", text : "this is cool" }, { author :"sam", text : "this is bad" }
]}];
// Create a collection
var collection = db.collection('aggregationExample2_with_promise');
// Insert the docs
collection.insertMany(docs, {w: 1}).then(function(result) {
// Execute aggregate, notice the pipeline is expressed as an Array
var cursor = collection.aggregate([
{ $project : {
author : 1,
tags : 1
}},
{ $unwind : "$tags" },
{ $group : {
_id : {tags : "$tags"},
authors : { $addToSet : "$author" }
}}
], { cursor: { batchSize: 1 } });
// Get all the aggregation results
cursor.toArray().then(function(docs) {
test.equal(2, docs.length);
test.done();
db.close();
}).catch(function(err) {
console.log(err.stack);
});
});
});
// END
}
}
/**
* Call next on an aggregation cursor using a Promise
*
* @example-class AggregationCursor
* @example-method next
* @ignore
*/
exports['Aggregation Cursor next Test With Promises'] = {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: { requires: { promises:true, mongodb:">2.1.0", topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
// Some docs for insertion
var docs = [{
title : "this is my title", author : "bob", posted : new Date() ,
pageViews : 5, tags : [ "fun" , "good" , "fun" ], other : { foo : 5 },
comments : [
{ author :"joe", text : "this is cool" }, { author :"sam", text : "this is bad" }
]}];
// Create a collection
var collection = db.collection('aggregation_next_example_with_promise');
// Insert the docs
collection.insertMany(docs, {w: 1}).then(function(result) {
// Execute aggregate, notice the pipeline is expressed as an Array
var cursor = collection.aggregate([
{ $project : {
author : 1,
tags : 1
}},
{ $unwind : "$tags" },
{ $group : {
_id : {tags : "$tags"},
authors : { $addToSet : "$author" }
}}
], { cursor: { batchSize: 1 } });
// Get all the aggregation results
cursor.next().then(function(docs) {
test.done();
db.close();
}).catch(function(err) {
console.dir(err)
});
});
});
// END
}
}
/**
* Example of running simple count commands against a collection using a Promise.
*
* @example-class Collection
* @example-method count
* @ignore
*/
exports.shouldCorrectlyDoSimpleCountExamplesWithPromises = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance({w:0}, {poolSize:1});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
      // Create the collection for the count example
var collection = db.collection('countExample1_with_promise');
// Insert documents to perform distinct against
collection.insertMany([{a:1}, {a:2}
, {a:3}, {a:4, b:1}], {w: 1}).then(function(ids) {
// Perform a total count command
collection.count().then(function(count) {
test.equal(4, count);
          // Perform a partial count where b=1
collection.count({b:1}).then(function(count) {
test.equal(1, count);
db.close();
test.done();
});
});
});
});
// END
}
}
/**
* A more complex createIndex using a Promise and a compound unique index in the background and dropping duplicated documents
*
* @example-class Collection
* @example-method createIndex
* @ignore
*/
exports.shouldCreateComplexIndexOnTwoFieldsWithPromises = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1, auto_reconnect:false});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
// Create a collection we want to drop later
var collection = db.collection('createIndexExample1_with_promise');
// Insert a bunch of documents for the index
collection.insertMany([{a:1, b:1}
, {a:2, b:2}, {a:3, b:3}, {a:4, b:4}], configuration.writeConcernMax()).then(function(result) {
// Create an index on the a field
collection.createIndex({a:1, b:1}
, {unique:true, background:true, w:1}).then(function(indexName) {
// Show that duplicate records got dropped
collection.find({}).toArray().then(function(items) {
test.equal(4, items.length);
// Perform a query, with explain to show we hit the query
collection.find({a:2}).explain().then(function(explanation) {
test.ok(explanation != null);
db.close();
test.done();
});
})
});
});
});
// END
}
}
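/*
 * Side note (added for illustration, not one of the generated examples): because insertMany,
 * createIndex and toArray all return promises when no callback is supplied, the nested calls
 * above could also be written as a flat chain, e.g.
 *
 *   collection.insertMany(docs, configuration.writeConcernMax())
 *     .then(function() { return collection.createIndex({a:1, b:1}, {unique:true, background:true, w:1}); })
 *     .then(function() { return collection.find({}).toArray(); })
 *     .then(function(items) { test.equal(4, items.length); });
 *
 * where docs stands for the four documents inserted above.
 */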
/**
* Example of running the distinct command using a Promise against a collection
*
* @example-class Collection
* @example-method distinct
* @ignore
*/
exports.shouldCorrectlyHandleDistinctIndexesWithSubQueryFilterWithPromises = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
      // Create the collection for the distinct example
var collection = db.collection('distinctExample1_with_promise');
// Insert documents to perform distinct against
collection.insertMany([{a:0, b:{c:'a'}}, {a:1, b:{c:'b'}}, {a:1, b:{c:'c'}},
{a:2, b:{c:'a'}}, {a:3}, {a:3}], configuration.writeConcernMax()).then(function(ids) {
// Perform a distinct query against the a field
collection.distinct('a').then(function(docs) {
test.deepEqual([0, 1, 2, 3], docs.sort());
// Perform a distinct query against the sub-field b.c
collection.distinct('b.c').then(function(docs) {
test.deepEqual(['a', 'b', 'c'], docs.sort());
db.close();
test.done();
});
});
});
});
// END
}
}
/**
* Example of running the distinct command against a collection using a Promise with a filter query
*
* @ignore
*/
exports.shouldCorrectlyHandleDistinctIndexesWithPromises = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
      // Create the collection for the distinct example
var collection = db.collection('distinctExample2_with_promise');
// Insert documents to perform distinct against
collection.insertMany([{a:0, b:{c:'a'}}, {a:1, b:{c:'b'}}, {a:1, b:{c:'c'}},
{a:2, b:{c:'a'}}, {a:3}, {a:3}, {a:5, c:1}], configuration.writeConcernMax(), function(err, ids) {
// Perform a distinct query with a filter against the documents
collection.distinct('a', {c:1}).then(function(docs) {
test.deepEqual([5], docs.sort());
db.close();
test.done();
});
})
});
// END
}
}
/**
* Example of Collection.prototype.drop using a Promise
*
* @example-class Collection
* @example-method drop
* @ignore
*/
exports.shouldCorrectlyDropCollectionWithDropFunctionWithPromises = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
// Create a collection we want to drop later
db.createCollection('test_other_drop_with_promise').then(function(collection) {
// Drop the collection
collection.drop().then(function(reply) {
// Ensure we don't have the collection in the set of names
db.listCollections().toArray().then(function(replies) {
var found = false;
// For each collection in the list of collection names in this db look for the
// dropped collection
replies.forEach(function(document) {
if(document.name == "test_other_drop_with_promise") {
found = true;
return;
}
});
// Ensure the collection is not found
test.equal(false, found);
// Let's close the db
db.close();
test.done();
});
});
});
});
// END
}
}
/**
 * Example of how to drop all the indexes on a collection using dropAllIndexes with a Promise
*
* @example-class Collection
* @example-method dropAllIndexes
* @ignore
*/
exports.dropAllIndexesExample1WithPromises = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
db.createCollection('dropExample1_with_promise').then(function(r) {
// Drop the collection
db.collection('dropExample1_with_promise').dropAllIndexes().then(function(reply) {
// Let's close the db
db.close();
test.done();
});
});
});
// END
}
}
/**
 * An example showing the creation and dropping of an index using a Promise
*
* @example-class Collection
* @example-method dropIndex
* @ignore
*/
exports.shouldCorrectlyCreateAndDropIndexWithPromises = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance({w:0}, {poolSize:1, auto_reconnect:true});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
var collection = db.collection('dropIndexExample1_with_promise');
// Insert a bunch of documents for the index
collection.insertMany([{a:1, b:1}
, {a:2, b:2}, {a:3, b:3}, {a:4, b:4}], {w:1}).then(function(result) {
// Create an index on the a field
collection.ensureIndex({a:1, b:1}
, {unique:true, background:true, w:1}).then(function(indexName) {
// Drop the index
collection.dropIndex("a_1_b_1").then(function(result) {
// Verify that the index is gone
collection.indexInformation().then(function(indexInformation) {
test.deepEqual([ [ '_id', 1 ] ], indexInformation._id_);
test.equal(null, indexInformation.a_1_b_1);
db.close();
test.done();
});
});
});
});
});
// END
}
}
/**
* A more complex ensureIndex using a compound unique index in the background and dropping duplicated documents using a Promise.
*
* @example-class Collection
* @example-method ensureIndex
* @ignore
*/
exports.shouldCreateComplexEnsureIndexWithPromises = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1, auto_reconnect:false});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
var collection = db.collection('ensureIndexExample1_with_promise');
// Insert a bunch of documents for the index
collection.insertMany([{a:1, b:1}
, {a:2, b:2}, {a:3, b:3}, {a:4, b:4}], configuration.writeConcernMax()).then(function(result) {
// Create an index on the a field
db.ensureIndex('ensureIndexExample1_with_promise', {a:1, b:1}
, {unique:true, background:true, w:1}).then(function(indexName) {
// Show that duplicate records got dropped
collection.find({}).toArray().then(function(items) {
test.equal(4, items.length);
// Perform a query, with explain to show we hit the query
collection.find({a:2}).explain().then(function(explanation) {
test.ok(explanation != null);
db.close();
test.done();
});
})
});
});
});
// END
}
}
/**
* A more complex ensureIndex using a compound unique index in the background using a Promise.
*
* @example-class Collection
* @example-method ensureIndex
* @ignore
*/
exports.ensureIndexExampleWithCompountIndexWithPromises = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance({w:0}, {poolSize:1, auto_reconnect:true});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
var collection = db.collection('ensureIndexExample2_with_promise');
// Insert a bunch of documents for the index
collection.insertMany([{a:1, b:1}
, {a:2, b:2}, {a:3, b:3}, {a:4, b:4}], {w:1}).then(function(result) {
// Create an index on the a field
collection.ensureIndex({a:1, b:1}
, {unique:true, background:true, w:1}).then(function(indexName) {
// Show that duplicate records got dropped
collection.find({}).toArray().then(function(items) {
test.equal(4, items.length);
// Perform a query, with explain to show we hit the query
collection.find({a:2}).explain().then(function(explanation) {
test.ok(explanation != null);
db.close();
test.done();
});
})
});
});
});
// END
}
}
/**
* A simple query using the find method and toArray method with a Promise.
*
* @example-class Collection
* @example-method find
* @ignore
*/
exports.shouldPerformASimpleQueryWithPromises = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1, auto_reconnect:false});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
// Create a collection we want to drop later
var collection = db.collection('simple_query_with_promise');
// Insert a bunch of documents for the testing
collection.insertMany([{a:1}, {a:2}, {a:3}], configuration.writeConcernMax()).then(function(result) {
// Perform a simple find and return all the documents
collection.find().toArray().then(function(docs) {
test.equal(3, docs.length);
db.close();
test.done();
});
});
});
// END
}
}
/**
* A simple query showing the explain for a query using a Promise.
*
* @example-class Collection
* @example-method find
* @ignore
*/
exports.shouldPerformASimpleExplainQueryWithPromises = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1, auto_reconnect:false});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
// Create a collection we want to drop later
var collection = db.collection('simple_explain_query_with_promise');
// Insert a bunch of documents for the testing
collection.insertMany([{a:1}, {a:2}, {a:3}], configuration.writeConcernMax()).then(function(result) {
// Perform a simple find and return all the documents
collection.find({}).explain().then(function(docs) {
test.ok(docs != null);
db.close();
test.done();
});
});
});
// END
}
}
/**
* A simple query showing skip and limit using a Promise.
*
* @example-class Collection
* @example-method find
* @ignore
*/
exports.shouldPerformASimpleLimitSkipQueryWithPromises = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1, auto_reconnect:false});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
// Create a collection we want to drop later
var collection = db.collection('simple_limit_skip_query_with_promise');
// Insert a bunch of documents for the testing
collection.insertMany([{a:1, b:1}, {a:2, b:2}, {a:3, b:3}], configuration.writeConcernMax()).then(function(result) {
// Perform a simple find and return all the documents
collection.find({})
.skip(1).limit(1).project({b:1}).toArray().then(function(docs) {
test.equal(1, docs.length);
test.equal(null, docs[0].a);
test.equal(2, docs[0].b);
db.close();
test.done();
});
});
});
// END
}
}
/**
 * A whole set of different ways to use the findAndModify command with a Promise.
*
* The first findAndModify command modifies a document and returns the modified document back.
* The second findAndModify command removes the document.
 * The third findAndModify command upserts a document and returns the new document.
*
* @example-class Collection
* @example-method findAndModify
* @ignore
*/
exports.shouldPerformSimpleFindAndModifyOperationsWithPromises = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1, auto_reconnect:false});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
// Create a collection we want to drop later
var collection = db.collection('simple_find_and_modify_operations_with_promise');
      // Insert some test documents
collection.insertMany([{a:1}, {b:1}, {c:1}], configuration.writeConcernMax()).then(function(result) {
// Simple findAndModify command returning the new document
collection.findAndModify({a:1}, [['a', 1]], {$set:{b1:1}}, {new:true}).then(function(doc) {
test.equal(1, doc.value.a);
test.equal(1, doc.value.b1);
// Simple findAndModify command returning the new document and
// removing it at the same time
collection.findAndModify({b:1}, [['b', 1]],
{$set:{b:2}}, {remove:true}).then(function(doc) {
// Verify that the document is gone
collection.findOne({b:1}).then(function(item) {
test.equal(null, item);
// Simple findAndModify command performing an upsert and returning the new document
// executing the command safely
collection.findAndModify({d:1}, [['b', 1]],
{d:1, f:1}, {new:true, upsert:true, w:1}).then(function(doc) {
test.equal(1, doc.value.d);
test.equal(1, doc.value.f);
db.close();
test.done();
})
});
});
});
});
});
// END
}
}
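/*
 * Note (added for illustration): as the assertions in the example above show, findAndModify
 * resolves with a result object whose `value` property carries the affected document
 * (for instance doc.value.a === 1 after the first call); the other fields on the result,
 * such as lastErrorObject and ok, depend on the server response.
 */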
/**
* An example of using findAndRemove using a Promise.
*
* @example-class Collection
* @example-method findAndRemove
* @ignore
*/
exports.shouldPerformSimpleFindAndRemoveWithPromises = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1, auto_reconnect:false});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
// Create a collection we want to drop later
var collection = db.collection('simple_find_and_modify_operations_2_with_promise');
      // Insert some test documents
collection.insertMany([{a:1}, {b:1, d:1}, {c:1}], configuration.writeConcernMax()).then(function(result) {
// Simple findAndModify command returning the old document and
// removing it at the same time
collection.findAndRemove({b:1}, [['b', 1]]).then(function(doc) {
test.equal(1, doc.value.b);
test.equal(1, doc.value.d);
// Verify that the document is gone
collection.findOne({b:1}).then(function(item) {
test.equal(null, item);
db.close();
test.done();
});
});
});
});
// END
}
}
/**
* A simple query using findOne with a Promise.
*
* @example-class Collection
* @example-method findOne
* @ignore
*/
exports.shouldPerformASimpleLimitSkipFindOneQueryWithPromises = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1, auto_reconnect:false});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
// Create a collection we want to drop later
var collection = db.collection('simple_limit_skip_find_one_query_with_promise');
// Insert a bunch of documents for the testing
collection.insertMany([{a:1, b:1}, {a:2, b:2}, {a:3, b:3}], configuration.writeConcernMax()).then(function(result) {
// Perform a simple find and return all the documents
collection.findOne({a:2}, {fields:{b:1}}).then(function(doc) {
test.equal(null, doc.a);
test.equal(2, doc.b);
db.close();
test.done();
});
});
});
// END
}
}
/**
* Example of a simple geoNear query across some documents using a Promise.
*
* @example-class Collection
* @example-method geoNear
* @ignore
*/
exports.shouldCorrectlyPerformSimpleGeoNearCommandWithPromises = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
// Fetch the collection
var collection = db.collection("simple_geo_near_command_with_promise");
// Add a location based index
collection.ensureIndex({loc:"2d"}).then(function(result) {
// Save a new location tagged document
collection.insertMany([{a:1, loc:[50, 30]}, {a:1, loc:[30, 50]}], configuration.writeConcernMax()).then(function(result) {
// Use geoNear command to find document
collection.geoNear(50, 50, {query:{a:1}, num:1}).then(function(docs) {
test.equal(1, docs.results.length);
db.close();
test.done();
});
});
});
});
// END
}
}
/**
* Example of a simple geoHaystackSearch query across some documents using a Promise.
*
* @example-class Collection
* @example-method geoHaystackSearch
* @ignore
*/
exports.shouldCorrectlyPerformSimpleGeoHaystackSearchCommandWithPromises = {
metadata: { requires: { promises:true, topology: ["single", "replicaset"] } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
// Fetch the collection
var collection = db.collection("simple_geo_haystack_command_with_promise");
// Add a location based index
collection.ensureIndex({loc: "geoHaystack", type: 1}, {bucketSize: 1}).then(function(result) {
// Save a new location tagged document
collection.insertMany([{a:1, loc:[50, 30]}, {a:1, loc:[30, 50]}], configuration.writeConcernMax()).then(function(result) {
          // Use geoHaystackSearch command to find the document
collection.geoHaystackSearch(50, 50, {search:{a:1}, limit:1, maxDistance:100}).then(function(docs) {
test.equal(1, docs.results.length);
db.close();
test.done();
});
});
});
});
// END
}
}
/**
* A whole lot of different ways to execute the group command using a Promise.
*
* @example-class Collection
* @example-method group
* @ignore
*/
exports.shouldCorrectlyExecuteGroupFunctionWithPromises = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var Code = configuration.require.Code;
var db = configuration.newDbInstance({w:0}, {poolSize:1});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE Code = require('mongodb').Code,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
// Create a test collection
var collection = db.collection('test_group_with_promise');
// Perform a simple group by on an empty collection
collection.group([], {}, {"count":0}, "function (obj, prev) { prev.count++; }").then(function(results) {
test.deepEqual([], results);
// Trigger some inserts on the collection
collection.insertMany([{'a':2}, {'b':5}, {'a':1}], {w:1}).then(function(ids) {
// Perform a group count
collection.group([], {}, {"count":0}, "function (obj, prev) { prev.count++; }").then(function(results) {
test.equal(3, results[0].count);
// Perform a group count using the eval method
collection.group([], {}, {"count":0}, "function (obj, prev) { prev.count++; }", false).then(function(results) {
test.equal(3, results[0].count);
// Group with a conditional
collection.group([], {'a':{'$gt':1}}, {"count":0}, "function (obj, prev) { prev.count++; }").then(function(results) {
// Results
test.equal(1, results[0].count);
// Group with a conditional using the EVAL method
collection.group([], {'a':{'$gt':1}}, {"count":0}, "function (obj, prev) { prev.count++; }" , false).then(function(results) {
// Results
test.equal(1, results[0].count);
// Insert some more test data
collection.insertMany([{'a':2}, {'b':3}], {w:1}).then(function(ids) {
// Do a Group by field a
collection.group(['a'], {}, {"count":0}, "function (obj, prev) { prev.count++; }").then(function(results) {
// Results
test.equal(2, results[0].a);
test.equal(2, results[0].count);
test.equal(null, results[1].a);
test.equal(2, results[1].count);
test.equal(1, results[2].a);
test.equal(1, results[2].count);
// Do a Group by field a
collection.group({'a':true}, {}, {"count":0}, function (obj, prev) { prev.count++; }, true).then(function(results) {
// Results
test.equal(2, results[0].a);
test.equal(2, results[0].count);
test.equal(null, results[1].a);
test.equal(2, results[1].count);
test.equal(1, results[2].a);
test.equal(1, results[2].count);
// Correctly handle illegal function
                        collection.group([], {}, {}, "5 ++ 5").then(function(results) {
}).catch(function(err) {
test.ok(err.message != null);
// Use a function to select the keys used to group by
var keyf = function(doc) { return {a: doc.a}; };
collection.group(keyf, {a: {$gt: 0}}, {"count": 0, "value": 0}, function(obj, prev) { prev.count++; prev.value += obj.a; }, true).then(function(results) {
// Results
results.sort(function(a, b) { return b.count - a.count; });
test.equal(2, results[0].count);
test.equal(2, results[0].a);
test.equal(4, results[0].value);
test.equal(1, results[1].count);
test.equal(1, results[1].a);
test.equal(1, results[1].value);
// Use a Code object to select the keys used to group by
var keyf = new Code(function(doc) { return {a: doc.a}; });
collection.group(keyf, {a: {$gt: 0}}, {"count": 0, "value": 0}, function(obj, prev) { prev.count++; prev.value += obj.a; }, true).then(function(results) {
// Results
results.sort(function(a, b) { return b.count - a.count; });
test.equal(2, results[0].count);
test.equal(2, results[0].a);
test.equal(4, results[0].value);
test.equal(1, results[1].count);
test.equal(1, results[1].a);
test.equal(1, results[1].value);
// Correctly handle illegal function when using the EVAL method
collection.group([], {}, {}, "5 ++ 5", false).then(function(results) {
}).catch(function(err) {
test.ok(err.message != null);
db.close();
test.done();
});
});
});
});
});
});
});
});
});
});
});
});
});
});
// END
}
}
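/*
 * Note: a compact sketch of the group() argument order exercised above:
 * group(keys, condition, initial, reduce, ...). The keys argument can be an array of
 * field names, an object, a plain function or a Code object; the trailing boolean in
 * some of the calls above switches between command- and eval-based execution (per the
 * comments in the example). Illustrative only, assuming `collection` as defined above.
 *
 *   collection.group(['a'], {a:{$gt:0}}, {count:0},
 *     "function (obj, prev) { prev.count++; }").then(function(results) {
 *     console.log(results); // one result document per distinct value of a
 *   });
 */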
/**
* A simple map reduce example using a Promise.
*
* @example-class Collection
* @example-method mapReduce
* @ignore
*/
exports.shouldPerformSimpleMapReduceFunctionsWithPromises = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance({w:0}, {poolSize:1});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
// Create a test collection
var collection = db.collection('test_map_reduce_functions_with_promise');
// Insert some documents to perform map reduce over
collection.insertMany([{'user_id':1}, {'user_id':2}], {w:1}).then(function(r) {
// Map function
var map = function() { emit(this.user_id, 1); };
// Reduce function
var reduce = function(k,vals) { return 1; };
// Perform the map reduce
collection.mapReduce(map, reduce, {out: {replace : 'tempCollection'}}).then(function(collection) {
// Mapreduce returns the temporary collection with the results
collection.findOne({'_id':1}).then(function(result) {
test.equal(1, result.value);
collection.findOne({'_id':2}).then(function(result) {
test.equal(1, result.value);
db.close();
test.done();
});
});
});
});
});
// END
}
}
/**
* A simple map reduce example using the inline output type on MongoDB > 1.7.6 returning the statistics using a Promise.
*
* @example-class Collection
* @example-method mapReduce
* @ignore
*/
exports.shouldPerformMapReduceFunctionInlineWithPromises = {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: { requires: { promises:true, mongodb: '>1.7.6', topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance({w:0}, {poolSize:1});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
// Create a test collection
var collection = db.collection('test_map_reduce_functions_inline_with_promise');
// Insert some test documents
collection.insertMany([{'user_id':1}, {'user_id':2}], {w:1}).then(function(r) {
// Map function
var map = function() { emit(this.user_id, 1); };
// Reduce function
var reduce = function(k,vals) { return 1; };
// Execute map reduce and return results inline
collection.mapReduce(map, reduce, {out : {inline: 1}, verbose:true}).then(function(result) {
test.equal(2, result.results.length);
test.ok(result.stats != null);
collection.mapReduce(map, reduce, {out : {replace: 'mapreduce_integration_test'}, verbose:true}).then(function(result) {
test.ok(result.stats != null);
db.close();
test.done();
});
});
});
});
// END
}
}
/**
 * MapReduce using a provided scope containing a JavaScript function, executed using a Promise.
*
* @example-class Collection
* @example-method mapReduce
* @ignore
*/
exports.shouldPerformMapReduceWithContextWithPromises = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var Code = configuration.require.Code;
var db = configuration.newDbInstance({w:0}, {poolSize:1});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE Code = require('mongodb').Code,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
// Create a test collection
var collection = db.collection('test_map_reduce_functions_scope_with_promise');
// Insert some test documents
collection.insertMany([{'user_id':1, 'timestamp':new Date()}
, {'user_id':2, 'timestamp':new Date()}], {w:1}).then(function(r) {
// Map function
var map = function(){
emit(fn(this.timestamp.getYear()), 1);
}
// Reduce function
var reduce = function(k, v){
count = 0;
for(i = 0; i < v.length; i++) {
count += v[i];
}
return count;
}
// Javascript function available in the map reduce scope
var t = function(val){ return val+1; }
// Execute the map reduce with the custom scope
var o = {};
o.scope = { fn: new Code(t.toString()) }
o.out = { replace: 'replacethiscollection' }
collection.mapReduce(map, reduce, o).then(function(outCollection) {
// Find all entries in the map-reduce collection
outCollection.find().toArray().then(function(results) {
test.equal(2, results[0].value)
// mapReduce with scope containing plain function
var o = {};
o.scope = { fn: t }
o.out = { replace: 'replacethiscollection' }
collection.mapReduce(map, reduce, o).then(function(outCollection) {
// Find all entries in the map-reduce collection
outCollection.find().toArray().then(function(results) {
test.equal(2, results[0].value)
db.close();
test.done();
});
});
});
});
});
});
// END
}
}
/**
 * MapReduce using a scope containing JavaScript objects with functions, using a Promise.
*
* @example-class Collection
* @example-method mapReduce
* @ignore
*/
exports.shouldPerformMapReduceInContextObjectsWithPromises = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var Code = configuration.require.Code;
var db = configuration.newDbInstance({w:0}, {poolSize:1});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE Code = require('mongodb').Code,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
// Create a test collection
var collection = db.collection('test_map_reduce_functions_scope_objects_with_promise');
// Insert some test documents
collection.insertMany([{'user_id':1, 'timestamp':new Date()}
, {'user_id':2, 'timestamp':new Date()}], {w:1}).then(function(r) {
// Map function
var map = function(){
emit(obj.fn(this.timestamp.getYear()), 1);
}
// Reduce function
var reduce = function(k, v){
count = 0;
for(i = 0; i < v.length; i++) {
count += v[i];
}
return count;
}
// Javascript function available in the map reduce scope
var t = function(val){ return val+1; }
// Execute the map reduce with the custom scope containing objects
var o = {};
o.scope = { obj: {fn: new Code(t.toString())} }
o.out = { replace: 'replacethiscollection' }
collection.mapReduce(map, reduce, o).then(function(outCollection) {
// Find all entries in the map-reduce collection
outCollection.find().toArray().then(function(results) {
test.equal(2, results[0].value)
// mapReduce with scope containing plain function
var o = {};
o.scope = { obj: {fn: t} }
o.out = { replace: 'replacethiscollection' }
collection.mapReduce(map, reduce, o).then(function(outCollection) {
// Find all entries in the map-reduce collection
outCollection.find().toArray().then(function(results) {
test.equal(2, results[0].value)
db.close();
test.done();
});
});
});
});
});
});
// END
}
}
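/*
 * Note: a minimal sketch of the mapReduce options object used in the scope examples
 * above. Illustrative only; `map`, `reduce` and `collection` are assumed to be defined
 * as in those examples.
 *
 *   var options = {
 *     // write the output to this collection and return it from the promise
 *     out: { replace: 'replacethiscollection' },
 *     // values made available inside the map/reduce functions
 *     scope: { fn: function(val) { return val + 1; } }
 *   };
 *   collection.mapReduce(map, reduce, options).then(function(outCollection) {
 *     return outCollection.find().toArray();
 *   }).then(function(results) {
 *     console.log(results);
 *   });
 */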
/**
 * Example of retrieving a collection's indexes using a Promise.
*
* @example-class Collection
* @example-method indexes
* @ignore
*/
exports.shouldCorrectlyRetriveACollectionsIndexesWithPromises = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
      // Create the collection for the indexes example
var collection = db.collection('simple_key_based_distinct_with_promise');
// Create a geo 2d index
collection.ensureIndex({loc:"2d"}, configuration.writeConcernMax()).then(function(result) {
// Create a simple single field index
collection.ensureIndex({a:1}, configuration.writeConcernMax()).then(function(result) {
setTimeout(function() {
// List all of the indexes on the collection
collection.indexes().then(function(indexes) {
test.equal(3, indexes.length);
db.close();
test.done();
});
}, 1000);
});
});
});
// END
}
}
/**
* An example showing the use of the indexExists function using a Promise for a single index name and a list of index names.
*
* @example-class Collection
* @example-method indexExists
* @ignore
*/
exports.shouldCorrectlyExecuteIndexExistsWithPromises = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
      // Create a test collection that we will check the indexes on
var collection = db.collection('test_collection_index_exists_with_promise', configuration.writeConcernMax());
// Create an index on the collection
collection.createIndex('a', configuration.writeConcernMax()).then(function(indexName) {
// Let's test to check if a single index exists
collection.indexExists("a_1").then(function(result) {
test.equal(true, result);
// Let's test to check if multiple indexes are available
collection.indexExists(["a_1", "_id_"]).then(function(result) {
test.equal(true, result);
// Check if a non existing index exists
collection.indexExists("c_1").then(function(result) {
test.equal(false, result);
db.close();
test.done();
});
});
});
});
});
// END
}
}
/**
* An example showing the information returned by indexInformation using a Promise.
*
* @example-class Collection
* @example-method indexInformation
* @ignore
*/
exports.shouldCorrectlyShowTheResultsFromIndexInformationWithPromises = {
metadata: {
requires: { promises:true, topology: ["single", "replicaset"] }
},
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance({w:0, native_parser:false}, {poolSize:1, auto_reconnect:false});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
// Create a collection we want to drop later
var collection = db.collection('more_index_information_test_2_with_promise');
// Insert a bunch of documents for the index
collection.insertMany([{a:1, b:1}
, {a:2, b:2}, {a:3, b:3}, {a:4, b:4}], configuration.writeConcernMax()).then(function(result) {
// Create an index on the a field
collection.ensureIndex({a:1, b:1}
, {unique:true, background:true, w:1}).then(function(indexName) {
// Fetch basic indexInformation for collection
db.indexInformation('more_index_information_test_2_with_promise').then(function(indexInformation) {
test.deepEqual([ [ '_id', 1 ] ], indexInformation._id_);
test.deepEqual([ [ 'a', 1 ], [ 'b', 1 ] ], indexInformation.a_1_b_1);
// Fetch full index information
collection.indexInformation({full:true}).then(function(indexInformation) {
test.deepEqual({ _id: 1 }, indexInformation[0].key);
test.deepEqual({ a: 1, b: 1 }, indexInformation[1].key);
db.close();
test.done();
});
}).catch(function(err) {
console.dir(err)
});
});
}).catch(function(err) {
console.dir(err)
});
});
// END
}
}
/**
 * An example showing the information returned by indexInformation using a Promise.
*
* @example-class Collection
* @example-method indexInformation
* @ignore
*/
exports.shouldCorrectlyShowAllTheResultsFromIndexInformationWithPromises = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance({w:0}, {poolSize:1, auto_reconnect:true});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
// Create a collection we want to drop later
var collection = db.collection('more_index_information_test_3_with_promise');
// Insert a bunch of documents for the index
collection.insertMany([{a:1, b:1}
, {a:2, b:2}, {a:3, b:3}, {a:4, b:4}], {w:1}).then(function(result) {
// Create an index on the a field
collection.ensureIndex({a:1, b:1}
, {unique:true, background:true, w:1}).then(function(indexName) {
// Fetch basic indexInformation for collection
collection.indexInformation().then(function(indexInformation) {
test.deepEqual([ [ '_id', 1 ] ], indexInformation._id_);
test.deepEqual([ [ 'a', 1 ], [ 'b', 1 ] ], indexInformation.a_1_b_1);
// Fetch full index information
collection.indexInformation({full:true}).then(function(indexInformation) {
test.deepEqual({ _id: 1 }, indexInformation[0].key);
test.deepEqual({ a: 1, b: 1 }, indexInformation[1].key);
db.close();
test.done();
});
});
});
});
});
// END
}
}
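/*
 * Note: the two indexInformation() result shapes asserted in the examples above,
 * summarized for reference (illustrative only, assuming `collection` as defined above):
 *
 *   collection.indexInformation().then(function(info) {
 *     // { _id_: [ [ '_id', 1 ] ], a_1_b_1: [ [ 'a', 1 ], [ 'b', 1 ] ] }
 *   });
 *   collection.indexInformation({full:true}).then(function(info) {
 *     // [ { key: { _id: 1 }, ... }, { key: { a: 1, b: 1 }, ... } ]
 *   });
 */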
/**
 * A simple document insert example using a Promise, not using safe mode to ensure document persistence on MongoDB
*
* @example-class Collection
* @example-method insert
* @ignore
*/
exports.shouldCorrectlyPerformASimpleSingleDocumentInsertNoCallbackNoSafeWithPromises = {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
var collection = db.collection("simple_document_insert_collection_no_safe_with_promise");
// Insert a single document
collection.insertOne({hello:'world_no_safe'});
      // Wait for a moment before finishing up, to ensure we have written the item to disk
setTimeout(function() {
// Fetch the document
collection.findOne({hello:'world_no_safe'}).then(function(item) {
test.equal('world_no_safe', item.hello);
db.close();
test.done();
})
}, 100);
});
// END
}
}
/**
 * A batch document insert example using a Promise, using safe mode to ensure document persistence on MongoDB
*
* @example-class Collection
* @example-method insert
* @ignore
*/
exports.shouldCorrectlyPerformABatchDocumentInsertSafeWithPromises = {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
// Fetch a collection to insert document into
var collection = db.collection("batch_document_insert_collection_safe_with_promise");
// Insert a single document
collection.insertMany([{hello:'world_safe1'}
, {hello:'world_safe2'}], configuration.writeConcernMax()).then(function(result) {
// Fetch the document
collection.findOne({hello:'world_safe2'}).then(function(item) {
test.equal('world_safe2', item.hello);
db.close();
test.done();
})
});
});
// END
}
}
/**
* Example of inserting a document containing functions using a Promise.
*
* @example-class Collection
* @example-method insert
* @ignore
*/
exports.shouldCorrectlyPerformASimpleDocumentInsertWithFunctionSafeWithPromises = {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
// Fetch a collection to insert document into
var collection = db.collection("simple_document_insert_with_function_safe_with_promise");
var o = configuration.writeConcernMax();
o.serializeFunctions = true;
// Insert a single document
collection.insertOne({hello:'world'
, func:function() {}}, o).then(function(result) {
// Fetch the document
collection.findOne({hello:'world'}).then(function(item) {
test.ok("function() {}", item.code);
db.close();
test.done();
})
});
});
// END
}
}
/**
 * Example of using keepGoing to allow a batch insert using a Promise to complete even when there are illegal documents in the batch
*
* @example-class Collection
* @example-method insert
* @ignore
*/
exports["Should correctly execute insert with keepGoing option on mongod >= 1.9.1 With Promises"] = {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: { requires: { promises:true, mongodb:">1.9.1", topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
// Create a collection
var collection = db.collection('keepGoingExample_with_promise');
collection.drop(function() {
        // Add a unique index to title to force errors in the batch insert
collection.ensureIndex({title:1}, {unique:true}).then(function(indexName) {
          // Insert some initial data into the collection
collection.insertMany([{name:"Jim"}
, {name:"Sarah", title:"Princess"}], configuration.writeConcernMax()).then(function(result) {
// Force keep going flag, ignoring unique index issue
collection.insert([{name:"Jim"}
, {name:"Sarah", title:"Princess"}
, {name:'Gump', title:"Gump"}], {w:1, keepGoing:true}).then(function(result) {
}).catch(function(err) {
// Count the number of documents left (should not include the duplicates)
collection.count().then(function(count) {
                test.equal(3, count);
                db.close();
                test.done();
})
});
});
});
});
});
// END
}
}
/**
* An example showing how to establish if it's a capped collection using a Promise.
*
* @example-class Collection
* @example-method isCapped
* @ignore
*/
exports.shouldCorrectlyExecuteIsCappedWithPromises = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
      // Create a test collection whose capped status we will check
db.createCollection('test_collection_is_capped_with_promise', {'capped':true, 'size':1024}).then(function(collection) {
test.equal('test_collection_is_capped_with_promise', collection.collectionName);
// Let's fetch the collection options
collection.isCapped().then(function(capped) {
test.equal(true, capped);
db.close();
test.done();
});
});
});
// END
}
}
/**
* An example returning the options for a collection using a Promise.
*
* @example-class Collection
* @example-method options
* @ignore
*/
exports.shouldCorrectlyRetriveCollectionOptionsWithPromises = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var Collection = configuration.require.Collection;
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
// Create a test collection that we are getting the options back from
db.createCollection('test_collection_options_with_promise', {'capped':true, 'size':1024}).then(function(collection) {
test.equal('test_collection_options_with_promise', collection.collectionName);
// Let's fetch the collection options
collection.options().then(function(options) {
test.equal(true, options.capped);
test.ok(options.size >= 1024);
db.close();
test.done();
});
});
});
// END
}
}
/**
* A parallelCollectionScan example using a Promise.
*
* @example-class Collection
* @example-method parallelCollectionScan
* @ignore
*/
exports['Should correctly execute parallelCollectionScan with multiple cursors With Promises'] = {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: { requires: { promises:true, mongodb: ">2.5.5", topology: ["single", "replicaset"] } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1, auto_reconnect:false});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
var docs = [];
// Insert some documents
for(var i = 0; i < 1000; i++) {
docs.push({a:i});
}
// Get the collection
var collection = db.collection('parallelCollectionScan_with_promise');
// Insert 1000 documents in a batch
collection.insertMany(docs).then(function(result) {
var results = [];
var numCursors = 3;
// Execute parallelCollectionScan command
collection.parallelCollectionScan({numCursors:numCursors}).then(function(cursors) {
test.ok(cursors != null);
test.ok(cursors.length > 0);
var left = cursors.length;
for(var i = 0; i < cursors.length; i++) {
cursors[i].toArray().then(function(items) {
// Add docs to results array
results = results.concat(items);
left = left - 1;
// No more cursors let's ensure we got all results
if(left == 0) {
test.equal(docs.length, results.length);
db.close();
test.done();
}
});
}
});
});
});
// END
}
}
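/*
 * Note: the manual `left` counter in the example above can also be expressed with
 * Promise.all over the returned cursors. A minimal sketch, assuming the same `cursors`
 * and `docs` arrays as above:
 *
 *   Promise.all(cursors.map(function(cursor) { return cursor.toArray(); }))
 *     .then(function(arrays) {
 *       var results = [].concat.apply([], arrays);
 *       console.log(results.length === docs.length); // true
 *     });
 */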
/**
* An example showing how to force a reindex of a collection using a Promise.
*
* @example-class Collection
* @example-method reIndex
* @ignore
*/
exports.shouldCorrectlyIndexAndForceReindexOnCollectionWithPromises = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance({w:0}, {poolSize:1, auto_reconnect:true});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
// Create a collection we want to drop later
var collection = db.collection('shouldCorrectlyForceReindexOnCollection_with_promise');
// Insert a bunch of documents for the index
collection.insertMany([{a:1, b:1}
, {a:2, b:2}, {a:3, b:3}, {a:4, b:4, c:4}], {w:1}).then(function(result) {
// Create an index on the a field
collection.ensureIndex({a:1, b:1}
, {unique:true, background:true, w:1}).then(function(indexName) {
// Force a reindex of the collection
collection.reIndex().then(function(result) {
test.equal(true, result);
            // Verify that the indexes are still in place
collection.indexInformation().then(function(indexInformation) {
test.deepEqual([ [ '_id', 1 ] ], indexInformation._id_);
test.deepEqual([ [ 'a', 1 ], [ 'b', 1 ] ], indexInformation.a_1_b_1);
db.close();
test.done();
});
});
});
});
});
// END
}
}
/**
 * An example of removing all documents in a collection without using safe mode, using a Promise.
*
* @example-class Collection
* @example-method remove
* @ignore
*/
exports.shouldRemoveAllDocumentsNoSafeWithPromises = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance({w:0}, {poolSize:1});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
// Fetch a collection to insert document into
var collection = db.collection("remove_all_documents_no_safe_with_promise");
// Insert a bunch of documents
collection.insertMany([{a:1}, {b:2}], {w:1}).then(function(result) {
        // Remove all the documents
collection.removeMany();
// Fetch all results
collection.find().toArray().then(function(items) {
test.equal(0, items.length);
db.close();
test.done();
});
})
});
// END
}
}
/**
 * An example of removing a subset of documents, using safe mode to ensure removal, using a Promise.
*
* @example-class Collection
* @example-method remove
* @ignore
*/
exports.shouldRemoveSubsetOfDocumentsSafeModeWithPromises = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance({w:0}, {poolSize:1});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
// Fetch a collection to insert document into
var collection = db.collection("remove_subset_of_documents_safe_with_promise");
// Insert a bunch of documents
collection.insertMany([{a:1}, {b:2}], {w:1}).then(function(result) {
        // Remove the matching document
collection.removeOne({a:1}, {w:1}).then(function(r) {
test.equal(1, r.result.n);
db.close();
test.done();
});
});
});
// END
}
}
/**
* An example of illegal and legal renaming of a collection using a Promise.
*
* @example-class Collection
* @example-method rename
* @ignore
*/
exports.shouldCorrectlyRenameCollectionWithPromises = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1, auto_reconnect:false});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
// Open a couple of collections
db.createCollection('test_rename_collection_with_promise').then(function(collection1) {
db.createCollection('test_rename_collection2_with_promise').then(function(collection2) {
          // Attempt to rename a collection to a number
try {
collection1.rename(5, function(err, collection) {});
} catch(err) {
test.ok(err instanceof Error);
test.equal("collection name must be a String", err.message);
}
          // Attempt to rename a collection to an empty string
try {
collection1.rename("", function(err, collection) {});
} catch(err) {
test.ok(err instanceof Error);
test.equal("collection names cannot be empty", err.message);
}
          // Attempt to rename a collection to an illegal name including the character $
try {
collection1.rename("te$t", function(err, collection) {});
} catch(err) {
test.ok(err instanceof Error);
test.equal("collection names must not contain '$'", err.message);
}
          // Attempt to rename a collection to an illegal name starting with the character .
try {
collection1.rename(".test", function(err, collection) {});
} catch(err) {
test.ok(err instanceof Error);
test.equal("collection names must not start or end with '.'", err.message);
}
          // Attempt to rename a collection to an illegal name ending with the character .
try {
collection1.rename("test.", function(err, collection) {});
} catch(err) {
test.ok(err instanceof Error);
test.equal("collection names must not start or end with '.'", err.message);
}
          // Attempt to rename a collection to an illegal name with an empty middle name
try {
collection1.rename("tes..t", function(err, collection) {});
} catch(err) {
test.equal("collection names cannot be empty", err.message);
}
// Insert a couple of documents
collection1.insertMany([{'x':1}, {'x':2}], configuration.writeConcernMax()).then(function(docs) {
            // Attempt to rename the first collection to the second one, this will fail
            collection1.rename('test_rename_collection2_with_promise').then(function(collection) {
}).catch(function(err) {
test.ok(err instanceof Error);
test.ok(err.message.length > 0);
              // Attempt to rename the first collection to a name that does not exist
              // this will be successful
collection1.rename('test_rename_collection3_with_promise').then(function(collection2) {
test.equal("test_rename_collection3_with_promise", collection2.collectionName);
// Ensure that the collection is pointing to the new one
collection2.count().then(function(count) {
test.equal(2, count);
db.close();
test.done();
});
});
});
});
});
});
});
// END
}
}
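/*
 * Note: the nested rename example above can also be written as a flat promise chain by
 * returning each promise from the previous .then(). A minimal sketch, assuming the same
 * `collection1` as above:
 *
 *   collection1.insertMany([{x:1}, {x:2}], {w:1})
 *     .then(function() {
 *       return collection1.rename('test_rename_collection3_with_promise');
 *     })
 *     .then(function(renamed) {
 *       return renamed.count();
 *     })
 *     .then(function(count) {
 *       console.log(count); // 2
 *     });
 */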
/**
* Example of a simple document save with safe set to false using a Promise.
*
* @example-class Collection
* @example-method save
* @ignore
*/
exports.shouldCorrectlySaveASimpleDocumentWithPromises = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance({w:0}, {poolSize:1});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
// Fetch the collection
var collection = db.collection("save_a_simple_document_with_promise");
// Save a document with no safe option
collection.save({hello:'world'});
      // Wait for two seconds
setTimeout(function() {
// Find the saved document
collection.findOne({hello:'world'}).then(function(item) {
test.equal('world', item.hello);
db.close();
test.done();
});
}, 2000);
});
// END
}
}
/**
* Example of a simple document save and then resave with safe set to true using a Promise.
*
* @example-class Collection
* @example-method save
* @ignore
*/
exports.shouldCorrectlySaveASimpleDocumentModifyItAndResaveItWithPromises = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
// Fetch the collection
var collection = db.collection("save_a_simple_document_modify_it_and_resave_it_with_promise");
// Save a document with no safe option
collection.save({hello:'world'}, configuration.writeConcernMax()).then(function(result) {
// Find the saved document
collection.findOne({hello:'world'}).then(function(item) {
test.equal('world', item.hello);
// Update the document
item['hello2'] = 'world2';
// Save the item with the additional field
collection.save(item, configuration.writeConcernMax()).then(function(result) {
// Find the changed document
collection.findOne({hello:'world'}).then(function(item) {
test.equal('world', item.hello);
test.equal('world2', item.hello2);
db.close();
test.done();
});
});
});
});
});
// END
}
}
/**
* Example of a simple document update with safe set to false on an existing document using a Promise.
*
* @example-class Collection
* @example-method update
* @ignore
*/
exports.shouldCorrectlyUpdateASimpleDocumentWithPromises = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance({w:0}, {poolSize:1});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
// Get a collection
var collection = db.collection('update_a_simple_document_with_promise');
// Insert a document, then update it
collection.insertOne({a:1}, configuration.writeConcernMax()).then(function(doc) {
// Update the document with an atomic operator
collection.updateOne({a:1}, {$set:{b:2}});
// Wait for a second then fetch the document
setTimeout(function() {
// Fetch the document that we modified
collection.findOne({a:1}).then(function(item) {
test.equal(1, item.a);
test.equal(2, item.b);
db.close();
test.done();
});
}, 1000);
});
});
// END
}
}
/**
* Example of a simple document update using upsert (the document will be inserted if it does not exist) using a Promise.
*
* @example-class Collection
* @example-method update
* @ignore
*/
exports.shouldCorrectlyUpsertASimpleDocumentWithPromises = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
// Get a collection
var collection = db.collection('update_a_simple_document_upsert_with_promise');
// Update the document using an upsert operation, ensuring creation if it does not exist
collection.updateOne({a:1}, {b:2, a:1}, {upsert:true, w: 1}).then(function(result) {
test.equal(1, result.result.n);
// Fetch the document that we modified and check if it got inserted correctly
collection.findOne({a:1}).then(function(item) {
test.equal(1, item.a);
test.equal(2, item.b);
db.close();
test.done();
});
});
});
// END
}
}
/**
* Example of an update across multiple documents using the multi option and using a Promise.
*
* @example-class Collection
* @example-method update
* @ignore
*/
exports.shouldCorrectlyUpdateMultipleDocumentsWithPromises = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
// Get a collection
var collection = db.collection('update_a_simple_document_multi_with_promise');
      // Insert a couple of documents
collection.insertMany([{a:1, b:1}, {a:1, b:2}], configuration.writeConcernMax()).then(function(result) {
var o = configuration.writeConcernMax();
collection.updateMany({a:1}, {$set:{b:0}}, o).then(function(r) {
test.equal(2, r.result.n);
// Fetch all the documents and verify that we have changed the b value
collection.find().toArray().then(function(items) {
test.equal(1, items[0].a);
test.equal(0, items[0].b);
test.equal(1, items[1].a);
test.equal(0, items[1].b);
db.close();
test.done();
});
})
});
});
// END
}
}
/**
 * Example of retrieving a collection's stats using a Promise.
*
* @example-class Collection
* @example-method stats
* @ignore
*/
exports.shouldCorrectlyReturnACollectionsStatsWithPromises = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
      // Create the collection for the stats example
var collection = db.collection('collection_stats_test_with_promise');
// Insert some documents
collection.insertMany([{a:1}, {hello:'world'}], configuration.writeConcernMax()).then(function(result) {
// Retrieve the statistics for the collection
collection.stats().then(function(stats) {
test.equal(2, stats.count);
db.close();
test.done();
});
});
});
// END
}
}
/**
 * An example showing the creation and dropping of all indexes using Promises.
*
* @example-class Collection
* @example-method dropIndexes
* @ignore
*/
exports.shouldCorrectlyCreateAndDropAllIndexWithPromises = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance({w:0}, {poolSize:1, auto_reconnect:true});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
// Create a collection we want to drop later
var collection = db.collection('shouldCorrectlyCreateAndDropAllIndex_with_promise');
// Insert a bunch of documents for the index
collection.insertMany([{a:1, b:1}
, {a:2, b:2}, {a:3, b:3}, {a:4, b:4, c:4}], {w:1}).then(function(result) {
// Create an index on the a field
collection.ensureIndex({a:1, b:1}
, {unique:true, background:true, w:1}).then(function(indexName) {
// Create an additional index
collection.ensureIndex({c:1}
, {unique:true, background:true, sparse:true, w:1}).then(function(indexName) {
            // Drop all the indexes
collection.dropAllIndexes().then(function(result) {
              // Verify that the indexes are gone
collection.indexInformation().then(function(indexInformation) {
test.deepEqual([ [ '_id', 1 ] ], indexInformation._id_);
test.equal(null, indexInformation.a_1_b_1);
test.equal(null, indexInformation.c_1);
db.close();
test.done();
});
});
});
});
});
});
// END
}
}
/**************************************************************************
*
* DB TESTS
*
*************************************************************************/
/**
 * An example that shows how to force close a db connection so it cannot be reused, using a Promise.
*
* @example-class Db
* @example-method close
* @ignore
*/
exports.shouldCorrectlyFailOnRetryDueToAppCloseOfDbWithPromises = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1, auto_reconnect:false});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
// Fetch a collection
var collection = db.collection('shouldCorrectlyFailOnRetryDueToAppCloseOfDb_with_promise');
// Insert a document
collection.insertOne({a:1}, configuration.writeConcernMax()).then(function(result) {
// Force close the connection
db.close(true).then(function() {
          // Attempt to insert; this should now fail with the message 'db closed by application'
collection.insertOne({a:2}, configuration.writeConcernMax()).then(function(result) {
}).catch(function(err) {
db.close();
test.done();
});
});
});
});
// END
}
}
/**
* A whole bunch of examples on how to use eval on the server with a Promise.
*
* @example-class Db
* @example-method eval
* @ignore
*/
exports.shouldCorrectlyExecuteEvalFunctionsWithPromises = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var Code = configuration.require.Code
, ReadPreference = configuration.require.ReadPreference;
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1, auto_reconnect:false});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
var numberOfTests = 10;
var tests_done = function() {
numberOfTests = numberOfTests - 1;
if(numberOfTests == 0) {
db.close();
test.done();
}
}
// Evaluate a function on the server with the parameter 3 passed in
db.eval('function (x) {return x;}', [3]).then(function(result) {
test.equal(3, result); tests_done();
        // Evaluate a function on the server with the parameter 3 passed in, no lock
        // acquired for eval on the server
db.eval('function (x) {return x;}', [3], {nolock:true}).then(function(result) {
test.equal(3, result); tests_done();
});
// Evaluate a function on the server that writes to a server collection
db.eval('function (x) {db.test_eval_with_promise.save({y:x});}', [5], {readPreference: ReadPreference.PRIMARY}).then(function(result) {
setTimeout(function() {
// Locate the entry
db.collection('test_eval_with_promise', function(err, collection) {
collection.findOne().then(function(item) {
test.equal(5, item.y); tests_done();
// Evaluate a function with 2 parameters passed in
db.eval('function (x, y) {return x + y;}', [2, 3]).then(function(result) {
test.equal(5, result); tests_done();
// Evaluate a function with no parameters passed in
db.eval('function () {return 5;}').then(function(result) {
test.equal(5, result); tests_done();
// Evaluate a statement
db.eval('2 + 3;').then(function(result) {
test.equal(5, result); tests_done();
// Evaluate a statement using the code object
db.eval(new Code("2 + 3;")).then(function(result) {
test.equal(5, result); tests_done();
// Evaluate a statement using the code object including a scope
db.eval(new Code("return i;", {'i':2})).then(function(result) {
test.equal(2, result); tests_done();
// Evaluate a statement using the code object including a scope
db.eval(new Code("i + 3;", {'i':2})).then(function(result) {
test.equal(5, result); tests_done();
// Evaluate an illegal statement
db.eval("5 ++ 5;").then(function(result) {
}).catch(function(err) {
test.ok(err instanceof Error);
test.ok(err.message != null);
tests_done();
});
});
});
});
});
});
});
});
});
}, 1000);
});
});
});
// END
}
}
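/*
 * Note: a compact summary of the db.eval() call forms exercised above (a code string or
 * Code object, an optional array of parameters, and optional options such as nolock).
 * Illustrative only, assuming `db` and `Code` as defined above.
 *
 *   db.eval('function (x, y) { return x + y; }', [2, 3]).then(function(result) {
 *     console.log(result); // 5
 *   });
 *   db.eval('function (x) { return x; }', [3], {nolock:true}).then(function(result) {
 *     console.log(result); // 3
 *   });
 */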
/**
* Defining and calling a system level javascript function (NOT recommended, http://www.mongodb.org/display/DOCS/Server-side+Code+Execution) using a Promise.
*
* @example-class Db
* @example-method eval
* @ignore
*/
exports.shouldCorrectlyDefineSystemLevelFunctionAndExecuteFunctionWithPromises = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var Code = configuration.require.Code;
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1, auto_reconnect:false});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
// Clean out the collection
db.collection("system.js").deleteMany({}, configuration.writeConcernMax()).then(function(result) {
// Define a system level function
db.collection("system.js").insertOne({_id: "echo", value: new Code("function(x) { return x; }")}, configuration.writeConcernMax()).then(function(result) {
db.eval("echo(5)").then(function(result) {
test.equal(5, result);
db.close();
test.done();
});
});
});
});
// END
}
}
/**
* An example of a simple single server db connection and close function using a Promise.
*
* @example-class Db
* @example-method close
* @ignore
*/
exports.shouldCorrectlyOpenASimpleDbSingleServerConnectionAndCloseWithCallbackWithPromises = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1, auto_reconnect:false});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
// Close the connection with a callback that is optional
db.close().then(function(result) {
test.done();
});
});
// END
}
}
/**
* An example of retrieving the collections list for a database using a Promise.
*
* @example-class Db
* @example-method listCollections
* @ignore
*/
exports.shouldCorrectlyRetrievelistCollectionsWithPromises = {
metadata: { requires: { promises:true, topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap'] } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1, auto_reconnect:false});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
// Get an empty db
var db1 = db.db('listCollectionTestDb2');
// Create a collection
var collection = db1.collection('shouldCorrectlyRetrievelistCollections_with_promise');
// Ensure the collection was created
collection.insertOne({a:1}).then(function(r) {
// Return the information of a single collection name
db1.listCollections({name: "shouldCorrectlyRetrievelistCollections_with_promise"}).toArray().then(function(items) {
test.equal(1, items.length);
          // Return the information of all collections, using the Promise format
db1.listCollections().toArray().then(function(items) {
test.ok(items.length >= 1);
db.close();
test.done();
});
}).catch(function(err) {
console.dir(err)
});
});
});
// END
}
}
/**
* @ignore
*/
exports.shouldCorrectlyRetrievelistCollectionsWiredTigerWithPromises = {
metadata: { requires: { promises:true, topology: ['wiredtiger'] } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1, auto_reconnect:false});
db.open().then(function(db) {
// Get an empty db
var db1 = db.db('listCollectionTestDb2');
// Create a collection
var collection = db1.collection('shouldCorrectlyRetrievelistCollections_with_promise');
// Ensure the collection was created
collection.insertOne({a:1}).then(function(r) {
// Return the information of a single collection name
db1.listCollections({name: "shouldCorrectlyRetrievelistCollections_with_promise"}).toArray().then(function(items) {
test.equal(1, items.length);
          // Return the information of all collections, using the Promise format
db1.listCollections().toArray().then(function(items) {
test.equal(1, items.length);
db.close();
test.done();
});
});
});
});
}
}
/**
* An example of retrieving a collection from a db using the collection function with a Promise.
*
* @example-class Db
* @example-method collection
* @ignore
*/
exports.shouldCorrectlyAccessACollectionWithPromises = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1, auto_reconnect:false});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
      // Grab a collection without a callback, not in safe mode
var col1 = db.collection('test_correctly_access_collections_with_promise');
// Grab a collection with a callback but no safe operation
db.collection('test_correctly_access_collections_with_promise', function(err, col2) {
// Grab a collection with a callback in safe mode, ensuring it exists (should fail as it's not created)
db.collection('test_correctly_access_collections_with_promise', {strict:true}, function(err, col3) {
// Create the collection
          db.createCollection('test_correctly_access_collections_with_promise').then(function(result) {
// Retry to get the collection, should work as it's now created
db.collection('test_correctly_access_collections_with_promise', {strict:true}, function(err, col3) {
db.close();
test.done();
});
});
});
});
});
// END
}
}
/**
* An example of retrieving all collections for a db as Collection objects using a Promise.
*
* @example-class Db
* @example-method collections
* @ignore
*/
exports.shouldCorrectlyRetrieveAllCollectionsWithPromises = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1, auto_reconnect:false});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
// Create the collection
var collection = db.collection('test_correctly_access_collections2_with_promise');
// Retry to get the collection, should work as it's now created
db.collections().then(function(collections) {
test.ok(collections.length > 0);
db.close();
test.done();
});
});
// END
}
}
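/**
 * A minimal illustrative sketch (not run by the test suite) that turns the
 * Collection objects resolved by db.collections() above into their names. It
 * assumes each Collection exposes a collectionName property.
 * @ignore
 */
var collectionNamesExample = function(db) {
  return db.collections().then(function(collections) {
    // Map each Collection instance to its name
    return collections.map(function(collection) { return collection.collectionName; });
  });
};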
/**
* An example of using the logout command for the database with a Promise.
*
* @example-class Db
* @example-method logout
* @ignore
*/
exports.shouldCorrectlyLogoutFromTheDatabaseWithPromises = {
metadata: { requires: { promises:true, topology: 'single' } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1, auto_reconnect:false});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
// Add a user to the database
db.addUser('user3', 'name').then(function(result) {
// Authenticate
db.authenticate('user3', 'name').then(function(result) {
test.equal(true, result);
// Logout the db
db.logout().then(function(result) {
test.equal(true, result);
// Remove the user
db.removeUser('user3').then(function(result) {
test.equal(true, result);
db.close();
test.done();
}).catch(function(err) { })
}).catch(function(err) { })
}).catch(function(err) { })
}).catch(function(err) { })
});
// END
}
}
/**
* An example of using the authenticate command with a Promise.
*
* @example-class Db
* @example-method authenticate
* @ignore
*/
exports.shouldCorrectlyAuthenticateAgainstTheDatabaseWithPromises = {
metadata: { requires: { promises:true, topology: 'single' } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1, auto_reconnect:false});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
// Add a user to the database
db.addUser('user2', 'name').then(function(result) {
// Authenticate
db.authenticate('user2', 'name').then(function(result) {
test.equal(true, result);
// Remove the user from the db
db.removeUser('user2').then(function(result) {
db.close();
test.done();
});
});
});
});
// END
}
}
/**
* An example of adding a user to the database using a Promise.
*
* @example-class Db
* @example-method addUser
* @ignore
*/
exports.shouldCorrectlyAddUserToDbWithPromises = {
metadata: { requires: { promises:true, topology: 'single' } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1, auto_reconnect:false});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
// Add a user to the database
db.addUser('user', 'name').then(function(result) {
// Remove the user from the db
db.removeUser('user').then(function(result) {
db.close();
test.done();
});
});
});
// END
}
}
/**
* An example of removing a user using a Promise.
*
* @example-class Db
* @example-method removeUser
* @ignore
*/
exports.shouldCorrectlyAddAndRemoveUserWithPromises = {
metadata: { requires: { promises:true, topology: 'single' } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1, auto_reconnect:false});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
// Add a user to the database
db.addUser('user', 'name').then(function(result) {
// Authenticate
db.authenticate('user', 'name').then(function(result) {
test.equal(true, result);
// Logout the db
db.logout().then(function(result) {
test.equal(true, result);
// Remove the user from the db
db.removeUser('user').then(function(result) {
// Authenticate
db.authenticate('user', 'name').then(function(result) {
test.equal(false, result);
db.close();
test.done();
}).catch(function(err) {
db.close();
test.done();
});
});
});
});
});
});
// END
}
}
/**
* A simple example showing the creation of a collection using a Promise.
*
* @example-class Db
* @example-method createCollection
* @ignore
*/
exports.shouldCorrectlyCreateACollectionWithPromises = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1, auto_reconnect:false});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
// Create a capped collection with a maximum of 1000 documents
db.createCollection("a_simple_collection_with_promise", {capped:true, size:10000, max:1000, w:1}).then(function(collection) {
// Insert a document in the capped collection
collection.insertOne({a:1}, configuration.writeConcernMax()).then(function(result) {
db.close();
test.done();
});
});
});
// END
}
}
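/**
 * A minimal illustrative sketch (not run by the test suite) verifying that a
 * collection created with the capped options shown above really is capped. It
 * assumes the Collection exposes an isCapped() helper that returns a promise
 * when no callback is supplied.
 * @ignore
 */
var verifyCappedExample = function(collection) {
  // isCapped() is assumed to resolve to a boolean
  return collection.isCapped().then(function(capped) {
    if(!capped) throw new Error('expected a capped collection');
    return capped;
  });
};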
/**
* A simple example creating, dropping a collection and then verifying that the collection is gone using a Promise.
*
* @example-class Db
* @example-method dropCollection
* @ignore
*/
exports.shouldCorrectlyExecuteACommandAgainstTheServerWithPromises = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1, auto_reconnect:false});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
// Execute ping against the server
db.command({ping:1}).then(function(result) {
// Create a capped collection with a maximum of 1000 documents
db.createCollection("a_simple_create_drop_collection_with_promise", {capped:true, size:10000, max:1000, w:1}).then(function(collection) {
// Insert a document in the capped collection
collection.insertOne({a:1}, configuration.writeConcernMax()).then(function(result) {
// Drop the collection from this world
db.dropCollection("a_simple_create_drop_collection_with_promise").then(function(result) {
// Verify that the collection is gone
db.listCollections({name:"a_simple_create_drop_collection_with_promise"}).toArray().then(function(names) {
test.equal(0, names.length);
db.close();
test.done();
});
});
});
});
});
});
// END
}
}
/**
* A simple example executing a command against the server using a Promise.
*
* @example-class Db
* @example-method command
* @ignore
*/
exports.shouldCorrectlyCreateDropAndVerifyThatCollectionIsGoneWithPromises = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1, auto_reconnect:false});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
// Execute ping against the server
db.command({ping:1}).then(function(result) {
db.close();
test.done();
});
});
// END
}
}
/**
 * A simple example creating a collection, renaming it and then verifying that the old collection name is gone using a Promise.
*
* @example-class Db
* @example-method renameCollection
* @ignore
*/
exports.shouldCorrectlyRenameACollectionWithPromises = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1, auto_reconnect:false});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
// Create a collection
db.createCollection("simple_rename_collection_with_promise", configuration.writeConcernMax()).then(function(collection) {
// Insert a document in the collection
collection.insertOne({a:1}, configuration.writeConcernMax()).then(function(result) {
// Retrieve the number of documents from the collection
collection.count().then(function(count) {
test.equal(1, count);
// Rename the collection
db.renameCollection("simple_rename_collection_with_promise", "simple_rename_collection_2_with_promise").then(function(collection2) {
// Retrieve the number of documents from the collection
collection2.count().then(function(count) {
test.equal(1, count);
// Verify that the collection is gone
db.listCollections({name:"simple_rename_collection_with_promise"}).toArray().then(function(names) {
test.equal(0, names.length);
// Verify that the new collection exists
db.listCollections({name:"simple_rename_collection_2_with_promise"}).toArray().then(function(names) {
test.equal(1, names.length);
db.close();
test.done();
});
});
});
});
});
});
});
});
// END
}
}
/**
* A more complex createIndex using a compound unique index in the background and dropping duplicated documents using a Promise.
*
* @example-class Db
* @example-method createIndex
* @ignore
*/
exports.shouldCreateOnDbComplexIndexOnTwoFieldsWithPromises = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1, auto_reconnect:false});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
// Create a collection we want to drop later
var collection = db.collection('more_complex_index_test_with_promise');
// Insert a bunch of documents for the index
collection.insertMany([{a:1, b:1}
, {a:2, b:2}, {a:3, b:3}, {a:4, b:4}], configuration.writeConcernMax()).then(function(result) {
          // Create a compound index on the a and b fields
db.createIndex('more_complex_index_test_with_promise', {a:1, b:1}
, {unique:true, background:true, w:1}).then(function(indexName) {
            // Show that all four inserted records are present
collection.find({}).toArray().then(function(items) {
test.equal(4, items.length);
// Perform a query, with explain to show we hit the query
collection.find({a:2}).explain().then(function(explanation) {
test.ok(explanation != null);
db.close();
test.done();
});
})
});
});
});
// END
}
}
/**
* A more complex ensureIndex using a compound unique index in the background and dropping duplicated documents using a Promise.
*
* @example-class Db
* @example-method ensureIndex
* @ignore
*/
exports.shouldCreateComplexEnsureIndexDbWithPromises = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1, auto_reconnect:false});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
// Create a collection we want to drop later
var collection = db.collection('more_complex_ensure_index_db_test_with_promise');
// Insert a bunch of documents for the index
collection.insertMany([{a:1, b:1}
, {a:2, b:2}, {a:3, b:3}, {a:4, b:4}], configuration.writeConcernMax()).then(function(result) {
          // Create a compound index on the a and b fields
db.ensureIndex('more_complex_ensure_index_db_test_with_promise', {a:1, b:1}
, {unique:true, background:true, w:1}).then(function(indexName) {
            // Show that all four inserted records are present
collection.find({}).toArray().then(function(items) {
test.equal(4, items.length);
// Perform a query, with explain to show we hit the query
collection.find({a:2}).explain().then(function(explanation) {
test.ok(explanation != null);
db.close();
test.done();
});
})
});
});
});
// END
}
}
/**
 * An example showing how to drop a database using a Promise.
*
* @example-class Db
* @example-method dropDatabase
* @ignore
*/
exports.shouldCorrectlyDropTheDatabaseWithPromises = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1, auto_reconnect:false});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
// Create a collection
var collection = db.collection('more_index_information_test_1_with_promise');
// Insert a bunch of documents for the index
collection.insertMany([{a:1, b:1}, {a:1, b:1}
, {a:2, b:2}, {a:3, b:3}, {a:4, b:4}], configuration.writeConcernMax()).then(function(result) {
// Let's drop the database
db.dropDatabase().then(function(result) {
// Wait two seconds to let it replicate across
setTimeout(function() {
// Get the admin database
db.admin().listDatabases().then(function(dbs) {
// Grab the databases
dbs = dbs.databases;
// Did we find the db
var found = false;
// Check if we have the db in the list
for(var i = 0; i < dbs.length; i++) {
if(dbs[i].name == 'integration_tests_to_drop') found = true;
}
              // We should not find the database
if(process.env['JENKINS'] == null) test.equal(false, found);
db.close();
test.done();
});
}, 2000);
});
});
});
// END
}
}
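/**
 * A minimal illustrative sketch (not run by the test suite) of the check the test
 * above performs by hand: resolve to true when the named database shows up in the
 * admin listDatabases result, which exposes a `databases` array of entries with a
 * `name` field.
 * @ignore
 */
var databaseExistsExample = function(db, name) {
  return db.admin().listDatabases().then(function(result) {
    // Each entry in result.databases carries the database name
    return result.databases.some(function(entry) { return entry.name == name; });
  });
};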
/**
* An example showing how to retrieve the db statistics using a Promise.
*
* @example-class Db
* @example-method stats
* @ignore
*/
exports.shouldCorrectlyRetrieveDbStatsWithPromisesWithPromises = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
db.stats().then(function(stats) {
test.ok(stats != null);
db.close();
test.done();
})
});
// END
}
}
/**
 * A simple example connecting to two different databases that share the underlying socket connections, using a Promise.
*
* @example-class Db
* @example-method db
* @ignore
*/
exports.shouldCorrectlyShareConnectionPoolsAcrossMultipleDbInstancesWithPromises = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
// Reference a different database sharing the same connections
// for the data transfer
var secondDb = db.db("integration_tests_2");
// Fetch the collections
var multipleColl1 = db.collection("multiple_db_instances_with_promise");
var multipleColl2 = secondDb.collection("multiple_db_instances_with_promise");
// Write a record into each and then count the records stored
multipleColl1.insertOne({a:1}, {w:1}).then(function(result) {
multipleColl2.insertOne({a:1}, {w:1}).then(function(result) {
        // Count the results, ensuring there is only one record in each collection
multipleColl1.count().then(function(count) {
test.equal(1, count);
multipleColl2.count().then(function(count) {
test.equal(1, count);
db.close();
test.done();
});
});
});
});
});
// END
}
}
/**
 * A simple replicaset connection setup; requires a running replicaset listening on the correct ports.
*
* @example-class Db
* @example-method open
* @ignore
*/
exports['Should correctly connect with default replicasetNoOption With Promises'] = {
metadata: { requires: { promises:true, topology: 'replicaset' } },
// The actual test we wish to run
test: function(configuration, test) {
var ReplSet = configuration.require.ReplSet
, Server = configuration.require.Server
, Db = configuration.require.Db;
// Replica configuration
var replSet = new ReplSet([
new Server(configuration.host, configuration.port),
new Server(configuration.host, configuration.port + 1),
new Server(configuration.host, configuration.port + 2)
]
, {rs_name:configuration.replicasetName}
);
var db = new Db('integration_test_', replSet, {w:0});
db.open(function(err, p_db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE restartAndDone
// REMOVE-LINE test.done();
// BEGIN
p_db.close();
test.done();
});
// END
}
}
/**************************************************************************
*
* ADMIN TESTS
*
*************************************************************************/
/**
 * Authenticate against the MongoDB admin database using a Promise.
*
* @example-class Admin
* @example-method authenticate
* @ignore
*/
exports.shouldCorrectlyAuthenticateWithPromises = {
metadata: { requires: { promises:true, topology: 'single' } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE restartAndDone
// REMOVE-LINE test.done();
// BEGIN
// Grab a collection object
var collection = db.collection('test_with_promise');
// Force the creation of the collection by inserting a document
// Collections are not created until the first document is inserted
collection.insertOne({'a':1}, {w:1}).then(function(doc) {
// Use the admin database for the operation
var adminDb = db.admin();
// Add the new user to the admin database
adminDb.addUser('admin2', 'admin2').then(function(result) {
// Authenticate using the newly added user
adminDb.authenticate('admin2', 'admin2').then(function(result) {
test.ok(result);
adminDb.removeUser('admin2').then(function(result) {
test.ok(result);
db.close();
test.done();
});
});
});
});
});
// END
}
}
/**
* Retrieve the buildInfo for the current MongoDB instance using a Promise.
*
* @example-class Admin
* @example-method buildInfo
* @ignore
*/
exports.shouldCorrectlyRetrieveBuildInfoWithPromises = {
metadata: { requires: { promises:true, topology: 'single' } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE restartAndDone
// REMOVE-LINE test.done();
// BEGIN
// Use the admin database for the operation
var adminDb = db.admin();
// Add the new user to the admin database
adminDb.addUser('admin3', 'admin3').then(function(result) {
// Authenticate using the newly added user
adminDb.authenticate('admin3', 'admin3').then(function(result) {
test.ok(result);
          // Retrieve the build information for the MongoDB instance
adminDb.buildInfo().then(function(info) {
adminDb.removeUser('admin3').then(function(result) {
test.ok(result);
db.close();
test.done();
});
});
});
});
});
// END
}
}
/**
 * Retrieve the buildInfo using the command function, with a Promise.
*
* @example-class Admin
* @example-method command
* @ignore
*/
exports.shouldCorrectlyRetrieveBuildInfoUsingCommandWithPromises = {
metadata: { requires: { promises:true, topology: 'single' } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE restartAndDone
// REMOVE-LINE test.done();
// BEGIN
// Use the admin database for the operation
var adminDb = db.admin();
// Add the new user to the admin database
adminDb.addUser('admin4', 'admin4').then(function(result) {
// Authenticate using the newly added user
adminDb.authenticate('admin4', 'admin4').then(function(result) {
test.ok(result);
          // Retrieve the build information using the admin command
adminDb.command({buildInfo:1}).then(function(info) {
adminDb.removeUser('admin4').then(function(result) {
test.ok(result);
db.close();
test.done();
});
});
});
});
});
// END
}
}
/**
* Retrieve the current profiling level set for the MongoDB instance using a Promise.
*
* @example-class Admin
* @example-method profilingLevel
* @ignore
*/
exports.shouldCorrectlySetDefaultProfilingLevelWithPromises = {
metadata: { requires: { promises:true, topology: 'single' } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE restartAndDone
// REMOVE-LINE test.done();
// BEGIN
// Grab a collection object
var collection = db.collection('test_with_promise');
// Force the creation of the collection by inserting a document
// Collections are not created until the first document is inserted
collection.insertOne({'a':1}, {w: 1}).then(function(doc) {
// Use the admin database for the operation
var adminDb = db.admin();
// Add the new user to the admin database
adminDb.addUser('admin5', 'admin5').then(function(result) {
// Authenticate using the newly added user
adminDb.authenticate('admin5', 'admin5').then(function(replies) {
            // Retrieve the profiling level
adminDb.profilingLevel().then(function(level) {
adminDb.removeUser('admin5').then(function(result) {
test.ok(result);
db.close();
test.done();
});
});
});
});
});
});
// END
}
}
/**
 * An example of how to use setProfilingLevel with a Promise.
 * Use this command to set the profiling level on the MongoDB server
*
* @example-class Admin
* @example-method setProfilingLevel
* @ignore
*/
exports.shouldCorrectlyChangeProfilingLevelWithPromises = {
metadata: { requires: { promises:true, topology: 'single' } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE restartAndDone
// REMOVE-LINE test.done();
// BEGIN
// Grab a collection object
var collection = db.collection('test_with_promise');
// Force the creation of the collection by inserting a document
// Collections are not created until the first document is inserted
collection.insertOne({'a':1}, {w: 1}).then(function(doc) {
// Use the admin database for the operation
var adminDb = db.admin();
// Add the new user to the admin database
adminDb.addUser('admin6', 'admin6').then(function(result) {
// Authenticate using the newly added user
adminDb.authenticate('admin6', 'admin6').then(function(replies) {
// Set the profiling level to only profile slow queries
adminDb.setProfilingLevel('slow_only').then(function(level) {
              // Retrieve the profiling level and verify that it's set to slow_only
adminDb.profilingLevel().then(function(level) {
test.equal('slow_only', level);
// Turn profiling off
adminDb.setProfilingLevel('off').then(function(level) {
                // Retrieve the profiling level and verify that it's set to off
adminDb.profilingLevel().then(function(level) {
test.equal('off', level);
// Set the profiling level to log all queries
adminDb.setProfilingLevel('all').then(function(level) {
                  // Retrieve the profiling level and verify that it's set to all
adminDb.profilingLevel().then(function(level) {
test.equal('all', level);
// Attempt to set an illegal profiling level
adminDb.setProfilingLevel('medium').then(function(level) {
}).catch(function(err) {
test.ok(err instanceof Error);
test.equal("Error: illegal profiling level value medium", err.message);
adminDb.removeUser('admin6').then(function(result) {
test.ok(result);
db.close();
test.done();
});
});
})
});
})
});
})
});
});
});
});
});
// END
}
}
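/**
 * The example above nests each call inside the previous one. A minimal illustrative
 * sketch (not run by the test suite) of the same sequence written as a flat chain,
 * returning each promise so the steps run one after another; it assumes the same
 * promise-returning Admin API used above.
 * @ignore
 */
var flatProfilingLevelExample = function(adminDb) {
  return adminDb.setProfilingLevel('slow_only')
    .then(function() { return adminDb.profilingLevel(); })
    .then(function(level) {
      // level is expected to be 'slow_only' at this point
      return adminDb.setProfilingLevel('off');
    })
    .then(function() { return adminDb.profilingLevel(); });
};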
/**
 * An example of how to use profilingInfo with a Promise.
 * Use this command to pull back the profiling information currently recorded by MongoDB
*
* @example-class Admin
* @example-method profilingInfo
* @ignore
*/
exports.shouldCorrectlySetAndExtractProfilingInfoWithPromises = {
metadata: { requires: { promises:true, topology: 'single' } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE restartAndDone
// REMOVE-LINE test.done();
// BEGIN
// Grab a collection object
var collection = db.collection('test_with_promise');
// Force the creation of the collection by inserting a document
// Collections are not created until the first document is inserted
collection.insertOne({'a':1}, {w: 1}).then(function(doc) {
// Use the admin database for the operation
var adminDb = db.admin();
// Add the new user to the admin database
adminDb.addUser('admin7', 'admin7').then(function(result) {
// Authenticate using the newly added user
adminDb.authenticate('admin7', 'admin7').then(function(replies) {
// Set the profiling level to all
adminDb.setProfilingLevel('all').then(function(level) {
// Execute a query command
collection.find().toArray().then(function(items) {
// Turn off profiling
adminDb.setProfilingLevel('off').then(function(level) {
                // Retrieve the profiling information
adminDb.profilingInfo().then(function(infos) {
test.ok(infos.constructor == Array);
test.ok(infos.length >= 1);
test.ok(infos[0].ts.constructor == Date);
test.ok(infos[0].millis.constructor == Number);
adminDb.removeUser('admin7').then(function(result) {
test.ok(result);
db.close();
test.done();
});
});
});
});
});
});
});
});
});
// END
}
}
/**
* An example of how to use the validateCollection command using a Promise.
* Use this command to check that a collection is valid (not corrupt) and to get various statistics.
*
* @example-class Admin
* @example-method validateCollection
* @ignore
*/
exports.shouldCorrectlyCallValidateCollectionWithPromises = {
metadata: { requires: { promises:true, topology: 'single' } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE restartAndDone
// REMOVE-LINE test.done();
// BEGIN
// Grab a collection object
var collection = db.collection('test_with_promise');
// Force the creation of the collection by inserting a document
// Collections are not created until the first document is inserted
collection.insertOne({'a':1}, {w: 1}).then(function(doc) {
// Use the admin database for the operation
var adminDb = db.admin();
// Add the new user to the admin database
adminDb.addUser('admin8', 'admin8').then(function(result) {
// Authenticate using the newly added user
adminDb.authenticate('admin8', 'admin8').then(function(replies) {
// Validate the 'test' collection
adminDb.validateCollection('test_with_promise').then(function(doc) {
// Remove the user
adminDb.removeUser('admin8').then(function(result) {
test.ok(result);
db.close();
test.done();
});
});
});
});
});
});
}
}
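/**
 * A minimal illustrative sketch (not run by the test suite) of acting on the
 * document resolved by validateCollection above. The boolean `valid` field is an
 * assumption based on the server's validate command output.
 * @ignore
 */
var assertCollectionValidExample = function(adminDb, name) {
  return adminDb.validateCollection(name).then(function(doc) {
    // Reject when the server reports the collection as invalid
    if(!doc.valid) throw new Error('collection ' + name + ' failed validation');
    return doc;
  });
};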
/**
 * An example of how to ping the MongoDB instance using a Promise.
*
* @example-class Admin
* @example-method ping
* @ignore
*/
exports.shouldCorrectlyPingTheMongoDbInstanceWithPromises = {
metadata: { requires: { promises:true, topology: 'single' } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE restartAndDone
// REMOVE-LINE test.done();
// BEGIN
// Use the admin database for the operation
var adminDb = db.admin();
// Add the new user to the admin database
adminDb.addUser('admin9', 'admin9').then(function(result) {
// Authenticate using the newly added user
adminDb.authenticate('admin9', 'admin9').then(function(result) {
test.ok(result);
// Ping the server
adminDb.ping().then(function(pingResult) {
adminDb.removeUser('admin9').then(function(result) {
test.ok(result);
db.close();
test.done();
});
});
});
});
});
// END
}
}
/**
 * An example of how to add a user, authenticate and log out using a Promise.
*
* @example-class Admin
* @example-method logout
* @ignore
*/
exports.shouldCorrectlyUseLogoutFunctionWithPromises = {
metadata: { requires: { promises:true, topology: 'single' } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE restartAndDone
// REMOVE-LINE test.done();
// BEGIN
// Use the admin database for the operation
var adminDb = db.admin();
// Add the new user to the admin database
adminDb.addUser('admin10', 'admin10').then(function(result) {
// Authenticate using the newly added user
adminDb.authenticate('admin10', 'admin10').then(function(result) {
test.ok(result);
// Logout the user
adminDb.logout().then(function(result) {
test.equal(true, result);
adminDb.removeUser('admin10').then(function(result) {
test.ok(result);
db.close();
test.done();
});
});
});
});
});
// END
}
}
/**
* An example of how to add a user to the admin database using a Promise.
*
* @example-class Admin
* @example-method addUser
* @ignore
*/
exports.shouldCorrectlyAddAUserToAdminDbWithPromises = {
metadata: { requires: { promises:true, topology: 'single' } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE restartAndDone
// REMOVE-LINE test.done();
// BEGIN
// Use the admin database for the operation
var adminDb = db.admin();
// Add the new user to the admin database
adminDb.addUser('admin11', 'admin11').then(function(result) {
// Authenticate using the newly added user
adminDb.authenticate('admin11', 'admin11').then(function(result) {
test.ok(result);
adminDb.removeUser('admin11').then(function(result) {
test.ok(result);
db.close();
test.done();
});
});
});
});
}
}
/**
* An example of how to remove a user from the admin database using a Promise.
*
* @example-class Admin
* @example-method removeUser
* @ignore
*/
exports.shouldCorrectlyAddAUserAndRemoveItFromAdminDbWithPromises = {
metadata: { requires: { promises:true, topology: 'single' } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE restartAndDone
// REMOVE-LINE test.done();
// BEGIN
// Use the admin database for the operation
var adminDb = db.admin();
// Add the new user to the admin database
adminDb.addUser('admin12', 'admin12').then(function(result) {
// Authenticate using the newly added user
adminDb.authenticate('admin12', 'admin12').then(function(result) {
test.ok(result);
// Remove the user
adminDb.removeUser('admin12').then(function(result) {
test.equal(true, result);
            // Authenticating with the removed user should now fail
adminDb.authenticate('admin12', 'admin12').then(function(result) {
}).catch(function(err) {
db.close();
test.done();
});
})
});
});
});
// END
}
}
/**
 * An example of listing all available databases using a Promise.
*
* @example-class Admin
* @example-method listDatabases
* @ignore
*/
exports.shouldCorrectlyListAllAvailableDatabasesWithPromises = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE restartAndDone
// REMOVE-LINE test.done();
// BEGIN
// Use the admin database for the operation
var adminDb = db.admin();
// List all the available databases
adminDb.listDatabases().then(function(dbs) {
test.ok(dbs.databases.length > 0);
db.close();
test.done();
});
});
// END
}
}
/**
 * Retrieve the current server status information using a Promise.
*
* @example-class Admin
* @example-method serverStatus
* @ignore
*/
exports.shouldCorrectlyRetrieveServerInfoWithPromises = {
metadata: { requires: { promises:true, topology: 'single' } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE restartAndDone
// REMOVE-LINE test.done();
// BEGIN
// Grab a collection object
var collection = db.collection('test_with_promise');
// Force the creation of the collection by inserting a document
// Collections are not created until the first document is inserted
collection.insertOne({'a':1}, {w: 1}).then(function(doc) {
// Use the admin database for the operation
var adminDb = db.admin();
// Add the new user to the admin database
adminDb.addUser('admin13', 'admin13').then(function(result) {
// Authenticate using the newly added user
adminDb.authenticate('admin13', 'admin13').then(function(result) {
            // Retrieve the server status information
adminDb.serverStatus().then(function(info) {
test.ok(info != null);
adminDb.removeUser('admin13').then(function(result) {
test.ok(result);
db.close();
test.done();
});
});
});
});
});
});
// END
}
}
/**************************************************************************
*
* CURSOR TESTS
*
*************************************************************************/
var fs = require('fs');
/**
 * A simple example showing the use of the cursor toArray function using a Promise.
*
* @example-class Cursor
* @example-method toArray
* @ignore
*/
exports.shouldCorrectlyExecuteToArrayWithPromises = {
// Add a tag that our runner can trigger on
  // in this case we require an environment with promise support
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1, auto_reconnect:false});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE restartAndDone
// REMOVE-LINE test.done();
// BEGIN
// Create a collection to hold our documents
var collection = db.collection('test_array_with_promise');
// Insert a test document
collection.insertOne({'b':[1, 2, 3]}, configuration.writeConcernMax()).then(function(ids) {
// Retrieve all the documents in the collection
collection.find().toArray().then(function(documents) {
test.equal(1, documents.length);
test.deepEqual([1, 2, 3], documents[0].b);
db.close();
test.done();
});
});
});
// END
}
}
/**
* A simple example showing the count function of the cursor using a Promise.
*
* @example-class Cursor
* @example-method count
* @ignore
*/
exports.shouldCorrectlyUseCursorCountFunctionWithPromises = {
// Add a tag that our runner can trigger on
  // in this case we require an environment with promise support
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1, auto_reconnect:false});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE restartAndDone
// REMOVE-LINE test.done();
// BEGIN
      // Create a collection
var collection = db.collection('cursor_count_collection_with_promise');
// Insert some docs
collection.insertMany([{a:1}, {a:2}], configuration.writeConcernMax()).then(function(docs) {
// Do a find and get the cursor count
collection.find().count().then(function(count) {
test.equal(2, count);
db.close();
test.done();
})
});
});
// END
}
}
/**
* A simple example showing the use of nextObject using a Promise.
*
* @example-class Cursor
* @example-method nextObject
* @ignore
*/
exports.shouldCorrectlyPerformNextObjectOnCursorWithPromises = {
// Add a tag that our runner can trigger on
  // in this case we require an environment with promise support
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1, auto_reconnect:false});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE restartAndDone
// REMOVE-LINE test.done();
// BEGIN
// Create a collection
var collection = db.collection('simple_next_object_collection_with_promise');
// Insert some documents we can sort on
collection.insertMany([{a:1}, {a:2}, {a:3}], configuration.writeConcernMax()).then(function(docs) {
        // Fetch the first document on the cursor
collection.find().nextObject().then(function(item) {
test.equal(1, item.a);
db.close();
test.done();
});
});
});
// END
}
}
/**
* A simple example showing the use of the cursor explain function using a Promise.
*
* @example-class Cursor
* @example-method explain
* @ignore
*/
exports.shouldCorrectlyPerformSimpleExplainCursorWithPromises = {
// Add a tag that our runner can trigger on
  // in this case we require an environment with promise support
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1, auto_reconnect:false});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE restartAndDone
// REMOVE-LINE test.done();
// BEGIN
// Create a collection
var collection = db.collection('simple_explain_collection_with_promise');
// Insert some documents we can sort on
collection.insertMany([{a:1}, {a:2}, {a:3}], configuration.writeConcernMax()).then(function(docs) {
        // Explain the query
        collection.find().explain().then(function(explanation) {
db.close();
test.done();
});
});
});
// END
}
}
/**
* A simple example showing the use of the cursor close function using a Promise.
*
* @example-class Cursor
* @example-method close
* @ignore
*/
exports.shouldStreamDocumentsUsingTheCloseFunctionWithPromises = {
// Add a tag that our runner can trigger on
  // in this case we require an environment with promise support
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1, auto_reconnect:false});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE restartAndDone
// REMOVE-LINE test.done();
// BEGIN
// Create a lot of documents to insert
var docs = []
for(var i = 0; i < 100; i++) {
docs.push({'a':i})
}
// Create a collection
var collection = db.collection('test_close_function_on_cursor_with_promise');
// Insert documents into collection
collection.insertMany(docs, configuration.writeConcernMax()).then(function(ids) {
// Perform a find to get a cursor
var cursor = collection.find();
// Fetch the first object
cursor.nextObject().then(function(object) {
          // Close the cursor, this is the same as resetting the query
cursor.close().then(function(result) {
db.close();
test.done();
});
});
});
});
// END
}
}
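/**
 * A minimal illustrative sketch (not run by the test suite) of draining a cursor
 * with nextObject before closing it, assuming nextObject resolves to null once the
 * cursor is exhausted (as in the examples above).
 * @ignore
 */
var drainAndCloseCursorExample = function(cursor) {
  return cursor.nextObject().then(function(doc) {
    // A null document means there is nothing left to read
    if(doc == null) return cursor.close();
    return drainAndCloseCursorExample(cursor);
  });
};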
/**************************************************************************
*
* MONGOCLIENT TESTS
*
*************************************************************************/
/**
* Example of a simple url connection string to a replicaset, with acknowledgement of writes using a Promise.
*
* @example-class MongoClient
* @example-method MongoClient.connect
* @ignore
*/
exports['Should correctly connect to a replicaset With Promises'] = {
metadata: { requires: { promises:true, topology: 'replicaset' } },
// The actual test we wish to run
test: function(configuration, test) {
var mongo = configuration.require
, MongoClient = mongo.MongoClient;
// Create url
var url = f("mongodb://%s,%s/%s?replicaSet=%s&readPreference=%s"
, f("%s:%s", configuration.host, configuration.port)
, f("%s:%s", configuration.host, configuration.port + 1)
, "integration_test_"
, configuration.replicasetName
, "primary");
MongoClient.connect(url).then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:30000,localhost:30001,localhost:30002/test?replicaSet=rs', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE restartAndDone
// REMOVE-LINE test.done();
// BEGIN
test.ok(db != null);
db.collection("replicaset_mongo_client_collection_with_promise").updateOne({a:1}, {b:1}, {upsert:true}).then(function(result) {
test.equal(1, result.result.n);
db.close();
test.done();
});
}).catch(function(err) {
console.dir(err)
});
// END
}
}
/**
* Example of a simple url connection string to a shard, with acknowledgement of writes using a Promise.
*
* @example-class MongoClient
* @example-method MongoClient.connect
* @ignore
*/
exports['Should connect to mongos proxies using connection string With Promises'] = {
metadata: { requires: { promises:true, topology: 'mongos' } },
// The actual test we wish to run
test: function(configuration, test) {
var MongoClient = configuration.require.MongoClient;
var url = f('mongodb://%s:%s,%s:%s/sharded_test_db?w=1'
, configuration.host, configuration.port
, configuration.host, configuration.port + 1);
MongoClient.connect(url).then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:50000,localhost:50001/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE restartAndDone
// REMOVE-LINE test.done();
// BEGIN
test.ok(db != null);
db.collection("replicaset_mongo_client_collection_with_promise").updateOne({a:1}, {b:1}, {upsert:true}).then(function(result) {
          test.equal(1, result.result.n);
db.close();
test.done();
});
});
// END
}
}
/**
* Example of a simple url connection string for a single server connection
*
* @example-class MongoClient
* @example-method MongoClient.connect
* @ignore
*/
exports['Should correctly connect using MongoClient to a single server using connect With Promises'] = {
// Add a tag that our runner can trigger on
  // in this case we require an environment with promise support
metadata: { requires: { promises:true, topology: 'single'} },
// The actual test we wish to run
test: function(configuration, test) {
var MongoClient = configuration.require.MongoClient
, Server = configuration.require.Server;
// DOC_START
// Connect using the connection string
MongoClient.connect("mongodb://localhost:27017/integration_tests", {native_parser:true}).then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE restartAndDone
// REMOVE-LINE test.done();
// BEGIN
db.collection('mongoclient_test_with_promise').updateOne({a:1}, {b:1}, {upsert:true}).then(function(result) {
test.equal(1, result.result.n);
db.close();
test.done();
});
});
// END
}
}
/**************************************************************************
*
* GRIDSTORE TESTS
*
*************************************************************************/
/**
 * A simple example showing the usage of the GridStore.exist method using a Promise.
*
* @example-class GridStore
* @example-method GridStore.exist
* @ignore
*/
exports.shouldCorrectlyExecuteGridStoreExistsByObjectIdWithPromises = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var GridStore = configuration.require.GridStore
, ObjectID = configuration.require.ObjectID;
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE GridStore = require('mongodb').GridStore,
// LINE ObjectID = require('mongodb').ObjectID,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
// Open a file for writing
var gridStore = new GridStore(db, null, "w");
gridStore.open().then(function(gridStore) {
// Writing some content to the file
gridStore.write("hello world!").then(function(gridStore) {
// Flush the file to GridFS
gridStore.close().then(function(result) {
// Check if the file exists using the id returned from the close function
GridStore.exist(db, result._id).then(function(result) {
test.equal(true, result);
            });
// Show that the file does not exist for a random ObjectID
GridStore.exist(db, new ObjectID()).then(function(result) {
test.equal(false, result);
});
// Show that the file does not exist for a different file root
GridStore.exist(db, result._id, 'another_root').then(function(result) {
test.equal(false, result);
db.close();
test.done();
});
});
});
});
});
// END
}
}
/**
 * A simple example showing the usage of the GridStore.list method using a Promise.
*
* @example-class GridStore
* @example-method GridStore.list
* @ignore
*/
exports.shouldCorrectlyExecuteGridStoreListWithPromises = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var GridStore = configuration.require.GridStore
, ObjectID = configuration.require.ObjectID;
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE GridStore = require('mongodb').GridStore,
// LINE ObjectID = require('mongodb').ObjectID,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
// Our file id
var fileId = new ObjectID();
// Open a file for writing
var gridStore = new GridStore(db, fileId, "foobar2", "w");
gridStore.open().then(function(gridStore) {
// Write some content to the file
gridStore.write("hello world!").then(function(gridStore) {
// Flush to GridFS
gridStore.close().then(function(result) {
// List the existing files
GridStore.list(db).then(function(items) {
var found = false;
items.forEach(function(filename) {
if(filename == 'foobar2') found = true;
});
test.ok(items.length >= 1);
test.ok(found);
});
// List the existing files but return only the file ids
GridStore.list(db, {id:true}).then(function(items) {
var found = false;
items.forEach(function(id) {
test.ok(typeof id == 'object');
});
test.ok(items.length >= 1);
});
// List the existing files in a specific root collection
GridStore.list(db, 'fs').then(function(items) {
var found = false;
items.forEach(function(filename) {
if(filename == 'foobar2') found = true;
});
test.ok(items.length >= 1);
test.ok(found);
});
// List the existing files in a different root collection where the file is not located
GridStore.list(db, 'my_fs').then(function(items) {
var found = false;
items.forEach(function(filename) {
if(filename == 'foobar2') found = true;
});
test.ok(items.length >= 0);
test.ok(!found);
              // Specify a separate id
var fileId2 = new ObjectID();
// Write another file to GridFS
var gridStore2 = new GridStore(db, fileId2, "foobar3", "w");
gridStore2.open().then(function(gridStore) {
// Write the content
gridStore2.write('my file').then(function(gridStore) {
// Flush to GridFS
gridStore.close().then(function(result) {
// List all the available files and verify that our files are there
GridStore.list(db).then(function(items) {
var found = false;
var found2 = false;
items.forEach(function(filename) {
if(filename == 'foobar2') found = true;
if(filename == 'foobar3') found2 = true;
});
test.ok(items.length >= 2);
test.ok(found);
test.ok(found2);
db.close();
test.done();
});
});
});
});
});
});
});
});
});
// END
}
}
/**
* A simple example showing the usage of the puts method using a Promise.
*
* @example-class GridStore
* @example-method puts
* @ignore
*/
exports.shouldCorrectlyReadlinesAndPutLinesWithPromises = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var GridStore = configuration.require.GridStore
, ObjectID = configuration.require.ObjectID;
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE GridStore = require('mongodb').GridStore,
// LINE ObjectID = require('mongodb').ObjectID,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
// Open a file for writing
var gridStore = new GridStore(db, "test_gs_puts_and_readlines", "w");
gridStore.open().then(function(gridStore) {
// Write a line to the file using the puts method
gridStore.puts("line one").then(function(gridStore) {
// Flush the file to GridFS
gridStore.close().then(function(result) {
// Read in the entire contents
GridStore.read(db, 'test_gs_puts_and_readlines').then(function(data) {
test.equal("line one\n", data.toString());
db.close();
test.done();
});
});
});
});
});
// END
}
}
/**
* A simple example showing the usage of the GridStore.unlink method using a Promise.
*
* @example-class GridStore
* @example-method GridStore.unlink
* @ignore
*/
exports.shouldCorrectlyUnlinkWithPromises = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var GridStore = configuration.require.GridStore
, ObjectID = configuration.require.ObjectID;
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE GridStore = require('mongodb').GridStore,
// LINE ObjectID = require('mongodb').ObjectID,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
// Open a new file for writing
var gridStore = new GridStore(db, "test_gs_unlink", "w");
db.dropDatabase().then(function(r) {
gridStore.open().then(function(gridStore) {
// Write some content
gridStore.write("hello, world!").then(function(gridStore) {
// Flush file to GridFS
gridStore.close().then(function(result) {
              // Verify the existence of the fs.files document
db.collection('fs.files', function(err, collection) {
collection.count().then(function(count) {
test.equal(1, count);
})
});
              // Verify the existence of the fs.chunks chunk document
db.collection('fs.chunks', function(err, collection) {
collection.count().then(function(count) {
test.equal(1, count);
// Unlink the file (removing it)
GridStore.unlink(db, 'test_gs_unlink').then(function(gridStore) {
// Verify that fs.files document is gone
db.collection('fs.files', function(err, collection) {
collection.count().then(function(count) {
test.equal(0, count);
})
});
// Verify that fs.chunks chunk documents are gone
db.collection('fs.chunks', function(err, collection) {
collection.count().then(function(count) {
test.equal(0, count);
db.close();
test.done();
})
});
});
})
});
});
});
});
});
});
// END
}
}
/**
* A simple example showing the usage of the read method using a Promise.
*
* @example-class GridStore
* @example-method read
* @ignore
*/
exports.shouldCorrectlyWriteAndReadJpgImageWithPromises = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var GridStore = configuration.require.GridStore
, ObjectID = configuration.require.ObjectID;
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE GridStore = require('mongodb').GridStore,
// LINE ObjectID = require('mongodb').ObjectID,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
// Read in the content of a file
var data = fs.readFileSync('./test/functional/data/iya_logo_final_bw.jpg');
// Create a new file
var gs = new GridStore(db, "test", "w");
// Open the file
gs.open().then(function(gs) {
// Write the file to GridFS
gs.write(data).then(function(gs) {
// Flush to the GridFS
gs.close().then(function(gs) {
// Define the file we wish to read
var gs2 = new GridStore(db, "test", "r");
// Open the file
gs2.open().then(function(gs) {
// Set the pointer of the read head to the start of the gridstored file
gs2.seek(0).then(function() {
// Read the entire file
gs2.read().then(function(data2) {
                  // Compare the file content against the original
test.equal(data.toString('base64'), data2.toString('base64'));
db.close();
test.done();
});
});
});
});
});
});
});
// END
}
}
/**
* A simple example showing opening a file using a filename, writing to it and saving it using a Promise.
*
* @example-class GridStore
* @example-method open
* @ignore
*/
exports.shouldCorrectlySaveSimpleFileToGridStoreUsingFilenameWithPromises = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var GridStore = configuration.require.GridStore
, ObjectID = configuration.require.ObjectID;
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE GridStore = require('mongodb').GridStore,
// LINE ObjectID = require('mongodb').ObjectID,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
// Create a new instance of the gridstore
var gridStore = new GridStore(db, 'ourexamplefiletowrite.txt', 'w');
// Open the file
gridStore.open().then(function(gridStore) {
// Write some data to the file
gridStore.write('bar').then(function(gridStore) {
// Close (Flushes the data to MongoDB)
gridStore.close().then(function(result) {
// Verify that the file exists
GridStore.exist(db, 'ourexamplefiletowrite.txt').then(function(result) {
test.equal(true, result);
db.close();
test.done();
});
});
});
});
});
// END
}
}
/**
* A simple example showing opening a file using an ObjectID, writing to it and saving it using a Promise.
*
* @example-class GridStore
* @example-method open
* @ignore
*/
exports.shouldCorrectlySaveSimpleFileToGridStoreUsingObjectIDWithPromises = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var GridStore = configuration.require.GridStore
, ObjectID = configuration.require.ObjectID;
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE GridStore = require('mongodb').GridStore,
// LINE ObjectID = require('mongodb').ObjectID,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
// Our file ID
var fileId = new ObjectID();
// Create a new instance of the gridstore
var gridStore = new GridStore(db, fileId, 'w');
// Open the file
gridStore.open().then(function(gridStore) {
// Write some data to the file
gridStore.write('bar').then(function(gridStore) {
// Close (Flushes the data to MongoDB)
gridStore.close().then(function(result) {
// Verify that the file exists
GridStore.exist(db, fileId).then(function(result) {
test.equal(true, result);
db.close();
test.done();
});
});
});
});
});
// END
}
}
/**
* A simple example showing how to write a file to Gridstore using file location path using a Promise.
*
* @example-class GridStore
* @example-method writeFile
* @ignore
*/
exports.shouldCorrectlySaveSimpleFileToGridStoreUsingWriteFileWithPromises = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var GridStore = configuration.require.GridStore
, ObjectID = configuration.require.ObjectID;
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE GridStore = require('mongodb').GridStore,
// LINE ObjectID = require('mongodb').ObjectID,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
// Our file ID
var fileId = new ObjectID();
// Open a new file
var gridStore = new GridStore(db, fileId, 'w');
      // Read the file size of the file on disk (provide your own)
var fileSize = fs.statSync('./test/functional/data/test_gs_weird_bug.png').size;
      // Read the buffered data for comparison purposes
var data = fs.readFileSync('./test/functional/data/test_gs_weird_bug.png');
// Open the new file
gridStore.open().then(function(gridStore) {
// Write the file to gridFS
gridStore.writeFile('./test/functional/data/test_gs_weird_bug.png').then(function(doc) {
// Read back all the written content and verify the correctness
GridStore.read(db, fileId).then(function(fileData) {
test.equal(data.toString('base64'), fileData.toString('base64'))
test.equal(fileSize, fileData.length);
db.close();
test.done();
});
});
});
});
// END
}
}
/**
* A simple example showing how to write a file to Gridstore using a file handle using a Promise.
*
* @example-class GridStore
* @example-method writeFile
* @ignore
*/
exports.shouldCorrectlySaveSimpleFileToGridStoreUsingWriteFileWithHandleWithPromises = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var GridStore = configuration.require.GridStore
, ObjectID = configuration.require.ObjectID;
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE GridStore = require('mongodb').GridStore,
// LINE ObjectID = require('mongodb').ObjectID,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
// Our file ID
var fileId = new ObjectID();
// Open a new file
var gridStore = new GridStore(db, fileId, 'w');
      // Read the file size of the file on disk (provide your own)
var fileSize = fs.statSync('./test/functional/data/test_gs_weird_bug.png').size;
      // Read the buffered data for comparison purposes
var data = fs.readFileSync('./test/functional/data/test_gs_weird_bug.png');
// Open a file handle for reading the file
var fd = fs.openSync('./test/functional/data/test_gs_weird_bug.png', 'r', parseInt('0666',8));
// Open the new file
gridStore.open().then(function(gridStore) {
// Write the file to gridFS using the file handle
gridStore.writeFile(fd).then(function(doc) {
// Read back all the written content and verify the correctness
GridStore.read(db, fileId).then(function(fileData) {
test.equal(data.toString('base64'), fileData.toString('base64'));
test.equal(fileSize, fileData.length);
db.close();
test.done();
});
});
});
});
// END
}
}
/**
* A simple example showing how to use the write command with strings and Buffers using a Promise.
*
* @example-class GridStore
* @example-method write
* @ignore
*/
exports.shouldCorrectlySaveSimpleFileToGridStoreUsingWriteWithStringsAndBuffersWithPromises = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var GridStore = configuration.require.GridStore
, ObjectID = configuration.require.ObjectID;
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE GridStore = require('mongodb').GridStore,
// LINE ObjectID = require('mongodb').ObjectID,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
// Our file ID
var fileId = new ObjectID();
// Open a new file
var gridStore = new GridStore(db, fileId, 'w');
// Open the new file
gridStore.open().then(function(gridStore) {
// Write a text string
gridStore.write('Hello world').then(function(gridStore) {
// Write a buffer
gridStore.write(new Buffer('Buffer Hello world')).then(function(gridStore) {
            // Close the file (flushes the data to MongoDB)
gridStore.close().then(function(result) {
// Read back all the written content and verify the correctness
GridStore.read(db, fileId).then(function(fileData) {
test.equal('Hello worldBuffer Hello world', fileData.toString());
db.close();
test.done();
});
});
});
});
});
});
// END
}
}
/**
* A simple example showing how to use the write command with strings and Buffers using a Promise.
*
* @example-class GridStore
* @example-method close
* @ignore
*/
exports.shouldCorrectlySaveSimpleFileToGridStoreUsingCloseWithPromises = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var GridStore = configuration.require.GridStore
, ObjectID = configuration.require.ObjectID;
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE GridStore = require('mongodb').GridStore,
// LINE ObjectID = require('mongodb').ObjectID,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
// Our file ID
var fileId = new ObjectID();
// Open a new file
var gridStore = new GridStore(db, fileId, 'w');
// Open the new file
gridStore.open().then(function(gridStore) {
// Write a text string
gridStore.write('Hello world').then(function(gridStore) {
          // Close the file (flushes the data to MongoDB)
gridStore.close().then(function(result) {
db.close();
test.done();
});
});
});
});
// END
}
}
/**
* A simple example showing how to use the instance level unlink command to delete a gridstore item using a Promise.
*
* @example-class GridStore
* @example-method unlink
* @ignore
*/
exports.shouldCorrectlySaveSimpleFileToGridStoreUsingCloseAndThenUnlinkItWithPromises = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var GridStore = configuration.require.GridStore
, ObjectID = configuration.require.ObjectID;
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE GridStore = require('mongodb').GridStore,
// LINE ObjectID = require('mongodb').ObjectID,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
// Our file ID
var fileId = new ObjectID();
// Open a new file
var gridStore = new GridStore(db, fileId, 'w');
// Open the new file
gridStore.open().then(function(gridStore) {
// Write a text string
gridStore.write('Hello world').then(function(gridStore) {
          // Close the file (flushes the data to MongoDB)
gridStore.close().then(function(result) {
            // Open the file again and unlink it
new GridStore(db, fileId, 'r').open().then(function(gridStore) {
// Unlink the file
gridStore.unlink().then(function(result) {
// Verify that the file no longer exists
GridStore.exist(db, fileId).then(function(result) {
test.equal(false, result);
db.close();
test.done();
});
});
});
});
});
});
});
// END
}
}
/**
* A simple example showing reading back using readlines to split the text into lines by the separator provided using a Promise.
*
* @example-class GridStore
* @example-method GridStore.readlines
* @ignore
*/
exports.shouldCorrectlyPutACoupleOfLinesInGridStoreAndUseReadlinesWithPromises = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var GridStore = configuration.require.GridStore
, ObjectID = configuration.require.ObjectID;
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE GridStore = require('mongodb').GridStore,
// LINE ObjectID = require('mongodb').ObjectID,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
// Our file ID
var fileId = new ObjectID();
// Open a new file
var gridStore = new GridStore(db, fileId, 'w');
// Open the new file
gridStore.open().then(function(gridStore) {
// Write one line to gridStore
gridStore.puts("line one").then(function(gridStore) {
// Write second line to gridStore
gridStore.puts("line two").then(function(gridStore) {
// Write third line to gridStore
gridStore.puts("line three").then(function(gridStore) {
// Flush file to disk
gridStore.close().then(function(result) {
// Read back all the lines
GridStore.readlines(db, fileId).then(function(lines) {
test.deepEqual(["line one\n", "line two\n", "line three\n"], lines);
db.close();
test.done();
});
});
});
});
});
});
});
// END
}
}
/**
* A simple example showing reading back using readlines to split the text into lines by the separator provided using a Promise.
*
* @example-class GridStore
* @example-method readlines
* @ignore
*/
exports.shouldCorrectlyPutACoupleOfLinesInGridStoreAndUseInstanceReadlinesWithPromises = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var GridStore = configuration.require.GridStore
, ObjectID = configuration.require.ObjectID;
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE GridStore = require('mongodb').GridStore,
// LINE ObjectID = require('mongodb').ObjectID,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
// Our file ID
var fileId = new ObjectID();
// Open a new file
var gridStore = new GridStore(db, fileId, 'w');
// Open the new file
gridStore.open().then(function(gridStore) {
// Write one line to gridStore
gridStore.puts("line one").then(function(gridStore) {
// Write second line to gridStore
gridStore.puts("line two").then(function(gridStore) {
// Write third line to gridStore
gridStore.puts("line three").then(function(gridStore) {
// Flush file to disk
gridStore.close().then(function(result) {
// Open file for reading
gridStore = new GridStore(db, fileId, 'r');
gridStore.open().then(function(gridStore) {
// Read all the lines and verify correctness
gridStore.readlines().then(function(lines) {
test.deepEqual(["line one\n", "line two\n", "line three\n"], lines);
db.close();
test.done();
});
});
});
});
});
});
});
});
// END
}
}
/**
* A simple example showing the usage of the read method using a Promise.
*
* @example-class GridStore
* @example-method GridStore.read
* @ignore
*/
exports.shouldCorrectlyPutACoupleOfLinesInGridStoreReadWithPromises = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var GridStore = configuration.require.GridStore
, ObjectID = configuration.require.ObjectID;
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE GridStore = require('mongodb').GridStore,
// LINE ObjectID = require('mongodb').ObjectID,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
// Create a new file
var gridStore = new GridStore(db, null, "w");
// Read in the content from a file, replace with your own
var data = fs.readFileSync("./test/functional/data/test_gs_weird_bug.png");
// Open the file
gridStore.open().then(function(gridStore) {
// Write the binary file data to GridFS
gridStore.write(data).then(function(gridStore) {
// Flush the remaining data to GridFS
gridStore.close().then(function(result) {
// Read in the whole file and check that it's the same content
GridStore.read(db, result._id).then(function(fileData) {
test.equal(data.length, fileData.length);
db.close();
test.done();
});
});
});
});
});
// END
}
}
/**
* A simple example showing the usage of the seek method using a Promise.
*
* @example-class GridStore
* @example-method seek
* @ignore
*/
exports.shouldCorrectlySeekWithBufferWithPromises = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var GridStore = configuration.require.GridStore;
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE GridStore = require('mongodb').GridStore,
// LINE ObjectID = require('mongodb').ObjectID,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
// Create a file and open it
var gridStore = new GridStore(db, "test_gs_seek_with_buffer", "w");
gridStore.open().then(function(gridStore) {
// Write some content to the file
gridStore.write(new Buffer("hello, world!", "utf8")).then(function(gridStore) {
// Flush the file to GridFS
gridStore.close().then(function(result) {
// Open the file in read mode
var gridStore2 = new GridStore(db, "test_gs_seek_with_buffer", "r");
gridStore2.open().then(function(gridStore) {
// Seek to start
gridStore.seek(0).then(function(gridStore) {
// Read first character and verify
gridStore.getc().then(function(chr) {
test.equal('h', chr);
});
});
});
// Open the file in read mode
var gridStore3 = new GridStore(db, "test_gs_seek_with_buffer", "r");
gridStore3.open().then(function(gridStore) {
              // Seek to 7 characters from the beginning of the file and verify
gridStore.seek(7).then(function(gridStore) {
gridStore.getc().then(function(chr) {
test.equal('w', chr);
});
});
});
// Open the file in read mode
var gridStore5 = new GridStore(db, "test_gs_seek_with_buffer", "r");
gridStore5.open().then(function(gridStore) {
              // Seek to -1 characters from the end of the file and verify
gridStore.seek(-1, GridStore.IO_SEEK_END).then(function(gridStore) {
gridStore.getc().then(function(chr) {
test.equal('!', chr);
});
});
});
// Open the file in read mode
var gridStore6 = new GridStore(db, "test_gs_seek_with_buffer", "r");
gridStore6.open().then(function(gridStore) {
              // Seek to -6 characters from the end of the file and verify
gridStore.seek(-6, GridStore.IO_SEEK_END).then(function(gridStore) {
gridStore.getc().then(function(chr) {
test.equal('w', chr);
});
});
});
// Open the file in read mode
var gridStore7 = new GridStore(db, "test_gs_seek_with_buffer", "r");
gridStore7.open().then(function(gridStore) {
// Seek forward 7 characters from the current read position and verify
gridStore.seek(7, GridStore.IO_SEEK_CUR).then(function(gridStore) {
gridStore.getc().then(function(chr) {
test.equal('w', chr);
// Seek forward -1 characters from the current read position and verify
gridStore.seek(-1, GridStore.IO_SEEK_CUR).then(function(gridStore) {
gridStore.getc().then(function(chr) {
test.equal('w', chr);
// Seek forward -4 characters from the current read position and verify
gridStore.seek(-4, GridStore.IO_SEEK_CUR).then(function(gridStore) {
gridStore.getc().then(function(chr) {
test.equal('o', chr);
// Seek forward 3 characters from the current read position and verify
gridStore.seek(3, GridStore.IO_SEEK_CUR).then(function(gridStore) {
gridStore.getc().then(function(chr) {
test.equal('o', chr);
db.close();
test.done();
});
});
});
});
});
});
});
});
});
});
});
});
});
// END
}
}
/**
* A simple example showing how to rewind and overwrite the file using a Promise.
*
* @example-class GridStore
* @example-method rewind
* @ignore
*/
exports.shouldCorrectlyRewingAndTruncateOnWriteWithPromises = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var GridStore = configuration.require.GridStore
, ObjectID = configuration.require.ObjectID;
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE GridStore = require('mongodb').GridStore,
// LINE ObjectID = require('mongodb').ObjectID,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
// Our file ID
var fileId = new ObjectID();
// Create a new file
var gridStore = new GridStore(db, fileId, "w");
// Open the file
gridStore.open().then(function(gridStore) {
// Write to the file
gridStore.write("hello, world!").then(function(gridStore) {
// Flush the file to disk
gridStore.close().then(function(result) {
// Reopen the file
gridStore = new GridStore(db, fileId, "w");
gridStore.open().then(function(gridStore) {
// Write some more text to the file
gridStore.write('some text is inserted here').then(function(gridStore) {
// Let's rewind to truncate the file
gridStore.rewind().then(function(gridStore) {
// Write something from the start
gridStore.write('abc').then(function(gridStore) {
// Flush the data to mongodb
gridStore.close().then(function(result) {
// Verify that the new data was written
GridStore.read(db, fileId).then(function(data) {
test.equal("abc", data);
db.close();
test.done();
});
});
});
});
});
});
});
});
});
});
// END
}
}
/**
* A simple example showing the usage of the tell method using a Promise.
*
* @example-class GridStore
* @example-method tell
* @ignore
*/
exports.shouldCorrectlyExecuteGridstoreTellWithPromises = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var GridStore = configuration.require.GridStore;
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE GridStore = require('mongodb').GridStore,
// LINE ObjectID = require('mongodb').ObjectID,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
// Create a new file
var gridStore = new GridStore(db, "test_gs_tell", "w");
// Open the file
gridStore.open().then(function(gridStore) {
// Write a string to the file
gridStore.write("hello, world!").then(function(gridStore) {
// Flush the file to GridFS
gridStore.close().then(function(result) {
// Open the file in read only mode
var gridStore2 = new GridStore(db, "test_gs_tell", "r");
gridStore2.open().then(function(gridStore) {
// Read the first 5 characters
gridStore.read(5).then(function(data) {
test.equal("hello", data);
// Get the current position of the read head
gridStore.tell().then(function(position) {
test.equal(5, position);
db.close();
test.done();
});
});
});
});
});
});
});
// END
}
}
/**
* A simple example showing the usage of the seek method using a Promise.
*
* @example-class GridStore
* @example-method getc
* @ignore
*/
exports.shouldCorrectlyRetrieveSingleCharacterUsingGetCWithPromises = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var GridStore = configuration.require.GridStore;
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE GridStore = require('mongodb').GridStore,
// LINE ObjectID = require('mongodb').ObjectID,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
// Create a file and open it
var gridStore = new GridStore(db, "test_gs_getc_file", "w");
gridStore.open().then(function(gridStore) {
// Write some content to the file
gridStore.write(new Buffer("hello, world!", "utf8")).then(function(gridStore) {
// Flush the file to GridFS
gridStore.close().then(function(result) {
// Open the file in read mode
var gridStore2 = new GridStore(db, "test_gs_getc_file", "r");
gridStore2.open().then(function(gridStore) {
// Read first character and verify
gridStore.getc().then(function(chr) {
test.equal('h', chr);
db.close();
test.done();
});
});
});
});
});
});
// END
}
}
/**
* A simple example showing how to save a file with a filename allowing for multiple files with the same name using a Promise.
*
* @example-class GridStore
* @example-method open
* @ignore
*/
exports.shouldCorrectlyRetrieveSingleCharacterUsingGetCWithPromises = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var GridStore = configuration.require.GridStore
, ObjectID = configuration.require.ObjectID;
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE GridStore = require('mongodb').GridStore,
// LINE ObjectID = require('mongodb').ObjectID,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
// Create a file and open it
var gridStore = new GridStore(db, new ObjectID(), "test_gs_getc_file", "w");
gridStore.open().then(function(gridStore) {
// Write some content to the file
gridStore.write(new Buffer("hello, world!", "utf8")).then(function(gridStore) {
// Flush the file to GridFS
gridStore.close().then(function(fileData) {
            // Create another file with the same name and save content to it
gridStore = new GridStore(db, new ObjectID(), "test_gs_getc_file", "w");
gridStore.open().then(function(gridStore) {
// Write some content to the file
gridStore.write(new Buffer("hello, world!", "utf8")).then(function(gridStore) {
// Flush the file to GridFS
gridStore.close().then(function(fileData) {
// Open the file in read mode using the filename
var gridStore2 = new GridStore(db, "test_gs_getc_file", "r");
gridStore2.open().then(function(gridStore) {
// Read first character and verify
gridStore.getc().then(function(chr) {
test.equal('h', chr);
// Open the file using an object id
gridStore2 = new GridStore(db, fileData._id, "r");
gridStore2.open().then(function(gridStore) {
// Read first character and verify
gridStore.getc().then(function(chr) {
test.equal('h', chr);
db.close();
test.done();
})
});
});
});
});
});
});
});
});
});
});
// END
}
}
/**************************************************************************
*
* BULK TESTS
*
*************************************************************************/
/**
* Example of a simple ordered insert/update/upsert/remove ordered collection using a Promise.
*
* @example-class Collection
* @example-method initializeOrderedBulkOp
* @ignore
*/
exports['Should correctly execute ordered batch with no errors using write commands With Promises'] = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1, auto_reconnect:false});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
// Get the collection
var col = db.collection('batch_write_ordered_ops_0_with_promise');
// Initialize the Ordered Batch
var batch = col.initializeOrderedBulkOp();
// Add some operations to be executed in order
batch.insert({a:1});
batch.find({a:1}).updateOne({$set: {b:1}});
batch.find({a:2}).upsert().updateOne({$set: {b:2}});
batch.insert({a:3});
batch.find({a:3}).remove({a:3});
// Execute the operations
batch.execute().then(function(result) {
// Check state of result
test.equal(2, result.nInserted);
test.equal(1, result.nUpserted);
test.equal(1, result.nMatched);
test.ok(1 == result.nModified || result.nModified == 0 || result.nModified == null);
test.equal(1, result.nRemoved);
var upserts = result.getUpsertedIds();
test.equal(1, upserts.length);
test.equal(2, upserts[0].index);
test.ok(upserts[0]._id != null);
var upsert = result.getUpsertedIdAt(0);
test.equal(2, upsert.index);
test.ok(upsert._id != null);
// Finish up test
db.close();
test.done();
});
});
// END
}
}
/**
* Example of a simple ordered insert/update/upsert/remove ordered collection using a Promise.
*
*
* @example-class Collection
* @example-method initializeUnorderedBulkOp
* @ignore
*/
exports['Should correctly execute unordered batch with no errors With Promises'] = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1, auto_reconnect:false});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
// Get the collection
var col = db.collection('batch_write_unordered_ops_legacy_0_with_promise');
// Initialize the unordered Batch
var batch = col.initializeUnorderedBulkOp();
// Add some operations to be executed in order
batch.insert({a:1});
batch.find({a:1}).updateOne({$set: {b:1}});
batch.find({a:2}).upsert().updateOne({$set: {b:2}});
batch.insert({a:3});
batch.find({a:3}).remove({a:3});
// Execute the operations
batch.execute().then(function(result) {
// Check state of result
test.equal(2, result.nInserted);
test.equal(1, result.nUpserted);
test.equal(1, result.nMatched);
test.ok(1 == result.nModified || result.nModified == 0 || result.nModified == null);
test.equal(1, result.nRemoved);
var upserts = result.getUpsertedIds();
test.equal(1, upserts.length);
test.equal(2, upserts[0].index);
test.ok(upserts[0]._id != null);
var upsert = result.getUpsertedIdAt(0);
test.equal(2, upsert.index);
test.ok(upsert._id != null);
// Finish up test
db.close();
test.done();
});
});
// END
}
}
/**************************************************************************
*
* CRUD TESTS
*
*************************************************************************/
/**
* Example of a simple insertOne operation using a Promise.
*
* @example-class Collection
* @example-method insertOne
* @ignore
*/
exports['Should correctly execute insertOne operation With Promises'] = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1, auto_reconnect:false});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
// Get the collection
var col = db.collection('insert_one_with_promise');
col.insertOne({a:1}).then(function(r) {
test.equal(1, r.insertedCount);
// Finish up test
db.close();
test.done();
});
});
// END
}
}
/**
* Example of a simple insertMany operation using a Promise.
*
* @example-class Collection
* @example-method insertMany
* @ignore
*/
exports['Should correctly execute insertMany operation With Promises'] = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1, auto_reconnect:false});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
// Get the collection
var col = db.collection('insert_many_with_promise');
col.insertMany([{a:1}, {a:2}]).then(function(r) {
test.equal(2, r.insertedCount);
// Finish up test
db.close();
test.done();
});
});
// END
}
}
/**
* Example of a simple updateOne operation using a Promise.
*
* @example-class Collection
* @example-method updateOne
* @ignore
*/
exports['Should correctly execute updateOne operation With Promises'] = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1, auto_reconnect:false});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
// Get the collection
var col = db.collection('update_one_with_promise');
col.updateOne({a:1}
, {$set: {a:2}}
, {upsert:true}).then(function(r) {
test.equal(0, r.matchedCount);
test.equal(1, r.upsertedCount);
// Finish up test
db.close();
test.done();
});
});
// END
}
}
/**
* Example of a simple updateMany operation using a Promise.
*
* @example-class Collection
* @example-method updateMany
* @ignore
*/
exports['Should correctly execute updateMany operation With Promises'] = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1, auto_reconnect:false});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
// Get the collection
var col = db.collection('update_many_with_promise');
col.insertMany([{a:1}, {a:1}]).then(function(r) {
test.equal(2, r.insertedCount);
// Update all documents
col.updateMany({a:1}, {$set: {b: 1}}).then(function(r) {
if(r.n) {
test.equal(2, r.n);
} else {
test.equal(2, r.matchedCount);
test.equal(2, r.modifiedCount);
}
// Finish up test
db.close();
test.done();
});
});
});
// END
}
}
/**
* Example of a simple removeOne operation using a Promise.
*
* @example-class Collection
* @example-method removeOne
* @ignore
*/
exports['Should correctly execute removeOne operation With Promises'] = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1, auto_reconnect:false});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
// Get the collection
var col = db.collection('remove_one_with_promise');
col.insertMany([{a:1}, {a:1}]).then(function(r) {
test.equal(2, r.insertedCount);
col.removeOne({a:1}).then(function(r) {
test.equal(1, r.deletedCount);
// Finish up test
db.close();
test.done();
});
});
});
// END
}
}
/**
* Example of a simple removeMany operation using a Promise.
*
* @example-class Collection
* @example-method removeMany
* @ignore
*/
exports['Should correctly execute removeMany operation With Promises'] = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1, auto_reconnect:false});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
// Get the collection
var col = db.collection('remove_many_with_promise');
col.insertMany([{a:1}, {a:1}]).then(function(r) {
test.equal(2, r.insertedCount);
// Update all documents
col.removeMany({a:1}).then(function(r) {
test.equal(2, r.deletedCount);
// Finish up test
db.close();
test.done();
});
});
});
// END
}
}
/**
* Example of a simple bulkWrite operation using a Promise.
*
* @example-class Collection
* @example-method bulkWrite
* @ignore
*/
exports['Should correctly execute bulkWrite operation With Promises'] = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1, auto_reconnect:false});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
// Get the collection
var col = db.collection('bulk_write_with_promise');
col.bulkWrite([
{ insertOne: { document: { a: 1 } } }
, { updateOne: { filter: {a:2}, update: {$set: {a:2}}, upsert:true } }
, { updateMany: { filter: {a:2}, update: {$set: {a:2}}, upsert:true } }
, { deleteOne: { filter: {c:1} } }
, { deleteMany: { filter: {c:1} } }
, { replaceOne: { filter: {c:3}, replacement: {c:4}, upsert:true}}]
, {ordered:true, w:1}).then(function(r) {
// console.log(JSON.stringify(r, null, 2))
test.equal(1, r.nInserted);
test.equal(2, r.nUpserted);
test.equal(0, r.nRemoved);
// Crud fields
test.equal(1, r.insertedCount);
test.equal(1, Object.keys(r.insertedIds).length);
test.equal(1, r.matchedCount);
test.ok(r.modifiedCount == 0 || r.modifiedCount == 1);
test.equal(0, r.deletedCount);
test.equal(2, r.upsertedCount);
test.equal(2, Object.keys(r.upsertedIds).length);
// Ordered bulk operation
db.close();
test.done();
});
});
// END
}
}
/**
* Duplicate key error
*/
exports['Should correctly handle duplicate key error with bulkWrite'] = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1, auto_reconnect:false});
db.open().then(function(db) {
// Get the collection
var col = db.collection('bulk_write_with_promise_write_error');
col.bulkWrite([
{ insertOne: { document: { _id: 1 } } },
{ insertOne: { document: { _id: 1 } } }]
, {ordered:true, w:1}).then(function(r) {
test.equal(true, r.hasWriteErrors());
// Ordered bulk operation
db.close();
test.done();
});
});
}
}
/**
* Example of a simple findOneAndDelete operation using a Promise.
*
* @example-class Collection
* @example-method findOneAndDelete
* @ignore
*/
exports['Should correctly execute findOneAndDelete operation With Promises'] = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1, auto_reconnect:false});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
// Get the collection
var col = db.collection('find_one_and_delete_with_promise');
col.insertMany([{a:1, b:1}], {w:1}).then(function(r) {
test.equal(1, r.result.n);
col.findOneAndDelete({a:1}
, { projection: {b:1}, sort: {a:1} }
).then(function(r) {
test.equal(1, r.lastErrorObject.n);
test.equal(1, r.value.b);
db.close();
test.done();
});
});
});
// END
}
}
/**
* Example of a simple findOneAndReplace operation using a Promise.
*
* @example-class Collection
* @example-method findOneAndReplace
* @ignore
*/
exports['Should correctly execute findOneAndReplace operation With Promises'] = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1, auto_reconnect:false});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
// Get the collection
var col = db.collection('find_one_and_replace_with_promise');
col.insertMany([{a:1, b:1}], {w:1}).then(function(r) {
test.equal(1, r.result.n);
col.findOneAndReplace({a:1}
, {c:1, b:1}
, {
projection: {b:1, c:1}
, sort: {a:1}
, returnOriginal: false
, upsert: true
}
).then(function(r) {
test.equal(1, r.lastErrorObject.n);
test.equal(1, r.value.b);
test.equal(1, r.value.c);
db.close();
test.done();
});
});
});
// END
}
}
/**
* Example of a simple findOneAndUpdate operation using a Promise.
*
* @example-class Collection
* @example-method findOneAndUpdate
* @ignore
*/
exports['Should correctly execute findOneAndUpdate operation With Promises'] = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1, auto_reconnect:false});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
// Get the collection
var col = db.collection('find_one_and_update_with_promise');
col.insertMany([{a:1, b:1}], {w:1}).then(function(r) {
test.equal(1, r.result.n);
col.findOneAndUpdate({a:1}
, {$set: {d:1}}
, {
projection: {b:1, d:1}
, sort: {a:1}
, returnOriginal: false
, upsert: true
}
).then(function(r) {
test.equal(1, r.lastErrorObject.n);
test.equal(1, r.value.b);
test.equal(1, r.value.d);
db.close();
test.done();
});
});
});
// END
}
}
/**
* A simple example showing the listening to a capped collection using a Promise.
*
* @example-class Db
* @example-method createCollection
* @ignore
*/
exports['Should correctly add capped collection options to cursor With Promises'] = {
metadata: { requires: { promises:true, topology: ['single'] } },
// The actual test we wish to run
test: function(configuration, test) {
var db = configuration.newDbInstance(configuration.writeConcernMax(), {poolSize:1, auto_reconnect:false});
db.open().then(function(db) {
// LINE var MongoClient = require('mongodb').MongoClient,
// LINE test = require('assert');
// LINE MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
// REPLACE configuration.writeConcernMax() WITH {w:1}
// REMOVE-LINE test.done();
// BEGIN
// Create a capped collection with a maximum of 1000 documents
db.createCollection("a_simple_collection_2_with_promise", {capped:true, size:100000, max:10000, w:1}).then(function(collection) {
var docs = [];
for(var i = 0; i < 1000; i++) docs.push({a:i});
// Insert a document in the capped collection
collection.insertMany(docs, configuration.writeConcernMax()).then(function(result) {
// Start date
var s = new Date();
var total = 0;
// Get the cursor
var cursor = collection.find({a: {$gte:0}})
.addCursorFlag('tailable', true)
.addCursorFlag('awaitData', true)
cursor.on('data', function(d) {
total = total + 1;
if(total == 1000) {
cursor.kill();
}
});
cursor.on('end', function() {
console.dir(new Date().getTime() - s.getTime())
test.ok((new Date().getTime() - s.getTime()) > 1000);
db.close();
test.done();
});
});
});
});
// END
}
}
| 1 | 13,828 | nitpick: the only reason we have a catch for every one of these promises is because the test isn't properly chaining promises (returning them within each `then`). We could reduce the `catch` with `console.dir` to a single instance if that was done | mongodb-node-mongodb-native | js |
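A minimal sketch of what this reviewer is suggesting (not the repository's actual code): returning each promise from its `then` callback keeps the chain flat, so a single trailing `catch` with the `console.dir` the reviewer mentions covers every step instead of one handler per nesting level. The `db`, `GridStore`, `ObjectID`, and `test` objects are assumed to come from the surrounding test harness.

```js
// Hypothetical flattened version of one of the GridStore test flows above.
var gridStore = new GridStore(db, new ObjectID(), 'foobar2', 'w');
gridStore.open()
  .then(function(gridStore) { return gridStore.write('hello world!'); })
  .then(function(gridStore) { return gridStore.close(); })
  .then(function(result) { return GridStore.list(db); })
  .then(function(items) {
    test.ok(items.indexOf('foobar2') !== -1);
    db.close();
    test.done();
  })
  .catch(function(err) {
    // One catch covers every step of the chain instead of one per nesting level.
    console.dir(err);
  });
```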
@@ -85,6 +85,8 @@ TEST(NebulaCodec, encode) {
// check empty values
std::vector<boost::any> emptyV;
std::string emptyEncoded = codec.encode(emptyV);
+
+
SchemaWriter emptyWriter;
auto emptySchema = std::make_shared<ResultSchemaProvider>(emptyWriter.moveSchema());
auto emptyReader = RowReader::getRowReader(emptyEncoded, emptySchema); | 1 | /* Copyright (c) 2018 vesoft inc. All rights reserved.
*
* This source code is licensed under Apache 2.0 License,
* attached with Common Clause Condition 1.0, found in the LICENSES directory.
*/
#include <map>
#include "base/Base.h"
#include <gtest/gtest.h>
#include "dataman/RowReader.h"
#include "dataman/SchemaWriter.h"
#include "dataman/NebulaCodecImpl.h"
namespace nebula {
TEST(NebulaCodec, encode) {
std::vector<boost::any> v;
v.emplace_back(1);
v.emplace_back(false);
v.emplace_back(3.14F);
v.emplace_back(3.14);
v.emplace_back(std::string("hi"));
EXPECT_EQ(boost::any_cast<int>(v[0]), 1);
EXPECT_EQ(boost::any_cast<bool>(v[1]), false);
EXPECT_EQ(boost::any_cast<float>(v[2]), 3.14F);
EXPECT_EQ(boost::any_cast<double>(v[3]), 3.14);
EXPECT_EQ(boost::any_cast<std::string>(v[4]), "hi");
SchemaWriter schemaWriter;
schemaWriter.appendCol("i_field", cpp2::SupportedType::INT);
schemaWriter.appendCol("b_field", cpp2::SupportedType::BOOL);
schemaWriter.appendCol("f_field", cpp2::SupportedType::FLOAT);
schemaWriter.appendCol("d_field", cpp2::SupportedType::DOUBLE);
schemaWriter.appendCol("s_field", cpp2::SupportedType::STRING);
auto schema = std::make_shared<ResultSchemaProvider>(schemaWriter.moveSchema());
dataman::NebulaCodecImpl codec;
std::string encoded = codec.encode(v, schema);
auto reader = RowReader::getRowReader(encoded, schema);
EXPECT_EQ(5, reader->numFields());
// check int field
int32_t iVal;
EXPECT_EQ(ResultType::SUCCEEDED, reader->getInt(0, iVal));
EXPECT_EQ(1, iVal);
iVal = 0;
EXPECT_EQ(ResultType::SUCCEEDED, reader->getInt("i_field", iVal));
EXPECT_EQ(1, iVal);
// check bool field
bool bVal;
EXPECT_EQ(ResultType::SUCCEEDED, reader->getBool(1, bVal));
EXPECT_FALSE(bVal);
bVal = true;
EXPECT_EQ(ResultType::SUCCEEDED, reader->getBool("b_field", bVal));
EXPECT_FALSE(bVal);
// check float field
float fVal;
EXPECT_EQ(ResultType::SUCCEEDED, reader->getFloat(2, fVal));
EXPECT_FLOAT_EQ(3.14, fVal);
fVal = 0.0;
EXPECT_EQ(ResultType::SUCCEEDED, reader->getFloat("f_field", fVal));
EXPECT_FLOAT_EQ(3.14, fVal);
// check double field
double dVal;
EXPECT_EQ(ResultType::SUCCEEDED, reader->getDouble(3, dVal));
EXPECT_DOUBLE_EQ(3.14, dVal);
dVal = 0.0;
EXPECT_EQ(ResultType::SUCCEEDED, reader->getDouble("d_field", dVal));
EXPECT_DOUBLE_EQ(3.14, dVal);
// check string field
folly::StringPiece sVal;
EXPECT_EQ(ResultType::SUCCEEDED, reader->getString(4, sVal));
EXPECT_EQ("hi", sVal.toString());
sVal.clear();
EXPECT_EQ(ResultType::SUCCEEDED, reader->getString("s_field", sVal));
EXPECT_EQ("hi", sVal.toString());
// check empty values
std::vector<boost::any> emptyV;
std::string emptyEncoded = codec.encode(emptyV);
SchemaWriter emptyWriter;
auto emptySchema = std::make_shared<ResultSchemaProvider>(emptyWriter.moveSchema());
auto emptyReader = RowReader::getRowReader(emptyEncoded, emptySchema);
EXPECT_EQ(0, emptyReader->numFields());
}
TEST(NebulaCodec, decode) {
std::string encoded;
// Single byte header (Schema version is 0, no offset)
encoded.append(1, 0x00);
// bool column
encoded.append(1, 0x01);
// int column
uint8_t buffer[10];
size_t i_size = folly::encodeVarint(64, buffer);
encoded.append(reinterpret_cast<char*>(buffer), i_size);
// vid column
int64_t vid = 0x1122334455667788L;
encoded.append(reinterpret_cast<char*>(&vid), sizeof(int64_t));
// float column
float pi = 3.14F;
encoded.append(reinterpret_cast<char*>(&pi), sizeof(float));
// double column
double e = 2.718;
encoded.append(reinterpret_cast<char*>(&e), sizeof(double));
// string column
const char* str_value = "Hello World!";
size_t s_size = folly::encodeVarint(strlen(str_value), buffer);
encoded.append(reinterpret_cast<char*>(buffer), s_size);
encoded.append(str_value, strlen(str_value));
SchemaWriter schemaWriter;
schemaWriter.appendCol("b_field", cpp2::SupportedType::BOOL);
schemaWriter.appendCol("i_field", cpp2::SupportedType::INT);
schemaWriter.appendCol("v_field", cpp2::SupportedType::VID);
schemaWriter.appendCol("f_field", cpp2::SupportedType::FLOAT);
schemaWriter.appendCol("d_field", cpp2::SupportedType::DOUBLE);
schemaWriter.appendCol("s_field", cpp2::SupportedType::STRING);
auto schema = std::make_shared<ResultSchemaProvider>(schemaWriter.moveSchema());
dataman::NebulaCodecImpl codec;
auto result = codec.decode(encoded, schema);
EXPECT_TRUE(boost::any_cast<bool>(result.value()["b_field"]));
EXPECT_EQ(boost::any_cast<int>(result.value()["i_field"]), 64);
EXPECT_EQ(boost::any_cast<int64_t>(result.value()["v_field"]), 0x1122334455667788L);
EXPECT_EQ(boost::any_cast<float>(result.value()["f_field"]), 3.14F);
EXPECT_EQ(boost::any_cast<double>(result.value()["d_field"]), 2.718);
EXPECT_EQ(boost::any_cast<std::string>(result.value()["s_field"]), "Hello World!");
// check empty encoded string
auto empty_encoded = codec.decode("", schema);
ASSERT_FALSE(empty_encoded.ok());
ASSERT_FALSE(empty_encoded.status().ok());
ASSERT_EQ("encoded string is empty", empty_encoded.status().toString());
// check empty schema
auto empty_schema = codec.decode(encoded, nullptr);
ASSERT_FALSE(empty_schema.ok());
ASSERT_FALSE(empty_schema.status().ok());
ASSERT_EQ("schema is not set", empty_schema.status().toString());
}
} // namespace nebula
int main(int argc, char** argv) {
testing::InitGoogleTest(&argc, argv);
folly::init(&argc, &argv, true);
google::SetStderrLogging(google::INFO);
return RUN_ALL_TESTS();
}
| 1 | 20,117 | why do this? | vesoft-inc-nebula | cpp |
@@ -17,7 +17,7 @@ class MediaLibrary extends SystemMediaLibrary
*/
protected function init()
{
- traceLog('Class Cms\Classes\MediaLibrary has been deprecated, use System\Classes\MediaLibrary instead.');
+ traceLog('Class Cms\Classes\MediaLibrary has been deprecated, use ' . SystemMediaLibrary::class . ' instead.');
parent::init();
}
} | 1 | <?php namespace Cms\Classes;
use System\Classes\MediaLibrary as SystemMediaLibrary;
/**
* Provides abstraction level for the Media Library operations.
* Implements the library caching features and security checks.
*
* @package october\cms
* @author Alexey Bobkov, Samuel Georges
* @deprecated Use System\Classes\MediaLibrary. Remove if year >= 2020.
*/
class MediaLibrary extends SystemMediaLibrary
{
/**
* Initialize this singleton.
*/
protected function init()
{
traceLog('Class Cms\Classes\MediaLibrary has been deprecated, use System\Classes\MediaLibrary instead.');
parent::init();
}
}
| 1 | 12,999 | For consistency, would it not be best to also replace the first class name? `traceLog('Class ' . static::class . ' has been deprecated, use ' . SystemMediaLibrary::class . ' instead.')` | octobercms-october | php |
@@ -123,6 +123,9 @@ type PrometheusSpec struct {
// The labels to add to any time series or alerts when communicating with
// external systems (federation, remote storage, Alertmanager).
ExternalLabels map[string]string `json:"externalLabels,omitempty"`
+ // Enable access to prometheus web admin API. Defaults to the value of `true`.
+ // For more information see https://prometheus.io/docs/prometheus/latest/querying/api/#tsdb-admin-apis
+ EnableAdminApi string `json:"enableAdminApi,omitempty"`
// The external URL the Prometheus instances will be available under. This is
// necessary to generate correct URLs. This is necessary if Prometheus is not
// served from root of a DNS name. | 1 | // Copyright 2018 The prometheus-operator Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v1
import (
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/intstr"
)
const (
Version = "v1"
PrometheusesKind = "Prometheus"
PrometheusName = "prometheuses"
PrometheusKindKey = "prometheus"
AlertmanagersKind = "Alertmanager"
AlertmanagerName = "alertmanagers"
AlertManagerKindKey = "alertmanager"
ServiceMonitorsKind = "ServiceMonitor"
ServiceMonitorName = "servicemonitors"
ServiceMonitorKindKey = "servicemonitor"
PrometheusRuleKind = "PrometheusRule"
PrometheusRuleName = "prometheusrules"
PrometheusRuleKindKey = "prometheusrule"
)
// Prometheus defines a Prometheus deployment.
// +genclient
// +k8s:openapi-gen=true
type Prometheus struct {
metav1.TypeMeta `json:",inline"`
// Standard object’s metadata. More info:
// https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#metadata
// +k8s:openapi-gen=false
metav1.ObjectMeta `json:"metadata,omitempty"`
// Specification of the desired behavior of the Prometheus cluster. More info:
// https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#spec-and-status
Spec PrometheusSpec `json:"spec"`
// Most recent observed status of the Prometheus cluster. Read-only. Not
// included when requesting from the apiserver, only from the Prometheus
// Operator API itself. More info:
// https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#spec-and-status
Status *PrometheusStatus `json:"status,omitempty"`
}
// PrometheusList is a list of Prometheuses.
// +k8s:openapi-gen=true
type PrometheusList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata
// More info: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#metadata
metav1.ListMeta `json:"metadata,omitempty"`
// List of Prometheuses
Items []*Prometheus `json:"items"`
}
// PrometheusSpec is a specification of the desired behavior of the Prometheus cluster. More info:
// https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#spec-and-status
// +k8s:openapi-gen=true
type PrometheusSpec struct {
// Standard object’s metadata. More info:
// https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#metadata
// Metadata Labels and Annotations gets propagated to the prometheus pods.
PodMetadata *metav1.ObjectMeta `json:"podMetadata,omitempty"`
// ServiceMonitors to be selected for target discovery.
ServiceMonitorSelector *metav1.LabelSelector `json:"serviceMonitorSelector,omitempty"`
// Namespaces to be selected for ServiceMonitor discovery. If nil, only
// check own namespace.
ServiceMonitorNamespaceSelector *metav1.LabelSelector `json:"serviceMonitorNamespaceSelector,omitempty"`
// Version of Prometheus to be deployed.
Version string `json:"version,omitempty"`
// Tag of Prometheus container image to be deployed. Defaults to the value of `version`.
// Version is ignored if Tag is set.
Tag string `json:"tag,omitempty"`
// SHA of Prometheus container image to be deployed. Defaults to the value of `version`.
// Similar to a tag, but the SHA explicitly deploys an immutable container image.
// Version and Tag are ignored if SHA is set.
SHA string `json:"sha,omitempty"`
// When a Prometheus deployment is paused, no actions except for deletion
// will be performed on the underlying objects.
Paused bool `json:"paused,omitempty"`
// Image if specified has precedence over baseImage, tag and sha
// combinations. Specifying the version is still necessary to ensure the
// Prometheus Operator knows what version of Prometheus is being
// configured.
Image *string `json:"image,omitempty"`
// Base image to use for a Prometheus deployment.
BaseImage string `json:"baseImage,omitempty"`
// An optional list of references to secrets in the same namespace
// to use for pulling prometheus and alertmanager images from registries
// see http://kubernetes.io/docs/user-guide/images#specifying-imagepullsecrets-on-a-pod
ImagePullSecrets []v1.LocalObjectReference `json:"imagePullSecrets,omitempty"`
// Number of instances to deploy for a Prometheus deployment.
Replicas *int32 `json:"replicas,omitempty"`
// Time duration Prometheus shall retain data for. Default is '24h',
// and must match the regular expression `[0-9]+(ms|s|m|h|d|w|y)` (milliseconds seconds minutes hours days weeks years).
Retention string `json:"retention,omitempty"`
// Log level for Prometheus to be configured with.
LogLevel string `json:"logLevel,omitempty"`
// Interval between consecutive scrapes.
ScrapeInterval string `json:"scrapeInterval,omitempty"`
// Interval between consecutive evaluations.
EvaluationInterval string `json:"evaluationInterval,omitempty"`
// /--rules.*/ command-line arguments.
Rules Rules `json:"rules,omitempty"`
// The labels to add to any time series or alerts when communicating with
// external systems (federation, remote storage, Alertmanager).
ExternalLabels map[string]string `json:"externalLabels,omitempty"`
// The external URL the Prometheus instances will be available under. This is
// necessary to generate correct URLs. This is necessary if Prometheus is not
// served from root of a DNS name.
ExternalURL string `json:"externalUrl,omitempty"`
// The route prefix Prometheus registers HTTP handlers for. This is useful,
// if using ExternalURL and a proxy is rewriting HTTP routes of a request,
// and the actual ExternalURL is still true, but the server serves requests
// under a different route prefix. For example for use with `kubectl proxy`.
RoutePrefix string `json:"routePrefix,omitempty"`
// QuerySpec defines the query command line flags when starting Prometheus.
Query *QuerySpec `json:"query,omitempty"`
// Storage spec to specify how storage shall be used.
Storage *StorageSpec `json:"storage,omitempty"`
// A selector to select which PrometheusRules to mount for loading alerting
// rules from. Until (excluding) Prometheus Operator v0.24.0 Prometheus
// Operator will migrate any legacy rule ConfigMaps to PrometheusRule custom
// resources selected by RuleSelector. Make sure it does not match any config
// maps that you do not want to be migrated.
RuleSelector *metav1.LabelSelector `json:"ruleSelector,omitempty"`
// Namespaces to be selected for PrometheusRules discovery. If unspecified, only
// the same namespace as the Prometheus object is in is used.
RuleNamespaceSelector *metav1.LabelSelector `json:"ruleNamespaceSelector,omitempty"`
// Define details regarding alerting.
Alerting *AlertingSpec `json:"alerting,omitempty"`
// Define resources requests and limits for single Pods.
Resources v1.ResourceRequirements `json:"resources,omitempty"`
// Define which Nodes the Pods are scheduled on.
NodeSelector map[string]string `json:"nodeSelector,omitempty"`
// ServiceAccountName is the name of the ServiceAccount to use to run the
// Prometheus Pods.
ServiceAccountName string `json:"serviceAccountName,omitempty"`
// Secrets is a list of Secrets in the same namespace as the Prometheus
// object, which shall be mounted into the Prometheus Pods.
// The Secrets are mounted into /etc/prometheus/secrets/<secret-name>.
Secrets []string `json:"secrets,omitempty"`
// ConfigMaps is a list of ConfigMaps in the same namespace as the Prometheus
// object, which shall be mounted into the Prometheus Pods.
// The ConfigMaps are mounted into /etc/prometheus/configmaps/<configmap-name>.
ConfigMaps []string `json:"configMaps,omitempty"`
// If specified, the pod's scheduling constraints.
Affinity *v1.Affinity `json:"affinity,omitempty"`
// If specified, the pod's tolerations.
Tolerations []v1.Toleration `json:"tolerations,omitempty"`
// If specified, the remote_write spec. This is an experimental feature, it may change in any upcoming release in a breaking way.
RemoteWrite []RemoteWriteSpec `json:"remoteWrite,omitempty"`
// If specified, the remote_read spec. This is an experimental feature, it may change in any upcoming release in a breaking way.
RemoteRead []RemoteReadSpec `json:"remoteRead,omitempty"`
// SecurityContext holds pod-level security attributes and common container settings.
// This defaults to non root user with uid 1000 and gid 2000 for Prometheus >v2.0 and
// default PodSecurityContext for other versions.
SecurityContext *v1.PodSecurityContext `json:"securityContext,omitempty"`
// ListenLocal makes the Prometheus server listen on loopback, so that it
// does not bind against the Pod IP.
ListenLocal bool `json:"listenLocal,omitempty"`
// Containers allows injecting additional containers. This is meant to
// allow adding an authentication proxy to a Prometheus pod.
Containers []v1.Container `json:"containers,omitempty"`
// AdditionalScrapeConfigs allows specifying a key of a Secret containing
// additional Prometheus scrape configurations. Scrape configurations
// specified are appended to the configurations generated by the Prometheus
// Operator. Job configurations specified must have the form as specified
// in the official Prometheus documentation:
// https://prometheus.io/docs/prometheus/latest/configuration/configuration/#<scrape_config>.
// As scrape configs are appended, the user is responsible to make sure it
// is valid. Note that using this feature may expose the possibility to
// break upgrades of Prometheus. It is advised to review Prometheus release
// notes to ensure that no incompatible scrape configs are going to break
// Prometheus after the upgrade.
AdditionalScrapeConfigs *v1.SecretKeySelector `json:"additionalScrapeConfigs,omitempty"`
// AdditionalAlertRelabelConfigs allows specifying a key of a Secret containing
// additional Prometheus alert relabel configurations. Alert relabel configurations
// specified are appended to the configurations generated by the Prometheus
// Operator. Alert relabel configurations specified must have the form as specified
// in the official Prometheus documentation:
// https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alert_relabel_configs.
// As alert relabel configs are appended, the user is responsible to make sure it
// is valid. Note that using this feature may expose the possibility to
// break upgrades of Prometheus. It is advised to review Prometheus release
// notes to ensure that no incompatible alert relabel configs are going to break
// Prometheus after the upgrade.
AdditionalAlertRelabelConfigs *v1.SecretKeySelector `json:"additionalAlertRelabelConfigs,omitempty"`
// AdditionalAlertManagerConfigs allows specifying a key of a Secret containing
// additional Prometheus AlertManager configurations. AlertManager configurations
// specified are appended to the configurations generated by the Prometheus
// Operator. Job configurations specified must have the form as specified
// in the official Prometheus documentation:
// https://prometheus.io/docs/prometheus/latest/configuration/configuration/#<alertmanager_config>.
// As AlertManager configs are appended, the user is responsible to make sure it
// is valid. Note that using this feature may expose the possibility to
// break upgrades of Prometheus. It is advised to review Prometheus release
// notes to ensure that no incompatible AlertManager configs are going to break
// Prometheus after the upgrade.
AdditionalAlertManagerConfigs *v1.SecretKeySelector `json:"additionalAlertManagerConfigs,omitempty"`
// APIServerConfig allows specifying a host and auth methods to access apiserver.
// If left empty, Prometheus is assumed to run inside of the cluster
// and will discover API servers automatically and use the pod's CA certificate
// and bearer token file at /var/run/secrets/kubernetes.io/serviceaccount/.
APIServerConfig *APIServerConfig `json:"apiserverConfig,omitempty"`
// Thanos configuration allows configuring various aspects of a Prometheus
// server in a Thanos environment.
//
// This section is experimental, it may change significantly without
// deprecation notice in any release.
//
// This is experimental and may change significantly without backward
// compatibility in any release.
Thanos *ThanosSpec `json:"thanos,omitempty"`
// Priority class assigned to the Pods
PriorityClassName string `json:"priorityClassName,omitempty"`
}
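// examplePrometheusSpec is an illustrative, non-authoritative sketch of how a
// few of the fields documented above fit together; the interval, retention,
// label values and route prefix are hypothetical placeholders, not
// recommendations.
var examplePrometheusSpec = PrometheusSpec{
	Retention:          "24h",
	ScrapeInterval:     "30s",
	EvaluationInterval: "30s",
	ExternalLabels:     map[string]string{"cluster": "example-cluster"},
	RoutePrefix:        "/prometheus",
}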
// PrometheusStatus is the most recent observed status of the Prometheus cluster. Read-only. Not
// included when requesting from the apiserver, only from the Prometheus
// Operator API itself. More info:
// https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#spec-and-status
// +k8s:openapi-gen=true
type PrometheusStatus struct {
	// Represents whether any actions on the underlying managed objects are
	// being performed. Only delete actions will be performed.
Paused bool `json:"paused"`
// Total number of non-terminated pods targeted by this Prometheus deployment
// (their labels match the selector).
Replicas int32 `json:"replicas"`
// Total number of non-terminated pods targeted by this Prometheus deployment
// that have the desired version spec.
UpdatedReplicas int32 `json:"updatedReplicas"`
// Total number of available pods (ready for at least minReadySeconds)
// targeted by this Prometheus deployment.
AvailableReplicas int32 `json:"availableReplicas"`
// Total number of unavailable pods targeted by this Prometheus deployment.
UnavailableReplicas int32 `json:"unavailableReplicas"`
}
// AlertingSpec defines parameters for alerting configuration of Prometheus servers.
// +k8s:openapi-gen=true
type AlertingSpec struct {
// AlertmanagerEndpoints Prometheus should fire alerts against.
Alertmanagers []AlertmanagerEndpoints `json:"alertmanagers"`
}
// StorageSpec defines the configured storage for a group Prometheus servers.
// If neither `emptyDir` nor `volumeClaimTemplate` is specified, then by default an [EmptyDir](https://kubernetes.io/docs/concepts/storage/volumes/#emptydir) will be used.
// +k8s:openapi-gen=true
type StorageSpec struct {
// EmptyDirVolumeSource to be used by the Prometheus StatefulSets. If specified, used in place of any volumeClaimTemplate. More
// info: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir
EmptyDir *v1.EmptyDirVolumeSource `json:"emptyDir,omitempty"`
// A PVC spec to be used by the Prometheus StatefulSets.
VolumeClaimTemplate v1.PersistentVolumeClaim `json:"volumeClaimTemplate,omitempty"`
}
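// exampleEmptyDirStorage is an illustrative sketch only: it picks the emptyDir
// variant described above and leaves volumeClaimTemplate unset, so no
// persistent volume is requested.
var exampleEmptyDirStorage = StorageSpec{
	EmptyDir: &v1.EmptyDirVolumeSource{},
}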
// QuerySpec defines the query command line flags when starting Prometheus.
// +k8s:openapi-gen=true
type QuerySpec struct {
// The delta difference allowed for retrieving metrics during expression evaluations.
LookbackDelta *string `json:"lookbackDelta,omitempty"`
// Number of concurrent queries that can be run at once.
MaxConcurrency *int32 `json:"maxConcurrency,omitempty"`
// Maximum time a query may take before being aborted.
Timeout *string `json:"timeout,omitempty"`
}
// ThanosSpec defines parameters for a Prometheus server within a Thanos deployment.
// +k8s:openapi-gen=true
type ThanosSpec struct {
// Peers is a DNS name for Thanos to discover peers through.
Peers *string `json:"peers,omitempty"`
// Image if specified has precedence over baseImage, tag and sha
// combinations. Specifying the version is still necessary to ensure the
// Prometheus Operator knows what version of Thanos is being
// configured.
Image *string `json:"image,omitempty"`
// Version describes the version of Thanos to use.
Version *string `json:"version,omitempty"`
// Tag of Thanos sidecar container image to be deployed. Defaults to the value of `version`.
// Version is ignored if Tag is set.
Tag *string `json:"tag,omitempty"`
// SHA of Thanos container image to be deployed. Defaults to the value of `version`.
// Similar to a tag, but the SHA explicitly deploys an immutable container image.
// Version and Tag are ignored if SHA is set.
SHA *string `json:"sha,omitempty"`
// Thanos base image if other than default.
BaseImage *string `json:"baseImage,omitempty"`
// Resources defines the resource requirements for the Thanos sidecar.
// If not provided, no requests/limits will be set
Resources v1.ResourceRequirements `json:"resources,omitempty"`
// Deprecated: GCS should be configured with an ObjectStorageConfig secret
// starting with Thanos v0.2.0. This field will be removed.
GCS *ThanosGCSSpec `json:"gcs,omitempty"`
// Deprecated: S3 should be configured with an ObjectStorageConfig secret
// starting with Thanos v0.2.0. This field will be removed.
S3 *ThanosS3Spec `json:"s3,omitempty"`
// ObjectStorageConfig configures object storage in Thanos.
ObjectStorageConfig *v1.SecretKeySelector `json:"objectStorageConfig,omitempty"`
}
// Deprecated: ThanosGCSSpec should be configured with an ObjectStorageConfig
// secret starting with Thanos v0.2.0. ThanosGCSSpec will be removed.
//
// +k8s:openapi-gen=true
type ThanosGCSSpec struct {
// Google Cloud Storage bucket name for stored blocks. If empty it won't
// store any block inside Google Cloud Storage.
Bucket *string `json:"bucket,omitempty"`
// Secret to access our Bucket.
SecretKey *v1.SecretKeySelector `json:"credentials,omitempty"`
}
// Deprecated: ThanosS3Spec should be configured with an ObjectStorageConfig
// secret starting with Thanos v0.2.0. ThanosS3Spec will be removed.
//
// +k8s:openapi-gen=true
type ThanosS3Spec struct {
// S3-Compatible API bucket name for stored blocks.
Bucket *string `json:"bucket,omitempty"`
// S3-Compatible API endpoint for stored blocks.
Endpoint *string `json:"endpoint,omitempty"`
// AccessKey for an S3-Compatible API.
AccessKey *v1.SecretKeySelector `json:"accessKey,omitempty"`
// SecretKey for an S3-Compatible API.
SecretKey *v1.SecretKeySelector `json:"secretKey,omitempty"`
// Whether to use an insecure connection with an S3-Compatible API.
Insecure *bool `json:"insecure,omitempty"`
// Whether to use S3 Signature Version 2; otherwise Signature Version 4 will be used.
SignatureVersion2 *bool `json:"signatureVersion2,omitempty"`
// Whether to use Server Side Encryption
EncryptSSE *bool `json:"encryptsse,omitempty"`
}
// RemoteWriteSpec defines the remote_write configuration for prometheus.
// +k8s:openapi-gen=true
type RemoteWriteSpec struct {
//The URL of the endpoint to send samples to.
URL string `json:"url"`
//Timeout for requests to the remote write endpoint.
RemoteTimeout string `json:"remoteTimeout,omitempty"`
//The list of remote write relabel configurations.
WriteRelabelConfigs []RelabelConfig `json:"writeRelabelConfigs,omitempty"`
//BasicAuth for the URL.
BasicAuth *BasicAuth `json:"basicAuth,omitempty"`
// File to read bearer token for remote write.
BearerToken string `json:"bearerToken,omitempty"`
// File to read bearer token for remote write.
BearerTokenFile string `json:"bearerTokenFile,omitempty"`
// TLS Config to use for remote write.
TLSConfig *TLSConfig `json:"tlsConfig,omitempty"`
//Optional ProxyURL
ProxyURL string `json:"proxyUrl,omitempty"`
// QueueConfig allows tuning of the remote write queue parameters.
QueueConfig *QueueConfig `json:"queueConfig,omitempty"`
}
// QueueConfig allows the tuning of remote_write queue_config parameters. This object
// is referenced in the RemoteWriteSpec object.
// +k8s:openapi-gen=true
type QueueConfig struct {
// Capacity is the number of samples to buffer per shard before we start dropping them.
Capacity int `json:"capacity,omitempty"`
// MinShards is the minimum number of shards, i.e. amount of concurrency.
MinShards int `json:"minShards,omitempty"`
// MaxShards is the maximum number of shards, i.e. amount of concurrency.
MaxShards int `json:"maxShards,omitempty"`
// MaxSamplesPerSend is the maximum number of samples per send.
MaxSamplesPerSend int `json:"maxSamplesPerSend,omitempty"`
// BatchSendDeadline is the maximum time a sample will wait in buffer.
BatchSendDeadline string `json:"batchSendDeadline,omitempty"`
// MaxRetries is the maximum number of times to retry a batch on recoverable errors.
MaxRetries int `json:"maxRetries,omitempty"`
// MinBackoff is the initial retry delay. Gets doubled for every retry.
MinBackoff string `json:"minBackoff,omitempty"`
// MaxBackoff is the maximum retry delay.
MaxBackoff string `json:"maxBackoff,omitempty"`
}
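// exampleQueueConfig is an illustrative sketch; the numbers below are
// hypothetical starting points, not tuned recommendations.
var exampleQueueConfig = QueueConfig{
	Capacity:          10000,
	MinShards:         1,
	MaxShards:         30,
	MaxSamplesPerSend: 500,
	BatchSendDeadline: "5s",
	MaxRetries:        3,
	MinBackoff:        "30ms",
	MaxBackoff:        "100ms",
}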
// RemoteReadSpec defines the remote_read configuration for prometheus.
// +k8s:openapi-gen=true
type RemoteReadSpec struct {
	//The URL of the endpoint to read samples from.
URL string `json:"url"`
//An optional list of equality matchers which have to be present
// in a selector to query the remote read endpoint.
RequiredMatchers map[string]string `json:"requiredMatchers,omitempty"`
//Timeout for requests to the remote read endpoint.
RemoteTimeout string `json:"remoteTimeout,omitempty"`
//Whether reads should be made for queries for time ranges that
// the local storage should have complete data for.
ReadRecent bool `json:"readRecent,omitempty"`
//BasicAuth for the URL.
BasicAuth *BasicAuth `json:"basicAuth,omitempty"`
// bearer token for remote read.
BearerToken string `json:"bearerToken,omitempty"`
// File to read bearer token for remote read.
BearerTokenFile string `json:"bearerTokenFile,omitempty"`
// TLS Config to use for remote read.
TLSConfig *TLSConfig `json:"tlsConfig,omitempty"`
//Optional ProxyURL
ProxyURL string `json:"proxyUrl,omitempty"`
}
// RelabelConfig allows dynamic rewriting of the label set, being applied to samples before ingestion.
// It defines `<metric_relabel_configs>`-section of Prometheus configuration.
// More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs
// +k8s:openapi-gen=true
type RelabelConfig struct {
//The source labels select values from existing labels. Their content is concatenated
//using the configured separator and matched against the configured regular expression
//for the replace, keep, and drop actions.
SourceLabels []string `json:"sourceLabels,omitempty"`
//Separator placed between concatenated source label values. default is ';'.
Separator string `json:"separator,omitempty"`
//Label to which the resulting value is written in a replace action.
//It is mandatory for replace actions. Regex capture groups are available.
TargetLabel string `json:"targetLabel,omitempty"`
	//Regular expression against which the extracted value is matched. Default is '(.*)'
Regex string `json:"regex,omitempty"`
// Modulus to take of the hash of the source label values.
Modulus uint64 `json:"modulus,omitempty"`
//Replacement value against which a regex replace is performed if the
//regular expression matches. Regex capture groups are available. Default is '$1'
Replacement string `json:"replacement,omitempty"`
// Action to perform based on regex matching. Default is 'replace'
Action string `json:"action,omitempty"`
}
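// exampleDropRelabeling is an illustrative sketch (not part of the upstream
// API surface): a rule of this shape would drop any target whose "app" pod
// label matches "test"; the label name and value are hypothetical.
var exampleDropRelabeling = RelabelConfig{
	SourceLabels: []string{"__meta_kubernetes_pod_label_app"},
	Regex:        "test",
	Action:       "drop",
}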
// APIServerConfig defines a host and auth methods to access apiserver.
// More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#kubernetes_sd_config
// +k8s:openapi-gen=true
type APIServerConfig struct {
// Host of apiserver.
// A valid string consisting of a hostname or IP followed by an optional port number
Host string `json:"host"`
// BasicAuth allow an endpoint to authenticate over basic authentication
BasicAuth *BasicAuth `json:"basicAuth,omitempty"`
// Bearer token for accessing apiserver.
BearerToken string `json:"bearerToken,omitempty"`
// File to read bearer token for accessing apiserver.
BearerTokenFile string `json:"bearerTokenFile,omitempty"`
// TLS Config to use for accessing apiserver.
TLSConfig *TLSConfig `json:"tlsConfig,omitempty"`
}
// AlertmanagerEndpoints defines a selection of a single Endpoints object
// containing alertmanager IPs to fire alerts against.
// +k8s:openapi-gen=true
type AlertmanagerEndpoints struct {
// Namespace of Endpoints object.
Namespace string `json:"namespace"`
// Name of Endpoints object in Namespace.
Name string `json:"name"`
// Port the Alertmanager API is exposed on.
Port intstr.IntOrString `json:"port"`
// Scheme to use when firing alerts.
Scheme string `json:"scheme,omitempty"`
// Prefix for the HTTP path alerts are pushed to.
PathPrefix string `json:"pathPrefix,omitempty"`
// TLS Config to use for alertmanager connection.
TLSConfig *TLSConfig `json:"tlsConfig,omitempty"`
// BearerTokenFile to read from filesystem to use when authenticating to
// Alertmanager.
BearerTokenFile string `json:"bearerTokenFile,omitempty"`
}
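// exampleAlertmanagerEndpoints is an illustrative sketch pointing Prometheus at
// an Endpoints object named "alertmanager-main" in the "monitoring" namespace;
// both names and the port name are hypothetical.
var exampleAlertmanagerEndpoints = AlertmanagerEndpoints{
	Namespace: "monitoring",
	Name:      "alertmanager-main",
	Port:      intstr.FromString("web"),
	Scheme:    "http",
}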
// ServiceMonitor defines monitoring for a set of services.
// +genclient
// +k8s:openapi-gen=true
type ServiceMonitor struct {
metav1.TypeMeta `json:",inline"`
// Standard object’s metadata. More info:
// https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#metadata
// +k8s:openapi-gen=false
metav1.ObjectMeta `json:"metadata,omitempty"`
	// Specification of desired Service selection for target discovery by
// Prometheus.
Spec ServiceMonitorSpec `json:"spec"`
}
// ServiceMonitorSpec contains specification parameters for a ServiceMonitor.
// +k8s:openapi-gen=true
type ServiceMonitorSpec struct {
// The label to use to retrieve the job name from.
JobLabel string `json:"jobLabel,omitempty"`
// TargetLabels transfers labels on the Kubernetes Service onto the target.
TargetLabels []string `json:"targetLabels,omitempty"`
// PodTargetLabels transfers labels on the Kubernetes Pod onto the target.
PodTargetLabels []string `json:"podTargetLabels,omitempty"`
// A list of endpoints allowed as part of this ServiceMonitor.
Endpoints []Endpoint `json:"endpoints"`
// Selector to select Endpoints objects.
Selector metav1.LabelSelector `json:"selector"`
// Selector to select which namespaces the Endpoints objects are discovered from.
NamespaceSelector NamespaceSelector `json:"namespaceSelector,omitempty"`
// SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
SampleLimit uint64 `json:"sampleLimit,omitempty"`
}
// Endpoint defines a scrapeable endpoint serving Prometheus metrics.
// +k8s:openapi-gen=true
type Endpoint struct {
// Name of the service port this endpoint refers to. Mutually exclusive with targetPort.
Port string `json:"port,omitempty"`
// Name or number of the target port of the endpoint. Mutually exclusive with port.
TargetPort *intstr.IntOrString `json:"targetPort,omitempty"`
// HTTP path to scrape for metrics.
Path string `json:"path,omitempty"`
// HTTP scheme to use for scraping.
Scheme string `json:"scheme,omitempty"`
// Optional HTTP URL parameters
Params map[string][]string `json:"params,omitempty"`
// Interval at which metrics should be scraped
Interval string `json:"interval,omitempty"`
// Timeout after which the scrape is ended
ScrapeTimeout string `json:"scrapeTimeout,omitempty"`
// TLS configuration to use when scraping the endpoint
TLSConfig *TLSConfig `json:"tlsConfig,omitempty"`
// File to read bearer token for scraping targets.
BearerTokenFile string `json:"bearerTokenFile,omitempty"`
// HonorLabels chooses the metric's labels on collisions with target labels.
HonorLabels bool `json:"honorLabels,omitempty"`
// BasicAuth allow an endpoint to authenticate over basic authentication
// More info: https://prometheus.io/docs/operating/configuration/#endpoints
BasicAuth *BasicAuth `json:"basicAuth,omitempty"`
// MetricRelabelConfigs to apply to samples before ingestion.
MetricRelabelConfigs []*RelabelConfig `json:"metricRelabelings,omitempty"`
// RelabelConfigs to apply to samples before ingestion.
// More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#<relabel_config>
RelabelConfigs []*RelabelConfig `json:"relabelings,omitempty"`
// ProxyURL eg http://proxyserver:2195 Directs scrapes to proxy through this endpoint.
ProxyURL *string `json:"proxyUrl,omitempty"`
}
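// exampleEndpoint is an illustrative sketch of a scrape endpoint on a named
// service port; the port name, path and interval are hypothetical.
var exampleEndpoint = Endpoint{
	Port:     "web",
	Path:     "/metrics",
	Interval: "30s",
}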
// BasicAuth allow an endpoint to authenticate over basic authentication
// More info: https://prometheus.io/docs/operating/configuration/#endpoints
// +k8s:openapi-gen=true
type BasicAuth struct {
	// The secret that contains the username for authentication
Username v1.SecretKeySelector `json:"username,omitempty"`
	// The secret that contains the password for authentication
Password v1.SecretKeySelector `json:"password,omitempty"`
}
// TLSConfig specifies TLS configuration parameters.
// +k8s:openapi-gen=true
type TLSConfig struct {
// The CA cert to use for the targets.
CAFile string `json:"caFile,omitempty"`
// The client cert file for the targets.
CertFile string `json:"certFile,omitempty"`
// The client key file for the targets.
KeyFile string `json:"keyFile,omitempty"`
// Used to verify the hostname for the targets.
ServerName string `json:"serverName,omitempty"`
// Disable target certificate validation.
InsecureSkipVerify bool `json:"insecureSkipVerify,omitempty"`
}
// ServiceMonitorList is a list of ServiceMonitors.
// +k8s:openapi-gen=true
type ServiceMonitorList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata
// More info: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#metadata
metav1.ListMeta `json:"metadata,omitempty"`
// List of ServiceMonitors
Items []*ServiceMonitor `json:"items"`
}
// PrometheusRuleList is a list of PrometheusRules.
// +k8s:openapi-gen=true
type PrometheusRuleList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata
// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
metav1.ListMeta `json:"metadata,omitempty"`
// List of Rules
Items []*PrometheusRule `json:"items"`
}
// PrometheusRule defines alerting rules for a Prometheus instance
// +genclient
// +k8s:openapi-gen=true
type PrometheusRule struct {
metav1.TypeMeta `json:",inline"`
// Standard object’s metadata. More info:
// http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
metav1.ObjectMeta `json:"metadata,omitempty"`
// Specification of desired alerting rule definitions for Prometheus.
Spec PrometheusRuleSpec `json:"spec"`
}
// PrometheusRuleSpec contains specification parameters for a Rule.
// +k8s:openapi-gen=true
type PrometheusRuleSpec struct {
// Content of Prometheus rule file
Groups []RuleGroup `json:"groups,omitempty"`
}
// RuleGroup and Rule are copied instead of vendored because the
// upstream Prometheus struct definitions don't have json struct tags.
// RuleGroup is a list of sequentially evaluated recording and alerting rules.
// +k8s:openapi-gen=true
type RuleGroup struct {
Name string `json:"name"`
Interval string `json:"interval,omitempty"`
Rules []Rule `json:"rules"`
}
// Rule describes an alerting or recording rule.
// +k8s:openapi-gen=true
type Rule struct {
Record string `json:"record,omitempty"`
Alert string `json:"alert,omitempty"`
Expr intstr.IntOrString `json:"expr"`
For string `json:"for,omitempty"`
Labels map[string]string `json:"labels,omitempty"`
Annotations map[string]string `json:"annotations,omitempty"`
}
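// exampleRuleGroup is an illustrative sketch of a group holding one alerting
// rule; the group name, alert name, expression and threshold are hypothetical.
var exampleRuleGroup = RuleGroup{
	Name:     "example.rules",
	Interval: "30s",
	Rules: []Rule{
		{
			Alert:  "ExampleHighErrorRate",
			Expr:   intstr.FromString("rate(errors_total[5m]) > 0.1"),
			For:    "10m",
			Labels: map[string]string{"severity": "warning"},
		},
	},
}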
// Alertmanager describes an Alertmanager cluster.
// +genclient
// +k8s:openapi-gen=true
type Alertmanager struct {
metav1.TypeMeta `json:",inline"`
// Standard object’s metadata. More info:
// https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#metadata
// +k8s:openapi-gen=false
metav1.ObjectMeta `json:"metadata,omitempty"`
// Specification of the desired behavior of the Alertmanager cluster. More info:
// https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#spec-and-status
Spec AlertmanagerSpec `json:"spec"`
// Most recent observed status of the Alertmanager cluster. Read-only. Not
// included when requesting from the apiserver, only from the Prometheus
// Operator API itself. More info:
// https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#spec-and-status
Status *AlertmanagerStatus `json:"status,omitempty"`
}
// AlertmanagerSpec is a specification of the desired behavior of the Alertmanager cluster. More info:
// https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#spec-and-status
// +k8s:openapi-gen=true
type AlertmanagerSpec struct {
// Standard object’s metadata. More info:
// https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#metadata
// Metadata Labels and Annotations gets propagated to the prometheus pods.
PodMetadata *metav1.ObjectMeta `json:"podMetadata,omitempty"`
// Image if specified has precedence over baseImage, tag and sha
// combinations. Specifying the version is still necessary to ensure the
// Prometheus Operator knows what version of Alertmanager is being
// configured.
Image *string `json:"image,omitempty"`
// Version the cluster should be on.
Version string `json:"version,omitempty"`
// Tag of Alertmanager container image to be deployed. Defaults to the value of `version`.
// Version is ignored if Tag is set.
Tag string `json:"tag,omitempty"`
// SHA of Alertmanager container image to be deployed. Defaults to the value of `version`.
// Similar to a tag, but the SHA explicitly deploys an immutable container image.
// Version and Tag are ignored if SHA is set.
SHA string `json:"sha,omitempty"`
// Base image that is used to deploy pods, without tag.
BaseImage string `json:"baseImage,omitempty"`
// An optional list of references to secrets in the same namespace
// to use for pulling prometheus and alertmanager images from registries
// see http://kubernetes.io/docs/user-guide/images#specifying-imagepullsecrets-on-a-pod
ImagePullSecrets []v1.LocalObjectReference `json:"imagePullSecrets,omitempty"`
// Secrets is a list of Secrets in the same namespace as the Alertmanager
// object, which shall be mounted into the Alertmanager Pods.
// The Secrets are mounted into /etc/alertmanager/secrets/<secret-name>.
Secrets []string `json:"secrets,omitempty"`
// ConfigMaps is a list of ConfigMaps in the same namespace as the Alertmanager
// object, which shall be mounted into the Alertmanager Pods.
// The ConfigMaps are mounted into /etc/alertmanager/configmaps/<configmap-name>.
ConfigMaps []string `json:"configMaps,omitempty"`
// Log level for Alertmanager to be configured with.
LogLevel string `json:"logLevel,omitempty"`
// Size is the expected size of the alertmanager cluster. The controller will
// eventually make the size of the running cluster equal to the expected
// size.
Replicas *int32 `json:"replicas,omitempty"`
// Time duration Alertmanager shall retain data for. Default is '120h',
// and must match the regular expression `[0-9]+(ms|s|m|h)` (milliseconds seconds minutes hours).
Retention string `json:"retention,omitempty"`
// Storage is the definition of how storage will be used by the Alertmanager
// instances.
Storage *StorageSpec `json:"storage,omitempty"`
// The external URL the Alertmanager instances will be available under. This is
// necessary to generate correct URLs. This is necessary if Alertmanager is not
// served from root of a DNS name.
ExternalURL string `json:"externalUrl,omitempty"`
// The route prefix Alertmanager registers HTTP handlers for. This is useful,
// if using ExternalURL and a proxy is rewriting HTTP routes of a request,
// and the actual ExternalURL is still true, but the server serves requests
// under a different route prefix. For example for use with `kubectl proxy`.
RoutePrefix string `json:"routePrefix,omitempty"`
	// If set to true all actions on the underlying managed objects are not
	// going to be performed, except for delete actions.
Paused bool `json:"paused,omitempty"`
// Define which Nodes the Pods are scheduled on.
NodeSelector map[string]string `json:"nodeSelector,omitempty"`
// Define resources requests and limits for single Pods.
Resources v1.ResourceRequirements `json:"resources,omitempty"`
// If specified, the pod's scheduling constraints.
Affinity *v1.Affinity `json:"affinity,omitempty"`
// If specified, the pod's tolerations.
Tolerations []v1.Toleration `json:"tolerations,omitempty"`
// SecurityContext holds pod-level security attributes and common container settings.
// This defaults to non root user with uid 1000 and gid 2000.
SecurityContext *v1.PodSecurityContext `json:"securityContext,omitempty"`
// ServiceAccountName is the name of the ServiceAccount to use to run the
// Prometheus Pods.
ServiceAccountName string `json:"serviceAccountName,omitempty"`
// ListenLocal makes the Alertmanager server listen on loopback, so that it
// does not bind against the Pod IP. Note this is only for the Alertmanager
// UI, not the gossip communication.
ListenLocal bool `json:"listenLocal,omitempty"`
// Containers allows injecting additional containers. This is meant to
// allow adding an authentication proxy to an Alertmanager pod.
Containers []v1.Container `json:"containers,omitempty"`
// Priority class assigned to the Pods
PriorityClassName string `json:"priorityClassName,omitempty"`
// AdditionalPeers allows injecting a set of additional Alertmanagers to peer with to form a highly available cluster.
AdditionalPeers []string `json:"additionalPeers,omitempty"`
}
// AlertmanagerList is a list of Alertmanagers.
// +k8s:openapi-gen=true
type AlertmanagerList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata
// More info: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#metadata
metav1.ListMeta `json:"metadata,omitempty"`
// List of Alertmanagers
Items []Alertmanager `json:"items"`
}
// AlertmanagerStatus is the most recent observed status of the Alertmanager cluster. Read-only. Not
// included when requesting from the apiserver, only from the Prometheus
// Operator API itself. More info:
// https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#spec-and-status
// +k8s:openapi-gen=true
type AlertmanagerStatus struct {
	// Represents whether any actions on the underlying managed objects are
	// being performed. Only delete actions will be performed.
Paused bool `json:"paused"`
// Total number of non-terminated pods targeted by this Alertmanager
// cluster (their labels match the selector).
Replicas int32 `json:"replicas"`
// Total number of non-terminated pods targeted by this Alertmanager
// cluster that have the desired version spec.
UpdatedReplicas int32 `json:"updatedReplicas"`
// Total number of available pods (ready for at least minReadySeconds)
// targeted by this Alertmanager cluster.
AvailableReplicas int32 `json:"availableReplicas"`
// Total number of unavailable pods targeted by this Alertmanager cluster.
UnavailableReplicas int32 `json:"unavailableReplicas"`
}
// NamespaceSelector is a selector for selecting either all namespaces or a
// list of namespaces.
// +k8s:openapi-gen=true
type NamespaceSelector struct {
// Boolean describing whether all namespaces are selected in contrast to a
// list restricting them.
Any bool `json:"any,omitempty"`
// List of namespace names.
MatchNames []string `json:"matchNames,omitempty"`
// TODO(fabxc): this should embed metav1.LabelSelector eventually.
// Currently the selector is only used for namespaces which require more complex
// implementation to support label selections.
}
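// exampleNamespaceSelector is an illustrative sketch restricting discovery to
// two hypothetical namespaces; setting Any to true instead would select all
// namespaces and make MatchNames irrelevant.
var exampleNamespaceSelector = NamespaceSelector{
	MatchNames: []string{"default", "monitoring"},
}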
// /--rules.*/ command-line arguments
// +k8s:openapi-gen=true
type Rules struct {
Alert RulesAlert `json:"alert,omitempty"`
}
// /--rules.alert.*/ command-line arguments
// +k8s:openapi-gen=true
type RulesAlert struct {
// Max time to tolerate prometheus outage for restoring 'for' state of alert.
ForOutageTolerance string `json:"forOutageTolerance,omitempty"`
// Minimum duration between alert and restored 'for' state.
// This is maintained only for alerts with configured 'for' time greater than grace period.
ForGracePeriod string `json:"forGracePeriod,omitempty"`
// Minimum amount of time to wait before resending an alert to Alertmanager.
ResendDelay string `json:"resendDelay,omitempty"`
}
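// exampleRules is an illustrative sketch of the /--rules.alert.*/ settings
// described above; the durations are hypothetical.
var exampleRules = Rules{
	Alert: RulesAlert{
		ForOutageTolerance: "1h",
		ForGracePeriod:     "10m",
		ResendDelay:        "1m",
	},
}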
// DeepCopyObject implements the runtime.Object interface.
func (l *Alertmanager) DeepCopyObject() runtime.Object {
return l.DeepCopy()
}
// DeepCopyObject implements the runtime.Object interface.
func (l *AlertmanagerList) DeepCopyObject() runtime.Object {
return l.DeepCopy()
}
// DeepCopyObject implements the runtime.Object interface.
func (l *Prometheus) DeepCopyObject() runtime.Object {
return l.DeepCopy()
}
// DeepCopyObject implements the runtime.Object interface.
func (l *PrometheusList) DeepCopyObject() runtime.Object {
return l.DeepCopy()
}
// DeepCopyObject implements the runtime.Object interface.
func (l *ServiceMonitor) DeepCopyObject() runtime.Object {
return l.DeepCopy()
}
// DeepCopyObject implements the runtime.Object interface.
func (l *ServiceMonitorList) DeepCopyObject() runtime.Object {
return l.DeepCopy()
}
// DeepCopyObject implements the runtime.Object interface.
func (f *PrometheusRule) DeepCopyObject() runtime.Object {
return f.DeepCopy()
}
// DeepCopyObject implements the runtime.Object interface.
func (l *PrometheusRuleList) DeepCopyObject() runtime.Object {
return l.DeepCopy()
}
| 1 | 12,166 | this should be `EnabledAdminAPI` and the json string should then be `enableAdminAPI` | prometheus-operator-prometheus-operator | go |
@@ -4,6 +4,8 @@ from listenbrainz import webserver
from werkzeug.serving import run_simple
import os
import click
+import sqlalchemy
+from time import sleep
from listenbrainz.utils import safely_import_config
safely_import_config() | 1 | from listenbrainz import db
from listenbrainz.db import timescale as ts
from listenbrainz import webserver
from werkzeug.serving import run_simple
import os
import click
from listenbrainz.utils import safely_import_config
safely_import_config()
@click.group()
def cli():
pass
ADMIN_SQL_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'admin', 'sql')
MSB_ADMIN_SQL_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'admin', 'messybrainz', 'sql')
TIMESCALE_SQL_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'admin', 'timescale')
@cli.command(name="run_api_compat_server")
@click.option("--host", "-h", default="0.0.0.0", show_default=True)
@click.option("--port", "-p", default=8080, show_default=True)
@click.option("--debug", "-d", is_flag=True,
help="Turns debugging mode on or off. If specified, overrides "
"'DEBUG' value in the config file.")
def run_api_compat_server(host, port, debug=False):
application = webserver.create_api_compat_app()
run_simple(
hostname=host,
port=port,
application=application,
use_debugger=debug,
use_reloader=debug,
processes=5
)
@cli.command(name="run_websockets")
@click.option("--host", "-h", default="0.0.0.0", show_default=True)
@click.option("--port", "-p", default=8082, show_default=True)
@click.option("--debug", "-d", is_flag=True,
help="Turns debugging mode on or off. If specified, overrides "
"'DEBUG' value in the config file.")
def run_websockets(host, port, debug=True):
from listenbrainz.websockets.websockets import run_websockets
run_websockets(host=host, port=port, debug=debug)
@cli.command(name="init_db")
@click.option("--force", "-f", is_flag=True, help="Drop existing database and user.")
@click.option("--create-db", is_flag=True, help="Create the database and user.")
def init_db(force, create_db):
"""Initializes database.
This process involves several steps:
1. Table structure is created.
2. Primary keys and foreign keys are created.
3. Indexes are created.
"""
from listenbrainz import config
db.init_db_connection(config.POSTGRES_ADMIN_URI)
if force:
res = db.run_sql_script_without_transaction(os.path.join(ADMIN_SQL_DIR, 'drop_db.sql'))
if not res:
raise Exception('Failed to drop existing database and user! Exit code: %i' % res)
if create_db or force:
print('PG: Creating user and a database...')
res = db.run_sql_script_without_transaction(os.path.join(ADMIN_SQL_DIR, 'create_db.sql'))
if not res:
raise Exception('Failed to create new database and user! Exit code: %i' % res)
db.init_db_connection(config.POSTGRES_ADMIN_LB_URI)
print('PG: Creating database extensions...')
res = db.run_sql_script_without_transaction(os.path.join(ADMIN_SQL_DIR, 'create_extensions.sql'))
# Don't raise an exception if the extension already exists
application = webserver.create_app()
with application.app_context():
print('PG: Creating schema...')
db.run_sql_script(os.path.join(ADMIN_SQL_DIR, 'create_schema.sql'))
print('PG: Creating Types...')
db.run_sql_script(os.path.join(ADMIN_SQL_DIR, 'create_types.sql'))
print('PG: Creating tables...')
db.run_sql_script(os.path.join(ADMIN_SQL_DIR, 'create_tables.sql'))
print('PG: Creating primary and foreign keys...')
db.run_sql_script(os.path.join(ADMIN_SQL_DIR, 'create_primary_keys.sql'))
db.run_sql_script(os.path.join(ADMIN_SQL_DIR, 'create_foreign_keys.sql'))
print('PG: Creating indexes...')
db.run_sql_script(os.path.join(ADMIN_SQL_DIR, 'create_indexes.sql'))
print("Done!")
@cli.command(name="init_msb_db")
@click.option("--force", "-f", is_flag=True, help="Drop existing database and user.")
@click.option("--create-db", is_flag=True, help="Skip creating database and user. Tables/indexes only.")
def init_msb_db(force, create_db):
"""Initializes database.
This process involves several steps:
1. Table structure is created.
2. Primary keys and foreign keys are created.
3. Indexes are created.
"""
from listenbrainz import config
db.init_db_connection(config.POSTGRES_ADMIN_URI)
if force:
res = db.run_sql_script_without_transaction(os.path.join(MSB_ADMIN_SQL_DIR, 'drop_db.sql'))
if not res:
raise Exception('Failed to drop existing database and user! Exit code: %s' % res)
if create_db or force:
print('PG: Creating user and a database...')
res = db.run_sql_script_without_transaction(os.path.join(MSB_ADMIN_SQL_DIR, 'create_db.sql'))
if not res:
raise Exception('Failed to create new database and user! Exit code: %s' % res)
print('PG: Creating database extensions...')
res = db.run_sql_script_without_transaction(os.path.join(MSB_ADMIN_SQL_DIR, 'create_extensions.sql'))
# Don't raise an exception if the extension already exists
db.engine.dispose()
# print('PG: Creating schema...')
# exit_code = run_psql_script('create_schema.sql')
# if exit_code != 0:
# raise Exception('Failed to create database schema! Exit code: %i' % exit_code)
db.init_db_connection(config.MESSYBRAINZ_SQLALCHEMY_DATABASE_URI)
print('PG: Creating tables...')
db.run_sql_script(os.path.join(MSB_ADMIN_SQL_DIR, 'create_tables.sql'))
print('PG: Creating primary and foreign keys...')
db.run_sql_script(os.path.join(MSB_ADMIN_SQL_DIR, 'create_primary_keys.sql'))
db.run_sql_script(os.path.join(MSB_ADMIN_SQL_DIR, 'create_foreign_keys.sql'))
print('PG: Creating functions...')
db.run_sql_script(os.path.join(MSB_ADMIN_SQL_DIR, 'create_functions.sql'))
print('PG: Creating indexes...')
db.run_sql_script(os.path.join(MSB_ADMIN_SQL_DIR, 'create_indexes.sql'))
print("Done!")
@cli.command(name="init_ts_db")
@click.option("--force", "-f", is_flag=True, help="Drop existing database and user.")
@click.option("--create-db", is_flag=True, help="Create the database and user.")
def init_ts_db(force, create_db):
"""Initializes database.
This process involves several steps:
1. Table structure is created.
2. Indexes are created.
3. Views are created
"""
from listenbrainz import config
ts.init_db_connection(config.TIMESCALE_ADMIN_URI)
if force:
res = ts.run_sql_script_without_transaction(os.path.join(TIMESCALE_SQL_DIR, 'drop_db.sql'))
if not res:
raise Exception('Failed to drop existing database and user! Exit code: %i' % res)
if create_db or force:
print('TS: Creating user and a database...')
retries = 0
while True:
try:
res = ts.run_sql_script_without_transaction(os.path.join(TIMESCALE_SQL_DIR, 'create_db.sql'))
break
except sqlalchemy.exc.OperationalError:
print("Trapped template1 access error, FFS! Sleeping, trying again.")
retries += 1
if retries == 5:
raise
sleep(1)
continue
if not res:
raise Exception('Failed to create new database and user! Exit code: %i' % res)
ts.init_db_connection(config.TIMESCALE_ADMIN_LB_URI)
print('TS: Creating database extensions...')
res = ts.run_sql_script_without_transaction(os.path.join(TIMESCALE_SQL_DIR, 'create_extensions.sql'))
# Don't raise an exception if the extension already exists
ts.init_db_connection(config.SQLALCHEMY_TIMESCALE_URI)
application = webserver.create_app()
with application.app_context():
print('TS: Creating Schemas...')
ts.run_sql_script(os.path.join(TIMESCALE_SQL_DIR, 'create_schemas.sql'))
print('TS: Creating tables...')
ts.run_sql_script(os.path.join(TIMESCALE_SQL_DIR, 'create_tables.sql'))
print('TS: Creating Functions...')
ts.run_sql_script(os.path.join(TIMESCALE_SQL_DIR, 'create_functions.sql'))
print('TS: Creating views...')
ts.run_sql_script(os.path.join(TIMESCALE_SQL_DIR, 'create_views.sql'))
print('TS: Creating indexes...')
ts.run_sql_script(os.path.join(TIMESCALE_SQL_DIR, 'create_indexes.sql'))
print('TS: Creating Primary and Foreign Keys...')
ts.run_sql_script(os.path.join(TIMESCALE_SQL_DIR, 'create_primary_keys.sql'))
ts.run_sql_script(os.path.join(TIMESCALE_SQL_DIR, 'create_foreign_keys.sql'))
print("Done!")
# Add other commands here
import listenbrainz.spark.request_manage as spark_request_manage
cli.add_command(spark_request_manage.cli, name="spark")
import listenbrainz.db.dump_manager as dump_manager
cli.add_command(dump_manager.cli, name="dump")
if __name__ == '__main__':
cli()
| 1 | 18,099 | Noticed some missing imports in manage.py so thought I'd add them | metabrainz-listenbrainz-server | py |
@@ -301,15 +301,10 @@ func (exp *flowExporter) sendFlowRecords() error {
exp.numDataSetsSent = exp.numDataSetsSent + 1
if flowexporter.IsConnectionDying(&record.Conn) {
- // If the connection is in dying state or connection is not in conntrack table,
- // we will delete the flow records from records map.
- klog.V(2).Infof("Deleting the inactive flow records with key: %v from record map", key)
- if err := exp.flowRecords.DeleteFlowRecordWithoutLock(key); err != nil {
- return err
- }
- if err := exp.conntrackConnStore.SetExportDone(key); err != nil {
- return err
- }
+ // If the connection is in dying state or connection is not in conntrack
+ // table, we set the DyingAndDoneExport flag to do the deletion later.
+ record.DyingAndDoneExport = true
+ exp.flowRecords.AddFlowRecordWithoutLock(&key, &record)
} else {
exp.flowRecords.ValidateAndUpdateStats(key, record)
} | 1 | // Copyright 2020 Antrea Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package exporter
import (
"fmt"
"hash/fnv"
"net"
"time"
ipfixentities "github.com/vmware/go-ipfix/pkg/entities"
"github.com/vmware/go-ipfix/pkg/exporter"
ipfixregistry "github.com/vmware/go-ipfix/pkg/registry"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"
"antrea.io/antrea/pkg/agent/controller/noderoute"
"antrea.io/antrea/pkg/agent/flowexporter"
"antrea.io/antrea/pkg/agent/flowexporter/connections"
"antrea.io/antrea/pkg/agent/flowexporter/flowrecords"
"antrea.io/antrea/pkg/agent/openflow"
"antrea.io/antrea/pkg/ipfix"
"antrea.io/antrea/pkg/util/env"
)
var (
IANAInfoElementsCommon = []string{
"flowStartSeconds",
"flowEndSeconds",
"flowEndReason",
"sourceTransportPort",
"destinationTransportPort",
"protocolIdentifier",
"packetTotalCount",
"octetTotalCount",
"packetDeltaCount",
"octetDeltaCount",
}
IANAInfoElementsIPv4 = append(IANAInfoElementsCommon, []string{"sourceIPv4Address", "destinationIPv4Address"}...)
IANAInfoElementsIPv6 = append(IANAInfoElementsCommon, []string{"sourceIPv6Address", "destinationIPv6Address"}...)
// Substring "reverse" is an indication to get reverse element of go-ipfix library.
IANAReverseInfoElements = []string{
"reversePacketTotalCount",
"reverseOctetTotalCount",
"reversePacketDeltaCount",
"reverseOctetDeltaCount",
}
antreaInfoElementsCommon = []string{
"sourcePodName",
"sourcePodNamespace",
"sourceNodeName",
"destinationPodName",
"destinationPodNamespace",
"destinationNodeName",
"destinationServicePort",
"destinationServicePortName",
"ingressNetworkPolicyName",
"ingressNetworkPolicyNamespace",
"ingressNetworkPolicyType",
"ingressNetworkPolicyRuleName",
"ingressNetworkPolicyRuleAction",
"egressNetworkPolicyName",
"egressNetworkPolicyNamespace",
"egressNetworkPolicyType",
"egressNetworkPolicyRuleName",
"egressNetworkPolicyRuleAction",
"tcpState",
"flowType",
}
AntreaInfoElementsIPv4 = append(antreaInfoElementsCommon, []string{"destinationClusterIPv4"}...)
AntreaInfoElementsIPv6 = append(antreaInfoElementsCommon, []string{"destinationClusterIPv6"}...)
)
type flowExporter struct {
conntrackConnStore *connections.ConntrackConnectionStore
flowRecords *flowrecords.FlowRecords
denyConnStore *connections.DenyConnectionStore
process ipfix.IPFIXExportingProcess
elementsListv4 []*ipfixentities.InfoElementWithValue
elementsListv6 []*ipfixentities.InfoElementWithValue
ipfixSet ipfixentities.Set
numDataSetsSent uint64 // used for unit tests.
templateIDv4 uint16
templateIDv6 uint16
registry ipfix.IPFIXRegistry
v4Enabled bool
v6Enabled bool
exporterInput exporter.ExporterInput
activeFlowTimeout time.Duration
idleFlowTimeout time.Duration
k8sClient kubernetes.Interface
nodeRouteController *noderoute.Controller
isNetworkPolicyOnly bool
nodeName string
}
func genObservationID(nodeName string) uint32 {
h := fnv.New32()
h.Write([]byte(nodeName))
return h.Sum32()
}
func prepareExporterInputArgs(collectorAddr, collectorProto, nodeName string) exporter.ExporterInput {
expInput := exporter.ExporterInput{}
// Exporting process requires domain observation ID.
expInput.ObservationDomainID = genObservationID(nodeName)
expInput.CollectorAddress = collectorAddr
if collectorProto == "tls" {
expInput.IsEncrypted = true
expInput.CollectorProtocol = "tcp"
} else {
expInput.IsEncrypted = false
expInput.CollectorProtocol = collectorProto
}
expInput.PathMTU = 0
return expInput
}
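// exampleExporterInput is an illustrative sketch only: with a hypothetical
// collector address and node name, requesting the "tls" protocol yields an
// input that is marked encrypted while still using TCP as the transport.
var exampleExporterInput = prepareExporterInputArgs("flow-collector.example.svc:4739", "tls", "example-node")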
func NewFlowExporter(connStore *connections.ConntrackConnectionStore, records *flowrecords.FlowRecords, denyConnStore *connections.DenyConnectionStore,
collectorAddr string, collectorProto string, activeFlowTimeout time.Duration, idleFlowTimeout time.Duration,
v4Enabled bool, v6Enabled bool, k8sClient kubernetes.Interface,
nodeRouteController *noderoute.Controller, isNetworkPolicyOnly bool) (*flowExporter, error) {
// Initialize IPFIX registry
registry := ipfix.NewIPFIXRegistry()
registry.LoadRegistry()
// Prepare input args for IPFIX exporting process.
nodeName, err := env.GetNodeName()
if err != nil {
return nil, err
}
expInput := prepareExporterInputArgs(collectorAddr, collectorProto, nodeName)
return &flowExporter{
conntrackConnStore: connStore,
flowRecords: records,
denyConnStore: denyConnStore,
registry: registry,
v4Enabled: v4Enabled,
v6Enabled: v6Enabled,
exporterInput: expInput,
activeFlowTimeout: activeFlowTimeout,
idleFlowTimeout: idleFlowTimeout,
ipfixSet: ipfixentities.NewSet(false),
k8sClient: k8sClient,
nodeRouteController: nodeRouteController,
isNetworkPolicyOnly: isNetworkPolicyOnly,
nodeName: nodeName,
}, nil
}
// Run calls Export function periodically to check if flow records need to be exported
// based on active flow and idle flow timeouts.
func (exp *flowExporter) Run(stopCh <-chan struct{}) {
go wait.Until(exp.Export, time.Second, stopCh)
<-stopCh
}
func (exp *flowExporter) Export() {
// Retry to connect to IPFIX collector if the exporting process gets reset
if exp.process == nil {
err := exp.initFlowExporter()
if err != nil {
klog.Errorf("Error when initializing flow exporter: %v", err)
// There could be other errors while initializing flow exporter other than connecting to IPFIX collector,
// therefore closing the connection and resetting the process.
if exp.process != nil {
exp.process.CloseConnToCollector()
exp.process = nil
}
return
}
}
// Send flow records to IPFIX collector.
err := exp.sendFlowRecords()
if err != nil {
klog.Errorf("Error when sending flow records: %v", err)
// If there is an error when sending flow records because of intermittent connectivity, we reset the connection
// to IPFIX collector and retry in the next export cycle to reinitialize the connection and send flow records.
exp.process.CloseConnToCollector()
exp.process = nil
return
}
klog.V(2).Infof("Successfully exported IPFIX flow records")
}
func (exp *flowExporter) initFlowExporter() error {
var err error
if exp.exporterInput.IsEncrypted {
		// If the CA certificate, client certificate and key do not exist during initialization,
		// the exporter will retry obtaining the credentials in the next export cycle.
exp.exporterInput.CACert, err = getCACert(exp.k8sClient)
if err != nil {
return fmt.Errorf("cannot retrieve CA cert: %v", err)
}
exp.exporterInput.ClientCert, exp.exporterInput.ClientKey, err = getClientCertKey(exp.k8sClient)
if err != nil {
return fmt.Errorf("cannot retrieve client cert and key: %v", err)
}
// TLS transport does not need any tempRefTimeout, so sending 0.
exp.exporterInput.TempRefTimeout = 0
} else if exp.exporterInput.CollectorProtocol == "tcp" {
// TCP transport does not need any tempRefTimeout, so sending 0.
// tempRefTimeout is the template refresh timeout, which specifies how often
// the exporting process should send the template again.
exp.exporterInput.TempRefTimeout = 0
} else {
// For UDP transport, hardcoding tempRefTimeout value as 1800s.
exp.exporterInput.TempRefTimeout = 1800
}
expProcess, err := ipfix.NewIPFIXExportingProcess(exp.exporterInput)
if err != nil {
return fmt.Errorf("error when starting exporter: %v", err)
}
exp.process = expProcess
if exp.v4Enabled {
templateID := exp.process.NewTemplateID()
exp.templateIDv4 = templateID
sentBytes, err := exp.sendTemplateSet(false)
if err != nil {
return err
}
klog.V(2).Infof("Initialized flow exporter for IPv4 flow records and sent %d bytes size of template record", sentBytes)
}
if exp.v6Enabled {
templateID := exp.process.NewTemplateID()
exp.templateIDv6 = templateID
sentBytes, err := exp.sendTemplateSet(true)
if err != nil {
return err
}
klog.V(2).Infof("Initialized flow exporter for IPv6 flow records and sent %d bytes size of template record", sentBytes)
}
return nil
}
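// sendFlowRecords iterates over the conntrack-based flow records and the deny connection store, and exports a
// data set for every record whose active or idle flow timeout has expired.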
func (exp *flowExporter) sendFlowRecords() error {
updateOrSendFlowRecord := func(key flowexporter.ConnectionKey, record flowexporter.FlowRecord) error {
recordNeedsSending := false
// Skip the timeout checks below: the connection has stayed idle since
// the idleFlowTimeout was last triggered.
if !record.IsActive {
return nil
}
// Send a flow record if the conditions for either timeout
// (activeFlowTimeout or idleFlowTimeout) are met. A flow is considered
// to be idle if its packet counts haven't changed since the last export.
if time.Since(record.LastExportTime) >= exp.idleFlowTimeout {
if ((record.Conn.OriginalPackets <= record.PrevPackets) && (record.Conn.ReversePackets <= record.PrevReversePackets)) || flowexporter.IsConnectionDying(&record.Conn) {
// Idle flow timeout
record.IsActive = false
recordNeedsSending = true
}
}
if time.Since(record.LastExportTime) >= exp.activeFlowTimeout {
// Active flow timeout
recordNeedsSending = true
}
if recordNeedsSending {
exp.ipfixSet.ResetSet()
if record.IsIPv6 {
if err := exp.ipfixSet.PrepareSet(ipfixentities.Data, exp.templateIDv6); err != nil {
return err
}
// TODO: support multiple records per data set once go-ipfix can check the set size when adding records
if err := exp.addRecordToSet(record); err != nil {
return err
}
if _, err := exp.sendDataSet(); err != nil {
return err
}
} else {
if err := exp.ipfixSet.PrepareSet(ipfixentities.Data, exp.templateIDv4); err != nil {
return err
}
// TODO: support multiple records per data set once go-ipfix can check the set size when adding records
if err := exp.addRecordToSet(record); err != nil {
return err
}
if _, err := exp.sendDataSet(); err != nil {
return err
}
}
exp.numDataSetsSent = exp.numDataSetsSent + 1
if flowexporter.IsConnectionDying(&record.Conn) {
// If the connection is in a dying state or is no longer present in the conntrack table,
// we delete its flow record from the records map.
klog.V(2).Infof("Deleting the inactive flow records with key: %v from record map", key)
if err := exp.flowRecords.DeleteFlowRecordWithoutLock(key); err != nil {
return err
}
if err := exp.conntrackConnStore.SetExportDone(key); err != nil {
return err
}
} else {
exp.flowRecords.ValidateAndUpdateStats(key, record)
}
klog.V(4).InfoS("Record sent successfully", "flowKey", key, "record", record)
}
return nil
}
err := exp.flowRecords.ForAllFlowRecordsDo(updateOrSendFlowRecord)
if err != nil {
return fmt.Errorf("error when iterating flow records: %v", err)
}
exportDenyConn := func(connKey flowexporter.ConnectionKey, conn *flowexporter.Connection) error {
if conn.DeltaPackets > 0 && time.Since(conn.LastExportTime) >= exp.activeFlowTimeout {
if err := exp.addDenyConnToSet(conn, ipfixregistry.ActiveTimeoutReason); err != nil {
return err
}
if _, err := exp.sendDataSet(); err != nil {
return err
}
exp.numDataSetsSent = exp.numDataSetsSent + 1
klog.V(4).InfoS("Record for deny connection sent successfully", "flowKey", connKey, "connection", conn)
exp.denyConnStore.ResetConnStatsWithoutLock(connKey)
}
if time.Since(conn.LastExportTime) >= exp.idleFlowTimeout {
if err := exp.addDenyConnToSet(conn, ipfixregistry.IdleTimeoutReason); err != nil {
return err
}
if _, err := exp.sendDataSet(); err != nil {
return err
}
exp.numDataSetsSent = exp.numDataSetsSent + 1
klog.V(4).InfoS("Record for deny connection sent successfully", "flowKey", connKey, "connection", conn)
exp.denyConnStore.DeleteConnWithoutLock(connKey)
}
return nil
}
err = exp.denyConnStore.ForAllConnectionsDo(exportDenyConn)
if err != nil {
return fmt.Errorf("error when iterating deny connections: %v", err)
}
return nil
}
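// sendTemplateSet assembles the IPv4 or IPv6 template record from the IANA, reversed IANA and Antrea information
// elements, sends it to the collector, and caches the element list for subsequent data records.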
func (exp *flowExporter) sendTemplateSet(isIPv6 bool) (int, error) {
elements := make([]*ipfixentities.InfoElementWithValue, 0)
IANAInfoElements := IANAInfoElementsIPv4
AntreaInfoElements := AntreaInfoElementsIPv4
templateID := exp.templateIDv4
if isIPv6 {
IANAInfoElements = IANAInfoElementsIPv6
AntreaInfoElements = AntreaInfoElementsIPv6
templateID = exp.templateIDv6
}
for _, ie := range IANAInfoElements {
element, err := exp.registry.GetInfoElement(ie, ipfixregistry.IANAEnterpriseID)
if err != nil {
return 0, fmt.Errorf("%s not present. returned error: %v", ie, err)
}
ieWithValue := ipfixentities.NewInfoElementWithValue(element, nil)
elements = append(elements, ieWithValue)
}
for _, ie := range IANAReverseInfoElements {
element, err := exp.registry.GetInfoElement(ie, ipfixregistry.IANAReversedEnterpriseID)
if err != nil {
return 0, fmt.Errorf("%s not present. returned error: %v", ie, err)
}
ieWithValue := ipfixentities.NewInfoElementWithValue(element, nil)
elements = append(elements, ieWithValue)
}
for _, ie := range AntreaInfoElements {
element, err := exp.registry.GetInfoElement(ie, ipfixregistry.AntreaEnterpriseID)
if err != nil {
return 0, fmt.Errorf("information element %s is not present in Antrea registry", ie)
}
ieWithValue := ipfixentities.NewInfoElementWithValue(element, nil)
elements = append(elements, ieWithValue)
}
exp.ipfixSet.ResetSet()
if err := exp.ipfixSet.PrepareSet(ipfixentities.Template, templateID); err != nil {
return 0, err
}
err := exp.ipfixSet.AddRecord(elements, templateID)
if err != nil {
return 0, fmt.Errorf("error in adding record to template set: %v", err)
}
sentBytes, err := exp.process.SendSet(exp.ipfixSet)
if err != nil {
return 0, fmt.Errorf("error in IPFIX exporting process when sending template record: %v", err)
}
// Get all elements from template record.
if !isIPv6 {
exp.elementsListv4 = elements
} else {
exp.elementsListv6 = elements
}
return sentBytes, nil
}
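// addRecordToSet copies the values of the given flow record into the cached information elements and appends the
// resulting data record to the current IPFIX set.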
func (exp *flowExporter) addRecordToSet(record flowexporter.FlowRecord) error {
// Iterate over all infoElements in the list
eL := exp.elementsListv4
if record.IsIPv6 {
eL = exp.elementsListv6
}
for _, ie := range eL {
switch ieName := ie.Element.Name; ieName {
case "flowStartSeconds":
ie.Value = uint32(record.Conn.StartTime.Unix())
case "flowEndSeconds":
ie.Value = uint32(record.Conn.StopTime.Unix())
case "flowEndReason":
if flowexporter.IsConnectionDying(&record.Conn) {
ie.Value = ipfixregistry.EndOfFlowReason
} else if record.IsActive {
ie.Value = ipfixregistry.ActiveTimeoutReason
} else {
ie.Value = ipfixregistry.IdleTimeoutReason
}
case "sourceIPv4Address":
ie.Value = record.Conn.FlowKey.SourceAddress
case "destinationIPv4Address":
ie.Value = record.Conn.FlowKey.DestinationAddress
case "sourceIPv6Address":
ie.Value = record.Conn.FlowKey.SourceAddress
case "destinationIPv6Address":
ie.Value = record.Conn.FlowKey.DestinationAddress
case "sourceTransportPort":
ie.Value = record.Conn.FlowKey.SourcePort
case "destinationTransportPort":
ie.Value = record.Conn.FlowKey.DestinationPort
case "protocolIdentifier":
ie.Value = record.Conn.FlowKey.Protocol
case "packetTotalCount":
ie.Value = record.Conn.OriginalPackets
case "octetTotalCount":
ie.Value = record.Conn.OriginalBytes
case "packetDeltaCount":
deltaPkts := int64(record.Conn.OriginalPackets) - int64(record.PrevPackets)
if deltaPkts < 0 {
klog.Warningf("Packet delta count for connection should not be negative: %d", deltaPkts)
}
ie.Value = uint64(deltaPkts)
case "octetDeltaCount":
deltaBytes := int64(record.Conn.OriginalBytes) - int64(record.PrevBytes)
if deltaBytes < 0 {
klog.Warningf("Byte delta count for connection should not be negative: %d", deltaBytes)
}
ie.Value = uint64(deltaBytes)
case "reversePacketTotalCount":
ie.Value = record.Conn.ReversePackets
case "reverseOctetTotalCount":
ie.Value = record.Conn.ReverseBytes
case "reversePacketDeltaCount":
deltaPkts := int64(record.Conn.ReversePackets) - int64(record.PrevReversePackets)
if deltaPkts < 0 {
klog.Warningf("Packet delta count for connection should not be negative: %d", deltaPkts)
}
ie.Value = uint64(deltaPkts)
case "reverseOctetDeltaCount":
deltaBytes := int64(record.Conn.ReverseBytes) - int64(record.PrevReverseBytes)
if deltaBytes < 0 {
klog.Warningf("Byte delta count for connection should not be negative: %d", deltaBytes)
}
ie.Value = uint64(deltaBytes)
case "sourcePodNamespace":
ie.Value = record.Conn.SourcePodNamespace
case "sourcePodName":
ie.Value = record.Conn.SourcePodName
case "sourceNodeName":
// Only add nodeName for local pods whose pod names are resolved.
if record.Conn.SourcePodName != "" {
ie.Value = exp.nodeName
} else {
ie.Value = ""
}
case "destinationPodNamespace":
ie.Value = record.Conn.DestinationPodNamespace
case "destinationPodName":
ie.Value = record.Conn.DestinationPodName
case "destinationNodeName":
// Only add nodeName for local pods whose pod names are resolved.
if record.Conn.DestinationPodName != "" {
ie.Value = exp.nodeName
} else {
ie.Value = ""
}
case "destinationClusterIPv4":
if record.Conn.DestinationServicePortName != "" {
ie.Value = record.Conn.DestinationServiceAddress
} else {
// Send a dummy IP because the IPFIX collector expects a fixed-length value for the IP field.
// We should probably find a better approach, as this requires customizing the IPFIX collector to ignore
// the dummy IP address.
ie.Value = net.IP{0, 0, 0, 0}
}
case "destinationClusterIPv6":
if record.Conn.DestinationServicePortName != "" {
ie.Value = record.Conn.DestinationServiceAddress
} else {
// Same as destinationClusterIPv4.
ie.Value = net.ParseIP("::")
}
case "destinationServicePort":
if record.Conn.DestinationServicePortName != "" {
ie.Value = record.Conn.DestinationServicePort
} else {
ie.Value = uint16(0)
}
case "destinationServicePortName":
if record.Conn.DestinationServicePortName != "" {
ie.Value = record.Conn.DestinationServicePortName
} else {
ie.Value = ""
}
case "ingressNetworkPolicyName":
ie.Value = record.Conn.IngressNetworkPolicyName
case "ingressNetworkPolicyNamespace":
ie.Value = record.Conn.IngressNetworkPolicyNamespace
case "ingressNetworkPolicyType":
ie.Value = record.Conn.IngressNetworkPolicyType
case "ingressNetworkPolicyRuleName":
ie.Value = record.Conn.IngressNetworkPolicyRuleName
case "ingressNetworkPolicyRuleAction":
ie.Value = record.Conn.IngressNetworkPolicyRuleAction
case "egressNetworkPolicyName":
ie.Value = record.Conn.EgressNetworkPolicyName
case "egressNetworkPolicyNamespace":
ie.Value = record.Conn.EgressNetworkPolicyNamespace
case "egressNetworkPolicyType":
ie.Value = record.Conn.EgressNetworkPolicyType
case "egressNetworkPolicyRuleName":
ie.Value = record.Conn.EgressNetworkPolicyRuleName
case "egressNetworkPolicyRuleAction":
ie.Value = record.Conn.EgressNetworkPolicyRuleAction
case "tcpState":
ie.Value = record.Conn.TCPState
case "flowType":
ie.Value = exp.findFlowType(record.Conn)
}
}
templateID := exp.templateIDv4
if record.IsIPv6 {
templateID = exp.templateIDv6
}
err := exp.ipfixSet.AddRecord(eL, templateID)
if err != nil {
return fmt.Errorf("error in adding record to data set: %v", err)
}
return nil
}
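// addDenyConnToSet prepares a data set for a deny connection; reverse statistics are always reported as zero and
// tcpState is left empty.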
func (exp *flowExporter) addDenyConnToSet(conn *flowexporter.Connection, flowEndReason uint8) error {
exp.ipfixSet.ResetSet()
eL := exp.elementsListv4
templateID := exp.templateIDv4
if conn.FlowKey.SourceAddress.To4() == nil {
templateID = exp.templateIDv6
eL = exp.elementsListv6
}
if err := exp.ipfixSet.PrepareSet(ipfixentities.Data, templateID); err != nil {
return err
}
// Iterate over all infoElements in the list
for _, ie := range eL {
switch ieName := ie.Element.Name; ieName {
case "flowStartSeconds":
ie.Value = uint32(conn.StartTime.Unix())
case "flowEndSeconds":
ie.Value = uint32(conn.StopTime.Unix())
case "flowEndReason":
ie.Value = flowEndReason
case "sourceIPv4Address":
ie.Value = conn.FlowKey.SourceAddress
case "destinationIPv4Address":
ie.Value = conn.FlowKey.DestinationAddress
case "sourceIPv6Address":
ie.Value = conn.FlowKey.SourceAddress
case "destinationIPv6Address":
ie.Value = conn.FlowKey.DestinationAddress
case "sourceTransportPort":
ie.Value = conn.FlowKey.SourcePort
case "destinationTransportPort":
ie.Value = conn.FlowKey.DestinationPort
case "protocolIdentifier":
ie.Value = conn.FlowKey.Protocol
case "packetTotalCount":
ie.Value = conn.OriginalPackets
case "octetTotalCount":
ie.Value = conn.OriginalBytes
case "packetDeltaCount":
ie.Value = conn.DeltaPackets
case "octetDeltaCount":
ie.Value = conn.DeltaBytes
case "reversePacketTotalCount", "reverseOctetTotalCount", "reversePacketDeltaCount", "reverseOctetDeltaCount":
ie.Value = uint64(0)
case "sourcePodNamespace":
ie.Value = conn.SourcePodNamespace
case "sourcePodName":
ie.Value = conn.SourcePodName
case "sourceNodeName":
// Only add nodeName for local pods whose pod names are resolved.
if conn.SourcePodName != "" {
ie.Value = exp.nodeName
} else {
ie.Value = ""
}
case "destinationPodNamespace":
ie.Value = conn.DestinationPodNamespace
case "destinationPodName":
ie.Value = conn.DestinationPodName
case "destinationNodeName":
// Only add nodeName for local pods whose pod names are resolved.
if conn.DestinationPodName != "" {
ie.Value = exp.nodeName
} else {
ie.Value = ""
}
case "destinationClusterIPv4":
if conn.DestinationServicePortName != "" {
ie.Value = conn.DestinationServiceAddress
} else {
ie.Value = net.IP{0, 0, 0, 0}
}
case "destinationClusterIPv6":
if conn.DestinationServicePortName != "" {
ie.Value = conn.DestinationServiceAddress
} else {
ie.Value = net.ParseIP("::")
}
case "destinationServicePort":
if conn.DestinationServicePortName != "" {
ie.Value = conn.DestinationServicePort
} else {
ie.Value = uint16(0)
}
case "destinationServicePortName":
ie.Value = conn.DestinationServicePortName
case "ingressNetworkPolicyName":
ie.Value = conn.IngressNetworkPolicyName
case "ingressNetworkPolicyNamespace":
ie.Value = conn.IngressNetworkPolicyNamespace
case "ingressNetworkPolicyType":
ie.Value = conn.IngressNetworkPolicyType
case "ingressNetworkPolicyRuleName":
ie.Value = conn.IngressNetworkPolicyRuleName
case "ingressNetworkPolicyRuleAction":
ie.Value = conn.IngressNetworkPolicyRuleAction
case "egressNetworkPolicyName":
ie.Value = conn.EgressNetworkPolicyName
case "egressNetworkPolicyNamespace":
ie.Value = conn.EgressNetworkPolicyNamespace
case "egressNetworkPolicyType":
ie.Value = conn.EgressNetworkPolicyType
case "egressNetworkPolicyRuleName":
ie.Value = conn.EgressNetworkPolicyRuleName
case "egressNetworkPolicyRuleAction":
ie.Value = conn.EgressNetworkPolicyRuleAction
case "tcpState":
ie.Value = ""
case "flowType":
ie.Value = exp.findFlowType(*conn)
}
}
err := exp.ipfixSet.AddRecord(eL, templateID)
if err != nil {
return fmt.Errorf("error in adding record to data set: %v", err)
}
return nil
}
func (exp *flowExporter) sendDataSet() (int, error) {
sentBytes, err := exp.process.SendSet(exp.ipfixSet)
if err != nil {
return 0, fmt.Errorf("error when sending data set: %v", err)
}
klog.V(4).Infof("Data set sent successfully. Bytes sent: %d", sentBytes)
return sentBytes, nil
}
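// findFlowType classifies a connection as intra-Node, inter-Node or Pod-to-External, based on Pod name resolution
// in network-policy-only mode and on Pod CIDR membership otherwise.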
func (exp *flowExporter) findFlowType(conn flowexporter.Connection) uint8 {
// TODO: support Pod-To-External flows in network policy only mode.
if exp.isNetworkPolicyOnly {
if conn.SourcePodName == "" || conn.DestinationPodName == "" {
return ipfixregistry.FlowTypeInterNode
}
return ipfixregistry.FlowTypeIntraNode
}
if exp.nodeRouteController == nil {
klog.Warningf("Can't find flowType without nodeRouteController")
return 0
}
if exp.nodeRouteController.IPInPodSubnets(conn.FlowKey.SourceAddress) {
if conn.Mark == openflow.ServiceCTMark || exp.nodeRouteController.IPInPodSubnets(conn.FlowKey.DestinationAddress) {
if conn.SourcePodName == "" || conn.DestinationPodName == "" {
return ipfixregistry.FlowTypeInterNode
}
return ipfixregistry.FlowTypeIntraNode
} else {
return ipfixregistry.FlowTypeToExternal
}
} else {
// We do not support External-To-Pod flows for now.
klog.Warningf("Source IP: %s doesn't exist in PodCIDRs", conn.FlowKey.SourceAddress.String())
return 0
}
}
| 1 | 40,882 | as a future improvement, maybe we should just change `ForAllFlowRecordsDo` so that `updateOrSendFlowRecord` uses a flow record pointer instead of a copy of stored flow record. This whole code is executed with the lock any way. | antrea-io-antrea | go |
@@ -37,6 +37,7 @@ import java.util.function.Consumer;
* A literal string.
* <br/><code>"Hello World!"</code>
* <br/><code>"\"\n"</code>
+ * <br/><code>"\u2122"</code>
* <br/><code>"™"</code>
* <br/><code>"💩"</code>
* | 1 | /*
* Copyright (C) 2007-2010 Júlio Vilmar Gesser.
* Copyright (C) 2011, 2013-2016 The JavaParser Team.
*
* This file is part of JavaParser.
*
* JavaParser can be used either under the terms of
* a) the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
* b) the terms of the Apache License
*
* You should have received a copy of both licenses in LICENCE.LGPL and
* LICENCE.APACHE. Please refer to those files for details.
*
* JavaParser is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*/
package com.github.javaparser.ast.expr;
import com.github.javaparser.ast.AllFieldsConstructor;
import com.github.javaparser.ast.Node;
import com.github.javaparser.ast.visitor.CloneVisitor;
import com.github.javaparser.ast.visitor.GenericVisitor;
import com.github.javaparser.ast.visitor.VoidVisitor;
import com.github.javaparser.metamodel.JavaParserMetaModel;
import com.github.javaparser.metamodel.StringLiteralExprMetaModel;
import com.github.javaparser.utils.StringEscapeUtils;
import com.github.javaparser.utils.Utils;
import javax.annotation.Generated;
import com.github.javaparser.TokenRange;
import java.util.function.Consumer;
/**
* A literal string.
* <br/><code>"Hello World!"</code>
* <br/><code>"\"\n"</code>
* <br/><code>"™"</code>
* <br/><code>"💩"</code>
*
* @author Julio Vilmar Gesser
*/
public final class StringLiteralExpr extends LiteralStringValueExpr {
public StringLiteralExpr() {
this(null, "empty");
}
/**
* Creates a string literal expression from the given string. Escapes EOL characters.
*
* @param value the value of the literal
*/
@AllFieldsConstructor
public StringLiteralExpr(final String value) {
this(null, Utils.escapeEndOfLines(value));
}
/**
* Utility method that creates a new StringLiteralExpr. Escapes EOL characters.
*
* @deprecated Use {@link #StringLiteralExpr(String)} instead.
*/
@Deprecated
public static StringLiteralExpr escape(String string) {
return new StringLiteralExpr(Utils.escapeEndOfLines(string));
}
/**
* This constructor is used by the parser and is considered private.
*/
@Generated("com.github.javaparser.generator.core.node.MainConstructorGenerator")
public StringLiteralExpr(TokenRange tokenRange, String value) {
super(tokenRange, value);
customInitialization();
}
@Override
@Generated("com.github.javaparser.generator.core.node.AcceptGenerator")
public <R, A> R accept(final GenericVisitor<R, A> v, final A arg) {
return v.visit(this, arg);
}
@Override
@Generated("com.github.javaparser.generator.core.node.AcceptGenerator")
public <A> void accept(final VoidVisitor<A> v, final A arg) {
v.visit(this, arg);
}
@Override
@Generated("com.github.javaparser.generator.core.node.RemoveMethodGenerator")
public boolean remove(Node node) {
if (node == null)
return false;
return super.remove(node);
}
/**
* Sets the content of this expression to the given value. Escapes EOL characters.
*
* @param value the new literal value
* @return self
*/
public StringLiteralExpr setEscapedValue(String value) {
this.value = Utils.escapeEndOfLines(value);
return this;
}
/**
* @return the unescaped literal value
*/
public String asString() {
return StringEscapeUtils.unescapeJava(value);
}
/**
* Escapes special characters in the given string and uses the result as the literal value.
*
* @param value unescaped string
* @return this literal expression
*/
public StringLiteralExpr setString(String value) {
this.value = StringEscapeUtils.escapeJava(value);
return this;
}
@Override
@Generated("com.github.javaparser.generator.core.node.CloneGenerator")
public StringLiteralExpr clone() {
return (StringLiteralExpr) accept(new CloneVisitor(), null);
}
@Override
@Generated("com.github.javaparser.generator.core.node.GetMetaModelGenerator")
public StringLiteralExprMetaModel getMetaModel() {
return JavaParserMetaModel.stringLiteralExprMetaModel;
}
@Override
@Generated("com.github.javaparser.generator.core.node.ReplaceMethodGenerator")
public boolean replace(Node node, Node replacementNode) {
if (node == null)
return false;
return super.replace(node, replacementNode);
}
@Override
@Generated("com.github.javaparser.generator.core.node.TypeCastingGenerator")
public boolean isStringLiteralExpr() {
return true;
}
@Override
@Generated("com.github.javaparser.generator.core.node.TypeCastingGenerator")
public StringLiteralExpr asStringLiteralExpr() {
return this;
}
@Generated("com.github.javaparser.generator.core.node.TypeCastingGenerator")
public void ifStringLiteralExpr(Consumer<StringLiteralExpr> action) {
action.accept(this);
}
}
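// A minimal usage sketch (illustrative only, not part of the original source; the literal values are invented):
//
//   StringLiteralExpr expr = new StringLiteralExpr();
//   expr.setString("Hello \"World\"");  // stored internally in escaped form: Hello \"World\"
//   String raw = expr.asString();       // unescaped back to: Hello "World"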
| 1 | 11,854 | I am not sure what GitHub wants to tell me with this icon here :) | javaparser-javaparser | java |
@@ -385,8 +385,8 @@ BOOST_AUTO_TEST_CASE(valid_nearest_urls)
BOOST_AUTO_TEST_CASE(valid_tile_urls)
{
- TileParameters reference_1{1, 2, 3};
- auto result_1 = parseParameters<TileParameters>("tile(1,2,3).mvt");
+ TileParameters reference_1{1, 2, 12};
+ auto result_1 = parseParameters<TileParameters>("tile(1,2,12).mvt");
BOOST_CHECK(result_1);
BOOST_CHECK_EQUAL(reference_1.x, result_1->x);
BOOST_CHECK_EQUAL(reference_1.y, result_1->y); | 1 | #include "server/api/parameters_parser.hpp"
#include "parameters_io.hpp"
#include "engine/api/base_parameters.hpp"
#include "engine/api/match_parameters.hpp"
#include "engine/api/nearest_parameters.hpp"
#include "engine/api/route_parameters.hpp"
#include "engine/api/table_parameters.hpp"
#include "engine/api/tile_parameters.hpp"
#include "engine/api/trip_parameters.hpp"
#include <boost/optional/optional_io.hpp>
#include <boost/test/test_tools.hpp>
#include <boost/test/unit_test.hpp>
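// Convenience wrapper around BOOST_CHECK_EQUAL_COLLECTIONS for comparing two ranges element by element.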
#define CHECK_EQUAL_RANGE(R1, R2) \
BOOST_CHECK_EQUAL_COLLECTIONS(R1.begin(), R1.end(), R2.begin(), R2.end());
BOOST_AUTO_TEST_SUITE(api_parameters_parser)
using namespace osrm;
using namespace osrm::server;
using namespace osrm::server::api;
using namespace osrm::engine::api;
// Parses the given options, checks that parsing fails, and returns the distance from the front to where parsing stopped
template <typename ParameterT> std::size_t testInvalidOptions(std::string options)
{
auto iter = options.begin();
auto result = parseParameters<ParameterT>(iter, options.end());
BOOST_CHECK(!result);
return std::distance(options.begin(), iter);
}
BOOST_AUTO_TEST_CASE(invalid_route_urls)
{
BOOST_CHECK_EQUAL(testInvalidOptions<RouteParameters>("a;3,4"), 0UL);
BOOST_CHECK_EQUAL(testInvalidOptions<RouteParameters>("120;3,4"), 3UL);
BOOST_CHECK_EQUAL(testInvalidOptions<RouteParameters>("90000000,2;3,4"), 0UL);
BOOST_CHECK_EQUAL(testInvalidOptions<RouteParameters>("1,2;3,4?overview=false&bla=foo"), 22UL);
BOOST_CHECK_EQUAL(testInvalidOptions<RouteParameters>("1,2;3,4?overview=false&bearings=foo"),
32UL);
BOOST_CHECK_EQUAL(
testInvalidOptions<RouteParameters>("1,2;3,4?overview=false&continue_straight=foo"), 41UL);
BOOST_CHECK_EQUAL(testInvalidOptions<RouteParameters>("1,2;3,4?overview=false&radiuses=foo"),
32UL);
BOOST_CHECK_EQUAL(testInvalidOptions<RouteParameters>("1,2;3,4?overview=false&hints=foo"),
29UL);
BOOST_CHECK_EQUAL(testInvalidOptions<RouteParameters>("1,2;3,4?overview=false&hints=;;; ;"),
32UL);
BOOST_CHECK_EQUAL(testInvalidOptions<RouteParameters>("1,2;3,4?overview=false&geometries=foo"),
34UL);
BOOST_CHECK_EQUAL(testInvalidOptions<RouteParameters>("1,2;3,4?overview=false&overview=foo"),
32L);
BOOST_CHECK_EQUAL(
testInvalidOptions<RouteParameters>("1,2;3,4?overview=false&alternatives=foo"), 36UL);
BOOST_CHECK_EQUAL(testInvalidOptions<RouteParameters>(""), 0);
BOOST_CHECK_EQUAL(testInvalidOptions<RouteParameters>("1,2;3.4.unsupported"), 7);
BOOST_CHECK_EQUAL(testInvalidOptions<RouteParameters>("1,2;3,4.json?nooptions"), 13);
BOOST_CHECK_EQUAL(testInvalidOptions<RouteParameters>("1,2;3,4..json?nooptions"), 14);
BOOST_CHECK_EQUAL(testInvalidOptions<RouteParameters>("1,2;3,4.0.json?nooptions"), 15);
BOOST_CHECK_EQUAL(testInvalidOptions<RouteParameters>(std::string{"1,2;3,4"} + '\0' + ".json"),
7);
BOOST_CHECK_EQUAL(testInvalidOptions<RouteParameters>(std::string{"1,2;3,"} + '\0'), 6);
// BOOST_CHECK_EQUAL(testInvalidOptions<RouteParameters>(), );
}
BOOST_AUTO_TEST_CASE(invalid_table_urls)
{
BOOST_CHECK_EQUAL(testInvalidOptions<TableParameters>("1,2;3,4?sources=1&bla=foo"), 17UL);
BOOST_CHECK_EQUAL(testInvalidOptions<TableParameters>("1,2;3,4?destinations=1&bla=foo"), 22UL);
BOOST_CHECK_EQUAL(
testInvalidOptions<TableParameters>("1,2;3,4?sources=1&destinations=1&bla=foo"), 32UL);
BOOST_CHECK_EQUAL(testInvalidOptions<TableParameters>("1,2;3,4?sources=foo"), 16UL);
BOOST_CHECK_EQUAL(testInvalidOptions<TableParameters>("1,2;3,4?destinations=foo"), 21UL);
}
BOOST_AUTO_TEST_CASE(valid_route_hint)
{
auto hint = engine::Hint::FromBase64(
"XAYAgP___3-QAAAABAAAACEAAAA_AAAAHgAAAHsFAAAUAAAAaWhxALeCmwI7aHEAy4KbAgUAAQE0h8Z2");
BOOST_CHECK_EQUAL(
hint.phantom.input_location,
util::Coordinate(util::FloatLongitude{7.432251}, util::FloatLatitude{43.745995}));
}
BOOST_AUTO_TEST_CASE(valid_route_urls)
{
std::vector<util::Coordinate> coords_1 = {{util::FloatLongitude{1}, util::FloatLatitude{2}},
{util::FloatLongitude{3}, util::FloatLatitude{4}}};
RouteParameters reference_1{};
reference_1.coordinates = coords_1;
auto result_1 = parseParameters<RouteParameters>("1,2;3,4");
BOOST_CHECK(result_1);
BOOST_CHECK_EQUAL(reference_1.steps, result_1->steps);
BOOST_CHECK_EQUAL(reference_1.alternatives, result_1->alternatives);
BOOST_CHECK_EQUAL(reference_1.geometries, result_1->geometries);
BOOST_CHECK_EQUAL(reference_1.annotations, result_1->annotations);
BOOST_CHECK_EQUAL(reference_1.overview, result_1->overview);
BOOST_CHECK_EQUAL(reference_1.continue_straight, result_1->continue_straight);
CHECK_EQUAL_RANGE(reference_1.bearings, result_1->bearings);
CHECK_EQUAL_RANGE(reference_1.radiuses, result_1->radiuses);
CHECK_EQUAL_RANGE(reference_1.coordinates, result_1->coordinates);
CHECK_EQUAL_RANGE(reference_1.hints, result_1->hints);
RouteParameters reference_2{};
reference_2.alternatives = true;
reference_2.steps = true;
reference_2.annotations = true;
reference_2.coordinates = coords_1;
auto result_2 =
parseParameters<RouteParameters>("1,2;3,4?steps=true&alternatives=true&geometries=polyline&"
"overview=simplified&annotations=true");
BOOST_CHECK(result_2);
BOOST_CHECK_EQUAL(reference_2.steps, result_2->steps);
BOOST_CHECK_EQUAL(reference_2.alternatives, result_2->alternatives);
BOOST_CHECK_EQUAL(reference_2.geometries, result_2->geometries);
BOOST_CHECK_EQUAL(reference_2.annotations, result_2->annotations);
BOOST_CHECK_EQUAL(reference_2.overview, result_2->overview);
BOOST_CHECK_EQUAL(reference_2.continue_straight, result_2->continue_straight);
CHECK_EQUAL_RANGE(reference_2.bearings, result_2->bearings);
CHECK_EQUAL_RANGE(reference_2.radiuses, result_2->radiuses);
CHECK_EQUAL_RANGE(reference_2.coordinates, result_2->coordinates);
CHECK_EQUAL_RANGE(reference_2.hints, result_2->hints);
RouteParameters reference_3{false,
false,
false,
RouteParameters::GeometriesType::GeoJSON,
RouteParameters::OverviewType::False,
true};
reference_3.coordinates = coords_1;
auto result_3 = api::parseParameters<engine::api::RouteParameters>(
"1,2;3,4?steps=false&alternatives=false&geometries=geojson&overview=false&continue_"
"straight=true");
BOOST_CHECK(result_3);
BOOST_CHECK_EQUAL(reference_3.steps, result_3->steps);
BOOST_CHECK_EQUAL(reference_3.alternatives, result_3->alternatives);
BOOST_CHECK_EQUAL(reference_3.geometries, result_3->geometries);
BOOST_CHECK_EQUAL(reference_3.annotations, result_3->annotations);
BOOST_CHECK_EQUAL(reference_3.overview, result_3->overview);
BOOST_CHECK_EQUAL(reference_3.continue_straight, result_3->continue_straight);
CHECK_EQUAL_RANGE(reference_3.bearings, result_3->bearings);
CHECK_EQUAL_RANGE(reference_3.radiuses, result_3->radiuses);
CHECK_EQUAL_RANGE(reference_3.coordinates, result_3->coordinates);
CHECK_EQUAL_RANGE(reference_3.hints, result_3->hints);
std::vector<boost::optional<engine::Hint>> hints_4 = {
engine::Hint::FromBase64(
"XAYAgP___3-QAAAABAAAACEAAAA_AAAAHgAAAHsFAAAUAAAAaWhxALeCmwI7aHEAy4KbAgUAAQE0h8Z2"),
engine::Hint::FromBase64(
"lgQAgP___3-QAAAADwAAABMAAAAoAAAALAAAADQAAAAUAAAAmWFxAL1zmwLcYXEAu3ObAgQAAQE0h8Z2"),
engine::Hint::FromBase64(
"OAUAgMUFAIAAAAAADwAAAAIAAAAAAAAAnQAAALwEAAAUAAAAgz5xAE9WmwKIPnEAUFabAgAAAQE0h8Z2")};
RouteParameters reference_4{false,
false,
false,
RouteParameters::GeometriesType::Polyline,
RouteParameters::OverviewType::Simplified,
boost::optional<bool>{},
coords_1,
hints_4,
std::vector<boost::optional<double>>{},
std::vector<boost::optional<engine::Bearing>>{}};
auto result_4 = parseParameters<RouteParameters>(
"1,2;3,4?steps=false&hints="
"XAYAgP___3-QAAAABAAAACEAAAA_AAAAHgAAAHsFAAAUAAAAaWhxALeCmwI7aHEAy4KbAgUAAQE0h8Z2;"
"lgQAgP___3-QAAAADwAAABMAAAAoAAAALAAAADQAAAAUAAAAmWFxAL1zmwLcYXEAu3ObAgQAAQE0h8Z2;"
"OAUAgMUFAIAAAAAADwAAAAIAAAAAAAAAnQAAALwEAAAUAAAAgz5xAE9WmwKIPnEAUFabAgAAAQE0h8Z2");
BOOST_CHECK(result_4);
BOOST_CHECK_EQUAL(reference_4.steps, result_4->steps);
BOOST_CHECK_EQUAL(reference_4.alternatives, result_4->alternatives);
BOOST_CHECK_EQUAL(reference_4.geometries, result_4->geometries);
BOOST_CHECK_EQUAL(reference_4.annotations, result_4->annotations);
BOOST_CHECK_EQUAL(reference_4.overview, result_4->overview);
BOOST_CHECK_EQUAL(reference_4.continue_straight, result_4->continue_straight);
CHECK_EQUAL_RANGE(reference_4.bearings, result_4->bearings);
CHECK_EQUAL_RANGE(reference_4.radiuses, result_4->radiuses);
CHECK_EQUAL_RANGE(reference_4.coordinates, result_4->coordinates);
CHECK_EQUAL_RANGE(reference_4.hints, result_4->hints);
std::vector<boost::optional<engine::Bearing>> bearings_4 = {
boost::none, engine::Bearing{200, 10}, engine::Bearing{100, 5},
};
RouteParameters reference_5{false,
false,
false,
RouteParameters::GeometriesType::Polyline,
RouteParameters::OverviewType::Simplified,
boost::optional<bool>{},
coords_1,
std::vector<boost::optional<engine::Hint>>{},
std::vector<boost::optional<double>>{},
bearings_4};
auto result_5 = parseParameters<RouteParameters>("1,2;3,4?steps=false&bearings=;200,10;100,5");
BOOST_CHECK(result_5);
BOOST_CHECK_EQUAL(reference_5.steps, result_5->steps);
BOOST_CHECK_EQUAL(reference_5.alternatives, result_5->alternatives);
BOOST_CHECK_EQUAL(reference_5.geometries, result_5->geometries);
BOOST_CHECK_EQUAL(reference_5.annotations, result_5->annotations);
BOOST_CHECK_EQUAL(reference_5.overview, result_5->overview);
BOOST_CHECK_EQUAL(reference_5.continue_straight, result_5->continue_straight);
CHECK_EQUAL_RANGE(reference_5.bearings, result_5->bearings);
CHECK_EQUAL_RANGE(reference_5.radiuses, result_5->radiuses);
CHECK_EQUAL_RANGE(reference_5.coordinates, result_5->coordinates);
CHECK_EQUAL_RANGE(reference_5.hints, result_5->hints);
std::vector<util::Coordinate> coords_2 = {{util::FloatLongitude{0}, util::FloatLatitude{1}},
{util::FloatLongitude{2}, util::FloatLatitude{3}},
{util::FloatLongitude{4}, util::FloatLatitude{5}}};
RouteParameters reference_6{};
reference_6.coordinates = coords_2;
auto result_6 = parseParameters<RouteParameters>("polyline(_ibE?_seK_seK_seK_seK)");
BOOST_CHECK(result_6);
BOOST_CHECK_EQUAL(reference_6.steps, result_6->steps);
BOOST_CHECK_EQUAL(reference_6.alternatives, result_6->alternatives);
BOOST_CHECK_EQUAL(reference_6.geometries, result_6->geometries);
BOOST_CHECK_EQUAL(reference_6.annotations, result_6->annotations);
BOOST_CHECK_EQUAL(reference_6.overview, result_6->overview);
BOOST_CHECK_EQUAL(reference_6.continue_straight, result_6->continue_straight);
CHECK_EQUAL_RANGE(reference_6.bearings, result_6->bearings);
CHECK_EQUAL_RANGE(reference_6.radiuses, result_6->radiuses);
CHECK_EQUAL_RANGE(reference_6.coordinates, result_6->coordinates);
CHECK_EQUAL_RANGE(reference_6.hints, result_6->hints);
auto result_7 = parseParameters<RouteParameters>("1,2;3,4?radiuses=;unlimited");
RouteParameters reference_7{};
reference_7.coordinates = coords_1;
reference_7.radiuses = {boost::none,
boost::make_optional(std::numeric_limits<double>::infinity())};
BOOST_CHECK(result_7);
BOOST_CHECK_EQUAL(reference_7.steps, result_7->steps);
BOOST_CHECK_EQUAL(reference_7.alternatives, result_7->alternatives);
BOOST_CHECK_EQUAL(reference_7.geometries, result_7->geometries);
BOOST_CHECK_EQUAL(reference_7.annotations, result_7->annotations);
BOOST_CHECK_EQUAL(reference_7.overview, result_7->overview);
BOOST_CHECK_EQUAL(reference_7.continue_straight, result_7->continue_straight);
CHECK_EQUAL_RANGE(reference_7.bearings, result_7->bearings);
CHECK_EQUAL_RANGE(reference_7.radiuses, result_7->radiuses);
CHECK_EQUAL_RANGE(reference_7.coordinates, result_7->coordinates);
CHECK_EQUAL_RANGE(reference_7.hints, result_7->hints);
auto result_8 = parseParameters<RouteParameters>("1,2;3,4?radiuses=;");
RouteParameters reference_8{};
reference_8.coordinates = coords_1;
reference_8.radiuses = {boost::none, boost::none};
BOOST_CHECK(result_8);
CHECK_EQUAL_RANGE(reference_8.radiuses, result_8->radiuses);
auto result_9 = parseParameters<RouteParameters>("1,2?radiuses=");
RouteParameters reference_9{};
reference_9.coordinates = coords_1;
reference_9.radiuses = {boost::none};
BOOST_CHECK(result_9);
CHECK_EQUAL_RANGE(reference_9.radiuses, result_9->radiuses);
// Some Hints are empty
std::vector<util::Coordinate> coords_3 = {{util::FloatLongitude{1}, util::FloatLatitude{2}},
{util::FloatLongitude{3}, util::FloatLatitude{4}},
{util::FloatLongitude{5}, util::FloatLatitude{6}},
{util::FloatLongitude{7}, util::FloatLatitude{8}}};
std::vector<boost::optional<engine::Hint>> hints_10 = {
engine::Hint::FromBase64(
"XAYAgP___3-QAAAABAAAACEAAAA_AAAAHgAAAHsFAAAUAAAAaWhxALeCmwI7aHEAy4KbAgUAAQE0h8Z2"),
boost::none,
engine::Hint::FromBase64(
"lgQAgP___3-QAAAADwAAABMAAAAoAAAALAAAADQAAAAUAAAAmWFxAL1zmwLcYXEAu3ObAgQAAQE0h8Z2"),
boost::none};
RouteParameters reference_10{false,
false,
false,
RouteParameters::GeometriesType::Polyline,
RouteParameters::OverviewType::Simplified,
boost::optional<bool>{},
coords_3,
hints_10,
std::vector<boost::optional<double>>{},
std::vector<boost::optional<engine::Bearing>>{}};
auto result_10 = parseParameters<RouteParameters>(
"1,2;3,4;5,6;7,8?steps=false&hints="
"XAYAgP___3-QAAAABAAAACEAAAA_AAAAHgAAAHsFAAAUAAAAaWhxALeCmwI7aHEAy4KbAgUAAQE0h8Z2;;"
"lgQAgP___3-QAAAADwAAABMAAAAoAAAALAAAADQAAAAUAAAAmWFxAL1zmwLcYXEAu3ObAgQAAQE0h8Z2"
";");
BOOST_CHECK(result_10);
BOOST_CHECK_EQUAL(reference_10.steps, result_10->steps);
BOOST_CHECK_EQUAL(reference_10.alternatives, result_10->alternatives);
BOOST_CHECK_EQUAL(reference_10.geometries, result_10->geometries);
BOOST_CHECK_EQUAL(reference_10.annotations, result_10->annotations);
BOOST_CHECK_EQUAL(reference_10.overview, result_10->overview);
BOOST_CHECK_EQUAL(reference_10.continue_straight, result_10->continue_straight);
CHECK_EQUAL_RANGE(reference_10.bearings, result_10->bearings);
CHECK_EQUAL_RANGE(reference_10.radiuses, result_10->radiuses);
CHECK_EQUAL_RANGE(reference_10.coordinates, result_10->coordinates);
CHECK_EQUAL_RANGE(reference_10.hints, result_10->hints);
}
BOOST_AUTO_TEST_CASE(valid_table_urls)
{
std::vector<util::Coordinate> coords_1 = {{util::FloatLongitude{1}, util::FloatLatitude{2}},
{util::FloatLongitude{3}, util::FloatLatitude{4}}};
TableParameters reference_1{};
reference_1.coordinates = coords_1;
auto result_1 = parseParameters<TableParameters>("1,2;3,4");
BOOST_CHECK(result_1);
CHECK_EQUAL_RANGE(reference_1.sources, result_1->sources);
CHECK_EQUAL_RANGE(reference_1.destinations, result_1->destinations);
CHECK_EQUAL_RANGE(reference_1.bearings, result_1->bearings);
CHECK_EQUAL_RANGE(reference_1.radiuses, result_1->radiuses);
CHECK_EQUAL_RANGE(reference_1.coordinates, result_1->coordinates);
std::vector<std::size_t> sources_2 = {1, 2, 3};
std::vector<std::size_t> destinations_2 = {4, 5};
TableParameters reference_2{sources_2, destinations_2};
reference_2.coordinates = coords_1;
auto result_2 = parseParameters<TableParameters>("1,2;3,4?sources=1;2;3&destinations=4;5");
BOOST_CHECK(result_2);
CHECK_EQUAL_RANGE(reference_2.sources, result_2->sources);
CHECK_EQUAL_RANGE(reference_2.destinations, result_2->destinations);
CHECK_EQUAL_RANGE(reference_2.bearings, result_2->bearings);
CHECK_EQUAL_RANGE(reference_2.radiuses, result_2->radiuses);
CHECK_EQUAL_RANGE(reference_2.coordinates, result_2->coordinates);
auto result_3 = parseParameters<TableParameters>("1,2;3,4?sources=all&destinations=all");
BOOST_CHECK(result_3);
CHECK_EQUAL_RANGE(reference_1.sources, result_3->sources);
CHECK_EQUAL_RANGE(reference_1.destinations, result_3->destinations);
CHECK_EQUAL_RANGE(reference_1.bearings, result_3->bearings);
CHECK_EQUAL_RANGE(reference_1.radiuses, result_3->radiuses);
CHECK_EQUAL_RANGE(reference_1.coordinates, result_3->coordinates);
}
BOOST_AUTO_TEST_CASE(valid_match_urls)
{
std::vector<util::Coordinate> coords_1 = {{util::FloatLongitude{1}, util::FloatLatitude{2}},
{util::FloatLongitude{3}, util::FloatLatitude{4}}};
MatchParameters reference_1{};
reference_1.coordinates = coords_1;
auto result_1 = parseParameters<MatchParameters>("1,2;3,4");
BOOST_CHECK(result_1);
CHECK_EQUAL_RANGE(reference_1.timestamps, result_1->timestamps);
CHECK_EQUAL_RANGE(reference_1.bearings, result_1->bearings);
CHECK_EQUAL_RANGE(reference_1.radiuses, result_1->radiuses);
CHECK_EQUAL_RANGE(reference_1.coordinates, result_1->coordinates);
MatchParameters reference_2{};
reference_2.coordinates = coords_1;
reference_2.timestamps = {5, 6};
auto result_2 = parseParameters<MatchParameters>("1,2;3,4?timestamps=5;6");
BOOST_CHECK(result_2);
CHECK_EQUAL_RANGE(reference_2.timestamps, result_2->timestamps);
CHECK_EQUAL_RANGE(reference_2.bearings, result_2->bearings);
CHECK_EQUAL_RANGE(reference_2.radiuses, result_2->radiuses);
CHECK_EQUAL_RANGE(reference_2.coordinates, result_2->coordinates);
}
BOOST_AUTO_TEST_CASE(valid_nearest_urls)
{
std::vector<util::Coordinate> coords_1 = {{util::FloatLongitude{1}, util::FloatLatitude{2}}};
NearestParameters reference_1{};
reference_1.coordinates = coords_1;
auto result_1 = parseParameters<NearestParameters>("1,2");
BOOST_CHECK(result_1);
BOOST_CHECK_EQUAL(reference_1.number_of_results, result_1->number_of_results);
CHECK_EQUAL_RANGE(reference_1.bearings, result_1->bearings);
CHECK_EQUAL_RANGE(reference_1.radiuses, result_1->radiuses);
CHECK_EQUAL_RANGE(reference_1.coordinates, result_1->coordinates);
NearestParameters reference_2{};
reference_2.coordinates = coords_1;
reference_2.number_of_results = 42;
auto result_2 = parseParameters<NearestParameters>("1,2?number=42");
BOOST_CHECK(result_2);
BOOST_CHECK_EQUAL(reference_2.number_of_results, result_2->number_of_results);
CHECK_EQUAL_RANGE(reference_2.bearings, result_2->bearings);
CHECK_EQUAL_RANGE(reference_2.radiuses, result_2->radiuses);
CHECK_EQUAL_RANGE(reference_2.coordinates, result_2->coordinates);
}
BOOST_AUTO_TEST_CASE(valid_tile_urls)
{
TileParameters reference_1{1, 2, 3};
auto result_1 = parseParameters<TileParameters>("tile(1,2,3).mvt");
BOOST_CHECK(result_1);
BOOST_CHECK_EQUAL(reference_1.x, result_1->x);
BOOST_CHECK_EQUAL(reference_1.y, result_1->y);
BOOST_CHECK_EQUAL(reference_1.z, result_1->z);
}
BOOST_AUTO_TEST_CASE(valid_trip_urls)
{
std::vector<util::Coordinate> coords_1 = {{util::FloatLongitude{1}, util::FloatLatitude{2}},
{util::FloatLongitude{3}, util::FloatLatitude{4}}};
TripParameters reference_1{};
reference_1.coordinates = coords_1;
auto result_1 = parseParameters<TripParameters>("1,2;3,4");
BOOST_CHECK(result_1);
CHECK_EQUAL_RANGE(reference_1.bearings, result_1->bearings);
CHECK_EQUAL_RANGE(reference_1.radiuses, result_1->radiuses);
CHECK_EQUAL_RANGE(reference_1.coordinates, result_1->coordinates);
}
BOOST_AUTO_TEST_SUITE_END()
| 1 | 17,999 | can we also check for smaller zoom levels returning valid error codes? | Project-OSRM-osrm-backend | cpp |
@@ -15,6 +15,16 @@ namespace ScenarioMeasurement
Console.WriteLine(message);
}
+ public void LogHeader1(string message)
+ {
+ Console.WriteLine($"=============== {message} ================ ");
+ }
+
+ public void LogHeader2(string message)
+ {
+ Console.WriteLine($"***{message}***");
+ }
+
public void LogVerbose(string message)
{
Console.WriteLine(message); | 1 | using System;
using System.Collections.Generic;
using System.Text;
namespace ScenarioMeasurement
{
public class Logger
{
public Logger(string fileName)
{
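// NOTE: the fileName parameter is currently unused; all log output goes to the console.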
}
public void Log(string message)
{
Console.WriteLine(message);
}
public void LogVerbose(string message)
{
Console.WriteLine(message);
}
}
}
| 1 | 10,978 | 1) These names are not great, doesn't help explain what they do. 2) Instead of methods like this that hard code some number of = and *, why not a method that takes the string, defaults to a char to surround with, and a width? 3) Do we really even need headers like this in the output? Maybe we ditch them. | dotnet-performance | .cs |
@@ -2943,7 +2943,7 @@ describe('AutocompleteEditor', () => {
keyDownUp('arrow_up');
- expect(hot.getActiveEditor().htEditor.getSelected()).toEqual([[0, 0, 0, 0]]);
+ expect(hot.getActiveEditor().htEditor.getSelected()).toEqual([[11, 0, 11, 0]]);
done();
}, 200);
}); | 1 | describe('AutocompleteEditor', () => {
var id = 'testContainer';
var choices = ['yellow', 'red', 'orange', 'green', 'blue', 'gray', 'black', 'white', 'purple', 'lime', 'olive', 'cyan'];
var hot;
beforeEach(function() {
this.$container = $(`<div id="${id}" style="width: 300px; height: 200px; overflow: auto"></div>`).appendTo('body');
});
afterEach(function() {
if (hot) {
hot = null;
}
if (this.$container) {
destroy();
this.$container.remove();
}
});
describe('open editor', () => {
it('should display editor (after hitting ENTER)', () => {
handsontable({
columns: [
{
editor: 'autocomplete',
source: choices
}
]
});
selectCell(0, 0);
var editor = $('.autocompleteEditor');
expect(editor.is(':visible')).toBe(false);
keyDownUp('enter');
expect(editor.is(':visible')).toBe(true);
});
it('should display editor (after hitting F2)', () => {
handsontable({
columns: [
{
editor: 'autocomplete',
source: choices
}
]
});
selectCell(0, 0);
var editor = $('.autocompleteEditor');
expect(editor.is(':visible')).toBe(false);
keyDownUp('f2');
expect(editor.is(':visible')).toBe(true);
});
it('should display editor (after doubleclicking)', () => {
handsontable({
columns: [
{
editor: 'autocomplete',
source: choices
}
]
});
selectCell(0, 0);
var editor = $('.autocompleteEditor');
expect(editor.is(':visible')).toBe(false);
mouseDoubleClick($(getCell(0, 0)));
expect(editor.is(':visible')).toBe(true);
});
// see https://github.com/handsontable/handsontable/issues/3380
it('should not throw error while selecting the next cell by hitting enter key', () => {
var spy = jasmine.createSpyObj('error', ['test']);
var prevError = window.onerror;
window.onerror = function(messageOrEvent, source, lineno, colno, error) {
spy.test();
};
handsontable({
columns: [{
editor: 'autocomplete',
source: choices
}]
});
selectCell(0, 0);
keyDownUp('enter');
keyDownUp('enter');
keyDownUp('enter');
expect(spy.test.calls.count()).toBe(0);
window.onerror = prevError;
});
});
describe('choices', () => {
it('should display given choices (array)', (done) => {
handsontable({
columns: [
{
editor: 'autocomplete',
source: choices
}
]
});
selectCell(0, 0);
var editor = $('.autocompleteEditor');
keyDownUp('enter');
setTimeout(() => {
expect(editor.find('tbody td:eq(0)').text()).toEqual(choices[0]);
expect(editor.find('tbody td:eq(1)').text()).toEqual(choices[1]);
expect(editor.find('tbody td:eq(2)').text()).toEqual(choices[2]);
expect(editor.find('tbody td:eq(3)').text()).toEqual(choices[3]);
expect(editor.find('tbody td:eq(4)').text()).toEqual(choices[4]);
done();
}, 100);
});
it('should call source function with context set as cellProperties', (done) => {
var source = jasmine.createSpy('source');
var context;
source.and.callFake(function(query, process) {
process(choices);
context = this;
});
var hot = handsontable({
columns: [
{
editor: 'autocomplete',
source
}
]
});
selectCell(0, 0);
source.calls.reset();
keyDownUp('enter');
setTimeout(() => {
expect(context.instance).toBe(hot);
expect(context.row).toBe(0);
expect(context.col).toBe(0);
done();
}, 200);
});
it('should display given choices (sync function)', (done) => {
var syncSources = jasmine.createSpy('syncSources');
syncSources.and.callFake((query, process) => {
process(choices);
});
handsontable({
columns: [
{
editor: 'autocomplete',
source: syncSources
}
]
});
selectCell(0, 0);
var editor = $('.autocompleteEditor');
syncSources.calls.reset();
keyDownUp('enter');
setTimeout(() => {
expect(editor.find('tbody td:eq(0)').text()).toEqual(choices[0]);
expect(editor.find('tbody td:eq(1)').text()).toEqual(choices[1]);
expect(editor.find('tbody td:eq(2)').text()).toEqual(choices[2]);
expect(editor.find('tbody td:eq(3)').text()).toEqual(choices[3]);
expect(editor.find('tbody td:eq(4)').text()).toEqual(choices[4]);
done();
}, 200);
});
it('should display given choices (async function)', (done) => {
var asyncSources = jasmine.createSpy('asyncSources');
asyncSources.and.callFake((process) => {
process(choices);
});
handsontable({
columns: [
{
editor: 'autocomplete',
source(query, process) {
setTimeout(() => {
asyncSources(process);
}, 0);
}
}
]
});
selectCell(0, 0);
var editor = $('.autocompleteEditor');
keyDownUp('enter');
setTimeout(() => {
expect(asyncSources.calls.count()).toEqual(1);
expect(editor.find('tbody td:eq(0)').text()).toEqual(choices[0]);
expect(editor.find('tbody td:eq(1)').text()).toEqual(choices[1]);
expect(editor.find('tbody td:eq(2)').text()).toEqual(choices[2]);
expect(editor.find('tbody td:eq(3)').text()).toEqual(choices[3]);
expect(editor.find('tbody td:eq(4)').text()).toEqual(choices[4]);
done();
}, 200);
});
it('should NOT update choices list, after cursor leaves and enters the list (#1330)', (done) => {
spyOn(Handsontable.editors.AutocompleteEditor.prototype, 'updateChoicesList').and.callThrough();
var updateChoicesList = Handsontable.editors.AutocompleteEditor.prototype.updateChoicesList;
var hot = handsontable({
columns: [
{
editor: 'autocomplete',
source: choices
}
]
});
selectCell(0, 0);
var editor = hot.getActiveEditor();
keyDownUp('enter');
setTimeout(() => {
updateChoicesList.calls.reset();
$(editor.htContainer).find('.htCore tr:eq(0) td:eq(0)').mouseenter();
$(editor.htContainer).find('.htCore tr:eq(0) td:eq(0)').mouseleave();
$(editor.htContainer).find('.htCore tr:eq(0) td:eq(0)').mouseenter();
}, 200);
setTimeout(() => {
expect(updateChoicesList).not.toHaveBeenCalled();
done();
}, 300);
});
it('should update choices list exactly once after a key is pressed (#1330)', (done) => {
spyOn(Handsontable.editors.AutocompleteEditor.prototype, 'updateChoicesList').and.callThrough();
var updateChoicesList = Handsontable.editors.AutocompleteEditor.prototype.updateChoicesList;
var hot = handsontable({
columns: [
{
editor: 'autocomplete',
source: choices
}
]
});
selectCell(0, 0);
var editor = hot.getActiveEditor();
updateChoicesList.calls.reset();
keyDownUp('enter');
setTimeout(() => {
updateChoicesList.calls.reset();
editor.TEXTAREA.value = 'red';
$(editor.TEXTAREA).simulate('keydown', {
keyCode: 'd'.charCodeAt(0)
});
}, 200);
setTimeout(() => {
expect(updateChoicesList.calls.count()).toEqual(1);
done();
}, 100);
});
it('should not initialize the dropdown with unneeded scrollbars (scrollbar causing a scrollbar issue)', (done) => {
spyOn(Handsontable.editors.AutocompleteEditor.prototype, 'updateChoicesList').and.callThrough();
var updateChoicesList = Handsontable.editors.AutocompleteEditor.prototype.updateChoicesList;
var hot = handsontable({
data: [
[
'blue'
],
[],
[],
[]
],
columns: [
{
editor: 'autocomplete',
source: choices
}
]
});
selectCell(0, 0);
var editor = hot.getActiveEditor();
updateChoicesList.calls.reset();
keyDownUp('enter');
setTimeout(() => {
expect(editor.htContainer.scrollWidth).toEqual(editor.htContainer.clientWidth);
done();
}, 200);
});
it('autocomplete list should have textarea dimensions', (done) => {
var syncSources = jasmine.createSpy('syncSources');
syncSources.and.callFake((query, process) => {
process(choices);
});
handsontable({
colWidths: [200],
columns: [
{
editor: 'autocomplete',
source: syncSources
}
]
});
selectCell(0, 0);
var editor = $('.handsontableInputHolder');
syncSources.calls.reset();
keyDownUp('enter');
setTimeout(() => {
// -2 for transparent borders
expect(editor.find('.autocompleteEditor .htCore td').width()).toEqual(editor.find('.handsontableInput').width() - 2);
expect(editor.find('.autocompleteEditor .htCore td').width()).toBeGreaterThan(187);
done();
}, 200);
});
it('autocomplete list should have the suggestion table dimensions, when trimDropdown option is set to false', (done) => {
var syncSources = jasmine.createSpy('syncSources');
syncSources.and.callFake((query, process) => {
process(['long text', 'even longer text', 'extremely long text in the suggestion list', 'short text', 'text', 'another', 'yellow', 'black']);
});
var hot = handsontable({
colWidths: [200],
columns: [
{
editor: 'autocomplete',
source: syncSources
}
],
trimDropdown: false,
});
selectCell(0, 0);
var editor = $('.handsontableInputHolder');
syncSources.calls.reset();
keyDownUp('enter');
setTimeout(() => {
expect(editor.find('.autocompleteEditor .htCore td').eq(0).width()).toBeGreaterThan(editor.find('.handsontableInput').width());
done();
}, 200);
});
it('autocomplete textarea should have cell dimensions (after render)', (done) => {
var data = [
['a', 'b'],
['c', 'd']
];
hot = handsontable({
data,
minRows: 4,
minCols: 4,
minSpareRows: 4,
minSpareCols: 4,
cells() {
return {
type: Handsontable.AutocompleteCell
};
}
});
selectCell(1, 1);
keyDownUp('enter');
data[1][1] = 'dddddddddddddddddddd';
render();
setTimeout(() => {
var $td = spec().$container.find('.htCore tbody tr:eq(1) td:eq(1)');
expect(autocompleteEditor().width()).toEqual($td.width());
done();
}, 10);
});
it('should invoke beginEditing only once after doubleclicking on a cell (#1011)', () => {
var hot = handsontable({
columns: [
{},
{},
{
type: 'autocomplete',
source: choices
}
]
});
selectCell(0, 2);
spyOn(hot.getActiveEditor(), 'beginEditing');
expect(hot.getActiveEditor().beginEditing.calls.count()).toBe(0);
mouseDoubleClick(getCell(0, 2));
expect(hot.getActiveEditor().beginEditing.calls.count()).toBe(1);
mouseDoubleClick(getCell(1, 2));
expect(hot.getActiveEditor().beginEditing.calls.count()).toBe(2);
mouseDoubleClick(getCell(2, 2));
expect(hot.getActiveEditor().beginEditing.calls.count()).toBe(3);
});
it('should not display all the choices from a long source list and not leave any unused space in the dropdown', async () => {
const hot = handsontable({
columns: [
{
type: 'autocomplete',
source: [
'Acura', 'Audi', 'BMW', 'Buick', 'Cadillac', 'Chevrolet', 'Chrysler', 'Citroen', 'Dodge', 'Eagle', 'Ferrari',
'Ford', 'General Motors', 'GMC', 'Honda', 'Hummer', 'Hyundai', 'Infiniti', 'Isuzu', 'Jaguar', 'Jeep', 'Kia',
'Lamborghini', 'Land Rover', 'Lexus', 'Lincoln', 'Lotus', 'Mazda', 'Mercedes-Benz', 'Mercury', 'Mitsubishi',
'Nissan', 'Oldsmobile', 'Peugeot', 'Pontiac', 'Porsche', 'Regal', 'Renault', 'Saab', 'Saturn', 'Seat', 'Skoda',
'Subaru', 'Suzuki', 'Toyota', 'Volkswagen', 'Volvo']
}
]
});
selectCell(0, 0);
keyDownUp('enter');
const $autocomplete = autocomplete();
const $autocompleteHolder = $autocomplete.find('.ht_master .wtHolder').first();
await sleep(100);
expect($autocomplete.find('td').first().text()).toEqual('Acura');
$autocompleteHolder.scrollTop($autocompleteHolder[0].scrollHeight);
await sleep(100);
expect($autocomplete.find('td').last().text()).toEqual('Volvo');
});
it('should display the choices, regardless if they\'re declared as string or numeric', (done) => {
handsontable({
columns: [
{
editor: 'autocomplete',
source: ['1', '2', 3, '4', 5, 6]
}
]
});
selectCell(0, 0);
var editor = $('.autocompleteEditor');
keyDownUp('enter');
setTimeout(() => {
expect(editor.find('tbody td:eq(0)').text()).toEqual('1');
expect(editor.find('tbody td:eq(1)').text()).toEqual('2');
expect(editor.find('tbody td:eq(2)').text()).toEqual('3');
expect(editor.find('tbody td:eq(3)').text()).toEqual('4');
expect(editor.find('tbody td:eq(4)').text()).toEqual('5');
expect(editor.find('tbody td:eq(5)').text()).toEqual('6');
done();
}, 100);
});
it('should display the choices, regardless if they\'re declared as string or numeric, when data is present', (done) => {
handsontable({
data: Handsontable.helper.createSpreadsheetData(10, 1),
columns: [
{
editor: 'autocomplete',
source: ['1', '2', 3, '4', 5, 6]
}
]
});
selectCell(0, 0);
keyDownUp('backspace');
var editor = $('.autocompleteEditor');
keyDownUp('enter');
setTimeout(() => {
expect(editor.find('tbody td:eq(0)').text()).toEqual('1');
expect(editor.find('tbody td:eq(1)').text()).toEqual('2');
expect(editor.find('tbody td:eq(2)').text()).toEqual('3');
expect(editor.find('tbody td:eq(3)').text()).toEqual('4');
expect(editor.find('tbody td:eq(4)').text()).toEqual('5');
expect(editor.find('tbody td:eq(5)').text()).toEqual('6');
done();
}, 100);
});
it('should display the dropdown above the editor, when there is not enough space below the cell AND there is more space above the cell', (done) => {
var hot = handsontable({
data: Handsontable.helper.createSpreadsheetData(30, 30),
columns: [
{
editor: 'autocomplete',
source: choices
}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}
],
width: 400,
height: 400
});
setDataAtCell(29, 0, '');
selectCell(29, 0);
mouseDoubleClick($(getCell(29, 0)));
setTimeout(() => {
var autocompleteEditor = $('.autocompleteEditor');
expect(autocompleteEditor.css('position')).toEqual('absolute');
expect(autocompleteEditor.css('top')).toEqual(`${(-1) * autocompleteEditor.height()}px`);
done();
}, 200);
});
it('should flip the dropdown upwards when there is no more room left below the cell after filtering the choice list', async () => {
var hot = handsontable({
data: Handsontable.helper.createSpreadsheetData(30, 30),
columns: [
{
editor: 'autocomplete',
source: choices
}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}
],
width: 400,
height: 400
});
setDataAtCell(26, 0, 'b');
selectCell(26, 0);
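// Scroll the main table holder to the bottom before opening the editor.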
hot.view.wt.wtTable.holder.scrollTop = 999;
mouseDoubleClick($(getCell(26, 0)));
var autocompleteEditor = $('.autocompleteEditor');
await sleep(100);
expect(autocompleteEditor.css('position')).toEqual('relative');
autocompleteEditor.siblings('textarea').first().val('');
keyDownUp('backspace');
await sleep(100);
expect(autocompleteEditor.css('position')).toEqual('absolute');
expect(autocompleteEditor.css('top')).toEqual(`${(-1) * autocompleteEditor.height()}px`);
});
});
describe('closing editor', () => {
it('should destroy editor when value change with mouse click on suggestion', (done) => {
var syncSources = jasmine.createSpy('syncSources');
syncSources.and.callFake((query, process) => {
process(choices);
});
handsontable({
columns: [
{
editor: 'autocomplete',
source: syncSources
}
]
});
selectCell(0, 0);
keyDownUp('enter');
setTimeout(() => {
autocomplete().find('tbody td:eq(3)').simulate('mousedown');
expect(getDataAtCell(0, 0)).toEqual('green');
done();
}, 200);
});
it('should not change value type from `numeric` to `string` after mouse click on suggestion - ' +
'test no. 1 #4143', (done) => {
handsontable({
columns: [
{
editor: 'autocomplete',
source: [1, 2, 3, 4, 5, 11, 14]
}
]
});
selectCell(0, 0);
keyDownUp('enter');
setTimeout(() => {
autocomplete().find('tbody td:eq(0)').simulate('mousedown');
expect(typeof getDataAtCell(0, 0)).toEqual('number');
done();
}, 200);
});
it('should not change value type from `numeric` to `string` after mouse click on suggestion - ' +
'test no. 2 #4143', (done) => {
const syncSources = jasmine.createSpy('syncSources');
const source = [1, 2, 3, 4, 5, 11, 14];
syncSources.and.callFake((query, process) => {
process(source);
});
handsontable({
columns: [
{
editor: 'autocomplete',
source: syncSources
}
]
});
selectCell(0, 0);
keyDownUp('enter');
setTimeout(() => {
autocomplete().find('tbody td:eq(0)').simulate('mousedown');
expect(typeof getDataAtCell(0, 0)).toEqual('number');
done();
}, 200);
});
it('should call `afterChange` hook with proper value types - test no. 1 #4143', (done) => {
let changesInside;
let sourceInside;
const afterChange = (changes, source) => {
if (source !== 'loadData') {
changesInside = changes;
sourceInside = source;
}
};
handsontable({
columns: [
{
editor: 'autocomplete',
source: [1, 2, 3, 4, 5, 11, 14]
}
],
afterChange
});
selectCell(0, 0);
keyDownUp('enter');
setTimeout(() => {
autocomplete().find('tbody td:eq(1)').simulate('mousedown');
expect(changesInside[0]).toEqual([0, 0, null, 2]);
done();
}, 200);
});
it('should call `afterChange` hook with proper value types - test no. 2 #4143', (done) => {
let changesInside;
let sourceInside;
const afterChange = (changes, source) => {
if (source !== 'loadData') {
changesInside = changes;
sourceInside = source;
}
};
const syncSources = jasmine.createSpy('syncSources');
const source = [1, 2, 3, 4, 5, 11, 14];
syncSources.and.callFake((query, process) => {
process(source);
});
handsontable({
columns: [
{
editor: 'autocomplete',
source: syncSources
}
],
afterChange
});
selectCell(0, 0);
keyDownUp('enter');
setTimeout(() => {
autocomplete().find('tbody td:eq(1)').simulate('mousedown');
expect(changesInside[0]).toEqual([0, 0, null, 2]);
done();
}, 200);
});
it('should not change value type from `numeric` to `string` when written down value from set of suggestions #4143', (done) => {
const syncSources = jasmine.createSpy('syncSources');
const source = [1, 2, 3, 4, 5, 11, 14];
syncSources.and.callFake((query, process) => {
process(source);
});
handsontable({
columns: [
{
editor: 'autocomplete',
source: syncSources
}
]
});
selectCell(0, 0);
keyDownUp('enter');
keyDownUp('backspace');
document.activeElement.value = '1';
$(document.activeElement).simulate('keyup');
setTimeout(() => {
keyDownUp('enter');
expect(getDataAtCell(0, 0)).toEqual(1);
done();
}, 200);
});
it('should destroy editor when value change with Enter on suggestion', (done) => {
var syncSources = jasmine.createSpy('syncSources');
syncSources.and.callFake((query, process) => {
process(choices);
});
handsontable({
columns: [
{
editor: 'autocomplete',
source: syncSources
}
]
});
selectCell(0, 0);
keyDownUp('enter');
setTimeout(() => {
keyDownUp('arrow_down');
keyDownUp('arrow_down');
keyDownUp('arrow_down');
keyDownUp('arrow_down');
keyDownUp('enter');
expect(getDataAtCell(0, 0)).toEqual('green');
done();
}, 200);
});
it('should destroy editor when pressed Enter then Esc', (done) => {
var syncSources = jasmine.createSpy('syncSources');
syncSources.and.callFake((query, process) => {
process(choices);
});
handsontable({
columns: [
{
editor: 'autocomplete',
source: syncSources
}
]
});
selectCell(0, 0);
keyDownUp('enter');
setTimeout(() => {
expect(autocompleteEditor().is(':visible')).toBe(true);
keyDownUp('esc');
expect(autocompleteEditor().is(':visible')).toBe(false);
done();
}, 200);
});
it('should destroy editor when mouse double clicked then Esc', (done) => {
var syncSources = jasmine.createSpy('syncSources');
syncSources.and.callFake((query, process) => {
process(choices);
});
handsontable({
columns: [
{
editor: 'autocomplete',
source: syncSources
}
]
});
selectCell(0, 0);
mouseDoubleClick(getCell(0, 0));
setTimeout(() => {
expect(autocompleteEditor().is(':visible')).toBe(true);
keyDownUp('esc');
expect(autocompleteEditor().is(':visible')).toBe(false);
done();
}, 200);
});
it('cancel editing (Esc) should restore the previous value', (done) => {
var syncSources = jasmine.createSpy('syncSources');
syncSources.and.callFake((query, process) => {
process(choices);
});
handsontable({
columns: [
{
editor: 'autocomplete',
source: syncSources
}
]
});
setDataAtCell(0, 0, 'black');
selectCell(0, 0);
keyDownUp('enter');
setTimeout(() => {
autocomplete().siblings('.handsontableInput').val('ye');
keyDownUp(69); // e
keyDownUp('esc');
expect(getDataAtCell(0, 0)).toEqual('black');
done();
}, 200);
});
it('should destroy editor when clicked outside the table', (done) => {
var syncSources = jasmine.createSpy('syncSources');
syncSources.and.callFake((query, process) => {
process(choices);
});
handsontable({
columns: [
{
editor: 'autocomplete',
source: syncSources
}
]
});
selectCell(0, 0);
mouseDoubleClick(getCell(0, 0));
setTimeout(() => {
expect(autocompleteEditor().is(':visible')).toBe(true);
$('body').simulate('mousedown');
expect(autocompleteEditor().is(':visible')).toBe(false);
done();
}, 200);
});
it('should show fillHandle element again after close editor', (done) => {
var syncSources = jasmine.createSpy('syncSources');
syncSources.and.callFake((query, process) => {
process(choices.filter((choice) => choice.indexOf(query) != -1));
});
var hot = handsontable({
columns: [
{
type: 'autocomplete',
source: syncSources,
strict: false
},
{}
]
});
selectCell(1, 0);
keyDownUp('x'); // Trigger quick edit mode
keyDownUp('enter');
setTimeout(() => {
expect($('#testContainer.handsontable > .handsontable .wtBorder.current.corner:visible').length).toEqual(1);
done();
}, 200);
});
});
describe('non strict mode', () => {
it('should allow any value in non strict mode (close editor with ENTER)', (done) => {
var syncSources = jasmine.createSpy('syncSources');
syncSources.and.callFake((query, process) => {
process(choices);
});
handsontable({
columns: [
{
editor: 'autocomplete',
source: syncSources
}
]
});
selectCell(0, 0);
keyDownUp('enter');
setTimeout(() => {
var editor = $('.handsontableInput');
editor.val('foo');
keyDownUp('enter');
expect(getDataAtCell(0, 0)).toEqual('foo');
done();
}, 200);
});
it('should allow any value in non strict mode (close editor by clicking on table)', (done) => {
var syncSources = jasmine.createSpy('syncSources');
syncSources.and.callFake((query, process) => {
process(choices);
});
handsontable({
columns: [
{
editor: 'autocomplete',
source: syncSources
}
]
});
selectCell(0, 0);
keyDownUp('enter');
setTimeout(() => {
var editor = $('.handsontableInput');
editor.val('foo');
spec().$container.find('tbody tr:eq(1) td:eq(0)').simulate('mousedown');
expect(getDataAtCell(0, 0)).toEqual('foo');
done();
}, 200);
});
it('should save the value from textarea after hitting ENTER', (done) => {
var syncSources = jasmine.createSpy('syncSources');
syncSources.and.callFake((query, process) => {
process(choices.filter((choice) => choice.indexOf(query) != -1));
});
hot = handsontable({
columns: [
{
editor: 'autocomplete',
source: syncSources
}
]
});
selectCell(0, 0);
var editorInput = $('.handsontableInput');
expect(getDataAtCell(0, 0)).toBeNull();
keyDownUp('enter');
setTimeout(() => {
syncSources.calls.reset();
editorInput.val('b');
keyDownUp('b'.charCodeAt(0));
}, 200);
setTimeout(() => {
var ac = hot.getActiveEditor();
var innerHot = ac.htEditor;
expect(innerHot.getData()).toEqual([
['blue'],
['black']
]);
var selected = innerHot.getSelected();
expect(selected).toBeUndefined();
keyDownUp('enter');
expect(getDataAtCell(0, 0)).toEqual('b');
done();
}, 400);
});
});
describe('strict mode', () => {
it('strict mode should NOT use value if it DOES NOT match the list (sync response is empty)', (done) => {
var onAfterValidate = jasmine.createSpy('onAfterValidate');
var onAfterChange = jasmine.createSpy('onAfterChange');
var syncSources = jasmine.createSpy('syncSources');
syncSources.and.callFake((query, process) => {
process([]); // hardcoded empty result
});
handsontable({
data: [
['one', 'two'],
['three', 'four']
],
columns: [
{
type: 'autocomplete',
source: syncSources,
allowInvalid: false,
strict: true
},
{}
],
afterValidate: onAfterValidate,
afterChange: onAfterChange
});
setDataAtCell(0, 0, 'unexistent');
setTimeout(() => {
expect(getData()).toEqual([
['one', 'two'],
['three', 'four']
]);
expect(syncSources.calls.count()).toEqual(1);
expect(onAfterValidate.calls.count()).toEqual(1);
expect(onAfterChange.calls.count()).toEqual(1); // 1 for loadData (it is not called after failed edit)
done();
}, 200);
});
it('strict mode should use value if it DOES match the list (sync response is not empty)', (done) => {
var onAfterValidate = jasmine.createSpy('onAfterValidate');
var onAfterChange = jasmine.createSpy('onAfterChange');
var syncSources = jasmine.createSpy('syncSources');
syncSources.and.callFake((query, process) => {
process(choices); // hardcoded non-empty result
});
handsontable({
data: [
['one', 'two'],
['three', 'four']
],
columns: [
{
type: 'autocomplete',
source: syncSources,
allowInvalid: false,
strict: true
},
{}
],
afterValidate: onAfterValidate,
afterChange: onAfterChange
});
setDataAtCell(0, 0, 'yellow');
setTimeout(() => {
expect(getData()).toEqual([
['yellow', 'two'],
['three', 'four']
]);
expect(syncSources.calls.count()).toEqual(1);
expect(onAfterValidate.calls.count()).toEqual(1);
expect(onAfterChange.calls.count()).toEqual(2); // 1 for loadData and 1 for setDataAtCell
done();
}, 200);
});
it('strict mode should NOT use value if it DOES NOT match the list (async response is empty)', (done) => {
var onAfterValidate = jasmine.createSpy('onAfterValidate');
var onAfterChange = jasmine.createSpy('onAfterChange');
var asyncSources = jasmine.createSpy('asyncSources');
asyncSources.and.callFake((query, process) => {
setTimeout(() => {
process([]); // hardcoded empty result
});
});
handsontable({
data: [
['one', 'two'],
['three', 'four']
],
columns: [
{
type: 'autocomplete',
source: asyncSources,
allowInvalid: false,
strict: true
},
{}
],
afterValidate: onAfterValidate,
afterChange: onAfterChange
});
setDataAtCell(0, 0, 'unexistent');
setTimeout(() => {
expect(getData()).toEqual([
['one', 'two'],
['three', 'four']
]);
expect(asyncSources.calls.count()).toEqual(1);
expect(onAfterValidate.calls.count()).toEqual(1);
expect(onAfterChange.calls.count()).toEqual(1); // 1 for loadData (it is not called after failed edit)
done();
}, 200);
});
it('strict mode should use value if it DOES match the list (async response is not empty)', (done) => {
var onAfterValidate = jasmine.createSpy('onAfterValidate');
var onAfterChange = jasmine.createSpy('onAfterChange');
var asyncSources = jasmine.createSpy('asyncSources');
asyncSources.and.callFake((query, process) => {
setTimeout(() => {
process(choices); // hardcoded non-empty result
});
});
handsontable({
data: [
['one', 'two'],
['three', 'four']
],
columns: [
{
type: 'autocomplete',
source: asyncSources,
allowInvalid: false,
strict: true
},
{}
],
afterValidate: onAfterValidate,
afterChange: onAfterChange
});
setDataAtCell(0, 0, 'yellow');
setTimeout(() => {
expect(getData()).toEqual([
['yellow', 'two'],
['three', 'four']
]);
expect(asyncSources.calls.count()).toEqual(1);
expect(onAfterValidate.calls.count()).toEqual(1);
expect(onAfterChange.calls.count()).toEqual(2); // 1 for loadData and 1 for setDataAtCell
done();
}, 200);
});
it('strict mode should mark value as invalid if it DOES NOT match the list (sync response is empty)', (done) => {
var onAfterValidate = jasmine.createSpy('onAfterValidate');
var onAfterChange = jasmine.createSpy('onAfterChange');
var syncSources = jasmine.createSpy('syncSources');
syncSources.and.callFake((query, process) => {
process([]); // hardcoded empty result
});
handsontable({
data: [
['one', 'two'],
['three', 'four']
],
columns: [
{
type: 'autocomplete',
source: syncSources,
allowInvalid: true,
strict: true
},
{}
],
afterValidate: onAfterValidate,
afterChange: onAfterChange
});
expect(getCellMeta(0, 0).valid).not.toBe(false);
expect($(getCell(0, 0)).hasClass('htInvalid')).toBe(false);
setDataAtCell(0, 0, 'unexistent');
setTimeout(() => {
expect(getData()).toEqual([
['unexistent', 'two'],
['three', 'four']
]);
expect(getCellMeta(0, 0).valid).toBe(false);
expect($(getCell(0, 0)).hasClass('htInvalid')).toBe(true);
done();
}, 200);
});
it('should select the best matching option after hitting ENTER', (done) => {
var onAfterValidate = jasmine.createSpy('onAfterValidate');
var syncSources = jasmine.createSpy('syncSources');
syncSources.and.callFake((query, process) => {
process(choices.filter((choice) => choice.indexOf(query) != -1));
});
hot = handsontable({
columns: [
{
editor: 'autocomplete',
source: syncSources,
strict: true
}
],
afterValidate: onAfterValidate
});
selectCell(0, 0);
var editorInput = $('.handsontableInput');
expect(getDataAtCell(0, 0)).toBeNull();
keyDownUp('enter');
setTimeout(() => {
syncSources.calls.reset();
editorInput.val('b');
keyDownUp('b'.charCodeAt(0));
}, 200);
setTimeout(() => {
var ac = hot.getActiveEditor();
var innerHot = ac.htEditor;
expect(innerHot.getData()).toEqual([
['blue'],
['black']
]);
var selected = innerHot.getSelected()[0];
var selectedData = innerHot.getDataAtCell(selected[0], selected[1]);
expect(selectedData).toEqual('blue');
onAfterValidate.calls.reset();
keyDownUp('enter');
}, 400);
setTimeout(() => {
expect(getDataAtCell(0, 0)).toEqual('blue');
done();
}, 600);
});
it('should select the best matching option after hitting TAB', (done) => {
var onAfterValidate = jasmine.createSpy('onAfterValidate');
var syncSources = jasmine.createSpy('syncSources');
syncSources.and.callFake((query, process) => {
process(choices.filter((choice) => choice.indexOf(query) != -1));
});
hot = handsontable({
columns: [
{
editor: 'autocomplete',
source: syncSources,
strict: true
}
],
afterValidate: onAfterValidate
});
selectCell(0, 0);
var editorInput = $('.handsontableInput');
expect(getDataAtCell(0, 0)).toBeNull();
keyDownUp('enter');
setTimeout(() => {
syncSources.calls.reset();
editorInput.val('b');
keyDownUp('b'.charCodeAt(0));
}, 200);
setTimeout(() => {
var ac = hot.getActiveEditor();
var innerHot = ac.htEditor;
expect(innerHot.getData()).toEqual([
['blue'],
['black']
]);
var selected = innerHot.getSelected()[0];
var selectedData = innerHot.getDataAtCell(selected[0], selected[1]);
expect(selectedData).toEqual('blue');
onAfterValidate.calls.reset();
keyDownUp('tab');
}, 400);
setTimeout(() => {
expect(getDataAtCell(0, 0)).toEqual('blue');
done();
}, 600);
});
it('should mark list item corresponding to current cell value as selected', (done) => {
var syncSources = jasmine.createSpy('syncSources');
syncSources.and.callFake((query, process) => {
process(['red', 'dark-yellow', 'yellow', 'light-yellow', 'black']);
});
handsontable({
columns: [
{
editor: 'autocomplete',
source: syncSources,
strict: true
}
],
data: [
['yellow'],
['red'],
['blue']
]
});
selectCell(0, 0);
keyDownUp('enter');
setTimeout(() => {
expect(autocomplete().find('.current').text()).toEqual(getDataAtCell(0, 0));
done();
}, 200);
});
});
describe('filtering', () => {
it('typing in textarea should filter the lookup list', (done) => {
var syncSources = jasmine.createSpy('syncSources');
syncSources.and.callFake((query, process) => {
process(choices.filter((choice) => choice.indexOf(query) != -1));
});
hot = handsontable({
columns: [
{
editor: 'autocomplete',
source: syncSources
}
]
});
selectCell(0, 0);
var editorInput = $('.handsontableInput');
expect(getDataAtCell(0, 0)).toBeNull();
keyDownUp('enter');
setTimeout(() => {
syncSources.calls.reset();
editorInput.val('e');
keyDownUp(69); // e
}, 200);
setTimeout(() => {
var ac = hot.getActiveEditor();
var innerHot = ac.htEditor;
expect(innerHot.getData()).toEqual([
['red'],
['yellow'],
['green'],
['blue'],
['lime'],
['white'],
['olive'],
['orange'],
['purple']
]);
syncSources.calls.reset();
editorInput.val('ed');
keyDownUp(68); // d
}, 400);
setTimeout(() => {
var ac = hot.getActiveEditor();
var innerHot = ac.htEditor;
expect(innerHot.getData()).toEqual([
['red']
]);
done();
}, 600);
});
it('default filtering should be case insensitive', (done) => {
hot = handsontable({
columns: [
{
editor: 'autocomplete',
source: choices
}
]
});
selectCell(0, 0);
var editorInput = $('.handsontableInput');
expect(getDataAtCell(0, 0)).toBeNull();
keyDownUp('enter');
editorInput.val('e');
keyDownUp(69); // e
setTimeout(() => {
var ac = hot.getActiveEditor();
var innerHot = ac.htEditor;
expect(innerHot.getData()).toEqual([
['red'],
['yellow'],
['green'],
['blue'],
['lime'],
['white'],
['olive'],
['orange'],
['purple']
]);
editorInput.val('E');
keyDownUp(69); // E (same as 'e')
}, 50);
setTimeout(() => {
var ac = hot.getActiveEditor();
var innerHot = ac.htEditor;
expect(innerHot.getData()).toEqual([
['red'],
['yellow'],
['green'],
['blue'],
['lime'],
['white'],
['olive'],
['orange'],
['purple']
]);
done();
}, 100);
});
it('default filtering should be case sensitive when filteringCaseSensitive is true', (done) => {
hot = handsontable({
columns: [
{
editor: 'autocomplete',
source: choices,
filteringCaseSensitive: true
}
]
});
selectCell(0, 0);
var editorInput = $('.handsontableInput');
expect(getDataAtCell(0, 0)).toBeNull();
keyDownUp('enter');
editorInput.val('e');
keyDownUp(69); // e
setTimeout(() => {
var ac = hot.getActiveEditor();
var innerHot = ac.htEditor;
expect(innerHot.getData()).toEqual([
['red'],
['yellow'],
['green'],
['blue'],
['lime'],
['white'],
['olive'],
['orange'],
['purple']
]);
editorInput.val('E');
keyDownUp(69); // E (same as 'e')
}, 50);
setTimeout(() => {
var ac = hot.getActiveEditor();
var innerHot = ac.htEditor;
expect(innerHot.getData()).toEqual([]);
expect(innerHot.getSourceData()).toEqual([]);
done();
}, 200);
});
it('typing in textarea should NOT filter the lookup list when filtering is disabled', (done) => {
hot = handsontable({
columns: [
{
editor: 'autocomplete',
source: choices,
filter: false
}
]
});
selectCell(0, 0);
var editorInput = $('.handsontableInput');
expect(getDataAtCell(0, 0)).toBeNull();
keyDownUp('enter');
setTimeout(() => {
editorInput.val('e');
keyDownUp('e'.charCodeAt(0)); // e
}, 20);
setTimeout(() => {
var ac = hot.getActiveEditor();
var innerHot = ac.htEditor;
expect(innerHot.getData()).toEqual(Handsontable.helper.pivot([choices]));
editorInput.val('ed');
keyDownUp('d'.charCodeAt(0)); // d
}, 40);
setTimeout(() => {
var ac = hot.getActiveEditor();
var innerHot = ac.htEditor;
expect(innerHot.getData()).toEqual(Handsontable.helper.pivot([choices]));
done();
}, 60);
});
it('typing in textarea should highlight the matching phrase', (done) => {
var choices = ['Male', 'Female'];
var syncSources = jasmine.createSpy('syncSources');
syncSources.and.callFake((query, process) => {
process(choices.filter((choice) => choice.search(new RegExp(query, 'i')) != -1));
});
hot = handsontable({
columns: [
{
editor: 'autocomplete',
source: syncSources,
filter: false
}
]
});
selectCell(0, 0);
var editorInput = $('.handsontableInput');
expect(getDataAtCell(0, 0)).toBeNull();
keyDownUp('enter');
setTimeout(() => {
syncSources.calls.reset();
editorInput.val('Male');
keyDownUp(69); // e
}, 200);
setTimeout(() => {
var ac = hot.getActiveEditor();
var innerHot = ac.htEditor;
var autocompleteList = $(innerHot.rootElement);
expect(autocompleteList.find('td:eq(0)').html()).toMatch(/<(strong|STRONG)>Male<\/(strong|STRONG)>/); // IE8 makes the tag names UPPERCASE
expect(autocompleteList.find('td:eq(1)').html()).toMatch(/Fe<(strong|STRONG)>male<\/(strong|STRONG)>/);
done();
}, 400);
});
it('text in textarea should not be interpreted as regexp', (done) => {
spyOn(Handsontable.editors.AutocompleteEditor.prototype, 'queryChoices').and.callThrough();
var queryChoices = Handsontable.editors.AutocompleteEditor.prototype.queryChoices;
hot = handsontable({
columns: [
{
editor: 'autocomplete',
source: choices
}
]
});
selectCell(0, 0);
var editorInput = $('.handsontableInput');
expect(getDataAtCell(0, 0)).toBeNull();
keyDownUp('enter');
setTimeout(() => {
queryChoices.calls.reset();
editorInput.val('yellow|red');
keyDownUp('d'.charCodeAt(0));
}, 200);
setTimeout(() => {
var ac = hot.getActiveEditor();
var innerHot = ac.htEditor;
expect(innerHot.getData().length).toEqual(0);
done();
}, 400);
});
it('text in textarea should not be interpreted as regexp when highlighting the matching phrase', (done) => {
var choices = ['Male', 'Female'];
var syncSources = jasmine.createSpy('syncSources');
syncSources.and.callFake((query, process) => {
process(choices.filter((choice) => choice.search(new RegExp(query, 'i')) != -1));
});
hot = handsontable({
columns: [
{
editor: 'autocomplete',
source: syncSources,
filter: false
}
]
});
selectCell(0, 0);
var editorInput = $('.handsontableInput');
expect(getDataAtCell(0, 0)).toBeNull();
keyDownUp('enter');
setTimeout(() => {
syncSources.calls.reset();
editorInput.val('M|F');
keyDownUp('F'.charCodeAt(0));
}, 200);
setTimeout(() => {
var ac = hot.getActiveEditor();
var innerHot = ac.htEditor;
var autocompleteList = $(innerHot.rootElement);
expect(autocompleteList.find('td:eq(0)').html()).toEqual('Male');
expect(autocompleteList.find('td:eq(1)').html()).toEqual('Female');
done();
}, 400);
});
it('should allow any value if filter === false and allowInvalid === true', (done) => {
spyOn(Handsontable.editors.AutocompleteEditor.prototype, 'queryChoices').and.callThrough();
var queryChoices = Handsontable.editors.AutocompleteEditor.prototype.queryChoices;
handsontable({
columns: [
{
editor: 'autocomplete',
source: choices,
filter: false,
strict: true,
allowInvalid: true
}
]
});
selectCell(0, 0);
var editorInput = $('.handsontableInput');
expect(getDataAtCell(0, 0)).toBeNull();
keyDownUp('enter');
setTimeout(() => {
queryChoices.calls.reset();
editorInput.val('foobar');
keyDownUp(82); // r
}, 200);
setTimeout(() => {
keyDownUp(Handsontable.helper.KEY_CODES.ENTER);
expect(getDataAtCell(0, 0)).toEqual('foobar');
done();
}, 400);
});
it('typing in textarea should highlight best choice, if strict === true', (done) => {
var choices = ['Male', 'Female'];
var syncSources = jasmine.createSpy('syncSources');
syncSources.and.callFake((query, process) => {
process(choices.filter((choice) => choice.search(new RegExp(query, 'i')) != -1));
});
var hot = handsontable({
columns: [
{
editor: 'autocomplete',
source: syncSources,
filter: false,
strict: true
}
]
});
selectCell(0, 0);
var editorInput = $('.handsontableInput');
expect(getDataAtCell(0, 0)).toBeNull();
keyDownUp('enter');
setTimeout(() => {
syncSources.calls.reset();
editorInput.val('e');
keyDownUp(69); // e
}, 200);
setTimeout(() => {
var ac = hot.getActiveEditor();
var innerHot = ac.htEditor;
expect(innerHot.getSelected()).toEqual([[1, 0, 1, 0]]);
done();
}, 400);
});
});
it('should restore the old value when hovered over an autocomplete menu item and then clicked outside of the table', (done) => {
var syncSources = jasmine.createSpy('syncSources');
syncSources.and.callFake((query, process) => {
process(choices);
});
handsontable({
columns: [
{
editor: 'autocomplete',
source: syncSources
}
]
});
selectCell(0, 0);
expect(getDataAtCell(0, 0)).toBeNull();
keyDownUp('enter');
setTimeout(() => {
autocomplete().find('tbody td:eq(1)').simulate('mouseenter');
autocomplete().find('tbody td:eq(1)').simulate('mouseleave');
spec().$container.simulate('mousedown');
expect(getDataAtCell(0, 0)).toBeNull();
done();
}, 200);
});
it('should be able to use empty value ("")', (done) => {
var syncSources = jasmine.createSpy('syncSources');
syncSources.and.callFake((query, process) => {
process(['', 'BMW', 'Bentley']);
});
handsontable({
data: [
['one', 'two'],
['three', 'four']
],
columns: [
{
editor: 'autocomplete',
source: syncSources,
filter: false
}
]
});
selectCell(0, 0);
keyDownUp('enter');
setTimeout(() => {
expect(getDataAtCell(0, 0)).toEqual('one');
autocomplete().find('tbody td:eq(0)').simulate('mousedown');
expect(getDataAtCell(0, 0)).toEqual('');
done();
}, 200);
});
describe('allow html mode', () => {
it('should allow inject html items (async mode)', (done) => {
hot = handsontable({
columns: [
{
type: 'autocomplete',
source(query, cb) {
cb(['<b>foo <span>zip</span></b>', '<i>bar</i>', '<strong>baz</strong>']);
},
allowHtml: true,
}
]
});
selectCell(0, 0);
var editorInput = $('.handsontableInput');
expect(getDataAtCell(0, 0)).toBeNull();
keyDownUp('enter');
setTimeout(() => {
editorInput.val('b');
keyDownUp('b'.charCodeAt(0));
}, 200);
setTimeout(() => {
var ac = hot.getActiveEditor();
var innerHot = ac.htEditor;
expect(innerHot.getData()).toEqual([
['<i>bar</i>'],
['<strong>baz</strong>'],
]);
editorInput.val('bar');
keyDownUp('a'.charCodeAt(0));
keyDownUp('r'.charCodeAt(0));
}, 400);
setTimeout(() => {
var ac = hot.getActiveEditor();
var innerHot = ac.htEditor;
expect(innerHot.getData()).toEqual([
['<i>bar</i>']
]);
keyDownUp('arrow_down');
keyDownUp('enter');
}, 600);
setTimeout(() => {
expect(getCell(0, 0).querySelector('i').textContent).toBe('bar');
done();
}, 700);
});
it('should allow inject html items (sync mode)', (done) => {
hot = handsontable({
columns: [
{
type: 'autocomplete',
source: ['<b>foo <span>zip</span></b>', '<i>bar</i>', '<strong>baz</strong>'],
allowHtml: true,
}
]
});
selectCell(0, 0);
var editorInput = $('.handsontableInput');
expect(getDataAtCell(0, 0)).toBeNull();
keyDownUp('enter');
setTimeout(() => {
editorInput.val('b');
keyDownUp('b'.charCodeAt(0));
}, 200);
setTimeout(() => {
var ac = hot.getActiveEditor();
var innerHot = ac.htEditor;
expect(innerHot.getData()).toEqual([
['<i>bar</i>'],
['<strong>baz</strong>'],
]);
editorInput.val('bar');
keyDownUp('a'.charCodeAt(0));
keyDownUp('r'.charCodeAt(0));
}, 400);
setTimeout(() => {
var ac = hot.getActiveEditor();
var innerHot = ac.htEditor;
expect(innerHot.getData()).toEqual([
['<i>bar</i>']
]);
keyDownUp('arrow_down');
keyDownUp('enter');
}, 600);
setTimeout(() => {
expect(getCell(0, 0).querySelector('i').textContent).toBe('bar');
done();
}, 700);
});
});
describe('disallow html mode', () => {
it('should be disabled by default', () => {
hot = handsontable({
columns: [
{
type: 'autocomplete',
source(query, cb) {
cb(['<b>foo <span>zip</span></b>', '<i>bar</i>', '<strong>baz</strong>']);
},
allowHtml: false,
}
]
});
expect(hot.getCellMeta(0, 0).allowHtml).toBeFalsy();
});
it('should strip html from strings provided in source (async mode)', async () => {
const hot = handsontable({
columns: [
{
type: 'autocomplete',
source(query, cb) {
cb(['<b>foo <span>zip</span></b>', '<i>bar</i>', '<strong>baz</strong>']);
},
allowHtml: false,
}
]
});
selectCell(0, 0);
const editorInput = $('.handsontableInput');
expect(getDataAtCell(0, 0)).toBeNull();
keyDownUp('enter');
await sleep(200);
editorInput.val('b');
keyDownUp('b'.charCodeAt(0));
await sleep(200);
{
const ac = hot.getActiveEditor();
const innerHot = ac.htEditor;
expect(innerHot.getData()).toEqual([
['bar'],
['baz'],
]);
editorInput.val('bar');
keyDownUp('a'.charCodeAt(0));
keyDownUp('r'.charCodeAt(0));
}
await sleep(200);
{
const ac = hot.getActiveEditor();
const innerHot = ac.htEditor;
expect(innerHot.getData()).toEqual([
['bar']
]);
keyDownUp('arrow_down');
keyDownUp('enter');
}
await sleep(200);
expect(getCell(0, 0).querySelector('i')).toBeNull();
expect(getCell(0, 0).textContent).toMatch('bar');
});
it('should strip html from strings provided in source (sync mode)', async () => {
const hot = handsontable({
columns: [
{
type: 'autocomplete',
source: ['<b>foo <span>zip</span></b>', '<i>bar</i>', '<strong>baz</strong>'],
allowHtml: false,
}
]
});
selectCell(0, 0);
const editorInput = $('.handsontableInput');
expect(getDataAtCell(0, 0)).toBeNull();
keyDownUp('enter');
await sleep(200);
editorInput.val('b');
keyDownUp('b'.charCodeAt(0));
await sleep(200);
{
const ac = hot.getActiveEditor();
const innerHot = ac.htEditor;
expect(innerHot.getData()).toEqual([
['bar'],
['baz'],
]);
}
editorInput.val('bar');
keyDownUp('a'.charCodeAt(0));
keyDownUp('r'.charCodeAt(0));
await sleep(200);
{
const ac = hot.getActiveEditor();
const innerHot = ac.htEditor;
expect(innerHot.getData()).toEqual([
['bar']
]);
}
keyDownUp('arrow_down');
keyDownUp('enter');
await sleep(100);
expect(getCell(0, 0).querySelector('i')).toBeNull();
expect(getCell(0, 0).textContent).toMatch('bar');
});
});
describe('Autocomplete helper functions:', () => {
describe('sortByRelevance', () => {
it('should sort the provided array, so items more relevant to the provided value are listed first', () => {
var choices = [
'Wayne', // 0
'Draven', // 1
'Banner', // 2
'Stark', // 3
'Parker', // 4
'Kent', // 5
'Gordon', // 6
'Kyle', // 7
'Simmons'// 8
];
let value = 'a';
var sorted = Handsontable.editors.AutocompleteEditor.sortByRelevance(value, choices);
expect(sorted).toEqual([0, 2, 4, 3, 1]);
value = 'o';
sorted = Handsontable.editors.AutocompleteEditor.sortByRelevance(value, choices);
expect(sorted).toEqual([6, 8]);
value = 'er';
sorted = Handsontable.editors.AutocompleteEditor.sortByRelevance(value, choices);
expect(sorted).toEqual([2, 4]);
});
});
});
it('should not modify the suggestion list\'s order, when the sortByRelevance option is set to false', (done) => {
var choices = [
'Wayne', 'Draven', 'Banner', 'Stark', 'Parker', 'Kent', 'Gordon', 'Kyle', 'Simmons'
];
var hot = handsontable({
columns: [
{
editor: 'autocomplete',
source: choices,
sortByRelevance: false
}
]
});
selectCell(0, 0);
keyDownUp('enter');
var $editorInput = $('.handsontableInput');
$editorInput.val('a');
keyDownUp('a'.charCodeAt(0));
Handsontable.dom.setCaretPosition($editorInput[0], 1);
setTimeout(() => {
var dropdownList = $('.autocompleteEditor tbody').first();
var listLength = dropdownList.find('tr').size();
expect(listLength).toBe(9);
for (var i = 1; i <= listLength; i++) {
expect(dropdownList.find(`tr:nth-child(${i}) td`).text()).toEqual(choices[i - 1]);
}
done();
}, 30);
});
it('should fire one afterChange event when value is changed', (done) => {
var onAfterChange = jasmine.createSpy('onAfterChange');
var syncSources = jasmine.createSpy('syncSources');
syncSources.and.callFake((query, process) => {
process(choices);
});
handsontable({
columns: [
{
editor: 'autocomplete',
source: syncSources
}
],
afterChange: onAfterChange
});
selectCell(0, 0);
keyDownUp('enter');
setTimeout(() => {
onAfterChange.calls.reset();
autocomplete().find('tbody td:eq(1)').simulate('mousedown');
expect(getDataAtCell(0, 0)).toEqual('red');
expect(onAfterChange.calls.count()).toEqual(1);
expect(onAfterChange).toHaveBeenCalledWith([[0, 0, null, 'red']], 'edit', undefined, undefined, undefined, undefined);
done();
}, 200);
});
it('should not affect other cell values after clicking on autocomplete cell (#1021)', (done) => {
var syncSources = jasmine.createSpy('syncSources');
syncSources.and.callFake((query, process) => {
process(choices);
});
handsontable({
columns: [
{},
{},
{
editor: 'autocomplete',
source: syncSources
},
{}
],
data: [
[null, null, 'yellow', null],
[null, null, 'red', null],
[null, null, 'blue', null]
]
});
expect($(getCell(0, 2)).text()).toMatch('yellow');
mouseDoubleClick(getCell(0, 2));
expect($(getCell(1, 2)).text()).toMatch('red');
mouseDoubleClick(getCell(1, 2));
expect($(getCell(2, 2)).text()).toMatch('blue');
mouseDoubleClick(getCell(2, 2));
setTimeout(() => {
expect(getDataAtCol(2)).toEqual(['yellow', 'red', 'blue']);
done();
}, 200);
});
it('should handle editor if cell data is a function', (done) => {
spyOn(Handsontable.editors.AutocompleteEditor.prototype, 'updateChoicesList').and.callThrough();
var updateChoicesList = Handsontable.editors.AutocompleteEditor.prototype.updateChoicesList;
var afterValidateCallback = jasmine.createSpy('afterValidateCallback');
var hot = handsontable({
data: [
new Model({
id: 1,
name: 'Ted Right',
address: ''
}),
new Model({
id: 2,
name: 'Frank Honest',
address: ''
}),
new Model({
id: 3,
name: 'Joan Well',
address: ''
})],
dataSchema: Model,
colHeaders: ['ID', 'Name', 'Address'],
columns: [
{
data: createAccessorForProperty('id'),
type: 'autocomplete',
source: ['1', '2', '3'],
filter: false,
strict: true
},
{
data: createAccessorForProperty('name')
},
{
data: createAccessorForProperty('address')
}
],
minSpareRows: 1,
afterValidate: afterValidateCallback
});
selectCell(0, 0);
expect(hot.getActiveEditor().isOpened()).toBe(false);
keyDownUp('enter');
setTimeout(() => {
expect(hot.getActiveEditor().isOpened()).toBe(true);
afterValidateCallback.calls.reset();
$(hot.getActiveEditor().htContainer).find('tr:eq(1) td:eq(0)').simulate('mousedown');
}, 200);
setTimeout(() => {
expect(getDataAtCell(0, 0)).toEqual('2');
done();
}, 400);
});
// The input element must not lose focus while new characters are entered, as that would break IME functionality for Asian users.
it('should not lose the focus on input element while inserting new characters (#839)', async () => {
let blurred = false;
const listener = () => {
blurred = true;
};
const hot = handsontable({
data: [
['one', 'two'],
['three', 'four']
],
columns: [
{
type: 'autocomplete',
source: choices,
},
{},
],
});
selectCell(0, 0);
keyDownUp('enter');
hot.getActiveEditor().TEXTAREA.addEventListener('blur', listener);
await sleep(200);
hot.getActiveEditor().TEXTAREA.value = 't';
keyDownUp('t'.charCodeAt(0));
hot.getActiveEditor().TEXTAREA.value = 'te';
keyDownUp('e'.charCodeAt(0));
hot.getActiveEditor().TEXTAREA.value = 'teo';
keyDownUp('o'.charCodeAt(0));
expect(blurred).toBeFalsy();
hot.getActiveEditor().TEXTAREA.removeEventListener('blur', listener);
});
it('should not lose the focus from the editor after selecting items from the choice list', async () => {
const hot = handsontable({
data: [
['', 'two'],
['three', 'four']
],
columns: [
{
type: 'autocomplete',
source: ['brown', 'yellow', 'green'],
},
{},
],
});
selectCell(0, 0);
keyDownUp('enter');
await sleep(0);
keyDownUp('arrow_down');
keyDownUp('arrow_down');
keyDownUp('arrow_down');
hot.getActiveEditor().TEXTAREA.value = 'r';
keyDownUp('R'.charCodeAt(0));
await sleep(0);
// Check if ESCAPE key is responsive.
keyDownUp('esc');
expect(hot.isListening()).toBeTruthy();
expect(Handsontable.dom.isVisible(hot.getActiveEditor().htEditor.rootElement)).toBeFalsy();
});
it('should not call the `source` method if cell is read only and the cell has been selected', () => {
var syncSources = jasmine.createSpy('syncSources');
syncSources.and.callFake((query, process) => {
process([]); // hardcoded empty result
});
handsontable({
data: [
['one', 'two'],
['three', 'four']
],
columns: [
{
type: 'autocomplete',
source: syncSources,
allowInvalid: false,
strict: true
},
{}
],
cells(row, col) {
var cellProperties = {};
if (row === 0 && col === 0) {
cellProperties.readOnly = true;
}
return cellProperties;
}
});
expect(getCellMeta(0, 0).readOnly).toBe(true);
expect(syncSources).not.toHaveBeenCalled();
selectCell(0, 0);
expect(syncSources).not.toHaveBeenCalled();
expect(getCellMeta(1, 0).readOnly).toBeFalsy();
selectCell(1, 0);
expect(syncSources).not.toHaveBeenCalled();
});
it('should not call the `source` method if cell is read only and the arrow has been clicked', (done) => {
var syncSources = jasmine.createSpy('syncSources');
syncSources.and.callFake((query, process) => {
process([]); // hardcoded empty result
});
handsontable({
data: [
['one', 'two'],
['three', 'four']
],
columns: [
{
type: 'autocomplete',
source: syncSources,
allowInvalid: false,
strict: true
},
{}
],
cells(row, col) {
var cellProperties = {};
if (row === 0 && col === 0) {
cellProperties.readOnly = true;
}
return cellProperties;
}
});
expect(getCellMeta(0, 0).readOnly).toBe(true);
expect(syncSources).not.toHaveBeenCalled();
selectCell(0, 0);
$(getCell(0, 0)).find('.htAutocompleteArrow').simulate('mousedown');
setTimeout(() => {
expect(syncSources).not.toHaveBeenCalled();
syncSources.calls.reset();
expect(getCellMeta(1, 0).readOnly).toBeFalsy();
selectCell(1, 0);
$(getCell(1, 0)).find('.htAutocompleteArrow').simulate('mousedown');
}, 100);
setTimeout(() => {
expect(syncSources).toHaveBeenCalled();
expect(syncSources.calls.count()).toEqual(1);
done();
}, 200);
});
it('should add a scrollbar to the autocomplete dropdown, only if number of displayed choices exceeds 10', function(done) {
var hot = handsontable({
data: [
['', 'two', 'three'],
['four', 'five', 'six']
],
columns: [
{
type: 'autocomplete',
source: choices,
allowInvalid: false,
strict: false
},
{},
{}
]
});
this.$container.css({
height: 600
});
expect(choices.length).toBeGreaterThan(10);
selectCell(0, 0);
$(getCell(0, 0)).find('.htAutocompleteArrow').simulate('mousedown');
var dropdown = hot.getActiveEditor().htContainer;
var dropdownHolder = hot.getActiveEditor().htEditor.view.wt.wtTable.holder;
setTimeout(() => {
expect(dropdownHolder.scrollHeight).toBeGreaterThan(dropdownHolder.clientHeight);
keyDownUp('esc');
hot.getSettings().columns[0].source = hot.getSettings().columns[0].source.slice(0).splice(3);
hot.updateSettings({});
selectCell(0, 0);
$(getCell(0, 0)).find('.htAutocompleteArrow').simulate('mousedown');
}, 30);
setTimeout(() => {
expect(dropdownHolder.scrollHeight > dropdownHolder.clientHeight).toBe(false);
done();
}, 60);
});
it('should not close editor on scrolling', async () => {
const hot = handsontable({
data: [
['', 'two', 'three'],
['four', 'five', 'six']
],
columns: [
{
type: 'autocomplete',
source: choices,
allowInvalid: false,
strict: false
},
{},
{}
]
});
expect(choices.length).toBeGreaterThan(10);
selectCell(0, 0);
$(getCell(0, 0)).find('.htAutocompleteArrow').simulate('mousedown');
$(getCell(0, 0)).find('.htAutocompleteArrow').simulate('mouseup');
const dropdown = hot.getActiveEditor().htContainer;
hot.view.wt.wtOverlays.topOverlay.scrollTo(1);
await sleep(50);
expect($(dropdown).is(':visible')).toBe(true);
selectCell(0, 0);
await sleep(50);
$(getCell(0, 0)).find('.htAutocompleteArrow').simulate('mousedown');
$(getCell(0, 0)).find('.htAutocompleteArrow').simulate('mouseup');
hot.view.wt.wtOverlays.topOverlay.scrollTo(3);
await sleep(50);
expect($(dropdown).is(':visible')).toBe(true);
});
it('should keep textarea caret position, after moving the selection to the suggestion list (pressing down arrow)', (done) => {
var syncSources = jasmine.createSpy('syncSources');
syncSources.and.callFake((query, process) => {
process(choices.filter((choice) => choice.indexOf(query) != -1));
});
handsontable({
columns: [
{
type: 'autocomplete',
source: syncSources,
strict: false
}
]
});
selectCell(0, 0);
keyDownUp('enter');
var $editorInput = $('.handsontableInput');
$editorInput.val('an');
keyDownUp(65); // a
keyDownUp(78); // n
Handsontable.dom.setCaretPosition($editorInput[0], 1);
setTimeout(() => {
keyDownUp('arrow_down');
expect(Handsontable.dom.getCaretPosition($editorInput[0])).toEqual(1);
keyDownUp('arrow_down');
expect(Handsontable.dom.getCaretPosition($editorInput[0])).toEqual(1);
done();
}, 200);
});
it('should keep textarea selection, after moving the selection to the suggestion list (pressing down arrow)', (done) => {
var syncSources = jasmine.createSpy('syncSources');
syncSources.and.callFake((query, process) => {
process(choices.filter((choice) => choice.indexOf(query) != -1));
});
handsontable({
columns: [
{
type: 'autocomplete',
source: syncSources,
strict: false
}
]
});
selectCell(0, 0);
keyDownUp('enter');
var $editorInput = $('.handsontableInput');
$editorInput.val('an');
keyDownUp(65); // a
keyDownUp(78); // n
Handsontable.dom.setCaretPosition($editorInput[0], 1, 2);
setTimeout(() => {
keyDownUp('arrow_down');
expect(Handsontable.dom.getCaretPosition($editorInput[0])).toEqual(1);
expect(Handsontable.dom.getSelectionEndPosition($editorInput[0])).toEqual(2);
keyDownUp('arrow_down');
expect(Handsontable.dom.getCaretPosition($editorInput[0])).toEqual(1);
expect(Handsontable.dom.getSelectionEndPosition($editorInput[0])).toEqual(2);
done();
}, 200);
});
it('should jump to the sibling cell, after pressing up key in quick edit mode', (done) => {
var syncSources = jasmine.createSpy('syncSources');
syncSources.and.callFake((query, process) => {
process(choices.filter((choice) => choice.indexOf(query) != -1));
});
handsontable({
columns: [
{
type: 'autocomplete',
source: syncSources,
strict: false
},
{}
]
});
selectCell(1, 0);
keyDownUp('x'); // trigger quick edit mode
var $editorInput = $('.handsontableInput');
$editorInput.val('an');
keyDownUp(65); // a
keyDownUp(78); // n
setTimeout(() => {
keyDownUp('arrow_up');
expect(getSelected()).toEqual([[0, 0, 0, 0]]);
done();
}, 200);
});
it('should jump to the next cell, after pressing right key in quick edit mode', (done) => {
var syncSources = jasmine.createSpy('syncSources');
syncSources.and.callFake((query, process) => {
process(choices.filter((choice) => choice.indexOf(query) != -1));
});
handsontable({
columns: [
{
type: 'autocomplete',
source: syncSources,
strict: false
},
{}
]
});
selectCell(1, 0);
keyDownUp('x'); // trigger quick edit mode
var $editorInput = $('.handsontableInput');
$editorInput.val('an');
keyDownUp(65); // a
keyDownUp(78); // n
setTimeout(() => {
keyDownUp('arrow_right');
expect(getSelected()).toEqual([[1, 1, 1, 1]]);
done();
}, 200);
});
it('should jump to the previous cell, after pressing left key in quick edit mode', (done) => {
var syncSources = jasmine.createSpy('syncSources');
syncSources.and.callFake((query, process) => {
process(choices.filter((choice) => choice.indexOf(query) != -1));
});
handsontable({
columns: [
{},
{
type: 'autocomplete',
source: syncSources,
strict: false
}
]
});
selectCell(1, 1);
keyDownUp('x'); // trigger quick edit mode
var $editorInput = $('.handsontableInput');
$editorInput.val('an');
keyDownUp(65); // a
keyDownUp(78); // n
// put caret on the end of the text to ensure that editor will be closed after hit left arrow key
Handsontable.dom.setCaretPosition($editorInput[0], 2, 2);
setTimeout(() => {
keyDownUp('arrow_left');
expect(getSelected()).toEqual([[1, 0, 1, 0]]);
done();
}, 200);
});
it('should jump to the next cell, after pressing down key in quick edit mode', (done) => {
var syncSources = jasmine.createSpy('syncSources');
syncSources.and.callFake((query, process) => {
process(choices.filter((choice) => choice.indexOf(query) != -1));
});
handsontable({
columns: [
{
type: 'autocomplete',
source: syncSources,
strict: false
},
{}
]
});
selectCell(1, 0);
keyDownUp('x'); // trigger quick edit mode
var $editorInput = $('.handsontableInput');
$editorInput.val('an');
keyDownUp(65); // a
keyDownUp(78); // n
setTimeout(() => {
keyDownUp('arrow_down');
expect(getSelected()).toEqual([[1, 0, 1, 0]]);
done();
}, 200);
});
it('should jump to the next cell, after pressing down key in quick edit mode when no matching options are found', (done) => {
var syncSources = jasmine.createSpy('syncSources');
syncSources.and.callFake((query, process) => {
process(choices.filter((choice) => choice.indexOf(query) != -1));
});
handsontable({
columns: [
{
type: 'autocomplete',
source: syncSources,
strict: false
},
{}
]
});
selectCell(1, 0);
keyDownUp('x'); // trigger quick edit mode
var $editorInput = $('.handsontableInput');
$editorInput.val('anananan');
keyDownUp(65); // a
keyDownUp(78); // n
keyDownUp(65); // a
keyDownUp(78); // n
keyDownUp(65); // a
keyDownUp(78); // n
keyDownUp(65); // a
keyDownUp(78); // n
setTimeout(() => {
keyDownUp('arrow_down');
expect(getSelected()).toEqual([[2, 0, 2, 0]]);
done();
}, 200);
});
it('should not jump to the next cell, after pressing down key in quick edit mode when options list was opened', (done) => {
var syncSources = jasmine.createSpy('syncSources');
syncSources.and.callFake((query, process) => {
process(choices.filter((choice) => choice.indexOf(query) != -1));
});
handsontable({
columns: [
{
type: 'autocomplete',
source: syncSources,
strict: false
},
{}
]
});
selectCell(1, 0);
keyDownUp('x'); // trigger quick edit mode
var $editorInput = $('.handsontableInput');
$editorInput.val('an');
keyDownUp(65); // a
keyDownUp(78); // n
setTimeout(() => {
keyDownUp('arrow_down');
expect(getSelected()).toEqual([[1, 0, 1, 0]]);
done();
}, 200);
});
it('should select option in opened editor after pressing down key in quick edit mode', (done) => {
var syncSources = jasmine.createSpy('syncSources');
syncSources.and.callFake((query, process) => {
process(choices.filter((choice) => choice.indexOf(query) != -1));
});
var hot = handsontable({
columns: [
{
type: 'autocomplete',
source: syncSources,
strict: false
},
{}
]
});
selectCell(1, 0);
keyDownUp('x'); // Trigger quick edit mode
setTimeout(() => {
keyDownUp('arrow_down');
expect(hot.getActiveEditor().htEditor.getSelected()).toEqual([[0, 0, 0, 0]]);
keyDownUp('arrow_down');
expect(hot.getActiveEditor().htEditor.getSelected()).toEqual([[1, 0, 1, 0]]);
keyDownUp('arrow_down');
expect(hot.getActiveEditor().htEditor.getSelected()).toEqual([[2, 0, 2, 0]]);
done();
}, 200);
});
it('should select option in opened editor after pressing up key in quick edit mode', (done) => {
var syncSources = jasmine.createSpy('syncSources');
syncSources.and.callFake((query, process) => {
process(choices.filter((choice) => choice.indexOf(query) != -1));
});
var hot = handsontable({
columns: [
{
type: 'autocomplete',
source: syncSources,
strict: false
},
{}
]
});
selectCell(1, 0);
keyDownUp('x'); // Trigger quick edit mode
setTimeout(() => {
hot.getActiveEditor().htEditor.selectCell(2, 0);
expect(hot.getActiveEditor().htEditor.getSelected()).toEqual([[2, 0, 2, 0]]);
keyDownUp('arrow_up');
expect(hot.getActiveEditor().htEditor.getSelected()).toEqual([[1, 0, 1, 0]]);
keyDownUp('arrow_up');
expect(hot.getActiveEditor().htEditor.getSelected()).toEqual([[0, 0, 0, 0]]);
keyDownUp('arrow_up');
expect(hot.getActiveEditor().htEditor.getSelected()).toEqual([[0, 0, 0, 0]]);
done();
}, 200);
});
it('should not close editor in quick edit mode after pressing down key when last option is selected', (done) => {
var syncSources = jasmine.createSpy('syncSources');
syncSources.and.callFake((query, process) => {
process(choices.filter((choice) => choice.indexOf(query) != -1));
});
var hot = handsontable({
columns: [
{
type: 'autocomplete',
source: syncSources,
strict: false
},
{}
]
});
selectCell(1, 0);
keyDownUp('x'); // Trigger quick edit mode
setTimeout(() => {
hot.getActiveEditor().htEditor.selectCell(7, 0);
hot.listen();
keyDownUp('arrow_down');
keyDownUp('arrow_down');
keyDownUp('arrow_down');
keyDownUp('arrow_down');
keyDownUp('arrow_down');
expect(hot.getActiveEditor().isOpened()).toBe(true);
done();
}, 200);
});
it('should close editor in quick edit mode after pressing up key when no option is selected', (done) => {
var syncSources = jasmine.createSpy('syncSources');
syncSources.and.callFake((query, process) => {
process(choices.filter((choice) => choice.indexOf(query) != -1));
});
var hot = handsontable({
columns: [
{
type: 'autocomplete',
source: syncSources,
strict: false
},
{}
]
});
selectCell(1, 0);
keyDownUp('x'); // Trigger quick edit mode
setTimeout(() => {
hot.getActiveEditor().htEditor.selectCell(1, 0);
hot.listen();
keyDownUp('arrow_up');
keyDownUp('arrow_up');
keyDownUp('arrow_up');
expect(getSelected()).toEqual([[0, 0, 0, 0]]);
done();
}, 200);
});
});
| 1 | 14,825 | The autocomplete editor shouldn't change its own behavior, I assume that `autoWrapCol` and `autoWrapRow` options should be set to `false` in handsontable editor. | handsontable-handsontable | js |
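A minimal sketch of the reviewer's suggestion, written in the same JavaScript style as the spec above. Only the option names autoWrapCol and autoWrapRow come from the review comment; where they would be applied and how they could be asserted are assumptions for illustration, not the actual patch.
// Hypothetical sketch: assume the autocomplete editor forwards these flags to the
// inner Handsontable instance (htEditor) that renders the dropdown list.
const suggestedInnerEditorSettings = {
  autoWrapCol: false, // assumed intent: stop the dropdown selection from wrapping vertically
  autoWrapRow: false  // assumed intent: stop the dropdown selection from wrapping horizontally
};
// A spec in the style of the tests above could then open the editor and check, for example:
// expect(hot.getActiveEditor().htEditor.getSettings().autoWrapCol).toBe(false);
// expect(hot.getActiveEditor().htEditor.getSettings().autoWrapRow).toBe(false);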
@@ -266,7 +266,7 @@ func testInstallTunnelFlows(t *testing.T, config *testConfig) {
}
func testInstallServiceFlows(t *testing.T, config *testConfig) {
- err := c.InstallClusterServiceFlows()
+ err := c.InstallClusterServiceFlows(true, true)
if err != nil {
t.Fatalf("Failed to install Openflow entries to skip service CIDR from egress table: %v", err)
} | 1 | // Copyright 2019 Antrea Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package agent
import (
"encoding/hex"
"fmt"
"net"
"strings"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/component-base/metrics/legacyregistry"
"k8s.io/component-base/metrics/testutil"
config1 "github.com/vmware-tanzu/antrea/pkg/agent/config"
"github.com/vmware-tanzu/antrea/pkg/agent/metrics"
ofClient "github.com/vmware-tanzu/antrea/pkg/agent/openflow"
"github.com/vmware-tanzu/antrea/pkg/agent/openflow/cookie"
k8stypes "github.com/vmware-tanzu/antrea/pkg/agent/proxy/types"
"github.com/vmware-tanzu/antrea/pkg/agent/types"
"github.com/vmware-tanzu/antrea/pkg/apis/controlplane/v1beta2"
secv1alpha1 "github.com/vmware-tanzu/antrea/pkg/apis/security/v1alpha1"
ofconfig "github.com/vmware-tanzu/antrea/pkg/ovs/openflow"
"github.com/vmware-tanzu/antrea/pkg/ovs/ovsconfig"
"github.com/vmware-tanzu/antrea/pkg/ovs/ovsctl"
ofTestUtils "github.com/vmware-tanzu/antrea/test/integration/ovs"
k8sproxy "github.com/vmware-tanzu/antrea/third_party/proxy"
)
var (
br = "br01"
c ofClient.Client
roundInfo = types.RoundInfo{RoundNum: 0, PrevRoundNum: nil}
ovsCtlClient = ovsctl.NewClient(br)
bridgeMgmtAddr = ofconfig.GetMgmtAddress(ovsconfig.DefaultOVSRunDir, br)
)
const (
ingressRuleTable = uint8(90)
ingressDefaultTable = uint8(100)
contrackCommitTable = uint8(105)
priorityNormal = 200
)
type expectTableFlows struct {
tableID uint8
flows []*ofTestUtils.ExpectFlow
}
type testPortConfig struct {
ips []net.IP
mac net.HardwareAddr
ofPort uint32
}
type testLocalPodConfig struct {
name string
*testPortConfig
}
type testPeerConfig struct {
name string
nodeAddress net.IP
subnet net.IPNet
gateway net.IP
}
type testConfig struct {
bridge string
localGateway *testPortConfig
localPods []*testLocalPodConfig
peers []*testPeerConfig
tunnelOFPort uint32
serviceCIDR *net.IPNet
globalMAC net.HardwareAddr
enableIPv6 bool
enableIPv4 bool
}
var (
_, podIPv4CIDR, _ = net.ParseCIDR("192.168.1.0/24")
_, podIPv6CIDR, _ = net.ParseCIDR("fd74:ca9b:172:19::/64")
)
func TestConnectivityFlows(t *testing.T) {
// Initialize ovs metrics (Prometheus) to test them
metrics.InitializeOVSMetrics()
c = ofClient.NewClient(br, bridgeMgmtAddr, true, false)
err := ofTestUtils.PrepareOVSBridge(br)
require.Nil(t, err, fmt.Sprintf("Failed to prepare OVS bridge: %v", err))
defer func() {
err = c.Disconnect()
assert.Nil(t, err, fmt.Sprintf("Error while disconnecting from OVS bridge: %v", err))
err = ofTestUtils.DeleteOVSBridge(br)
assert.Nil(t, err, fmt.Sprintf("Error while deleting OVS bridge: %v", err))
}()
config := prepareConfiguration()
for _, f := range []func(t *testing.T, config *testConfig){
testInitialize,
testInstallGatewayFlows,
testInstallServiceFlows,
testInstallTunnelFlows,
testInstallNodeFlows,
testInstallPodFlows,
testUninstallPodFlows,
testUninstallNodeFlows,
testExternalFlows,
} {
f(t, config)
}
}
func TestReplayFlowsConnectivityFlows(t *testing.T) {
c = ofClient.NewClient(br, bridgeMgmtAddr, true, false)
err := ofTestUtils.PrepareOVSBridge(br)
require.Nil(t, err, fmt.Sprintf("Failed to prepare OVS bridge: %v", err))
defer func() {
err = c.Disconnect()
assert.Nil(t, err, fmt.Sprintf("Error while disconnecting from OVS bridge: %v", err))
err = ofTestUtils.DeleteOVSBridge(br)
assert.Nil(t, err, fmt.Sprintf("Error while deleting OVS bridge: %v", err))
}()
config := prepareConfiguration()
for _, f := range []func(t *testing.T, config *testConfig){
testInitialize,
testInstallGatewayFlows,
testInstallServiceFlows,
testInstallTunnelFlows,
testInstallNodeFlows,
testInstallPodFlows,
} {
f(t, config)
}
testReplayFlows(t)
}
func TestReplayFlowsNetworkPolicyFlows(t *testing.T) {
c = ofClient.NewClient(br, bridgeMgmtAddr, true, false)
err := ofTestUtils.PrepareOVSBridge(br)
require.Nil(t, err, fmt.Sprintf("Failed to prepare OVS bridge: %v", err))
_, err = c.Initialize(roundInfo, &config1.NodeConfig{}, config1.TrafficEncapModeEncap, config1.HostGatewayOFPort)
require.Nil(t, err, "Failed to initialize OFClient")
defer func() {
err = c.Disconnect()
assert.Nil(t, err, fmt.Sprintf("Error while disconnecting from OVS bridge: %v", err))
err = ofTestUtils.DeleteOVSBridge(br)
assert.Nil(t, err, fmt.Sprintf("Error while deleting OVS bridge: %v", err))
}()
ruleID := uint32(100)
fromList := []string{"192.168.1.3", "192.168.1.25", "192.168.2.4"}
toList := []string{"192.168.3.4", "192.168.3.5"}
port2 := intstr.FromInt(8080)
tcpProtocol := v1beta2.ProtocolTCP
defaultAction := secv1alpha1.RuleActionAllow
npPort1 := v1beta2.Service{Protocol: &tcpProtocol, Port: &port2}
toIPList := prepareIPAddresses(toList)
rule := &types.PolicyRule{
Direction: v1beta2.DirectionIn,
From: prepareIPAddresses(fromList),
To: toIPList,
Service: []v1beta2.Service{npPort1},
Action: &defaultAction,
FlowID: ruleID,
TableID: ofClient.IngressRuleTable,
PolicyRef: &v1beta2.NetworkPolicyReference{
Type: v1beta2.K8sNetworkPolicy,
Namespace: "ns1",
Name: "np1",
UID: "uid1",
},
}
err = c.InstallPolicyRuleFlows(rule)
require.Nil(t, err, "Failed to InstallPolicyRuleFlows")
err = c.AddPolicyRuleAddress(ruleID, types.SrcAddress, prepareIPNetAddresses([]string{"192.168.5.0/24", "192.169.1.0/24"}), nil)
require.Nil(t, err, "Failed to AddPolicyRuleAddress")
ofport := int32(100)
err = c.AddPolicyRuleAddress(ruleID, types.DstAddress, []types.Address{ofClient.NewOFPortAddress(ofport)}, nil)
require.Nil(t, err, "Failed to AddPolicyRuleAddress")
testReplayFlows(t)
}
func testExternalFlows(t *testing.T, config *testConfig) {
nodeIP := net.ParseIP("10.10.10.1")
_, localSubnet, _ := net.ParseCIDR("172.16.1.0/24")
if err := c.InstallExternalFlows(nodeIP, *localSubnet); err != nil {
t.Errorf("Failed to install OpenFlow entries to allow Pod to communicate to the external addresses: %v", err)
}
for _, tableFlow := range prepareExternalFlows(nodeIP, localSubnet) {
ofTestUtils.CheckFlowExists(t, ovsCtlClient, tableFlow.tableID, true, tableFlow.flows)
}
}
func testReplayFlows(t *testing.T) {
var err error
countFlows := func() int {
flowList, err := ofTestUtils.OfctlDumpFlows(ovsCtlClient)
require.Nil(t, err, "Error when dumping flows from OVS bridge")
return len(flowList)
}
count1 := countFlows()
t.Logf("Counted %d flows before deletion & reconciliation", count1)
err = ofTestUtils.OfctlDeleteFlows(ovsCtlClient)
require.Nil(t, err, "Error when deleting flows from OVS bridge")
count2 := countFlows()
assert.Zero(t, count2, "Expected no flows after deletion")
c.ReplayFlows()
count3 := countFlows()
t.Logf("Counted %d flows after reconciliation", count3)
assert.Equal(t, count1, count3, "Expected same number of flows after reconciliation")
}
func testInitialize(t *testing.T, config *testConfig) {
nodeConfig := &config1.NodeConfig{}
if config.enableIPv4 {
nodeConfig.PodIPv4CIDR = podIPv4CIDR
}
if config.enableIPv6 {
nodeConfig.PodIPv6CIDR = podIPv6CIDR
}
if _, err := c.Initialize(roundInfo, nodeConfig, config1.TrafficEncapModeEncap, config1.HostGatewayOFPort); err != nil {
t.Errorf("Failed to initialize openflow client: %v", err)
}
for _, tableFlow := range prepareDefaultFlows(config) {
ofTestUtils.CheckFlowExists(t, ovsCtlClient, tableFlow.tableID, true, tableFlow.flows)
}
checkOVSFlowMetrics(t, c)
}
func testInstallTunnelFlows(t *testing.T, config *testConfig) {
err := c.InstallDefaultTunnelFlows(config.tunnelOFPort)
if err != nil {
t.Fatalf("Failed to install Openflow entries for tunnel port: %v", err)
}
for _, tableFlow := range prepareTunnelFlows(config.tunnelOFPort, config.globalMAC) {
ofTestUtils.CheckFlowExists(t, ovsCtlClient, tableFlow.tableID, true, tableFlow.flows)
}
}
func testInstallServiceFlows(t *testing.T, config *testConfig) {
err := c.InstallClusterServiceFlows()
if err != nil {
t.Fatalf("Failed to install Openflow entries to skip service CIDR from egress table: %v", err)
}
for _, tableFlow := range prepareServiceHelperFlows() {
ofTestUtils.CheckFlowExists(t, ovsCtlClient, tableFlow.tableID, true, tableFlow.flows)
}
}
func testInstallNodeFlows(t *testing.T, config *testConfig) {
for _, node := range config.peers {
peerConfig := map[*net.IPNet]net.IP{
&node.subnet: node.gateway,
}
err := c.InstallNodeFlows(node.name, config.localGateway.mac, peerConfig, node.nodeAddress, config.tunnelOFPort, 0)
if err != nil {
t.Fatalf("Failed to install Openflow entries for node connectivity: %v", err)
}
for _, tableFlow := range prepareNodeFlows(config.tunnelOFPort, node.subnet, node.gateway, node.nodeAddress, config.globalMAC, config.localGateway.mac) {
ofTestUtils.CheckFlowExists(t, ovsCtlClient, tableFlow.tableID, true, tableFlow.flows)
}
}
}
func testUninstallNodeFlows(t *testing.T, config *testConfig) {
for _, node := range config.peers {
err := c.UninstallNodeFlows(node.name)
if err != nil {
t.Fatalf("Failed to uninstall Openflow entries for node connectivity: %v", err)
}
for _, tableFlow := range prepareNodeFlows(config.tunnelOFPort, node.subnet, node.gateway, node.nodeAddress, config.globalMAC, config.localGateway.mac) {
ofTestUtils.CheckFlowExists(t, ovsCtlClient, tableFlow.tableID, false, tableFlow.flows)
}
}
}
func testInstallPodFlows(t *testing.T, config *testConfig) {
for _, pod := range config.localPods {
err := c.InstallPodFlows(pod.name, pod.ips, pod.mac, config.localGateway.mac, pod.ofPort)
if err != nil {
t.Fatalf("Failed to install Openflow entries for pod: %v", err)
}
for _, tableFlow := range preparePodFlows(pod.ips, pod.mac, pod.ofPort, config.localGateway.mac, config.globalMAC) {
ofTestUtils.CheckFlowExists(t, ovsCtlClient, tableFlow.tableID, true, tableFlow.flows)
}
}
}
func testUninstallPodFlows(t *testing.T, config *testConfig) {
for _, pod := range config.localPods {
err := c.UninstallPodFlows(pod.name)
if err != nil {
t.Fatalf("Failed to uninstall Openflow entries for pod: %v", err)
}
for _, tableFlow := range preparePodFlows(pod.ips, pod.mac, pod.ofPort, config.localGateway.mac, config.globalMAC) {
ofTestUtils.CheckFlowExists(t, ovsCtlClient, tableFlow.tableID, false, tableFlow.flows)
}
}
}
func TestNetworkPolicyFlows(t *testing.T) {
// Initialize ovs metrics (Prometheus) to test them
metrics.InitializeOVSMetrics()
c = ofClient.NewClient(br, bridgeMgmtAddr, true, false)
err := ofTestUtils.PrepareOVSBridge(br)
require.Nil(t, err, fmt.Sprintf("Failed to prepare OVS bridge %s", br))
_, err = c.Initialize(roundInfo, &config1.NodeConfig{PodIPv4CIDR: podIPv4CIDR, PodIPv6CIDR: podIPv6CIDR}, config1.TrafficEncapModeEncap, config1.HostGatewayOFPort)
require.Nil(t, err, "Failed to initialize OFClient")
defer func() {
err = c.Disconnect()
assert.Nil(t, err, fmt.Sprintf("Error while disconnecting from OVS bridge: %v", err))
err = ofTestUtils.DeleteOVSBridge(br)
assert.Nil(t, err, fmt.Sprintf("Error while deleting OVS bridge: %v", err))
}()
ruleID := uint32(100)
fromList := []string{"192.168.1.3", "192.168.1.25", "192.168.2.4", "fd12:ab:34:a001::3"}
toList := []string{"192.168.3.4", "192.168.3.5", "fd12:ab:34:a002::4"}
port2 := intstr.FromInt(8080)
tcpProtocol := v1beta2.ProtocolTCP
defaultAction := secv1alpha1.RuleActionAllow
npPort1 := v1beta2.Service{Protocol: &tcpProtocol, Port: &port2}
toIPList := prepareIPAddresses(toList)
rule := &types.PolicyRule{
Direction: v1beta2.DirectionIn,
From: prepareIPAddresses(fromList),
To: toIPList,
Service: []v1beta2.Service{npPort1},
Action: &defaultAction,
FlowID: ruleID,
TableID: ofClient.IngressRuleTable,
PolicyRef: &v1beta2.NetworkPolicyReference{
Type: v1beta2.K8sNetworkPolicy,
Namespace: "ns1",
Name: "np1",
UID: "uid1",
},
}
err = c.InstallPolicyRuleFlows(rule)
require.Nil(t, err, "Failed to InstallPolicyRuleFlows")
checkConjunctionFlows(t, ingressRuleTable, ingressDefaultTable, contrackCommitTable, priorityNormal, ruleID, rule, assert.True)
checkDefaultDropFlows(t, ingressDefaultTable, priorityNormal, types.DstAddress, toIPList, true)
addedFrom := prepareIPNetAddresses([]string{"192.168.5.0/24", "192.169.1.0/24", "fd12:ab:34:a003::/64"})
checkAddAddress(t, ingressRuleTable, priorityNormal, ruleID, addedFrom, types.SrcAddress)
checkDeleteAddress(t, ingressRuleTable, priorityNormal, ruleID, addedFrom, types.SrcAddress)
ofport := int32(100)
err = c.AddPolicyRuleAddress(ruleID, types.DstAddress, []types.Address{ofClient.NewOFPortAddress(ofport)}, nil)
require.Nil(t, err, "Failed to AddPolicyRuleAddress")
// Dump flows.
flowList, err := ofTestUtils.OfctlDumpTableFlows(ovsCtlClient, ingressRuleTable)
require.Nil(t, err, "Failed to dump flows")
conjMatch := fmt.Sprintf("priority=%d,reg1=0x%x", priorityNormal, ofport)
flow := &ofTestUtils.ExpectFlow{MatchStr: conjMatch, ActStr: fmt.Sprintf("conjunction(%d,2/3)", ruleID)}
assert.True(t, ofTestUtils.OfctlFlowMatch(flowList, ingressRuleTable, flow), "Failed to install conjunctive match flow")
// Verify multiple conjunctions share the same match conditions.
ruleID2 := uint32(101)
toList2 := []string{"192.168.3.4", "fd12:ab:34:a002::4"}
toIPList2 := prepareIPAddresses(toList2)
udpProtocol := v1beta2.ProtocolUDP
npPort2 := v1beta2.Service{Protocol: &udpProtocol}
rule2 := &types.PolicyRule{
Direction: v1beta2.DirectionIn,
To: toIPList2,
Service: []v1beta2.Service{npPort2},
Action: &defaultAction,
FlowID: ruleID2,
TableID: ofClient.IngressRuleTable,
PolicyRef: &v1beta2.NetworkPolicyReference{
Type: v1beta2.K8sNetworkPolicy,
Namespace: "ns1",
Name: "np1",
UID: "uid1",
},
}
err = c.InstallPolicyRuleFlows(rule2)
require.Nil(t, err, "Failed to InstallPolicyRuleFlows")
// Dump flows
flowList, err = ofTestUtils.OfctlDumpTableFlows(ovsCtlClient, ingressRuleTable)
require.Nil(t, err, "Failed to dump flows")
for _, addr := range toIPList2 {
_, ipProto := getIPProtoStr(addr)
conjMatch = fmt.Sprintf("priority=%d,%s,%s=%s", priorityNormal, ipProto, addr.GetMatchKey(types.DstAddress).GetKeyString(), addr.GetMatchValue())
flow1 := &ofTestUtils.ExpectFlow{MatchStr: conjMatch, ActStr: fmt.Sprintf("conjunction(%d,2/3),conjunction(%d,1/2)", ruleID, ruleID2)}
flow2 := &ofTestUtils.ExpectFlow{MatchStr: conjMatch, ActStr: fmt.Sprintf("conjunction(%d,1/2),conjunction(%d,2/3)", ruleID2, ruleID)}
if !ofTestUtils.OfctlFlowMatch(flowList, ingressRuleTable, flow1) && !ofTestUtils.OfctlFlowMatch(flowList, ingressRuleTable, flow2) {
t.Errorf("Failed to install conjunctive match flow")
}
}
checkOVSFlowMetrics(t, c)
_, err = c.UninstallPolicyRuleFlows(ruleID2)
require.Nil(t, err, "Failed to InstallPolicyRuleFlows")
checkDefaultDropFlows(t, ingressDefaultTable, priorityNormal, types.DstAddress, toIPList2, true)
_, err = c.UninstallPolicyRuleFlows(ruleID)
require.Nil(t, err, "Failed to DeletePolicyRuleService")
checkConjunctionFlows(t, ingressRuleTable, ingressDefaultTable, contrackCommitTable, priorityNormal, ruleID, rule, assert.False)
checkDefaultDropFlows(t, ingressDefaultTable, priorityNormal, types.DstAddress, toIPList, false)
checkOVSFlowMetrics(t, c)
}
func TestIPv6ConnectivityFlows(t *testing.T) {
// Initialize ovs metrics (Prometheus) to test them
metrics.InitializeOVSMetrics()
c = ofClient.NewClient(br, bridgeMgmtAddr, true, false)
err := ofTestUtils.PrepareOVSBridge(br)
require.Nil(t, err, fmt.Sprintf("Failed to prepare OVS bridge: %v", err))
defer func() {
err = c.Disconnect()
assert.Nil(t, err, fmt.Sprintf("Error while disconnecting from OVS bridge: %v", err))
err = ofTestUtils.DeleteOVSBridge(br)
assert.Nil(t, err, fmt.Sprintf("Error while deleting OVS bridge: %v", err))
}()
config := prepareIPv6Configuration()
for _, f := range []func(t *testing.T, config *testConfig){
testInitialize,
testInstallNodeFlows,
testInstallPodFlows,
testInstallGatewayFlows,
testUninstallPodFlows,
testUninstallNodeFlows,
} {
f(t, config)
}
}
type svcConfig struct {
ip net.IP
port uint16
protocol ofconfig.Protocol
withSessionAffinity bool
}
func TestProxyServiceFlows(t *testing.T) {
c = ofClient.NewClient(br, bridgeMgmtAddr, true, false)
err := ofTestUtils.PrepareOVSBridge(br)
require.Nil(t, err, fmt.Sprintf("Failed to prepare OVS bridge %s", br))
_, err = c.Initialize(roundInfo, &config1.NodeConfig{}, config1.TrafficEncapModeEncap, config1.HostGatewayOFPort)
require.Nil(t, err, "Failed to initialize OFClient")
defer func() {
err = c.Disconnect()
assert.Nil(t, err, fmt.Sprintf("Error while disconnecting from OVS bridge: %v", err))
err = ofTestUtils.DeleteOVSBridge(br)
assert.Nil(t, err, fmt.Sprintf("Error while deleting OVS bridge: %v", err))
}()
endpoints := []k8sproxy.Endpoint{
k8stypes.NewEndpointInfo(&k8sproxy.BaseEndpointInfo{
Endpoint: net.JoinHostPort("10.20.0.11", "8081"),
IsLocal: true,
}),
k8stypes.NewEndpointInfo(&k8sproxy.BaseEndpointInfo{
Endpoint: net.JoinHostPort("10.20.1.11", "8081"),
IsLocal: false,
}),
}
stickyMaxAgeSeconds := uint16(30)
tcs := []struct {
svc svcConfig
gid uint32
endpoints []k8sproxy.Endpoint
stickyAge uint16
}{
{
svc: svcConfig{
protocol: ofconfig.ProtocolTCP,
ip: net.ParseIP("10.20.30.41"),
port: uint16(8000),
},
gid: 2,
endpoints: endpoints,
stickyAge: stickyMaxAgeSeconds,
},
{
svc: svcConfig{
protocol: ofconfig.ProtocolUDP,
ip: net.ParseIP("10.20.30.42"),
port: uint16(8000),
},
gid: 3,
endpoints: endpoints,
stickyAge: stickyMaxAgeSeconds,
},
{
svc: svcConfig{
protocol: ofconfig.ProtocolSCTP,
ip: net.ParseIP("10.20.30.43"),
port: uint16(8000),
},
gid: 4,
endpoints: endpoints,
stickyAge: stickyMaxAgeSeconds,
},
}
for _, tc := range tcs {
groupID := ofconfig.GroupIDType(tc.gid)
expTableFlows, expGroupBuckets := expectedProxyServiceGroupAndFlows(tc.gid, tc.svc, tc.endpoints, tc.stickyAge)
installServiceFlows(t, tc.gid, tc.svc, tc.endpoints, tc.stickyAge)
for _, tableFlow := range expTableFlows {
ofTestUtils.CheckFlowExists(t, ovsCtlClient, tableFlow.tableID, true, tableFlow.flows)
}
ofTestUtils.CheckGroupExists(t, ovsCtlClient, groupID, "select", expGroupBuckets, true)
uninstallServiceFlowsFunc(t, tc.gid, tc.svc, tc.endpoints)
for _, tableFlow := range expTableFlows {
ofTestUtils.CheckFlowExists(t, ovsCtlClient, tableFlow.tableID, false, tableFlow.flows)
}
ofTestUtils.CheckGroupExists(t, ovsCtlClient, groupID, "select", expGroupBuckets, false)
}
}
func installServiceFlows(t *testing.T, gid uint32, svc svcConfig, endpointList []k8sproxy.Endpoint, stickyMaxAgeSeconds uint16) {
groupID := ofconfig.GroupIDType(gid)
err := c.InstallEndpointFlows(svc.protocol, endpointList)
assert.NoError(t, err, "no error should return when installing flows for Endpoints")
err = c.InstallServiceGroup(groupID, svc.withSessionAffinity, endpointList)
assert.NoError(t, err, "no error should return when installing groups for Service")
err = c.InstallServiceFlows(groupID, svc.ip, svc.port, svc.protocol, stickyMaxAgeSeconds)
assert.NoError(t, err, "no error should return when installing flows for Service")
}
func uninstallServiceFlowsFunc(t *testing.T, gid uint32, svc svcConfig, endpointList []k8sproxy.Endpoint) {
groupID := ofconfig.GroupIDType(gid)
err := c.UninstallServiceFlows(svc.ip, svc.port, svc.protocol)
assert.Nil(t, err)
err = c.UninstallServiceGroup(groupID)
assert.Nil(t, err)
for _, ep := range endpointList {
err := c.UninstallEndpointFlows(svc.protocol, ep)
assert.Nil(t, err)
}
}
func expectedProxyServiceGroupAndFlows(gid uint32, svc svcConfig, endpointList []k8sproxy.Endpoint, stickyAge uint16) (tableFlows []expectTableFlows, groupBuckets []string) {
nw_proto := 6
learnProtoField := "NXM_OF_TCP_DST[]"
if svc.protocol == ofconfig.ProtocolUDP {
nw_proto = 17
learnProtoField = "NXM_OF_UDP_DST[]"
} else if svc.protocol == ofconfig.ProtocolSCTP {
nw_proto = 132
learnProtoField = "OXM_OF_SCTP_DST[]"
}
cookieAllocator := cookie.NewAllocator(roundInfo.RoundNum)
svcFlows := expectTableFlows{tableID: 41, flows: []*ofTestUtils.ExpectFlow{
{
MatchStr: fmt.Sprintf("priority=200,%s,reg4=0x10000/0x70000,nw_dst=%s,tp_dst=%d", string(svc.protocol), svc.ip.String(), svc.port),
ActStr: fmt.Sprintf("group:%d", gid),
},
{
MatchStr: fmt.Sprintf("priority=190,%s,reg4=0x30000/0x70000,nw_dst=%s,tp_dst=%d", string(svc.protocol), svc.ip.String(), svc.port),
ActStr: fmt.Sprintf("learn(table=40,idle_timeout=%d,priority=200,delete_learned,cookie=0x%x,eth_type=0x800,nw_proto=%d,%s,NXM_OF_IP_DST[],NXM_OF_IP_SRC[],load:NXM_NX_REG3[]->NXM_NX_REG3[],load:NXM_NX_REG4[0..15]->NXM_NX_REG4[0..15],load:0x2->NXM_NX_REG4[16..18],load:0x1->NXM_NX_REG0[19]),load:0x2->NXM_NX_REG4[16..18],goto_table:42", stickyAge, cookieAllocator.RequestWithObjectID(4, gid).Raw(), nw_proto, learnProtoField),
},
}}
epDNATFlows := expectTableFlows{tableID: 42, flows: []*ofTestUtils.ExpectFlow{}}
hairpinFlows := expectTableFlows{tableID: 106, flows: []*ofTestUtils.ExpectFlow{}}
groupBuckets = make([]string, 0)
for _, ep := range endpointList {
epIP := ipToHexString(net.ParseIP(ep.IP()))
epPort, _ := ep.Port()
bucket := fmt.Sprintf("weight:100,actions=load:%s->NXM_NX_REG3[],load:0x%x->NXM_NX_REG4[0..15],load:0x2->NXM_NX_REG4[16..18],load:0x1->NXM_NX_REG0[19],resubmit(,42)", epIP, epPort)
groupBuckets = append(groupBuckets, bucket)
unionVal := (0b010 << 16) + uint32(epPort)
epDNATFlows.flows = append(epDNATFlows.flows, &ofTestUtils.ExpectFlow{
MatchStr: fmt.Sprintf("priority=200,%s,reg3=%s,reg4=0x%x/0x7ffff", string(svc.protocol), epIP, unionVal),
ActStr: fmt.Sprintf("ct(commit,table=50,zone=65520,nat(dst=%s:%d),exec(load:0x21->NXM_NX_CT_MARK[])", ep.IP(), epPort),
})
if ep.GetIsLocal() {
hairpinFlows.flows = append(hairpinFlows.flows, &ofTestUtils.ExpectFlow{
MatchStr: fmt.Sprintf("priority=200,ip,nw_src=%s,nw_dst=%s", ep.IP(), ep.IP()),
ActStr: "set_field:169.254.169.252->ip_src,load:0x1->NXM_NX_REG0[18],goto_table:110",
})
}
}
tableFlows = []expectTableFlows{svcFlows, epDNATFlows, hairpinFlows}
return
}
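// ipToHexString returns the hexadecimal representation of an IP address with leading zeros trimmed, matching the format printed by OVS.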
func ipToHexString(ip net.IP) string {
ipBytes := ip
if ip.To4() != nil {
ipBytes = []byte(ip)[12:16]
}
ipStr := hex.EncodeToString(ipBytes)
// Trim "0" at the beginning of the string to be compatible with OVS printed values.
ipStr = "0x" + strings.TrimLeft(ipStr, "0")
return ipStr
}
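// checkDefaultDropFlows verifies that the default drop flows for the given addresses are present when add is true, or absent when add is false.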
func checkDefaultDropFlows(t *testing.T, table uint8, priority int, addrType types.AddressType, addresses []types.Address, add bool) {
// dump flows
flowList, err := ofTestUtils.OfctlDumpTableFlows(ovsCtlClient, table)
assert.Nil(t, err, fmt.Sprintf("Failed to dump flows: %v", err))
for _, addr := range addresses {
_, ipProto := getIPProtoStr(addr)
conjMatch := fmt.Sprintf("priority=%d,%s,%s=%s", priority, ipProto, addr.GetMatchKey(addrType).GetKeyString(), addr.GetMatchValue())
flow := &ofTestUtils.ExpectFlow{MatchStr: conjMatch, ActStr: "drop"}
if add {
assert.True(t, ofTestUtils.OfctlFlowMatch(flowList, table, flow), "Failed to install conjunctive match flow")
} else {
assert.False(t, ofTestUtils.OfctlFlowMatch(flowList, table, flow), "Failed to uninstall conjunctive match flow")
}
}
}
func checkAddAddress(t *testing.T, ruleTable uint8, priority int, ruleID uint32, addedAddress []types.Address, addrType types.AddressType) {
err := c.AddPolicyRuleAddress(ruleID, addrType, addedAddress, nil)
require.Nil(t, err, "Failed to AddPolicyRuleAddress")
// dump flows
flowList, err := ofTestUtils.OfctlDumpTableFlows(ovsCtlClient, ruleTable)
require.Nil(t, err, "Failed to dump flows")
action := fmt.Sprintf("conjunction(%d,1/3)", ruleID)
if addrType == types.DstAddress {
action = fmt.Sprintf("conjunction(%d,2/3)", ruleID)
}
for _, addr := range addedAddress {
_, ipProto := getIPProtoStr(addr)
conjMatch := fmt.Sprintf("priority=%d,%s,%s=%s", priority, ipProto, addr.GetMatchKey(addrType).GetKeyString(), addr.GetMatchValue())
flow := &ofTestUtils.ExpectFlow{MatchStr: conjMatch, ActStr: action}
assert.True(t, ofTestUtils.OfctlFlowMatch(flowList, ruleTable, flow), "Failed to install conjunctive match flow")
}
tableStatus := c.GetFlowTableStatus()
for _, tableStatus := range tableStatus {
if tableStatus.ID == uint(ruleTable) {
assert.Equal(t, tableStatus.FlowCount, uint(len(flowList)),
fmt.Sprintf("Cached table status in %d is incorrect, expect: %d, actual %d", tableStatus.ID, tableStatus.FlowCount, len(flowList)))
}
}
}
func checkDeleteAddress(t *testing.T, ruleTable uint8, priority int, ruleID uint32, addedAddress []types.Address, addrType types.AddressType) {
err := c.DeletePolicyRuleAddress(ruleID, addrType, addedAddress, nil)
require.Nil(t, err, "Failed to AddPolicyRuleAddress")
flowList, err := ofTestUtils.OfctlDumpTableFlows(ovsCtlClient, ruleTable)
require.Nil(t, err, "Failed to dump flows")
action := fmt.Sprintf("conjunction(%d,1/3)", ruleID)
if addrType == types.DstAddress {
action = fmt.Sprintf("conjunction(%d,2/3)", ruleID)
}
for _, addr := range addedAddress {
_, ipProto := getIPProtoStr(addr)
conjMatch := fmt.Sprintf("priority=%d,%s,%s=%s", priority, ipProto, addr.GetMatchKey(addrType).GetKeyString(), addr.GetMatchValue())
flow := &ofTestUtils.ExpectFlow{MatchStr: conjMatch, ActStr: action}
assert.False(t, ofTestUtils.OfctlFlowMatch(flowList, ruleTable, flow), "Failed to uninstall conjunctive match flow")
}
tableStatus := c.GetFlowTableStatus()
for _, tableStatus := range tableStatus {
if tableStatus.ID == uint(ruleTable) {
assert.Equal(t, tableStatus.FlowCount, uint(len(flowList)),
fmt.Sprintf("Cached table status in %d is incorrect, expect: %d, actual %d", tableStatus.ID, tableStatus.FlowCount, len(flowList)))
}
}
}
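// checkConjunctionFlows verifies the conjunction action flow and the conjunctive match flows for each clause of the rule, using testFunc to assert their presence or absence.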
func checkConjunctionFlows(t *testing.T, ruleTable uint8, dropTable uint8, allowTable uint8, priority int, ruleID uint32, rule *types.PolicyRule, testFunc func(t assert.TestingT, value bool, msgAndArgs ...interface{}) bool) {
flowList, err := ofTestUtils.OfctlDumpTableFlows(ovsCtlClient, ruleTable)
require.Nil(t, err, "Failed to dump flows")
conjunctionActionMatch := fmt.Sprintf("priority=%d,conj_id=%d,ip", priority-10, ruleID)
conjReg := 6
nextTable := ofClient.IngressMetricTable
if ruleTable == uint8(ofClient.EgressRuleTable) {
nextTable = ofClient.EgressMetricTable
}
flow := &ofTestUtils.ExpectFlow{MatchStr: conjunctionActionMatch, ActStr: fmt.Sprintf("load:0x%x->NXM_NX_REG%d[],ct(commit,table=%d,zone=65520,exec(load:0x%x->NXM_NX_CT_LABEL[0..31])", ruleID, conjReg, nextTable, ruleID)}
testFunc(t, ofTestUtils.OfctlFlowMatch(flowList, ruleTable, flow), "Failed to update conjunction action flow")
useIPv4 := false
useIPv6 := false
for _, addr := range rule.From {
isIPv6, ipProto := getIPProtoStr(addr)
if isIPv6 && !useIPv6 {
useIPv6 = true
} else if !isIPv6 && !useIPv4 {
useIPv4 = true
}
conjMatch := fmt.Sprintf("priority=%d,%s,%s=%s", priority, ipProto, addr.GetMatchKey(types.SrcAddress).GetKeyString(), addr.GetMatchValue())
flow := &ofTestUtils.ExpectFlow{MatchStr: conjMatch, ActStr: fmt.Sprintf("conjunction(%d,1/3)", ruleID)}
testFunc(t, ofTestUtils.OfctlFlowMatch(flowList, ruleTable, flow), "Failed to install conjunctive match flow for clause1")
}
for _, addr := range rule.To {
isIPv6, ipProto := getIPProtoStr(addr)
if isIPv6 && !useIPv6 {
useIPv6 = true
} else if !isIPv6 && !useIPv4 {
useIPv4 = true
}
conjMatch := fmt.Sprintf("priority=%d,%s,%s=%s", priority, ipProto, addr.GetMatchKey(types.DstAddress).GetKeyString(), addr.GetMatchValue())
flow := &ofTestUtils.ExpectFlow{MatchStr: conjMatch, ActStr: fmt.Sprintf("conjunction(%d,2/3)", ruleID)}
testFunc(t, ofTestUtils.OfctlFlowMatch(flowList, ruleTable, flow), "Failed to install conjunctive match flow for clause2")
}
for _, service := range rule.Service {
if useIPv4 {
conjMatch1 := fmt.Sprintf("priority=%d,%s,tp_dst=%d", priority, strings.ToLower(string(*service.Protocol)), service.Port.IntVal)
flow := &ofTestUtils.ExpectFlow{MatchStr: conjMatch1, ActStr: fmt.Sprintf("conjunction(%d,3/3)", ruleID)}
testFunc(t, ofTestUtils.OfctlFlowMatch(flowList, ruleTable, flow), "Failed to install conjunctive match flow for clause3")
}
if useIPv6 {
conjMatch1 := fmt.Sprintf("priority=%d,%s6,tp_dst=%d", priority, strings.ToLower(string(*service.Protocol)), service.Port.IntVal)
flow := &ofTestUtils.ExpectFlow{MatchStr: conjMatch1, ActStr: fmt.Sprintf("conjunction(%d,3/3)", ruleID)}
testFunc(t, ofTestUtils.OfctlFlowMatch(flowList, ruleTable, flow), "Failed to install conjunctive match flow for clause3")
}
}
tablesStatus := c.GetFlowTableStatus()
for _, tableStatus := range tablesStatus {
if tableStatus.ID == uint(ruleTable) {
assert.Equal(t, tableStatus.FlowCount, uint(len(flowList)),
fmt.Sprintf("Cached table status in %d is incorrect, expect: %d, actual %d", tableStatus.ID, tableStatus.FlowCount, len(flowList)))
}
}
}
func getIPProtoStr(addr types.Address) (bool, string) {
var addrIP net.IP
switch v := addr.GetValue().(type) {
case net.IP:
addrIP = v
case net.IPNet:
addrIP = v.IP
}
if addrIP.To4() != nil {
return false, "ip"
} else {
return true, "ipv6"
}
}
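// checkOVSFlowMetrics verifies that the Prometheus flow count metrics match the flow table status reported by the client.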
func checkOVSFlowMetrics(t *testing.T, client ofClient.Client) {
expectedFlowCount := `
# HELP antrea_agent_ovs_flow_count [STABLE] Flow count for each OVS flow table. The TableID is used as a label.
# TYPE antrea_agent_ovs_flow_count gauge
`
tableStatus := client.GetFlowTableStatus()
totalFlowCount := 0
for _, table := range tableStatus {
expectedFlowCount = expectedFlowCount + fmt.Sprintf("antrea_agent_ovs_flow_count{table_id=\"%d\"} %d\n", table.ID, table.FlowCount)
totalFlowCount = totalFlowCount + int(table.FlowCount)
}
expectedTotalFlowCount := `
# HELP antrea_agent_ovs_total_flow_count [STABLE] Total flow count of all OVS flow tables.
# TYPE antrea_agent_ovs_total_flow_count gauge
`
expectedTotalFlowCount = expectedTotalFlowCount + fmt.Sprintf("antrea_agent_ovs_total_flow_count %d\n", totalFlowCount)
assert.Equal(t, nil, testutil.GatherAndCompare(legacyregistry.DefaultGatherer, strings.NewReader(expectedTotalFlowCount), "antrea_agent_ovs_total_flow_count"))
assert.Equal(t, nil, testutil.GatherAndCompare(legacyregistry.DefaultGatherer, strings.NewReader(expectedFlowCount), "antrea_agent_ovs_flow_count"))
}
func testInstallGatewayFlows(t *testing.T, config *testConfig) {
err := c.InstallGatewayFlows(config.localGateway.ips, config.localGateway.mac, config.localGateway.ofPort)
if err != nil {
t.Fatalf("Failed to install Openflow entries for gateway: %v", err)
}
for _, tableFlow := range prepareGatewayFlows(config.localGateway.ips, config.localGateway.mac, config.localGateway.ofPort, config.globalMAC) {
ofTestUtils.CheckFlowExists(t, ovsCtlClient, tableFlow.tableID, true, tableFlow.flows)
}
}
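// prepareConfiguration returns an IPv4-only test configuration with one local Pod, a gateway port and one peer Node.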
func prepareConfiguration() *testConfig {
podMAC, _ := net.ParseMAC("aa:aa:aa:aa:aa:13")
gwMAC, _ := net.ParseMAC("aa:aa:aa:aa:aa:11")
podCfg := &testLocalPodConfig{
name: "container-1",
testPortConfig: &testPortConfig{
ips: []net.IP{net.ParseIP("192.168.1.3")},
mac: podMAC,
ofPort: uint32(3),
},
}
gwCfg := &testPortConfig{
ips: []net.IP{net.ParseIP("192.168.1.1")},
mac: gwMAC,
ofPort: uint32(1),
}
_, serviceCIDR, _ := net.ParseCIDR("172.16.0.0/16")
_, peerSubnet, _ := net.ParseCIDR("192.168.2.0/24")
peerNode := &testPeerConfig{
name: "n2",
nodeAddress: net.ParseIP("10.1.1.2"),
subnet: *peerSubnet,
gateway: net.ParseIP("192.168.2.1"),
}
vMAC, _ := net.ParseMAC("aa:bb:cc:dd:ee:ff")
return &testConfig{
bridge: br,
localGateway: gwCfg,
localPods: []*testLocalPodConfig{podCfg},
peers: []*testPeerConfig{peerNode},
tunnelOFPort: uint32(2),
serviceCIDR: serviceCIDR,
globalMAC: vMAC,
enableIPv4: true,
enableIPv6: false,
}
}
func prepareIPv6Configuration() *testConfig {
podMAC, _ := net.ParseMAC("aa:aa:aa:aa:aa:13")
gwMAC, _ := net.ParseMAC("aa:aa:aa:aa:aa:11")
podCfg := &testLocalPodConfig{
name: "container-1",
testPortConfig: &testPortConfig{
ips: []net.IP{net.ParseIP("fd74:ca9b:172:19::3")},
mac: podMAC,
ofPort: uint32(3),
},
}
gwCfg := &testPortConfig{
ips: []net.IP{net.ParseIP("fd74:ca9b:172:19::1")},
mac: gwMAC,
ofPort: uint32(1),
}
_, serviceCIDR, _ := net.ParseCIDR("ee74:ca9b:2345:a33::/64")
_, peerSubnet, _ := net.ParseCIDR("fd74:ca9b:172:20::/64")
peerNode := &testPeerConfig{
name: "n2",
nodeAddress: net.ParseIP("10.1.1.2"),
subnet: *peerSubnet,
gateway: net.ParseIP("fd74:ca9b:172:20::1"),
}
vMAC, _ := net.ParseMAC("aa:bb:cc:dd:ee:ff")
return &testConfig{
bridge: br,
localGateway: gwCfg,
localPods: []*testLocalPodConfig{podCfg},
peers: []*testPeerConfig{peerNode},
tunnelOFPort: uint32(2),
serviceCIDR: serviceCIDR,
globalMAC: vMAC,
enableIPv4: false,
enableIPv6: true,
}
}
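// preparePodFlows returns the table flows expected after installing Pod flows for the given Pod IPs, MAC and OpenFlow port.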
func preparePodFlows(podIPs []net.IP, podMAC net.HardwareAddr, podOFPort uint32, gwMAC, vMAC net.HardwareAddr) []expectTableFlows {
flows := []expectTableFlows{
{
uint8(0),
[]*ofTestUtils.ExpectFlow{
{
MatchStr: fmt.Sprintf("priority=190,in_port=%d", podOFPort),
ActStr: "load:0x2->NXM_NX_REG0[0..15],goto_table:10",
},
},
},
{
uint8(80),
[]*ofTestUtils.ExpectFlow{
{
MatchStr: fmt.Sprintf("priority=200,dl_dst=%s", podMAC.String()),
ActStr: fmt.Sprintf("load:0x%x->NXM_NX_REG1[],load:0x1->NXM_NX_REG0[16],goto_table:90", podOFPort),
},
},
},
}
for _, podIP := range podIPs {
var ipProto, nwSrcField, nwDstField string
var nextTableForSpoofguard uint8
if podIP.To4() != nil {
ipProto = "ip"
nwSrcField = "nw_src"
nwDstField = "nw_dst"
flows = append(flows,
expectTableFlows{
uint8(10),
[]*ofTestUtils.ExpectFlow{
{
MatchStr: fmt.Sprintf("priority=200,arp,in_port=%d,arp_spa=%s,arp_sha=%s", podOFPort, podIP.String(), podMAC.String()),
ActStr: "goto_table:20",
},
},
})
nextTableForSpoofguard = 29
} else {
ipProto = "ipv6"
nwSrcField = "ipv6_src"
nwDstField = "ipv6_dst"
nextTableForSpoofguard = 21
}
flows = append(flows,
expectTableFlows{
uint8(10),
[]*ofTestUtils.ExpectFlow{
{
MatchStr: fmt.Sprintf("priority=200,%s,in_port=%d,dl_src=%s,%s=%s", ipProto, podOFPort, podMAC.String(), nwSrcField, podIP.String()),
ActStr: fmt.Sprintf("goto_table:%d", nextTableForSpoofguard),
},
},
},
expectTableFlows{
uint8(70),
[]*ofTestUtils.ExpectFlow{
{
MatchStr: fmt.Sprintf("priority=200,%s,reg0=0x80000/0x80000,%s=%s", ipProto, nwDstField, podIP.String()),
ActStr: fmt.Sprintf("set_field:%s->eth_src,set_field:%s->eth_dst,dec_ttl,goto_table:80", gwMAC.String(), podMAC.String()),
},
},
},
)
}
return flows
}
func prepareGatewayFlows(gwIPs []net.IP, gwMAC net.HardwareAddr, gwOFPort uint32, vMAC net.HardwareAddr) []expectTableFlows {
flows := []expectTableFlows{
{
uint8(0),
[]*ofTestUtils.ExpectFlow{
{
MatchStr: fmt.Sprintf("priority=200,in_port=%d", gwOFPort),
ActStr: "load:0x1->NXM_NX_REG0[0..15],goto_table:10",
},
},
},
{
uint8(80),
[]*ofTestUtils.ExpectFlow{
{
MatchStr: fmt.Sprintf("priority=200,dl_dst=%s", gwMAC.String()),
ActStr: fmt.Sprintf("load:0x%x->NXM_NX_REG1[],load:0x1->NXM_NX_REG0[16],goto_table:90", gwOFPort),
},
},
},
}
for _, gwIP := range gwIPs {
var ipProtoStr, nwSrcStr, nwDstStr string
if gwIP.To4() != nil {
ipProtoStr = "ip"
nwSrcStr = "nw_src"
nwDstStr = "nw_dst"
flows = append(flows,
expectTableFlows{
uint8(10),
[]*ofTestUtils.ExpectFlow{
{
MatchStr: fmt.Sprintf("priority=200,arp,in_port=%d,arp_spa=%s,arp_sha=%s", gwOFPort, gwIP, gwMAC),
ActStr: "goto_table:20",
},
{
MatchStr: fmt.Sprintf("priority=200,ip,in_port=%d", gwOFPort),
ActStr: "goto_table:29",
},
},
})
} else {
ipProtoStr = "ipv6"
nwSrcStr = "ipv6_src"
nwDstStr = "ipv6_dst"
}
flows = append(flows,
expectTableFlows{
uint8(70),
[]*ofTestUtils.ExpectFlow{
{
MatchStr: fmt.Sprintf("priority=200,%s,dl_dst=%s,%s=%s", ipProtoStr, vMAC.String(), nwDstStr, gwIP.String()),
ActStr: fmt.Sprintf("set_field:%s->eth_dst,goto_table:80", gwMAC.String()),
},
},
},
expectTableFlows{
tableID: uint8(90),
flows: []*ofTestUtils.ExpectFlow{
{
MatchStr: fmt.Sprintf("priority=210,%s,%s=%s", ipProtoStr, nwSrcStr, gwIP.String()),
ActStr: "goto_table:105",
},
},
},
expectTableFlows{
uint8(31),
[]*ofTestUtils.ExpectFlow{
{
MatchStr: fmt.Sprintf("priority=200,ct_state=-new+trk,ct_mark=0x20,%s", ipProtoStr),
ActStr: fmt.Sprintf("load:0x%s->NXM_OF_ETH_DST[],goto_table:42", strings.Replace(gwMAC.String(), ":", "", -1)),
},
},
},
)
}
return flows
}
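// prepareTunnelFlows returns the table 0 flow expected for packets received on the tunnel port.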
func prepareTunnelFlows(tunnelPort uint32, vMAC net.HardwareAddr) []expectTableFlows {
return []expectTableFlows{
{
uint8(0),
[]*ofTestUtils.ExpectFlow{
{
MatchStr: fmt.Sprintf("priority=200,in_port=%d", tunnelPort),
ActStr: "load:0->NXM_NX_REG0[0..15],load:0x1->NXM_NX_REG0[19],goto_table:30",
},
},
},
}
}
func prepareNodeFlows(tunnelPort uint32, peerSubnet net.IPNet, peerGwIP, peerNodeIP net.IP, vMAC, localGwMAC net.HardwareAddr) []expectTableFlows {
var expFlows []expectTableFlows
var ipProtoStr, nwDstFieldName string
if peerGwIP.To4() != nil {
ipProtoStr = "ip"
nwDstFieldName = "nw_dst"
expFlows = append(expFlows, expectTableFlows{
uint8(20),
[]*ofTestUtils.ExpectFlow{
{
MatchStr: fmt.Sprintf("priority=200,arp,arp_tpa=%s,arp_op=1", peerGwIP.String()),
ActStr: fmt.Sprintf("move:NXM_OF_ETH_SRC[]->NXM_OF_ETH_DST[],set_field:%s->eth_src,load:0x2->NXM_OF_ARP_OP[],move:NXM_NX_ARP_SHA[]->NXM_NX_ARP_THA[],set_field:%s->arp_sha,move:NXM_OF_ARP_SPA[]->NXM_OF_ARP_TPA[],set_field:%s->arp_spa,IN_PORT", vMAC.String(), vMAC.String(), peerGwIP.String()),
},
},
})
} else {
ipProtoStr = "ipv6"
nwDstFieldName = "ipv6_dst"
}
expFlows = append(expFlows, expectTableFlows{
uint8(70),
[]*ofTestUtils.ExpectFlow{
{
MatchStr: fmt.Sprintf("priority=200,%s,%s=%s", ipProtoStr, nwDstFieldName, peerSubnet.String()),
ActStr: fmt.Sprintf("dec_ttl,set_field:%s->eth_src,set_field:%s->eth_dst,load:0x%x->NXM_NX_REG1[],load:0x1->NXM_NX_REG0[16],set_field:%s->tun_dst,goto_table:105", localGwMAC.String(), vMAC.String(), tunnelPort, peerNodeIP.String())},
},
})
return expFlows
}
func prepareServiceHelperFlows() []expectTableFlows {
return []expectTableFlows{
{
uint8(40),
[]*ofTestUtils.ExpectFlow{
{
MatchStr: fmt.Sprint("priority=0"),
ActStr: fmt.Sprint("load:0x1->NXM_NX_REG4[16..18]"),
},
},
},
}
}
func prepareDefaultFlows(config *testConfig) []expectTableFlows {
table31Flows := expectTableFlows{
tableID: 31,
flows: []*ofTestUtils.ExpectFlow{{MatchStr: "priority=0", ActStr: "resubmit(,40),resubmit(,41)"}},
}
table105Flows := expectTableFlows{
tableID: 105,
flows: []*ofTestUtils.ExpectFlow{{MatchStr: "priority=0", ActStr: "goto_table:106"}},
}
if config.enableIPv4 {
table31Flows.flows = append(table31Flows.flows,
&ofTestUtils.ExpectFlow{MatchStr: "priority=210,ct_state=-new+trk,ct_mark=0x20,ip,reg0=0x1/0xffff", ActStr: "goto_table:42"},
&ofTestUtils.ExpectFlow{MatchStr: "priority=190,ct_state=+inv+trk,ip", ActStr: "drop"},
)
table105Flows.flows = append(table105Flows.flows,
&ofTestUtils.ExpectFlow{MatchStr: "priority=200,ct_state=+new+trk,ip,reg0=0x1/0xffff", ActStr: "ct(commit,table=106,zone=65520,exec(load:0x20->NXM_NX_CT_MARK[])"},
&ofTestUtils.ExpectFlow{MatchStr: "priority=190,ct_state=+new+trk,ip", ActStr: "ct(commit,table=106,zone=65520)"},
)
}
if config.enableIPv6 {
table31Flows.flows = append(table31Flows.flows,
&ofTestUtils.ExpectFlow{MatchStr: "priority=210,ct_state=-new+trk,ct_mark=0x20,ipv6,reg0=0x1/0xffff", ActStr: "goto_table:42"},
&ofTestUtils.ExpectFlow{MatchStr: "priority=190,ct_state=+inv+trk,ipv6", ActStr: "drop"},
)
table105Flows.flows = append(table105Flows.flows,
&ofTestUtils.ExpectFlow{MatchStr: "priority=200,ct_state=+new+trk,ipv6,reg0=0x1/0xffff", ActStr: "ct(commit,table=106,zone=65510,exec(load:0x20->NXM_NX_CT_MARK[])"},
&ofTestUtils.ExpectFlow{MatchStr: "priority=190,ct_state=+new+trk,ipv6", ActStr: "ct(commit,table=106,zone=65510)"},
)
}
return []expectTableFlows{
table31Flows, table105Flows,
{
uint8(0),
[]*ofTestUtils.ExpectFlow{{MatchStr: "priority=0", ActStr: "drop"}},
},
{
uint8(10),
[]*ofTestUtils.ExpectFlow{{MatchStr: "priority=0", ActStr: "drop"}},
},
{
uint8(20),
[]*ofTestUtils.ExpectFlow{
{MatchStr: "priority=190,arp", ActStr: "NORMAL"},
{MatchStr: "priority=0", ActStr: "drop"},
},
},
{
uint8(30),
[]*ofTestUtils.ExpectFlow{
{MatchStr: "priority=200,ip", ActStr: "ct(table=31,zone=65520,nat)"},
},
},
{
uint8(42),
[]*ofTestUtils.ExpectFlow{{MatchStr: "priority=0", ActStr: "goto_table:50"}},
},
{
uint8(50),
[]*ofTestUtils.ExpectFlow{{MatchStr: "priority=0", ActStr: "goto_table:60"}},
},
{
uint8(60),
[]*ofTestUtils.ExpectFlow{{MatchStr: "priority=0", ActStr: "goto_table:61"}},
},
{
uint8(61),
[]*ofTestUtils.ExpectFlow{{MatchStr: "priority=0", ActStr: "goto_table:70"}},
},
{
uint8(70),
[]*ofTestUtils.ExpectFlow{{MatchStr: "priority=0", ActStr: "goto_table:80"}},
},
{
uint8(80),
[]*ofTestUtils.ExpectFlow{{MatchStr: "priority=0", ActStr: "goto_table:90"}},
},
{
uint8(90),
[]*ofTestUtils.ExpectFlow{{MatchStr: "priority=0", ActStr: "goto_table:100"}},
},
{
uint8(100),
[]*ofTestUtils.ExpectFlow{{MatchStr: "priority=0", ActStr: "goto_table:101"}},
},
{
uint8(101),
[]*ofTestUtils.ExpectFlow{{MatchStr: "priority=0", ActStr: "goto_table:105"}},
},
{
uint8(110),
[]*ofTestUtils.ExpectFlow{
{MatchStr: "priority=200,ip,reg0=0x10000/0x10000", ActStr: "output:NXM_NX_REG1[]"},
},
},
}
}
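// prepareIPAddresses converts IP strings into the Address values used by the OpenFlow client.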
func prepareIPAddresses(addresses []string) []types.Address {
var ipAddresses = make([]types.Address, 0)
for _, addr := range addresses {
ip := net.ParseIP(addr)
ipAddresses = append(ipAddresses, ofClient.NewIPAddress(ip))
}
return ipAddresses
}
func prepareIPNetAddresses(addresses []string) []types.Address {
var ipAddresses = make([]types.Address, 0)
for _, addr := range addresses {
_, ipNet, _ := net.ParseCIDR(addr)
ipAddresses = append(ipAddresses, ofClient.NewIPNetAddress(*ipNet))
}
return ipAddresses
}
func prepareExternalFlows(nodeIP net.IP, localSubnet *net.IPNet) []expectTableFlows {
return []expectTableFlows{
{
uint8(0),
[]*ofTestUtils.ExpectFlow{
{
MatchStr: fmt.Sprintf("priority=210,ip,in_port=LOCAL,nw_dst=%s", localSubnet.String()),
ActStr: "load:0x1->NXM_NX_REG0[19],goto_table:30",
},
},
},
{
uint8(5),
[]*ofTestUtils.ExpectFlow{
{
MatchStr: fmt.Sprintf("priority=200,ip"),
ActStr: "load:0x4->NXM_NX_REG0[0..15],goto_table:30",
},
},
},
{
uint8(30),
[]*ofTestUtils.ExpectFlow{
{
MatchStr: "priority=200,ip", ActStr: "ct(table=31,zone=65520,nat)",
},
},
},
{
uint8(31),
[]*ofTestUtils.ExpectFlow{
{
MatchStr: "priority=210,ct_state=-new+trk,ct_mark=0x40,ip,reg0=0x4/0xffff",
ActStr: "load:0xaabbccddeeff->NXM_OF_ETH_DST[],load:0x1->NXM_NX_REG0[19],goto_table:42",
},
{
MatchStr: "priority=200,ct_state=-new+trk,ct_mark=0x40,ip",
ActStr: "goto_table:42",
},
{
MatchStr: fmt.Sprintf("priority=200,ip,in_port=%d", config1.UplinkOFPort),
ActStr: "LOCAL",
},
},
},
{
uint8(70),
[]*ofTestUtils.ExpectFlow{
{
MatchStr: "priority=200,ct_mark=0x20,ip,reg0=0x2/0xffff", ActStr: "goto_table:80",
},
{
MatchStr: fmt.Sprintf("priority=190,ip,reg0=0x2/0xffff,nw_dst=%s", nodeIP.String()),
ActStr: "goto_table:80",
},
{
MatchStr: "priority=180,ip,reg0=0x2/0xffff",
ActStr: "load:0x1->NXM_NX_REG0[17],goto_table:90",
},
},
},
{
uint8(105),
[]*ofTestUtils.ExpectFlow{
{
MatchStr: "priority=200,ct_state=+new+trk,ip,reg0=0x20000/0x20000",
ActStr: fmt.Sprintf("ct(commit,table=110,zone=65520,nat(src=%s),exec(load:0x40->NXM_NX_CT_MARK[]))", nodeIP.String()),
},
},
},
{
uint8(110),
[]*ofTestUtils.ExpectFlow{
{
MatchStr: "priority=200,ip,reg0=0x20000/0x20000",
ActStr: fmt.Sprintf("output:%d", config1.HostGatewayOFPort),
},
},
},
}
}
| 1 | 22,926 | out of curiosity, with IPv6 introduced, why there's no change to the expected output of openflow in the test? | antrea-io-antrea | go |
@@ -67,7 +67,10 @@ byte *
decode_eflags_usage(dcontext_t *dcontext, byte *pc, uint *usage,
dr_opnd_query_flags_t flags)
{
- *usage = 0; /* FIXME i#1569 */
+ /* As long as precise flag information is not provided we conservatively
+ * assume each instructions may read or write all flags.
+ */
+ *usage = EFLAGS_WRITE_ALL | EFLAGS_READ_ALL; /* FIXME i#2263 */
return pc + 4;
}
| 1 | /* **********************************************************
* Copyright (c) 2016 ARM Limited. All rights reserved.
* **********************************************************/
/*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * Neither the name of ARM Limited nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL ARM LIMITED OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*/
#include "../globals.h"
#include "instr.h"
#include "decode.h"
#include "decode_fast.h" /* ensure we export decode_next_pc, decode_sizeof */
#include "instr_create.h"
#include "codec.h"
bool
is_isa_mode_legal(dr_isa_mode_t mode)
{
return (mode == DR_ISA_ARM_A64);
}
app_pc
canonicalize_pc_target(dcontext_t *dcontext, app_pc pc)
{
return pc;
}
DR_API
app_pc
dr_app_pc_as_jump_target(dr_isa_mode_t isa_mode, app_pc pc)
{
return pc;
}
DR_API
app_pc
dr_app_pc_as_load_target(dr_isa_mode_t isa_mode, app_pc pc)
{
return pc;
}
byte *
decode_eflags_usage(dcontext_t *dcontext, byte *pc, uint *usage,
dr_opnd_query_flags_t flags)
{
*usage = 0; /* FIXME i#1569 */
return pc + 4;
}
byte *
decode_opcode(dcontext_t *dcontext, byte *pc, instr_t *instr)
{
ASSERT_NOT_IMPLEMENTED(false); /* FIXME i#1569 */
return NULL;
}
byte *
decode(dcontext_t *dcontext, byte *pc, instr_t *instr)
{
return decode_common(dcontext, pc, pc, instr);
}
byte *
decode_from_copy(dcontext_t *dcontext, byte *copy_pc, byte *orig_pc, instr_t *instr)
{
return decode_common(dcontext, copy_pc, orig_pc, instr);
}
byte *
decode_cti(dcontext_t *dcontext, byte *pc, instr_t *instr)
{
return decode(dcontext, pc, instr);
}
byte *
decode_next_pc(dcontext_t *dcontext, byte *pc)
{
return pc + AARCH64_INSTR_SIZE;
}
int
decode_sizeof(dcontext_t *dcontext, byte *pc, int *num_prefixes)
{
return AARCH64_INSTR_SIZE;
}
byte *
decode_raw(dcontext_t *dcontext, byte *pc, instr_t *instr)
{
ASSERT_NOT_IMPLEMENTED(false); /* FIXME i#1569 */
return NULL;
}
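/* Return whether the instruction at pc is an AArch64 unconditional immediate branch (B). */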
bool
decode_raw_is_jmp(dcontext_t *dcontext, byte *pc)
{
uint enc = *(uint *)pc;
return ((enc & 0xfc000000) == 0x14000000);
}
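/* Compute the branch target by sign-extending the 26-bit immediate and scaling it by the instruction size. */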
byte *
decode_raw_jmp_target(dcontext_t *dcontext, byte *pc)
{
uint enc = *(uint *)pc;
return pc + ((enc & 0x1ffffff) << 2) - ((enc & 0x2000000) << 2);
}
const instr_info_t *
instr_info_extra_opnds(const instr_info_t *info)
{
ASSERT_NOT_IMPLEMENTED(false); /* FIXME i#1569 */
return NULL;
}
byte
instr_info_opnd_type(const instr_info_t *info, bool src, int num)
{
ASSERT_NOT_IMPLEMENTED(false); /* FIXME i#1569 */
return 0;
}
const instr_info_t *
get_next_instr_info(const instr_info_t * info)
{
ASSERT_NOT_IMPLEMENTED(false); /* FIXME i#1569 */
return NULL;
}
byte
decode_first_opcode_byte(int opcode)
{
ASSERT_NOT_IMPLEMENTED(false); /* FIXME i#1569 */
return 0;
}
const instr_info_t *
opcode_to_encoding_info(uint opc, dr_isa_mode_t isa_mode)
{
/* We do not use instr_info_t encoding info on AArch64. */
ASSERT_NOT_REACHED();
return NULL;
}
DR_API
const char *
decode_opcode_name(int opcode)
{
return get_opcode_name(opcode);
}
opnd_size_t
resolve_variable_size(decode_info_t *di, opnd_size_t sz, bool is_reg)
{
ASSERT_NOT_IMPLEMENTED(false); /* FIXME i#1569 */
return 0;
}
bool
optype_is_indir_reg(int optype)
{
ASSERT_NOT_IMPLEMENTED(false); /* FIXME i#1569 */
return false;
}
bool
optype_is_reg(int optype)
{
ASSERT_NOT_IMPLEMENTED(false); /* FIXME i#1569 */
return false;
}
bool
optype_is_gpr(int optype)
{
ASSERT_NOT_IMPLEMENTED(false); /* FIXME i#1569 */
return false;
}
#ifdef DEBUG
# ifndef STANDALONE_DECODER
void
check_encode_decode_consistency(dcontext_t *dcontext, instrlist_t *ilist)
{
ASSERT_NOT_IMPLEMENTED(false); /* FIXME i#1569 */
}
# endif /* STANDALONE_DECODER */
void
decode_debug_checks_arch(void)
{
/* FIXME i#1569: NYI */
}
#endif /* DEBUG */
#ifdef DECODE_UNIT_TEST
# include "instr_create.h"
int main()
{
bool res = true;
standalone_init();
return res;
}
#endif /* DECODE_UNIT_TEST */
| 1 | 10,873 | nit: grammar: instruction | DynamoRIO-dynamorio | c |
@@ -19,6 +19,7 @@ package org.openqa.selenium.grid.graphql;
import graphql.schema.DataFetcher;
import graphql.schema.DataFetchingEnvironment;
+
import org.openqa.selenium.grid.distributor.Distributor;
import org.openqa.selenium.internal.Require;
| 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.selenium.grid.graphql;
import graphql.schema.DataFetcher;
import graphql.schema.DataFetchingEnvironment;
import org.openqa.selenium.grid.distributor.Distributor;
import org.openqa.selenium.internal.Require;
import java.net.URI;
public class GridData implements DataFetcher {
private final Distributor distributor;
private final URI publicUri;
public GridData(Distributor distributor, URI publicUri) {
this.distributor = Require.nonNull("Distributor", distributor);
this.publicUri = Require.nonNull("Grid's public URI", publicUri);
}
@Override
public Object get(DataFetchingEnvironment environment) {
return new Grid(distributor, publicUri);
}
}
| 1 | 17,791 | We can revert this to reduce the diff of the PR. | SeleniumHQ-selenium | py |
@@ -69,7 +69,12 @@ func Add(mgr manager.Manager) error {
// newReconciler returns a new reconcile.Reconciler
func newReconciler(mgr manager.Manager) reconcile.Reconciler {
- return &ReconcileHiveConfig{Client: mgr.GetClient(), scheme: mgr.GetScheme(), restConfig: mgr.GetConfig(), mgr: mgr}
+ return &ReconcileHiveConfig{
+ Client: mgr.GetClient(),
+ scheme: mgr.GetScheme(),
+ restConfig: mgr.GetConfig(),
+ mgr: mgr,
+ }
}
// add adds a new Controller to mgr with r as the reconcile.Reconciler | 1 | package hive
import (
"context"
"crypto/md5"
"encoding/hex"
"fmt"
"os"
"reflect"
"time"
log "github.com/sirupsen/logrus"
hivev1 "github.com/openshift/hive/apis/hive/v1"
"github.com/openshift/hive/pkg/resource"
"github.com/openshift/library-go/pkg/operator/events"
apiextclientv1beta1 "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1"
apiregclientv1 "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/discovery"
"k8s.io/client-go/dynamic"
kubeinformers "k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
corev1listers "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
)
const (
// hiveConfigName is the one and only name for a HiveConfig supported in the cluster. Any others will be ignored.
hiveConfigName = "hive"
hiveOperatorDeploymentName = "hive-operator"
managedConfigNamespace = "openshift-config-managed"
aggregatorCAConfigMapName = "kube-apiserver-aggregator-client-ca"
// HiveOperatorNamespaceEnvVar is the environment variable we expect to be given with the namespace the hive-operator is running in.
HiveOperatorNamespaceEnvVar = "HIVE_OPERATOR_NS"
// watchResyncInterval is used for a couple handcrafted watches we do with our own informers.
watchResyncInterval = 30 * time.Minute
)
// Add creates a new Hive Controller and adds it to the Manager with default RBAC. The Manager will set fields on the Controller
// and Start it when the Manager is Started.
func Add(mgr manager.Manager) error {
return add(mgr, newReconciler(mgr))
}
// newReconciler returns a new reconcile.Reconciler
func newReconciler(mgr manager.Manager) reconcile.Reconciler {
return &ReconcileHiveConfig{Client: mgr.GetClient(), scheme: mgr.GetScheme(), restConfig: mgr.GetConfig(), mgr: mgr}
}
// add adds a new Controller to mgr with r as the reconcile.Reconciler
func add(mgr manager.Manager, r reconcile.Reconciler) error {
// Create a new controller
c, err := controller.New("hive-controller", mgr, controller.Options{Reconciler: r})
if err != nil {
return err
}
// Provide a ref to the controller on the reconciler, which is used to establish a watch on
// secrets in the hive namespace, which isn't known until we have a HiveConfig.
r.(*ReconcileHiveConfig).ctrlr = c
r.(*ReconcileHiveConfig).kubeClient, err = kubernetes.NewForConfig(mgr.GetConfig())
if err != nil {
return err
}
r.(*ReconcileHiveConfig).apiregClient, err = apiregclientv1.NewForConfig(mgr.GetConfig())
if err != nil {
return err
}
r.(*ReconcileHiveConfig).apiextClient, err = apiextclientv1beta1.NewForConfig(mgr.GetConfig())
if err != nil {
return err
}
r.(*ReconcileHiveConfig).discoveryClient, err = discovery.NewDiscoveryClientForConfig(mgr.GetConfig())
if err != nil {
return err
}
r.(*ReconcileHiveConfig).dynamicClient, err = dynamic.NewForConfig(mgr.GetConfig())
if err != nil {
return err
}
// Regular manager client is not fully initialized here, create our own for some
// initialization API communication:
tempClient, err := client.New(mgr.GetConfig(), client.Options{Scheme: mgr.GetScheme()})
if err != nil {
return err
}
hiveOperatorNS := os.Getenv(HiveOperatorNamespaceEnvVar)
r.(*ReconcileHiveConfig).hiveOperatorNamespace = hiveOperatorNS
log.Infof("hive operator NS: %s", hiveOperatorNS)
// Determine if the openshift-config-managed namespace exists (> v4.0). If so, setup a watch
// for configmaps in that namespace.
ns := &corev1.Namespace{}
log.Debugf("checking for existence of the %s namespace", managedConfigNamespace)
err = tempClient.Get(context.TODO(), types.NamespacedName{Name: managedConfigNamespace}, ns)
if err != nil && !errors.IsNotFound(err) {
log.WithError(err).Errorf("error checking existence of the %s namespace", managedConfigNamespace)
return err
}
if err == nil {
log.Debugf("the %s namespace exists, setting up a watch for configmaps on it", managedConfigNamespace)
// Create an informer that only listens to events in the OpenShift managed namespace
kubeInformerFactory := kubeinformers.NewSharedInformerFactoryWithOptions(r.(*ReconcileHiveConfig).kubeClient, watchResyncInterval, kubeinformers.WithNamespace(managedConfigNamespace))
configMapInformer := kubeInformerFactory.Core().V1().ConfigMaps().Informer()
mgr.Add(&informerRunnable{informer: configMapInformer})
// Watch for changes to cm/kube-apiserver-aggregator-client-ca in the OpenShift managed namespace
err = c.Watch(&source.Informer{Informer: configMapInformer}, handler.EnqueueRequestsFromMapFunc(handler.MapFunc(aggregatorCAConfigMapHandler)))
if err != nil {
return err
}
r.(*ReconcileHiveConfig).syncAggregatorCA = true
r.(*ReconcileHiveConfig).managedConfigCMLister = kubeInformerFactory.Core().V1().ConfigMaps().Lister()
} else {
log.Debugf("the %s namespace was not found, skipping watch for the aggregator CA configmap", managedConfigNamespace)
}
// Watch for changes to HiveConfig:
err = c.Watch(&source.Kind{Type: &hivev1.HiveConfig{}}, &handler.EnqueueRequestForObject{})
if err != nil {
return err
}
// Monitor changes to DaemonSets:
err = c.Watch(&source.Kind{Type: &appsv1.DaemonSet{}}, &handler.EnqueueRequestForOwner{
OwnerType: &hivev1.HiveConfig{},
})
if err != nil {
return err
}
// Monitor changes to Deployments:
err = c.Watch(&source.Kind{Type: &appsv1.Deployment{}}, &handler.EnqueueRequestForOwner{
OwnerType: &hivev1.HiveConfig{},
})
if err != nil {
return err
}
// Monitor changes to Services:
err = c.Watch(&source.Kind{Type: &corev1.Service{}}, &handler.EnqueueRequestForOwner{
OwnerType: &hivev1.HiveConfig{},
})
if err != nil {
return err
}
// Monitor changes to StatefulSets:
err = c.Watch(&source.Kind{Type: &appsv1.StatefulSet{}}, &handler.EnqueueRequestForOwner{
OwnerType: &hivev1.HiveConfig{},
})
if err != nil {
return err
}
// Look up the hive-operator Deployment image; we assume all hive components
// should use the same image as the operator.
operatorDeployment := &appsv1.Deployment{}
err = tempClient.Get(context.Background(),
types.NamespacedName{Name: hiveOperatorDeploymentName, Namespace: hiveOperatorNS},
operatorDeployment)
if err == nil {
img := operatorDeployment.Spec.Template.Spec.Containers[0].Image
pullPolicy := operatorDeployment.Spec.Template.Spec.Containers[0].ImagePullPolicy
log.Debugf("loaded hive image from hive-operator deployment: %s (%s)", img, pullPolicy)
r.(*ReconcileHiveConfig).hiveImage = img
r.(*ReconcileHiveConfig).hiveImagePullPolicy = pullPolicy
} else {
log.WithError(err).Fatal("unable to lookup hive image from hive-operator Deployment, image overriding disabled")
}
// TODO: Monitor CRDs but do not try to use an owner ref. (as they are global,
// and our config is namespaced)
// TODO: it would be nice to monitor the global resources ValidatingWebhookConfiguration
// and APIService, CRDs, but these cannot have OwnerReferences (which are not namespaced) as they
// are global. Need to use a different predicate to the Watch function.
return nil
}
var _ reconcile.Reconciler = &ReconcileHiveConfig{}
// ReconcileHiveConfig reconciles a Hive object
type ReconcileHiveConfig struct {
client.Client
scheme *runtime.Scheme
kubeClient kubernetes.Interface
apiextClient *apiextclientv1beta1.ApiextensionsV1beta1Client
apiregClient *apiregclientv1.ApiregistrationV1Client
discoveryClient discovery.DiscoveryInterface
dynamicClient dynamic.Interface
restConfig *rest.Config
hiveImage string
hiveOperatorNamespace string
hiveImagePullPolicy corev1.PullPolicy
syncAggregatorCA bool
managedConfigCMLister corev1listers.ConfigMapLister
ctrlr controller.Controller
servingCertSecretWatchEstablished bool
mgr manager.Manager
}
// Reconcile reads that state of the cluster for a Hive object and makes changes based on the state read
// and what is in the Hive.Spec
func (r *ReconcileHiveConfig) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
hLog := log.WithField("controller", "hive")
hLog.Info("Reconciling Hive components")
// Fetch the Hive instance
instance := &hivev1.HiveConfig{}
// We only support one HiveConfig per cluster, and it must be called "hive". This prevents installing
// Hive more than once in the cluster.
if request.NamespacedName.Name != hiveConfigName {
hLog.WithField("hiveConfig", request.NamespacedName.Name).Warn(
"invalid HiveConfig name, only one HiveConfig supported per cluster and must be named 'hive'")
return reconcile.Result{}, nil
}
// NOTE: ignoring the Namespace that seems to get set on request when syncing on namespaced objects,
// when our HiveConfig is ClusterScoped.
err := r.Get(context.TODO(), types.NamespacedName{Name: request.NamespacedName.Name}, instance)
if err != nil {
if errors.IsNotFound(err) {
// Object not found, return. Created objects are automatically garbage collected.
// For additional cleanup logic use finalizers.
hLog.Debug("HiveConfig not found, deleted?")
r.servingCertSecretWatchEstablished = false
return reconcile.Result{}, nil
}
// Error reading the object - requeue the request.
hLog.WithError(err).Error("error reading HiveConfig")
return reconcile.Result{}, err
}
origHiveConfig := instance.DeepCopy()
hiveNSName := getHiveNamespace(instance)
if err := r.establishSecretWatch(hLog, hiveNSName); err != nil {
return reconcile.Result{}, err
}
recorder := events.NewRecorder(r.kubeClient.CoreV1().Events(r.hiveOperatorNamespace), "hive-operator", &corev1.ObjectReference{
Name: request.Name,
Namespace: r.hiveOperatorNamespace,
})
// Ensure the target namespace for hive components exists and create if not:
hiveNamespace := &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: hiveNSName,
},
}
if err := r.Client.Create(context.Background(), hiveNamespace); err != nil {
if apierrors.IsAlreadyExists(err) {
hLog.WithField("hiveNS", hiveNSName).Debug("target namespace already exists")
} else {
hLog.WithError(err).Error("error creating hive target namespace")
return reconcile.Result{}, err
}
} else {
hLog.WithField("hiveNS", hiveNSName).Info("target namespace created")
}
if r.syncAggregatorCA {
// We use the configmap lister and not the regular client which only watches resources in the hive namespace
aggregatorCAConfigMap, err := r.managedConfigCMLister.ConfigMaps(managedConfigNamespace).Get(aggregatorCAConfigMapName)
// If an error other than not found, retry. If not found, it means we don't need to do anything with
// admission pods yet.
cmLog := hLog.WithField("configmap", fmt.Sprintf("%s/%s", managedConfigNamespace, aggregatorCAConfigMapName))
switch {
case errors.IsNotFound(err):
cmLog.Warningf("configmap was not found, will not sync aggregator CA with admission pods")
case err != nil:
cmLog.WithError(err).Errorf("cannot retrieve configmap")
return reconcile.Result{}, err
default:
caHash := computeHash(aggregatorCAConfigMap.Data)
cmLog.WithField("hash", caHash).Debugf("computed hash for configmap")
if instance.Status.AggregatorClientCAHash != caHash {
cmLog.WithField("oldHash", instance.Status.AggregatorClientCAHash).
Info("configmap has changed, admission pods will restart on the next sync")
instance.Status.AggregatorClientCAHash = caHash
cmLog.Debugf("updating status with new aggregator CA configmap hash")
err := r.updateHiveConfigStatus(origHiveConfig, instance, cmLog, true)
if err != nil {
cmLog.WithError(err).Error("cannot update hash in config status")
}
return reconcile.Result{}, err
}
cmLog.Debug("configmap unchanged, nothing to do")
}
}
h, err := resource.NewHelperFromRESTConfig(r.restConfig, hLog)
if err != nil {
hLog.WithError(err).Error("error creating resource helper")
r.updateHiveConfigStatus(origHiveConfig, instance, hLog, false)
return reconcile.Result{}, err
}
managedDomainsConfigMap, err := r.configureManagedDomains(hLog, instance)
if err != nil {
hLog.WithError(err).Error("error setting up managed domains")
r.updateHiveConfigStatus(origHiveConfig, instance, hLog, false)
return reconcile.Result{}, err
}
plConfigHash, err := r.deployAWSPrivateLinkConfigMap(hLog, h, instance)
if err != nil {
hLog.WithError(err).Error("error deploying aws privatelink configmap")
r.updateHiveConfigStatus(origHiveConfig, instance, hLog, false)
return reconcile.Result{}, err
}
confighash, err := r.deployHiveControllersConfigMap(hLog, h, instance, plConfigHash)
if err != nil {
hLog.WithError(err).Error("error deploying controllers configmap")
r.updateHiveConfigStatus(origHiveConfig, instance, hLog, false)
return reconcile.Result{}, err
}
fgConfigHash, err := r.deployFeatureGatesConfigMap(hLog, h, instance)
if err != nil {
hLog.WithError(err).Error("error deploying feature gates configmap")
r.updateHiveConfigStatus(origHiveConfig, instance, hLog, false)
return reconcile.Result{}, err
}
err = r.deployHive(hLog, h, instance, recorder, managedDomainsConfigMap, confighash)
if err != nil {
hLog.WithError(err).Error("error deploying Hive")
r.updateHiveConfigStatus(origHiveConfig, instance, hLog, false)
return reconcile.Result{}, err
}
err = r.deployClusterSync(hLog, h, instance, confighash)
if err != nil {
hLog.WithError(err).Error("error deploying ClusterSync")
r.updateHiveConfigStatus(origHiveConfig, instance, hLog, false)
return reconcile.Result{}, err
}
// Cleanup legacy objects:
if err := r.cleanupLegacyObjects(hLog); err != nil {
return reconcile.Result{}, err
}
err = r.deployHiveAdmission(hLog, h, instance, recorder, managedDomainsConfigMap, fgConfigHash, plConfigHash)
if err != nil {
hLog.WithError(err).Error("error deploying HiveAdmission")
r.updateHiveConfigStatus(origHiveConfig, instance, hLog, false)
return reconcile.Result{}, err
}
if err := r.cleanupLegacySyncSetInstances(hLog); err != nil {
hLog.WithError(err).Error("error cleaning up legacy SyncSetInstances")
r.updateHiveConfigStatus(origHiveConfig, instance, hLog, false)
return reconcile.Result{}, err
}
if err := r.updateHiveConfigStatus(origHiveConfig, instance, hLog, true); err != nil {
return reconcile.Result{}, err
}
return reconcile.Result{}, nil
}
func (r *ReconcileHiveConfig) establishSecretWatch(hLog *log.Entry, hiveNSName string) error {
// We need to establish a watch on Secret in the Hive namespace, one time only. We do not know this namespace until
// we have a HiveConfig.
if !r.servingCertSecretWatchEstablished {
hLog.WithField("namespace", hiveNSName).Info("establishing watch on secrets in hive namespace")
// Create an informer that only listens to events in the OpenShift managed namespace
kubeInformerFactory := kubeinformers.NewSharedInformerFactoryWithOptions(r.kubeClient, watchResyncInterval,
kubeinformers.WithNamespace(hiveNSName))
secretsInformer := kubeInformerFactory.Core().V1().Secrets().Informer()
if err := r.mgr.Add(&informerRunnable{informer: secretsInformer}); err != nil {
hLog.WithError(err).Error("error adding secret informer to manager")
return err
}
// Watch Secrets in hive namespace, so we can detect changes to the hiveadmission serving cert secret and
// force a deployment rollout.
err := r.ctrlr.Watch(&source.Informer{Informer: secretsInformer}, handler.Funcs{
CreateFunc: func(e event.CreateEvent, q workqueue.RateLimitingInterface) {
hLog.Debug("eventHandler CreateFunc")
q.Add(reconcile.Request{NamespacedName: types.NamespacedName{Name: hiveConfigName}})
},
UpdateFunc: func(e event.UpdateEvent, q workqueue.RateLimitingInterface) {
hLog.Debug("eventHandler UpdateFunc")
q.Add(reconcile.Request{NamespacedName: types.NamespacedName{Name: hiveConfigName}})
},
}, predicate.Funcs{
CreateFunc: func(e event.CreateEvent) bool {
hLog.WithField("predicateResponse", e.Object.GetName() == hiveAdmissionServingCertSecretName).Debug("secret CreateEvent")
return e.Object.GetName() == hiveAdmissionServingCertSecretName
},
UpdateFunc: func(e event.UpdateEvent) bool {
hLog.WithField("predicateResponse", e.ObjectNew.GetName() == hiveAdmissionServingCertSecretName).Debug("secret UpdateEvent")
return e.ObjectNew.GetName() == hiveAdmissionServingCertSecretName
},
})
if err != nil {
hLog.WithError(err).Error("error establishing secret watch")
return err
}
r.servingCertSecretWatchEstablished = true
} else {
hLog.Debug("secret watch already established")
}
return nil
}
func (r *ReconcileHiveConfig) cleanupLegacyObjects(hLog log.FieldLogger) error {
gvrNSNames := []gvrNSName{
{group: "rbac.authorization.k8s.io", version: "v1", resource: "clusterroles", name: "manager-role"},
{group: "rbac.authorization.k8s.io", version: "v1", resource: "clusterrolebindings", name: "manager-rolebinding"},
}
for _, gvrnsn := range gvrNSNames {
if err := dynamicDelete(r.dynamicClient, gvrnsn, hLog); err != nil {
return err
}
}
return nil
}
type informerRunnable struct {
informer cache.SharedIndexInformer
}
func (r *informerRunnable) Start(ctx context.Context) error {
stopch := ctx.Done()
r.informer.Run(stopch)
cache.WaitForCacheSync(stopch, r.informer.HasSynced)
return nil
}
func aggregatorCAConfigMapHandler(o client.Object) []reconcile.Request {
if o.GetName() == aggregatorCAConfigMapName {
return []reconcile.Request{{NamespacedName: types.NamespacedName{Name: hiveConfigName}}}
}
return nil
}
func computeHash(data map[string]string) string {
hasher := md5.New()
hasher.Write([]byte(fmt.Sprintf("%v", data)))
return hex.EncodeToString(hasher.Sum(nil))
}
func (r *ReconcileHiveConfig) updateHiveConfigStatus(origHiveConfig, newHiveConfig *hivev1.HiveConfig, logger log.FieldLogger, succeeded bool) error {
newHiveConfig.Status.ObservedGeneration = newHiveConfig.Generation
newHiveConfig.Status.ConfigApplied = succeeded
if reflect.DeepEqual(origHiveConfig, newHiveConfig) {
logger.Debug("HiveConfig unchanged, no update required")
return nil
}
logger.Info("HiveConfig has changed, updating")
err := r.Status().Update(context.TODO(), newHiveConfig)
if err != nil {
logger.WithError(err).Error("failed to update HiveConfig status")
}
return err
}
| 1 | 17,422 | i think this missed the reconciler to callsite change | openshift-hive | go |
@@ -990,6 +990,10 @@ public class SurfaceNamer extends NameFormatterDelegator {
return getNotImplementedString("SurfaceNamer.getGrpcClientImportName");
}
+ public String getClientConfigImportModule(Interface service) {
+ return getNotImplementedString("SurfaceNamer.getClientConfigImportModule");
+ }
+
/////////////////////////////////// Docs & Annotations //////////////////////////////////////////
/** The documentation name of a parameter for the given lower-case field name. */ | 1 | /* Copyright 2016 Google Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.api.codegen.transformer;
import com.google.api.codegen.ReleaseLevel;
import com.google.api.codegen.config.FieldConfig;
import com.google.api.codegen.config.MethodConfig;
import com.google.api.codegen.config.OneofConfig;
import com.google.api.codegen.config.ResourceNameConfig;
import com.google.api.codegen.config.ResourceNameType;
import com.google.api.codegen.config.SingleResourceNameConfig;
import com.google.api.codegen.config.VisibilityConfig;
import com.google.api.codegen.util.CommentReformatter;
import com.google.api.codegen.util.CommonRenderingUtil;
import com.google.api.codegen.util.Name;
import com.google.api.codegen.util.NameFormatter;
import com.google.api.codegen.util.NameFormatterDelegator;
import com.google.api.codegen.util.NamePath;
import com.google.api.codegen.util.SymbolTable;
import com.google.api.codegen.util.TypeNameConverter;
import com.google.api.codegen.viewmodel.ServiceMethodType;
import com.google.api.tools.framework.aspects.documentation.model.DocumentationUtil;
import com.google.api.tools.framework.model.Field;
import com.google.api.tools.framework.model.Interface;
import com.google.api.tools.framework.model.Method;
import com.google.api.tools.framework.model.ProtoElement;
import com.google.api.tools.framework.model.ProtoFile;
import com.google.api.tools.framework.model.TypeRef;
import com.google.common.collect.ImmutableList;
import io.grpc.Status;
import java.util.ArrayList;
import java.util.List;
/**
* A SurfaceNamer provides language-specific names for specific components of a view for a surface.
*
* <p>Naming is composed of two steps:
*
* <p>1. Composing a Name instance with the name pieces 2. Formatting the Name for the particular
* type of identifier needed.
*
* <p>This class delegates step 2 to the provided name formatter, which generally would be a
* language-specific namer.
*/
public class SurfaceNamer extends NameFormatterDelegator {
private final ModelTypeFormatter modelTypeFormatter;
private final TypeNameConverter typeNameConverter;
private final CommentReformatter commentReformatter;
private final String packageName;
public SurfaceNamer(
NameFormatter languageNamer,
ModelTypeFormatter modelTypeFormatter,
TypeNameConverter typeNameConverter,
CommentReformatter commentReformatter,
String packageName) {
super(languageNamer);
this.modelTypeFormatter = modelTypeFormatter;
this.typeNameConverter = typeNameConverter;
this.commentReformatter = commentReformatter;
this.packageName = packageName;
}
public ModelTypeFormatter getModelTypeFormatter() {
return modelTypeFormatter;
}
public TypeNameConverter getTypeNameConverter() {
return typeNameConverter;
}
public String getPackageName() {
return packageName;
}
public String getNotImplementedString(String feature) {
return "$ NOT IMPLEMENTED: " + feature + " $";
}
/////////////////////////////////////// Service names ///////////////////////////////////////////
/**
* Returns the service name with common suffixes removed.
*
* <p>For example: "LoggingServiceV2" becomes Name("Logging")
*/
public Name getReducedServiceName(Interface service) {
String name = service.getSimpleName().replaceAll("V[0-9]+$", "");
name = name.replaceAll("Service$", "");
return Name.upperCamel(name);
}
/** Human-friendly name of this service */
public String getServicePhraseName(Interface service) {
return Name.upperCamel(service.getSimpleName()).toPhrase();
}
/////////////////////////////////////// Constructors /////////////////////////////////////////////
/** The name of the constructor for the service client. The client is VKit generated, not GRPC. */
public String getApiWrapperClassConstructorName(Interface interfaze) {
return publicClassName(Name.upperCamel(interfaze.getSimpleName(), "Client"));
}
/** Constructor name for the type with the given nickname. */
public String getTypeConstructor(String typeNickname) {
return typeNickname;
}
//////////////////////////////////// Package & module names /////////////////////////////////////
/** The local (unqualified) name of the package */
public String getLocalPackageName() {
return getNotImplementedString("SurfaceNamer.getLocalPackageName");
}
/**
* The name of a variable that holds an instance of the module that contains the implementation of
* a particular proto interface. So far it is used by just NodeJS.
*/
public String getApiWrapperModuleName() {
return getNotImplementedString("SurfaceNamer.getApiWrapperModuleName");
}
/**
   * The version of the module that contains the implementation of a particular proto interface.
   * So far it is used by just NodeJS.
*/
public String getApiWrapperModuleVersion() {
return getNotImplementedString("SurfaceNamer.getApiWrapperModuleVersion");
}
/** The qualified namespace of a service. */
public String getNamespace(Interface service) {
NamePath namePath = typeNameConverter.getNamePath(modelTypeFormatter.getFullNameFor(service));
return qualifiedName(namePath.withoutHead());
}
/** The modules of the package. */
public ImmutableList<String> getApiModules() {
return ImmutableList.<String>of();
}
/////////////////////////////////// Protos methods /////////////////////////////////////////////
/** The function name to set the given proto field. */
public String getFieldSetFunctionName(FeatureConfig featureConfig, FieldConfig fieldConfig) {
Field field = fieldConfig.getField();
if (featureConfig.useResourceNameFormatOption(fieldConfig)) {
return getResourceNameFieldSetFunctionName(fieldConfig.getMessageFieldConfig());
} else {
return getFieldSetFunctionName(field);
}
}
/** The function name to set the given proto field. */
public String getFieldSetFunctionName(Field field) {
return getFieldSetFunctionName(field.getType(), Name.from(field.getSimpleName()));
}
/** The function name to set a field having the given type and name. */
public String getFieldSetFunctionName(TypeRef type, Name identifier) {
if (type.isMap()) {
return publicMethodName(Name.from("put", "all").join(identifier));
} else if (type.isRepeated()) {
return publicMethodName(Name.from("add", "all").join(identifier));
} else {
return publicMethodName(Name.from("set").join(identifier));
}
}
/** The function name to add an element to a map or repeated field. */
public String getFieldAddFunctionName(Field field) {
return getFieldAddFunctionName(field.getType(), Name.from(field.getSimpleName()));
}
/** The function name to add an element to a map or repeated field. */
public String getFieldAddFunctionName(TypeRef type, Name identifier) {
return getNotImplementedString("SurfaceNamer.getFieldAddFunctionName");
}
/** The function name to set a field that is a resource name class. */
public String getResourceNameFieldSetFunctionName(FieldConfig fieldConfig) {
TypeRef type = fieldConfig.getField().getType();
Name identifier = Name.from(fieldConfig.getField().getSimpleName());
Name resourceName = getResourceTypeNameObject(fieldConfig.getResourceNameConfig());
if (type.isMap()) {
return getNotImplementedString("SurfaceNamer.getResourceNameFieldSetFunctionName:map-type");
} else if (type.isRepeated()) {
return publicMethodName(
Name.from("add", "all").join(identifier).join("with").join(resourceName).join("list"));
} else {
return publicMethodName(Name.from("set").join(identifier).join("with").join(resourceName));
}
}
/** The function name to get the given proto field. */
public String getFieldGetFunctionName(FeatureConfig featureConfig, FieldConfig fieldConfig) {
Field field = fieldConfig.getField();
if (featureConfig.useResourceNameFormatOption(fieldConfig)) {
return getResourceNameFieldGetFunctionName(fieldConfig.getMessageFieldConfig());
} else {
return getFieldGetFunctionName(field);
}
}
/** The function name to get the given proto field. */
public String getFieldGetFunctionName(Field field) {
return getFieldGetFunctionName(field.getType(), Name.from(field.getSimpleName()));
}
/** The function name to get a field having the given type and name. */
public String getFieldGetFunctionName(TypeRef type, Name identifier) {
if (type.isRepeated() && !type.isMap()) {
return publicMethodName(Name.from("get").join(identifier).join("list"));
} else {
return publicMethodName(Name.from("get").join(identifier));
}
}
/** The function name to get a field that is a resource name class. */
public String getResourceNameFieldGetFunctionName(FieldConfig fieldConfig) {
TypeRef type = fieldConfig.getField().getType();
Name identifier = Name.from(fieldConfig.getField().getSimpleName());
Name resourceName = getResourceTypeNameObject(fieldConfig.getResourceNameConfig());
if (type.isMap()) {
return getNotImplementedString("SurfaceNamer.getResourceNameFieldGetFunctionName:map-type");
} else if (type.isRepeated()) {
return publicMethodName(
Name.from("get").join(identifier).join("list_as").join(resourceName).join("list"));
} else {
return publicMethodName(Name.from("get").join(identifier).join("as").join(resourceName));
}
}
/**
* The function name to get the count of elements in the given field.
*
* @throws IllegalArgumentException if the field is not a repeated field.
*/
public String getFieldCountGetFunctionName(Field field) {
if (field.isRepeated()) {
return publicMethodName(Name.from("get", field.getSimpleName(), "count"));
} else {
throw new IllegalArgumentException(
"Non-repeated field " + field.getSimpleName() + " has no count function.");
}
}
/**
* The function name to get an element by index from the given field.
*
* @throws IllegalArgumentException if the field is not a repeated field.
*/
public String getByIndexGetFunctionName(Field field) {
if (field.isRepeated()) {
return publicMethodName(Name.from("get", field.getSimpleName()));
} else {
throw new IllegalArgumentException(
"Non-repeated field " + field.getSimpleName() + " has no get-by-index function.");
}
}
///////////////////////////////// Function & Callable names /////////////////////////////////////
/** The function name to retrieve default client option */
public String getDefaultApiSettingsFunctionName(Interface service) {
return getNotImplementedString("SurfaceNamer.getDefaultClientOptionFunctionName");
}
/** The method name to create a rerouted gRPC client. Used in C# */
public String getReroutedGrpcMethodName(MethodConfig methodConfig) {
return getNotImplementedString("SurfaceNamer.getReroutedGrpcMethodName");
}
/** The name of the surface method which can call the given API method. */
public String getApiMethodName(Method method, VisibilityConfig visibility) {
return getApiMethodName(Name.upperCamel(method.getSimpleName()), visibility);
}
/** The name of the async surface method which can call the given API method. */
public String getAsyncApiMethodName(Method method, VisibilityConfig visibility) {
return getApiMethodName(Name.upperCamel(method.getSimpleName()).join("async"), visibility);
}
protected String getApiMethodName(Name name, VisibilityConfig visibility) {
switch (visibility) {
case PUBLIC:
return publicMethodName(name);
case PACKAGE:
case PRIVATE:
return privateMethodName(name);
default:
throw new IllegalArgumentException("cannot name method with visibility: " + visibility);
}
}
/**
* The name of the iterate method of the PagedListResponse type for a field, returning the
* resource type iterate method if available
*/
public String getPagedResponseIterateMethod(
FeatureConfig featureConfig, FieldConfig fieldConfig) {
if (featureConfig.useResourceNameFormatOption(fieldConfig)) {
Name resourceName = getResourceTypeNameObject(fieldConfig.getResourceNameConfig());
return publicMethodName(Name.from("iterate_all_as").join(resourceName));
} else {
return getPagedResponseIterateMethod();
}
}
/** The name of the iterate method of the PagedListResponse type for a field */
public String getPagedResponseIterateMethod() {
return publicMethodName(Name.from("iterate_all_elements"));
}
public String getResourceTypeParseMethodName(
ModelTypeTable typeTable, FieldConfig resourceFieldConfig) {
return getNotImplementedString("SurfaceNamer.getResourceTypeParseMethodName");
}
/** The name of the create method for the resource one-of for the given field config */
public String getResourceOneofCreateMethod(ModelTypeTable typeTable, FieldConfig fieldConfig) {
return getAndSaveResourceTypeName(typeTable, fieldConfig.getMessageFieldConfig())
+ "."
+ publicMethodName(Name.from("from"));
}
/** The method name of the retry filter for the given key */
public String retryFilterMethodName(String key) {
return privateMethodName(Name.from(key).join("retry").join("filter"));
}
/** The method name of the retry backoff for the given key */
public String retryBackoffMethodName(String key) {
return privateMethodName(Name.from("get").join(key).join("retry").join("backoff"));
}
/** The method name of the timeout backoff for the given key */
public String timeoutBackoffMethodName(String key) {
return privateMethodName(Name.from("get").join(key).join("timeout").join("backoff"));
}
/** The name of the GRPC streaming surface method which can call the given API method. */
public String getGrpcStreamingApiMethodName(Method method, VisibilityConfig visibility) {
return getApiMethodName(method, visibility);
}
/** The name of the return type of the given grpc streaming method. */
public String getGrpcStreamingApiReturnTypeName(Method method) {
return publicClassName(
Name.upperCamel(method.getOutputType().getMessageType().getSimpleName()));
}
/** The name of the callable for the paged callable variant of the given method. */
public String getPagedCallableName(Method method) {
return privateFieldName(Name.upperCamel(method.getSimpleName(), "PagedCallable"));
}
/** The name of the paged callable variant of the given method. */
public String getPagedCallableMethodName(Method method) {
return publicMethodName(Name.upperCamel(method.getSimpleName(), "PagedCallable"));
}
/** The name of the plain callable variant of the given method. */
public String getCallableMethodName(Method method) {
return publicMethodName(Name.upperCamel(method.getSimpleName(), "Callable"));
}
/** The name of the plain callable variant of the given method. */
public String getCallableAsyncMethodName(Method method) {
return publicMethodName(Name.upperCamel(method.getSimpleName(), "CallableAsync"));
}
/** The name of the operation callable variant of the given method. */
public String getOperationCallableMethodName(Method method) {
return publicMethodName(Name.upperCamel(method.getSimpleName(), "OperationCallable"));
}
/** The name of the plain callable for the given method. */
public String getCallableName(Method method) {
return privateFieldName(Name.upperCamel(method.getSimpleName(), "Callable"));
}
/** The name of the operation callable for the given method. */
public String getOperationCallableName(Method method) {
return privateFieldName(Name.upperCamel(method.getSimpleName(), "OperationCallable"));
}
/** The name of the settings member name for the given method. */
public String getSettingsMemberName(Method method) {
return publicMethodName(Name.upperCamel(method.getSimpleName(), "Settings"));
}
/** The getter function name for the settings for the given method. */
public String getSettingsFunctionName(Method method) {
return getSettingsMemberName(method);
}
/** The name of a method to apply modifications to this method request. */
public String getModifyMethodName(Method method) {
return getNotImplementedString("SurfaceNamer.getModifyMethodName");
}
/** The function name to retrieve default call option */
public String getDefaultCallSettingsFunctionName(Interface service) {
return publicMethodName(Name.upperCamel(service.getSimpleName(), "Settings"));
}
/** The name of the IAM resource getter function. */
public String getIamResourceGetterFunctionName(Field field) {
return getNotImplementedString("SurfaceNamer.getIamResourceGetterFunctionName");
}
/** The name of the function that will create a stub. */
public String getCreateStubFunctionName(Interface service) {
return privateMethodName(
Name.upperCamel("Create", service.getSimpleName(), "Stub", "Function"));
}
/** Function used to register the GRPC server. */
public String getServerRegisterFunctionName(Interface service) {
return getNotImplementedString("SurfaceNamer.getServerRegisterFunctionName");
}
/** The name of the LRO surface method which can call the given API method. */
public String getLroApiMethodName(Method method, VisibilityConfig visibility) {
return getAsyncApiMethodName(method, visibility);
}
/////////////////////////////////////// Variable names //////////////////////////////////////////
/**
* The name of a variable to hold a value for the given proto message field (such as a flattened
* parameter).
*/
public String getVariableName(Field field) {
return localVarName(Name.from(field.getSimpleName()));
}
/**
* The name of a variable that holds an instance of the class that implements a particular proto
* interface.
*/
public String getApiWrapperVariableName(Interface interfaze) {
return localVarName(Name.upperCamel(interfaze.getSimpleName(), "Client"));
}
/**
* The name of a variable that holds the settings class for a particular proto interface; not used
* in most languages.
*/
public String getApiSettingsVariableName(Interface interfaze) {
return localVarName(Name.upperCamel(interfaze.getSimpleName(), "Settings"));
}
/**
* The name of the builder class for the settings class for a particular proto interface; not used
* in most languages.
*/
public String getApiSettingsBuilderVarName(Interface interfaze) {
return localVarName(Name.upperCamel(interfaze.getSimpleName(), "SettingsBuilder"));
}
/** The variable name for the given identifier that is formatted. */
public String getFormattedVariableName(Name identifier) {
return localVarName(Name.from("formatted").join(identifier));
}
/** The variable name of the rerouted gRPC client. Used in C# */
public String getReroutedGrpcClientVarName(MethodConfig methodConfig) {
return getNotImplementedString("SurfaceNamer.getGrpcClientName");
}
/** The name of the variable that will hold the stub for a service. */
public String getStubName(Interface service) {
return privateFieldName(Name.upperCamel(service.getSimpleName(), "Stub"));
}
/** The name of the array which will hold the methods for a given stub. */
public String getStubMethodsArrayName(Interface service) {
return privateMethodName(Name.upperCamel(service.getSimpleName(), "Stub", "Methods"));
}
/** The parameter name for the given lower-case field name. */
public String getParamName(String var) {
return localVarName(Name.from(var));
}
public String getPropertyName(String var) {
return publicMethodName(Name.from(var));
}
/* The name of a retry definition */
public String getRetryDefinitionName(String retryDefinitionKey) {
return privateMethodName(Name.from(retryDefinitionKey));
}
/** The name of the variable to hold the grpc client of a service. */
public String getGrpcClientVariableName(Interface service) {
return localVarName(Name.upperCamel(service.getSimpleName(), "Client"));
}
/** The name of the field. */
public String getFieldName(Field field) {
return publicFieldName(Name.from(field.getSimpleName()));
}
/** The page streaming descriptor name for the given method. */
public String getPageStreamingDescriptorName(Method method) {
return privateFieldName(Name.upperCamel(method.getSimpleName(), "PageStreamingDescriptor"));
}
/** The page streaming factory name for the given method. */
public String getPagedListResponseFactoryName(Method method) {
return privateFieldName(Name.upperCamel(method.getSimpleName(), "PagedListResponseFactory"));
}
/** The variable name of the gRPC request object. */
public String getRequestVariableName(Method method) {
return getNotImplementedString("SurfaceNamer.getRequestVariableName");
}
/////////////////////////////////////// Type names /////////////////////////////////////////////
/** The name of the class that implements a particular proto interface. */
public String getApiWrapperClassName(Interface interfaze) {
return publicClassName(Name.upperCamel(interfaze.getSimpleName(), "Client"));
}
/** The name of the implementation class that implements a particular proto interface. */
public String getApiWrapperClassImplName(Interface interfaze) {
return getNotImplementedString("SurfaceNamer.getApiWrapperClassImplName");
}
/** The name of the class that implements snippets for a particular proto interface. */
public String getApiSnippetsClassName(Interface interfaze) {
return publicClassName(Name.upperCamel(interfaze.getSimpleName(), "ApiSnippets"));
}
/**
* The name of the settings class for a particular proto interface; not used in most languages.
*/
public String getApiSettingsClassName(Interface interfaze) {
return publicClassName(Name.upperCamel(interfaze.getSimpleName(), "Settings"));
}
/** The name of the class that contains paged list response wrappers. */
public String getPagedResponseWrappersClassName() {
return publicClassName(Name.upperCamel("PagedResponseWrappers"));
}
/**
* The type name of the Grpc service class This needs to match what Grpc generates for the
* particular language.
*/
public String getGrpcServiceClassName(Interface service) {
NamePath namePath = typeNameConverter.getNamePath(modelTypeFormatter.getFullNameFor(service));
String grpcContainerName =
publicClassName(Name.upperCamelKeepUpperAcronyms(namePath.getHead(), "Grpc"));
String serviceClassName =
publicClassName(Name.upperCamelKeepUpperAcronyms(service.getSimpleName(), "ImplBase"));
return qualifiedName(namePath.withHead(grpcContainerName).append(serviceClassName));
}
/**
   * The fully qualified class name of an API service. TODO: Support the general pattern of
* package + class name in NameFormatter.
*/
public String getFullyQualifiedApiWrapperClassName(Interface interfaze) {
return getNotImplementedString("SurfaceNamer.getFullyQualifiedApiWrapperClassName");
}
protected Name getResourceTypeNameObject(ResourceNameConfig resourceNameConfig) {
String entityName = resourceNameConfig.getEntityName();
ResourceNameType resourceNameType = resourceNameConfig.getResourceNameType();
switch (resourceNameType) {
case ANY:
return getAnyResourceTypeName();
case FIXED:
return Name.from(entityName).join("name_fixed");
case ONEOF:
// Remove suffix "_oneof". This allows the collection oneof config to "share" an entity name
// with a collection config.
entityName = removeSuffix(entityName, "_oneof");
return Name.from(entityName).join("name_oneof");
case SINGLE:
return Name.from(entityName).join("name");
case NONE:
default:
throw new UnsupportedOperationException("unexpected entity name type");
}
}
protected Name getAnyResourceTypeName() {
return Name.from("resource_name");
}
public String getResourceTypeName(ResourceNameConfig resourceNameConfig) {
return publicClassName(getResourceTypeNameObject(resourceNameConfig));
}
/**
* The type name of the Grpc server class. This needs to match what Grpc generates for the
* particular language.
*/
public String getGrpcServerTypeName(Interface service) {
return getNotImplementedString("SurfaceNamer.getGrpcServerTypeName");
}
/**
* The type name of the Grpc client class. This needs to match what Grpc generates for the
* particular language.
*/
public String getGrpcClientTypeName(Interface service) {
return getNotImplementedString("SurfaceNamer.getGrpcClientTypeName");
}
/**
* Gets the type name of the Grpc client class, saves it to the type table provided, and returns
* the nickname.
*/
public String getAndSaveNicknameForGrpcClientTypeName(
ModelTypeTable typeTable, Interface service) {
return typeTable.getAndSaveNicknameFor(getGrpcClientTypeName(service));
}
/**
* The type name of the Grpc container class. This needs to match what Grpc generates for the
* particular language.
*/
public String getGrpcContainerTypeName(Interface service) {
NamePath namePath = typeNameConverter.getNamePath(modelTypeFormatter.getFullNameFor(service));
String publicClassName =
publicClassName(Name.upperCamelKeepUpperAcronyms(namePath.getHead(), "Grpc"));
return qualifiedName(namePath.withHead(publicClassName));
}
/** The type name for the method param */
public String getParamTypeName(ModelTypeTable typeTable, TypeRef type) {
return getNotImplementedString("SurfaceNamer.getParamTypeName");
}
/** The type name for retry settings. */
public String getRetrySettingsTypeName() {
return getNotImplementedString("SurfaceNamer.getRetrySettingsClassName");
}
/** The type name for an optional array argument; not used in most languages. */
public String getOptionalArrayTypeName() {
return getNotImplementedString("SurfaceNamer.getOptionalArrayTypeName");
}
/** The return type name in a dynamic language for the given method. */
public String getDynamicLangReturnTypeName(Method method, MethodConfig methodConfig) {
return getNotImplementedString("SurfaceNamer.getDynamicReturnTypeName");
}
/** The return type name in a static language for the given method. */
public String getStaticLangReturnTypeName(Method method, MethodConfig methodConfig) {
return getNotImplementedString("SurfaceNamer.getStaticLangReturnTypeName");
}
/** The return type name in a static language that is used by the caller */
public String getStaticLangCallerReturnTypeName(Method method, MethodConfig methodConfig) {
return getStaticLangReturnTypeName(method, methodConfig);
}
/** The async return type name in a static language for the given method. */
public String getStaticLangAsyncReturnTypeName(Method method, MethodConfig methodConfig) {
return getNotImplementedString("SurfaceNamer.getStaticLangAsyncReturnTypeName");
}
/**
* Computes the nickname of the operation response type name for the given method, saves it in the
* given type table, and returns it.
*/
public String getAndSaveOperationResponseTypeName(
Method method, ModelTypeTable typeTable, MethodConfig methodConfig) {
return getNotImplementedString("SurfaceNamer.getAndSaveOperationResponseTypeName");
}
/**
* In languages with pointers, strip the pointer, leaving only the base type. Eg, in C, "int*"
* would become "int".
*/
public String valueType(String type) {
return getNotImplementedString("SurfaceNamer.valueType");
}
/** The async return type name in a static language that is used by the caller */
public String getStaticLangCallerAsyncReturnTypeName(Method method, MethodConfig methodConfig) {
return getStaticLangAsyncReturnTypeName(method, methodConfig);
}
/** The name used in Grpc for the given API method. This needs to match what Grpc generates. */
public String getGrpcMethodName(Method method) {
// This might seem silly, but it makes clear what we're dealing with (upper camel).
// This is language-independent because of gRPC conventions.
return Name.upperCamelKeepUpperAcronyms(method.getSimpleName()).toUpperCamel();
}
/** The GRPC streaming server type name for a given method. */
public String getStreamingServerName(Method method) {
return getNotImplementedString("SurfaceNamer.getStreamingServerName");
}
/** The type name of call options */
public String getCallSettingsTypeName(Interface service) {
return publicClassName(Name.upperCamel(service.getSimpleName(), "Settings"));
}
/** The name of the return type of the given grpc streaming method. */
public String getGrpcStreamingApiReturnTypeName(Method method, ModelTypeTable typeTable) {
return publicClassName(
Name.upperCamel(method.getOutputType().getMessageType().getSimpleName()));
}
/**
* The generic-aware response type name for the given type. For example, in Java, this will be the
* type used for RpcFuture<...>.
*/
public String getGenericAwareResponseTypeName(TypeRef outputType) {
return getNotImplementedString("SurfaceNamer.getGenericAwareResponseType");
}
/**
* Computes the nickname of the paged response type name for the given method and resources field,
* saves it in the given type table, and returns it.
*/
public String getAndSavePagedResponseTypeName(
Method method, ModelTypeTable typeTable, FieldConfig resourcesFieldConfig) {
return getNotImplementedString("SurfaceNamer.getAndSavePagedResponseTypeName");
}
/** The inner type name of the paged response type for the given method and resources field. */
public String getPagedResponseTypeInnerName(
Method method, ModelTypeTable typeTable, Field resourcesField) {
return getNotImplementedString("SurfaceNamer.getAndSavePagedResponseTypeInnerName");
}
/**
* Computes the nickname of the async response type name for the given resource type, saves it in
* the given type table, and returns it.
*/
public String getAndSaveAsyncPagedResponseTypeName(
Method method, ModelTypeTable typeTable, FieldConfig resourcesFieldConfig) {
return getNotImplementedString("SurfaceNamer.getAndSavePagedAsyncResponseTypeName");
}
/**
* Computes the nickname of the response type name for the given resource type, as used by the
* caller, saves it in the given type table, and returns it.
*/
public String getAndSaveCallerPagedResponseTypeName(
Method method, ModelTypeTable typeTable, FieldConfig resourcesFieldConfig) {
return getAndSavePagedResponseTypeName(method, typeTable, resourcesFieldConfig);
}
/**
* Computes the nickname of the response type name for the given resource type, as used by the
* caller, saves it in the given type table, and returns it.
*/
public String getAndSaveCallerAsyncPagedResponseTypeName(
Method method, ModelTypeTable typeTable, FieldConfig resourcesFieldConfig) {
return getAndSaveAsyncPagedResponseTypeName(method, typeTable, resourcesFieldConfig);
}
/** The class name of the generated resource type from the entity name. */
public String getAndSaveResourceTypeName(ModelTypeTable typeTable, FieldConfig fieldConfig) {
String resourceClassName =
publicClassName(getResourceTypeNameObject(fieldConfig.getResourceNameConfig()));
return typeTable.getAndSaveNicknameForTypedResourceName(fieldConfig, resourceClassName);
}
/** The class name of the generated resource type from the entity name. */
public String getAndSaveElementResourceTypeName(
ModelTypeTable typeTable, FieldConfig fieldConfig) {
String resourceClassName =
publicClassName(getResourceTypeNameObject(fieldConfig.getResourceNameConfig()));
return typeTable.getAndSaveNicknameForResourceNameElementType(fieldConfig, resourceClassName);
}
/** The fully qualified type name for the stub of a service. */
public String getFullyQualifiedStubType(Interface service) {
return getNotImplementedString("SurfaceNamer.getFullyQualifiedStubType");
}
/** The type name of the API callable class for this service method type. */
public String getApiCallableTypeName(ServiceMethodType serviceMethodType) {
return getNotImplementedString("SurfaceNamer.getApiCallableTypeName");
}
/** Return the type name used to discriminate oneof variants. */
public String getOneofVariantTypeName(OneofConfig oneof) {
return getNotImplementedString("SurfaceNamer.getOneofVariantTypeName");
}
/**
* The formatted name of a type used in long running operations, i.e. the operation payload and
* metadata,
*/
public String getLongRunningOperationTypeName(ModelTypeTable typeTable, TypeRef type) {
return getNotImplementedString("SurfaceNamer.getLongRunningOperationTypeName");
}
  /** The type name for the gRPC request. */
public String getRequestTypeName(ModelTypeTable typeTable, TypeRef type) {
return getNotImplementedString("SurfaceNamer.getRequestTypeName");
}
/////////////////////////////////////// Resource names //////////////////////////////////////////
public String getResourceParameterName(ResourceNameConfig resourceNameConfig) {
return localVarName(getResourceTypeNameObject(resourceNameConfig));
}
public String getResourcePropertyName(ResourceNameConfig resourceNameConfig) {
return publicMethodName(getResourceTypeNameObject(resourceNameConfig));
}
public String getResourceEnumName(ResourceNameConfig resourceNameConfig) {
return getResourceTypeNameObject(resourceNameConfig).toUpperUnderscore().toUpperCase();
}
/** The parameter name of the IAM resource. */
public String getIamResourceParamName(Field field) {
return localVarName(Name.upperCamel(field.getParent().getSimpleName()));
}
/////////////////////////////////////// Path Template ////////////////////////////////////////
/**
* The name of a path template constant for the given collection, to be held in an API wrapper
* class.
*/
public String getPathTemplateName(
Interface service, SingleResourceNameConfig resourceNameConfig) {
return inittedConstantName(Name.from(resourceNameConfig.getEntityName(), "path", "template"));
}
/** The name of a getter function to get a particular path template for the given collection. */
public String getPathTemplateNameGetter(
Interface service, SingleResourceNameConfig resourceNameConfig) {
return publicMethodName(
Name.from("get", resourceNameConfig.getEntityName(), "name", "template"));
}
/** The name of the path template resource, in human format. */
public String getPathTemplateResourcePhraseName(SingleResourceNameConfig resourceNameConfig) {
return Name.from(resourceNameConfig.getEntityName()).toPhrase();
}
/** The function name to format the entity for the given collection. */
public String getFormatFunctionName(
Interface service, SingleResourceNameConfig resourceNameConfig) {
return staticFunctionName(Name.from("format", resourceNameConfig.getEntityName(), "name"));
}
/**
* The function name to parse a variable from the string representing the entity for the given
* collection.
*/
public String getParseFunctionName(String var, SingleResourceNameConfig resourceNameConfig) {
return staticFunctionName(
Name.from("parse", var, "from", resourceNameConfig.getEntityName(), "name"));
}
/** The entity name for the given collection. */
public String getEntityName(SingleResourceNameConfig resourceNameConfig) {
return localVarName(Name.from(resourceNameConfig.getEntityName()));
}
/** The parameter name for the entity for the given collection config. */
public String getEntityNameParamName(SingleResourceNameConfig resourceNameConfig) {
return localVarName(Name.from(resourceNameConfig.getEntityName(), "name"));
}
///////////////////////////////////// Constant & Keyword ////////////////////////////////////////
/** The name of the constant to hold the bundling descriptor for the given method. */
public String getBundlingDescriptorConstName(Method method) {
return inittedConstantName(Name.upperCamel(method.getSimpleName()).join("bundling_desc"));
}
/** The key to use in a dictionary for the given method. */
public String getMethodKey(Method method) {
return keyName(Name.upperCamel(method.getSimpleName()));
}
/** The key to use in a dictionary for the given field. */
public String getFieldKey(Field field) {
return keyName(Name.from(field.getSimpleName()));
}
/** The path to the client config for the given interface. */
public String getClientConfigPath(Interface service) {
return getNotImplementedString("SurfaceNamer.getClientConfigPath");
}
/**
* The type name of the method constant in the Grpc container class. This needs to match what Grpc
* generates for the particular language.
*/
public String getGrpcMethodConstant(Method method) {
return inittedConstantName(
Name.from("method").join(Name.upperCamelKeepUpperAcronyms(method.getSimpleName())));
}
  /** The keyword controlling the visibility, e.g. "public", "protected". */
public String getVisiblityKeyword(VisibilityConfig visibility) {
switch (visibility) {
case PUBLIC:
return "public";
case PACKAGE:
return "/* package-private */";
case PRIVATE:
return "private";
default:
throw new IllegalArgumentException("invalid visibility: " + visibility);
}
}
/** The public access modifier for the current language. */
public String getPublicAccessModifier() {
return "public";
}
/** The private access modifier for the current language. */
public String getPrivateAccessModifier() {
return "private";
}
/** The name of an RPC status code */
public String getStatusCodeName(Status.Code code) {
return privateMethodName(Name.upperUnderscore(code.toString()));
}
/** The name of the constant to hold the page streaming descriptor for the given method. */
public String getPageStreamingDescriptorConstName(Method method) {
return inittedConstantName(Name.upperCamel(method.getSimpleName()).join("page_str_desc"));
}
/** The name of the constant to hold the page streaming factory for the given method. */
public String getPagedListResponseFactoryConstName(Method method) {
return inittedConstantName(Name.upperCamel(method.getSimpleName()).join("page_str_fact"));
}
/** The string used to identify the method in the gRPC stub. Not all languages will use this. */
public String getGrpcStubCallString(Interface service, Method method) {
return getNotImplementedString("SurfaceNamer.getGrpcStubCallString");
}
///////////////////////////////////////// Imports ///////////////////////////////////////////////
/** Returns true if the request object param type for the given field should be imported. */
public boolean shouldImportRequestObjectParamType(Field field) {
return true;
}
/**
* Returns true if the request object param element type for the given field should be imported.
*/
public boolean shouldImportRequestObjectParamElementType(Field field) {
return true;
}
public String getServiceFileImportName(String filename) {
return getNotImplementedString("SurfaceNamer.getServiceFileImportName");
}
public String getProtoFileImportName(String filename) {
return getNotImplementedString("SurfaceNamer.getProtoFileImportName");
}
/** The name of the import for a specific grpcClient */
public String getGrpcClientImportName(Interface service) {
return getNotImplementedString("SurfaceNamer.getGrpcClientImportName");
}
/////////////////////////////////// Docs & Annotations //////////////////////////////////////////
/** The documentation name of a parameter for the given lower-case field name. */
public String getParamDocName(String var) {
return localVarName(Name.from(var));
}
/** Converts the given text to doc lines in the format of the current language. */
public List<String> getDocLines(String text) {
return CommonRenderingUtil.getDocLines(commentReformatter.reformat(text));
}
/** Provides the doc lines for the given proto element in the current language. */
public List<String> getDocLines(ProtoElement element) {
return getDocLines(DocumentationUtil.getScopedDescription(element));
}
/** Provides the doc lines for the given method element in the current language. */
public List<String> getDocLines(Method method, MethodConfig methodConfig) {
return getDocLines(method);
}
/** The doc lines that declare what exception(s) are thrown for an API method. */
public List<String> getThrowsDocLines() {
return new ArrayList<>();
}
/** The doc lines that describe the return value for an API method. */
public List<String> getReturnDocLines(
SurfaceTransformerContext context, MethodConfig methodConfig, Synchronicity synchronicity) {
return new ArrayList<>();
}
public String getReleaseAnnotation(ReleaseLevel releaseLevel) {
return getNotImplementedString("SurfaceNamer.getReleaseAnnotation");
}
//////////////////////////////////////// File names ////////////////////////////////////////////
/** The file name for an API service. */
public String getServiceFileName(Interface service) {
return getNotImplementedString("SurfaceNamer.getServiceFileName");
}
public String getSourceFilePath(String path, String publicClassName) {
return getNotImplementedString("SurfaceNamer.getSourceFilePath");
}
/** The language-specific file name for a proto file. */
public String getProtoFileName(ProtoFile file) {
return getNotImplementedString("SurfaceNamer.getProtoFileName");
}
////////////////////////////////////////// Test /////////////////////////////////////////////
public String getTestPackageName() {
return getNotImplementedString("SurfaceNamer.getTestPackageName");
}
/** The test case name for the given method. */
public String getTestCaseName(SymbolTable symbolTable, Method method) {
Name testCaseName = symbolTable.getNewSymbol(Name.upperCamel(method.getSimpleName(), "Test"));
return publicMethodName(testCaseName);
}
/** The exception test case name for the given method. */
public String getExceptionTestCaseName(SymbolTable symbolTable, Method method) {
Name testCaseName =
symbolTable.getNewSymbol(Name.upperCamel(method.getSimpleName(), "ExceptionTest"));
return publicMethodName(testCaseName);
}
/** The unit test class name for the given API service. */
public String getUnitTestClassName(Interface service) {
return publicClassName(Name.upperCamel(service.getSimpleName(), "Client", "Test"));
}
/** The smoke test class name for the given API service. */
public String getSmokeTestClassName(Interface service) {
return publicClassName(Name.upperCamel(service.getSimpleName(), "Smoke", "Test"));
}
/** The class name of the mock gRPC service for the given API service. */
public String getMockServiceClassName(Interface service) {
return publicClassName(Name.upperCamelKeepUpperAcronyms("Mock", service.getSimpleName()));
}
/** The class name of a variable to hold the mock gRPC service for the given API service. */
public String getMockServiceVarName(Interface service) {
return localVarName(Name.upperCamelKeepUpperAcronyms("Mock", service.getSimpleName()));
}
/** The class name of the mock gRPC service implementation for the given API service. */
public String getMockGrpcServiceImplName(Interface service) {
return publicClassName(
Name.upperCamelKeepUpperAcronyms("Mock", service.getSimpleName(), "Impl"));
}
/** Inject random value generator code to the given string. */
public String injectRandomStringGeneratorCode(String randomString) {
return getNotImplementedString("SurfaceNamer.getRandomStringValue");
}
////////////////////////////////////////// Examples ////////////////////////////////////////////
/** The name of the example package */
public String getExamplePackageName() {
return getNotImplementedString("SurfaceNamer.getExamplePackageName");
}
/** The local (unqualified) name of the example package */
public String getLocalExamplePackageName() {
return getNotImplementedString("SurfaceNamer.getLocalExamplePackageName");
}
/**
   * The name of the example of the constructor for the service client. The client is VKit generated,
* not GRPC.
*/
public String getApiWrapperClassConstructorExampleName(Interface interfaze) {
return getApiWrapperClassConstructorName(interfaze);
}
/** The name of the example for the paged callable variant. */
public String getPagedCallableMethodExampleName(Interface interfaze, Method method) {
return getPagedCallableMethodName(method);
}
/** The name of the example for the plain callable variant. */
public String getCallableMethodExampleName(Interface interfaze, Method method) {
return getCallableMethodName(method);
}
/** The name of the example for the operation callable variant of the given method. */
public String getOperationCallableMethodExampleName(Interface interfaze, Method method) {
return getOperationCallableMethodName(method);
}
/** The name of the example for the method. */
public String getApiMethodExampleName(Interface interfaze, Method method) {
return getApiMethodName(method, VisibilityConfig.PUBLIC);
}
/** The name of the example for the async variant of the given method. */
public String getAsyncApiMethodExampleName(Interface interfaze, Method method) {
return getAsyncApiMethodName(method, VisibilityConfig.PUBLIC);
}
/**
* The name of the example of the GRPC streaming surface method which can call the given API
* method.
*/
public String getGrpcStreamingApiMethodExampleName(Interface interfaze, Method method) {
return getGrpcStreamingApiMethodName(method, VisibilityConfig.PUBLIC);
}
/** The example name of the IAM resource getter function. */
public String getIamResourceGetterFunctionExampleName(Interface service, Field field) {
return getIamResourceGetterFunctionName(field);
}
/** The file name for the example of an API service. */
public String getExampleFileName(Interface service) {
return getNotImplementedString("SurfaceNamer.getExampleFileName");
}
////////////////////////////////////////// Utility /////////////////////////////////////////////
/** Indicates whether the specified method supports retry settings. */
public boolean methodHasRetrySettings(MethodConfig methodConfig) {
return true;
}
/** Indicates whether the specified method supports timeout settings. */
public boolean methodHasTimeoutSettings(MethodConfig methodConfig) {
return true;
}
private static String removeSuffix(String original, String suffix) {
if (original.endsWith(suffix)) {
original = original.substring(0, original.length() - suffix.length());
}
return original;
}
/** Make the given type name able to accept nulls, if it is a primitive type */
public String makePrimitiveTypeNullable(String typeName, TypeRef type) {
return typeName;
}
/** Is this type a primitive, according to target language. */
public boolean isPrimitive(TypeRef type) {
return type.isPrimitive();
}
/** The default value for an optional field, null if no default value required. */
public String getOptionalFieldDefaultValue(
FieldConfig fieldConfig, MethodTransformerContext context) {
return getNotImplementedString("SurfaceNamer.getOptionalFieldDefaultValue");
}
}
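The review for this patch asks that the new getClientConfigImportModule reuse the existing getClientConfigPath rather than introducing a second unimplemented stub. A minimal sketch of how an overriding namer might do that; the path-to-module conversion below is an assumption for illustration, not from the source:

  // Hypothetical sketch only: derive the import module from getClientConfigPath instead of
  // returning an independent not-implemented string. The ".json"-stripping rule is assumed.
  public String getClientConfigImportModule(Interface service) {
    String path = getClientConfigPath(service);
    // e.g. "foo/bar/baz_client_config.json" -> "foo.bar.baz_client_config"
    return path.replace(".json", "").replace('/', '.');
  }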
| 1 | 21,630 | Please reuse `getClientConfigPath`. | googleapis-gapic-generator | java |
@@ -19,7 +19,7 @@ const { eachAsyncSeries, ns } = require('../../../src/utils');
const chai = require('chai');
chai.use(require('chai-subset'));
-chai.use(require('../../functional/spec-runner/matcher').default);
+chai.use(require('../../tools/spec-runner/matcher').default);
const expect = chai.expect;
| 1 | 'use strict';
const fs = require('fs');
const path = require('path');
const { Topology } = require('../../../src/sdam/topology');
const { TopologyType } = require('../../../src/sdam/common');
const { Server } = require('../../../src/sdam/server');
const { ServerDescription } = require('../../../src/sdam/server_description');
const sdamEvents = require('../../../src/sdam/events');
const { parseOptions } = require('../../../src/connection_string');
const sinon = require('sinon');
const { EJSON } = require('bson');
const { ConnectionPool } = require('../../../src/cmap/connection_pool');
const {
MongoNetworkError,
MongoNetworkTimeoutError,
MongoServerError
} = require('../../../src/error');
const { eachAsyncSeries, ns } = require('../../../src/utils');
const chai = require('chai');
chai.use(require('chai-subset'));
chai.use(require('../../functional/spec-runner/matcher').default);
const expect = chai.expect;
const specDir = path.resolve(__dirname, '../../spec/server-discovery-and-monitoring');
function collectTests() {
const testTypes = fs
.readdirSync(specDir)
.filter(d => fs.statSync(path.resolve(specDir, d)).isDirectory())
.filter(d => d !== 'integration');
const tests = {};
testTypes.forEach(testType => {
tests[testType] = fs
.readdirSync(path.join(specDir, testType))
.filter(f => path.extname(f) === '.json')
.map(f => {
const result = EJSON.parse(fs.readFileSync(path.join(specDir, testType, f)), {
relaxed: true
});
result.type = testType;
return result;
});
});
return tests;
}
describe('Server Discovery and Monitoring (spec)', function () {
let serverConnect;
before(() => {
serverConnect = sinon.stub(Server.prototype, 'connect').callsFake(function () {
this.s.state = 'connected';
this.emit('connect');
});
});
after(() => {
serverConnect.restore();
});
// DRIVERS-1249 should add directConnection and then update spec, remove skip
const shouldSkip = desc => {
const descriptions = [
'Monitoring a standalone connection',
'Monitoring a standalone connection - suppress update events for equal server descriptions'
];
return descriptions.includes(desc);
};
const specTests = collectTests();
Object.keys(specTests).forEach(specTestName => {
describe(specTestName, () => {
specTests[specTestName].forEach(testData => {
const skip = shouldSkip(testData.description);
const type = skip ? it.skip : it;
type(testData.description, {
metadata: { requires: { topology: 'single' } },
test: function (done) {
executeSDAMTest(testData, done);
}
});
});
});
});
});
const OUTCOME_TRANSLATIONS = new Map();
OUTCOME_TRANSLATIONS.set('topologyType', 'type');
function translateOutcomeKey(key) {
if (OUTCOME_TRANSLATIONS.has(key)) {
return OUTCOME_TRANSLATIONS.get(key);
}
return key;
}
function convertOutcomeEvents(events) {
return events.map(event => {
const eventType = Object.keys(event)[0];
const args = [];
Object.keys(event[eventType]).forEach(key => {
let argument = event[eventType][key];
if (argument.servers) {
argument.servers = argument.servers.reduce((result, server) => {
result[server.address] = normalizeServerDescription(server);
return result;
}, {});
}
Object.keys(argument).forEach(key => {
if (OUTCOME_TRANSLATIONS.has(key)) {
argument[OUTCOME_TRANSLATIONS.get(key)] = argument[key];
delete argument[key];
}
});
args.push(argument);
});
// convert snake case to camelCase with capital first letter
let eventClass = eventType.replace(/_\w/g, c => c[1].toUpperCase());
eventClass = eventClass.charAt(0).toUpperCase() + eventClass.slice(1);
args.unshift(null);
const eventConstructor = sdamEvents[eventClass];
const eventInstance = new (Function.prototype.bind.apply(eventConstructor, args))();
return eventInstance;
});
}
// iterates through expectation building a path of keys that should not exist (null), and
// removes them from the expectation (NOTE: this mutates the expectation)
function findOmittedFields(expected) {
const result = [];
Object.keys(expected).forEach(key => {
if (expected[key] == null) {
result.push(key);
delete expected[key];
}
});
return result;
}
function normalizeServerDescription(serverDescription) {
if (serverDescription.type === 'PossiblePrimary') {
// Some single-threaded drivers care a lot about ordering potential primary
// servers, in order to speed up selection. We don't care, so we'll just mark
// it as `Unknown`.
serverDescription.type = 'Unknown';
}
return serverDescription;
}
function cloneMap(map) {
const result = Object.create(null);
for (let key of map.keys()) {
result[key] = JSON.parse(JSON.stringify(map.get(key)));
}
return result;
}
function cloneForCompare(event) {
const result = JSON.parse(JSON.stringify(event));
['previousDescription', 'newDescription'].forEach(key => {
if (event[key] != null && event[key].servers != null) {
result[key].servers = cloneMap(event[key].servers);
}
});
return result;
}
function executeSDAMTest(testData, testDone) {
const options = parseOptions(testData.uri);
// create the topology
const topology = new Topology(options.hosts, options);
  // Each test will attempt to connect by doing server selection. We want the first call to
  // `selectServer` to use a fake, and then immediately restore the original behavior.
let topologySelectServers = sinon
.stub(Topology.prototype, 'selectServer')
.callsFake(function (selector, options, callback) {
topologySelectServers.restore();
const fakeServer = { s: { state: 'connected' }, removeListener: () => {} };
callback(undefined, fakeServer);
});
// listen for SDAM monitoring events
let events = [];
[
'serverOpening',
'serverClosed',
'serverDescriptionChanged',
'topologyOpening',
'topologyClosed',
'topologyDescriptionChanged',
'serverHeartbeatStarted',
'serverHeartbeatSucceeded',
'serverHeartbeatFailed'
].forEach(eventName => {
topology.on(eventName, event => events.push(event));
});
function done(err) {
topology.close(e => testDone(e || err));
}
const incompatibilityHandler = err => {
if (err.message.match(/but this version of the driver/)) return;
throw err;
};
// connect the topology
topology.connect(options, err => {
expect(err).to.not.exist;
eachAsyncSeries(
testData.phases,
(phase, cb) => {
function phaseDone() {
if (phase.outcome) {
assertOutcomeExpectations(topology, events, phase.outcome);
}
// remove error handler
topology.removeListener('error', incompatibilityHandler);
// reset the captured events for each phase
events = [];
cb();
}
const incompatibilityExpected = phase.outcome ? !phase.outcome.compatible : false;
if (incompatibilityExpected) {
topology.on('error', incompatibilityHandler);
}
// if (phase.description) {
// console.log(`[phase] ${phase.description}`);
// }
if (phase.responses) {
// simulate each hello response
phase.responses.forEach(response =>
topology.serverUpdateHandler(new ServerDescription(response[0], response[1]))
);
phaseDone();
} else if (phase.applicationErrors) {
eachAsyncSeries(
phase.applicationErrors,
(appError, phaseCb) => {
let withConnectionStub = sinon
.stub(ConnectionPool.prototype, 'withConnection')
.callsFake(withConnectionStubImpl(appError));
const server = topology.s.servers.get(appError.address);
server.command(ns('admin.$cmd'), { ping: 1 }, undefined, err => {
expect(err).to.exist;
withConnectionStub.restore();
phaseCb();
});
},
err => {
expect(err).to.not.exist;
phaseDone();
}
);
} else {
phaseDone();
}
},
err => {
expect(err).to.not.exist;
done();
}
);
});
}
function withConnectionStubImpl(appError) {
return function (conn, fn, callback) {
const connectionPool = this; // we are stubbing `withConnection` on the `ConnectionPool` class
const fakeConnection = {
generation:
typeof appError.generation === 'number' ? appError.generation : connectionPool.generation,
command: (ns, cmd, options, callback) => {
if (appError.type === 'network') {
callback(new MongoNetworkError('test generated'));
} else if (appError.type === 'timeout') {
callback(
new MongoNetworkTimeoutError('xxx timed out', {
beforeHandshake: appError.when === 'beforeHandshakeCompletes'
})
);
} else {
callback(new MongoServerError(appError.response));
}
}
};
fn(undefined, fakeConnection, (fnErr, result) => {
if (typeof callback === 'function') {
if (fnErr) {
callback(fnErr);
} else {
callback(undefined, result);
}
}
});
};
}
function assertOutcomeExpectations(topology, events, outcome) {
// then verify the resulting outcome
const description = topology.description;
Object.keys(outcome).forEach(key => {
const outcomeValue = outcome[key];
const translatedKey = translateOutcomeKey(key);
if (key === 'servers') {
expect(description).to.include.keys(translatedKey);
const expectedServers = outcomeValue;
const actualServers = description[translatedKey];
Object.keys(expectedServers).forEach(serverName => {
expect(actualServers).to.include.keys(serverName);
// TODO: clean all this up, always operate directly on `Server` not `ServerDescription`
if (expectedServers[serverName].pool) {
const expectedPool = expectedServers[serverName].pool;
delete expectedServers[serverName].pool;
const actualPoolGeneration = topology.s.servers.get(serverName).s.pool.generation;
expect(actualPoolGeneration).to.equal(expectedPool.generation);
}
const expectedServer = normalizeServerDescription(expectedServers[serverName]);
const omittedFields = findOmittedFields(expectedServer);
const actualServer = actualServers.get(serverName);
expect(actualServer).to.matchMongoSpec(expectedServer);
if (omittedFields.length) {
expect(actualServer).to.not.have.all.keys(omittedFields);
}
});
return;
}
// Load balancer mode has no monitor hello response and
// only expects address and compatible to be set in the
// server description.
if (description.type === TopologyType.LoadBalanced) {
      if (key !== 'address' && key !== 'compatible') {
return;
}
}
if (key === 'events') {
const expectedEvents = convertOutcomeEvents(outcomeValue);
expect(events).to.have.length(expectedEvents.length);
for (let i = 0; i < events.length; ++i) {
const expectedEvent = expectedEvents[i];
const actualEvent = cloneForCompare(events[i]);
expect(actualEvent).to.matchMongoSpec(expectedEvent);
}
return;
}
if (key === 'compatible' || key === 'setName') {
if (outcomeValue == null) {
expect(topology.description[key]).to.not.exist;
} else {
expect(topology.description).property(key).to.equal(outcomeValue);
}
return;
}
expect(description).to.include.keys(translatedKey);
if (outcomeValue == null) {
expect(description[translatedKey]).to.not.exist;
} else {
expect(description).property(translatedKey).to.eql(outcomeValue, `(key="${translatedKey}")`);
}
});
}
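
The review note recorded for this file (in the row metadata just below) suggests pulling the repeated `chai.use` calls into one shared chai-addon.js helper. A minimal sketch of such a helper, assuming sinon-chai and chai-subset as the plugins being consolidated (the file path and plugin list are assumptions, not taken from the driver repo):

// test/tools/chai-addon.js -- shared chai setup (hypothetical path)
'use strict';

const chai = require('chai');

// Register every chai plugin exactly once, instead of one chai.use() call per spec file.
chai.use(require('sinon-chai'));
chai.use(require('chai-subset'));

// Spec files would then import expect from here rather than from chai directly:
//   const { expect } = require('./tools/chai-addon');
module.exports = chai;
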
| 1 | 21,923 | Should we use the shared chai-addon.js file here? Actually we should consolidate any `chai.use` calls, I see 20 instances, quick win? | mongodb-node-mongodb-native | js |
@@ -109,7 +109,7 @@ namespace Nethermind.Network.P2P.ProtocolHandlers
HandleHello(Deserialize<HelloMessage>(msg.Data));
foreach (Capability capability in
- _agreedCapabilities.GroupBy(c => c.ProtocolCode).Select(c => c.OrderBy(v => v.Version).Last()))
+ _agreedCapabilities.GroupBy(c => c.ProtocolCode).Select(c => c.OrderBy(v => v.Version).Last()).OrderBy(c => c.ProtocolCode))
{
if (Logger.IsTrace) Logger.Trace($"{Session} Starting protocolHandler for {capability.ProtocolCode} v{capability.Version} on {Session.RemotePort}");
SubprotocolRequested?.Invoke(this, new ProtocolEventArgs(capability.ProtocolCode, capability.Version)); | 1 | // Copyright (c) 2021 Demerzel Solutions Limited
// This file is part of the Nethermind library.
//
// The Nethermind library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The Nethermind library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the Nethermind. If not, see <http://www.gnu.org/licenses/>.
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Diagnostics.CodeAnalysis;
using System.Linq;
using System.Threading;
using System.Threading.Tasks;
using Nethermind.Core;
using Nethermind.Core.Crypto;
using Nethermind.Logging;
using Nethermind.Network.P2P.EventArg;
using Nethermind.Network.P2P.Messages;
using Nethermind.Network.Rlpx;
using Nethermind.Stats;
using Nethermind.Stats.Model;
namespace Nethermind.Network.P2P.ProtocolHandlers
{
public class P2PProtocolHandler : ProtocolHandlerBase, IPingSender, IP2PProtocolHandler
{
private TaskCompletionSource<Packet> _pongCompletionSource;
private readonly INodeStatsManager _nodeStatsManager;
private bool _sentHello;
private List<Capability> _agreedCapabilities { get; }
private List<Capability> _availableCapabilities { get; set; }
public P2PProtocolHandler(
ISession session,
PublicKey localNodeId,
INodeStatsManager nodeStatsManager,
IMessageSerializationService serializer,
ILogManager logManager) : base(session, nodeStatsManager, serializer, logManager)
{
_nodeStatsManager = nodeStatsManager ?? throw new ArgumentNullException(nameof(nodeStatsManager));
LocalNodeId = localNodeId;
ListenPort = session.LocalPort;
_agreedCapabilities = new List<Capability>();
_availableCapabilities = new List<Capability>();
}
public IReadOnlyList<Capability> AgreedCapabilities { get { return _agreedCapabilities; } }
public IReadOnlyList<Capability> AvailableCapabilities { get { return _availableCapabilities; } }
public int ListenPort { get; }
public PublicKey LocalNodeId { get; }
public string RemoteClientId { get; private set; }
public bool HasAvailableCapability(Capability capability) => _availableCapabilities.Contains(capability);
public bool HasAgreedCapability(Capability capability) => _agreedCapabilities.Contains(capability);
public void AddSupportedCapability(Capability capability)
{
if (SupportedCapabilities.Contains(capability))
{
return;
}
SupportedCapabilities.Add(capability);
}
public override event EventHandler<ProtocolInitializedEventArgs> ProtocolInitialized;
public override event EventHandler<ProtocolEventArgs> SubprotocolRequested;
public override void Init()
{
SendHello();
// We are expecting to receive Hello message anytime from the handshake completion,
// irrespective of sending Hello from our side
CheckProtocolInitTimeout().ContinueWith(x =>
{
if (x.IsFaulted && Logger.IsError)
{
Logger.Error("Error during p2pProtocol handler timeout logic", x.Exception);
}
});
}
private byte _protocolVersion = 5;
public override byte ProtocolVersion => _protocolVersion;
public override string ProtocolCode => Protocol.P2P;
public override int MessageIdSpaceSize => 0x10;
public override void HandleMessage(Packet msg)
{
switch (msg.PacketType)
{
case P2PMessageCode.Hello:
{
Metrics.HellosReceived++;
HandleHello(Deserialize<HelloMessage>(msg.Data));
foreach (Capability capability in
_agreedCapabilities.GroupBy(c => c.ProtocolCode).Select(c => c.OrderBy(v => v.Version).Last()))
{
if (Logger.IsTrace) Logger.Trace($"{Session} Starting protocolHandler for {capability.ProtocolCode} v{capability.Version} on {Session.RemotePort}");
SubprotocolRequested?.Invoke(this, new ProtocolEventArgs(capability.ProtocolCode, capability.Version));
}
break;
}
case P2PMessageCode.Disconnect:
{
DisconnectMessage disconnectMessage = Deserialize<DisconnectMessage>(msg.Data);
ReportIn(disconnectMessage);
if (Logger.IsTrace)
{
string reason = Enum.IsDefined(typeof(DisconnectReason), (byte) disconnectMessage.Reason)
? ((DisconnectReason) disconnectMessage.Reason).ToString()
: disconnectMessage.Reason.ToString();
Logger.Trace($"{Session} Received disconnect ({reason}) on {Session.RemotePort}");
}
Close(disconnectMessage.Reason);
break;
}
case P2PMessageCode.Ping:
{
if (Logger.IsTrace) Logger.Trace($"{Session} Received PING on {Session.RemotePort}");
HandlePing();
break;
}
case P2PMessageCode.Pong:
{
if (Logger.IsTrace) Logger.Trace($"{Session} Received PONG on {Session.RemotePort}");
HandlePong(msg);
break;
}
case P2PMessageCode.AddCapability:
{
AddCapabilityMessage message = Deserialize<AddCapabilityMessage>(msg.Data);
Capability capability = message.Capability;
_agreedCapabilities.Add(message.Capability);
SupportedCapabilities.Add(message.Capability);
if (Logger.IsTrace)
Logger.Trace($"{Session.RemoteNodeId} Starting handler for {capability} on {Session.RemotePort}");
SubprotocolRequested?.Invoke(this, new ProtocolEventArgs(capability.ProtocolCode, capability.Version));
break;
}
default:
Logger.Error($"{Session.RemoteNodeId} Unhandled packet type: {msg.PacketType}");
break;
}
}
private void HandleHello(HelloMessage hello)
{
ReportIn(hello);
bool isInbound = !_sentHello;
if (Logger.IsTrace) Logger.Trace($"{Session} P2P received hello.");
if (!hello.NodeId.Equals(Session.RemoteNodeId))
{
if (Logger.IsDebug)
Logger.Debug($"Inconsistent Node ID details - expected {Session.RemoteNodeId}, " +
$"received hello with {hello.NodeId} " +
$"on {(isInbound ? "IN connection" : "OUT connection")}");
// it does not really matter if there is mismatch - we do not use it anywhere
// throw new NodeDetailsMismatchException();
}
RemoteClientId = hello.ClientId;
Session.Node.ClientId = hello.ClientId;
            if (Logger.IsTrace) Logger.Trace(!_sentHello
? $"{Session.RemoteNodeId} P2P initiating inbound {hello.Protocol}.{hello.P2PVersion} " +
$"on {hello.ListenPort} ({hello.ClientId})"
: $"{Session.RemoteNodeId} P2P initiating outbound {hello.Protocol}.{hello.P2PVersion} " +
$"on {hello.ListenPort} ({hello.ClientId})");
// https://github.com/ethereum/EIPs/blob/master/EIPS/eip-8.md
// Clients implementing a newer version simply send a packet with higher version and possibly additional list elements.
// * If such a packet is received by a node with lower version,
// it will blindly assume that the remote end is backwards-compatible and respond with the old handshake.
// * If the packet is received by a node with equal version,
// new features of the protocol can be used.
// * If the packet is received by a node with higher version,
// it can enable backwards-compatibility logic or drop the connection.
_protocolVersion = hello.P2PVersion;
List<Capability> capabilities = hello.Capabilities;
_availableCapabilities = new List<Capability>(capabilities);
foreach (Capability theirCapability in capabilities)
{
if (SupportedCapabilities.Contains(theirCapability))
{
if (Logger.IsTrace)
Logger.Trace($"{Session.RemoteNodeId} Agreed on {theirCapability.ProtocolCode} v{theirCapability.Version}");
_agreedCapabilities.Add(theirCapability);
}
else
{
if (Logger.IsTrace)
Logger.Trace($"{Session.RemoteNodeId} Capability not supported " +
$"{theirCapability.ProtocolCode} v{theirCapability.Version}");
}
}
if (_agreedCapabilities.Count == 0)
{
Session.InitiateDisconnect(
DisconnectReason.UselessPeer,
$"capabilities: {string.Join(", ", capabilities)}");
}
ReceivedProtocolInitMsg(hello);
P2PProtocolInitializedEventArgs eventArgs = new P2PProtocolInitializedEventArgs(this)
{
P2PVersion = ProtocolVersion,
ClientId = RemoteClientId,
Capabilities = capabilities,
ListenPort = hello.ListenPort
};
ProtocolInitialized?.Invoke(this, eventArgs);
}
[SuppressMessage("ReSharper", "HeuristicUnreachableCode")]
public async Task<bool> SendPing()
{
// ReSharper disable once AssignNullToNotNullAttribute
TaskCompletionSource<Packet> previousSource =
Interlocked.CompareExchange(ref _pongCompletionSource, new TaskCompletionSource<Packet>(), null);
// ReSharper disable once ConditionIsAlwaysTrueOrFalse
if (previousSource != null)
{
if (Logger.IsWarn) Logger.Warn($"Another ping request in process: {Session.Node:c}");
return true;
}
Task<Packet> pongTask = _pongCompletionSource.Task;
if (Logger.IsTrace) Logger.Trace($"{Session} P2P sending ping on {Session.RemotePort} ({RemoteClientId})");
Send(PingMessage.Instance);
_nodeStatsManager.ReportEvent(Session.Node, NodeStatsEventType.P2PPingOut);
Stopwatch stopwatch = Stopwatch.StartNew();
CancellationTokenSource delayCancellation = new CancellationTokenSource();
try
{
Task firstTask = await Task.WhenAny(pongTask, Task.Delay(Timeouts.P2PPing, delayCancellation.Token));
if (firstTask != pongTask)
{
_nodeStatsManager.ReportTransferSpeedEvent(
Session.Node,
TransferSpeedType.Latency,
(long) Timeouts.P2PPing.TotalMilliseconds);
return false;
}
long latency = stopwatch.ElapsedMilliseconds;
_nodeStatsManager.ReportTransferSpeedEvent(Session.Node, TransferSpeedType.Latency, latency);
return true;
}
finally
{
delayCancellation?.Cancel(); // do not remove ? -> ReSharper issue
_pongCompletionSource = null;
}
}
public override void DisconnectProtocol(DisconnectReason disconnectReason, string details)
{
if (Logger.IsTrace)
Logger.Trace($"Sending disconnect {disconnectReason} ({details}) to {Session.Node:s}");
DisconnectMessage message = new DisconnectMessage(disconnectReason);
Send(message);
            if (NetworkDiagTracer.IsEnabled)
NetworkDiagTracer.ReportDisconnect(Session.Node.Address, $"Local {disconnectReason} {details}");
}
protected override TimeSpan InitTimeout => Timeouts.P2PHello;
public static readonly IEnumerable<Capability> DefaultCapabilities = new Capability[]
{
new Capability(Protocol.Eth, 62),
new Capability(Protocol.Eth, 63),
new Capability(Protocol.Eth, 64),
new Capability(Protocol.Eth, 65),
new Capability(Protocol.Eth, 66),
// new Capability(Protocol.Les, 3)
};
private readonly List<Capability> SupportedCapabilities = DefaultCapabilities.ToList();
private void SendHello()
{
if (Logger.IsTrace)
{
Logger.Trace($"{Session} {Name} sending hello with Client ID {ClientVersion.Description}, " +
$"protocol {Name}, listen port {ListenPort}");
}
HelloMessage helloMessage = new HelloMessage
{
Capabilities = SupportedCapabilities,
ClientId = ClientVersion.Description,
NodeId = LocalNodeId,
ListenPort = ListenPort,
P2PVersion = ProtocolVersion
};
_sentHello = true;
Send(helloMessage);
Metrics.HellosSent++;
}
private void HandlePing()
{
ReportIn("Ping");
if (Logger.IsTrace) Logger.Trace($"{Session} P2P responding to ping");
Send(PongMessage.Instance);
}
private void Close(int disconnectReasonId)
{
DisconnectReason disconnectReason = (DisconnectReason) disconnectReasonId;
if (disconnectReason != DisconnectReason.TooManyPeers &&
disconnectReason != DisconnectReason.Other &&
disconnectReason != DisconnectReason.DisconnectRequested)
{
if (Logger.IsDebug) Logger.Debug($"{Session} received disconnect [{disconnectReason}]");
}
else
{
if (Logger.IsTrace) Logger.Trace($"{Session} P2P received disconnect [{disconnectReason}]");
}
// Received disconnect message, triggering direct TCP disconnection
Session.MarkDisconnected(disconnectReason, DisconnectType.Remote, "message");
}
public override string Name => Protocol.P2P;
private void HandlePong(Packet msg)
{
ReportIn("Pong");
if (Logger.IsTrace) Logger.Trace($"{Session} sending P2P pong");
_nodeStatsManager.ReportEvent(Session.Node, NodeStatsEventType.P2PPingIn);
_pongCompletionSource?.TrySetResult(msg);
}
public override void Dispose()
{
}
}
}
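
The review note for this record (in the row metadata just below) asks whether _agreedCapabilities and _availableCapabilities could be moved from List to SortedSet, ordered by protocol code and then version, which would also make the extra OrderBy in HandleMessage redundant. A rough sketch of what that could look like inside the handler class, assuming Capability exposes ProtocolCode (string) and Version (numeric) as the usages above suggest; the comparer type name is invented:

// Hypothetical comparer: orders capabilities by protocol code, then by version.
private sealed class CapabilityComparer : IComparer<Capability>
{
    public int Compare(Capability x, Capability y)
    {
        int byCode = string.CompareOrdinal(x.ProtocolCode, y.ProtocolCode);
        return byCode != 0 ? byCode : x.Version.CompareTo(y.Version);
    }
}

// Enumeration is then already ordered by (code, version), so the OrderBy calls in
// HandleMessage become unnecessary. Note that a SortedSet treats items that compare
// equal as duplicates, so repeated (code, version) pairs would be stored only once.
private readonly SortedSet<Capability> _agreedCapabilities = new SortedSet<Capability>(new CapabilityComparer());

One caveat with this sketch: the public AgreedCapabilities property currently returns IReadOnlyList<Capability>, so it would need to expose IReadOnlyCollection<Capability> (or a copy) instead if the backing store became a SortedSet.
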
| 1 | 26,501 | Maybe move _agreedCapabilities and _availableCapabilities from List to SortedSet ? We could order them by code and then by version already. | NethermindEth-nethermind | .cs |
@@ -3031,11 +3031,9 @@ func (o *consumer) setInitialPendingAndStart() {
// Here we are filtered.
dp := o.cfg.DeliverPolicy
if dp == DeliverLastPerSubject && o.hasSkipListPending() && o.sseq < o.lss.resume {
- if o.lss != nil {
- ss := mset.store.FilteredState(o.lss.resume, o.cfg.FilterSubject)
- o.sseq = o.lss.seqs[0]
- o.sgap = ss.Msgs + uint64(len(o.lss.seqs))
- }
+ ss := mset.store.FilteredState(o.lss.resume+1, o.cfg.FilterSubject)
+ o.sseq = o.lss.seqs[0]
+ o.sgap = ss.Msgs + uint64(len(o.lss.seqs))
} else if ss := mset.store.FilteredState(o.sseq, o.cfg.FilterSubject); ss.Msgs > 0 {
o.sgap = ss.Msgs
// See if we should update our starting sequence. | 1 | // Copyright 2019-2021 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"bytes"
"encoding/binary"
"encoding/json"
"errors"
"fmt"
"math/rand"
"reflect"
"sort"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/nats-io/nuid"
"golang.org/x/time/rate"
)
type ConsumerInfo struct {
Stream string `json:"stream_name"`
Name string `json:"name"`
Created time.Time `json:"created"`
Config *ConsumerConfig `json:"config,omitempty"`
Delivered SequencePair `json:"delivered"`
AckFloor SequencePair `json:"ack_floor"`
NumAckPending int `json:"num_ack_pending"`
NumRedelivered int `json:"num_redelivered"`
NumWaiting int `json:"num_waiting"`
NumPending uint64 `json:"num_pending"`
Cluster *ClusterInfo `json:"cluster,omitempty"`
}
type ConsumerConfig struct {
Durable string `json:"durable_name,omitempty"`
Description string `json:"description,omitempty"`
DeliverSubject string `json:"deliver_subject,omitempty"`
DeliverPolicy DeliverPolicy `json:"deliver_policy"`
OptStartSeq uint64 `json:"opt_start_seq,omitempty"`
OptStartTime *time.Time `json:"opt_start_time,omitempty"`
AckPolicy AckPolicy `json:"ack_policy"`
AckWait time.Duration `json:"ack_wait,omitempty"`
MaxDeliver int `json:"max_deliver,omitempty"`
FilterSubject string `json:"filter_subject,omitempty"`
ReplayPolicy ReplayPolicy `json:"replay_policy"`
RateLimit uint64 `json:"rate_limit_bps,omitempty"` // Bits per sec
SampleFrequency string `json:"sample_freq,omitempty"`
MaxWaiting int `json:"max_waiting,omitempty"`
MaxAckPending int `json:"max_ack_pending,omitempty"`
Heartbeat time.Duration `json:"idle_heartbeat,omitempty"`
FlowControl bool `json:"flow_control,omitempty"`
// Don't add to general clients.
Direct bool `json:"direct,omitempty"`
}
type CreateConsumerRequest struct {
Stream string `json:"stream_name"`
Config ConsumerConfig `json:"config"`
}
// DeliverPolicy determines how the consumer should select the first message to deliver.
type DeliverPolicy int
const (
// DeliverAll will be the default so can be omitted from the request.
DeliverAll DeliverPolicy = iota
// DeliverLast will start the consumer with the last sequence received.
DeliverLast
// DeliverNew will only deliver new messages that are sent after the consumer is created.
DeliverNew
// DeliverByStartSequence will look for a defined starting sequence to start.
DeliverByStartSequence
	// DeliverByStartTime will select the first message with a timestamp >= StartTime.
DeliverByStartTime
// DeliverLastPerSubject will start the consumer with the last message for all subjects received.
DeliverLastPerSubject
)
func (dp DeliverPolicy) String() string {
switch dp {
case DeliverAll:
return "all"
case DeliverLast:
return "last"
case DeliverNew:
return "new"
case DeliverByStartSequence:
return "by_start_sequence"
case DeliverByStartTime:
return "by_start_time"
case DeliverLastPerSubject:
return "last_per_subject"
default:
return "undefined"
}
}
// AckPolicy determines how the consumer should acknowledge delivered messages.
type AckPolicy int
const (
// AckNone requires no acks for delivered messages.
AckNone AckPolicy = iota
// AckAll when acking a sequence number, this implicitly acks all sequences below this one as well.
AckAll
// AckExplicit requires ack or nack for all messages.
AckExplicit
)
func (a AckPolicy) String() string {
switch a {
case AckNone:
return "none"
case AckAll:
return "all"
default:
return "explicit"
}
}
// ReplayPolicy determines how the consumer should replay messages it already has queued in the stream.
type ReplayPolicy int
const (
// ReplayInstant will replay messages as fast as possible.
ReplayInstant ReplayPolicy = iota
// ReplayOriginal will maintain the same timing as the messages were received.
ReplayOriginal
)
func (r ReplayPolicy) String() string {
switch r {
case ReplayInstant:
return "instant"
default:
return "original"
}
}
// OK
const OK = "+OK"
// Ack responses. Note that a nil or no payload is same as AckAck
var (
// Ack
AckAck = []byte("+ACK") // nil or no payload to ack subject also means ACK
AckOK = []byte(OK) // deprecated but +OK meant ack as well.
// Nack
AckNak = []byte("-NAK")
// Progress indicator
AckProgress = []byte("+WPI")
// Ack + Deliver the next message(s).
AckNext = []byte("+NXT")
// Terminate delivery of the message.
AckTerm = []byte("+TERM")
)
// Consumer is a jetstream consumer.
type consumer struct {
mu sync.RWMutex
js *jetStream
mset *stream
acc *Account
srv *Server
client *client
sysc *client
sid int
name string
stream string
sseq uint64
dseq uint64
adflr uint64
asflr uint64
sgap uint64
dsubj string
lss *lastSeqSkipList
rlimit *rate.Limiter
reqSub *subscription
ackSub *subscription
ackReplyT string
ackSubj string
nextMsgSubj string
maxp int
pblimit int
maxpb int
pbytes int
fcsz int
fcid string
fcSub *subscription
outq *jsOutQ
pending map[uint64]*Pending
ptmr *time.Timer
rdq []uint64
rdqi map[uint64]struct{}
rdc map[uint64]uint64
maxdc uint64
waiting *waitQueue
cfg ConsumerConfig
store ConsumerStore
active bool
replay bool
filterWC bool
dtmr *time.Timer
gwdtmr *time.Timer
dthresh time.Duration
mch chan struct{}
qch chan struct{}
inch chan bool
sfreq int32
ackEventT string
deliveryExcEventT string
created time.Time
closed bool
// Clustered.
ca *consumerAssignment
node RaftNode
infoSub *subscription
lqsent time.Time
// R>1 proposals
pch chan struct{}
phead *proposal
ptail *proposal
}
type proposal struct {
data []byte
next *proposal
}
const (
// JsAckWaitDefault is the default AckWait, only applicable on explicit ack policy consumers.
JsAckWaitDefault = 30 * time.Second
// JsDeleteWaitTimeDefault is the default amount of time we will wait for non-durable
// consumers to be in an inactive state before deleting them.
JsDeleteWaitTimeDefault = 5 * time.Second
// JsFlowControlMaxPending specifies default pending bytes during flow control that can be
// outstanding.
JsFlowControlMaxPending = 1 * 1024 * 1024
// JsDefaultMaxAckPending is set for consumers with explicit ack that do not set the max ack pending.
JsDefaultMaxAckPending = 20_000
)
func (mset *stream) addConsumer(config *ConsumerConfig) (*consumer, error) {
return mset.addConsumerWithAssignment(config, _EMPTY_, nil)
}
func (mset *stream) addConsumerWithAssignment(config *ConsumerConfig, oname string, ca *consumerAssignment) (*consumer, error) {
mset.mu.RLock()
s, jsa := mset.srv, mset.jsa
mset.mu.RUnlock()
// If we do not have the consumer currently assigned to us in cluster mode we will proceed but warn.
// This can happen on startup with restored state where on meta replay we still do not have
// the assignment. Running in single server mode this always returns true.
if oname != _EMPTY_ && !jsa.consumerAssigned(mset.name(), oname) {
s.Debugf("Consumer %q > %q does not seem to be assigned to this server", mset.name(), oname)
}
if config == nil {
return nil, ApiErrors[JSConsumerConfigRequiredErr]
}
if len(config.Description) > JSMaxDescriptionLen {
return nil, ApiErrors[JSConsumerDescriptionTooLongErrF].NewT("{max}", JSMaxDescriptionLen)
}
var err error
	// For now expect a literal subject if it's not empty. Empty means work queue mode (pull mode).
if config.DeliverSubject != _EMPTY_ {
if !subjectIsLiteral(config.DeliverSubject) {
return nil, ApiErrors[JSConsumerDeliverToWildcardsErr]
}
if mset.deliveryFormsCycle(config.DeliverSubject) {
return nil, ApiErrors[JSConsumerDeliverCycleErr]
}
if config.MaxWaiting != 0 {
return nil, ApiErrors[JSConsumerDeliverToWildcardsErr]
}
if config.MaxAckPending > 0 && config.AckPolicy == AckNone {
return nil, ApiErrors[JSConsumerMaxPendingAckPolicyRequiredErr]
}
if config.Heartbeat > 0 && config.Heartbeat < 100*time.Millisecond {
return nil, ApiErrors[JSConsumerSmallHeartbeatErr]
}
} else {
// Pull mode / work queue mode require explicit ack.
if config.AckPolicy != AckExplicit {
return nil, ApiErrors[JSConsumerPullRequiresAckErr]
}
// They are also required to be durable since otherwise we will not know when to
// clean them up.
if config.Durable == _EMPTY_ {
return nil, ApiErrors[JSConsumerPullNotDurableErr]
}
if config.RateLimit > 0 {
return nil, ApiErrors[JSConsumerPullWithRateLimitErr]
}
if config.MaxWaiting < 0 {
return nil, ApiErrors[JSConsumerMaxWaitingNegativeErr]
}
// Set to default if not specified.
if config.MaxWaiting == 0 {
config.MaxWaiting = JSWaitQueueDefaultMax
}
if config.Heartbeat > 0 {
return nil, ApiErrors[JSConsumerHBRequiresPushErr]
}
if config.FlowControl {
return nil, ApiErrors[JSConsumerFCRequiresPushErr]
}
}
// Direct need to be non-mapped ephemerals.
if config.Direct {
if config.DeliverSubject == _EMPTY_ {
return nil, ApiErrors[JSConsumerDirectRequiresPushErr]
}
if isDurableConsumer(config) {
return nil, ApiErrors[JSConsumerDirectRequiresEphemeralErr]
}
if ca != nil {
return nil, ApiErrors[JSConsumerOnMappedErr]
}
}
// Setup proper default for ack wait if we are in explicit ack mode.
if config.AckWait == 0 && (config.AckPolicy == AckExplicit || config.AckPolicy == AckAll) {
config.AckWait = JsAckWaitDefault
}
// Setup default of -1, meaning no limit for MaxDeliver.
if config.MaxDeliver == 0 {
config.MaxDeliver = -1
}
// Set proper default for max ack pending if we are ack explicit and none has been set.
if config.AckPolicy == AckExplicit && config.MaxAckPending == 0 {
config.MaxAckPending = JsDefaultMaxAckPending
}
// As best we can make sure the filtered subject is valid.
if config.FilterSubject != _EMPTY_ {
subjects, hasExt := mset.allSubjects()
if !validFilteredSubject(config.FilterSubject, subjects) && !hasExt {
return nil, ApiErrors[JSConsumerFilterNotSubsetErr]
}
}
// Check on start position conflicts.
switch config.DeliverPolicy {
case DeliverAll:
if config.OptStartSeq > 0 {
return nil, ApiErrors[JSConsumerInvalidPolicyErrF].NewT("{err}",
"consumer delivery policy is deliver all, but optional start sequence is also set")
}
if config.OptStartTime != nil {
return nil, ApiErrors[JSConsumerInvalidPolicyErrF].NewT("{err}",
"consumer delivery policy is deliver all, but optional start time is also set")
}
case DeliverLast:
if config.OptStartSeq > 0 {
return nil, ApiErrors[JSConsumerInvalidPolicyErrF].NewT("{err}",
"consumer delivery policy is deliver last, but optional start sequence is also set")
}
if config.OptStartTime != nil {
return nil, ApiErrors[JSConsumerInvalidPolicyErrF].NewT("{err}",
"consumer delivery policy is deliver last, but optional start time is also set")
}
case DeliverLastPerSubject:
if config.OptStartSeq > 0 {
return nil, ApiErrors[JSConsumerInvalidPolicyErrF].NewT("{err}",
"consumer delivery policy is deliver last per subject, but optional start sequence is also set")
}
if config.OptStartTime != nil {
return nil, ApiErrors[JSConsumerInvalidPolicyErrF].NewT("{err}",
"consumer delivery policy is deliver last per subject, but optional start time is also set")
}
badConfig := config.FilterSubject == _EMPTY_
if !badConfig {
subjects, ext := mset.allSubjects()
if len(subjects) == 1 && !ext && subjects[0] == config.FilterSubject && subjectIsLiteral(subjects[0]) {
badConfig = true
}
}
if badConfig {
return nil, ApiErrors[JSConsumerInvalidPolicyErrF].NewT("{err}",
"consumer delivery policy is deliver last per subject, but filter subject is not set")
}
case DeliverNew:
if config.OptStartSeq > 0 {
return nil, ApiErrors[JSConsumerInvalidPolicyErrF].NewT("{err}",
"consumer delivery policy is deliver new, but optional start sequence is also set")
}
if config.OptStartTime != nil {
return nil, ApiErrors[JSConsumerInvalidPolicyErrF].NewT("{err}",
"consumer delivery policy is deliver new, but optional start time is also set")
}
case DeliverByStartSequence:
if config.OptStartSeq == 0 {
return nil, ApiErrors[JSConsumerInvalidPolicyErrF].NewT("{err}",
"consumer delivery policy is deliver by start sequence, but optional start sequence is not set")
}
if config.OptStartTime != nil {
return nil, ApiErrors[JSConsumerInvalidPolicyErrF].NewT("{err}",
"consumer delivery policy is deliver by start sequence, but optional start time is also set")
}
case DeliverByStartTime:
if config.OptStartTime == nil {
return nil, ApiErrors[JSConsumerInvalidPolicyErrF].NewT("{err}",
"consumer delivery policy is deliver by start time, but optional start time is not set")
}
if config.OptStartSeq != 0 {
return nil, ApiErrors[JSConsumerInvalidPolicyErrF].NewT("{err}",
"consumer delivery policy is deliver by start time, but optional start sequence is also set")
}
}
sampleFreq := 0
if config.SampleFrequency != _EMPTY_ {
s := strings.TrimSuffix(config.SampleFrequency, "%")
sampleFreq, err = strconv.Atoi(s)
if err != nil {
return nil, ApiErrors[JSConsumerInvalidSamplingErrF].NewT("{err}", err)
}
}
// Grab the client, account and server reference.
c := mset.client
if c == nil {
return nil, ApiErrors[JSStreamInvalidErr]
}
c.mu.Lock()
s, a := c.srv, c.acc
c.mu.Unlock()
// Hold mset lock here.
mset.mu.Lock()
// If this one is durable and already exists, we let that be ok as long as the configs match.
if isDurableConsumer(config) {
if eo, ok := mset.consumers[config.Durable]; ok {
mset.mu.Unlock()
ocfg := eo.config()
if reflect.DeepEqual(&ocfg, config) {
return eo, nil
} else {
// If we are a push mode and not active and the only difference
// is deliver subject then update and return.
if configsEqualSansDelivery(ocfg, *config) && eo.hasNoLocalInterest() {
eo.updateDeliverSubject(config.DeliverSubject)
return eo, nil
} else {
return nil, ApiErrors[JSConsumerNameExistErr]
}
}
}
}
// Check for any limits, if the config for the consumer sets a limit we check against that
// but if not we use the value from account limits, if account limits is more restrictive
// than stream config we prefer the account limits to handle cases where account limits are
// updated during the lifecycle of the stream
maxc := mset.cfg.MaxConsumers
if maxc <= 0 || (mset.jsa.limits.MaxConsumers > 0 && mset.jsa.limits.MaxConsumers < maxc) {
maxc = mset.jsa.limits.MaxConsumers
}
if maxc > 0 && len(mset.consumers) >= maxc {
mset.mu.Unlock()
return nil, ApiErrors[JSMaximumConsumersLimitErr]
}
// Check on stream type conflicts with WorkQueues.
if mset.cfg.Retention == WorkQueuePolicy && !config.Direct {
// Force explicit acks here.
if config.AckPolicy != AckExplicit {
mset.mu.Unlock()
return nil, ApiErrors[JSConsumerWQRequiresExplicitAckErr]
}
if len(mset.consumers) > 0 {
if config.FilterSubject == _EMPTY_ {
mset.mu.Unlock()
return nil, ApiErrors[JSConsumerWQMultipleUnfilteredErr]
} else if !mset.partitionUnique(config.FilterSubject) {
// We have a partition but it is not unique amongst the others.
mset.mu.Unlock()
return nil, ApiErrors[JSConsumerWQConsumerNotUniqueErr]
}
}
if config.DeliverPolicy != DeliverAll {
mset.mu.Unlock()
return nil, ApiErrors[JSConsumerWQConsumerNotDeliverAllErr]
}
}
// Set name, which will be durable name if set, otherwise we create one at random.
o := &consumer{
mset: mset,
js: s.getJetStream(),
acc: a,
srv: s,
client: s.createInternalJetStreamClient(),
sysc: s.createInternalJetStreamClient(),
cfg: *config,
dsubj: config.DeliverSubject,
outq: mset.outq,
active: true,
qch: make(chan struct{}),
mch: make(chan struct{}, 1),
sfreq: int32(sampleFreq),
maxdc: uint64(config.MaxDeliver),
maxp: config.MaxAckPending,
created: time.Now().UTC(),
}
// Bind internal client to the user account.
o.client.registerWithAccount(a)
// Bind to the system account.
o.sysc.registerWithAccount(s.SystemAccount())
if isDurableConsumer(config) {
if len(config.Durable) > JSMaxNameLen {
mset.mu.Unlock()
o.deleteWithoutAdvisory()
return nil, ApiErrors[JSConsumerNameTooLongErrF].NewT("{max}", JSMaxNameLen)
}
o.name = config.Durable
if o.isPullMode() {
o.waiting = newWaitQueue(config.MaxWaiting)
}
} else if oname != _EMPTY_ {
o.name = oname
} else {
for {
o.name = createConsumerName()
if _, ok := mset.consumers[o.name]; !ok {
break
}
}
}
// Check if we have a rate limit set.
if config.RateLimit != 0 {
// TODO(dlc) - Make sane values or error if not sane?
// We are configured in bits per sec so adjust to bytes.
rl := rate.Limit(config.RateLimit / 8)
// Burst should be set to maximum msg size for this account, etc.
var burst int
if mset.cfg.MaxMsgSize > 0 {
burst = int(mset.cfg.MaxMsgSize)
} else if mset.jsa.account.limits.mpay > 0 {
burst = int(mset.jsa.account.limits.mpay)
} else {
s := mset.jsa.account.srv
burst = int(s.getOpts().MaxPayload)
}
o.rlimit = rate.NewLimiter(rl, burst)
}
// Check if we have filtered subject that is a wildcard.
if config.FilterSubject != _EMPTY_ && subjectHasWildcard(config.FilterSubject) {
o.filterWC = true
}
// already under lock, mset.Name() would deadlock
o.stream = mset.cfg.Name
o.ackEventT = JSMetricConsumerAckPre + "." + o.stream + "." + o.name
o.deliveryExcEventT = JSAdvisoryConsumerMaxDeliveryExceedPre + "." + o.stream + "." + o.name
if !isValidName(o.name) {
mset.mu.Unlock()
o.deleteWithoutAdvisory()
return nil, ApiErrors[JSConsumerBadDurableNameErr]
}
// Select starting sequence number
o.selectStartingSeqNo()
if !config.Direct {
store, err := mset.store.ConsumerStore(o.name, config)
if err != nil {
mset.mu.Unlock()
o.deleteWithoutAdvisory()
return nil, ApiErrors[JSConsumerStoreFailedErrF].NewT("{err}", err)
}
o.store = store
}
// Now register with mset and create the ack subscription.
// Check if we already have this one registered.
if eo, ok := mset.consumers[o.name]; ok {
mset.mu.Unlock()
if !o.isDurable() || !o.isPushMode() {
o.name = _EMPTY_ // Prevent removal since same name.
o.deleteWithoutAdvisory()
return nil, ApiErrors[JSConsumerNameExistErr]
}
// If we are here we have already registered this durable. If it is still active that is an error.
if eo.isActive() {
o.name = _EMPTY_ // Prevent removal since same name.
o.deleteWithoutAdvisory()
return nil, ApiErrors[JSConsumerExistingActiveErr]
}
// Since we are here this means we have a potentially new durable so we should update here.
// Check that configs are the same.
if !configsEqualSansDelivery(o.cfg, eo.cfg) {
o.name = _EMPTY_ // Prevent removal since same name.
o.deleteWithoutAdvisory()
return nil, ApiErrors[JSConsumerReplacementWithDifferentNameErr]
}
// Once we are here we have a replacement push-based durable.
eo.updateDeliverSubject(o.cfg.DeliverSubject)
return eo, nil
}
// Set up the ack subscription for this consumer. Will use wildcard for all acks.
// We will remember the template to generate replies with sequence numbers and use
// that to scanf them back in.
mn := mset.cfg.Name
pre := fmt.Sprintf(jsAckT, mn, o.name)
o.ackReplyT = fmt.Sprintf("%s.%%d.%%d.%%d.%%d.%%d", pre)
o.ackSubj = fmt.Sprintf("%s.*.*.*.*.*", pre)
o.nextMsgSubj = fmt.Sprintf(JSApiRequestNextT, mn, o.name)
if o.isPushMode() {
o.dthresh = JsDeleteWaitTimeDefault
if !o.isDurable() {
// Check if we are not durable that the delivery subject has interest.
// Check in place here for interest. Will setup properly in setLeader.
r := o.acc.sl.Match(o.cfg.DeliverSubject)
if !o.hasDeliveryInterest(len(r.psubs)+len(r.qsubs) > 0) {
// Let the interest come to us eventually, but setup delete timer.
o.updateDeliveryInterest(false)
}
}
}
// Set our ca.
if ca != nil {
o.setConsumerAssignment(ca)
}
mset.setConsumer(o)
mset.mu.Unlock()
if config.Direct || (!s.JetStreamIsClustered() && s.standAloneMode()) {
o.setLeader(true)
}
// This is always true in single server mode.
if o.isLeader() {
// Send advisory.
var suppress bool
if !s.standAloneMode() && ca == nil {
suppress = true
} else if ca != nil {
suppress = ca.responded
}
if !suppress {
o.sendCreateAdvisory()
}
}
return o, nil
}
func (o *consumer) consumerAssignment() *consumerAssignment {
o.mu.RLock()
defer o.mu.RUnlock()
return o.ca
}
func (o *consumer) setConsumerAssignment(ca *consumerAssignment) {
o.mu.Lock()
defer o.mu.Unlock()
o.ca = ca
// Set our node.
if ca != nil {
o.node = ca.Group.node
}
}
// Lock should be held.
func (o *consumer) isLeader() bool {
if o.node != nil {
return o.node.Leader()
}
return true
}
func (o *consumer) setLeader(isLeader bool) {
o.mu.RLock()
mset := o.mset
isRunning := o.ackSub != nil
o.mu.RUnlock()
// If we are here we have a change in leader status.
if isLeader {
if mset == nil || isRunning {
return
}
mset.mu.RLock()
s, jsa, stream := mset.srv, mset.jsa, mset.cfg.Name
mset.mu.RUnlock()
o.mu.Lock()
// Restore our saved state. During non-leader status we just update our underlying store.
o.readStoredState()
// Do info sub.
if o.infoSub == nil && jsa != nil {
isubj := fmt.Sprintf(clusterConsumerInfoT, jsa.acc(), stream, o.name)
// Note below the way we subscribe here is so that we can send requests to ourselves.
o.infoSub, _ = s.systemSubscribe(isubj, _EMPTY_, false, o.sysc, o.handleClusterConsumerInfoRequest)
}
var err error
if o.ackSub, err = o.subscribeInternal(o.ackSubj, o.processAck); err != nil {
o.mu.Unlock()
o.deleteWithoutAdvisory()
return
}
// Setup the internal sub for next message requests regardless.
// Will error if wrong mode to provide feedback to users.
if o.reqSub, err = o.subscribeInternal(o.nextMsgSubj, o.processNextMsgReq); err != nil {
o.mu.Unlock()
o.deleteWithoutAdvisory()
return
}
// Check on flow control settings.
if o.cfg.FlowControl {
o.setMaxPendingBytes(JsFlowControlMaxPending)
fcsubj := fmt.Sprintf(jsFlowControl, stream, o.name)
if o.fcSub, err = o.subscribeInternal(fcsubj, o.processFlowControl); err != nil {
o.mu.Unlock()
o.deleteWithoutAdvisory()
return
}
}
// Setup initial pending and proper start sequence.
o.setInitialPendingAndStart()
// If push mode, register for notifications on interest.
if o.isPushMode() {
o.inch = make(chan bool, 8)
o.acc.sl.RegisterNotification(o.cfg.DeliverSubject, o.inch)
if o.active = <-o.inch; !o.active {
// Check gateways in case they are enabled.
if s.gateway.enabled {
o.active = s.hasGatewayInterest(o.acc.Name, o.cfg.DeliverSubject)
stopAndClearTimer(&o.gwdtmr)
o.gwdtmr = time.AfterFunc(time.Second, func() { o.watchGWinterest() })
}
}
}
// If we are not in ReplayInstant mode mark us as in replay state until resolved.
if o.cfg.ReplayPolicy != ReplayInstant {
o.replay = true
}
// Recreate quit channel.
o.qch = make(chan struct{})
qch := o.qch
node := o.node
if node != nil && o.pch == nil {
o.pch = make(chan struct{}, 1)
}
o.mu.Unlock()
// Now start up Go routine to deliver msgs.
go o.loopAndGatherMsgs(qch)
// If we are R>1 spin up our proposal loop.
if node != nil {
go o.loopAndForwardProposals(qch)
}
} else {
// Shutdown the go routines and the subscriptions.
o.mu.Lock()
o.unsubscribe(o.ackSub)
o.unsubscribe(o.reqSub)
o.unsubscribe(o.fcSub)
o.ackSub = nil
o.reqSub = nil
o.fcSub = nil
if o.infoSub != nil {
o.srv.sysUnsubscribe(o.infoSub)
o.infoSub = nil
}
if o.qch != nil {
close(o.qch)
o.qch = nil
}
o.mu.Unlock()
}
}
func (o *consumer) handleClusterConsumerInfoRequest(sub *subscription, c *client, _ *Account, subject, reply string, msg []byte) {
o.mu.RLock()
sysc := o.sysc
o.mu.RUnlock()
sysc.sendInternalMsg(reply, _EMPTY_, nil, o.info())
}
// Lock should be held.
func (o *consumer) subscribeInternal(subject string, cb msgHandler) (*subscription, error) {
c := o.client
if c == nil {
return nil, fmt.Errorf("invalid consumer")
}
if !c.srv.eventsEnabled() {
return nil, ErrNoSysAccount
}
if cb == nil {
return nil, fmt.Errorf("undefined message handler")
}
o.sid++
// Now create the subscription
return c.processSub([]byte(subject), nil, []byte(strconv.Itoa(o.sid)), cb, false)
}
// Unsubscribe from our subscription.
// Lock should be held.
func (o *consumer) unsubscribe(sub *subscription) {
if sub == nil || o.client == nil {
return
}
o.client.processUnsub(sub.sid)
}
// We need to make sure we protect access to the outq.
// Do all advisory sends here.
func (o *consumer) sendAdvisory(subj string, msg []byte) {
o.outq.sendMsg(subj, msg)
}
func (o *consumer) sendDeleteAdvisoryLocked() {
e := JSConsumerActionAdvisory{
TypedEvent: TypedEvent{
Type: JSConsumerActionAdvisoryType,
ID: nuid.Next(),
Time: time.Now().UTC(),
},
Stream: o.stream,
Consumer: o.name,
Action: DeleteEvent,
Domain: o.srv.getOpts().JetStreamDomain,
}
j, err := json.Marshal(e)
if err != nil {
return
}
subj := JSAdvisoryConsumerDeletedPre + "." + o.stream + "." + o.name
o.sendAdvisory(subj, j)
}
func (o *consumer) sendCreateAdvisory() {
o.mu.Lock()
defer o.mu.Unlock()
e := JSConsumerActionAdvisory{
TypedEvent: TypedEvent{
Type: JSConsumerActionAdvisoryType,
ID: nuid.Next(),
Time: time.Now().UTC(),
},
Stream: o.stream,
Consumer: o.name,
Action: CreateEvent,
Domain: o.srv.getOpts().JetStreamDomain,
}
j, err := json.Marshal(e)
if err != nil {
return
}
subj := JSAdvisoryConsumerCreatedPre + "." + o.stream + "." + o.name
o.sendAdvisory(subj, j)
}
// Created returns created time.
func (o *consumer) createdTime() time.Time {
o.mu.Lock()
created := o.created
o.mu.Unlock()
return created
}
// Internal to allow creation time to be restored.
func (o *consumer) setCreatedTime(created time.Time) {
o.mu.Lock()
o.created = created
o.mu.Unlock()
}
// This will check for extended interest in a subject. If we have local interest we just return
// that, but in the absence of local interest and presence of gateways or service imports we need
// to check those as well.
func (o *consumer) hasDeliveryInterest(localInterest bool) bool {
o.mu.Lock()
mset := o.mset
if mset == nil {
o.mu.Unlock()
return false
}
acc := o.acc
deliver := o.cfg.DeliverSubject
o.mu.Unlock()
if localInterest {
return true
}
// If we are here check gateways.
if s := acc.srv; s != nil && s.hasGatewayInterest(acc.Name, deliver) {
return true
}
return false
}
func (s *Server) hasGatewayInterest(account, subject string) bool {
gw := s.gateway
if !gw.enabled {
return false
}
gw.RLock()
defer gw.RUnlock()
for _, gwc := range gw.outo {
psi, qr := gwc.gatewayInterest(account, subject)
if psi || qr != nil {
return true
}
}
return false
}
// This processes an update to the local interest for a deliver subject.
func (o *consumer) updateDeliveryInterest(localInterest bool) bool {
interest := o.hasDeliveryInterest(localInterest)
o.mu.Lock()
defer o.mu.Unlock()
mset := o.mset
if mset == nil || o.isPullMode() {
return false
}
if interest && !o.active {
o.signalNewMessages()
}
o.active = interest
// If the delete timer has already been set do not clear here and return.
if o.dtmr != nil && !o.isDurable() && !interest {
return true
}
// Stop and clear the delete timer always.
stopAndClearTimer(&o.dtmr)
// If we do not have interest anymore and we are not durable start
// a timer to delete us. We wait for a bit in case of server reconnect.
if !o.isDurable() && !interest {
o.dtmr = time.AfterFunc(o.dthresh, func() { o.deleteNotActive() })
return true
}
return false
}
func (o *consumer) deleteNotActive() {
	// Need to check again that there is still no interest now that the timer has fired.
if !o.hasNoLocalInterest() {
return
}
o.mu.RLock()
if o.mset == nil {
o.mu.RUnlock()
return
}
s, js := o.mset.srv, o.mset.srv.js
acc, stream, name := o.acc.Name, o.stream, o.name
o.mu.RUnlock()
// If we are clustered, check if we still have this consumer assigned.
// If we do forward a proposal to delete ourselves to the metacontroller leader.
if s.JetStreamIsClustered() {
js.mu.RLock()
ca := js.consumerAssignment(acc, stream, name)
cc := js.cluster
js.mu.RUnlock()
if ca != nil && cc != nil {
cca := *ca
cca.Reply = _EMPTY_
meta, removeEntry := cc.meta, encodeDeleteConsumerAssignment(&cca)
meta.ForwardProposal(removeEntry)
// Check to make sure we went away.
// Don't think this needs to be a monitored go routine.
go func() {
ticker := time.NewTicker(time.Second)
defer ticker.Stop()
for range ticker.C {
js.mu.RLock()
ca := js.consumerAssignment(acc, stream, name)
js.mu.RUnlock()
if ca != nil {
s.Warnf("Consumer assignment not cleaned up, retrying")
meta.ForwardProposal(removeEntry)
} else {
return
}
}
}()
}
}
// We will delete here regardless.
o.delete()
}
func (o *consumer) watchGWinterest() {
pa := o.isActive()
// If there is no local interest...
if o.hasNoLocalInterest() {
o.updateDeliveryInterest(false)
if !pa && o.isActive() {
o.signalNewMessages()
}
}
// We want this to always be running so we can also pick up on interest returning.
o.mu.Lock()
if o.gwdtmr != nil {
o.gwdtmr.Reset(time.Second)
} else {
stopAndClearTimer(&o.gwdtmr)
o.gwdtmr = time.AfterFunc(time.Second, func() { o.watchGWinterest() })
}
o.mu.Unlock()
}
// Config returns the consumer's configuration.
func (o *consumer) config() ConsumerConfig {
o.mu.Lock()
defer o.mu.Unlock()
return o.cfg
}
// Force expiration of all pending.
// Lock should be held.
func (o *consumer) forceExpirePending() {
var expired []uint64
for seq := range o.pending {
if !o.onRedeliverQueue(seq) {
expired = append(expired, seq)
}
}
if len(expired) > 0 {
sort.Slice(expired, func(i, j int) bool { return expired[i] < expired[j] })
o.addToRedeliverQueue(expired...)
// Now we should update the timestamp here since we are redelivering.
// We will use an incrementing time to preserve order for any other redelivery.
off := time.Now().UnixNano() - o.pending[expired[0]].Timestamp
for _, seq := range expired {
if p, ok := o.pending[seq]; ok && p != nil {
p.Timestamp += off
}
}
o.ptmr.Reset(o.ackWait(0))
}
o.signalNewMessages()
}
// This is a config change for the delivery subject for a
// push based consumer.
func (o *consumer) updateDeliverSubject(newDeliver string) {
// Update the config and the dsubj
o.mu.Lock()
defer o.mu.Unlock()
if o.closed || o.isPullMode() || o.cfg.DeliverSubject == newDeliver {
return
}
// Force redeliver of all pending on change of delivery subject.
if len(o.pending) > 0 {
o.forceExpirePending()
}
o.acc.sl.ClearNotification(o.dsubj, o.inch)
o.dsubj, o.cfg.DeliverSubject = newDeliver, newDeliver
// When we register new one it will deliver to update state loop.
o.acc.sl.RegisterNotification(newDeliver, o.inch)
}
// Check that configs are equal but allow delivery subjects to be different.
func configsEqualSansDelivery(a, b ConsumerConfig) bool {
// These were copied in so can set Delivery here.
a.DeliverSubject, b.DeliverSubject = _EMPTY_, _EMPTY_
return a == b
}
// Helper to send a reply to an ack.
func (o *consumer) sendAckReply(subj string) {
o.mu.Lock()
defer o.mu.Unlock()
o.sendAdvisory(subj, nil)
}
// Process a message for the ack reply subject delivered with a message.
func (o *consumer) processAck(_ *subscription, c *client, acc *Account, subject, reply string, rmsg []byte) {
_, msg := c.msgParts(rmsg)
sseq, dseq, dc := ackReplyInfo(subject)
skipAckReply := sseq == 0
switch {
case len(msg) == 0, bytes.Equal(msg, AckAck), bytes.Equal(msg, AckOK):
o.processAckMsg(sseq, dseq, dc, true)
case bytes.HasPrefix(msg, AckNext):
o.processAckMsg(sseq, dseq, dc, true)
// processNextMsgReq can be invoked from an internal subscription or from here.
// Therefore, it has to call msgParts(), so we can't simply pass msg[len(AckNext):]
// with current c.pa.hdr because it would cause a panic. We will save the current
// c.pa.hdr value and disable headers before calling processNextMsgReq and then
// restore so that we don't mess with the calling stack in case it is used
// somewhere else.
phdr := c.pa.hdr
c.pa.hdr = -1
o.processNextMsgReq(nil, c, acc, subject, reply, msg[len(AckNext):])
c.pa.hdr = phdr
skipAckReply = true
case bytes.Equal(msg, AckNak):
o.processNak(sseq, dseq)
case bytes.Equal(msg, AckProgress):
o.progressUpdate(sseq)
case bytes.Equal(msg, AckTerm):
o.processTerm(sseq, dseq, dc)
}
// Ack the ack if requested.
if len(reply) > 0 && !skipAckReply {
o.sendAckReply(reply)
}
}
// Used to process a working update to delay redelivery.
func (o *consumer) progressUpdate(seq uint64) {
o.mu.Lock()
if len(o.pending) > 0 {
if p, ok := o.pending[seq]; ok {
p.Timestamp = time.Now().UnixNano()
// Update store system.
o.updateDelivered(p.Sequence, seq, 1, p.Timestamp)
}
}
o.mu.Unlock()
}
// Lock should be held.
func (o *consumer) updateSkipped() {
// Clustered mode and R>1 only.
if o.node == nil || !o.isLeader() {
return
}
var b [1 + 8]byte
b[0] = byte(updateSkipOp)
var le = binary.LittleEndian
le.PutUint64(b[1:], o.sseq)
o.propose(b[:])
}
func (o *consumer) loopAndForwardProposals(qch chan struct{}) {
o.mu.RLock()
node, pch := o.node, o.pch
o.mu.RUnlock()
if node == nil || pch == nil {
return
}
forwardProposals := func() {
o.mu.Lock()
proposal := o.phead
o.phead, o.ptail = nil, nil
o.mu.Unlock()
// 256k max for now per batch.
const maxBatch = 256 * 1024
var entries []*Entry
for sz := 0; proposal != nil; proposal = proposal.next {
entries = append(entries, &Entry{EntryNormal, proposal.data})
sz += len(proposal.data)
if sz > maxBatch {
node.ProposeDirect(entries)
sz, entries = 0, entries[:0]
}
}
if len(entries) > 0 {
node.ProposeDirect(entries)
}
}
// In case we have anything pending on entry.
forwardProposals()
for {
select {
case <-qch:
forwardProposals()
return
case <-pch:
forwardProposals()
}
}
}
// Lock should be held.
func (o *consumer) propose(entry []byte) {
var notify bool
p := &proposal{data: entry}
if o.phead == nil {
o.phead = p
notify = true
} else {
o.ptail.next = p
}
o.ptail = p
// Kick our looper routine if needed.
if notify {
select {
case o.pch <- struct{}{}:
default:
}
}
}
// Lock should be held.
func (o *consumer) updateDelivered(dseq, sseq, dc uint64, ts int64) {
// Clustered mode and R>1.
if o.node != nil {
// Inline for now, use variable compression.
var b [4*binary.MaxVarintLen64 + 1]byte
b[0] = byte(updateDeliveredOp)
n := 1
n += binary.PutUvarint(b[n:], dseq)
n += binary.PutUvarint(b[n:], sseq)
n += binary.PutUvarint(b[n:], dc)
n += binary.PutVarint(b[n:], ts)
o.propose(b[:n])
}
if o.store != nil {
// Update local state always.
o.store.UpdateDelivered(dseq, sseq, dc, ts)
}
}
// Lock should be held.
func (o *consumer) updateAcks(dseq, sseq uint64) {
if o.node != nil {
// Inline for now, use variable compression.
var b [2*binary.MaxVarintLen64 + 1]byte
b[0] = byte(updateAcksOp)
n := 1
n += binary.PutUvarint(b[n:], dseq)
n += binary.PutUvarint(b[n:], sseq)
o.propose(b[:n])
} else if o.store != nil {
o.store.UpdateAcks(dseq, sseq)
}
}
// Process a NAK.
func (o *consumer) processNak(sseq, dseq uint64) {
o.mu.Lock()
defer o.mu.Unlock()
// Check for out of range.
if dseq <= o.adflr || dseq > o.dseq {
return
}
// If we are explicit ack make sure this is still on our pending list.
if len(o.pending) > 0 {
if _, ok := o.pending[sseq]; !ok {
return
}
}
// If already queued up also ignore.
if !o.onRedeliverQueue(sseq) {
o.addToRedeliverQueue(sseq)
}
o.signalNewMessages()
}
// Process a TERM
func (o *consumer) processTerm(sseq, dseq, dc uint64) {
// Treat like an ack to suppress redelivery.
o.processAckMsg(sseq, dseq, dc, false)
o.mu.Lock()
defer o.mu.Unlock()
// Deliver an advisory
e := JSConsumerDeliveryTerminatedAdvisory{
TypedEvent: TypedEvent{
Type: JSConsumerDeliveryTerminatedAdvisoryType,
ID: nuid.Next(),
Time: time.Now().UTC(),
},
Stream: o.stream,
Consumer: o.name,
ConsumerSeq: dseq,
StreamSeq: sseq,
Deliveries: dc,
Domain: o.srv.getOpts().JetStreamDomain,
}
j, err := json.Marshal(e)
if err != nil {
return
}
subj := JSAdvisoryConsumerMsgTerminatedPre + "." + o.stream + "." + o.name
o.sendAdvisory(subj, j)
}
// Introduce a small delay in when timer fires to check pending.
// Allows bursts to be treated in same time frame.
const ackWaitDelay = time.Millisecond
// ackWait returns how long to wait to fire the pending timer.
func (o *consumer) ackWait(next time.Duration) time.Duration {
if next > 0 {
return next + ackWaitDelay
}
return o.cfg.AckWait + ackWaitDelay
}
// This will restore the state from disk.
func (o *consumer) readStoredState() error {
if o.store == nil {
return nil
}
state, err := o.store.State()
if err == nil && state != nil {
o.applyState(state)
}
return err
}
// Apply the consumer stored state.
func (o *consumer) applyState(state *ConsumerState) {
if state == nil {
return
}
o.dseq = state.Delivered.Consumer + 1
o.sseq = state.Delivered.Stream + 1
o.adflr = state.AckFloor.Consumer
o.asflr = state.AckFloor.Stream
o.pending = state.Pending
o.rdc = state.Redelivered
// Setup tracking timer if we have restored pending.
if len(o.pending) > 0 && o.ptmr == nil {
o.ptmr = time.AfterFunc(o.ackWait(0), o.checkPending)
}
}
func (o *consumer) readStoreState() *ConsumerState {
o.mu.RLock()
defer o.mu.RUnlock()
if o.store == nil {
return nil
}
state, _ := o.store.State()
return state
}
// Sets our store state from another source. Used in clustered mode on snapshot restore.
func (o *consumer) setStoreState(state *ConsumerState) error {
if state == nil || o.store == nil {
return nil
}
o.applyState(state)
return o.store.Update(state)
}
// Update our state to the store.
func (o *consumer) writeStoreState() error {
o.mu.Lock()
defer o.mu.Unlock()
if o.store == nil {
return nil
}
state := ConsumerState{
Delivered: SequencePair{
Consumer: o.dseq - 1,
Stream: o.sseq - 1,
},
AckFloor: SequencePair{
Consumer: o.adflr,
Stream: o.asflr,
},
Pending: o.pending,
Redelivered: o.rdc,
}
return o.store.Update(&state)
}
// Info returns our current consumer state.
func (o *consumer) info() *ConsumerInfo {
o.mu.RLock()
mset := o.mset
if mset == nil || mset.srv == nil {
o.mu.RUnlock()
return nil
}
js := o.js
o.mu.RUnlock()
if js == nil {
return nil
}
ci := js.clusterInfo(o.raftGroup())
o.mu.RLock()
defer o.mu.RUnlock()
cfg := o.cfg
info := &ConsumerInfo{
Stream: o.stream,
Name: o.name,
Created: o.created,
Config: &cfg,
Delivered: SequencePair{
Consumer: o.dseq - 1,
Stream: o.sseq - 1,
},
AckFloor: SequencePair{
Consumer: o.adflr,
Stream: o.asflr,
},
NumAckPending: len(o.pending),
NumRedelivered: len(o.rdc),
NumPending: o.adjustedPending(),
Cluster: ci,
}
// If we are a pull mode consumer, report on number of waiting requests.
if o.isPullMode() {
info.NumWaiting = o.waiting.len()
}
return info
}
// Will signal us that new messages are available. Will break out of waiting.
func (o *consumer) signalNewMessages() {
// Kick our new message channel
select {
case o.mch <- struct{}{}:
default:
}
}
// shouldSample lets us know if we are sampling metrics on acks.
func (o *consumer) shouldSample() bool {
switch {
case o.sfreq <= 0:
return false
case o.sfreq >= 100:
return true
}
// TODO(ripienaar) this is a tad slow so we need to rethink here, however this will only
	// hit for those with sampling enabled and it's not the default
return rand.Int31n(100) <= o.sfreq
}
func (o *consumer) sampleAck(sseq, dseq, dc uint64) {
if !o.shouldSample() {
return
}
now := time.Now().UTC()
unow := now.UnixNano()
e := JSConsumerAckMetric{
TypedEvent: TypedEvent{
Type: JSConsumerAckMetricType,
ID: nuid.Next(),
Time: now,
},
Stream: o.stream,
Consumer: o.name,
ConsumerSeq: dseq,
StreamSeq: sseq,
Delay: unow - o.pending[sseq].Timestamp,
Deliveries: dc,
Domain: o.srv.getOpts().JetStreamDomain,
}
j, err := json.Marshal(e)
if err != nil {
return
}
o.sendAdvisory(o.ackEventT, j)
}
func (o *consumer) processAckMsg(sseq, dseq, dc uint64, doSample bool) {
o.mu.Lock()
var sagap uint64
var needSignal bool
switch o.cfg.AckPolicy {
case AckExplicit:
if p, ok := o.pending[sseq]; ok {
if doSample {
o.sampleAck(sseq, dseq, dc)
}
if o.maxp > 0 && len(o.pending) >= o.maxp {
needSignal = true
}
delete(o.pending, sseq)
// Use the original deliver sequence from our pending record.
dseq = p.Sequence
}
if len(o.pending) == 0 {
o.adflr, o.asflr = o.dseq-1, o.sseq-1
} else if dseq == o.adflr+1 {
o.adflr, o.asflr = dseq, sseq
for ss := sseq + 1; ss < o.sseq; ss++ {
if p, ok := o.pending[ss]; ok {
if p.Sequence > 0 {
o.adflr, o.asflr = p.Sequence-1, ss-1
}
break
}
}
}
// We do these regardless.
delete(o.rdc, sseq)
o.removeFromRedeliverQueue(sseq)
case AckAll:
// no-op
if dseq <= o.adflr || sseq <= o.asflr {
o.mu.Unlock()
return
}
if o.maxp > 0 && len(o.pending) >= o.maxp {
needSignal = true
}
sagap = sseq - o.asflr
o.adflr, o.asflr = dseq, sseq
for seq := sseq; seq > sseq-sagap; seq-- {
delete(o.pending, seq)
delete(o.rdc, seq)
o.removeFromRedeliverQueue(seq)
}
case AckNone:
// FIXME(dlc) - This is error but do we care?
o.mu.Unlock()
return
}
// Update underlying store.
o.updateAcks(dseq, sseq)
mset := o.mset
clustered := o.node != nil
o.mu.Unlock()
// Let the owning stream know if we are interest or workqueue retention based.
// If this consumer is clustered this will be handled by processReplicatedAck
// after the ack has propagated.
if !clustered && mset != nil && mset.cfg.Retention != LimitsPolicy {
if sagap > 1 {
// FIXME(dlc) - This is very inefficient, will need to fix.
for seq := sseq; seq > sseq-sagap; seq-- {
mset.ackMsg(o, seq)
}
} else {
mset.ackMsg(o, sseq)
}
}
// If we had max ack pending set and were at limit we need to unblock folks.
if needSignal {
o.signalNewMessages()
}
}
// Determine if this is a truly filtered consumer. Modern clients will place filtered subjects
// even if the stream only has a single non-wildcard subject designation.
// Read lock should be held.
func (o *consumer) isFiltered() bool {
if o.cfg.FilterSubject == _EMPTY_ {
return false
}
// If we are here we want to check if the filtered subject is
// a direct match for our only listed subject.
mset := o.mset
if mset == nil {
return true
}
if len(mset.cfg.Subjects) > 1 {
return true
}
return o.cfg.FilterSubject != mset.cfg.Subjects[0]
}
// Check if we need an ack for this store seq.
// This is called for interest based retention streams to remove messages.
func (o *consumer) needAck(sseq uint64) bool {
var needAck bool
var asflr, osseq uint64
var pending map[uint64]*Pending
o.mu.RLock()
if o.isLeader() {
asflr, osseq = o.asflr, o.sseq
pending = o.pending
} else {
if o.store == nil {
o.mu.RUnlock()
return false
}
state, err := o.store.State()
if err != nil || state == nil {
// Fall back to what we track internally for now.
needAck := sseq > o.asflr && !o.isFiltered()
o.mu.RUnlock()
return needAck
}
asflr, osseq = state.AckFloor.Stream, o.sseq
pending = state.Pending
}
switch o.cfg.AckPolicy {
case AckNone, AckAll:
needAck = sseq > asflr
case AckExplicit:
if sseq > asflr {
// Generally this means we need an ack, but just double check pending acks.
needAck = true
if sseq < osseq {
if len(pending) == 0 {
needAck = false
} else {
_, needAck = pending[sseq]
}
}
}
}
o.mu.RUnlock()
return needAck
}
// Helper for the next message requests.
func nextReqFromMsg(msg []byte) (time.Time, int, bool, error) {
req := bytes.TrimSpace(msg)
switch {
case len(req) == 0:
return time.Time{}, 1, false, nil
case req[0] == '{':
var cr JSApiConsumerGetNextRequest
if err := json.Unmarshal(req, &cr); err != nil {
return time.Time{}, -1, false, err
}
if cr.Expires == time.Duration(0) {
return time.Time{}, cr.Batch, cr.NoWait, nil
}
return time.Now().Add(cr.Expires), cr.Batch, cr.NoWait, nil
default:
if n, err := strconv.Atoi(string(req)); err == nil {
return time.Time{}, n, false, nil
}
}
return time.Time{}, 1, false, nil
}
// Represents a request that is on the internal waiting queue
type waitingRequest struct {
client *client
reply string
n int // For batching
expires time.Time
noWait bool
}
// waiting queue for requests that are waiting for new messages to arrive.
type waitQueue struct {
rp, wp int
reqs []*waitingRequest
}
// Create a new ring buffer with at most max items.
func newWaitQueue(max int) *waitQueue {
return &waitQueue{rp: -1, reqs: make([]*waitingRequest, max)}
}
var (
errWaitQueueFull = errors.New("wait queue is full")
errWaitQueueNil = errors.New("wait queue is nil")
)
// Adds in a new request.
func (wq *waitQueue) add(req *waitingRequest) error {
if wq == nil {
return errWaitQueueNil
}
if wq.isFull() {
return errWaitQueueFull
}
wq.reqs[wq.wp] = req
// TODO(dlc) - Could make pow2 and get rid of mod.
wq.wp = (wq.wp + 1) % cap(wq.reqs)
// Adjust read pointer if we were empty.
if wq.rp < 0 {
wq.rp = 0
}
return nil
}
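// isFull reports whether the ring buffer is out of free slots, i.e. the write pointer has caught back up to the read pointer.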
func (wq *waitQueue) isFull() bool {
return wq.rp == wq.wp
}
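// len returns the number of requests currently waiting in the queue.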
func (wq *waitQueue) len() int {
if wq == nil || wq.rp < 0 {
return 0
}
if wq.rp < wq.wp {
return wq.wp - wq.rp
}
return cap(wq.reqs) - wq.rp + wq.wp
}
// Peek will return the next request waiting or nil if empty.
func (wq *waitQueue) peek() *waitingRequest {
if wq == nil {
return nil
}
var wr *waitingRequest
if wq.rp >= 0 {
wr = wq.reqs[wq.rp]
}
return wr
}
// pop will return the next request and move the read cursor.
func (wq *waitQueue) pop() *waitingRequest {
wr := wq.peek()
if wr != nil {
wr.n--
if wr.n <= 0 {
wq.reqs[wq.rp] = nil
wq.rp = (wq.rp + 1) % cap(wq.reqs)
// Check if we are empty.
if wq.rp == wq.wp {
wq.rp, wq.wp = -1, 0
}
}
}
return wr
}
// processNextMsgReq will process a request for the next message available. A nil message payload means deliver
// a single message. If the payload is a formal request or a number parseable with Atoi(), then we will send a
// batch of messages without requiring another request to this endpoint, or an ACK.
func (o *consumer) processNextMsgReq(_ *subscription, c *client, _ *Account, _, reply string, msg []byte) {
_, msg = c.msgParts(msg)
o.mu.Lock()
defer o.mu.Unlock()
s, mset, js := o.srv, o.mset, o.js
if mset == nil {
return
}
sendErr := func(status int, description string) {
hdr := []byte(fmt.Sprintf("NATS/1.0 %d %s\r\n\r\n", status, description))
o.outq.send(&jsPubMsg{reply, _EMPTY_, _EMPTY_, hdr, nil, nil, 0, nil})
}
if o.isPushMode() {
sendErr(409, "Consumer is push based")
return
}
if o.waiting.isFull() {
// Try to expire some of the requests.
if expired := o.expireWaiting(); expired == 0 {
// Force expiration if needed.
o.forceExpireFirstWaiting()
}
}
// Check payload here to see if they sent in batch size or a formal request.
expires, batchSize, noWait, err := nextReqFromMsg(msg)
if err != nil {
sendErr(400, fmt.Sprintf("Bad Request - %v", err))
return
}
if o.maxp > 0 && batchSize > o.maxp {
sendErr(409, "Exceeded MaxAckPending")
return
}
// In case we have to queue up this request.
wr := waitingRequest{client: c, reply: reply, n: batchSize, noWait: noWait, expires: expires}
// If we are in replay mode, defer to processReplay for delivery.
if o.replay {
o.waiting.add(&wr)
o.signalNewMessages()
return
}
sendBatch := func(wr *waitingRequest) {
for i, batchSize := 0, wr.n; i < batchSize; i++ {
// See if we have more messages available.
if subj, hdr, msg, seq, dc, ts, err := o.getNextMsg(); err == nil {
o.deliverMsg(reply, subj, hdr, msg, seq, dc, ts)
// Need to discount this from the total n for the request.
wr.n--
} else {
if wr.noWait {
switch err {
case errMaxAckPending:
sendErr(409, "Exceeded MaxAckPending")
default:
sendErr(404, "No Messages")
}
} else {
o.waiting.add(wr)
}
return
}
}
}
// If this is direct from a client we can proceed inline.
if c.kind == CLIENT {
sendBatch(&wr)
} else {
// Check for API outstanding requests.
if apiOut := atomic.AddInt64(&js.apiCalls, 1); apiOut > 1024 {
atomic.AddInt64(&js.apiCalls, -1)
o.mu.Unlock()
sendErr(503, "JetStream API limit exceeded")
s.Warnf("JetStream API limit exceeded: %d calls outstanding", apiOut)
return
}
// Dispatch the API call to its own Go routine.
go func() {
o.mu.Lock()
sendBatch(&wr)
o.mu.Unlock()
atomic.AddInt64(&js.apiCalls, -1)
}()
}
}
// Increase the delivery count for this message.
// ONLY used on redelivery semantics.
// Lock should be held.
func (o *consumer) incDeliveryCount(sseq uint64) uint64 {
if o.rdc == nil {
o.rdc = make(map[uint64]uint64)
}
o.rdc[sseq] += 1
return o.rdc[sseq] + 1
}
// send a delivery exceeded advisory.
func (o *consumer) notifyDeliveryExceeded(sseq, dc uint64) {
e := JSConsumerDeliveryExceededAdvisory{
TypedEvent: TypedEvent{
Type: JSConsumerDeliveryExceededAdvisoryType,
ID: nuid.Next(),
Time: time.Now().UTC(),
},
Stream: o.stream,
Consumer: o.name,
StreamSeq: sseq,
Deliveries: dc,
Domain: o.srv.getOpts().JetStreamDomain,
}
j, err := json.Marshal(e)
if err != nil {
return
}
o.sendAdvisory(o.deliveryExcEventT, j)
}
// Check to see if the candidate subject matches a filter if it's present.
// Lock should be held.
func (o *consumer) isFilteredMatch(subj string) bool {
// No filter is automatic match.
if o.cfg.FilterSubject == _EMPTY_ {
return true
}
if !o.filterWC {
return subj == o.cfg.FilterSubject
}
// If we are here we have a wildcard filter subject.
// TODO(dlc) at speed might be better to just do a sublist with L2 and/or possibly L1.
return subjectIsSubsetMatch(subj, o.cfg.FilterSubject)
}
var (
errMaxAckPending = errors.New("max ack pending reached")
errBadConsumer = errors.New("consumer not valid")
errNoInterest = errors.New("consumer requires interest for delivery subject when ephemeral")
)
// Get next available message from underlying store.
// Is partition aware and redeliver aware.
// Lock should be held.
func (o *consumer) getNextMsg() (subj string, hdr, msg []byte, seq uint64, dc uint64, ts int64, err error) {
if o.mset == nil || o.mset.store == nil {
return _EMPTY_, nil, nil, 0, 0, 0, errBadConsumer
}
for {
seq, dc := o.sseq, uint64(1)
if o.hasSkipListPending() {
seq = o.lss.seqs[0]
if len(o.lss.seqs) == 1 {
o.sseq = o.lss.resume
o.lss = nil
o.updateSkipped()
} else {
o.lss.seqs = o.lss.seqs[1:]
}
} else if o.hasRedeliveries() {
seq = o.getNextToRedeliver()
dc = o.incDeliveryCount(seq)
if o.maxdc > 0 && dc > o.maxdc {
// Only send once
if dc == o.maxdc+1 {
o.notifyDeliveryExceeded(seq, dc-1)
}
// Make sure to remove from pending.
delete(o.pending, seq)
continue
}
} else if o.maxp > 0 && len(o.pending) >= o.maxp {
// maxp only set when ack policy != AckNone and user set MaxAckPending
// Stall if we have hit max pending.
return _EMPTY_, nil, nil, 0, 0, 0, errMaxAckPending
}
subj, hdr, msg, ts, err := o.mset.store.LoadMsg(seq)
if err == nil {
if dc == 1 { // First delivery.
o.sseq++
if o.cfg.FilterSubject != _EMPTY_ && !o.isFilteredMatch(subj) {
o.updateSkipped()
continue
}
}
// We have the msg here.
return subj, hdr, msg, seq, dc, ts, nil
}
// We got an error here. If this is an EOF we will return, otherwise
// we can continue looking.
if err == ErrStoreEOF || err == ErrStoreClosed {
return _EMPTY_, nil, nil, 0, 0, 0, err
}
// Skip since it's probably deleted or expired.
o.sseq++
}
}
// forceExpireFirstWaiting will force expire the first waiting.
// Lock should be held.
func (o *consumer) forceExpireFirstWaiting() *waitingRequest {
// FIXME(dlc) - Should we do advisory here as well?
wr := o.waiting.pop()
if wr == nil {
return wr
}
// If we are expiring this and we think there is still interest, alert.
if rr := o.acc.sl.Match(wr.reply); len(rr.psubs)+len(rr.qsubs) > 0 && o.mset != nil {
// We still appear to have interest, so send alert as courtesy.
hdr := []byte("NATS/1.0 408 Request Timeout\r\n\r\n")
o.outq.send(&jsPubMsg{wr.reply, _EMPTY_, _EMPTY_, hdr, nil, nil, 0, nil})
}
return wr
}
// Will check for expiration and lack of interest on waiting requests.
func (o *consumer) expireWaiting() int {
var expired int
now := time.Now()
for wr := o.waiting.peek(); wr != nil; wr = o.waiting.peek() {
if !wr.expires.IsZero() && now.After(wr.expires) {
o.forceExpireFirstWaiting()
expired++
continue
}
s, acc := o.acc.srv, o.acc
rr := acc.sl.Match(wr.reply)
if len(rr.psubs)+len(rr.qsubs) > 0 {
break
}
// If we are here check on gateways.
if s != nil && s.hasGatewayInterest(acc.Name, wr.reply) {
break
}
// No more interest so go ahead and remove this one from our list.
o.forceExpireFirstWaiting()
expired++
}
return expired
}
// Will check to make sure those waiting still have registered interest.
func (o *consumer) checkWaitingForInterest() bool {
o.expireWaiting()
return o.waiting.len() > 0
}
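// hbTimer returns the configured idle heartbeat interval and a timer for it, or (0, nil) when heartbeats are disabled.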
// Lock should be held.
func (o *consumer) hbTimer() (time.Duration, *time.Timer) {
if o.cfg.Heartbeat == 0 {
return 0, nil
}
return o.cfg.Heartbeat, time.NewTimer(o.cfg.Heartbeat)
}
func (o *consumer) loopAndGatherMsgs(qch chan struct{}) {
// On startup, check to see if we are in a replay situation where the replay policy is not instant.
var (
lts int64 // last time stamp seen, used for replay.
lseq uint64
)
o.mu.Lock()
s := o.srv
if o.replay {
// consumer is closed when mset is set to nil.
if o.mset == nil {
o.mu.Unlock()
return
}
lseq = o.mset.state().LastSeq
}
// For idle heartbeat support.
var hbc <-chan time.Time
hbd, hb := o.hbTimer()
if hb != nil {
hbc = hb.C
}
// Interest changes.
inch := o.inch
o.mu.Unlock()
// Deliver all the msgs we have now, once done or on a condition, we wait for new ones.
for {
var (
seq, dc uint64
subj, dsubj string
hdr []byte
msg []byte
err error
ts int64
delay time.Duration
)
o.mu.Lock()
// consumer is closed when mset is set to nil.
if o.mset == nil {
o.mu.Unlock()
return
}
// If we are in push mode and not active or under flowcontrol let's stop sending.
if o.isPushMode() {
if !o.active {
goto waitForMsgs
}
if o.maxpb > 0 && o.pbytes > o.maxpb {
goto waitForMsgs
}
}
// If we are in pull mode and no one is waiting already break and wait.
if o.isPullMode() && !o.checkWaitingForInterest() {
goto waitForMsgs
}
subj, hdr, msg, seq, dc, ts, err = o.getNextMsg()
// On error either wait or return.
if err != nil {
if err == ErrStoreMsgNotFound || err == ErrStoreEOF || err == errMaxAckPending {
goto waitForMsgs
} else {
o.mu.Unlock()
s.Errorf("Received an error looking up message for consumer: %v", err)
return
}
}
if wr := o.waiting.pop(); wr != nil {
dsubj = wr.reply
} else {
dsubj = o.dsubj
}
// If we are in a replay scenario and have not caught up check if we need to delay here.
if o.replay && lts > 0 {
if delay = time.Duration(ts - lts); delay > time.Millisecond {
o.mu.Unlock()
select {
case <-qch:
return
case <-time.After(delay):
}
o.mu.Lock()
}
}
// Track this regardless.
lts = ts
// If we have a rate limit set make sure we check that here.
if o.rlimit != nil {
now := time.Now()
r := o.rlimit.ReserveN(now, len(msg)+len(hdr)+len(subj)+len(dsubj)+len(o.ackReplyT))
delay := r.DelayFrom(now)
if delay > 0 {
o.mu.Unlock()
select {
case <-qch:
return
case <-time.After(delay):
}
o.mu.Lock()
}
}
// Do actual delivery.
o.deliverMsg(dsubj, subj, hdr, msg, seq, dc, ts)
// Reset our idle heartbeat timer if set.
if hb != nil {
hb.Reset(hbd)
}
o.mu.Unlock()
continue
waitForMsgs:
// If we were in a replay state check to see if we are caught up. If so clear.
if o.replay && o.sseq > lseq {
o.replay = false
}
// We will wait here for new messages to arrive.
mch, outq, odsubj, sseq, dseq := o.mch, o.outq, o.cfg.DeliverSubject, o.sseq-1, o.dseq-1
o.mu.Unlock()
select {
case interest := <-inch:
// inch can be nil on pull-based, but then this will
// just block and not fire.
o.updateDeliveryInterest(interest)
case <-qch:
return
case <-mch:
// Messages are waiting.
case <-hbc:
if o.isActive() {
const t = "NATS/1.0 100 Idle Heartbeat\r\n%s: %d\r\n%s: %d\r\n\r\n"
hdr := []byte(fmt.Sprintf(t, JSLastConsumerSeq, dseq, JSLastStreamSeq, sseq))
outq.send(&jsPubMsg{odsubj, _EMPTY_, _EMPTY_, hdr, nil, nil, 0, nil})
}
// Reset our idle heartbeat timer.
hb.Reset(hbd)
// Now check on flow control if enabled; if we have anything outstanding, make sure to resend it.
if o.fcOut() {
o.sendFlowControl()
}
}
}
}
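// ackReply builds the ack reply subject for a delivered message, encoding the delivery count, stream and consumer sequences, timestamp and pending count.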
func (o *consumer) ackReply(sseq, dseq, dc uint64, ts int64, pending uint64) string {
return fmt.Sprintf(o.ackReplyT, dc, sseq, dseq, ts, pending)
}
// Used mostly for testing. Sets max pending bytes for flow control setups.
func (o *consumer) setMaxPendingBytes(limit int) {
o.pblimit = limit
o.maxpb = limit / 16
if o.maxpb == 0 {
o.maxpb = 1
}
}
// We have the case where a consumer can become greedy and pick up a message before the stream has incremented our pending (sgap).
// Instead of trying to slow things down and synchronize we will allow this to wrap and go negative (biggest uint64) for a short time.
// This function checks for that and returns 0 in that case.
// Lock should be held.
func (o *consumer) adjustedPending() uint64 {
if o.sgap&(1<<63) != 0 {
return 0
}
return o.sgap
}
// Deliver a msg to the consumer.
// Lock should be held and o.mset validated to be non-nil.
func (o *consumer) deliverMsg(dsubj, subj string, hdr, msg []byte, seq, dc uint64, ts int64) {
if o.mset == nil {
return
}
// Update pending on first attempt. This can go upside down for a short bit, that is ok.
// See adjustedPending().
if dc == 1 {
o.sgap--
}
dseq := o.dseq
o.dseq++
pmsg := &jsPubMsg{dsubj, subj, o.ackReply(seq, dseq, dc, ts, o.adjustedPending()), hdr, msg, o, seq, nil}
if o.maxpb > 0 {
o.pbytes += pmsg.size()
}
mset := o.mset
ap := o.cfg.AckPolicy
// Send message.
o.outq.send(pmsg)
// If we are ack none and mset is interest only we should make sure stream removes interest.
if ap == AckNone && mset.cfg.Retention != LimitsPolicy && mset.amch != nil {
mset.amch <- seq
}
if ap == AckExplicit || ap == AckAll {
o.trackPending(seq, dseq)
} else if ap == AckNone {
o.adflr = dseq
o.asflr = seq
}
// Flow control.
if o.maxpb > 0 && o.needFlowControl() {
o.sendFlowControl()
}
// FIXME(dlc) - Capture errors?
o.updateDelivered(dseq, seq, dc, ts)
}
func (o *consumer) needFlowControl() bool {
if o.maxpb == 0 {
return false
}
// Decide whether to send a flow control message, which the user will need to respond to.
// We send when we are over 50% of our current window limit.
if o.fcid == _EMPTY_ && o.pbytes > o.maxpb/2 {
return true
}
return false
}
func (o *consumer) processFlowControl(_ *subscription, c *client, _ *Account, subj, _ string, _ []byte) {
o.mu.Lock()
defer o.mu.Unlock()
// Ignore if not the latest we have sent out.
if subj != o.fcid {
return
}
// For slow starts and ramping up.
if o.maxpb < o.pblimit {
o.maxpb *= 2
if o.maxpb > o.pblimit {
o.maxpb = o.pblimit
}
}
// Update accounting.
o.pbytes -= o.fcsz
o.fcid, o.fcsz = _EMPTY_, 0
// In case they are sent out of order or we get duplicates etc.
if o.pbytes < 0 {
o.pbytes = 0
}
o.signalNewMessages()
}
// Lock should be held.
func (o *consumer) fcReply() string {
var sb strings.Builder
sb.WriteString(jsFlowControlPre)
sb.WriteString(o.stream)
sb.WriteByte(btsep)
sb.WriteString(o.name)
sb.WriteByte(btsep)
var b [4]byte
rn := rand.Int63()
for i, l := 0, rn; i < len(b); i++ {
b[i] = digits[l%base]
l /= base
}
sb.Write(b[:])
return sb.String()
}
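// fcOut reports whether we have an outstanding flow control request awaiting a response.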
func (o *consumer) fcOut() bool {
o.mu.RLock()
defer o.mu.RUnlock()
return o.fcid != _EMPTY_
}
// sendFlowControl will send a flow control packet to the consumer.
// Lock should be held.
func (o *consumer) sendFlowControl() {
if !o.isPushMode() {
return
}
subj, rply := o.cfg.DeliverSubject, o.fcReply()
o.fcsz, o.fcid = o.pbytes, rply
hdr := []byte("NATS/1.0 100 FlowControl Request\r\n\r\n")
o.outq.send(&jsPubMsg{subj, _EMPTY_, rply, hdr, nil, nil, 0, nil})
}
// Tracks our outstanding pending acks. Only applicable to AckExplicit mode.
// Lock should be held.
func (o *consumer) trackPending(sseq, dseq uint64) {
if o.pending == nil {
o.pending = make(map[uint64]*Pending)
}
if o.ptmr == nil {
o.ptmr = time.AfterFunc(o.ackWait(0), o.checkPending)
}
if p, ok := o.pending[sseq]; ok {
p.Timestamp = time.Now().UnixNano()
} else {
o.pending[sseq] = &Pending{dseq, time.Now().UnixNano()}
}
}
// didNotDeliver is called when a delivery for a consumer message failed.
// Depending on our state, we will process the failure.
func (o *consumer) didNotDeliver(seq uint64) {
o.mu.Lock()
mset := o.mset
if mset == nil {
o.mu.Unlock()
return
}
var checkDeliveryInterest bool
if o.isPushMode() {
o.active = false
checkDeliveryInterest = true
} else if o.pending != nil {
// pull mode and we have pending.
if _, ok := o.pending[seq]; ok {
// We found this message on pending, so we need
// to queue it up for immediate redelivery since
// we know it was not delivered.
if !o.onRedeliverQueue(seq) {
o.addToRedeliverQueue(seq)
o.signalNewMessages()
}
}
}
o.mu.Unlock()
// If we do not have interest update that here.
if checkDeliveryInterest && o.hasNoLocalInterest() {
o.updateDeliveryInterest(false)
}
}
// Lock should be held.
func (o *consumer) addToRedeliverQueue(seqs ...uint64) {
if o.rdqi == nil {
o.rdqi = make(map[uint64]struct{})
}
o.rdq = append(o.rdq, seqs...)
for _, seq := range seqs {
o.rdqi[seq] = struct{}{}
}
}
// Lock should be held.
func (o *consumer) hasRedeliveries() bool {
return len(o.rdq) > 0
}
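// getNextToRedeliver pops the next sequence off the redelivery queue, returning 0 if nothing is queued.
// Lock should be held.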
func (o *consumer) getNextToRedeliver() uint64 {
if len(o.rdq) == 0 {
return 0
}
seq := o.rdq[0]
if len(o.rdq) == 1 {
o.rdq, o.rdqi = nil, nil
} else {
o.rdq = append(o.rdq[:0], o.rdq[1:]...)
delete(o.rdqi, seq)
}
return seq
}
// This checks if we already have this sequence queued for redelivery.
// FIXME(dlc) - This is O(n) but should be fast with small redeliver size.
// Lock should be held.
func (o *consumer) onRedeliverQueue(seq uint64) bool {
if o.rdqi == nil {
return false
}
_, ok := o.rdqi[seq]
return ok
}
// Remove a sequence from the redelivery queue.
// Lock should be held.
func (o *consumer) removeFromRedeliverQueue(seq uint64) bool {
if !o.onRedeliverQueue(seq) {
return false
}
for i, rseq := range o.rdq {
if rseq == seq {
if len(o.rdq) == 1 {
o.rdq, o.rdqi = nil, nil
} else {
o.rdq = append(o.rdq[:i], o.rdq[i+1:]...)
delete(o.rdqi, seq)
}
return true
}
}
return false
}
// Checks the pending messages.
func (o *consumer) checkPending() {
o.mu.Lock()
defer o.mu.Unlock()
mset := o.mset
if mset == nil {
return
}
ttl := int64(o.cfg.AckWait)
next := int64(o.ackWait(0))
now := time.Now().UnixNano()
// Since we can update timestamps, we have to review all pending.
// We may want to unlock here or warn if list is big.
var expired []uint64
for seq, p := range o.pending {
elapsed := now - p.Timestamp
if elapsed >= ttl {
if !o.onRedeliverQueue(seq) {
expired = append(expired, seq)
o.signalNewMessages()
}
} else if ttl-elapsed < next {
// Update when we should fire next.
next = ttl - elapsed
}
}
if len(expired) > 0 {
// We need to sort.
sort.Slice(expired, func(i, j int) bool { return expired[i] < expired[j] })
o.addToRedeliverQueue(expired...)
// Now we should update the timestamp here since we are redelivering.
// We will use an incrementing time to preserve order for any other redelivery.
off := now - o.pending[expired[0]].Timestamp
for _, seq := range expired {
if p, ok := o.pending[seq]; ok {
p.Timestamp += off
}
}
}
if len(o.pending) > 0 {
o.ptmr.Reset(o.ackWait(time.Duration(next)))
} else {
o.ptmr.Stop()
o.ptmr = nil
}
}
// SeqFromReply will extract a sequence number from a reply subject.
func (o *consumer) seqFromReply(reply string) uint64 {
_, dseq, _ := ackReplyInfo(reply)
return dseq
}
// StreamSeqFromReply will extract the stream sequence from the reply subject.
func (o *consumer) streamSeqFromReply(reply string) uint64 {
sseq, _, _ := ackReplyInfo(reply)
return sseq
}
// Quick parser for positive numbers in ack reply encoding.
func parseAckReplyNum(d string) (n int64) {
if len(d) == 0 {
return -1
}
for _, dec := range d {
if dec < asciiZero || dec > asciiNine {
return -1
}
n = n*10 + (int64(dec) - asciiZero)
}
return n
}
const expectedNumReplyTokens = 9
// Grab encoded information in the reply subject for a delivered message.
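// The subject is expected to have the form $JS.ACK.<stream>.<consumer>.<delivery count>.<stream seq>.<consumer seq>.<timestamp>.<pending>.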
func replyInfo(subject string) (sseq, dseq, dc uint64, ts int64, pending uint64) {
tsa := [expectedNumReplyTokens]string{}
start, tokens := 0, tsa[:0]
for i := 0; i < len(subject); i++ {
if subject[i] == btsep {
tokens = append(tokens, subject[start:i])
start = i + 1
}
}
tokens = append(tokens, subject[start:])
if len(tokens) != expectedNumReplyTokens || tokens[0] != "$JS" || tokens[1] != "ACK" {
return 0, 0, 0, 0, 0
}
// TODO(dlc) - Should we error if we do not match consumer name?
// stream is tokens[2], consumer is 3.
dc = uint64(parseAckReplyNum(tokens[4]))
sseq, dseq = uint64(parseAckReplyNum(tokens[5])), uint64(parseAckReplyNum(tokens[6]))
ts = parseAckReplyNum(tokens[7])
pending = uint64(parseAckReplyNum(tokens[8]))
return sseq, dseq, dc, ts, pending
}
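// ackReplyInfo is a lighter-weight variant of replyInfo that only extracts the stream sequence, consumer sequence and delivery count.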
func ackReplyInfo(subject string) (sseq, dseq, dc uint64) {
tsa := [expectedNumReplyTokens]string{}
start, tokens := 0, tsa[:0]
for i := 0; i < len(subject); i++ {
if subject[i] == btsep {
tokens = append(tokens, subject[start:i])
start = i + 1
}
}
tokens = append(tokens, subject[start:])
if len(tokens) != expectedNumReplyTokens || tokens[0] != "$JS" || tokens[1] != "ACK" {
return 0, 0, 0
}
dc = uint64(parseAckReplyNum(tokens[4]))
sseq, dseq = uint64(parseAckReplyNum(tokens[5])), uint64(parseAckReplyNum(tokens[6]))
return sseq, dseq, dc
}
// NextSeq returns the next delivered sequence number for this consumer.
func (o *consumer) nextSeq() uint64 {
o.mu.RLock()
dseq := o.dseq
o.mu.RUnlock()
return dseq
}
// Used to hold skip list when deliver policy is last per subject.
type lastSeqSkipList struct {
resume uint64
seqs []uint64
}
// Will create a skip list for us from a store's subjects state.
func createLastSeqSkipList(mss map[string]SimpleState) []uint64 {
seqs := make([]uint64, 0, len(mss))
for _, ss := range mss {
seqs = append(seqs, ss.Last)
}
sort.Slice(seqs, func(i, j int) bool { return seqs[i] < seqs[j] })
return seqs
}
// Lets us know we have a skip list, which is for deliver last per subject, and that we are just starting.
// Lock should be held.
func (o *consumer) hasSkipListPending() bool {
return o.lss != nil && len(o.lss.seqs) > 0
}
// Will select the starting sequence.
func (o *consumer) selectStartingSeqNo() {
if o.mset == nil || o.mset.store == nil {
o.sseq = 1
} else {
stats := o.mset.store.State()
if o.cfg.OptStartSeq == 0 {
if o.cfg.DeliverPolicy == DeliverAll {
o.sseq = stats.FirstSeq
} else if o.cfg.DeliverPolicy == DeliverLast {
o.sseq = stats.LastSeq
// If we are partitioned here this will be properly set when we become leader.
if o.cfg.FilterSubject != _EMPTY_ {
ss := o.mset.store.FilteredState(1, o.cfg.FilterSubject)
o.sseq = ss.Last
}
} else if o.cfg.DeliverPolicy == DeliverLastPerSubject {
if mss := o.mset.store.SubjectsState(o.cfg.FilterSubject); len(mss) > 0 {
o.lss = &lastSeqSkipList{
resume: stats.LastSeq,
seqs: createLastSeqSkipList(mss),
}
o.sseq = o.lss.seqs[0]
} else {
// If no mapping info just set to last.
o.sseq = stats.LastSeq
}
} else if o.cfg.OptStartTime != nil {
// If we are here we are time based.
// TODO(dlc) - Once clustered can't rely on this.
o.sseq = o.mset.store.GetSeqFromTime(*o.cfg.OptStartTime)
} else {
o.sseq = stats.LastSeq + 1
}
} else {
o.sseq = o.cfg.OptStartSeq
}
if stats.FirstSeq == 0 {
o.sseq = 1
} else if o.sseq < stats.FirstSeq {
o.sseq = stats.FirstSeq
} else if o.sseq > stats.LastSeq {
o.sseq = stats.LastSeq + 1
}
}
// Always set delivery sequence to 1.
o.dseq = 1
// Set ack delivery floor to delivery-1
o.adflr = o.dseq - 1
// Set ack store floor to store-1
o.asflr = o.sseq - 1
}
// Test whether a config represents a durable subscriber.
func isDurableConsumer(config *ConsumerConfig) bool {
return config != nil && config.Durable != _EMPTY_
}
func (o *consumer) isDurable() bool {
return o.cfg.Durable != _EMPTY_
}
// Are we in push mode, delivery subject, etc.
func (o *consumer) isPushMode() bool {
return o.cfg.DeliverSubject != _EMPTY_
}
func (o *consumer) isPullMode() bool {
return o.cfg.DeliverSubject == _EMPTY_
}
// String returns the name of this consumer.
func (o *consumer) String() string {
o.mu.RLock()
n := o.name
o.mu.RUnlock()
return n
}
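// createConsumerName generates a name for an ephemeral consumer by hashing a fresh NUID.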
func createConsumerName() string {
return string(getHash(nuid.Next()))
}
// deleteConsumer will delete the consumer from this stream.
func (mset *stream) deleteConsumer(o *consumer) error {
return o.delete()
}
func (o *consumer) streamName() string {
o.mu.RLock()
mset := o.mset
o.mu.RUnlock()
if mset != nil {
return mset.name()
}
return _EMPTY_
}
// Active indicates if this consumer is still active.
func (o *consumer) isActive() bool {
o.mu.RLock()
active := o.active && o.mset != nil
o.mu.RUnlock()
return active
}
// hasNoLocalInterest return true if we have no local interest.
func (o *consumer) hasNoLocalInterest() bool {
o.mu.RLock()
rr := o.acc.sl.Match(o.cfg.DeliverSubject)
o.mu.RUnlock()
return len(rr.psubs)+len(rr.qsubs) == 0
}
// This is when the underlying stream has been purged.
// sseq is the new first seq for the stream after purge.
func (o *consumer) purge(sseq uint64) {
// Do not update our state unless we know we are the leader.
if sseq == 0 || !o.isLeader() {
return
}
o.mu.Lock()
o.sseq = sseq
o.asflr = sseq - 1
o.adflr = o.dseq - 1
o.sgap = 0
o.pending = nil
// We need to remove all those being queued for redelivery under o.rdq
if len(o.rdq) > 0 {
rdq := o.rdq
o.rdq, o.rdqi = nil, nil
for _, sseq := range rdq {
if sseq >= o.sseq {
o.addToRedeliverQueue(sseq)
}
}
}
o.mu.Unlock()
o.writeStoreState()
}
func stopAndClearTimer(tp **time.Timer) {
if *tp == nil {
return
}
// Will get drained in normal course, do not try to
// drain here.
(*tp).Stop()
*tp = nil
}
// Stop will shutdown the consumer for the associated stream.
func (o *consumer) stop() error {
return o.stopWithFlags(false, false, true, false)
}
func (o *consumer) deleteWithoutAdvisory() error {
return o.stopWithFlags(true, false, true, false)
}
// Delete will delete the consumer for the associated stream and send advisories.
func (o *consumer) delete() error {
return o.stopWithFlags(true, false, true, true)
}
func (o *consumer) stopWithFlags(dflag, sdflag, doSignal, advisory bool) error {
o.mu.Lock()
if o.closed {
o.mu.Unlock()
return nil
}
o.closed = true
if dflag && advisory && o.isLeader() {
o.sendDeleteAdvisoryLocked()
}
if o.qch != nil {
close(o.qch)
o.qch = nil
}
a := o.acc
store := o.store
mset := o.mset
o.mset = nil
o.active = false
o.unsubscribe(o.ackSub)
o.unsubscribe(o.reqSub)
o.unsubscribe(o.fcSub)
o.ackSub = nil
o.reqSub = nil
o.fcSub = nil
if o.infoSub != nil {
o.srv.sysUnsubscribe(o.infoSub)
o.infoSub = nil
}
c := o.client
o.client = nil
sysc := o.sysc
o.sysc = nil
stopAndClearTimer(&o.ptmr)
stopAndClearTimer(&o.dtmr)
stopAndClearTimer(&o.gwdtmr)
delivery := o.cfg.DeliverSubject
o.waiting = nil
// Break us out of the readLoop.
if doSignal {
o.signalNewMessages()
}
n := o.node
o.mu.Unlock()
if c != nil {
c.closeConnection(ClientClosed)
}
if sysc != nil {
sysc.closeConnection(ClientClosed)
}
if delivery != _EMPTY_ {
a.sl.ClearNotification(delivery, o.inch)
}
mset.mu.Lock()
mset.removeConsumer(o)
rp := mset.cfg.Retention
mset.mu.Unlock()
// We need to optionally remove all messages since we are interest based retention.
// We will do this consistently on all replicas. Note that if in clustered mode the
// non-leader consumers will need to restore state first.
if dflag && rp == InterestPolicy {
stop := mset.lastSeq()
o.mu.Lock()
if !o.isLeader() {
o.readStoredState()
}
start := o.asflr
o.mu.Unlock()
var rmseqs []uint64
mset.mu.RLock()
for seq := start; seq <= stop; seq++ {
if !mset.checkInterest(seq, o) {
rmseqs = append(rmseqs, seq)
}
}
mset.mu.RUnlock()
for _, seq := range rmseqs {
mset.store.RemoveMsg(seq)
}
}
// Cluster cleanup.
if n != nil {
if dflag {
n.Delete()
} else {
n.Stop()
}
}
// Clean up our store.
var err error
if store != nil {
if dflag {
if sdflag {
err = store.StreamDelete()
} else {
err = store.Delete()
}
} else {
err = store.Stop()
}
}
return err
}
// Check that we do not form a cycle by delivering to a delivery subject
// that is part of the interest group.
func (mset *stream) deliveryFormsCycle(deliverySubject string) bool {
mset.mu.RLock()
defer mset.mu.RUnlock()
for _, subject := range mset.cfg.Subjects {
if subjectIsSubsetMatch(deliverySubject, subject) {
return true
}
}
return false
}
// Check that the filtered subject is valid given a set of stream subjects.
func validFilteredSubject(filteredSubject string, subjects []string) bool {
if !IsValidSubject(filteredSubject) {
return false
}
hasWC := subjectHasWildcard(filteredSubject)
for _, subject := range subjects {
if subjectIsSubsetMatch(filteredSubject, subject) {
return true
}
// If we have a wildcard as the filtered subject check to see if we are
// a wider scope but do match a subject.
if hasWC && subjectIsSubsetMatch(subject, filteredSubject) {
return true
}
}
return false
}
// SetInActiveDeleteThreshold sets the delete threshold for how long to wait
// before deleting an inactive ephemeral consumer.
func (o *consumer) setInActiveDeleteThreshold(dthresh time.Duration) error {
o.mu.Lock()
defer o.mu.Unlock()
if o.isPullMode() {
return fmt.Errorf("consumer is not push-based")
}
if o.isDurable() {
return fmt.Errorf("consumer is not durable")
}
deleteWasRunning := o.dtmr != nil
stopAndClearTimer(&o.dtmr)
o.dthresh = dthresh
if deleteWasRunning {
o.dtmr = time.AfterFunc(o.dthresh, func() { o.deleteNotActive() })
}
return nil
}
// switchToEphemeral is called on startup when recovering ephemerals.
func (o *consumer) switchToEphemeral() {
o.mu.Lock()
o.cfg.Durable = _EMPTY_
store, ok := o.store.(*consumerFileStore)
rr := o.acc.sl.Match(o.cfg.DeliverSubject)
o.mu.Unlock()
// Update interest
o.updateDeliveryInterest(len(rr.psubs)+len(rr.qsubs) > 0)
// Write out new config
if ok {
store.updateConfig(o.cfg)
}
}
// RequestNextMsgSubject returns the subject to request the next message when in pull or worker mode.
// Returns empty otherwise.
func (o *consumer) requestNextMsgSubject() string {
return o.nextMsgSubj
}
// Will set the initial pending and start sequence.
// mset lock should be held.
func (o *consumer) setInitialPendingAndStart() {
mset := o.mset
if mset == nil || mset.store == nil {
return
}
// notFiltered means we want all messages.
notFiltered := o.cfg.FilterSubject == _EMPTY_
if !notFiltered {
// Check to see if we directly match the configured stream.
// Many clients will always send a filtered subject.
cfg := &mset.cfg
if len(cfg.Subjects) == 1 && cfg.Subjects[0] == o.cfg.FilterSubject {
notFiltered = true
}
}
if notFiltered {
state := mset.store.State()
if state.Msgs > 0 {
o.sgap = state.Msgs - (o.sseq - state.FirstSeq)
}
} else {
// Here we are filtered.
dp := o.cfg.DeliverPolicy
if dp == DeliverLastPerSubject && o.hasSkipListPending() && o.sseq < o.lss.resume {
if o.lss != nil {
ss := mset.store.FilteredState(o.lss.resume, o.cfg.FilterSubject)
o.sseq = o.lss.seqs[0]
o.sgap = ss.Msgs + uint64(len(o.lss.seqs))
}
} else if ss := mset.store.FilteredState(o.sseq, o.cfg.FilterSubject); ss.Msgs > 0 {
o.sgap = ss.Msgs
// See if we should update our starting sequence.
if dp == DeliverLast || dp == DeliverLastPerSubject {
o.sseq = ss.Last
} else if dp == DeliverNew {
o.sseq = ss.Last + 1
} else {
// DeliverAll, DeliverByStartSequence, DeliverByStartTime
o.sseq = ss.First
}
// Cleanup lss when we take over in clustered mode.
if dp == DeliverLastPerSubject && o.hasSkipListPending() && o.sseq >= o.lss.resume {
o.lss = nil
}
}
o.updateSkipped()
}
}
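// decStreamPending is called when a message is removed from the stream. It decrements our pending count when the
// removed message matches our filter and has not been delivered yet; if the message was still pending it is processed like an ack.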
func (o *consumer) decStreamPending(sseq uint64, subj string) {
o.mu.Lock()
// Ignore if we have already seen this one.
if sseq >= o.sseq && o.sgap > 0 && o.isFilteredMatch(subj) {
o.sgap--
}
// Check if this message was pending.
p, wasPending := o.pending[sseq]
var rdc uint64 = 1
if o.rdc != nil {
rdc = o.rdc[sseq]
}
o.mu.Unlock()
// If it was pending process it like an ack.
// TODO(dlc) - we could do a term here instead with a reason to generate the advisory.
if wasPending {
o.processAckMsg(sseq, p.Sequence, rdc, false)
}
}
func (o *consumer) account() *Account {
o.mu.RLock()
a := o.acc
o.mu.RUnlock()
return a
}
| 1 | 13,811 | I see the `+1` issue, but we used to check if `o.lss != nil` and we don't anymore, so we are sure that it won't be nil here, right? | nats-io-nats-server | go |
@@ -8,6 +8,11 @@
<% @documents&.each do |document| %>
<h1 class="modal-title"><%= document_heading(document) %></h1>
+ <% if document.respond_to?(:export_as_preferred_citation_txt) %>
+ <h2><%= t('blacklight.citation.preferred') %></h2>
+ <%= document.send(:export_as_preferred_citation_txt).html_safe %><br/><br/>
+ <% end %>
+
<% if document.respond_to?(:export_as_mla_citation_txt) %>
<h2><%= t('blacklight.citation.mla') %></h2>
<%= document.send(:export_as_mla_citation_txt).html_safe %><br/><br/> | 1 | <div class="modal-header">
<h1><%= t('blacklight.tools.citation') %></h1>
<button type="button" class="blacklight-modal-close close" data-dismiss="modal" aria-label="<%= t('blacklight.modal.close') %>">
<span aria-hidden="true">×</span>
</button>
</div>
<div class="modal-body">
<% @documents&.each do |document| %>
<h1 class="modal-title"><%= document_heading(document) %></h1>
<% if document.respond_to?(:export_as_mla_citation_txt) %>
<h2><%= t('blacklight.citation.mla') %></h2>
<%= document.send(:export_as_mla_citation_txt).html_safe %><br/><br/>
<% end %>
<% if document.respond_to?(:export_as_apa_citation_txt) %>
<h2><%= t('blacklight.citation.apa') %></h2>
<%= document.send(:export_as_apa_citation_txt).html_safe %><br/><br/>
<% end %>
<% if document.respond_to?(:export_as_chicago_citation_txt) %>
<h2><%= t('blacklight.citation.chicago') %></h2>
<%= document.send(:export_as_chicago_citation_txt).html_safe %>
<% end %>
<% end %>
</div>
| 1 | 8,749 | why is the `send` needed? | projectblacklight-blacklight | rb |
@@ -106,8 +106,9 @@ def bbox2result(bboxes, labels, num_classes):
if bboxes.shape[0] == 0:
return [np.zeros((0, 5), dtype=np.float32) for i in range(num_classes)]
else:
- bboxes = bboxes.cpu().numpy()
- labels = labels.cpu().numpy()
+ if isinstance(bboxes, torch.Tensor):
+ bboxes = bboxes.cpu().numpy()
+ labels = labels.cpu().numpy()
return [bboxes[labels == i, :] for i in range(num_classes)]
| 1 | import numpy as np
import torch
def bbox_flip(bboxes, img_shape, direction='horizontal'):
"""Flip bboxes horizontally or vertically.
Args:
bboxes (Tensor): Shape (..., 4*k)
img_shape (tuple): Image shape.
direction (str): Flip direction, options are "horizontal" and
"vertical". Default: "horizontal"
Returns:
Tensor: Flipped bboxes.
"""
assert bboxes.shape[-1] % 4 == 0
assert direction in ['horizontal', 'vertical']
flipped = bboxes.clone()
if direction == 'vertical':
flipped[..., 1::4] = img_shape[0] - bboxes[..., 3::4]
flipped[..., 3::4] = img_shape[0] - bboxes[..., 1::4]
else:
flipped[:, 0::4] = img_shape[1] - bboxes[:, 2::4]
flipped[:, 2::4] = img_shape[1] - bboxes[:, 0::4]
return flipped
def bbox_mapping(bboxes,
img_shape,
scale_factor,
flip,
flip_direction='horizontal'):
"""Map bboxes from the original image scale to testing scale."""
new_bboxes = bboxes * bboxes.new_tensor(scale_factor)
if flip:
new_bboxes = bbox_flip(new_bboxes, img_shape, flip_direction)
return new_bboxes
def bbox_mapping_back(bboxes,
img_shape,
scale_factor,
flip,
flip_direction='horizontal'):
"""Map bboxes from testing scale to original image scale."""
new_bboxes = bbox_flip(bboxes, img_shape,
flip_direction) if flip else bboxes
new_bboxes = new_bboxes.view(-1, 4) / new_bboxes.new_tensor(scale_factor)
return new_bboxes.view(bboxes.shape)
def bbox2roi(bbox_list):
"""Convert a list of bboxes to roi format.
Args:
bbox_list (list[Tensor]): a list of bboxes corresponding to a batch
of images.
Returns:
Tensor: shape (n, 5), [batch_ind, x1, y1, x2, y2]
"""
rois_list = []
for img_id, bboxes in enumerate(bbox_list):
if bboxes.size(0) > 0:
img_inds = bboxes.new_full((bboxes.size(0), 1), img_id)
rois = torch.cat([img_inds, bboxes[:, :4]], dim=-1)
else:
rois = bboxes.new_zeros((0, 5))
rois_list.append(rois)
rois = torch.cat(rois_list, 0)
return rois
def roi2bbox(rois):
"""Convert rois to bounding box format.
Args:
rois (torch.Tensor): RoIs with the shape (n, 5) where the first
column indicates batch id of each RoI.
Returns:
list[torch.Tensor]: Converted boxes of corresponding rois.
"""
bbox_list = []
img_ids = torch.unique(rois[:, 0].cpu(), sorted=True)
for img_id in img_ids:
inds = (rois[:, 0] == img_id.item())
bbox = rois[inds, 1:]
bbox_list.append(bbox)
return bbox_list
def bbox2result(bboxes, labels, num_classes):
"""Convert detection results to a list of numpy arrays.
Args:
bboxes (Tensor): shape (n, 5)
labels (Tensor): shape (n, )
num_classes (int): class number, including background class
Returns:
list(ndarray): bbox results of each class
"""
if bboxes.shape[0] == 0:
return [np.zeros((0, 5), dtype=np.float32) for i in range(num_classes)]
else:
bboxes = bboxes.cpu().numpy()
labels = labels.cpu().numpy()
return [bboxes[labels == i, :] for i in range(num_classes)]
def distance2bbox(points, distance, max_shape=None):
"""Decode distance prediction to bounding box.
Args:
points (Tensor): Shape (n, 2), [x, y].
distance (Tensor): Distance from the given point to 4
boundaries (left, top, right, bottom).
max_shape (tuple): Shape of the image.
Returns:
Tensor: Decoded bboxes.
"""
x1 = points[:, 0] - distance[:, 0]
y1 = points[:, 1] - distance[:, 1]
x2 = points[:, 0] + distance[:, 2]
y2 = points[:, 1] + distance[:, 3]
if max_shape is not None:
x1 = x1.clamp(min=0, max=max_shape[1])
y1 = y1.clamp(min=0, max=max_shape[0])
x2 = x2.clamp(min=0, max=max_shape[1])
y2 = y2.clamp(min=0, max=max_shape[0])
return torch.stack([x1, y1, x2, y2], -1)
def bbox2distance(points, bbox, max_dis=None, eps=0.1):
"""Decode bounding box based on distances.
Args:
points (Tensor): Shape (n, 2), [x, y].
bbox (Tensor): Shape (n, 4), "xyxy" format
max_dis (float): Upper bound of the distance.
eps (float): a small value to ensure target < max_dis instead of <=
Returns:
Tensor: Decoded distances.
"""
left = points[:, 0] - bbox[:, 0]
top = points[:, 1] - bbox[:, 1]
right = bbox[:, 2] - points[:, 0]
bottom = bbox[:, 3] - points[:, 1]
if max_dis is not None:
left = left.clamp(min=0, max=max_dis - eps)
top = top.clamp(min=0, max=max_dis - eps)
right = right.clamp(min=0, max=max_dis - eps)
bottom = bottom.clamp(min=0, max=max_dis - eps)
return torch.stack([left, top, right, bottom], -1)
| 1 | 20,197 | Is this necessary? If so, we need to update the docstring. | open-mmlab-mmdetection | py |
@@ -116,8 +116,8 @@ module Ncr
# the highest approver on the stack, pending preferred if status indicates
def current_approver_email_address
- if self.individual_approvals.pending.first
- self.individual_approvals.pending.first.user.email_address
+ if self.pending?
+ self.individual_approvals.where(status: 'actionable').first.user.email_address
else
self.approving_official.email_address
end | 1 | require 'csv'
module Ncr
# Make sure all table names use 'ncr_XXX'
def self.table_name_prefix
'ncr_'
end
EXPENSE_TYPES = %w(BA60 BA61 BA80)
BUILDING_NUMBERS = YAML.load_file("#{Rails.root}/config/data/ncr/building_numbers.yml")
class WorkOrder < ActiveRecord::Base
# must define before include PurchaseCardMixin
def self.purchase_amount_column_name
:amount
end
include ValueHelper
include ProposalDelegate
include PurchaseCardMixin
# This is a hack to be able to attribute changes to the correct user. This attribute needs to be set explicitly; the update comment will then use that user as the "commenter". Defaults to the requester.
attr_accessor :modifier
after_initialize :set_defaults
before_validation :normalize_values
before_update :record_changes
validates :cl_number, format: {
with: /\ACL\d{7}\z/,
message: "must start with 'CL', followed by seven numbers"
}, allow_blank: true
validates :expense_type, inclusion: {in: EXPENSE_TYPES}, presence: true
validates :function_code, format: {
with: /\APG[A-Z0-9]{3}\z/,
message: "must start with 'PG', followed by three letters or numbers"
}, allow_blank: true
validates :project_title, presence: true
validates :vendor, presence: true
validates :building_number, presence: true
validates :rwa_number, presence: true, if: :ba80?
validates :rwa_number, format: {
with: /\A[a-zA-Z][0-9]{7}\z/,
message: "must be one letter followed by 7 numbers"
}, allow_blank: true
validates :soc_code, format: {
with: /\A[A-Z0-9]{3}\z/,
message: "must be three letters or numbers"
}, allow_blank: true
def set_defaults
self.direct_pay ||= false
self.not_to_exceed ||= false
self.emergency ||= false
end
# For budget attributes, converts empty strings to `nil`, so that the request isn't shown as being modified when the fields appear in the edit form.
def normalize_values
if self.cl_number.present?
self.cl_number = self.cl_number.upcase
self.cl_number.prepend('CL') unless self.cl_number.start_with?('CL')
else
self.cl_number = nil
end
if self.function_code.present?
self.function_code.upcase!
self.function_code.prepend('PG') unless self.function_code.start_with?('PG')
else
self.function_code = nil
end
if self.soc_code.present?
self.soc_code.upcase!
else
self.soc_code = nil
end
end
def approver_email_frozen?
approval = self.individual_approvals.first
approval && !approval.actionable?
end
def approver_changed?(approval_email)
self.approving_official && self.approving_official.email_address != approval_email
end
# Check the approvers, accounting for frozen approving official
def approvers_emails(selected_approving_official_email)
emails = self.system_approver_emails
if self.approver_email_frozen?
emails.unshift(self.approving_official.email_address)
else
emails.unshift(selected_approving_official_email)
end
emails
end
def setup_approvals_and_observers(selected_approving_official_email)
emails = self.approvers_emails(selected_approving_official_email)
if self.emergency
emails.each{|e| self.add_observer(e)}
# skip state machine
self.proposal.update(status: 'approved')
else
original_approvers = self.proposal.individual_approvals.non_pending.map(&:user)
self.force_approvers(emails)
self.notify_removed_approvers(original_approvers)
end
end
def approving_official
self.approvers.first
end
# the highest approver on the stack, pending preferred if status indicates
def current_approver_email_address
if self.individual_approvals.pending.first
self.individual_approvals.pending.first.user.email_address
else
self.approving_official.email_address
end
end
def email_approvers
Dispatcher.on_proposal_update(self.proposal, self.modifier)
end
# Ignore values in certain fields if they aren't relevant. May want to
# split these into different models
def self.relevant_fields(expense_type)
fields = [:description, :amount, :expense_type, :vendor, :not_to_exceed,
:building_number, :org_code, :direct_pay, :cl_number, :function_code, :soc_code]
case expense_type
when 'BA61'
fields << :emergency
when 'BA80'
fields.concat([:rwa_number, :code])
end
fields
end
def relevant_fields
Ncr::WorkOrder.relevant_fields(self.expense_type)
end
# Methods for Client Data interface
def fields_for_display
attributes = self.relevant_fields
attributes.map{|key| [WorkOrder.human_attribute_name(key), self[key]]}
end
# will return nil if the `org_code` is blank or not present in Organization list
def organization
# TODO reference by `code` rather than storing the whole thing
code = (self.org_code || '').split(' ', 2)[0]
Ncr::Organization.find(code)
end
def ba80?
self.expense_type == 'BA80'
end
def public_identifier
"FY" + self.fiscal_year.to_s.rjust(2, "0") + "-#{self.proposal.id}"
end
def total_price
self.amount || 0.0
end
# may be replaced with paper-trail or similar at some point
def version
self.updated_at.to_i
end
def name
self.project_title
end
def system_approver_emails
results = []
if %w(BA60 BA61).include?(self.expense_type)
unless self.organization.try(:whsc?)
results << self.class.ba61_tier1_budget_mailbox
end
results << self.class.ba61_tier2_budget_mailbox
else # BA80
if self.organization.try(:ool?)
results << self.class.ool_ba80_budget_mailbox
else
results << self.class.ba80_budget_mailbox
end
end
results
end
def self.ba61_tier1_budget_mailbox
self.approver_with_role('BA61_tier1_budget_approver')
end
def self.ba61_tier2_budget_mailbox
self.approver_with_role('BA61_tier2_budget_approver')
end
def self.approver_with_role(role_name)
users = User.with_role(role_name).where(client_slug: 'ncr')
if users.empty?
fail "Missing User with role #{role_name} -- did you run rake db:migrate and rake db:seed?"
end
users.first.email_address
end
def self.ba80_budget_mailbox
self.approver_with_role('BA80_budget_approver')
end
def self.ool_ba80_budget_mailbox
self.approver_with_role('OOL_BA80_budget_approver')
end
def org_id
self.organization.try(:code)
end
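# Extracts the leading 8-character building code when the stored building number includes a trailing description.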
def building_id
regex = /\A(\w{8}) .*\z/
if self.building_number && regex.match(self.building_number)
regex.match(self.building_number)[1]
else
self.building_number
end
end
def as_json
super.merge(org_id: self.org_id, building_id: self.building_id)
end
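# Two-digit fiscal year based on the creation date; October or later rolls into the next fiscal year.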
def fiscal_year
year = self.created_at.nil? ? Time.zone.now.year : self.created_at.year
month = self.created_at.nil? ? Time.zone.now.month : self.created_at.month
if month >= 10
year += 1
end
year % 100 # convert to two-digit
end
protected
# TODO move to Proposal model
def record_changes
changed_attributes = self.changed_attributes.except(:updated_at)
comment_texts = []
bullet = changed_attributes.length > 1 ? '- ' : ''
changed_attributes.each do |key, value|
former = property_to_s(self.send(key + "_was"))
value = property_to_s(self[key])
property_name = WorkOrder.human_attribute_name(key)
comment_texts << WorkOrder.update_comment_format(property_name, value, bullet, former)
end
if !comment_texts.empty?
if self.approved?
comment_texts << "_Modified post-approval_"
end
self.proposal.comments.create(
comment_text: comment_texts.join("\n"),
update_comment: true,
user: self.modifier || self.requester
)
end
end
def self.update_comment_format key, value, bullet, former=nil
from = former ? "from #{former} " : ''
"#{bullet}*#{key}* was changed " + from + "to #{value}"
end
# Generally shouldn't be called directly as it doesn't account for
# emergencies, or notify removed approvers
def force_approvers(emails)
individuals = emails.map do |email|
user = User.for_email(email)
# Reuse existing approvals, if present
self.proposal.existing_approval_for(user) || Approvals::Individual.new(user: user)
end
self.proposal.root_approval = Approvals::Serial.new(child_approvals: individuals)
end
def notify_removed_approvers(original_approvers)
current_approvers = self.proposal.individual_approvals.non_pending.map(&:user)
removed_approvers_to_notify = original_approvers - current_approvers
Dispatcher.on_approver_removal(self.proposal, removed_approvers_to_notify)
end
end
end
| 1 | 14,426 | Checking for actionable here instead of grabbing the first pending approval. I tested this with proposal ID 265 on staging. | 18F-C2 | rb |
@@ -100,7 +100,10 @@ class CallbackChecker(QObject):
if self._result is self.UNSET:
with self._qtbot.waitSignal(self.got_result, timeout=2000):
pass
- assert self._result == expected
+ self._assert_result(self._result, expected)
+
+ def _assert_result(self, result, expected):
+ assert result == expected
@pytest.fixture | 1 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2017 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
# pylint: disable=invalid-name
"""pytest fixtures used by the whole testsuite.
See https://pytest.org/latest/fixture.html
"""
import sys
import tempfile
import itertools
import textwrap
import unittest.mock
import types
import attr
import pytest
import py.path # pylint: disable=no-name-in-module
import helpers.stubs as stubsmod
from qutebrowser.config import config, configdata, configtypes, configexc
from qutebrowser.utils import objreg, standarddir
from qutebrowser.browser.webkit import cookies
from qutebrowser.misc import savemanager, sql
from qutebrowser.keyinput import modeman
from PyQt5.QtCore import pyqtSignal, QEvent, QSize, Qt, QObject
from PyQt5.QtGui import QKeyEvent
from PyQt5.QtWidgets import QWidget, QHBoxLayout, QVBoxLayout
from PyQt5.QtNetwork import QNetworkCookieJar
class WinRegistryHelper:
"""Helper class for win_registry."""
@attr.s
class FakeWindow:
"""A fake window object for the registry."""
registry = attr.ib()
def windowTitle(self):
return 'window title - qutebrowser'
def __init__(self):
self._ids = []
def add_window(self, win_id):
assert win_id not in objreg.window_registry
registry = objreg.ObjectRegistry()
window = self.FakeWindow(registry)
objreg.window_registry[win_id] = window
self._ids.append(win_id)
def cleanup(self):
for win_id in self._ids:
del objreg.window_registry[win_id]
class CallbackChecker(QObject):
"""Check if a value provided by a callback is the expected one."""
got_result = pyqtSignal(object)
UNSET = object()
def __init__(self, qtbot, parent=None):
super().__init__(parent)
self._qtbot = qtbot
self._result = self.UNSET
def callback(self, result):
"""Callback which can be passed to runJavaScript."""
self._result = result
self.got_result.emit(result)
def check(self, expected):
"""Wait until the JS result arrived and compare it."""
if self._result is self.UNSET:
with self._qtbot.waitSignal(self.got_result, timeout=2000):
pass
assert self._result == expected
@pytest.fixture
def callback_checker(qtbot):
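"""Fixture providing a CallbackChecker bound to the given qtbot."""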
return CallbackChecker(qtbot)
class FakeStatusBar(QWidget):
"""Fake statusbar to test progressbar sizing."""
def __init__(self, parent=None):
super().__init__(parent)
self.hbox = QHBoxLayout(self)
self.hbox.addStretch()
self.hbox.setContentsMargins(0, 0, 0, 0)
self.setAttribute(Qt.WA_StyledBackground, True)
self.setStyleSheet('background-color: red;')
def minimumSizeHint(self):
return QSize(1, self.fontMetrics().height())
@pytest.fixture
def fake_statusbar(qtbot):
"""Fixture providing a statusbar in a container window."""
container = QWidget()
qtbot.add_widget(container)
vbox = QVBoxLayout(container)
vbox.addStretch()
statusbar = FakeStatusBar(container)
# to make sure container isn't GCed
# pylint: disable=attribute-defined-outside-init
statusbar.container = container
vbox.addWidget(statusbar)
# pylint: enable=attribute-defined-outside-init
with qtbot.waitExposed(container):
container.show()
return statusbar
@pytest.fixture
def win_registry():
"""Fixture providing a window registry for win_id 0 and 1."""
helper = WinRegistryHelper()
helper.add_window(0)
yield helper
helper.cleanup()
@pytest.fixture
def tab_registry(win_registry):
"""Fixture providing a tab registry for win_id 0."""
registry = objreg.ObjectRegistry()
objreg.register('tab-registry', registry, scope='window', window=0)
yield registry
objreg.delete('tab-registry', scope='window', window=0)
@pytest.fixture
def fake_web_tab(stubs, tab_registry, mode_manager, qapp):
"""Fixture providing the FakeWebTab *class*."""
return stubs.FakeWebTab
def _generate_cmdline_tests():
"""Generate testcases for test_split_binding."""
@attr.s
class TestCase:
cmd = attr.ib()
valid = attr.ib()
separators = [';;', ' ;; ', ';; ', ' ;;']
invalid = ['foo', '']
valid = ['leave-mode', 'hint all']
# Valid command only -> valid
for item in valid:
yield TestCase(''.join(item), True)
# Invalid command only -> invalid
for item in invalid:
yield TestCase(''.join(item), False)
# Invalid command combined with invalid command -> invalid
for item in itertools.product(invalid, separators, invalid):
yield TestCase(''.join(item), False)
# Valid command combined with valid command -> valid
for item in itertools.product(valid, separators, valid):
yield TestCase(''.join(item), True)
# Valid command combined with invalid command -> invalid
for item in itertools.product(valid, separators, invalid):
yield TestCase(''.join(item), False)
# Invalid command combined with valid command -> invalid
for item in itertools.product(invalid, separators, valid):
yield TestCase(''.join(item), False)
# Command with no_cmd_split combined with an "invalid" command -> valid
for item in itertools.product(['bind x open'], separators, invalid):
yield TestCase(''.join(item), True)
# Partial command
yield TestCase('message-i', False)
@pytest.fixture(params=_generate_cmdline_tests(), ids=lambda e: e.cmd)
def cmdline_test(request):
"""Fixture which generates tests for things validating commandlines."""
return request.param
@pytest.fixture(scope='session')
def configdata_init():
"""Initialize configdata if needed."""
if configdata.DATA is None:
configdata.init()
@pytest.fixture
def config_stub(stubs, monkeypatch, configdata_init):
"""Fixture which provides a fake config object."""
yaml_config = stubs.FakeYamlConfig()
conf = config.Config(yaml_config=yaml_config)
monkeypatch.setattr(config, 'instance', conf)
container = config.ConfigContainer(conf)
monkeypatch.setattr(config, 'val', container)
try:
configtypes.Font.monospace_fonts = container.fonts.monospace
except configexc.NoOptionError:
# Completion tests patch configdata so fonts.monospace is unavailable.
pass
conf.val = container # For easier use in tests
return conf
@pytest.fixture
def key_config_stub(config_stub, monkeypatch):
"""Fixture which provides a fake key config object."""
keyconf = config.KeyConfig(config_stub)
monkeypatch.setattr(config, 'key_instance', keyconf)
return keyconf
@pytest.fixture
def host_blocker_stub(stubs):
"""Fixture which provides a fake host blocker object."""
stub = stubs.HostBlockerStub()
objreg.register('host-blocker', stub)
yield stub
objreg.delete('host-blocker')
@pytest.fixture
def quickmark_manager_stub(stubs):
"""Fixture which provides a fake quickmark manager object."""
stub = stubs.QuickmarkManagerStub()
objreg.register('quickmark-manager', stub)
yield stub
objreg.delete('quickmark-manager')
@pytest.fixture
def bookmark_manager_stub(stubs):
"""Fixture which provides a fake bookmark manager object."""
stub = stubs.BookmarkManagerStub()
objreg.register('bookmark-manager', stub)
yield stub
objreg.delete('bookmark-manager')
@pytest.fixture
def session_manager_stub(stubs):
"""Fixture which provides a fake session-manager object."""
stub = stubs.SessionManagerStub()
objreg.register('session-manager', stub)
yield stub
objreg.delete('session-manager')
@pytest.fixture
def tabbed_browser_stubs(qapp, stubs, win_registry):
"""Fixture providing a fake tabbed-browser object on win_id 0 and 1."""
win_registry.add_window(1)
stubs = [stubs.TabbedBrowserStub(), stubs.TabbedBrowserStub()]
objreg.register('tabbed-browser', stubs[0], scope='window', window=0)
objreg.register('tabbed-browser', stubs[1], scope='window', window=1)
yield stubs
objreg.delete('tabbed-browser', scope='window', window=0)
objreg.delete('tabbed-browser', scope='window', window=1)
@pytest.fixture
def app_stub(stubs):
"""Fixture which provides a fake app object."""
stub = stubs.ApplicationStub()
objreg.register('app', stub)
yield stub
objreg.delete('app')
@pytest.fixture
def status_command_stub(stubs, qtbot, win_registry):
"""Fixture which provides a fake status-command object."""
cmd = stubs.StatusBarCommandStub()
objreg.register('status-command', cmd, scope='window', window=0)
qtbot.addWidget(cmd)
yield cmd
objreg.delete('status-command', scope='window', window=0)
@pytest.fixture(scope='session')
def stubs():
"""Provide access to stub objects useful for testing."""
return stubsmod
@pytest.fixture(scope='session')
def unicode_encode_err():
"""Provide a fake UnicodeEncodeError exception."""
return UnicodeEncodeError('ascii', # codec
'', # object
0, # start
2, # end
'fake exception') # reason
@pytest.fixture(scope='session')
def qnam(qapp):
"""Session-wide QNetworkAccessManager."""
from PyQt5.QtNetwork import QNetworkAccessManager
nam = QNetworkAccessManager()
nam.setNetworkAccessible(QNetworkAccessManager.NotAccessible)
return nam
@pytest.fixture
def webengineview():
"""Get a QWebEngineView if QtWebEngine is available."""
QtWebEngineWidgets = pytest.importorskip('PyQt5.QtWebEngineWidgets')
return QtWebEngineWidgets.QWebEngineView()
@pytest.fixture
def webpage(qnam):
"""Get a new QWebPage object."""
QtWebKitWidgets = pytest.importorskip('PyQt5.QtWebKitWidgets')
page = QtWebKitWidgets.QWebPage()
page.networkAccessManager().deleteLater()
page.setNetworkAccessManager(qnam)
return page
@pytest.fixture
def webview(qtbot, webpage):
"""Get a new QWebView object."""
QtWebKitWidgets = pytest.importorskip('PyQt5.QtWebKitWidgets')
view = QtWebKitWidgets.QWebView()
qtbot.add_widget(view)
view.page().deleteLater()
view.setPage(webpage)
view.resize(640, 480)
return view
@pytest.fixture
def webframe(webpage):
"""Convenience fixture to get a mainFrame of a QWebPage."""
return webpage.mainFrame()
@pytest.fixture
def fake_keyevent_factory():
"""Fixture that when called will return a mock instance of a QKeyEvent."""
def fake_keyevent(key, modifiers=0, text='', typ=QEvent.KeyPress):
"""Generate a new fake QKeyPressEvent."""
evtmock = unittest.mock.create_autospec(QKeyEvent, instance=True)
evtmock.key.return_value = key
evtmock.modifiers.return_value = modifiers
evtmock.text.return_value = text
evtmock.type.return_value = typ
return evtmock
return fake_keyevent
@pytest.fixture
def cookiejar_and_cache(stubs):
"""Fixture providing a fake cookie jar and cache."""
jar = QNetworkCookieJar()
ram_jar = cookies.RAMCookieJar()
cache = stubs.FakeNetworkCache()
objreg.register('cookie-jar', jar)
objreg.register('ram-cookie-jar', ram_jar)
objreg.register('cache', cache)
yield
objreg.delete('cookie-jar')
objreg.delete('ram-cookie-jar')
objreg.delete('cache')
@pytest.fixture
def py_proc():
"""Get a python executable and args list which executes the given code."""
if getattr(sys, 'frozen', False):
pytest.skip("Can't be run when frozen")
def func(code):
return (sys.executable, ['-c', textwrap.dedent(code.strip('\n'))])
return func
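# Illustrative usage sketch, not part of the original file: the (executable, args)
# tuple returned by the py_proc fixture plugs straight into subprocess; the test
# name below is made up for the example.
#
#     def test_py_proc_example(py_proc):
#         executable, args = py_proc("print('hello')")
#         proc = subprocess.run([executable] + args, capture_output=True, text=True)
#         assert proc.stdout.strip() == 'hello'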
@pytest.fixture
def fake_save_manager():
"""Create a mock of save-manager and register it into objreg."""
fake_save_manager = unittest.mock.Mock(spec=savemanager.SaveManager)
objreg.register('save-manager', fake_save_manager)
yield fake_save_manager
objreg.delete('save-manager')
@pytest.fixture
def fake_args(request):
ns = types.SimpleNamespace()
ns.backend = 'webengine' if request.config.webengine else 'webkit'
objreg.register('args', ns)
yield ns
objreg.delete('args')
@pytest.fixture
def mode_manager(win_registry, config_stub, qapp):
mm = modeman.ModeManager(0)
objreg.register('mode-manager', mm, scope='window', window=0)
yield mm
objreg.delete('mode-manager', scope='window', window=0)
@pytest.fixture
def config_tmpdir(monkeypatch, tmpdir):
"""Set tmpdir/config as the configdir.
Use this to avoid creating a 'real' config dir (~/.config/qute_test).
"""
confdir = tmpdir / 'config'
confdir.ensure(dir=True)
monkeypatch.setattr(standarddir, 'config', lambda auto=False: str(confdir))
return confdir
@pytest.fixture
def data_tmpdir(monkeypatch, tmpdir):
"""Set tmpdir/data as the datadir.
Use this to avoid creating a 'real' data dir (~/.local/share/qute_test).
"""
datadir = tmpdir / 'data'
datadir.ensure(dir=True)
monkeypatch.setattr(standarddir, 'data', lambda system=False: str(datadir))
return datadir
@pytest.fixture
def runtime_tmpdir(monkeypatch, tmpdir):
"""Set tmpdir/runtime as the runtime dir.
Use this to avoid creating a 'real' runtime dir.
"""
runtimedir = tmpdir / 'runtime'
runtimedir.ensure(dir=True)
monkeypatch.setattr(standarddir, 'runtime', lambda: str(runtimedir))
return runtimedir
@pytest.fixture
def redirect_webengine_data(data_tmpdir, monkeypatch):
"""Set XDG_DATA_HOME and HOME to a temp location.
While data_tmpdir covers most cases by redirecting standarddir.data(), this
is not enough for places QtWebEngine references the data dir internally.
For these, we need to set the environment variable to redirect data access.
We also set HOME as in some places, the home directory is used directly...
"""
monkeypatch.setenv('XDG_DATA_HOME', str(data_tmpdir))
monkeypatch.setenv('HOME', str(data_tmpdir))
@pytest.fixture()
def short_tmpdir():
"""A short temporary directory for a XDG_RUNTIME_DIR."""
with tempfile.TemporaryDirectory() as tdir:
yield py.path.local(tdir) # pylint: disable=no-member
@pytest.fixture
def init_sql(data_tmpdir):
"""Initialize the SQL module, and shut it down after the test."""
path = str(data_tmpdir / 'test.db')
sql.init(path)
yield
sql.close()
class ModelValidator:
"""Validates completion models."""
def __init__(self, modeltester):
modeltester.data_display_may_return_none = True
self._model = None
self._modeltester = modeltester
def set_model(self, model):
self._model = model
self._modeltester.check(model)
def validate(self, expected):
assert self._model.rowCount() == len(expected)
for row, items in enumerate(expected):
for col, item in enumerate(items):
assert self._model.data(self._model.index(row, col)) == item
@pytest.fixture
def model_validator(qtmodeltester):
return ModelValidator(qtmodeltester)
| 1 | 19,621 | I made this change because I wasn't getting printouts of the 'result' value when the assert was inlined for some reason. I think it's related to creating new CallbackCheckers maybe? | qutebrowser-qutebrowser | py |
@@ -0,0 +1,11 @@
+using System.IO.Pipelines;
+
+namespace Microsoft.AspNetCore.Server.Kestrel.Internal.Http
+{
+ public interface IHttpParser
+ {
+ bool ParseStartLine<T>(T handler, ReadableBuffer buffer, out ReadCursor consumed, out ReadCursor examined) where T : IHttpStartLineHandler;
+
+ bool ParseHeaders<T>(T handler, ReadableBuffer buffer, out ReadCursor consumed, out ReadCursor examined) where T : IHttpHeadersHandler;
+ }
+} | 1 | 1 | 11,699 | nit: the correct name is "request line", so let's use the right one here :) | aspnet-KestrelHttpServer | .cs |
|
@@ -8,9 +8,12 @@ import os
SQLALCHEMY_DATABASE_URI = os.environ['SQLALCHEMY_DATABASE_URI']
REGISTRY_URL = os.environ['REGISTRY_URL']
-CATALOG_HOST = os.environ['CATALOG_HOST']
-CATALOG_URL = 'https://%s' % CATALOG_HOST
+CATALOG_URL = os.environ.get('CATALOG_URL')
+if not CATALOG_URL.startswith("https"):
+ print("WARNING: INSECURE CONNECTION TO CATALOG")
+ # require verbose environment variable to be defined
+ assert os.environ['ALLOW_INSECURE_CATALOG_ACCESS']
PACKAGE_BUCKET_NAME = os.environ['PACKAGE_BUCKET_NAME']
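A minimal sketch of the stricter lookup discussed for this hunk, assuming the same
environment-variable names: item access instead of .get() makes a missing
CATALOG_URL fail at import time with a KeyError, instead of surfacing later as an
AttributeError on None.

CATALOG_URL = os.environ['CATALOG_URL']  # KeyError if unset, like the other required vars
if not CATALOG_URL.startswith("https"):
    print("WARNING: INSECURE CONNECTION TO CATALOG")
    # require verbose environment variable to be defined
    assert os.environ['ALLOW_INSECURE_CATALOG_ACCESS']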
| 1 | # Copyright (c) 2017 Quilt Data, Inc. All rights reserved.
"""
Config file for prod/stage. Overrides values in config.py.
"""
import os
SQLALCHEMY_DATABASE_URI = os.environ['SQLALCHEMY_DATABASE_URI']
REGISTRY_URL = os.environ['REGISTRY_URL']
CATALOG_HOST = os.environ['CATALOG_HOST']
CATALOG_URL = 'https://%s' % CATALOG_HOST
PACKAGE_BUCKET_NAME = os.environ['PACKAGE_BUCKET_NAME']
MAIL_SERVER = os.environ['SMTP_HOST']
MAIL_USERNAME = os.environ['SMTP_USERNAME']
MAIL_PASSWORD = os.environ['SMTP_PASSWORD']
# Optional
MIXPANEL_PROJECT_TOKEN = os.getenv('MIXPANEL_PROJECT_TOKEN')
DEPLOYMENT_ID = os.getenv('DEPLOYMENT_ID')
STRIPE_SECRET_KEY = os.getenv('STRIPE_SECRET_KEY')
ENABLE_USER_ENDPOINTS = bool(os.getenv('ENABLE_USER_ENDPOINTS', ''))
SECRET_KEY = os.environ['QUILT_SECRET_KEY']
DEFAULT_SENDER = os.environ['QUILT_DEFAULT_SENDER']
| 1 | 16,875 | This should now use `[]`, so it crashes if the var is missing | quiltdata-quilt | py |
@@ -162,6 +162,19 @@ static CALI_BPF_INLINE int skb_nat_l4_csum_ipv4(struct __sk_buff *skb, size_t of
return ret;
}
+static CALI_BPF_INLINE int update_state_map(struct cali_tc_state *state)
+{
+ int key = 0;
+ struct cali_tc_state *map_state = cali_v4_state_lookup_elem(&key);
+ if (!map_state) {
+ // Shouldn't be possible; the map is pre-allocated.
+ CALI_INFO("State map lookup failed: DROP\n");
+ return -1;
+ }
+ *map_state = *state;
+ return 0;
+}
+
static CALI_BPF_INLINE int forward_or_drop(struct __sk_buff *skb,
struct cali_tc_state *state,
struct fwd *fwd) | 1 | // Project Calico BPF dataplane programs.
// Copyright (c) 2020 Tigera, Inc. All rights reserved.
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, write to the Free Software Foundation, Inc.,
// 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#include <asm/types.h>
#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/icmp.h>
#include <linux/in.h>
#include <linux/udp.h>
#include <linux/if_ether.h>
#include <iproute2/bpf_elf.h>
#include <stdbool.h>
#include <stdint.h>
#include <stddef.h>
#include "bpf.h"
#include "log.h"
#include "skb.h"
#include "policy.h"
#include "conntrack.h"
#include "nat.h"
#include "routes.h"
#include "jump.h"
#include "reasons.h"
#include "icmp.h"
#ifndef CALI_FIB_LOOKUP_ENABLED
#define CALI_FIB_LOOKUP_ENABLED true
#endif
#ifndef CALI_DROP_WORKLOAD_TO_HOST
#define CALI_DROP_WORKLOAD_TO_HOST false
#endif
#ifdef CALI_DEBUG_ALLOW_ALL
/* If we want to just compile the code without defining any policies and to
* avoid compiling out code paths that are not reachable if traffic is denied,
* we can compile it with allow all
*/
static CALI_BPF_INLINE enum calico_policy_result execute_policy_norm(struct __sk_buff *skb,
__u8 ip_proto, __u32 saddr, __u32 daddr, __u16 sport, __u16 dport)
{
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-label"
RULE_START(0);
RULE_END(0, allow);
return CALI_POL_NO_MATCH;
deny:
return CALI_POL_DENY;
allow:
return CALI_POL_ALLOW;
#pragma clang diagnostic pop
}
#else
static CALI_BPF_INLINE enum calico_policy_result execute_policy_norm(struct __sk_buff *skb,
__u8 ip_proto, __u32 saddr, __u32 daddr, __u16 sport, __u16 dport)
{
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-label"
RULE_START(0);
RULE_END(0, deny);
return CALI_POL_NO_MATCH;
deny:
return CALI_POL_DENY;
allow:
return CALI_POL_ALLOW;
#pragma clang diagnostic pop
}
#endif /* CALI_DEBUG_ALLOW_ALL */
__attribute__((section("1/0")))
int calico_tc_norm_pol_tail(struct __sk_buff *skb)
{
CALI_DEBUG("Entering normal policy tail call\n");
__u32 key = 0;
struct cali_tc_state *state = cali_v4_state_lookup_elem(&key);
if (!state) {
CALI_DEBUG("State map lookup failed: DROP\n");
goto deny;
}
state->pol_rc = execute_policy_norm(skb, state->ip_proto, state->ip_src,
state->ip_dst, state->sport, state->dport);
bpf_tail_call(skb, &cali_jump, 1);
CALI_DEBUG("Tail call to post-policy program failed: DROP\n");
deny:
return TC_ACT_SHOT;
}
struct fwd {
int res;
uint32_t mark;
enum calico_reason reason;
#if FIB_ENABLED
uint32_t fib_flags;
bool fib;
#endif
};
#if FIB_ENABLED
#define fwd_fib(fwd) ((fwd)->fib)
#define fwd_fib_set(fwd, v) ((fwd)->fib = v)
#define fwd_fib_set_flags(fwd, flags) ((fwd)->fib_flags = flags)
#else
#define fwd_fib(fwd) false
#define fwd_fib_set(fwd, v)
#define fwd_fib_set_flags(fwd, flags)
#endif
static CALI_BPF_INLINE struct fwd calico_tc_skb_accepted(struct __sk_buff *skb,
struct iphdr *ip_header,
struct cali_tc_state *state,
struct calico_nat_dest *nat_dest);
static CALI_BPF_INLINE int skb_nat_l4_csum_ipv4(struct __sk_buff *skb, size_t off,
__be32 ip_from, __be32 ip_to,
__u16 port_from, __u16 port_to,
uint64_t flags)
{
int ret = 0;
if (ip_from != ip_to) {
CALI_DEBUG("L4 checksum update (csum is at %d) IP from %x to %x\n", off,
be32_to_host(ip_from), be32_to_host(ip_to));
ret = bpf_l4_csum_replace(skb, off, ip_from, ip_to, flags | BPF_F_PSEUDO_HDR | 4);
CALI_DEBUG("bpf_l4_csum_replace(IP): %d\n", ret);
}
if (port_from != port_to) {
CALI_DEBUG("L4 checksum update (csum is at %d) port from %d to %d\n",
off, be16_to_host(port_from), be16_to_host(port_to));
int rc = bpf_l4_csum_replace(skb, off, port_from, port_to, flags | 2);
CALI_DEBUG("bpf_l4_csum_replace(port): %d\n", rc);
ret |= rc;
}
return ret;
}
static CALI_BPF_INLINE int forward_or_drop(struct __sk_buff *skb,
struct cali_tc_state *state,
struct fwd *fwd)
{
int rc = fwd->res;
enum calico_reason reason = fwd->reason;
if (rc == TC_ACT_SHOT) {
goto deny;
}
if (rc == CALI_RES_REDIR_IFINDEX) {
int redir_flags = 0;
if (CALI_F_FROM_HOST) {
redir_flags = BPF_F_INGRESS;
}
/* Revalidate the access to the packet */
if ((void *)(long)skb->data + sizeof(struct ethhdr) > (void *)(long)skb->data_end) {
reason = CALI_REASON_SHORT;
goto deny;
}
/* Swap the MACs as we are turning it back */
struct ethhdr *eth_hdr = (void *)(long)skb->data;
unsigned char mac[ETH_ALEN];
__builtin_memcpy(mac, ð_hdr->h_dest, ETH_ALEN);
__builtin_memcpy(ð_hdr->h_dest, ð_hdr->h_source, ETH_ALEN);
__builtin_memcpy(ð_hdr->h_source, mac, ETH_ALEN);
rc = bpf_redirect(skb->ifindex, redir_flags);
if (rc == TC_ACT_REDIRECT) {
CALI_DEBUG("Redirect to the same interface (%d) succeeded\n", skb->ifindex);
goto skip_fib;
}
CALI_DEBUG("Redirect to the same interface (%d) failed\n", skb->ifindex);
goto deny;
}
#if FIB_ENABLED
// Try a short-circuit FIB lookup.
if (fwd_fib(fwd)) {
/* XXX we might include the tot_len in the fwd, set it once when
* we get the ip_header the first time and only adjust the value
 * when we modify the packet - to avoid getting the header here
* again - it is simpler though.
*/
if (skb_too_short(skb)) {
reason = CALI_REASON_SHORT;
CALI_DEBUG("Too short\n");
goto deny;
}
struct iphdr *ip_header = skb_iphdr(skb);
struct bpf_fib_lookup fib_params = {
.family = 2, /* AF_INET */
.tot_len = be16_to_host(ip_header->tot_len),
.ifindex = skb->ingress_ifindex,
.l4_protocol = state->ip_proto,
.sport = host_to_be16(state->sport),
.dport = host_to_be16(state->dport),
};
/* set the ipv4 here, otherwise the ipv4/6 unions do not get
* zeroed properly
*/
fib_params.ipv4_src = state->ip_src;
fib_params.ipv4_dst = state->ip_dst;
CALI_DEBUG("FIB family=%d\n", fib_params.family);
CALI_DEBUG("FIB tot_len=%d\n", fib_params.tot_len);
CALI_DEBUG("FIB ifindex=%d\n", fib_params.ifindex);
CALI_DEBUG("FIB l4_protocol=%d\n", fib_params.l4_protocol);
CALI_DEBUG("FIB sport=%d\n", be16_to_host(fib_params.sport));
CALI_DEBUG("FIB dport=%d\n", be16_to_host(fib_params.dport));
CALI_DEBUG("FIB ipv4_src=%x\n", be32_to_host(fib_params.ipv4_src));
CALI_DEBUG("FIB ipv4_dst=%x\n", be32_to_host(fib_params.ipv4_dst));
CALI_DEBUG("Traffic is towards the host namespace, doing Linux FIB lookup\n");
rc = bpf_fib_lookup(skb, &fib_params, sizeof(fib_params), fwd->fib_flags);
if (rc == 0) {
CALI_DEBUG("FIB lookup succeeded\n");
/* Since we are going to short circuit the IP stack on
* forward, check if TTL is still alive. If not, let the
* IP stack handle it. It was approved by policy, so it
* is safe.
*/
if (ip_ttl_exceeded(ip_header)) {
rc = TC_ACT_UNSPEC;
goto cancel_fib;
}
// Update the MACs. NAT may have invalidated pointer into the packet so need to
// revalidate.
if ((void *)(long)skb->data + sizeof(struct ethhdr) > (void *)(long)skb->data_end) {
reason = CALI_REASON_SHORT;
goto deny;
}
struct ethhdr *eth_hdr = (void *)(long)skb->data;
__builtin_memcpy(ð_hdr->h_source, fib_params.smac, sizeof(eth_hdr->h_source));
__builtin_memcpy(ð_hdr->h_dest, fib_params.dmac, sizeof(eth_hdr->h_dest));
// Redirect the packet.
CALI_DEBUG("Got Linux FIB hit, redirecting to iface %d.\n", fib_params.ifindex);
rc = bpf_redirect(fib_params.ifindex, 0);
/* now we know we will bypass IP stack and ip->ttl > 1, decrement it! */
if (rc == TC_ACT_REDIRECT) {
ip_dec_ttl(ip_header);
}
} else if (rc < 0) {
CALI_DEBUG("FIB lookup failed (bad input): %d.\n", rc);
rc = TC_ACT_UNSPEC;
} else {
CALI_DEBUG("FIB lookup failed (FIB problem): %d.\n", rc);
rc = TC_ACT_UNSPEC;
}
}
cancel_fib:
#endif /* FIB_ENABLED */
skip_fib:
if (CALI_F_TO_HOST) {
/* If we received the packet from the tunnel and we forward it to a
* workload we need to skip RPF check since there might be a better path
* for the packet if the host has multiple ifaces and might get dropped.
*
* XXX We should check ourselves that we got our tunnel packets only from
* XXX those devices where we expect them before we even decap.
*/
if (CALI_F_FROM_HEP && state->tun_ip != 0) {
fwd->mark = CALI_SKB_MARK_SKIP_RPF;
}
/* Packet is towards host namespace, mark it so that downstream
* programs know that they're not the first to see the packet.
*/
CALI_DEBUG("Traffic is towards host namespace, marking with %x.\n", fwd->mark);
/* FIXME: this ignores the mask that we should be using.
* However, if we mask off the bits, then clang spots that it
* can do a 16-bit store instead of a 32-bit load/modify/store,
* which trips up the validator.
*/
skb->mark = fwd->mark | CALI_SKB_MARK_SEEN; /* make sure that each pkt has SEEN mark */
}
if (CALI_LOG_LEVEL >= CALI_LOG_LEVEL_INFO) {
uint64_t prog_end_time = bpf_ktime_get_ns();
CALI_INFO("Final result=ALLOW (%d). Program execution time: %lluns\n",
rc, prog_end_time-state->prog_start_time);
}
return rc;
deny:
if (CALI_LOG_LEVEL >= CALI_LOG_LEVEL_INFO) {
uint64_t prog_end_time = bpf_ktime_get_ns();
CALI_INFO("Final result=DENY (%x). Program execution time: %lluns\n",
reason, prog_end_time-state->prog_start_time);
}
return TC_ACT_SHOT;
}
static CALI_BPF_INLINE int calico_tc(struct __sk_buff *skb)
{
struct cali_tc_state state = {};
struct fwd fwd = {
.res = TC_ACT_UNSPEC,
.reason = CALI_REASON_UNKNOWN,
};
struct calico_nat_dest *nat_dest = NULL;
bool nat_lvl1_drop = 0;
/* we assume we do FIB and from this point on, we only set it to false
* if we decide not to do it.
*/
fwd_fib_set(&fwd, true);
if (CALI_LOG_LEVEL >= CALI_LOG_LEVEL_INFO) {
state.prog_start_time = bpf_ktime_get_ns();
}
state.tun_ip = 0;
#ifdef CALI_SET_SKB_MARK
/* workaround for test since bpftool run cannot set it in context, wont
* be necessary if fixed in kernel
*/
skb->mark = CALI_SET_SKB_MARK;
#endif
if (!CALI_F_TO_HOST && skb->mark == CALI_SKB_MARK_BYPASS) {
CALI_DEBUG("Packet pre-approved by another hook, allow.\n");
fwd.reason = CALI_REASON_BYPASS;
goto allow;
}
struct iphdr *ip_header;
if (CALI_F_TO_HEP || CALI_F_TO_WEP) {
switch (skb->mark) {
case CALI_SKB_MARK_BYPASS_FWD:
CALI_DEBUG("Packet approved for forward.\n");
fwd.reason = CALI_REASON_BYPASS;
goto allow;
case CALI_SKB_MARK_BYPASS_FWD_SRC_FIXUP:
CALI_DEBUG("Packet approved for forward - src ip fixup\n");
fwd.reason = CALI_REASON_BYPASS;
/* we need to fix up the right src host IP */
if (skb_too_short(skb)) {
fwd.reason = CALI_REASON_SHORT;
CALI_DEBUG("Too short\n");
goto deny;
}
ip_header = skb_iphdr(skb);
__be32 ip_src = ip_header->saddr;
if (ip_src == HOST_IP) {
CALI_DEBUG("src ip fixup not needed %x\n", be32_to_host(ip_src));
goto allow;
}
/* XXX do a proper CT lookup to find this */
ip_header->saddr = HOST_IP;
int l3_csum_off = skb_iphdr_offset(skb) + offsetof(struct iphdr, check);
int res = bpf_l3_csum_replace(skb, l3_csum_off, ip_src, HOST_IP, 4);
if (res) {
fwd.reason = CALI_REASON_CSUM_FAIL;
goto deny;
}
goto allow;
}
}
// Parse the packet.
// TODO Do we need to handle any odd-ball frames here (e.g. with a 0 VLAN header)?
switch (host_to_be16(skb->protocol)) {
case ETH_P_IP:
break;
case ETH_P_ARP:
CALI_DEBUG("ARP: allowing packet\n");
fwd_fib_set(&fwd, false);
goto allow;
case ETH_P_IPV6:
if (CALI_F_WEP) {
CALI_DEBUG("IPv6 from workload: drop\n");
return TC_ACT_SHOT;
} else {
// FIXME: support IPv6.
CALI_DEBUG("IPv6 on host interface: allow\n");
return TC_ACT_UNSPEC;
}
default:
if (CALI_F_WEP) {
CALI_DEBUG("Unknown ethertype (%x), drop\n", be16_to_host(skb->protocol));
goto deny;
} else {
CALI_DEBUG("Unknown ethertype on host interface (%x), allow\n",
be16_to_host(skb->protocol));
return TC_ACT_UNSPEC;
}
}
if (skb_too_short(skb)) {
fwd.reason = CALI_REASON_SHORT;
CALI_DEBUG("Too short\n");
goto deny;
}
ip_header = skb_iphdr(skb);
if (dnat_should_decap() && is_vxlan_tunnel(ip_header)) {
struct udphdr *udp_header = (void*)(ip_header+1);
/* decap on host ep only if directly for the node */
CALI_DEBUG("VXLAN tunnel packet to %x (host IP=%x)\n", ip_header->daddr, HOST_IP);
if (rt_addr_is_local_host(ip_header->daddr) &&
vxlan_udp_csum_ok(udp_header) &&
vxlan_size_ok(skb, udp_header) &&
vxlan_vni_is_valid(skb, udp_header) &&
vxlan_vni(skb, udp_header) == CALI_VXLAN_VNI) {
state.tun_ip = ip_header->saddr;
CALI_DEBUG("vxlan decap\n");
if (vxlan_v4_decap(skb)) {
fwd.reason = CALI_REASON_DECAP_FAIL;
goto deny;
}
if (skb_too_short(skb)) {
fwd.reason = CALI_REASON_SHORT;
CALI_DEBUG("Too short after VXLAN decap\n");
goto deny;
}
ip_header = skb_iphdr(skb);
CALI_DEBUG("vxlan decap origin %x\n", be32_to_host(state.tun_ip));
}
}
// Drop malformed IP packets
if (ip_header->ihl < 5) {
fwd.reason = CALI_REASON_IP_MALFORMED;
CALI_DEBUG("Drop malformed IP packets\n");
goto deny;
} else if (ip_header->ihl > 5) {
/* Drop packets with IP options from/to WEP.
* Also drop packets with IP options if the dest IP is not host IP
*/
if (CALI_F_WEP || (CALI_F_FROM_HEP && !rt_addr_is_local_host(ip_header->daddr))) {
fwd.reason = CALI_REASON_IP_OPTIONS;
CALI_DEBUG("Drop packets with IP options\n");
goto deny;
}
CALI_DEBUG("Allow packets with IP options and dst IP = hostIP\n");
goto allow;
}
// Setting all of these up-front to keep the verifier happy.
struct tcphdr *tcp_header = (void*)(ip_header+1);
struct udphdr *udp_header = (void*)(ip_header+1);
struct icmphdr *icmp_header = (void*)(ip_header+1);
tc_state_fill_from_iphdr(&state, ip_header);
switch (state.ip_proto) {
case IPPROTO_TCP:
// Re-check buffer space for TCP (has larger headers than UDP).
if (!skb_has_data_after(skb, ip_header, sizeof(struct tcphdr))) {
CALI_DEBUG("Too short for TCP: DROP\n");
goto deny;
}
state.sport = be16_to_host(tcp_header->source);
state.dport = be16_to_host(tcp_header->dest);
CALI_DEBUG("TCP; ports: s=%d d=%d\n", state.sport, state.dport);
break;
case IPPROTO_UDP:
state.sport = be16_to_host(udp_header->source);
state.dport = be16_to_host(udp_header->dest);
CALI_DEBUG("UDP; ports: s=%d d=%d\n", state.sport, state.dport);
break;
case IPPROTO_ICMP:
icmp_header = (void*)(ip_header+1);
CALI_DEBUG("ICMP; type=%d code=%d\n",
icmp_header->type, icmp_header->code);
break;
case 4:
// IPIP
if (CALI_F_HEP) {
// TODO IPIP whitelist.
CALI_DEBUG("IPIP: allow\n");
fwd_fib_set(&fwd, false);
goto allow;
}
default:
CALI_DEBUG("Unknown protocol (%d), unable to extract ports\n", (int)state.ip_proto);
}
state.pol_rc = CALI_POL_NO_MATCH;
switch (state.ip_proto) {
case IPPROTO_TCP:
case IPPROTO_UDP:
case IPPROTO_ICMP:
break;
default:
if (CALI_F_HEP) {
// FIXME: allow unknown protocols through on host endpoints.
goto allow;
}
// FIXME non-port based conntrack.
goto deny;
}
struct ct_ctx ct_lookup_ctx = {
.skb = skb,
.proto = state.ip_proto,
.src = state.ip_src,
.sport = state.sport,
.dst = state.ip_dst,
.dport = state.dport,
.tun_ip = state.tun_ip,
};
if (state.ip_proto == IPPROTO_TCP) {
if (!skb_has_data_after(skb, ip_header, sizeof(struct tcphdr))) {
CALI_DEBUG("Too short for TCP: DROP\n");
goto deny;
}
tcp_header = (void*)(ip_header+1);
ct_lookup_ctx.tcp = tcp_header;
}
/* Do conntrack lookup before anything else */
state.ct_result = calico_ct_v4_lookup(&ct_lookup_ctx);
/* check if someone is trying to spoof a tunnel packet */
if (CALI_F_FROM_HEP && ct_result_tun_src_changed(state.ct_result.rc)) {
CALI_DEBUG("dropping tunnel pkt with changed source node\n");
goto deny;
}
if (state.ct_result.flags & CALI_CT_FLAG_NAT_OUT) {
state.flags |= CALI_ST_NAT_OUTGOING;
}
/* We are possibly past (D)NAT, but that is ok, we need to let the IP
 * stack do the RPF check on the source, dest is not important.
*/
if (CALI_F_TO_HOST && ct_result_rpf_failed(state.ct_result.rc)) {
fwd_fib_set(&fwd, false);
}
/* skip policy if we get conntrack hit */
if (ct_result_rc(state.ct_result.rc) != CALI_CT_NEW) {
goto skip_policy;
}
/* Unlike from WEP where we can do RPF by comparing to calico routing
 * info, we must rely on Linux to do it for us when receiving packets
 * from outside of the host. We enforce RPF failed on every new flow.
 * This will make it skip fib in calico_tc_skb_accepted()
*/
if (CALI_F_FROM_HEP) {
ct_result_set_flag(state.ct_result.rc, CALI_CT_RPF_FAILED);
}
/* No conntrack entry, check if we should do NAT */
nat_dest = calico_v4_nat_lookup2(state.ip_src, state.ip_dst,
state.ip_proto, state.dport,
state.tun_ip != 0, &nat_lvl1_drop);
if (nat_lvl1_drop) {
CALI_DEBUG("Packet is from an unauthorised source: DROP\n");
fwd.reason = CALI_REASON_UNAUTH_SOURCE;
goto deny;
}
if (nat_dest != NULL) {
state.post_nat_ip_dst = nat_dest->addr;
state.post_nat_dport = nat_dest->port;
} else {
state.post_nat_ip_dst = state.ip_dst;
state.post_nat_dport = state.dport;
}
if (CALI_F_TO_WEP &&
skb->mark != CALI_SKB_MARK_SEEN &&
cali_rt_flags_local_host(cali_rt_lookup_flags(state.ip_src))) {
/* Host to workload traffic always allowed. We discount traffic that was
* seen by another program since it must have come in via another interface.
*/
CALI_DEBUG("Packet is from the host: ACCEPT\n");
state.pol_rc = CALI_POL_ALLOW;
goto skip_policy;
}
if (CALI_F_FROM_WEP) {
/* Do RPF check since it's our responsibility to police that. */
CALI_DEBUG("Workload RPF check src=%x skb iface=%d.\n",
be32_to_host(state.ip_src), skb->ifindex);
struct cali_rt *r = cali_rt_lookup(state.ip_src);
if (!r) {
CALI_INFO("Workload RPF fail: missing route.\n");
goto deny;
}
if (!cali_rt_flags_local_workload(r->flags)) {
CALI_INFO("Workload RPF fail: not a local workload.\n");
goto deny;
}
if (r->if_index != skb->ifindex) {
CALI_INFO("Workload RPF fail skb iface (%d) != route iface (%d)\n",
skb->ifindex, r->if_index);
goto deny;
}
// Check whether the workload needs outgoing NAT to this address.
if (r->flags & CALI_RT_NAT_OUT) {
if (!(cali_rt_lookup_flags(state.post_nat_ip_dst) & CALI_RT_IN_POOL)) {
CALI_DEBUG("Source is in NAT-outgoing pool "
"but dest is not, need to SNAT.\n");
state.flags |= CALI_ST_NAT_OUTGOING;
}
}
}
/* icmp_type and icmp_code share storage with the ports; now we've used
* the ports set to 0 to do the conntrack lookup, we can set the ICMP fields
* for policy.
*/
if (state.ip_proto == IPPROTO_ICMP) {
state.icmp_type = icmp_header->type;
state.icmp_code = icmp_header->code;
}
// Set up an entry in the state map and then jump to the normal policy program.
int key = 0;
struct cali_tc_state *map_state = cali_v4_state_lookup_elem(&key);
if (!map_state) {
// Shouldn't be possible; the map is pre-allocated.
CALI_INFO("State map lookup failed: DROP\n");
goto deny;
}
state.pol_rc = CALI_POL_NO_MATCH;
if (nat_dest) {
state.nat_dest.addr = nat_dest->addr;
state.nat_dest.port = nat_dest->port;
} else {
state.nat_dest.addr = 0;
state.nat_dest.port = 0;
}
*map_state = state;
if (CALI_F_HEP) {
/* We don't support host-endpoint policy yet, skip straight to
* the epilogue program.
* FIXME we really want to just call calico_tc_skb_accepted()
* here but that runs out of stack space.
*/
map_state->pol_rc = CALI_POL_ALLOW;
bpf_tail_call(skb, &cali_jump, 1);
CALI_DEBUG("Tail call to epilogue program failed: ALLOW\n");
return TC_ACT_UNSPEC;
}
CALI_DEBUG("About to jump to policy program; lack of further "
"logs means policy dropped the packet...\n");
bpf_tail_call(skb, &cali_jump, 0);
CALI_DEBUG("Tail call to policy program failed: DROP\n");
return TC_ACT_SHOT;
skip_policy:
fwd = calico_tc_skb_accepted(skb, ip_header, &state, nat_dest);
allow:
finalize:
return forward_or_drop(skb, &state, &fwd);
deny:
fwd.res = TC_ACT_SHOT;
goto finalize;
}
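/* Illustrative sketch, not part of the original file: the state-map write done in
 * calico_tc() above (lookup + struct copy) can also be expressed with the
 * bpf_map_update_elem() helper using BPF_EXIST, which fails instead of inserting
 * when the pre-allocated slot is somehow missing. The map and state types come
 * from the surrounding code; the helper name is made up.
 */
static CALI_BPF_INLINE int cali_state_update_sketch(struct cali_tc_state *state)
{
	__u32 key = 0;
	return bpf_map_update_elem(&cali_v4_state, &key, state, BPF_EXIST);
}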
__attribute__((section("1/1")))
int calico_tc_skb_accepted_entrypoint(struct __sk_buff *skb)
{
CALI_DEBUG("Entering calico_tc_skb_accepted_entrypoint\n");
struct iphdr *ip_header = NULL;
if (skb_too_short(skb)) {
CALI_DEBUG("Too short\n");
goto deny;
}
ip_header = skb_iphdr(skb);
__u32 key = 0;
struct cali_tc_state *state = bpf_map_lookup_elem(&cali_v4_state, &key);
if (!state) {
CALI_DEBUG("State map lookup failed: DROP\n");
goto deny;
}
struct calico_nat_dest *nat_dest = NULL;
struct calico_nat_dest nat_dest_2 = {
.addr=state->nat_dest.addr,
.port=state->nat_dest.port,
};
if (state->nat_dest.addr != 0) {
nat_dest = &nat_dest_2;
}
struct fwd fwd = calico_tc_skb_accepted(skb, ip_header, state, nat_dest);
return forward_or_drop(skb, state, &fwd);
deny:
return TC_ACT_SHOT;
}
static CALI_BPF_INLINE struct fwd calico_tc_skb_accepted(struct __sk_buff *skb,
struct iphdr *ip_header,
struct cali_tc_state *state,
struct calico_nat_dest *nat_dest)
{
CALI_DEBUG("Entering calico_tc_skb_accepted\n");
enum calico_reason reason = CALI_REASON_UNKNOWN;
int rc = TC_ACT_UNSPEC;
bool fib = false;
struct ct_ctx ct_nat_ctx = {};
int ct_rc = ct_result_rc(state->ct_result.rc);
bool ct_related = ct_result_is_related(state->ct_result.rc);
uint32_t seen_mark;
size_t l4_csum_off = 0, l3_csum_off;
uint32_t fib_flags = 0;
CALI_DEBUG("src=%x dst=%x\n", be32_to_host(state->ip_src), be32_to_host(state->ip_dst));
CALI_DEBUG("post_nat=%x:%d\n", be32_to_host(state->post_nat_ip_dst), state->post_nat_dport);
CALI_DEBUG("tun_ip=%x\n", state->tun_ip);
CALI_DEBUG("pol_rc=%d\n", state->pol_rc);
CALI_DEBUG("sport=%d\n", state->sport);
CALI_DEBUG("flags=%x\n", state->flags);
CALI_DEBUG("ct_rc=%d\n", ct_rc);
CALI_DEBUG("ct_related=%d\n", ct_related);
// Set the dport to 0 to make sure conntrack entries for ICMP are proper, as we use
// dport to hold the ICMP type and code
if (state->ip_proto == IPPROTO_ICMP) {
state->dport = 0;
}
if (CALI_F_FROM_WEP && (state->flags & CALI_ST_NAT_OUTGOING)) {
seen_mark = CALI_SKB_MARK_NAT_OUT;
} else {
/* XXX we do it here again because doing it in one place only
* XXX in calico_tc() irritates the verifier :'(
*/
if (!CALI_F_TO_HOST || !ct_result_rpf_failed(state->ct_result.rc)) {
fib = true;
}
seen_mark = CALI_SKB_MARK_SEEN;
}
/* We check the ttl here to avoid needing complicated handling of
 * related traffic back from the host if we let the host handle it.
*/
CALI_DEBUG("ip->ttl %d\n", ip_header->ttl);
if (ip_ttl_exceeded(ip_header)) {
switch (ct_rc){
case CALI_CT_NEW:
if (nat_dest) {
goto icmp_ttl_exceeded;
}
break;
case CALI_CT_ESTABLISHED_DNAT:
case CALI_CT_ESTABLISHED_SNAT:
goto icmp_ttl_exceeded;
}
}
l3_csum_off = skb_iphdr_offset(skb) + offsetof(struct iphdr, check);
if (ct_related) {
if (ip_header->protocol == IPPROTO_ICMP) {
struct icmphdr *icmp;
bool outer_ip_snat;
/* if we do SNAT ... */
outer_ip_snat = ct_rc == CALI_CT_ESTABLISHED_SNAT;
/* ... there is a return path to the tunnel ... */
outer_ip_snat = outer_ip_snat && state->ct_result.tun_ip;
/* ... and should do encap and it is not DSR or it is leaving host
* and either DSR from WEP or originated at host ... */
outer_ip_snat = outer_ip_snat &&
((dnat_return_should_encap() && !CALI_F_DSR) ||
(CALI_F_TO_HEP &&
((CALI_F_DSR && skb_seen(skb)) || !skb_seen(skb))));
/* ... then fix the outer header IP first */
if (outer_ip_snat) {
ip_header->saddr = state->ct_result.nat_ip;
int res = bpf_l3_csum_replace(skb, l3_csum_off,
state->ip_src, state->ct_result.nat_ip, 4);
if (res) {
reason = CALI_REASON_CSUM_FAIL;
goto deny;
}
CALI_DEBUG("ICMP related: outer IP SNAT to %x\n",
be32_to_host(state->ct_result.nat_ip));
}
if (!icmp_skb_get_hdr(skb, &icmp)) {
CALI_DEBUG("Ooops, we already passed one such a check!!!\n");
goto deny;
}
l3_csum_off += sizeof(*ip_header) + sizeof(*icmp);
ip_header = (struct iphdr *)(icmp + 1); /* skip to inner ip */
/* flip the direction, we need to reverse the original packet */
switch (ct_rc) {
case CALI_CT_ESTABLISHED_SNAT:
/* handle the DSR case, see CALI_CT_ESTABLISHED_SNAT where nat is done */
if (dnat_return_should_encap() && state->ct_result.tun_ip) {
if (CALI_F_DSR) {
/* SNAT will be done after routing, when leaving HEP */
CALI_DEBUG("DSR enabled, skipping SNAT + encap\n");
goto allow;
}
}
ct_rc = CALI_CT_ESTABLISHED_DNAT;
break;
case CALI_CT_ESTABLISHED_DNAT:
if (CALI_F_FROM_HEP && state->tun_ip && ct_result_np_node(state->ct_result)) {
/* Packet is returning from a NAT tunnel, just forward it. */
seen_mark = CALI_SKB_MARK_BYPASS_FWD;
CALI_DEBUG("ICMP related returned from NAT tunnel\n");
goto allow;
}
ct_rc = CALI_CT_ESTABLISHED_SNAT;
break;
}
}
}
struct tcphdr *tcp_header = (void*)(ip_header+1);
struct udphdr *udp_header = (void*)(ip_header+1);
__u8 ihl = ip_header->ihl * 4;
int res = 0;
bool encap_needed = false;
if (state->ip_proto == IPPROTO_ICMP && ct_related) {
/* do not fix up embedded L4 checksum for related ICMP */
} else {
switch (ip_header->protocol) {
case IPPROTO_TCP:
l4_csum_off = skb_l4hdr_offset(skb, ihl) + offsetof(struct tcphdr, check);
break;
case IPPROTO_UDP:
l4_csum_off = skb_l4hdr_offset(skb, ihl) + offsetof(struct udphdr, check);
break;
}
}
switch (ct_rc){
case CALI_CT_NEW:
switch (state->pol_rc) {
case CALI_POL_NO_MATCH:
CALI_DEBUG("Implicitly denied by normal policy: DROP\n");
goto deny;
case CALI_POL_DENY:
CALI_DEBUG("Denied by normal policy: DROP\n");
goto deny;
case CALI_POL_ALLOW:
CALI_DEBUG("Allowed by normal policy: ACCEPT\n");
}
if (CALI_F_FROM_WEP &&
CALI_DROP_WORKLOAD_TO_HOST &&
cali_rt_flags_local_host(
cali_rt_lookup_flags(state->post_nat_ip_dst))) {
CALI_DEBUG("Workload to host traffic blocked by "
"DefaultEndpointToHostAction: DROP\n");
goto deny;
}
ct_nat_ctx.skb = skb;
ct_nat_ctx.proto = state->ip_proto;
ct_nat_ctx.src = state->ip_src;
ct_nat_ctx.sport = state->sport;
ct_nat_ctx.dst = state->post_nat_ip_dst;
ct_nat_ctx.dport = state->post_nat_dport;
ct_nat_ctx.tun_ip = state->tun_ip;
if (state->flags & CALI_ST_NAT_OUTGOING) {
ct_nat_ctx.flags |= CALI_CT_FLAG_NAT_OUT;
}
if (state->ip_proto == IPPROTO_TCP) {
if (!skb_has_data_after(skb, ip_header, sizeof(struct tcphdr))) {
CALI_DEBUG("Too short for TCP: DROP\n");
goto deny;
}
tcp_header = (void*)(ip_header+1);
ct_nat_ctx.tcp = tcp_header;
}
// If we get here, we've passed policy.
if (nat_dest == NULL) {
if (conntrack_create(&ct_nat_ctx, CT_CREATE_NORMAL)) {
CALI_DEBUG("Creating normal conntrack failed\n");
goto deny;
}
goto allow;
}
ct_nat_ctx.orig_dst = state->ip_dst;
ct_nat_ctx.orig_dport = state->dport;
/* fall through as DNAT is now established */
case CALI_CT_ESTABLISHED_DNAT:
/* align with CALI_CT_NEW */
if (ct_rc == CALI_CT_ESTABLISHED_DNAT) {
if (CALI_F_FROM_HEP && state->tun_ip && ct_result_np_node(state->ct_result)) {
/* Packet is returning from a NAT tunnel,
* already SNATed, just forward it.
*/
seen_mark = CALI_SKB_MARK_BYPASS_FWD;
CALI_DEBUG("returned from NAT tunnel\n");
goto allow;
}
state->post_nat_ip_dst = state->ct_result.nat_ip;
state->post_nat_dport = state->ct_result.nat_port;
}
CALI_DEBUG("CT: DNAT to %x:%d\n",
be32_to_host(state->post_nat_ip_dst), state->post_nat_dport);
encap_needed = dnat_should_encap();
/* We have not created the conntrack yet since we did not know
* if we need encap or not. Must do before MTU check and before
* we jump to do the encap.
*/
if (ct_rc == CALI_CT_NEW) {
struct cali_rt * rt;
int nat_type = CT_CREATE_NAT;
if (encap_needed) {
/* When we need to encap, we need to find out if the backend is
* local or not. If local, we actually do not need the encap.
*/
rt = cali_rt_lookup(state->post_nat_ip_dst);
if (!rt) {
reason = CALI_REASON_RT_UNKNOWN;
goto deny;
}
CALI_DEBUG("rt found for 0x%x local %d\n",
be32_to_host(state->post_nat_ip_dst), !!cali_rt_is_local(rt));
encap_needed = !cali_rt_is_local(rt);
if (encap_needed) {
if (CALI_F_FROM_HEP && state->tun_ip == 0) {
if (CALI_F_DSR) {
ct_nat_ctx.flags |= CALI_CT_FLAG_DSR_FWD;
}
ct_nat_ctx.flags |= CALI_CT_FLAG_NP_FWD;
}
nat_type = CT_CREATE_NAT_FWD;
ct_nat_ctx.tun_ip = rt->next_hop;
state->ip_dst = rt->next_hop;
}
}
if (conntrack_create(&ct_nat_ctx, nat_type)) {
CALI_DEBUG("Creating NAT conntrack failed\n");
goto deny;
}
} else {
if (encap_needed && ct_result_np_node(state->ct_result)) {
CALI_DEBUG("CT says encap to node %x\n", be32_to_host(state->ct_result.tun_ip));
state->ip_dst = state->ct_result.tun_ip;
} else {
encap_needed = false;
}
}
if (encap_needed) {
if (!(state->ip_proto == IPPROTO_TCP && skb_is_gso(skb)) &&
ip_is_dnf(ip_header) && vxlan_v4_encap_too_big(skb)) {
CALI_DEBUG("Request packet with DNF set is too big\n");
goto icmp_too_big;
}
state->ip_src = HOST_IP;
seen_mark = CALI_SKB_MARK_SKIP_RPF;
/* We cannot enforce RPF check on encapped traffic, do FIB if you can */
fib = true;
goto nat_encap;
}
ip_header->daddr = state->post_nat_ip_dst;
switch (ip_header->protocol) {
case IPPROTO_TCP:
tcp_header->dest = host_to_be16(state->post_nat_dport);
break;
case IPPROTO_UDP:
udp_header->dest = host_to_be16(state->post_nat_dport);
break;
}
CALI_VERB("L3 csum at %d L4 csum at %d\n", l3_csum_off, l4_csum_off);
if (l4_csum_off) {
res = skb_nat_l4_csum_ipv4(skb, l4_csum_off, state->ip_dst,
state->post_nat_ip_dst, host_to_be16(state->dport),
host_to_be16(state->post_nat_dport),
ip_header->protocol == IPPROTO_UDP ? BPF_F_MARK_MANGLED_0 : 0);
}
res |= bpf_l3_csum_replace(skb, l3_csum_off, state->ip_dst, state->post_nat_ip_dst, 4);
if (res) {
reason = CALI_REASON_CSUM_FAIL;
goto deny;
}
/* Handle returning ICMP related to tunnel
*
* N.B. we assume that we can fit in the MTU. Since it is ICMP
* and even though Linux sends up to min ipv4 MTU, it is
 * unlikely that we are anywhere close to the MTU limit. If we
* are, we need to fail anyway.
*/
if (ct_related && state->ip_proto == IPPROTO_ICMP
&& state->ct_result.tun_ip
&& !CALI_F_DSR) {
if (dnat_return_should_encap()) {
CALI_DEBUG("Returning related ICMP from workload to tunnel\n");
state->ip_dst = state->ct_result.tun_ip;
seen_mark = CALI_SKB_MARK_BYPASS_FWD_SRC_FIXUP;
goto nat_encap;
} else if (CALI_F_TO_HEP) {
/* Special case for ICMP error being returned by the host with the
* backing workload into the tunnel back to the original host. It is
* ICMP related and there is a return tunnel path. We need to change
* both the source and destination at once.
*
 * XXX the packet was routed to the original client as if it was
 * XXX DSR and we might not be on the right iface!!! Should we
 * XXX try to reinject it to fix the routing?
*/
CALI_DEBUG("Returning related ICMP from host to tunnel\n");
state->ip_src = HOST_IP;
state->ip_dst = state->ct_result.tun_ip;
goto nat_encap;
}
}
state->dport = state->post_nat_dport;
state->ip_dst = state->post_nat_ip_dst;
goto allow;
case CALI_CT_ESTABLISHED_SNAT:
CALI_DEBUG("CT: SNAT from %x:%d\n",
be32_to_host(state->ct_result.nat_ip), state->ct_result.nat_port);
if (dnat_return_should_encap() && state->ct_result.tun_ip) {
if (CALI_F_DSR) {
/* SNAT will be done after routing, when leaving HEP */
CALI_DEBUG("DSR enabled, skipping SNAT + encap\n");
goto allow;
}
if (!(state->ip_proto == IPPROTO_TCP && skb_is_gso(skb)) &&
ip_is_dnf(ip_header) && vxlan_v4_encap_too_big(skb)) {
CALI_DEBUG("Return ICMP mtu is too big\n");
goto icmp_too_big;
}
}
// Actually do the NAT.
ip_header->saddr = state->ct_result.nat_ip;
switch (ip_header->protocol) {
case IPPROTO_TCP:
tcp_header->source = host_to_be16(state->ct_result.nat_port);
break;
case IPPROTO_UDP:
udp_header->source = host_to_be16(state->ct_result.nat_port);
break;
}
CALI_VERB("L3 csum at %d L4 csum at %d\n", l3_csum_off, l4_csum_off);
if (l4_csum_off) {
res = skb_nat_l4_csum_ipv4(skb, l4_csum_off, state->ip_src,
state->ct_result.nat_ip, host_to_be16(state->sport),
host_to_be16(state->ct_result.nat_port),
ip_header->protocol == IPPROTO_UDP ? BPF_F_MARK_MANGLED_0 : 0);
}
CALI_VERB("L3 checksum update (csum is at %d) port from %x to %x\n",
l3_csum_off, state->ip_src, state->ct_result.nat_ip);
int csum_rc = bpf_l3_csum_replace(skb, l3_csum_off,
state->ip_src, state->ct_result.nat_ip, 4);
CALI_VERB("bpf_l3_csum_replace(IP): %d\n", csum_rc);
res |= csum_rc;
if (res) {
reason = CALI_REASON_CSUM_FAIL;
goto deny;
}
if (dnat_return_should_encap() && state->ct_result.tun_ip) {
state->ip_dst = state->ct_result.tun_ip;
seen_mark = CALI_SKB_MARK_BYPASS_FWD_SRC_FIXUP;
goto nat_encap;
}
state->sport = state->ct_result.nat_port;
state->ip_src = state->ct_result.nat_ip;
goto allow;
case CALI_CT_ESTABLISHED_BYPASS:
seen_mark = CALI_SKB_MARK_BYPASS;
// fall through
case CALI_CT_ESTABLISHED:
goto allow;
default:
if (CALI_F_FROM_HEP) {
/* Since we're using the host endpoint program for TC-redirect
* acceleration for workloads (but we haven't fully implemented
* host endpoint support yet), we can get an incorrect conntrack
* invalid for host traffic.
*
* FIXME: Properly handle host endpoint conntrack failures
*/
CALI_DEBUG("Traffic is towards host namespace but not conntracked, "
"falling through to iptables\n");
fib = false;
goto allow;
}
goto deny;
}
CALI_INFO("We should never fall through here\n");
goto deny;
icmp_ttl_exceeded:
if (skb_too_short(skb)) {
reason = CALI_REASON_SHORT;
CALI_DEBUG("Too short\n");
goto deny;
}
ip_header = skb_iphdr(skb);
/* we silently drop the packet if things go wrong */
/* XXX we should check if it is broadcast or multicast and not respond */
/* do not respond to IP fragments except the first */
if (ip_frag_no(ip_header)) {
goto deny;
}
if (icmp_v4_ttl_exceeded(skb)) {
goto deny;
}
/* we need to allow the response for the IP stack to route it back.
* XXX we might want to send it back the same iface
*/
goto icmp_allow;
icmp_too_big:
if (icmp_v4_too_big(skb)) {
reason = CALI_REASON_ICMP_DF;
goto deny;
}
/* XXX we might use skb->ifindex to redirect it straight back
* to where it came from if it is guaranteed to be the path
*/
fib_flags |= BPF_FIB_LOOKUP_OUTPUT;
if (CALI_F_FROM_WEP) {
/* we know it came from workload, just send it back the same way */
rc = CALI_RES_REDIR_IFINDEX;
}
goto icmp_allow;
icmp_allow:
/* recheck the size of the packet after it was turned into icmp and set
 * state so that it can be processed further.
*/
if (skb_shorter(skb, ETH_IPV4_UDP_SIZE)) {
reason = CALI_REASON_SHORT;
goto deny;
}
ip_header = skb_iphdr(skb);
tc_state_fill_from_iphdr(state, ip_header);
state->sport = state->dport = 0;
/* packet was created because of approved traffic, treat it as related */
seen_mark = CALI_SKB_MARK_BYPASS_FWD;
goto allow;
nat_encap:
if (vxlan_v4_encap(skb, state->ip_src, state->ip_dst)) {
reason = CALI_REASON_ENCAP_FAIL;
goto deny;
}
state->sport = state->dport = CALI_VXLAN_PORT;
state->ip_proto = IPPROTO_UDP;
allow:
{
struct fwd fwd = {
.res = rc,
.mark = seen_mark,
};
fwd_fib_set(&fwd, fib);
fwd_fib_set_flags(&fwd, fib_flags);
return fwd;
}
deny:
{
struct fwd fwd = {
.res = TC_ACT_SHOT,
.reason = reason,
};
return fwd;
}
}
#ifndef CALI_ENTRYPOINT_NAME
#define CALI_ENTRYPOINT_NAME calico_entrypoint
#endif
// Entrypoint with definable name. It's useful to redefine the name for each entrypoint
// because the name is exposed by bpftool et al.
__attribute__((section(XSTR(CALI_ENTRYPOINT_NAME))))
int tc_calico_entry(struct __sk_buff *skb)
{
return calico_tc(skb);
}
char ____license[] __attribute__((section("license"), used)) = "GPL";
| 1 | 18,064 | you could use `bpf_map_update_elem` with `BPF_EXIST` | projectcalico-felix | c |
@@ -57,7 +57,7 @@ public class S3OutputFile extends BaseS3File implements OutputFile {
try {
return new S3OutputStream(client(), uri(), awsProperties());
} catch (IOException e) {
- throw new UncheckedIOException("Filed to create output stream for location: " + uri(), e);
+ throw new UncheckedIOException("Failed to create output stream for location: " + uri(), e);
}
}
| 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.aws.s3;
import java.io.IOException;
import java.io.UncheckedIOException;
import org.apache.iceberg.aws.AwsProperties;
import org.apache.iceberg.exceptions.AlreadyExistsException;
import org.apache.iceberg.io.InputFile;
import org.apache.iceberg.io.OutputFile;
import org.apache.iceberg.io.PositionOutputStream;
import software.amazon.awssdk.services.s3.S3Client;
public class S3OutputFile extends BaseS3File implements OutputFile {
public S3OutputFile(S3Client client, S3URI uri) {
this(client, uri, new AwsProperties());
}
public S3OutputFile(S3Client client, S3URI uri, AwsProperties awsProperties) {
super(client, uri, awsProperties);
}
/**
* Create an output stream for the specified location if the target object
* does not exist in S3 at the time of invocation.
*
* @return output stream
*/
@Override
public PositionOutputStream create() {
if (!exists()) {
return createOrOverwrite();
} else {
throw new AlreadyExistsException("Location already exists: %s", uri());
}
}
@Override
public PositionOutputStream createOrOverwrite() {
try {
return new S3OutputStream(client(), uri(), awsProperties());
} catch (IOException e) {
throw new UncheckedIOException("Filed to create output stream for location: " + uri(), e);
}
}
@Override
public InputFile toInputFile() {
return new S3InputFile(client(), uri(), awsProperties());
}
}
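// Illustrative usage sketch, not part of the original file. create() throws
// AlreadyExistsException when the object is already present, while
// createOrOverwrite() always returns a stream; the client and location values
// below are assumptions for the example.
//
//   OutputFile outputFile = new S3OutputFile(s3, new S3URI("s3://bucket/warehouse/data.parquet"));
//   try (PositionOutputStream stream = outputFile.createOrOverwrite()) {
//     stream.write(serializedBytes);
//   }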
| 1 | 33,604 | thanks for noticing this! But I think we should not have it in this PR. | apache-iceberg | java |
@@ -414,6 +414,11 @@ Transaction.prototype.lockUntilDate = function(time) {
if (_.isDate(time)) {
time = time.getTime() / 1000;
}
+
+ for (var i = 0; i < this.inputs.length; i++) {
+ this.inputs[i].sequence = Input.DEFAULT_LOCKTIME_SEQNUMBER;
+ }
+
this.nLockTime = time;
return this;
}; | 1 | 'use strict';
var _ = require('lodash');
var $ = require('../util/preconditions');
var buffer = require('buffer');
var errors = require('../errors');
var BufferUtil = require('../util/buffer');
var JSUtil = require('../util/js');
var BufferReader = require('../encoding/bufferreader');
var BufferWriter = require('../encoding/bufferwriter');
var Hash = require('../crypto/hash');
var Signature = require('../crypto/signature');
var Sighash = require('./sighash');
var Address = require('../address');
var UnspentOutput = require('./unspentoutput');
var Input = require('./input');
var PublicKeyHashInput = Input.PublicKeyHash;
var PublicKeyInput = Input.PublicKey;
var MultiSigScriptHashInput = Input.MultiSigScriptHash;
var Output = require('./output');
var Script = require('../script');
var PrivateKey = require('../privatekey');
var BN = require('../crypto/bn');
/**
* Represents a transaction, a set of inputs and outputs to change ownership of tokens
*
* @param {*} serialized
* @constructor
*/
function Transaction(serialized) {
if (!(this instanceof Transaction)) {
return new Transaction(serialized);
}
this.inputs = [];
this.outputs = [];
this._inputAmount = undefined;
this._outputAmount = undefined;
if (serialized) {
if (serialized instanceof Transaction) {
return Transaction.shallowCopy(serialized);
} else if (JSUtil.isHexa(serialized)) {
this.fromString(serialized);
} else if (BufferUtil.isBuffer(serialized)) {
this.fromBuffer(serialized);
} else if (_.isObject(serialized)) {
this.fromObject(serialized);
} else {
throw new errors.InvalidArgument('Must provide an object or string to deserialize a transaction');
}
} else {
this._newTransaction();
}
}
var CURRENT_VERSION = 1;
var DEFAULT_NLOCKTIME = 0;
var MAX_BLOCK_SIZE = 1000000;
// Minimum amount for an output for it not to be considered a dust output
Transaction.DUST_AMOUNT = 546;
// Margin of error to allow fees in the vicinity of the expected value, but doesn't allow a big difference
Transaction.FEE_SECURITY_MARGIN = 15;
// max amount of satoshis in circulation
Transaction.MAX_MONEY = 21000000 * 1e8;
// nlocktime limit to be considered block height rather than a timestamp
Transaction.NLOCKTIME_BLOCKHEIGHT_LIMIT = 5e8;
// Max value for an unsigned 32 bit value
Transaction.NLOCKTIME_MAX_VALUE = 4294967295;
// Value used for fee estimation (satoshis per kilobyte)
Transaction.FEE_PER_KB = 10000;
// Safe upper bound for change address script size in bytes
Transaction.CHANGE_OUTPUT_MAX_SIZE = 20 + 4 + 34 + 4;
Transaction.MAXIMUM_EXTRA_SIZE = 4 + 9 + 9 + 4;
/* Constructors and Serialization */
/**
* Create a 'shallow' copy of the transaction, by serializing and deserializing
* it dropping any additional information that inputs and outputs may have hold
*
* @param {Transaction} transaction
* @return {Transaction}
*/
Transaction.shallowCopy = function(transaction) {
var copy = new Transaction(transaction.toBuffer());
return copy;
};
var hashProperty = {
configurable: false,
enumerable: true,
get: function() {
return new BufferReader(this._getHash()).readReverse().toString('hex');
}
};
Object.defineProperty(Transaction.prototype, 'hash', hashProperty);
Object.defineProperty(Transaction.prototype, 'id', hashProperty);
var ioProperty = {
configurable: false,
enumerable: true,
get: function() {
return this._getInputAmount();
}
};
Object.defineProperty(Transaction.prototype, 'inputAmount', ioProperty);
ioProperty.get = function() {
return this._getOutputAmount();
};
Object.defineProperty(Transaction.prototype, 'outputAmount', ioProperty);
/**
* Retrieve the little endian hash of the transaction (used for serialization)
* @return {Buffer}
*/
Transaction.prototype._getHash = function() {
return Hash.sha256sha256(this.toBuffer());
};
/**
* Retrieve a hexa string that can be used with bitcoind's CLI interface
* (decoderawtransaction, sendrawtransaction)
*
* @param {Object|boolean=} unsafe if true, skip all tests. if it's an object,
* it's expected to contain a set of flags to skip certain tests:
* * `disableAll`: disable all checks
* * `disableSmallFees`: disable checking for fees that are too small
* * `disableLargeFees`: disable checking for fees that are too large
* * `disableNotFullySigned`: disable checking if all inputs are fully signed
* * `disableDustOutputs`: disable checking if there are no outputs that are dust amounts
* * `disableMoreOutputThanInput`: disable checking if the transaction spends more bitcoins than the sum of the input amounts
* @return {string}
*/
Transaction.prototype.serialize = function(unsafe) {
if (true === unsafe || unsafe && unsafe.disableAll) {
return this.uncheckedSerialize();
} else {
return this.checkedSerialize(unsafe);
}
};
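// Illustrative usage sketch, not part of the original file: the option names come
// from the flag list documented above serialize().
//
//   tx.serialize(true); // skip every check, same as uncheckedSerialize()
//   tx.serialize({ disableSmallFees: true, disableDustOutputs: true });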
Transaction.prototype.uncheckedSerialize = Transaction.prototype.toString = function() {
return this.toBuffer().toString('hex');
};
/**
* Retrieve a hexa string that can be used with bitcoind's CLI interface
* (decoderawtransaction, sendrawtransaction)
*
* @param {Object} opts allows to skip certain tests. {@see Transaction#serialize}
* @return {string}
*/
Transaction.prototype.checkedSerialize = function(opts) {
var serializationError = this.getSerializationError(opts);
if (serializationError) {
serializationError.message += ' Use Transaction#uncheckedSerialize if you want to skip security checks. ' +
'See http://bitcore.io/guide/transaction.html#Serialization for more info.';
throw serializationError;
}
return this.uncheckedSerialize();
};
Transaction.prototype.invalidSatoshis = function() {
var invalid = false;
for (var i = 0; i < this.outputs.length; i++) {
if (this.outputs[i].invalidSatoshis()) {
invalid = true;
}
}
return invalid;
};
/**
* Retrieve a possible error that could appear when trying to serialize and
* broadcast this transaction.
*
* @param {Object} opts allows to skip certain tests. {@see Transaction#serialize}
* @return {bitcore.Error}
*/
Transaction.prototype.getSerializationError = function(opts) {
opts = opts || {};
if (this.invalidSatoshis()) {
return new errors.Transaction.InvalidSatoshis();
}
var unspent = this._getUnspentValue();
var unspentError;
if (unspent < 0) {
if (!opts.disableMoreOutputThanInput) {
unspentError = new errors.Transaction.InvalidOutputAmountSum();
}
} else {
unspentError = this._hasFeeError(opts, unspent);
}
return unspentError ||
this._hasDustOutputs(opts) ||
this._isMissingSignatures(opts);
};
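// Illustrative usage sketch, not part of the original file: checking for a
// serialization problem without throwing, using the same opts object that
// serialize() accepts.
//
//   var err = tx.getSerializationError({ disableIsFullySigned: true });
//   if (err) {
//     console.log('cannot broadcast yet:', err.message);
//   }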
Transaction.prototype._hasFeeError = function(opts, unspent) {
if (!_.isUndefined(this._fee) && this._fee !== unspent) {
return new errors.Transaction.FeeError.Different(
'Unspent value is ' + unspent + ' but specified fee is ' + this._fee
);
}
if (!opts.disableLargeFees) {
var maximumFee = Math.floor(Transaction.FEE_SECURITY_MARGIN * this._estimateFee());
if (unspent > maximumFee) {
if (this._missingChange()) {
return new errors.Transaction.ChangeAddressMissing(
'Fee is too large and no change address was provided'
);
}
return new errors.Transaction.FeeError.TooLarge(
'expected less than ' + maximumFee + ' but got ' + unspent
);
}
}
if (!opts.disableSmallFees) {
var minimumFee = Math.ceil(this._estimateFee() / Transaction.FEE_SECURITY_MARGIN);
if (unspent < minimumFee) {
return new errors.Transaction.FeeError.TooSmall(
'expected more than ' + minimumFee + ' but got ' + unspent
);
}
}
};
Transaction.prototype._missingChange = function() {
return !this._changeScript;
};
Transaction.prototype._hasDustOutputs = function(opts) {
if (opts.disableDustOutputs) {
return;
}
var index, output;
for (index in this.outputs) {
output = this.outputs[index];
if (output.satoshis < Transaction.DUST_AMOUNT && !output.script.isDataOut()) {
return new errors.Transaction.DustOutputs();
}
}
};
Transaction.prototype._isMissingSignatures = function(opts) {
if (opts.disableIsFullySigned) {
return;
}
if (!this.isFullySigned()) {
return new errors.Transaction.MissingSignatures();
}
};
Transaction.prototype.inspect = function() {
return '<Transaction: ' + this.uncheckedSerialize() + '>';
};
Transaction.prototype.toBuffer = function() {
var writer = new BufferWriter();
return this.toBufferWriter(writer).toBuffer();
};
Transaction.prototype.toBufferWriter = function(writer) {
writer.writeUInt32LE(this.version);
writer.writeVarintNum(this.inputs.length);
_.each(this.inputs, function(input) {
input.toBufferWriter(writer);
});
writer.writeVarintNum(this.outputs.length);
_.each(this.outputs, function(output) {
output.toBufferWriter(writer);
});
writer.writeUInt32LE(this.nLockTime);
return writer;
};
Transaction.prototype.fromBuffer = function(buffer) {
var reader = new BufferReader(buffer);
return this.fromBufferReader(reader);
};
Transaction.prototype.fromBufferReader = function(reader) {
$.checkArgument(!reader.finished(), 'No transaction data received');
var i, sizeTxIns, sizeTxOuts;
this.version = reader.readUInt32LE();
sizeTxIns = reader.readVarintNum();
for (i = 0; i < sizeTxIns; i++) {
var input = Input.fromBufferReader(reader);
this.inputs.push(input);
}
sizeTxOuts = reader.readVarintNum();
for (i = 0; i < sizeTxOuts; i++) {
this.outputs.push(Output.fromBufferReader(reader));
}
this.nLockTime = reader.readUInt32LE();
return this;
};
Transaction.prototype.toObject = Transaction.prototype.toJSON = function toObject() {
var inputs = [];
this.inputs.forEach(function(input) {
inputs.push(input.toObject());
});
var outputs = [];
this.outputs.forEach(function(output) {
outputs.push(output.toObject());
});
var obj = {
hash: this.hash,
version: this.version,
inputs: inputs,
outputs: outputs,
nLockTime: this.nLockTime
};
if (this._changeScript) {
obj.changeScript = this._changeScript.toString();
}
if (!_.isUndefined(this._changeIndex)) {
obj.changeIndex = this._changeIndex;
}
if (!_.isUndefined(this._fee)) {
obj.fee = this._fee;
}
return obj;
};
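// Round-trip sketch (the `tx` instance is hypothetical): toObject() produces a plain
// object that fromObject() below can consume again.
//
//   var obj = tx.toObject();
//   var restored = new Transaction().fromObject(obj);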
Transaction.prototype.fromObject = function fromObject(arg) {
/* jshint maxstatements: 20 */
$.checkArgument(_.isObject(arg) || arg instanceof Transaction);
var self = this;
var transaction;
if (arg instanceof Transaction) {
transaction = arg.toObject();
} else {
transaction = arg;
}
_.each(transaction.inputs, function(input) {
if (!input.output || !input.output.script) {
self.uncheckedAddInput(new Input(input));
return;
}
var script = new Script(input.output.script);
var txin;
if (script.isPublicKeyHashOut()) {
txin = new Input.PublicKeyHash(input);
} else if (script.isScriptHashOut() && input.publicKeys && input.threshold) {
txin = new Input.MultiSigScriptHash(
input, input.publicKeys, input.threshold, input.signatures
);
} else {
throw new errors.Transaction.Input.UnsupportedScript(input.output.script);
}
self.addInput(txin);
});
_.each(transaction.outputs, function(output) {
self.addOutput(new Output(output));
});
if (!_.isUndefined(transaction.changeIndex)) {
this._changeIndex = transaction.changeIndex;
}
if (transaction.changeScript) {
this._changeScript = new Script(transaction.changeScript);
}
if (transaction.fee) {
this.fee(transaction.fee);
}
this.nLockTime = transaction.nLockTime;
this.version = transaction.version;
this._checkConsistency(arg);
return this;
};
Transaction.prototype._checkConsistency = function(arg) {
if (!_.isUndefined(this._changeIndex)) {
$.checkState(this._changeScript);
$.checkState(this.outputs[this._changeIndex]);
$.checkState(this.outputs[this._changeIndex].script.toString() ===
this._changeScript.toString());
}
if (arg && arg.hash) {
$.checkState(arg.hash === this.hash, 'Hash in object does not match transaction hash');
}
};
/**
* Sets nLockTime so that the transaction is not valid until the desired date (a
* timestamp in seconds since the UNIX epoch is also accepted)
*
* @param {Date | Number} time
* @return {Transaction} this
*/
Transaction.prototype.lockUntilDate = function(time) {
$.checkArgument(time);
if (_.isNumber(time) && time < Transaction.NLOCKTIME_BLOCKHEIGHT_LIMIT) {
throw new errors.Transaction.LockTimeTooEarly();
}
if (_.isDate(time)) {
time = time.getTime() / 1000;
}
this.nLockTime = time;
return this;
};
/**
* Sets nLockTime so that transaction is not valid until the desired block
* height.
*
* @param {Number} height
* @return {Transaction} this
*/
Transaction.prototype.lockUntilBlockHeight = function(height) {
$.checkArgument(_.isNumber(height));
if (height >= Transaction.NLOCKTIME_BLOCKHEIGHT_LIMIT) {
throw new errors.Transaction.BlockHeightTooHigh();
}
if (height < 0) {
throw new errors.Transaction.NLockTimeOutOfRange();
}
this.nLockTime = height;
return this;
};
/**
* Returns a semantic version of the transaction's nLockTime.
* @return {Number|Date}
* If nLockTime is 0, it returns null,
* if it is < 500000000, it returns a block height (number)
* else it returns a Date object.
*/
Transaction.prototype.getLockTime = function() {
if (!this.nLockTime) {
return null;
}
if (this.nLockTime < Transaction.NLOCKTIME_BLOCKHEIGHT_LIMIT) {
return this.nLockTime;
}
return new Date(1000 * this.nLockTime);
};
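// Locktime sketch (values are illustrative): numbers below NLOCKTIME_BLOCKHEIGHT_LIMIT
// are treated as block heights, larger values as timestamps.
//
//   tx.lockUntilBlockHeight(480000);                      // getLockTime() === 480000
//   tx.lockUntilDate(new Date('2020-01-01T00:00:00Z'));   // getLockTime() instanceof Date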
Transaction.prototype.fromString = function(string) {
this.fromBuffer(new buffer.Buffer(string, 'hex'));
};
Transaction.prototype._newTransaction = function() {
this.version = CURRENT_VERSION;
this.nLockTime = DEFAULT_NLOCKTIME;
};
/* Transaction creation interface */
/**
* Add an input to this transaction. This is a high level interface
* to add an input, for more control, use @{link Transaction#addInput}.
*
* Can receive, as output information, the output of bitcoind's `listunspent` command,
* and a slightly fancier format recognized by bitcore:
*
* ```
* {
* address: 'mszYqVnqKoQx4jcTdJXxwKAissE3Jbrrc1',
* txId: 'a477af6b2667c29670467e4e0728b685ee07b240235771862318e29ddbe58458',
* outputIndex: 0,
* script: Script.empty(),
* satoshis: 1020000
* }
* ```
* Where `address` can be either a string or a bitcore Address object. The
* same is true for `script`, which can be a string or a bitcore Script.
*
* Beware that this resets all the signatures for inputs (in further versions,
* SIGHASH_SINGLE or SIGHASH_NONE signatures will not be reset).
*
* @example
* ```javascript
* var transaction = new Transaction();
*
* // From a pay to public key hash output from bitcoind's listunspent
* transaction.from({'txid': '0000...', vout: 0, amount: 0.1, scriptPubKey: 'OP_DUP ...'});
*
* // From a pay to public key hash output
* transaction.from({'txId': '0000...', outputIndex: 0, satoshis: 1000, script: 'OP_DUP ...'});
*
* // From a multisig P2SH output
* transaction.from({'txId': '0000...', inputIndex: 0, satoshis: 1000, script: '... OP_HASH'},
* ['03000...', '02000...'], 2);
* ```
*
* @param {Object} utxo
* @param {Array=} pubkeys
* @param {number=} threshold
*/
Transaction.prototype.from = function(utxo, pubkeys, threshold) {
if (_.isArray(utxo)) {
var self = this;
_.each(utxo, function(utxo) {
self.from(utxo, pubkeys, threshold);
});
return this;
}
var exists = _.any(this.inputs, function(input) {
// TODO: Maybe prevTxId should be a string? Or defined as read only property?
return input.prevTxId.toString('hex') === utxo.txId && input.outputIndex === utxo.outputIndex;
});
if (exists) {
return;
}
if (pubkeys && threshold) {
this._fromMultisigUtxo(utxo, pubkeys, threshold);
} else {
this._fromNonP2SH(utxo);
}
return this;
};
Transaction.prototype._fromNonP2SH = function(utxo) {
var clazz;
utxo = new UnspentOutput(utxo);
if (utxo.script.isPublicKeyHashOut()) {
clazz = PublicKeyHashInput;
} else if (utxo.script.isPublicKeyOut()) {
clazz = PublicKeyInput;
} else {
clazz = Input;
}
this.addInput(new clazz({
output: new Output({
script: utxo.script,
satoshis: utxo.satoshis
}),
prevTxId: utxo.txId,
outputIndex: utxo.outputIndex,
script: Script.empty()
}));
};
Transaction.prototype._fromMultisigUtxo = function(utxo, pubkeys, threshold) {
$.checkArgument(threshold <= pubkeys.length,
'Number of required signatures must be less than or equal to the number of public keys');
utxo = new UnspentOutput(utxo);
this.addInput(new MultiSigScriptHashInput({
output: new Output({
script: utxo.script,
satoshis: utxo.satoshis
}),
prevTxId: utxo.txId,
outputIndex: utxo.outputIndex,
script: Script.empty()
}, pubkeys, threshold));
};
/**
* Add an input to this transaction. The input must be an instance of the `Input` class.
* It should have information about the Output that it's spending, but if it's not already
* set, two additional parameters, `outputScript` and `satoshis` can be provided.
*
* @param {Input} input
* @param {String|Script} outputScript
* @param {number} satoshis
* @return Transaction this, for chaining
*/
Transaction.prototype.addInput = function(input, outputScript, satoshis) {
$.checkArgumentType(input, Input, 'input');
if (!input.output && (_.isUndefined(outputScript) || _.isUndefined(satoshis))) {
throw new errors.Transaction.NeedMoreInfo('Need information about the UTXO script and satoshis');
}
if (!input.output && outputScript && !_.isUndefined(satoshis)) {
outputScript = outputScript instanceof Script ? outputScript : new Script(outputScript);
$.checkArgumentType(satoshis, 'number', 'satoshis');
input.output = new Output({
script: outputScript,
satoshis: satoshis
});
}
return this.uncheckedAddInput(input);
};
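// Sketch of addInput (the `input`, `previousOutputScript` and amount are hypothetical):
// when the input does not yet carry the output it spends, the UTXO script and satoshis
// must be supplied so fee and change calculations can use them.
//
//   tx.addInput(input, previousOutputScript, 50000);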
/**
* Add an input to this transaction, without checking that the input has information about
* the output that it's spending.
*
* @param {Input} input
* @return Transaction this, for chaining
*/
Transaction.prototype.uncheckedAddInput = function(input) {
$.checkArgumentType(input, Input, 'input');
this.inputs.push(input);
this._inputAmount = undefined;
this._updateChangeOutput();
return this;
};
/**
* Returns true if the transaction has enough info on all inputs to be correctly validated
*
* @return {boolean}
*/
Transaction.prototype.hasAllUtxoInfo = function() {
return _.all(this.inputs.map(function(input) {
return !!input.output;
}));
};
/**
* Manually set the fee for this transaction. Beware that this resets all the signatures
* for inputs (in further versions, SIGHASH_SINGLE or SIGHASH_NONE signatures will not
* be reset).
*
* @param {number} amount satoshis to be sent
* @return {Transaction} this, for chaining
*/
Transaction.prototype.fee = function(amount) {
$.checkArgument(_.isNumber(amount), 'amount must be a number');
this._fee = amount;
this._updateChangeOutput();
return this;
};
/**
* Manually set the fee per KB for this transaction. Beware that this resets all the signatures
* for inputs (in further versions, SIGHASH_SINGLE or SIGHASH_NONE signatures will not
* be reset).
*
* @param {number} amount satoshis per KB to be sent
* @return {Transaction} this, for chaining
*/
Transaction.prototype.feePerKb = function(amount) {
$.checkArgument(_.isNumber(amount), 'amount must be a number');
this._feePerKb = amount;
this._updateChangeOutput();
return this;
};
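// Fee configuration sketch (amounts are illustrative): fee() fixes the total fee in
// satoshis, while feePerKb() only changes the rate used by the size-based estimate.
//
//   tx.fee(10000);       // pay exactly 10000 satoshis
//   tx.feePerKb(20000);  // or: estimate the fee at 20000 satoshis per KB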
/* Output management */
/**
* Set the change address for this transaction
*
* Beware that this resets all the signatures for inputs (in further versions,
* SIGHASH_SINGLE or SIGHASH_NONE signatures will not be reset).
*
* @param {Address} address An address for change to be sent to.
* @return {Transaction} this, for chaining
*/
Transaction.prototype.change = function(address) {
$.checkArgument(address, 'address is required');
this._changeScript = Script.fromAddress(address);
this._updateChangeOutput();
return this;
};
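// Change sketch (reusing the illustrative address from the docs above): any input value
// not consumed by outputs and fee is sent back to this script by _updateChangeOutput().
//
//   tx.change('mszYqVnqKoQx4jcTdJXxwKAissE3Jbrrc1');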
/**
* @return {Output} change output, if it exists
*/
Transaction.prototype.getChangeOutput = function() {
if (!_.isUndefined(this._changeIndex)) {
return this.outputs[this._changeIndex];
}
return null;
};
/**
* Add an output to the transaction.
*
* Beware that this resets all the signatures for inputs (in further versions,
* SIGHASH_SINGLE or SIGHASH_NONE signatures will not be reset).
*
* @param {string|Address} address
* @param {number} amount in satoshis
* @return {Transaction} this, for chaining
*/
Transaction.prototype.to = function(address, amount) {
$.checkArgument(
JSUtil.isNaturalNumber(amount),
'Amount is expected to be a positive integer'
);
this.addOutput(new Output({
script: Script(new Address(address)),
satoshis: amount
}));
return this;
};
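// Payment sketch (address and amount reuse the illustrative values from the docs above):
//
//   tx.to('mszYqVnqKoQx4jcTdJXxwKAissE3Jbrrc1', 1020000);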
/**
* Add an OP_RETURN output to the transaction.
*
* Beware that this resets all the signatures for inputs (in further versions,
* SIGHASH_SINGLE or SIGHASH_NONE signatures will not be reset).
*
* @param {Buffer|string} value the data to be stored in the OP_RETURN output.
* In case of a string, the UTF-8 representation will be stored
* @return {Transaction} this, for chaining
*/
Transaction.prototype.addData = function(value) {
this.addOutput(new Output({
script: Script.buildDataOut(value),
satoshis: 0
}));
return this;
};
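// OP_RETURN sketch (the payload is arbitrary): the string is stored as UTF-8 in a
// zero-satoshi data output.
//
//   tx.addData('illustrative payload');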
/**
* Add an output to the transaction.
*
* @param {Output} output the output to add.
* @return {Transaction} this, for chaining
*/
Transaction.prototype.addOutput = function(output) {
$.checkArgumentType(output, Output, 'output');
this._addOutput(output);
this._updateChangeOutput();
return this;
};
/**
* Remove all outputs from the transaction.
*
* @return {Transaction} this, for chaining
*/
Transaction.prototype.clearOutputs = function() {
this.outputs = [];
this._clearSignatures();
this._outputAmount = undefined;
this._changeIndex = undefined;
this._updateChangeOutput();
return this;
};
Transaction.prototype._addOutput = function(output) {
this.outputs.push(output);
this._outputAmount = undefined;
};
/**
* Calculates or gets the total output amount in satoshis
*
* @return {Number} the transaction total output amount
*/
Transaction.prototype._getOutputAmount = function() {
if (_.isUndefined(this._outputAmount)) {
var self = this;
this._outputAmount = 0;
_.each(this.outputs, function(output) {
self._outputAmount += output.satoshis;
});
}
return this._outputAmount;
};
/**
* Calculates or gets the total input amount in satoshis
*
* @return {Number} the transaction total input amount
*/
Transaction.prototype._getInputAmount = function() {
if (_.isUndefined(this._inputAmount)) {
var self = this;
this._inputAmount = 0;
_.each(this.inputs, function(input) {
if (_.isUndefined(input.output)) {
throw new errors.Transaction.Input.MissingPreviousOutput();
}
self._inputAmount += input.output.satoshis;
});
}
return this._inputAmount;
};
Transaction.prototype._updateChangeOutput = function() {
if (!this._changeScript) {
return;
}
this._clearSignatures();
if (!_.isUndefined(this._changeIndex)) {
this._removeOutput(this._changeIndex);
}
var available = this._getUnspentValue();
var fee = this.getFee();
var changeAmount = available - fee;
if (changeAmount > 0) {
this._changeIndex = this.outputs.length;
this._addOutput(new Output({
script: this._changeScript,
satoshis: changeAmount
}));
} else {
this._changeIndex = undefined;
}
};
/**
* Calculates the fee of the transaction.
*
* If there's a fixed fee set, return that.
*
* If there is no change output set, the fee is the
* total value of the inputs minus outputs. Note that
* a serialized transaction only specifies the value
* of its outputs. (The value of inputs are recorded
* in the previous transaction outputs being spent.)
* This method therefore raises a "MissingPreviousOutput"
* error when called on a serialized transaction.
*
* If there's no fee set and no change address,
* estimate the fee based on size.
*
* @return {Number} fee of this transaction in satoshis
*/
Transaction.prototype.getFee = function() {
if (!_.isUndefined(this._fee)) {
return this._fee;
}
// if no change output is set, fees should equal all the unspent amount
if (!this._changeScript) {
return this._getUnspentValue();
}
return this._estimateFee();
};
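// Worked example (hypothetical amounts): with a single 100000-satoshi input, one
// 99000-satoshi output, no explicit fee and no change address, getFee() returns the
// unspent value 100000 - 99000 = 1000 satoshis.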
/**
* Estimates fee from serialized transaction size in bytes.
*/
Transaction.prototype._estimateFee = function() {
var estimatedSize = this._estimateSize();
var available = this._getUnspentValue();
return Transaction._estimateFee(estimatedSize, available, this._feePerKb);
};
Transaction.prototype._getUnspentValue = function() {
return this._getInputAmount() - this._getOutputAmount();
};
Transaction.prototype._clearSignatures = function() {
_.each(this.inputs, function(input) {
input.clearSignatures();
});
};
Transaction._estimateFee = function(size, amountAvailable, feePerKb) {
var fee = Math.ceil(size / 1000) * (feePerKb || Transaction.FEE_PER_KB);
if (amountAvailable > fee) {
size += Transaction.CHANGE_OUTPUT_MAX_SIZE;
}
return Math.ceil(size / 1000) * (feePerKb || Transaction.FEE_PER_KB);
};
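// Estimation sketch (numbers are illustrative): for an estimated size of 600 bytes at
// 10000 satoshis per KB the first pass gives ceil(600 / 1000) * 10000 = 10000; if more
// than that is available, CHANGE_OUTPUT_MAX_SIZE bytes are added for the future change
// output and the fee is recomputed on the larger size.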
Transaction.prototype._estimateSize = function() {
var result = Transaction.MAXIMUM_EXTRA_SIZE;
_.each(this.inputs, function(input) {
result += input._estimateSize();
});
_.each(this.outputs, function(output) {
result += output.script.toBuffer().length + 9;
});
return result;
};
Transaction.prototype._removeOutput = function(index) {
var output = this.outputs[index];
this.outputs = _.without(this.outputs, output);
this._outputAmount = undefined;
};
Transaction.prototype.removeOutput = function(index) {
this._removeOutput(index);
this._updateChangeOutput();
};
/**
* Randomize this transaction's outputs ordering. The shuffling algorithm is a
* version of the Fisher-Yates shuffle, provided by lodash's _.shuffle().
*
* @return {Transaction} this
*/
Transaction.prototype.shuffleOutputs = function() {
return this.sortOutputs(_.shuffle);
};
/**
* Sort this transaction's outputs, according to a given sorting function that
* takes an array as argument and returns a new array, with the same elements
* but with a different order. The argument function MUST NOT modify the order
* of the original array
*
* @param {Function} sortingFunction
* @return {Transaction} this
*/
Transaction.prototype.sortOutputs = function(sortingFunction) {
var outs = sortingFunction(this.outputs);
return this._newOutputOrder(outs);
};
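// Sorting sketch: the callback must return a new array rather than mutate the original;
// lodash helpers such as _.sortBy satisfy that requirement.
//
//   tx.sortOutputs(function(outputs) { return _.sortBy(outputs, 'satoshis'); });
//   tx.shuffleOutputs();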
Transaction.prototype._newOutputOrder = function(newOutputs) {
var isInvalidSorting = (this.outputs.length !== newOutputs.length ||
_.difference(this.outputs, newOutputs).length !== 0);
if (isInvalidSorting) {
throw new errors.Transaction.InvalidSorting();
}
if (!_.isUndefined(this._changeIndex)) {
var changeOutput = this.outputs[this._changeIndex];
this._changeIndex = _.findIndex(newOutputs, changeOutput);
}
this.outputs = newOutputs;
return this;
};
Transaction.prototype.removeInput = function(txId, outputIndex) {
var index;
if (!outputIndex && _.isNumber(txId)) {
index = txId;
} else {
index = _.findIndex(this.inputs, function(input) {
return input.prevTxId.toString('hex') === txId && input.outputIndex === outputIndex;
});
}
if (index < 0 || index >= this.inputs.length) {
throw new errors.Transaction.InvalidIndex(index, this.inputs.length);
}
var input = this.inputs[index];
this.inputs = _.without(this.inputs, input);
this._inputAmount = undefined;
this._updateChangeOutput();
};
/* Signature handling */
/**
* Sign the transaction using one or more private keys.
*
* It tries to sign each input, verifying that the signature will be valid
* (matches a public key).
*
* @param {Array|String|PrivateKey} privateKey
* @param {number} sigtype
* @return {Transaction} this, for chaining
*/
Transaction.prototype.sign = function(privateKey, sigtype) {
$.checkState(this.hasAllUtxoInfo());
var self = this;
if (_.isArray(privateKey)) {
_.each(privateKey, function(privateKey) {
self.sign(privateKey, sigtype);
});
return this;
}
_.each(this.getSignatures(privateKey, sigtype), function(signature) {
self.applySignature(signature);
});
return this;
};
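// Signing sketch (keys are hypothetical): every input must know the output it spends
// (hasAllUtxoInfo), otherwise the checkState above throws.
//
//   tx.sign(privateKey);      // a single key
//   tx.sign([key1, key2]);    // or several keys, e.g. for multisig inputs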
Transaction.prototype.getSignatures = function(privKey, sigtype) {
privKey = new PrivateKey(privKey);
sigtype = sigtype || Signature.SIGHASH_ALL;
var transaction = this;
var results = [];
var hashData = Hash.sha256ripemd160(privKey.publicKey.toBuffer());
_.each(this.inputs, function forEachInput(input, index) {
_.each(input.getSignatures(transaction, privKey, index, sigtype, hashData), function(signature) {
results.push(signature);
});
});
return results;
};
/**
* Add a signature to the transaction
*
* @param {Object} signature
* @param {number} signature.inputIndex
* @param {number} signature.sigtype
* @param {PublicKey} signature.publicKey
* @param {Signature} signature.signature
* @return {Transaction} this, for chaining
*/
Transaction.prototype.applySignature = function(signature) {
this.inputs[signature.inputIndex].addSignature(this, signature);
return this;
};
Transaction.prototype.isFullySigned = function() {
_.each(this.inputs, function(input) {
if (input.isFullySigned === Input.prototype.isFullySigned) {
throw new errors.Transaction.UnableToVerifySignature(
'Unrecognized script kind, or not enough information to execute script. ' +
'This usually happens when creating a transaction from a serialized transaction'
);
}
});
return _.all(_.map(this.inputs, function(input) {
return input.isFullySigned();
}));
};
Transaction.prototype.isValidSignature = function(signature) {
var self = this;
if (this.inputs[signature.inputIndex].isValidSignature === Input.prototype.isValidSignature) {
throw new errors.Transaction.UnableToVerifySignature(
'Unrecognized script kind, or not enough information to execute script. ' +
'This usually happens when creating a transaction from a serialized transaction'
);
}
return this.inputs[signature.inputIndex].isValidSignature(self, signature);
};
/**
* @return {boolean} whether the signature is valid for this transaction input
*/
Transaction.prototype.verifySignature = function(sig, pubkey, nin, subscript) {
return Sighash.verify(this, sig, pubkey, nin, subscript);
};
/**
* Check that a transaction passes basic sanity tests. If not, return a string
* describing the error. This function contains the same logic as
* CheckTransaction in bitcoin core.
*/
Transaction.prototype.verify = function() {
// Basic checks that don't depend on any context
if (this.inputs.length === 0) {
return 'transaction txins empty';
}
if (this.outputs.length === 0) {
return 'transaction txouts empty';
}
// Check for negative or overflow output values
var valueoutbn = new BN(0);
for (var i = 0; i < this.outputs.length; i++) {
var txout = this.outputs[i];
if (txout.invalidSatoshis()) {
return 'transaction txout ' + i + ' satoshis is invalid';
}
if (txout._satoshisBN.gt(new BN(Transaction.MAX_MONEY, 10))) {
return 'transaction txout ' + i + ' greater than MAX_MONEY';
}
valueoutbn = valueoutbn.add(txout._satoshisBN);
if (valueoutbn.gt(new BN(Transaction.MAX_MONEY))) {
return 'transaction txout ' + i + ' total output greater than MAX_MONEY';
}
}
// Size limits
if (this.toBuffer().length > MAX_BLOCK_SIZE) {
return 'transaction over the maximum block size';
}
// Check for duplicate inputs
var txinmap = {};
for (i = 0; i < this.inputs.length; i++) {
var txin = this.inputs[i];
var inputid = txin.prevTxId + ':' + txin.outputIndex;
if (!_.isUndefined(txinmap[inputid])) {
return 'transaction input ' + i + ' duplicate input';
}
txinmap[inputid] = true;
}
var isCoinbase = this.isCoinbase();
if (isCoinbase) {
var buf = this.inputs[0]._scriptBuffer;
if (buf.length < 2 || buf.length > 100) {
return 'coinbase transaction script size invalid';
}
} else {
for (i = 0; i < this.inputs.length; i++) {
if (this.inputs[i].isNull()) {
return 'transaction input ' + i + ' has null input';
}
}
}
return true;
};
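// Sanity-check sketch: verify() returns true on success and a descriptive string on
// failure, so compare the result against true explicitly.
//
//   var result = tx.verify();
//   if (result !== true) {
//     throw new Error(result);
//   }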
/**
* Analogous to bitcoind's IsCoinBase function in transaction.h
*/
Transaction.prototype.isCoinbase = function() {
return (this.inputs.length === 1 && this.inputs[0].isNull());
};
module.exports = Transaction;
| 1 | 14,509 | A few things: - This may not serialize because I think the property for the sequence number on an input is `.sequenceNumber`. - There should be tests to cover these statements - Indentation is incorrect (should be two spaces, as similar to the statements above) | bitpay-bitcore | js |
@@ -89,3 +89,16 @@ class BaseNotification(object):
output_filename = filename_template.format(
self.resource, self.cycle_timestamp, output_timestamp)
return output_filename
+
+ @classmethod
+ def check_data_format(cls, data_format):
+ """Raise `InvalidDataFormatError` unless `data_format` is supported.
+
+ Args:
+ data_format (string): should be either 'csv' or 'json'
+
+ Raises:
+ InvalidDataFormatError: if not valid
+ """
+ if data_format not in cls.supported_data_formats:
+ raise InvalidDataFormatError('GCS uploader', data_format) | 1 | # Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base notifier to perform notifications"""
import abc
from google.cloud.forseti.common.util import date_time
from google.cloud.forseti.common.util import logger
from google.cloud.forseti.common.util import string_formats
LOGGER = logger.get_logger(__name__)
class InvalidDataFormatError(Exception):
"""Raised in case of an invalid notifier data format."""
def __init__(self, notifier, invalid_data_format):
"""Constructor for the base notifier.
Args:
notifier (str): the notifier module/name
invalid_data_format (str): the invalid data format in question.
"""
super(InvalidDataFormatError, self).__init__(
'%s: invalid data format: %s' % (notifier, invalid_data_format))
class BaseNotification(object):
"""Base notifier to perform notifications"""
__metaclass__ = abc.ABCMeta
supported_data_formats = ['csv', 'json']
def __init__(self, resource, cycle_timestamp,
violations, global_configs, notifier_config,
notification_config):
"""Constructor for the base notifier.
Args:
resource (str): Violation resource name.
cycle_timestamp (str): Snapshot timestamp,
formatted as YYYYMMDDTHHMMSSZ.
violations (dict): Violations.
global_configs (dict): Global configurations.
notifier_config (dict): Notifier configurations.
notification_config (dict): notifier configurations.
"""
self.cycle_timestamp = cycle_timestamp
self.resource = resource
self.global_configs = global_configs
self.notifier_config = notifier_config
self.notification_config = notification_config
# TODO: import api_client
# self.api_client = api_client
# Get violations
self.violations = violations
@abc.abstractmethod
def run(self):
"""Runs the notifier."""
pass
def _get_output_filename(self, filename_template):
"""Create the output filename.
Args:
filename_template (string): template to use for the output filename
Returns:
str: The output filename for the violations CSV file.
"""
utc_now_datetime = date_time.get_utc_now_datetime()
output_timestamp = utc_now_datetime.strftime(
string_formats.TIMESTAMP_TIMEZONE_FILES)
output_filename = filename_template.format(
self.resource, self.cycle_timestamp, output_timestamp)
return output_filename
| 1 | 29,972 | If this is a base method that could be used by others, should you hardcode 'GCS uploader' here? | forseti-security-forseti-security | py |
@@ -386,6 +386,18 @@ public final class CharSeq implements CharSequence, IndexedSeq<Character>, Seria
return result;
}
+ @Override
+ public CharSeq padTo(int length, Character element) {
+ if (length <= back.length()) {
+ return this;
+ }
+ final StringBuilder sb = new StringBuilder(back);
+ for (int i = 0; i < length - back.length(); i++) {
+ sb.append(element);
+ }
+ return new CharSeq(sb.toString());
+ }
+
public CharSeq mapChars(CharUnaryOperator mapper) {
Objects.requireNonNull(mapper, "mapper is null");
if (isEmpty()) { | 1 | /* / \____ _ ______ _____ / \____ ____ _____
* / \__ \/ \ / \__ \ / __// \__ \ / \/ __ \ Javaslang
* _/ // _\ \ \/ / _\ \\_ \/ // _\ \ /\ \__/ / Copyright 2014-2015 Daniel Dietrich
* /___/ \_____/\____/\_____/____/\___\_____/_/ \_/____/ Licensed under the Apache License, Version 2.0
*/
package javaslang.collection;
import javaslang.*;
import javaslang.control.None;
import javaslang.control.Option;
import javaslang.control.Some;
import java.io.Serializable;
import java.io.UnsupportedEncodingException;
import java.lang.Iterable;
import java.nio.charset.Charset;
import java.util.*;
import java.util.HashSet;
import java.util.function.*;
import java.util.regex.PatternSyntaxException;
import java.util.stream.Collector;
/**
* An immutable sequence of characters, i.e. an {@link IndexedSeq} of {@code Character} values backed by a {@link java.lang.String}.
*/
public final class CharSeq implements CharSequence, IndexedSeq<Character>, Serializable {
private static final long serialVersionUID = 1L;
private static final CharSeq EMPTY = new CharSeq("");
private final java.lang.String back;
private CharSeq(java.lang.String javaString) {
this.back = javaString;
}
public static CharSeq empty() {
return EMPTY;
}
/**
* Returns a {@link java.util.stream.Collector} which may be used in conjunction with
* {@link java.util.stream.Stream#collect(java.util.stream.Collector)} to obtain a {@link CharSeq}.
*
* @return A {@code CharSeq} Collector.
*/
public static Collector<Character, ArrayList<Character>, CharSeq> collector() {
final Supplier<ArrayList<Character>> supplier = ArrayList::new;
final BiConsumer<ArrayList<Character>, Character> accumulator = ArrayList::add;
final BinaryOperator<ArrayList<Character>> combiner = (left, right) -> {
left.addAll(right);
return left;
};
final Function<ArrayList<Character>, CharSeq> finisher = CharSeq::ofAll;
return Collector.of(supplier, accumulator, combiner, finisher);
}
/**
* Returns a singleton {@code CharSeq}, i.e. a {@code CharSeq} of one element.
*
* @param element An element.
* @return A new {@code CharSeq} instance containing the given element
*/
public static CharSeq of(Character element) {
return new CharSeq(new java.lang.String(new char[] { element }));
}
/**
* Creates a CharSeq of the given elements.
*
* @param elements Zero or more elements.
* @return A CharSeq containing the given elements in the same order.
* @throws NullPointerException if {@code elements} is null
*/
public static CharSeq of(Character... elements) {
Objects.requireNonNull(elements, "elements is null");
final char[] chrs = new char[elements.length];
for (int i = 0; i < elements.length; i++) {
chrs[i] = elements[i];
}
return new CharSeq(new java.lang.String(chrs));
}
/**
* Creates a CharSeq from a {@code CharSequence}.
*
* @param sequence {@code CharSequence} instance.
* @return A new {@code CharSeq}
*/
public static CharSeq of(CharSequence sequence) {
Objects.requireNonNull(sequence, "sequence is null");
return sequence.length() == 0 ? empty() : new CharSeq(sequence.toString());
}
/**
* Creates a CharSeq of the given elements.
*
* The resulting CharSeq has the same iteration order as the given iterable of elements
* if the iteration order of the elements is stable.
*
* @param elements An Iterable of elements.
* @return A CharSeq containing the given elements in the same order.
* @throws NullPointerException if {@code elements} is null
*/
public static CharSeq ofAll(java.lang.Iterable<? extends Character> elements) {
Objects.requireNonNull(elements, "elements is null");
final StringBuilder sb = new StringBuilder();
for (Character character : elements) {
sb.append(character);
}
return sb.length() == 0 ? EMPTY : of(sb.toString());
}
/**
* Creates a CharSeq based on the elements of a char array.
*
* @param array a char array
* @return A new CharSeq of Character values
*/
static CharSeq ofAll(char[] array) {
Objects.requireNonNull(array, "array is null");
return new CharSeq(String.valueOf(array));
}
//
//
// IndexedSeq
//
//
@Override
public CharSeq append(Character element) {
return of(back + element);
}
@Override
public CharSeq appendAll(java.lang.Iterable<? extends Character> elements) {
Objects.requireNonNull(elements, "elements is null");
final StringBuilder sb = new StringBuilder(back);
for (char element : elements) {
sb.append(element);
}
return of(sb.toString());
}
@Override
public CharSeq clear() {
return EMPTY;
}
@Override
public Vector<Tuple2<Character, Character>> crossProduct() {
return crossProduct(this);
}
@Override
public <U> Vector<Tuple2<Character, U>> crossProduct(java.lang.Iterable<? extends U> that) {
Objects.requireNonNull(that, "that is null");
final Vector<U> other = Vector.ofAll(that);
return flatMap(a -> other.map(b -> Tuple.of(a, b)));
}
@Override
public Vector<CharSeq> combinations() {
return Vector.rangeClosed(0, length()).map(this::combinations).flatMap(Function.identity());
}
@Override
public Vector<CharSeq> combinations(int k) {
class Recursion {
Vector<CharSeq> combinations(CharSeq elements, int k) {
return (k == 0)
? Vector.of(CharSeq.empty())
: elements.zipWithIndex().flatMap(t -> combinations(elements.drop(t._2 + 1), (k - 1))
.map((CharSeq c) -> c.prepend(t._1)));
}
}
return new Recursion().combinations(this, Math.max(k, 0));
}
@Override
public CharSeq distinct() {
return distinctBy(Function.identity());
}
@Override
public CharSeq distinctBy(Comparator<? super Character> comparator) {
Objects.requireNonNull(comparator, "comparator is null");
final java.util.Set<Character> seen = new java.util.TreeSet<>(comparator);
return filter(seen::add);
}
@Override
public <U> CharSeq distinctBy(Function<? super Character, ? extends U> keyExtractor) {
Objects.requireNonNull(keyExtractor, "keyExtractor is null");
final java.util.Set<U> seen = new java.util.HashSet<>();
return filter(t -> seen.add(keyExtractor.apply(t)));
}
@Override
public CharSeq drop(int n) {
if (n >= length()) {
return EMPTY;
}
if (n <= 0) {
return this;
} else {
return of(back.substring(n));
}
}
@Override
public CharSeq dropRight(int n) {
if (n >= length()) {
return EMPTY;
}
if (n <= 0) {
return this;
} else {
return of(back.substring(0, length() - n));
}
}
@Override
public CharSeq dropWhile(Predicate<? super Character> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
int index = 0;
while (index < length() && predicate.test(charAt(index))) {
index++;
}
return index < length() ? (index == 0 ? this : of(back.substring(index))) : empty();
}
@Override
public CharSeq filter(Predicate<? super Character> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
final StringBuilder sb = new StringBuilder();
for (int i = 0; i < back.length(); i++) {
final char ch = back.charAt(i);
if (predicate.test(ch)) {
sb.append(ch);
}
}
return sb.length() == 0 ? EMPTY : sb.length() == length() ? this : of(sb.toString());
}
@Override
public <U> Vector<U> flatMap(Function<? super Character, ? extends java.lang.Iterable<? extends U>> mapper) {
Objects.requireNonNull(mapper, "mapper is null");
if (isEmpty()) {
return Vector.empty();
} else {
Vector<U> result = Vector.empty();
for (int i = 0; i < length(); i++) {
for (U u : mapper.apply(get(i))) {
result = result.append(u);
}
}
return result;
}
}
public CharSeq flatMapChars(CharFunction<? extends CharSequence> mapper) {
Objects.requireNonNull(mapper, "mapper is null");
if (isEmpty()) {
return this;
} else {
final StringBuilder builder = new StringBuilder();
back.chars().forEach(c -> builder.append(mapper.apply((char) c)));
return new CharSeq(builder.toString());
}
}
@Override
public <U> Vector<U> flatMapVal(Function<? super Character, ? extends Value<? extends U>> mapper) {
return flatMap(mapper);
}
@Override
public Vector<Object> flatten() {
return Vector.ofAll(iterator());
}
@Override
public <C> Map<C, CharSeq> groupBy(Function<? super Character, ? extends C> classifier) {
Objects.requireNonNull(classifier, "classifier is null");
return foldLeft(HashMap.empty(), (map, t) -> {
final C key = classifier.apply(t);
final CharSeq values = map.get(key).map(ts -> ts.append(t)).orElse(CharSeq.of(t));
return map.put(key, values);
});
}
@Override
public boolean hasDefiniteSize() {
return true;
}
@Override
public CharSeq init() {
if (isEmpty()) {
throw new UnsupportedOperationException("init of empty string");
} else {
return of(back.substring(0, length() - 1));
}
}
@Override
public Option<CharSeq> initOption() {
if (isEmpty()) {
return None.instance();
} else {
return new Some<>(init());
}
}
@Override
public CharSeq insert(int index, Character element) {
if (index < 0) {
throw new IndexOutOfBoundsException("insert(" + index + ", e)");
}
if (index > length()) {
throw new IndexOutOfBoundsException("insert(" + index + ", e) on String of length " + length());
}
return of(new StringBuilder(back).insert(index, element).toString());
}
@Override
public CharSeq insertAll(int index, java.lang.Iterable<? extends Character> elements) {
Objects.requireNonNull(elements, "elements is null");
if (index < 0) {
throw new IndexOutOfBoundsException("insertAll(" + index + ", elements)");
}
if (index > length()) {
throw new IndexOutOfBoundsException("insertAll(" + index + ", elements) on String of length " + length());
}
final java.lang.String javaString = back;
final StringBuilder sb = new StringBuilder(javaString.substring(0, index));
for (Character element : elements) {
sb.append(element);
}
sb.append(javaString.substring(index));
return of(sb.toString());
}
@Override
public Iterator<Character> iterator() {
return new Iterator<Character>() {
private int index = 0;
@Override
public boolean hasNext() {
return index < back.length();
}
@Override
public Character next() {
if (index >= back.length()) {
throw new NoSuchElementException();
}
return back.charAt(index++);
}
};
}
@Override
public CharSeq intersperse(Character element) {
final StringBuilder sb = new StringBuilder();
for (int i = 0; i < length(); i++) {
if (i > 0) {
sb.append(element);
}
sb.append(get(i));
}
return sb.length() == 0 ? EMPTY : of(sb.toString());
}
@Override
public <U> Vector<U> map(Function<? super Character, ? extends U> mapper) {
Objects.requireNonNull(mapper, "mapper is null");
Vector<U> result = Vector.empty();
for (int i = 0; i < length(); i++) {
result = result.append(mapper.apply(get(i)));
}
return result;
}
public CharSeq mapChars(CharUnaryOperator mapper) {
Objects.requireNonNull(mapper, "mapper is null");
if (isEmpty()) {
return this;
} else {
final char[] chars = back.toCharArray();
for (int i = 0; i < chars.length; i++) {
chars[i] = mapper.apply(chars[i]);
}
return CharSeq.ofAll(chars);
}
}
@Override
public Tuple2<CharSeq, CharSeq> partition(Predicate<? super Character> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
if (isEmpty()) {
return Tuple.of(EMPTY, EMPTY);
}
final StringBuilder left = new StringBuilder();
final StringBuilder right = new StringBuilder();
for (int i = 0; i < length(); i++) {
Character t = get(i);
(predicate.test(t) ? left : right).append(t);
}
if (left.length() == 0) {
return Tuple.of(EMPTY, of(right.toString()));
} else if (right.length() == 0) {
return Tuple.of(of(left.toString()), EMPTY);
} else {
return Tuple.of(of(left.toString()), of(right.toString()));
}
}
@Override
public CharSeq peek(Consumer<? super Character> action) {
Objects.requireNonNull(action, "action is null");
if (!isEmpty()) {
action.accept(back.charAt(0));
}
return this;
}
@Override
public Vector<CharSeq> permutations() {
if (isEmpty()) {
return Vector.empty();
} else {
if (length() == 1) {
return Vector.of(this);
} else {
Vector<CharSeq> result = Vector.empty();
for (Character t : distinct()) {
for (CharSeq ts : remove(t).permutations()) {
result = result.append(ts);
}
}
return result;
}
}
}
@Override
public CharSeq prepend(Character element) {
return of(element + back);
}
@Override
public CharSeq prependAll(java.lang.Iterable<? extends Character> elements) {
Objects.requireNonNull(elements, "elements is null");
final StringBuilder sb = new StringBuilder();
for (Character element : elements) {
sb.append(element);
}
sb.append(back);
return sb.length() == 0 ? EMPTY : of(sb.toString());
}
@Override
public CharSeq remove(Character element) {
final StringBuilder sb = new StringBuilder();
boolean found = false;
for (int i = 0; i < length(); i++) {
char c = get(i);
if (!found && c == element) {
found = true;
} else {
sb.append(c);
}
}
return sb.length() == 0 ? EMPTY : sb.length() == length() ? this : of(sb.toString());
}
@Override
public CharSeq removeFirst(Predicate<Character> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
final StringBuilder sb = new StringBuilder();
boolean found = false;
for (int i = 0; i < back.length(); i++) {
final char ch = back.charAt(i);
if (predicate.test(ch)) {
if (found) {
sb.append(ch);
}
found = true;
} else {
sb.append(ch);
}
}
return found ? (sb.length() == 0 ? EMPTY : of(sb.toString())) : this;
}
@Override
public CharSeq removeLast(Predicate<Character> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
for (int i = length() - 1; i >= 0; i--) {
if (predicate.test(back.charAt(i))) {
return removeAt(i);
}
}
return this;
}
@Override
public CharSeq removeAt(int indx) {
final java.lang.String removed = back.substring(0, indx) + back.substring(indx + 1);
return removed.isEmpty() ? EMPTY : of(removed);
}
@Override
public CharSeq removeAll(Character element) {
final StringBuilder sb = new StringBuilder();
for (int i = 0; i < length(); i++) {
final char c = back.charAt(i);
if (c != element) {
sb.append(c);
}
}
return sb.length() == 0 ? EMPTY : sb.length() == length() ? this : of(sb.toString());
}
@Override
public CharSeq removeAll(java.lang.Iterable<? extends Character> elements) {
Objects.requireNonNull(elements, "elements is null");
final java.util.Set<Character> distinct = new HashSet<>();
for (Character element : elements) {
distinct.add(element);
}
final StringBuilder sb = new StringBuilder();
for (int i = 0; i < length(); i++) {
final char c = back.charAt(i);
if (!distinct.contains(c)) {
sb.append(c);
}
}
return sb.length() == 0 ? EMPTY : sb.length() == length() ? this : of(sb.toString());
}
@Override
public CharSeq replace(Character currentElement, Character newElement) {
final StringBuilder sb = new StringBuilder();
boolean found = false;
for (int i = 0; i < length(); i++) {
final char c = back.charAt(i);
if (c == currentElement && !found) {
sb.append(newElement);
found = true;
} else {
sb.append(c);
}
}
return found ? (sb.length() == 0 ? EMPTY : of(sb.toString())) : this;
}
@Override
public CharSeq replaceAll(Character currentElement, Character newElement) {
final StringBuilder sb = new StringBuilder();
boolean found = false;
for (int i = 0; i < length(); i++) {
final char c = back.charAt(i);
if (c == currentElement) {
sb.append(newElement);
found = true;
} else {
sb.append(c);
}
}
return found ? (sb.length() == 0 ? EMPTY : of(sb.toString())) : this;
}
@Override
public CharSeq replaceAll(UnaryOperator<Character> operator) {
Objects.requireNonNull(operator, "operator is null");
final StringBuilder sb = new StringBuilder();
for (int i = 0; i < length(); i++) {
sb.append(operator.apply(back.charAt(i)));
}
return sb.length() == 0 ? EMPTY : of(sb.toString());
}
@Override
public CharSeq retainAll(java.lang.Iterable<? extends Character> elements) {
Objects.requireNonNull(elements, "elements is null");
final java.util.Set<Character> keeped = new HashSet<>();
for (Character element : elements) {
keeped.add(element);
}
final StringBuilder sb = new StringBuilder();
for (int i = 0; i < length(); i++) {
final char c = back.charAt(i);
if (keeped.contains(c)) {
sb.append(c);
}
}
return sb.length() == 0 ? EMPTY : of(sb.toString());
}
@Override
public CharSeq reverse() {
return of(new StringBuilder(back).reverse().toString());
}
@Override
public CharSeq set(int index, Character element) {
if (index < 0) {
throw new IndexOutOfBoundsException("set(" + index + ")");
}
if (index >= length()) {
throw new IndexOutOfBoundsException("set(" + index + ")");
}
return of(back.substring(0, index) + element + back.substring(index + 1));
}
@Override
public CharSeq slice(int beginIndex) {
if (beginIndex < 0) {
throw new IndexOutOfBoundsException("slice(" + beginIndex + ")");
}
if (beginIndex > length()) {
throw new IndexOutOfBoundsException("slice(" + beginIndex + ")");
}
return of(back.substring(beginIndex));
}
@Override
public CharSeq slice(int beginIndex, int endIndex) {
if (beginIndex < 0 || beginIndex > endIndex || endIndex > length()) {
throw new IndexOutOfBoundsException(
java.lang.String.format("slice(%s, %s) on List of length %s", beginIndex, endIndex, length()));
}
if (beginIndex == endIndex) {
return EMPTY;
}
return of(back.substring(beginIndex, endIndex));
}
@Override
public CharSeq sort() {
return isEmpty() ? this : toJavaStream().sorted().collect(CharSeq.collector());
}
@Override
public CharSeq sort(Comparator<? super Character> comparator) {
Objects.requireNonNull(comparator, "comparator is null");
return isEmpty() ? this : toJavaStream().sorted(comparator).collect(CharSeq.collector());
}
@Override
public Tuple2<CharSeq, CharSeq> span(Predicate<? super Character> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
final StringBuilder sb = new StringBuilder();
for (int i = 0; i < length(); i++) {
final char c = back.charAt(i);
if (predicate.test(c)) {
sb.append(c);
} else {
break;
}
}
if (sb.length() == 0) {
return Tuple.of(EMPTY, this);
} else if (sb.length() == length()) {
return Tuple.of(this, EMPTY);
} else {
return Tuple.of(of(sb.toString()), of(back.substring(sb.length())));
}
}
@Override
public Spliterator<Character> spliterator() {
return Spliterators.spliterator(iterator(), length(), Spliterator.ORDERED | Spliterator.IMMUTABLE);
}
@Override
public CharSeq tail() {
if (isEmpty()) {
throw new UnsupportedOperationException("tail of empty string");
} else {
return of(back.substring(1));
}
}
@Override
public Option<CharSeq> tailOption() {
if (isEmpty()) {
return None.instance();
} else {
return new Some<>(of(back.substring(1)));
}
}
@Override
public CharSeq take(int n) {
if (n >= length()) {
return this;
}
if (n <= 0) {
return EMPTY;
} else {
return of(back.substring(0, n));
}
}
@Override
public CharSeq takeRight(int n) {
if (n >= length()) {
return this;
}
if (n <= 0) {
return EMPTY;
} else {
return of(back.substring(length() - n));
}
}
@Override
public CharSeq takeWhile(Predicate<? super Character> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
final StringBuilder sb = new StringBuilder();
for (int i = 0; i < length(); i++) {
char c = back.charAt(i);
if (!predicate.test(c)) {
break;
}
sb.append(c);
}
return sb.length() == length() ? this : sb.length() == 0 ? EMPTY : of(sb.toString());
}
@Override
public <U> Vector<U> unit(java.lang.Iterable<? extends U> iterable) {
Objects.requireNonNull(iterable, "iterable is null");
return Vector.ofAll(iterable);
}
@Override
public <T1, T2> Tuple2<Vector<T1>, Vector<T2>> unzip(Function<? super Character, Tuple2<? extends T1, ? extends T2>> unzipper) {
Objects.requireNonNull(unzipper, "unzipper is null");
Vector<T1> xs = Vector.empty();
Vector<T2> ys = Vector.empty();
for (int i = 0; i < length(); i++) {
final Tuple2<? extends T1, ? extends T2> t = unzipper.apply(back.charAt(i));
xs = xs.append(t._1);
ys = ys.append(t._2);
}
return Tuple.of(xs.length() == 0 ? Vector.<T1> empty() : xs, ys.length() == 0 ? Vector.<T2> empty() : ys);
}
@Override
public <U> Vector<Tuple2<Character, U>> zip(java.lang.Iterable<U> that) {
Objects.requireNonNull(that, "that is null");
Vector<Tuple2<Character, U>> result = Vector.empty();
Iterator<Character> list1 = iterator();
java.util.Iterator<U> list2 = that.iterator();
while (list1.hasNext() && list2.hasNext()) {
result = result.append(Tuple.of(list1.next(), list2.next()));
}
return result;
}
@Override
public <U> Vector<Tuple2<Character, U>> zipAll(java.lang.Iterable<U> that, Character thisElem, U thatElem) {
Objects.requireNonNull(that, "that is null");
Vector<Tuple2<Character, U>> result = Vector.empty();
Iterator<Character> list1 = iterator();
java.util.Iterator<U> list2 = that.iterator();
while (list1.hasNext() || list2.hasNext()) {
final Character elem1 = list1.hasNext() ? list1.next() : thisElem;
final U elem2 = list2.hasNext() ? list2.next() : thatElem;
result = result.append(Tuple.of(elem1, elem2));
}
return result;
}
@Override
public Vector<Tuple2<Character, Integer>> zipWithIndex() {
Vector<Tuple2<Character, Integer>> result = Vector.empty();
for (int i = 0; i < length(); i++) {
result = result.append(Tuple.of(get(i), i));
}
return result;
}
@Override
public Character get(int index) {
return back.charAt(index);
}
@Override
public int indexOf(Character element, int from) {
return back.indexOf(element, from);
}
@Override
public int lastIndexOf(Character element, int end) {
return back.lastIndexOf(element, end);
}
@Override
public Tuple2<CharSeq, CharSeq> splitAt(int n) {
if (n <= 0) {
return Tuple.of(EMPTY, this);
} else if (n >= length()) {
return Tuple.of(this, EMPTY);
} else {
return Tuple.of(of(back.substring(0, n)), of(back.substring(n)));
}
}
@Override
public Tuple2<CharSeq, CharSeq> splitAt(Predicate<? super Character> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
if (isEmpty()) {
return Tuple.of(EMPTY, EMPTY);
}
final StringBuilder left = new StringBuilder();
for (int i = 0; i < length(); i++) {
Character t = get(i);
if (!predicate.test(t)) {
left.append(t);
} else {
break;
}
}
if (left.length() == 0) {
return Tuple.of(EMPTY, this);
} else if (left.length() == length()) {
return Tuple.of(this, EMPTY);
} else {
return Tuple.of(of(left.toString()), of(back.substring(left.length())));
}
}
@Override
public Tuple2<CharSeq, CharSeq> splitAtInclusive(Predicate<? super Character> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
if (isEmpty()) {
return Tuple.of(EMPTY, EMPTY);
}
final StringBuilder left = new StringBuilder();
for (int i = 0; i < length(); i++) {
Character t = get(i);
left.append(t);
if (predicate.test(t)) {
break;
}
}
if (left.length() == 0) {
return Tuple.of(EMPTY, this);
} else if (left.length() == length()) {
return Tuple.of(this, EMPTY);
} else {
return Tuple.of(of(left.toString()), of(back.substring(left.length())));
}
}
@Override
public boolean startsWith(Iterable<? extends Character> that, int offset) {
return startsWith(CharSeq.ofAll(that), offset);
}
@Override
public Character head() {
if (isEmpty()) {
throw new NoSuchElementException("head of empty string");
} else {
return back.charAt(0);
}
}
@Override
public Option<Character> headOption() {
if (isEmpty()) {
return None.instance();
} else {
return new Some<>(back.charAt(0));
}
}
@Override
public boolean isEmpty() {
return back.isEmpty();
}
@Override
public boolean isTraversableAgain() {
return true;
}
private Object readResolve() {
return isEmpty() ? EMPTY : this;
}
@Override
public boolean equals(Object o) {
if (o == this) {
return true;
} else if (o instanceof CharSeq) {
return ((CharSeq) o).back.equals(back);
} else {
return false;
}
}
@Override
public int hashCode() {
return back.hashCode();
}
//
//
// java.lang.CharSequence
//
//
/**
* Returns the {@code char} value at the
* specified index. An index ranges from {@code 0} to
* {@code length() - 1}. The first {@code char} value of the sequence
* is at index {@code 0}, the next at index {@code 1},
* and so on, as for array indexing.
*
* <p>If the {@code char} value specified by the index is a
* <a href="Character.html#unicode">surrogate</a>, the surrogate
* value is returned.
*
* @param index the index of the {@code char} value.
* @return the {@code char} value at the specified index of this string.
* The first {@code char} value is at index {@code 0}.
* @throws IndexOutOfBoundsException if the {@code index}
* argument is negative or not less than the length of this
* string.
*/
@Override
public char charAt(int index) {
return back.charAt(index);
}
/**
* Returns the length of this string.
* The length is equal to the number of <a href="Character.html#unicode">Unicode
* code units</a> in the string.
*
* @return the length of the sequence of characters represented by this
* object.
*/
@Override
public int length() {
return back.length();
}
//
//
// java.lang.String
//
//
/**
* Returns the character (Unicode code point) at the specified
* index. The index refers to {@code char} values
* (Unicode code units) and ranges from {@code 0} to
* {@link #length()}{@code - 1}.
*
* <p> If the {@code char} value specified at the given index
* is in the high-surrogate range, the following index is less
* than the length of this {@code CharSeq}, and the
* {@code char} value at the following index is in the
* low-surrogate range, then the supplementary code point
* corresponding to this surrogate pair is returned. Otherwise,
* the {@code char} value at the given index is returned.
*
* @param index the index to the {@code char} values
* @return the code point value of the character at the
* {@code index}
* @throws IndexOutOfBoundsException if the {@code index}
* argument is negative or not less than the length of this
* string.
*/
public int codePointAt(int index) {
return back.codePointAt(index);
}
/**
* Returns the character (Unicode code point) before the specified
* index. The index refers to {@code char} values
* (Unicode code units) and ranges from {@code 1} to {@link
* CharSequence#length() length}.
*
* <p> If the {@code char} value at {@code (index - 1)}
* is in the low-surrogate range, {@code (index - 2)} is not
* negative, and the {@code char} value at {@code (index -
* 2)} is in the high-surrogate range, then the
* supplementary code point value of the surrogate pair is
* returned. If the {@code char} value at {@code index -
* 1} is an unpaired low-surrogate or a high-surrogate, the
* surrogate value is returned.
*
* @param index the index following the code point that should be returned
* @return the Unicode code point value before the given index.
* @throws IndexOutOfBoundsException if the {@code index}
* argument is less than 1 or greater than the length
* of this string.
*/
public int codePointBefore(int index) {
return back.codePointBefore(index);
}
/**
* Returns the number of Unicode code points in the specified text
* range of this {@code CharSeq}. The text range begins at the
* specified {@code beginIndex} and extends to the
* {@code char} at index {@code endIndex - 1}. Thus the
* length (in {@code char}s) of the text range is
* {@code endIndex-beginIndex}. Unpaired surrogates within
* the text range count as one code point each.
*
* @param beginIndex the index to the first {@code char} of
* the text range.
* @param endIndex the index after the last {@code char} of
* the text range.
* @return the number of Unicode code points in the specified text
* range
* @throws IndexOutOfBoundsException if the
* {@code beginIndex} is negative, or {@code endIndex}
* is larger than the length of this {@code CharSeq}, or
* {@code beginIndex} is larger than {@code endIndex}.
*/
public int codePointCount(int beginIndex, int endIndex) {
return back.codePointCount(beginIndex, endIndex);
}
/**
* Returns the index within this {@code CharSeq} that is
* offset from the given {@code index} by
* {@code codePointOffset} code points. Unpaired surrogates
* within the text range given by {@code index} and
* {@code codePointOffset} count as one code point each.
*
* @param index the index to be offset
* @param codePointOffset the offset in code points
* @return the index within this {@code CharSeq}
* @throws IndexOutOfBoundsException if {@code index}
* is negative or larger then the length of this
* {@code CharSeq}, or if {@code codePointOffset} is positive
* and the substring starting with {@code index} has fewer
* than {@code codePointOffset} code points,
* or if {@code codePointOffset} is negative and the substring
* before {@code index} has fewer than the absolute value
* of {@code codePointOffset} code points.
*/
public int offsetByCodePoints(int index, int codePointOffset) {
return back.offsetByCodePoints(index, codePointOffset);
}
/**
* Copies characters from this string into the destination character
* array.
* <p>
* The first character to be copied is at index {@code srcBegin};
* the last character to be copied is at index {@code srcEnd-1}
* (thus the total number of characters to be copied is
* {@code srcEnd-srcBegin}). The characters are copied into the
* subarray of {@code dst} starting at index {@code dstBegin}
* and ending at index:
* <blockquote><pre>
* dstbegin + (srcEnd-srcBegin) - 1
* </pre></blockquote>
*
* @param srcBegin index of the first character in the string
* to copy.
* @param srcEnd index after the last character in the string
* to copy.
* @param dst the destination array.
* @param dstBegin the start offset in the destination array.
* @throws IndexOutOfBoundsException If any of the following
* is true:
* <ul><li>{@code srcBegin} is negative.
* <li>{@code srcBegin} is greater than {@code srcEnd}
* <li>{@code srcEnd} is greater than the length of this
* string
* <li>{@code dstBegin} is negative
* <li>{@code dstBegin+(srcEnd-srcBegin)} is larger than
* {@code dst.length}</ul>
*/
public void getChars(int srcBegin, int srcEnd, char dst[], int dstBegin) {
back.getChars(srcBegin, srcEnd, dst, dstBegin);
}
/**
* Encodes this {@code CharSeq} into a sequence of bytes using the named
* charset, storing the result into a new byte array.
*
* <p> The behavior of this method when this string cannot be encoded in
* the given charset is unspecified. The {@link
* java.nio.charset.CharsetEncoder} class should be used when more control
* over the encoding process is required.
*
* @param charsetName The name of a supported {@linkplain java.nio.charset.Charset
* charset}
* @return The resultant byte array
* @throws UnsupportedEncodingException If the named charset is not supported
*/
public byte[] getBytes(java.lang.String charsetName) throws UnsupportedEncodingException {
return back.getBytes(charsetName);
}
/**
* Encodes this {@code CharSeq} into a sequence of bytes using the given
* {@linkplain java.nio.charset.Charset charset}, storing the result into a
* new byte array.
*
* <p> This method always replaces malformed-input and unmappable-character
* sequences with this charset's default replacement byte array. The
* {@link java.nio.charset.CharsetEncoder} class should be used when more
* control over the encoding process is required.
*
* @param charset The {@linkplain java.nio.charset.Charset} to be used to encode
* the {@code CharSeq}
* @return The resultant byte array
*/
public byte[] getBytes(Charset charset) {
return back.getBytes(charset);
}
/**
* Encodes this {@code CharSeq} into a sequence of bytes using the
* platform's default charset, storing the result into a new byte array.
*
* <p> The behavior of this method when this string cannot be encoded in
* the default charset is unspecified. The {@link
* java.nio.charset.CharsetEncoder} class should be used when more control
* over the encoding process is required.
*
* @return The resultant byte array
*/
public byte[] getBytes() {
return back.getBytes();
}
/**
* Compares this string to the specified {@code StringBuffer}. The result
* is {@code true} if and only if this {@code CharSeq} represents the same
* sequence of characters as the specified {@code StringBuffer}. This method
* synchronizes on the {@code StringBuffer}.
*
* @param sb The {@code StringBuffer} to compare this {@code CharSeq} against
* @return {@code true} if this {@code CharSeq} represents the same
* sequence of characters as the specified {@code StringBuffer},
* {@code false} otherwise
*/
public boolean contentEquals(StringBuffer sb) {
return back.contentEquals(sb);
}
/**
* Compares this string to the specified {@code CharSequence}. The
* result is {@code true} if and only if this {@code CharSeq} represents the
* same sequence of char values as the specified sequence. Note that if the
* {@code CharSequence} is a {@code StringBuffer} then the method
* synchronizes on it.
*
* @param cs The sequence to compare this {@code CharSeq} against
* @return {@code true} if this {@code CharSeq} represents the same
* sequence of char values as the specified sequence, {@code
* false} otherwise
*/
public boolean contentEquals(CharSequence cs) {
return back.contentEquals(cs);
}
/**
* Compares this {@code CharSeq} to another {@code CharSeq}, ignoring case
* considerations. Two strings are considered equal ignoring case if they
* are of the same length and corresponding characters in the two strings
* are equal ignoring case.
*
* <p> Two characters {@code c1} and {@code c2} are considered the same
* ignoring case if at least one of the following is true:
* <ul>
* <li> The two characters are the same (as compared by the
* {@code ==} operator)
* <li> Applying the method {@link
* java.lang.Character#toUpperCase(char)} to each character
* produces the same result
* <li> Applying the method {@link
* java.lang.Character#toLowerCase(char)} to each character
* produces the same result
* </ul>
*
* @param anotherString The {@code CharSeq} to compare this {@code CharSeq} against
* @return {@code true} if the argument is not {@code null} and it
* represents an equivalent {@code CharSeq} ignoring case; {@code
* false} otherwise
* @see #equals(Object)
*/
public boolean equalsIgnoreCase(CharSeq anotherString) {
return back.equalsIgnoreCase(anotherString.back);
}
/**
* Compares two strings lexicographically.
* The comparison is based on the Unicode value of each character in
* the strings. The character sequence represented by this
* {@code CharSeq} object is compared lexicographically to the
* character sequence represented by the argument string. The result is
* a negative integer if this {@code CharSeq} object
* lexicographically precedes the argument string. The result is a
* positive integer if this {@code CharSeq} object lexicographically
* follows the argument string. The result is zero if the strings
* are equal; {@code compareTo} returns {@code 0} exactly when
* the {@link #equals(Object)} method would return {@code true}.
* <p>
* This is the definition of lexicographic ordering. If two strings are
* different, then either they have different characters at some index
* that is a valid index for both strings, or their lengths are different,
* or both. If they have different characters at one or more index
* positions, let <i>k</i> be the smallest such index; then the string
* whose character at position <i>k</i> has the smaller value, as
* determined by using the < operator, lexicographically precedes the
* other string. In this case, {@code compareTo} returns the
* difference of the two character values at position {@code k} in
     * the two strings -- that is, the value:
* <blockquote><pre>
* this.charAt(k)-anotherString.charAt(k)
* </pre></blockquote>
* If there is no index position at which they differ, then the shorter
* string lexicographically precedes the longer string. In this case,
* {@code compareTo} returns the difference of the lengths of the
* strings -- that is, the value:
* <blockquote><pre>
* this.length()-anotherString.length()
* </pre></blockquote>
*
* @param anotherString the {@code CharSeq} to be compared.
* @return the value {@code 0} if the argument string is equal to
* this string; a value less than {@code 0} if this string
* is lexicographically less than the string argument; and a
* value greater than {@code 0} if this string is
* lexicographically greater than the string argument.
*/
public int compareTo(CharSeq anotherString) {
return back.compareTo(anotherString.back);
}
/**
* Compares two strings lexicographically, ignoring case
* differences. This method returns an integer whose sign is that of
* calling {@code compareTo} with normalized versions of the strings
* where case differences have been eliminated by calling
* {@code Character.toLowerCase(Character.toUpperCase(character))} on
* each character.
* <p>
* Note that this method does <em>not</em> take locale into account,
* and will result in an unsatisfactory ordering for certain locales.
* The java.text package provides <em>collators</em> to allow
* locale-sensitive ordering.
*
* @param str the {@code CharSeq} to be compared.
* @return a negative integer, zero, or a positive integer as the
* specified String is greater than, equal to, or less
* than this String, ignoring case considerations.
*/
public int compareToIgnoreCase(CharSeq str) {
return back.compareToIgnoreCase(str.back);
}
/**
* Tests if two string regions are equal.
* <p>
* A substring of this {@code CharSeq} object is compared to a substring
* of the argument other. The result is true if these substrings
* represent identical character sequences. The substring of this
* {@code CharSeq} object to be compared begins at index {@code toffset}
* and has length {@code len}. The substring of other to be compared
* begins at index {@code ooffset} and has length {@code len}. The
* result is {@code false} if and only if at least one of the following
* is true:
* <ul><li>{@code toffset} is negative.
* <li>{@code ooffset} is negative.
* <li>{@code toffset+len} is greater than the length of this
* {@code CharSeq} object.
* <li>{@code ooffset+len} is greater than the length of the other
* argument.
* <li>There is some nonnegative integer <i>k</i> less than {@code len}
* such that:
* {@code this.charAt(toffset + }<i>k</i>{@code ) != other.charAt(ooffset + }
* <i>k</i>{@code )}
* </ul>
*
* @param toffset the starting offset of the subregion in this string.
* @param other the string argument.
* @param ooffset the starting offset of the subregion in the string
* argument.
* @param len the number of characters to compare.
* @return {@code true} if the specified subregion of this string
* exactly matches the specified subregion of the string argument;
* {@code false} otherwise.
*/
public boolean regionMatches(int toffset, CharSeq other, int ooffset, int len) {
return back.regionMatches(toffset, other.back, ooffset, len);
}
/**
* Tests if two string regions are equal.
* <p>
* A substring of this {@code CharSeq} object is compared to a substring
* of the argument {@code other}. The result is {@code true} if these
* substrings represent character sequences that are the same, ignoring
* case if and only if {@code ignoreCase} is true. The substring of
* this {@code CharSeq} object to be compared begins at index
* {@code toffset} and has length {@code len}. The substring of
* {@code other} to be compared begins at index {@code ooffset} and
* has length {@code len}. The result is {@code false} if and only if
* at least one of the following is true:
* <ul><li>{@code toffset} is negative.
* <li>{@code ooffset} is negative.
* <li>{@code toffset+len} is greater than the length of this
* {@code CharSeq} object.
* <li>{@code ooffset+len} is greater than the length of the other
* argument.
* <li>{@code ignoreCase} is {@code false} and there is some nonnegative
* integer <i>k</i> less than {@code len} such that:
* <blockquote><pre>
* this.charAt(toffset+k) != other.charAt(ooffset+k)
* </pre></blockquote>
* <li>{@code ignoreCase} is {@code true} and there is some nonnegative
* integer <i>k</i> less than {@code len} such that:
* <blockquote><pre>
* Character.toLowerCase(this.charAt(toffset+k)) !=
* Character.toLowerCase(other.charAt(ooffset+k))
* </pre></blockquote>
* and:
* <blockquote><pre>
* Character.toUpperCase(this.charAt(toffset+k)) !=
* Character.toUpperCase(other.charAt(ooffset+k))
* </pre></blockquote>
* </ul>
*
* @param ignoreCase if {@code true}, ignore case when comparing
* characters.
* @param toffset the starting offset of the subregion in this
* string.
* @param other the string argument.
* @param ooffset the starting offset of the subregion in the string
* argument.
* @param len the number of characters to compare.
* @return {@code true} if the specified subregion of this string
* matches the specified subregion of the string argument;
* {@code false} otherwise. Whether the matching is exact
* or case insensitive depends on the {@code ignoreCase}
* argument.
*/
public boolean regionMatches(boolean ignoreCase, int toffset, CharSeq other, int ooffset, int len) {
return back.regionMatches(ignoreCase, toffset, other.back, ooffset, len);
}
@Override
public CharSeq subSequence(int beginIndex, int endIndex) {
return slice(beginIndex, endIndex);
}
/**
* Tests if the substring of this string beginning at the
* specified index starts with the specified prefix.
*
* @param prefix the prefix.
* @param toffset where to begin looking in this string.
* @return {@code true} if the character sequence represented by the
* argument is a prefix of the substring of this object starting
* at index {@code toffset}; {@code false} otherwise.
* The result is {@code false} if {@code toffset} is
* negative or greater than the length of this
* {@code CharSeq} object; otherwise the result is the same
* as the result of the expression
* <pre>
* this.substring(toffset).startsWith(prefix)
* </pre>
*/
public boolean startsWith(CharSeq prefix, int toffset) {
return back.startsWith(prefix.back, toffset);
}
/**
* Tests if this string starts with the specified prefix.
*
* @param prefix the prefix.
* @return {@code true} if the character sequence represented by the
* argument is a prefix of the character sequence represented by
* this string; {@code false} otherwise.
* Note also that {@code true} will be returned if the
* argument is an empty string or is equal to this
* {@code CharSeq} object as determined by the
* {@link #equals(Object)} method.
*/
public boolean startsWith(CharSeq prefix) {
return back.startsWith(prefix.back);
}
/**
* Tests if this string ends with the specified suffix.
*
* @param suffix the suffix.
* @return {@code true} if the character sequence represented by the
* argument is a suffix of the character sequence represented by
* this object; {@code false} otherwise. Note that the
* result will be {@code true} if the argument is the
* empty string or is equal to this {@code CharSeq} object
* as determined by the {@link #equals(Object)} method.
*/
public boolean endsWith(CharSeq suffix) {
return back.endsWith(suffix.back);
}
/**
* Returns the index within this string of the first occurrence of
* the specified character. If a character with value
* {@code ch} occurs in the character sequence represented by
* this {@code CharSeq} object, then the index (in Unicode
* code units) of the first such occurrence is returned. For
* values of {@code ch} in the range from 0 to 0xFFFF
* (inclusive), this is the smallest value <i>k</i> such that:
* <blockquote><pre>
* this.charAt(<i>k</i>) == ch
* </pre></blockquote>
* is true. For other values of {@code ch}, it is the
* smallest value <i>k</i> such that:
* <blockquote><pre>
* this.codePointAt(<i>k</i>) == ch
* </pre></blockquote>
* is true. In either case, if no such character occurs in this
* string, then {@code -1} is returned.
*
* @param ch a character (Unicode code point).
* @return the index of the first occurrence of the character in the
* character sequence represented by this object, or
* {@code -1} if the character does not occur.
*/
public int indexOf(int ch) {
return back.indexOf(ch);
}
/**
* Returns the index within this string of the first occurrence of the
* specified character, starting the search at the specified index.
* <p>
* If a character with value {@code ch} occurs in the
* character sequence represented by this {@code CharSeq}
* object at an index no smaller than {@code fromIndex}, then
* the index of the first such occurrence is returned. For values
* of {@code ch} in the range from 0 to 0xFFFF (inclusive),
* this is the smallest value <i>k</i> such that:
* <blockquote><pre>
* (this.charAt(<i>k</i>) == ch) {@code &&} (<i>k</i> >= fromIndex)
* </pre></blockquote>
* is true. For other values of {@code ch}, it is the
* smallest value <i>k</i> such that:
* <blockquote><pre>
* (this.codePointAt(<i>k</i>) == ch) {@code &&} (<i>k</i> >= fromIndex)
* </pre></blockquote>
* is true. In either case, if no such character occurs in this
* string at or after position {@code fromIndex}, then
* {@code -1} is returned.
*
* <p>
* There is no restriction on the value of {@code fromIndex}. If it
* is negative, it has the same effect as if it were zero: this entire
* string may be searched. If it is greater than the length of this
* string, it has the same effect as if it were equal to the length of
* this string: {@code -1} is returned.
*
* <p>All indices are specified in {@code char} values
* (Unicode code units).
*
* @param ch a character (Unicode code point).
* @param fromIndex the index to start the search from.
* @return the index of the first occurrence of the character in the
* character sequence represented by this object that is greater
* than or equal to {@code fromIndex}, or {@code -1}
* if the character does not occur.
*/
public int indexOf(int ch, int fromIndex) {
return back.indexOf(ch, fromIndex);
}
/**
* Returns the index within this string of the last occurrence of
* the specified character. For values of {@code ch} in the
* range from 0 to 0xFFFF (inclusive), the index (in Unicode code
* units) returned is the largest value <i>k</i> such that:
* <blockquote><pre>
* this.charAt(<i>k</i>) == ch
* </pre></blockquote>
* is true. For other values of {@code ch}, it is the
* largest value <i>k</i> such that:
* <blockquote><pre>
* this.codePointAt(<i>k</i>) == ch
* </pre></blockquote>
* is true. In either case, if no such character occurs in this
* string, then {@code -1} is returned. The
* {@code CharSeq} is searched backwards starting at the last
* character.
*
* @param ch a character (Unicode code point).
* @return the index of the last occurrence of the character in the
* character sequence represented by this object, or
* {@code -1} if the character does not occur.
*/
public int lastIndexOf(int ch) {
return back.lastIndexOf(ch);
}
/**
* Returns the index within this string of the last occurrence of
* the specified character, searching backward starting at the
* specified index. For values of {@code ch} in the range
* from 0 to 0xFFFF (inclusive), the index returned is the largest
* value <i>k</i> such that:
* <blockquote><pre>
* (this.charAt(<i>k</i>) == ch) {@code &&} (<i>k</i> <= fromIndex)
* </pre></blockquote>
* is true. For other values of {@code ch}, it is the
* largest value <i>k</i> such that:
* <blockquote><pre>
* (this.codePointAt(<i>k</i>) == ch) {@code &&} (<i>k</i> <= fromIndex)
* </pre></blockquote>
* is true. In either case, if no such character occurs in this
* string at or before position {@code fromIndex}, then
* {@code -1} is returned.
*
* <p>All indices are specified in {@code char} values
* (Unicode code units).
*
* @param ch a character (Unicode code point).
* @param fromIndex the index to start the search from. There is no
* restriction on the value of {@code fromIndex}. If it is
* greater than or equal to the length of this string, it has
* the same effect as if it were equal to one less than the
* length of this string: this entire string may be searched.
* If it is negative, it has the same effect as if it were -1:
* -1 is returned.
* @return the index of the last occurrence of the character in the
* character sequence represented by this object that is less
* than or equal to {@code fromIndex}, or {@code -1}
* if the character does not occur before that point.
*/
public int lastIndexOf(int ch, int fromIndex) {
return back.lastIndexOf(ch, fromIndex);
}
/**
* Returns the index within this string of the first occurrence of the
* specified substring.
*
* <p>The returned index is the smallest value <i>k</i> for which:
* <blockquote><pre>
* this.startsWith(str, <i>k</i>)
* </pre></blockquote>
* If no such value of <i>k</i> exists, then {@code -1} is returned.
*
* @param str the substring to search for.
* @return the index of the first occurrence of the specified substring,
* or {@code -1} if there is no such occurrence.
*/
public int indexOf(CharSeq str) {
return back.indexOf(str.back);
}
/**
* Returns the index within this string of the first occurrence of the
* specified substring, starting at the specified index.
*
* <p>The returned index is the smallest value <i>k</i> for which:
* <blockquote><pre>
* <i>k</i> >= fromIndex {@code &&} this.startsWith(str, <i>k</i>)
* </pre></blockquote>
* If no such value of <i>k</i> exists, then {@code -1} is returned.
*
* @param str the substring to search for.
* @param fromIndex the index from which to start the search.
* @return the index of the first occurrence of the specified substring,
* starting at the specified index,
* or {@code -1} if there is no such occurrence.
*/
public int indexOf(CharSeq str, int fromIndex) {
return back.indexOf(str.back, fromIndex);
}
/**
* Returns the index within this string of the last occurrence of the
* specified substring. The last occurrence of the empty string ""
* is considered to occur at the index value {@code this.length()}.
*
* <p>The returned index is the largest value <i>k</i> for which:
* <blockquote><pre>
* this.startsWith(str, <i>k</i>)
* </pre></blockquote>
* If no such value of <i>k</i> exists, then {@code -1} is returned.
*
* @param str the substring to search for.
* @return the index of the last occurrence of the specified substring,
* or {@code -1} if there is no such occurrence.
*/
public int lastIndexOf(CharSeq str) {
return back.lastIndexOf(str.back);
}
/**
* Returns the index within this string of the last occurrence of the
* specified substring, searching backward starting at the specified index.
*
* <p>The returned index is the largest value <i>k</i> for which:
* <blockquote><pre>
* <i>k</i> {@code <=} fromIndex {@code &&} this.startsWith(str, <i>k</i>)
* </pre></blockquote>
* If no such value of <i>k</i> exists, then {@code -1} is returned.
*
* @param str the substring to search for.
* @param fromIndex the index to start the search from.
* @return the index of the last occurrence of the specified substring,
* searching backward from the specified index,
* or {@code -1} if there is no such occurrence.
*/
public int lastIndexOf(CharSeq str, int fromIndex) {
return back.lastIndexOf(str.back, fromIndex);
}
/**
* Returns a string that is a substring of this string. The
* substring begins with the character at the specified index and
* extends to the end of this string. <p>
* Examples:
* <blockquote><pre>
* "unhappy".substring(2) returns "happy"
* "Harbison".substring(3) returns "bison"
* "emptiness".substring(9) returns "" (an empty string)
* </pre></blockquote>
*
* @param beginIndex the beginning index, inclusive.
* @return the specified substring.
* @throws IndexOutOfBoundsException if
* {@code beginIndex} is negative or larger than the
* length of this {@code CharSeq} object.
*/
public CharSeq substring(int beginIndex) {
return of(back.substring(beginIndex));
}
/**
* Returns a string that is a substring of this string. The
* substring begins at the specified {@code beginIndex} and
* extends to the character at index {@code endIndex - 1}.
* Thus the length of the substring is {@code endIndex-beginIndex}.
* <p>
* Examples:
* <blockquote><pre>
* "hamburger".substring(4, 8) returns "urge"
* "smiles".substring(1, 5) returns "mile"
* </pre></blockquote>
*
* @param beginIndex the beginning index, inclusive.
* @param endIndex the ending index, exclusive.
* @return the specified substring.
* @throws IndexOutOfBoundsException if the
* {@code beginIndex} is negative, or
* {@code endIndex} is larger than the length of
* this {@code CharSeq} object, or
* {@code beginIndex} is larger than
* {@code endIndex}.
*/
public CharSeq substring(int beginIndex, int endIndex) {
return of(back.substring(beginIndex, endIndex));
}
/**
* Returns a string containing the characters in this sequence in the same
* order as this sequence. The length of the string will be the length of
* this sequence.
*
* @return a string consisting of exactly this sequence of characters
*/
@Override
public java.lang.String toString() {
return back;
}
/**
* Concatenates the specified string to the end of this string.
* <p>
* If the length of the argument string is {@code 0}, then this
* {@code CharSeq} object is returned. Otherwise, a
* {@code CharSeq} object is returned that represents a character
* sequence that is the concatenation of the character sequence
* represented by this {@code CharSeq} object and the character
* sequence represented by the argument string.<p>
* Examples:
* <blockquote><pre>
* "cares".concat("s") returns "caress"
* "to".concat("get").concat("her") returns "together"
* </pre></blockquote>
*
* @param str the {@code CharSeq} that is concatenated to the end
* of this {@code CharSeq}.
* @return a string that represents the concatenation of this object's
* characters followed by the string argument's characters.
*/
public CharSeq concat(CharSeq str) {
return of(back.concat(str.back));
}
/**
* Tells whether or not this string matches the given <a
* href="../util/regex/Pattern.html#sum">regular expression</a>.
*
* <p> An invocation of this method of the form
* <i>str</i>{@code .matches(}<i>regex</i>{@code )} yields exactly the
* same result as the expression
*
* <blockquote>
* {@link java.util.regex.Pattern}.{@link java.util.regex.Pattern#matches(java.lang.String, CharSequence)
* matches(<i>regex</i>, <i>str</i>)}
* </blockquote>
*
* @param regex the regular expression to which this string is to be matched
* @return {@code true} if, and only if, this string matches the
* given regular expression
* @throws PatternSyntaxException if the regular expression's syntax is invalid
* @see java.util.regex.Pattern
*/
public boolean matches(java.lang.String regex) {
return back.matches(regex);
}
/**
* Returns true if and only if this string contains the specified
* sequence of char values.
*
* @param s the sequence to search for
* @return true if this string contains {@code s}, false otherwise
*/
public boolean contains(CharSequence s) {
return back.contains(s);
}
/**
* Replaces the first substring of this string that matches the given <a
* href="../util/regex/Pattern.html#sum">regular expression</a> with the
* given replacement.
*
* <p> An invocation of this method of the form
* <i>str</i>{@code .replaceFirst(}<i>regex</i>{@code ,} <i>repl</i>{@code )}
* yields exactly the same result as the expression
*
* <blockquote>
* <code>
* {@link java.util.regex.Pattern}.{@link
* java.util.regex.Pattern#compile compile}(<i>regex</i>).{@link
* java.util.regex.Pattern#matcher(java.lang.CharSequence) matcher}(<i>str</i>).{@link
* java.util.regex.Matcher#replaceFirst replaceFirst}(<i>repl</i>)
* </code>
* </blockquote>
*
* <p>
* Note that backslashes ({@code \}) and dollar signs ({@code $}) in the
* replacement string may cause the results to be different than if it were
* being treated as a literal replacement string; see
* {@link java.util.regex.Matcher#replaceFirst}.
* Use {@link java.util.regex.Matcher#quoteReplacement} to suppress the special
* meaning of these characters, if desired.
*
* @param regex the regular expression to which this string is to be matched
* @param replacement the string to be substituted for the first match
* @return The resulting {@code CharSeq}
* @throws PatternSyntaxException if the regular expression's syntax is invalid
* @see java.util.regex.Pattern
*/
public CharSeq replaceFirst(java.lang.String regex, java.lang.String replacement) {
return of(back.replaceFirst(regex, replacement));
}
/**
* Replaces each substring of this string that matches the given <a
* href="../util/regex/Pattern.html#sum">regular expression</a> with the
* given replacement.
*
* <p> An invocation of this method of the form
* <i>str</i>{@code .replaceAll(}<i>regex</i>{@code ,} <i>repl</i>{@code )}
* yields exactly the same result as the expression
*
* <blockquote>
* <code>
* {@link java.util.regex.Pattern}.{@link
* java.util.regex.Pattern#compile compile}(<i>regex</i>).{@link
* java.util.regex.Pattern#matcher(java.lang.CharSequence) matcher}(<i>str</i>).{@link
* java.util.regex.Matcher#replaceAll replaceAll}(<i>repl</i>)
* </code>
* </blockquote>
*
* <p>
* Note that backslashes ({@code \}) and dollar signs ({@code $}) in the
* replacement string may cause the results to be different than if it were
* being treated as a literal replacement string; see
* {@link java.util.regex.Matcher#replaceAll Matcher.replaceAll}.
* Use {@link java.util.regex.Matcher#quoteReplacement} to suppress the special
* meaning of these characters, if desired.
*
* @param regex the regular expression to which this string is to be matched
* @param replacement the string to be substituted for each match
* @return The resulting {@code CharSeq}
* @throws PatternSyntaxException if the regular expression's syntax is invalid
* @see java.util.regex.Pattern
*/
public CharSeq replaceAll(java.lang.String regex, java.lang.String replacement) {
return of(back.replaceAll(regex, replacement));
}
/**
* Replaces each substring of this string that matches the literal target
* sequence with the specified literal replacement sequence. The
* replacement proceeds from the beginning of the string to the end, for
* example, replacing "aa" with "b" in the string "aaa" will result in
* "ba" rather than "ab".
*
* @param target The sequence of char values to be replaced
* @param replacement The replacement sequence of char values
* @return The resulting string
*/
public CharSeq replace(CharSequence target, CharSequence replacement) {
return of(back.replace(target, replacement));
}
/**
* Splits this string around matches of the given
* <a href="../util/regex/Pattern.html#sum">regular expression</a>.
*
* <p> The array returned by this method contains each substring of this
* string that is terminated by another substring that matches the given
* expression or is terminated by the end of the string. The substrings in
* the array are in the order in which they occur in this string. If the
* expression does not match any part of the input then the resulting array
* has just one element, namely this string.
*
* <p> When there is a positive-width match at the beginning of this
* string then an empty leading substring is included at the beginning
* of the resulting array. A zero-width match at the beginning however
     * never produces such an empty leading substring.
*
* <p> The {@code limit} parameter controls the number of times the
* pattern is applied and therefore affects the length of the resulting
* array. If the limit <i>n</i> is greater than zero then the pattern
* will be applied at most <i>n</i> - 1 times, the array's
* length will be no greater than <i>n</i>, and the array's last entry
* will contain all input beyond the last matched delimiter. If <i>n</i>
* is non-positive then the pattern will be applied as many times as
* possible and the array can have any length. If <i>n</i> is zero then
* the pattern will be applied as many times as possible, the array can
* have any length, and trailing empty strings will be discarded.
*
* <p> The string {@code "boo:and:foo"}, for example, yields the
* following results with these parameters:
*
* <blockquote><table cellpadding=1 cellspacing=0 summary="Split example showing regex, limit, and result">
* <tr>
* <th>Regex</th>
* <th>Limit</th>
* <th>Result</th>
* </tr>
* <tr><td align=center>:</td>
* <td align=center>2</td>
* <td>{@code { "boo", "and:foo" }}</td></tr>
* <tr><td align=center>:</td>
* <td align=center>5</td>
* <td>{@code { "boo", "and", "foo" }}</td></tr>
* <tr><td align=center>:</td>
* <td align=center>-2</td>
* <td>{@code { "boo", "and", "foo" }}</td></tr>
* <tr><td align=center>o</td>
* <td align=center>5</td>
* <td>{@code { "b", "", ":and:f", "", "" }}</td></tr>
* <tr><td align=center>o</td>
* <td align=center>-2</td>
* <td>{@code { "b", "", ":and:f", "", "" }}</td></tr>
* <tr><td align=center>o</td>
* <td align=center>0</td>
* <td>{@code { "b", "", ":and:f" }}</td></tr>
* </table></blockquote>
*
* <p> An invocation of this method of the form
* <i>str.</i>{@code split(}<i>regex</i>{@code ,} <i>n</i>{@code )}
* yields the same result as the expression
*
* <blockquote>
* <code>
* {@link java.util.regex.Pattern}.{@link
* java.util.regex.Pattern#compile compile}(<i>regex</i>).{@link
* java.util.regex.Pattern#split(java.lang.CharSequence, int) split}(<i>str</i>, <i>n</i>)
* </code>
* </blockquote>
*
* @param regex the delimiting regular expression
* @param limit the result threshold, as described above
* @return the array of strings computed by splitting this string
* around matches of the given regular expression
* @throws PatternSyntaxException if the regular expression's syntax is invalid
* @see java.util.regex.Pattern
*/
public CharSeq[] split(java.lang.String regex, int limit) {
final java.lang.String[] javaStrings = back.split(regex, limit);
final CharSeq[] strings = new CharSeq[javaStrings.length];
for (int i = 0; i < strings.length; i++) {
strings[i] = of(javaStrings[i]);
}
return strings;
}
/**
* Splits this string around matches of the given <a
* href="../util/regex/Pattern.html#sum">regular expression</a>.
*
* <p> This method works as if by invoking the two-argument {@link
* #split(java.lang.String, int) split} method with the given expression and a limit
* argument of zero. Trailing empty strings are therefore not included in
* the resulting array.
*
* <p> The string {@code "boo:and:foo"}, for example, yields the following
* results with these expressions:
*
* <blockquote><table cellpadding=1 cellspacing=0 summary="Split examples showing regex and result">
* <tr>
* <th>Regex</th>
* <th>Result</th>
* </tr>
* <tr><td align=center>:</td>
* <td>{@code { "boo", "and", "foo" }}</td></tr>
* <tr><td align=center>o</td>
* <td>{@code { "b", "", ":and:f" }}</td></tr>
* </table></blockquote>
*
* @param regex the delimiting regular expression
* @return the array of strings computed by splitting this string
* around matches of the given regular expression
* @throws PatternSyntaxException if the regular expression's syntax is invalid
* @see java.util.regex.Pattern
*/
public CharSeq[] split(java.lang.String regex) {
return split(regex, 0);
}
/**
* Converts all of the characters in this {@code CharSeq} to lower
* case using the rules of the given {@code Locale}. Case mapping is based
* on the Unicode Standard version specified by the {@link java.lang.Character Character}
* class. Since case mappings are not always 1:1 char mappings, the resulting
* {@code CharSeq} may be a different length than the original {@code CharSeq}.
* <p>
* Examples of lowercase mappings are in the following table:
* <table border="1" summary="Lowercase mapping examples showing language code of locale, upper case, lower case, and description">
* <tr>
* <th>Language Code of Locale</th>
* <th>Upper Case</th>
* <th>Lower Case</th>
* <th>Description</th>
* </tr>
* <tr>
* <td>tr (Turkish)</td>
* <td>\u0130</td>
* <td>\u0069</td>
* <td>capital letter I with dot above -> small letter i</td>
* </tr>
* <tr>
* <td>tr (Turkish)</td>
* <td>\u0049</td>
* <td>\u0131</td>
* <td>capital letter I -> small letter dotless i </td>
* </tr>
* <tr>
* <td>(all)</td>
* <td>French Fries</td>
* <td>french fries</td>
* <td>lowercased all chars in String</td>
* </tr>
* <tr>
* <td>(all)</td>
* <td><img src="doc-files/capiota.gif" alt="capiota"><img src="doc-files/capchi.gif" alt="capchi">
* <img src="doc-files/captheta.gif" alt="captheta"><img src="doc-files/capupsil.gif" alt="capupsil">
* <img src="doc-files/capsigma.gif" alt="capsigma"></td>
* <td><img src="doc-files/iota.gif" alt="iota"><img src="doc-files/chi.gif" alt="chi">
* <img src="doc-files/theta.gif" alt="theta"><img src="doc-files/upsilon.gif" alt="upsilon">
* <img src="doc-files/sigma1.gif" alt="sigma"></td>
* <td>lowercased all chars in String</td>
* </tr>
* </table>
*
* @param locale use the case transformation rules for this locale
* @return the {@code CharSeq}, converted to lowercase.
* @see java.lang.String#toLowerCase()
* @see java.lang.String#toUpperCase()
* @see java.lang.String#toUpperCase(Locale)
*/
public CharSeq toLowerCase(Locale locale) {
return of(back.toLowerCase(locale));
}
/**
* Converts all of the characters in this {@code CharSeq} to lower
* case using the rules of the default locale. This is equivalent to calling
* {@code toLowerCase(Locale.getDefault())}.
* <p>
* <b>Note:</b> This method is locale sensitive, and may produce unexpected
* results if used for strings that are intended to be interpreted locale
* independently.
* Examples are programming language identifiers, protocol keys, and HTML
* tags.
* For instance, {@code "TITLE".toLowerCase()} in a Turkish locale
* returns {@code "t\u005Cu0131tle"}, where '\u005Cu0131' is the
* LATIN SMALL LETTER DOTLESS I character.
* To obtain correct results for locale insensitive strings, use
* {@code toLowerCase(Locale.ROOT)}.
* <p>
*
* @return the {@code CharSeq}, converted to lowercase.
* @see java.lang.String#toLowerCase(Locale)
*/
public CharSeq toLowerCase() {
return toLowerCase(Locale.getDefault());
}
/**
* Converts all of the characters in this {@code CharSeq} to upper
* case using the rules of the given {@code Locale}. Case mapping is based
* on the Unicode Standard version specified by the {@link java.lang.Character Character}
* class. Since case mappings are not always 1:1 char mappings, the resulting
* {@code CharSeq} may be a different length than the original {@code CharSeq}.
* <p>
* Examples of locale-sensitive and 1:M case mappings are in the following table.
*
* <table border="1" summary="Examples of locale-sensitive and 1:M case mappings. Shows Language code of locale, lower case, upper case, and description.">
* <tr>
* <th>Language Code of Locale</th>
* <th>Lower Case</th>
* <th>Upper Case</th>
* <th>Description</th>
* </tr>
* <tr>
* <td>tr (Turkish)</td>
* <td>\u0069</td>
* <td>\u0130</td>
* <td>small letter i -> capital letter I with dot above</td>
* </tr>
* <tr>
* <td>tr (Turkish)</td>
* <td>\u0131</td>
* <td>\u0049</td>
* <td>small letter dotless i -> capital letter I</td>
* </tr>
* <tr>
* <td>(all)</td>
* <td>\u00df</td>
* <td>\u0053 \u0053</td>
* <td>small letter sharp s -> two letters: SS</td>
* </tr>
* <tr>
* <td>(all)</td>
* <td>Fahrvergnügen</td>
* <td>FAHRVERGNÜGEN</td>
* <td></td>
* </tr>
* </table>
*
* @param locale use the case transformation rules for this locale
* @return the {@code CharSeq}, converted to uppercase.
* @see java.lang.String#toUpperCase()
* @see java.lang.String#toLowerCase()
* @see java.lang.String#toLowerCase(Locale)
*/
public CharSeq toUpperCase(Locale locale) {
return of(back.toUpperCase(locale));
}
/**
* Converts all of the characters in this {@code CharSeq} to upper
* case using the rules of the default locale. This method is equivalent to
* {@code toUpperCase(Locale.getDefault())}.
* <p>
* <b>Note:</b> This method is locale sensitive, and may produce unexpected
* results if used for strings that are intended to be interpreted locale
* independently.
* Examples are programming language identifiers, protocol keys, and HTML
* tags.
* For instance, {@code "title".toUpperCase()} in a Turkish locale
* returns {@code "T\u005Cu0130TLE"}, where '\u005Cu0130' is the
* LATIN CAPITAL LETTER I WITH DOT ABOVE character.
* To obtain correct results for locale insensitive strings, use
* {@code toUpperCase(Locale.ROOT)}.
* <p>
*
* @return the {@code CharSeq}, converted to uppercase.
* @see java.lang.String#toUpperCase(Locale)
*/
public CharSeq toUpperCase() {
return toUpperCase(Locale.getDefault());
}
/**
* Returns a string whose value is this string, with any leading and trailing
* whitespace removed.
* <p>
* If this {@code CharSeq} object represents an empty character
* sequence, or the first and last characters of character sequence
* represented by this {@code CharSeq} object both have codes
* greater than {@code '\u005Cu0020'} (the space character), then a
* reference to this {@code CharSeq} object is returned.
* <p>
* Otherwise, if there is no character with a code greater than
* {@code '\u005Cu0020'} in the string, then a
* {@code CharSeq} object representing an empty string is
* returned.
* <p>
* Otherwise, let <i>k</i> be the index of the first character in the
* string whose code is greater than {@code '\u005Cu0020'}, and let
* <i>m</i> be the index of the last character in the string whose code
* is greater than {@code '\u005Cu0020'}. A {@code CharSeq}
* object is returned, representing the substring of this string that
* begins with the character at index <i>k</i> and ends with the
* character at index <i>m</i>-that is, the result of
* {@code this.substring(k, m + 1)}.
* <p>
* This method may be used to trim whitespace (as defined above) from
* the beginning and end of a string.
*
* @return A string whose value is this string, with any leading and trailing white
* space removed, or this string if it has no leading or
* trailing white space.
*/
public CharSeq trim() {
return of(back.trim());
}
/**
* Converts this string to a new character array.
*
* @return a newly allocated character array whose length is the length
* of this string and whose contents are initialized to contain
* the character sequence represented by this string.
*/
public char[] toCharArray() {
return back.toCharArray();
}
@FunctionalInterface
interface CharUnaryOperator {
char apply(char c);
}
@FunctionalInterface
interface CharFunction<R> {
R apply(char c);
}
}
| 1 | 6,329 | just a little thing: let's introduce a `final int bound = length - back.length();` that saves us from recalculating the bound each time the loop condition is checked. | vavr-io-vavr | java |
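The review note above asks for the loop bound to be computed once, up front, instead of being re-evaluated in the loop condition on every iteration. The loop it refers to is not reproduced in this row, so the sketch below only illustrates the general pattern with hypothetical names (BoundHoistExample, haystack, needle); it is not the actual vavr CharSeq code.

final class BoundHoistExample {
    // Returns the first index at which needle occurs in haystack, or -1 if it does not occur.
    static int indexOf(char[] haystack, char[] needle) {
        // The reviewer's suggestion: hoist the invariant bound out of the loop condition
        // so it is computed once rather than on every check.
        final int bound = haystack.length - needle.length;
        for (int i = 0; i <= bound; i++) {
            int j = 0;
            while (j < needle.length && haystack[i + j] == needle[j]) {
                j++;
            }
            if (j == needle.length) {
                return i;
            }
        }
        return -1;
    }
}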
@@ -87,6 +87,16 @@ func (s *server) setupRouting() {
"GET": http.HandlerFunc(s.peerBalanceHandler),
})
+ router.Handle("/settlements", jsonhttp.MethodHandler{
+ "GET": http.HandlerFunc(s.settlementsHandler),
+ })
+ router.Handle("/settlements/{peer}", jsonhttp.MethodHandler{
+ "GET": http.HandlerFunc(s.peerSettlementsHandler),
+ })
+ router.Handle("/settlements/pay/{peer}", jsonhttp.MethodHandler{
+ "GET": http.HandlerFunc(s.payPeerHandler),
+ })
+
baseRouter.Handle("/", web.ChainHandlers(
logging.NewHTTPAccessLogHandler(s.Logger, logrus.InfoLevel, "debug api access"),
handlers.CompressHandler, | 1 | // Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package debugapi
import (
"expvar"
"net/http"
"net/http/pprof"
"github.com/ethersphere/bee/pkg/jsonhttp"
"github.com/ethersphere/bee/pkg/logging"
"github.com/gorilla/handlers"
"github.com/gorilla/mux"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/sirupsen/logrus"
"resenje.org/web"
)
func (s *server) setupRouting() {
baseRouter := http.NewServeMux()
baseRouter.Handle("/metrics", web.ChainHandlers(
logging.SetAccessLogLevelHandler(0), // suppress access log messages
web.FinalHandler(promhttp.InstrumentMetricHandler(
s.metricsRegistry,
promhttp.HandlerFor(s.metricsRegistry, promhttp.HandlerOpts{}),
)),
))
router := mux.NewRouter()
router.NotFoundHandler = http.HandlerFunc(jsonhttp.NotFoundHandler)
router.Handle("/debug/pprof", http.HandlerFunc(pprof.Index))
router.Handle("/debug/pprof/cmdline", http.HandlerFunc(pprof.Cmdline))
router.Handle("/debug/pprof/profile", http.HandlerFunc(pprof.Profile))
router.Handle("/debug/pprof/symbol", http.HandlerFunc(pprof.Symbol))
router.Handle("/debug/pprof/trace", http.HandlerFunc(pprof.Trace))
router.PathPrefix("/debug/pprof/").Handler(http.HandlerFunc(pprof.Index))
router.Handle("/debug/vars", expvar.Handler())
router.Handle("/health", web.ChainHandlers(
logging.SetAccessLogLevelHandler(0), // suppress access log messages
web.FinalHandlerFunc(s.statusHandler),
))
router.Handle("/readiness", web.ChainHandlers(
logging.SetAccessLogLevelHandler(0), // suppress access log messages
web.FinalHandlerFunc(s.statusHandler),
))
router.Handle("/pingpong/{peer-id}", jsonhttp.MethodHandler{
"POST": http.HandlerFunc(s.pingpongHandler),
})
router.Handle("/addresses", jsonhttp.MethodHandler{
"GET": http.HandlerFunc(s.addressesHandler),
})
router.Handle("/connect/{multi-address:.+}", jsonhttp.MethodHandler{
"POST": http.HandlerFunc(s.peerConnectHandler),
})
router.Handle("/peers", jsonhttp.MethodHandler{
"GET": http.HandlerFunc(s.peersHandler),
})
router.Handle("/peers/{address}", jsonhttp.MethodHandler{
"DELETE": http.HandlerFunc(s.peerDisconnectHandler),
})
router.Handle("/chunks/{address}", jsonhttp.MethodHandler{
"GET": http.HandlerFunc(s.hasChunkHandler),
"DELETE": http.HandlerFunc(s.removeChunk),
})
router.Handle("/topology", jsonhttp.MethodHandler{
"GET": http.HandlerFunc(s.topologyHandler),
})
router.Handle("/welcome-message", jsonhttp.MethodHandler{
"GET": http.HandlerFunc(s.getWelcomeMessageHandler),
"POST": web.ChainHandlers(
jsonhttp.NewMaxBodyBytesHandler(welcomeMessageMaxRequestSize),
web.FinalHandlerFunc(s.setWelcomeMessageHandler),
),
})
router.Handle("/balances", jsonhttp.MethodHandler{
"GET": http.HandlerFunc(s.balancesHandler),
})
router.Handle("/balances/{peer}", jsonhttp.MethodHandler{
"GET": http.HandlerFunc(s.peerBalanceHandler),
})
baseRouter.Handle("/", web.ChainHandlers(
logging.NewHTTPAccessLogHandler(s.Logger, logrus.InfoLevel, "debug api access"),
handlers.CompressHandler,
// todo: add recovery handler
web.NoCacheHeadersHandler,
web.FinalHandler(router),
))
s.Handler = baseRouter
}
| 1 | 12,016 | I think that this should be a POST method, as it changes the state. GET requests should not change the state. With a different method, I would suggest having the handler under the `"/settlements/{peer}"` path. | ethersphere-bee | go |
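The review above argues that triggering a payment changes state and therefore should not be exposed as a GET, and that the action could live on the existing peer path. Below is a minimal sketch of how the routes added in the diff might instead be registered inside setupRouting if that suggestion were followed; grouping GET and POST in one jsonhttp.MethodHandler mirrors the welcome-message route already in this file, but the exact wiring here is an assumption, not the merged implementation.

	router.Handle("/settlements", jsonhttp.MethodHandler{
		"GET": http.HandlerFunc(s.settlementsHandler),
	})
	router.Handle("/settlements/{peer}", jsonhttp.MethodHandler{
		// GET reads the settlement state for the peer; POST triggers a payment,
		// which changes state and so is not offered as a GET.
		"GET":  http.HandlerFunc(s.peerSettlementsHandler),
		"POST": http.HandlerFunc(s.payPeerHandler),
	})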
@@ -31,6 +31,7 @@ var errShortRemoteTip = errors.New("Unexpected remote less than tip")
// L2GasPrice slot refers to the storage slot that the execution price is stored
// in the L2 predeploy contract, the GasPriceOracle
var l2GasPriceSlot = common.BigToHash(big.NewInt(1))
+var l2GasPriceOracleAddress = common.HexToAddress("0x420000000000000000000000000000000000000F")
// SyncService implements the main functionality around pulling in transactions
// and executing them. It can be configured to run in both sequencer mode and in | 1 | package rollup
import (
"context"
"errors"
"fmt"
"math/big"
"strconv"
"sync"
"sync/atomic"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/eth/gasprice"
"github.com/ethereum/go-ethereum/rollup/fees"
)
// errShortRemoteTip is an error for when the remote tip is shorter than the
// local tip
var errShortRemoteTip = errors.New("Unexpected remote less than tip")
// L2GasPrice slot refers to the storage slot that the execution price is stored
// in the L2 predeploy contract, the GasPriceOracle
var l2GasPriceSlot = common.BigToHash(big.NewInt(1))
// SyncService implements the main functionality around pulling in transactions
// and executing them. It can be configured to run in both sequencer mode and in
// verifier mode.
type SyncService struct {
ctx context.Context
cancel context.CancelFunc
verifier bool
db ethdb.Database
scope event.SubscriptionScope
txFeed event.Feed
txLock sync.Mutex
loopLock sync.Mutex
enable bool
eth1ChainId uint64
bc *core.BlockChain
txpool *core.TxPool
RollupGpo *gasprice.RollupOracle
client RollupClient
syncing atomic.Value
chainHeadSub event.Subscription
OVMContext OVMContext
confirmationDepth uint64
pollInterval time.Duration
timestampRefreshThreshold time.Duration
chainHeadCh chan core.ChainHeadEvent
backend Backend
gpoAddress common.Address
enableL2GasPolling bool
enforceFees bool
}
// NewSyncService returns an initialized sync service
func NewSyncService(ctx context.Context, cfg Config, txpool *core.TxPool, bc *core.BlockChain, db ethdb.Database) (*SyncService, error) {
if bc == nil {
return nil, errors.New("Must pass BlockChain to SyncService")
}
ctx, cancel := context.WithCancel(ctx)
_ = cancel // satisfy govet
if cfg.IsVerifier {
log.Info("Running in verifier mode", "sync-backend", cfg.Backend.String())
} else {
log.Info("Running in sequencer mode", "sync-backend", cfg.Backend.String())
}
pollInterval := cfg.PollInterval
if pollInterval == 0 {
log.Info("Sanitizing poll interval to 15 seconds")
pollInterval = time.Second * 15
}
timestampRefreshThreshold := cfg.TimestampRefreshThreshold
if timestampRefreshThreshold == 0 {
log.Info("Sanitizing timestamp refresh threshold to 3 minutes")
timestampRefreshThreshold = time.Minute * 3
}
// Layer 2 chainid
chainID := bc.Config().ChainID
if chainID == nil {
return nil, errors.New("Must configure with chain id")
}
// Initialize the rollup client
client := NewClient(cfg.RollupClientHttp, chainID)
log.Info("Configured rollup client", "url", cfg.RollupClientHttp, "chain-id", chainID.Uint64(), "ctc-deploy-height", cfg.CanonicalTransactionChainDeployHeight)
log.Info("Enforce Fees", "set", cfg.EnforceFees)
service := SyncService{
ctx: ctx,
cancel: cancel,
verifier: cfg.IsVerifier,
enable: cfg.Eth1SyncServiceEnable,
confirmationDepth: cfg.Eth1ConfirmationDepth,
syncing: atomic.Value{},
bc: bc,
txpool: txpool,
chainHeadCh: make(chan core.ChainHeadEvent, 1),
eth1ChainId: cfg.Eth1ChainId,
client: client,
db: db,
pollInterval: pollInterval,
timestampRefreshThreshold: timestampRefreshThreshold,
backend: cfg.Backend,
gpoAddress: cfg.GasPriceOracleAddress,
enableL2GasPolling: cfg.EnableL2GasPolling,
enforceFees: cfg.EnforceFees,
}
// The chainHeadSub is used to synchronize the SyncService with the chain.
// As the SyncService processes transactions, it waits until the transaction
// is added to the chain. This synchronization is required for handling
// reorgs and also favors safety over liveliness. If a transaction breaks
// things downstream, it is expected that this channel will halt ingestion
// of additional transactions by the SyncService.
service.chainHeadSub = service.bc.SubscribeChainHeadEvent(service.chainHeadCh)
// Initial sync service setup if it is enabled. This code depends on
// a remote server that indexes the layer one contracts. Place this
// code behind this if statement so that this can run without the
// requirement of the remote server being up.
if service.enable {
// Ensure that the rollup client can connect to a remote server
// before starting.
err := service.ensureClient()
if err != nil {
return nil, fmt.Errorf("Rollup client unable to connect: %w", err)
}
// Wait until the remote service is done syncing
for {
status, err := service.client.SyncStatus(service.backend)
if err != nil {
log.Error("Cannot get sync status")
continue
}
if !status.Syncing {
break
}
log.Info("Still syncing", "index", status.CurrentTransactionIndex, "tip", status.HighestKnownTransactionIndex)
time.Sleep(10 * time.Second)
}
// Initialize the latest L1 data here to make sure that
// it happens before the RPC endpoints open up
// Only do it if the sync service is enabled so that this
// can be ran without needing to have a configured RollupClient.
err = service.initializeLatestL1(cfg.CanonicalTransactionChainDeployHeight)
if err != nil {
return nil, fmt.Errorf("Cannot initialize latest L1 data: %w", err)
}
// Log the OVMContext information on startup
bn := service.GetLatestL1BlockNumber()
ts := service.GetLatestL1Timestamp()
log.Info("Initialized Latest L1 Info", "blocknumber", bn, "timestamp", ts)
index := service.GetLatestIndex()
queueIndex := service.GetLatestEnqueueIndex()
verifiedIndex := service.GetLatestVerifiedIndex()
block := service.bc.CurrentBlock()
if block == nil {
block = types.NewBlock(&types.Header{}, nil, nil, nil)
}
header := block.Header()
log.Info("Initial Rollup State", "state", header.Root.Hex(), "index", stringify(index), "queue-index", stringify(queueIndex), "verified-index", verifiedIndex)
// The sequencer needs to sync to the tip at start up
// By setting the sync status to true, it will prevent RPC calls.
// Be sure this is set to false later.
if !service.verifier {
service.setSyncStatus(true)
}
}
return &service, nil
}
// ensureClient checks to make sure that the remote transaction source is
// available. It will return an error if it cannot connect via HTTP
func (s *SyncService) ensureClient() error {
_, err := s.client.GetLatestEthContext()
if err != nil {
return fmt.Errorf("Cannot connect to data service: %w", err)
}
return nil
}
// Start initializes the service
func (s *SyncService) Start() error {
if !s.enable {
log.Info("Running without syncing enabled")
return nil
}
log.Info("Initializing Sync Service", "eth1-chainid", s.eth1ChainId)
s.updateL2GasPrice(nil)
s.updateL1GasPrice()
if s.verifier {
go s.VerifierLoop()
} else {
// The sequencer must sync the transactions to the tip and the
// pending queue transactions on start before setting sync status
// to false and opening up the RPC to accept transactions.
if err := s.syncTransactionsToTip(); err != nil {
return fmt.Errorf("Sequencer cannot sync transactions to tip: %w", err)
}
if err := s.syncQueueToTip(); err != nil {
return fmt.Errorf("Sequencer cannot sync queue to tip: %w", err)
}
s.setSyncStatus(false)
go s.SequencerLoop()
}
return nil
}
// initializeLatestL1 sets the initial values of the `L1BlockNumber`
// and `L1Timestamp` to the deploy height of the Canonical Transaction
// chain if the chain is empty, otherwise set it from the last
// transaction processed. This must complete before transactions
// are accepted via RPC when running as a sequencer.
func (s *SyncService) initializeLatestL1(ctcDeployHeight *big.Int) error {
index := s.GetLatestIndex()
if index == nil {
if ctcDeployHeight == nil {
return errors.New("Must configure with canonical transaction chain deploy height")
}
log.Info("Initializing initial OVM Context", "ctc-deploy-height", ctcDeployHeight.Uint64())
context, err := s.client.GetEthContext(ctcDeployHeight.Uint64())
if err != nil {
return fmt.Errorf("Cannot fetch ctc deploy block at height %d: %w", ctcDeployHeight.Uint64(), err)
}
s.SetLatestL1Timestamp(context.Timestamp)
s.SetLatestL1BlockNumber(context.BlockNumber)
} else {
log.Info("Found latest index", "index", *index)
block := s.bc.GetBlockByNumber(*index - 1)
if block == nil {
block = s.bc.CurrentBlock()
idx := block.Number().Uint64()
if idx > *index {
// This is recoverable with a reorg but should never happen
return fmt.Errorf("Current block height greater than index")
}
s.SetLatestIndex(&idx)
log.Info("Block not found, resetting index", "new", idx, "old", *index-1)
}
txs := block.Transactions()
if len(txs) != 1 {
log.Error("Unexpected number of transactions in block: %d", len(txs))
}
tx := txs[0]
s.SetLatestL1Timestamp(tx.L1Timestamp())
s.SetLatestL1BlockNumber(tx.L1BlockNumber().Uint64())
}
queueIndex := s.GetLatestEnqueueIndex()
if queueIndex == nil {
enqueue, err := s.client.GetLastConfirmedEnqueue()
// There are no enqueues yet
if errors.Is(err, errElementNotFound) {
return nil
}
// Other unexpected error
if err != nil {
return fmt.Errorf("Cannot fetch last confirmed queue tx: %w", err)
}
// No error, the queue element was found
queueIndex = enqueue.GetMeta().QueueIndex
}
s.SetLatestEnqueueIndex(queueIndex)
return nil
}
// setSyncStatus sets the `syncing` field as well as prevents
// any transactions from coming in via RPC.
// `syncing` should never be set directly outside of this function.
func (s *SyncService) setSyncStatus(status bool) {
log.Info("Setting sync status", "status", status)
s.syncing.Store(status)
}
// IsSyncing returns the syncing status of the syncservice.
// Returns false if not yet set.
func (s *SyncService) IsSyncing() bool {
value := s.syncing.Load()
val, ok := value.(bool)
if !ok {
return false
}
return val
}
// Stop will close the open channels and cancel the goroutines
// started by this service.
func (s *SyncService) Stop() error {
s.scope.Close()
s.chainHeadSub.Unsubscribe()
close(s.chainHeadCh)
if s.cancel != nil {
defer s.cancel()
}
return nil
}
// VerifierLoop is the main loop for Verifier mode
func (s *SyncService) VerifierLoop() {
log.Info("Starting Verifier Loop", "poll-interval", s.pollInterval, "timestamp-refresh-threshold", s.timestampRefreshThreshold)
for {
if err := s.updateL1GasPrice(); err != nil {
log.Error("Cannot update L1 gas price", "msg", err)
}
if err := s.verify(); err != nil {
log.Error("Could not verify", "error", err)
}
if err := s.updateL2GasPrice(nil); err != nil {
log.Error("Cannot update L2 gas price", "msg", err)
}
time.Sleep(s.pollInterval)
}
}
// verify is the main logic for the Verifier. The verifier logic is different
// depending on the Backend
func (s *SyncService) verify() error {
switch s.backend {
case BackendL1:
if err := s.syncBatchesToTip(); err != nil {
return fmt.Errorf("Verifier cannot sync transaction batches to tip: %w", err)
}
case BackendL2:
if err := s.syncTransactionsToTip(); err != nil {
return fmt.Errorf("Verifier cannot sync transactions with BackendL2: %w", err)
}
}
return nil
}
// SequencerLoop is the polling loop that runs in sequencer mode. It sequences
// transactions and then updates the EthContext.
func (s *SyncService) SequencerLoop() {
log.Info("Starting Sequencer Loop", "poll-interval", s.pollInterval, "timestamp-refresh-threshold", s.timestampRefreshThreshold)
for {
if err := s.updateL1GasPrice(); err != nil {
log.Error("Cannot update L1 gas price", "msg", err)
}
s.txLock.Lock()
if err := s.sequence(); err != nil {
log.Error("Could not sequence", "error", err)
}
s.txLock.Unlock()
if err := s.updateL2GasPrice(nil); err != nil {
log.Error("Cannot update L2 gas price", "msg", err)
}
if err := s.updateContext(); err != nil {
log.Error("Could not update execution context", "error", err)
}
time.Sleep(s.pollInterval)
}
}
// sequence is the main logic for the Sequencer. It will sync any `enqueue`
// transactions it has yet to sync and then pull in transaction batches to
// compare against the transactions it has in its local state. The sequencer
// should reorg based on the transaction batches that are posted because
// L1 is the source of truth. The sequencer concurrently accepts user
// transactions via the RPC.
func (s *SyncService) sequence() error {
if err := s.syncQueueToTip(); err != nil {
return fmt.Errorf("Sequencer cannot sequence queue: %w", err)
}
if err := s.syncBatchesToTip(); err != nil {
return fmt.Errorf("Sequencer cannot sync transaction batches: %w", err)
}
return nil
}
func (s *SyncService) syncQueueToTip() error {
if err := s.syncToTip(s.syncQueue, s.client.GetLatestEnqueueIndex); err != nil {
return fmt.Errorf("Cannot sync queue to tip: %w", err)
}
return nil
}
func (s *SyncService) syncBatchesToTip() error {
if err := s.syncToTip(s.syncBatches, s.client.GetLatestTransactionBatchIndex); err != nil {
return fmt.Errorf("Cannot sync transaction batches to tip: %w", err)
}
return nil
}
func (s *SyncService) syncTransactionsToTip() error {
sync := func() (*uint64, error) {
return s.syncTransactions(s.backend)
}
check := func() (*uint64, error) {
return s.client.GetLatestTransactionIndex(s.backend)
}
if err := s.syncToTip(sync, check); err != nil {
return fmt.Errorf("Verifier cannot sync transactions with backend %s: %w", s.backend.String(), err)
}
return nil
}
// updateL1GasPrice queries for the current L1 gas price and then stores it
// in the L1 Gas Price Oracle. This must be called over time to properly
// estimate the transaction fees that the sequencer should charge.
func (s *SyncService) updateL1GasPrice() error {
l1GasPrice, err := s.client.GetL1GasPrice()
if err != nil {
return fmt.Errorf("cannot fetch L1 gas price: %w", err)
}
s.RollupGpo.SetL1GasPrice(l1GasPrice)
return nil
}
// updateL2GasPrice accepts a state root and reads the gas price from the gas
// price oracle at the state that corresponds to the state root. If no state
// root is passed in, then the tip is used.
func (s *SyncService) updateL2GasPrice(hash *common.Hash) error {
// TODO(mark): this is temporary and can be removed when the
// OVM_GasPriceOracle is moved into the predeploy contracts
if !s.enableL2GasPolling {
return nil
}
var state *state.StateDB
var err error
if hash != nil {
state, err = s.bc.StateAt(*hash)
} else {
state, err = s.bc.State()
}
if err != nil {
return err
}
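	// Read the current L2 gas price directly out of the gas price oracle contract's storage slot.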
result := state.GetState(s.gpoAddress, l2GasPriceSlot)
s.RollupGpo.SetL2GasPrice(result.Big())
return nil
}
// updateContext updates the execution context's timestamp and blocknumber
// over time. This is only necessary for the sequencer.
func (s *SyncService) updateContext() error {
context, err := s.client.GetLatestEthContext()
if err != nil {
return err
}
current := time.Unix(int64(s.GetLatestL1Timestamp()), 0)
next := time.Unix(int64(context.Timestamp), 0)
if next.Sub(current) > s.timestampRefreshThreshold {
log.Info("Updating Eth Context", "timetamp", context.Timestamp, "blocknumber", context.BlockNumber)
s.SetLatestL1BlockNumber(context.BlockNumber)
s.SetLatestL1Timestamp(context.Timestamp)
}
return nil
}
// Methods for safely accessing and storing the latest
// L1 blocknumber and timestamp. These are held in memory.
// GetLatestL1Timestamp returns the OVMContext timestamp
func (s *SyncService) GetLatestL1Timestamp() uint64 {
return atomic.LoadUint64(&s.OVMContext.timestamp)
}
// GetLatestL1BlockNumber returns the OVMContext blocknumber
func (s *SyncService) GetLatestL1BlockNumber() uint64 {
return atomic.LoadUint64(&s.OVMContext.blockNumber)
}
// SetLatestL1Timestamp will set the OVMContext timestamp
func (s *SyncService) SetLatestL1Timestamp(ts uint64) {
atomic.StoreUint64(&s.OVMContext.timestamp, ts)
}
// SetLatestL1BlockNumber will set the OVMContext blocknumber
func (s *SyncService) SetLatestL1BlockNumber(bn uint64) {
atomic.StoreUint64(&s.OVMContext.blockNumber, bn)
}
// GetLatestEnqueueIndex reads the last queue index processed
func (s *SyncService) GetLatestEnqueueIndex() *uint64 {
return rawdb.ReadHeadQueueIndex(s.db)
}
// GetNextEnqueueIndex returns the next queue index to process
func (s *SyncService) GetNextEnqueueIndex() uint64 {
latest := s.GetLatestEnqueueIndex()
if latest == nil {
return 0
}
return *latest + 1
}
// SetLatestEnqueueIndex writes the last queue index that was processed
func (s *SyncService) SetLatestEnqueueIndex(index *uint64) {
if index != nil {
rawdb.WriteHeadQueueIndex(s.db, *index)
}
}
// GetLatestIndex reads the last CTC index that was processed
func (s *SyncService) GetLatestIndex() *uint64 {
return rawdb.ReadHeadIndex(s.db)
}
// GetNextIndex reads the next CTC index to process
func (s *SyncService) GetNextIndex() uint64 {
latest := s.GetLatestIndex()
if latest == nil {
return 0
}
return *latest + 1
}
// SetLatestIndex writes the last CTC index that was processed
func (s *SyncService) SetLatestIndex(index *uint64) {
if index != nil {
rawdb.WriteHeadIndex(s.db, *index)
}
}
// GetLatestVerifiedIndex reads the last verified CTC index that was processed
// These are set by processing batches of transactions that were submitted to
// the Canonical Transaction Chain.
func (s *SyncService) GetLatestVerifiedIndex() *uint64 {
return rawdb.ReadHeadVerifiedIndex(s.db)
}
// GetNextVerifiedIndex reads the next verified index
func (s *SyncService) GetNextVerifiedIndex() uint64 {
index := s.GetLatestVerifiedIndex()
if index == nil {
return 0
}
return *index + 1
}
// SetLatestVerifiedIndex writes the last verified index that was processed
func (s *SyncService) SetLatestVerifiedIndex(index *uint64) {
if index != nil {
rawdb.WriteHeadVerifiedIndex(s.db, *index)
}
}
// GetLatestBatchIndex reads the last processed transaction batch
func (s *SyncService) GetLatestBatchIndex() *uint64 {
return rawdb.ReadHeadBatchIndex(s.db)
}
// GetNextBatchIndex reads the index of the next transaction batch to process
func (s *SyncService) GetNextBatchIndex() uint64 {
index := s.GetLatestBatchIndex()
if index == nil {
return 0
}
return *index + 1
}
// SetLatestBatchIndex writes the last index of the transaction batch that was processed
func (s *SyncService) SetLatestBatchIndex(index *uint64) {
if index != nil {
rawdb.WriteHeadBatchIndex(s.db, *index)
}
}
// applyTransaction is a higher level API for applying a transaction
func (s *SyncService) applyTransaction(tx *types.Transaction) error {
if tx.GetMeta().Index != nil {
return s.applyIndexedTransaction(tx)
}
return s.applyTransactionToTip(tx)
}
// applyIndexedTransaction applies a transaction that has an index. This means
// that the source of the transaction was either an L1 batch or the
// sequencer.
func (s *SyncService) applyIndexedTransaction(tx *types.Transaction) error {
if tx == nil {
return errors.New("Transaction is nil in applyIndexedTransaction")
}
index := tx.GetMeta().Index
if index == nil {
return errors.New("No index found in applyIndexedTransaction")
}
log.Trace("Applying indexed transaction", "index", *index)
next := s.GetNextIndex()
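	// An index equal to the next expected index extends the tip; a smaller index
	// refers to an already-indexed transaction and is re-checked against local
	// state; a larger index indicates a gap and is treated as an error.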
if *index == next {
return s.applyTransactionToTip(tx)
}
if *index < next {
return s.applyHistoricalTransaction(tx)
}
return fmt.Errorf("Received tx at index %d when looking for %d", *index, next)
}
// applyHistoricalTransaction will compare a historical transaction against what
// is locally indexed. This will trigger a reorg in the future
func (s *SyncService) applyHistoricalTransaction(tx *types.Transaction) error {
if tx == nil {
return errors.New("Transaction is nil in applyHistoricalTransaction")
}
index := tx.GetMeta().Index
if index == nil {
return errors.New("No index is found in applyHistoricalTransaction")
}
// Handle the off-by-one: block numbers are offset by one from transaction
// indices, so the transaction at index i lives in block i+1.
block := s.bc.GetBlockByNumber(*index + 1)
if block == nil {
return fmt.Errorf("Block %d is not found", *index+1)
}
txs := block.Transactions()
if len(txs) != 1 {
return fmt.Errorf("More than one transaction found in block %d", *index+1)
}
if !isCtcTxEqual(tx, txs[0]) {
log.Error("Mismatched transaction", "index", *index)
} else {
log.Debug("Historical transaction matches", "index", *index, "hash", tx.Hash().Hex())
}
return nil
}
// applyTransactionToTip will do sanity checks on the transaction before
// applying it to the tip. It blocks until the transaction has been included in
// the chain. It is assumed that validation around the index has already
// happened.
func (s *SyncService) applyTransactionToTip(tx *types.Transaction) error {
if tx == nil {
return errors.New("nil transaction passed to applyTransactionToTip")
}
// Queue Origin L1 to L2 transactions must have a timestamp that is set by
// the L1 block that holds the transaction. This should never happen but is
// a sanity check to prevent fraudulent execution.
if tx.QueueOrigin() == types.QueueOriginL1ToL2 {
if tx.L1Timestamp() == 0 {
return fmt.Errorf("Queue origin L1 to L2 transaction without a timestamp: %s", tx.Hash().Hex())
}
}
// If there is no OVM timestamp assigned to the transaction, then assign a
// timestamp and blocknumber to it. This should only be the case for queue
// origin sequencer transactions that come in via RPC. The L1 to L2
// transactions that come in via `enqueue` should have a timestamp set based
// on the L1 block that it was included in.
// Note that Ethereum Layer one consensus rules dictate that the timestamp
// must be strictly increasing between blocks, so no need to check both the
// timestamp and the blocknumber.
if tx.L1Timestamp() == 0 {
ts := s.GetLatestL1Timestamp()
bn := s.GetLatestL1BlockNumber()
tx.SetL1Timestamp(ts)
tx.SetL1BlockNumber(bn)
} else if tx.L1Timestamp() > s.GetLatestL1Timestamp() {
// If the timestamp of the transaction is greater than the sync
// service's locally maintained timestamp, update the timestamp and
// blocknumber to match the transaction's. This should happen
// with `enqueue` transactions.
ts := tx.L1Timestamp()
bn := tx.L1BlockNumber()
s.SetLatestL1Timestamp(ts)
s.SetLatestL1BlockNumber(bn.Uint64())
log.Debug("Updating OVM context based on new transaction", "timestamp", ts, "blocknumber", bn.Uint64(), "queue-origin", tx.QueueOrigin())
} else if tx.L1Timestamp() < s.GetLatestL1Timestamp() {
log.Error("Timestamp monotonicity violation", "hash", tx.Hash().Hex())
}
if tx.GetMeta().Index == nil {
index := s.GetLatestIndex()
if index == nil {
tx.SetIndex(0)
} else {
tx.SetIndex(*index + 1)
}
}
s.SetLatestIndex(tx.GetMeta().Index)
if tx.GetMeta().QueueIndex != nil {
s.SetLatestEnqueueIndex(tx.GetMeta().QueueIndex)
}
// The index was set above so it is safe to dereference
log.Debug("Applying transaction to tip", "index", *tx.GetMeta().Index, "hash", tx.Hash().Hex())
txs := types.Transactions{tx}
s.txFeed.Send(core.NewTxsEvent{Txs: txs})
// Block until the transaction has been added to the chain
log.Trace("Waiting for transaction to be added to chain", "hash", tx.Hash().Hex())
<-s.chainHeadCh
return nil
}
// applyBatchedTransaction applies transactions that were batched to layer one.
// The sequencer checks for batches over time to make sure that it does not
// deviate from the L1 state and this is the main method of transaction
// ingestion for the verifier.
func (s *SyncService) applyBatchedTransaction(tx *types.Transaction) error {
if tx == nil {
return errors.New("nil transaction passed into applyBatchedTransaction")
}
index := tx.GetMeta().Index
if index == nil {
return errors.New("No index found on transaction")
}
log.Trace("Applying batched transaction", "index", *index)
err := s.applyIndexedTransaction(tx)
if err != nil {
return fmt.Errorf("Cannot apply batched transaction: %w", err)
}
s.SetLatestVerifiedIndex(index)
return nil
}
// verifyFee will verify that a valid fee is being paid.
func (s *SyncService) verifyFee(tx *types.Transaction) error {
if tx.GasPrice().Cmp(common.Big0) == 0 {
// Exit early if fees are enforced and the gasPrice is set to 0
if s.enforceFees {
return errors.New("cannot accept 0 gas price transaction")
}
// If fees are not enforced and the gas price is 0, return early
return nil
}
// When the gas price is non zero, it must be equal to the constant
if tx.GasPrice().Cmp(fees.BigTxGasPrice) != 0 {
return fmt.Errorf("tx.gasPrice must be %d", fees.TxGasPrice)
}
l1GasPrice, err := s.RollupGpo.SuggestL1GasPrice(context.Background())
if err != nil {
return err
}
l2GasPrice, err := s.RollupGpo.SuggestL2GasPrice(context.Background())
if err != nil {
return err
}
// Calculate the fee based on decoded L2 gas limit
gas := new(big.Int).SetUint64(tx.Gas())
l2GasLimit := fees.DecodeL2GasLimit(gas)
// Only count the calldata here as the overhead of the fully encoded
// RLP transaction is handled inside of EncodeL2GasLimit
fee := fees.EncodeTxGasLimit(tx.Data(), l1GasPrice, l2GasLimit, l2GasPrice)
if err != nil {
return err
}
// This should only happen if the transaction fee is greater than 18.44 ETH
if !fee.IsUint64() {
return fmt.Errorf("fee overflow: %s", fee.String())
}
// Compute the user's fee
paying := new(big.Int).Mul(new(big.Int).SetUint64(tx.Gas()), tx.GasPrice())
// Compute the minimum expected fee
expecting := new(big.Int).Mul(fee, fees.BigTxGasPrice)
if paying.Cmp(expecting) == -1 {
return fmt.Errorf("fee too low: %d, use at least tx.gasLimit = %d and tx.gasPrice = %d", paying, fee.Uint64(), fees.BigTxGasPrice)
}
// Protect users from overpaying by too much
overpaying := new(big.Int).Sub(paying, expecting)
threshold := new(big.Int).Mul(expecting, common.Big3)
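	// Reject payments that exceed the expected fee by more than 3x, i.e. a
	// total payment above 4x the expected amount.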
if overpaying.Cmp(threshold) == 1 {
return fmt.Errorf("fee too large: %d", paying)
}
return nil
}
// Higher level API for applying transactions. Should only be called for
// queue origin sequencer transactions, as the contracts on L1 manage the same
// validity checks that are done here.
func (s *SyncService) ValidateAndApplySequencerTransaction(tx *types.Transaction) error {
if s.verifier {
return errors.New("Verifier does not accept transactions out of band")
}
if tx == nil {
return errors.New("nil transaction passed to ValidateAndApplySequencerTransaction")
}
if err := s.verifyFee(tx); err != nil {
return err
}
s.txLock.Lock()
defer s.txLock.Unlock()
log.Trace("Sequencer transaction validation", "hash", tx.Hash().Hex())
qo := tx.QueueOrigin()
if qo != types.QueueOriginSequencer {
return fmt.Errorf("invalid transaction with queue origin %d", qo)
}
err := s.txpool.ValidateTx(tx)
if err != nil {
return fmt.Errorf("invalid transaction: %w", err)
}
return s.applyTransaction(tx)
}
// syncer represents a function that can sync remote items and then returns the
// index that it synced to as well as an error if it encountered one. It has
// side effects on the state and its functionality depends on the current state
type syncer func() (*uint64, error)
// rangeSyncer represents a function that syncs a range of items between its two
// arguments (inclusive)
type rangeSyncer func(uint64, uint64) error
// nextGetter is a type that represents a function that will return the next
// index
type nextGetter func() uint64
// indexGetter is a type that represents a function that returns an index and an
// error if there is a problem fetching the index. The different types of
// indices are canonical transaction chain indices, queue indices and batch
// indices. It does not induce side effects on state
type indexGetter func() (*uint64, error)
// isAtTip is a function that will determine if the local chain is at the tip
// of the remote datasource
func (s *SyncService) isAtTip(index *uint64, get indexGetter) (bool, error) {
latest, err := get()
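	// The remote data source has no elements at all, so the local chain is at
	// the tip only if it has no local index either.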
if errors.Is(err, errElementNotFound) {
if index == nil {
return true, nil
}
return false, nil
}
if err != nil {
return false, err
}
// There are no known enqueue transactions locally or remotely
if latest == nil && index == nil {
return true, nil
}
// Only one of the indices can be nil here due to the check above, so they
// cannot be equal
if latest == nil || index == nil {
return false, nil
}
// The indices are equal
if *latest == *index {
return true, nil
}
// The local tip is greater than the remote tip. This should never happen
if *latest < *index {
return false, fmt.Errorf("is at tip mismatch: remote (%d) - local (%d): %w", *latest, *index, errShortRemoteTip)
}
// The indices are not equal
return false, nil
}
// syncToTip is a function that can be used to sync to the tip of an ordered
// list of things. It is used to sync transactions, enqueue elements and batches
func (s *SyncService) syncToTip(sync syncer, getTip indexGetter) error {
s.loopLock.Lock()
defer s.loopLock.Unlock()
for {
index, err := sync()
if errors.Is(err, errElementNotFound) {
return nil
}
if err != nil {
return err
}
isAtTip, err := s.isAtTip(index, getTip)
if err != nil {
return err
}
if isAtTip {
return nil
}
}
}
// sync will sync a range of items
func (s *SyncService) sync(getLatest indexGetter, getNext nextGetter, syncer rangeSyncer) (*uint64, error) {
latestIndex, err := getLatest()
if err != nil {
return nil, fmt.Errorf("Cannot sync: %w", err)
}
if latestIndex == nil {
return nil, errors.New("Latest index is not defined")
}
nextIndex := getNext()
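	// If the next local index is exactly one past the latest remote index, the
	// local state is already caught up and there is nothing to sync.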
if nextIndex == *latestIndex+1 {
return latestIndex, nil
}
if err := syncer(nextIndex, *latestIndex); err != nil {
return nil, err
}
return latestIndex, nil
}
// syncBatches will sync a range of batches from the current known tip to the
// remote tip.
func (s *SyncService) syncBatches() (*uint64, error) {
index, err := s.sync(s.client.GetLatestTransactionBatchIndex, s.GetNextBatchIndex, s.syncTransactionBatchRange)
if err != nil {
return nil, fmt.Errorf("Cannot sync batches: %w", err)
}
return index, nil
}
// syncTransactionBatchRange will sync a range of batched transactions from
// start to end (inclusive)
func (s *SyncService) syncTransactionBatchRange(start, end uint64) error {
log.Info("Syncing transaction batch range", "start", start, "end", end)
for i := start; i <= end; i++ {
log.Debug("Fetching transaction batch", "index", i)
_, txs, err := s.client.GetTransactionBatch(i)
if err != nil {
return fmt.Errorf("Cannot get transaction batch: %w", err)
}
for _, tx := range txs {
if err := s.applyBatchedTransaction(tx); err != nil {
return fmt.Errorf("cannot apply batched transaction: %w", err)
}
}
s.SetLatestBatchIndex(&i)
}
return nil
}
// syncQueue will sync from the local tip to the known tip of the remote
// enqueue transaction feed.
func (s *SyncService) syncQueue() (*uint64, error) {
index, err := s.sync(s.client.GetLatestEnqueueIndex, s.GetNextEnqueueIndex, s.syncQueueTransactionRange)
if err != nil {
return nil, fmt.Errorf("Cannot sync queue: %w", err)
}
return index, nil
}
// syncQueueTransactionRange will apply a range of queue transactions from
// start to end (inclusive)
func (s *SyncService) syncQueueTransactionRange(start, end uint64) error {
log.Info("Syncing enqueue transactions range", "start", start, "end", end)
for i := start; i <= end; i++ {
tx, err := s.client.GetEnqueue(i)
if err != nil {
return fmt.Errorf("Canot get enqueue transaction; %w", err)
}
if err := s.applyTransaction(tx); err != nil {
return fmt.Errorf("Cannot apply transaction: %w", err)
}
}
return nil
}
// syncTransactions will sync transactions to the remote tip based on the
// backend
func (s *SyncService) syncTransactions(backend Backend) (*uint64, error) {
getLatest := func() (*uint64, error) {
return s.client.GetLatestTransactionIndex(backend)
}
sync := func(start, end uint64) error {
return s.syncTransactionRange(start, end, backend)
}
index, err := s.sync(getLatest, s.GetNextIndex, sync)
if err != nil {
return nil, fmt.Errorf("Cannot sync transactions with backend %s: %w", backend.String(), err)
}
return index, nil
}
// syncTransactionRange will sync a range of transactions from
// start to end (inclusive) from a specific Backend
func (s *SyncService) syncTransactionRange(start, end uint64, backend Backend) error {
log.Info("Syncing transaction range", "start", start, "end", end, "backend", backend.String())
for i := start; i <= end; i++ {
tx, err := s.client.GetTransaction(i, backend)
if err != nil {
return fmt.Errorf("cannot fetch transaction %d: %w", i, err)
}
if err = s.applyTransaction(tx); err != nil {
return fmt.Errorf("Cannot apply transaction: %w", err)
}
}
return nil
}
// updateEthContext will update the OVM execution context's
// timestamp and blocknumber if enough time has passed since
// it was last updated. This is a sequencer only function.
func (s *SyncService) updateEthContext() error {
context, err := s.client.GetLatestEthContext()
if err != nil {
return fmt.Errorf("Cannot get eth context: %w", err)
}
current := time.Unix(int64(s.GetLatestL1Timestamp()), 0)
next := time.Unix(int64(context.Timestamp), 0)
if next.Sub(current) > s.timestampRefreshThreshold {
log.Info("Updating Eth Context", "timetamp", context.Timestamp, "blocknumber", context.BlockNumber)
s.SetLatestL1BlockNumber(context.BlockNumber)
s.SetLatestL1Timestamp(context.Timestamp)
}
return nil
}
// SubscribeNewTxsEvent registers a subscription of NewTxsEvent and
// starts sending event to the given channel.
func (s *SyncService) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription {
return s.scope.Track(s.txFeed.Subscribe(ch))
}
func stringify(i *uint64) string {
if i == nil {
return "<nil>"
}
return strconv.FormatUint(*i, 10)
}
// IngestTransaction should only be called by trusted parties as it skips all
// validation and applies the transaction
func (s *SyncService) IngestTransaction(tx *types.Transaction) error {
return s.applyTransaction(tx)
}
| 1 | 17,824 | This is the default value? | ethereum-optimism-optimism | go |
@@ -1,8 +1,8 @@
import os
import codecs
-
+from time import strftime
from cliquet import utils as cliquet_utils
-
+from kinto import __version__
from kinto import logger
HERE = os.path.abspath(os.path.dirname(__file__)) | 1 | import os
import codecs
from cliquet import utils as cliquet_utils
from kinto import logger
HERE = os.path.abspath(os.path.dirname(__file__))
def render_template(template, destination, **kwargs):
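    """Render a template from this directory to destination, creating parent folders as needed."""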
template = os.path.join(HERE, template)
folder = os.path.dirname(destination)
if folder and not os.path.exists(folder):
os.makedirs(folder)
logger.info("Created config {}".format(os.path.abspath(destination)))
with codecs.open(template, 'r', encoding='utf-8') as f:
raw_template = f.read()
rendered = raw_template.format(**kwargs)
with codecs.open(destination, 'w+', encoding='utf-8') as output:
output.write(rendered)
def init(config_file, backend):
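    """Write a kinto.tpl-based config file with a random secret and storage/cache/permission settings for the chosen backend."""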
values = {}
values['secret'] = cliquet_utils.random_bytes_hex(32)
values['storage_backend'] = "cliquet.storage.%s" % backend
values['cache_backend'] = "cliquet.cache.%s" % backend
values['permission_backend'] = "cliquet.permission.%s" % backend
if backend == 'postgresql':
postgresql_url = "postgres://postgres:postgres@localhost/postgres"
values['storage_url'] = postgresql_url
values['cache_url'] = postgresql_url
values['permission_url'] = postgresql_url
elif backend == 'redis':
redis_url = "redis://localhost:6379"
values['storage_url'] = redis_url + "/1"
values['cache_url'] = redis_url + "/2"
values['permission_url'] = redis_url + "/3"
else:
values['storage_url'] = ''
values['cache_url'] = ''
values['permission_url'] = ''
render_template("kinto.tpl", config_file, **values)
| 1 | 8,914 | The spaces are actually legitimate here: in Python, you: - first import standard library modules (hint: time is one) - then import 3rd party library modules - then import the modules from your project (here kinto). Between each block you need an empty line. | Kinto-kinto | py
@@ -571,7 +571,7 @@ func (pi *PackageInfo) MarkedSource(obj types.Object) *cpb.MarkedSource {
// the object has its own non-blank name, that is used; otherwise if the object
// is of a named type, that type's name is used. Otherwise the result is "_".
func objectName(obj types.Object) string {
- if name := obj.Name(); name != "" && name != "" {
+ if name := obj.Name(); name != "" {
return name // the object's given name
} else if name := typeName(obj.Type()); name != "" {
return name // the object's type's name | 1 | /*
* Copyright 2015 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Package indexer implements a Kythe indexer for the Go language.
//
// Usage example: Indexing a Kythe CompilationUnit message.
//
// // Obtain a compilation from some source, e.g., an index pack.
// var pack *indexpack.Archive = ...
// var unit *apb.CompilationUnit = ...
//
// // Parse the sources and resolve types.
// pi, err := indexer.Resolve(unit, pack, &indexer.ResolveOptions{
// Info: indexer.AllTypeInfo(),
// })
// if err != nil {
// log.Fatal("Resolving failed: %v", err)
// }
// // Type information from http://godoc.org/go/types is now available
// // from pi.Info, which is a *types.Info record.
//
package indexer
import (
"bytes"
"errors"
"fmt"
"go/ast"
"go/build"
"go/parser"
"go/token"
"go/types"
"io"
"io/ioutil"
"log"
"path/filepath"
"strconv"
"strings"
"kythe.io/kythe/go/extractors/govname"
"kythe.io/kythe/go/util/metadata"
"kythe.io/kythe/go/util/ptypes"
"bitbucket.org/creachadair/stringset"
"github.com/golang/protobuf/proto"
"golang.org/x/tools/go/gcexportdata"
apb "kythe.io/kythe/proto/analysis_go_proto"
cpb "kythe.io/kythe/proto/common_go_proto"
gopb "kythe.io/kythe/proto/go_go_proto"
spb "kythe.io/kythe/proto/storage_go_proto"
)
// A Fetcher retrieves the contents of a file given its path and/or hex-encoded
// SHA256 digest, at least one of which must be set.
type Fetcher interface {
Fetch(path, digest string) ([]byte, error)
}
// PackageInfo records information about the Go packages defined by a
// compilation unit and its dependencies.
type PackageInfo struct {
Name string // The (short) name of the package
ImportPath string // The nominal import path of the package
Package *types.Package // The package for this compilation
Dependencies map[string]*types.Package // Packages imported from dependencies
VName *spb.VName // The base vname for this package
PackageVName map[*types.Package]*spb.VName // Resolved package to vname
FileSet *token.FileSet // Location info for the source files
Files []*ast.File // The parsed ASTs of the source files
SourceText map[*ast.File]string // The text of the source files
Rules map[*ast.File]metadata.Rules // Mapping metadata for each source file
Info *types.Info // If non-nil, contains type-checker results
Errors []error // All errors reported by the type checker
// A lazily-initialized mapping from an object on the RHS of a selection
// (lhs.RHS) to the nearest enclosing named struct or interface type; or in
// the body of a function or method to the nearest enclosing named method.
owner map[types.Object]types.Object
// A lazily-initialized mapping from AST nodes to their corresponding
// VNames. Only nodes that do not resolve directly to a type object are
// included in this map, e.g., function literals.
function map[ast.Node]*funcInfo
// A lazily-initialized set of standard library package import paths for
// which a node has been emitted.
standardLib stringset.Set
// A dummy function representing the undeclared package initialization
// functions, one per file in the package.
packageInit map[*ast.File]*funcInfo
// A cache of source file vnames.
fileVName map[*ast.File]*spb.VName
// A cache of file location mappings. This lets us get back from the
// parser's location to the vname for the enclosing file, which is in turn
// affected by the build configuration.
fileLoc map[*token.File]*ast.File
// A cache of already-computed signatures.
sigs map[types.Object]string
// The number of package-level init declarations seen.
numInits int
// The Go-specific details from the compilation record.
details *gopb.GoDetails
}
type funcInfo struct {
vname *spb.VName
numAnons int // number of anonymous functions defined inside this one
}
// packageImporter implements the types.Importer interface by fetching files
// from the required inputs of a compilation unit.
type packageImporter struct {
deps map[string]*types.Package // packages already loaded
fileSet *token.FileSet // source location information
fileMap map[string]*apb.FileInfo // :: import path → required input location
fetcher Fetcher // access to required input contents
}
// Import satisfies the types.Importer interface using the captured data from
// the compilation unit.
func (pi *packageImporter) Import(importPath string) (*types.Package, error) {
if pkg := pi.deps[importPath]; pkg != nil && pkg.Complete() {
return pkg, nil
} else if importPath == "unsafe" {
// The "unsafe" package is special, and isn't usually added by the
// resolver into the dependency map.
pi.deps[importPath] = types.Unsafe
return types.Unsafe, nil
}
// Fetch the required input holding the package for this import path, and
// load its export data for use by the type resolver.
if fi := pi.fileMap[importPath]; fi != nil {
data, err := pi.fetcher.Fetch(fi.Path, fi.Digest)
if err != nil {
return nil, fmt.Errorf("fetching %q (%s): %v", fi.Path, fi.Digest, err)
}
r, err := gcexportdata.NewReader(bytes.NewReader(data))
if err != nil {
return nil, fmt.Errorf("reading export data in %q (%s): %v", fi.Path, fi.Digest, err)
}
return gcexportdata.Read(r, pi.fileSet, pi.deps, importPath)
}
return nil, fmt.Errorf("package %q not found", importPath)
}
// ResolveOptions control the behaviour of the Resolve function. A nil options
// pointer provides default values.
type ResolveOptions struct {
// Passes a value whose non-nil map fields will be filled in by the type
// checker during resolution. The value will also be copied to the Info
// field of the PackageInfo returned by Resolve.
Info *types.Info
// If set, this function is called for each required input to check whether
// it contains metadata rules.
//
// Valid returns are:
// rs, nil -- a valid ruleset
// nil, nil -- no ruleset found
// _, err -- an error attempting to load a ruleset
//
CheckRules func(ri *apb.CompilationUnit_FileInput, f Fetcher) (*Ruleset, error)
}
func (r *ResolveOptions) info() *types.Info {
if r != nil {
return r.Info
}
return nil
}
func (r *ResolveOptions) checkRules(ri *apb.CompilationUnit_FileInput, f Fetcher) (*Ruleset, error) {
if r == nil || r.CheckRules == nil {
return nil, nil
}
return r.CheckRules(ri, f)
}
// A Ruleset represents a collection of mapping rules applicable to a source
// file in a compilation to be indexed.
type Ruleset struct {
Path string // the file path this rule set applies to
Rules metadata.Rules // the rules that apply to the path
}
// Resolve resolves the package information for unit and its dependencies. On
// success the package corresponding to unit is located via ImportPath in the
// Packages map of the returned value.
func Resolve(unit *apb.CompilationUnit, f Fetcher, opts *ResolveOptions) (*PackageInfo, error) {
sourceFiles := stringset.New(unit.SourceFile...)
imap := make(map[string]*spb.VName) // import path → vname
srcs := make(map[*ast.File]string) // file → text
fmap := make(map[string]*apb.FileInfo) // import path → file info
smap := make(map[string]*ast.File) // file path → file (sources)
filev := make(map[*ast.File]*spb.VName) // file → vname
floc := make(map[*token.File]*ast.File) // file → ast
fset := token.NewFileSet() // location info for the parser
details := goDetails(unit)
var files []*ast.File // parsed sources
var rules []*Ruleset // parsed linkage rules
// Classify the required inputs as either sources, which are to be parsed,
// or dependencies, which are to be "imported" via the type-checker's
// import mechanism. If successful, this populates fset and files with the
// lexical and syntactic content of the package's own sources.
//
// The build context is used to check build tags.
bc := &build.Context{
GOOS: details.GetGoos(),
GOARCH: details.GetGoarch(),
BuildTags: details.GetBuildTags(),
}
for _, ri := range unit.RequiredInput {
if ri.Info == nil {
return nil, errors.New("required input file info missing")
}
// Source inputs need to be parsed, so we can give their ASTs to the
// type checker later on.
fpath := ri.Info.Path
if sourceFiles.Contains(fpath) {
data, err := f.Fetch(fpath, ri.Info.Digest)
if err != nil {
return nil, fmt.Errorf("fetching %q (%s): %v", fpath, ri.Info.Digest, err)
}
if !matchesBuildTags(fpath, data, bc) {
log.Printf("Skipped source file %q because build tags do not match", fpath)
continue
}
vpath := ri.VName.GetPath()
if vpath == "" {
vpath = fpath
}
parsed, err := parser.ParseFile(fset, vpath, data, parser.AllErrors|parser.ParseComments)
if err != nil {
return nil, fmt.Errorf("parsing %q: %v", fpath, err)
}
// Cache file VNames based on the required input.
files = append(files, parsed)
vname := proto.Clone(ri.VName).(*spb.VName)
if vname == nil {
vname = proto.Clone(unit.VName).(*spb.VName)
vname.Signature = ""
vname.Language = ""
}
vname.Path = vpath
filev[parsed] = vname
srcs[parsed] = string(data)
smap[fpath] = parsed
continue
}
// Check for mapping metadata.
if rs, err := opts.checkRules(ri, f); err != nil {
log.Printf("Error checking rules in %q: %v", fpath, err)
} else if rs != nil {
log.Printf("Found %d metadata rules for path %q", len(rs.Rules), rs.Path)
rules = append(rules, rs)
continue
}
// Files may be source or compiled archives with type information for
// other packages, or may be other ancillary files like C headers to
// support cgo. Use the vname to determine which import path for each
// and save that mapping for use by the importer.
if ri.VName == nil {
return nil, fmt.Errorf("missing vname for %q", fpath)
}
ipath := vnameToImport(ri.VName, details.GetGoroot())
imap[ipath] = ri.VName
fmap[ipath] = ri.Info
}
if len(files) == 0 {
return nil, errors.New("no source files in package")
}
// Populate the location mapping. This relies on the fact that Iterate
// reports its files in the order they were added to the set, which in turn
// is their order in the files list.
i := 0
fset.Iterate(func(f *token.File) bool {
floc[f] = files[i]
i++
return true
})
pi := &PackageInfo{
Name: files[0].Name.Name,
ImportPath: vnameToImport(unit.VName, details.GetGoroot()),
FileSet: fset,
Files: files,
Info: opts.info(),
SourceText: srcs,
PackageVName: make(map[*types.Package]*spb.VName),
Dependencies: make(map[string]*types.Package), // :: import path → package
function: make(map[ast.Node]*funcInfo),
sigs: make(map[types.Object]string),
packageInit: make(map[*ast.File]*funcInfo),
fileVName: filev,
fileLoc: floc,
details: details,
}
// If mapping rules were found, populate the corresponding field.
if len(rules) != 0 {
pi.Rules = make(map[*ast.File]metadata.Rules)
for _, rs := range rules {
f, ok := smap[rs.Path]
if ok {
pi.Rules[f] = rs.Rules
}
}
}
// Run the type-checker and collect any errors it generates. Errors in the
// type checker are not returned directly; the caller can read them from
// the Errors field.
c := &types.Config{
FakeImportC: true, // so we can handle cgo
DisableUnusedImportCheck: true, // this is not fatal to type-checking
Importer: &packageImporter{
deps: pi.Dependencies,
fileSet: pi.FileSet,
fileMap: fmap,
fetcher: f,
},
Error: func(err error) { pi.Errors = append(pi.Errors, err) },
}
pi.Package, _ = c.Check(pi.Name, pi.FileSet, pi.Files, pi.Info)
pi.PackageVName[pi.Package] = unit.VName
// Fill in the mapping from packages to vnames.
for ip, vname := range imap {
if pkg := pi.Dependencies[ip]; pkg != nil {
pi.PackageVName[pkg] = proto.Clone(vname).(*spb.VName)
pi.PackageVName[pkg].Signature = "package"
pi.PackageVName[pkg].Language = govname.Language
}
}
if _, ok := pi.Dependencies["unsafe"]; ok {
pi.PackageVName[types.Unsafe] = govname.ForStandardLibrary("unsafe")
}
// Set this package's own vname.
pi.VName = proto.Clone(unit.VName).(*spb.VName)
pi.VName.Language = govname.Language
pi.VName.Signature = "package"
return pi, nil
}
// String renders a human-readable synopsis of the package information.
func (pi *PackageInfo) String() string {
if pi == nil {
return "#<package-info nil>"
}
return fmt.Sprintf("#<package-info %q ip=%q pkg=%p #deps=%d #src=%d #errs=%d>",
pi.Name, pi.ImportPath, pi.Package, len(pi.Dependencies), len(pi.Files), len(pi.Errors))
}
// Signature returns a signature for obj, suitable for use in a vname.
func (pi *PackageInfo) Signature(obj types.Object) string {
if obj == nil {
return ""
} else if pi.owner == nil {
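		// Lazily build the ownership map the first time a signature is requested.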
pi.owner = make(map[types.Object]types.Object)
pi.addOwners(pi.Package)
for _, pkg := range pi.Dependencies {
pi.addOwners(pkg)
}
}
if sig, ok := pi.sigs[obj]; ok {
return sig
}
tag, base := pi.newSignature(obj)
sig := base
if tag != "" {
sig = tag + " " + base
}
pi.sigs[obj] = sig
return sig
}
// ObjectVName returns a VName for obj relative to that of its package.
func (pi *PackageInfo) ObjectVName(obj types.Object) *spb.VName {
if pkg, ok := obj.(*types.PkgName); ok {
return pi.PackageVName[pkg.Imported()]
}
sig := pi.Signature(obj)
pkg := obj.Pkg()
var vname *spb.VName
if base := pi.PackageVName[pkg]; base != nil {
vname = proto.Clone(base).(*spb.VName)
} else if pkg == nil {
return govname.ForBuiltin(sig)
} else {
// This is an indirect import, that is, a name imported but not
// mentioned explicitly by the package being indexed.
// TODO(T273): This is a workaround, and may not be correct in all
// cases; work out a more comprehensive solution (possibly during
// extraction).
vname = proto.Clone(pi.VName).(*spb.VName)
vname.Path = strings.TrimPrefix(pkg.Path(), vname.Corpus+"/")
}
vname.Signature = sig
return vname
}
// MarkedSource returns a MarkedSource message describing obj.
// See: http://www.kythe.io/docs/schema/marked-source.html.
func (pi *PackageInfo) MarkedSource(obj types.Object) *cpb.MarkedSource {
ms := &cpb.MarkedSource{
Child: []*cpb.MarkedSource{{
Kind: cpb.MarkedSource_IDENTIFIER,
PreText: objectName(obj),
}},
}
// Include the package name as context, and for objects that hang off a
// named struct or interface, a label for that type.
//
// For example, given
// package p
// var v int // context is "p"
// type s struct { v int } // context is "p.s"
// func (v) f(x int) {}
// ^ ^--------------- context is "p.v.f"
// \----------------- context is "p.v"
//
// The tree structure is:
//
// (box)
// |
// (ctx)-----+-------(id)
// | |
// +----"."----+(".") name
// | |
// (id) pkg type
//
if ctx := pi.typeContext(obj); len(ctx) != 0 {
ms.Child = append([]*cpb.MarkedSource{{
Kind: cpb.MarkedSource_CONTEXT,
PostChildText: ".",
AddFinalListToken: true,
Child: ctx,
}}, ms.Child...)
}
// Handle types with "interesting" superstructure specially.
switch t := obj.(type) {
case *types.Func:
// For functions we include the parameters and return values, and for
// methods the receiver.
//
// Methods: func (R) Name(p1, ...) (r0, ...)
// Functions: func Name(p0, ...) (r0, ...)
fn := &cpb.MarkedSource{
Kind: cpb.MarkedSource_BOX,
Child: []*cpb.MarkedSource{{PreText: "func "}},
}
sig := t.Type().(*types.Signature)
firstParam := 0
if recv := sig.Recv(); recv != nil {
// Parenthesized receiver type, e.g. (R).
fn.Child = append(fn.Child, &cpb.MarkedSource{
Kind: cpb.MarkedSource_PARAMETER,
PreText: "(",
PostText: ") ",
Child: []*cpb.MarkedSource{{
Kind: cpb.MarkedSource_TYPE,
PreText: typeName(recv.Type()),
}},
})
firstParam = 1
}
fn.Child = append(fn.Child, ms)
// If there are no parameters, the lookup will not produce anything.
// Ensure when this happens we still get parentheses for notational
// purposes.
if sig.Params().Len() == 0 {
fn.Child = append(fn.Child, &cpb.MarkedSource{
Kind: cpb.MarkedSource_PARAMETER,
PreText: "()",
})
} else {
fn.Child = append(fn.Child, &cpb.MarkedSource{
Kind: cpb.MarkedSource_PARAMETER_LOOKUP_BY_PARAM,
PreText: "(",
PostChildText: ", ",
PostText: ")",
LookupIndex: uint32(firstParam),
})
}
if res := sig.Results(); res != nil && res.Len() > 0 {
rms := &cpb.MarkedSource{PreText: " "}
if res.Len() > 1 {
// If there is more than one result type, parenthesize.
rms.PreText = " ("
rms.PostText = ")"
rms.PostChildText = ", "
}
for i := 0; i < res.Len(); i++ {
rms.Child = append(rms.Child, &cpb.MarkedSource{
PreText: objectName(res.At(i)),
})
}
fn.Child = append(fn.Child, rms)
}
ms = fn
case *types.Var:
// For variables and fields, include the type.
repl := &cpb.MarkedSource{
Kind: cpb.MarkedSource_BOX,
PostChildText: " ",
Child: []*cpb.MarkedSource{
ms,
{Kind: cpb.MarkedSource_TYPE, PreText: typeName(t.Type())},
},
}
ms = repl
case *types.TypeName:
// For named types, include the underlying type.
repl := &cpb.MarkedSource{
Kind: cpb.MarkedSource_BOX,
PostChildText: " ",
Child: []*cpb.MarkedSource{
{PreText: "type"},
ms,
{Kind: cpb.MarkedSource_TYPE, PreText: typeName(t.Type().Underlying())},
},
}
ms = repl
default:
// TODO(fromberger): Handle other variations from go/types.
}
return ms
}
// objectName returns a human-readable name for obj if one can be inferred. If
// the object has its own non-blank name, that is used; otherwise if the object
// is of a named type, that type's name is used. Otherwise the result is "_".
func objectName(obj types.Object) string {
if name := obj.Name(); name != "" && name != "" {
return name // the object's given name
} else if name := typeName(obj.Type()); name != "" {
return name // the object's type's name
}
return "_" // not sure what to call it
}
// typeName returns a human readable name for typ.
func typeName(typ types.Type) string {
switch t := typ.(type) {
case *types.Named:
return t.Obj().Name()
case *types.Basic:
return t.Name()
case *types.Struct:
return "struct {...}"
case *types.Interface:
return "interface {...}"
case *types.Pointer:
return "*" + typeName(t.Elem())
}
return typ.String()
}
// typeContext returns the package, type, and function context identifiers that
// qualify the name of obj, if any are applicable. The result is empty if there
// are no appropriate qualifiers.
func (pi *PackageInfo) typeContext(obj types.Object) []*cpb.MarkedSource {
var ms []*cpb.MarkedSource
addID := func(s string) {
ms = append(ms, &cpb.MarkedSource{
Kind: cpb.MarkedSource_IDENTIFIER,
PreText: s,
})
}
for cur := pi.owner[obj]; cur != nil; cur = pi.owner[cur] {
if t, ok := cur.(interface {
Name() string
}); ok {
addID(t.Name())
} else {
addID(typeName(cur.Type()))
}
}
if pkg := obj.Pkg(); pkg != nil {
addID(pi.importPath(pkg))
}
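	// The qualifiers were collected from innermost to outermost; reverse them
	// so the package identifier comes first.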
for i, j := 0, len(ms)-1; i < j; {
ms[i], ms[j] = ms[j], ms[i]
i++
j--
}
return ms
}
// FileVName returns a VName for path relative to the package base.
func (pi *PackageInfo) FileVName(file *ast.File) *spb.VName {
if v := pi.fileVName[file]; v != nil {
return v
}
v := proto.Clone(pi.VName).(*spb.VName)
v.Language = ""
v.Signature = ""
v.Path = pi.FileSet.Position(file.Pos()).Filename
return v
}
// AnchorVName returns a VName for the given file and offsets.
func (pi *PackageInfo) AnchorVName(file *ast.File, start, end int) *spb.VName {
vname := proto.Clone(pi.FileVName(file)).(*spb.VName)
vname.Signature = "#" + strconv.Itoa(start) + ":" + strconv.Itoa(end)
vname.Language = govname.Language
return vname
}
// Span returns the containing file and 0-based offset range of the given AST
// node. The range is half-open, including the start position but excluding
// the end.
//
// If node == nil or lacks a valid start position, Span returns nil -1, -1. If
// the end position of node is invalid, start == end.
func (pi *PackageInfo) Span(node ast.Node) (file *ast.File, start, end int) {
if node == nil {
return nil, -1, -1
}
pos := node.Pos()
if pos == token.NoPos {
return nil, -1, -1
}
sp := pi.FileSet.Position(pos)
file = pi.fileLoc[pi.FileSet.File(pos)]
start = sp.Offset
end = start
if pos := node.End(); pos != token.NoPos {
end = pi.FileSet.Position(pos).Offset
}
return
}
const (
isBuiltin = "builtin-"
tagConst = "const"
tagField = "field"
tagFunc = "func"
tagLabel = "label"
tagMethod = "method"
tagParam = "param"
tagType = "type"
tagVar = "var"
)
// newSignature constructs and returns a tag and base signature for obj. The
// tag represents the "kind" of signature, to disambiguate built-in types from
// user-defined names, fields from methods, and so on. The base is a unique
// name for obj within its package, modulo the tag.
func (pi *PackageInfo) newSignature(obj types.Object) (tag, base string) {
if obj.Name() == "" {
return tagVar, "_"
}
topLevelTag := tagVar
switch t := obj.(type) {
case *types.Builtin:
return isBuiltin + tagFunc, t.Name()
case *types.Nil:
return isBuiltin + tagConst, "nil"
case *types.PkgName:
return "", "package" // the vname corpus and path carry the package name
case *types.Const:
topLevelTag = tagConst
if t.Pkg() == nil {
return isBuiltin + tagConst, t.Name()
}
case *types.Var:
if t.IsField() {
if owner, ok := pi.owner[t]; ok {
_, base := pi.newSignature(owner)
return tagField, base + "." + t.Name()
}
return tagField, fmt.Sprintf("[%p].%s", t, t.Name())
} else if owner, ok := pi.owner[t]; ok {
_, base := pi.newSignature(owner)
return tagParam, base + ":" + t.Name()
}
case *types.Func:
topLevelTag = tagFunc
if recv := t.Type().(*types.Signature).Recv(); recv != nil { // method
if owner, ok := pi.owner[t]; ok {
_, base := pi.newSignature(owner)
return tagMethod, base + "." + t.Name()
}
// If the receiver is defined in this package, fully qualify the
// name so references from elsewhere will work. Strictly speaking
// this is only necessary for exported methods, but it's simpler to
// do it for everything.
return tagMethod, fmt.Sprintf("(%s).%s", types.TypeString(recv.Type(), func(pkg *types.Package) string {
return pkg.Name()
}), t.Name())
}
case *types.TypeName:
topLevelTag = tagType
if t.Pkg() == nil {
return isBuiltin + tagType, t.Name()
}
case *types.Label:
return tagLabel, fmt.Sprintf("[%p].%s", t, t.Name())
default:
log.Panicf("Unexpected object kind: %T", obj)
}
// At this point, we have eliminated built-in objects; everything else must
// be defined in a package.
if obj.Pkg() == nil {
log.Panic("Object without a package: ", obj)
}
// Objects at package scope (i.e., parent scope is package scope).
if obj.Parent() == obj.Pkg().Scope() {
return topLevelTag, obj.Name()
}
// Objects in interior (local) scopes, i.e., everything else.
return topLevelTag, fmt.Sprintf("[%p].%s", obj, obj.Name())
}
// addOwners updates pi.owner from the types in pkg, adding mapping from fields
// of package-level named struct types to the owning named struct type; from
// methods of package-level named interface types to the owning named interface
// type; and from parameters of package-level named function or method types to
// the owning named function or method.
//
// This relation is used to construct signatures for these fields/methods,
// since they may be referenced from another package and thus need
// deterministic names. An object does not expose its "owner"; indeed it may
// have several.
//
// Caveats:
//
// (1) This mapping is deterministic but not necessarily the best one according
// to the original syntax, to which, in general, we do not have access. In
// these two examples, the type checker considers field X as belonging equally
// to types T and U, even though according to the syntax, it belongs primarily to
// T in the first example and U in the second:
//
// type T struct {X int}
// type U T
//
// type T U
// type U struct {X int}
//
// Similarly:
//
// type U struct {X int}
// type V struct {U}
//
// TODO(adonovan): sameer@ points out a useful heuristic: in a case of struct
// or interface embedding, if one struct/interface has fewer fields/methods,
// then it must be the primary one.
//
// (2) This pass is not exhaustive: there remain objects that may be referenced
// from outside the package but for which we can't easily come up with good
// names. Here are some examples:
//
// // package p
// var V1, V2 struct {X int} = ...
// func F() struct{X int} {...}
// type T struct {
// Y struct { X int }
// }
//
// // main
// p.V2.X = 1
// print(p.F().X)
// new(p.T).Y[0].X
//
// Also note that there may be arbitrary pointer, struct, chan, map, array, and
// slice type constructors between the type of the exported package member (V2,
// F or T) and the type of its X subelement. For now, we simply ignore such
// names. They should be rare in readable code.
func (pi *PackageInfo) addOwners(pkg *types.Package) {
scope := pkg.Scope()
addFunc := func(obj *types.Func) {
// Inspect the receiver, parameters, and result values.
fsig := obj.Type().(*types.Signature)
if recv := fsig.Recv(); recv != nil {
pi.owner[recv] = obj
}
if params := fsig.Params(); params != nil {
for i := 0; i < params.Len(); i++ {
pi.owner[params.At(i)] = obj
}
}
if res := fsig.Results(); res != nil {
for i := 0; i < res.Len(); i++ {
pi.owner[res.At(i)] = obj
}
}
}
addMethods := func(obj types.Object, n int, method func(i int) *types.Func) {
for i := 0; i < n; i++ {
if m := method(i); m.Pkg() == pkg {
if _, ok := pi.owner[m]; !ok {
pi.owner[m] = obj
addFunc(m)
}
}
}
}
for _, name := range scope.Names() {
switch obj := scope.Lookup(name).(type) {
case *types.TypeName:
// Go 1.9 will have support for type aliases. For now, skip these
// so we don't wind up emitting redundant declaration sites for the
// aliased type.
named, ok := obj.Type().(*types.Named)
if !ok {
continue
}
switch t := named.Underlying().(type) {
case *types.Struct:
// Inspect the fields of a struct.
for i := 0; i < t.NumFields(); i++ {
f := t.Field(i)
if f.Pkg() != pkg {
continue // wrong package
}
if _, ok := pi.owner[f]; !ok {
pi.owner[f] = obj
}
}
addMethods(obj, named.NumMethods(), named.Method)
case *types.Interface:
// Inspect the declared methods of an interface.
addMethods(obj, t.NumExplicitMethods(), t.ExplicitMethod)
default:
// Inspect declared methods of other named types.
addMethods(obj, named.NumMethods(), named.Method)
}
case *types.Func:
addFunc(obj)
}
}
}
// findFieldName tries to resolve the identifier that names an embedded
// anonymous field declaration at expr, and reports whether successful.
func (pi *PackageInfo) findFieldName(expr ast.Expr) (id *ast.Ident, ok bool) {
// There are three cases we care about here:
//
// A bare identifier (foo), which refers to a type defined in
// this package, or a builtin type,
//
// A selector expression (pkg.Foo) referring to an exported
// type defined in another package, or
//
// A pointer to either of these.
switch t := expr.(type) {
case *ast.StarExpr: // *T
return pi.findFieldName(t.X)
case *ast.Ident: // T declared locally
return t, true
case *ast.SelectorExpr: // pkg.T declared elsewhere
return t.Sel, true
default:
// No idea what this is; possibly malformed code.
return nil, false
}
}
// importPath returns the import path of pkg.
func (pi *PackageInfo) importPath(pkg *types.Package) string {
if v := pi.PackageVName[pkg]; v != nil {
return vnameToImport(v, pi.details.GetGoroot())
}
return pkg.Name()
}
// isPackageInit reports whether fi belongs to a package-level init function.
func (pi *PackageInfo) isPackageInit(fi *funcInfo) bool {
for _, v := range pi.packageInit {
if fi == v {
return true
}
}
return false
}
// vnameToImport returns the putative Go import path corresponding to v. The
// resulting string corresponds to the string literal appearing in source at
// the import site for the package so named.
func vnameToImport(v *spb.VName, goRoot string) string {
if govname.IsStandardLibrary(v) || (goRoot != "" && v.Root == goRoot) {
return v.Path
} else if tail, ok := rootRelative(goRoot, v.Path); ok {
// Paths under a nonempty GOROOT are treated as if they were standard
// library packages even if they are not labelled as "golang.org", so
// that nonstandard install locations will work sensibly.
return strings.TrimSuffix(tail, filepath.Ext(tail))
}
trimmed := strings.TrimSuffix(v.Path, filepath.Ext(v.Path))
return filepath.Join(v.Corpus, trimmed)
}
// rootRelative reports whether path has the form
//
// root[/pkg/os_arch/]tail
//
// and if so, returns the tail. It returns path, false if path does not have
// this form.
func rootRelative(root, path string) (string, bool) {
trimmed := strings.TrimPrefix(path, root+"/")
if root == "" || trimmed == path {
return path, false
}
if tail := strings.TrimPrefix(trimmed, "pkg/"); tail != trimmed {
parts := strings.SplitN(tail, "/", 2)
if len(parts) == 2 && strings.Contains(parts[0], "_") {
return parts[1], true
}
}
return trimmed, true
}
// goDetails returns the GoDetails message attached to unit, if there is one;
// otherwise it returns nil.
func goDetails(unit *apb.CompilationUnit) *gopb.GoDetails {
for _, msg := range unit.Details {
var dets gopb.GoDetails
if err := ptypes.UnmarshalAny(msg, &dets); err == nil {
return &dets
}
}
return nil
}
// matchesBuildTags reports whether the file at fpath, whose content is in
// data, would be matched by the settings in bc.
func matchesBuildTags(fpath string, data []byte, bc *build.Context) bool {
dir, name := filepath.Split(fpath)
bc.OpenFile = func(path string) (io.ReadCloser, error) {
if path != fpath {
return nil, errors.New("file not found")
}
return ioutil.NopCloser(bytes.NewReader(data)), nil
}
match, err := bc.MatchFile(dir, name)
return err == nil && match
}
// AllTypeInfo creates a new types.Info value with empty maps for each of the
// fields that can be filled in by the type-checker.
func AllTypeInfo() *types.Info {
return &types.Info{
Types: make(map[ast.Expr]types.TypeAndValue),
Defs: make(map[*ast.Ident]types.Object),
Uses: make(map[*ast.Ident]types.Object),
Implicits: make(map[ast.Node]types.Object),
Selections: make(map[*ast.SelectorExpr]*types.Selection),
Scopes: make(map[ast.Node]*types.Scope),
}
}
// XRefTypeInfo creates a new types.Info value with empty maps for each of the
// fields needed for cross-reference indexing.
func XRefTypeInfo() *types.Info {
return &types.Info{
Types: make(map[ast.Expr]types.TypeAndValue),
Defs: make(map[*ast.Ident]types.Object),
Uses: make(map[*ast.Ident]types.Object),
Implicits: make(map[ast.Node]types.Object),
}
}
| 1 | 8,345 | Haha, whoa, I'm not sure how I let that one go by. :) | kythe-kythe | go |
@@ -577,6 +577,7 @@ public class DownloadInstanceWriter implements MessageBodyWriter<DownloadInstanc
}
}
+ // TODO: Return ".md" for "text/markdown" as well as other extensions in MimeTypeDetectionByFileExtension.properties
private String getFileExtension(AuxiliaryFile auxFile) {
String fileExtension = "";
if (auxFile == null) { | 1 | /*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package edu.harvard.iq.dataverse.api;
import edu.harvard.iq.dataverse.AuxiliaryFile;
import java.lang.reflect.Type;
import java.lang.annotation.Annotation;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.IOException;
import javax.ws.rs.WebApplicationException;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.MultivaluedMap;
import javax.ws.rs.core.Response;
import javax.ws.rs.ext.MessageBodyWriter;
import javax.ws.rs.ext.Provider;
import edu.harvard.iq.dataverse.DataFile;
import edu.harvard.iq.dataverse.dataaccess.*;
import edu.harvard.iq.dataverse.datavariable.DataVariable;
import edu.harvard.iq.dataverse.engine.command.Command;
import edu.harvard.iq.dataverse.engine.command.exception.CommandException;
import edu.harvard.iq.dataverse.engine.command.impl.CreateGuestbookResponseCommand;
import edu.harvard.iq.dataverse.makedatacount.MakeDataCountLoggingServiceBean;
import edu.harvard.iq.dataverse.makedatacount.MakeDataCountLoggingServiceBean.MakeDataCountEntry;
import edu.harvard.iq.dataverse.util.FileUtil;
import java.io.File;
import java.io.FileInputStream;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URLEncoder;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.logging.Level;
import java.util.logging.Logger;
import javax.inject.Inject;
import javax.ws.rs.ClientErrorException;
import javax.ws.rs.NotFoundException;
import javax.ws.rs.RedirectionException;
import javax.ws.rs.ServiceUnavailableException;
import javax.ws.rs.core.HttpHeaders;
import org.apache.tika.mime.MimeType;
import org.apache.tika.mime.MimeTypeException;
import org.apache.tika.mime.MimeTypes;
/**
*
* @author Leonid Andreev
*/
@Provider
public class DownloadInstanceWriter implements MessageBodyWriter<DownloadInstance> {
@Inject
MakeDataCountLoggingServiceBean mdcLogService;
private static final Logger logger = Logger.getLogger(DownloadInstanceWriter.class.getCanonicalName());
@Override
public boolean isWriteable(Class<?> clazz, Type type, Annotation[] annotation, MediaType mediaType) {
return clazz == DownloadInstance.class;
}
@Override
public long getSize(DownloadInstance di, Class<?> clazz, Type type, Annotation[] annotation, MediaType mediaType) {
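        // A negative value tells JAX-RS that the content length is not known in advance.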
return -1;
//return getFileSize(di);
}
@Override
public void writeTo(DownloadInstance di, Class<?> clazz, Type type, Annotation[] annotation, MediaType mediaType, MultivaluedMap<String, Object> httpHeaders, OutputStream outstream) throws IOException, WebApplicationException {
if (di.getDownloadInfo() != null && di.getDownloadInfo().getDataFile() != null) {
DataAccessRequest daReq = new DataAccessRequest();
DataFile dataFile = di.getDownloadInfo().getDataFile();
StorageIO<DataFile> storageIO = DataAccess.getStorageIO(dataFile, daReq);
if (storageIO != null) {
try {
storageIO.open();
} catch (IOException ioex) {
//throw new WebApplicationException(Response.Status.SERVICE_UNAVAILABLE);
logger.log(Level.INFO, "Datafile {0}: Failed to locate and/or open physical file. Error message: {1}", new Object[]{dataFile.getId(), ioex.getLocalizedMessage()});
throw new NotFoundException("Datafile " + dataFile.getId() + ": Failed to locate and/or open physical file.");
}
// Before we do anything else, check if this download can be handled
// by a redirect to remote storage (only supported on S3, as of 5.4):
if (storageIO instanceof S3AccessIO && ((S3AccessIO) storageIO).downloadRedirectEnabled()) {
// Even if the above is true, there are a few cases where a
// redirect is not applicable.
// For example, for a tabular file, we can redirect a request
// for a saved original; but CANNOT if it is a column subsetting
// request (must be streamed in real time locally); or a format
// conversion that hasn't been cached and saved on S3 yet.
boolean redirectSupported = true;
String auxiliaryTag = null;
String auxiliaryType = null;
String auxiliaryFileName = null;
if ("imageThumb".equals(di.getConversionParam())) {
// Can redirect - but only if already generated and cached.
int requestedSize = 0;
if (!"".equals(di.getConversionParamValue())) {
try {
requestedSize = new Integer(di.getConversionParamValue());
} catch (java.lang.NumberFormatException ex) {
// it's ok, the default size will be used.
}
}
auxiliaryTag = ImageThumbConverter.THUMBNAIL_SUFFIX + (requestedSize > 0 ? requestedSize : ImageThumbConverter.DEFAULT_THUMBNAIL_SIZE);
if (isAuxiliaryObjectCached(storageIO, auxiliaryTag)) {
auxiliaryType = ImageThumbConverter.THUMBNAIL_MIME_TYPE;
String fileName = storageIO.getFileName();
if (fileName != null) {
auxiliaryFileName = fileName.replaceAll("\\.[^\\.]*$", ImageThumbConverter.THUMBNAIL_FILE_EXTENSION);
}
} else {
redirectSupported = false;
}
} else if (di.getAuxiliaryFile() != null) {
// We should support redirects to auxiliary files too.
auxiliaryTag = di.getAuxiliaryFile().getFormatTag();
String auxVersion = di.getAuxiliaryFile().getFormatVersion();
if (auxVersion != null) {
auxiliaryTag = auxiliaryTag + "_" + auxVersion;
}
if (isAuxiliaryObjectCached(storageIO, auxiliaryTag)) {
String fileExtension = getFileExtension(di.getAuxiliaryFile());
auxiliaryFileName = storageIO.getFileName() + "." + auxiliaryTag + fileExtension;
auxiliaryType = di.getAuxiliaryFile().getContentType();
} else {
redirectSupported = false;
}
} else if (dataFile.isTabularData()) {
// Many separate special cases here.
if (di.getConversionParam() != null) {
if (di.getConversionParam().equals("format")) {
if ("original".equals(di.getConversionParamValue())) {
auxiliaryTag = StoredOriginalFile.SAVED_ORIGINAL_FILENAME_EXTENSION;
auxiliaryType = dataFile.getOriginalFileFormat();
auxiliaryFileName = dataFile.getOriginalFileName();
} else {
// format conversions - can redirect, but only if
// it has been cached already.
auxiliaryTag = di.getConversionParamValue();
if (isAuxiliaryObjectCached(storageIO, auxiliaryTag)) {
auxiliaryType = di.getServiceFormatType(di.getConversionParam(), auxiliaryTag);
auxiliaryFileName = FileUtil.replaceExtension(storageIO.getFileName(), auxiliaryTag);
} else {
redirectSupported = false;
}
}
} else if (!di.getConversionParam().equals("noVarHeader")) {
// This is a subset request - can't do.
redirectSupported = false;
}
} else {
redirectSupported = false;
}
}
if (redirectSupported) {
// definitely close the (still open) S3 input stream,
// since we are not going to use it. The S3 documentation
// emphasizes that it is very important not to leave these
// lying around un-closed, since they are going to fill
// up the S3 connection pool!
try {
storageIO.getInputStream().close();
} catch (IOException ioex) {
}
// [attempt to] redirect:
String redirect_url_str;
try {
redirect_url_str = ((S3AccessIO) storageIO).generateTemporaryS3Url(auxiliaryTag, auxiliaryType, auxiliaryFileName);
} catch (IOException ioex) {
redirect_url_str = null;
}
if (redirect_url_str == null) {
throw new ServiceUnavailableException();
}
logger.fine("Data Access API: direct S3 url: " + redirect_url_str);
URI redirect_uri;
try {
redirect_uri = new URI(redirect_url_str);
} catch (URISyntaxException ex) {
logger.info("Data Access API: failed to create S3 redirect url (" + redirect_url_str + ")");
redirect_uri = null;
}
if (redirect_uri != null) {
// increment the download count, if necessary:
if (di.getGbr() != null && !(isThumbnailDownload(di) || isPreprocessedMetadataDownload(di))) {
try {
logger.fine("writing guestbook response, for an S3 download redirect.");
Command<?> cmd = new CreateGuestbookResponseCommand(di.getDataverseRequestService().getDataverseRequest(), di.getGbr(), di.getGbr().getDataFile().getOwner());
di.getCommand().submit(cmd);
MakeDataCountEntry entry = new MakeDataCountEntry(di.getRequestUriInfo(), di.getRequestHttpHeaders(), di.getDataverseRequestService(), di.getGbr().getDataFile());
mdcLogService.logEntry(entry);
} catch (CommandException e) {
}
}
// finally, issue the redirect:
Response response = Response.seeOther(redirect_uri).build();
logger.fine("Issuing redirect to the file location on S3.");
throw new RedirectionException(response);
}
throw new ServiceUnavailableException();
}
}
if (di.getConversionParam() != null) {
// Image Thumbnail and Tabular data conversion:
// NOTE: only supported on local files, as of 4.0.2!
// NOTE: should be supported on all files for which StorageIO drivers
// are available (but not on harvested files!) -- L.A. 4.6.2
if (di.getConversionParam().equals("imageThumb") && !dataFile.isHarvested()) {
if ("".equals(di.getConversionParamValue())) {
storageIO = ImageThumbConverter.getImageThumbnailAsInputStream(storageIO, ImageThumbConverter.DEFAULT_THUMBNAIL_SIZE);
} else {
try {
int size = new Integer(di.getConversionParamValue());
if (size > 0) {
storageIO = ImageThumbConverter.getImageThumbnailAsInputStream(storageIO, size);
}
} catch (java.lang.NumberFormatException ex) {
storageIO = ImageThumbConverter.getImageThumbnailAsInputStream(storageIO, ImageThumbConverter.DEFAULT_THUMBNAIL_SIZE);
}
// and, since we now have tabular data files that can
// have thumbnail previews... obviously, we don't want to
// add the variable header to the image stream!
storageIO.setNoVarHeader(Boolean.TRUE);
storageIO.setVarHeader(null);
}
} else if (dataFile.isTabularData()) {
logger.fine("request for tabular data download;");
// We can now generate thumbnails for some tabular data files (specifically,
// tab files tagged as "geospatial"). We are going to assume that you can
// do only ONE thing at a time - request the thumbnail for the file, or
// request any tabular-specific services.
if (di.getConversionParam().equals("noVarHeader")) {
logger.fine("tabular data with no var header requested");
storageIO.setNoVarHeader(Boolean.TRUE);
storageIO.setVarHeader(null);
} else if (di.getConversionParam().equals("format")) {
// Conversions, and downloads of "stored originals" are
// now supported on all DataFiles for which StorageIO
// access drivers are available.
if ("original".equals(di.getConversionParamValue())) {
logger.fine("stored original of an ingested file requested");
storageIO = StoredOriginalFile.retreive(storageIO);
} else {
// Other format conversions:
logger.fine("format conversion on a tabular file requested (" + di.getConversionParamValue() + ")");
String requestedMimeType = di.getServiceFormatType(di.getConversionParam(), di.getConversionParamValue());
if (requestedMimeType == null) {
// default mime type, in case real type is unknown;
// (this shouldn't happen in real life - but just in case):
requestedMimeType = "application/octet-stream";
}
storageIO
= DataConverter.performFormatConversion(dataFile,
storageIO,
di.getConversionParamValue(), requestedMimeType);
}
} else if (di.getConversionParam().equals("subset")) {
logger.fine("processing subset request.");
// TODO:
// If there are parameters on the list that are
// not valid variable ids, or if they do not belong to
// the datafile referenced - I simply skip them;
// perhaps I should throw an invalid argument exception
// instead.
if (di.getExtraArguments() != null && di.getExtraArguments().size() > 0) {
logger.fine("processing extra arguments list of length " + di.getExtraArguments().size());
List<Integer> variablePositionIndex = new ArrayList<>();
String subsetVariableHeader = null;
for (int i = 0; i < di.getExtraArguments().size(); i++) {
DataVariable variable = (DataVariable) di.getExtraArguments().get(i);
if (variable != null) {
if (variable.getDataTable().getDataFile().getId().equals(dataFile.getId())) {
logger.fine("adding variable id " + variable.getId() + " to the list.");
variablePositionIndex.add(variable.getFileOrder());
if (subsetVariableHeader == null) {
subsetVariableHeader = variable.getName();
} else {
subsetVariableHeader = subsetVariableHeader.concat("\t");
subsetVariableHeader = subsetVariableHeader.concat(variable.getName());
}
} else {
logger.warning("variable does not belong to this data file.");
}
}
}
if (variablePositionIndex.size() > 0) {
try {
File tempSubsetFile = File.createTempFile("tempSubsetFile", ".tmp");
TabularSubsetGenerator tabularSubsetGenerator = new TabularSubsetGenerator();
tabularSubsetGenerator.subsetFile(storageIO.getInputStream(), tempSubsetFile.getAbsolutePath(), variablePositionIndex, dataFile.getDataTable().getCaseQuantity(), "\t");
if (tempSubsetFile.exists()) {
FileInputStream subsetStream = new FileInputStream(tempSubsetFile);
long subsetSize = tempSubsetFile.length();
InputStreamIO subsetStreamIO = new InputStreamIO(subsetStream, subsetSize);
logger.fine("successfully created subset output stream.");
subsetVariableHeader = subsetVariableHeader.concat("\n");
subsetStreamIO.setVarHeader(subsetVariableHeader);
String tabularFileName = storageIO.getFileName();
if (tabularFileName != null && tabularFileName.endsWith(".tab")) {
tabularFileName = tabularFileName.replaceAll("\\.tab$", "-subset.tab");
} else if (tabularFileName != null && !"".equals(tabularFileName)) {
tabularFileName = tabularFileName.concat("-subset.tab");
} else {
tabularFileName = "subset.tab";
}
subsetStreamIO.setFileName(tabularFileName);
subsetStreamIO.setMimeType(storageIO.getMimeType());
storageIO = subsetStreamIO;
} else {
storageIO = null;
}
} catch (IOException ioex) {
storageIO = null;
}
}
} else {
logger.fine("empty list of extra arguments.");
}
}
}
if (storageIO == null) {
//throw new WebApplicationException(Response.Status.SERVICE_UNAVAILABLE);
// 404/not found may be a better return code option here
// (similarly to what the Access API returns when a thumbnail is requested on a text file, etc.)
throw new NotFoundException("datafile access error: requested optional service (image scaling, format conversion, etc.) could not be performed on this datafile.");
}
} else if (di.getAuxiliaryFile() != null) {
// Make sure to close the InputStream for the main datafile:
try {
storageIO.getInputStream().close();
} catch (IOException ioex) {
}
String auxTag = di.getAuxiliaryFile().getFormatTag();
String auxVersion = di.getAuxiliaryFile().getFormatVersion();
if (auxVersion != null) {
auxTag = auxTag + "_" + auxVersion;
}
long auxFileSize = di.getAuxiliaryFile().getFileSize();
InputStreamIO auxStreamIO = new InputStreamIO(storageIO.getAuxFileAsInputStream(auxTag), auxFileSize);
String fileExtension = getFileExtension(di.getAuxiliaryFile());
auxStreamIO.setFileName(storageIO.getFileName() + "." + auxTag + fileExtension);
auxStreamIO.setMimeType(di.getAuxiliaryFile().getContentType());
storageIO = auxStreamIO;
}
try (InputStream instream = storageIO.getInputStream()) {
if (instream != null) {
// headers:
String fileName = storageIO.getFileName();
String mimeType = storageIO.getMimeType();
// Provide both the "Content-disposition" and "Content-Type" headers,
// to satisfy the widest selection of browsers out there.
// Encode the filename as UTF-8, then deal with spaces. "encode" changes
// a space to + so we change it back to a space (%20).
String finalFileName = URLEncoder.encode(fileName, "UTF-8").replaceAll("\\+", "%20");
httpHeaders.add("Content-disposition", "attachment; filename=\"" + finalFileName + "\"");
httpHeaders.add("Content-Type", mimeType + "; name=\"" + finalFileName + "\"");
long contentSize;
// User may have requested a range of bytes.
// Ranges are only supported when the size of the content
// stream is known (i.e., it's not a dynamically generated
// stream).
List<Range> ranges = new ArrayList<>();
String rangeHeader = null;
HttpHeaders headers = di.getRequestHttpHeaders();
if (headers != null) {
rangeHeader = headers.getHeaderString("Range");
}
long offset = 0;
long leftToRead = -1L;
// Moving the "left to read" var here, since we may need
// to start counting our range bytes outside the main .write()
// loop, if it's a tabular file with a header.
if ((contentSize = getContentSize(storageIO)) > 0) {
try {
ranges = getRanges(rangeHeader, contentSize);
} catch (Exception ex) {
logger.fine("Exception caught processing Range header: " + ex.getLocalizedMessage());
throw new ClientErrorException("Error due to Range header: " + ex.getLocalizedMessage(), Response.Status.REQUESTED_RANGE_NOT_SATISFIABLE);
}
if (ranges.isEmpty()) {
logger.fine("Content size (retrieved from the AccessObject): " + contentSize);
httpHeaders.add("Content-Length", contentSize);
} else {
// For now we only support a single range.
long rangeContentSize = ranges.get(0).getLength();
logger.fine("Content size (Range header in use): " + rangeContentSize);
httpHeaders.add("Content-Length", rangeContentSize);
offset = ranges.get(0).getStart();
leftToRead = rangeContentSize;
}
} else {
// Content size unknown, must be a dynamically
// generated stream, such as a subsetting request.
// We do NOT want to support range requests on such streams:
if (rangeHeader != null) {
throw new NotFoundException("Range headers are not supported on dynamically-generated content, such as tabular subsetting.");
}
}
// (the httpHeaders map must be modified *before* writing any
// data in the output stream!)
int bufsize;
byte[] bffr = new byte[4 * 8192];
// Before writing out any bytes from the input stream, write
// any extra content, such as the variable header for the
// subsettable files:
if (storageIO.getVarHeader() != null) {
logger.fine("storageIO.getVarHeader().getBytes().length: " + storageIO.getVarHeader().getBytes().length);
if (storageIO.getVarHeader().getBytes().length > 0) {
// If a range is not being requested, let's call that the normal case.
// Write the entire line of variable headers. Later, the rest of the file
// will be written.
if (ranges.isEmpty()) {
logger.fine("writing the entire variable header");
outstream.write(storageIO.getVarHeader().getBytes());
} else {
// Range requested. Since the output stream of a
// tabular file is made up of the varHeader and the body of
// the physical file, we should assume that the requested
// range may span any portion of the combined stream.
// Thus we may or may not have to write the header, or a
// portion thereof.
int headerLength = storageIO.getVarHeader().getBytes().length;
if (offset >= headerLength) {
// We can skip the entire header.
// All we need to do is adjust the byte offset
// in the physical file; the number of bytes
// left to write stays unchanged, since we haven't
// written anything.
logger.fine("Skipping the variable header completely.");
offset -= headerLength;
} else {
// We need to write some portion of the header;
// Once we are done, we may or may not still have
// some bytes left to write from the main physical file.
if (offset + leftToRead <= headerLength) {
// This is a more straightforward case - we just need to
// write a portion of the header, and then we are done!
logger.fine("Writing this many bytes of the variable header line: " + leftToRead);
outstream.write(Arrays.copyOfRange(storageIO.getVarHeader().getBytes(), (int)offset, (int)offset + (int)leftToRead));
// set "left to read" to zero, indicating that we are done:
leftToRead = 0;
} else {
// write the requested portion of the header:
logger.fine("Writing this many bytes of the variable header line: " + (headerLength - offset));
outstream.write(Arrays.copyOfRange(storageIO.getVarHeader().getBytes(), (int)offset, headerLength));
// and adjust the file offset and remaining number of bytes accordingly:
leftToRead -= (headerLength - offset);
offset = 0;
}
}
}
}
}
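// Illustrative walk-through with made-up numbers (not taken from any real request):
// suppose the variable header line is 100 bytes and the client asked for
// "bytes=50-149", so at this point offset = 50 and leftToRead = 100. Since
// offset < headerLength and offset + leftToRead > headerLength, the branch
// above writes header bytes [50, 100), reduces leftToRead to 50 and resets
// offset to 0; the range loop further down then streams the first 50 bytes
// of the physical file to complete the response.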
// Dynamic streams, etc. Normal operation. No leftToRead.
if (ranges.isEmpty()) {
logger.fine("Normal, non-range request of file id " + dataFile.getId());
while ((bufsize = instream.read(bffr)) != -1) {
outstream.write(bffr, 0, bufsize);
}
} else if (leftToRead > 0) {
// This is a range request, and we still have bytes to read
// (for a tabular file, we may have already written enough
// bytes from the variable header!)
storageIO.setOffset(offset);
// Thinking about it, we could just do instream.skip(offset)
// here... But I would like to have this offset functionality
// in StorageIO, for any future cases where we may not
// be able to do that on the stream directly (?) -- L.A.
logger.fine("Range request of file id " + dataFile.getId());
// Read a range of bytes instead of the whole file. We'll count down as we write.
// For now we only support a single range.
while ((bufsize = instream.read(bffr)) != -1) {
if ((leftToRead -= bufsize) > 0) {
// Just do a normal write. Potentially lots to go. Don't break.
outstream.write(bffr, 0, bufsize);
} else {
// Get those last bytes or bytes equal to bufsize. Last one. Then break.
outstream.write(bffr, 0, (int) leftToRead + bufsize);
break;
}
}
}
logger.fine("di conversion param: " + di.getConversionParam() + ", value: " + di.getConversionParamValue());
// Downloads of thumbnail images (scaled down, low-res versions of graphic image files) and
// "preprocessed metadata" records for tabular data files are NOT considered "real" downloads,
// so these should not produce guestbook entries:
if (di.getGbr() != null && !(isThumbnailDownload(di) || isPreprocessedMetadataDownload(di))) {
try {
logger.fine("writing guestbook response.");
Command<?> cmd = new CreateGuestbookResponseCommand(di.getDataverseRequestService().getDataverseRequest(), di.getGbr(), di.getGbr().getDataFile().getOwner());
di.getCommand().submit(cmd);
MakeDataCountEntry entry = new MakeDataCountEntry(di.getRequestUriInfo(), di.getRequestHttpHeaders(), di.getDataverseRequestService(), di.getGbr().getDataFile());
mdcLogService.logEntry(entry);
} catch (CommandException e) {
}
} else {
logger.fine("not writing guestbook response");
}
outstream.close();
return;
}
}
}
}
throw new NotFoundException();
}
private boolean isAuxiliaryObjectCached(StorageIO storageIO, String auxiliaryTag) {
try {
return storageIO.isAuxObjectCached(auxiliaryTag);
} catch (IOException cachedIOE) {
return false;
}
}
private String getFileExtension(AuxiliaryFile auxFile) {
String fileExtension = "";
if (auxFile == null) {
return fileExtension;
}
String contentType = auxFile.getContentType();
if (contentType != null) {
MimeTypes allTypes = MimeTypes.getDefaultMimeTypes();
try {
MimeType mimeType = allTypes.forName(contentType);
fileExtension = mimeType.getExtension();
} catch (MimeTypeException ex) {
}
}
return fileExtension;
}
private boolean isThumbnailDownload(DownloadInstance downloadInstance) {
if (downloadInstance == null) {
return false;
}
if (downloadInstance.getConversionParam() == null) {
return false;
}
return downloadInstance.getConversionParam().equals("imageThumb");
}
private boolean isPreprocessedMetadataDownload(DownloadInstance downloadInstance) {
if (downloadInstance == null) {
return false;
}
if (downloadInstance.getConversionParam() == null) {
return false;
}
if (downloadInstance.getConversionParamValue() == null) {
return false;
}
return downloadInstance.getConversionParam().equals("format") && downloadInstance.getConversionParamValue().equals("prep");
}
private long getContentSize(StorageIO<?> accessObject) {
long contentSize = 0;
if (accessObject.getSize() > -1) {
contentSize += accessObject.getSize();
if (accessObject.getVarHeader() != null) {
if (accessObject.getVarHeader().getBytes().length > 0) {
contentSize += accessObject.getVarHeader().getBytes().length;
}
}
return contentSize;
}
return -1;
}
private long getFileSize(DownloadInstance di) {
return getFileSize(di, null);
}
private long getFileSize(DownloadInstance di, String extraHeader) {
if (di.getDownloadInfo() != null && di.getDownloadInfo().getDataFile() != null) {
DataFile df = di.getDownloadInfo().getDataFile();
// For non-tabular files, we probably know the file size:
// (except for when this is a thumbnail request on an image file -
// because the size will obviously be different... can still be
// figured out - but perhaps we shouldn't bother; since thumbnails
// are essentially guaranteed to be small)
if (!df.isTabularData() && (di.getConversionParam() == null || "".equals(di.getConversionParam()))) {
if (df.getFilesize() > 0) {
return df.getFilesize();
}
}
// For Tabular files:
// If it's just a straight file download, it's pretty easy - we
// already know the size of the file on disk (just like in the
// fragment above); we just need to make sure if we are also supplying
// the additional variable name header - then we need to add its
// size to the total... But the cases when it's a format conversion
// and, especially, subsets are of course trickier. (these are not
// supported yet).
if (df.isTabularData() && (di.getConversionParam() == null || "".equals(di.getConversionParam()))) {
long fileSize = df.getFilesize();
if (fileSize > 0) {
if (extraHeader != null) {
fileSize += extraHeader.getBytes().length;
}
return fileSize;
}
}
}
return -1;
}
/**
* @param range "bytes=0-10" for example. Found in the "Range" HTTP header.
* @param fileSize File size in bytes.
* @throws RuntimeException on any problems processing the Range header.
*/
public List<Range> getRanges(String range, long fileSize) {
// Inspired by https://gist.github.com/davinkevin/b97e39d7ce89198774b4
// via https://stackoverflow.com/questions/28427339/how-to-implement-http-byte-range-requests-in-spring-mvc/28479001#28479001
List<Range> ranges = new ArrayList<>();
if (range != null) {
logger.fine("Range header supplied: " + range);
// Technically this regex supports multiple ranges.
// Below we have a check to enforce a single range.
if (!range.matches("^bytes=\\d*-\\d*(,\\d*-\\d*)*$")) {
throw new RuntimeException("The format is bytes=<range-start>-<range-end> where start and end are optional.");
}
// The 6 is to remove "bytes="
String[] parts = range.substring(6).split(",");
if (parts.length > 1) {
// Only allow a single range.
throw new RuntimeException("Only one range is allowed.");
}
// This loop is here in case we ever want to support multiple ranges.
for (String part : parts) {
long start = getRangeStart(part);
long end = getRangeEnd(part);
if (start == -1) {
// start does not exist. Base start off of how many bytes from end.
start = fileSize - end;
end = fileSize - 1;
} else if (end == -1 || end > fileSize - 1) {
// Set end when it doesn't exist.
// Also, automatically set end to size of file if end is beyond
// the file size (rather than throwing an error).
end = fileSize - 1;
}
if (start > end) {
throw new RuntimeException("Start is larger than end or size of file.");
}
ranges.add(new Range(start, end));
}
}
return ranges;
}
/**
* @return Return a positive long or -1 if start does not exist.
*/
public static long getRangeStart(String part) {
// Get everything before the "-".
String start = part.substring(0, part.indexOf("-"));
return (start.length() > 0) ? Long.parseLong(start) : -1;
}
/**
* @return Return a positive long or -1 if end does not exist.
*/
public static long getRangeEnd(String part) {
// Get everything after the "-".
String end = part.substring(part.indexOf("-") + 1, part.length());
return (end.length() > 0) ? Long.parseLong(end) : -1;
}
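// Usage sketch (values are illustrative): for a 1000-byte stream,
// getRanges("bytes=100-199", 1000) yields a single Range with start=100 and
// end=199; the suffix form getRanges("bytes=-500", 1000) resolves to start=500,
// end=999; and the open-ended "bytes=100-" is clamped to end=999. Anything that
// is not a single well-formed range (e.g. "bytes=0-10,20-30") raises a
// RuntimeException, which writeTo() reports as a 416 (Range Not Satisfiable).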
}
| 1 | 45,579 | FWIW: We have FileUtil.generateOriginalExtension for tabular files but it uses a hardcoded list rather than the properties file. | IQSS-dataverse | java |
@@ -297,6 +297,9 @@ func StartDataplaneDriver(configParams *config.Config,
InterfaceName: configParams.WireguardInterfaceName,
MTU: configParams.WireguardMTU,
RouteSource: configParams.RouteSource,
+ // currently, AKS *must* encrypt hostnetwork traffic, while it is optional on EKS
+ HostEncryptionEnabled: configParams.KubernetesProvider() == config.ProviderAKS ||
+ (configParams.WireguardHostEncryptionEnabled && configParams.KubernetesProvider() == config.ProviderEKS),
},
IPIPMTU: configParams.IpInIpMtu,
VXLANMTU: configParams.VXLANMTU, | 1 | // Copyright (c) 2020-2021 Tigera, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !windows
package dataplane
import (
"math/bits"
"net"
"net/http"
"os/exec"
"runtime/debug"
"strconv"
"time"
log "github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/client-go/kubernetes"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
apiv3 "github.com/projectcalico/api/pkg/apis/projectcalico/v3"
"github.com/projectcalico/felix/aws"
"github.com/projectcalico/felix/bpf"
"github.com/projectcalico/felix/bpf/conntrack"
"github.com/projectcalico/felix/bpf/tc"
"github.com/projectcalico/felix/config"
extdataplane "github.com/projectcalico/felix/dataplane/external"
"github.com/projectcalico/felix/dataplane/inactive"
intdataplane "github.com/projectcalico/felix/dataplane/linux"
"github.com/projectcalico/felix/idalloc"
"github.com/projectcalico/felix/ifacemonitor"
"github.com/projectcalico/felix/ipsets"
"github.com/projectcalico/felix/logutils"
"github.com/projectcalico/felix/markbits"
"github.com/projectcalico/felix/rules"
"github.com/projectcalico/felix/wireguard"
"github.com/projectcalico/libcalico-go/lib/health"
)
func StartDataplaneDriver(configParams *config.Config,
healthAggregator *health.HealthAggregator,
configChangedRestartCallback func(),
fatalErrorCallback func(error),
k8sClientSet *kubernetes.Clientset) (DataplaneDriver, *exec.Cmd) {
if !configParams.IsLeader() {
// Return an inactive dataplane, since we're not the leader.
log.Info("Not the leader, using an inactive dataplane")
return &inactive.InactiveDataplane{}, nil
}
if configParams.UseInternalDataplaneDriver {
log.Info("Using internal (linux) dataplane driver.")
// If kube ipvs interface is present, enable ipvs support. In BPF mode, we bypass kube-proxy so IPVS
// is irrelevant.
kubeIPVSSupportEnabled := false
if ifacemonitor.IsInterfacePresent(intdataplane.KubeIPVSInterface) {
if configParams.BPFEnabled {
log.Info("kube-proxy IPVS device found but we're in BPF mode, ignoring.")
} else {
kubeIPVSSupportEnabled = true
log.Info("Kube-proxy in ipvs mode, enabling felix kube-proxy ipvs support.")
}
}
if configChangedRestartCallback == nil || fatalErrorCallback == nil {
log.Panic("Starting dataplane with nil callback func.")
}
allowedMarkBits := configParams.IptablesMarkMask
if configParams.BPFEnabled {
// In BPF mode, the BPF programs use mark bits that are not configurable. Make sure that those
// bits are covered by our allowed mask.
if allowedMarkBits&tc.MarksMask != tc.MarksMask {
log.WithFields(log.Fields{
"Name": "felix-iptables",
"MarkMask": allowedMarkBits,
"RequiredBPFBits": tc.MarksMask,
}).Panic("IptablesMarkMask doesn't cover bits that are used (unconditionally) by eBPF mode.")
}
allowedMarkBits ^= allowedMarkBits & tc.MarksMask
log.WithField("updatedBits", allowedMarkBits).Info(
"Removed BPF program bits from available mark bits.")
}
markBitsManager := markbits.NewMarkBitsManager(allowedMarkBits, "felix-iptables")
// Allocate mark bits; only the accept, scratch-0 and Wireguard bits are used in BPF mode so we
// avoid allocating the others to minimize the number of bits in use.
// The accept bit is a long-lived bit used to communicate between chains.
var markAccept, markPass, markScratch0, markScratch1, markWireguard, markEndpointNonCaliEndpoint uint32
markAccept, _ = markBitsManager.NextSingleBitMark()
if !configParams.BPFEnabled {
// The pass bit is used to communicate from a policy chain up to the endpoint chain.
markPass, _ = markBitsManager.NextSingleBitMark()
}
// Scratch bits are short-lived bits used for calculating multi-rule results.
markScratch0, _ = markBitsManager.NextSingleBitMark()
if !configParams.BPFEnabled {
markScratch1, _ = markBitsManager.NextSingleBitMark()
}
if configParams.WireguardEnabled {
log.Info("Wireguard enabled, allocating a mark bit")
markWireguard, _ = markBitsManager.NextSingleBitMark()
if markWireguard == 0 {
log.WithFields(log.Fields{
"Name": "felix-iptables",
"MarkMask": allowedMarkBits,
}).Panic("Failed to allocate a mark bit for wireguard, not enough mark bits available.")
}
}
// markPass and the scratch-1 bits are only used in iptables mode.
if markAccept == 0 || markScratch0 == 0 || !configParams.BPFEnabled && (markPass == 0 || markScratch1 == 0) {
log.WithFields(log.Fields{
"Name": "felix-iptables",
"MarkMask": allowedMarkBits,
}).Panic("Not enough mark bits available.")
}
// Mark bits for endpoint mark. Currently Felix takes the remaining bits from the available mark mask.
markEndpointMark, allocated := markBitsManager.NextBlockBitsMark(markBitsManager.AvailableMarkBitCount())
if kubeIPVSSupportEnabled {
if allocated == 0 {
log.WithFields(log.Fields{
"Name": "felix-iptables",
"MarkMask": allowedMarkBits,
}).Panic("Not enough mark bits available for endpoint mark.")
}
// Take the lowest bit position (position 1) of the endpoint mark mask; it is reserved for non-Calico endpoints.
markEndpointNonCaliEndpoint = uint32(1) << uint(bits.TrailingZeros32(markEndpointMark))
}
log.WithFields(log.Fields{
"acceptMark": markAccept,
"passMark": markPass,
"scratch0Mark": markScratch0,
"scratch1Mark": markScratch1,
"endpointMark": markEndpointMark,
"endpointMarkNonCali": markEndpointNonCaliEndpoint,
}).Info("Calculated iptables mark bits")
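// Rough illustration only (assuming the mark-bits manager hands out the lowest
// free bit first): with the default IptablesMarkMask of 0xffff0000 and BPF
// disabled, the allocations above would come out as accept=0x10000,
// pass=0x20000, scratch0=0x40000, scratch1=0x80000, wireguard=0x100000 (when
// enabled), with the endpoint mark taking the remaining high bits as one
// contiguous block.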
// Create a routing table manager. There are certain components that should take specific indices in the range
// to simplify table tidy-up.
routeTableIndexAllocator := idalloc.NewIndexAllocator(configParams.RouteTableRange)
// Always allocate the wireguard table index (even when not enabled). This ensures we can tidy up entries
// if wireguard is disabled after being previously enabled.
var wireguardEnabled bool
var wireguardTableIndex int
if idx, err := routeTableIndexAllocator.GrabIndex(); err == nil {
log.Debugf("Assigned wireguard table index: %d", idx)
wireguardEnabled = configParams.WireguardEnabled
wireguardTableIndex = idx
} else {
log.WithError(err).Warning("Unable to assign table index for wireguard")
}
// If wireguard is enabled, update the failsafe ports to include the wireguard port.
failsafeInboundHostPorts := configParams.FailsafeInboundHostPorts
failsafeOutboundHostPorts := configParams.FailsafeOutboundHostPorts
if configParams.WireguardEnabled {
var found = false
for _, i := range failsafeInboundHostPorts {
if i.Port == uint16(configParams.WireguardListeningPort) && i.Protocol == "udp" {
log.WithFields(log.Fields{
"net": i.Net,
"port": i.Port,
"protocol": i.Protocol,
}).Debug("FailsafeInboundHostPorts is already configured for wireguard")
found = true
break
}
}
if !found {
failsafeInboundHostPorts = make([]config.ProtoPort, len(configParams.FailsafeInboundHostPorts)+1)
copy(failsafeInboundHostPorts, configParams.FailsafeInboundHostPorts)
log.Debug("Adding permissive FailsafeInboundHostPorts for wireguard")
failsafeInboundHostPorts[len(configParams.FailsafeInboundHostPorts)] = config.ProtoPort{
Port: uint16(configParams.WireguardListeningPort),
Protocol: "udp",
}
}
found = false
for _, i := range failsafeOutboundHostPorts {
if i.Port == uint16(configParams.WireguardListeningPort) && i.Protocol == "udp" {
log.WithFields(log.Fields{
"net": i.Net,
"port": i.Port,
"protocol": i.Protocol,
}).Debug("FailsafeOutboundHostPorts is already configured for wireguard")
found = true
break
}
}
if !found {
failsafeOutboundHostPorts = make([]config.ProtoPort, len(configParams.FailsafeOutboundHostPorts)+1)
copy(failsafeOutboundHostPorts, configParams.FailsafeOutboundHostPorts)
log.Debug("Adding permissive FailsafeOutboundHostPorts for wireguard")
failsafeOutboundHostPorts[len(configParams.FailsafeOutboundHostPorts)] = config.ProtoPort{
Port: uint16(configParams.WireguardListeningPort),
Protocol: "udp",
}
}
}
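// For example, with the default WireguardListeningPort of 51820, a udp:51820
// entry is appended to both failsafe lists here unless the operator has
// already configured one.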
dpConfig := intdataplane.Config{
Hostname: configParams.FelixHostname,
IfaceMonitorConfig: ifacemonitor.Config{
InterfaceExcludes: configParams.InterfaceExclude,
ResyncInterval: configParams.InterfaceRefreshInterval,
},
RulesConfig: rules.Config{
WorkloadIfacePrefixes: configParams.InterfacePrefixes(),
IPSetConfigV4: ipsets.NewIPVersionConfig(
ipsets.IPFamilyV4,
rules.IPSetNamePrefix,
rules.AllHistoricIPSetNamePrefixes,
rules.LegacyV4IPSetNames,
),
IPSetConfigV6: ipsets.NewIPVersionConfig(
ipsets.IPFamilyV6,
rules.IPSetNamePrefix,
rules.AllHistoricIPSetNamePrefixes,
nil,
),
KubeNodePortRanges: configParams.KubeNodePortRanges,
KubeIPVSSupportEnabled: kubeIPVSSupportEnabled,
OpenStackSpecialCasesEnabled: configParams.OpenstackActive(),
OpenStackMetadataIP: net.ParseIP(configParams.MetadataAddr),
OpenStackMetadataPort: uint16(configParams.MetadataPort),
IptablesMarkAccept: markAccept,
IptablesMarkPass: markPass,
IptablesMarkScratch0: markScratch0,
IptablesMarkScratch1: markScratch1,
IptablesMarkEndpoint: markEndpointMark,
IptablesMarkNonCaliEndpoint: markEndpointNonCaliEndpoint,
VXLANEnabled: configParams.VXLANEnabled,
VXLANPort: configParams.VXLANPort,
VXLANVNI: configParams.VXLANVNI,
IPIPEnabled: configParams.IpInIpEnabled,
IPIPTunnelAddress: configParams.IpInIpTunnelAddr,
VXLANTunnelAddress: configParams.IPv4VXLANTunnelAddr,
AllowVXLANPacketsFromWorkloads: configParams.AllowVXLANPacketsFromWorkloads,
AllowIPIPPacketsFromWorkloads: configParams.AllowIPIPPacketsFromWorkloads,
WireguardEnabled: configParams.WireguardEnabled,
WireguardInterfaceName: configParams.WireguardInterfaceName,
WireguardIptablesMark: markWireguard,
WireguardListeningPort: configParams.WireguardListeningPort,
RouteSource: configParams.RouteSource,
IptablesLogPrefix: configParams.LogPrefix,
EndpointToHostAction: configParams.DefaultEndpointToHostAction,
IptablesFilterAllowAction: configParams.IptablesFilterAllowAction,
IptablesMangleAllowAction: configParams.IptablesMangleAllowAction,
FailsafeInboundHostPorts: failsafeInboundHostPorts,
FailsafeOutboundHostPorts: failsafeOutboundHostPorts,
DisableConntrackInvalid: configParams.DisableConntrackInvalidCheck,
NATPortRange: configParams.NATPortRange,
IptablesNATOutgoingInterfaceFilter: configParams.IptablesNATOutgoingInterfaceFilter,
NATOutgoingAddress: configParams.NATOutgoingAddress,
BPFEnabled: configParams.BPFEnabled,
ServiceLoopPrevention: configParams.ServiceLoopPrevention,
},
Wireguard: wireguard.Config{
Enabled: wireguardEnabled,
ListeningPort: configParams.WireguardListeningPort,
FirewallMark: int(markWireguard),
RoutingRulePriority: configParams.WireguardRoutingRulePriority,
RoutingTableIndex: wireguardTableIndex,
InterfaceName: configParams.WireguardInterfaceName,
MTU: configParams.WireguardMTU,
RouteSource: configParams.RouteSource,
},
IPIPMTU: configParams.IpInIpMtu,
VXLANMTU: configParams.VXLANMTU,
VXLANPort: configParams.VXLANPort,
IptablesBackend: configParams.IptablesBackend,
IptablesRefreshInterval: configParams.IptablesRefreshInterval,
RouteRefreshInterval: configParams.RouteRefreshInterval,
DeviceRouteSourceAddress: configParams.DeviceRouteSourceAddress,
DeviceRouteProtocol: configParams.DeviceRouteProtocol,
RemoveExternalRoutes: configParams.RemoveExternalRoutes,
IPSetsRefreshInterval: configParams.IpsetsRefreshInterval,
IptablesPostWriteCheckInterval: configParams.IptablesPostWriteCheckIntervalSecs,
IptablesInsertMode: configParams.ChainInsertMode,
IptablesLockFilePath: configParams.IptablesLockFilePath,
IptablesLockTimeout: configParams.IptablesLockTimeoutSecs,
IptablesLockProbeInterval: configParams.IptablesLockProbeIntervalMillis,
MaxIPSetSize: configParams.MaxIpsetSize,
IPv6Enabled: configParams.Ipv6Support,
StatusReportingInterval: configParams.ReportingIntervalSecs,
XDPRefreshInterval: configParams.XDPRefreshInterval,
NetlinkTimeout: configParams.NetlinkTimeoutSecs,
ConfigChangedRestartCallback: configChangedRestartCallback,
FatalErrorRestartCallback: fatalErrorCallback,
PostInSyncCallback: func() {
// The initial resync uses a lot of scratch space so now is
// a good time to force a GC and return any RAM that we can.
debug.FreeOSMemory()
if configParams.DebugMemoryProfilePath == "" {
return
}
logutils.DumpHeapMemoryProfile(configParams.DebugMemoryProfilePath)
},
HealthAggregator: healthAggregator,
DebugSimulateDataplaneHangAfter: configParams.DebugSimulateDataplaneHangAfter,
ExternalNodesCidrs: configParams.ExternalNodesCIDRList,
SidecarAccelerationEnabled: configParams.SidecarAccelerationEnabled,
BPFEnabled: configParams.BPFEnabled,
BPFDisableUnprivileged: configParams.BPFDisableUnprivileged,
BPFConnTimeLBEnabled: configParams.BPFConnectTimeLoadBalancingEnabled,
BPFKubeProxyIptablesCleanupEnabled: configParams.BPFKubeProxyIptablesCleanupEnabled,
BPFLogLevel: configParams.BPFLogLevel,
BPFExtToServiceConnmark: configParams.BPFExtToServiceConnmark,
BPFDataIfacePattern: configParams.BPFDataIfacePattern,
BPFCgroupV2: configParams.DebugBPFCgroupV2,
BPFMapRepin: configParams.DebugBPFMapRepinEnabled,
KubeProxyMinSyncPeriod: configParams.BPFKubeProxyMinSyncPeriod,
KubeProxyEndpointSlicesEnabled: configParams.BPFKubeProxyEndpointSlicesEnabled,
XDPEnabled: configParams.XDPEnabled,
XDPAllowGeneric: configParams.GenericXDPEnabled,
BPFConntrackTimeouts: conntrack.DefaultTimeouts(), // FIXME make timeouts configurable
RouteTableManager: routeTableIndexAllocator,
MTUIfacePattern: configParams.MTUIfacePattern,
KubeClientSet: k8sClientSet,
FeatureDetectOverrides: configParams.FeatureDetectOverride,
RouteSource: configParams.RouteSource,
KubernetesProvider: configParams.KubernetesProvider(),
}
if configParams.BPFExternalServiceMode == "dsr" {
dpConfig.BPFNodePortDSREnabled = true
}
intDP := intdataplane.NewIntDataplaneDriver(dpConfig)
intDP.Start()
// Set source-destination-check on AWS EC2 instance.
if configParams.AWSSrcDstCheck != string(apiv3.AWSSrcDstCheckOptionDoNothing) {
c := &clock.RealClock{}
updater := aws.NewEC2SrcDstCheckUpdater()
go aws.WaitForEC2SrcDstCheckUpdate(configParams.AWSSrcDstCheck, healthAggregator, updater, c)
}
return intDP, nil
} else {
log.WithField("driver", configParams.DataplaneDriver).Info(
"Using external dataplane driver.")
return extdataplane.StartExtDataplaneDriver(configParams.DataplaneDriver)
}
}
func SupportsBPF() error {
return bpf.SupportsBPFDataplane()
}
func ServePrometheusMetrics(configParams *config.Config) {
log.WithFields(log.Fields{
"host": configParams.PrometheusMetricsHost,
"port": configParams.PrometheusMetricsPort,
}).Info("Starting prometheus metrics endpoint")
if configParams.PrometheusGoMetricsEnabled && configParams.PrometheusProcessMetricsEnabled && configParams.PrometheusWireGuardMetricsEnabled {
log.Info("Including Golang, Process and WireGuard metrics")
} else {
if !configParams.PrometheusGoMetricsEnabled {
log.Info("Discarding Golang metrics")
prometheus.Unregister(prometheus.NewGoCollector())
}
if !configParams.PrometheusProcessMetricsEnabled {
log.Info("Discarding process metrics")
prometheus.Unregister(prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{}))
}
if !configParams.PrometheusWireGuardMetricsEnabled {
log.Info("Discarding WireGuard metrics")
prometheus.Unregister(wireguard.MustNewWireguardMetrics())
}
}
http.Handle("/metrics", promhttp.Handler())
addr := net.JoinHostPort(configParams.PrometheusMetricsHost, strconv.Itoa(configParams.PrometheusMetricsPort))
for {
err := http.ListenAndServe(addr, nil)
log.WithError(err).Error(
"Prometheus metrics endpoint failed, trying to restart it...")
time.Sleep(1 * time.Second)
}
}
 | 1 | 19,213 | I would consider extracting this logic into a helper function to make it a bit more readable, but that's just my opinion. | projectcalico-felix | c
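A minimal sketch of the extraction the reviewer suggests (the helper name is hypothetical; the provider constants and the WireguardHostEncryptionEnabled flag come from the patch above, and the function would sit next to StartDataplaneDriver where the config package is already imported):

func hostEncryptionEnabled(configParams *config.Config) bool {
	switch configParams.KubernetesProvider() {
	case config.ProviderAKS:
		// AKS currently requires host-network traffic to be encrypted.
		return true
	case config.ProviderEKS:
		// On EKS, host encryption is optional and driven by configuration.
		return configParams.WireguardHostEncryptionEnabled
	default:
		return false
	}
}

The Wireguard config block would then read HostEncryptionEnabled: hostEncryptionEnabled(configParams), keeping the provider-specific reasoning in one place.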
@@ -252,6 +252,7 @@ type Config struct {
DebugSimulateCalcGraphHangAfter time.Duration `config:"seconds;0"`
DebugSimulateDataplaneHangAfter time.Duration `config:"seconds;0"`
DebugPanicAfter time.Duration `config:"seconds;0"`
+ DebugSimulateDataRace bool `config:"bool;false"`
// Configure where Felix gets its routing information.
// - workloadIPs: use workload endpoints to construct routes. | 1 | // Copyright (c) 2020 Tigera, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package config
import (
"errors"
"fmt"
"net"
"os"
"reflect"
"regexp"
"strconv"
"strings"
"time"
log "github.com/sirupsen/logrus"
"github.com/projectcalico/libcalico-go/lib/apiconfig"
"github.com/projectcalico/libcalico-go/lib/names"
"github.com/projectcalico/libcalico-go/lib/numorstring"
"github.com/projectcalico/felix/idalloc"
)
var (
// RegexpIfaceElemRegexp matches an individual element in the overall interface list;
// assumes the value represents a regular expression and is marked by '/' at the start
// and end and cannot have spaces
RegexpIfaceElemRegexp = regexp.MustCompile(`^\/[^\s]+\/$`)
// NonRegexpIfaceElemRegexp matches an individual element in the overall interface list;
// assumes the value is between 1-15 chars long and contains only alphanumeric characters, '-' or '_'
NonRegexpIfaceElemRegexp = regexp.MustCompile(`^[a-zA-Z0-9_-]{1,15}$`)
IfaceListRegexp = regexp.MustCompile(`^[a-zA-Z0-9_-]{1,15}(,[a-zA-Z0-9_-]{1,15})*$`)
AuthorityRegexp = regexp.MustCompile(`^[^:/]+:\d+$`)
HostnameRegexp = regexp.MustCompile(`^[a-zA-Z0-9_.-]+$`)
StringRegexp = regexp.MustCompile(`^.*$`)
IfaceParamRegexp = regexp.MustCompile(`^[a-zA-Z0-9:._+-]{1,15}$`)
// Host addresses have to be valid IPv4/IPv6 addresses or strings up to 64 characters.
HostAddressRegexp = regexp.MustCompile(`^[a-zA-Z0-9:._+-]{1,64}$`)
)
const (
maxUint = ^uint(0)
maxInt = int(maxUint >> 1)
minInt = -maxInt - 1
)
// Source of a config value. Values from higher-numbered sources override
// those from lower-numbered sources. Note: some parameters (such as those
// needed to connect to the datastore) can only be set from a local source.
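// For example, a per-host FelixConfiguration value from the datastore overrides
// the global one, but both lose to the same parameter set in the config file,
// via a FELIX_* environment variable, or by an internal override.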
type Source uint8
const (
Default = iota
DatastoreGlobal
DatastorePerHost
ConfigFile
EnvironmentVariable
InternalOverride
)
var SourcesInDescendingOrder = []Source{InternalOverride, EnvironmentVariable, ConfigFile, DatastorePerHost, DatastoreGlobal}
func (source Source) String() string {
switch source {
case Default:
return "<default>"
case DatastoreGlobal:
return "datastore (global)"
case DatastorePerHost:
return "datastore (per-host)"
case ConfigFile:
return "config file"
case EnvironmentVariable:
return "environment variable"
case InternalOverride:
return "internal override"
}
return fmt.Sprintf("<unknown(%v)>", uint8(source))
}
func (source Source) Local() bool {
switch source {
case Default, ConfigFile, EnvironmentVariable, InternalOverride:
return true
default:
return false
}
}
// Config contains the best, parsed config values loaded from the various sources.
// We use tags to control the parsing and validation.
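// Each tag has the shape `config:"<type>(<args>);<default>;<options>"`; for
// example `config:"int;1420;non-zero"` parses the raw value as an int,
// defaults it to 1420 and refuses to let it be forced to its zero value,
// while options such as local, die-on-fail or skip-default-validation adjust
// where the value may come from and how it is validated.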
type Config struct {
// Configuration parameters.
UseInternalDataplaneDriver bool `config:"bool;true"`
DataplaneDriver string `config:"file(must-exist,executable);calico-iptables-plugin;non-zero,die-on-fail,skip-default-validation"`
// Wireguard configuration
WireguardEnabled bool `config:"bool;false"`
WireguardListeningPort int `config:"int;51820"`
WireguardRoutingRulePriority int `config:"int;99"`
WireguardInterfaceName string `config:"iface-param;wireguard.cali;non-zero"`
WireguardMTU int `config:"int;1420;non-zero"`
BPFEnabled bool `config:"bool;false"`
BPFDisableUnprivileged bool `config:"bool;true"`
BPFLogLevel string `config:"oneof(off,info,debug);off;non-zero"`
BPFDataIfacePattern *regexp.Regexp `config:"regexp;^(en.*|eth.*|tunl0$)"`
BPFConnectTimeLoadBalancingEnabled bool `config:"bool;true"`
BPFExternalServiceMode string `config:"oneof(tunnel,dsr);tunnel;non-zero"`
BPFKubeProxyIptablesCleanupEnabled bool `config:"bool;true"`
BPFKubeProxyMinSyncPeriod time.Duration `config:"seconds;1"`
BPFKubeProxyEndpointSlicesEnabled bool `config:"bool;false"`
// DebugBPFCgroupV2 controls the cgroup v2 path that we apply the connect-time load balancer to. Most distros
// are configured for cgroup v1, which prevents all but the root cgroup v2 from working, so this is only useful
// for development right now.
DebugBPFCgroupV2 string `config:"string;;local"`
// DebugBPFMapRepinEnabled can be used to prevent Felix from repinning its BPF maps at startup. This is useful for
// testing with multiple Felix instances running on one host.
DebugBPFMapRepinEnabled bool `config:"bool;true;local"`
DatastoreType string `config:"oneof(kubernetes,etcdv3);etcdv3;non-zero,die-on-fail,local"`
FelixHostname string `config:"hostname;;local,non-zero"`
EtcdAddr string `config:"authority;127.0.0.1:2379;local"`
EtcdScheme string `config:"oneof(http,https);http;local"`
EtcdKeyFile string `config:"file(must-exist);;local"`
EtcdCertFile string `config:"file(must-exist);;local"`
EtcdCaFile string `config:"file(must-exist);;local"`
EtcdEndpoints []string `config:"endpoint-list;;local"`
TyphaAddr string `config:"authority;;local"`
TyphaK8sServiceName string `config:"string;;local"`
TyphaK8sNamespace string `config:"string;kube-system;non-zero,local"`
TyphaReadTimeout time.Duration `config:"seconds;30;local"`
TyphaWriteTimeout time.Duration `config:"seconds;10;local"`
// Client-side TLS config for Felix's communication with Typha. If any of these are
// specified, they _all_ must be - except that either TyphaCN or TyphaURISAN may be left
// unset. Felix will then initiate a secure (TLS) connection to Typha. Typha must present
// a certificate signed by a CA in TyphaCAFile, and with CN matching TyphaCN or URI SAN
// matching TyphaURISAN.
TyphaKeyFile string `config:"file(must-exist);;local"`
TyphaCertFile string `config:"file(must-exist);;local"`
TyphaCAFile string `config:"file(must-exist);;local"`
TyphaCN string `config:"string;;local"`
TyphaURISAN string `config:"string;;local"`
Ipv6Support bool `config:"bool;true"`
IptablesBackend string `config:"oneof(legacy,nft,auto);legacy"`
RouteRefreshInterval time.Duration `config:"seconds;90"`
DeviceRouteSourceAddress net.IP `config:"ipv4;"`
DeviceRouteProtocol int `config:"int;3"`
RemoveExternalRoutes bool `config:"bool;true"`
IptablesRefreshInterval time.Duration `config:"seconds;90"`
IptablesPostWriteCheckIntervalSecs time.Duration `config:"seconds;1"`
IptablesLockFilePath string `config:"file;/run/xtables.lock"`
IptablesLockTimeoutSecs time.Duration `config:"seconds;0"`
IptablesLockProbeIntervalMillis time.Duration `config:"millis;50"`
IpsetsRefreshInterval time.Duration `config:"seconds;10"`
MaxIpsetSize int `config:"int;1048576;non-zero"`
XDPRefreshInterval time.Duration `config:"seconds;90"`
PolicySyncPathPrefix string `config:"file;;"`
NetlinkTimeoutSecs time.Duration `config:"seconds;10"`
MetadataAddr string `config:"hostname;127.0.0.1;die-on-fail"`
MetadataPort int `config:"int(0,65535);8775;die-on-fail"`
OpenstackRegion string `config:"region;;die-on-fail"`
InterfacePrefix string `config:"iface-list;cali;non-zero,die-on-fail"`
InterfaceExclude []*regexp.Regexp `config:"iface-list-regexp;kube-ipvs0"`
ChainInsertMode string `config:"oneof(insert,append);insert;non-zero,die-on-fail"`
DefaultEndpointToHostAction string `config:"oneof(DROP,RETURN,ACCEPT);DROP;non-zero,die-on-fail"`
IptablesFilterAllowAction string `config:"oneof(ACCEPT,RETURN);ACCEPT;non-zero,die-on-fail"`
IptablesMangleAllowAction string `config:"oneof(ACCEPT,RETURN);ACCEPT;non-zero,die-on-fail"`
LogPrefix string `config:"string;calico-packet"`
LogFilePath string `config:"file;/var/log/calico/felix.log;die-on-fail"`
LogSeverityFile string `config:"oneof(DEBUG,INFO,WARNING,ERROR,FATAL);INFO"`
LogSeverityScreen string `config:"oneof(DEBUG,INFO,WARNING,ERROR,FATAL);INFO"`
LogSeveritySys string `config:"oneof(DEBUG,INFO,WARNING,ERROR,FATAL);INFO"`
VXLANEnabled bool `config:"bool;false"`
VXLANPort int `config:"int;4789"`
VXLANVNI int `config:"int;4096"`
VXLANMTU int `config:"int;1410;non-zero"`
IPv4VXLANTunnelAddr net.IP `config:"ipv4;"`
VXLANTunnelMACAddr string `config:"string;"`
IpInIpEnabled bool `config:"bool;false"`
IpInIpMtu int `config:"int;1440;non-zero"`
IpInIpTunnelAddr net.IP `config:"ipv4;"`
ReportingIntervalSecs time.Duration `config:"seconds;30"`
ReportingTTLSecs time.Duration `config:"seconds;90"`
EndpointReportingEnabled bool `config:"bool;false"`
EndpointReportingDelaySecs time.Duration `config:"seconds;1"`
IptablesMarkMask uint32 `config:"mark-bitmask;0xffff0000;non-zero,die-on-fail"`
DisableConntrackInvalidCheck bool `config:"bool;false"`
HealthEnabled bool `config:"bool;false"`
HealthPort int `config:"int(0,65535);9099"`
HealthHost string `config:"host-address;localhost"`
PrometheusMetricsEnabled bool `config:"bool;false"`
PrometheusMetricsHost string `config:"host-address;"`
PrometheusMetricsPort int `config:"int(0,65535);9091"`
PrometheusGoMetricsEnabled bool `config:"bool;true"`
PrometheusProcessMetricsEnabled bool `config:"bool;true"`
FailsafeInboundHostPorts []ProtoPort `config:"port-list;tcp:22,udp:68,tcp:179,tcp:2379,tcp:2380,tcp:6443,tcp:6666,tcp:6667;die-on-fail"`
FailsafeOutboundHostPorts []ProtoPort `config:"port-list;udp:53,udp:67,tcp:179,tcp:2379,tcp:2380,tcp:6443,tcp:6666,tcp:6667;die-on-fail"`
KubeNodePortRanges []numorstring.Port `config:"portrange-list;30000:32767"`
NATPortRange numorstring.Port `config:"portrange;"`
NATOutgoingAddress net.IP `config:"ipv4;"`
UsageReportingEnabled bool `config:"bool;true"`
UsageReportingInitialDelaySecs time.Duration `config:"seconds;300"`
UsageReportingIntervalSecs time.Duration `config:"seconds;86400"`
ClusterGUID string `config:"string;baddecaf"`
ClusterType string `config:"string;"`
CalicoVersion string `config:"string;"`
ExternalNodesCIDRList []string `config:"cidr-list;;die-on-fail"`
DebugMemoryProfilePath string `config:"file;;"`
DebugCPUProfilePath string `config:"file;/tmp/felix-cpu-<timestamp>.pprof;"`
DebugDisableLogDropping bool `config:"bool;false"`
DebugSimulateCalcGraphHangAfter time.Duration `config:"seconds;0"`
DebugSimulateDataplaneHangAfter time.Duration `config:"seconds;0"`
DebugPanicAfter time.Duration `config:"seconds;0"`
// Configure where Felix gets its routing information.
// - workloadIPs: use workload endpoints to construct routes.
// - calicoIPAM: use IPAM data to construct routes.
RouteSource string `config:"oneof(WorkloadIPs,CalicoIPAM);CalicoIPAM"`
RouteTableRange idalloc.IndexRange `config:"route-table-range;1-250;die-on-fail"`
IptablesNATOutgoingInterfaceFilter string `config:"iface-param;"`
SidecarAccelerationEnabled bool `config:"bool;false"`
XDPEnabled bool `config:"bool;true"`
GenericXDPEnabled bool `config:"bool;false"`
// State tracking.
// internalOverrides contains our highest priority config source, generated from internal constraints
// such as kernel version support.
internalOverrides map[string]string
// sourceToRawConfig maps each source to the set of config that was given to us via UpdateFrom.
sourceToRawConfig map[Source]map[string]string
// rawValues maps keys to the current highest-priority raw value.
rawValues map[string]string
// Err holds the most recent error from a config update.
Err error
loadClientConfigFromEnvironment func() (*apiconfig.CalicoAPIConfig, error)
useNodeResourceUpdates bool
}
type ProtoPort struct {
Protocol string
Port uint16
}
// UpdateFrom parses and merges the rawData from one particular source into this config object.
// If there is a config value already loaded from a higher-priority source, then
// the new value will be ignored (after validation).
func (config *Config) UpdateFrom(rawData map[string]string, source Source) (changed bool, err error) {
log.Infof("Merging in config from %v: %v", source, rawData)
// Defensively take a copy of the raw data, in case we've been handed
// a mutable map by mistake.
rawDataCopy := make(map[string]string)
for k, v := range rawData {
if v == "" {
log.WithFields(log.Fields{
"name": k,
"source": source,
}).Info("Ignoring empty configuration parameter. Use value 'none' if " +
"your intention is to explicitly disable the default value.")
continue
}
rawDataCopy[k] = v
}
config.sourceToRawConfig[source] = rawDataCopy
changed, err = config.resolve()
return
}
func (c *Config) InterfacePrefixes() []string {
return strings.Split(c.InterfacePrefix, ",")
}
func (config *Config) OpenstackActive() bool {
if strings.Contains(strings.ToLower(config.ClusterType), "openstack") {
// OpenStack is explicitly known to be present. Newer versions of the OpenStack plugin
// set this flag.
log.Debug("Cluster type contains OpenStack")
return true
}
// If we get here, either OpenStack isn't present or we're running against an old version
// of the OpenStack plugin, which doesn't set the flag. Use heuristics based on the
// presence of the OpenStack-related parameters.
if config.MetadataAddr != "" && config.MetadataAddr != "127.0.0.1" {
log.Debug("OpenStack metadata IP set to non-default, assuming OpenStack active")
return true
}
if config.MetadataPort != 0 && config.MetadataPort != 8775 {
log.Debug("OpenStack metadata port set to non-default, assuming OpenStack active")
return true
}
for _, prefix := range config.InterfacePrefixes() {
if prefix == "tap" {
log.Debug("Interface prefix list contains 'tap', assuming OpenStack")
return true
}
}
log.Debug("No evidence this is an OpenStack deployment; disabling OpenStack special-cases")
return false
}
func (config *Config) resolve() (changed bool, err error) {
newRawValues := make(map[string]string)
// Map from lower-case version of name to the highest-priority source found so far.
// We use the lower-case version of the name since we can calculate it both for
// expected and "raw" parameters, which may be used by plugins.
nameToSource := make(map[string]Source)
for _, source := range SourcesInDescendingOrder {
valueLoop:
for rawName, rawValue := range config.sourceToRawConfig[source] {
lowerCaseName := strings.ToLower(rawName)
currentSource := nameToSource[lowerCaseName]
param, ok := knownParams[lowerCaseName]
if !ok {
if source >= currentSource {
// Stash the raw value in case it's useful for an external
// dataplane driver. Use the raw name since the driver may
// want it.
newRawValues[rawName] = rawValue
nameToSource[lowerCaseName] = source
}
log.WithField("raw name", rawName).Info(
"Ignoring unknown config param.")
continue valueLoop
}
metadata := param.GetMetadata()
name := metadata.Name
if metadata.Local && !source.Local() {
log.Warningf("Ignoring local-only configuration for %v from %v",
name, source)
continue valueLoop
}
log.Infof("Parsing value for %v: %v (from %v)",
name, rawValue, source)
var value interface{}
if strings.ToLower(rawValue) == "none" {
// Special case: we allow a value of "none" to force the value to
// the zero value for a field. The zero value often differs from
// the default value. Typically, the zero value means "turn off
// the feature".
if metadata.NonZero {
err = errors.New("non-zero field cannot be set to none")
log.Errorf(
"Failed to parse value for %v: %v from source %v. %v",
name, rawValue, source, err)
config.Err = err
return
}
value = metadata.ZeroValue
log.Infof("Value set to 'none', replacing with zero-value: %#v.",
value)
} else {
value, err = param.Parse(rawValue)
if err != nil {
logCxt := log.WithError(err).WithField("source", source)
if metadata.DieOnParseFailure {
logCxt.Error("Invalid (required) config value.")
config.Err = err
return
} else {
logCxt.WithField("default", metadata.Default).Warn(
"Replacing invalid value with default")
value = metadata.Default
err = nil
}
}
}
log.Infof("Parsed value for %v: %v (from %v)",
name, value, source)
if source < currentSource {
log.Infof("Skipping config value for %v from %v; "+
"already have a value from %v", name,
source, currentSource)
continue
}
field := reflect.ValueOf(config).Elem().FieldByName(name)
field.Set(reflect.ValueOf(value))
newRawValues[name] = rawValue
nameToSource[lowerCaseName] = source
}
}
changed = !reflect.DeepEqual(newRawValues, config.rawValues)
config.rawValues = newRawValues
return
}
func (config *Config) setBy(name string, source Source) bool {
_, set := config.sourceToRawConfig[source][name]
return set
}
func (config *Config) setByConfigFileOrEnvironment(name string) bool {
return config.setBy(name, ConfigFile) || config.setBy(name, EnvironmentVariable)
}
func (config *Config) DatastoreConfig() apiconfig.CalicoAPIConfig {
// We want Felix's datastore connection to be fully configurable using the same
// CALICO_XXX_YYY (or just XXX_YYY) environment variables that work for any libcalico-go
// client - for both the etcdv3 and KDD cases. However, for the etcd case, Felix has for a
// long time supported FELIX_XXXYYY environment variables, and we want those to keep working
// too.
// To achieve that, first build a CalicoAPIConfig using libcalico-go's
// LoadClientConfigFromEnvironment - which means incorporating defaults and CALICO_XXX_YYY
// and XXX_YYY variables.
cfg, err := config.loadClientConfigFromEnvironment()
if err != nil {
log.WithError(err).Panic("Failed to create datastore config")
}
// Now allow FELIX_XXXYYY variables or XxxYyy config file settings to override that, in the
	// etcd case. Note that the etcd options are set even if the DatastoreType isn't etcdv3.
	// This allows the user to rely on the default DatastoreType being etcdv3 and still be able
	// to configure the other etcdv3 options. As of the time of this code change, the etcd options
	// have no effect if the DatastoreType is not etcdv3.
// Datastore type, either etcdv3 or kubernetes
if config.setByConfigFileOrEnvironment("DatastoreType") {
log.Infof("Overriding DatastoreType from felix config to %s", config.DatastoreType)
if config.DatastoreType == string(apiconfig.EtcdV3) {
cfg.Spec.DatastoreType = apiconfig.EtcdV3
} else if config.DatastoreType == string(apiconfig.Kubernetes) {
cfg.Spec.DatastoreType = apiconfig.Kubernetes
}
}
// Endpoints.
if config.setByConfigFileOrEnvironment("EtcdEndpoints") && len(config.EtcdEndpoints) > 0 {
log.Infof("Overriding EtcdEndpoints from felix config to %s", config.EtcdEndpoints)
cfg.Spec.EtcdEndpoints = strings.Join(config.EtcdEndpoints, ",")
} else if config.setByConfigFileOrEnvironment("EtcdAddr") {
etcdEndpoints := config.EtcdScheme + "://" + config.EtcdAddr
log.Infof("Overriding EtcdEndpoints from felix config to %s", etcdEndpoints)
cfg.Spec.EtcdEndpoints = etcdEndpoints
}
// TLS.
if config.setByConfigFileOrEnvironment("EtcdKeyFile") {
log.Infof("Overriding EtcdKeyFile from felix config to %s", config.EtcdKeyFile)
cfg.Spec.EtcdKeyFile = config.EtcdKeyFile
}
if config.setByConfigFileOrEnvironment("EtcdCertFile") {
log.Infof("Overriding EtcdCertFile from felix config to %s", config.EtcdCertFile)
cfg.Spec.EtcdCertFile = config.EtcdCertFile
}
if config.setByConfigFileOrEnvironment("EtcdCaFile") {
log.Infof("Overriding EtcdCaFile from felix config to %s", config.EtcdCaFile)
cfg.Spec.EtcdCACertFile = config.EtcdCaFile
}
if !(config.IpInIpEnabled || config.VXLANEnabled || config.BPFEnabled) {
// Polling k8s for node updates is expensive (because we get many superfluous
// updates) so disable if we don't need it.
log.Info("Encap disabled, disabling node poll (if KDD is in use).")
cfg.Spec.K8sDisableNodePoll = true
}
return *cfg
}
// Validate() performs cross-field validation.
func (config *Config) Validate() (err error) {
if config.FelixHostname == "" {
err = errors.New("Failed to determine hostname")
}
if config.DatastoreType == "etcdv3" && len(config.EtcdEndpoints) == 0 {
if config.EtcdScheme == "" {
err = errors.New("EtcdEndpoints and EtcdScheme both missing")
}
if config.EtcdAddr == "" {
err = errors.New("EtcdEndpoints and EtcdAddr both missing")
}
}
// If any client-side TLS config parameters are specified, they _all_ must be - except that
// either TyphaCN or TyphaURISAN may be left unset.
if config.TyphaCAFile != "" ||
config.TyphaCertFile != "" ||
config.TyphaKeyFile != "" ||
config.TyphaCN != "" ||
config.TyphaURISAN != "" {
// Some TLS config specified.
if config.TyphaKeyFile == "" ||
config.TyphaCertFile == "" ||
config.TyphaCAFile == "" ||
(config.TyphaCN == "" && config.TyphaURISAN == "") {
err = errors.New("If any Felix-Typha TLS config parameters are specified," +
" they _all_ must be" +
" - except that either TyphaCN or TyphaURISAN may be left unset.")
}
}
if err != nil {
config.Err = err
}
return
}
var knownParams map[string]param
func loadParams() {
knownParams = make(map[string]param)
config := Config{}
kind := reflect.TypeOf(config)
metaRegexp := regexp.MustCompile(`^([^;(]+)(?:\(([^)]*)\))?;` +
`([^;]*)(?:;` +
`([^;]*))?$`)
for ii := 0; ii < kind.NumField(); ii++ {
field := kind.Field(ii)
tag := field.Tag.Get("config")
if tag == "" {
continue
}
captures := metaRegexp.FindStringSubmatch(tag)
if len(captures) == 0 {
log.Panicf("Failed to parse metadata for config param %v", field.Name)
}
log.Debugf("%v: metadata captures: %#v", field.Name, captures)
kind := captures[1] // Type: "int|oneof|bool|port-list|..."
kindParams := captures[2] // Parameters for the type: e.g. for oneof "http,https"
defaultStr := captures[3] // Default value e.g "1.0"
flags := captures[4]
var param param
var err error
switch kind {
case "bool":
param = &BoolParam{}
case "int":
min := minInt
max := maxInt
if kindParams != "" {
minAndMax := strings.Split(kindParams, ",")
min, err = strconv.Atoi(minAndMax[0])
if err != nil {
log.Panicf("Failed to parse min value for %v", field.Name)
}
max, err = strconv.Atoi(minAndMax[1])
if err != nil {
log.Panicf("Failed to parse max value for %v", field.Name)
}
}
param = &IntParam{Min: min, Max: max}
case "int32":
param = &Int32Param{}
case "mark-bitmask":
param = &MarkBitmaskParam{}
case "float":
param = &FloatParam{}
case "seconds":
param = &SecondsParam{}
case "millis":
param = &MillisParam{}
case "iface-list":
param = &RegexpParam{Regexp: IfaceListRegexp,
Msg: "invalid Linux interface name"}
case "iface-list-regexp":
param = &RegexpPatternListParam{
NonRegexpElemRegexp: NonRegexpIfaceElemRegexp,
RegexpElemRegexp: RegexpIfaceElemRegexp,
Delimiter: ",",
Msg: "list contains invalid Linux interface name or regex pattern",
}
case "regexp":
param = &RegexpPatternParam{}
case "iface-param":
param = &RegexpParam{Regexp: IfaceParamRegexp,
Msg: "invalid Linux interface parameter"}
case "file":
param = &FileParam{
MustExist: strings.Contains(kindParams, "must-exist"),
Executable: strings.Contains(kindParams, "executable"),
}
case "authority":
param = &RegexpParam{Regexp: AuthorityRegexp,
Msg: "invalid URL authority"}
case "ipv4":
param = &Ipv4Param{}
case "endpoint-list":
param = &EndpointListParam{}
case "port-list":
param = &PortListParam{}
case "portrange":
param = &PortRangeParam{}
case "portrange-list":
param = &PortRangeListParam{}
case "hostname":
param = &RegexpParam{Regexp: HostnameRegexp,
Msg: "invalid hostname"}
case "host-address":
param = &RegexpParam{Regexp: HostAddressRegexp,
Msg: "invalid host address"}
case "region":
param = &RegionParam{}
case "oneof":
options := strings.Split(kindParams, ",")
lowerCaseToCanon := make(map[string]string)
for _, option := range options {
lowerCaseToCanon[strings.ToLower(option)] = option
}
param = &OneofListParam{
lowerCaseOptionsToCanonical: lowerCaseToCanon}
case "string":
param = &RegexpParam{Regexp: StringRegexp,
Msg: "invalid string"}
case "cidr-list":
param = &CIDRListParam{}
case "route-table-range":
param = &RouteTableRangeParam{}
default:
log.Panicf("Unknown type of parameter: %v", kind)
}
metadata := param.GetMetadata()
metadata.Name = field.Name
metadata.ZeroValue = reflect.ValueOf(config).FieldByName(field.Name).Interface()
if strings.Contains(flags, "non-zero") {
metadata.NonZero = true
}
if strings.Contains(flags, "die-on-fail") {
metadata.DieOnParseFailure = true
}
if strings.Contains(flags, "local") {
metadata.Local = true
}
if defaultStr != "" {
if strings.Contains(flags, "skip-default-validation") {
metadata.Default = defaultStr
} else {
// Parse the default value and save it in the metadata. Doing
// that here ensures that we syntax-check the defaults now.
defaultVal, err := param.Parse(defaultStr)
if err != nil {
log.Panicf("Invalid default value: %v", err)
}
metadata.Default = defaultVal
}
} else {
metadata.Default = metadata.ZeroValue
}
knownParams[strings.ToLower(field.Name)] = param
}
}
func (config *Config) SetUseNodeResourceUpdates(b bool) {
config.useNodeResourceUpdates = b
}
func (config *Config) UseNodeResourceUpdates() bool {
return config.useNodeResourceUpdates
}
func (config *Config) RawValues() map[string]string {
return config.rawValues
}
func (config *Config) SetLoadClientConfigFromEnvironmentFunction(fnc func() (*apiconfig.CalicoAPIConfig, error)) {
config.loadClientConfigFromEnvironment = fnc
}
// OverrideParam installs a maximum priority parameter override for the given parameter. This is useful for
// disabling features that are found to be unsupported, for example. By using an extra priority class, the
// override will persist even if the host/global config is updated.
func (config *Config) OverrideParam(name, value string) (bool, error) {
config.internalOverrides[name] = value
return config.UpdateFrom(config.internalOverrides, InternalOverride)
}
func New() *Config {
if knownParams == nil {
loadParams()
}
p := &Config{
rawValues: map[string]string{},
sourceToRawConfig: map[Source]map[string]string{},
internalOverrides: map[string]string{},
}
for _, param := range knownParams {
param.setDefault(p)
}
hostname, err := names.Hostname()
if err != nil {
log.Warningf("Failed to get hostname from kernel, "+
"trying HOSTNAME variable: %v", err)
hostname = strings.ToLower(os.Getenv("HOSTNAME"))
}
p.FelixHostname = hostname
p.loadClientConfigFromEnvironment = apiconfig.LoadClientConfigFromEnvironment
return p
}
type param interface {
GetMetadata() *Metadata
Parse(raw string) (result interface{}, err error)
setDefault(*Config)
}
| 1 | 18,007 | Are all fields beginning with "Debug" automatically `;local` ? (I guess so, but just checking.) | projectcalico-felix | go |
@@ -54,6 +54,10 @@ class EvalHook(Hook):
save_best=None,
rule=None,
**eval_kwargs):
+ warnings.warn(
+            'DeprecationWarning: EvalHook and DistEvalHook are deprecated, '
+ 'please use "mmcv.runner.EvalHook" or "mmcv.runner.DistEvalHook" '
+ 'instead')
if not isinstance(dataloader, DataLoader):
raise TypeError('dataloader must be a pytorch DataLoader, but got'
f' {type(dataloader)}') | 1 | import os.path as osp
import warnings
from math import inf
import mmcv
import torch.distributed as dist
from mmcv.runner import Hook
from torch.nn.modules.batchnorm import _BatchNorm
from torch.utils.data import DataLoader
from mmdet.utils import get_root_logger
class EvalHook(Hook):
"""Evaluation hook.
Notes:
If new arguments are added for EvalHook, tools/test.py,
        tools/analysis_tools/eval_metric.py may be affected.
Attributes:
dataloader (DataLoader): A PyTorch dataloader.
start (int, optional): Evaluation starting epoch. It enables evaluation
before the training starts if ``start`` <= the resuming epoch.
If None, whether to evaluate is merely decided by ``interval``.
Default: None.
interval (int): Evaluation interval (by epochs). Default: 1.
save_best (str, optional): If a metric is specified, it would measure
the best checkpoint during evaluation. The information about best
            checkpoint would be saved in best.json.
Options are the evaluation metrics to the test dataset. e.g.,
``bbox_mAP``, ``segm_mAP`` for bbox detection and instance
segmentation. ``AR@100`` for proposal recall. If ``save_best`` is
``auto``, the first key will be used. The interval of
            ``CheckpointHook`` should divide that of ``EvalHook``. Default: None.
rule (str, optional): Comparison rule for best score. If set to None,
it will infer a reasonable rule. Keys such as 'mAP' or 'AR' will
be inferred by 'greater' rule. Keys contain 'loss' will be inferred
by 'less' rule. Options are 'greater', 'less'. Default: None.
**eval_kwargs: Evaluation arguments fed into the evaluate function of
the dataset.
"""
rule_map = {'greater': lambda x, y: x > y, 'less': lambda x, y: x < y}
init_value_map = {'greater': -inf, 'less': inf}
greater_keys = ['mAP', 'AR']
less_keys = ['loss']
def __init__(self,
dataloader,
start=None,
interval=1,
by_epoch=True,
save_best=None,
rule=None,
**eval_kwargs):
if not isinstance(dataloader, DataLoader):
raise TypeError('dataloader must be a pytorch DataLoader, but got'
f' {type(dataloader)}')
if not interval > 0:
raise ValueError(f'interval must be positive, but got {interval}')
if start is not None and start < 0:
warnings.warn(
f'The evaluation start epoch {start} is smaller than 0, '
f'use 0 instead', UserWarning)
start = 0
self.dataloader = dataloader
self.interval = interval
self.by_epoch = by_epoch
self.start = start
assert isinstance(save_best, str) or save_best is None
self.save_best = save_best
self.eval_kwargs = eval_kwargs
self.initial_epoch_flag = True
self.logger = get_root_logger()
if self.save_best is not None:
self._init_rule(rule, self.save_best)
def _init_rule(self, rule, key_indicator):
"""Initialize rule, key_indicator, comparison_func, and best score.
Args:
rule (str | None): Comparison rule for best score.
key_indicator (str | None): Key indicator to determine the
comparison rule.
"""
if rule not in self.rule_map and rule is not None:
raise KeyError(f'rule must be greater, less or None, '
f'but got {rule}.')
if rule is None:
if key_indicator != 'auto':
if any(key in key_indicator for key in self.greater_keys):
rule = 'greater'
elif any(key in key_indicator for key in self.less_keys):
rule = 'less'
else:
raise ValueError(f'Cannot infer the rule for key '
f'{key_indicator}, thus a specific rule '
f'must be specified.')
self.rule = rule
self.key_indicator = key_indicator
if self.rule is not None:
self.compare_func = self.rule_map[self.rule]
def before_run(self, runner):
if self.save_best is not None:
if runner.meta is None:
                warnings.warn('runner.meta is None. Creating an empty one.')
runner.meta = dict()
runner.meta.setdefault('hook_msgs', dict())
def before_train_epoch(self, runner):
"""Evaluate the model only at the start of training."""
if not self.initial_epoch_flag:
return
if self.start is not None and runner.epoch >= self.start:
self.after_train_epoch(runner)
self.initial_epoch_flag = False
def evaluation_flag(self, runner):
"""Judge whether to perform_evaluation after this epoch.
Returns:
bool: The flag indicating whether to perform evaluation.
"""
if self.start is None:
if not self.every_n_epochs(runner, self.interval):
# No evaluation during the interval epochs.
return False
elif (runner.epoch + 1) < self.start:
# No evaluation if start is larger than the current epoch.
return False
else:
# Evaluation only at epochs 3, 5, 7... if start==3 and interval==2
if (runner.epoch + 1 - self.start) % self.interval:
return False
return True
def after_train_epoch(self, runner):
if not self.by_epoch or not self.evaluation_flag(runner):
return
from mmdet.apis import single_gpu_test
results = single_gpu_test(runner.model, self.dataloader, show=False)
key_score = self.evaluate(runner, results)
if self.save_best:
self.save_best_checkpoint(runner, key_score)
def after_train_iter(self, runner):
if self.by_epoch or not self.every_n_iters(runner, self.interval):
return
from mmdet.apis import single_gpu_test
results = single_gpu_test(runner.model, self.dataloader, show=False)
key_score = self.evaluate(runner, results)
if self.save_best:
self.save_best_checkpoint(runner, key_score)
def save_best_checkpoint(self, runner, key_score):
best_score = runner.meta['hook_msgs'].get(
'best_score', self.init_value_map[self.rule])
if self.compare_func(key_score, best_score):
best_score = key_score
runner.meta['hook_msgs']['best_score'] = best_score
last_ckpt = runner.meta['hook_msgs']['last_ckpt']
runner.meta['hook_msgs']['best_ckpt'] = last_ckpt
mmcv.symlink(
last_ckpt,
osp.join(runner.work_dir, f'best_{self.key_indicator}.pth'))
time_stamp = runner.epoch + 1 if self.by_epoch else runner.iter + 1
            self.logger.info(f'Now best checkpoint is epoch_{time_stamp}.pth. '
f'Best {self.key_indicator} is {best_score:0.4f}')
def evaluate(self, runner, results):
eval_res = self.dataloader.dataset.evaluate(
results, logger=runner.logger, **self.eval_kwargs)
for name, val in eval_res.items():
runner.log_buffer.output[name] = val
runner.log_buffer.ready = True
if self.save_best is not None:
if self.key_indicator == 'auto':
# infer from eval_results
self._init_rule(self.rule, list(eval_res.keys())[0])
return eval_res[self.key_indicator]
else:
return None
class DistEvalHook(EvalHook):
"""Distributed evaluation hook.
Notes:
        If new arguments are added, tools/test.py may be affected.
Attributes:
dataloader (DataLoader): A PyTorch dataloader.
start (int, optional): Evaluation starting epoch. It enables evaluation
before the training starts if ``start`` <= the resuming epoch.
If None, whether to evaluate is merely decided by ``interval``.
Default: None.
interval (int): Evaluation interval (by epochs). Default: 1.
tmpdir (str | None): Temporary directory to save the results of all
processes. Default: None.
gpu_collect (bool): Whether to use gpu or cpu to collect results.
Default: False.
save_best (str, optional): If a metric is specified, it would measure
the best checkpoint during evaluation. The information about best
            checkpoint would be saved in best.json.
Options are the evaluation metrics to the test dataset. e.g.,
``bbox_mAP``, ``segm_mAP`` for bbox detection and instance
segmentation. ``AR@100`` for proposal recall. If ``save_best`` is
``auto``, the first key will be used. The interval of
            ``CheckpointHook`` should divide that of ``EvalHook``. Default: None.
rule (str | None): Comparison rule for best score. If set to None,
it will infer a reasonable rule. Default: 'None'.
broadcast_bn_buffer (bool): Whether to broadcast the
buffer(running_mean and running_var) of rank 0 to other rank
before evaluation. Default: True.
**eval_kwargs: Evaluation arguments fed into the evaluate function of
the dataset.
"""
def __init__(self,
dataloader,
start=None,
interval=1,
by_epoch=True,
tmpdir=None,
gpu_collect=False,
save_best=None,
rule=None,
broadcast_bn_buffer=True,
**eval_kwargs):
super().__init__(
dataloader,
start=start,
interval=interval,
by_epoch=by_epoch,
save_best=save_best,
rule=rule,
**eval_kwargs)
self.broadcast_bn_buffer = broadcast_bn_buffer
self.tmpdir = tmpdir
self.gpu_collect = gpu_collect
def _broadcast_bn_buffer(self, runner):
# Synchronization of BatchNorm's buffer (running_mean
# and running_var) is not supported in the DDP of pytorch,
# which may cause the inconsistent performance of models in
# different ranks, so we broadcast BatchNorm's buffers
# of rank 0 to other ranks to avoid this.
if self.broadcast_bn_buffer:
model = runner.model
for name, module in model.named_modules():
if isinstance(module,
_BatchNorm) and module.track_running_stats:
dist.broadcast(module.running_var, 0)
dist.broadcast(module.running_mean, 0)
def after_train_epoch(self, runner):
if not self.by_epoch or not self.evaluation_flag(runner):
return
if self.broadcast_bn_buffer:
self._broadcast_bn_buffer(runner)
from mmdet.apis import multi_gpu_test
tmpdir = self.tmpdir
if tmpdir is None:
tmpdir = osp.join(runner.work_dir, '.eval_hook')
results = multi_gpu_test(
runner.model,
self.dataloader,
tmpdir=tmpdir,
gpu_collect=self.gpu_collect)
if runner.rank == 0:
print('\n')
key_score = self.evaluate(runner, results)
if self.save_best:
self.save_best_checkpoint(runner, key_score)
def after_train_iter(self, runner):
if self.by_epoch or not self.every_n_iters(runner, self.interval):
return
if self.broadcast_bn_buffer:
self._broadcast_bn_buffer(runner)
from mmdet.apis import multi_gpu_test
tmpdir = self.tmpdir
if tmpdir is None:
tmpdir = osp.join(runner.work_dir, '.eval_hook')
results = multi_gpu_test(
runner.model,
self.dataloader,
tmpdir=tmpdir,
gpu_collect=self.gpu_collect)
if runner.rank == 0:
print('\n')
key_score = self.evaluate(runner, results)
if self.save_best:
self.save_best_checkpoint(runner, key_score)
| 1 | 23,045 | We should let this module inherit from the EvalHook supported in MMCV, and only override the `__init__` function by adding the warnings. The other functions can use those in MMCV. | open-mmlab-mmdetection | py
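A minimal sketch of what this review asks for — an illustration only, not the actual mmdetection change — assuming `mmcv.runner` already exposes compatible `EvalHook`/`DistEvalHook` base classes:

# Sketch of the reviewer's suggestion: inherit from the MMCV hooks and override
# only __init__ to emit the deprecation warning; all evaluation logic stays in MMCV.
import warnings

from mmcv.runner import DistEvalHook as BaseDistEvalHook
from mmcv.runner import EvalHook as BaseEvalHook


class EvalHook(BaseEvalHook):

    def __init__(self, *args, **kwargs):
        warnings.warn(
            'DeprecationWarning: EvalHook and DistEvalHook are deprecated, '
            'please use "mmcv.runner.EvalHook" or "mmcv.runner.DistEvalHook" '
            'instead')
        super().__init__(*args, **kwargs)


class DistEvalHook(BaseDistEvalHook):

    def __init__(self, *args, **kwargs):
        warnings.warn(
            'DeprecationWarning: EvalHook and DistEvalHook are deprecated, '
            'please use "mmcv.runner.EvalHook" or "mmcv.runner.DistEvalHook" '
            'instead')
        super().__init__(*args, **kwargs)

Delegating everything except the warning to the MMCV base classes avoids keeping a second copy of the evaluation logic in mmdetection.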
@@ -267,14 +267,15 @@ public class PostgreExecutionContext extends JDBCExecutionContext implements DBC
}
}
- void setSessionRole(final DBRProgressMonitor monitor) throws DBCException {
+ private void setSessionRole(@NotNull DBRProgressMonitor monitor) throws DBCException {
final String roleName = getDataSource().getContainer().getConnectionConfiguration().getProviderProperty(PostgreConstants.PROP_CHOSEN_ROLE);
if (CommonUtils.isEmpty(roleName)) {
return;
}
try (JDBCSession session = openSession(monitor, DBCExecutionPurpose.UTIL, "Set active role")) {
try (JDBCStatement dbStat = session.createStatement()) {
- dbStat.executeUpdate("SET ROLE " + roleName);
+ String sql = "SET ROLE " + getDataSource().getSQLDialect().getQuotedIdentifier(roleName, false, true);
+ dbStat.executeUpdate(sql);
}
} catch (SQLException e) {
throw new DBCException(e, this); | 1 | /*
* DBeaver - Universal Database Manager
* Copyright (C) 2010-2021 DBeaver Corp and others
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jkiss.dbeaver.ext.postgresql.model;
import org.jkiss.code.NotNull;
import org.jkiss.code.Nullable;
import org.jkiss.dbeaver.DBException;
import org.jkiss.dbeaver.ext.postgresql.PostgreConstants;
import org.jkiss.dbeaver.model.DBUtils;
import org.jkiss.dbeaver.model.connection.DBPConnectionBootstrap;
import org.jkiss.dbeaver.model.exec.DBCException;
import org.jkiss.dbeaver.model.exec.DBCExecutionContextDefaults;
import org.jkiss.dbeaver.model.exec.DBCExecutionPurpose;
import org.jkiss.dbeaver.model.exec.jdbc.JDBCPreparedStatement;
import org.jkiss.dbeaver.model.exec.jdbc.JDBCResultSet;
import org.jkiss.dbeaver.model.exec.jdbc.JDBCSession;
import org.jkiss.dbeaver.model.exec.jdbc.JDBCStatement;
import org.jkiss.dbeaver.model.impl.jdbc.JDBCExecutionContext;
import org.jkiss.dbeaver.model.impl.jdbc.JDBCUtils;
import org.jkiss.dbeaver.model.runtime.DBRProgressMonitor;
import org.jkiss.dbeaver.model.struct.DBSObject;
import org.jkiss.utils.CommonUtils;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;
/**
* PostgreExecutionContext
*/
public class PostgreExecutionContext extends JDBCExecutionContext implements DBCExecutionContextDefaults<PostgreDatabase, PostgreSchema> {
private PostgreSchema activeSchema;
private final List<String> searchPath = new ArrayList<>();
private List<String> defaultSearchPath = new ArrayList<>();
private String activeUser;
private boolean isolatedContext;
public PostgreExecutionContext(@NotNull PostgreDatabase database, String purpose) {
super(database, purpose);
}
@NotNull
@Override
public PostgreDataSource getDataSource() {
return (PostgreDataSource) super.getDataSource();
}
@Nullable
@Override
public PostgreExecutionContext getContextDefaults() {
return this;
}
@Override
public PostgreDatabase getDefaultCatalog() {
return (PostgreDatabase) getOwnerInstance();
}
@Override
public PostgreSchema getDefaultSchema() {
return activeSchema;
}
@Override
public boolean supportsCatalogChange() {
return true;
}
@Override
public boolean supportsSchemaChange() {
return true;
}
@Override
public void setDefaultCatalog(DBRProgressMonitor monitor, PostgreDatabase catalog, PostgreSchema schema) throws DBCException {
setDefaultCatalog(monitor, catalog, schema, false);
}
void setDefaultCatalog(@NotNull DBRProgressMonitor monitor, @NotNull PostgreDatabase catalog, @Nullable PostgreSchema schema, boolean force)
throws DBCException {
try {
catalog.checkInstanceConnection(monitor);
DBSObject oldInstance = getOwnerInstance();
boolean catalogChanged = false, schemaChanged = false;
if (oldInstance != catalog) {
// Changing catalog means reconnect
// Change it only for isolated editor contexts
if (isolatedContext) {
disconnect();
setOwnerInstance(catalog);
connect(monitor, null, null, null, false);
} else {
getDataSource().setActiveDatabase(catalog);
}
catalogChanged = true;
}
if (schema != null) {
if (catalogChanged) {
// Catalog has been changed. Get the new one and change schema there
PostgreDatabase newInstance = getDataSource().getDefaultInstance();
PostgreExecutionContext newContext = (PostgreExecutionContext) newInstance.getDefaultContext(false);
newContext.changeDefaultSchema(monitor, schema, true, force);
} else {
schemaChanged = changeDefaultSchema(monitor, schema, true, force);
}
}
if (catalogChanged || schemaChanged) {
DBUtils.fireObjectSelectionChange(oldInstance, catalog);
}
} catch (DBException e) {
throw new DBCException("Error changing default database", e);
}
}
@Override
public void setDefaultSchema(DBRProgressMonitor monitor, PostgreSchema schema) throws DBCException {
setDefaultCatalog(monitor, schema.getDatabase(), schema, false);
}
boolean changeDefaultSchema(DBRProgressMonitor monitor, PostgreSchema schema, boolean reflect, boolean force) throws DBCException {
PostgreSchema oldActiveSchema = this.activeSchema;
if (oldActiveSchema == schema && !force) {
return false;
}
setSearchPath(monitor, schema);
this.activeSchema = schema;
setSearchPath(schema.getName());
if (reflect) {
DBUtils.fireObjectSelectionChange(oldActiveSchema, activeSchema);
}
return true;
}
@Override
public boolean refreshDefaults(DBRProgressMonitor monitor, boolean useBootstrapSettings) throws DBException {
// Check default active schema
try (JDBCSession session = openSession(monitor, DBCExecutionPurpose.META, "Read context defaults")) {
try (JDBCPreparedStatement stat = session.prepareStatement("SELECT current_schema(),session_user")) {
try (JDBCResultSet rs = stat.executeQuery()) {
if (rs.nextRow()) {
String activeSchemaName = JDBCUtils.safeGetString(rs, 1);
if (!CommonUtils.isEmpty(activeSchemaName)) {
activeSchema = getDefaultCatalog().getSchema(monitor, activeSchemaName);
}
activeUser = JDBCUtils.safeGetString(rs, 2);
}
}
}
String searchPathStr = JDBCUtils.queryString(session, "SHOW search_path");
this.searchPath.clear();
if (searchPathStr != null) {
for (String str : searchPathStr.split(",")) {
str = str.trim();
String spSchema = DBUtils.getUnQuotedIdentifier(getDataSource(), str);
if (!searchPath.contains(spSchema)) {
this.searchPath.add(spSchema);
}
}
if (activeSchema == null) {
// This may happen
for (String schemaName : searchPath) {
activeSchema = getDefaultCatalog().getSchema(monitor, schemaName);
if (activeSchema != null) {
break;
}
}
}
} else {
this.searchPath.add(PostgreConstants.PUBLIC_SCHEMA_NAME);
}
if (defaultSearchPath.isEmpty()) {
defaultSearchPath = new ArrayList<>(searchPath);
}
if (useBootstrapSettings) {
DBPConnectionBootstrap bootstrap = getBootstrapSettings();
String bsSchemaName = bootstrap.getDefaultSchemaName();
if (!CommonUtils.isEmpty(bsSchemaName)) {
setSearchPath(monitor, bsSchemaName);
PostgreSchema bsSchema = getDefaultCatalog().getSchema(monitor, bsSchemaName);
if (bsSchema != null) {
activeSchema = bsSchema;
}
}
}
} catch (SQLException e) {
throw new DBCException(e, this);
}
setSessionRole(monitor);
return true;
}
public String getActiveUser() {
return activeUser;
}
public List<String> getSearchPath() {
return searchPath;
}
List<String> getDefaultSearchPath() {
return defaultSearchPath;
}
private void setSearchPath(DBRProgressMonitor monitor, PostgreSchema schema) throws DBCException {
// Construct search path from current search path but put default schema first
setSearchPath(monitor, schema.getName());
}
private void setSearchPath(DBRProgressMonitor monitor, String defSchemaName) throws DBCException {
List<String> newSearchPath = new ArrayList<>(getDefaultSearchPath());
int schemaIndex = newSearchPath.indexOf(defSchemaName);
/*if (schemaIndex == 0 || (schemaIndex == 1 && isUserFirstInPath(newSearchPath))) {
// Already default schema
return;
} else*/
{
if (schemaIndex > 0) {
// Remove from previous position
newSearchPath.remove(schemaIndex);
}
// Add it first (or after $user)
int newIndex = isUserFirstInPath(newSearchPath) ? 1 : 0;
newSearchPath.add(newIndex, defSchemaName);
}
StringBuilder spString = new StringBuilder();
for (String sp : newSearchPath) {
if (spString.length() > 0) spString.append(",");
spString.append(DBUtils.getQuotedIdentifier(getDataSource(), sp));
}
try (JDBCSession session = openSession(monitor, DBCExecutionPurpose.UTIL, "Change search path")) {
JDBCUtils.executeSQL(session, "SET search_path = " + spString);
} catch (SQLException e) {
throw new DBCException("Error setting search path", e, this);
}
}
private static boolean isUserFirstInPath(List<String> newSearchPath) {
return !newSearchPath.isEmpty() && newSearchPath.get(0).equals("$user");
}
private void setSearchPath(String path) {
searchPath.clear();
searchPath.add(path);
if (!path.equals(activeUser)) {
searchPath.add(activeUser);
}
}
void setSessionRole(final DBRProgressMonitor monitor) throws DBCException {
final String roleName = getDataSource().getContainer().getConnectionConfiguration().getProviderProperty(PostgreConstants.PROP_CHOSEN_ROLE);
if (CommonUtils.isEmpty(roleName)) {
return;
}
try (JDBCSession session = openSession(monitor, DBCExecutionPurpose.UTIL, "Set active role")) {
try (JDBCStatement dbStat = session.createStatement()) {
dbStat.executeUpdate("SET ROLE " + roleName);
}
} catch (SQLException e) {
throw new DBCException(e, this);
}
}
public void setIsolatedContext(boolean isolatedContext) {
this.isolatedContext = isolatedContext;
}
}
| 1 | 11,169 | Let's use prepared statements here instead | dbeaver-dbeaver | java |
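One illustrative way to follow this suggestion, reusing names already available in `setSessionRole` above (`monitor`, `roleName`, and the `JDBCPreparedStatement` import the file already has). This is an assumption about a possible approach, not the actual DBeaver fix: PostgreSQL does not accept bind parameters in utility statements such as `SET ROLE`, so the sketch routes through the equivalent `set_config('role', ...)` function, which can be prepared and parameterized:

    // Sketch: bind the role name as a parameter instead of concatenating it into SQL.
    try (JDBCSession session = openSession(monitor, DBCExecutionPurpose.UTIL, "Set active role")) {
        try (JDBCPreparedStatement dbStat = session.prepareStatement("SELECT set_config('role', ?, false)")) {
            dbStat.setString(1, roleName);
            dbStat.execute();
        }
    } catch (SQLException e) {
        throw new DBCException(e, this);
    }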
@@ -2,7 +2,11 @@
* External dependencies
*/
import { storiesOf } from '@storybook/react';
-import Button from 'GoogleComponents/button';
+
+/**
+ * Internal dependencies
+ */
+import Button from '../assets/js/components/button';
storiesOf( 'Global', module )
.add( 'Buttons', () => { | 1 | /**
* External dependencies
*/
import { storiesOf } from '@storybook/react';
import Button from 'GoogleComponents/button';
storiesOf( 'Global', module )
.add( 'Buttons', () => {
return (
<div>
<p>
<Button>
Default Button
</Button>
</p>
<p>
<Button className="googlesitekit-button--hover">
VRT: Default Button Hover
</Button>
</p>
<p>
<Button
href="http://google.com"
>
Default Button Link
</Button>
</p>
<p>
<Button
href="http://google.com"
danger
>
Danger Button
</Button>
</p>
<p>
<Button
disabled
>
Disabled Button
</Button>
</p>
</div>
);
}, {
options: {
hoverSelector: '.googlesitekit-button--hover',
postInteractionWait: 3000, // Wait for shadows to animate.
onReadyScript: 'mouse.js',
},
} );
| 1 | 27,277 | Just leaving this in one place but thanks so much for re-organising all these imports. | google-site-kit-wp | js |
@@ -43,6 +43,7 @@
#endif
/* List of instrumentation functions. */
+#ifdef X86
#define FUNCTIONS() \
FUNCTION(empty) \
FUNCTION(empty_1arg) \ | 1 | /* *******************************************************************************
* Copyright (c) 2017 ARM Limited. All rights reserved.
* Copyright (c) 2011 Massachusetts Institute of Technology All rights reserved.
* *******************************************************************************/
/*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * Neither the name of MIT nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL MIT OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*/
/* Test the clean call inliner. */
#include "dr_api.h"
#include "client_tools.h"
#ifdef WINDOWS
#define BINARY_NAME "client.inline.exe"
#else
#define BINARY_NAME "client.inline"
#endif
/* List of instrumentation functions. */
#define FUNCTIONS() \
FUNCTION(empty) \
FUNCTION(empty_1arg) \
FUNCTION(inscount) \
FUNCTION(gcc47_inscount) \
FUNCTION(callpic_pop) \
FUNCTION(callpic_mov) \
FUNCTION(nonleaf) \
FUNCTION(cond_br) \
FUNCTION(tls_clobber) \
FUNCTION(aflags_clobber) \
FUNCTION(compiler_inscount) \
FUNCTION(bbcount) \
LAST_FUNCTION()
#define TEST_INLINE 1
static dr_emit_flags_t event_basic_block(void *dc, void *tag, instrlist_t *bb,
bool for_trace, bool translating);
static void compiler_inscount(ptr_uint_t count);
#include "cleancall-opt-shared.h"
static void test_inlined_call_args(void *dc, instrlist_t *bb, instr_t *where,
int fn_idx);
static void
fill_scratch(void)
{
void *dc = dr_get_current_drcontext();
int slot;
/* Set slots to 0x000... 0x111... 0x222... etc. */
for (slot = SPILL_SLOT_1; slot <= SPILL_SLOT_MAX; slot++) {
reg_t value = slot * 0x11111111;
dr_write_saved_reg(dc, slot, value);
}
}
static void
check_scratch(void)
{
void *dc = dr_get_current_drcontext();
int slot;
/* Check that slots are 0x000... 0x111... 0x222... etc. */
for (slot = SPILL_SLOT_1; slot <= SPILL_SLOT_MAX; slot++) {
reg_t value = dr_read_saved_reg(dc, slot);
reg_t expected = slot * 0x11111111;
if (value != expected)
dr_fprintf(STDERR, "Client scratch slot clobbered by clean call!\n");
}
}
static void
check_aflags(int actual, int expected)
{
byte ah = (actual >> 8) & 0xFF;
byte al = actual & 0xFF;
byte eh = (expected >> 8) & 0xFF;
byte el = expected & 0xFF;
dr_fprintf(STDERR, "actual: %04x, expected: %04x\n", actual, expected);
DR_ASSERT_MSG(ah == eh, "Aflags clobbered!");
DR_ASSERT_MSG(al == el, "Overflow clobbered!");
dr_fprintf(STDERR, "passed for %04x\n", expected);
}
static instr_t *
test_aflags(void *dc, instrlist_t *bb, instr_t *where, int aflags,
instr_t *before_label, instr_t *after_label)
{
opnd_t xax = opnd_create_reg(DR_REG_XAX);
opnd_t al = opnd_create_reg(DR_REG_AL);
/* Save flags and REG_XAX
* XXX: Assumes we can push to application stack, which happens to be valid
* for this test application.
*
* pushf
* mov [SPILL_SLOT_1], REG_XAX
*/
PRE(bb, where, INSTR_CREATE_pushf(dc));
PRE(bb, where, INSTR_CREATE_mov_st
(dc, dr_reg_spill_slot_opnd(dc, SPILL_SLOT_1), xax));
/* Then populate aflags from XAX:
* mov REG_XAX, aflags
* add al, HEX(7F)
* sahf ah
*/
PRE(bb, where, INSTR_CREATE_mov_imm(dc, xax, OPND_CREATE_INTPTR(aflags)));
PRE(bb, where, INSTR_CREATE_add(dc, al, OPND_CREATE_INT8(0x7F)));
PRE(bb, where, INSTR_CREATE_sahf(dc));
if (before_label != NULL)
PRE(bb, where, before_label);
dr_insert_clean_call(dc, bb, where, func_ptrs[FN_aflags_clobber], false, 0);
if (after_label != NULL)
PRE(bb, where, after_label);
/* Get the flags back into XAX, and then to SPILL_SLOT_2:
* mov REG_XAX, 0
* lahf
* seto al
* mov [SPILL_SLOT_2], REG_XAX
*/
PRE(bb, where, INSTR_CREATE_mov_imm(dc, xax, OPND_CREATE_INTPTR(0)));
PRE(bb, where, INSTR_CREATE_lahf(dc));
PRE(bb, where, INSTR_CREATE_setcc(dc, OP_seto, al));
PRE(bb, where, INSTR_CREATE_mov_st
(dc, dr_reg_spill_slot_opnd(dc, SPILL_SLOT_2), xax));
/* Assert that they match the original flags. */
dr_insert_clean_call(dc, bb, where, (void*)check_aflags, false, 2,
dr_reg_spill_slot_opnd(dc, SPILL_SLOT_2),
OPND_CREATE_INT32(aflags));
    /* Restore XAX and flags. */
PRE(bb, where, INSTR_CREATE_mov_ld
(dc, xax, dr_reg_spill_slot_opnd(dc, SPILL_SLOT_1)));
PRE(bb, where, INSTR_CREATE_popf(dc));
return where;
}
static dr_emit_flags_t
event_basic_block(void *dc, void *tag, instrlist_t *bb,
bool for_trace, bool translating)
{
instr_t *entry = instrlist_first(bb);
app_pc entry_pc = instr_get_app_pc(entry);
int i;
bool inline_expected = true;
instr_t *before_label;
instr_t *after_label;
for (i = 0; i < N_FUNCS; i++) {
if (entry_pc == func_app_pcs[i])
break;
}
if (i == N_FUNCS)
return DR_EMIT_DEFAULT;
/* We're inserting a call to a function in this bb. */
func_called[i] = 1;
dr_insert_clean_call(dc, bb, entry, (void*)before_callee, false, 2,
OPND_CREATE_INTPTR(func_ptrs[i]),
OPND_CREATE_INTPTR(func_names[i]));
before_label = INSTR_CREATE_label(dc);
after_label = INSTR_CREATE_label(dc);
switch (i) {
default:
/* Default behavior is to call instrumentation with no-args and
* assert it gets inlined.
*/
PRE(bb, entry, before_label);
dr_insert_clean_call(dc, bb, entry, func_ptrs[i], false, 0);
PRE(bb, entry, after_label);
break;
case FN_empty_1arg:
case FN_inscount:
case FN_gcc47_inscount:
case FN_compiler_inscount:
PRE(bb, entry, before_label);
dr_insert_clean_call(dc, bb, entry, func_ptrs[i], false, 1,
OPND_CREATE_INT32(0xDEAD));
PRE(bb, entry, after_label);
break;
case FN_nonleaf:
case FN_cond_br:
/* These functions cannot be inlined (yet). */
PRE(bb, entry, before_label);
dr_insert_clean_call(dc, bb, entry, func_ptrs[i], false, 0);
PRE(bb, entry, after_label);
inline_expected = false;
break;
case FN_tls_clobber:
dr_insert_clean_call(dc, bb, entry, (void*)fill_scratch, false, 0);
PRE(bb, entry, before_label);
dr_insert_clean_call(dc, bb, entry, func_ptrs[i], false, 0);
PRE(bb, entry, after_label);
dr_insert_clean_call(dc, bb, entry, (void*)check_scratch, false, 0);
break;
case FN_aflags_clobber:
/* ah is: SF:ZF:0:AF:0:PF:1:CF. If we turn everything on we will
* get all 1's except bits 3 and 5, giving a hex mask of 0xD7.
* Overflow is in the low byte (al usually) so use use a mask of
* 0xD701 first. If we turn everything off we get 0x0200.
*/
entry = test_aflags(dc, bb, entry, 0xD701, before_label, after_label);
(void)test_aflags(dc, bb, entry, 0x00200, NULL, NULL);
break;
}
dr_insert_clean_call(dc, bb, entry, (void*)after_callee, false, 6,
opnd_create_instr(before_label),
opnd_create_instr(after_label),
OPND_CREATE_INT32(inline_expected),
OPND_CREATE_INT32(false),
OPND_CREATE_INT32(i),
OPND_CREATE_INTPTR(func_names[i]));
if (i == FN_inscount || i == FN_empty_1arg) {
test_inlined_call_args(dc, bb, entry, i);
}
return DR_EMIT_DEFAULT;
}
/* For all regs, pass arguments of the form:
* %reg
* (%reg, %xax, 1)-0xDEAD
* (%xax, %reg, 1)-0xDEAD
*/
static void
test_inlined_call_args(void *dc, instrlist_t *bb, instr_t *where, int fn_idx)
{
uint i;
static const ptr_uint_t hex_dead_global = 0xDEAD;
for (i = 0; i < DR_NUM_GPR_REGS; i++) {
reg_id_t reg = DR_REG_XAX + (reg_id_t)i;
reg_id_t other_reg = (reg == DR_REG_XAX ? DR_REG_XBX : DR_REG_XAX);
opnd_t arg;
instr_t *before_label;
instr_t *after_label;
/* FIXME: We should test passing the app %xsp to an inlined function,
* but I hesitate to store a non-stack location in XSP.
*/
if (reg == DR_REG_XSP)
continue;
/* %reg */
before_label = INSTR_CREATE_label(dc);
after_label = INSTR_CREATE_label(dc);
arg = opnd_create_reg(reg);
dr_insert_clean_call(dc, bb, where, (void*)before_callee, false, 2,
OPND_CREATE_INTPTR(func_ptrs[fn_idx]),
OPND_CREATE_INTPTR(0));
PRE(bb, where, before_label);
dr_save_reg(dc, bb, where, reg, SPILL_SLOT_1);
PRE(bb, where, INSTR_CREATE_mov_imm
(dc, arg, OPND_CREATE_INTPTR(0xDEAD)));
dr_insert_clean_call(dc, bb, where, (void*)func_ptrs[fn_idx], false, 1,
arg);
dr_restore_reg(dc, bb, where, reg, SPILL_SLOT_1);
PRE(bb, where, after_label);
dr_insert_clean_call(dc, bb, where, (void*)after_callee, false, 6,
opnd_create_instr(before_label),
opnd_create_instr(after_label),
OPND_CREATE_INT32(true),
OPND_CREATE_INT32(false),
OPND_CREATE_INT32(fn_idx),
OPND_CREATE_INTPTR(0));
/* (%reg, %other_reg, 1)-0xDEAD */
before_label = INSTR_CREATE_label(dc);
after_label = INSTR_CREATE_label(dc);
arg = opnd_create_base_disp(reg, other_reg, 1, -0xDEAD, OPSZ_PTR);
dr_insert_clean_call(dc, bb, where, (void*)before_callee, false, 2,
OPND_CREATE_INTPTR(func_ptrs[fn_idx]),
OPND_CREATE_INTPTR(0));
PRE(bb, where, before_label);
dr_save_reg(dc, bb, where, reg, SPILL_SLOT_1);
dr_save_reg(dc, bb, where, other_reg, SPILL_SLOT_2);
PRE(bb, where, INSTR_CREATE_mov_imm
(dc, opnd_create_reg(reg), OPND_CREATE_INTPTR(0xDEAD)));
PRE(bb, where, INSTR_CREATE_mov_imm
(dc, opnd_create_reg(other_reg),
OPND_CREATE_INTPTR(&hex_dead_global)));
dr_insert_clean_call(dc, bb, where, (void*)func_ptrs[fn_idx], false, 1,
arg);
dr_restore_reg(dc, bb, where, other_reg, SPILL_SLOT_2);
dr_restore_reg(dc, bb, where, reg, SPILL_SLOT_1);
PRE(bb, where, after_label);
dr_insert_clean_call(dc, bb, where, (void*)after_callee, false, 6,
opnd_create_instr(before_label),
opnd_create_instr(after_label),
OPND_CREATE_INT32(true),
OPND_CREATE_INT32(false),
OPND_CREATE_INT32(fn_idx),
OPND_CREATE_INTPTR(0));
/* (%other_reg, %reg, 1)-0xDEAD */
before_label = INSTR_CREATE_label(dc);
after_label = INSTR_CREATE_label(dc);
arg = opnd_create_base_disp(other_reg, reg, 1, -0xDEAD, OPSZ_PTR);
dr_insert_clean_call(dc, bb, where, (void*)before_callee, false, 2,
OPND_CREATE_INTPTR(func_ptrs[fn_idx]),
OPND_CREATE_INTPTR(0));
PRE(bb, where, before_label);
dr_save_reg(dc, bb, where, reg, SPILL_SLOT_1);
dr_save_reg(dc, bb, where, other_reg, SPILL_SLOT_2);
PRE(bb, where, INSTR_CREATE_mov_imm
(dc, opnd_create_reg(other_reg), OPND_CREATE_INTPTR(0xDEAD)));
PRE(bb, where, INSTR_CREATE_mov_imm
(dc, opnd_create_reg(reg),
OPND_CREATE_INTPTR(&hex_dead_global)));
dr_insert_clean_call(dc, bb, where, (void*)func_ptrs[fn_idx], false, 1,
arg);
dr_restore_reg(dc, bb, where, other_reg, SPILL_SLOT_2);
dr_restore_reg(dc, bb, where, reg, SPILL_SLOT_1);
PRE(bb, where, after_label);
dr_insert_clean_call(dc, bb, where, (void*)after_callee, false, 6,
opnd_create_instr(before_label),
opnd_create_instr(after_label),
OPND_CREATE_INT32(true),
OPND_CREATE_INT32(false),
OPND_CREATE_INT32(fn_idx),
OPND_CREATE_INTPTR(0));
}
}
/*****************************************************************************/
/* Instrumentation function code generation. */
/* i#988: We fail to inline if the number of arguments to the same clean call
* routine increases. empty is used for a 0 arg clean call, so we add empty_1arg
* for test_inlined_call_args(), which passes 1 arg.
*/
static instrlist_t *
codegen_empty_1arg(void *dc)
{
return codegen_empty(dc);
}
/*
callpic_pop:
push REG_XBP
mov REG_XBP, REG_XSP
call Lnext_label
Lnext_label:
pop REG_XBX
leave
ret
*/
static instrlist_t *
codegen_callpic_pop(void *dc)
{
instrlist_t *ilist = instrlist_create(dc);
instr_t *next_label = INSTR_CREATE_label(dc);
codegen_prologue(dc, ilist);
APP(ilist, INSTR_CREATE_call(dc, opnd_create_instr(next_label)));
APP(ilist, next_label);
APP(ilist, INSTR_CREATE_pop(dc, opnd_create_reg(DR_REG_XBX)));
codegen_epilogue(dc, ilist);
return ilist;
}
/*
callpic_mov:
push REG_XBP
mov REG_XBP, REG_XSP
call Lnext_instr_mov
Lnext_instr_mov:
mov REG_XBX, [REG_XSP]
leave
ret
*/
static instrlist_t *
codegen_callpic_mov(void *dc)
{
instrlist_t *ilist = instrlist_create(dc);
instr_t *next_label = INSTR_CREATE_label(dc);
codegen_prologue(dc, ilist);
APP(ilist, INSTR_CREATE_call(dc, opnd_create_instr(next_label)));
APP(ilist, next_label);
APP(ilist, INSTR_CREATE_mov_ld
(dc, opnd_create_reg(DR_REG_XBX), OPND_CREATE_MEMPTR(DR_REG_XSP, 0)));
codegen_epilogue(dc, ilist);
return ilist;
}
/* Non-leaf functions cannot be inlined.
nonleaf:
push REG_XBP
mov REG_XBP, REG_XSP
call other_func
leave
ret
other_func:
ret
*/
static instrlist_t *
codegen_nonleaf(void *dc)
{
instrlist_t *ilist = instrlist_create(dc);
instr_t *other_func = INSTR_CREATE_label(dc);
codegen_prologue(dc, ilist);
APP(ilist, INSTR_CREATE_call(dc, opnd_create_instr(other_func)));
codegen_epilogue(dc, ilist);
APP(ilist, other_func);
APP(ilist, INSTR_CREATE_ret(dc));
return ilist;
}
/* Conditional branches cannot be inlined. Avoid flags usage to make test case
* more specific.
cond_br:
push REG_XBP
mov REG_XBP, REG_XSP
mov REG_XCX, ARG1
jecxz Larg_zero
mov REG_XAX, HEX(DEADBEEF)
mov SYMREF(global_count), REG_XAX
Larg_zero:
leave
ret
*/
static instrlist_t *
codegen_cond_br(void *dc)
{
instrlist_t *ilist = instrlist_create(dc);
instr_t *arg_zero = INSTR_CREATE_label(dc);
opnd_t xcx = opnd_create_reg(DR_REG_XCX);
codegen_prologue(dc, ilist);
/* If arg1 is non-zero, write 0xDEADBEEF to global_count. */
APP(ilist, INSTR_CREATE_mov_ld(dc, xcx, codegen_opnd_arg1()));
APP(ilist, INSTR_CREATE_jecxz(dc, opnd_create_instr(arg_zero)));
APP(ilist, INSTR_CREATE_mov_imm(dc, xcx, OPND_CREATE_INTPTR(&global_count)));
APP(ilist, INSTR_CREATE_mov_st(dc, OPND_CREATE_MEMPTR(DR_REG_XCX, 0),
OPND_CREATE_INT32((int)0xDEADBEEF)));
APP(ilist, arg_zero);
codegen_epilogue(dc, ilist);
return ilist;
}
/* A function that uses 2 registers and 1 local variable, which should fill all
* of the scratch slots that the inliner uses. This used to clobber the scratch
* slots exposed to the client.
tls_clobber:
push REG_XBP
mov REG_XBP, REG_XSP
sub REG_XSP, ARG_SZ
mov REG_XAX, HEX(DEAD)
mov REG_XDX, HEX(BEEF)
mov [REG_XSP], REG_XAX
leave
ret
*/
static instrlist_t *
codegen_tls_clobber(void *dc)
{
instrlist_t *ilist = instrlist_create(dc);
opnd_t xax = opnd_create_reg(DR_REG_XAX);
opnd_t xdx = opnd_create_reg(DR_REG_XDX);
codegen_prologue(dc, ilist);
APP(ilist, INSTR_CREATE_sub
(dc, opnd_create_reg(DR_REG_XSP), OPND_CREATE_INT8(sizeof(reg_t))));
APP(ilist, INSTR_CREATE_mov_imm(dc, xax, OPND_CREATE_INT32(0xDEAD)));
APP(ilist, INSTR_CREATE_mov_imm(dc, xdx, OPND_CREATE_INT32(0xBEEF)));
APP(ilist, INSTR_CREATE_mov_st(dc, OPND_CREATE_MEMPTR(DR_REG_XSP, 0), xax));
codegen_epilogue(dc, ilist);
return ilist;
}
/* Zero the aflags. Inliner must ensure they are restored.
aflags_clobber:
push REG_XBP
mov REG_XBP, REG_XSP
mov REG_XAX, 0
add al, HEX(7F)
sahf
leave
ret
*/
static instrlist_t *
codegen_aflags_clobber(void *dc)
{
instrlist_t *ilist = instrlist_create(dc);
codegen_prologue(dc, ilist);
APP(ilist, INSTR_CREATE_mov_imm
(dc, opnd_create_reg(DR_REG_XAX), OPND_CREATE_INTPTR(0)));
APP(ilist, INSTR_CREATE_add
(dc, opnd_create_reg(DR_REG_AL), OPND_CREATE_INT8(0x7F)));
APP(ilist, INSTR_CREATE_sahf(dc));
codegen_epilogue(dc, ilist);
return ilist;
}
/*
bbcount:
push REG_XBP
mov REG_XBP, REG_XSP
inc [global_count]
leave
ret
*/
static instrlist_t *
codegen_bbcount(void *dc)
{
instrlist_t *ilist = instrlist_create(dc);
codegen_prologue(dc, ilist);
APP(ilist, INSTR_CREATE_inc(dc, OPND_CREATE_ABSMEM(&global_count, OPSZ_PTR)));
codegen_epilogue(dc, ilist);
return ilist;
}
/* Reduced code from inscount generated by gcc47 -O0.
gcc47_inscount:
#ifdef X64
push %rbp
mov %rsp,%rbp
mov %rdi,-0x8(%rbp)
mov global_count(%rip),%rdx
mov -0x8(%rbp),%rax
add %rdx,%rax
mov %rax,global_count(%rip)
pop %rbp
retq
#else
push %ebp
mov %esp,%ebp
call pic_thunk
add $0x1c86,%ecx
mov global_count(%ecx),%edx
mov 0x8(%ebp),%eax
add %edx,%eax
mov %eax,global_count(%ecx)
pop %ebp
ret
pic_thunk:
mov (%esp),%ecx
ret
#endif
*/
static instrlist_t *
codegen_gcc47_inscount(void *dc)
{
instrlist_t *ilist = instrlist_create(dc);
opnd_t global;
opnd_t xax = opnd_create_reg(DR_REG_XAX);
opnd_t xdx = opnd_create_reg(DR_REG_XDX);
#ifdef X64
/* This local is past TOS. That's OK by the sysv x64 ABI. */
opnd_t local = OPND_CREATE_MEMPTR(DR_REG_XBP, -(int)sizeof(reg_t));
codegen_prologue(dc, ilist);
global = opnd_create_rel_addr(&global_count, OPSZ_PTR);
APP(ilist, INSTR_CREATE_mov_st(dc, local, codegen_opnd_arg1()));
APP(ilist, INSTR_CREATE_mov_ld(dc, xdx, global));
APP(ilist, INSTR_CREATE_mov_ld(dc, xax, local));
APP(ilist, INSTR_CREATE_add(dc, xax, xdx));
APP(ilist, INSTR_CREATE_mov_st(dc, global, xax));
codegen_epilogue(dc, ilist);
#else
instr_t *pic_thunk = INSTR_CREATE_mov_ld
(dc, opnd_create_reg(DR_REG_XCX), OPND_CREATE_MEMPTR(DR_REG_XSP, 0));
codegen_prologue(dc, ilist);
/* XXX: Do a real 32-bit PIC-style access. For now we just use an absolute
* reference since we're 32-bit and everything is reachable.
*/
global = opnd_create_abs_addr(&global_count, OPSZ_PTR);
APP(ilist, INSTR_CREATE_call(dc, opnd_create_instr(pic_thunk)));
APP(ilist, INSTR_CREATE_add(dc, opnd_create_reg(DR_REG_XCX),
OPND_CREATE_INT32(0x0)));
APP(ilist, INSTR_CREATE_mov_ld(dc, xdx, global));
APP(ilist, INSTR_CREATE_mov_ld(dc, xax, codegen_opnd_arg1()));
APP(ilist, INSTR_CREATE_add(dc, xax, xdx));
APP(ilist, INSTR_CREATE_mov_st(dc, global, xax));
codegen_epilogue(dc, ilist);
APP(ilist, pic_thunk);
APP(ilist, INSTR_CREATE_ret(dc));
#endif
return ilist;
}
| 1 | 11,936 | Ditto, here and below | DynamoRIO-dynamorio | c |
@@ -61,7 +61,7 @@ export function initDebug() {
}
for (const key in vnode.props) {
- if (key[0]==='o' && key[1]==='n' && typeof vnode.props[key]!=='function' && vnode.props[key]!=null) {
+ if (key[0]==='o' && key[1]==='n' && typeof vnode.props[key]!=='function' && vnode.props[key]!=null && typeof vnode.type!=='function') {
throw new Error(
`Component's "${key}" property should be a function, ` +
`but got [${typeof vnode.props[key]}] instead\n` + | 1 | import { checkPropTypes } from './check-props';
import { getDisplayName } from './devtools/custom';
import { options, toChildArray } from 'preact';
import { ELEMENT_NODE, DOCUMENT_NODE, DOCUMENT_FRAGMENT_NODE } from './constants';
export function initDebug() {
/* eslint-disable no-console */
let oldBeforeDiff = options.diff;
let oldDiffed = options.diffed;
let oldVnode = options.vnode;
options.root = (vnode, parentNode) => {
if (!parentNode) {
throw new Error('Undefined parent passed to render(), this is the second argument.\nCheck if the element is available in the DOM/has the correct id.');
}
let isValid;
switch (parentNode.nodeType) {
case ELEMENT_NODE:
case DOCUMENT_FRAGMENT_NODE:
case DOCUMENT_NODE: isValid = true; break;
default: isValid = false;
}
if (!isValid) throw new Error(`
Expected a valid HTML node as a second argument to render.
Received ${parentNode} instead: render(<${vnode.type.name || vnode.type} />, ${parentNode});
`);
};
options.diff = vnode => {
let { type, props } = vnode;
let children = props && props.children;
if (type===undefined) {
throw new Error('Undefined component passed to createElement()\n\n'+
'You likely forgot to export your component or might have mixed up default and named imports'+
serializeVNode(vnode));
}
else if (type!=null && typeof type==='object') {
if (type._lastDomChild!==undefined && type._dom!==undefined) {
let info = 'Did you accidentally pass a JSX literal as JSX twice?\n\n'+
' let My'+getDisplayName(type)+' = '+serializeVNode(type)+';\n'+
' let vnode = <My'+getDisplayName(type)+' />;\n\n'+
'This usually happens when you export a JSX literal and not the component.';
throw new Error('Invalid type passed to createElement(): '+type+'\n\n'+info+'\n');
}
throw new Error('Invalid type passed to createElement(): '+(Array.isArray(type) ? 'array' : type));
}
if (
vnode.ref!==undefined &&
typeof vnode.ref!=='function' &&
typeof vnode.ref!=='object' &&
!('$$typeof' in vnode) // allow string refs when preact-compat is installed
) {
throw new Error(
`Component's "ref" property should be a function, or an object created ` +
`by createRef(), but got [${typeof vnode.ref}] instead\n` +
serializeVNode(vnode)
);
}
for (const key in vnode.props) {
if (key[0]==='o' && key[1]==='n' && typeof vnode.props[key]!=='function' && vnode.props[key]!=null) {
throw new Error(
`Component's "${key}" property should be a function, ` +
`but got [${typeof vnode.props[key]}] instead\n` +
serializeVNode(vnode)
);
}
}
// Check prop-types if available
if (typeof vnode.type==='function' && vnode.type.propTypes) {
checkPropTypes(vnode.type.propTypes, vnode.props, getDisplayName(vnode), serializeVNode(vnode));
}
let keys = [];
for (let deepChild of toChildArray(children)) {
if (!deepChild || deepChild.key==null) continue;
let key = deepChild.key;
if (keys.indexOf(key) !== -1) {
console.error(
'Following component has two or more children with the ' +
`same key attribute: "${key}". This may cause glitches and misbehavior ` +
'in rendering process. Component: \n\n' +
serializeVNode(vnode)
);
// Break early to not spam the console
break;
}
keys.push(key);
}
if (oldBeforeDiff) oldBeforeDiff(vnode);
};
const warn = (property, err) => ({
get() {
throw new Error(`getting vnode.${property} is deprecated, ${err}`);
},
set() {
throw new Error(`setting vnode.${property} is not allowed, ${err}`);
}
});
const deprecatedAttributes = {
nodeName: warn('nodeName', 'use vnode.type'),
attributes: warn('attributes', 'use vnode.props'),
children: warn('children', 'use vnode.props.children')
};
options.vnode = (vnode) => {
Object.defineProperties(vnode, deprecatedAttributes);
if (oldVnode) oldVnode(vnode);
};
options.diffed = (vnode) => {
if (vnode._component && vnode._component.__hooks) {
let hooks = vnode._component.__hooks;
if (hooks._list.length > 0) {
hooks._list.forEach(hook => {
if (hook._callback && (!hook._args || !Array.isArray(hook._args))) {
console.warn(
`In ${vnode.type.name || vnode.type} you are calling useMemo/useCallback without passing arguments.\n` +
`This is a noop since it will not be able to memoize, it will execute it every render.`
);
}
});
}
if (hooks._pendingEffects.length > 0) {
hooks._pendingEffects.forEach((effect) => {
if (!effect._args || !Array.isArray(effect._args)) {
throw new Error('You should provide an array of arguments as the second argument to the "useEffect" hook.\n\n' +
'Not doing so will invoke this effect on every render.\n\n' +
'This effect can be found in the render of ' + (vnode.type.name || vnode.type) + '.');
}
});
}
if (hooks._pendingLayoutEffects.length > 0) {
hooks._pendingLayoutEffects.forEach((layoutEffect) => {
if (!layoutEffect._args || !Array.isArray(layoutEffect._args)) {
throw new Error('You should provide an array of arguments as the second argument to the "useEffect" hook.\n\n' +
'Not doing so will invoke this effect on every render.\n\n' +
'This effect can be found in the render of ' + (vnode.type.name || vnode.type) + '.');
}
});
}
}
if (oldDiffed) oldDiffed(vnode);
};
}
/**
* Serialize a vnode tree to a string
* @param {import('./internal').VNode} vnode
* @returns {string}
*/
export function serializeVNode(vnode) {
let { props } = vnode;
let name = getDisplayName(vnode);
let attrs = '';
if (props) {
for (let prop in props) {
if (props.hasOwnProperty(prop) && prop!=='children') {
let value = props[prop];
// If it is an object but doesn't have toString(), use Object.toString
if (typeof value==='function') {
value = `function ${value.displayName || value.name}() {}`;
}
value = Object(value) === value && !value.toString
? Object.prototype.toString.call(value)
: value + '';
attrs += ` ${prop}=${JSON.stringify(value)}`;
}
}
}
let children = props.children;
return `<${name}${attrs}${children && children.length
? '>..</'+name+'>'
: ' />'}`;
}
| 1 | 13,101 | Let's move the check outside the `for`-loop so that we don't have to do it for every prop. If we change it to check for `vnode.type === 'string'` we can automatically skip checking text nodes which don't have props | preactjs-preact | js |
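A rough sketch of the restructuring described here, using the same names as the surrounding `options.diff` hook (illustrative only, not the final Preact change):

	// Sketch: hoist the vnode.type check out of the loop and only walk props for
	// DOM vnodes (vnode.type==='string'), so components and text nodes are skipped.
	if (typeof vnode.type==='string') {
		for (const key in vnode.props) {
			if (key[0]==='o' && key[1]==='n' && typeof vnode.props[key]!=='function' && vnode.props[key]!=null) {
				throw new Error(
					`Component's "${key}" property should be a function, ` +
					`but got [${typeof vnode.props[key]}] instead\n` +
					serializeVNode(vnode)
				);
			}
		}
	}

With the check hoisted, the per-vnode comparison runs once instead of once per prop, and prop inspection happens only for DOM elements.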
@@ -19,6 +19,17 @@ class TablePlot(BokehPlot, GenericElementPlot):
height = param.Number(default=None)
+ finalize_hooks = param.HookList(default=[], doc="""
+ Optional list of hooks called when finalizing a column
+ (deprecated in favor of hooks). The hook is passed the plot
+ object and the displayed object, and other plotting handles
+ can be accessed via plot.handles.""")
+
+ hooks = param.HookList(default=[], doc="""
+ Optional list of hooks called when finalizing a column. The
+ hook is passed the plot object and the displayed object, and
+ other plotting handles can be accessed via plot.handles.""")
+
width = param.Number(default=400)
style_opts = ( | 1 | import param
from bokeh.models import Column
from bokeh.models.widgets import (
DataTable, TableColumn, NumberEditor, NumberFormatter, DateFormatter,
DateEditor, StringFormatter, StringEditor, IntEditor
)
from ...core import Dataset, Dimension
from ...element import ItemTable
from ...streams import Buffer
from ...core.util import dimension_sanitizer, datetime_types
from ..plot import GenericElementPlot
from .plot import BokehPlot
from .util import bokeh_version
class TablePlot(BokehPlot, GenericElementPlot):
height = param.Number(default=None)
width = param.Number(default=400)
style_opts = (
['row_headers', 'selectable', 'editable',
'sortable', 'fit_columns', 'scroll_to_selection'] +
(['index_position'] if bokeh_version >= '0.12.15' else [])
)
finalize_hooks = param.HookList(default=[], doc="""
Optional list of hooks called when finalizing a column.
The hook is passed the plot object and the displayed
object, and other plotting handles can be accessed via plot.handles.""")
_stream_data = True
def __init__(self, element, plot=None, **params):
super(TablePlot, self).__init__(element, **params)
self.handles = {} if plot is None else self.handles['plot']
element_ids = self.hmap.traverse(lambda x: id(x), [Dataset, ItemTable])
self.static = len(set(element_ids)) == 1 and len(self.keys) == len(self.hmap)
self.callbacks = self._construct_callbacks()
self.streaming = [s for s in self.streams if isinstance(s, Buffer)]
self.static_source = False
def get_data(self, element, ranges, style):
return ({dimension_sanitizer(d.name): element.dimension_values(d)
for d in element.dimensions()}, {}, style)
def initialize_plot(self, ranges=None, plot=None, plots=None, source=None):
"""
Initializes a new plot object with the last available frame.
"""
# Get element key and ranges for frame
element = self.hmap.last
key = self.keys[-1]
self.current_frame = element
self.current_key = key
style = self.lookup_options(element, 'style')[self.cyclic_index]
data, _, style = self.get_data(element, ranges, style)
if source is None:
source = self._init_datasource(data)
self.handles['source'] = self.handles['cds'] = source
columns = self._get_columns(element, data)
style['reorderable'] = False
table = DataTable(source=source, columns=columns, height=self.height,
width=self.width, **style)
self.handles['table'] = table
self.handles['glyph_renderer'] = table
self._execute_hooks(element)
self.drawn = True
for cb in self.callbacks:
cb.initialize()
title = self._get_title_div(self.keys[-1], '10pt')
if title:
plot = Column(title, table)
self.handles['title'] = title
else:
plot = table
self.handles['plot'] = plot
return plot
def _get_columns(self, element, data):
columns = []
for d in element.dimensions():
col = dimension_sanitizer(d.name)
kind = data[col].dtype.kind
if kind == 'i':
formatter = NumberFormatter()
editor = IntEditor()
elif kind == 'f':
formatter = NumberFormatter(format='0,0.0[00000]')
editor = NumberEditor()
elif kind == 'M' or (kind == 'O' and len(data[col]) and type(data[col][0]) in datetime_types):
dimtype = element.get_dimension_type(col)
dformat = Dimension.type_formatters.get(dimtype, '%Y-%m-%d %H:%M:%S')
formatter = DateFormatter(format=dformat)
editor = DateEditor()
else:
formatter = StringFormatter()
editor = StringEditor()
column = TableColumn(field=dimension_sanitizer(d.name), title=d.pprint_label,
editor=editor, formatter=formatter)
columns.append(column)
return columns
def update_frame(self, key, ranges=None, plot=None):
"""
Updates an existing plot with data corresponding
to the key.
"""
element = self._get_frame(key)
self._get_title_div(key, '12pt')
# Cache frame object id to skip updating data if unchanged
previous_id = self.handles.get('previous_id', None)
current_id = element._plot_id
self.handles['previous_id'] = current_id
self.static_source = (self.dynamic and (current_id == previous_id))
if (element is None or (not self.dynamic and self.static) or
(self.streaming and self.streaming[0].data is self.current_frame.data
and not self.streaming[0]._triggering) or self.static_source):
return
source = self.handles['source']
style = self.lookup_options(element, 'style')[self.cyclic_index]
data, _, style = self.get_data(element, ranges, style)
columns = self._get_columns(element, data)
self.handles['table'].columns = columns
self._update_datasource(source, data)
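A sketch of how the deprecated parameter's docstring could be shortened along the lines of the review note attached to this record; the wording and the standalone class here are illustrative only, not the merged code.

```python
import param


class TablePlotSketch(param.Parameterized):
    """Illustration of the suggested docstring wording only."""

    finalize_hooks = param.HookList(default=[], doc="""
        Deprecated; use `hooks` instead.""")

    hooks = param.HookList(default=[], doc="""
        Optional list of hooks called when finalizing a column. The
        hook is passed the plot object and the displayed object, and
        other plotting handles can be accessed via plot.handles.""")
```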
| 1 | 21,838 | Seems like the docstring here should simply be "Deprecated; use `hooks` instead.". | holoviz-holoviews | py |
@@ -62,6 +62,8 @@ public class FeedItemlistAdapter extends BaseAdapter {
if(UserPreferences.getTheme() == R.style.Theme_AntennaPod_Dark) {
playingBackGroundColor = ContextCompat.getColor(context, R.color.highlight_dark);
+ } else if(UserPreferences.getTheme() == R.style.Theme_AntennaPod_TrueBlack) {
+ playingBackGroundColor = ContextCompat.getColor(context, R.color.highlight_trueblack);
} else {
playingBackGroundColor = ContextCompat.getColor(context, R.color.highlight_light);
} | 1 | package de.danoeh.antennapod.adapter;
import android.content.Context;
import android.content.res.TypedArray;
import android.os.Build;
import android.support.v4.content.ContextCompat;
import android.text.Layout;
import android.view.LayoutInflater;
import android.view.View;
import android.view.View.OnClickListener;
import android.view.ViewGroup;
import android.widget.Adapter;
import android.widget.BaseAdapter;
import android.widget.ImageButton;
import android.widget.ImageView;
import android.widget.LinearLayout;
import android.widget.ProgressBar;
import android.widget.TextView;
import de.danoeh.antennapod.R;
import de.danoeh.antennapod.core.feed.FeedItem;
import de.danoeh.antennapod.core.feed.FeedMedia;
import de.danoeh.antennapod.core.feed.MediaType;
import de.danoeh.antennapod.core.preferences.UserPreferences;
import de.danoeh.antennapod.core.storage.DownloadRequester;
import de.danoeh.antennapod.core.util.DateUtils;
import de.danoeh.antennapod.core.util.LongList;
import de.danoeh.antennapod.core.util.ThemeUtils;
/**
* List adapter for items of feeds that the user has already subscribed to.
*/
public class FeedItemlistAdapter extends BaseAdapter {
private final ActionButtonCallback callback;
private final ItemAccess itemAccess;
private final Context context;
private final boolean showFeedtitle;
private final int selectedItemIndex;
/** true if played items should be made partially transparent */
private final boolean makePlayedItemsTransparent;
private final ActionButtonUtils actionButtonUtils;
private static final int SELECTION_NONE = -1;
private final int playingBackGroundColor;
private final int normalBackGroundColor;
public FeedItemlistAdapter(Context context,
ItemAccess itemAccess,
ActionButtonCallback callback,
boolean showFeedtitle,
boolean makePlayedItemsTransparent) {
super();
this.callback = callback;
this.context = context;
this.itemAccess = itemAccess;
this.showFeedtitle = showFeedtitle;
this.selectedItemIndex = SELECTION_NONE;
this.actionButtonUtils = new ActionButtonUtils(context);
this.makePlayedItemsTransparent = makePlayedItemsTransparent;
if(UserPreferences.getTheme() == R.style.Theme_AntennaPod_Dark) {
playingBackGroundColor = ContextCompat.getColor(context, R.color.highlight_dark);
} else {
playingBackGroundColor = ContextCompat.getColor(context, R.color.highlight_light);
}
normalBackGroundColor = ContextCompat.getColor(context, android.R.color.transparent);
}
@Override
public int getCount() {
return itemAccess.getCount();
}
@Override
public long getItemId(int position) {
return position;
}
@Override
public FeedItem getItem(int position) {
return itemAccess.getItem(position);
}
@Override
@SuppressWarnings("ResourceType")
public View getView(final int position, View convertView, ViewGroup parent) {
Holder holder;
final FeedItem item = getItem(position);
if (convertView == null) {
holder = new Holder();
LayoutInflater inflater = (LayoutInflater) context
.getSystemService(Context.LAYOUT_INFLATER_SERVICE);
convertView = inflater.inflate(R.layout.feeditemlist_item, parent, false);
holder.container = (LinearLayout) convertView
.findViewById(R.id.container);
holder.title = (TextView) convertView.findViewById(R.id.txtvItemname);
if(Build.VERSION.SDK_INT >= 23) {
holder.title.setHyphenationFrequency(Layout.HYPHENATION_FREQUENCY_FULL);
}
holder.lenSize = (TextView) convertView
.findViewById(R.id.txtvLenSize);
holder.butAction = (ImageButton) convertView
.findViewById(R.id.butSecondaryAction);
holder.published = (TextView) convertView
.findViewById(R.id.txtvPublished);
holder.inPlaylist = (ImageView) convertView
.findViewById(R.id.imgvInPlaylist);
holder.type = (ImageView) convertView.findViewById(R.id.imgvType);
holder.statusUnread = convertView
.findViewById(R.id.statusUnread);
holder.episodeProgress = (ProgressBar) convertView
.findViewById(R.id.pbar_episode_progress);
convertView.setTag(holder);
} else {
holder = (Holder) convertView.getTag();
}
if (!(getItemViewType(position) == Adapter.IGNORE_ITEM_VIEW_TYPE)) {
convertView.setVisibility(View.VISIBLE);
if (position == selectedItemIndex) {
convertView.setBackgroundColor(ContextCompat.getColor(convertView.getContext(),
ThemeUtils.getSelectionBackgroundColor()));
} else {
convertView.setBackgroundResource(0);
}
StringBuilder buffer = new StringBuilder(item.getTitle());
if (showFeedtitle) {
buffer.append(" (");
buffer.append(item.getFeed().getTitle());
buffer.append(")");
}
holder.title.setText(buffer.toString());
if(item.isNew()) {
holder.statusUnread.setVisibility(View.VISIBLE);
} else {
holder.statusUnread.setVisibility(View.INVISIBLE);
}
if(item.isPlayed() && makePlayedItemsTransparent) {
convertView.setAlpha(0.5f);
} else {
convertView.setAlpha(1.0f);
}
String pubDateStr = DateUtils.formatAbbrev(context, item.getPubDate());
holder.published.setText(pubDateStr);
boolean isInQueue = item.isTagged(FeedItem.TAG_QUEUE);
FeedMedia media = item.getMedia();
if (media == null) {
holder.episodeProgress.setVisibility(View.INVISIBLE);
holder.inPlaylist.setVisibility(View.INVISIBLE);
holder.type.setVisibility(View.INVISIBLE);
holder.lenSize.setVisibility(View.INVISIBLE);
} else {
AdapterUtils.updateEpisodePlaybackProgress(item, holder.lenSize, holder.episodeProgress);
if (isInQueue) {
holder.inPlaylist.setVisibility(View.VISIBLE);
} else {
holder.inPlaylist.setVisibility(View.INVISIBLE);
}
if (DownloadRequester.getInstance().isDownloadingFile(item.getMedia())) {
holder.episodeProgress.setVisibility(View.VISIBLE);
holder.episodeProgress.setProgress(itemAccess.getItemDownloadProgressPercent(item));
} else {
if(media.getPosition() == 0) {
holder.episodeProgress.setVisibility(View.INVISIBLE);
}
}
TypedArray typeDrawables = context.obtainStyledAttributes(
new int[]{R.attr.type_audio, R.attr.type_video});
final int[] labels = new int[]{R.string.media_type_audio_label, R.string.media_type_video_label};
MediaType mediaType = item.getMedia().getMediaType();
if (mediaType == MediaType.AUDIO) {
holder.type.setImageDrawable(typeDrawables.getDrawable(0));
holder.type.setContentDescription(context.getString(labels[0]));
holder.type.setVisibility(View.VISIBLE);
} else if (mediaType == MediaType.VIDEO) {
holder.type.setImageDrawable(typeDrawables.getDrawable(1));
holder.type.setContentDescription(context.getString(labels[1]));
holder.type.setVisibility(View.VISIBLE);
} else {
holder.type.setImageBitmap(null);
holder.type.setVisibility(View.GONE);
}
typeDrawables.recycle();
if(media.isCurrentlyPlaying()) {
holder.container.setBackgroundColor(playingBackGroundColor);
} else {
holder.container.setBackgroundColor(normalBackGroundColor);
}
}
actionButtonUtils.configureActionButton(holder.butAction, item, isInQueue);
holder.butAction.setFocusable(false);
holder.butAction.setTag(item);
holder.butAction.setOnClickListener(butActionListener);
} else {
convertView.setVisibility(View.GONE);
}
return convertView;
}
private final OnClickListener butActionListener = new OnClickListener() {
@Override
public void onClick(View v) {
FeedItem item = (FeedItem) v.getTag();
callback.onActionButtonPressed(item, itemAccess.getQueueIds());
}
};
static class Holder {
LinearLayout container;
TextView title;
TextView published;
TextView lenSize;
ImageView type;
ImageView inPlaylist;
ImageButton butAction;
View statusUnread;
ProgressBar episodeProgress;
}
public interface ItemAccess {
int getItemDownloadProgressPercent(FeedItem item);
int getCount();
FeedItem getItem(int position);
LongList getQueueIds();
}
}
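The review note attached to this record suggests resolving the highlight colour through a theme attribute instead of branching on `UserPreferences.getTheme()` in every adapter. A minimal sketch of that approach; `R.attr.colorPlayingBackground` is a hypothetical attribute that would also need an `<attr>` declaration plus a value in each theme (light, dark, true black) in the XML resources.

```java
import android.content.Context;
import android.content.res.TypedArray;
import android.support.v4.content.ContextCompat;

// Hypothetical helper (e.g. in ThemeUtils): resolve the playing-item
// background colour from whichever theme is currently applied.
static int resolvePlayingBackground(Context context) {
    TypedArray ta = context.obtainStyledAttributes(new int[]{R.attr.colorPlayingBackground});
    try {
        return ta.getColor(0, ContextCompat.getColor(context, android.R.color.transparent));
    } finally {
        ta.recycle();
    }
}
```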
| 1 | 13,685 | It would be nice to have an `attr` in `styles.xml` for this, so we don't need the same `if` statement in multiple places | AntennaPod-AntennaPod | java |
@@ -0,0 +1,10 @@
+class CreateTeachersJoinTable < ActiveRecord::Migration
+ def change
+ create_table :teachers do |t|
+ t.belongs_to :user
+ t.belongs_to :workshop
+ end
+
+ add_index :teachers, [:user_id, :workshop_id], unique: true
+ end
+end | 1 | 1 | 9,049 | How about `null: false` on these two lines? Also `t.timestamps null: false`? | thoughtbot-upcase | rb |
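A sketch of the migration with the `null: false` constraints the reviewer asks about; whether timestamps belong on this join table is the author's call.

```ruby
class CreateTeachersJoinTable < ActiveRecord::Migration
  def change
    create_table :teachers do |t|
      t.belongs_to :user, null: false
      t.belongs_to :workshop, null: false

      t.timestamps null: false
    end

    add_index :teachers, [:user_id, :workshop_id], unique: true
  end
end
```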
|
@@ -183,6 +183,12 @@ public class HiveTableOperations extends BaseMetastoreTableOperations {
}
threw = false;
} catch (TException | UnknownHostException e) {
+ if (e.getMessage().contains("Table/View 'HIVE_LOCKS' does not exist")) {
+ LOG.error("Failed to acquire locks from metastore because 'HIVE_LOCKS' doesn't exist, " +
+ "this probably happened when using embedded metastore or doesn't create transactional" +
+ " meta table. Please reconfigure and start the metastore.", e);
+ }
+
throw new RuntimeException(String.format("Metastore operation failed for %s.%s", database, tableName), e);
} catch (InterruptedException e) { | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.hive;
import com.google.common.collect.Lists;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.stream.Collectors;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.TableType;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.LockComponent;
import org.apache.hadoop.hive.metastore.api.LockLevel;
import org.apache.hadoop.hive.metastore.api.LockRequest;
import org.apache.hadoop.hive.metastore.api.LockResponse;
import org.apache.hadoop.hive.metastore.api.LockState;
import org.apache.hadoop.hive.metastore.api.LockType;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.hadoop.hive.metastore.api.SerDeInfo;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.iceberg.BaseMetastoreTableOperations;
import org.apache.iceberg.Schema;
import org.apache.iceberg.TableMetadata;
import org.apache.iceberg.exceptions.CommitFailedException;
import org.apache.iceberg.exceptions.NoSuchTableException;
import org.apache.iceberg.hadoop.HadoopFileIO;
import org.apache.iceberg.io.FileIO;
import org.apache.thrift.TException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* TODO we should be able to extract some more commonalities to BaseMetastoreTableOperations to
* avoid code duplication between this class and Metacat Tables.
*/
public class HiveTableOperations extends BaseMetastoreTableOperations {
private static final Logger LOG = LoggerFactory.getLogger(HiveTableOperations.class);
private final HiveClientPool metaClients;
private final String database;
private final String tableName;
private final Configuration conf;
private FileIO fileIO;
protected HiveTableOperations(Configuration conf, HiveClientPool metaClients, String database, String table) {
this.conf = conf;
this.metaClients = metaClients;
this.database = database;
this.tableName = table;
}
@Override
public FileIO io() {
if (fileIO == null) {
fileIO = new HadoopFileIO(conf);
}
return fileIO;
}
@Override
public TableMetadata refresh() {
String metadataLocation = null;
try {
final Table table = metaClients.run(client -> client.getTable(database, tableName));
String tableType = table.getParameters().get(TABLE_TYPE_PROP);
if (tableType == null || !tableType.equalsIgnoreCase(ICEBERG_TABLE_TYPE_VALUE)) {
throw new IllegalArgumentException(String.format("Invalid tableName, not Iceberg: %s.%s", database, table));
}
metadataLocation = table.getParameters().get(METADATA_LOCATION_PROP);
if (metadataLocation == null) {
String errMsg = String.format("%s.%s is missing %s property", database, tableName, METADATA_LOCATION_PROP);
throw new IllegalArgumentException(errMsg);
}
} catch (NoSuchObjectException e) {
if (currentMetadataLocation() != null) {
throw new NoSuchTableException(String.format("No such table: %s.%s", database, tableName));
}
} catch (TException e) {
String errMsg = String.format("Failed to get table info from metastore %s.%s", database, tableName);
throw new RuntimeException(errMsg, e);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new RuntimeException("Interrupted during refresh", e);
}
refreshFromMetadataLocation(metadataLocation);
return current();
}
@Override
public void commit(TableMetadata base, TableMetadata metadata) {
// if the metadata is already out of date, reject it
if (base != current()) {
throw new CommitFailedException("Cannot commit: stale table metadata for %s.%s", database, tableName);
}
// if the metadata is not changed, return early
if (base == metadata) {
LOG.info("Nothing to commit.");
return;
}
String newMetadataLocation = writeNewMetadata(metadata, currentVersion() + 1);
boolean threw = true;
Optional<Long> lockId = Optional.empty();
try {
lockId = Optional.of(acquireLock());
// TODO add lock heart beating for cases where default lock timeout is too low.
Table tbl;
if (base != null) {
tbl = metaClients.run(client -> client.getTable(database, tableName));
} else {
final long currentTimeMillis = System.currentTimeMillis();
tbl = new Table(tableName,
database,
System.getProperty("user.name"),
(int) currentTimeMillis / 1000,
(int) currentTimeMillis / 1000,
Integer.MAX_VALUE,
storageDescriptor(metadata),
Collections.emptyList(),
new HashMap<>(),
null,
null,
TableType.EXTERNAL_TABLE.toString());
tbl.getParameters().put("EXTERNAL", "TRUE"); // using the external table type also requires this
}
tbl.setSd(storageDescriptor(metadata)); // set to pickup any schema changes
final String metadataLocation = tbl.getParameters().get(METADATA_LOCATION_PROP);
if (!Objects.equals(currentMetadataLocation(), metadataLocation)) {
String errMsg = String.format("metadataLocation = %s is not same as table metadataLocation %s for %s.%s",
currentMetadataLocation(), metadataLocation, database, tableName);
throw new CommitFailedException(errMsg);
}
setParameters(newMetadataLocation, tbl);
if (base != null) {
metaClients.run(client -> {
client.alter_table(database, tableName, tbl);
return null;
});
} else {
metaClients.run(client -> {
client.createTable(tbl);
return null;
});
}
threw = false;
} catch (TException | UnknownHostException e) {
throw new RuntimeException(String.format("Metastore operation failed for %s.%s", database, tableName), e);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new RuntimeException("Interrupted during commit", e);
} finally {
if (threw) {
// if anything went wrong, clean up the uncommitted metadata file
io().deleteFile(newMetadataLocation);
}
unlock(lockId);
}
requestRefresh();
}
private void setParameters(String newMetadataLocation, Table tbl) {
Map<String, String> parameters = tbl.getParameters();
if (parameters == null) {
parameters = new HashMap<>();
}
parameters.put(TABLE_TYPE_PROP, ICEBERG_TABLE_TYPE_VALUE.toUpperCase(Locale.ENGLISH));
parameters.put(METADATA_LOCATION_PROP, newMetadataLocation);
if (currentMetadataLocation() != null && !currentMetadataLocation().isEmpty()) {
parameters.put(PREVIOUS_METADATA_LOCATION_PROP, currentMetadataLocation());
}
tbl.setParameters(parameters);
}
private StorageDescriptor storageDescriptor(TableMetadata metadata) {
final StorageDescriptor storageDescriptor = new StorageDescriptor();
storageDescriptor.setCols(columns(metadata.schema()));
storageDescriptor.setLocation(metadata.location());
storageDescriptor.setOutputFormat("org.apache.hadoop.mapred.FileOutputFormat");
storageDescriptor.setInputFormat("org.apache.hadoop.mapred.FileInputFormat");
SerDeInfo serDeInfo = new SerDeInfo();
serDeInfo.setSerializationLib("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe");
storageDescriptor.setSerdeInfo(serDeInfo);
return storageDescriptor;
}
private List<FieldSchema> columns(Schema schema) {
return schema.columns().stream()
.map(col -> new FieldSchema(col.name(), HiveTypeConverter.convert(col.type()), ""))
.collect(Collectors.toList());
}
private long acquireLock() throws UnknownHostException, TException, InterruptedException {
final LockComponent lockComponent = new LockComponent(LockType.EXCLUSIVE, LockLevel.TABLE, database);
lockComponent.setTablename(tableName);
final LockRequest lockRequest = new LockRequest(Lists.newArrayList(lockComponent),
System.getProperty("user.name"),
InetAddress.getLocalHost().getHostName());
LockResponse lockResponse = metaClients.run(client -> client.lock(lockRequest));
LockState state = lockResponse.getState();
long lockId = lockResponse.getLockid();
//TODO add timeout
while (state.equals(LockState.WAITING)) {
lockResponse = metaClients.run(client -> client.checkLock(lockId));
state = lockResponse.getState();
}
if (!state.equals(LockState.ACQUIRED)) {
throw new CommitFailedException(String.format("Could not acquire the lock on %s.%s, " +
"lock request ended in state %s", database, tableName, state));
}
return lockId;
}
private void unlock(Optional<Long> lockId) {
if (lockId.isPresent()) {
try {
metaClients.run(client -> {
client.unlock(lockId.get());
return null;
});
} catch (Exception e) {
throw new RuntimeException(String.format("Failed to unlock %s.%s", database, tableName), e);
}
}
}
}
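A sketch of the reworded error handling along the lines of the review note attached to this record; the exact message text is a suggestion only, and the null guard on `getMessage()` is an extra precaution, not part of the original patch.

```java
} catch (TException | UnknownHostException e) {
  if (e.getMessage() != null && e.getMessage().contains("Table/View 'HIVE_LOCKS' does not exist")) {
    LOG.error("Failed to acquire locks from metastore because the metastore table 'HIVE_LOCKS' " +
        "does not exist. This usually means an embedded metastore is in use, or the " +
        "transactional tables were never created. To fix this, use an alternative metastore.", e);
  }
  throw new RuntimeException(String.format("Metastore operation failed for %s.%s", database, tableName), e);
}
```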
| 1 | 15,336 | This recommendation isn't very helpful because it isn't clear what "the metastore" is. How about this instead: "To fix this, use an alternative metastore". | apache-iceberg | java |
@@ -87,6 +87,11 @@ public class SymbolTable {
return desiredName + suffix;
}
+ /** Returns true if a {@code symbol} is already used. */
+ public boolean isSymbolUsed(Name symbol) {
+ return symbolTable.contains(symbol.toLowerUnderscore());
+ }
+
/**
* Returns the next numeric suffix that makes desiredName unique.
* | 1 | /* Copyright 2016 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.api.codegen.util;
import com.google.common.base.Strings;
import java.util.Comparator;
import java.util.HashSet;
import java.util.Set;
import java.util.TreeSet;
/**
* A utility class used to get and store unique symbols.
*
* <p>If a symbol is already used, the table will try to append an index number onto the end of it.
* The index will keep increasing until an unused symbol is found.
*/
public class SymbolTable {
private final Set<String> symbolTable;
public SymbolTable() {
symbolTable = new HashSet<>();
}
/**
* Create a SymbolTable with a custom comparison function. This can be used, for example, to make
* a case-insensitive symbol table by using a comparison function that orders two strings the same
* if they are the same in lowercase.
*
* @param comparator function to determine equality on Strings
*/
public SymbolTable(Comparator<String> comparator) {
symbolTable = new TreeSet<>(comparator);
}
/**
* Returns a new SymbolTable seeded with all the words in seed.
*
* <p>For example, if seed is {"int"}, a subsequent call to {@link #getNewSymbol(String)} for
* "int" will return "int2".
*
* <p>The behavior of the returned SymbolTable is guaranteed if used with {@link
* #getNewSymbol(String)}, but not with {@link #getNewSymbol(Name)}.
*/
public static SymbolTable fromSeed(Set<String> seed) {
SymbolTable symbolTable = new SymbolTable();
for (String s : seed) {
symbolTable.getNewSymbol(s);
}
return symbolTable;
}
/**
* Returns a unique name, with a numeric suffix in case of conflicts.
*
* <p>Not guaranteed to work as expected if used in combination with {@link
* #getNewSymbol(String)}.
*/
public Name getNewSymbol(Name desiredName) {
String lower = desiredName.toLowerUnderscore();
String suffix = getAndSaveSuffix(lower);
if (Strings.isNullOrEmpty(suffix)) {
return desiredName;
}
return desiredName.join(suffix);
}
/**
* Returns a unique name, with a numeric suffix in case of conflicts.
*
* <p>Not guaranteed to work as expected if used in combination with {@link #getNewSymbol(Name)}.
*/
public String getNewSymbol(String desiredName) {
String suffix = getAndSaveSuffix(desiredName);
return desiredName + suffix;
}
/**
* Returns the next numeric suffix that makes desiredName unique.
*
* <p>Stores the joined desiredName/suffix in an internal map. For example, if "foo" is passed, ""
* is returned. If "foo" is passed again, "2" is returned, and then "3" and so on.
*/
private String getAndSaveSuffix(String desiredName) {
if (!symbolTable.contains(desiredName)) {
symbolTable.add(desiredName);
return "";
}
// Resolve collisions with a numeric suffix, starting with 2.
int i = 2;
while (symbolTable.contains(desiredName + Integer.toString(i))) {
i++;
}
symbolTable.add(desiredName + Integer.toString(i));
return Integer.toString(i);
}
}
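A sketch of the rename the reviewer suggests for the method added in this record's patch; the final name is theirs to confirm.

```java
/** Returns true if {@code symbol} is already used. */
public boolean isUsed(Name symbol) {
  return symbolTable.contains(symbol.toLowerUnderscore());
}
```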
| 1 | 27,491 | The `SymbolTable` deals only with symbols, I think we can just call this `isUsed` here. | googleapis-gapic-generator | java |
@@ -165,7 +165,9 @@ webdriver.Builder.prototype.build = function() {
} else {
var url = this.serverUrl_;
var client;
- if (url[0] == '/') {
+ if (webdriver.http.CorsClient.isAvailable()) {
+ client = new webdriver.http.XhrClient(url);
+ } else if (url[0] == '/') {
var origin = window.location.origin ||
(window.location.protocol + '//' + window.location.host);
client = new webdriver.http.XhrClient(origin + url); | 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
goog.provide('webdriver.Builder');
goog.require('goog.Uri');
goog.require('goog.userAgent');
goog.require('webdriver.Capabilities');
goog.require('webdriver.FirefoxDomExecutor');
goog.require('webdriver.WebDriver');
goog.require('webdriver.http.CorsClient');
goog.require('webdriver.http.Executor');
goog.require('webdriver.http.XhrClient');
/**
* Creates new {@code webdriver.WebDriver} clients for use in a browser
* environment. Upon instantiation, each Builder will configure itself based
* on the following query parameters:
* <dl>
* <dt>wdurl
* <dd>Defines the WebDriver server to send commands to. If this is a
* relative URL, the builder will use the standard WebDriver wire
* protocol and a {@link webdriver.http.XhrClient}. Otherwise, it will
* use a {@link webdriver.http.CorsClient}; this only works when
* connecting to an instance of the Java Selenium server. The server URL
* may be changed using {@code #usingServer}.
*
* <dt>wdsid
* <dd>Defines the session to connect to. If omitted, will request a new
* session from the server.
* </dl>
*
* @param {Window=} opt_window The window to extract query parameters from.
* @constructor
* @final
* @struct
*/
webdriver.Builder = function(opt_window) {
var win = opt_window || window;
var data = new goog.Uri(win.location).getQueryData();
/** @private {string} */
this.serverUrl_ =
/** @type {string} */ (data.get(webdriver.Builder.SERVER_URL_PARAM,
webdriver.Builder.DEFAULT_SERVER_URL));
/** @private {string} */
this.sessionId_ =
/** @type {string} */ (data.get(webdriver.Builder.SESSION_ID_PARAM));
/** @private {!webdriver.Capabilities} */
this.capabilities_ = new webdriver.Capabilities();
};
/**
* Query parameter that defines which session to connect to.
* @type {string}
* @const
*/
webdriver.Builder.SESSION_ID_PARAM = 'wdsid';
/**
* Query parameter that defines the URL of the remote server to connect to.
* @type {string}
* @const
*/
webdriver.Builder.SERVER_URL_PARAM = 'wdurl';
/**
* The default server URL to use.
* @type {string}
* @const
*/
webdriver.Builder.DEFAULT_SERVER_URL = 'http://localhost:4444/wd/hub';
/**
* Configures which WebDriver server should be used for new sessions.
* @param {string} url URL of the server to use.
* @return {!webdriver.Builder} This Builder instance for chain calling.
*/
webdriver.Builder.prototype.usingServer = function(url) {
this.serverUrl_ = url;
return this;
};
/**
* @return {string} The URL of the WebDriver server this instance is configured
* to use.
*/
webdriver.Builder.prototype.getServerUrl = function() {
return this.serverUrl_;
};
/**
* Configures the builder to create a client that will use an existing WebDriver
* session.
* @param {string} id The existing session ID to use.
* @return {!webdriver.Builder} This Builder instance for chain calling.
*/
webdriver.Builder.prototype.usingSession = function(id) {
this.sessionId_ = id;
return this;
};
/**
* @return {string} The ID of the session, if any, this builder is configured
* to reuse.
*/
webdriver.Builder.prototype.getSession = function() {
return this.sessionId_;
};
/**
* Sets the desired capabilities when requesting a new session. This will
* overwrite any previously set desired capabilities.
* @param {!(Object|webdriver.Capabilities)} capabilities The desired
* capabilities for a new session.
* @return {!webdriver.Builder} This Builder instance for chain calling.
*/
webdriver.Builder.prototype.withCapabilities = function(capabilities) {
this.capabilities_ = new webdriver.Capabilities(capabilities);
return this;
};
/**
* Builds a new {@link webdriver.WebDriver} instance using this builder's
* current configuration.
* @return {!webdriver.WebDriver} A new WebDriver client.
*/
webdriver.Builder.prototype.build = function() {
if (goog.userAgent.GECKO && document.readyState != 'complete') {
throw Error('Cannot create driver instance before window.onload');
}
var executor;
if (webdriver.FirefoxDomExecutor.isAvailable()) {
executor = new webdriver.FirefoxDomExecutor();
return webdriver.WebDriver.createSession(executor, this.capabilities_);
} else {
var url = this.serverUrl_;
var client;
if (url[0] == '/') {
var origin = window.location.origin ||
(window.location.protocol + '//' + window.location.host);
client = new webdriver.http.XhrClient(origin + url);
} else {
client = new webdriver.http.CorsClient(url);
}
executor = new webdriver.http.Executor(client);
if (this.sessionId_) {
return webdriver.WebDriver.attachToSession(executor, this.sessionId_);
} else {
throw new Error('Unable to create a new client for this browser. The ' +
'WebDriver session ID has not been defined.');
}
}
};
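A sketch of gating the new client selection behind a builder flag with a setter, as the review note attached to this record asks; the names `useXhrWhenAvailable_` and `preferXhrClient` are illustrative, not a committed API.

```js
/** @private {boolean} */
webdriver.Builder.prototype.useXhrWhenAvailable_ = false;

/**
 * Opts in to using an XhrClient whenever cross-origin support is detected.
 * @param {boolean} useXhr Whether to prefer the XhrClient.
 * @return {!webdriver.Builder} This Builder instance for chain calling.
 */
webdriver.Builder.prototype.preferXhrClient = function(useXhr) {
  this.useXhrWhenAvailable_ = useXhr;
  return this;
};

// Inside build(), the new branch would then only run when opted in:
//   if (this.useXhrWhenAvailable_ && webdriver.http.CorsClient.isAvailable()) {
//     client = new webdriver.http.XhrClient(url);
//   } else if (url[0] == '/') { ...
```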
| 1 | 11,962 | Please hide this behind a flag and add a setter function to the builder. | SeleniumHQ-selenium | js |
@@ -339,6 +339,9 @@ class EC2VPC(GenericBaseModel):
)
for rt in resp["RouteTables"]:
for assoc in rt.get("Associations", []):
+ # skipping Main association (accommodating recent upstream change)
+ if assoc.get("Main"):
+ continue
ec2_client.disassociate_route_table(
AssociationId=assoc["RouteTableAssociationId"]
) | 1 | from moto.ec2.utils import generate_route_id
from localstack.services.cloudformation.deployment_utils import generate_default_name
from localstack.services.cloudformation.service_models import REF_ID_ATTRS, GenericBaseModel
from localstack.utils.aws import aws_stack
class EC2RouteTable(GenericBaseModel):
@staticmethod
def cloudformation_type():
return "AWS::EC2::RouteTable"
def fetch_state(self, stack_name, resources):
client = aws_stack.connect_to_service("ec2")
tags_filters = map(
lambda tag: {"Name": f"tag:{tag.get('Key')}", "Values": [tag.get("Value")]},
self.props.get("Tags") or [],
)
filters = [
{"Name": "vpc-id", "Values": [self.props["VpcId"]]},
{"Name": "association.main", "Values": ["false"]},
]
filters.extend(tags_filters)
route_tables = client.describe_route_tables(Filters=filters)["RouteTables"]
return (route_tables or [None])[0]
def get_physical_resource_id(self, attribute=None, **kwargs):
return self.physical_resource_id or self.props.get("RouteTableId")
@staticmethod
def get_deploy_templates():
return {
"create": {
"function": "create_route_table",
"parameters": {
"VpcId": "VpcId",
"TagSpecifications": lambda params, **kwargs: [
{"ResourceType": "route-table", "Tags": params.get("Tags")}
],
},
},
"delete": {
"function": "delete_route_table",
"parameters": {"RouteTableId": "RouteTableId"},
},
}
class EC2Route(GenericBaseModel):
@staticmethod
def cloudformation_type():
return "AWS::EC2::Route"
def fetch_state(self, stack_name, resources):
client = aws_stack.connect_to_service("ec2")
props = self.props
dst_cidr = self.resolve_refs_recursively(
stack_name, props.get("DestinationCidrBlock"), resources
)
dst_cidr6 = self.resolve_refs_recursively(
stack_name, props.get("DestinationIpv6CidrBlock"), resources
)
table_id = self.resolve_refs_recursively(stack_name, props.get("RouteTableId"), resources)
route_tables = client.describe_route_tables()["RouteTables"]
route_table = ([t for t in route_tables if t["RouteTableId"] == table_id] or [None])[0]
if route_table:
routes = route_table.get("Routes", [])
route = [
r
for r in routes
if r.get("DestinationCidrBlock") == (dst_cidr or "_not_set_")
or r.get("DestinationIpv6CidrBlock") == (dst_cidr6 or "_not_set_")
]
return (route or [None])[0]
def get_physical_resource_id(self, attribute=None, **kwargs):
props = self.props
return generate_route_id(
props.get("RouteTableId"),
props.get("DestinationCidrBlock"),
props.get("DestinationIpv6CidrBlock"),
)
@staticmethod
def get_deploy_templates():
return {
"create": {
"function": "create_route",
"parameters": {
"DestinationCidrBlock": "DestinationCidrBlock",
"DestinationIpv6CidrBlock": "DestinationIpv6CidrBlock",
"RouteTableId": "RouteTableId",
},
},
"delete": {
"function": "delete_route",
"parameters": {
"DestinationCidrBlock": "DestinationCidrBlock",
"DestinationIpv6CidrBlock": "DestinationIpv6CidrBlock",
"RouteTableId": "RouteTableId",
},
},
}
class EC2InternetGateway(GenericBaseModel):
@staticmethod
def cloudformation_type():
return "AWS::EC2::InternetGateway"
def fetch_state(self, stack_name, resources):
client = aws_stack.connect_to_service("ec2")
gateways = client.describe_internet_gateways()["InternetGateways"]
tags = self.props.get("Tags")
gateway = [g for g in gateways if (g.get("Tags") or []) == (tags or [])]
return (gateway or [None])[0]
def get_physical_resource_id(self, attribute=None, **kwargs):
return self.props.get("InternetGatewayId")
@staticmethod
def get_deploy_templates():
def _create_params(params, **kwargs):
return {
"TagSpecifications": [
{"ResourceType": "internet-gateway", "Tags": params.get("Tags", [])}
]
}
return {
"create": {
"function": "create_internet_gateway",
"parameters": _create_params,
}
}
class EC2SubnetRouteTableAssociation(GenericBaseModel):
@staticmethod
def cloudformation_type():
return "AWS::EC2::SubnetRouteTableAssociation"
def fetch_state(self, stack_name, resources):
client = aws_stack.connect_to_service("ec2")
props = self.props
table_id = self.resolve_refs_recursively(stack_name, props.get("RouteTableId"), resources)
gw_id = self.resolve_refs_recursively(stack_name, props.get("GatewayId"), resources)
route_tables = client.describe_route_tables()["RouteTables"]
route_table = ([t for t in route_tables if t["RouteTableId"] == table_id] or [None])[0]
subnet_id = self.resolve_refs_recursively(stack_name, props.get("SubnetId"), resources)
if route_table:
associations = route_table.get("Associations", [])
association = [a for a in associations if a.get("GatewayId") == gw_id]
if subnet_id:
association = [a for a in associations if a.get("SubnetId") == subnet_id]
return (association or [None])[0]
def get_physical_resource_id(self, attribute=None, **kwargs):
return self.props.get("RouteTableAssociationId")
@staticmethod
def get_deploy_templates():
return {
"create": {
"function": "associate_route_table",
"parameters": {
"GatewayId": "GatewayId",
"RouteTableId": "RouteTableId",
"SubnetId": "SubnetId",
},
},
"delete": {
"function": "disassociate_route_table",
"parameters": {"AssociationId": "RouteTableAssociationId"},
},
}
class EC2VPCGatewayAttachment(GenericBaseModel):
@staticmethod
def cloudformation_type():
return "AWS::EC2::VPCGatewayAttachment"
def fetch_state(self, stack_name, resources):
client = aws_stack.connect_to_service("ec2")
props = self.props
igw_id = self.resolve_refs_recursively(
stack_name, props.get("InternetGatewayId"), resources
)
vpngw_id = self.resolve_refs_recursively(stack_name, props.get("VpnGatewayId"), resources)
gateways = []
if igw_id:
gateways = client.describe_internet_gateways()["InternetGateways"]
gateways = [g for g in gateways if g["InternetGatewayId"] == igw_id]
elif vpngw_id:
gateways = client.describe_vpn_gateways()["VpnGateways"]
gateways = [g for g in gateways if g["VpnGatewayId"] == vpngw_id]
gateway = (gateways or [{}])[0]
attachments = gateway.get("Attachments") or gateway.get("VpcAttachments") or []
result = [a for a in attachments if a.get("State") in ("attached", "available")]
if result:
return gateway
def get_physical_resource_id(self, attribute=None, **kwargs):
props = self.props
gw_id = props.get("VpnGatewayId") or props.get("InternetGatewayId")
attachment = (props.get("Attachments") or props.get("VpcAttachments") or [{}])[0]
if attachment:
result = "%s-%s" % (gw_id, attachment.get("VpcId"))
return result
@classmethod
def get_deploy_templates(cls):
def _attach_gateway(resource_id, resources, *args, **kwargs):
client = aws_stack.connect_to_service("ec2")
resource = cls(resources[resource_id])
props = resource.props
igw_id = props.get("InternetGatewayId")
vpngw_id = props.get("VpnGatewayId")
vpc_id = props.get("VpcId")
if igw_id:
client.attach_internet_gateway(VpcId=vpc_id, InternetGatewayId=igw_id)
elif vpngw_id:
client.attach_vpn_gateway(VpcId=vpc_id, VpnGatewayId=vpngw_id)
return {"create": {"function": _attach_gateway}}
class SecurityGroup(GenericBaseModel):
@staticmethod
def cloudformation_type():
return "AWS::EC2::SecurityGroup"
def fetch_state(self, stack_name, resources):
props = self.props
group_id = props.get("GroupId")
group_name = props.get("GroupName")
client = aws_stack.connect_to_service("ec2")
if group_id:
resp = client.describe_security_groups(GroupIds=[group_id])
else:
resp = client.describe_security_groups(GroupNames=[group_name])
return (resp["SecurityGroups"] or [None])[0]
def get_physical_resource_id(self, attribute=None, **kwargs):
if self.physical_resource_id:
return self.physical_resource_id
if attribute in REF_ID_ATTRS:
props = self.props
return props.get("GroupId") or props.get("GroupName")
@staticmethod
def add_defaults(resource, stack_name: str):
role_name = resource.get("Properties", {}).get("GroupName")
if not role_name:
resource["Properties"]["GroupName"] = generate_default_name(
stack_name, resource["LogicalResourceId"]
)
@staticmethod
def get_deploy_templates():
return {
"create": {
"function": "create_security_group",
"parameters": {
"GroupName": "GroupName",
"VpcId": "VpcId",
"Description": "GroupDescription",
},
},
"delete": {
"function": "delete_security_group",
"parameters": {"GroupId": "PhysicalResourceId"},
},
}
class EC2Subnet(GenericBaseModel):
@staticmethod
def cloudformation_type():
return "AWS::EC2::Subnet"
def fetch_state(self, stack_name, resources):
client = aws_stack.connect_to_service("ec2")
props = self.props
filters = [
{"Name": "cidr-block", "Values": [props["CidrBlock"]]},
{"Name": "vpc-id", "Values": [props["VpcId"]]},
]
subnets = client.describe_subnets(Filters=filters)["Subnets"]
return (subnets or [None])[0]
def get_physical_resource_id(self, attribute=None, **kwargs):
return self.props.get("SubnetId")
@staticmethod
def get_deploy_templates():
return {
"create": {
"function": "create_subnet",
"parameters": {
"VpcId": "VpcId",
"CidrBlock": "CidrBlock",
"OutpostArn": "OutpostArn",
"Ipv6CidrBlock": "Ipv6CidrBlock",
"AvailabilityZone": "AvailabilityZone"
# TODO: add TagSpecifications
},
},
"delete": {
"function": "delete_subnet",
"parameters": {"SubnetId": "PhysicalResourceId"},
},
}
class EC2VPC(GenericBaseModel):
@staticmethod
def cloudformation_type():
return "AWS::EC2::VPC"
def fetch_state(self, stack_name, resources):
client = aws_stack.connect_to_service("ec2")
resp = client.describe_vpcs(Filters=[{"Name": "cidr", "Values": [self.props["CidrBlock"]]}])
return (resp["Vpcs"] or [None])[0]
@classmethod
def get_deploy_templates(cls):
def _pre_delete(resource_id, resources, resource_type, func, stack_name):
res = cls(resources[resource_id])
vpc_id = res.state.get("VpcId")
if vpc_id:
ec2_client = aws_stack.connect_to_service("ec2")
resp = ec2_client.describe_route_tables(
Filters=[
{"Name": "vpc-id", "Values": [vpc_id]},
{"Name": "association.main", "Values": ["false"]},
]
)
for rt in resp["RouteTables"]:
for assoc in rt.get("Associations", []):
ec2_client.disassociate_route_table(
AssociationId=assoc["RouteTableAssociationId"]
)
ec2_client.delete_route_table(RouteTableId=rt["RouteTableId"])
return {
"create": {
"function": "create_vpc",
"parameters": {
"CidrBlock": "CidrBlock",
"InstanceTenancy": "InstanceTenancy"
# TODO: add TagSpecifications
},
},
"delete": [
{"function": _pre_delete},
{
"function": "delete_vpc",
"parameters": {"VpcId": "PhysicalResourceId"},
},
],
}
def get_physical_resource_id(self, attribute=None, **kwargs):
return self.physical_resource_id or self.props.get("VpcId")
class EC2NatGateway(GenericBaseModel):
@staticmethod
def cloudformation_type():
return "AWS::EC2::NatGateway"
def fetch_state(self, stack_name, resources):
client = aws_stack.connect_to_service("ec2")
props = self.props
subnet_id = self.resolve_refs_recursively(stack_name, props.get("SubnetId"), resources)
assoc_id = self.resolve_refs_recursively(stack_name, props.get("AllocationId"), resources)
result = client.describe_nat_gateways(
Filters=[{"Name": "subnet-id", "Values": [subnet_id]}]
)
result = result["NatGateways"]
result = [
gw
for gw in result
if assoc_id in [ga["AllocationId"] for ga in gw["NatGatewayAddresses"]]
]
return (result or [None])[0]
@staticmethod
def get_deploy_templates():
return {
"create": {
"function": "create_nat_gateway",
"parameters": {
"SubnetId": "SubnetId",
"AllocationId": "AllocationId"
# TODO: add TagSpecifications
},
},
"delete": {
"function": "delete_nat_gateway",
"parameters": {"NatGatewayId": "PhysicalResourceId"},
},
}
def get_physical_resource_id(self, attribute=None, **kwargs):
return self.physical_resource_id or self.props.get("NatGatewayId")
class EC2Instance(GenericBaseModel):
@staticmethod
def cloudformation_type():
return "AWS::EC2::Instance"
def fetch_state(self, stack_name, resources):
instance_id = self.get_physical_resource_id()
if not instance_id:
return
return self._get_state()
def update_resource(self, new_resource, stack_name, resources):
instance_id = self.get_physical_resource_id()
props = new_resource["Properties"]
groups = props.get("SecurityGroups", props.get("SecurityGroupIds"))
client = aws_stack.connect_to_service("ec2")
kwargs = {}
if groups:
kwargs["Groups"] = groups
client.modify_instance_attribute(
InstanceId=instance_id,
InstanceType={"Value": props["InstanceType"]},
**kwargs,
)
return self._get_state(client)
def _get_state(self, client=None):
instance_id = self.get_physical_resource_id()
client = client or aws_stack.connect_to_service("ec2")
resp = client.describe_instances(InstanceIds=[instance_id])
reservation = (resp.get("Reservations") or [{}])[0]
result = (reservation.get("Instances") or [None])[0]
return result
def get_physical_resource_id(self, attribute=None, **kwargs):
return self.physical_resource_id or self.props.get("InstanceId")
def get_cfn_attribute(self, attribute_name):
if attribute_name in REF_ID_ATTRS:
return self.props.get("InstanceId")
if attribute_name == "PublicIp":
return self.props.get("PublicIpAddress") or "127.0.0.1"
if attribute_name == "PublicDnsName":
return self.props.get("PublicDnsName")
if attribute_name == "AvailabilityZone":
return (
self.props.get("Placement", {}).get("AvailabilityZone")
or f"{aws_stack.get_region()}a"
)
return super(EC2Instance, self).get_cfn_attribute(attribute_name)
@staticmethod
def get_deploy_templates():
return {
"create": {
"function": "create_instances",
"parameters": {
"InstanceType": "InstanceType",
"SecurityGroups": "SecurityGroups",
"KeyName": "KeyName",
"ImageId": "ImageId",
},
"defaults": {"MinCount": 1, "MaxCount": 1},
},
"delete": {
"function": "terminate_instances",
"parameters": {
"InstanceIds": lambda params, **kw: [
kw["resources"][kw["resource_id"]]["PhysicalResourceId"]
]
},
},
}
| 1 | 14,229 | Just a side-node, no need to change it here: The term "recent" might be confusing at a later point in time. | localstack-localstack | py |
@@ -36,7 +36,9 @@ module.exports = {
this.assertEqual(val1 && val1.getTime(), val2.getTime(), errorMessage, depth + 1);
}
else if (type === 'object') {
- for (const key of Object.keys(val1)) {
+ var keys = val1.keys !== undefined ? val1.keys() : Object.keys(val1);
+
+ for (const key of keys) {
const message = errorMessage ? `${errorMessage}: ${key}` : key;
this.assertEqual(val1[key], val2[key], message, depth + 1);
} | 1 | ////////////////////////////////////////////////////////////////////////////
//
// Copyright 2016 Realm Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
////////////////////////////////////////////////////////////////////////////
'use strict';
module.exports = {
assertSimilar: function(type, val1, val2, errorMessage, depth) {
depth = depth || 0;
this.assertDefined(type, depth + 1);
type = type.replace('?', '');
if (val2 === null) {
this.assertNull(val1, errorMessage, depth + 1);
}
else if (type === 'float' || type === 'double') {
this.assertEqualWithTolerance(val1, val2, 0.000001, errorMessage, depth + 1);
}
else if (type === 'data') {
this.assertArraysEqual(new Uint8Array(val1), val2, errorMessage, depth + 1);
}
else if (type === 'date') {
this.assertEqual(val1 && val1.getTime(), val2.getTime(), errorMessage, depth + 1);
}
else if (type === 'object') {
for (const key of Object.keys(val1)) {
const message = errorMessage ? `${errorMessage}: ${key}` : key;
this.assertEqual(val1[key], val2[key], message, depth + 1);
}
}
else if (type === 'list') {
this.assertArraysEqual(val1, val2, errorMessage, depth + 1);
}
else {
this.assertEqual(val1, val2, errorMessage, depth + 1);
}
},
assertEqual: function(val1, val2, errorMessage, depth) {
if (val1 !== val2) {
let message = `'${val1}' does not equal expected value '${val2}'`;
if (errorMessage) {
message = `${errorMessage} - ${message}`;
}
throw new TestFailureError(message, depth);
}
},
assertNotEqual: function(val1, val2, errorMessage, depth) {
if (val1 === val2) {
let message = `'${val1}' equals '${val2}'`;
if (errorMessage) {
message = `${errorMessage} - ${message}`;
}
throw new TestFailureError(message, depth);
}
},
assertEqualWithTolerance: function(val1, val2, tolerance, errorMessage, depth) {
if (val1 < val2 - tolerance || val1 > val2 + tolerance) {
let message = `'${val1}' does not equal '${val2}' with tolerance '${tolerance}'`;
if (errorMessage) {
message = `${errorMessage} - ${message}`;
}
throw new TestFailureError(message, depth);
}
},
assertArray: function(value, length, errorMessage, depth) {
if (!Array.isArray(value)) {
throw new TestFailureError(errorMessage || `Value ${value} is not an array`, depth);
}
},
assertArrayLength: function(value, length, errorMessage, depth) {
this.assertArray(value, 1 + depth || 0);
if (value.length !== length) {
throw new TestFailureError(errorMessage || `Value ${value} is not an array of length ${length}`, depth);
}
},
assertArraysEqual: function(val1, val2, errorMessage, depth) {
this.assertDefined(val1, `val1 should be non-null but is ${val1}`, 1 + (depth || 0));
this.assertDefined(val2, `val2 should be non-null but is ${val2}`, 1 + (depth || 0));
const len1 = val1.length;
const len2 = val2.length;
if (len1 !== len2) {
let message = `Arrays (${val1}) and (${val2}) have different lengths (${len1} != ${len2})`;
if (errorMessage) {
message = `${errorMessage} - ${message}`;
}
throw new TestFailureError(message, depth);
}
let compare;
if (val1.type === "data") {
compare = (i, a, b) => a === b || this.assertArraysEqual(new Uint8Array(a), b, `Data elements at index ${i}`, 1) || true;
}
else if (val1.type === "date") {
compare = (i, a, b) => (a && a.getTime()) === (b && b.getTime());
}
else if (val1.type === "float" || val1.type === "double") {
compare = (i, a, b) => a >= b - 0.000001 && a <= b + 0.000001;
}
else if (val1.type === 'object') {
compare = (i, a, b) => Object.keys(a).every(key => a[key] === b[key]);
}
else {
compare = (i, a, b) => a === b;
}
for (let i = 0; i < len1; i++) {
if (!compare(i, val1[i], val2[i])) {
let message = `Array contents not equal at index ${i} (${val1[i]} != ${val2[i]})`;
if (errorMessage) {
message = `${errorMessage} - ${message}`;
}
throw new TestFailureError(message, depth);
}
}
},
assertThrows: function(func, errorMessage, depth) {
let caught = false;
try {
func();
}
catch (e) {
caught = true;
}
if (!caught) {
throw new TestFailureError(errorMessage || 'Expected exception not thrown', depth);
}
},
assertThrowsException: function(func, expectedException) {
let caught = false;
try {
func();
}
catch (e) {
caught = true;
if (e.name !== expectedException.name) {
throw new TestFailureError(`Expected a ${expectedException.name} exception but caught a ${e.name} instead. Message was: ${e.message}`);
}
if (e.message != expectedException.message) {
throw new TestFailureError(`Expected exception "${expectedException}" not thrown - instead caught: "${e}"`);
}
}
if (!caught) {
throw new TestFailureError('Expected exception not thrown');
}
},
assertThrowsContaining: function(func, expectedMessage, depth) {
let caught = false;
try {
func();
}
catch (e) {
caught = true;
if (!e.message.includes(expectedMessage)) {
throw new TestFailureError(`Expected exception "${expectedMessage}" not thrown - instead caught: "${e}"`, depth);
}
}
if (!caught) {
throw new TestFailureError(`Expected exception "${expectedMessage}" not thrown`, depth);
}
},
assertTrue: function(condition, errorMessage, depth) {
if (!condition) {
throw new TestFailureError(errorMessage || `Condition ${condition} expected to be true`, depth);
}
},
assertFalse: function(condition, errorMessage, depth) {
if (condition) {
throw new TestFailureError(errorMessage || `Condition ${condition} expected to be false`, depth);
}
},
assertInstanceOf: function(object, type, errorMessage, depth) {
if (!(object instanceof type)) {
throw new TestFailureError(errorMessage || `Object ${object} expected to be of type ${type}`, depth);
}
},
assertType: function(value, type, depth) {
try {
this.assertEqual(typeof value, type, "", 1 + depth || 0);
}
catch (e) {
throw new Error(`Value ${value} expected to be of type ${type}`)
}
},
assertDefined: function(value, errorMessage, depth) {
if (value === undefined || value === null) {
throw new TestFailureError(errorMessage || `Value ${value} expected to be non-null`, depth);
}
},
assertUndefined: function(value, errorMessage, depth) {
if (value !== undefined) {
throw new TestFailureError(errorMessage || `Value ${value} expected to be undefined`, depth);
}
},
assertNull: function(value, errorMessage, depth) {
if (value !== null) {
throw new TestFailureError(errorMessage || `Value ${value} expected to be null`, depth);
}
},
isNode: function() {
// eslint-disable-next-line no-undef
return typeof process == 'object' && Object.prototype.toString.call(process) == '[object process]';
},
isNode6: function() {
// eslint-disable-next-line no-undef
return this.isNode() && process.version.indexOf('v6.') == 0;
},
};
function TestFailureError(message, depth) {
let error;
try {
throw new Error(message);
} catch (e) {
error = e;
}
depth = 2 + (depth || 0);
// This regular expression will match stack trace lines provided by JavaScriptCore.
// Example: someMethod@file:///path/to/file.js:10:24
const regex = /^(?:.*?@)?([^[(].+?):(\d+)(?::(\d+))?\s*$/;
// Remove the top two stack frames and use information from the third, if possible.
const stack = error.stack && error.stack.split('\n');
const match = stack[depth] && stack[depth].match(regex);
if (match) {
this.sourceURL = match[1];
this.line = +match[2];
this.column = +match[3];
this.stack = stack.slice(depth).join('\n');
}
this.__proto__ = error;
}
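For the question raised in this record's review: the ternary in the patch distinguishes plain objects, where `keys` is undefined, from collection-like objects that expose a `keys()` method. A small plain-JavaScript illustration (not Realm-specific):

```js
const plain = { a: 1 };
console.log(plain.keys);            // undefined -> fall back to Object.keys(plain)

const mapLike = new Map([['a', 1]]);
console.log(typeof mapLike.keys);   // 'function' -> call mapLike.keys() instead
console.log([...mapLike.keys()]);   // [ 'a' ]
```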
| 1 | 18,319 | When can it be undefined? Wouldn't that be a bug? | realm-realm-js | js |
@@ -19,6 +19,8 @@ package org.openqa.grid.internal;
import com.google.common.base.Predicate;
+import com.sun.org.glassfish.gmbal.ManagedObject;
+
import net.jcip.annotations.ThreadSafe;
import org.openqa.grid.internal.listeners.Prioritizer; | 1 | /*
Copyright 2011 Selenium committers
Copyright 2011 Software Freedom Conservancy
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package org.openqa.grid.internal;
import com.google.common.base.Predicate;
import net.jcip.annotations.ThreadSafe;
import org.openqa.grid.internal.listeners.Prioritizer;
import org.openqa.grid.internal.listeners.RegistrationListener;
import org.openqa.grid.internal.listeners.SelfHealingProxy;
import org.openqa.grid.internal.utils.CapabilityMatcher;
import org.openqa.grid.internal.utils.GridHubConfiguration;
import org.openqa.grid.web.Hub;
import org.openqa.grid.web.servlet.handler.RequestHandler;
import org.openqa.selenium.remote.DesiredCapabilities;
import org.openqa.selenium.remote.internal.HttpClientFactory;
import org.openqa.selenium.remote.server.log.LoggingManager;
import java.util.List;
import java.util.Set;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantLock;
import java.util.logging.Level;
import java.util.logging.Logger;
/**
* Kernel of the grid. Keeps track of what's happening, what's free/used and assigned resources to
* incoming requests.
*/
@ThreadSafe
public class Registry {
public static final String KEY = Registry.class.getName();
private static final Logger log = Logger.getLogger(Registry.class.getName());
// lock for anything modifying the tests session currently running on this
// registry.
private final ReentrantLock lock = new ReentrantLock();
private final Condition testSessionAvailable = lock.newCondition();
private final ProxySet proxies;
private final ActiveTestSessions activeTestSessions = new ActiveTestSessions();
private final GridHubConfiguration configuration;
private final HttpClientFactory httpClientFactory;
private final NewSessionRequestQueue newSessionQueue;
private final Matcher matcherThread = new Matcher();
private final List<RemoteProxy> registeringProxies = new CopyOnWriteArrayList<RemoteProxy>();
private final CapabilityMatcher capabilityMatcher;
private volatile boolean stop = false;
// The following three variables need to be volatile because we expose a public setters
private volatile int newSessionWaitTimeout;
private volatile Prioritizer prioritizer;
private volatile Hub hub;
private Registry(Hub hub, GridHubConfiguration config) {
this.hub = hub;
this.capabilityMatcher = config.getCapabilityMatcher();
this.newSessionWaitTimeout = config.getNewSessionWaitTimeout();
this.prioritizer = config.getPrioritizer();
this.newSessionQueue = new NewSessionRequestQueue();
this.configuration = config;
this.httpClientFactory = new HttpClientFactory();
proxies = new ProxySet(config.isThrowOnCapabilityNotPresent());
this.matcherThread.setUncaughtExceptionHandler(new UncaughtExceptionHandler());
}
@SuppressWarnings({"NullableProblems"})
public static Registry newInstance() {
return newInstance(null, new GridHubConfiguration());
}
public static Registry newInstance(Hub hub, GridHubConfiguration config) {
Registry registry = new Registry(hub, config);
registry.matcherThread.start();
// freynaud : TODO
// Registry is in a valid state when testSessionAvailable.await(); from
    // assignRequestToProxy is reached. Not before.
try {
Thread.sleep(250);
} catch (InterruptedException e) {
e.printStackTrace();
}
return registry;
}
public GridHubConfiguration getConfiguration() {
return configuration;
}
/**
* How long a session can remain in the newSession queue before being evicted.
*
* @return the new session wait timeout
*/
public int getNewSessionWaitTimeout() {
return newSessionWaitTimeout;
}
public void setNewSessionWaitTimeout(int newSessionWaitTimeout) {
this.newSessionWaitTimeout = newSessionWaitTimeout;
}
/**
* Ends this test session for the hub, releasing the resources in the hub / registry. It does not
* release anything on the remote. The resources are released in a separate thread, so the call
 * returns immediately. This allows a long-running release to proceed without blocking the test while
 * the hub is releasing the resource.
*
* @param session The session to terminate
* @param reason the reason for termination
*/
public void terminate(final TestSession session, final SessionTerminationReason reason) {
new Thread(new Runnable() { // Thread safety reviewed
public void run() {
_release(session.getSlot(), reason);
}
}).start();
}
/**
 * Release the test slot. Free the resource on the slot itself and the registry. It also invokes
* the {@link org.openqa.grid.internal.listeners.TestSessionListener#afterSession(TestSession)} if
* applicable.
*
* @param testSlot The slot to release
*/
private void _release(TestSlot testSlot, SessionTerminationReason reason) {
if (!testSlot.startReleaseProcess()) {
return;
}
if (!testSlot.performAfterSessionEvent()) {
return;
}
final String internalKey = testSlot.getInternalKey();
try {
lock.lock();
testSlot.finishReleaseProcess();
release(internalKey, reason);
} finally {
lock.unlock();
}
}
void terminateSynchronousFOR_TEST_ONLY(TestSession testSession) {
_release(testSession.getSlot(), SessionTerminationReason.CLIENT_STOPPED_SESSION);
}
public void removeIfPresent(RemoteProxy proxy) {
// Find the original proxy. While the supplied one is logically equivalent, it may be a fresh object with
// an empty TestSlot list, which doesn't figure into the proxy equivalence check. Since we want to free up
// those test sessions, we need to operate on that original object.
if (proxies.contains(proxy)) {
log.warning(String.format(
"Proxy '%s' was previously registered. Cleaning up any stale test sessions.", proxy));
final RemoteProxy p = proxies.remove(proxy);
for (TestSlot slot : p.getTestSlots()) {
forceRelease(slot, SessionTerminationReason.PROXY_REREGISTRATION);
}
p.teardown();
}
}
/**
* Releases the test slot, WITHOUT running any listener.
*/
public void forceRelease(TestSlot testSlot, SessionTerminationReason reason) {
if (testSlot.getSession() == null) {
return;
}
String internalKey = testSlot.getInternalKey();
release(internalKey, reason);
testSlot.doFinishRelease();
}
/**
 * Iterates the queue of incoming new session requests and assigns them to proxies after they've been
 * sorted by priority, with priority defined by the prioritizer.
*/
class Matcher extends Thread { // Thread safety reviewed
Matcher() {
super("Matcher thread");
}
@Override
public void run() {
try {
lock.lock();
assignRequestToProxy();
} finally {
lock.unlock();
}
}
}
public void stop() {
stop = true;
matcherThread.interrupt();
newSessionQueue.stop();
proxies.teardown();
httpClientFactory.close();
}
public Hub getHub() {
return hub;
}
@SuppressWarnings({"UnusedDeclaration"})
public void setHub(Hub hub) {
this.hub = hub;
}
public void addNewSessionRequest(RequestHandler handler) {
try {
lock.lock();
proxies.verifyAbilityToHandleDesiredCapabilities(handler.getRequest().getDesiredCapabilities());
newSessionQueue.add(handler);
fireMatcherStateChanged();
} finally {
lock.unlock();
}
}
/**
 * Iterates the list of incoming session requests to find a potential match in the list of proxies.
* If something changes in the registry, the matcher iteration is stopped to account for that
* change.
*/
private void assignRequestToProxy() {
while (!stop) {
try {
testSessionAvailable.await(5, TimeUnit.SECONDS);
newSessionQueue.processQueue(new Predicate<RequestHandler>() {
public boolean apply(RequestHandler input) {
return takeRequestHandler(input);
}
}, prioritizer);
// Just make sure we delete anything that is logged on this thread from memory
LoggingManager.perSessionLogHandler().clearThreadTempLogs();
} catch (InterruptedException e) {
log.info("Shutting down registry.");
} catch (Throwable t) {
log.log(Level.SEVERE, "Unhandled exception in Matcher thread.", t);
}
}
}
private boolean takeRequestHandler(RequestHandler handler) {
final TestSession session = proxies.getNewSession(handler.getRequest().getDesiredCapabilities());
final boolean sessionCreated = session != null;
if (sessionCreated) {
activeTestSessions.add(session);
handler.bindSession(session);
}
return sessionCreated;
}
/**
 * Marks the session as finished for the registry. The resources that were associated with it are now
 * free to be reserved by other tests.
*
* @param session The session
* @param reason the reason for the release
*/
private void release(TestSession session, SessionTerminationReason reason) {
try {
lock.lock();
boolean removed = activeTestSessions.remove(session, reason);
if (removed) {
fireMatcherStateChanged();
}
} finally {
lock.unlock();
}
}
private void release(String internalKey, SessionTerminationReason reason) {
if (internalKey == null) {
return;
}
final TestSession session1 = activeTestSessions.findSessionByInternalKey(internalKey);
if (session1 != null) {
release(session1, reason);
return;
}
log.warning("Tried to release session with internal key " + internalKey +
" but couldn't find it.");
}
/**
 * Add a proxy to the list of proxies available for the grid to manage and link the proxy to the
 * registry.
*
* @param proxy The proxy to add
*/
public void add(RemoteProxy proxy) {
if (proxy == null) {
return;
}
log.fine("adding " + proxy);
try {
lock.lock();
removeIfPresent(proxy);
if (registeringProxies.contains(proxy)) {
log.warning(String.format("Proxy '%s' is already queued for registration.", proxy));
return;
}
registeringProxies.add(proxy);
fireMatcherStateChanged();
} finally {
lock.unlock();
}
boolean listenerOk = true;
try {
if (proxy instanceof RegistrationListener) {
((RegistrationListener) proxy).beforeRegistration();
}
} catch (Throwable t) {
log.severe("Error running the registration listener on " + proxy + ", " + t.getMessage());
t.printStackTrace();
listenerOk = false;
}
try {
lock.lock();
registeringProxies.remove(proxy);
if (listenerOk) {
if (proxy instanceof SelfHealingProxy) {
((SelfHealingProxy) proxy).startPolling();
}
proxies.add(proxy);
fireMatcherStateChanged();
}
} finally {
lock.unlock();
}
}
/**
 * If throwOnCapabilityNotPresent is set to true, the hub will reject test requests for a
* capability that is not on the grid. No exception will be thrown if the capability is present
* but busy. <p/> If set to false, the test will be queued hoping a new proxy will register later
* offering that capability.
*
* @param throwOnCapabilityNotPresent true to throw if capability not present
*/
public void setThrowOnCapabilityNotPresent(boolean throwOnCapabilityNotPresent) {
proxies.setThrowOnCapabilityNotPresent(throwOnCapabilityNotPresent);
}
private void fireMatcherStateChanged() {
testSessionAvailable.signalAll();
}
public ProxySet getAllProxies() {
return proxies;
}
public List<RemoteProxy> getUsedProxies() {
return proxies.getBusyProxies();
}
/**
 * Gets the test session associated with this external key. The external key is the session used by
* webdriver.
*
* @param externalKey the external session key
* @return null if the hub doesn't have a node associated to the provided externalKey
*/
public TestSession getSession(ExternalSessionKey externalKey) {
return activeTestSessions.findSessionByExternalKey(externalKey);
}
/**
 * Gets the existing test session associated with this external key. The external key is the session
* used by webdriver.
*
* This method will log complaints and reasons if the key cannot be found
*
* @param externalKey the external session key
* @return null if the hub doesn't have a node associated to the provided externalKey
*/
public TestSession getExistingSession(ExternalSessionKey externalKey) {
return activeTestSessions.getExistingSession(externalKey);
}
/*
* May race.
*/
public int getNewSessionRequestCount() {
return newSessionQueue.getNewSessionRequestCount();
}
public void clearNewSessionRequests() {
newSessionQueue.clearNewSessionRequests();
}
public boolean removeNewSessionRequest(RequestHandler request) {
return newSessionQueue.removeNewSessionRequest(request);
}
public Iterable<DesiredCapabilities> getDesiredCapabilities() {
return newSessionQueue.getDesiredCapabilities();
}
public Set<TestSession> getActiveSessions() {
return activeTestSessions.unmodifiableSet();
}
public void setPrioritizer(Prioritizer prioritizer) {
this.prioritizer = prioritizer;
}
public Prioritizer getPrioritizer() {
return prioritizer;
}
public RemoteProxy getProxyById(String id) {
return proxies.getProxyById(id);
}
HttpClientFactory getHttpClientFactory() {
return httpClientFactory;
}
private static class UncaughtExceptionHandler implements Thread.UncaughtExceptionHandler {
public void uncaughtException(Thread t, Throwable e) {
log.log(Level.SEVERE, "Matcher thread dying due to unhandled exception.", e);
}
}
public CapabilityMatcher getCapabilityMatcher() {
return capabilityMatcher;
}
}
| 1 | 11,529 | JMX offers normal APIs for this. I don't think you want the glassfish one. | SeleniumHQ-selenium | py |
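The review message above objects to the patch's new com.sun.org.glassfish.gmbal.ManagedObject import: the gmbal annotations live in an internal, non-public package bundled with the JDK, so the "normal" JMX route the reviewer alludes to would be a plain MBean interface registered through javax.management. A rough sketch under that assumption follows; the RegistryMBean interface, its attribute names, and the ObjectName string are invented for illustration and are not part of the Selenium code base.

import java.lang.management.ManagementFactory;
import javax.management.MBeanServer;
import javax.management.ObjectName;
import javax.management.StandardMBean;

// Hypothetical management interface exposing a couple of read-only attributes.
interface RegistryMBean {
  int getNewSessionRequestCount();
  int getActiveSessionCount();
}

class RegistryManagement implements RegistryMBean {
  // In real code these would delegate to the Registry instance.
  @Override public int getNewSessionRequestCount() { return 0; }
  @Override public int getActiveSessionCount() { return 0; }

  // Registers the bean with the platform MBean server using only javax.management,
  // i.e. without any com.sun.org.glassfish.gmbal annotations.
  static void register(RegistryMBean bean) throws Exception {
    MBeanServer server = ManagementFactory.getPlatformMBeanServer();
    ObjectName name = new ObjectName("org.openqa.grid:type=Registry");
    server.registerMBean(new StandardMBean(bean, RegistryMBean.class), name);
  }
}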
@@ -430,7 +430,7 @@ class KoalasBoxPlot(BoxPlot):
).alias("{}_{}%".format(colname, int(q * 100)))
for q in [0.25, 0.50, 0.75]
],
- F.mean(colname).alias("{}_mean".format(colname))
+ F.mean(colname).alias("{}_mean".format(colname)),
).toPandas()
# Computes IQR and Tukey's fences | 1 | #
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from distutils.version import LooseVersion
import matplotlib
import numpy as np
import pandas as pd
from matplotlib.axes._base import _process_plot_format
from pandas.core.dtypes.inference import is_integer, is_list_like
from pandas.io.formats.printing import pprint_thing
from pandas.core.base import PandasObject
from pyspark.ml.feature import Bucketizer
from pyspark.mllib.stat import KernelDensity
from pyspark.sql import functions as F
from databricks.koalas.missing import unsupported_function
from databricks.koalas.config import get_option
if LooseVersion(pd.__version__) < LooseVersion("0.25"):
from pandas.plotting._core import (
_all_kinds,
BarPlot,
BoxPlot,
HistPlot,
MPLPlot,
PiePlot,
AreaPlot,
LinePlot,
BarhPlot,
ScatterPlot,
KdePlot,
)
else:
from pandas.plotting._core import PlotAccessor
from pandas.plotting._matplotlib import (
BarPlot,
BoxPlot,
HistPlot,
PiePlot,
AreaPlot,
LinePlot,
BarhPlot,
ScatterPlot,
KdePlot,
)
from pandas.plotting._matplotlib.core import MPLPlot
_all_kinds = PlotAccessor._all_kinds
class TopNPlot:
def get_top_n(self, data):
from databricks.koalas import DataFrame, Series
max_rows = get_option("plotting.max_rows")
# Simply use the first 1k elements and make it into a pandas dataframe
# For categorical variables, it is likely called from df.x.value_counts().plot.xxx().
if isinstance(data, (Series, DataFrame)):
data = data.head(max_rows + 1).to_pandas()
else:
raise ValueError("Only DataFrame and Series are supported for plotting.")
self.partial = False
if len(data) > max_rows:
self.partial = True
data = data.iloc[:max_rows]
return data
def set_result_text(self, ax):
max_rows = get_option("plotting.max_rows")
assert hasattr(self, "partial")
if self.partial:
ax.text(
1,
1,
"showing top {} elements only".format(max_rows),
size=6,
ha="right",
va="bottom",
transform=ax.transAxes,
)
class SampledPlot:
def get_sampled(self, data):
from databricks.koalas import DataFrame, Series
fraction = get_option("plotting.sample_ratio")
if fraction is None:
fraction = 1 / (len(data) / get_option("plotting.max_rows"))
fraction = min(1.0, fraction)
self.fraction = fraction
if isinstance(data, (DataFrame, Series)):
if isinstance(data, Series):
data = data.to_frame()
sampled = data._internal.resolved_copy.spark_frame.sample(fraction=self.fraction)
return DataFrame(data._internal.with_new_sdf(sampled)).to_pandas()
else:
raise ValueError("Only DataFrame and Series are supported for plotting.")
def set_result_text(self, ax):
assert hasattr(self, "fraction")
if self.fraction < 1:
ax.text(
1,
1,
"showing the sampled result by fraction %s" % self.fraction,
size=6,
ha="right",
va="bottom",
transform=ax.transAxes,
)
class KoalasBarPlot(BarPlot, TopNPlot):
def __init__(self, data, **kwargs):
super(KoalasBarPlot, self).__init__(self.get_top_n(data), **kwargs)
def _plot(self, ax, x, y, w, start=0, log=False, **kwds):
self.set_result_text(ax)
return ax.bar(x, y, w, bottom=start, log=log, **kwds)
class KoalasBoxPlot(BoxPlot):
def boxplot(
self,
ax,
bxpstats,
notch=None,
sym=None,
vert=None,
whis=None,
positions=None,
widths=None,
patch_artist=None,
bootstrap=None,
usermedians=None,
conf_intervals=None,
meanline=None,
showmeans=None,
showcaps=None,
showbox=None,
showfliers=None,
boxprops=None,
labels=None,
flierprops=None,
medianprops=None,
meanprops=None,
capprops=None,
whiskerprops=None,
manage_xticks=True,
autorange=False,
zorder=None,
precision=None,
):
def update_dict(dictionary, rc_name, properties):
""" Loads properties in the dictionary from rc file if not already
in the dictionary"""
rc_str = "boxplot.{0}.{1}"
if dictionary is None:
dictionary = dict()
for prop_dict in properties:
dictionary.setdefault(
prop_dict, matplotlib.rcParams[rc_str.format(rc_name, prop_dict)]
)
return dictionary
# Common property dictionaries loading from rc
flier_props = [
"color",
"marker",
"markerfacecolor",
"markeredgecolor",
"markersize",
"linestyle",
"linewidth",
]
default_props = ["color", "linewidth", "linestyle"]
boxprops = update_dict(boxprops, "boxprops", default_props)
whiskerprops = update_dict(whiskerprops, "whiskerprops", default_props)
capprops = update_dict(capprops, "capprops", default_props)
medianprops = update_dict(medianprops, "medianprops", default_props)
meanprops = update_dict(meanprops, "meanprops", default_props)
flierprops = update_dict(flierprops, "flierprops", flier_props)
if patch_artist:
boxprops["linestyle"] = "solid"
boxprops["edgecolor"] = boxprops.pop("color")
# if non-default sym value, put it into the flier dictionary
# the logic for providing the default symbol ('b+') now lives
# in bxp in the initial value of final_flierprops
# handle all of the `sym` related logic here so we only have to pass
# on the flierprops dict.
if sym is not None:
# no-flier case, which should really be done with
# 'showfliers=False' but none-the-less deal with it to keep back
# compatibility
if sym == "":
# blow away existing dict and make one for invisible markers
flierprops = dict(linestyle="none", marker="", color="none")
# turn the fliers off just to be safe
showfliers = False
# now process the symbol string
else:
# process the symbol string
# discarded linestyle
_, marker, color = _process_plot_format(sym)
# if we have a marker, use it
if marker is not None:
flierprops["marker"] = marker
# if we have a color, use it
if color is not None:
# assume that if color is passed in the user want
# filled symbol, if the users want more control use
# flierprops
flierprops["color"] = color
flierprops["markerfacecolor"] = color
flierprops["markeredgecolor"] = color
# replace medians if necessary:
if usermedians is not None:
if len(np.ravel(usermedians)) != len(bxpstats) or np.shape(usermedians)[0] != len(
bxpstats
):
raise ValueError("usermedians length not compatible with x")
else:
# reassign medians as necessary
for stats, med in zip(bxpstats, usermedians):
if med is not None:
stats["med"] = med
if conf_intervals is not None:
if np.shape(conf_intervals)[0] != len(bxpstats):
err_mess = "conf_intervals length not compatible with x"
raise ValueError(err_mess)
else:
for stats, ci in zip(bxpstats, conf_intervals):
if ci is not None:
if len(ci) != 2:
raise ValueError("each confidence interval must " "have two values")
else:
if ci[0] is not None:
stats["cilo"] = ci[0]
if ci[1] is not None:
stats["cihi"] = ci[1]
artists = ax.bxp(
bxpstats,
positions=positions,
widths=widths,
vert=vert,
patch_artist=patch_artist,
shownotches=notch,
showmeans=showmeans,
showcaps=showcaps,
showbox=showbox,
boxprops=boxprops,
flierprops=flierprops,
medianprops=medianprops,
meanprops=meanprops,
meanline=meanline,
showfliers=showfliers,
capprops=capprops,
whiskerprops=whiskerprops,
manage_xticks=manage_xticks,
zorder=zorder,
)
return artists
def _plot(self, ax, bxpstats, column_num=None, return_type="axes", **kwds):
bp = self.boxplot(ax, bxpstats, **kwds)
if return_type == "dict":
return bp, bp
elif return_type == "both":
return self.BP(ax=ax, lines=bp), bp
else:
return ax, bp
def _compute_plot_data(self):
colname = self.data.name
data = self.data
# Updates all props with the rc defaults from matplotlib
self.kwds.update(KoalasBoxPlot.rc_defaults(**self.kwds))
# Gets some important kwds
showfliers = self.kwds.get("showfliers", False)
whis = self.kwds.get("whis", 1.5)
labels = self.kwds.get("labels", [colname])
# This one is Koalas specific to control precision for approx_percentile
precision = self.kwds.get("precision", 0.01)
# # Computes mean, median, Q1 and Q3 with approx_percentile and precision
col_stats, col_fences = KoalasBoxPlot._compute_stats(data, colname, whis, precision)
# # Creates a column to flag rows as outliers or not
outliers = KoalasBoxPlot._outliers(data, colname, *col_fences)
# # Computes min and max values of non-outliers - the whiskers
whiskers = KoalasBoxPlot._calc_whiskers(colname, outliers)
if showfliers:
fliers = KoalasBoxPlot._get_fliers(colname, outliers)
else:
fliers = []
# Builds bxpstats dict
stats = []
item = {
"mean": col_stats["mean"],
"med": col_stats["med"],
"q1": col_stats["q1"],
"q3": col_stats["q3"],
"whislo": whiskers[0],
"whishi": whiskers[1],
"fliers": fliers,
"label": labels[0],
}
stats.append(item)
self.data = {labels[0]: stats}
def _make_plot(self):
bxpstats = list(self.data.values())[0]
ax = self._get_ax(0)
kwds = self.kwds.copy()
for stats in bxpstats:
if len(stats["fliers"]) > 1000:
stats["fliers"] = stats["fliers"][:1000]
ax.text(
1,
1,
"showing top 1,000 fliers only",
size=6,
ha="right",
va="bottom",
transform=ax.transAxes,
)
ret, bp = self._plot(ax, bxpstats, column_num=0, return_type=self.return_type, **kwds)
self.maybe_color_bp(bp)
self._return_obj = ret
labels = [l for l, _ in self.data.items()]
labels = [pprint_thing(l) for l in labels]
if not self.use_index:
labels = [pprint_thing(key) for key in range(len(labels))]
self._set_ticklabels(ax, labels)
@staticmethod
def rc_defaults(
notch=None,
vert=None,
whis=None,
patch_artist=None,
bootstrap=None,
meanline=None,
showmeans=None,
showcaps=None,
showbox=None,
showfliers=None,
**kwargs
):
# Missing arguments default to rcParams.
if whis is None:
whis = matplotlib.rcParams["boxplot.whiskers"]
if bootstrap is None:
bootstrap = matplotlib.rcParams["boxplot.bootstrap"]
if notch is None:
notch = matplotlib.rcParams["boxplot.notch"]
if vert is None:
vert = matplotlib.rcParams["boxplot.vertical"]
if patch_artist is None:
patch_artist = matplotlib.rcParams["boxplot.patchartist"]
if meanline is None:
meanline = matplotlib.rcParams["boxplot.meanline"]
if showmeans is None:
showmeans = matplotlib.rcParams["boxplot.showmeans"]
if showcaps is None:
showcaps = matplotlib.rcParams["boxplot.showcaps"]
if showbox is None:
showbox = matplotlib.rcParams["boxplot.showbox"]
if showfliers is None:
showfliers = matplotlib.rcParams["boxplot.showfliers"]
return dict(
whis=whis,
bootstrap=bootstrap,
notch=notch,
vert=vert,
patch_artist=patch_artist,
meanline=meanline,
showmeans=showmeans,
showcaps=showcaps,
showbox=showbox,
showfliers=showfliers,
)
@staticmethod
def _compute_stats(data, colname, whis, precision):
# Computes mean, median, Q1 and Q3 with approx_percentile and precision
pdf = data._kdf._internal.resolved_copy.spark_frame.agg(
*[
F.expr(
"approx_percentile({}, {}, {})".format(colname, q, int(1.0 / precision))
).alias("{}_{}%".format(colname, int(q * 100)))
for q in [0.25, 0.50, 0.75]
],
F.mean(colname).alias("{}_mean".format(colname))
).toPandas()
# Computes IQR and Tukey's fences
iqr = "{}_iqr".format(colname)
p75 = "{}_75%".format(colname)
p25 = "{}_25%".format(colname)
pdf.loc[:, iqr] = pdf.loc[:, p75] - pdf.loc[:, p25]
pdf.loc[:, "{}_lfence".format(colname)] = pdf.loc[:, p25] - whis * pdf.loc[:, iqr]
pdf.loc[:, "{}_ufence".format(colname)] = pdf.loc[:, p75] + whis * pdf.loc[:, iqr]
qnames = ["25%", "50%", "75%", "mean", "lfence", "ufence"]
col_summ = pdf[["{}_{}".format(colname, q) for q in qnames]]
col_summ.columns = qnames
lfence, ufence = col_summ["lfence"], col_summ["ufence"]
stats = {
"mean": col_summ["mean"].values[0],
"med": col_summ["50%"].values[0],
"q1": col_summ["25%"].values[0],
"q3": col_summ["75%"].values[0],
}
return stats, (lfence.values[0], ufence.values[0])
@staticmethod
def _outliers(data, colname, lfence, ufence):
# Builds expression to identify outliers
expression = F.col(colname).between(lfence, ufence)
# Creates a column to flag rows as outliers or not
return data._kdf._internal.resolved_copy.spark_frame.withColumn(
"__{}_outlier".format(colname), ~expression
)
@staticmethod
def _calc_whiskers(colname, outliers):
# Computes min and max values of non-outliers - the whiskers
minmax = (
outliers.filter("not __{}_outlier".format(colname))
.agg(F.min(colname).alias("min"), F.max(colname).alias("max"))
.toPandas()
)
return minmax.iloc[0][["min", "max"]].values
@staticmethod
def _get_fliers(colname, outliers):
# Filters only the outliers, should "showfliers" be True
fliers_df = outliers.filter("__{}_outlier".format(colname))
# If shows fliers, takes the top 1k with highest absolute values
fliers = (
fliers_df.select(F.abs(F.col("`{}`".format(colname))).alias(colname))
.orderBy(F.desc("`{}`".format(colname)))
.limit(1001)
.toPandas()[colname]
.values
)
return fliers
class KoalasHistPlot(HistPlot):
def _args_adjust(self):
if is_list_like(self.bottom):
self.bottom = np.array(self.bottom)
def _compute_plot_data(self):
# TODO: this logic is same with KdePlot. Might have to deduplicate it.
from databricks.koalas.series import Series
data = self.data
if isinstance(data, Series):
data = data.to_frame()
numeric_data = data.select_dtypes(
include=["byte", "decimal", "integer", "float", "long", "double", np.datetime64]
)
# no empty frames or series allowed
if len(numeric_data.columns) == 0:
raise TypeError(
"Empty {0!r}: no numeric data to " "plot".format(numeric_data.__class__.__name__)
)
if is_integer(self.bins):
# computes boundaries for the column
self.bins = self._get_bins(data.to_spark(), self.bins)
self.data = numeric_data
def _make_plot(self):
# TODO: this logic is similar with KdePlot. Might have to deduplicate it.
# 'num_colors' requires to calculate `shape` which has to count all.
# Use 1 for now to save the computation.
colors = self._get_colors(num_colors=1)
stacking_id = self._get_stacking_id()
sdf = self.data._internal.spark_frame
for i, label in enumerate(self.data._internal.column_labels):
# 'y' is a Spark DataFrame that selects one column.
y = sdf.select(self.data._internal.spark_column_for(label))
ax = self._get_ax(i)
kwds = self.kwds.copy()
label = pprint_thing(label if len(label) > 1 else label[0])
kwds["label"] = label
style, kwds = self._apply_style_colors(colors, kwds, i, label)
if style is not None:
kwds["style"] = style
# 'y' is a Spark DataFrame that selects one column.
# here, we manually calculates the weights separately via Spark
# and assign it directly to histogram plot.
y = KoalasHistPlot._compute_hist(y, self.bins) # now y is a pandas Series.
kwds = self._make_plot_keywords(kwds, y)
artists = self._plot(ax, y, column_num=i, stacking_id=stacking_id, **kwds)
self._add_legend_handle(artists[0], label, index=i)
@classmethod
def _plot(cls, ax, y, style=None, bins=None, bottom=0, column_num=0, stacking_id=None, **kwds):
if column_num == 0:
cls._initialize_stacker(ax, stacking_id, len(bins) - 1)
base = np.zeros(len(bins) - 1)
bottom = bottom + cls._get_stacked_values(ax, stacking_id, base, kwds["label"])
# Since the counts were computed already, we use them as weights and just generate
# one entry for each bin
n, bins, patches = ax.hist(bins[:-1], bins=bins, bottom=bottom, weights=y, **kwds)
cls._update_stacker(ax, stacking_id, n)
return patches
@staticmethod
def _get_bins(sdf, bins):
# 'data' is a Spark DataFrame that selects all columns.
if len(sdf.columns) > 1:
min_col = F.least(*map(F.min, sdf))
max_col = F.greatest(*map(F.max, sdf))
else:
min_col = F.min(sdf.columns[-1])
max_col = F.max(sdf.columns[-1])
boundaries = sdf.select(min_col, max_col).first()
# divides the boundaries into bins
if boundaries[0] == boundaries[1]:
boundaries = (boundaries[0] - 0.5, boundaries[1] + 0.5)
return np.linspace(boundaries[0], boundaries[1], bins + 1)
@staticmethod
def _compute_hist(sdf, bins):
# 'data' is a Spark DataFrame that selects one column.
assert isinstance(bins, (np.ndarray, np.generic))
colname = sdf.columns[-1]
bucket_name = "__{}_bucket".format(colname)
# creates a Bucketizer to get corresponding bin of each value
bucketizer = Bucketizer(
splits=bins, inputCol=colname, outputCol=bucket_name, handleInvalid="skip"
)
# after bucketing values, groups and counts them
result = (
bucketizer.transform(sdf)
.select(bucket_name)
.groupby(bucket_name)
.agg(F.count("*").alias("count"))
.toPandas()
.sort_values(by=bucket_name)
)
# generates a pandas DF with one row for each bin
# we need this as some of the bins may be empty
indexes = pd.DataFrame({bucket_name: np.arange(0, len(bins) - 1), "bucket": bins[:-1]})
# merges the bins with counts on it and fills remaining ones with zeros
pdf = indexes.merge(result, how="left", on=[bucket_name]).fillna(0)[["count"]]
pdf.columns = [bucket_name]
return pdf[bucket_name]
class KoalasPiePlot(PiePlot, TopNPlot):
def __init__(self, data, **kwargs):
super(KoalasPiePlot, self).__init__(self.get_top_n(data), **kwargs)
def _make_plot(self):
self.set_result_text(self._get_ax(0))
super(KoalasPiePlot, self)._make_plot()
class KoalasAreaPlot(AreaPlot, SampledPlot):
def __init__(self, data, **kwargs):
super(KoalasAreaPlot, self).__init__(self.get_sampled(data), **kwargs)
def _make_plot(self):
self.set_result_text(self._get_ax(0))
super(KoalasAreaPlot, self)._make_plot()
class KoalasLinePlot(LinePlot, SampledPlot):
def __init__(self, data, **kwargs):
super(KoalasLinePlot, self).__init__(self.get_sampled(data), **kwargs)
def _make_plot(self):
self.set_result_text(self._get_ax(0))
super(KoalasLinePlot, self)._make_plot()
class KoalasBarhPlot(BarhPlot, TopNPlot):
def __init__(self, data, **kwargs):
super(KoalasBarhPlot, self).__init__(self.get_top_n(data), **kwargs)
def _make_plot(self):
self.set_result_text(self._get_ax(0))
super(KoalasBarhPlot, self)._make_plot()
class KoalasScatterPlot(ScatterPlot, TopNPlot):
def __init__(self, data, x, y, **kwargs):
super().__init__(self.get_top_n(data), x, y, **kwargs)
def _make_plot(self):
self.set_result_text(self._get_ax(0))
super(KoalasScatterPlot, self)._make_plot()
class KoalasKdePlot(KdePlot):
def _compute_plot_data(self):
from databricks.koalas.series import Series
data = self.data
if isinstance(data, Series):
data = data.to_frame()
numeric_data = data.select_dtypes(
include=["byte", "decimal", "integer", "float", "long", "double", np.datetime64]
)
# no empty frames or series allowed
if len(numeric_data.columns) == 0:
raise TypeError(
"Empty {0!r}: no numeric data to " "plot".format(numeric_data.__class__.__name__)
)
self.data = numeric_data
def _make_plot(self):
# 'num_colors' requires to calculate `shape` which has to count all.
# Use 1 for now to save the computation.
colors = self._get_colors(num_colors=1)
stacking_id = self._get_stacking_id()
sdf = self.data._internal.spark_frame
for i, label in enumerate(self.data._internal.column_labels):
# 'y' is a Spark DataFrame that selects one column.
y = sdf.select(self.data._internal.spark_column_for(label))
ax = self._get_ax(i)
kwds = self.kwds.copy()
label = pprint_thing(label if len(label) > 1 else label[0])
kwds["label"] = label
style, kwds = self._apply_style_colors(colors, kwds, i, label)
if style is not None:
kwds["style"] = style
kwds = self._make_plot_keywords(kwds, y)
artists = self._plot(ax, y, column_num=i, stacking_id=stacking_id, **kwds)
self._add_legend_handle(artists[0], label, index=i)
def _get_ind(self, y):
# 'y' is a Spark DataFrame that selects one column.
if self.ind is None:
min_val, max_val = y.select(F.min(y.columns[-1]), F.max(y.columns[-1])).first()
sample_range = max_val - min_val
ind = np.linspace(min_val - 0.5 * sample_range, max_val + 0.5 * sample_range, 1000,)
elif is_integer(self.ind):
min_val, max_val = y.select(F.min(y.columns[-1]), F.max(y.columns[-1])).first()
sample_range = np.nanmax(y) - np.nanmin(y)
ind = np.linspace(min_val - 0.5 * sample_range, max_val + 0.5 * sample_range, self.ind,)
else:
ind = self.ind
return ind
@classmethod
def _plot(
cls, ax, y, style=None, bw_method=None, ind=None, column_num=None, stacking_id=None, **kwds
):
# 'y' is a Spark DataFrame that selects one column.
# Using RDD is slow so we might have to change it to Dataset based implementation
# once Spark has that implementation.
sample = y.rdd.map(lambda x: float(x[0]))
kd = KernelDensity()
kd.setSample(sample)
assert isinstance(bw_method, (int, float)), "'bw_method' must be set as a scalar number."
if bw_method is not None:
# Match the bandwidth with Spark.
kd.setBandwidth(float(bw_method))
y = kd.estimate(list(map(float, ind)))
lines = MPLPlot._plot(ax, ind, y, style=style, **kwds)
return lines
_klasses = [
KoalasHistPlot,
KoalasBarPlot,
KoalasBoxPlot,
KoalasPiePlot,
KoalasAreaPlot,
KoalasLinePlot,
KoalasBarhPlot,
KoalasScatterPlot,
KoalasKdePlot,
]
_plot_klass = {getattr(klass, "_kind"): klass for klass in _klasses}
def plot_series(
data,
kind="line",
ax=None, # Series unique
figsize=None,
use_index=True,
title=None,
grid=None,
legend=False,
style=None,
logx=False,
logy=False,
loglog=False,
xticks=None,
yticks=None,
xlim=None,
ylim=None,
rot=None,
fontsize=None,
colormap=None,
table=False,
yerr=None,
xerr=None,
label=None,
secondary_y=False, # Series unique
**kwds
):
"""
Make plots of Series using matplotlib / pylab.
Each plot kind has a corresponding method on the
``Series.plot`` accessor:
``s.plot(kind='line')`` is equivalent to
``s.plot.line()``.
Parameters
----------
data : Series
kind : str
- 'line' : line plot (default)
- 'bar' : vertical bar plot
- 'barh' : horizontal bar plot
- 'hist' : histogram
- 'box' : boxplot
- 'kde' : Kernel Density Estimation plot
- 'density' : same as 'kde'
- 'area' : area plot
- 'pie' : pie plot
ax : matplotlib axes object
If not passed, uses gca()
figsize : a tuple (width, height) in inches
use_index : boolean, default True
Use index as ticks for x axis
title : string or list
Title to use for the plot. If a string is passed, print the string at
the top of the figure. If a list is passed and `subplots` is True,
print each item in the list above the corresponding subplot.
grid : boolean, default None (matlab style default)
Axis grid lines
legend : False/True/'reverse'
Place legend on axis subplots
style : list or dict
matplotlib line style per column
logx : boolean, default False
Use log scaling on x axis
logy : boolean, default False
Use log scaling on y axis
loglog : boolean, default False
Use log scaling on both x and y axes
xticks : sequence
Values to use for the xticks
yticks : sequence
Values to use for the yticks
xlim : 2-tuple/list
ylim : 2-tuple/list
rot : int, default None
Rotation for ticks (xticks for vertical, yticks for horizontal plots)
fontsize : int, default None
Font size for xticks and yticks
colormap : str or matplotlib colormap object, default None
Colormap to select colors from. If string, load colormap with that name
from matplotlib.
colorbar : boolean, optional
If True, plot colorbar (only relevant for 'scatter' and 'hexbin' plots)
position : float
Specify relative alignments for bar plot layout.
From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5 (center)
table : boolean, Series or DataFrame, default False
If True, draw a table using the data in the DataFrame and the data will
be transposed to meet matplotlib's default layout.
If a Series or DataFrame is passed, use passed data to draw a table.
yerr : DataFrame, Series, array-like, dict and str
See :ref:`Plotting with Error Bars <visualization.errorbars>` for
detail.
xerr : same types as yerr.
label : label argument to provide to plot
secondary_y : boolean or sequence of ints, default False
If True then y-axis will be on the right
mark_right : boolean, default True
When using a secondary_y axis, automatically mark the column
labels with "(right)" in the legend
**kwds : keywords
Options to pass to matplotlib plotting method
Returns
-------
axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them
Notes
-----
- See matplotlib documentation online for more on this subject
- If `kind` = 'bar' or 'barh', you can specify relative alignments
for bar plot layout by `position` keyword.
From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5 (center)
"""
# function copied from pandas.plotting._core
# so it calls modified _plot below
import matplotlib.pyplot as plt
if ax is None and len(plt.get_fignums()) > 0:
ax = None
with plt.rc_context():
ax = plt.gca()
ax = MPLPlot._get_ax_layer(ax)
return _plot(
data,
kind=kind,
ax=ax,
figsize=figsize,
use_index=use_index,
title=title,
grid=grid,
legend=legend,
style=style,
logx=logx,
logy=logy,
loglog=loglog,
xticks=xticks,
yticks=yticks,
xlim=xlim,
ylim=ylim,
rot=rot,
fontsize=fontsize,
colormap=colormap,
table=table,
yerr=yerr,
xerr=xerr,
label=label,
secondary_y=secondary_y,
**kwds
)
def plot_frame(
data,
x=None,
y=None,
kind="line",
ax=None,
subplots=None,
sharex=None,
sharey=False,
layout=None,
figsize=None,
use_index=True,
title=None,
grid=None,
legend=True,
style=None,
logx=False,
logy=False,
loglog=False,
xticks=None,
yticks=None,
xlim=None,
ylim=None,
rot=None,
fontsize=None,
colormap=None,
table=False,
yerr=None,
xerr=None,
secondary_y=False,
sort_columns=False,
**kwds
):
"""
Make plots of DataFrames using matplotlib / pylab.
Each plot kind has a corresponding method on the
``DataFrame.plot`` accessor:
``kdf.plot(kind='line')`` is equivalent to
``kdf.plot.line()``.
Parameters
----------
data : DataFrame
kind : str
- 'line' : line plot (default)
- 'bar' : vertical bar plot
- 'barh' : horizontal bar plot
- 'hist' : histogram
- 'box' : boxplot
- 'kde' : Kernel Density Estimation plot
- 'density' : same as 'kde'
- 'area' : area plot
- 'pie' : pie plot
- 'scatter' : scatter plot
ax : matplotlib axes object
If not passed, uses gca()
x : label or position, default None
y : label, position or list of label, positions, default None
Allows plotting of one column versus another.
figsize : a tuple (width, height) in inches
use_index : boolean, default True
Use index as ticks for x axis
title : string or list
Title to use for the plot. If a string is passed, print the string at
the top of the figure. If a list is passed and `subplots` is True,
print each item in the list above the corresponding subplot.
grid : boolean, default None (matlab style default)
Axis grid lines
legend : False/True/'reverse'
Place legend on axis subplots
style : list or dict
matplotlib line style per column
logx : boolean, default False
Use log scaling on x axis
logy : boolean, default False
Use log scaling on y axis
loglog : boolean, default False
Use log scaling on both x and y axes
xticks : sequence
Values to use for the xticks
yticks : sequence
Values to use for the yticks
xlim : 2-tuple/list
ylim : 2-tuple/list
sharex: bool or None, default is None
Whether to share x axis or not.
sharey: bool, default is False
Whether to share y axis or not.
rot : int, default None
Rotation for ticks (xticks for vertical, yticks for horizontal plots)
fontsize : int, default None
Font size for xticks and yticks
colormap : str or matplotlib colormap object, default None
Colormap to select colors from. If string, load colormap with that name
from matplotlib.
colorbar : boolean, optional
If True, plot colorbar (only relevant for 'scatter' and 'hexbin' plots)
position : float
Specify relative alignments for bar plot layout.
From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5 (center)
table : boolean, Series or DataFrame, default False
If True, draw a table using the data in the DataFrame and the data will
be transposed to meet matplotlib's default layout.
If a Series or DataFrame is passed, use passed data to draw a table.
yerr : DataFrame, Series, array-like, dict and str
See :ref:`Plotting with Error Bars <visualization.errorbars>` for
detail.
xerr : same types as yerr.
label : label argument to provide to plot
secondary_y : boolean or sequence of ints, default False
If True then y-axis will be on the right
mark_right : boolean, default True
When using a secondary_y axis, automatically mark the column
labels with "(right)" in the legend
sort_columns: bool, default is False
When True, will sort values on plots.
**kwds : keywords
Options to pass to matplotlib plotting method
Returns
-------
axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them
Notes
-----
- See matplotlib documentation online for more on this subject
- If `kind` = 'bar' or 'barh', you can specify relative alignments
for bar plot layout by `position` keyword.
From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5 (center)
"""
return _plot(
data,
kind=kind,
x=x,
y=y,
ax=ax,
figsize=figsize,
use_index=use_index,
title=title,
grid=grid,
legend=legend,
subplots=subplots,
style=style,
logx=logx,
logy=logy,
loglog=loglog,
xticks=xticks,
yticks=yticks,
xlim=xlim,
ylim=ylim,
rot=rot,
fontsize=fontsize,
colormap=colormap,
table=table,
yerr=yerr,
xerr=xerr,
sharex=sharex,
sharey=sharey,
secondary_y=secondary_y,
layout=layout,
sort_columns=sort_columns,
**kwds
)
def _plot(data, x=None, y=None, subplots=False, ax=None, kind="line", **kwds):
from databricks.koalas import DataFrame
# function copied from pandas.plotting._core
# and adapted to handle Koalas DataFrame and Series
kind = kind.lower().strip()
kind = {"density": "kde"}.get(kind, kind)
if kind in _all_kinds:
klass = _plot_klass[kind]
else:
raise ValueError("%r is not a valid plot kind" % kind)
# scatter and hexbin are inherited from PlanePlot which require x and y
if kind in ("scatter", "hexbin"):
plot_obj = klass(data, x, y, subplots=subplots, ax=ax, kind=kind, **kwds)
else:
# check data type and do preprocess before applying plot
if isinstance(data, DataFrame):
if x is not None:
data = data.set_index(x)
# TODO: check if value of y is plottable
if y is not None:
data = data[y]
plot_obj = klass(data, subplots=subplots, ax=ax, kind=kind, **kwds)
plot_obj.generate()
plot_obj.draw()
return plot_obj.result
class KoalasSeriesPlotMethods(PandasObject):
"""
Series plotting accessor and method.
Plotting methods can also be accessed by calling the accessor as a method
with the ``kind`` argument:
``s.plot(kind='hist')`` is equivalent to ``s.plot.hist()``
"""
def __init__(self, data):
self.data = data
def __call__(
self,
kind="line",
ax=None,
figsize=None,
use_index=True,
title=None,
grid=None,
legend=False,
style=None,
logx=False,
logy=False,
loglog=False,
xticks=None,
yticks=None,
xlim=None,
ylim=None,
rot=None,
fontsize=None,
colormap=None,
table=False,
yerr=None,
xerr=None,
label=None,
secondary_y=False,
**kwds
):
return plot_series(
self.data,
kind=kind,
ax=ax,
figsize=figsize,
use_index=use_index,
title=title,
grid=grid,
legend=legend,
style=style,
logx=logx,
logy=logy,
loglog=loglog,
xticks=xticks,
yticks=yticks,
xlim=xlim,
ylim=ylim,
rot=rot,
fontsize=fontsize,
colormap=colormap,
table=table,
yerr=yerr,
xerr=xerr,
label=label,
secondary_y=secondary_y,
**kwds
)
__call__.__doc__ = plot_series.__doc__
def line(self, x=None, y=None, **kwargs):
"""
Plot Series as lines.
This function is useful to plot lines using Series's values
as coordinates.
Parameters
----------
x : int or str, optional
Columns to use for the horizontal axis.
Either the location or the label of the columns to be used.
By default, it will use the DataFrame indices.
y : int, str, or list of them, optional
The values to be plotted.
Either the location or the label of the columns to be used.
By default, it will use the remaining DataFrame numeric columns.
**kwds
Keyword arguments to pass on to :meth:`Series.plot`.
Returns
-------
:class:`matplotlib.axes.Axes` or :class:`numpy.ndarray`
Return an ndarray when ``subplots=True``.
See Also
--------
matplotlib.pyplot.plot : Plot y versus x as lines and/or markers.
Examples
--------
Basic plot.
.. plot::
:context: close-figs
>>> s = ks.Series([1, 3, 2])
>>> ax = s.plot.line()
"""
return self(kind="line", x=x, y=y, **kwargs)
def bar(self, **kwds):
"""
Vertical bar plot.
Parameters
----------
**kwds : optional
Additional keyword arguments are documented in
:meth:`Koalas.Series.plot`.
Returns
-------
axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them
Examples
--------
Basic plot.
.. plot::
:context: close-figs
>>> s = ks.Series([1, 3, 2])
>>> ax = s.plot.bar()
"""
return self(kind="bar", **kwds)
def barh(self, **kwds):
"""
Make a horizontal bar plot.
A horizontal bar plot is a plot that presents quantitative data with
rectangular bars with lengths proportional to the values that they
represent. A bar plot shows comparisons among discrete categories. One
axis of the plot shows the specific categories being compared, and the
other axis represents a measured value.
Parameters
----------
x : label or position, default DataFrame.index
Column to be used for categories.
y : label or position, default All numeric columns in dataframe
Columns to be plotted from the DataFrame.
**kwds
Keyword arguments to pass on to :meth:`databricks.koalas.DataFrame.plot`.
Returns
-------
:class:`matplotlib.axes.Axes` or numpy.ndarray of them
See Also
--------
matplotlib.axes.Axes.bar : Plot a vertical bar plot using matplotlib.
Examples
--------
.. plot::
:context: close-figs
>>> df = ks.DataFrame({'lab': ['A', 'B', 'C'], 'val': [10, 30, 20]})
>>> plot = df.val.plot.barh()
"""
return self(kind="barh", **kwds)
def box(self, **kwds):
"""
Make a box plot of the DataFrame columns.
Parameters
----------
**kwds : optional
Additional keyword arguments are documented in
:meth:`Koalas.Series.plot`.
precision: scalar, default = 0.01
This argument is used by Koalas to compute approximate statistics
for building a boxplot. Use *smaller* values to get more precise
statistics.
Returns
-------
axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them
Notes
-----
There are behavior differences between Koalas and pandas.
* Koalas computes approximate statistics - expect differences between
pandas and Koalas boxplots, especially regarding 1st and 3rd quartiles.
* The `whis` argument is only supported as a single number.
* Koalas doesn't support the following argument(s).
* `bootstrap` argument is not supported
* `autorange` argument is not supported
Examples
--------
Draw a box plot from a DataFrame with four columns of randomly
generated data.
.. plot::
:context: close-figs
>>> data = np.random.randn(25, 4)
>>> df = ks.DataFrame(data, columns=list('ABCD'))
>>> ax = df['A'].plot.box()
"""
return self(kind="box", **kwds)
def hist(self, bins=10, **kwds):
"""
        Draw one histogram of the Series' values.
Parameters
----------
bins : integer, default 10
Number of histogram bins to be used
**kwds : optional
Additional keyword arguments are documented in
:meth:`Koalas.Series.plot`.
Returns
-------
axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them
Examples
--------
Basic plot.
.. plot::
:context: close-figs
>>> s = ks.Series([1, 3, 2])
>>> ax = s.plot.hist()
"""
return self(kind="hist", bins=bins, **kwds)
def kde(self, bw_method=None, ind=None, **kwargs):
"""
Generate Kernel Density Estimate plot using Gaussian kernels.
Parameters
----------
bw_method : scalar
The method used to calculate the estimator bandwidth.
See KernelDensity in PySpark for more information.
ind : NumPy array or integer, optional
Evaluation points for the estimated PDF. If None (default),
1000 equally spaced points are used. If `ind` is a NumPy array, the
KDE is evaluated at the points passed. If `ind` is an integer,
`ind` number of equally spaced points are used.
**kwargs : optional
Keyword arguments to pass on to :meth:`Koalas.Series.plot`.
Returns
-------
matplotlib.axes.Axes or numpy.ndarray of them
Examples
--------
A scalar bandwidth should be specified. Using a small bandwidth value can
lead to over-fitting, while using a large bandwidth value may result
in under-fitting:
.. plot::
:context: close-figs
>>> s = ks.Series([1, 2, 2.5, 3, 3.5, 4, 5])
>>> ax = s.plot.kde(bw_method=0.3)
.. plot::
:context: close-figs
>>> ax = s.plot.kde(bw_method=3)
The `ind` parameter determines the evaluation points for the
        plot of the estimated KDE:
.. plot::
:context: close-figs
>>> ax = s.plot.kde(ind=[1, 2, 3, 4, 5], bw_method=0.3)
"""
return self(kind="kde", bw_method=bw_method, ind=ind, **kwargs)
density = kde
def area(self, **kwds):
"""
Draw a stacked area plot.
An area plot displays quantitative data visually.
This function wraps the matplotlib area function.
Parameters
----------
x : label or position, optional
Coordinates for the X axis. By default uses the index.
y : label or position, optional
Column to plot. By default uses all columns.
stacked : bool, default True
            Area plots are stacked by default. Set to False to create an
            unstacked plot.
**kwds : optional
Additional keyword arguments are documented in
:meth:`DataFrame.plot`.
Returns
-------
matplotlib.axes.Axes or numpy.ndarray
Area plot, or array of area plots if subplots is True.
Examples
--------
.. plot::
:context: close-figs
>>> df = ks.DataFrame({
... 'sales': [3, 2, 3, 9, 10, 6],
... 'signups': [5, 5, 6, 12, 14, 13],
... 'visits': [20, 42, 28, 62, 81, 50],
... }, index=pd.date_range(start='2018/01/01', end='2018/07/01',
... freq='M'))
>>> plot = df.sales.plot.area()
"""
return self(kind="area", **kwds)
def pie(self, **kwds):
"""
Generate a pie plot.
A pie plot is a proportional representation of the numerical data in a
column. This function wraps :meth:`matplotlib.pyplot.pie` for the
specified column. If no column reference is passed and
``subplots=True`` a pie plot is drawn for each numerical column
independently.
Parameters
----------
y : int or label, optional
Label or position of the column to plot.
If not provided, ``subplots=True`` argument must be passed.
**kwds
Keyword arguments to pass on to :meth:`Koalas.Series.plot`.
Returns
-------
matplotlib.axes.Axes or np.ndarray of them
A NumPy array is returned when `subplots` is True.
Examples
--------
.. plot::
:context: close-figs
>>> df = ks.DataFrame({'mass': [0.330, 4.87, 5.97],
... 'radius': [2439.7, 6051.8, 6378.1]},
... index=['Mercury', 'Venus', 'Earth'])
>>> plot = df.mass.plot.pie(figsize=(5, 5))
.. plot::
:context: close-figs
>>> plot = df.mass.plot.pie(subplots=True, figsize=(6, 3))
"""
return self(kind="pie", **kwds)
class KoalasFramePlotMethods(PandasObject):
# TODO: not sure if Koalas wants to combine plot method for Series and DataFrame
"""
DataFrame plotting accessor and method.
Plotting methods can also be accessed by calling the accessor as a method
with the ``kind`` argument:
``df.plot(kind='hist')`` is equivalent to ``df.plot.hist()``
"""
def __init__(self, data):
self.data = data
def __call__(
self,
x=None,
y=None,
kind="line",
ax=None,
subplots=None,
sharex=None,
sharey=False,
layout=None,
figsize=None,
use_index=True,
title=None,
grid=None,
legend=True,
style=None,
logx=False,
logy=False,
loglog=False,
xticks=None,
yticks=None,
xlim=None,
ylim=None,
rot=None,
fontsize=None,
colormap=None,
table=False,
yerr=None,
xerr=None,
secondary_y=False,
sort_columns=False,
**kwds
):
return plot_frame(
self.data,
x=x,
y=y,
kind=kind,
ax=ax,
subplots=subplots,
sharex=sharex,
sharey=sharey,
layout=layout,
figsize=figsize,
use_index=use_index,
title=title,
grid=grid,
legend=legend,
style=style,
logx=logx,
logy=logy,
loglog=loglog,
xticks=xticks,
yticks=yticks,
xlim=xlim,
ylim=ylim,
rot=rot,
fontsize=fontsize,
colormap=colormap,
table=table,
yerr=yerr,
xerr=xerr,
secondary_y=secondary_y,
sort_columns=sort_columns,
**kwds
)
def line(self, x=None, y=None, **kwargs):
"""
Plot DataFrame as lines.
Parameters
----------
x: int or str, optional
Columns to use for the horizontal axis.
y : int, str, or list of them, optional
The values to be plotted.
**kwargs
Keyword arguments to pass on to :meth:`DataFrame.plot`.
Returns
-------
:class:`matplotlib.axes.Axes` or :class:`numpy.ndarray`
Return an ndarray when ``subplots=True``.
See Also
--------
matplotlib.pyplot.plot : Plot y versus x as lines and/or markers.
Examples
--------
.. plot::
:context: close-figs
The following example shows the populations for some animals
over the years.
>>> df = ks.DataFrame({'pig': [20, 18, 489, 675, 1776],
... 'horse': [4, 25, 281, 600, 1900]},
... index=[1990, 1997, 2003, 2009, 2014])
>>> lines = df.plot.line()
.. plot::
:context: close-figs
An example with subplots, so an array of axes is returned.
>>> axes = df.plot.line(subplots=True)
>>> type(axes)
<class 'numpy.ndarray'>
.. plot::
:context: close-figs
The following example shows the relationship between both
populations.
>>> lines = df.plot.line(x='pig', y='horse')
"""
return self(kind="line", x=x, y=y, **kwargs)
def kde(self, bw_method=None, ind=None, **kwargs):
"""
Generate Kernel Density Estimate plot using Gaussian kernels.
Parameters
----------
bw_method : scalar
The method used to calculate the estimator bandwidth.
See KernelDensity in PySpark for more information.
ind : NumPy array or integer, optional
Evaluation points for the estimated PDF. If None (default),
1000 equally spaced points are used. If `ind` is a NumPy array, the
KDE is evaluated at the points passed. If `ind` is an integer,
`ind` number of equally spaced points are used.
**kwargs : optional
Keyword arguments to pass on to :meth:`Koalas.DataFrame.plot`.
Returns
-------
matplotlib.axes.Axes or numpy.ndarray of them
Examples
--------
For DataFrame, it works in the same way as Series:
.. plot::
:context: close-figs
>>> df = ks.DataFrame({
... 'x': [1, 2, 2.5, 3, 3.5, 4, 5],
... 'y': [4, 4, 4.5, 5, 5.5, 6, 6],
... })
>>> ax = df.plot.kde(bw_method=0.3)
.. plot::
:context: close-figs
>>> ax = df.plot.kde(bw_method=3)
.. plot::
:context: close-figs
>>> ax = df.plot.kde(ind=[1, 2, 3, 4, 5, 6], bw_method=0.3)
"""
return self(kind="kde", bw_method=bw_method, ind=ind, **kwargs)
density = kde
def pie(self, y=None, **kwds):
"""
Generate a pie plot.
A pie plot is a proportional representation of the numerical data in a
column. This function wraps :meth:`matplotlib.pyplot.pie` for the
specified column. If no column reference is passed and
``subplots=True`` a pie plot is drawn for each numerical column
independently.
Parameters
----------
y : int or label, optional
Label or position of the column to plot.
If not provided, ``subplots=True`` argument must be passed.
**kwds
Keyword arguments to pass on to :meth:`Koalas.DataFrame.plot`.
Returns
-------
matplotlib.axes.Axes or np.ndarray of them
A NumPy array is returned when `subplots` is True.
Examples
--------
In the example below we have a DataFrame with the information about
        planets' mass and radius. We pass the 'mass' column to the
pie function to get a pie plot.
.. plot::
:context: close-figs
>>> df = ks.DataFrame({'mass': [0.330, 4.87, 5.97],
... 'radius': [2439.7, 6051.8, 6378.1]},
... index=['Mercury', 'Venus', 'Earth'])
>>> plot = df.plot.pie(y='mass', figsize=(5, 5))
.. plot::
:context: close-figs
>>> plot = df.plot.pie(subplots=True, figsize=(6, 3))
"""
from databricks.koalas import DataFrame
# pandas will raise an error if y is None and subplots if not True
if isinstance(self.data, DataFrame) and y is None and not kwds.get("subplots", False):
raise ValueError("pie requires either y column or 'subplots=True'")
return self(kind="pie", y=y, **kwds)
def area(self, x=None, y=None, stacked=True, **kwds):
"""
Draw a stacked area plot.
An area plot displays quantitative data visually.
This function wraps the matplotlib area function.
Parameters
----------
x : label or position, optional
Coordinates for the X axis. By default uses the index.
y : label or position, optional
Column to plot. By default uses all columns.
stacked : bool, default True
            Area plots are stacked by default. Set to False to create an
            unstacked plot.
**kwds : optional
Additional keyword arguments are documented in
:meth:`DataFrame.plot`.
Returns
-------
matplotlib.axes.Axes or numpy.ndarray
Area plot, or array of area plots if subplots is True.
Examples
--------
.. plot::
:context: close-figs
>>> df = ks.DataFrame({
... 'sales': [3, 2, 3, 9, 10, 6],
... 'signups': [5, 5, 6, 12, 14, 13],
... 'visits': [20, 42, 28, 62, 81, 50],
... }, index=pd.date_range(start='2018/01/01', end='2018/07/01',
... freq='M'))
>>> plot = df.plot.area()
"""
return self(kind="area", x=x, y=y, stacked=stacked, **kwds)
def bar(self, x=None, y=None, **kwds):
"""
Vertical bar plot.
Parameters
----------
x : label or position, optional
Allows plotting of one column versus another.
If not specified, the index of the DataFrame is used.
y : label or position, optional
Allows plotting of one column versus another.
If not specified, all numerical columns are used.
**kwds : optional
Additional keyword arguments are documented in
:meth:`Koalas.DataFrame.plot`.
Returns
-------
axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them
Examples
--------
Basic plot.
.. plot::
:context: close-figs
>>> df = ks.DataFrame({'lab': ['A', 'B', 'C'], 'val': [10, 30, 20]})
>>> ax = df.plot.bar(x='lab', y='val', rot=0)
Plot a whole dataframe to a bar plot. Each column is assigned a
distinct color, and each row is nested in a group along the
horizontal axis.
.. plot::
:context: close-figs
>>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]
>>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]
>>> index = ['snail', 'pig', 'elephant',
... 'rabbit', 'giraffe', 'coyote', 'horse']
>>> df = ks.DataFrame({'speed': speed,
... 'lifespan': lifespan}, index=index)
>>> ax = df.plot.bar(rot=0)
Instead of nesting, the figure can be split by column with
``subplots=True``. In this case, a :class:`numpy.ndarray` of
:class:`matplotlib.axes.Axes` are returned.
.. plot::
:context: close-figs
>>> axes = df.plot.bar(rot=0, subplots=True)
>>> axes[1].legend(loc=2) # doctest: +SKIP
Plot a single column.
.. plot::
:context: close-figs
>>> ax = df.plot.bar(y='speed', rot=0)
Plot only selected categories for the DataFrame.
.. plot::
:context: close-figs
>>> ax = df.plot.bar(x='lifespan', rot=0)
"""
return self(kind="bar", x=x, y=y, **kwds)
def barh(self, x=None, y=None, **kwargs):
"""
Make a horizontal bar plot.
A horizontal bar plot is a plot that presents quantitative data with rectangular
bars with lengths proportional to the values that they represent. A bar plot shows
comparisons among discrete categories. One axis of the plot shows the specific
categories being compared, and the other axis represents a measured value.
Parameters
----------
x : label or position, default DataFrame.index
Column to be used for categories.
y : label or position, default All numeric columns in dataframe
Columns to be plotted from the DataFrame.
**kwds:
Keyword arguments to pass on to :meth:`databricks.koalas.DataFrame.plot`.
Returns
-------
:class:`matplotlib.axes.Axes` or numpy.ndarray of them
See Also
--------
matplotlib.axes.Axes.bar : Plot a vertical bar plot using matplotlib.
Examples
--------
Basic example
.. plot::
:context: close-figs
>>> df = ks.DataFrame({'lab': ['A', 'B', 'C'], 'val': [10, 30, 20]})
>>> ax = df.plot.barh(x='lab', y='val')
Plot a whole DataFrame to a horizontal bar plot
.. plot::
:context: close-figs
>>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]
>>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]
>>> index = ['snail', 'pig', 'elephant',
... 'rabbit', 'giraffe', 'coyote', 'horse']
>>> df = ks.DataFrame({'speed': speed,
... 'lifespan': lifespan}, index=index)
>>> ax = df.plot.barh()
Plot a column of the DataFrame to a horizontal bar plot
.. plot::
:context: close-figs
>>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]
>>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]
>>> index = ['snail', 'pig', 'elephant',
... 'rabbit', 'giraffe', 'coyote', 'horse']
>>> df = ks.DataFrame({'speed': speed,
... 'lifespan': lifespan}, index=index)
>>> ax = df.plot.barh(y='speed')
Plot DataFrame versus the desired column
.. plot::
:context: close-figs
>>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]
>>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]
>>> index = ['snail', 'pig', 'elephant',
... 'rabbit', 'giraffe', 'coyote', 'horse']
>>> df = ks.DataFrame({'speed': speed,
... 'lifespan': lifespan}, index=index)
>>> ax = df.plot.barh(x='lifespan')
"""
return self(kind="barh", x=x, y=y, **kwargs)
def hexbin(self, **kwds):
return unsupported_function(class_name="pd.DataFrame", method_name="hexbin")()
def box(self, **kwds):
return unsupported_function(class_name="pd.DataFrame", method_name="box")()
def hist(self, bins=10, **kwds):
"""
Make a histogram of the DataFrame's columns.
A `histogram`_ is a representation of the distribution of data.
This function calls :meth:`matplotlib.pyplot.hist`, on each series in
the DataFrame, resulting in one histogram per column.
.. _histogram: https://en.wikipedia.org/wiki/Histogram
Parameters
----------
bins : integer or sequence, default 10
Number of histogram bins to be used. If an integer is given, bins + 1
bin edges are calculated and returned. If bins is a sequence, gives
bin edges, including left edge of first bin and right edge of last
bin. In this case, bins is returned unmodified.
**kwds
All other plotting keyword arguments to be passed to
:meth:`matplotlib.pyplot.hist`.
Returns
-------
matplotlib.AxesSubplot or numpy.ndarray of them
See Also
--------
matplotlib.pyplot.hist : Plot a histogram using matplotlib.
Examples
--------
When we draw a dice 6000 times, we expect to get each value around 1000
times. But when we draw two dice and sum the result, the distribution
is going to be quite different. A histogram illustrates those
distributions.
.. plot::
:context: close-figs
>>> df = pd.DataFrame(
... np.random.randint(1, 7, 6000),
... columns=['one'])
>>> df['two'] = df['one'] + np.random.randint(1, 7, 6000)
>>> df = ks.from_pandas(df)
>>> ax = df.plot.hist(bins=12, alpha=0.5)
"""
return self(kind="hist", bins=bins, **kwds)
def scatter(self, x, y, s=None, c=None, **kwds):
"""
Create a scatter plot with varying marker point size and color.
The coordinates of each point are defined by two dataframe columns and
filled circles are used to represent each point. This kind of plot is
useful to see complex correlations between two variables. Points could
be for instance natural 2D coordinates like longitude and latitude in
a map or, in general, any pair of metrics that can be plotted against
each other.
Parameters
----------
x : int or str
The column name or column position to be used as horizontal
coordinates for each point.
y : int or str
The column name or column position to be used as vertical
coordinates for each point.
s : scalar or array_like, optional
c : str, int or array_like, optional
**kwds: Optional
Keyword arguments to pass on to :meth:`databricks.koalas.DataFrame.plot`.
Returns
-------
:class:`matplotlib.axes.Axes` or numpy.ndarray of them
See Also
--------
matplotlib.pyplot.scatter : Scatter plot using multiple input data
formats.
Examples
--------
Let's see how to draw a scatter plot using coordinates from the values
in a DataFrame's columns.
.. plot::
:context: close-figs
>>> df = ks.DataFrame([[5.1, 3.5, 0], [4.9, 3.0, 0], [7.0, 3.2, 1],
... [6.4, 3.2, 1], [5.9, 3.0, 2]],
... columns=['length', 'width', 'species'])
>>> ax1 = df.plot.scatter(x='length',
... y='width',
... c='DarkBlue')
And now with the color determined by a column as well.
.. plot::
:context: close-figs
>>> ax2 = df.plot.scatter(x='length',
... y='width',
... c='species',
... colormap='viridis')
"""
return self(kind="scatter", x=x, y=y, s=s, c=c, **kwds)
| 1 | 15,718 | Could you remove the `,` at the end? | databricks-koalas | py
@@ -52,6 +52,8 @@ var Server = function(requestHandler) {
* with the server host when it has fully started.
*/
this.start = function(opt_port) {
+ assert(typeof opt_port !== 'function',
+ "start invoked with function, not port (mocha callback)?");
var port = opt_port || portprober.findFreePort('localhost');
return promise.when(port, function(port) {
return promise.checkedNodeCall( | 1 | // Copyright 2013 Selenium committers
// Copyright 2013 Software Freedom Conservancy
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
'use strict';
var assert = require('assert'),
http = require('http'),
url = require('url');
var net = require('../../net'),
portprober = require('../../net/portprober'),
promise = require('../..').promise;
/**
* Encapsulates a simple HTTP server for testing. The {@code onrequest}
* function should be overridden to define request handling behavior.
* @param {function(!http.ServerRequest, !http.ServerResponse)} requestHandler
* The request handler for the server.
* @constructor
*/
var Server = function(requestHandler) {
var server = http.createServer(function(req, res) {
requestHandler(req, res);
});
server.on('connection', function(stream) {
stream.setTimeout(4000);
});
/** @typedef {{port: number, address: string, family: string}} */
var Host;
/**
* Starts the server on the given port. If no port, or 0, is provided,
* the server will be started on a random port.
* @param {number=} opt_port The port to start on.
* @return {!webdriver.promise.Promise.<Host>} A promise that will resolve
* with the server host when it has fully started.
*/
this.start = function(opt_port) {
var port = opt_port || portprober.findFreePort('localhost');
return promise.when(port, function(port) {
return promise.checkedNodeCall(
server.listen.bind(server, port, 'localhost'));
}).then(function() {
return server.address();
});
};
/**
* Stops the server.
* @return {!webdriver.promise.Promise} A promise that will resolve when the
* server has closed all connections.
*/
this.stop = function() {
var d = promise.defer();
server.close(d.fulfill);
return d.promise;
};
/**
* @return {Host} This server's host info.
* @throws {Error} If the server is not running.
*/
this.address = function() {
var addr = server.address();
if (!addr) {
throw Error('The server is not running!');
}
return addr;
};
/**
* @return {string} The host:port of this server.
* @throws {Error} If the server is not running.
*/
this.host = function() {
return net.getLoopbackAddress() + ':' +
this.address().port;
};
/**
* Formats a URL for this server.
* @param {string=} opt_pathname The desired pathname on the server.
* @return {string} The formatted URL.
* @throws {Error} If the server is not running.
*/
this.url = function(opt_pathname) {
var addr = this.address();
var pathname = opt_pathname || '';
return url.format({
protocol: 'http',
hostname: net.getLoopbackAddress(),
port: addr.port,
pathname: pathname
});
};
};
// PUBLIC API
exports.Server = Server;
| 1 | 11,552 | Maybe it would be simpler to ignore opt_port if its type !== 'number'? | SeleniumHQ-selenium | java
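A minimal sketch of the reviewer's alternative: instead of asserting, `start` could simply ignore a non-numeric `opt_port` (such as a mocha done-callback passed by mistake) and fall back to probing a free port. This is hypothetical and reuses only helpers already present in the file.
// Hypothetical variant of Server.start: any non-numeric opt_port is ignored
// and a free port is probed, so a stray callback argument cannot break startup.
this.start = function(opt_port) {
  var port = (typeof opt_port === 'number' && opt_port > 0)
      ? opt_port
      : portprober.findFreePort('localhost');
  return promise.when(port, function(port) {
    return promise.checkedNodeCall(
        server.listen.bind(server, port, 'localhost'));
  }).then(function() {
    return server.address();
  });
};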
@@ -137,11 +137,11 @@ type AccountData struct {
// is expected to have copy-by-value semantics.
Assets map[AssetIndex]AssetHolding `codec:"asset,allocbound=-"`
- // SpendingKey is the address against which signatures/multisigs/logicsigs should be checked.
+ // EffectiveAddr is the address against which signatures/multisigs/logicsigs should be checked.
// If empty, the address of the account whose AccountData this is is used.
- // A transaction may change an account's SpendingKey to "re-key" the account.
+ // A transaction may change an account's EffectiveAddr to "re-key" the account.
// This allows key rotation, changing the members in a multisig, etc.
- SpendingKey Address `codec:"spend"`
+ EffectiveAddr Address `codec:"spend"`
}
// AccountDetail encapsulates meaningful details about a given account, for external consumption | 1 | // Copyright (C) 2019-2020 Algorand, Inc.
// This file is part of go-algorand
//
// go-algorand is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// go-algorand is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
package basics
import (
"reflect"
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/protocol"
)
// Status is the delegation status of an account's MicroAlgos
type Status byte
const (
// Offline indicates that the associated account is delegated.
Offline Status = iota
// Online indicates that the associated account is used as part of the delegation pool.
Online
// NotParticipating indicates that the associated account is neither a delegator nor a delegate. Currently it is reserved for the incentive pool.
NotParticipating
)
func (s Status) String() string {
switch s {
case Offline:
return "Offline"
case Online:
return "Online"
case NotParticipating:
return "Not Participating"
}
return ""
}
// AccountData contains the data associated with a given address.
//
// This includes the account balance, delegation keys, delegation status, and a custom note.
type AccountData struct {
_struct struct{} `codec:",omitempty,omitemptyarray"`
Status Status `codec:"onl"`
MicroAlgos MicroAlgos `codec:"algo"`
// RewardsBase is used to implement rewards.
// This is not meaningful for accounts with Status=NotParticipating.
//
// Every block assigns some amount of rewards (algos) to every
// participating account. The amount is the product of how much
// block.RewardsLevel increased from the previous block and
// how many whole config.Protocol.RewardUnit algos this
// account holds.
//
// For performance reasons, we do not want to walk over every
// account to apply these rewards to AccountData.MicroAlgos. Instead,
// we defer applying the rewards until some other transaction
// touches that participating account, and at that point, apply all
// of the rewards to the account's AccountData.MicroAlgos.
//
// For correctness, we need to be able to determine how many
// total algos are present in the system, including deferred
// rewards (deferred in the sense that they have not been
// reflected in the account's AccountData.MicroAlgos, as described
// above). To compute this total efficiently, we avoid
// compounding rewards (i.e., no rewards on rewards) until
// they are applied to AccountData.MicroAlgos.
//
// Mechanically, RewardsBase stores the block.RewardsLevel
// whose rewards are already reflected in AccountData.MicroAlgos.
// If the account is Status=Offline or Status=Online, its
// effective balance (if a transaction were to be issued
// against this account) may be higher, as computed by
// AccountData.Money(). That function calls
// AccountData.WithUpdatedRewards() to apply the deferred
// rewards to AccountData.MicroAlgos.
RewardsBase uint64 `codec:"ebase"`
// RewardedMicroAlgos is used to track how many algos were given
// to this account since the account was first created.
//
// This field is updated along with RewardsBase; note that
// it won't answer the question "how many algos did I make in
// the past week".
RewardedMicroAlgos MicroAlgos `codec:"ern"`
VoteID crypto.OneTimeSignatureVerifier `codec:"vote"`
SelectionID crypto.VRFVerifier `codec:"sel"`
VoteFirstValid Round `codec:"voteFst"`
VoteLastValid Round `codec:"voteLst"`
VoteKeyDilution uint64 `codec:"voteKD"`
// If this account created an asset, AssetParams stores
// the parameters defining that asset. The params are indexed
// by the Index of the AssetID; the Creator is this account's address.
//
// An account with any asset in AssetParams cannot be
// closed, until the asset is destroyed. An asset can
// be destroyed if this account holds AssetParams.Total units
// of that asset (in the Assets array below).
//
// NOTE: do not modify this value in-place in existing AccountData
// structs; allocate a copy and modify that instead. AccountData
// is expected to have copy-by-value semantics.
AssetParams map[AssetIndex]AssetParams `codec:"apar,allocbound=-"`
// Assets is the set of assets that can be held by this
// account. Assets (i.e., slots in this map) are explicitly
// added and removed from an account by special transactions.
// The map is keyed by the AssetID, which is the address of
// the account that created the asset plus a unique counter
// to distinguish re-created assets.
//
// Each asset bumps the required MinBalance in this account.
//
// An account that creates an asset must have its own asset
// in the Assets map until that asset is destroyed.
//
// NOTE: do not modify this value in-place in existing AccountData
// structs; allocate a copy and modify that instead. AccountData
// is expected to have copy-by-value semantics.
Assets map[AssetIndex]AssetHolding `codec:"asset,allocbound=-"`
// SpendingKey is the address against which signatures/multisigs/logicsigs should be checked.
// If empty, the address of the account whose AccountData this is is used.
// A transaction may change an account's SpendingKey to "re-key" the account.
// This allows key rotation, changing the members in a multisig, etc.
SpendingKey Address `codec:"spend"`
}
// AccountDetail encapsulates meaningful details about a given account, for external consumption
type AccountDetail struct {
Address Address
Algos MicroAlgos
Status Status
}
// SupplyDetail encapsulates meaningful details about the ledger's current token supply
type SupplyDetail struct {
Round Round
TotalMoney MicroAlgos
OnlineMoney MicroAlgos
}
// BalanceDetail encapsulates meaningful details about the current balances of the ledger, for external consumption
type BalanceDetail struct {
Round Round
TotalMoney MicroAlgos
OnlineMoney MicroAlgos
Accounts []AccountDetail
}
// AssetIndex is the unique integer index of an asset that can be used to look
// up the creator of the asset, whose balance record contains the AssetParams
type AssetIndex uint64
// AssetLocator stores both the asset creator, whose balance record contains
// the asset parameters, and the asset index, which is the key into those
// parameters
type AssetLocator struct {
Creator Address
Index AssetIndex
}
// AssetHolding describes an asset held by an account.
type AssetHolding struct {
_struct struct{} `codec:",omitempty,omitemptyarray"`
Amount uint64 `codec:"a"`
Frozen bool `codec:"f"`
}
// AssetParams describes the parameters of an asset.
type AssetParams struct {
_struct struct{} `codec:",omitempty,omitemptyarray"`
// Total specifies the total number of units of this asset
// created.
Total uint64 `codec:"t"`
// Decimals specifies the number of digits to display after the decimal
// place when displaying this asset. A value of 0 represents an asset
// that is not divisible, a value of 1 represents an asset divisible
// into tenths, and so on. This value must be between 0 and 19
// (inclusive).
Decimals uint32 `codec:"dc"`
// DefaultFrozen specifies whether slots for this asset
// in user accounts are frozen by default or not.
DefaultFrozen bool `codec:"df"`
// UnitName specifies a hint for the name of a unit of
// this asset.
UnitName string `codec:"un"`
// AssetName specifies a hint for the name of the asset.
AssetName string `codec:"an"`
// URL specifies a URL where more information about the asset can be
// retrieved
URL string `codec:"au"`
// MetadataHash specifies a commitment to some unspecified asset
// metadata. The format of this metadata is up to the application.
MetadataHash [32]byte `codec:"am"`
// Manager specifies an account that is allowed to change the
// non-zero addresses in this AssetParams.
Manager Address `codec:"m"`
// Reserve specifies an account whose holdings of this asset
// should be reported as "not minted".
Reserve Address `codec:"r"`
// Freeze specifies an account that is allowed to change the
// frozen state of holdings of this asset.
Freeze Address `codec:"f"`
// Clawback specifies an account that is allowed to take units
// of this asset from any account.
Clawback Address `codec:"c"`
}
// MakeAccountData returns an AccountData with the given status and balance
func MakeAccountData(status Status, algos MicroAlgos) AccountData {
return AccountData{Status: status, MicroAlgos: algos}
}
// Money returns the amount of MicroAlgos associated with the user's account
func (u AccountData) Money(proto config.ConsensusParams, rewardsLevel uint64) (money MicroAlgos, rewards MicroAlgos) {
e := u.WithUpdatedRewards(proto, rewardsLevel)
return e.MicroAlgos, e.RewardedMicroAlgos
}
// WithUpdatedRewards returns an updated number of algos in an AccountData
// to reflect rewards up to some rewards level.
func (u AccountData) WithUpdatedRewards(proto config.ConsensusParams, rewardsLevel uint64) AccountData {
if u.Status != NotParticipating {
var ot OverflowTracker
rewardsUnits := u.MicroAlgos.RewardUnits(proto)
rewardsDelta := ot.Sub(rewardsLevel, u.RewardsBase)
rewards := MicroAlgos{Raw: ot.Mul(rewardsUnits, rewardsDelta)}
u.MicroAlgos = ot.AddA(u.MicroAlgos, rewards)
if ot.Overflowed {
logging.Base().Panicf("AccountData.WithUpdatedRewards(): overflowed account balance when applying rewards %v + %d*(%d-%d)", u.MicroAlgos, rewardsUnits, rewardsLevel, u.RewardsBase)
}
u.RewardsBase = rewardsLevel
// The total reward over the lifetime of the account could exceed a 64-bit value. As a result
// this rewardAlgos counter could potentially roll over.
u.RewardedMicroAlgos = MicroAlgos{Raw: (u.RewardedMicroAlgos.Raw + rewards.Raw)}
}
return u
}
// VotingStake returns the amount of MicroAlgos associated with the user's account
// for the purpose of participating in the Algorand protocol. It assumes the
// caller has already updated rewards appropriately using WithUpdatedRewards().
func (u AccountData) VotingStake() MicroAlgos {
if u.Status != Online {
return MicroAlgos{Raw: 0}
}
return u.MicroAlgos
}
// KeyDilution returns the key dilution for this account,
// returning the default key dilution if not explicitly specified.
func (u AccountData) KeyDilution(proto config.ConsensusParams) uint64 {
if u.VoteKeyDilution != 0 {
return u.VoteKeyDilution
}
return proto.DefaultKeyDilution
}
// IsZero checks if an AccountData value is the same as its zero value.
func (u AccountData) IsZero() bool {
if u.Assets != nil && len(u.Assets) == 0 {
u.Assets = nil
}
return reflect.DeepEqual(u, AccountData{})
}
// BalanceRecord pairs an account's address with its associated data.
type BalanceRecord struct {
_struct struct{} `codec:",omitempty,omitemptyarray"`
Addr Address `codec:"addr"`
AccountData
}
// ToBeHashed implements the crypto.Hashable interface
func (u BalanceRecord) ToBeHashed() (protocol.HashID, []byte) {
return protocol.BalanceRecord, protocol.Encode(&u)
}
| 1 | 38,746 | do you want to rename the codec tag here too? | algorand-go-algorand | go |
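For context on the reviewer's question, renaming the codec tag as well would look like the sketch below. The tag name `eaddr` is invented here; because the tag is part of the msgpack encoding, changing it from `spend` would break decoding of previously written account records, which is a likely reason to rename only the Go field.
// Hypothetical excerpt of AccountData with the codec tag renamed together
// with the field. The tag is part of the wire encoding, so this would be a
// breaking change unless handled by a protocol upgrade.
type AccountData struct {
	// ... other fields unchanged ...

	EffectiveAddr Address `codec:"eaddr"`
}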
@@ -318,4 +318,11 @@ public interface DriverCommand {
// Mobile API
String GET_NETWORK_CONNECTION = "getNetworkConnection";
String SET_NETWORK_CONNECTION = "setNetworkConnection";
+
+ // Cast Media Router API
+ String GET_CAST_SINKS = "getCastSinks";
+ String SET_CAST_SINK_TO_USE = "selectCastSink";
+ String START_CAST_TAB_MIRRORING = "startCastTabMirroring";
+ String GET_CAST_ISSUE_MESSAGE = "getCastIssueMessage";
+ String STOP_CASTING = "stopCasting";
} | 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.selenium.remote;
import com.google.common.collect.ImmutableMap;
import org.openqa.selenium.Capabilities;
import org.openqa.selenium.Cookie;
import org.openqa.selenium.Dimension;
import org.openqa.selenium.Point;
import org.openqa.selenium.WindowType;
import org.openqa.selenium.interactions.Sequence;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.TimeUnit;
/**
* An empty interface defining constants for the standard commands defined in the WebDriver JSON
* wire protocol.
*
* @author [email protected] (Jason Leyba)
*/
public interface DriverCommand {
String GET_ALL_SESSIONS = "getAllSessions";
String GET_CAPABILITIES = "getCapabilities";
String NEW_SESSION = "newSession";
static CommandPayload NEW_SESSION(Capabilities capabilities) {
return new CommandPayload(NEW_SESSION, ImmutableMap.of("desiredCapabilities", capabilities));
}
String STATUS = "status";
String CLOSE = "close";
String QUIT = "quit";
String GET = "get";
static CommandPayload GET(String url) {
return new CommandPayload(GET, ImmutableMap.of("url", url));
}
String GO_BACK = "goBack";
String GO_FORWARD = "goForward";
String REFRESH = "refresh";
String ADD_COOKIE = "addCookie";
static CommandPayload ADD_COOKIE(Cookie cookie) {
return new CommandPayload(ADD_COOKIE, ImmutableMap.of("cookie", cookie));
}
String GET_ALL_COOKIES = "getCookies";
String GET_COOKIE = "getCookie";
String DELETE_COOKIE = "deleteCookie";
static CommandPayload DELETE_COOKIE(String name) {
return new CommandPayload(DELETE_COOKIE, ImmutableMap.of("name", name));
}
String DELETE_ALL_COOKIES = "deleteAllCookies";
String FIND_ELEMENT = "findElement";
static CommandPayload FIND_ELEMENT(String strategy, String value) {
return new CommandPayload(FIND_ELEMENT, ImmutableMap.of("using", strategy, "value", value));
}
String FIND_ELEMENTS = "findElements";
static CommandPayload FIND_ELEMENTS(String strategy, String value) {
return new CommandPayload(FIND_ELEMENTS, ImmutableMap.of("using", strategy, "value", value));
}
String FIND_CHILD_ELEMENT = "findChildElement";
static CommandPayload FIND_CHILD_ELEMENT(String id, String strategy, String value) {
return new CommandPayload(FIND_CHILD_ELEMENT,
ImmutableMap.of("id", id, "using", strategy, "value", value));
}
String FIND_CHILD_ELEMENTS = "findChildElements";
static CommandPayload FIND_CHILD_ELEMENTS(String id, String strategy, String value) {
return new CommandPayload(FIND_CHILD_ELEMENTS,
ImmutableMap.of("id", id, "using", strategy, "value", value));
}
String CLEAR_ELEMENT = "clearElement";
static CommandPayload CLEAR_ELEMENT(String id) {
return new CommandPayload(CLEAR_ELEMENT, ImmutableMap.of("id", id));
}
String CLICK_ELEMENT = "clickElement";
static CommandPayload CLICK_ELEMENT(String id) {
return new CommandPayload(CLICK_ELEMENT, ImmutableMap.of("id", id));
}
String SEND_KEYS_TO_ELEMENT = "sendKeysToElement";
static CommandPayload SEND_KEYS_TO_ELEMENT(String id, CharSequence[] keysToSend) {
return new CommandPayload(SEND_KEYS_TO_ELEMENT, ImmutableMap.of("id", id, "value", keysToSend));
}
String SEND_KEYS_TO_ACTIVE_ELEMENT = "sendKeysToActiveElement";
String SUBMIT_ELEMENT = "submitElement";
static CommandPayload SUBMIT_ELEMENT(String id) {
return new CommandPayload(SUBMIT_ELEMENT, ImmutableMap.of("id", id));
}
String UPLOAD_FILE = "uploadFile";
static CommandPayload UPLOAD_FILE(String file) {
return new CommandPayload(UPLOAD_FILE, ImmutableMap.of("file", file));
}
String GET_CURRENT_WINDOW_HANDLE = "getCurrentWindowHandle";
String GET_WINDOW_HANDLES = "getWindowHandles";
String GET_CURRENT_CONTEXT_HANDLE = "getCurrentContextHandle";
String GET_CONTEXT_HANDLES = "getContextHandles";
String SWITCH_TO_WINDOW = "switchToWindow";
static CommandPayload SWITCH_TO_WINDOW(String windowHandleOrName) {
return new CommandPayload(SWITCH_TO_WINDOW, ImmutableMap.of("handle", windowHandleOrName));
}
String SWITCH_TO_NEW_WINDOW = "newWindow";
static CommandPayload SWITCH_TO_NEW_WINDOW(WindowType typeHint) {
return new CommandPayload(SWITCH_TO_NEW_WINDOW, ImmutableMap.of("type", typeHint.toString()));
}
String SWITCH_TO_CONTEXT = "switchToContext";
String SWITCH_TO_FRAME = "switchToFrame";
static CommandPayload SWITCH_TO_FRAME(Object frame) {
return new CommandPayload(SWITCH_TO_FRAME, Collections.singletonMap("id", frame));
}
String SWITCH_TO_PARENT_FRAME = "switchToParentFrame";
String GET_ACTIVE_ELEMENT = "getActiveElement";
String GET_CURRENT_URL = "getCurrentUrl";
String GET_PAGE_SOURCE = "getPageSource";
String GET_TITLE = "getTitle";
String EXECUTE_SCRIPT = "executeScript";
static CommandPayload EXECUTE_SCRIPT(String script, List<Object> args) {
return new CommandPayload(EXECUTE_SCRIPT, ImmutableMap.of("script", script, "args", args));
}
String EXECUTE_ASYNC_SCRIPT = "executeAsyncScript";
static CommandPayload EXECUTE_ASYNC_SCRIPT(String script, List<Object> args) {
return new CommandPayload(EXECUTE_ASYNC_SCRIPT, ImmutableMap.of("script", script, "args", args));
}
String GET_ELEMENT_TEXT = "getElementText";
static CommandPayload GET_ELEMENT_TEXT(String id) {
return new CommandPayload(GET_ELEMENT_TEXT, ImmutableMap.of("id", id));
}
String GET_ELEMENT_TAG_NAME = "getElementTagName";
static CommandPayload GET_ELEMENT_TAG_NAME(String id) {
return new CommandPayload(GET_ELEMENT_TAG_NAME, ImmutableMap.of("id", id));
}
String IS_ELEMENT_SELECTED = "isElementSelected";
static CommandPayload IS_ELEMENT_SELECTED(String id) {
return new CommandPayload(IS_ELEMENT_SELECTED, ImmutableMap.of("id", id));
}
String IS_ELEMENT_ENABLED = "isElementEnabled";
static CommandPayload IS_ELEMENT_ENABLED(String id) {
return new CommandPayload(IS_ELEMENT_ENABLED, ImmutableMap.of("id", id));
}
String IS_ELEMENT_DISPLAYED = "isElementDisplayed";
static CommandPayload IS_ELEMENT_DISPLAYED(String id) {
return new CommandPayload(IS_ELEMENT_DISPLAYED, ImmutableMap.of("id", id));
}
String GET_ELEMENT_RECT = "getElementRect";
static CommandPayload GET_ELEMENT_RECT(String id) {
return new CommandPayload(GET_ELEMENT_RECT, ImmutableMap.of("id", id));
}
String GET_ELEMENT_LOCATION = "getElementLocation";
static CommandPayload GET_ELEMENT_LOCATION(String id) {
return new CommandPayload(GET_ELEMENT_LOCATION, ImmutableMap.of("id", id));
}
String GET_ELEMENT_LOCATION_ONCE_SCROLLED_INTO_VIEW = "getElementLocationOnceScrolledIntoView";
static CommandPayload GET_ELEMENT_LOCATION_ONCE_SCROLLED_INTO_VIEW(String id) {
return new CommandPayload(GET_ELEMENT_LOCATION_ONCE_SCROLLED_INTO_VIEW, ImmutableMap.of("id", id));
}
String GET_ELEMENT_SIZE = "getElementSize";
static CommandPayload GET_ELEMENT_SIZE(String id) {
return new CommandPayload(GET_ELEMENT_SIZE, ImmutableMap.of("id", id));
}
String GET_ELEMENT_ATTRIBUTE = "getElementAttribute";
static CommandPayload GET_ELEMENT_ATTRIBUTE(String id, String name) {
return new CommandPayload(GET_ELEMENT_ATTRIBUTE, ImmutableMap.of("id", id, "name", name));
}
String GET_ELEMENT_PROPERTY = "getElementProperty";
String GET_ELEMENT_VALUE_OF_CSS_PROPERTY = "getElementValueOfCssProperty";
static CommandPayload GET_ELEMENT_VALUE_OF_CSS_PROPERTY(String id, String name) {
return new CommandPayload(GET_ELEMENT_VALUE_OF_CSS_PROPERTY, ImmutableMap.of("id", id, "propertyName", name));
}
String ELEMENT_EQUALS = "elementEquals";
String SCREENSHOT = "screenshot";
String ELEMENT_SCREENSHOT = "elementScreenshot";
static CommandPayload ELEMENT_SCREENSHOT(String id) {
return new CommandPayload(ELEMENT_SCREENSHOT, ImmutableMap.of("id", id));
}
String ACCEPT_ALERT = "acceptAlert";
String DISMISS_ALERT = "dismissAlert";
String GET_ALERT_TEXT = "getAlertText";
String SET_ALERT_VALUE = "setAlertValue";
static CommandPayload SET_ALERT_VALUE(String keysToSend) {
return new CommandPayload(SET_ALERT_VALUE, ImmutableMap.of("text", keysToSend));
}
String SET_ALERT_CREDENTIALS = "setAlertCredentials";
String SET_TIMEOUT = "setTimeout";
static CommandPayload SET_IMPLICIT_WAIT_TIMEOUT(long time, TimeUnit unit) {
return new CommandPayload(
SET_TIMEOUT, ImmutableMap.of("implicit", TimeUnit.MILLISECONDS.convert(time, unit)));
}
static CommandPayload SET_SCRIPT_TIMEOUT(long time, TimeUnit unit) {
return new CommandPayload(
SET_TIMEOUT, ImmutableMap.of("script", TimeUnit.MILLISECONDS.convert(time, unit)));
}
static CommandPayload SET_PAGE_LOAD_TIMEOUT(long time, TimeUnit unit) {
return new CommandPayload(
SET_TIMEOUT, ImmutableMap.of("pageLoad", TimeUnit.MILLISECONDS.convert(time, unit)));
}
String IMPLICITLY_WAIT = "implicitlyWait";
String SET_SCRIPT_TIMEOUT = "setScriptTimeout";
String GET_LOCATION = "getLocation";
String SET_LOCATION = "setLocation";
String GET_APP_CACHE = "getAppCache";
String GET_APP_CACHE_STATUS = "getStatus";
String CLEAR_APP_CACHE = "clearAppCache";
String IS_BROWSER_ONLINE = "isBrowserOnline";
String SET_BROWSER_ONLINE = "setBrowserOnline";
String GET_LOCAL_STORAGE_ITEM = "getLocalStorageItem";
String GET_LOCAL_STORAGE_KEYS = "getLocalStorageKeys";
String SET_LOCAL_STORAGE_ITEM = "setLocalStorageItem";
String REMOVE_LOCAL_STORAGE_ITEM = "removeLocalStorageItem";
String CLEAR_LOCAL_STORAGE = "clearLocalStorage";
String GET_LOCAL_STORAGE_SIZE = "getLocalStorageSize";
String GET_SESSION_STORAGE_ITEM = "getSessionStorageItem";
String GET_SESSION_STORAGE_KEYS = "getSessionStorageKey";
String SET_SESSION_STORAGE_ITEM = "setSessionStorageItem";
String REMOVE_SESSION_STORAGE_ITEM = "removeSessionStorageItem";
String CLEAR_SESSION_STORAGE = "clearSessionStorage";
String GET_SESSION_STORAGE_SIZE = "getSessionStorageSize";
String SET_SCREEN_ORIENTATION = "setScreenOrientation";
String GET_SCREEN_ORIENTATION = "getScreenOrientation";
String SET_SCREEN_ROTATION = "setScreenRotation";
String GET_SCREEN_ROTATION = "getScreenRotation";
// W3C Actions APIs
String ACTIONS = "actions";
static CommandPayload ACTIONS(Collection<Sequence> actions) {
return new CommandPayload(ACTIONS, ImmutableMap.of("actions", actions));
}
String CLEAR_ACTIONS_STATE = "clearActionState";
// These belong to the Advanced user interactions - an element is
// optional for these commands.
String CLICK = "mouseClick";
String DOUBLE_CLICK = "mouseDoubleClick";
String MOUSE_DOWN = "mouseButtonDown";
String MOUSE_UP = "mouseButtonUp";
String MOVE_TO = "mouseMoveTo";
// Those allow interactions with the Input Methods installed on
// the system.
String IME_GET_AVAILABLE_ENGINES = "imeGetAvailableEngines";
String IME_GET_ACTIVE_ENGINE = "imeGetActiveEngine";
String IME_IS_ACTIVATED = "imeIsActivated";
String IME_DEACTIVATE = "imeDeactivate";
String IME_ACTIVATE_ENGINE = "imeActivateEngine";
static CommandPayload IME_ACTIVATE_ENGINE(String engine) {
return new CommandPayload(IME_ACTIVATE_ENGINE, ImmutableMap.of("engine", engine));
}
// These belong to the Advanced Touch API
String TOUCH_SINGLE_TAP = "touchSingleTap";
String TOUCH_DOWN = "touchDown";
String TOUCH_UP = "touchUp";
String TOUCH_MOVE = "touchMove";
String TOUCH_SCROLL = "touchScroll";
String TOUCH_DOUBLE_TAP = "touchDoubleTap";
String TOUCH_LONG_PRESS = "touchLongPress";
String TOUCH_FLICK = "touchFlick";
// Window API
String SET_CURRENT_WINDOW_POSITION = "setWindowPosition";
static CommandPayload SET_CURRENT_WINDOW_POSITION(Point targetPosition) {
return new CommandPayload(
SET_CURRENT_WINDOW_POSITION, ImmutableMap.of("x", targetPosition.x, "y", targetPosition.y));
}
String GET_CURRENT_WINDOW_POSITION = "getWindowPosition";
static CommandPayload GET_CURRENT_WINDOW_POSITION() {
return new CommandPayload(
GET_CURRENT_WINDOW_POSITION, ImmutableMap.of("windowHandle", "current"));
}
// W3C compatible Window API
String SET_CURRENT_WINDOW_SIZE = "setCurrentWindowSize";
static CommandPayload SET_CURRENT_WINDOW_SIZE(Dimension targetSize) {
return new CommandPayload(
SET_CURRENT_WINDOW_SIZE, ImmutableMap.of("width", targetSize.width, "height", targetSize.height));
}
String GET_CURRENT_WINDOW_SIZE = "getCurrentWindowSize";
String MAXIMIZE_CURRENT_WINDOW = "maximizeCurrentWindow";
String FULLSCREEN_CURRENT_WINDOW = "fullscreenCurrentWindow";
// Logging API
String GET_AVAILABLE_LOG_TYPES = "getAvailableLogTypes";
String GET_LOG = "getLog";
String GET_SESSION_LOGS = "getSessionLogs";
// Mobile API
String GET_NETWORK_CONNECTION = "getNetworkConnection";
String SET_NETWORK_CONNECTION = "setNetworkConnection";
}
| 1 | 16,690 | These command names are specific to Chromium-based browsers. Please move to `ChromiumDriverCommand` | SeleniumHQ-selenium | js |
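A rough sketch of the move the reviewer asks for: a separate holder for the Chromium-only command names. The package and class layout here are assumptions; only the constant names and values come from the patch above.
package org.openqa.selenium.chromium;

// Hypothetical ChromiumDriverCommand holding the Cast Media Router command
// names, so the cross-browser DriverCommand interface stays browser-agnostic.
public interface ChromiumDriverCommand {
  String GET_CAST_SINKS = "getCastSinks";
  String SET_CAST_SINK_TO_USE = "selectCastSink";
  String START_CAST_TAB_MIRRORING = "startCastTabMirroring";
  String GET_CAST_ISSUE_MESSAGE = "getCastIssueMessage";
  String STOP_CASTING = "stopCasting";
}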
@@ -106,6 +106,13 @@ def _auto_patch():
import os
import logging
+ from pyspark.sql import dataframe as df, functions as F
+ from databricks.koalas import functions
+
+ for name in functions.__all__:
+ if not hasattr(F, name):
+ setattr(F, name, getattr(functions, name))
+
# Attach a usage logger.
logger_module = os.getenv("KOALAS_USAGE_LOGGER", None)
if logger_module is not None: | 1 | #
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from distutils.version import LooseVersion
from databricks.koalas.version import __version__
def assert_pyspark_version():
import logging
pyspark_ver = None
try:
import pyspark
except ImportError:
raise ImportError(
"Unable to import pyspark - consider doing a pip install with [spark] "
"extra to install pyspark with pip"
)
else:
pyspark_ver = getattr(pyspark, "__version__")
if pyspark_ver is None or pyspark_ver < "2.4":
logging.warning(
'Found pyspark version "{}" installed. pyspark>=2.4.0 is recommended.'.format(
pyspark_ver if pyspark_ver is not None else "<unknown version>"
)
)
assert_pyspark_version()
import pyspark
import pyarrow
if LooseVersion(pyspark.__version__) < LooseVersion("3.0"):
if (
LooseVersion(pyarrow.__version__) >= LooseVersion("0.15")
and "ARROW_PRE_0_15_IPC_FORMAT" not in os.environ
):
import logging
logging.warning(
"'ARROW_PRE_0_15_IPC_FORMAT' environment variable was not set. It is required to "
"set this environment variable to '1' in both driver and executor sides if you use "
"pyarrow>=0.15 and pyspark<3.0. "
"Koalas will set it for you but it does not work if there is a Spark context already "
"launched."
)
# This is required to support PyArrow 0.15 in PySpark versions lower than 3.0.
# See SPARK-29367.
os.environ["ARROW_PRE_0_15_IPC_FORMAT"] = "1"
elif "ARROW_PRE_0_15_IPC_FORMAT" in os.environ:
raise RuntimeError(
"Please explicitly unset 'ARROW_PRE_0_15_IPC_FORMAT' environment variable in both "
"driver and executor sides. It is required to set this environment variable only "
"when you use pyarrow>=0.15 and pyspark<3.0."
)
from databricks.koalas.frame import DataFrame
from databricks.koalas.indexes import Index, MultiIndex
from databricks.koalas.series import Series
from databricks.koalas.config import get_option, set_option, reset_option, options
from databricks.koalas.groupby import NamedAgg
__all__ = [
"read_csv",
"read_parquet",
"to_datetime",
"from_pandas",
"get_dummies",
"DataFrame",
"Series",
"Index",
"MultiIndex",
"sql",
"range",
"concat",
"melt",
"get_option",
"set_option",
"reset_option",
"read_sql_table",
"read_sql_query",
"read_sql",
"options",
"option_context",
"NamedAgg",
]
def _auto_patch():
import os
import logging
# Attach a usage logger.
logger_module = os.getenv("KOALAS_USAGE_LOGGER", None)
if logger_module is not None:
try:
from databricks.koalas import usage_logging
usage_logging.attach(logger_module)
except Exception as e:
from pyspark.util import _exception_message
logger = logging.getLogger("databricks.koalas.usage_logger")
logger.warning(
"Tried to attach usage logger `{}`, but an exception was raised: {}".format(
logger_module, _exception_message(e)
)
)
# Autopatching is on by default.
x = os.getenv("SPARK_KOALAS_AUTOPATCH", "true")
if x.lower() in ("true", "1", "enabled"):
logger = logging.getLogger("spark")
logger.info(
"Patching spark automatically. You can disable it by setting "
"SPARK_KOALAS_AUTOPATCH=false in your environment"
)
from pyspark.sql import dataframe as df
df.DataFrame.to_koalas = DataFrame.to_koalas
_auto_patch()
# Import after the usage logger is attached.
from databricks.koalas.config import *
from databricks.koalas.namespace import *
from databricks.koalas.sql import sql
| 1 | 15,278 | Hm.. should we auto-patch? I am less sure about this, as `percentile_approx` seems to be a Spark function and auto-patching it in Koalas seems a bit counterintuitive. | databricks-koalas | py
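One way to narrow the reviewer's concern, sketched below as an assumption rather than a fix: keep the monkey-patching of `pyspark.sql.functions` behind the same `SPARK_KOALAS_AUTOPATCH` opt-out that already guards `DataFrame.to_koalas`, instead of doing it unconditionally at import time. Whether patching Spark's `functions` module is desirable at all is a separate question.
# Hypothetical: patch pyspark only when auto-patching is enabled, mirroring
# how DataFrame.to_koalas is attached further down in _auto_patch().
x = os.getenv("SPARK_KOALAS_AUTOPATCH", "true")
if x.lower() in ("true", "1", "enabled"):
    from pyspark.sql import dataframe as df, functions as F
    from databricks.koalas import functions

    for name in functions.__all__:
        if not hasattr(F, name):
            setattr(F, name, getattr(functions, name))

    df.DataFrame.to_koalas = DataFrame.to_koalas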
@@ -4,12 +4,14 @@ declare(strict_types=1);
namespace Tests\ShopBundle\Test;
+use Psr\Container\ContainerInterface;
use Shopsys\FrameworkBundle\Component\DataFixture\PersistentReferenceFacade;
use Shopsys\FrameworkBundle\Component\Domain\Domain;
use Shopsys\FrameworkBundle\Component\Environment\EnvironmentType;
use Symfony\Bundle\FrameworkBundle\Test\WebTestCase;
+use Zalas\Injector\PHPUnit\TestCase\ServiceContainerTestCase;
-abstract class FunctionalTestCase extends WebTestCase
+abstract class FunctionalTestCase extends WebTestCase implements ServiceContainerTestCase
{
/**
* @var \Symfony\Bundle\FrameworkBundle\Client | 1 | <?php
declare(strict_types=1);
namespace Tests\ShopBundle\Test;
use Shopsys\FrameworkBundle\Component\DataFixture\PersistentReferenceFacade;
use Shopsys\FrameworkBundle\Component\Domain\Domain;
use Shopsys\FrameworkBundle\Component\Environment\EnvironmentType;
use Symfony\Bundle\FrameworkBundle\Test\WebTestCase;
abstract class FunctionalTestCase extends WebTestCase
{
/**
* @var \Symfony\Bundle\FrameworkBundle\Client
*/
private $client;
/**
* @var \Shopsys\FrameworkBundle\Component\Domain\Domain|null
*/
private $domain;
protected function setUpDomain()
{
/** @var \Shopsys\FrameworkBundle\Component\Domain\Domain $domain */
$this->domain = $this->getContainer()->get(Domain::class);
$this->domain->switchDomainById(Domain::FIRST_DOMAIN_ID);
}
protected function setUp()
{
parent::setUp();
$this->setUpDomain();
}
/**
* @param bool $createNew
* @param string $username
* @param string $password
* @param array $kernelOptions
* @return \Symfony\Bundle\FrameworkBundle\Client
*/
protected function getClient(
$createNew = false,
$username = null,
$password = null,
$kernelOptions = []
) {
$defaultKernelOptions = [
'environment' => EnvironmentType::TEST,
'debug' => EnvironmentType::isDebug(EnvironmentType::TEST),
];
$kernelOptions = array_replace($defaultKernelOptions, $kernelOptions);
if ($createNew) {
$this->client = $this->createClient($kernelOptions);
$this->setUpDomain();
} elseif (!isset($this->client)) {
$this->client = $this->createClient($kernelOptions);
}
if ($username !== null) {
$this->client->setServerParameters([
'PHP_AUTH_USER' => $username,
'PHP_AUTH_PW' => $password,
]);
}
return $this->client;
}
/**
* @return \Symfony\Component\DependencyInjection\ContainerInterface
*/
protected function getContainer()
{
return $this->getClient()->getContainer();
}
/**
* @param string $referenceName
* @return object
*/
protected function getReference($referenceName)
{
/** @var \Shopsys\FrameworkBundle\Component\DataFixture\PersistentReferenceFacade $persistentReferenceFacade */
$persistentReferenceFacade = $this->getContainer()
->get(PersistentReferenceFacade::class);
return $persistentReferenceFacade->getReference($referenceName);
}
/**
* @param string $referenceName
* @param int $domainId
* @return object
*/
protected function getReferenceForDomain(string $referenceName, int $domainId)
{
/** @var \Shopsys\FrameworkBundle\Component\DataFixture\PersistentReferenceFacade $persistentReferenceFacade */
$persistentReferenceFacade = $this->getContainer()
->get(PersistentReferenceFacade::class);
return $persistentReferenceFacade->getReferenceForDomain($referenceName, $domainId);
}
protected function skipTestIfFirstDomainIsNotInEnglish()
{
if ($this->getFirstDomainLocale() !== 'en') {
$this->markTestSkipped('Tests for product searching are run only when the first domain has English locale');
}
}
/**
* We can use the shorthand here as $this->domain->switchDomainById(1) is called in setUp()
* @return string
*/
protected function getFirstDomainLocale(): string
{
return $this->domain->getLocale();
}
}
| 1 | 18,914 | I found domain use in `AllFeedsTest::getAllFeedExportCreationData`, and the class AllFeedsTest is a descendant of `FunctionalTestCase`, so I think we can rewrite this too. | shopsys-shopsys | php
@@ -91,6 +91,13 @@ class IcebergFilesCommitter extends AbstractStreamOperator<Void>
private transient Table table;
private transient long maxCommittedCheckpointId;
+ // There're two cases that we restore from flink checkpoints: the first case is restoring from snapshot created by the
+ // same flink job; another case is restoring from snapshot created by another different job. For the second case, we
+ // need to maintain the old flink job's id in flink state backend to find the max-committed-checkpoint-id when
+ // traversing iceberg table's snapshots.
+ private static final ListStateDescriptor<String> JOB_ID_DESCRIPTOR = new ListStateDescriptor<>(
+ "iceberg-flink-job-id", BasicTypeInfo.STRING_TYPE_INFO);
+ private transient ListState<String> jobIdState;
// All pending checkpoints states for this function.
private static final ListStateDescriptor<SortedMap<Long, List<DataFile>>> STATE_DESCRIPTOR = buildStateDescriptor();
private transient ListState<SortedMap<Long, List<DataFile>>> checkpointsState; | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.flink.sink;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import java.util.NavigableMap;
import java.util.SortedMap;
import org.apache.flink.api.common.state.ListState;
import org.apache.flink.api.common.state.ListStateDescriptor;
import org.apache.flink.api.common.typeinfo.BasicTypeInfo;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.java.typeutils.ListTypeInfo;
import org.apache.flink.runtime.state.StateInitializationContext;
import org.apache.flink.runtime.state.StateSnapshotContext;
import org.apache.flink.streaming.api.operators.AbstractStreamOperator;
import org.apache.flink.streaming.api.operators.BoundedOneInput;
import org.apache.flink.streaming.api.operators.OneInputStreamOperator;
import org.apache.flink.streaming.runtime.streamrecord.StreamRecord;
import org.apache.flink.table.runtime.typeutils.SortedMapTypeInfo;
import org.apache.hadoop.conf.Configuration;
import org.apache.iceberg.AppendFiles;
import org.apache.iceberg.DataFile;
import org.apache.iceberg.ReplacePartitions;
import org.apache.iceberg.Snapshot;
import org.apache.iceberg.SnapshotUpdate;
import org.apache.iceberg.Table;
import org.apache.iceberg.flink.TableLoader;
import org.apache.iceberg.hadoop.SerializableConfiguration;
import org.apache.iceberg.relocated.com.google.common.base.Preconditions;
import org.apache.iceberg.relocated.com.google.common.collect.ImmutableList;
import org.apache.iceberg.relocated.com.google.common.collect.Lists;
import org.apache.iceberg.relocated.com.google.common.collect.Maps;
import org.apache.iceberg.types.Comparators;
import org.apache.iceberg.types.Types;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
class IcebergFilesCommitter extends AbstractStreamOperator<Void>
implements OneInputStreamOperator<DataFile, Void>, BoundedOneInput {
private static final long serialVersionUID = 1L;
private static final long INITIAL_CHECKPOINT_ID = -1L;
private static final Logger LOG = LoggerFactory.getLogger(IcebergFilesCommitter.class);
private static final String FLINK_JOB_ID = "flink.job-id";
// The max checkpoint id we've committed to the iceberg table. As flink's checkpoint id is always increasing, we can
// safely commit all the data files whose checkpoint id is greater than the max committed one to the iceberg table, to
// avoid committing the same data files twice. This id will be attached to iceberg's meta when committing the
// iceberg transaction.
private static final String MAX_COMMITTED_CHECKPOINT_ID = "flink.max-committed-checkpoint-id";
// TableLoader to load iceberg table lazily.
private final TableLoader tableLoader;
private final SerializableConfiguration hadoopConf;
private final boolean replacePartitions;
// A sorted map to maintain the completed data files for each pending checkpointId (which have not been committed
// to the iceberg table). We need a sorted map here because it's possible that a few checkpoint snapshots failed, for
// example: the 1st checkpoint has 2 data files <1, <file0, file1>>, the 2nd checkpoint has 1 data file
// <2, <file3>>. The snapshot for checkpoint#1 was interrupted because of a network/disk failure etc., while we don't expect
// any data loss in the iceberg table. So we keep the finished files <1, <file0, file1>> in memory and retry to commit
// to the iceberg table when the next checkpoint happens.
private final NavigableMap<Long, List<DataFile>> dataFilesPerCheckpoint = Maps.newTreeMap();
// The data files cache for current checkpoint. Once the snapshot barrier received, it will be flushed to the
// 'dataFilesPerCheckpoint'.
private final List<DataFile> dataFilesOfCurrentCheckpoint = Lists.newArrayList();
// It will have a unique identifier for one job.
private transient String flinkJobId;
private transient Table table;
private transient long maxCommittedCheckpointId;
// All pending checkpoints states for this function.
private static final ListStateDescriptor<SortedMap<Long, List<DataFile>>> STATE_DESCRIPTOR = buildStateDescriptor();
private transient ListState<SortedMap<Long, List<DataFile>>> checkpointsState;
IcebergFilesCommitter(TableLoader tableLoader, Configuration hadoopConf, boolean replacePartitions) {
this.tableLoader = tableLoader;
this.hadoopConf = new SerializableConfiguration(hadoopConf);
this.replacePartitions = replacePartitions;
}
@Override
public void initializeState(StateInitializationContext context) throws Exception {
super.initializeState(context);
this.flinkJobId = getContainingTask().getEnvironment().getJobID().toString();
// Open the table loader and load the table.
this.tableLoader.open(hadoopConf.get());
this.table = tableLoader.loadTable();
this.maxCommittedCheckpointId = INITIAL_CHECKPOINT_ID;
this.checkpointsState = context.getOperatorStateStore().getListState(STATE_DESCRIPTOR);
if (context.isRestored()) {
this.maxCommittedCheckpointId = getMaxCommittedCheckpointId(table, flinkJobId);
// In the restoring path, it should have one valid snapshot for current flink job at least, so the max committed
// checkpoint id should be positive. If it's not positive, that means someone might have removed or expired the
// iceberg snapshot, in that case we should throw an exception in case of committing duplicated data files into
// the iceberg table.
Preconditions.checkState(maxCommittedCheckpointId != INITIAL_CHECKPOINT_ID,
"There should be an existing iceberg snapshot for current flink job: %s", flinkJobId);
SortedMap<Long, List<DataFile>> restoredDataFiles = checkpointsState.get().iterator().next();
// Only keep the uncommitted data files in the cache.
this.dataFilesPerCheckpoint.putAll(restoredDataFiles.tailMap(maxCommittedCheckpointId + 1));
}
}
@Override
public void snapshotState(StateSnapshotContext context) throws Exception {
super.snapshotState(context);
long checkpointId = context.getCheckpointId();
LOG.info("Start to flush snapshot state to state backend, table: {}, checkpointId: {}", table, checkpointId);
// Update the checkpoint state.
dataFilesPerCheckpoint.put(checkpointId, ImmutableList.copyOf(dataFilesOfCurrentCheckpoint));
// Reset the snapshot state to the latest state.
checkpointsState.clear();
checkpointsState.add(dataFilesPerCheckpoint);
// Clear the local buffer for current checkpoint.
dataFilesOfCurrentCheckpoint.clear();
}
@Override
public void notifyCheckpointComplete(long checkpointId) throws Exception {
super.notifyCheckpointComplete(checkpointId);
// It's possible that we have the following events:
// 1. snapshotState(ckpId);
// 2. snapshotState(ckpId+1);
// 3. notifyCheckpointComplete(ckpId+1);
// 4. notifyCheckpointComplete(ckpId);
// For step#4, we don't need to commit iceberg table again because in step#3 we've committed all the files,
// Besides, we need to maintain the max-committed-checkpoint-id to be increasing.
if (checkpointId > maxCommittedCheckpointId) {
commitUpToCheckpoint(checkpointId);
this.maxCommittedCheckpointId = checkpointId;
}
}
private void commitUpToCheckpoint(long checkpointId) {
NavigableMap<Long, List<DataFile>> pendingFileMap = dataFilesPerCheckpoint.headMap(checkpointId, true);
List<DataFile> pendingDataFiles = Lists.newArrayList();
for (List<DataFile> dataFiles : pendingFileMap.values()) {
pendingDataFiles.addAll(dataFiles);
}
if (replacePartitions) {
replacePartitions(pendingDataFiles, checkpointId);
} else {
append(pendingDataFiles, checkpointId);
}
// Clear the committed data files from dataFilesPerCheckpoint.
pendingFileMap.clear();
}
private void replacePartitions(List<DataFile> dataFiles, long checkpointId) {
ReplacePartitions dynamicOverwrite = table.newReplacePartitions();
int numFiles = 0;
for (DataFile file : dataFiles) {
numFiles += 1;
dynamicOverwrite.addFile(file);
}
commitOperation(dynamicOverwrite, numFiles, "dynamic partition overwrite", checkpointId);
}
private void append(List<DataFile> dataFiles, long checkpointId) {
AppendFiles appendFiles = table.newAppend();
int numFiles = 0;
for (DataFile file : dataFiles) {
numFiles += 1;
appendFiles.appendFile(file);
}
commitOperation(appendFiles, numFiles, "append", checkpointId);
}
private void commitOperation(SnapshotUpdate<?> operation, int numFiles, String description, long checkpointId) {
LOG.info("Committing {} with {} files to table {}", description, numFiles, table);
operation.set(MAX_COMMITTED_CHECKPOINT_ID, Long.toString(checkpointId));
operation.set(FLINK_JOB_ID, flinkJobId);
long start = System.currentTimeMillis();
operation.commit(); // abort is automatically called if this fails.
long duration = System.currentTimeMillis() - start;
LOG.info("Committed in {} ms", duration);
}
@Override
public void processElement(StreamRecord<DataFile> element) {
this.dataFilesOfCurrentCheckpoint.add(element.getValue());
}
@Override
public void endInput() {
// Flush the buffered data files into 'dataFilesPerCheckpoint' firstly.
dataFilesPerCheckpoint.put(Long.MAX_VALUE, ImmutableList.copyOf(dataFilesOfCurrentCheckpoint));
dataFilesOfCurrentCheckpoint.clear();
commitUpToCheckpoint(Long.MAX_VALUE);
}
@Override
public void dispose() throws Exception {
if (tableLoader != null) {
tableLoader.close();
}
}
private static ListStateDescriptor<SortedMap<Long, List<DataFile>>> buildStateDescriptor() {
Comparator<Long> longComparator = Comparators.forType(Types.LongType.get());
// Construct a ListTypeInfo.
ListTypeInfo<DataFile> dataFileListTypeInfo = new ListTypeInfo<>(TypeInformation.of(DataFile.class));
// Construct a SortedMapTypeInfo.
SortedMapTypeInfo<Long, List<DataFile>> sortedMapTypeInfo = new SortedMapTypeInfo<>(
BasicTypeInfo.LONG_TYPE_INFO, dataFileListTypeInfo, longComparator
);
return new ListStateDescriptor<>("iceberg-files-committer-state", sortedMapTypeInfo);
}
static long getMaxCommittedCheckpointId(Table table, String flinkJobId) {
Snapshot snapshot = table.currentSnapshot();
long lastCommittedCheckpointId = INITIAL_CHECKPOINT_ID;
while (snapshot != null) {
Map<String, String> summary = snapshot.summary();
String snapshotFlinkJobId = summary.get(FLINK_JOB_ID);
if (flinkJobId.equals(snapshotFlinkJobId)) {
String value = summary.get(MAX_COMMITTED_CHECKPOINT_ID);
if (value != null) {
lastCommittedCheckpointId = Long.parseLong(value);
break;
}
}
Long parentSnapshotId = snapshot.parentId();
snapshot = parentSnapshotId != null ? table.snapshot(parentSnapshotId) : null;
}
return lastCommittedCheckpointId;
}
}
| 1 | 23,943 | I am wondering if it is better to consolidate all committer checkpoint states into a single structure, e.g. a POJO class or an Avro record, which are Flink-supported state types for schema evolution. | apache-iceberg | java
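A minimal sketch of the consolidation the reviewer suggests, with invented names: a single state object carrying both the originating Flink job id and the pending files, so the committer keeps one ListState instead of two parallel ones. Whether DataFile can be handled by Flink's POJO serializer directly, or still needs a custom TypeInformation like the existing SortedMapTypeInfo, is left open here.
import java.util.List;
import java.util.SortedMap;
import org.apache.iceberg.DataFile;

// Hypothetical combined committer state; a plain POJO (or an Avro record)
// keeps the job id and the per-checkpoint pending files in one place.
public class IcebergCommitterState {
  private String flinkJobId;
  private SortedMap<Long, List<DataFile>> pendingFilesPerCheckpoint;

  public IcebergCommitterState() {
    // No-arg constructor required for Flink's POJO serializer.
  }

  public String getFlinkJobId() {
    return flinkJobId;
  }

  public void setFlinkJobId(String flinkJobId) {
    this.flinkJobId = flinkJobId;
  }

  public SortedMap<Long, List<DataFile>> getPendingFilesPerCheckpoint() {
    return pendingFilesPerCheckpoint;
  }

  public void setPendingFilesPerCheckpoint(SortedMap<Long, List<DataFile>> pendingFilesPerCheckpoint) {
    this.pendingFilesPerCheckpoint = pendingFilesPerCheckpoint;
  }
}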
@@ -56,9 +56,10 @@ class SignalFilter(QObject):
Return:
A partial function calling _filter_signals with a signal.
"""
- return functools.partial(self._filter_signals, signal, tab)
+ log_signal = debug.signal_name(signal) not in self.BLACKLIST
+ return functools.partial(self._filter_signals, signal, log_signal, tab)
- def _filter_signals(self, signal, tab, *args):
+ def _filter_signals(self, signal, log_signal, tab, *args):
"""Filter signals and trigger TabbedBrowser signals if needed.
Triggers signal if the original signal was sent from the _current_ tab | 1 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2018 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""A filter for signals which either filters or passes them."""
import functools
from PyQt5.QtCore import QObject
from qutebrowser.utils import debug, log, objreg
class SignalFilter(QObject):
"""A filter for signals.
Signals are only passed to the parent TabbedBrowser if they originated in
the currently shown widget.
Attributes:
_win_id: The window ID this SignalFilter is associated with.
Class attributes:
BLACKLIST: List of signal names which should not be logged.
"""
BLACKLIST = ['cur_scroll_perc_changed', 'cur_progress', 'cur_link_hovered']
def __init__(self, win_id, parent=None):
super().__init__(parent)
self._win_id = win_id
def create(self, signal, tab):
"""Factory for partial _filter_signals functions.
Args:
signal: The pyqtSignal to filter.
tab: The WebView to create filters for.
Return:
A partial function calling _filter_signals with a signal.
"""
return functools.partial(self._filter_signals, signal, tab)
def _filter_signals(self, signal, tab, *args):
"""Filter signals and trigger TabbedBrowser signals if needed.
Triggers signal if the original signal was sent from the _current_ tab
and not from any other one.
The original signal does not matter, since we get the new signal and
all args.
Args:
signal: The signal to emit if the sender was the current widget.
tab: The WebView which the filter belongs to.
*args: The args to pass to the signal.
"""
log_signal = debug.signal_name(signal) not in self.BLACKLIST
tabbed_browser = objreg.get('tabbed-browser', scope='window',
window=self._win_id)
try:
tabidx = tabbed_browser.widget.indexOf(tab)
except RuntimeError:
# The tab has been deleted already
return
if tabidx == tabbed_browser.widget.currentIndex():
if log_signal:
log.signals.debug("emitting: {} (tab {})".format(
debug.dbg_signal(signal, args), tabidx))
signal.emit(*args)
else:
if log_signal:
log.signals.debug("ignoring: {} (tab {})".format(
debug.dbg_signal(signal, args), tabidx))
| 1 | 22,934 | I assume this is a performance optimization to only check this once when creating the signal filter? While you're at it, maybe make `BLACKLIST` a set instead? | qutebrowser-qutebrowser | py |
@@ -36,6 +36,18 @@ class DatabaseDriverNode {
});
}
+ loadExtension(path) {
+ return new Promise((resolve, reject) => {
+ this.db_.loadExtension(path, (error) => {
+ if (error) {
+ reject(error);
+ return;
+ }
+ resolve();
+ });
+ });
+ }
+
selectAll(sql, params = null) {
if (!params) params = {};
return new Promise((resolve, reject) => { | 1 | const sqlite3 = require('sqlite3').verbose();
const Promise = require('promise');
class DatabaseDriverNode {
open(options) {
return new Promise((resolve, reject) => {
this.db_ = new sqlite3.Database(options.name, sqlite3.OPEN_READWRITE | sqlite3.OPEN_CREATE, error => {
if (error) {
reject(error);
return;
}
resolve();
});
});
}
sqliteErrorToJsError(error, sql = null, params = null) {
const msg = [error.toString()];
if (sql) msg.push(sql);
if (params) msg.push(params);
const output = new Error(msg.join(': '));
if (error.code) output.code = error.code;
return output;
}
selectOne(sql, params = null) {
if (!params) params = {};
return new Promise((resolve, reject) => {
this.db_.get(sql, params, (error, row) => {
if (error) {
reject(error);
return;
}
resolve(row);
});
});
}
selectAll(sql, params = null) {
if (!params) params = {};
return new Promise((resolve, reject) => {
this.db_.all(sql, params, (error, row) => {
if (error) {
reject(error);
return;
}
resolve(row);
});
});
}
exec(sql, params = null) {
if (!params) params = {};
return new Promise((resolve, reject) => {
this.db_.run(sql, params, error => {
if (error) {
reject(error);
return;
}
resolve();
});
});
}
lastInsertId() {
throw new Error('NOT IMPLEMENTED');
}
}
module.exports = { DatabaseDriverNode };
| 1 | 15,183 | Please use async/await and try/catch | laurent22-joplin | js |
@@ -1065,11 +1065,9 @@ void GenStruct(StructDef &struct_def, std::string *code_ptr) {
}
}
// generate object accessors if is nested_flatbuffer
+ if (field.nested_flatbuffer != nullptr) {
auto nested = field.attributes.Lookup("nested_flatbuffer");
- if (nested) {
- auto nested_qualified_name =
- parser_.namespaces_.back()->GetFullyQualifiedName(nested->constant);
- auto nested_type = parser_.structs_.Lookup(nested_qualified_name);
+ auto nested_type = nested->type.struct_def;
auto nested_type_name = WrapInNameSpace(*nested_type);
auto nestedMethodName = MakeCamel(field.name, lang_.first_camel_upper)
+ "As" + nested_type_name; | 1 | /*
* Copyright 2014 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// independent from idl_parser, since this code is not needed for most clients
#include "flatbuffers/flatbuffers.h"
#include "flatbuffers/idl.h"
#include "flatbuffers/util.h"
#include "flatbuffers/code_generators.h"
#if defined(FLATBUFFERS_CPP98_STL)
#include <cctype>
#endif // defined(FLATBUFFERS_CPP98_STL)
namespace flatbuffers {
// Convert an underscore_based_identifier into camelCase.
// Also uppercases the first character if first is true.
std::string MakeCamel(const std::string &in, bool first) {
std::string s;
for (size_t i = 0; i < in.length(); i++) {
if (!i && first)
s += static_cast<char>(toupper(in[0]));
else if (in[i] == '_' && i + 1 < in.length())
s += static_cast<char>(toupper(in[++i]));
else
s += in[i];
}
return s;
}
// These arrays need to correspond to the IDLOptions::k enum.
struct LanguageParameters {
IDLOptions::Language language;
// Whether function names in the language typically start with uppercase.
bool first_camel_upper;
std::string file_extension;
std::string string_type;
std::string bool_type;
std::string open_curly;
std::string accessor_type;
std::string const_decl;
std::string unsubclassable_decl;
std::string enum_decl;
std::string enum_separator;
std::string getter_prefix;
std::string getter_suffix;
std::string inheritance_marker;
std::string namespace_ident;
std::string namespace_begin;
std::string namespace_end;
std::string set_bb_byteorder;
std::string get_bb_position;
std::string get_fbb_offset;
std::string accessor_prefix;
std::string accessor_prefix_static;
std::string optional_suffix;
std::string includes;
CommentConfig comment_config;
};
const LanguageParameters& GetLangParams(IDLOptions::Language lang) {
static LanguageParameters language_parameters[] = {
{
IDLOptions::kJava,
false,
".java",
"String",
"boolean ",
" {\n",
"class ",
" final ",
"final ",
"final class ",
";\n",
"()",
"",
" extends ",
"package ",
";",
"",
"_bb.order(ByteOrder.LITTLE_ENDIAN); ",
"position()",
"offset()",
"",
"",
"",
"import java.nio.*;\nimport java.lang.*;\nimport java.util.*;\n"
"import com.google.flatbuffers.*;\n\n@SuppressWarnings(\"unused\")\n",
{
"/**",
" *",
" */",
},
},
{
IDLOptions::kCSharp,
true,
".cs",
"string",
"bool ",
"\n{\n",
"struct ",
" readonly ",
"",
"enum ",
",\n",
" { get",
"} ",
" : ",
"namespace ",
"\n{",
"\n}\n",
"",
"Position",
"Offset",
"__p.",
"Table.",
"?",
"using global::System;\nusing global::FlatBuffers;\n\n",
{
nullptr,
"///",
nullptr,
},
},
};
if (lang == IDLOptions::kJava) {
return language_parameters[0];
} else {
assert(lang == IDLOptions::kCSharp);
return language_parameters[1];
}
}
namespace general {
class GeneralGenerator : public BaseGenerator {
public:
GeneralGenerator(const Parser &parser, const std::string &path,
const std::string &file_name)
: BaseGenerator(parser, path, file_name, "", "."),
lang_(GetLangParams(parser_.opts.lang)),
cur_name_space_( nullptr ) {
}
GeneralGenerator &operator=(const GeneralGenerator &);
bool generate() {
std::string one_file_code;
cur_name_space_ = parser_.namespaces_.back();
for (auto it = parser_.enums_.vec.begin(); it != parser_.enums_.vec.end();
++it) {
std::string enumcode;
auto &enum_def = **it;
if (!parser_.opts.one_file)
cur_name_space_ = enum_def.defined_namespace;
GenEnum(enum_def, &enumcode);
if (parser_.opts.one_file) {
one_file_code += enumcode;
} else {
if (!SaveType(enum_def.name, *enum_def.defined_namespace,
enumcode, false)) return false;
}
}
for (auto it = parser_.structs_.vec.begin();
it != parser_.structs_.vec.end(); ++it) {
std::string declcode;
auto &struct_def = **it;
if (!parser_.opts.one_file)
cur_name_space_ = struct_def.defined_namespace;
GenStruct(struct_def, &declcode);
if (parser_.opts.one_file) {
one_file_code += declcode;
} else {
if (!SaveType(struct_def.name, *struct_def.defined_namespace,
declcode, true)) return false;
}
}
if (parser_.opts.one_file) {
return SaveType(file_name_, *parser_.namespaces_.back(),
one_file_code, true);
}
return true;
}
// Save out the generated code for a single class while adding
// declaration boilerplate.
bool SaveType(const std::string &defname, const Namespace &ns,
const std::string &classcode, bool needs_includes) {
if (!classcode.length()) return true;
std::string code;
if (lang_.language == IDLOptions::kCSharp) {
code = "// <auto-generated>\n"
"// " + std::string(FlatBuffersGeneratedWarning()) + "\n"
"// </auto-generated>\n\n";
} else {
code = "// " + std::string(FlatBuffersGeneratedWarning()) + "\n\n";
}
std::string namespace_name = FullNamespace(".", ns);
if (!namespace_name.empty()) {
code += lang_.namespace_ident + namespace_name + lang_.namespace_begin;
code += "\n\n";
}
if (needs_includes) code += lang_.includes;
code += classcode;
if (!namespace_name.empty()) code += lang_.namespace_end;
auto filename = NamespaceDir(ns) + defname + lang_.file_extension;
return SaveFile(filename.c_str(), code, false);
}
const Namespace *CurrentNameSpace() const { return cur_name_space_; }
std::string FunctionStart(char upper) {
return std::string() + (lang_.language == IDLOptions::kJava
? static_cast<char>(tolower(upper))
: upper);
}
static bool IsEnum(const Type& type) {
return type.enum_def != nullptr && IsInteger(type.base_type);
}
std::string GenTypeBasic(const Type &type, bool enableLangOverrides) {
static const char *java_typename[] = {
#define FLATBUFFERS_TD(ENUM, IDLTYPE, CTYPE, JTYPE, GTYPE, NTYPE, PTYPE) \
#JTYPE,
FLATBUFFERS_GEN_TYPES(FLATBUFFERS_TD)
#undef FLATBUFFERS_TD
};
static const char *csharp_typename[] = {
#define FLATBUFFERS_TD(ENUM, IDLTYPE, CTYPE, JTYPE, GTYPE, NTYPE, PTYPE) \
#NTYPE,
FLATBUFFERS_GEN_TYPES(FLATBUFFERS_TD)
#undef FLATBUFFERS_TD
};
if (enableLangOverrides) {
if (lang_.language == IDLOptions::kCSharp) {
if (IsEnum(type)) return WrapInNameSpace(*type.enum_def);
if (type.base_type == BASE_TYPE_STRUCT) {
return "Offset<" + WrapInNameSpace(*type.struct_def) + ">";
}
}
}
if (lang_.language == IDLOptions::kJava) {
return java_typename[type.base_type];
} else {
assert(lang_.language == IDLOptions::kCSharp);
return csharp_typename[type.base_type];
}
}
std::string GenTypeBasic(const Type &type) {
return GenTypeBasic(type, true);
}
std::string GenTypePointer(const Type &type) {
switch (type.base_type) {
case BASE_TYPE_STRING:
return lang_.string_type;
case BASE_TYPE_VECTOR:
return GenTypeGet(type.VectorType());
case BASE_TYPE_STRUCT:
return WrapInNameSpace(*type.struct_def);
case BASE_TYPE_UNION:
// Unions in C# use a generic Table-derived type for better type safety
if (lang_.language == IDLOptions::kCSharp) return "TTable";
// fall through
default:
return "Table";
}
}
std::string GenTypeGet(const Type &type) {
return IsScalar(type.base_type)
? GenTypeBasic(type)
: GenTypePointer(type);
}
// Find the destination type the user wants to receive the value in (e.g.
// one size higher signed types for unsigned serialized values in Java).
Type DestinationType(const Type &type, bool vectorelem) {
if (lang_.language != IDLOptions::kJava) return type;
switch (type.base_type) {
// We use int for both uchar/ushort, since that generally means less casting
// than using short for uchar.
case BASE_TYPE_UCHAR: return Type(BASE_TYPE_INT);
case BASE_TYPE_USHORT: return Type(BASE_TYPE_INT);
case BASE_TYPE_UINT: return Type(BASE_TYPE_LONG);
case BASE_TYPE_VECTOR:
if (vectorelem)
return DestinationType(type.VectorType(), vectorelem);
// else fall thru
default: return type;
}
}
std::string GenOffsetType(const StructDef &struct_def) {
if(lang_.language == IDLOptions::kCSharp) {
return "Offset<" + WrapInNameSpace(struct_def) + ">";
} else {
return "int";
}
}
std::string GenOffsetConstruct(const StructDef &struct_def,
const std::string &variable_name)
{
if(lang_.language == IDLOptions::kCSharp) {
return "new Offset<" + WrapInNameSpace(struct_def) + ">(" + variable_name +
")";
}
return variable_name;
}
std::string GenVectorOffsetType() {
if(lang_.language == IDLOptions::kCSharp) {
return "VectorOffset";
} else {
return "int";
}
}
// Generate destination type name
std::string GenTypeNameDest(const Type &type)
{
return GenTypeGet(DestinationType(type, true));
}
// Mask to turn serialized value into destination type value.
std::string DestinationMask(const Type &type, bool vectorelem) {
if (lang_.language != IDLOptions::kJava) return "";
switch (type.base_type) {
case BASE_TYPE_UCHAR: return " & 0xFF";
case BASE_TYPE_USHORT: return " & 0xFFFF";
case BASE_TYPE_UINT: return " & 0xFFFFFFFFL";
case BASE_TYPE_VECTOR:
if (vectorelem)
return DestinationMask(type.VectorType(), vectorelem);
// else fall thru
default: return "";
}
}
// Casts necessary to correctly read serialized data
std::string DestinationCast(const Type &type) {
if (type.base_type == BASE_TYPE_VECTOR) {
return DestinationCast(type.VectorType());
} else {
switch (lang_.language) {
case IDLOptions::kJava:
// Cast necessary to correctly read serialized unsigned values.
if (type.base_type == BASE_TYPE_UINT) return "(long)";
break;
case IDLOptions::kCSharp:
// Cast from raw integral types to enum.
if (IsEnum(type)) return "(" + WrapInNameSpace(*type.enum_def) + ")";
break;
default:
break;
}
}
return "";
}
// Cast statements for mutator method parameters.
// In Java, parameters representing unsigned numbers need to be cast down to
// their respective type. For example, a long holding an unsigned int value
  // would be cast down to int before being put onto the buffer. In C#, one can
// directly cast an Enum to its underlying type, which is essential before
// putting it onto the buffer.
std::string SourceCast(const Type &type, bool castFromDest) {
if (type.base_type == BASE_TYPE_VECTOR) {
return SourceCast(type.VectorType(), castFromDest);
} else {
switch (lang_.language) {
case IDLOptions::kJava:
if (castFromDest) {
if (type.base_type == BASE_TYPE_UINT) return "(int)";
else if (type.base_type == BASE_TYPE_USHORT) return "(short)";
else if (type.base_type == BASE_TYPE_UCHAR) return "(byte)";
}
break;
case IDLOptions::kCSharp:
if (IsEnum(type)) return "(" + GenTypeBasic(type, false) + ")";
break;
default:
break;
}
}
return "";
}
std::string SourceCast(const Type &type) {
return SourceCast(type, true);
}
std::string SourceCastBasic(const Type &type, bool castFromDest) {
return IsScalar(type.base_type) ? SourceCast(type, castFromDest) : "";
}
std::string SourceCastBasic(const Type &type) {
return SourceCastBasic(type, true);
}
std::string GenEnumDefaultValue(const Value &value) {
auto enum_def = value.type.enum_def;
auto vec = enum_def->vals.vec;
auto default_value = StringToInt(value.constant.c_str());
auto result = value.constant;
for (auto it = vec.begin(); it != vec.end(); ++it) {
auto enum_val = **it;
if (enum_val.value == default_value) {
result = WrapInNameSpace(*enum_def) + "." + enum_val.name;
break;
}
}
return result;
}
std::string GenDefaultValue(const Value &value, bool enableLangOverrides) {
if (enableLangOverrides) {
// handles both enum case and vector of enum case
if (lang_.language == IDLOptions::kCSharp &&
value.type.enum_def != nullptr &&
value.type.base_type != BASE_TYPE_UNION) {
return GenEnumDefaultValue(value);
}
}
auto longSuffix = lang_.language == IDLOptions::kJava ? "L" : "";
switch (value.type.base_type) {
case BASE_TYPE_FLOAT: return value.constant + "f";
case BASE_TYPE_BOOL: return value.constant == "0" ? "false" : "true";
case BASE_TYPE_ULONG:
{
if (lang_.language != IDLOptions::kJava)
return value.constant;
// Converts the ulong into its bits signed equivalent
uint64_t defaultValue = StringToUInt(value.constant.c_str());
return NumToString(static_cast<int64_t>(defaultValue)) + longSuffix;
}
case BASE_TYPE_UINT:
case BASE_TYPE_LONG: return value.constant + longSuffix;
default: return value.constant;
}
}
std::string GenDefaultValue(const Value &value) {
return GenDefaultValue(value, true);
}
std::string GenDefaultValueBasic(const Value &value, bool enableLangOverrides) {
if (!IsScalar(value.type.base_type)) {
if (enableLangOverrides) {
if (lang_.language == IDLOptions::kCSharp) {
switch (value.type.base_type) {
case BASE_TYPE_STRING:
return "default(StringOffset)";
case BASE_TYPE_STRUCT:
return "default(Offset<" + WrapInNameSpace(*value.type.struct_def) +
">)";
case BASE_TYPE_VECTOR:
return "default(VectorOffset)";
default:
break;
}
}
}
return "0";
}
return GenDefaultValue(value, enableLangOverrides);
}
std::string GenDefaultValueBasic(const Value &value) {
return GenDefaultValueBasic(value, true);
}
void GenEnum(EnumDef &enum_def, std::string *code_ptr) {
std::string &code = *code_ptr;
if (enum_def.generated) return;
// Generate enum definitions of the form:
// public static (final) int name = value;
// In Java, we use ints rather than the Enum feature, because we want them
// to map directly to how they're used in C/C++ and file formats.
// That, and Java Enums are expensive, and not universally liked.
GenComment(enum_def.doc_comment, code_ptr, &lang_.comment_config);
code += std::string("public ") + lang_.enum_decl + enum_def.name;
if (lang_.language == IDLOptions::kCSharp) {
code += lang_.inheritance_marker +
GenTypeBasic(enum_def.underlying_type, false);
}
code += lang_.open_curly;
if (lang_.language == IDLOptions::kJava) {
code += " private " + enum_def.name + "() { }\n";
}
for (auto it = enum_def.vals.vec.begin();
it != enum_def.vals.vec.end();
++it) {
auto &ev = **it;
GenComment(ev.doc_comment, code_ptr, &lang_.comment_config, " ");
if (lang_.language != IDLOptions::kCSharp) {
code += " public static";
code += lang_.const_decl;
code += GenTypeBasic(enum_def.underlying_type, false);
}
code += " " + ev.name + " = ";
code += NumToString(ev.value);
code += lang_.enum_separator;
}
    // Generate a string table for enum values.
// We do not do that for C# where this functionality is native.
if (lang_.language != IDLOptions::kCSharp) {
// Problem is, if values are very sparse that could generate really big
// tables. Ideally in that case we generate a map lookup instead, but for
// the moment we simply don't output a table at all.
auto range = enum_def.vals.vec.back()->value -
enum_def.vals.vec.front()->value + 1;
// Average distance between values above which we consider a table
// "too sparse". Change at will.
static const int kMaxSparseness = 5;
if (range / static_cast<int64_t>(enum_def.vals.vec.size()) < kMaxSparseness) {
code += "\n public static";
code += lang_.const_decl;
code += lang_.string_type;
code += "[] names = { ";
auto val = enum_def.vals.vec.front()->value;
for (auto it = enum_def.vals.vec.begin();
it != enum_def.vals.vec.end();
++it) {
while (val++ != (*it)->value) code += "\"\", ";
code += "\"" + (*it)->name + "\", ";
}
code += "};\n\n";
code += " public static ";
code += lang_.string_type;
code += " " + MakeCamel("name", lang_.first_camel_upper);
code += "(int e) { return names[e";
if (enum_def.vals.vec.front()->value)
code += " - " + enum_def.vals.vec.front()->name;
code += "]; }\n";
}
}
// Close the class
code += "}";
// Java does not need the closing semi-colon on class definitions.
code += (lang_.language != IDLOptions::kJava) ? ";" : "";
code += "\n\n";
}
// Returns the function name that is able to read a value of the given type.
std::string GenGetter(const Type &type) {
switch (type.base_type) {
case BASE_TYPE_STRING: return lang_.accessor_prefix + "__string";
case BASE_TYPE_STRUCT: return lang_.accessor_prefix + "__struct";
case BASE_TYPE_UNION: return lang_.accessor_prefix + "__union";
case BASE_TYPE_VECTOR: return GenGetter(type.VectorType());
default: {
std::string getter =
lang_.accessor_prefix + "bb." + FunctionStart('G') + "et";
if (type.base_type == BASE_TYPE_BOOL) {
getter = "0!=" + getter;
} else if (GenTypeBasic(type, false) != "byte") {
getter += MakeCamel(GenTypeBasic(type, false));
}
return getter;
}
}
}
// Returns the function name that is able to read a value of the given type.
std::string GenGetterForLookupByKey(flatbuffers::FieldDef *key_field,
const std::string &data_buffer,
const char *num = nullptr) {
auto type = key_field->value.type;
auto dest_mask = DestinationMask(type, true);
auto dest_cast = DestinationCast(type);
auto getter = data_buffer + "." + FunctionStart('G') + "et";
if (GenTypeBasic(type, false) != "byte") {
getter += MakeCamel(GenTypeBasic(type, false));
}
getter = dest_cast + getter + "(" + GenOffsetGetter(key_field, num) + ")"
+ dest_mask;
return getter;
}
// Direct mutation is only allowed for scalar fields.
// Hence a setter method will only be generated for such fields.
std::string GenSetter(const Type &type) {
if (IsScalar(type.base_type)) {
std::string setter =
lang_.accessor_prefix + "bb." + FunctionStart('P') + "ut";
if (GenTypeBasic(type, false) != "byte" &&
type.base_type != BASE_TYPE_BOOL) {
setter += MakeCamel(GenTypeBasic(type, false));
}
return setter;
} else {
return "";
}
}
// Returns the method name for use with add/put calls.
std::string GenMethod(const Type &type) {
return IsScalar(type.base_type)
? MakeCamel(GenTypeBasic(type, false))
: (IsStruct(type) ? "Struct" : "Offset");
}
// Recursively generate arguments for a constructor, to deal with nested
// structs.
void GenStructArgs(const StructDef &struct_def, std::string *code_ptr,
const char *nameprefix) {
std::string &code = *code_ptr;
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end();
++it) {
auto &field = **it;
if (IsStruct(field.value.type)) {
// Generate arguments for a struct inside a struct. To ensure names
// don't clash, and to make it obvious these arguments are constructing
// a nested struct, prefix the name with the field name.
GenStructArgs(*field.value.type.struct_def, code_ptr,
(nameprefix + (field.name + "_")).c_str());
} else {
code += ", ";
code += GenTypeBasic(DestinationType(field.value.type, false));
code += " ";
code += nameprefix;
code += MakeCamel(field.name, lang_.first_camel_upper);
}
}
}
  // Recursively generate struct construction statements of the form:
// builder.putType(name);
// and insert manual padding.
void GenStructBody(const StructDef &struct_def, std::string *code_ptr,
const char *nameprefix) {
std::string &code = *code_ptr;
code += " builder." + FunctionStart('P') + "rep(";
code += NumToString(struct_def.minalign) + ", ";
code += NumToString(struct_def.bytesize) + ");\n";
for (auto it = struct_def.fields.vec.rbegin();
it != struct_def.fields.vec.rend(); ++it) {
auto &field = **it;
if (field.padding) {
code += " builder." + FunctionStart('P') + "ad(";
code += NumToString(field.padding) + ");\n";
}
if (IsStruct(field.value.type)) {
GenStructBody(*field.value.type.struct_def, code_ptr,
(nameprefix + (field.name + "_")).c_str());
} else {
code += " builder." + FunctionStart('P') + "ut";
code += GenMethod(field.value.type) + "(";
code += SourceCast(field.value.type);
auto argname = nameprefix + MakeCamel(field.name, lang_.first_camel_upper);
code += argname;
code += ");\n";
}
}
}
std::string GenByteBufferLength(const char *bb_name) {
std::string bb_len = bb_name;
if (lang_.language == IDLOptions::kCSharp) bb_len += ".Length";
else bb_len += ".capacity()";
return bb_len;
}
std::string GenOffsetGetter(flatbuffers::FieldDef *key_field,
const char *num = nullptr) {
std::string key_offset = "";
key_offset += lang_.accessor_prefix_static + "__offset(" +
NumToString(key_field->value.offset) + ", ";
if (num) {
key_offset += num;
key_offset += (lang_.language == IDLOptions::kCSharp ?
".Value, builder.DataBuffer)" : ", _bb)");
} else {
key_offset += GenByteBufferLength("bb");
key_offset += " - tableOffset, bb)";
}
return key_offset;
}
std::string GenLookupKeyGetter(flatbuffers::FieldDef *key_field) {
std::string key_getter = " ";
key_getter += "int tableOffset = " + lang_.accessor_prefix_static;
key_getter += "__indirect(vectorLocation + 4 * (start + middle)";
key_getter += ", bb);\n ";
if (key_field->value.type.base_type == BASE_TYPE_STRING) {
key_getter += "int comp = " + lang_.accessor_prefix_static;
key_getter += FunctionStart('C') + "ompareStrings(";
key_getter += GenOffsetGetter(key_field);
key_getter += ", byteKey, bb);\n";
} else {
auto get_val = GenGetterForLookupByKey(key_field, "bb");
if (lang_.language == IDLOptions::kCSharp) {
key_getter += "int comp = " + get_val + ".CompareTo(key);\n";
} else {
key_getter += GenTypeNameDest(key_field->value.type) + " val = ";
key_getter += get_val + ";\n";
key_getter += " int comp = val > key ? 1 : val < key ? -1 : 0;\n";
}
}
return key_getter;
}
std::string GenKeyGetter(flatbuffers::FieldDef *key_field) {
std::string key_getter = "";
auto data_buffer = (lang_.language == IDLOptions::kCSharp) ?
"builder.DataBuffer" : "_bb";
if (key_field->value.type.base_type == BASE_TYPE_STRING) {
if (lang_.language == IDLOptions::kJava)
key_getter += " return ";
key_getter += lang_.accessor_prefix_static;
key_getter += FunctionStart('C') + "ompareStrings(";
key_getter += GenOffsetGetter(key_field, "o1") + ", ";
key_getter += GenOffsetGetter(key_field, "o2") + ", " + data_buffer + ")";
if (lang_.language == IDLOptions::kJava)
key_getter += ";";
}
else {
auto field_getter = GenGetterForLookupByKey(key_field, data_buffer, "o1");
if (lang_.language == IDLOptions::kCSharp) {
key_getter += field_getter;
field_getter = GenGetterForLookupByKey(key_field, data_buffer, "o2");
key_getter += ".CompareTo(" + field_getter + ")";
}
else {
key_getter += "\n " + GenTypeNameDest(key_field->value.type) + " val_1 = ";
key_getter += field_getter + ";\n " + GenTypeNameDest(key_field->value.type);
key_getter += " val_2 = ";
field_getter = GenGetterForLookupByKey(key_field, data_buffer, "o2");
key_getter += field_getter + ";\n";
key_getter += " return val_1 > val_2 ? 1 : val_1 < val_2 ? -1 : 0;\n ";
}
}
return key_getter;
}
void GenStruct(StructDef &struct_def, std::string *code_ptr) {
if (struct_def.generated) return;
std::string &code = *code_ptr;
// Generate a struct accessor class, with methods of the form:
// public type name() { return bb.getType(i + offset); }
// or for tables of the form:
// public type name() {
// int o = __offset(offset); return o != 0 ? bb.getType(o + i) : default;
// }
GenComment(struct_def.doc_comment, code_ptr, &lang_.comment_config);
code += "public ";
if (lang_.language == IDLOptions::kCSharp &&
struct_def.attributes.Lookup("csharp_partial")) {
// generate a partial class for this C# struct/table
code += "partial ";
} else {
code += lang_.unsubclassable_decl;
}
code += lang_.accessor_type + struct_def.name;
if (lang_.language == IDLOptions::kCSharp) {
code += " : IFlatbufferObject";
code += lang_.open_curly;
code += " private ";
code += struct_def.fixed ? "Struct" : "Table";
code += " __p;\n";
if (lang_.language == IDLOptions::kCSharp) {
code += " public ByteBuffer ByteBuffer { get { return __p.bb; } }\n";
}
} else {
code += lang_.inheritance_marker;
code += struct_def.fixed ? "Struct" : "Table";
code += lang_.open_curly;
}
if (!struct_def.fixed) {
// Generate a special accessor for the table that when used as the root
// of a FlatBuffer
std::string method_name = FunctionStart('G') + "etRootAs" + struct_def.name;
std::string method_signature = " public static " + struct_def.name + " " +
method_name;
// create convenience method that doesn't require an existing object
code += method_signature + "(ByteBuffer _bb) ";
code += "{ return " + method_name + "(_bb, new " + struct_def.name+ "()); }\n";
// create method that allows object reuse
code += method_signature + "(ByteBuffer _bb, " + struct_def.name + " obj) { ";
code += lang_.set_bb_byteorder;
code += "return (obj.__assign(_bb." + FunctionStart('G') + "etInt(_bb.";
code += lang_.get_bb_position;
code += ") + _bb.";
code += lang_.get_bb_position;
code += ", _bb)); }\n";
if (parser_.root_struct_def_ == &struct_def) {
if (parser_.file_identifier_.length()) {
// Check if a buffer has the identifier.
code += " public static ";
code += lang_.bool_type + struct_def.name;
code += "BufferHasIdentifier(ByteBuffer _bb) { return ";
code += lang_.accessor_prefix_static + "__has_identifier(_bb, \"";
code += parser_.file_identifier_;
code += "\"); }\n";
}
}
}
// Generate the __init method that sets the field in a pre-existing
// accessor object. This is to allow object reuse.
code += " public void __init(int _i, ByteBuffer _bb) ";
code += "{ " + lang_.accessor_prefix + "bb_pos = _i; ";
code += lang_.accessor_prefix + "bb = _bb; }\n";
code += " public " + struct_def.name + " __assign(int _i, ByteBuffer _bb) ";
code += "{ __init(_i, _bb); return this; }\n\n";
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end();
++it) {
auto &field = **it;
if (field.deprecated) continue;
GenComment(field.doc_comment, code_ptr, &lang_.comment_config, " ");
std::string type_name = GenTypeGet(field.value.type);
std::string type_name_dest = GenTypeNameDest(field.value.type);
std::string conditional_cast = "";
std::string optional = "";
if (lang_.language == IDLOptions::kCSharp &&
!struct_def.fixed &&
(field.value.type.base_type == BASE_TYPE_STRUCT ||
field.value.type.base_type == BASE_TYPE_UNION ||
(field.value.type.base_type == BASE_TYPE_VECTOR &&
field.value.type.element == BASE_TYPE_STRUCT))) {
optional = lang_.optional_suffix;
conditional_cast = "(" + type_name_dest + optional + ")";
}
std::string dest_mask = DestinationMask(field.value.type, true);
std::string dest_cast = DestinationCast(field.value.type);
std::string src_cast = SourceCast(field.value.type);
std::string method_start = " public " + type_name_dest + optional + " " +
MakeCamel(field.name, lang_.first_camel_upper);
std::string obj = lang_.language == IDLOptions::kCSharp
? "(new " + type_name + "())"
: "obj";
// Most field accessors need to retrieve and test the field offset first,
// this is the prefix code for that:
auto offset_prefix = " { int o = " + lang_.accessor_prefix + "__offset(" +
NumToString(field.value.offset) +
"); return o != 0 ? ";
// Generate the accessors that don't do object reuse.
if (field.value.type.base_type == BASE_TYPE_STRUCT) {
// Calls the accessor that takes an accessor object with a new object.
if (lang_.language != IDLOptions::kCSharp) {
code += method_start + "() { return ";
code += MakeCamel(field.name, lang_.first_camel_upper);
code += "(new ";
code += type_name + "()); }\n";
}
} else if (field.value.type.base_type == BASE_TYPE_VECTOR &&
field.value.type.element == BASE_TYPE_STRUCT) {
// Accessors for vectors of structs also take accessor objects, this
// generates a variant without that argument.
if (lang_.language != IDLOptions::kCSharp) {
code += method_start + "(int j) { return ";
code += MakeCamel(field.name, lang_.first_camel_upper);
code += "(new " + type_name + "(), j); }\n";
}
} else if (field.value.type.base_type == BASE_TYPE_UNION) {
if (lang_.language == IDLOptions::kCSharp) {
// Union types in C# use generic Table-derived type for better type
// safety.
method_start += "<TTable>";
type_name = type_name_dest;
}
}
std::string getter = dest_cast + GenGetter(field.value.type);
code += method_start;
std::string default_cast = "";
// only create default casts for c# scalars or vectors of scalars
if (lang_.language == IDLOptions::kCSharp &&
(IsScalar(field.value.type.base_type) ||
(field.value.type.base_type == BASE_TYPE_VECTOR &&
IsScalar(field.value.type.element)))) {
// For scalars, default value will be returned by GetDefaultValue().
// If the scalar is an enum, GetDefaultValue() returns an actual c# enum
// that doesn't need to be casted. However, default values for enum
// elements of vectors are integer literals ("0") and are still casted
// for clarity.
if (field.value.type.enum_def == nullptr ||
field.value.type.base_type == BASE_TYPE_VECTOR) {
default_cast = "(" + type_name_dest + ")";
}
}
std::string member_suffix = "; ";
if (IsScalar(field.value.type.base_type)) {
code += lang_.getter_prefix;
member_suffix += lang_.getter_suffix;
if (struct_def.fixed) {
code += " { return " + getter;
code += "(" + lang_.accessor_prefix + "bb_pos + ";
code += NumToString(field.value.offset) + ")";
code += dest_mask;
} else {
code += offset_prefix + getter;
code += "(o + " + lang_.accessor_prefix + "bb_pos)" + dest_mask;
code += " : " + default_cast;
code += GenDefaultValue(field.value);
}
} else {
switch (field.value.type.base_type) {
case BASE_TYPE_STRUCT:
if (lang_.language != IDLOptions::kCSharp) {
code += "(" + type_name + " obj" + ")";
} else {
code += lang_.getter_prefix;
member_suffix += lang_.getter_suffix;
}
if (struct_def.fixed) {
code += " { return " + obj + ".__assign(" + lang_.accessor_prefix;
code += "bb_pos + " + NumToString(field.value.offset) + ", ";
code += lang_.accessor_prefix + "bb)";
} else {
code += offset_prefix + conditional_cast;
code += obj + ".__assign(";
code += field.value.type.struct_def->fixed
? "o + " + lang_.accessor_prefix + "bb_pos"
: lang_.accessor_prefix + "__indirect(o + " +
lang_.accessor_prefix + "bb_pos)";
code += ", " + lang_.accessor_prefix + "bb) : null";
}
break;
case BASE_TYPE_STRING:
code += lang_.getter_prefix;
member_suffix += lang_.getter_suffix;
code += offset_prefix + getter + "(o + " + lang_.accessor_prefix;
code += "bb_pos) : null";
break;
case BASE_TYPE_VECTOR: {
auto vectortype = field.value.type.VectorType();
code += "(";
if (vectortype.base_type == BASE_TYPE_STRUCT) {
if (lang_.language != IDLOptions::kCSharp)
code += type_name + " obj, ";
getter = obj + ".__assign";
}
code += "int j)" + offset_prefix + conditional_cast + getter +"(";
auto index = lang_.accessor_prefix + "__vector(o) + j * " +
NumToString(InlineSize(vectortype));
if (vectortype.base_type == BASE_TYPE_STRUCT) {
code += vectortype.struct_def->fixed
? index
: lang_.accessor_prefix + "__indirect(" + index + ")";
code += ", " + lang_.accessor_prefix + "bb";
} else {
code += index;
}
code += ")" + dest_mask + " : ";
code += field.value.type.element == BASE_TYPE_BOOL ? "false" :
(IsScalar(field.value.type.element) ? default_cast + "0" : "null");
break;
}
case BASE_TYPE_UNION:
if (lang_.language == IDLOptions::kCSharp) {
code += "() where TTable : struct, IFlatbufferObject";
code += offset_prefix + "(TTable?)" + getter;
code += "<TTable>(o) : null";
} else {
code += "(" + type_name + " obj)" + offset_prefix + getter;
code += "(obj, o) : null";
}
break;
default:
assert(0);
}
}
code += member_suffix;
code += "}\n";
if (field.value.type.base_type == BASE_TYPE_VECTOR) {
code += " public int " + MakeCamel(field.name, lang_.first_camel_upper);
code += "Length";
code += lang_.getter_prefix;
code += offset_prefix;
code += lang_.accessor_prefix + "__vector_len(o) : 0; ";
code += lang_.getter_suffix;
code += "}\n";
// See if we should generate a by-key accessor.
if (field.value.type.element == BASE_TYPE_STRUCT &&
!field.value.type.struct_def->fixed) {
auto &sd = *field.value.type.struct_def;
auto &fields = sd.fields.vec;
for (auto kit = fields.begin(); kit != fields.end(); ++kit) {
auto &key_field = **kit;
if (key_field.key) {
code += " public " + sd.name + lang_.optional_suffix + " ";
code += MakeCamel(field.name, lang_.first_camel_upper) + "ByKey(";
code += GenTypeNameDest(key_field.value.type) + " key)";
code += offset_prefix;
code += sd.name + ".__lookup_by_key(";
code += lang_.accessor_prefix + "__vector(o), key, ";
code += lang_.accessor_prefix + "bb) : null; ";
code += "}\n";
break;
}
}
}
}
// Generate a ByteBuffer accessor for strings & vectors of scalars.
if ((field.value.type.base_type == BASE_TYPE_VECTOR &&
IsScalar(field.value.type.VectorType().base_type)) ||
field.value.type.base_type == BASE_TYPE_STRING) {
switch (lang_.language) {
case IDLOptions::kJava:
code += " public ByteBuffer ";
code += MakeCamel(field.name, lang_.first_camel_upper);
code += "AsByteBuffer() { return ";
code += lang_.accessor_prefix + "__vector_as_bytebuffer(";
code += NumToString(field.value.offset) + ", ";
code += NumToString(field.value.type.base_type == BASE_TYPE_STRING
? 1
: InlineSize(field.value.type.VectorType()));
code += "); }\n";
break;
case IDLOptions::kCSharp:
code += " public ArraySegment<byte>? Get";
code += MakeCamel(field.name, lang_.first_camel_upper);
code += "Bytes() { return ";
code += lang_.accessor_prefix + "__vector_as_arraysegment(";
code += NumToString(field.value.offset);
code += "); }\n";
break;
default:
break;
}
}
// generate object accessors if is nested_flatbuffer
auto nested = field.attributes.Lookup("nested_flatbuffer");
if (nested) {
auto nested_qualified_name =
parser_.namespaces_.back()->GetFullyQualifiedName(nested->constant);
auto nested_type = parser_.structs_.Lookup(nested_qualified_name);
auto nested_type_name = WrapInNameSpace(*nested_type);
auto nestedMethodName = MakeCamel(field.name, lang_.first_camel_upper)
+ "As" + nested_type_name;
auto getNestedMethodName = nestedMethodName;
if (lang_.language == IDLOptions::kCSharp) {
getNestedMethodName = "Get" + nestedMethodName;
conditional_cast = "(" + nested_type_name + lang_.optional_suffix + ")";
}
if (lang_.language != IDLOptions::kCSharp) {
code += " public " + nested_type_name + lang_.optional_suffix + " ";
code += nestedMethodName + "() { return ";
code += getNestedMethodName + "(new " + nested_type_name + "()); }\n";
} else {
obj = "(new " + nested_type_name + "())";
}
code += " public " + nested_type_name + lang_.optional_suffix + " ";
code += getNestedMethodName + "(";
if (lang_.language != IDLOptions::kCSharp)
code += nested_type_name + " obj";
code += ") { int o = " + lang_.accessor_prefix + "__offset(";
code += NumToString(field.value.offset) +"); ";
code += "return o != 0 ? " + conditional_cast + obj + ".__assign(";
code += lang_.accessor_prefix;
code += "__indirect(" + lang_.accessor_prefix + "__vector(o)), ";
code += lang_.accessor_prefix + "bb) : null; }\n";
}
// Generate mutators for scalar fields or vectors of scalars.
if (parser_.opts.mutable_buffer) {
auto underlying_type = field.value.type.base_type == BASE_TYPE_VECTOR
? field.value.type.VectorType()
: field.value.type;
// Boolean parameters have to be explicitly converted to byte
// representation.
auto setter_parameter = underlying_type.base_type == BASE_TYPE_BOOL
? "(byte)(" + field.name + " ? 1 : 0)"
: field.name;
auto mutator_prefix = MakeCamel("mutate", lang_.first_camel_upper);
// A vector mutator also needs the index of the vector element it should
// mutate.
auto mutator_params = (field.value.type.base_type == BASE_TYPE_VECTOR
? "(int j, "
: "(") + GenTypeNameDest(underlying_type) + " " + field.name + ") { ";
auto setter_index = field.value.type.base_type == BASE_TYPE_VECTOR
? lang_.accessor_prefix + "__vector(o) + j * " +
NumToString(InlineSize(underlying_type))
: (struct_def.fixed
? lang_.accessor_prefix + "bb_pos + " +
NumToString(field.value.offset)
: "o + " + lang_.accessor_prefix + "bb_pos");
if (IsScalar(field.value.type.base_type) ||
(field.value.type.base_type == BASE_TYPE_VECTOR &&
IsScalar(field.value.type.VectorType().base_type))) {
code += " public ";
code += struct_def.fixed ? "void " : lang_.bool_type;
code += mutator_prefix + MakeCamel(field.name, true);
code += mutator_params;
if (struct_def.fixed) {
code += GenSetter(underlying_type) + "(" + setter_index + ", ";
code += src_cast + setter_parameter + "); }\n";
} else {
code += "int o = " + lang_.accessor_prefix + "__offset(";
code += NumToString(field.value.offset) + ");";
code += " if (o != 0) { " + GenSetter(underlying_type);
code += "(" + setter_index + ", " + src_cast + setter_parameter +
"); return true; } else { return false; } }\n";
}
}
}
}
code += "\n";
flatbuffers::FieldDef *key_field = nullptr;
if (struct_def.fixed) {
// create a struct constructor function
code += " public static " + GenOffsetType(struct_def) + " ";
code += FunctionStart('C') + "reate";
code += struct_def.name + "(FlatBufferBuilder builder";
GenStructArgs(struct_def, code_ptr, "");
code += ") {\n";
GenStructBody(struct_def, code_ptr, "");
code += " return ";
code += GenOffsetConstruct(struct_def,
"builder." + std::string(lang_.get_fbb_offset));
code += ";\n }\n";
} else {
// Generate a method that creates a table in one go. This is only possible
// when the table has no struct fields, since those have to be created
// inline, and there's no way to do so in Java.
bool has_no_struct_fields = true;
int num_fields = 0;
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
auto &field = **it;
if (field.deprecated) continue;
if (IsStruct(field.value.type)) {
has_no_struct_fields = false;
} else {
num_fields++;
}
}
if (has_no_struct_fields && num_fields) {
// Generate a table constructor of the form:
// public static int createName(FlatBufferBuilder builder, args...)
code += " public static " + GenOffsetType(struct_def) + " ";
code += FunctionStart('C') + "reate" + struct_def.name;
code += "(FlatBufferBuilder builder";
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
auto &field = **it;
if (field.deprecated) continue;
code += ",\n ";
code += GenTypeBasic(DestinationType(field.value.type, false));
code += " ";
code += field.name;
if (!IsScalar(field.value.type.base_type)) code += "Offset";
// Java doesn't have defaults, which means this method must always
// supply all arguments, and thus won't compile when fields are added.
if (lang_.language != IDLOptions::kJava) {
code += " = ";
code += GenDefaultValueBasic(field.value);
}
}
code += ") {\n builder.";
code += FunctionStart('S') + "tartObject(";
code += NumToString(struct_def.fields.vec.size()) + ");\n";
for (size_t size = struct_def.sortbysize ? sizeof(largest_scalar_t) : 1;
size;
size /= 2) {
for (auto it = struct_def.fields.vec.rbegin();
it != struct_def.fields.vec.rend(); ++it) {
auto &field = **it;
if (!field.deprecated &&
(!struct_def.sortbysize ||
size == SizeOf(field.value.type.base_type))) {
code += " " + struct_def.name + ".";
code += FunctionStart('A') + "dd";
code += MakeCamel(field.name) + "(builder, " + field.name;
if (!IsScalar(field.value.type.base_type)) code += "Offset";
code += ");\n";
}
}
}
code += " return " + struct_def.name + ".";
code += FunctionStart('E') + "nd" + struct_def.name;
code += "(builder);\n }\n\n";
}
// Generate a set of static methods that allow table construction,
// of the form:
// public static void addName(FlatBufferBuilder builder, short name)
// { builder.addShort(id, name, default); }
// Unlike the Create function, these always work.
code += " public static void " + FunctionStart('S') + "tart";
code += struct_def.name;
code += "(FlatBufferBuilder builder) { builder.";
code += FunctionStart('S') + "tartObject(";
code += NumToString(struct_def.fields.vec.size()) + "); }\n";
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
auto &field = **it;
if (field.deprecated) continue;
if (field.key) key_field = &field;
code += " public static void " + FunctionStart('A') + "dd";
code += MakeCamel(field.name);
code += "(FlatBufferBuilder builder, ";
code += GenTypeBasic(DestinationType(field.value.type, false));
auto argname = MakeCamel(field.name, false);
if (!IsScalar(field.value.type.base_type)) argname += "Offset";
code += " " + argname + ") { builder." + FunctionStart('A') + "dd";
code += GenMethod(field.value.type) + "(";
code += NumToString(it - struct_def.fields.vec.begin()) + ", ";
code += SourceCastBasic(field.value.type);
code += argname;
if (!IsScalar(field.value.type.base_type) &&
field.value.type.base_type != BASE_TYPE_UNION &&
lang_.language == IDLOptions::kCSharp) {
code += ".Value";
}
code += ", ";
if (lang_.language == IDLOptions::kJava)
code += SourceCastBasic( field.value.type );
code += GenDefaultValue(field.value, false);
code += "); }\n";
if (field.value.type.base_type == BASE_TYPE_VECTOR) {
auto vector_type = field.value.type.VectorType();
auto alignment = InlineAlignment(vector_type);
auto elem_size = InlineSize(vector_type);
if (!IsStruct(vector_type)) {
// Generate a method to create a vector from a Java array.
code += " public static " + GenVectorOffsetType() + " ";
code += FunctionStart('C') + "reate";
code += MakeCamel(field.name);
code += "Vector(FlatBufferBuilder builder, ";
code += GenTypeBasic(vector_type) + "[] data) ";
code += "{ builder." + FunctionStart('S') + "tartVector(";
code += NumToString(elem_size);
code += ", data." + FunctionStart('L') + "ength, ";
code += NumToString(alignment);
code += "); for (int i = data.";
code += FunctionStart('L') + "ength - 1; i >= 0; i--) builder.";
code += FunctionStart('A') + "dd";
code += GenMethod(vector_type);
code += "(";
code += SourceCastBasic(vector_type, false);
code += "data[i]";
if (lang_.language == IDLOptions::kCSharp &&
(vector_type.base_type == BASE_TYPE_STRUCT ||
vector_type.base_type == BASE_TYPE_STRING))
code += ".Value";
code += "); return ";
code += "builder." + FunctionStart('E') + "ndVector(); }\n";
}
// Generate a method to start a vector, data to be added manually after.
code += " public static void " + FunctionStart('S') + "tart";
code += MakeCamel(field.name);
code += "Vector(FlatBufferBuilder builder, int numElems) ";
code += "{ builder." + FunctionStart('S') + "tartVector(";
code += NumToString(elem_size);
code += ", numElems, " + NumToString(alignment);
code += "); }\n";
}
}
code += " public static " + GenOffsetType(struct_def) + " ";
code += FunctionStart('E') + "nd" + struct_def.name;
code += "(FlatBufferBuilder builder) {\n int o = builder.";
code += FunctionStart('E') + "ndObject();\n";
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end();
++it) {
auto &field = **it;
if (!field.deprecated && field.required) {
code += " builder." + FunctionStart('R') + "equired(o, ";
code += NumToString(field.value.offset);
code += "); // " + field.name + "\n";
}
}
code += " return " + GenOffsetConstruct(struct_def, "o") + ";\n }\n";
if (parser_.root_struct_def_ == &struct_def) {
code += " public static void ";
code += FunctionStart('F') + "inish" + struct_def.name;
code += "Buffer(FlatBufferBuilder builder, " + GenOffsetType(struct_def);
code += " offset) {";
code += " builder." + FunctionStart('F') + "inish(offset";
if (lang_.language == IDLOptions::kCSharp) {
code += ".Value";
}
if (parser_.file_identifier_.length())
code += ", \"" + parser_.file_identifier_ + "\"";
code += "); }\n";
}
}
// Only generate key compare function for table,
// because `key_field` is not set for struct
if (struct_def.has_key && !struct_def.fixed) {
if (lang_.language == IDLOptions::kJava) {
code += "\n @Override\n protected int keysCompare(";
code += "Integer o1, Integer o2, ByteBuffer _bb) {";
code += GenKeyGetter(key_field);
code += " }\n";
}
else {
code += "\n public static VectorOffset ";
code += "CreateSortedVectorOf" + struct_def.name;
code += "(FlatBufferBuilder builder, ";
code += "Offset<" + struct_def.name + ">";
code += "[] offsets) {\n";
code += " Array.Sort(offsets, (Offset<" + struct_def.name +
"> o1, Offset<" + struct_def.name + "> o2) => " + GenKeyGetter(key_field);
code += ");\n";
code += " return builder.CreateVectorOfTables(offsets);\n }\n";
}
code += "\n public static " + struct_def.name + lang_.optional_suffix;
code += " __lookup_by_key(int vectorLocation, ";
code += GenTypeNameDest(key_field->value.type);
code += " key, ByteBuffer bb) {\n";
if (key_field->value.type.base_type == BASE_TYPE_STRING) {
code += " byte[] byteKey = ";
if (lang_.language == IDLOptions::kJava)
code += "key.getBytes(Table.UTF8_CHARSET.get());\n";
else
code += "System.Text.Encoding.UTF8.GetBytes(key);\n";
}
code += " int span = ";
code += "bb." + FunctionStart('G') + "etInt(vectorLocation - 4);\n";
code += " int start = 0;\n";
code += " while (span != 0) {\n";
code += " int middle = span / 2;\n";
code += GenLookupKeyGetter(key_field);
code += " if (comp > 0) {\n";
code += " span = middle;\n";
code += " } else if (comp < 0) {\n";
code += " middle++;\n";
code += " start += middle;\n";
code += " span -= middle;\n";
code += " } else {\n";
code += " return new " + struct_def.name;
code += "().__assign(tableOffset, bb);\n";
code += " }\n }\n";
code += " return null;\n";
code += " }\n";
}
code += "}";
// Java does not need the closing semi-colon on class definitions.
code += (lang_.language != IDLOptions::kJava) ? ";" : "";
code += "\n\n";
}
const LanguageParameters& lang_;
  // This tracks the current namespace used to determine if a type needs to be prefixed by its namespace
const Namespace *cur_name_space_;
};
} // namespace general
bool GenerateGeneral(const Parser &parser, const std::string &path,
const std::string &file_name) {
general::GeneralGenerator generator(parser, path, file_name);
return generator.generate();
}
std::string GeneralMakeRule(const Parser &parser, const std::string &path,
const std::string &file_name) {
assert(parser.opts.lang <= IDLOptions::kMAX);
const auto &lang = GetLangParams(parser.opts.lang);
std::string make_rule;
for (auto it = parser.enums_.vec.begin(); it != parser.enums_.vec.end();
++it) {
auto &enum_def = **it;
if (make_rule != "") make_rule += " ";
std::string directory =
BaseGenerator::NamespaceDir(parser, path, *enum_def.defined_namespace);
make_rule += directory + enum_def.name + lang.file_extension;
}
for (auto it = parser.structs_.vec.begin(); it != parser.structs_.vec.end();
++it) {
auto &struct_def = **it;
if (make_rule != "") make_rule += " ";
std::string directory =
BaseGenerator::NamespaceDir(parser, path,
*struct_def.defined_namespace);
make_rule += directory + struct_def.name + lang.file_extension;
}
make_rule += ": ";
auto included_files = parser.GetIncludedFilesRecursive(file_name);
for (auto it = included_files.begin(); it != included_files.end(); ++it) {
make_rule += " " + *it;
}
return make_rule;
}
std::string BinaryFileName(const Parser &parser,
const std::string &path,
const std::string &file_name) {
auto ext = parser.file_extension_.length() ? parser.file_extension_ : "bin";
return path + file_name + "." + ext;
}
bool GenerateBinary(const Parser &parser,
const std::string &path,
const std::string &file_name) {
return !parser.builder_.GetSize() ||
flatbuffers::SaveFile(
BinaryFileName(parser, path, file_name).c_str(),
reinterpret_cast<char *>(parser.builder_.GetBufferPointer()),
parser.builder_.GetSize(),
true);
}
std::string BinaryMakeRule(const Parser &parser,
const std::string &path,
const std::string &file_name) {
if (!parser.builder_.GetSize()) return "";
std::string filebase = flatbuffers::StripPath(
flatbuffers::StripExtension(file_name));
std::string make_rule = BinaryFileName(parser, path, filebase) + ": " +
file_name;
auto included_files = parser.GetIncludedFilesRecursive(
parser.root_struct_def_->file);
for (auto it = included_files.begin();
it != included_files.end(); ++it) {
make_rule += " " + *it;
}
return make_rule;
}
} // namespace flatbuffers
| 1 | 12,366 | Like I said, this code looks incorrect. Why is it still looking up the attribute? Why is it getting the type from `nested->type.struct_def` instead of `field.nested_flatbuffer`? Are you running `generate_code.sh`, because I think `nested->type.struct_def` would be null? | google-flatbuffers | java |
@@ -1664,6 +1664,7 @@ TEST_F(VkLayerTest, DescriptorUpdateTemplateEntryWithInlineUniformBlock) {
m_device_extension_names.push_back(VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME);
} else {
printf("%s Push Descriptor Extension not supported, push descriptor cases skipped.\n", kSkipPrefix);
+ return;
}
ASSERT_NO_FATAL_FAILURE(InitState(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT)); | 1 | /*
* Copyright (c) 2015-2021 The Khronos Group Inc.
* Copyright (c) 2015-2021 Valve Corporation
* Copyright (c) 2015-2021 LunarG, Inc.
* Copyright (c) 2015-2021 Google, Inc.
* Modifications Copyright (C) 2020-2021 Advanced Micro Devices, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Author: Chia-I Wu <[email protected]>
* Author: Chris Forbes <[email protected]>
* Author: Courtney Goeltzenleuchter <[email protected]>
* Author: Mark Lobodzinski <[email protected]>
* Author: Mike Stroyan <[email protected]>
* Author: Tobin Ehlis <[email protected]>
* Author: Tony Barbour <[email protected]>
* Author: Cody Northrop <[email protected]>
* Author: Dave Houlton <[email protected]>
* Author: Jeremy Kniager <[email protected]>
* Author: Shannon McPherson <[email protected]>
* Author: John Zulauf <[email protected]>
* Author: Tobias Hector <[email protected]>
*/
#include "cast_utils.h"
#include "layer_validation_tests.h"
TEST_F(VkLayerTest, ImagelessFramebufferRenderPassBeginImageViewMismatchTests) {
TEST_DESCRIPTION(
"Begin a renderPass where the image views specified do not match the parameters used to create the framebuffer and render "
"pass.");
if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
} else {
printf("%s Did not find required device extension %s; skipped.\n", kSkipPrefix,
VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
return;
}
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
bool rp2Supported = CheckCreateRenderPass2Support(this, m_device_extension_names);
if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_IMAGELESS_FRAMEBUFFER_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_KHR_MAINTENANCE2_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_IMAGE_FORMAT_LIST_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_IMAGELESS_FRAMEBUFFER_EXTENSION_NAME);
} else {
printf("%s test requires VK_KHR_imageless_framebuffer, not available. Skipping.\n", kSkipPrefix);
return;
}
VkPhysicalDeviceImagelessFramebufferFeaturesKHR physicalDeviceImagelessFramebufferFeatures = {};
physicalDeviceImagelessFramebufferFeatures.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGELESS_FRAMEBUFFER_FEATURES_KHR;
physicalDeviceImagelessFramebufferFeatures.imagelessFramebuffer = VK_TRUE;
VkPhysicalDeviceFeatures2 physicalDeviceFeatures2 = {};
physicalDeviceFeatures2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
physicalDeviceFeatures2.pNext = &physicalDeviceImagelessFramebufferFeatures;
ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &physicalDeviceFeatures2, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT));
uint32_t attachmentWidth = 512;
uint32_t attachmentHeight = 512;
VkFormat attachmentFormats[2] = {VK_FORMAT_R8G8B8A8_UNORM, VK_FORMAT_B8G8R8A8_UNORM};
VkFormat framebufferAttachmentFormats[3] = {VK_FORMAT_R8G8B8A8_UNORM, VK_FORMAT_B8G8R8A8_UNORM, VK_FORMAT_B8G8R8A8_UNORM};
// Create a renderPass with a single attachment
VkAttachmentDescription attachmentDescription = {};
attachmentDescription.format = attachmentFormats[0];
attachmentDescription.samples = VK_SAMPLE_COUNT_1_BIT;
attachmentDescription.finalLayout = VK_IMAGE_LAYOUT_GENERAL;
VkAttachmentReference attachmentReference = {};
attachmentReference.layout = VK_IMAGE_LAYOUT_GENERAL;
VkSubpassDescription subpassDescription = {};
subpassDescription.colorAttachmentCount = 1;
subpassDescription.pColorAttachments = &attachmentReference;
VkRenderPassCreateInfo renderPassCreateInfo = {};
renderPassCreateInfo.subpassCount = 1;
renderPassCreateInfo.pSubpasses = &subpassDescription;
renderPassCreateInfo.attachmentCount = 1;
renderPassCreateInfo.pAttachments = &attachmentDescription;
renderPassCreateInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
VkRenderPass renderPass;
vk::CreateRenderPass(m_device->device(), &renderPassCreateInfo, NULL, &renderPass);
VkFramebufferAttachmentImageInfoKHR framebufferAttachmentImageInfo = {};
framebufferAttachmentImageInfo.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENT_IMAGE_INFO_KHR;
framebufferAttachmentImageInfo.flags = VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT;
framebufferAttachmentImageInfo.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
framebufferAttachmentImageInfo.width = attachmentWidth;
framebufferAttachmentImageInfo.height = attachmentHeight;
framebufferAttachmentImageInfo.layerCount = 1;
framebufferAttachmentImageInfo.viewFormatCount = 2;
framebufferAttachmentImageInfo.pViewFormats = framebufferAttachmentFormats;
VkFramebufferAttachmentsCreateInfoKHR framebufferAttachmentsCreateInfo = {};
framebufferAttachmentsCreateInfo.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENTS_CREATE_INFO_KHR;
framebufferAttachmentsCreateInfo.attachmentImageInfoCount = 1;
framebufferAttachmentsCreateInfo.pAttachmentImageInfos = &framebufferAttachmentImageInfo;
VkFramebufferCreateInfo framebufferCreateInfo = {};
framebufferCreateInfo.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
framebufferCreateInfo.pNext = &framebufferAttachmentsCreateInfo;
framebufferCreateInfo.flags = VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT_KHR;
framebufferCreateInfo.width = attachmentWidth;
framebufferCreateInfo.height = attachmentHeight;
framebufferCreateInfo.layers = 1;
framebufferCreateInfo.attachmentCount = 1;
framebufferCreateInfo.pAttachments = nullptr;
framebufferCreateInfo.renderPass = renderPass;
VkFramebuffer framebuffer;
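// Create a real image (mutable format, two view formats) whose views are supplied later via VkRenderPassAttachmentBeginInfoKHR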
VkImageFormatListCreateInfoKHR imageFormatListCreateInfo = {};
imageFormatListCreateInfo.sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO_KHR;
imageFormatListCreateInfo.viewFormatCount = 2;
imageFormatListCreateInfo.pViewFormats = attachmentFormats;
VkImageCreateInfo imageCreateInfo = {};
imageCreateInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
imageCreateInfo.pNext = &imageFormatListCreateInfo;
imageCreateInfo.flags = VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT;
imageCreateInfo.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
imageCreateInfo.extent.width = attachmentWidth;
imageCreateInfo.extent.height = attachmentHeight;
imageCreateInfo.extent.depth = 1;
imageCreateInfo.arrayLayers = 1;
imageCreateInfo.mipLevels = 10;
imageCreateInfo.imageType = VK_IMAGE_TYPE_2D;
imageCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
imageCreateInfo.format = attachmentFormats[0];
VkImageObj imageObject(m_device);
imageObject.init(&imageCreateInfo);
VkImage image = imageObject.image();
// Restrict the view to a subset of the image usage (drop the TRANSFER_SRC bit)
VkImageViewUsageCreateInfo image_view_usage_create_info = {};
image_view_usage_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_USAGE_CREATE_INFO;
image_view_usage_create_info.pNext = nullptr;
image_view_usage_create_info.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
VkImageViewCreateInfo imageViewCreateInfo = {};
imageViewCreateInfo.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
imageViewCreateInfo.image = image;
imageViewCreateInfo.viewType = VK_IMAGE_VIEW_TYPE_2D;
imageViewCreateInfo.format = attachmentFormats[0];
imageViewCreateInfo.subresourceRange.layerCount = 1;
imageViewCreateInfo.subresourceRange.levelCount = 1;
imageViewCreateInfo.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
// Has subset of usage flags
VkImageView imageViewSubset;
imageViewCreateInfo.pNext = &image_view_usage_create_info;
vk::CreateImageView(m_device->device(), &imageViewCreateInfo, NULL, &imageViewSubset);
imageViewCreateInfo.pNext = nullptr;
VkImageView imageView;
vk::CreateImageView(m_device->device(), &imageViewCreateInfo, NULL, &imageView);
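// Supply the image view at render pass begin time through VkRenderPassAttachmentBeginInfoKHR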
VkRenderPassAttachmentBeginInfoKHR renderPassAttachmentBeginInfo = {};
renderPassAttachmentBeginInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_ATTACHMENT_BEGIN_INFO_KHR;
renderPassAttachmentBeginInfo.pNext = nullptr;
renderPassAttachmentBeginInfo.attachmentCount = 1;
renderPassAttachmentBeginInfo.pAttachments = &imageView;
VkRenderPassBeginInfo renderPassBeginInfo = {};
renderPassBeginInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
renderPassBeginInfo.pNext = &renderPassAttachmentBeginInfo;
renderPassBeginInfo.renderPass = renderPass;
renderPassBeginInfo.renderArea.extent.width = attachmentWidth;
renderPassBeginInfo.renderArea.extent.height = attachmentHeight;
// Positive test first
VkCommandBufferBeginInfo cmd_begin_info = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, nullptr,
VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT, nullptr};
framebufferCreateInfo.pAttachments = nullptr;
framebufferCreateInfo.flags = VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT_KHR;
vk::CreateFramebuffer(m_device->device(), &framebufferCreateInfo, nullptr, &framebuffer);
renderPassBeginInfo.framebuffer = framebuffer;
vk::BeginCommandBuffer(m_commandBuffer->handle(), &cmd_begin_info);
m_errorMonitor->ExpectSuccess();
vk::CmdBeginRenderPass(m_commandBuffer->handle(), &renderPassBeginInfo, VK_SUBPASS_CONTENTS_INLINE);
m_errorMonitor->VerifyNotFound();
vk::ResetCommandBuffer(m_commandBuffer->handle(), 0);
vk::DestroyFramebuffer(m_device->device(), framebuffer, nullptr);
// Imageless framebuffer creation bit not present
framebufferCreateInfo.pAttachments = &imageView;
framebufferCreateInfo.flags = 0;
vk::CreateFramebuffer(m_device->device(), &framebufferCreateInfo, nullptr, &framebuffer);
renderPassBeginInfo.framebuffer = framebuffer;
TestRenderPassBegin(m_errorMonitor, m_device->device(), m_commandBuffer->handle(), &renderPassBeginInfo, rp2Supported,
"VUID-VkRenderPassBeginInfo-framebuffer-03207", "VUID-VkRenderPassBeginInfo-framebuffer-03207");
vk::DestroyFramebuffer(m_device->device(), framebuffer, nullptr);
framebufferCreateInfo.pAttachments = nullptr;
framebufferCreateInfo.flags = VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT_KHR;
// Mismatched number of attachments
vk::CreateFramebuffer(m_device->device(), &framebufferCreateInfo, nullptr, &framebuffer);
renderPassAttachmentBeginInfo.attachmentCount = 2;
renderPassBeginInfo.framebuffer = framebuffer;
TestRenderPassBegin(m_errorMonitor, m_device->device(), m_commandBuffer->handle(), &renderPassBeginInfo, rp2Supported,
"VUID-VkRenderPassBeginInfo-framebuffer-03208", "VUID-VkRenderPassBeginInfo-framebuffer-03208");
renderPassAttachmentBeginInfo.attachmentCount = 1;
vk::DestroyFramebuffer(m_device->device(), framebuffer, nullptr);
// Mismatched flags
framebufferAttachmentImageInfo.flags = 0;
vk::CreateFramebuffer(m_device->device(), &framebufferCreateInfo, nullptr, &framebuffer);
renderPassBeginInfo.framebuffer = framebuffer;
TestRenderPassBegin(m_errorMonitor, m_device->device(), m_commandBuffer->handle(), &renderPassBeginInfo, rp2Supported,
"VUID-VkRenderPassBeginInfo-framebuffer-03209", "VUID-VkRenderPassBeginInfo-framebuffer-03209");
vk::DestroyFramebuffer(m_device->device(), framebuffer, nullptr);
framebufferAttachmentImageInfo.flags = VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT;
// Mismatched usage
framebufferAttachmentImageInfo.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
vk::CreateFramebuffer(m_device->device(), &framebufferCreateInfo, nullptr, &framebuffer);
renderPassBeginInfo.framebuffer = framebuffer;
TestRenderPassBegin(m_errorMonitor, m_device->device(), m_commandBuffer->handle(), &renderPassBeginInfo, rp2Supported,
"VUID-VkRenderPassBeginInfo-framebuffer-04627", "VUID-VkRenderPassBeginInfo-framebuffer-04627");
vk::DestroyFramebuffer(m_device->device(), framebuffer, nullptr);
framebufferAttachmentImageInfo.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
// Mismatched usage: VkImageViewUsageCreateInfo restricted the view to COLOR_ATTACHMENT only, dropping TRANSFER_SRC
renderPassAttachmentBeginInfo.pAttachments = &imageViewSubset;
vk::CreateFramebuffer(m_device->device(), &framebufferCreateInfo, nullptr, &framebuffer);
renderPassBeginInfo.framebuffer = framebuffer;
TestRenderPassBegin(m_errorMonitor, m_device->device(), m_commandBuffer->handle(), &renderPassBeginInfo, rp2Supported,
"VUID-VkRenderPassBeginInfo-framebuffer-04627", "VUID-VkRenderPassBeginInfo-framebuffer-04627");
vk::DestroyFramebuffer(m_device->device(), framebuffer, nullptr);
renderPassAttachmentBeginInfo.pAttachments = &imageView;
// Mismatched width
framebufferAttachmentImageInfo.width += 1;
vk::CreateFramebuffer(m_device->device(), &framebufferCreateInfo, nullptr, &framebuffer);
renderPassBeginInfo.framebuffer = framebuffer;
TestRenderPassBegin(m_errorMonitor, m_device->device(), m_commandBuffer->handle(), &renderPassBeginInfo, rp2Supported,
"VUID-VkRenderPassBeginInfo-framebuffer-03211", "VUID-VkRenderPassBeginInfo-framebuffer-03211");
vk::DestroyFramebuffer(m_device->device(), framebuffer, nullptr);
framebufferAttachmentImageInfo.width -= 1;
// Mismatched height
framebufferAttachmentImageInfo.height += 1;
vk::CreateFramebuffer(m_device->device(), &framebufferCreateInfo, nullptr, &framebuffer);
renderPassBeginInfo.framebuffer = framebuffer;
TestRenderPassBegin(m_errorMonitor, m_device->device(), m_commandBuffer->handle(), &renderPassBeginInfo, rp2Supported,
"VUID-VkRenderPassBeginInfo-framebuffer-03212", "VUID-VkRenderPassBeginInfo-framebuffer-03212");
vk::DestroyFramebuffer(m_device->device(), framebuffer, nullptr);
framebufferAttachmentImageInfo.height -= 1;
// Mismatched layer count
framebufferAttachmentImageInfo.layerCount += 1;
vk::CreateFramebuffer(m_device->device(), &framebufferCreateInfo, nullptr, &framebuffer);
renderPassBeginInfo.framebuffer = framebuffer;
TestRenderPassBegin(m_errorMonitor, m_device->device(), m_commandBuffer->handle(), &renderPassBeginInfo, rp2Supported,
"VUID-VkRenderPassBeginInfo-framebuffer-03213", "VUID-VkRenderPassBeginInfo-framebuffer-03213");
vk::DestroyFramebuffer(m_device->device(), framebuffer, nullptr);
framebufferAttachmentImageInfo.layerCount -= 1;
// Mismatched view format count
framebufferAttachmentImageInfo.viewFormatCount = 3;
vk::CreateFramebuffer(m_device->device(), &framebufferCreateInfo, nullptr, &framebuffer);
renderPassBeginInfo.framebuffer = framebuffer;
TestRenderPassBegin(m_errorMonitor, m_device->device(), m_commandBuffer->handle(), &renderPassBeginInfo, rp2Supported,
"VUID-VkRenderPassBeginInfo-framebuffer-03214", "VUID-VkRenderPassBeginInfo-framebuffer-03214");
vk::DestroyFramebuffer(m_device->device(), framebuffer, nullptr);
framebufferAttachmentImageInfo.viewFormatCount = 2;
// Mismatched format lists
framebufferAttachmentFormats[1] = VK_FORMAT_B8G8R8A8_SRGB;
vk::CreateFramebuffer(m_device->device(), &framebufferCreateInfo, nullptr, &framebuffer);
renderPassBeginInfo.framebuffer = framebuffer;
TestRenderPassBegin(m_errorMonitor, m_device->device(), m_commandBuffer->handle(), &renderPassBeginInfo, rp2Supported,
"VUID-VkRenderPassBeginInfo-framebuffer-03215", "VUID-VkRenderPassBeginInfo-framebuffer-03215");
vk::DestroyFramebuffer(m_device->device(), framebuffer, nullptr);
framebufferAttachmentFormats[1] = VK_FORMAT_B8G8R8A8_UNORM;
// Mismatched formats
VkImageView imageView2;
imageViewCreateInfo.format = attachmentFormats[1];
vk::CreateImageView(m_device->device(), &imageViewCreateInfo, nullptr, &imageView2);
renderPassAttachmentBeginInfo.pAttachments = &imageView2;
vk::CreateFramebuffer(m_device->device(), &framebufferCreateInfo, nullptr, &framebuffer);
renderPassBeginInfo.framebuffer = framebuffer;
TestRenderPassBegin(m_errorMonitor, m_device->device(), m_commandBuffer->handle(), &renderPassBeginInfo, rp2Supported,
"VUID-VkRenderPassBeginInfo-framebuffer-03216", "VUID-VkRenderPassBeginInfo-framebuffer-03216");
vk::DestroyFramebuffer(m_device->device(), framebuffer, nullptr);
vk::DestroyImageView(m_device->device(), imageView2, nullptr);
renderPassAttachmentBeginInfo.pAttachments = &imageView;
imageViewCreateInfo.format = attachmentFormats[0];
// Mismatched sample counts
imageCreateInfo.samples = VK_SAMPLE_COUNT_4_BIT;
imageCreateInfo.mipLevels = 1;
VkImageObj imageObject2(m_device);
imageObject2.init(&imageCreateInfo);
imageViewCreateInfo.image = imageObject2.image();
vk::CreateImageView(m_device->device(), &imageViewCreateInfo, nullptr, &imageView2);
renderPassAttachmentBeginInfo.pAttachments = &imageView2;
vk::CreateFramebuffer(m_device->device(), &framebufferCreateInfo, nullptr, &framebuffer);
renderPassBeginInfo.framebuffer = framebuffer;
TestRenderPassBegin(m_errorMonitor, m_device->device(), m_commandBuffer->handle(), &renderPassBeginInfo, rp2Supported,
"VUID-VkRenderPassBeginInfo-framebuffer-03217", "VUID-VkRenderPassBeginInfo-framebuffer-03217");
vk::DestroyFramebuffer(m_device->device(), framebuffer, nullptr);
vk::DestroyImageView(m_device->device(), imageView2, nullptr);
renderPassAttachmentBeginInfo.pAttachments = &imageView;
imageViewCreateInfo.image = imageObject.image();
imageCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
imageCreateInfo.mipLevels = 10;
// Mismatched level counts
imageViewCreateInfo.subresourceRange.levelCount = 2;
vk::CreateImageView(m_device->device(), &imageViewCreateInfo, nullptr, &imageView2);
renderPassAttachmentBeginInfo.pAttachments = &imageView2;
vk::CreateFramebuffer(m_device->device(), &framebufferCreateInfo, nullptr, &framebuffer);
renderPassBeginInfo.framebuffer = framebuffer;
TestRenderPassBegin(m_errorMonitor, m_device->device(), m_commandBuffer->handle(), &renderPassBeginInfo, rp2Supported,
"VUID-VkRenderPassAttachmentBeginInfo-pAttachments-03218",
"VUID-VkRenderPassAttachmentBeginInfo-pAttachments-03218");
vk::DestroyFramebuffer(m_device->device(), framebuffer, nullptr);
vk::DestroyImageView(m_device->device(), imageView2, nullptr);
renderPassAttachmentBeginInfo.pAttachments = &imageView;
imageViewCreateInfo.subresourceRange.levelCount = 1;
// Non-identity component swizzle
imageViewCreateInfo.components.r = VK_COMPONENT_SWIZZLE_A;
vk::CreateImageView(m_device->device(), &imageViewCreateInfo, nullptr, &imageView2);
renderPassAttachmentBeginInfo.pAttachments = &imageView2;
vk::CreateFramebuffer(m_device->device(), &framebufferCreateInfo, nullptr, &framebuffer);
renderPassBeginInfo.framebuffer = framebuffer;
TestRenderPassBegin(m_errorMonitor, m_device->device(), m_commandBuffer->handle(), &renderPassBeginInfo, rp2Supported,
"VUID-VkRenderPassAttachmentBeginInfo-pAttachments-03219",
"VUID-VkRenderPassAttachmentBeginInfo-pAttachments-03219");
vk::DestroyFramebuffer(m_device->device(), framebuffer, nullptr);
vk::DestroyImageView(m_device->device(), imageView2, nullptr);
renderPassAttachmentBeginInfo.pAttachments = &imageView;
imageViewCreateInfo.components.r = VK_COMPONENT_SWIZZLE_IDENTITY;
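// Positive test: the view uses mip level 1, so the attachment info extent is halved to match that mip level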
imageViewCreateInfo.subresourceRange.baseMipLevel = 1;
vk::CreateImageView(m_device->device(), &imageViewCreateInfo, nullptr, &imageView2);
renderPassAttachmentBeginInfo.pAttachments = &imageView2;
framebufferAttachmentImageInfo.height = framebufferAttachmentImageInfo.height / 2;
framebufferAttachmentImageInfo.width = framebufferAttachmentImageInfo.width / 2;
vk::CreateFramebuffer(m_device->device(), &framebufferCreateInfo, nullptr, &framebuffer);
renderPassBeginInfo.framebuffer = framebuffer;
vk::BeginCommandBuffer(m_commandBuffer->handle(), &cmd_begin_info);
m_errorMonitor->ExpectSuccess();
vk::CmdBeginRenderPass(m_commandBuffer->handle(), &renderPassBeginInfo, VK_SUBPASS_CONTENTS_INLINE);
m_errorMonitor->VerifyNotFound();
vk::ResetCommandBuffer(m_commandBuffer->handle(), 0);
vk::DestroyFramebuffer(m_device->device(), framebuffer, nullptr);
vk::DestroyImageView(m_device->device(), imageView2, nullptr);
renderPassAttachmentBeginInfo.pAttachments = &imageView;
imageViewCreateInfo.subresourceRange.baseMipLevel = 0;
framebufferAttachmentImageInfo.height = framebufferAttachmentImageInfo.height * 2;
framebufferAttachmentImageInfo.width = framebufferAttachmentImageInfo.width * 2;
vk::DestroyRenderPass(m_device->device(), renderPass, nullptr);
vk::DestroyImageView(m_device->device(), imageView, nullptr);
vk::DestroyImageView(m_device->device(), imageViewSubset, nullptr);
}
TEST_F(VkLayerTest, ImagelessFramebufferFeatureEnableTest) {
TEST_DESCRIPTION("Use imageless framebuffer functionality without enabling the feature");
if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
} else {
printf("%s Did not find required instance extension %s; skipped.\n", kSkipPrefix,
VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
return;
}
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_IMAGELESS_FRAMEBUFFER_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_KHR_MAINTENANCE2_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_IMAGE_FORMAT_LIST_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_IMAGELESS_FRAMEBUFFER_EXTENSION_NAME);
} else {
printf("%s Did not find required device extension %s; skipped.\n", kSkipPrefix,
VK_KHR_IMAGELESS_FRAMEBUFFER_EXTENSION_NAME);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT));
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
uint32_t attachmentWidth = 512;
uint32_t attachmentHeight = 512;
VkFormat attachmentFormat = VK_FORMAT_R8G8B8A8_UNORM;
// Create a renderPass with a single attachment
VkAttachmentDescription attachmentDescription = {};
attachmentDescription.format = attachmentFormat;
attachmentDescription.samples = VK_SAMPLE_COUNT_1_BIT;
attachmentDescription.finalLayout = VK_IMAGE_LAYOUT_GENERAL;
VkAttachmentReference attachmentReference = {};
attachmentReference.layout = VK_IMAGE_LAYOUT_GENERAL;
VkSubpassDescription subpassDescription = {};
subpassDescription.colorAttachmentCount = 1;
subpassDescription.pColorAttachments = &attachmentReference;
VkRenderPassCreateInfo renderPassCreateInfo = {};
renderPassCreateInfo.subpassCount = 1;
renderPassCreateInfo.pSubpasses = &subpassDescription;
renderPassCreateInfo.attachmentCount = 1;
renderPassCreateInfo.pAttachments = &attachmentDescription;
renderPassCreateInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
VkRenderPass renderPass;
vk::CreateRenderPass(m_device->device(), &renderPassCreateInfo, NULL, &renderPass);
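// Describe an imageless framebuffer attachment even though the imagelessFramebuffer feature was not enabled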
VkFramebufferAttachmentImageInfoKHR framebufferAttachmentImageInfo = {};
framebufferAttachmentImageInfo.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENT_IMAGE_INFO_KHR;
framebufferAttachmentImageInfo.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
framebufferAttachmentImageInfo.width = attachmentWidth;
framebufferAttachmentImageInfo.height = attachmentHeight;
framebufferAttachmentImageInfo.layerCount = 1;
framebufferAttachmentImageInfo.viewFormatCount = 1;
framebufferAttachmentImageInfo.pViewFormats = &attachmentFormat;
VkFramebufferAttachmentsCreateInfoKHR framebufferAttachmentsCreateInfo = {};
framebufferAttachmentsCreateInfo.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENTS_CREATE_INFO_KHR;
framebufferAttachmentsCreateInfo.attachmentImageInfoCount = 1;
framebufferAttachmentsCreateInfo.pAttachmentImageInfos = &framebufferAttachmentImageInfo;
VkFramebufferCreateInfo framebufferCreateInfo = {};
framebufferCreateInfo.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
framebufferCreateInfo.pNext = &framebufferAttachmentsCreateInfo;
framebufferCreateInfo.flags = VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT_KHR;
framebufferCreateInfo.width = attachmentWidth;
framebufferCreateInfo.height = attachmentHeight;
framebufferCreateInfo.layers = 1;
framebufferCreateInfo.renderPass = renderPass;
framebufferCreateInfo.attachmentCount = 1;
VkFramebuffer framebuffer = VK_NULL_HANDLE;
// Imageless framebuffer creation bit not present
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkFramebufferCreateInfo-flags-03189");
vk::CreateFramebuffer(m_device->device(), &framebufferCreateInfo, nullptr, &framebuffer);
m_errorMonitor->VerifyFound();
if (framebuffer != VK_NULL_HANDLE) {
vk::DestroyFramebuffer(m_device->device(), framebuffer, nullptr);
}
vk::DestroyRenderPass(m_device->device(), renderPass, nullptr);
}
TEST_F(VkLayerTest, ImagelessFramebufferCreationTests) {
TEST_DESCRIPTION("Create an imageless framebuffer in various invalid ways");
if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
} else {
printf("%s Did not find required instance extension %s; skipped.\n", kSkipPrefix,
VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
return;
}
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
bool rp2Supported = CheckCreateRenderPass2Support(this, m_device_extension_names);
bool multiviewSupported = rp2Supported;
if (!rp2Supported) {
if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MULTIVIEW_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_KHR_MULTIVIEW_EXTENSION_NAME);
multiviewSupported = true;
}
}
if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_IMAGELESS_FRAMEBUFFER_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_KHR_MAINTENANCE2_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_IMAGE_FORMAT_LIST_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_IMAGELESS_FRAMEBUFFER_EXTENSION_NAME);
} else {
printf("%s Did not find required device extension %s; skipped.\n", kSkipPrefix,
VK_KHR_IMAGELESS_FRAMEBUFFER_EXTENSION_NAME);
return;
}
VkPhysicalDeviceImagelessFramebufferFeaturesKHR physicalDeviceImagelessFramebufferFeatures = {};
physicalDeviceImagelessFramebufferFeatures.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGELESS_FRAMEBUFFER_FEATURES_KHR;
physicalDeviceImagelessFramebufferFeatures.imagelessFramebuffer = VK_TRUE;
VkPhysicalDeviceFeatures2 physicalDeviceFeatures2 = {};
physicalDeviceFeatures2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
physicalDeviceFeatures2.pNext = &physicalDeviceImagelessFramebufferFeatures;
ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &physicalDeviceFeatures2, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT));
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
uint32_t attachmentWidth = 512;
uint32_t attachmentHeight = 512;
VkFormat attachmentFormat = VK_FORMAT_R8G8B8A8_UNORM;
// Create a renderPass with a single attachment
VkAttachmentDescription attachmentDescription = {};
attachmentDescription.format = attachmentFormat;
attachmentDescription.samples = VK_SAMPLE_COUNT_1_BIT;
attachmentDescription.finalLayout = VK_IMAGE_LAYOUT_GENERAL;
VkAttachmentReference attachmentReference = {};
attachmentReference.layout = VK_IMAGE_LAYOUT_GENERAL;
VkSubpassDescription subpassDescription = {};
subpassDescription.colorAttachmentCount = 1;
subpassDescription.pColorAttachments = &attachmentReference;
VkRenderPassCreateInfo renderPassCreateInfo = {};
renderPassCreateInfo.subpassCount = 1;
renderPassCreateInfo.pSubpasses = &subpassDescription;
renderPassCreateInfo.attachmentCount = 1;
renderPassCreateInfo.pAttachments = &attachmentDescription;
renderPassCreateInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
VkRenderPass renderPass;
vk::CreateRenderPass(m_device->device(), &renderPassCreateInfo, NULL, &renderPass);
VkFramebufferAttachmentImageInfoKHR framebufferAttachmentImageInfo = {};
framebufferAttachmentImageInfo.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENT_IMAGE_INFO_KHR;
framebufferAttachmentImageInfo.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
framebufferAttachmentImageInfo.width = attachmentWidth;
framebufferAttachmentImageInfo.height = attachmentHeight;
framebufferAttachmentImageInfo.layerCount = 1;
framebufferAttachmentImageInfo.viewFormatCount = 1;
framebufferAttachmentImageInfo.pViewFormats = &attachmentFormat;
VkFramebufferAttachmentsCreateInfoKHR framebufferAttachmentsCreateInfo = {};
framebufferAttachmentsCreateInfo.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENTS_CREATE_INFO_KHR;
framebufferAttachmentsCreateInfo.attachmentImageInfoCount = 1;
framebufferAttachmentsCreateInfo.pAttachmentImageInfos = &framebufferAttachmentImageInfo;
VkFramebufferCreateInfo framebufferCreateInfo = {};
framebufferCreateInfo.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
framebufferCreateInfo.pNext = &framebufferAttachmentsCreateInfo;
framebufferCreateInfo.flags = VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT_KHR;
framebufferCreateInfo.width = attachmentWidth;
framebufferCreateInfo.height = attachmentHeight;
framebufferCreateInfo.layers = 1;
framebufferCreateInfo.renderPass = renderPass;
framebufferCreateInfo.attachmentCount = 1;
VkFramebuffer framebuffer = VK_NULL_HANDLE;
// Attachments info not present
framebufferCreateInfo.pNext = nullptr;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkFramebufferCreateInfo-flags-03190");
vk::CreateFramebuffer(m_device->device(), &framebufferCreateInfo, nullptr, &framebuffer);
m_errorMonitor->VerifyFound();
if (framebuffer != VK_NULL_HANDLE) {
vk::DestroyFramebuffer(m_device->device(), framebuffer, nullptr);
}
framebufferCreateInfo.pNext = &framebufferAttachmentsCreateInfo;
// Mismatched attachment counts
framebufferAttachmentsCreateInfo.attachmentImageInfoCount = 2;
VkFramebufferAttachmentImageInfoKHR framebufferAttachmentImageInfos[2] = {framebufferAttachmentImageInfo,
framebufferAttachmentImageInfo};
framebufferAttachmentsCreateInfo.pAttachmentImageInfos = framebufferAttachmentImageInfos;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkFramebufferCreateInfo-flags-03191");
vk::CreateFramebuffer(m_device->device(), &framebufferCreateInfo, nullptr, &framebuffer);
m_errorMonitor->VerifyFound();
if (framebuffer != VK_NULL_HANDLE) {
vk::DestroyFramebuffer(m_device->device(), framebuffer, nullptr);
}
framebufferAttachmentsCreateInfo.pAttachmentImageInfos = &framebufferAttachmentImageInfo;
framebufferAttachmentsCreateInfo.attachmentImageInfoCount = 1;
// Mismatched format list
attachmentFormat = VK_FORMAT_B8G8R8A8_UNORM;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkFramebufferCreateInfo-flags-03205");
vk::CreateFramebuffer(m_device->device(), &framebufferCreateInfo, nullptr, &framebuffer);
m_errorMonitor->VerifyFound();
if (framebuffer != VK_NULL_HANDLE) {
vk::DestroyFramebuffer(m_device->device(), framebuffer, nullptr);
}
attachmentFormat = VK_FORMAT_R8G8B8A8_UNORM;
// Mismatched layer count, multiview disabled
framebufferCreateInfo.layers = 2;
const char* mismatchedLayersNoMultiviewVuid =
multiviewSupported ? "VUID-VkFramebufferCreateInfo-renderPass-04546" : "VUID-VkFramebufferCreateInfo-flags-04547";
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, mismatchedLayersNoMultiviewVuid);
vk::CreateFramebuffer(m_device->device(), &framebufferCreateInfo, nullptr, &framebuffer);
m_errorMonitor->VerifyFound();
if (framebuffer != VK_NULL_HANDLE) {
vk::DestroyFramebuffer(m_device->device(), framebuffer, nullptr);
}
framebufferCreateInfo.layers = 1;
// Mismatched width
framebufferCreateInfo.width += 1;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkFramebufferCreateInfo-flags-04541");
vk::CreateFramebuffer(m_device->device(), &framebufferCreateInfo, nullptr, &framebuffer);
m_errorMonitor->VerifyFound();
if (framebuffer != VK_NULL_HANDLE) {
vk::DestroyFramebuffer(m_device->device(), framebuffer, nullptr);
}
framebufferCreateInfo.width -= 1;
// Mismatched height
framebufferCreateInfo.height += 1;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkFramebufferCreateInfo-flags-04542");
vk::CreateFramebuffer(m_device->device(), &framebufferCreateInfo, nullptr, &framebuffer);
m_errorMonitor->VerifyFound();
if (framebuffer != VK_NULL_HANDLE) {
vk::DestroyFramebuffer(m_device->device(), framebuffer, nullptr);
}
framebufferCreateInfo.height -= 1;
vk::DestroyRenderPass(m_device->device(), renderPass, nullptr);
}
TEST_F(VkLayerTest, ImagelessFramebufferAttachmentImageUsageMismatchTests) {
TEST_DESCRIPTION("Create an imageless framebuffer with mismatched attachment image usage");
if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
} else {
printf("%s Did not find required instance extension %s; skipped.\n", kSkipPrefix,
VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
return;
}
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_IMAGELESS_FRAMEBUFFER_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_KHR_MAINTENANCE2_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_IMAGE_FORMAT_LIST_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_IMAGELESS_FRAMEBUFFER_EXTENSION_NAME);
} else {
printf("%s Did not find required device extension %s; skipped.\n", kSkipPrefix,
VK_KHR_IMAGELESS_FRAMEBUFFER_EXTENSION_NAME);
return;
}
VkPhysicalDeviceImagelessFramebufferFeaturesKHR physicalDeviceImagelessFramebufferFeatures = {};
physicalDeviceImagelessFramebufferFeatures.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGELESS_FRAMEBUFFER_FEATURES_KHR;
physicalDeviceImagelessFramebufferFeatures.imagelessFramebuffer = VK_TRUE;
VkPhysicalDeviceFeatures2 physicalDeviceFeatures2 = {};
physicalDeviceFeatures2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
physicalDeviceFeatures2.pNext = &physicalDeviceImagelessFramebufferFeatures;
ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &physicalDeviceFeatures2, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT));
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
uint32_t attachmentWidth = 512;
uint32_t attachmentHeight = 512;
VkFormat colorAndInputAttachmentFormat = VK_FORMAT_R8G8B8A8_UNORM;
VkFormat depthStencilAttachmentFormat = VK_FORMAT_D32_SFLOAT_S8_UINT;
VkAttachmentDescription attachmentDescriptions[4] = {};
// Color attachment
attachmentDescriptions[0].format = colorAndInputAttachmentFormat;
attachmentDescriptions[0].samples = VK_SAMPLE_COUNT_4_BIT;
attachmentDescriptions[0].finalLayout = VK_IMAGE_LAYOUT_GENERAL;
// Color resolve attachment
attachmentDescriptions[1].format = colorAndInputAttachmentFormat;
attachmentDescriptions[1].samples = VK_SAMPLE_COUNT_1_BIT;
attachmentDescriptions[1].finalLayout = VK_IMAGE_LAYOUT_GENERAL;
// Depth stencil attachment
attachmentDescriptions[2].format = depthStencilAttachmentFormat;
attachmentDescriptions[2].samples = VK_SAMPLE_COUNT_4_BIT;
attachmentDescriptions[2].finalLayout = VK_IMAGE_LAYOUT_GENERAL;
// Input attachment
attachmentDescriptions[3].format = colorAndInputAttachmentFormat;
attachmentDescriptions[3].samples = VK_SAMPLE_COUNT_1_BIT;
attachmentDescriptions[3].finalLayout = VK_IMAGE_LAYOUT_GENERAL;
VkAttachmentReference colorAttachmentReference = {};
colorAttachmentReference.layout = VK_IMAGE_LAYOUT_GENERAL;
colorAttachmentReference.attachment = 0;
VkAttachmentReference colorResolveAttachmentReference = {};
colorResolveAttachmentReference.layout = VK_IMAGE_LAYOUT_GENERAL;
colorResolveAttachmentReference.attachment = 1;
VkAttachmentReference depthStencilAttachmentReference = {};
depthStencilAttachmentReference.layout = VK_IMAGE_LAYOUT_GENERAL;
depthStencilAttachmentReference.attachment = 2;
VkAttachmentReference inputAttachmentReference = {};
inputAttachmentReference.layout = VK_IMAGE_LAYOUT_GENERAL;
inputAttachmentReference.attachment = 3;
VkSubpassDescription subpassDescription = {};
subpassDescription.colorAttachmentCount = 1;
subpassDescription.pColorAttachments = &colorAttachmentReference;
subpassDescription.pResolveAttachments = &colorResolveAttachmentReference;
subpassDescription.pDepthStencilAttachment = &depthStencilAttachmentReference;
subpassDescription.inputAttachmentCount = 1;
subpassDescription.pInputAttachments = &inputAttachmentReference;
VkRenderPassCreateInfo renderPassCreateInfo = {};
renderPassCreateInfo.attachmentCount = 4;
renderPassCreateInfo.subpassCount = 1;
renderPassCreateInfo.pSubpasses = &subpassDescription;
renderPassCreateInfo.pAttachments = attachmentDescriptions;
renderPassCreateInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
VkRenderPass renderPass;
vk::CreateRenderPass(m_device->device(), &renderPassCreateInfo, nullptr, &renderPass);
VkFramebufferAttachmentImageInfoKHR framebufferAttachmentImageInfos[4] = {};
// Color attachment
framebufferAttachmentImageInfos[0].sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENT_IMAGE_INFO_KHR;
framebufferAttachmentImageInfos[0].width = attachmentWidth;
framebufferAttachmentImageInfos[0].height = attachmentHeight;
framebufferAttachmentImageInfos[0].usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
framebufferAttachmentImageInfos[0].layerCount = 1;
framebufferAttachmentImageInfos[0].viewFormatCount = 1;
framebufferAttachmentImageInfos[0].pViewFormats = &colorAndInputAttachmentFormat;
// Color resolve attachment
framebufferAttachmentImageInfos[1].sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENT_IMAGE_INFO_KHR;
framebufferAttachmentImageInfos[1].width = attachmentWidth;
framebufferAttachmentImageInfos[1].height = attachmentHeight;
framebufferAttachmentImageInfos[1].usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
framebufferAttachmentImageInfos[1].layerCount = 1;
framebufferAttachmentImageInfos[1].viewFormatCount = 1;
framebufferAttachmentImageInfos[1].pViewFormats = &colorAndInputAttachmentFormat;
// Depth stencil attachment
framebufferAttachmentImageInfos[2].sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENT_IMAGE_INFO_KHR;
framebufferAttachmentImageInfos[2].width = attachmentWidth;
framebufferAttachmentImageInfos[2].height = attachmentHeight;
framebufferAttachmentImageInfos[2].usage = VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
framebufferAttachmentImageInfos[2].layerCount = 1;
framebufferAttachmentImageInfos[2].viewFormatCount = 1;
framebufferAttachmentImageInfos[2].pViewFormats = &depthStencilAttachmentFormat;
// Input attachment
framebufferAttachmentImageInfos[3].sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENT_IMAGE_INFO_KHR;
framebufferAttachmentImageInfos[3].width = attachmentWidth;
framebufferAttachmentImageInfos[3].height = attachmentHeight;
framebufferAttachmentImageInfos[3].usage = VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;
framebufferAttachmentImageInfos[3].layerCount = 1;
framebufferAttachmentImageInfos[3].viewFormatCount = 1;
framebufferAttachmentImageInfos[3].pViewFormats = &colorAndInputAttachmentFormat;
VkFramebufferAttachmentsCreateInfoKHR framebufferAttachmentsCreateInfo = {};
framebufferAttachmentsCreateInfo.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENTS_CREATE_INFO_KHR;
framebufferAttachmentsCreateInfo.attachmentImageInfoCount = 4;
framebufferAttachmentsCreateInfo.pAttachmentImageInfos = framebufferAttachmentImageInfos;
VkFramebufferCreateInfo framebufferCreateInfo = {};
framebufferCreateInfo.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
framebufferCreateInfo.pNext = &framebufferAttachmentsCreateInfo;
framebufferCreateInfo.flags = VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT_KHR;
framebufferCreateInfo.width = attachmentWidth;
framebufferCreateInfo.height = attachmentHeight;
framebufferCreateInfo.layers = 1;
framebufferCreateInfo.renderPass = renderPass;
framebufferCreateInfo.attachmentCount = 4;
VkFramebuffer framebuffer = VK_NULL_HANDLE;
// Color attachment, mismatched usage
framebufferAttachmentImageInfos[0].usage = VK_IMAGE_USAGE_SAMPLED_BIT;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkFramebufferCreateInfo-flags-03201");
vk::CreateFramebuffer(m_device->device(), &framebufferCreateInfo, nullptr, &framebuffer);
m_errorMonitor->VerifyFound();
if (framebuffer != VK_NULL_HANDLE) {
vk::DestroyFramebuffer(m_device->device(), framebuffer, nullptr);
}
framebufferAttachmentImageInfos[0].usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
// Color resolve attachment, mismatched usage
framebufferAttachmentImageInfos[1].usage = VK_IMAGE_USAGE_SAMPLED_BIT;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkFramebufferCreateInfo-flags-03201");
vk::CreateFramebuffer(m_device->device(), &framebufferCreateInfo, nullptr, &framebuffer);
m_errorMonitor->VerifyFound();
if (framebuffer != VK_NULL_HANDLE) {
vk::DestroyFramebuffer(m_device->device(), framebuffer, nullptr);
}
framebufferAttachmentImageInfos[1].usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
// Depth stencil attachment, mismatched usage
framebufferAttachmentImageInfos[2].usage = VK_IMAGE_USAGE_SAMPLED_BIT;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkFramebufferCreateInfo-flags-03202");
vk::CreateFramebuffer(m_device->device(), &framebufferCreateInfo, nullptr, &framebuffer);
m_errorMonitor->VerifyFound();
if (framebuffer != VK_NULL_HANDLE) {
vk::DestroyFramebuffer(m_device->device(), framebuffer, nullptr);
}
framebufferAttachmentImageInfos[2].usage = VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
// Input attachment, mismatched usage
framebufferAttachmentImageInfos[3].usage = VK_IMAGE_USAGE_SAMPLED_BIT;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkFramebufferCreateInfo-flags-03204");
vk::CreateFramebuffer(m_device->device(), &framebufferCreateInfo, nullptr, &framebuffer);
m_errorMonitor->VerifyFound();
if (framebuffer != VK_NULL_HANDLE) {
vk::DestroyFramebuffer(m_device->device(), framebuffer, nullptr);
}
framebufferAttachmentImageInfos[3].usage = VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;
vk::DestroyRenderPass(m_device->device(), renderPass, nullptr);
}
TEST_F(VkLayerTest, ImagelessFramebufferAttachmentMultiviewImageLayerCountMismatchTests) {
TEST_DESCRIPTION("Create an imageless framebuffer against a multiview-enabled render pass with mismatched layer counts");
if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
} else {
printf("%s Did not find required instance extension %s; skipped.\n", kSkipPrefix,
VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
return;
}
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MULTIVIEW_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_KHR_MULTIVIEW_EXTENSION_NAME);
} else {
printf("%s Did not find required device extension %s; skipped.\n", kSkipPrefix, VK_KHR_MULTIVIEW_EXTENSION_NAME);
return;
}
if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_IMAGELESS_FRAMEBUFFER_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_KHR_MAINTENANCE2_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_IMAGE_FORMAT_LIST_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_IMAGELESS_FRAMEBUFFER_EXTENSION_NAME);
} else {
printf("%s Did not find required device extension %s; skipped.\n", kSkipPrefix,
VK_KHR_IMAGELESS_FRAMEBUFFER_EXTENSION_NAME);
return;
}
VkPhysicalDeviceImagelessFramebufferFeaturesKHR physicalDeviceImagelessFramebufferFeatures = {};
physicalDeviceImagelessFramebufferFeatures.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGELESS_FRAMEBUFFER_FEATURES_KHR;
physicalDeviceImagelessFramebufferFeatures.imagelessFramebuffer = VK_TRUE;
VkPhysicalDeviceFeatures2 physicalDeviceFeatures2 = {};
physicalDeviceFeatures2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
physicalDeviceFeatures2.pNext = &physicalDeviceImagelessFramebufferFeatures;
ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &physicalDeviceFeatures2, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT));
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
uint32_t attachmentWidth = 512;
uint32_t attachmentHeight = 512;
VkFormat colorAndInputAttachmentFormat = VK_FORMAT_R8G8B8A8_UNORM;
VkFormat depthStencilAttachmentFormat = VK_FORMAT_D32_SFLOAT_S8_UINT;
VkAttachmentDescription attachmentDescriptions[4] = {};
// Color attachment
attachmentDescriptions[0].format = colorAndInputAttachmentFormat;
attachmentDescriptions[0].samples = VK_SAMPLE_COUNT_4_BIT;
attachmentDescriptions[0].finalLayout = VK_IMAGE_LAYOUT_GENERAL;
// Color resolve attachment
attachmentDescriptions[1].format = colorAndInputAttachmentFormat;
attachmentDescriptions[1].samples = VK_SAMPLE_COUNT_1_BIT;
attachmentDescriptions[1].finalLayout = VK_IMAGE_LAYOUT_GENERAL;
// Depth stencil attachment
attachmentDescriptions[2].format = depthStencilAttachmentFormat;
attachmentDescriptions[2].samples = VK_SAMPLE_COUNT_4_BIT;
attachmentDescriptions[2].finalLayout = VK_IMAGE_LAYOUT_GENERAL;
// Input attachment
attachmentDescriptions[3].format = colorAndInputAttachmentFormat;
attachmentDescriptions[3].samples = VK_SAMPLE_COUNT_1_BIT;
attachmentDescriptions[3].finalLayout = VK_IMAGE_LAYOUT_GENERAL;
VkAttachmentReference colorAttachmentReference = {};
colorAttachmentReference.layout = VK_IMAGE_LAYOUT_GENERAL;
colorAttachmentReference.attachment = 0;
VkAttachmentReference colorResolveAttachmentReference = {};
colorResolveAttachmentReference.layout = VK_IMAGE_LAYOUT_GENERAL;
colorResolveAttachmentReference.attachment = 1;
VkAttachmentReference depthStencilAttachmentReference = {};
depthStencilAttachmentReference.layout = VK_IMAGE_LAYOUT_GENERAL;
depthStencilAttachmentReference.attachment = 2;
VkAttachmentReference inputAttachmentReference = {};
inputAttachmentReference.layout = VK_IMAGE_LAYOUT_GENERAL;
inputAttachmentReference.attachment = 3;
VkSubpassDescription subpassDescription = {};
subpassDescription.colorAttachmentCount = 1;
subpassDescription.pColorAttachments = &colorAttachmentReference;
subpassDescription.pResolveAttachments = &colorResolveAttachmentReference;
subpassDescription.pDepthStencilAttachment = &depthStencilAttachmentReference;
subpassDescription.inputAttachmentCount = 1;
subpassDescription.pInputAttachments = &inputAttachmentReference;
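// Enable multiview with two views, so each attachment needs a layerCount of at least 2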
uint32_t viewMask = 0x3u;
VkRenderPassMultiviewCreateInfo renderPassMultiviewCreateInfo = {};
renderPassMultiviewCreateInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_MULTIVIEW_CREATE_INFO;
renderPassMultiviewCreateInfo.subpassCount = 1;
renderPassMultiviewCreateInfo.pViewMasks = &viewMask;
VkRenderPassCreateInfo renderPassCreateInfo = {};
renderPassCreateInfo.pNext = &renderPassMultiviewCreateInfo;
renderPassCreateInfo.attachmentCount = 4;
renderPassCreateInfo.subpassCount = 1;
renderPassCreateInfo.pSubpasses = &subpassDescription;
renderPassCreateInfo.pAttachments = attachmentDescriptions;
renderPassCreateInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
VkRenderPass renderPass;
vk::CreateRenderPass(m_device->device(), &renderPassCreateInfo, nullptr, &renderPass);
VkFramebufferAttachmentImageInfoKHR framebufferAttachmentImageInfos[4] = {};
// Color attachment
framebufferAttachmentImageInfos[0].sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENT_IMAGE_INFO_KHR;
framebufferAttachmentImageInfos[0].width = attachmentWidth;
framebufferAttachmentImageInfos[0].height = attachmentHeight;
framebufferAttachmentImageInfos[0].usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
framebufferAttachmentImageInfos[0].layerCount = 2;
framebufferAttachmentImageInfos[0].viewFormatCount = 1;
framebufferAttachmentImageInfos[0].pViewFormats = &colorAndInputAttachmentFormat;
// Color resolve attachment
framebufferAttachmentImageInfos[1].sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENT_IMAGE_INFO_KHR;
framebufferAttachmentImageInfos[1].width = attachmentWidth;
framebufferAttachmentImageInfos[1].height = attachmentHeight;
framebufferAttachmentImageInfos[1].usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
framebufferAttachmentImageInfos[1].layerCount = 2;
framebufferAttachmentImageInfos[1].viewFormatCount = 1;
framebufferAttachmentImageInfos[1].pViewFormats = &colorAndInputAttachmentFormat;
// Depth stencil attachment
framebufferAttachmentImageInfos[2].sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENT_IMAGE_INFO_KHR;
framebufferAttachmentImageInfos[2].width = attachmentWidth;
framebufferAttachmentImageInfos[2].height = attachmentHeight;
framebufferAttachmentImageInfos[2].usage = VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
framebufferAttachmentImageInfos[2].layerCount = 2;
framebufferAttachmentImageInfos[2].viewFormatCount = 1;
framebufferAttachmentImageInfos[2].pViewFormats = &depthStencilAttachmentFormat;
// Input attachment
framebufferAttachmentImageInfos[3].sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENT_IMAGE_INFO_KHR;
framebufferAttachmentImageInfos[3].width = attachmentWidth;
framebufferAttachmentImageInfos[3].height = attachmentHeight;
framebufferAttachmentImageInfos[3].usage = VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;
framebufferAttachmentImageInfos[3].layerCount = 2;
framebufferAttachmentImageInfos[3].viewFormatCount = 1;
framebufferAttachmentImageInfos[3].pViewFormats = &colorAndInputAttachmentFormat;
VkFramebufferAttachmentsCreateInfoKHR framebufferAttachmentsCreateInfo = {};
framebufferAttachmentsCreateInfo.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENTS_CREATE_INFO_KHR;
framebufferAttachmentsCreateInfo.attachmentImageInfoCount = 4;
framebufferAttachmentsCreateInfo.pAttachmentImageInfos = framebufferAttachmentImageInfos;
VkFramebufferCreateInfo framebufferCreateInfo = {};
framebufferCreateInfo.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
framebufferCreateInfo.pNext = &framebufferAttachmentsCreateInfo;
framebufferCreateInfo.flags = VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT_KHR;
framebufferCreateInfo.width = attachmentWidth;
framebufferCreateInfo.height = attachmentHeight;
framebufferCreateInfo.layers = 1;
framebufferCreateInfo.renderPass = renderPass;
framebufferCreateInfo.attachmentCount = 4;
VkFramebuffer framebuffer = VK_NULL_HANDLE;
// Color attachment, mismatched layer count
framebufferAttachmentImageInfos[0].layerCount = 1;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkFramebufferCreateInfo-renderPass-03198");
vk::CreateFramebuffer(m_device->device(), &framebufferCreateInfo, nullptr, &framebuffer);
m_errorMonitor->VerifyFound();
if (framebuffer != VK_NULL_HANDLE) {
vk::DestroyFramebuffer(m_device->device(), framebuffer, nullptr);
}
framebufferAttachmentImageInfos[0].layerCount = 2;
// Color resolve attachment, mismatched layer count
framebufferAttachmentImageInfos[1].layerCount = 1;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkFramebufferCreateInfo-renderPass-03198");
vk::CreateFramebuffer(m_device->device(), &framebufferCreateInfo, nullptr, &framebuffer);
m_errorMonitor->VerifyFound();
if (framebuffer != VK_NULL_HANDLE) {
vk::DestroyFramebuffer(m_device->device(), framebuffer, nullptr);
}
framebufferAttachmentImageInfos[1].layerCount = 2;
// Depth stencil attachment, mismatched layer count
framebufferAttachmentImageInfos[2].layerCount = 1;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkFramebufferCreateInfo-renderPass-03198");
vk::CreateFramebuffer(m_device->device(), &framebufferCreateInfo, nullptr, &framebuffer);
m_errorMonitor->VerifyFound();
if (framebuffer != VK_NULL_HANDLE) {
vk::DestroyFramebuffer(m_device->device(), framebuffer, nullptr);
}
framebufferAttachmentImageInfos[2].layerCount = 2;
// Input attachment, mismatched layer count
framebufferAttachmentImageInfos[3].layerCount = 1;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkFramebufferCreateInfo-renderPass-03198");
vk::CreateFramebuffer(m_device->device(), &framebufferCreateInfo, nullptr, &framebuffer);
m_errorMonitor->VerifyFound();
if (framebuffer != VK_NULL_HANDLE) {
vk::DestroyFramebuffer(m_device->device(), framebuffer, nullptr);
}
framebufferAttachmentImageInfos[3].layerCount = 2;
vk::DestroyRenderPass(m_device->device(), renderPass, nullptr);
}
TEST_F(VkLayerTest, ImagelessFramebufferDepthStencilResolveAttachmentTests) {
TEST_DESCRIPTION(
"Create an imageless framebuffer against a render pass using depth stencil resolve, with mismatched information");
if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
} else {
printf("%s Did not find required instance extension %s; skipped.\n", kSkipPrefix,
VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
return;
}
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
bool rp2Supported = CheckCreateRenderPass2Support(this, m_device_extension_names);
if (!rp2Supported) {
printf("%s Did not find required device extension %s; skipped.\n", kSkipPrefix, VK_KHR_CREATE_RENDERPASS_2_EXTENSION_NAME);
return;
}
if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_DEPTH_STENCIL_RESOLVE_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_KHR_DEPTH_STENCIL_RESOLVE_EXTENSION_NAME);
} else {
printf("%s Did not find required device extension %s; skipped.\n", kSkipPrefix,
VK_KHR_DEPTH_STENCIL_RESOLVE_EXTENSION_NAME);
return;
}
if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_IMAGELESS_FRAMEBUFFER_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_KHR_MAINTENANCE2_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_IMAGE_FORMAT_LIST_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_IMAGELESS_FRAMEBUFFER_EXTENSION_NAME);
} else {
printf("%s Did not find required device extension %s; skipped.\n", kSkipPrefix,
VK_KHR_IMAGELESS_FRAMEBUFFER_EXTENSION_NAME);
return;
}
VkPhysicalDeviceImagelessFramebufferFeaturesKHR physicalDeviceImagelessFramebufferFeatures = {};
physicalDeviceImagelessFramebufferFeatures.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGELESS_FRAMEBUFFER_FEATURES_KHR;
physicalDeviceImagelessFramebufferFeatures.imagelessFramebuffer = VK_TRUE;
VkPhysicalDeviceFeatures2 physicalDeviceFeatures2 = {};
physicalDeviceFeatures2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
physicalDeviceFeatures2.pNext = &physicalDeviceImagelessFramebufferFeatures;
ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &physicalDeviceFeatures2, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT));
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
uint32_t attachmentWidth = 512;
uint32_t attachmentHeight = 512;
VkFormat attachmentFormat = FindSupportedDepthStencilFormat(gpu());
if (attachmentFormat == VK_FORMAT_UNDEFINED) {
printf("%s Did not find a supported depth stencil format; skipped.\n", kSkipPrefix);
return;
}
VkAttachmentDescription2KHR attachmentDescriptions[2] = {};
// Depth/stencil attachment
attachmentDescriptions[0].sType = VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_2_KHR;
attachmentDescriptions[0].format = attachmentFormat;
attachmentDescriptions[0].samples = VK_SAMPLE_COUNT_4_BIT;
attachmentDescriptions[0].finalLayout = VK_IMAGE_LAYOUT_GENERAL;
// Depth/stencil resolve attachment
attachmentDescriptions[1].sType = VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_2_KHR;
attachmentDescriptions[1].format = attachmentFormat;
attachmentDescriptions[1].samples = VK_SAMPLE_COUNT_1_BIT;
attachmentDescriptions[1].finalLayout = VK_IMAGE_LAYOUT_GENERAL;
VkAttachmentReference2KHR depthStencilAttachmentReference = {};
depthStencilAttachmentReference.sType = VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_2_KHR;
depthStencilAttachmentReference.layout = VK_IMAGE_LAYOUT_GENERAL;
depthStencilAttachmentReference.attachment = 0;
VkAttachmentReference2KHR depthStencilResolveAttachmentReference = {};
depthStencilResolveAttachmentReference.sType = VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_2_KHR;
depthStencilResolveAttachmentReference.layout = VK_IMAGE_LAYOUT_GENERAL;
depthStencilResolveAttachmentReference.attachment = 1;
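// Add a depth/stencil resolve attachment to the subpass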
VkSubpassDescriptionDepthStencilResolveKHR subpassDescriptionDepthStencilResolve = {};
subpassDescriptionDepthStencilResolve.sType = VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_DEPTH_STENCIL_RESOLVE_KHR;
subpassDescriptionDepthStencilResolve.pDepthStencilResolveAttachment = &depthStencilResolveAttachmentReference;
subpassDescriptionDepthStencilResolve.depthResolveMode = VK_RESOLVE_MODE_SAMPLE_ZERO_BIT_KHR;
subpassDescriptionDepthStencilResolve.stencilResolveMode = VK_RESOLVE_MODE_SAMPLE_ZERO_BIT_KHR;
VkSubpassDescription2KHR subpassDescription = {};
subpassDescription.sType = VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_2_KHR;
subpassDescription.pNext = &subpassDescriptionDepthStencilResolve;
subpassDescription.pDepthStencilAttachment = &depthStencilAttachmentReference;
subpassDescription.viewMask = 0x3u;
VkRenderPassCreateInfo2KHR renderPassCreateInfo = {};
renderPassCreateInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO_2_KHR;
renderPassCreateInfo.pNext = nullptr;
renderPassCreateInfo.attachmentCount = 2;
renderPassCreateInfo.subpassCount = 1;
renderPassCreateInfo.pSubpasses = &subpassDescription;
renderPassCreateInfo.pAttachments = attachmentDescriptions;
VkRenderPass renderPass;
PFN_vkCreateRenderPass2KHR vkCreateRenderPass2KHR =
(PFN_vkCreateRenderPass2KHR)vk::GetDeviceProcAddr(m_device->device(), "vkCreateRenderPass2KHR");
vkCreateRenderPass2KHR(m_device->device(), &renderPassCreateInfo, nullptr, &renderPass);
VkFramebufferAttachmentImageInfoKHR framebufferAttachmentImageInfos[2] = {};
// Depth/stencil attachment
framebufferAttachmentImageInfos[0].sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENT_IMAGE_INFO_KHR;
framebufferAttachmentImageInfos[0].width = attachmentWidth;
framebufferAttachmentImageInfos[0].height = attachmentHeight;
framebufferAttachmentImageInfos[0].usage = VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
framebufferAttachmentImageInfos[0].layerCount = 2;
framebufferAttachmentImageInfos[0].viewFormatCount = 1;
framebufferAttachmentImageInfos[0].pViewFormats = &attachmentFormat;
// Depth/stencil resolve attachment
framebufferAttachmentImageInfos[1].sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENT_IMAGE_INFO_KHR;
framebufferAttachmentImageInfos[1].width = attachmentWidth;
framebufferAttachmentImageInfos[1].height = attachmentHeight;
framebufferAttachmentImageInfos[1].usage = VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
framebufferAttachmentImageInfos[1].layerCount = 2;
framebufferAttachmentImageInfos[1].viewFormatCount = 1;
framebufferAttachmentImageInfos[1].pViewFormats = &attachmentFormat;
VkFramebufferAttachmentsCreateInfoKHR framebufferAttachmentsCreateInfo = {};
framebufferAttachmentsCreateInfo.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENTS_CREATE_INFO_KHR;
framebufferAttachmentsCreateInfo.attachmentImageInfoCount = 2;
framebufferAttachmentsCreateInfo.pAttachmentImageInfos = framebufferAttachmentImageInfos;
VkFramebufferCreateInfo framebufferCreateInfo = {};
framebufferCreateInfo.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
framebufferCreateInfo.pNext = &framebufferAttachmentsCreateInfo;
framebufferCreateInfo.flags = VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT_KHR;
framebufferCreateInfo.width = attachmentWidth;
framebufferCreateInfo.height = attachmentHeight;
framebufferCreateInfo.layers = 1;
framebufferCreateInfo.renderPass = renderPass;
framebufferCreateInfo.attachmentCount = 2;
framebufferCreateInfo.pAttachments = nullptr;
VkFramebuffer framebuffer = VK_NULL_HANDLE;
// Depth/stencil attachment, mismatched layer count
framebufferAttachmentImageInfos[0].layerCount = 1;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkFramebufferCreateInfo-renderPass-03198");
vk::CreateFramebuffer(m_device->device(), &framebufferCreateInfo, nullptr, &framebuffer);
m_errorMonitor->VerifyFound();
if (framebuffer != VK_NULL_HANDLE) {
vk::DestroyFramebuffer(m_device->device(), framebuffer, nullptr);
}
framebufferAttachmentImageInfos[0].layerCount = 2;
// Depth resolve attachment, mismatched image usage
framebufferAttachmentImageInfos[1].usage = VK_IMAGE_USAGE_SAMPLED_BIT;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkFramebufferCreateInfo-flags-03203");
vk::CreateFramebuffer(m_device->device(), &framebufferCreateInfo, nullptr, &framebuffer);
m_errorMonitor->VerifyFound();
if (framebuffer != VK_NULL_HANDLE) {
vk::DestroyFramebuffer(m_device->device(), framebuffer, nullptr);
}
framebufferAttachmentImageInfos[1].usage = VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
// Depth resolve attachment, mismatched layer count
framebufferAttachmentImageInfos[1].layerCount = 1;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkFramebufferCreateInfo-renderPass-03198");
vk::CreateFramebuffer(m_device->device(), &framebufferCreateInfo, nullptr, &framebuffer);
m_errorMonitor->VerifyFound();
if (framebuffer != VK_NULL_HANDLE) {
vk::DestroyFramebuffer(m_device->device(), framebuffer, nullptr);
}
framebufferAttachmentImageInfos[1].layerCount = 2;
vk::DestroyRenderPass(m_device->device(), renderPass, nullptr);
}
TEST_F(VkLayerTest, InvalidFragmentShadingRateImagelessFramebufferUsage) {
TEST_DESCRIPTION("Specify a fragment shading rate attachment without the correct usage");
// Enable KHR_fragment_shading_rate and all of its required extensions
bool fsr_extensions = InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
if (fsr_extensions) {
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
}
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
fsr_extensions = fsr_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE1_EXTENSION_NAME);
fsr_extensions = fsr_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE2_EXTENSION_NAME);
fsr_extensions = fsr_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MULTIVIEW_EXTENSION_NAME);
fsr_extensions = fsr_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_CREATE_RENDERPASS_2_EXTENSION_NAME);
fsr_extensions = fsr_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_FRAGMENT_SHADING_RATE_EXTENSION_NAME);
if (fsr_extensions) {
m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_MAINTENANCE2_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_MULTIVIEW_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_CREATE_RENDERPASS_2_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_FRAGMENT_SHADING_RATE_EXTENSION_NAME);
} else {
printf("%s requires VK_KHR_fragment_shading_rate.\n", kSkipPrefix);
return;
}
if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_IMAGELESS_FRAMEBUFFER_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_KHR_IMAGE_FORMAT_LIST_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_IMAGELESS_FRAMEBUFFER_EXTENSION_NAME);
} else {
printf("%s Did not find required device extension %s; skipped.\n", kSkipPrefix,
VK_KHR_IMAGELESS_FRAMEBUFFER_EXTENSION_NAME);
return;
}
PFN_vkGetPhysicalDeviceProperties2KHR vkGetPhysicalDeviceProperties2KHR =
(PFN_vkGetPhysicalDeviceProperties2KHR)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceProperties2KHR");
ASSERT_TRUE(vkGetPhysicalDeviceProperties2KHR != nullptr);
VkPhysicalDeviceFragmentShadingRatePropertiesKHR fsr_properties =
LvlInitStruct<VkPhysicalDeviceFragmentShadingRatePropertiesKHR>();
VkPhysicalDeviceProperties2KHR properties2 = LvlInitStruct<VkPhysicalDeviceProperties2KHR>(&fsr_properties);
vkGetPhysicalDeviceProperties2KHR(gpu(), &properties2);
PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR =
(PFN_vkGetPhysicalDeviceFeatures2KHR)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR");
ASSERT_TRUE(vkGetPhysicalDeviceFeatures2KHR != nullptr);
VkPhysicalDeviceImagelessFramebufferFeatures if_features = LvlInitStruct<VkPhysicalDeviceImagelessFramebufferFeatures>();
VkPhysicalDeviceFragmentShadingRateFeaturesKHR fsr_features =
LvlInitStruct<VkPhysicalDeviceFragmentShadingRateFeaturesKHR>(&if_features);
VkPhysicalDeviceFeatures2KHR features2 = LvlInitStruct<VkPhysicalDeviceFeatures2KHR>(&fsr_features);
vkGetPhysicalDeviceFeatures2KHR(gpu(), &features2);
if (fsr_features.attachmentFragmentShadingRate != VK_TRUE) {
printf("%s requires attachmentFragmentShadingRate feature.\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features2));
VkAttachmentReference2 attach = {};
attach.sType = VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_2;
attach.layout = VK_IMAGE_LAYOUT_GENERAL;
attach.attachment = 0;
VkFragmentShadingRateAttachmentInfoKHR fsr_attachment = {};
fsr_attachment.sType = VK_STRUCTURE_TYPE_FRAGMENT_SHADING_RATE_ATTACHMENT_INFO_KHR;
fsr_attachment.shadingRateAttachmentTexelSize = fsr_properties.minFragmentShadingRateAttachmentTexelSize;
fsr_attachment.pFragmentShadingRateAttachment = &attach;
// Create a renderPass with a single fsr attachment
VkSubpassDescription2 subpass = {};
subpass.sType = VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_2;
subpass.pNext = &fsr_attachment;
VkAttachmentDescription2 attach_desc = {};
attach_desc.format = VK_FORMAT_R8_UINT;
attach_desc.samples = VK_SAMPLE_COUNT_1_BIT;
attach_desc.initialLayout = VK_IMAGE_LAYOUT_GENERAL;
attach_desc.finalLayout = VK_IMAGE_LAYOUT_GENERAL;
VkRenderPassCreateInfo2 rpci = {};
rpci.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO_2;
rpci.subpassCount = 1;
rpci.pSubpasses = &subpass;
rpci.attachmentCount = 1;
rpci.pAttachments = &attach_desc;
VkRenderPass rp;
PFN_vkCreateRenderPass2KHR vkCreateRenderPass2KHR =
(PFN_vkCreateRenderPass2KHR)vk::GetDeviceProcAddr(m_device->device(), "vkCreateRenderPass2KHR");
VkResult err = vkCreateRenderPass2KHR(m_device->device(), &rpci, NULL, &rp);
ASSERT_VK_SUCCESS(err);
VkFormat viewFormat = VK_FORMAT_R8_UINT;
VkFramebufferAttachmentImageInfo fbai_info = {};
fbai_info.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENT_IMAGE_INFO;
fbai_info.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
fbai_info.width = 1;
fbai_info.height = 1;
fbai_info.layerCount = 1;
fbai_info.viewFormatCount = 1;
fbai_info.pViewFormats = &viewFormat;
VkFramebufferAttachmentsCreateInfo fba_info = {};
fba_info.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENTS_CREATE_INFO;
fba_info.attachmentImageInfoCount = 1;
fba_info.pAttachmentImageInfos = &fbai_info;
VkFramebufferCreateInfo fb_info = {};
fb_info.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
fb_info.pNext = &fba_info;
fb_info.flags = VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT;
fb_info.renderPass = rp;
fb_info.attachmentCount = 1;
fb_info.pAttachments = NULL;
fb_info.width = fsr_properties.minFragmentShadingRateAttachmentTexelSize.width;
fb_info.height = fsr_properties.minFragmentShadingRateAttachmentTexelSize.height;
fb_info.layers = 1;
VkFramebuffer fb;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkFramebufferCreateInfo-flags-04549");
err = vk::CreateFramebuffer(device(), &fb_info, NULL, &fb);
m_errorMonitor->VerifyFound();
if (err == VK_SUCCESS) {
vk::DestroyFramebuffer(m_device->device(), fb, NULL);
}
vk::DestroyRenderPass(m_device->device(), rp, NULL);
}
TEST_F(VkLayerTest, InvalidFragmentShadingRateImagelessFramebufferDimensions) {
TEST_DESCRIPTION("Specify a fragment shading rate attachment without the correct usage");
// Enable KHR_fragment_shading_rate and all of its required extensions
bool fsr_extensions = InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
if (fsr_extensions) {
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
}
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
fsr_extensions = fsr_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE1_EXTENSION_NAME);
fsr_extensions = fsr_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE2_EXTENSION_NAME);
fsr_extensions = fsr_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MULTIVIEW_EXTENSION_NAME);
fsr_extensions = fsr_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_CREATE_RENDERPASS_2_EXTENSION_NAME);
fsr_extensions = fsr_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_FRAGMENT_SHADING_RATE_EXTENSION_NAME);
if (fsr_extensions) {
m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_MAINTENANCE2_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_MULTIVIEW_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_CREATE_RENDERPASS_2_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_FRAGMENT_SHADING_RATE_EXTENSION_NAME);
} else {
printf("%s requires VK_KHR_fragment_shading_rate.\n", kSkipPrefix);
return;
}
if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_IMAGELESS_FRAMEBUFFER_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_KHR_IMAGE_FORMAT_LIST_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_IMAGELESS_FRAMEBUFFER_EXTENSION_NAME);
} else {
printf("%s Did not find required device extension %s; skipped.\n", kSkipPrefix,
VK_KHR_IMAGELESS_FRAMEBUFFER_EXTENSION_NAME);
return;
}
PFN_vkGetPhysicalDeviceProperties2KHR vkGetPhysicalDeviceProperties2KHR =
(PFN_vkGetPhysicalDeviceProperties2KHR)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceProperties2KHR");
ASSERT_TRUE(vkGetPhysicalDeviceProperties2KHR != nullptr);
VkPhysicalDeviceFragmentShadingRatePropertiesKHR fsr_properties =
LvlInitStruct<VkPhysicalDeviceFragmentShadingRatePropertiesKHR>();
VkPhysicalDeviceProperties2KHR properties2 = LvlInitStruct<VkPhysicalDeviceProperties2KHR>(&fsr_properties);
vkGetPhysicalDeviceProperties2KHR(gpu(), &properties2);
PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR =
(PFN_vkGetPhysicalDeviceFeatures2KHR)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR");
ASSERT_TRUE(vkGetPhysicalDeviceFeatures2KHR != nullptr);
VkPhysicalDeviceImagelessFramebufferFeatures if_features = LvlInitStruct<VkPhysicalDeviceImagelessFramebufferFeatures>();
VkPhysicalDeviceFragmentShadingRateFeaturesKHR fsr_features =
LvlInitStruct<VkPhysicalDeviceFragmentShadingRateFeaturesKHR>(&if_features);
VkPhysicalDeviceFeatures2KHR features2 = LvlInitStruct<VkPhysicalDeviceFeatures2KHR>(&fsr_features);
vkGetPhysicalDeviceFeatures2KHR(gpu(), &features2);
if (fsr_features.attachmentFragmentShadingRate != VK_TRUE) {
printf("%s requires attachmentFragmentShadingRate feature.\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features2));
VkAttachmentReference2 attach = {};
attach.sType = VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_2;
attach.layout = VK_IMAGE_LAYOUT_GENERAL;
attach.attachment = 0;
VkFragmentShadingRateAttachmentInfoKHR fsr_attachment = {};
fsr_attachment.sType = VK_STRUCTURE_TYPE_FRAGMENT_SHADING_RATE_ATTACHMENT_INFO_KHR;
fsr_attachment.shadingRateAttachmentTexelSize = fsr_properties.minFragmentShadingRateAttachmentTexelSize;
fsr_attachment.pFragmentShadingRateAttachment = &attach;
// Create a renderPass with a single fsr attachment
VkSubpassDescription2 subpass = {};
subpass.sType = VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_2;
subpass.pNext = &fsr_attachment;
VkAttachmentDescription2 attach_desc = {};
attach_desc.format = VK_FORMAT_R8_UINT;
attach_desc.samples = VK_SAMPLE_COUNT_1_BIT;
attach_desc.initialLayout = VK_IMAGE_LAYOUT_GENERAL;
attach_desc.finalLayout = VK_IMAGE_LAYOUT_GENERAL;
VkRenderPassCreateInfo2 rpci = {};
rpci.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO_2;
rpci.subpassCount = 1;
rpci.pSubpasses = &subpass;
rpci.attachmentCount = 1;
rpci.pAttachments = &attach_desc;
VkRenderPass rp;
PFN_vkCreateRenderPass2KHR vkCreateRenderPass2KHR =
(PFN_vkCreateRenderPass2KHR)vk::GetDeviceProcAddr(m_device->device(), "vkCreateRenderPass2KHR");
VkResult err = vkCreateRenderPass2KHR(m_device->device(), &rpci, NULL, &rp);
ASSERT_VK_SUCCESS(err);
VkFormat viewFormat = VK_FORMAT_R8_UINT;
VkFramebufferAttachmentImageInfo fbai_info = {};
fbai_info.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENT_IMAGE_INFO;
fbai_info.usage = VK_IMAGE_USAGE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR;
fbai_info.width = 1;
fbai_info.height = 1;
fbai_info.layerCount = 1;
fbai_info.viewFormatCount = 1;
fbai_info.pViewFormats = &viewFormat;
VkFramebufferAttachmentsCreateInfo fba_info = {};
fba_info.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENTS_CREATE_INFO;
fba_info.attachmentImageInfoCount = 1;
fba_info.pAttachmentImageInfos = &fbai_info;
VkFramebufferCreateInfo fb_info = {};
fb_info.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
fb_info.pNext = &fba_info;
fb_info.flags = VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT;
fb_info.renderPass = rp;
fb_info.attachmentCount = 1;
fb_info.pAttachments = NULL;
fb_info.width = fsr_properties.minFragmentShadingRateAttachmentTexelSize.width;
fb_info.height = fsr_properties.minFragmentShadingRateAttachmentTexelSize.height;
fb_info.layers = 1;
VkFramebuffer fb;
fb_info.width = fsr_properties.minFragmentShadingRateAttachmentTexelSize.width * 2;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkFramebufferCreateInfo-flags-04543");
err = vk::CreateFramebuffer(device(), &fb_info, NULL, &fb);
m_errorMonitor->VerifyFound();
if (err == VK_SUCCESS) {
vk::DestroyFramebuffer(m_device->device(), fb, NULL);
}
fb_info.width = fsr_properties.minFragmentShadingRateAttachmentTexelSize.width;
fb_info.height = fsr_properties.minFragmentShadingRateAttachmentTexelSize.height * 2;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkFramebufferCreateInfo-flags-04544");
err = vk::CreateFramebuffer(device(), &fb_info, NULL, &fb);
m_errorMonitor->VerifyFound();
if (err == VK_SUCCESS) {
vk::DestroyFramebuffer(m_device->device(), fb, NULL);
}
fb_info.height = fsr_properties.minFragmentShadingRateAttachmentTexelSize.height;
fbai_info.layerCount = 2;
fb_info.layers = 3;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkFramebufferCreateInfo-flags-04545");
err = vk::CreateFramebuffer(device(), &fb_info, NULL, &fb);
m_errorMonitor->VerifyFound();
if (err == VK_SUCCESS) {
vk::DestroyFramebuffer(m_device->device(), fb, NULL);
}
fb_info.layers = 1;
fbai_info.layerCount = 1;
vk::DestroyRenderPass(m_device->device(), rp, NULL);
if (fsr_properties.layeredShadingRateAttachments == VK_TRUE) {
subpass.viewMask = 0x4;
err = vkCreateRenderPass2KHR(m_device->device(), &rpci, NULL, &rp);
ASSERT_VK_SUCCESS(err);
subpass.viewMask = 0;
fbai_info.layerCount = 2;
fb_info.renderPass = rp;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, kVUIDUndefined);
err = vk::CreateFramebuffer(device(), &fb_info, NULL, &fb);
m_errorMonitor->VerifyFound();
if (err == VK_SUCCESS) {
vk::DestroyFramebuffer(m_device->device(), fb, NULL);
}
fbai_info.layerCount = 1;
vk::DestroyRenderPass(m_device->device(), rp, NULL);
}
}
TEST_F(VkLayerTest, ImagelessFramebufferRenderPassBeginImageView3D) {
TEST_DESCRIPTION("Misuse of VK_IMAGE_VIEW_TYPE_3D.");
if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
} else {
printf("%s Did not find required device extension %s; skipped.\n", kSkipPrefix,
VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
return;
}
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
bool rp2Supported = CheckCreateRenderPass2Support(this, m_device_extension_names);
if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_IMAGELESS_FRAMEBUFFER_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_KHR_MAINTENANCE2_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_IMAGE_FORMAT_LIST_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_IMAGELESS_FRAMEBUFFER_EXTENSION_NAME);
} else {
printf("%s test requires VK_KHR_imageless_framebuffer, not available. Skipping.\n", kSkipPrefix);
return;
}
VkPhysicalDeviceImagelessFramebufferFeaturesKHR physicalDeviceImagelessFramebufferFeatures = {};
physicalDeviceImagelessFramebufferFeatures.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGELESS_FRAMEBUFFER_FEATURES_KHR;
physicalDeviceImagelessFramebufferFeatures.imagelessFramebuffer = VK_TRUE;
VkPhysicalDeviceFeatures2 physicalDeviceFeatures2 = {};
physicalDeviceFeatures2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
physicalDeviceFeatures2.pNext = &physicalDeviceImagelessFramebufferFeatures;
ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &physicalDeviceFeatures2, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT));
uint32_t attachmentWidth = 512;
uint32_t attachmentHeight = 512;
VkFormat attachmentFormats[1] = {VK_FORMAT_R8G8B8A8_UNORM};
VkFormat framebufferAttachmentFormats[1] = {VK_FORMAT_R8G8B8A8_UNORM};
// Create a renderPass with a single attachment
VkAttachmentDescription attachmentDescription = {};
attachmentDescription.format = attachmentFormats[0];
attachmentDescription.samples = VK_SAMPLE_COUNT_1_BIT;
attachmentDescription.finalLayout = VK_IMAGE_LAYOUT_GENERAL;
VkAttachmentReference attachmentReference = {};
attachmentReference.layout = VK_IMAGE_LAYOUT_GENERAL;
VkSubpassDescription subpassDescription = {};
subpassDescription.colorAttachmentCount = 1;
subpassDescription.pColorAttachments = &attachmentReference;
VkRenderPassCreateInfo renderPassCreateInfo = {};
renderPassCreateInfo.subpassCount = 1;
renderPassCreateInfo.pSubpasses = &subpassDescription;
renderPassCreateInfo.attachmentCount = 1;
renderPassCreateInfo.pAttachments = &attachmentDescription;
renderPassCreateInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
VkRenderPass renderPass;
vk::CreateRenderPass(m_device->device(), &renderPassCreateInfo, NULL, &renderPass);
// Create Attachments
VkImageCreateInfo imageCreateInfo = {};
imageCreateInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
imageCreateInfo.pNext = nullptr;
imageCreateInfo.flags = 0;
imageCreateInfo.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
imageCreateInfo.extent.width = attachmentWidth;
imageCreateInfo.extent.height = attachmentHeight;
imageCreateInfo.extent.depth = 1;
imageCreateInfo.arrayLayers = 1;
imageCreateInfo.mipLevels = 1;
imageCreateInfo.imageType = VK_IMAGE_TYPE_3D;
imageCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
imageCreateInfo.format = attachmentFormats[0];
VkImageObj image3D(m_device);
image3D.init(&imageCreateInfo);
VkImageViewCreateInfo imageViewCreateInfo = {};
imageViewCreateInfo.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
imageViewCreateInfo.image = image3D.handle();
imageViewCreateInfo.viewType = VK_IMAGE_VIEW_TYPE_3D;
imageViewCreateInfo.format = attachmentFormats[0];
imageViewCreateInfo.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
imageViewCreateInfo.subresourceRange.baseMipLevel = 0;
imageViewCreateInfo.subresourceRange.levelCount = 1;
imageViewCreateInfo.subresourceRange.baseArrayLayer = 0;
imageViewCreateInfo.subresourceRange.layerCount = 1;
VkImageView imageView3D;
vk::CreateImageView(m_device->device(), &imageViewCreateInfo, NULL, &imageView3D);
VkFramebufferAttachmentImageInfoKHR framebufferAttachmentImageInfo = {};
framebufferAttachmentImageInfo.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENT_IMAGE_INFO_KHR;
framebufferAttachmentImageInfo.flags = 0;
framebufferAttachmentImageInfo.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
framebufferAttachmentImageInfo.width = attachmentWidth;
framebufferAttachmentImageInfo.height = attachmentHeight;
framebufferAttachmentImageInfo.layerCount = 1;
framebufferAttachmentImageInfo.viewFormatCount = 1;
framebufferAttachmentImageInfo.pViewFormats = framebufferAttachmentFormats;
VkFramebufferAttachmentsCreateInfoKHR framebufferAttachmentsCreateInfo = {};
framebufferAttachmentsCreateInfo.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENTS_CREATE_INFO_KHR;
framebufferAttachmentsCreateInfo.attachmentImageInfoCount = 1;
framebufferAttachmentsCreateInfo.pAttachmentImageInfos = &framebufferAttachmentImageInfo;
VkFramebuffer framebuffer;
VkFramebufferCreateInfo framebufferCreateInfo = {};
framebufferCreateInfo.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
framebufferCreateInfo.width = attachmentWidth;
framebufferCreateInfo.height = attachmentHeight;
framebufferCreateInfo.layers = 1;
framebufferCreateInfo.attachmentCount = 1;
framebufferCreateInfo.renderPass = renderPass;
// Try to use 3D Image View without imageless flag
framebufferCreateInfo.pNext = nullptr;
framebufferCreateInfo.flags = 0;
framebufferCreateInfo.pAttachments = &imageView3D;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkFramebufferCreateInfo-flags-04113");
vk::CreateFramebuffer(m_device->device(), &framebufferCreateInfo, nullptr, &framebuffer);
m_errorMonitor->VerifyFound();
framebufferCreateInfo.pNext = &framebufferAttachmentsCreateInfo;
framebufferCreateInfo.flags = VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT_KHR;
framebufferCreateInfo.pAttachments = nullptr;
m_errorMonitor->ExpectSuccess();
vk::CreateFramebuffer(m_device->device(), &framebufferCreateInfo, nullptr, &framebuffer);
m_errorMonitor->VerifyNotFound();
VkRenderPassAttachmentBeginInfoKHR renderPassAttachmentBeginInfo = {};
renderPassAttachmentBeginInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_ATTACHMENT_BEGIN_INFO_KHR;
renderPassAttachmentBeginInfo.pNext = nullptr;
renderPassAttachmentBeginInfo.attachmentCount = 1;
renderPassAttachmentBeginInfo.pAttachments = &imageView3D;
VkRenderPassBeginInfo renderPassBeginInfo = {};
renderPassBeginInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
renderPassBeginInfo.pNext = &renderPassAttachmentBeginInfo;
renderPassBeginInfo.renderPass = renderPass;
renderPassBeginInfo.renderArea.extent.width = attachmentWidth;
renderPassBeginInfo.renderArea.extent.height = attachmentHeight;
renderPassBeginInfo.framebuffer = framebuffer;
// Try to use 3D Image View with imageless flag
TestRenderPassBegin(m_errorMonitor, m_device->device(), m_commandBuffer->handle(), &renderPassBeginInfo, rp2Supported,
"VUID-VkRenderPassAttachmentBeginInfo-pAttachments-04114",
"VUID-VkRenderPassAttachmentBeginInfo-pAttachments-04114");
vk::DestroyRenderPass(m_device->device(), renderPass, nullptr);
vk::DestroyFramebuffer(m_device->device(), framebuffer, nullptr);
vk::DestroyImageView(m_device->device(), imageView3D, nullptr);
}
TEST_F(VkLayerTest, FramebufferAttachmentImageInfoPNext) {
TEST_DESCRIPTION("Begin render pass with missing framebuffer attachment");
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if (!DeviceExtensionSupported(VK_KHR_IMAGELESS_FRAMEBUFFER_EXTENSION_NAME)) {
printf("%s test requires VK_KHR_imageless_framebuffer, not available. Skipping.\n", kSkipPrefix);
return;
}
m_device_extension_names.push_back(VK_KHR_IMAGELESS_FRAMEBUFFER_EXTENSION_NAME);
ASSERT_NO_FATAL_FAILURE(InitState());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkFormat attachment_format = VK_FORMAT_R8G8B8A8_UNORM;
VkFramebufferAttachmentImageInfo fb_fdm = LvlInitStruct<VkFramebufferAttachmentImageInfo>();
fb_fdm.pNext = &fb_fdm;
fb_fdm.usage = VK_IMAGE_USAGE_SAMPLED_BIT;
fb_fdm.width = 64;
fb_fdm.height = 64;
fb_fdm.layerCount = 1;
fb_fdm.viewFormatCount = 1;
fb_fdm.pViewFormats = &attachment_format;
VkFramebufferAttachmentsCreateInfo fb_aci_fdm = LvlInitStruct<VkFramebufferAttachmentsCreateInfo>();
fb_aci_fdm.attachmentImageInfoCount = 1;
fb_aci_fdm.pAttachmentImageInfos = &fb_fdm;
VkFramebufferCreateInfo framebufferCreateInfo = LvlInitStruct<VkFramebufferCreateInfo>(&fb_aci_fdm);
framebufferCreateInfo.width = 64;
framebufferCreateInfo.height = 64;
framebufferCreateInfo.layers = 1;
framebufferCreateInfo.renderPass = m_renderPass;
framebufferCreateInfo.attachmentCount = static_cast<uint32_t>(m_framebuffer_attachments.size());
framebufferCreateInfo.pAttachments = m_framebuffer_attachments.data();
VkFramebuffer framebuffer;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkFramebufferAttachmentImageInfo-pNext-pNext");
vk::CreateFramebuffer(m_device->device(), &framebufferCreateInfo, nullptr, &framebuffer);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, DescriptorUpdateTemplateEntryWithInlineUniformBlock) {
TEST_DESCRIPTION("Test VkDescriptorUpdateTemplateEntry with descriptor type VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT");
// GPDDP2 needed for push descriptors support below
bool gpdp2_support = InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME,
VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_SPEC_VERSION);
if (gpdp2_support) {
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
}
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if (!DeviceExtensionSupported(gpu(), nullptr, VK_KHR_DESCRIPTOR_UPDATE_TEMPLATE_EXTENSION_NAME)) {
printf("%s Descriptor Update Template Extensions not supported, skipped.\n", kSkipPrefix);
return;
}
if (!DeviceExtensionSupported(gpu(), nullptr, VK_EXT_INLINE_UNIFORM_BLOCK_EXTENSION_NAME)) {
printf("%s %s not supported, skipped.\n", kSkipPrefix, VK_EXT_INLINE_UNIFORM_BLOCK_EXTENSION_NAME);
return;
}
m_device_extension_names.push_back(VK_KHR_DESCRIPTOR_UPDATE_TEMPLATE_EXTENSION_NAME);
m_device_extension_names.push_back(VK_EXT_INLINE_UNIFORM_BLOCK_EXTENSION_NAME);
// Note: Includes workaround for some implementations which incorrectly return 0 maxPushDescriptors
bool push_descriptor_support = gpdp2_support &&
DeviceExtensionSupported(gpu(), nullptr, VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME) &&
(GetPushDescriptorProperties(instance(), gpu()).maxPushDescriptors > 0);
if (push_descriptor_support) {
m_device_extension_names.push_back(VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME);
} else {
printf("%s Push Descriptor Extension not supported, push descriptor cases skipped.\n", kSkipPrefix);
}
ASSERT_NO_FATAL_FAILURE(InitState(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT));
std::vector<VkDescriptorSetLayoutBinding> ds_bindings = {
{0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}};
OneOffDescriptorSet descriptor_set(m_device, ds_bindings);
// Create a buffer to be used for invalid updates
VkBufferCreateInfo buff_ci = LvlInitStruct<VkBufferCreateInfo>();
buff_ci.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
buff_ci.size = m_device->props.limits.minUniformBufferOffsetAlignment;
buff_ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
VkBufferObj buffer;
buffer.init(*m_device, buff_ci);
// Relying on the "return nullptr for non-enabled extensions
auto vkCreateDescriptorUpdateTemplateKHR =
(PFN_vkCreateDescriptorUpdateTemplateKHR)vk::GetDeviceProcAddr(m_device->device(), "vkCreateDescriptorUpdateTemplateKHR");
auto vkDestroyDescriptorUpdateTemplateKHR =
(PFN_vkDestroyDescriptorUpdateTemplateKHR)vk::GetDeviceProcAddr(m_device->device(), "vkDestroyDescriptorUpdateTemplateKHR");
auto vkUpdateDescriptorSetWithTemplateKHR =
(PFN_vkUpdateDescriptorSetWithTemplateKHR)vk::GetDeviceProcAddr(m_device->device(), "vkUpdateDescriptorSetWithTemplateKHR");
ASSERT_NE(vkCreateDescriptorUpdateTemplateKHR, nullptr);
ASSERT_NE(vkDestroyDescriptorUpdateTemplateKHR, nullptr);
ASSERT_NE(vkUpdateDescriptorSetWithTemplateKHR, nullptr);
struct SimpleTemplateData {
VkDescriptorBufferInfo buff_info;
};
VkDescriptorUpdateTemplateEntry update_template_entry = {};
update_template_entry.dstBinding = 0;
update_template_entry.dstArrayElement = 2;
update_template_entry.descriptorCount = 1;
update_template_entry.descriptorType = VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT;
update_template_entry.offset = offsetof(SimpleTemplateData, buff_info);
update_template_entry.stride = sizeof(SimpleTemplateData);
auto update_template_ci = LvlInitStruct<VkDescriptorUpdateTemplateCreateInfoKHR>();
update_template_ci.descriptorUpdateEntryCount = 1;
update_template_ci.pDescriptorUpdateEntries = &update_template_entry;
update_template_ci.templateType = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET;
update_template_ci.descriptorSetLayout = descriptor_set.layout_.handle();
VkDescriptorUpdateTemplate update_template = VK_NULL_HANDLE;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkDescriptorUpdateTemplateEntry-descriptor-02226");
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkDescriptorUpdateTemplateEntry-descriptor-02227");
vkCreateDescriptorUpdateTemplateKHR(m_device->device(), &update_template_ci, nullptr, &update_template);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, RenderPassCreateFragmentDensityMapReferenceToInvalidAttachment) {
TEST_DESCRIPTION(
"Test creating a framebuffer with fragment density map reference to an attachment with layer count different from 1");
if (!InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
return;
}
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
ASSERT_NO_FATAL_FAILURE(InitFramework());
if (!DeviceExtensionSupported(gpu(), nullptr, VK_EXT_FRAGMENT_DENSITY_MAP_EXTENSION_NAME)) {
printf("%s %s extension not supported skipped.\n", kSkipPrefix, VK_EXT_FRAGMENT_DENSITY_MAP_EXTENSION_NAME);
return;
}
m_device_extension_names.push_back(VK_EXT_FRAGMENT_DENSITY_MAP_EXTENSION_NAME);
VkPhysicalDeviceFragmentDensityMapFeaturesEXT fdm_features = LvlInitStruct<VkPhysicalDeviceFragmentDensityMapFeaturesEXT>();
VkPhysicalDeviceFeatures2 features2 = LvlInitStruct<VkPhysicalDeviceFeatures2>(&fdm_features);
fdm_features.fragmentDensityMap = true;
ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features2, 0));
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkAttachmentReference ref;
ref.attachment = 0;
ref.layout = VK_IMAGE_LAYOUT_FRAGMENT_DENSITY_MAP_OPTIMAL_EXT;
VkRenderPassFragmentDensityMapCreateInfoEXT rpfdmi = LvlInitStruct<VkRenderPassFragmentDensityMapCreateInfoEXT>();
rpfdmi.fragmentDensityMapAttachment = ref;
VkAttachmentDescription attach = {};
attach.format = VK_FORMAT_R8G8_UNORM;
attach.samples = VK_SAMPLE_COUNT_1_BIT;
attach.loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
attach.storeOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
attach.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
attach.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
attach.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
attach.finalLayout = VK_IMAGE_LAYOUT_FRAGMENT_DENSITY_MAP_OPTIMAL_EXT;
VkSubpassDescription subpass = {};
subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
subpass.inputAttachmentCount = 1;
subpass.pInputAttachments = &ref;
VkRenderPassCreateInfo rpci = LvlInitStruct<VkRenderPassCreateInfo>(&rpfdmi);
rpci.attachmentCount = 1;
rpci.pAttachments = &attach;
rpci.subpassCount = 1;
rpci.pSubpasses = &subpass;
VkRenderPass renderPass;
vk::CreateRenderPass(device(), &rpci, nullptr, &renderPass);
VkImageCreateInfo image_create_info = LvlInitStruct<VkImageCreateInfo>();
image_create_info.imageType = VK_IMAGE_TYPE_2D;
image_create_info.format = VK_FORMAT_R8G8_UNORM;
image_create_info.extent.width = 32;
image_create_info.extent.height = 32;
image_create_info.extent.depth = 1;
image_create_info.mipLevels = 1;
image_create_info.arrayLayers = 4;
image_create_info.samples = VK_SAMPLE_COUNT_1_BIT;
image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL;
image_create_info.usage = VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;
image_create_info.flags = 0;
VkImageObj image(m_device);
image.Init(image_create_info);
VkImageView imageView = image.targetView(VK_FORMAT_R8G8_UNORM, VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 4);
VkFramebufferCreateInfo fb_info = LvlInitStruct<VkFramebufferCreateInfo>();
fb_info.renderPass = renderPass;
fb_info.attachmentCount = 1;
fb_info.pAttachments = &imageView;
fb_info.width = 32;
fb_info.height = 32;
fb_info.layers = 1;
VkFramebuffer framebuffer;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkFramebufferCreateInfo-pAttachments-02744");
vk::CreateFramebuffer(device(), &fb_info, nullptr, &framebuffer);
m_errorMonitor->VerifyFound();
}
| 1 | 19,357 | Actually, I'm not sure why lines 1663-1668 are here. Looks like Push descriptors aren't used in the test at all. @ziga-lunarg? | KhronosGroup-Vulkan-ValidationLayers | cpp |
@@ -250,7 +250,7 @@ public class NodeJSGapicContext extends GapicContext implements NodeJSContext {
*/
public List<String> fieldPropertyComment(Field field) {
String commentType = fieldTypeCardinalityComment(field);
- String fieldName = wrapIfKeywordOrBuiltIn(field.getSimpleName());
+ String fieldName = lowerUnderscoreToLowerCamel(field.getSimpleName());
return convertToCommentedBlock(
fieldComment(String.format("@property {%s} %s", commentType, fieldName), null, field));
} | 1 | /* Copyright 2016 Google Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.api.codegen.nodejs;
import com.google.api.codegen.ApiConfig;
import com.google.api.codegen.GapicContext;
import com.google.api.codegen.MethodConfig;
import com.google.api.codegen.transformer.ApiMethodTransformer;
import com.google.api.codegen.transformer.GrpcStubTransformer;
import com.google.api.codegen.transformer.MethodTransformerContext;
import com.google.api.codegen.transformer.ModelTypeTable;
import com.google.api.codegen.transformer.SurfaceTransformerContext;
import com.google.api.codegen.transformer.nodejs.NodeJSModelTypeNameConverter;
import com.google.api.codegen.transformer.nodejs.NodeJSSurfaceNamer;
import com.google.api.codegen.util.nodejs.NodeJSTypeTable;
import com.google.api.codegen.viewmodel.ApiMethodView;
import com.google.api.codegen.viewmodel.GrpcStubView;
import com.google.api.tools.framework.aspects.documentation.model.DocumentationUtil;
import com.google.api.tools.framework.aspects.documentation.model.ElementDocumentationAttribute;
import com.google.api.tools.framework.model.Field;
import com.google.api.tools.framework.model.Interface;
import com.google.api.tools.framework.model.MessageType;
import com.google.api.tools.framework.model.Method;
import com.google.api.tools.framework.model.Model;
import com.google.api.tools.framework.model.ProtoContainerElement;
import com.google.api.tools.framework.model.ProtoElement;
import com.google.api.tools.framework.model.ProtoFile;
import com.google.api.tools.framework.model.TypeRef;
import com.google.api.tools.framework.model.TypeRef.Cardinality;
import com.google.common.base.Splitter;
import com.google.common.base.Strings;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.protobuf.DescriptorProtos.FieldDescriptorProto.Type;
import java.util.List;
import javax.annotation.Nullable;
/**
* A GapicContext specialized for NodeJS.
*/
public class NodeJSGapicContext extends GapicContext implements NodeJSContext {
public NodeJSGapicContext(Model model, ApiConfig apiConfig) {
super(model, apiConfig);
}
// Snippet Helpers
// ===============
/**
* Return ApiMethodView for sample gen.
*
* NOTE: Temporary solution to use MVVM with just sample gen. This class
* will eventually go away when code gen also converts to MVVM.
*/
public ApiMethodView getApiMethodView(Interface service, Method method) {
SurfaceTransformerContext context = getSurfaceTransformerContextFromService(service);
MethodTransformerContext methodContext = context.asMethodContext(method);
ApiMethodTransformer apiMethodTransformer = new ApiMethodTransformer();
return apiMethodTransformer.generateDynamicLangApiMethod(methodContext);
}
/**
* Return GrpcStubViews for mixins.
*
* NOTE: Temporary solution to use MVVM with just sample gen. This class
* will eventually go away when code gen also converts to MVVM.
*/
public List<GrpcStubView> getStubs(Interface service) {
GrpcStubTransformer grpcStubTransformer = new GrpcStubTransformer();
SurfaceTransformerContext context = getSurfaceTransformerContextFromService(service);
return grpcStubTransformer.generateGrpcStubs(context);
}
private String getGrpcClientVariableNameFor(Interface service, Method method) {
NodeJSSurfaceNamer namer = new NodeJSSurfaceNamer(getApiConfig().getPackageName());
String jsMethodName = namer.getApiMethodName(method);
for (GrpcStubView stub : getStubs(service)) {
for (String methodName : stub.methodNames()) {
if (jsMethodName.equals(methodName)) {
return stub.grpcClientVariableName();
}
}
}
throw new IllegalArgumentException(
"Method " + method.getFullName() + " cannot be found in the stubs");
}
private SurfaceTransformerContext getSurfaceTransformerContextFromService(Interface service) {
ModelTypeTable modelTypeTable =
new ModelTypeTable(
new NodeJSTypeTable(getApiConfig().getPackageName()),
new NodeJSModelTypeNameConverter(getApiConfig().getPackageName()));
return SurfaceTransformerContext.create(
service,
getApiConfig(),
modelTypeTable,
new NodeJSSurfaceNamer(getApiConfig().getPackageName()),
new NodeJSFeatureConfig());
}
public String filePath(ProtoFile file) {
return file.getSimpleName().replace(".proto", "_pb2.js");
}
/**
* Return comments lines for a given proto element, extracted directly from the proto doc
*/
public List<String> defaultComments(ProtoElement element) {
if (!element.hasAttribute(ElementDocumentationAttribute.KEY)) {
return ImmutableList.<String>of();
}
return convertToCommentedBlock(
JSDocCommentFixer.jsdocify(DocumentationUtil.getScopedDescription(element)));
}
/**
* The package name of the grpc module for the API.
*/
public String grpcClientName(Interface service) {
return "grpc-" + service.getFile().getFullName().replace('.', '-');
}
public boolean isGcloud() {
return NodeJSUtils.isGcloud(getApiConfig());
}
/**
* The namespace (full package name) for the service.
*/
public String getNamespace(Interface service) {
String fullName = service.getFullName();
int slash = fullName.lastIndexOf('.');
return fullName.substring(0, slash);
}
/**
* The name for the module for this vkit module. This assumes that the service's
* full name will be in the format of 'google.some.apiname.version.ServiceName',
* and extracts the 'apiname' and 'version' part and combine them to lower-camelcased
* style (like pubsubV1).
*/
public String getModuleName(Interface service) {
List<String> names = Splitter.on(".").splitToList(service.getFullName());
return names.get(names.size() - 3) + lowerUnderscoreToUpperCamel(names.get(names.size() - 2));
}
/**
* Returns the major version part in the API namespace. This assumes that the service's
* full name will be in the format of 'google.some.apiname.version.ServiceName', and
* extracts the 'version' part.
*/
public String getApiVersion(Interface service) {
List<String> names = Splitter.on(".").splitToList(service.getFullName());
return names.get(names.size() - 2);
}
/**
* Returns the filename for documenting messages.
*/
public String getDocFilename(ProtoFile file) {
String filePath = file.getSimpleName().replace(".proto", ".js");
if (isExternalFile(file)) {
filePath = filePath.replaceAll("/", "_");
} else {
int lastSlash = filePath.lastIndexOf('/');
if (lastSlash >= 0) {
filePath = filePath.substring(lastSlash + 1);
}
}
return "doc_" + filePath;
}
/**
* Returns true if the proto file is external to the current package.
* Currently, it only checks the file path and thinks it is external if
* the file is well-known common protos.
*/
public boolean isExternalFile(ProtoFile file) {
String filePath = file.getSimpleName();
for (String commonPath : COMMON_PROTO_PATHS) {
if (filePath.startsWith(commonPath)) {
return true;
}
}
return false;
}
public String getFileURL(ProtoFile file) {
String filePath = file.getSimpleName();
if (filePath.startsWith("google/protobuf")) {
return "https://github.com/google/protobuf/blob/master/src/" + filePath;
} else {
return "https://github.com/googleapis/googleapis/blob/master/" + filePath;
}
}
/**
* Returns type information for a field in JSDoc style.
*/
private String fieldTypeCardinalityComment(Field field) {
TypeRef type = field.getType();
String cardinalityComment = "";
if (type.getCardinality() == Cardinality.REPEATED) {
if (type.isMap()) {
String keyType = jsTypeName(type.getMapKeyField().getType());
String valueType = jsTypeName(type.getMapValueField().getType());
return String.format("Object.<%s, %s>", keyType, valueType);
} else {
cardinalityComment = "[]";
}
}
String typeComment = jsTypeName(field.getType());
return String.format("%s%s", typeComment, cardinalityComment);
}
/**
* Returns a JSDoc comment string for the field as a parameter to a function.
*/
private String fieldParamComment(Field field, String paramComment, boolean isOptional) {
String commentType = fieldTypeCardinalityComment(field);
String fieldName = wrapIfKeywordOrBuiltIn(lowerUnderscoreToLowerCamel(field.getSimpleName()));
if (isOptional) {
fieldName = "options." + fieldName;
commentType = commentType + "=";
}
return fieldComment(
String.format("@param {%s} %s", commentType, fieldName), paramComment, field);
}
/**
* Returns a JSDoc comment string for the field as an attribute of a message.
*/
public List<String> fieldPropertyComment(Field field) {
String commentType = fieldTypeCardinalityComment(field);
String fieldName = wrapIfKeywordOrBuiltIn(field.getSimpleName());
return convertToCommentedBlock(
fieldComment(String.format("@property {%s} %s", commentType, fieldName), null, field));
}
private String fieldComment(String comment, String paramComment, Field field) {
if (paramComment == null) {
paramComment = DocumentationUtil.getScopedDescription(field);
}
if (!Strings.isNullOrEmpty(paramComment)) {
paramComment = JSDocCommentFixer.jsdocify(paramComment);
comment += "\n " + paramComment.replaceAll("(\\r?\\n)", "\n ");
}
if (field.getType().isMessage() && !field.getType().isMap()) {
if (!Strings.isNullOrEmpty(paramComment)) {
comment += "\n";
}
comment +=
"\n This object should have the same structure as "
+ linkForMessage(field.getType().getMessageType());
} else if (field.getType().isEnum()) {
if (!Strings.isNullOrEmpty(paramComment)) {
comment += "\n";
}
comment +=
"\n The number should be among the values of "
+ linkForMessage(field.getType().getEnumType());
}
return comment + "\n";
}
/**
* Return JSDoc callback comment and return type comment for the given method.
*/
@Nullable
private String returnTypeComment(Method method, MethodConfig config) {
if (config.isPageStreaming()) {
String callbackMessage =
"@param {function(?Error, ?"
+ jsTypeName(method.getOutputType())
+ ", ?"
+ jsTypeName(config.getPageStreaming().getResponseTokenField().getType())
+ ")=} callback\n"
+ " When specified, the results are not streamed but this callback\n"
+ " will be called with the response object representing "
+ linkForMessage(method.getOutputMessage())
+ ".\n"
+ " The third item will be set if the response contains the token for the further results\n"
+ " and can be reused to `pageToken` field in the options in the next request.";
TypeRef resourceType = config.getPageStreaming().getResourcesField().getType();
String resourceTypeName;
if (resourceType.isMessage()) {
resourceTypeName =
"an object representing\n " + linkForMessage(resourceType.getMessageType());
} else if (resourceType.isEnum()) {
resourceTypeName = "a number of\n " + linkForMessage(resourceType.getEnumType());
} else {
resourceTypeName = "a " + jsTypeName(resourceType);
}
return callbackMessage
+ "\n@returns {Stream|gax.EventEmitter}\n"
+ " An object stream which emits "
+ resourceTypeName
+ " on 'data' event.\n"
+ " When the callback is specified or streaming is suppressed through options,\n"
+ " it will return an event emitter to handle the call status and the callback\n"
+ " will be called with the response object.";
}
MessageType returnMessageType = method.getOutputMessage();
boolean isEmpty = returnMessageType.getFullName().equals("google.protobuf.Empty");
String classInfo = jsTypeName(method.getOutputType());
String callbackType =
isEmpty ? "function(?Error)" : String.format("function(?Error, ?%s)", classInfo);
String callbackMessage =
"@param {"
+ callbackType
+ "=} callback\n"
+ " The function which will be called with the result of the API call.";
if (!isEmpty) {
callbackMessage +=
"\n\n The second parameter to the callback is an object representing "
+ linkForMessage(returnMessageType);
}
String returnMessage =
"@returns {"
+ (config.isBundling() ? "gax.BundleEventEmitter" : "gax.EventEmitter")
+ "} - the event emitter to handle the call\n"
+ " status.";
if (config.isBundling()) {
returnMessage +=
" When isBundling: false is specified in the options, it still returns\n"
+ " a gax.BundleEventEmitter but the API is immediately invoked, so it behaves same\n"
+ " as a gax.EventEmitter does.";
}
return callbackMessage + "\n" + returnMessage;
}
/**
* Return the list of messages within element which should be documented in Node.JS.
*/
public ImmutableList<MessageType> filterDocumentingMessages(ProtoContainerElement element) {
ImmutableList.Builder<MessageType> builder = ImmutableList.builder();
for (MessageType msg : element.getMessages()) {
// Doesn't have to document map entries in Node.JS because Object is used.
if (!msg.isMapEntry()) {
builder.add(msg);
}
}
return builder.build();
}
/**
* Return comments lines for a given method, consisting of proto doc and parameter type
* documentation.
*/
public List<String> methodComments(Interface service, Method msg) {
MethodConfig config = getApiConfig().getInterfaceConfig(service).getMethodConfig(msg);
// Generate parameter types
StringBuilder paramTypesBuilder = new StringBuilder();
for (Field field : config.getRequiredFields()) {
paramTypesBuilder.append(fieldParamComment(field, null, false));
}
paramTypesBuilder.append(
"@param {Object=} options\n"
+ " Optional parameters. You can override the default settings for this call, e.g, timeout,\n"
+ " retries, paginations, etc. See [gax.CallOptions]{@link "
+ "https://googleapis.github.io/gax-nodejs/global.html#CallOptions} for the details.");
Iterable<Field> optionalParams = removePageTokenFromFields(config.getOptionalFields(), config);
if (optionalParams.iterator().hasNext()) {
paramTypesBuilder.append(
"\n\n In addition, options may contain the following optional parameters.\n");
for (Field field : optionalParams) {
if (config.isPageStreaming()
&& field.equals((config.getPageStreaming().getPageSizeField()))) {
paramTypesBuilder.append(
fieldParamComment(
field,
"The maximum number of resources contained in the underlying API\n"
+ "response. If page streaming is performed per-resource, this\n"
+ "parameter does not affect the return value. If page streaming is\n"
+ "performed per-page, this determines the maximum number of\n"
+ "resources in a page.",
true));
} else {
paramTypesBuilder.append(fieldParamComment(field, null, true));
}
}
}
String paramTypes = paramTypesBuilder.toString();
String returnType = returnTypeComment(msg, config);
// Generate comment contents
StringBuilder contentBuilder = new StringBuilder();
if (msg.hasAttribute(ElementDocumentationAttribute.KEY)) {
contentBuilder.append(
JSDocCommentFixer.jsdocify(DocumentationUtil.getScopedDescription(msg)));
if (!Strings.isNullOrEmpty(paramTypes)) {
contentBuilder.append("\n\n");
}
}
contentBuilder.append(paramTypes);
if (returnType != null) {
contentBuilder.append("\n" + returnType);
}
return convertToCommentedBlock(contentBuilder.toString());
}
/**
* Return a non-conflicting safe name if name is a JS reserved word.
*/
public String wrapIfKeywordOrBuiltIn(String name) {
if (KEYWORD_BUILT_IN_SET.contains(name)) {
return name + "_";
}
return name;
}
/**
* Returns the name of JS type for the given typeRef.
*/
public String jsTypeName(TypeRef typeRef) {
switch (typeRef.getKind()) {
case TYPE_MESSAGE:
return "Object";
case TYPE_ENUM:
return "number";
default:
{
String name = PRIMITIVE_TYPE_NAMES.get(typeRef.getKind());
if (!Strings.isNullOrEmpty(name)) {
return name;
}
throw new IllegalArgumentException("unknown type kind: " + typeRef.getKind());
}
}
}
/**
* Returns the name of the JS type name for arguejs parameter definitions.
*/
public String getFieldType(Field field) {
TypeRef typeRef = field.getType();
if (typeRef.isMap()) {
return "Object";
}
if (typeRef.getCardinality() == Cardinality.REPEATED) {
return "Array";
}
switch (typeRef.getKind()) {
case TYPE_MESSAGE:
return "Object";
case TYPE_BOOL:
return "Boolean";
case TYPE_STRING:
case TYPE_BYTES:
return "String";
default:
// Numeric types and enums.
return "Number";
}
}
/**
* Returns the JSDoc format of link to the element.
*/
public String linkForMessage(ProtoElement element) {
if (isExternalFile(element.getFile())) {
String fullName = element.getFullName();
return String.format("[%s]{@link external:\"%s\"}", fullName, fullName);
} else {
String simpleName = element.getSimpleName();
return String.format("[%s]{@link %s}", simpleName, simpleName);
}
}
/**
* Returns the JavaScript representation of the function to return the byte length.
*/
public String getByteLengthFunction(Interface service, Method method, TypeRef typeRef) {
switch (typeRef.getKind()) {
case TYPE_MESSAGE:
return "gax.createByteLengthFunction(grpcClients."
+ getGrpcClientVariableNameFor(service, method)
+ "."
+ typeRef.getMessageType().getFullName()
+ ")";
case TYPE_STRING:
case TYPE_BYTES:
return "function(s) { return s.length; }";
default:
// There is no easy way to say the actual length of the numeric fields.
// For now throwing an exception.
throw new IllegalArgumentException(
"Can't determine the byte length function for " + typeRef.getKind());
}
}
/**
* Convert the content string into a commented block that can be directly printed out in the
* generated JS files.
*/
private List<String> convertToCommentedBlock(String content) {
if (Strings.isNullOrEmpty(content)) {
return ImmutableList.<String>of();
}
ImmutableList.Builder<String> builder = ImmutableList.builder();
for (String comment : Splitter.on("\n").splitToList(content)) {
builder.add(comment);
}
return builder.build();
}
// Constants
// =========
/**
* A map from primitive types to its default value.
*/
private static final ImmutableMap<Type, String> DEFAULT_VALUE_MAP =
ImmutableMap.<Type, String>builder()
.put(Type.TYPE_BOOL, "false")
.put(Type.TYPE_DOUBLE, "0.0")
.put(Type.TYPE_FLOAT, "0.0")
.put(Type.TYPE_INT64, "0")
.put(Type.TYPE_UINT64, "0")
.put(Type.TYPE_SINT64, "0")
.put(Type.TYPE_FIXED64, "0")
.put(Type.TYPE_SFIXED64, "0")
.put(Type.TYPE_INT32, "0")
.put(Type.TYPE_UINT32, "0")
.put(Type.TYPE_SINT32, "0")
.put(Type.TYPE_FIXED32, "0")
.put(Type.TYPE_SFIXED32, "0")
.put(Type.TYPE_STRING, "\'\'")
.put(Type.TYPE_BYTES, "\'\'")
.build();
private static final ImmutableMap<Type, String> PRIMITIVE_TYPE_NAMES =
ImmutableMap.<Type, String>builder()
.put(Type.TYPE_BOOL, "boolean")
.put(Type.TYPE_DOUBLE, "number")
.put(Type.TYPE_FLOAT, "number")
.put(Type.TYPE_INT64, "number")
.put(Type.TYPE_UINT64, "number")
.put(Type.TYPE_SINT64, "number")
.put(Type.TYPE_FIXED64, "number")
.put(Type.TYPE_SFIXED64, "number")
.put(Type.TYPE_INT32, "number")
.put(Type.TYPE_UINT32, "number")
.put(Type.TYPE_SINT32, "number")
.put(Type.TYPE_FIXED32, "number")
.put(Type.TYPE_SFIXED32, "number")
.put(Type.TYPE_STRING, "string")
.put(Type.TYPE_BYTES, "string")
.build();
/**
* A set of ECMAScript 2016 reserved words. See
* https://tc39.github.io/ecma262/2016/#sec-reserved-words
*/
private static final ImmutableSet<String> KEYWORD_BUILT_IN_SET =
ImmutableSet.<String>builder()
.add(
"break",
"do",
"in",
"typeof",
"case",
"else",
"instanceof",
"var",
"catch",
"export",
"new",
"void",
"class",
"extends",
"return",
"while",
"const",
"finally",
"super",
"with",
"continue",
"for",
"switch",
"yield",
"debugger",
"function",
"this",
"default",
"if",
"throw",
"delete",
"import",
"try",
"let",
"static",
"enum",
"await",
"implements",
"package",
"protected",
"interface",
"private",
"public",
"null",
"true",
"false",
// common parameters passed to methods.
"options",
"callback",
// parameters used in CallOptions.
"timeout",
"retry",
"flattenPages",
"pageToken",
"isBundling")
.build();
private static final ImmutableSet<String> COMMON_PROTO_PATHS =
ImmutableSet.<String>builder()
.add(
"google/api",
"google/bytestream",
"google/logging/type",
"google/longrunning",
"google/protobuf",
"google/rpc",
"google/type")
.build();
}
| 1 | 18,074 | Don't we still want to check if it's a keyword? | googleapis-gapic-generator | java
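A minimal standalone sketch of what the reviewer seems to be asking: camel-case the proto field name first, then still escape it if it collides with a JS keyword or built-in, the way fieldParamComment already combines the two helpers. The class name, keyword set, and helper bodies below are illustrative stand-ins, not the generator's actual code.
import java.util.Set;
public class FieldNameSketch {
    // Illustrative stand-in for KEYWORD_BUILT_IN_SET in NodeJSGapicContext.
    private static final Set<String> JS_KEYWORDS = Set.of("delete", "import", "options", "callback");
    // Same behavior as lowerUnderscoreToLowerCamel: "page_token" -> "pageToken".
    static String lowerUnderscoreToLowerCamel(String name) {
        StringBuilder out = new StringBuilder();
        boolean upperNext = false;
        for (char c : name.toCharArray()) {
            if (c == '_') {
                upperNext = true;
            } else {
                out.append(upperNext ? Character.toUpperCase(c) : c);
                upperNext = false;
            }
        }
        return out.toString();
    }
    // Camel-case first, then apply the wrapIfKeywordOrBuiltIn-style escaping.
    static String propertyName(String protoFieldName) {
        String camel = lowerUnderscoreToLowerCamel(protoFieldName);
        return JS_KEYWORDS.contains(camel) ? camel + "_" : camel;
    }
    public static void main(String[] args) {
        System.out.println(propertyName("page_token")); // pageToken
        System.out.println(propertyName("delete"));     // delete_
    }
}
In the patched line this would correspond to wrapping the new lowerUnderscoreToLowerCamel(...) call in wrapIfKeywordOrBuiltIn(...), as fieldParamComment does elsewhere in the same class.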
@@ -14,7 +14,7 @@ class AttachmentDecorator < Draper::Decorator
class: "image-with-border"
)
else
- "<br><table><tr><td><strong><u>#{link_text}</u></strong></td></tr></table>"
+ "#{link_text}"
end
end
| 1 | class AttachmentDecorator < Draper::Decorator
include Rails.application.routes.url_helpers
include ActionView::Helpers::AssetTagHelper
include ActionView::Helpers::UrlHelper
default_url_options[:host] = ::Rails.application.routes.default_url_options[:host]
delegate_all
def file_preview
if file.content_type =~ /\Aimage/
image_tag(
object.url,
alt: "",
class: "image-with-border"
)
else
"<br><table><tr><td><strong><u>#{link_text}</u></strong></td></tr></table>"
end
end
private
def link_text
I18n.t(
"mailer.attachment_mailer.new_attachment_notification.attachment_cta",
attachment_name: file.original_filename
)
end
end
| 1 | 17,003 | this can just be `link_text` :tomato: | 18F-C2 | rb
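A minimal sketch of the simplification the reviewer is pointing at: the else branch's string interpolation adds nothing, so the method can return the link_text helper's value directly (assuming the rest of file_preview stays as in the patch).
# Hypothetical simplified file_preview per the review comment above.
def file_preview
  if file.content_type =~ /\Aimage/
    image_tag(object.url, alt: "", class: "image-with-border")
  else
    link_text
  end
end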
@@ -2045,3 +2045,15 @@ class SeriesTest(ReusedSQLTestCase, SQLTestUtils):
kser = ks.from_pandas(pser)
expected = pser
self.assert_eq(kser.explode(), expected)
+
+ def test_argsort(self):
+ pser = pd.Series(np.random.rand(5), index=np.random.rand(5), name="Koalas")
+ kser = ks.from_pandas(pser)
+ self.assert_eq(pser.argsort().sort_index(), kser.argsort().sort_index())
+
+ # MultiIndex
+ pser.index = pd.MultiIndex.from_tuples(
+ [("a", "v"), ("b", "w"), ("c", "x"), ("d", "y"), ("e", "z")]
+ )
+ kser = ks.from_pandas(pser)
+ self.assert_eq(pser.argsort().sort_index(), kser.argsort().sort_index()) | 1 | #
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import base64
import unittest
from collections import defaultdict
from distutils.version import LooseVersion
import inspect
from io import BytesIO
from itertools import product
from datetime import datetime, timedelta
import matplotlib
matplotlib.use("agg")
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
import pyspark
from pyspark.ml.linalg import SparseVector
from databricks import koalas as ks
from databricks.koalas import Series
from databricks.koalas.testing.utils import (
ReusedSQLTestCase,
SQLTestUtils,
SPARK_CONF_ARROW_ENABLED,
)
from databricks.koalas.exceptions import PandasNotImplementedError
from databricks.koalas.missing.series import MissingPandasLikeSeries
class SeriesTest(ReusedSQLTestCase, SQLTestUtils):
@property
def pser(self):
return pd.Series([1, 2, 3, 4, 5, 6, 7], name="x")
@property
def kser(self):
return ks.from_pandas(self.pser)
def test_series(self):
kser = self.kser
self.assertTrue(isinstance(kser, Series))
self.assert_eq(kser + 1, self.pser + 1)
def test_series_tuple_name(self):
pser = self.pser
pser.name = ("x", "a")
kser = ks.from_pandas(pser)
self.assert_eq(kser, pser)
self.assert_eq(kser.name, pser.name)
pser.name = ("y", "z")
kser.name = ("y", "z")
self.assert_eq(kser, pser)
self.assert_eq(kser.name, pser.name)
def test_repr_cache_invalidation(self):
# If there is any cache, inplace operations should invalidate it.
s = ks.range(10)["id"]
s.__repr__()
s.rename("a", inplace=True)
self.assertEqual(s.__repr__(), s.rename("a").__repr__())
def test_empty_series(self):
a = pd.Series([], dtype="i1")
b = pd.Series([], dtype="str")
self.assert_eq(ks.from_pandas(a), a)
self.assertRaises(ValueError, lambda: ks.from_pandas(b))
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
self.assert_eq(ks.from_pandas(a), a)
self.assertRaises(ValueError, lambda: ks.from_pandas(b))
def test_all_null_series(self):
a = pd.Series([None, None, None], dtype="float64")
b = pd.Series([None, None, None], dtype="str")
self.assert_eq(ks.from_pandas(a).dtype, a.dtype)
self.assertTrue(ks.from_pandas(a).to_pandas().isnull().all())
self.assertRaises(ValueError, lambda: ks.from_pandas(b))
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
self.assert_eq(ks.from_pandas(a).dtype, a.dtype)
self.assertTrue(ks.from_pandas(a).to_pandas().isnull().all())
self.assertRaises(ValueError, lambda: ks.from_pandas(b))
def test_head(self):
kser = self.kser
pser = self.pser
self.assert_eq(kser.head(3), pser.head(3))
self.assert_eq(kser.head(0), pser.head(0))
self.assert_eq(kser.head(-3), pser.head(-3))
self.assert_eq(kser.head(-10), pser.head(-10))
def test_rename(self):
pser = pd.Series([1, 2, 3, 4, 5, 6, 7], name="x")
kser = ks.from_pandas(pser)
pser.name = "renamed"
kser.name = "renamed"
self.assertEqual(kser.name, "renamed")
self.assert_eq(kser, pser)
# pser.name = None
# kser.name = None
# self.assertEqual(kser.name, None)
# self.assert_eq(kser, pser)
pidx = pser.index
kidx = kser.index
pidx.name = "renamed"
kidx.name = "renamed"
self.assertEqual(kidx.name, "renamed")
self.assert_eq(kidx, pidx)
def test_rename_method(self):
# Series name
pser = pd.Series([1, 2, 3, 4, 5, 6, 7], name="x")
kser = ks.from_pandas(pser)
self.assert_eq(kser.rename("y"), pser.rename("y"))
self.assertEqual(kser.name, "x") # no mutation
self.assert_eq(kser.rename(), pser.rename())
self.assert_eq((kser.rename("y") + 1).head(), (pser.rename("y") + 1).head())
kser.rename("z", inplace=True)
pser.rename("z", inplace=True)
self.assertEqual(kser.name, "z")
self.assert_eq(kser, pser)
# Series index
# pser = pd.Series(['a', 'b', 'c', 'd', 'e', 'f', 'g'], name='x')
# kser = ks.from_pandas(s)
# TODO: index
# res = kser.rename(lambda x: x ** 2)
# self.assert_eq(res, pser.rename(lambda x: x ** 2))
# res = kser.rename(pser)
# self.assert_eq(res, pser.rename(pser))
# res = kser.rename(kser)
# self.assert_eq(res, pser.rename(pser))
# res = kser.rename(lambda x: x**2, inplace=True)
# self.assertis(res, kser)
# s.rename(lambda x: x**2, inplace=True)
# self.assert_eq(kser, pser)
def test_or(self):
pdf = pd.DataFrame(
{
"left": [True, False, True, False, np.nan, np.nan, True, False, np.nan],
"right": [True, False, False, True, True, False, np.nan, np.nan, np.nan],
}
)
kdf = ks.from_pandas(pdf)
self.assert_eq(pdf["left"] | pdf["right"], kdf["left"] | kdf["right"])
def test_and(self):
pdf = pd.DataFrame(
{
"left": [True, False, True, False, np.nan, np.nan, True, False, np.nan],
"right": [True, False, False, True, True, False, np.nan, np.nan, np.nan],
}
)
kdf = ks.from_pandas(pdf)
self.assert_eq(
pdf["left"] & pdf["right"], kdf["left"] & kdf["right"],
)
def test_to_numpy(self):
pser = pd.Series([1, 2, 3, 4, 5, 6, 7], name="x")
kser = ks.from_pandas(pser)
np.testing.assert_equal(kser.to_numpy(), pser.values)
def test_isin(self):
pser = pd.Series(["lama", "cow", "lama", "beetle", "lama", "hippo"], name="animal")
kser = ks.from_pandas(pser)
self.assert_eq(kser.isin(["cow", "lama"]), pser.isin(["cow", "lama"]))
self.assert_eq(kser.isin({"cow"}), pser.isin({"cow"}))
msg = "only list-like objects are allowed to be passed to isin()"
with self.assertRaisesRegex(TypeError, msg):
kser.isin(1)
def test_drop_duplicates(self):
pdf = pd.DataFrame({"animal": ["lama", "cow", "lama", "beetle", "lama", "hippo"]})
kdf = ks.from_pandas(pdf)
pser = pdf.animal
kser = kdf.animal
self.assert_eq(kser.drop_duplicates().sort_index(), pser.drop_duplicates().sort_index())
self.assert_eq(
kser.drop_duplicates(keep="last").sort_index(),
pser.drop_duplicates(keep="last").sort_index(),
)
# inplace
kser.drop_duplicates(keep=False, inplace=True)
pser.drop_duplicates(keep=False, inplace=True)
self.assert_eq(kser.sort_index(), pser.sort_index())
self.assert_eq(kdf, pdf)
def test_reindex(self):
index = ["A", "B", "C", "D", "E"]
pser = pd.Series([1.0, 2.0, 3.0, 4.0, None], index=index, name="x")
kser = ks.from_pandas(pser)
self.assert_eq(pser, kser)
self.assert_eq(
pser.reindex(["A", "B"]).sort_index(), kser.reindex(["A", "B"]).sort_index(),
)
self.assert_eq(
pser.reindex(["A", "B", "2", "3"]).sort_index(),
kser.reindex(["A", "B", "2", "3"]).sort_index(),
)
self.assert_eq(
pser.reindex(["A", "E", "2"], fill_value=0).sort_index(),
kser.reindex(["A", "E", "2"], fill_value=0).sort_index(),
)
self.assertRaises(TypeError, lambda: kser.reindex(index=123))
def test_fillna(self):
pdf = pd.DataFrame({"x": [np.nan, 2, 3, 4, np.nan, 6], "y": [np.nan, 2, 3, 4, np.nan, 6]})
kdf = ks.from_pandas(pdf)
pser = pdf.x
kser = kdf.x
self.assert_eq(kser.fillna(0), pser.fillna(0))
self.assert_eq(kser.fillna(np.nan).fillna(0), pser.fillna(np.nan).fillna(0))
kser.fillna(0, inplace=True)
pser.fillna(0, inplace=True)
self.assert_eq(kser, pser)
self.assert_eq(kdf, pdf)
# test the case where the series does not have NA/NaN values
kser.fillna(0, inplace=True)
pser.fillna(0, inplace=True)
self.assert_eq(kser, pser)
kser = kdf.x.rename("y")
pser = pdf.x.rename("y")
kser.fillna(0, inplace=True)
pser.fillna(0, inplace=True)
self.assert_eq(kser.head(), pser.head())
pser = pd.Series([1, 2, 3, 4, 5, 6], name="x")
kser = ks.from_pandas(pser)
pser.loc[3] = np.nan
kser.loc[3] = np.nan
self.assert_eq(kser.fillna(0), pser.fillna(0))
self.assert_eq(kser.fillna(method="ffill"), pser.fillna(method="ffill"))
self.assert_eq(kser.fillna(method="bfill"), pser.fillna(method="bfill"))
def test_dropna(self):
pdf = pd.DataFrame({"x": [np.nan, 2, 3, 4, np.nan, 6]})
kdf = ks.from_pandas(pdf)
pser = pdf.x
kser = kdf.x
self.assert_eq(kser.dropna(), pser.dropna())
pser.dropna(inplace=True)
kser.dropna(inplace=True)
self.assert_eq(kser, pser)
self.assert_eq(kdf, pdf)
def test_nunique(self):
pser = pd.Series([1, 2, 1, np.nan])
kser = ks.from_pandas(pser)
# Assert NaNs are dropped by default
nunique_result = kser.nunique()
self.assertEqual(nunique_result, 2)
self.assert_eq(nunique_result, pser.nunique())
# Assert including NaN values
nunique_result = kser.nunique(dropna=False)
self.assertEqual(nunique_result, 3)
self.assert_eq(nunique_result, pser.nunique(dropna=False))
# Assert approximate counts
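# nunique(approx=True) uses an approximate distinct count, so the result may deviate from the exact value (103 vs. 100 here); a smaller rsd tightens the estimate.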
self.assertEqual(ks.Series(range(100)).nunique(approx=True), 103)
self.assertEqual(ks.Series(range(100)).nunique(approx=True, rsd=0.01), 100)
def _test_value_counts(self):
# this also contains tests for Index & MultiIndex
pser = pd.Series(
[1, 2, 1, 3, 3, np.nan, 1, 4, 2, np.nan, 3, np.nan, 3, 1, 3],
index=[1, 2, 1, 3, 3, np.nan, 1, 4, 2, np.nan, 3, np.nan, 3, 1, 3],
name="x",
)
kser = ks.from_pandas(pser)
exp = pser.value_counts()
res = kser.value_counts()
self.assertEqual(res.name, exp.name)
self.assert_eq(res, exp)
self.assert_eq(kser.value_counts(normalize=True), pser.value_counts(normalize=True))
self.assert_eq(kser.value_counts(ascending=True), pser.value_counts(ascending=True))
self.assert_eq(
kser.value_counts(normalize=True, dropna=False),
pser.value_counts(normalize=True, dropna=False),
)
self.assert_eq(
kser.value_counts(ascending=True, dropna=False),
pser.value_counts(ascending=True, dropna=False),
)
self.assert_eq(
kser.index.value_counts(normalize=True), pser.index.value_counts(normalize=True)
)
self.assert_eq(
kser.index.value_counts(ascending=True), pser.index.value_counts(ascending=True)
)
self.assert_eq(
kser.index.value_counts(normalize=True, dropna=False),
pser.index.value_counts(normalize=True, dropna=False),
)
self.assert_eq(
kser.index.value_counts(ascending=True, dropna=False),
pser.index.value_counts(ascending=True, dropna=False),
)
with self.assertRaisesRegex(
NotImplementedError, "value_counts currently does not support bins"
):
kser.value_counts(bins=3)
pser.name = "index"
kser.name = "index"
self.assert_eq(kser.value_counts(), pser.value_counts())
# Series from DataFrame
pdf = pd.DataFrame({"a": [2, 2, 3], "b": [None, 1, None]})
kdf = ks.from_pandas(pdf)
self.assert_eq(kdf.a.value_counts(normalize=True), pdf.a.value_counts(normalize=True))
self.assert_eq(kdf.a.value_counts(ascending=True), pdf.a.value_counts(ascending=True))
self.assert_eq(
kdf.a.value_counts(normalize=True, dropna=False),
pdf.a.value_counts(normalize=True, dropna=False),
)
self.assert_eq(
kdf.a.value_counts(ascending=True, dropna=False),
pdf.a.value_counts(ascending=True, dropna=False),
)
self.assert_eq(
kser.index.value_counts(normalize=True), pser.index.value_counts(normalize=True)
)
self.assert_eq(
kser.index.value_counts(ascending=True), pser.index.value_counts(ascending=True)
)
self.assert_eq(
kser.index.value_counts(normalize=True, dropna=False),
pser.index.value_counts(normalize=True, dropna=False),
)
self.assert_eq(
kser.index.value_counts(ascending=True, dropna=False),
pser.index.value_counts(ascending=True, dropna=False),
)
# Series with NaN index
pser = pd.Series([3, 2, 3, 1, 2, 3], index=[2.0, None, 5.0, 5.0, None, 5.0])
kser = ks.from_pandas(pser)
self.assert_eq(kser.value_counts(normalize=True), pser.value_counts(normalize=True))
self.assert_eq(kser.value_counts(ascending=True), pser.value_counts(ascending=True))
self.assert_eq(
kser.value_counts(normalize=True, dropna=False),
pser.value_counts(normalize=True, dropna=False),
)
self.assert_eq(
kser.value_counts(ascending=True, dropna=False),
pser.value_counts(ascending=True, dropna=False),
)
self.assert_eq(
kser.index.value_counts(normalize=True), pser.index.value_counts(normalize=True)
)
self.assert_eq(
kser.index.value_counts(ascending=True), pser.index.value_counts(ascending=True)
)
self.assert_eq(
kser.index.value_counts(normalize=True, dropna=False),
pser.index.value_counts(normalize=True, dropna=False),
)
self.assert_eq(
kser.index.value_counts(ascending=True, dropna=False),
pser.index.value_counts(ascending=True, dropna=False),
)
# Series with MultiIndex
pser.index = pd.MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "c"), ("x", "a"), ("y", "c"), ("x", "a")]
)
kser = ks.from_pandas(pser)
self.assert_eq(kser.value_counts(normalize=True), pser.value_counts(normalize=True))
self.assert_eq(kser.value_counts(ascending=True), pser.value_counts(ascending=True))
self.assert_eq(
kser.value_counts(normalize=True, dropna=False),
pser.value_counts(normalize=True, dropna=False),
)
self.assert_eq(
kser.value_counts(ascending=True, dropna=False),
pser.value_counts(ascending=True, dropna=False),
)
# FIXME: MultiIndex.value_counts returns wrong indices.
self.assert_eq(
kser.index.value_counts(normalize=True),
pser.index.value_counts(normalize=True),
almost=True,
)
self.assert_eq(
kser.index.value_counts(ascending=True),
pser.index.value_counts(ascending=True),
almost=True,
)
self.assert_eq(
kser.index.value_counts(normalize=True, dropna=False),
pser.index.value_counts(normalize=True, dropna=False),
almost=True,
)
self.assert_eq(
kser.index.value_counts(ascending=True, dropna=False),
pser.index.value_counts(ascending=True, dropna=False),
almost=True,
)
# Series with a MultiIndex where some index entries contain NaN
pser.index = pd.MultiIndex.from_tuples(
[("x", "a"), ("x", None), ("y", "c"), ("x", "a"), ("y", "c"), ("x", "a")]
)
kser = ks.from_pandas(pser)
self.assert_eq(kser.value_counts(normalize=True), pser.value_counts(normalize=True))
self.assert_eq(kser.value_counts(ascending=True), pser.value_counts(ascending=True))
self.assert_eq(
kser.value_counts(normalize=True, dropna=False),
pser.value_counts(normalize=True, dropna=False),
)
self.assert_eq(
kser.value_counts(ascending=True, dropna=False),
pser.value_counts(ascending=True, dropna=False),
)
# FIXME: MultiIndex.value_counts returns wrong indices.
self.assert_eq(
kser.index.value_counts(normalize=True),
pser.index.value_counts(normalize=True),
almost=True,
)
self.assert_eq(
kser.index.value_counts(ascending=True),
pser.index.value_counts(ascending=True),
almost=True,
)
self.assert_eq(
kser.index.value_counts(normalize=True, dropna=False),
pser.index.value_counts(normalize=True, dropna=False),
almost=True,
)
self.assert_eq(
kser.index.value_counts(ascending=True, dropna=False),
pser.index.value_counts(ascending=True, dropna=False),
almost=True,
)
# Series with a MultiIndex where some index entries are NaN.
# This test is only available for pandas >= 0.24.
if LooseVersion(pd.__version__) >= LooseVersion("0.24"):
pser.index = pd.MultiIndex.from_tuples(
[("x", "a"), None, ("y", "c"), ("x", "a"), ("y", "c"), ("x", "a")]
)
kser = ks.from_pandas(pser)
self.assert_eq(kser.value_counts(normalize=True), pser.value_counts(normalize=True))
self.assert_eq(kser.value_counts(ascending=True), pser.value_counts(ascending=True))
self.assert_eq(
kser.value_counts(normalize=True, dropna=False),
pser.value_counts(normalize=True, dropna=False),
)
self.assert_eq(
kser.value_counts(ascending=True, dropna=False),
pser.value_counts(ascending=True, dropna=False),
)
# FIXME: MultiIndex.value_counts returns wrong indices.
self.assert_eq(
kser.index.value_counts(normalize=True),
pser.index.value_counts(normalize=True),
almost=True,
)
self.assert_eq(
kser.index.value_counts(ascending=True),
pser.index.value_counts(ascending=True),
almost=True,
)
self.assert_eq(
kser.index.value_counts(normalize=True, dropna=False),
pser.index.value_counts(normalize=True, dropna=False),
almost=True,
)
self.assert_eq(
kser.index.value_counts(ascending=True, dropna=False),
pser.index.value_counts(ascending=True, dropna=False),
almost=True,
)
def test_value_counts(self):
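# With PySpark < 2.4, run the checks with Arrow disabled and expect MultiIndex.value_counts to raise RuntimeError.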
if LooseVersion(pyspark.__version__) < LooseVersion("2.4"):
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
self._test_value_counts()
self.assertRaises(
RuntimeError,
lambda: ks.MultiIndex.from_tuples([("x", "a"), ("x", "b")]).value_counts(),
)
else:
self._test_value_counts()
def test_nsmallest(self):
sample_lst = [1, 2, 3, 4, np.nan, 6]
pser = pd.Series(sample_lst, name="x")
kser = ks.Series(sample_lst, name="x")
self.assert_eq(kser.nsmallest(n=3), pser.nsmallest(n=3))
self.assert_eq(kser.nsmallest(), pser.nsmallest())
self.assert_eq((kser + 1).nsmallest(), (pser + 1).nsmallest())
def test_nlargest(self):
sample_lst = [1, 2, 3, 4, np.nan, 6]
pser = pd.Series(sample_lst, name="x")
kser = ks.Series(sample_lst, name="x")
self.assert_eq(kser.nlargest(n=3), pser.nlargest(n=3))
self.assert_eq(kser.nlargest(), pser.nlargest())
self.assert_eq((kser + 1).nlargest(), (pser + 1).nlargest())
def test_isnull(self):
pser = pd.Series([1, 2, 3, 4, np.nan, 6], name="x")
kser = ks.from_pandas(pser)
self.assert_eq(kser.notnull(), pser.notnull())
self.assert_eq(kser.isnull(), pser.isnull())
pser = self.pser
kser = self.kser
self.assert_eq(kser.notnull(), pser.notnull())
self.assert_eq(kser.isnull(), pser.isnull())
def test_all(self):
for pser in [
pd.Series([True, True], name="x"),
pd.Series([True, False], name="x"),
pd.Series([0, 1], name="x"),
pd.Series([1, 2, 3], name="x"),
pd.Series([True, True, None], name="x"),
pd.Series([True, False, None], name="x"),
pd.Series([], name="x"),
pd.Series([np.nan], name="x"),
]:
kser = ks.from_pandas(pser)
self.assert_eq(kser.all(), pser.all())
pser = pd.Series([1, 2, 3, 4], name="x")
kser = ks.from_pandas(pser)
self.assert_eq((kser % 2 == 0).all(), (pser % 2 == 0).all())
with self.assertRaisesRegex(
NotImplementedError, 'axis should be either 0 or "index" currently.'
):
kser.all(axis=1)
def test_any(self):
for pser in [
pd.Series([False, False], name="x"),
pd.Series([True, False], name="x"),
pd.Series([0, 1], name="x"),
pd.Series([1, 2, 3], name="x"),
pd.Series([True, True, None], name="x"),
pd.Series([True, False, None], name="x"),
pd.Series([], name="x"),
pd.Series([np.nan], name="x"),
]:
kser = ks.from_pandas(pser)
self.assert_eq(kser.any(), pser.any())
pser = pd.Series([1, 2, 3, 4], name="x")
kser = ks.from_pandas(pser)
self.assert_eq((kser % 2 == 0).any(), (pser % 2 == 0).any())
with self.assertRaisesRegex(
NotImplementedError, 'axis should be either 0 or "index" currently.'
):
kser.any(axis=1)
def test_reset_index(self):
pdf = pd.DataFrame({"foo": [1, 2, 3, 4]}, index=pd.Index(["a", "b", "c", "d"], name="idx"))
kdf = ks.from_pandas(pdf)
pser = pdf.foo
kser = kdf.foo
self.assert_eq(kser.reset_index(), pser.reset_index())
self.assert_eq(kser.reset_index(name="values"), pser.reset_index(name="values"))
self.assert_eq(kser.reset_index(drop=True), pser.reset_index(drop=True))
# inplace
kser.reset_index(drop=True, inplace=True)
pser.reset_index(drop=True, inplace=True)
self.assert_eq(kser, pser)
self.assert_eq(kdf, pdf)
def test_reset_index_with_default_index_types(self):
pser = pd.Series([1, 2, 3], name="0", index=np.random.rand(3))
kser = ks.from_pandas(pser)
with ks.option_context("compute.default_index_type", "sequence"):
self.assert_eq(kser.reset_index(), pser.reset_index())
with ks.option_context("compute.default_index_type", "distributed-sequence"):
# the order might be changed.
self.assert_eq(kser.reset_index().sort_index(), pser.reset_index())
with ks.option_context("compute.default_index_type", "distributed"):
# the index is different.
self.assert_eq(
kser.reset_index().to_pandas().reset_index(drop=True), pser.reset_index()
)
def test_sort_values(self):
pdf = pd.DataFrame({"x": [1, 2, 3, 4, 5, None, 7]})
kdf = ks.from_pandas(pdf)
pser = pdf.x
kser = kdf.x
self.assert_eq(kser.sort_values(), pser.sort_values())
self.assert_eq(kser.sort_values(ascending=False), pser.sort_values(ascending=False))
self.assert_eq(kser.sort_values(na_position="first"), pser.sort_values(na_position="first"))
self.assertRaises(ValueError, lambda: kser.sort_values(na_position="invalid"))
# inplace
# pandas raises an exception when the Series is derived from a DataFrame
kser.sort_values(inplace=True)
self.assert_eq(kser, pser.sort_values())
self.assert_eq(kdf, pdf)
pser = pdf.x.copy()
kser = kdf.x.copy()
kser.sort_values(inplace=True)
pser.sort_values(inplace=True)
self.assert_eq(kser, pser)
self.assert_eq(kdf, pdf)
def test_sort_index(self):
pdf = pd.DataFrame({"x": [2, 1, np.nan]}, index=["b", "a", np.nan])
kdf = ks.from_pandas(pdf)
pser = pdf.x
kser = kdf.x
# Assert invalid parameters
self.assertRaises(NotImplementedError, lambda: kser.sort_index(axis=1))
self.assertRaises(NotImplementedError, lambda: kser.sort_index(kind="mergesort"))
self.assertRaises(ValueError, lambda: kser.sort_index(na_position="invalid"))
# Assert default behavior without parameters
self.assert_eq(kser.sort_index(), pser.sort_index())
# Assert sorting descending
self.assert_eq(kser.sort_index(ascending=False), pser.sort_index(ascending=False))
# Assert sorting NA indices first
self.assert_eq(kser.sort_index(na_position="first"), pser.sort_index(na_position="first"))
# Assert sorting inplace
# pandas sorts pdf.x by the index and updates the column only
# when the Series is derived from a DataFrame.
kser.sort_index(inplace=True)
self.assert_eq(kser, pser.sort_index())
self.assert_eq(kdf, pdf)
pser = pdf.x.copy()
kser = kdf.x.copy()
kser.sort_index(inplace=True)
pser.sort_index(inplace=True)
self.assert_eq(kser, pser)
self.assert_eq(kdf, pdf)
# Assert multi-indices
pser = pd.Series(range(4), index=[["b", "b", "a", "a"], [1, 0, 1, 0]], name="0")
kser = ks.from_pandas(pser)
self.assert_eq(kser.sort_index(), pser.sort_index())
self.assert_eq(kser.sort_index(level=[1, 0]), pser.sort_index(level=[1, 0]))
self.assert_eq(kser.reset_index().sort_index(), pser.reset_index().sort_index())
def test_to_datetime(self):
pser = pd.Series(["3/11/2000", "3/12/2000", "3/13/2000"] * 100)
kser = ks.from_pandas(pser)
self.assert_eq(
pd.to_datetime(pser, infer_datetime_format=True),
ks.to_datetime(kser, infer_datetime_format=True),
)
def test_missing(self):
kser = self.kser
missing_functions = inspect.getmembers(MissingPandasLikeSeries, inspect.isfunction)
unsupported_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "unsupported_function"
]
for name in unsupported_functions:
with self.assertRaisesRegex(
PandasNotImplementedError,
"method.*Series.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(kser, name)()
deprecated_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "deprecated_function"
]
for name in deprecated_functions:
with self.assertRaisesRegex(
PandasNotImplementedError, "method.*Series.*{}.*is deprecated".format(name)
):
getattr(kser, name)()
missing_properties = inspect.getmembers(
MissingPandasLikeSeries, lambda o: isinstance(o, property)
)
unsupported_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "unsupported_property"
]
for name in unsupported_properties:
with self.assertRaisesRegex(
PandasNotImplementedError,
"property.*Series.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(kser, name)
deprecated_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "deprecated_property"
]
for name in deprecated_properties:
with self.assertRaisesRegex(
PandasNotImplementedError, "property.*Series.*{}.*is deprecated".format(name)
):
getattr(kser, name)
def test_clip(self):
pser = pd.Series([0, 2, 4], index=np.random.rand(3))
kser = ks.from_pandas(pser)
# Assert list-like values are not accepted for 'lower' and 'upper'
msg = "List-like value are not supported for 'lower' and 'upper' at the moment"
with self.assertRaises(ValueError, msg=msg):
kser.clip(lower=[1])
with self.assertRaises(ValueError, msg=msg):
kser.clip(upper=[1])
# Assert no lower or upper
self.assert_eq(kser.clip(), pser.clip())
# Assert lower only
self.assert_eq(kser.clip(1), pser.clip(1))
# Assert upper only
self.assert_eq(kser.clip(upper=3), pser.clip(upper=3))
# Assert lower and upper
self.assert_eq(kser.clip(1, 3), pser.clip(1, 3))
# Assert behavior on string values
str_kser = ks.Series(["a", "b", "c"])
self.assert_eq(str_kser.clip(1, 3), str_kser)
def test_is_unique(self):
# We can't use pandas' is_unique for comparison. pandas 0.23 ignores None
pser = pd.Series([1, 2, 2, None, None])
kser = ks.from_pandas(pser)
self.assertEqual(False, kser.is_unique)
self.assertEqual(False, (kser + 1).is_unique)
pser = pd.Series([1, None, None])
kser = ks.from_pandas(pser)
self.assertEqual(False, kser.is_unique)
self.assertEqual(False, (kser + 1).is_unique)
pser = pd.Series([1])
kser = ks.from_pandas(pser)
self.assertEqual(pser.is_unique, kser.is_unique)
self.assertEqual((pser + 1).is_unique, (kser + 1).is_unique)
pser = pd.Series([1, 1, 1])
kser = ks.from_pandas(pser)
self.assertEqual(pser.is_unique, kser.is_unique)
self.assertEqual((pser + 1).is_unique, (kser + 1).is_unique)
def test_to_list(self):
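# Series.to_list was added in pandas 0.24.0, so the comparison is skipped on older pandas.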
if LooseVersion(pd.__version__) >= LooseVersion("0.24.0"):
self.assertEqual(self.kser.to_list(), self.pser.to_list())
def test_append(self):
pser1 = pd.Series([1, 2, 3], name="0")
pser2 = pd.Series([4, 5, 6], name="0")
pser3 = pd.Series([4, 5, 6], index=[3, 4, 5], name="0")
kser1 = ks.from_pandas(pser1)
kser2 = ks.from_pandas(pser2)
kser3 = ks.from_pandas(pser3)
self.assert_eq(kser1.append(kser2), pser1.append(pser2))
self.assert_eq(kser1.append(kser3), pser1.append(pser3))
self.assert_eq(
kser1.append(kser2, ignore_index=True), pser1.append(pser2, ignore_index=True)
)
kser1.append(kser3, verify_integrity=True)
msg = "Indices have overlapping values"
with self.assertRaises(ValueError, msg=msg):
kser1.append(kser2, verify_integrity=True)
def test_map(self):
pser = pd.Series(["cat", "dog", None, "rabbit"])
kser = ks.from_pandas(pser)
# Currently Koalas doesn't return NaN as pandas does.
self.assert_eq(kser.map({}), pser.map({}).replace({np.nan: None}))
d = defaultdict(lambda: "abc")
self.assertTrue("abc" in repr(kser.map(d)))
self.assert_eq(kser.map(d), pser.map(d))
def tomorrow(date) -> datetime:
return date + timedelta(days=1)
pser = pd.Series([datetime(2019, 10, 24)])
kser = ks.from_pandas(pser)
self.assert_eq(kser.map(tomorrow), pser.map(tomorrow))
def test_add_prefix(self):
pser = pd.Series([1, 2, 3, 4], name="0")
kser = ks.from_pandas(pser)
self.assert_eq(pser.add_prefix("item_"), kser.add_prefix("item_"))
pser = pd.Series(
[1, 2, 3],
name="0",
index=pd.MultiIndex.from_tuples([("A", "X"), ("A", "Y"), ("B", "X")]),
)
kser = ks.from_pandas(pser)
self.assert_eq(pser.add_prefix("item_"), kser.add_prefix("item_"))
def test_add_suffix(self):
pser = pd.Series([1, 2, 3, 4], name="0")
kser = ks.from_pandas(pser)
self.assert_eq(pser.add_suffix("_item"), kser.add_suffix("_item"))
pser = pd.Series(
[1, 2, 3],
name="0",
index=pd.MultiIndex.from_tuples([("A", "X"), ("A", "Y"), ("B", "X")]),
)
kser = ks.from_pandas(pser)
self.assert_eq(pser.add_suffix("_item"), kser.add_suffix("_item"))
def test_hist(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7, 8, 9, 15, 50],}, index=[0, 1, 3, 5, 6, 8, 9, 9, 9, 10, 10]
)
kdf = ks.from_pandas(pdf)
def plot_to_base64(ax):
bytes_data = BytesIO()
ax.figure.savefig(bytes_data, format="png")
bytes_data.seek(0)
b64_data = base64.b64encode(bytes_data.read())
plt.close(ax.figure)
return b64_data
_, ax1 = plt.subplots(1, 1)
# Use plot.hist() because pandas changes the tick properties when hist() is called.
ax1 = pdf["a"].plot.hist()
_, ax2 = plt.subplots(1, 1)
ax2 = kdf["a"].hist()
self.assert_eq(plot_to_base64(ax1), plot_to_base64(ax2))
def test_cummin(self):
pser = pd.Series([1.0, None, 0.0, 4.0, 9.0])
kser = ks.from_pandas(pser)
self.assert_eq(pser.cummin(), kser.cummin())
self.assert_eq(pser.cummin(skipna=False), kser.cummin(skipna=False))
self.assert_eq(pser.cummin().sum(), kser.cummin().sum())
# with reversed index
pser.index = [4, 3, 2, 1, 0]
kser = ks.from_pandas(pser)
self.assert_eq(pser.cummin(), kser.cummin())
self.assert_eq(pser.cummin(skipna=False), kser.cummin(skipna=False))
def test_cummax(self):
pser = pd.Series([1.0, None, 0.0, 4.0, 9.0])
kser = ks.from_pandas(pser)
self.assert_eq(pser.cummax(), kser.cummax())
self.assert_eq(pser.cummax(skipna=False), kser.cummax(skipna=False))
self.assert_eq(pser.cummax().sum(), kser.cummax().sum())
# with reversed index
pser.index = [4, 3, 2, 1, 0]
kser = ks.from_pandas(pser)
self.assert_eq(pser.cummax(), kser.cummax())
self.assert_eq(pser.cummax(skipna=False), kser.cummax(skipna=False))
def test_cumsum(self):
pser = pd.Series([1.0, None, 0.0, 4.0, 9.0])
kser = ks.from_pandas(pser)
self.assert_eq(pser.cumsum(), kser.cumsum())
self.assert_eq(pser.cumsum(skipna=False), kser.cumsum(skipna=False))
self.assert_eq(pser.cumsum().sum(), kser.cumsum().sum())
# with reversed index
pser.index = [4, 3, 2, 1, 0]
kser = ks.from_pandas(pser)
self.assert_eq(pser.cumsum(), kser.cumsum())
self.assert_eq(pser.cumsum(skipna=False), kser.cumsum(skipna=False))
def test_cumprod(self):
pser = pd.Series([1.0, None, 1.0, 4.0, 9.0])
kser = ks.from_pandas(pser)
self.assert_eq(pser.cumprod(), kser.cumprod())
self.assert_eq(pser.cumprod(skipna=False), kser.cumprod(skipna=False))
self.assert_eq(pser.cumprod().sum(), kser.cumprod().sum())
# with integer type
pser = pd.Series([1, 10, 1, 4, 9])
kser = ks.from_pandas(pser)
self.assert_eq(pser.cumprod(), kser.cumprod())
self.assert_eq(pser.cumprod(skipna=False), kser.cumprod(skipna=False))
self.assert_eq(pser.cumprod().sum(), kser.cumprod().sum())
# with reversed index
pser.index = [4, 3, 2, 1, 0]
kser = ks.from_pandas(pser)
self.assert_eq(pser.cumprod(), kser.cumprod())
self.assert_eq(pser.cumprod(skipna=False), kser.cumprod(skipna=False))
with self.assertRaisesRegex(Exception, "values should be bigger than 0"):
ks.Series([0, 1]).cumprod().to_pandas()
def test_median(self):
with self.assertRaisesRegex(ValueError, "accuracy must be an integer; however"):
ks.Series([24.0, 21.0, 25.0, 33.0, 26.0]).median(accuracy="a")
def test_rank(self):
pser = pd.Series([1, 2, 3, 1], name="x")
kser = ks.from_pandas(pser)
self.assert_eq(pser.rank(), kser.rank().sort_index())
self.assert_eq(pser.rank(ascending=False), kser.rank(ascending=False).sort_index())
self.assert_eq(pser.rank(method="min"), kser.rank(method="min").sort_index())
self.assert_eq(pser.rank(method="max"), kser.rank(method="max").sort_index())
self.assert_eq(pser.rank(method="first"), kser.rank(method="first").sort_index())
self.assert_eq(pser.rank(method="dense"), kser.rank(method="dense").sort_index())
msg = "method must be one of 'average', 'min', 'max', 'first', 'dense'"
with self.assertRaisesRegex(ValueError, msg):
kser.rank(method="nothing")
def test_round(self):
pser = pd.Series([0.028208, 0.038683, 0.877076], name="x")
kser = ks.from_pandas(pser)
self.assert_eq(pser.round(2), kser.round(2))
msg = "decimals must be an integer"
with self.assertRaisesRegex(ValueError, msg):
kser.round(1.5)
def test_quantile(self):
with self.assertRaisesRegex(ValueError, "accuracy must be an integer; however"):
ks.Series([24.0, 21.0, 25.0, 33.0, 26.0]).quantile(accuracy="a")
with self.assertRaisesRegex(ValueError, "q must be a float of an array of floats;"):
ks.Series([24.0, 21.0, 25.0, 33.0, 26.0]).quantile(q="a")
with self.assertRaisesRegex(ValueError, "q must be a float of an array of floats;"):
ks.Series([24.0, 21.0, 25.0, 33.0, 26.0]).quantile(q=["a"])
def test_idxmax(self):
pser = pd.Series(data=[1, 4, 5], index=["A", "B", "C"])
kser = ks.Series(pser)
self.assertEqual(kser.idxmax(), pser.idxmax())
self.assertEqual(kser.idxmax(skipna=False), pser.idxmax(skipna=False))
index = pd.MultiIndex.from_arrays(
[["a", "a", "b", "b"], ["c", "d", "e", "f"]], names=("first", "second")
)
pser = pd.Series(data=[1, 2, 4, 5], index=index)
kser = ks.Series(pser)
self.assertEqual(kser.idxmax(), pser.idxmax())
self.assertEqual(kser.idxmax(skipna=False), pser.idxmax(skipna=False))
kser = ks.Series([])
with self.assertRaisesRegex(ValueError, "an empty sequence"):
kser.idxmax()
pser = pd.Series([1, 100, None, 100, 1, 100], index=[10, 3, 5, 2, 1, 8])
kser = ks.Series(pser)
self.assertEqual(kser.idxmax(), pser.idxmax())
self.assertEqual(repr(kser.idxmax(skipna=False)), repr(pser.idxmax(skipna=False)))
def test_idxmin(self):
pser = pd.Series(data=[1, 4, 5], index=["A", "B", "C"])
kser = ks.Series(pser)
self.assertEqual(kser.idxmin(), pser.idxmin())
self.assertEqual(kser.idxmin(skipna=False), pser.idxmin(skipna=False))
index = pd.MultiIndex.from_arrays(
[["a", "a", "b", "b"], ["c", "d", "e", "f"]], names=("first", "second")
)
pser = pd.Series(data=[1, 2, 4, 5], index=index)
kser = ks.Series(pser)
self.assertEqual(kser.idxmin(), pser.idxmin())
self.assertEqual(kser.idxmin(skipna=False), pser.idxmin(skipna=False))
kser = ks.Series([])
with self.assertRaisesRegex(ValueError, "an empty sequence"):
kser.idxmin()
pser = pd.Series([1, 100, None, 100, 1, 100], index=[10, 3, 5, 2, 1, 8])
kser = ks.Series(pser)
self.assertEqual(kser.idxmin(), pser.idxmin())
self.assertEqual(repr(kser.idxmin(skipna=False)), repr(pser.idxmin(skipna=False)))
def test_shift(self):
pser = pd.Series([10, 20, 15, 30, 45], name="x")
kser = ks.Series(pser)
if LooseVersion(pd.__version__) < LooseVersion("0.24.2"):
self.assert_eq(kser.shift(periods=2), pser.shift(periods=2))
else:
self.assert_eq(kser.shift(periods=2, fill_value=0), pser.shift(periods=2, fill_value=0))
with self.assertRaisesRegex(ValueError, "periods should be an int; however"):
kser.shift(periods=1.5)
def test_astype(self):
pser = pd.Series([10, 20, 15, 30, 45], name="x")
kser = ks.Series(pser)
self.assert_eq(kser.astype(np.int32), pser.astype(np.int32))
self.assert_eq(kser.astype(bool), pser.astype(bool))
pser = pd.Series([10, 20, 15, 30, 45, None, np.nan], name="x")
kser = ks.Series(pser)
self.assert_eq(kser.astype(bool), pser.astype(bool))
pser = pd.Series(["hi", "hi ", " ", " \t", "", None], name="x")
kser = ks.Series(pser)
self.assert_eq(kser.astype(bool), pser.astype(bool))
self.assert_eq(kser.str.strip().astype(bool), pser.str.strip().astype(bool))
pser = pd.Series([True, False, None], name="x")
kser = ks.Series(pser)
self.assert_eq(kser.astype(bool), pser.astype(bool))
with self.assertRaisesRegex(TypeError, "not understood"):
kser.astype("int63")
def test_aggregate(self):
pser = pd.Series([10, 20, 15, 30, 45], name="x")
kser = ks.Series(pser)
msg = "func must be a string or list of strings"
with self.assertRaisesRegex(ValueError, msg):
kser.aggregate({"x": ["min", "max"]})
msg = (
"If the given function is a list, it " "should only contains function names as strings."
)
with self.assertRaisesRegex(ValueError, msg):
kser.aggregate(["min", max])
def test_drop(self):
pser = pd.Series([10, 20, 15, 30, 45], name="x")
kser = ks.Series(pser)
msg = "Need to specify at least one of 'labels' or 'index'"
with self.assertRaisesRegex(ValueError, msg):
kser.drop()
# For MultiIndex
midx = pd.MultiIndex(
[["lama", "cow", "falcon"], ["speed", "weight", "length"]],
[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
)
kser = ks.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx)
msg = "'level' should be less than the number of indexes"
with self.assertRaisesRegex(ValueError, msg):
kser.drop(labels="weight", level=2)
msg = (
"If the given index is a list, it "
"should only contains names as strings, "
"or a list of tuples that contain "
"index names as strings"
)
with self.assertRaisesRegex(ValueError, msg):
kser.drop(["lama", ["cow", "falcon"]])
msg = "'index' type should be one of str, list, tuple"
with self.assertRaisesRegex(ValueError, msg):
kser.drop({"lama": "speed"})
msg = "Cannot specify both 'labels' and 'index'"
with self.assertRaisesRegex(ValueError, msg):
kser.drop("lama", index="cow")
msg = r"'Key length \(2\) exceeds index depth \(3\)'"
with self.assertRaisesRegex(KeyError, msg):
kser.drop(("lama", "speed", "x"))
self.assert_eq(kser.drop(("lama", "speed", "x"), level=1), kser)
def test_pop(self):
midx = pd.MultiIndex(
[["lama", "cow", "falcon"], ["speed", "weight", "length"]],
[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
)
pdf = pd.DataFrame({"x": [45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3]}, index=midx)
kdf = ks.from_pandas(pdf)
pser = pdf.x
kser = kdf.x
self.assert_eq(kser.pop(("lama", "speed")), pser.pop(("lama", "speed")))
self.assert_eq(kser, pser)
self.assert_eq(kdf, pdf)
msg = "'key' should be string or tuple that contains strings"
with self.assertRaisesRegex(ValueError, msg):
kser.pop(0)
msg = (
"'key' should have index names as only strings "
"or a tuple that contain index names as only strings"
)
with self.assertRaisesRegex(ValueError, msg):
kser.pop(("lama", 0))
msg = r"'Key length \(3\) exceeds index depth \(2\)'"
with self.assertRaisesRegex(KeyError, msg):
kser.pop(("lama", "speed", "x"))
def test_replace(self):
pser = pd.Series([10, 20, 15, 30, 45], name="x")
kser = ks.Series(pser)
self.assert_eq(kser.replace(), pser.replace())
self.assert_eq(kser.replace({}), pser.replace({}))
msg = "'to_replace' should be one of str, list, dict, int, float"
with self.assertRaisesRegex(ValueError, msg):
kser.replace(ks.range(5))
msg = "Replacement lists must match in length. Expecting 3 got 2"
with self.assertRaisesRegex(ValueError, msg):
kser.replace([10, 20, 30], [1, 2])
msg = "replace currently not support for regex"
with self.assertRaisesRegex(NotImplementedError, msg):
kser.replace(r"^1.$", regex=True)
def test_xs(self):
midx = pd.MultiIndex(
[["a", "b", "c"], ["lama", "cow", "falcon"], ["speed", "weight", "length"]],
[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
)
pser = pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx)
kser = ks.from_pandas(pser)
self.assert_eq(kser.xs(("a", "lama", "speed")), pser.xs(("a", "lama", "speed")))
def test_duplicates(self):
psers = {
"test on texts": pd.Series(
["lama", "cow", "lama", "beetle", "lama", "hippo"], name="animal"
),
"test on numbers": pd.Series([1, 1, 2, 4, 3]),
}
keeps = ["first", "last", False]
for (msg, pser), keep in product(psers.items(), keeps):
with self.subTest(msg, keep=keep):
kser = ks.Series(pser)
self.assert_eq(
pser.drop_duplicates(keep=keep).sort_values(),
kser.drop_duplicates(keep=keep).sort_values(),
)
def test_update(self):
pser = pd.Series([10, 20, 15, 30, 45], name="x")
kser = ks.Series(pser)
msg = "'other' must be a Series"
with self.assertRaisesRegex(ValueError, msg):
kser.update(10)
def test_where(self):
pser1 = pd.Series([0, 1, 2, 3, 4])
kser1 = ks.from_pandas(pser1)
self.assert_eq(pser1.where(pser1 > 3), kser1.where(kser1 > 3).sort_index())
def test_mask(self):
pser1 = pd.Series([0, 1, 2, 3, 4])
kser1 = ks.from_pandas(pser1)
self.assert_eq(pser1.mask(pser1 > 3), kser1.mask(kser1 > 3).sort_index())
def test_truncate(self):
pser1 = pd.Series([10, 20, 30, 40, 50, 60, 70], index=[1, 2, 3, 4, 5, 6, 7])
kser1 = ks.Series(pser1)
pser2 = pd.Series([10, 20, 30, 40, 50, 60, 70], index=[7, 6, 5, 4, 3, 2, 1])
kser2 = ks.Series(pser2)
self.assert_eq(kser1.truncate(), pser1.truncate())
self.assert_eq(kser1.truncate(before=2), pser1.truncate(before=2))
self.assert_eq(kser1.truncate(after=5), pser1.truncate(after=5))
self.assert_eq(kser1.truncate(copy=False), pser1.truncate(copy=False))
self.assert_eq(kser1.truncate(2, 5, copy=False), pser1.truncate(2, 5, copy=False))
# The bug for these tests has been fixed in pandas 1.1.0.
if LooseVersion(pd.__version__) >= LooseVersion("1.1.0"):
self.assert_eq(kser2.truncate(4, 6), pser2.truncate(4, 6))
self.assert_eq(kser2.truncate(4, 6, copy=False), pser2.truncate(4, 6, copy=False))
else:
expected_kser = ks.Series([20, 30, 40], index=[6, 5, 4])
self.assert_eq(kser2.truncate(4, 6), expected_kser)
self.assert_eq(kser2.truncate(4, 6, copy=False), expected_kser)
kser = ks.Series([10, 20, 30, 40, 50, 60, 70], index=[1, 2, 3, 4, 3, 2, 1])
msg = "truncate requires a sorted index"
with self.assertRaisesRegex(ValueError, msg):
kser.truncate()
kser = ks.Series([10, 20, 30, 40, 50, 60, 70], index=[1, 2, 3, 4, 5, 6, 7])
msg = "Truncate: 2 must be after 5"
with self.assertRaisesRegex(ValueError, msg):
kser.truncate(5, 2)
def test_getitem(self):
pser = pd.Series([10, 20, 15, 30, 45], ["A", "A", "B", "C", "D"])
kser = ks.Series(pser)
self.assert_eq(kser["A"], pser["A"])
self.assert_eq(kser["B"], pser["B"])
self.assert_eq(kser[kser > 15], pser[pser > 15])
# for MultiIndex
midx = pd.MultiIndex(
[["a", "b", "c"], ["lama", "cow", "falcon"], ["speed", "weight", "length"]],
[[0, 0, 0, 0, 0, 0, 1, 1, 1], [0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 0, 0, 0, 1, 2, 0, 1, 2]],
)
pser = pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], name="0", index=midx)
kser = ks.Series(pser)
self.assert_eq(kser["a"], pser["a"])
self.assert_eq(kser["a", "lama"], pser["a", "lama"])
self.assert_eq(kser[kser > 1.5], pser[pser > 1.5])
msg = r"'Key length \(4\) exceeds index depth \(3\)'"
with self.assertRaisesRegex(KeyError, msg):
kser[("a", "lama", "speed", "x")]
def test_keys(self):
midx = pd.MultiIndex(
[["lama", "cow", "falcon"], ["speed", "weight", "length"]],
[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
)
pser = pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx)
kser = ks.from_pandas(pser)
self.assert_eq(kser.keys(), pser.keys())
def test_index(self):
# Check that the name of the Index is set properly.
idx = pd.Index([1, 2, 3, 4, 5, 6, 7, 8, 9])
pser = pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=idx)
kser = ks.from_pandas(pser)
kser.name = "koalas"
pser.name = "koalas"
self.assert_eq(kser.index.name, pser.index.name)
# Check that the names of a MultiIndex are set properly.
kser.names = ["hello", "koalas"]
pser.names = ["hello", "koalas"]
self.assert_eq(kser.index.names, pser.index.names)
def test_pct_change(self):
pser = pd.Series([90, 91, 85], index=[2, 4, 1])
kser = ks.from_pandas(pser)
self.assert_eq(kser.pct_change(), pser.pct_change(), check_exact=False)
self.assert_eq(kser.pct_change(periods=2), pser.pct_change(periods=2), check_exact=False)
self.assert_eq(kser.pct_change(periods=-1), pser.pct_change(periods=-1), check_exact=False)
self.assert_eq(kser.pct_change(periods=-100000000), pser.pct_change(periods=-100000000))
self.assert_eq(kser.pct_change(periods=100000000), pser.pct_change(periods=100000000))
# for MultiIndex
midx = pd.MultiIndex(
[["lama", "cow", "falcon"], ["speed", "weight", "length"]],
[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
)
pser = pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx)
kser = ks.from_pandas(pser)
self.assert_eq(kser.pct_change(), pser.pct_change(), check_exact=False)
self.assert_eq(kser.pct_change(periods=2), pser.pct_change(periods=2), check_exact=False)
self.assert_eq(kser.pct_change(periods=-1), pser.pct_change(periods=-1), check_exact=False)
self.assert_eq(kser.pct_change(periods=-100000000), pser.pct_change(periods=-100000000))
self.assert_eq(kser.pct_change(periods=100000000), pser.pct_change(periods=100000000))
def test_axes(self):
pser = pd.Series([90, 91, 85], index=[2, 4, 1])
kser = ks.from_pandas(pser)
self.assert_list_eq(kser.axes, pser.axes)
# for MultiIndex
midx = pd.MultiIndex(
[["lama", "cow", "falcon"], ["speed", "weight", "length"]],
[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
)
pser = pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx)
kser = ks.from_pandas(pser)
self.assert_list_eq(kser.axes, pser.axes)
def test_combine_first(self):
pser1 = pd.Series({"falcon": 330.0, "eagle": 160.0})
pser2 = pd.Series({"falcon": 345.0, "eagle": 200.0, "duck": 30.0})
kser1 = ks.from_pandas(pser1)
kser2 = ks.from_pandas(pser2)
self.assert_eq(
kser1.combine_first(kser2).sort_index(), pser1.combine_first(pser2).sort_index()
)
with self.assertRaisesRegex(
ValueError, "`combine_first` only allows `Series` for parameter `other`"
):
kser1.combine_first(50)
kser1.name = ("X", "A")
kser2.name = ("Y", "B")
pser1.name = ("X", "A")
pser2.name = ("Y", "B")
self.assert_eq(
kser1.combine_first(kser2).sort_index(), pser1.combine_first(pser2).sort_index()
)
# MultiIndex
midx1 = pd.MultiIndex(
[["lama", "cow", "falcon", "koala"], ["speed", "weight", "length", "power"]],
[[0, 3, 1, 1, 1, 2, 2, 2], [0, 2, 0, 3, 2, 0, 1, 3]],
)
midx2 = pd.MultiIndex(
[["lama", "cow", "falcon"], ["speed", "weight", "length"]],
[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
)
pser1 = pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1], index=midx1)
pser2 = pd.Series([-45, 200, -1.2, 30, -250, 1.5, 320, 1, -0.3], index=midx2)
kser1 = ks.from_pandas(pser1)
kser2 = ks.from_pandas(pser2)
self.assert_eq(
kser1.combine_first(kser2).sort_index(), pser1.combine_first(pser2).sort_index()
)
# Series come from same DataFrame
pdf = pd.DataFrame(
{
"A": {"falcon": 330.0, "eagle": 160.0},
"B": {"falcon": 345.0, "eagle": 200.0, "duck": 30.0},
}
)
pser1 = pdf.A
pser2 = pdf.B
kser1 = ks.from_pandas(pser1)
kser2 = ks.from_pandas(pser2)
self.assert_eq(
kser1.combine_first(kser2).sort_index(), pser1.combine_first(pser2).sort_index()
)
kser1.name = ("X", "A")
kser2.name = ("Y", "B")
pser1.name = ("X", "A")
pser2.name = ("Y", "B")
self.assert_eq(
kser1.combine_first(kser2).sort_index(), pser1.combine_first(pser2).sort_index()
)
def test_udt(self):
sparse_values = {0: 0.1, 1: 1.1}
sparse_vector = SparseVector(len(sparse_values), sparse_values)
pser = pd.Series([sparse_vector])
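# For PySpark < 2.4, Arrow is disabled explicitly; Arrow-based conversion does not handle UDTs such as SparseVector.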
if LooseVersion(pyspark.__version__) < LooseVersion("2.4"):
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
kser = ks.from_pandas(pser)
self.assert_eq(kser, pser)
else:
kser = ks.from_pandas(pser)
self.assert_eq(kser, pser)
def test_repeat(self):
pser = pd.Series(["a", "b", "c"], name="0", index=np.random.rand(3))
kser = ks.from_pandas(pser)
self.assert_eq(kser.repeat(3).sort_index(), pser.repeat(3).sort_index())
self.assert_eq(kser.repeat(0).sort_index(), pser.repeat(0).sort_index())
self.assertRaises(ValueError, lambda: kser.repeat(-1))
self.assertRaises(ValueError, lambda: kser.repeat("abc"))
pdf = pd.DataFrame({"a": ["a", "b", "c"], "rep": [10, 20, 30]}, index=np.random.rand(3))
kdf = ks.from_pandas(pdf)
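# Repeating by a Series of counts is only supported with PySpark >= 2.4; older versions raise ValueError.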
if LooseVersion(pyspark.__version__) < LooseVersion("2.4"):
self.assertRaises(ValueError, lambda: kdf.a.repeat(kdf.rep))
else:
self.assert_eq(kdf.a.repeat(kdf.rep).sort_index(), pdf.a.repeat(pdf.rep).sort_index())
def test_take(self):
pser = pd.Series([100, 200, 300, 400, 500], name="Koalas")
kser = ks.from_pandas(pser)
self.assert_eq(kser.take([0, 2, 4]).sort_values(), pser.take([0, 2, 4]).sort_values())
self.assert_eq(
kser.take(range(0, 5, 2)).sort_values(), pser.take(range(0, 5, 2)).sort_values()
)
self.assert_eq(kser.take([-4, -2, 0]).sort_values(), pser.take([-4, -2, 0]).sort_values())
self.assert_eq(
kser.take(range(-2, 1, 2)).sort_values(), pser.take(range(-2, 1, 2)).sort_values()
)
# Checking the type of indices.
self.assertRaises(ValueError, lambda: kser.take(1))
self.assertRaises(ValueError, lambda: kser.take("1"))
self.assertRaises(ValueError, lambda: kser.take({1, 2}))
self.assertRaises(ValueError, lambda: kser.take({1: None, 2: None}))
def test_divmod(self):
pser = pd.Series([100, None, 300, None, 500], name="Koalas")
kser = ks.from_pandas(pser)
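# Series.divmod is only available in pandas >= 1.0.0; for older pandas, compare against floordiv/mod instead.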
if LooseVersion(pd.__version__) >= LooseVersion("1.0.0"):
kdiv, kmod = kser.divmod(-100)
pdiv, pmod = pser.divmod(-100)
self.assert_eq(kdiv, pdiv)
self.assert_eq(kmod, pmod)
kdiv, kmod = kser.divmod(100)
pdiv, pmod = pser.divmod(100)
self.assert_eq(kdiv, pdiv)
self.assert_eq(kmod, pmod)
elif LooseVersion(pd.__version__) < LooseVersion("1.0.0"):
kdiv, kmod = kser.divmod(-100)
pdiv, pmod = pser.floordiv(-100), pser.mod(-100)
self.assert_eq(kdiv, pdiv)
self.assert_eq(kmod, pmod)
kdiv, kmod = kser.divmod(100)
pdiv, pmod = pser.floordiv(100), pser.mod(100)
self.assert_eq(kdiv, pdiv)
self.assert_eq(kmod, pmod)
def test_rdivmod(self):
pser = pd.Series([100, None, 300, None, 500])
kser = ks.from_pandas(pser)
if LooseVersion(pd.__version__) >= LooseVersion("1.0.0"):
krdiv, krmod = kser.rdivmod(-100)
prdiv, prmod = pser.rdivmod(-100)
self.assert_eq(krdiv, prdiv)
self.assert_eq(krmod, prmod)
krdiv, krmod = kser.rdivmod(100)
prdiv, prmod = pser.rdivmod(100)
self.assert_eq(krdiv, prdiv)
self.assert_eq(krmod, prmod)
elif LooseVersion(pd.__version__) < LooseVersion("1.0.0"):
krdiv, krmod = kser.rdivmod(-100)
prdiv, prmod = pser.rfloordiv(-100), pser.rmod(-100)
self.assert_eq(krdiv, prdiv)
self.assert_eq(krmod, prmod)
krdiv, krmod = kser.rdivmod(100)
prdiv, prmod = pser.rfloordiv(100), pser.rmod(100)
self.assert_eq(krdiv, prdiv)
self.assert_eq(krmod, prmod)
def test_mod(self):
pser = pd.Series([100, None, -300, None, 500, -700], name="Koalas")
kser = ks.from_pandas(pser)
self.assert_eq(kser.mod(-150), pser.mod(-150))
self.assert_eq(kser.mod(0), pser.mod(0))
self.assert_eq(kser.mod(150), pser.mod(150))
pdf = pd.DataFrame({"a": [100, None, -300, None, 500, -700], "b": [150] * 6})
kdf = ks.from_pandas(pdf)
self.assert_eq(kdf.a.mod(kdf.b), pdf.a.mod(pdf.b))
def test_rmod(self):
pser = pd.Series([100, None, -300, None, 500, -700], name="Koalas")
kser = ks.from_pandas(pser)
self.assert_eq(kser.rmod(-150), pser.rmod(-150))
self.assert_eq(kser.rmod(0), pser.rmod(0))
self.assert_eq(kser.rmod(150), pser.rmod(150))
pdf = pd.DataFrame({"a": [100, None, -300, None, 500, -700], "b": [150] * 6})
kdf = ks.from_pandas(pdf)
self.assert_eq(kdf.a.rmod(kdf.b), pdf.a.rmod(pdf.b))
def test_asof(self):
pser = pd.Series([1, 2, np.nan, 4], index=[10, 20, 30, 40], name="Koalas")
kser = ks.from_pandas(pser)
self.assert_eq(kser.asof(20), pser.asof(20))
self.assert_eq(kser.asof([5, 20]).sort_index(), pser.asof([5, 20]).sort_index())
self.assert_eq(kser.asof(100), pser.asof(100))
self.assert_eq(repr(kser.asof(-100)), repr(pser.asof(-100)))
self.assert_eq(kser.asof([-100, 100]).sort_index(), pser.asof([-100, 100]).sort_index())
# where cannot be an Index, Series or a DataFrame
self.assertRaises(ValueError, lambda: kser.asof(ks.Index([-100, 100])))
self.assertRaises(ValueError, lambda: kser.asof(ks.Series([-100, 100])))
self.assertRaises(ValueError, lambda: kser.asof(ks.DataFrame({"A": [1, 2, 3]})))
# asof is not supported for a MultiIndex
pser.index = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c"), ("y", "d")])
kser = ks.from_pandas(pser)
self.assertRaises(ValueError, lambda: kser.asof(20))
# asof requires a sorted index (more precisely, a monotonically increasing index)
kser = ks.Series([1, 2, np.nan, 4], index=[10, 30, 20, 40], name="Koalas")
self.assertRaises(ValueError, lambda: kser.asof(20))
kser = ks.Series([1, 2, np.nan, 4], index=[40, 30, 20, 10], name="Koalas")
self.assertRaises(ValueError, lambda: kser.asof(20))
def test_squeeze(self):
# Single value
pser = pd.Series([90])
kser = ks.from_pandas(pser)
self.assert_eq(kser.squeeze(), pser.squeeze())
# Single value with MultiIndex
midx = pd.MultiIndex.from_tuples([("a", "b", "c")])
pser = pd.Series([90], index=midx)
kser = ks.from_pandas(pser)
self.assert_eq(kser.squeeze(), pser.squeeze())
# Multiple values
pser = pd.Series([90, 91, 85])
kser = ks.from_pandas(pser)
self.assert_eq(kser.squeeze(), pser.squeeze())
# Multiple values with MultiIndex
midx = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")])
pser = pd.Series([90, 91, 85], index=midx)
kser = ks.from_pandas(pser)
self.assert_eq(kser.squeeze(), pser.squeeze())
def test_div_zero_and_nan(self):
pser = pd.Series([100, None, -300, None, 500, -700, np.inf, -np.inf], name="Koalas")
kser = ks.from_pandas(pser)
self.assert_eq(pser.div(0), kser.div(0))
self.assert_eq(pser.truediv(0), kser.truediv(0))
self.assert_eq(pser / 0, kser / 0)
self.assert_eq(pser.div(np.nan), kser.div(np.nan))
self.assert_eq(pser.truediv(np.nan), kser.truediv(np.nan))
self.assert_eq(pser / np.nan, kser / np.nan)
# floordiv has different behavior in pandas > 1.0.0 when divide by 0
if LooseVersion(pd.__version__) >= LooseVersion("1.0.0"):
self.assert_eq(pser.floordiv(0), kser.floordiv(0))
self.assert_eq(pser // 0, kser // 0)
else:
result = pd.Series(
[np.inf, np.nan, -np.inf, np.nan, np.inf, -np.inf, np.inf, -np.inf], name="Koalas"
)
self.assert_eq(kser.floordiv(0), result)
self.assert_eq(kser // 0, result)
self.assert_eq(pser.floordiv(np.nan), kser.floordiv(np.nan))
def test_mad(self):
pser = pd.Series([1, 2, 3, 4], name="Koalas")
kser = ks.from_pandas(pser)
self.assert_eq(pser.mad(), kser.mad())
pser = pd.Series([None, -2, 5, 10, 50, np.nan, -20], name="Koalas")
kser = ks.from_pandas(pser)
self.assert_eq(pser.mad(), kser.mad())
pmidx = pd.MultiIndex.from_tuples(
[("a", "1"), ("a", "2"), ("b", "1"), ("b", "2"), ("c", "1")]
)
pser = pd.Series([1, 2, 3, 4, 5], name="Koalas")
pser.index = pmidx
kser = ks.from_pandas(pser)
self.assert_eq(pser.mad(), kser.mad())
pmidx = pd.MultiIndex.from_tuples(
[("a", "1"), ("a", "2"), ("b", "1"), ("b", "2"), ("c", "1")]
)
pser = pd.Series([None, -2, 5, 50, np.nan], name="Koalas")
pser.index = pmidx
kser = ks.from_pandas(pser)
self.assert_eq(pser.mad(), kser.mad())
def test_to_frame(self):
pser = pd.Series(["a", "b", "c"])
kser = ks.from_pandas(pser)
self.assert_eq(pser.to_frame(name="a"), kser.to_frame(name="a"))
# for MultiIndex
midx = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")])
pser = pd.Series(["a", "b", "c"], index=midx)
kser = ks.from_pandas(pser)
self.assert_eq(pser.to_frame(name="a"), kser.to_frame(name="a"))
def test_shape(self):
pser = pd.Series(["a", "b", "c"])
kser = ks.from_pandas(pser)
self.assert_eq(pser.shape, kser.shape)
# for MultiIndex
midx = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")])
pser = pd.Series(["a", "b", "c"], index=midx)
kser = ks.from_pandas(pser)
self.assert_eq(pser.shape, kser.shape)
def test_to_markdown(self):
pser = pd.Series(["elk", "pig", "dog", "quetzal"], name="animal")
kser = ks.from_pandas(pser)
# `to_markdown()` is only supported in pandas >= 1.0.0, where it was first added.
if LooseVersion(pd.__version__) < LooseVersion("1.0.0"):
self.assertRaises(NotImplementedError, lambda: kser.to_markdown())
else:
self.assert_eq(pser.to_markdown(), kser.to_markdown())
def test_unstack(self):
pser = pd.Series(
[10, -2, 4, 7],
index=pd.MultiIndex.from_tuples(
[("one", "a", "z"), ("one", "b", "x"), ("two", "a", "c"), ("two", "b", "v")],
names=["A", "B", "C"],
),
)
kser = ks.from_pandas(pser)
levels = [-3, -2, -1, 0, 1, 2]
for level in levels:
pandas_result = pser.unstack(level=level)
koalas_result = kser.unstack(level=level).sort_index()
self.assert_eq(pandas_result, koalas_result)
self.assert_eq(pandas_result.index.names, koalas_result.index.names)
self.assert_eq(pandas_result.columns.names, koalas_result.columns.names)
# non-numeric datatypes
pser = pd.Series(
list("abcd"), index=pd.MultiIndex.from_product([["one", "two"], ["a", "b"]])
)
kser = ks.from_pandas(pser)
levels = [-2, -1, 0, 1]
for level in levels:
pandas_result = pser.unstack(level=level)
koalas_result = kser.unstack(level=level).sort_index()
self.assert_eq(pandas_result, koalas_result)
self.assert_eq(pandas_result.index.names, koalas_result.index.names)
self.assert_eq(pandas_result.columns.names, koalas_result.columns.names)
# Exceeding the range of level
self.assertRaises(IndexError, lambda: kser.unstack(level=3))
self.assertRaises(IndexError, lambda: kser.unstack(level=-4))
# Only support for MultiIndex
kser = ks.Series([10, -2, 4, 7])
self.assertRaises(ValueError, lambda: kser.unstack())
def test_item(self):
kser = ks.Series([10, 20])
self.assertRaises(ValueError, lambda: kser.item())
def test_filter(self):
pser = pd.Series([0, 1, 2], index=["one", "two", "three"])
kser = ks.from_pandas(pser)
self.assert_eq(pser.filter(items=["one", "three"]), kser.filter(items=["one", "three"]))
self.assert_eq(pser.filter(regex="e$"), kser.filter(regex="e$"))
self.assert_eq(pser.filter(like="hre"), kser.filter(like="hre"))
with self.assertRaisesRegex(ValueError, "Series does not support columns axis."):
kser.filter(like="hre", axis=1)
# for MultiIndex
midx = pd.MultiIndex.from_tuples([("one", "x"), ("two", "y"), ("three", "z")])
pser = pd.Series([0, 1, 2], index=midx)
kser = ks.from_pandas(pser)
self.assert_eq(
pser.filter(items=[("one", "x"), ("three", "z")]),
kser.filter(items=[("one", "x"), ("three", "z")]),
)
with self.assertRaisesRegex(TypeError, "Unsupported type <class 'list'>"):
kser.filter(items=[["one", "x"], ("three", "z")])
with self.assertRaisesRegex(ValueError, "The item should not be empty."):
kser.filter(items=[(), ("three", "z")])
def test_abs(self):
pser = pd.Series([-2, -1, 0, 1])
kser = ks.from_pandas(pser)
self.assert_eq(abs(kser), abs(pser))
self.assert_eq(np.abs(kser), np.abs(pser))
def test_bfill(self):
pdf = pd.DataFrame({"x": [np.nan, 2, 3, 4, np.nan, 6], "y": [np.nan, 2, 3, 4, np.nan, 6]})
kdf = ks.from_pandas(pdf)
pser = pdf.x
kser = kdf.x
self.assert_eq(kser.bfill(), pser.bfill())
self.assert_eq(kser.bfill()[0], pser.bfill()[0])
kser.bfill(inplace=True)
pser.bfill(inplace=True)
self.assert_eq(kser, pser)
self.assert_eq(kser[0], pser[0])
self.assert_eq(kdf, pdf)
def test_ffill(self):
pdf = pd.DataFrame({"x": [np.nan, 2, 3, 4, np.nan, 6], "y": [np.nan, 2, 3, 4, np.nan, 6]})
kdf = ks.from_pandas(pdf)
pser = pdf.x
kser = kdf.x
self.assert_eq(kser.ffill(), pser.ffill())
self.assert_eq(kser.ffill()[4], pser.ffill()[4])
kser.ffill(inplace=True)
pser.ffill(inplace=True)
self.assert_eq(kser, pser)
self.assert_eq(kser[4], pser[4])
self.assert_eq(kdf, pdf)
def test_iteritems(self):
pser = pd.Series(["A", "B", "C"])
kser = ks.from_pandas(pser)
for (p_name, p_items), (k_name, k_items) in zip(pser.iteritems(), kser.iteritems()):
self.assert_eq(p_name, k_name)
self.assert_eq(p_items, k_items)
def test_droplevel(self):
# droplevel is new in pandas 0.24.0
if LooseVersion(pd.__version__) >= LooseVersion("0.24.0"):
pser = pd.Series(
[1, 2, 3],
index=pd.MultiIndex.from_tuples(
[("x", "a", "q"), ("x", "b", "w"), ("y", "c", "e")],
names=["level_1", "level_2", "level_3"],
),
)
kser = ks.from_pandas(pser)
self.assert_eq(pser.droplevel(0), kser.droplevel(0))
self.assert_eq(pser.droplevel("level_1"), kser.droplevel("level_1"))
self.assert_eq(pser.droplevel(-1), kser.droplevel(-1))
self.assert_eq(pser.droplevel([0]), kser.droplevel([0]))
self.assert_eq(pser.droplevel(["level_1"]), kser.droplevel(["level_1"]))
self.assert_eq(pser.droplevel((0,)), kser.droplevel((0,)))
self.assert_eq(pser.droplevel(("level_1",)), kser.droplevel(("level_1",)))
self.assert_eq(pser.droplevel([0, 2]), kser.droplevel([0, 2]))
self.assert_eq(
pser.droplevel(["level_1", "level_3"]), kser.droplevel(["level_1", "level_3"])
)
self.assert_eq(pser.droplevel((1, 2)), kser.droplevel((1, 2)))
self.assert_eq(
pser.droplevel(("level_2", "level_3")), kser.droplevel(("level_2", "level_3"))
)
with self.assertRaisesRegex(KeyError, "Level {0, 1, 2} not found"):
kser.droplevel({0, 1, 2})
with self.assertRaisesRegex(KeyError, "Level level_100 not found"):
kser.droplevel(["level_1", "level_100"])
with self.assertRaisesRegex(
IndexError, "Too many levels: Index has only 3 levels, not 11"
):
kser.droplevel(10)
with self.assertRaisesRegex(
IndexError,
"Too many levels: Index has only 3 levels, -10 is not a valid level number",
):
kser.droplevel(-10)
with self.assertRaisesRegex(
ValueError,
"Cannot remove 3 levels from an index with 3 levels: "
"at least one level must be left.",
):
kser.droplevel([0, 1, 2])
with self.assertRaisesRegex(
ValueError,
"Cannot remove 5 levels from an index with 3 levels: "
"at least one level must be left.",
):
kser.droplevel([1, 1, 1, 1, 1])
# Tupled names
pser.index.names = [("a", "1"), ("b", "2"), ("c", "3")]
kser = ks.from_pandas(pser)
self.assert_eq(
pser.droplevel([("a", "1"), ("c", "3")]), kser.droplevel([("a", "1"), ("c", "3")])
)
@unittest.skipIf(
LooseVersion(pyspark.__version__) < LooseVersion("3.0"),
"tail won't work properly with PySpark<3.0",
)
def test_tail(self):
pser = pd.Series(range(1000), name="Koalas")
kser = ks.from_pandas(pser)
self.assert_eq(pser.tail(), kser.tail())
self.assert_eq(pser.tail(10), kser.tail(10))
self.assert_eq(pser.tail(-990), kser.tail(-990))
self.assert_eq(pser.tail(0), kser.tail(0))
self.assert_eq(pser.tail(1001), kser.tail(1001))
self.assert_eq(pser.tail(-1001), kser.tail(-1001))
with self.assertRaisesRegex(TypeError, "bad operand type for unary -: 'str'"):
kser.tail("10")
def test_product(self):
pser = pd.Series([10, 20, 30, 40, 50])
kser = ks.from_pandas(pser)
self.assert_eq(pser.prod(), kser.prod())
# Containing NA values
pser = pd.Series([10, np.nan, 30, np.nan, 50])
kser = ks.from_pandas(pser)
self.assert_eq(pser.prod(), kser.prod(), almost=True)
# All-NA values
pser = pd.Series([np.nan, np.nan, np.nan])
kser = ks.from_pandas(pser)
self.assert_eq(pser.prod(), kser.prod())
# Empty Series
pser = pd.Series([])
kser = ks.from_pandas(pser)
self.assert_eq(pser.prod(), kser.prod())
# Boolean Series
pser = pd.Series([True, True, True])
kser = ks.from_pandas(pser)
self.assert_eq(pser.prod(), kser.prod())
pser = pd.Series([False, False, False])
kser = ks.from_pandas(pser)
self.assert_eq(pser.prod(), kser.prod())
pser = pd.Series([True, False, True])
kser = ks.from_pandas(pser)
self.assert_eq(pser.prod(), kser.prod())
# With `min_count` parameter
pser = pd.Series([10, 20, 30, 40, 50])
kser = ks.from_pandas(pser)
self.assert_eq(pser.prod(min_count=5), kser.prod(min_count=5))
# Use `repr` since the result below will be `np.nan`.
self.assert_eq(repr(pser.prod(min_count=6)), repr(kser.prod(min_count=6)))
pser = pd.Series([10, np.nan, 30, np.nan, 50])
kser = ks.from_pandas(pser)
self.assert_eq(pser.prod(min_count=3), kser.prod(min_count=3), almost=True)
# ditto.
self.assert_eq(repr(pser.prod(min_count=4)), repr(kser.prod(min_count=4)))
pser = pd.Series([np.nan, np.nan, np.nan])
kser = ks.from_pandas(pser)
# ditto.
self.assert_eq(repr(pser.prod(min_count=1)), repr(kser.prod(min_count=1)))
pser = pd.Series([])
kser = ks.from_pandas(pser)
# ditto.
self.assert_eq(repr(pser.prod(min_count=1)), repr(kser.prod(min_count=1)))
with self.assertRaisesRegex(TypeError, "cannot perform prod with type object"):
ks.Series(["a", "b", "c"]).prod()
with self.assertRaisesRegex(TypeError, "cannot perform prod with type datetime64"):
ks.Series([pd.Timestamp("2016-01-01") for _ in range(3)]).prod()
def test_hasnans(self):
# BooleanType
pser = pd.Series([True, False, True, True])
kser = ks.from_pandas(pser)
self.assert_eq(pser.hasnans, kser.hasnans)
pser = pd.Series([True, False, np.nan, True])
kser = ks.from_pandas(pser)
self.assert_eq(pser.hasnans, kser.hasnans)
# TimestampType
pser = pd.Series([pd.Timestamp("2020-07-30") for _ in range(3)])
kser = ks.from_pandas(pser)
self.assert_eq(pser.hasnans, kser.hasnans)
pser = pd.Series([pd.Timestamp("2020-07-30"), np.nan, pd.Timestamp("2020-07-30")])
kser = ks.from_pandas(pser)
self.assert_eq(pser.hasnans, kser.hasnans)
def test_last_valid_index(self):
# `pyspark.sql.dataframe.DataFrame.tail` is new in pyspark >= 3.0.
if LooseVersion(pyspark.__version__) >= LooseVersion("3.0"):
pser = pd.Series([250, 1.5, 320, 1, 0.3, None, None, None, None])
kser = ks.from_pandas(pser)
self.assert_eq(pser.last_valid_index(), kser.last_valid_index())
# MultiIndex columns
midx = pd.MultiIndex(
[["lama", "cow", "falcon"], ["speed", "weight", "length"]],
[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
)
pser.index = midx
kser = ks.from_pandas(pser)
self.assert_eq(pser.last_valid_index(), kser.last_valid_index())
# Empty Series
pser = pd.Series([])
kser = ks.from_pandas(pser)
self.assert_eq(pser.last_valid_index(), kser.last_valid_index())
def test_first_valid_index(self):
# Empty Series
pser = pd.Series([])
kser = ks.from_pandas(pser)
self.assert_eq(pser.first_valid_index(), kser.first_valid_index())
def test_explode(self):
if LooseVersion(pd.__version__) >= LooseVersion("0.25"):
pser = pd.Series([[1, 2, 3], [], None, [3, 4]])
kser = ks.from_pandas(pser)
self.assert_eq(pser.explode(), kser.explode(), almost=True)
# MultiIndex
pser.index = pd.MultiIndex.from_tuples([("a", "w"), ("b", "x"), ("c", "y"), ("d", "z")])
kser = ks.from_pandas(pser)
self.assert_eq(pser.explode(), kser.explode(), almost=True)
# non-array type Series
pser = pd.Series([1, 2, 3, 4])
kser = ks.from_pandas(pser)
self.assert_eq(pser.explode(), kser.explode())
else:
pser = pd.Series([[1, 2, 3], [], None, [3, 4]])
kser = ks.from_pandas(pser)
expected = pd.Series([1.0, 2.0, 3.0, None, None, 3.0, 4.0], index=[0, 0, 0, 1, 2, 3, 3])
self.assert_eq(kser.explode(), expected)
# MultiIndex
pser.index = pd.MultiIndex.from_tuples([("a", "w"), ("b", "x"), ("c", "y"), ("d", "z")])
kser = ks.from_pandas(pser)
expected = pd.Series(
[1.0, 2.0, 3.0, None, None, 3.0, 4.0],
index=pd.MultiIndex.from_tuples(
[
("a", "w"),
("a", "w"),
("a", "w"),
("b", "x"),
("c", "y"),
("d", "z"),
("d", "z"),
]
),
)
self.assert_eq(kser.explode(), expected)
# non-array type Series
pser = pd.Series([1, 2, 3, 4])
kser = ks.from_pandas(pser)
expected = pser
self.assert_eq(kser.explode(), expected)
| 1 | 16,402 | What if the Series contains null values? | databricks-koalas | py |
@@ -177,14 +177,11 @@ namespace Microsoft.CodeAnalysis.Sarif.Driver.Sdk
{
new Location
{
- AnalysisTarget = new List<PhysicalLocationComponent>
- {
- new PhysicalLocationComponent
- {
- Uri = TestAnalysisTarget.CreateUriForJsonSerialization(),
- Region = region
- },
- }
+ AnalysisTarget = new PhysicalLocation
+ {
+ Uri = TestAnalysisTarget.CreateUriForJsonSerialization(),
+ Region = region
+ }
}
},
FormattedMessage = new FormattedMessage | 1 | // Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
using System.Collections.Generic;
using FluentAssertions;
using Xunit;
namespace Microsoft.CodeAnalysis.Sarif.Driver.Sdk
{
// These tests test the extension method Result.FormatForVisualStudio.
// But by providing various Region objects and ResultKind values, they
// also exercise Region.FormatForVisualStudio and ResultKind.FormatForVisualStudio.
public class FormatForVisualStudioTests
{
private const string TestRuleId = "TST0001";
private const string TestFormatSpecifier = "testFormatSpecifier";
private const string TestAnalysisTarget = @"C:\dir\file";
private static readonly string DisplayedTarget = TestAnalysisTarget.Replace('\\', '/');
private static readonly RuleDescriptor TestRule = new RuleDescriptor(
TestRuleId,
"ThisIsATest",
"short description",
"full description",
null, // options
new Dictionary<string, string>
{
[TestFormatSpecifier] = "First: {0}, Second: {1}"
},
null, // helpUri
null, // properties
null); // tags
private static readonly Region MultiLineTestRegion = new Region
{
StartLine = 2,
StartColumn = 4,
EndLine = 3,
EndColumn = 5
};
private static readonly Region SingleLineMultiColumnTestRegion = new Region
{
StartLine = 2,
StartColumn = 4,
EndLine = 2,
EndColumn = 5
};
private static readonly Region SingleLineSingleColumnTestRegion = new Region
{
StartLine = 2,
StartColumn = 4
};
private static readonly Region SingleLineNoColumnTestRegion = new Region
{
StartLine = 2
};
private static readonly Region MultiLineNoColumnTestRegion = new Region
{
StartLine = 2,
EndLine = 3
};
public static IEnumerable<object[]> ResultFormatForVisualStudioTestCases => new[]
{
// Test each ResultKind value.
new object[]
{
ResultKind.Error,
MultiLineTestRegion,
$"{DisplayedTarget}(2,4,3,5): error {TestRuleId}: First: 42, Second: 54"
},
new object[]
{
ResultKind.ConfigurationError,
MultiLineTestRegion,
$"{DisplayedTarget}(2,4,3,5): error {TestRuleId}: First: 42, Second: 54"
},
new object[]
{
ResultKind.InternalError,
MultiLineTestRegion,
$"{DisplayedTarget}(2,4,3,5): error {TestRuleId}: First: 42, Second: 54"
},
new object[]
{
ResultKind.Warning,
MultiLineTestRegion,
$"{DisplayedTarget}(2,4,3,5): warning {TestRuleId}: First: 42, Second: 54"
},
new object[]
{
ResultKind.NotApplicable,
MultiLineTestRegion,
$"{DisplayedTarget}(2,4,3,5): info {TestRuleId}: First: 42, Second: 54"
},
new object[]
{
ResultKind.Note,
MultiLineTestRegion,
$"{DisplayedTarget}(2,4,3,5): info {TestRuleId}: First: 42, Second: 54"
},
new object[]
{
ResultKind.Pass,
MultiLineTestRegion,
$"{DisplayedTarget}(2,4,3,5): info {TestRuleId}: First: 42, Second: 54"
},
new object[]
{
ResultKind.Unknown,
MultiLineTestRegion,
$"{DisplayedTarget}(2,4,3,5): info {TestRuleId}: First: 42, Second: 54"
},
// Test formatting of a single-line multi-column region (previous tests used a multi-line region).
new object[]
{
ResultKind.Error,
SingleLineMultiColumnTestRegion,
$"{DisplayedTarget}(2,4-5): error {TestRuleId}: First: 42, Second: 54"
},
// Test formatting of a single-line single-column region.
new object[]
{
ResultKind.Error,
SingleLineSingleColumnTestRegion,
$"{DisplayedTarget}(2,4): error {TestRuleId}: First: 42, Second: 54"
},
// Test formatting of a single-line region with no column specified.
new object[]
{
ResultKind.Error,
SingleLineNoColumnTestRegion,
$"{DisplayedTarget}(2): error {TestRuleId}: First: 42, Second: 54"
},
// Test formatting of a multi-line region with no columns specified.
new object[]
{
ResultKind.Error,
MultiLineNoColumnTestRegion,
$"{DisplayedTarget}(2-3): error {TestRuleId}: First: 42, Second: 54"
},
};
[Theory]
[MemberData(nameof(ResultFormatForVisualStudioTestCases))]
public void Result_FormatForVisualStudioTests(ResultKind kind, Region region, string expected)
{
Result result = MakeResultFromTestCase(kind, region);
string actual = result.FormatForVisualStudio(TestRule);
actual.Should().Be(expected);
}
private Result MakeResultFromTestCase(ResultKind kind, Region region)
{
return new Result
{
RuleId = TestRuleId,
Kind = kind,
Locations = new List<Location>
{
new Location
{
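// Reviewer context (comment added for illustration, not in the original file):
// in the newer SARIF object model this member collapses to a single object,
// roughly:
//   AnalysisTarget = new PhysicalLocation { Uri = ..., Region = region }
// where Uri acts as the key into the run's FileInfo dictionary.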
AnalysisTarget = new List<PhysicalLocationComponent>
{
new PhysicalLocationComponent
{
Uri = TestAnalysisTarget.CreateUriForJsonSerialization(),
Region = region
},
}
}
},
FormattedMessage = new FormattedMessage
{
SpecifierId = TestFormatSpecifier,
Arguments = new List<string>
{
"42",
"54"
}
}
};
}
}
}
| 1 | 10,167 | Every place you see a change like this, it's because `PhysicalLocation` is now its own object, not an array of `PhysicalLocationComponent` objects. The `Uri` is the key into the `FileInfo` dictionary. | microsoft-sarif-sdk | .cs |
@@ -7,7 +7,7 @@ from mmdet.core.bbox.iou_calculators import bbox_overlaps
from mmdet.models import HEADS
from mmdet.models.dense_heads import ATSSHead
-eps = 1e-12
+EPS = 1e-12
try:
import sklearn.mixture as skm
except ImportError: | 1 | import numpy as np
import torch
from mmcv.runner import force_fp32
from mmdet.core import multi_apply, multiclass_nms
from mmdet.core.bbox.iou_calculators import bbox_overlaps
from mmdet.models import HEADS
from mmdet.models.dense_heads import ATSSHead
eps = 1e-12
try:
import sklearn.mixture as skm
except ImportError:
skm = None
def levels_to_images(mlvl_tensor):
"""Concat multi-level feature maps by image.
[feature_level0, feature_level1...] -> [feature_image0, feature_image1...]
Convert the shape of each element in mlvl_tensor from (N, C, H, W) to
(N, H*W , C), then split the element to N elements with shape (H*W, C), and
concat elements in same image of all level along first dimension.
Args:
mlvl_tensor (list[torch.Tensor]): list of Tensor which collect from
corresponding level. Each element is of shape (N, C, H, W)
Returns:
list[torch.Tensor]: A list that contains N tensors and each tensor is
of shape (num_elements, C)
"""
batch_size = mlvl_tensor[0].size(0)
batch_list = [[] for _ in range(batch_size)]
channels = mlvl_tensor[0].size(1)
for t in mlvl_tensor:
t = t.permute(0, 2, 3, 1)
t = t.view(batch_size, -1, channels).contiguous()
for img in range(batch_size):
batch_list[img].append(t[img])
return [torch.cat(item, 0) for item in batch_list]
@HEADS.register_module()
class PAAHead(ATSSHead):
"""Head of PAAAssignment: Probabilistic Anchor Assignment with IoU
Prediction for Object Detection.
Code is modified from the `official github repo
<https://github.com/kkhoot/PAA/blob/master/paa_core
/modeling/rpn/paa/loss.py>`_.
More details can be found in the `paper
<https://arxiv.org/abs/2007.08103>`_ .
Args:
topk (int): Select topk samples with smallest loss in
each level.
score_voting (bool): Whether to use score voting in post-process.
"""
def __init__(self, *args, topk=9, score_voting=True, **kwargs):
# topk used in paa reassign process
self.topk = topk
self.with_score_voting = score_voting
super(PAAHead, self).__init__(*args, **kwargs)
@force_fp32(apply_to=('cls_scores', 'bbox_preds', 'iou_preds'))
def loss(self,
cls_scores,
bbox_preds,
iou_preds,
gt_bboxes,
gt_labels,
img_metas,
gt_bboxes_ignore=None):
"""Compute losses of the head.
Args:
cls_scores (list[Tensor]): Box scores for each scale level
Has shape (N, num_anchors * num_classes, H, W)
bbox_preds (list[Tensor]): Box energies / deltas for each scale
level with shape (N, num_anchors * 4, H, W)
iou_preds (list[Tensor]): iou_preds for each scale
level with shape (N, num_anchors * 1, H, W)
gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): class indices corresponding to each box
img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
gt_bboxes_ignore (list[Tensor] | None): Specify which bounding
boxes can be ignored when computing the loss.
Returns:
dict[str, Tensor]: A dictionary of loss gmm_assignment.
"""
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
assert len(featmap_sizes) == self.anchor_generator.num_levels
device = cls_scores[0].device
anchor_list, valid_flag_list = self.get_anchors(
featmap_sizes, img_metas, device=device)
label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
cls_reg_targets = self.get_targets(
anchor_list,
valid_flag_list,
gt_bboxes,
img_metas,
gt_bboxes_ignore_list=gt_bboxes_ignore,
gt_labels_list=gt_labels,
label_channels=label_channels,
)
(labels, labels_weight, bboxes_target, bboxes_weight, pos_inds,
pos_gt_index) = cls_reg_targets
cls_scores = levels_to_images(cls_scores)
cls_scores = [
item.reshape(-1, self.cls_out_channels) for item in cls_scores
]
bbox_preds = levels_to_images(bbox_preds)
bbox_preds = [item.reshape(-1, 4) for item in bbox_preds]
iou_preds = levels_to_images(iou_preds)
iou_preds = [item.reshape(-1, 1) for item in iou_preds]
pos_losses_list, = multi_apply(self.get_pos_loss, anchor_list,
cls_scores, bbox_preds, labels,
labels_weight, bboxes_target,
bboxes_weight, pos_inds)
with torch.no_grad():
labels, label_weights, bbox_weights, num_pos = multi_apply(
self.paa_reassign,
pos_losses_list,
labels,
labels_weight,
bboxes_weight,
pos_inds,
pos_gt_index,
anchor_list,
)
num_pos = sum(num_pos)
if num_pos == 0:
num_pos = len(img_metas)
# convert all tensor list to a flatten tensor
cls_scores = torch.cat(cls_scores, 0).view(-1, cls_scores[0].size(-1))
bbox_preds = torch.cat(bbox_preds, 0).view(-1, bbox_preds[0].size(-1))
iou_preds = torch.cat(iou_preds, 0).view(-1, iou_preds[0].size(-1))
labels = torch.cat(labels, 0).view(-1)
flatten_anchors = torch.cat(
[torch.cat(item, 0) for item in anchor_list])
labels_weight = torch.cat(labels_weight, 0).view(-1)
bboxes_target = torch.cat(bboxes_target,
0).view(-1, bboxes_target[0].size(-1))
pos_inds_flatten = (
(labels >= 0)
& (labels < self.background_label)).nonzero().reshape(-1)
losses_cls = self.loss_cls(
cls_scores, labels, labels_weight, avg_factor=num_pos)
if num_pos:
pos_bbox_pred = self.bbox_coder.decode(
flatten_anchors[pos_inds_flatten],
bbox_preds[pos_inds_flatten])
pos_bbox_target = bboxes_target[pos_inds_flatten]
iou_target = bbox_overlaps(
pos_bbox_pred.detach(), pos_bbox_target, is_aligned=True)
losses_iou = self.loss_centerness(
iou_preds[pos_inds_flatten],
iou_target.unsqueeze(-1),
avg_factor=num_pos)
losses_bbox = self.loss_bbox(
pos_bbox_pred,
pos_bbox_target,
iou_target.clamp(min=eps),
avg_factor=iou_target.sum())
else:
losses_iou = iou_preds.sum() * 0
losses_bbox = bbox_preds.sum() * 0
return dict(
loss_cls=losses_cls, loss_bbox=losses_bbox, loss_iou=losses_iou)
def get_pos_loss(self, anchors, cls_score, bbox_pred, label, label_weight,
bbox_target, bbox_weight, pos_inds):
"""Calculate loss of all potential positive samples obtained from first
match process.
Args:
anchors (list[Tensor]): Anchors of each scale.
cls_score (Tensor): Box scores of single image with shape
(num_anchors, num_classes)
bbox_pred (Tensor): Box energies / deltas of single image
with shape (num_anchors, 4)
label (Tensor): classification target of each anchor with
shape (num_anchors,)
label_weight (Tensor): Classification loss weight of each
anchor with shape (num_anchors).
bbox_target (dict): Regression target of each anchor with
shape (num_anchors, 4).
bbox_weight (Tensor): Bbox weight of each anchor with shape
(num_anchors, 4).
pos_inds (Tensor): Index of all positive samples got from
first assign process.
Returns:
Tensor: Losses of all positive samples in single image.
"""
if not len(pos_inds):
return cls_score.new([]),
anchors_all_level = torch.cat(anchors, 0)
pos_scores = cls_score[pos_inds]
pos_bbox_pred = bbox_pred[pos_inds]
pos_label = label[pos_inds]
pos_label_weight = label_weight[pos_inds]
pos_bbox_target = bbox_target[pos_inds]
pos_bbox_weight = bbox_weight[pos_inds]
pos_anchors = anchors_all_level[pos_inds]
pos_bbox_pred = self.bbox_coder.decode(pos_anchors, pos_bbox_pred)
# to keep loss dimension
loss_cls = self.loss_cls(
pos_scores,
pos_label,
pos_label_weight,
avg_factor=self.loss_cls.loss_weight,
reduction_override='none')
loss_bbox = self.loss_bbox(
pos_bbox_pred,
pos_bbox_target,
pos_bbox_weight,
avg_factor=self.loss_cls.loss_weight,
reduction_override='none')
loss_cls = loss_cls.sum(-1)
pos_loss = loss_bbox + loss_cls
return pos_loss,
def paa_reassign(self, pos_losses, label, label_weight, bbox_weight,
pos_inds, pos_gt_inds, anchors):
"""Fit loss to GMM distribution and separate positive, ignore, negative
samples again with GMM model.
Args:
pos_losses (Tensor): Losses of all positive samples in
single image.
label (Tensor): classification target of each anchor with
shape (num_anchors,)
label_weight (Tensor): Classification loss weight of each
anchor with shape (num_anchors).
bbox_weight (Tensor): Bbox weight of each anchor with shape
(num_anchors, 4).
pos_inds (Tensor): Index of all positive samples got from
first assign process.
pos_gt_inds (Tensor): Gt_index of all positive samples got
from first assign process.
anchors (list[Tensor]): Anchors of each scale.
Returns:
tuple: Usually returns a tuple containing learning targets.
- label (Tensor): classification target of each anchor after
paa assign, with shape (num_anchors,)
- label_weight (Tensor): Classification loss weight of each
anchor after paa assign, with shape (num_anchors).
- bbox_weight (Tensor): Bbox weight of each anchor with shape
(num_anchors, 4).
- num_pos (int): The number of positive samples after paa
assign.
"""
if not len(pos_inds):
return label, label_weight, bbox_weight, 0
num_gt = pos_gt_inds.max() + 1
num_level = len(anchors)
num_anchors_each_level = [item.size(0) for item in anchors]
num_anchors_each_level.insert(0, 0)
inds_level_interval = np.cumsum(num_anchors_each_level)
pos_level_mask = []
for i in range(num_level):
mask = (pos_inds >= inds_level_interval[i]) & (
pos_inds < inds_level_interval[i + 1])
pos_level_mask.append(mask)
pos_inds_after_paa = []
ignore_inds_after_paa = []
for gt_ind in range(num_gt):
pos_inds_gmm = []
pos_loss_gmm = []
gt_mask = pos_gt_inds == gt_ind
for level in range(num_level):
level_mask = pos_level_mask[level]
level_gt_mask = level_mask & gt_mask
value, topk_inds = pos_losses[level_gt_mask].topk(
min(level_gt_mask.sum(), self.topk), largest=False)
pos_inds_gmm.append(pos_inds[level_gt_mask][topk_inds])
pos_loss_gmm.append(value)
pos_inds_gmm = torch.cat(pos_inds_gmm)
pos_loss_gmm = torch.cat(pos_loss_gmm)
# GMM fitting needs at least two samples
if len(pos_inds_gmm) < 2:
continue
device = pos_inds_gmm.device
pos_loss_gmm, sort_inds = pos_loss_gmm.sort()
pos_inds_gmm = pos_inds_gmm[sort_inds]
pos_loss_gmm = pos_loss_gmm.view(-1, 1).cpu().numpy()
min_loss, max_loss = pos_loss_gmm.min(), pos_loss_gmm.max()
means_init = [[min_loss], [max_loss]]
weights_init = [0.5, 0.5]
precisions_init = [[[1.0]], [[1.0]]]
if skm is None:
raise ImportError('Please run "pip install sklearn" '
'to install sklearn first.')
gmm = skm.GaussianMixture(
2,
weights_init=weights_init,
means_init=means_init,
precisions_init=precisions_init)
gmm.fit(pos_loss_gmm)
gmm_assignment = gmm.predict(pos_loss_gmm)
scores = gmm.score_samples(pos_loss_gmm)
gmm_assignment = torch.from_numpy(gmm_assignment).to(device)
scores = torch.from_numpy(scores).to(device)
pos_inds_temp, ignore_inds_temp = self.gmm_separation_scheme(
gmm_assignment, scores, pos_inds_gmm)
pos_inds_after_paa.append(pos_inds_temp)
ignore_inds_after_paa.append(ignore_inds_temp)
pos_inds_after_paa = torch.cat(pos_inds_after_paa)
ignore_inds_after_paa = torch.cat(ignore_inds_after_paa)
reassign_mask = (pos_inds.unsqueeze(1) != pos_inds_after_paa).all(1)
reassign_ids = pos_inds[reassign_mask]
label[reassign_ids] = self.background_label
label_weight[ignore_inds_after_paa] = 0
bbox_weight[reassign_ids] = 0
num_pos = len(pos_inds_after_paa)
return label, label_weight, bbox_weight, num_pos
def gmm_separation_scheme(self, gmm_assignment, scores, pos_inds_gmm):
"""A general separation scheme for gmm model.
It separates a GMM distribution of candidate samples into three
parts, 0 1 and uncertain areas, and you can implement other
separation schemes by rewriting this function.
Args:
gmm_assignment (Tensor): The prediction of GMM which is of shape
(num_samples,). The 0/1 value indicates the distribution
that each sample comes from.
scores (Tensor): The probability of sample coming from the
fit GMM distribution. The tensor is of shape (num_samples,).
pos_inds_gmm (Tensor): All the indexes of samples which are used
to fit GMM model. The tensor is of shape (num_samples,)
Returns:
tuple[Tensor]: The indices of positive and ignored samples.
- pos_inds_temp (Tensor): Indices of positive samples.
- ignore_inds_temp (Tensor): Indices of ignore samples.
"""
# The implementation is (c) in Fig.3 of the original paper instead of (b).
# You can refer to issues such as
# https://github.com/kkhoot/PAA/issues/8 and
# https://github.com/kkhoot/PAA/issues/9.
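# In short (descriptive note added here): the candidates are already sorted
# by loss, so everything assigned to the low-loss component up to (and
# including) its highest-scoring member is kept as a positive, and the
# ignore set is left empty.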
fgs = gmm_assignment == 0
pos_inds_temp = fgs.new_tensor([], dtype=torch.long)
ignore_inds_temp = fgs.new_tensor([], dtype=torch.long)
if fgs.nonzero().numel():
_, pos_thr_ind = scores[fgs].topk(1)
pos_inds_temp = pos_inds_gmm[fgs][:pos_thr_ind + 1]
ignore_inds_temp = pos_inds_gmm.new_tensor([])
return pos_inds_temp, ignore_inds_temp
def get_targets(
self,
anchor_list,
valid_flag_list,
gt_bboxes_list,
img_metas,
gt_bboxes_ignore_list=None,
gt_labels_list=None,
label_channels=1,
unmap_outputs=True,
):
"""Get targets for PAA head.
This method is almost the same as `AnchorHead.get_targets()`. We directly
return the results from _get_targets_single instead of mapping them to
levels by the images_to_levels function.
Args:
anchor_list (list[list[Tensor]]): Multi level anchors of each
image. The outer list indicates images, and the inner list
corresponds to feature levels of the image. Each element of
the inner list is a tensor of shape (num_anchors, 4).
valid_flag_list (list[list[Tensor]]): Multi level valid flags of
each image. The outer list indicates images, and the inner list
corresponds to feature levels of the image. Each element of
the inner list is a tensor of shape (num_anchors, )
gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image.
img_metas (list[dict]): Meta info of each image.
gt_bboxes_ignore_list (list[Tensor]): Ground truth bboxes to be
ignored.
gt_labels_list (list[Tensor]): Ground truth labels of each box.
label_channels (int): Channel of label.
unmap_outputs (bool): Whether to map outputs back to the original
set of anchors.
Returns:
tuple: Usually returns a tuple containing learning targets.
- labels (list[Tensor]): Labels of all anchors, each with
shape (num_anchors,).
- label_weights (list[Tensor]): Label weights of all anchor.
each with shape (num_anchors,).
- bbox_targets (list[Tensor]): BBox targets of all anchors.
each with shape (num_anchors, 4).
- bbox_weights (list[Tensor]): BBox weights of all anchors.
each with shape (num_anchors, 4).
- pos_inds (list[Tensor]): Contains all index of positive
sample in all anchor.
- gt_inds (list[Tensor]): Contains all gt_index of positive
sample in all anchor.
"""
num_imgs = len(img_metas)
assert len(anchor_list) == len(valid_flag_list) == num_imgs
concat_anchor_list = []
concat_valid_flag_list = []
for i in range(num_imgs):
assert len(anchor_list[i]) == len(valid_flag_list[i])
concat_anchor_list.append(torch.cat(anchor_list[i]))
concat_valid_flag_list.append(torch.cat(valid_flag_list[i]))
# compute targets for each image
if gt_bboxes_ignore_list is None:
gt_bboxes_ignore_list = [None for _ in range(num_imgs)]
if gt_labels_list is None:
gt_labels_list = [None for _ in range(num_imgs)]
results = multi_apply(
self._get_targets_single,
concat_anchor_list,
concat_valid_flag_list,
gt_bboxes_list,
gt_bboxes_ignore_list,
gt_labels_list,
img_metas,
label_channels=label_channels,
unmap_outputs=unmap_outputs)
(labels, label_weights, bbox_targets, bbox_weights, valid_pos_inds,
valid_neg_inds, sampling_result) = results
# Due to valid flag of anchors, we have to calculate the real pos_inds
# in origin anchor set.
pos_inds = []
for i, single_labels in enumerate(labels):
pos_mask = (0 <= single_labels) & (
single_labels < self.background_label)
pos_inds.append(pos_mask.nonzero().view(-1))
gt_inds = [item.pos_assigned_gt_inds for item in sampling_result]
return (labels, label_weights, bbox_targets, bbox_weights, pos_inds,
gt_inds)
def _get_targets_single(self,
flat_anchors,
valid_flags,
gt_bboxes,
gt_bboxes_ignore,
gt_labels,
img_meta,
label_channels=1,
unmap_outputs=True):
"""Compute regression and classification targets for anchors in a
single image.
This method is same as `AnchorHead._get_targets_single()`.
"""
assert unmap_outputs, 'We must map outputs back to the original ' \
'set of anchors in PAAHead'
return super(ATSSHead, self)._get_targets_single(
flat_anchors,
valid_flags,
gt_bboxes,
gt_bboxes_ignore,
gt_labels,
img_meta,
label_channels=1,
unmap_outputs=True)
def _get_bboxes_single(self,
cls_scores,
bbox_preds,
iou_preds,
mlvl_anchors,
img_shape,
scale_factor,
cfg,
rescale=False):
"""Transform outputs for a single batch item into labeled boxes.
This method is almost the same as `ATSSHead._get_bboxes_single()`.
We use sqrt(iou_preds * cls_scores) in the NMS process instead of just
cls_scores. Besides, score voting is used when ``score_voting``
is set to True.
"""
assert len(cls_scores) == len(bbox_preds) == len(mlvl_anchors)
mlvl_bboxes = []
mlvl_scores = []
mlvl_iou_preds = []
for cls_score, bbox_pred, iou_preds, anchors in zip(
cls_scores, bbox_preds, iou_preds, mlvl_anchors):
assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
scores = cls_score.permute(1, 2, 0).reshape(
-1, self.cls_out_channels).sigmoid()
bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4)
iou_preds = iou_preds.permute(1, 2, 0).reshape(-1).sigmoid()
nms_pre = cfg.get('nms_pre', -1)
if nms_pre > 0 and scores.shape[0] > nms_pre:
max_scores, _ = (scores * iou_preds[:, None]).sqrt().max(dim=1)
_, topk_inds = max_scores.topk(nms_pre)
anchors = anchors[topk_inds, :]
bbox_pred = bbox_pred[topk_inds, :]
scores = scores[topk_inds, :]
iou_preds = iou_preds[topk_inds]
bboxes = self.bbox_coder.decode(
anchors, bbox_pred, max_shape=img_shape)
mlvl_bboxes.append(bboxes)
mlvl_scores.append(scores)
mlvl_iou_preds.append(iou_preds)
mlvl_bboxes = torch.cat(mlvl_bboxes)
if rescale:
mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor)
mlvl_scores = torch.cat(mlvl_scores)
# Add a dummy background class to the backend when using sigmoid
# remind that we set FG labels to [0, num_class-1] since mmdet v2.0
# BG cat_id: num_class
padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1)
mlvl_scores = torch.cat([mlvl_scores, padding], dim=1)
mlvl_iou_preds = torch.cat(mlvl_iou_preds)
mlvl_nms_scores = (mlvl_scores * mlvl_iou_preds[:, None]).sqrt()
det_bboxes, det_labels = multiclass_nms(
mlvl_bboxes,
mlvl_nms_scores,
cfg.score_thr,
cfg.nms,
cfg.max_per_img,
score_factors=None)
if self.with_score_voting:
det_bboxes, det_labels = self.score_voting(det_bboxes, det_labels,
mlvl_bboxes,
mlvl_nms_scores,
cfg.score_thr)
return det_bboxes, det_labels
def score_voting(self, det_bboxes, det_labels, mlvl_bboxes,
mlvl_nms_scores, score_thr):
"""Implementation of score voting method works on each remaining boxes
after NMS procedure.
Args:
det_bboxes (Tensor): Remaining boxes after NMS procedure,
with shape (k, 5), each dimension means
(x1, y1, x2, y2, score).
det_labels (Tensor): The label of remaining boxes, with shape
(k, 1). Labels are 0-based.
mlvl_bboxes (Tensor): All boxes before the NMS procedure,
with shape (num_anchors,4).
mlvl_nms_scores (Tensor): The scores of all boxes which is used
in the NMS procedure, with shape (num_anchors, num_class)
mlvl_iou_preds (Tensor): The predictions of IOU of all boxes
before the NMS procedure, with shape (num_anchors, 1)
score_thr (float): The score threshold of bboxes.
Returns:
tuple: Usually returns a tuple containing voting results.
- det_bboxes_voted (Tensor): Remaining boxes after
score voting procedure, with shape (k, 5), each
dimension means (x1, y1, x2, y2, score).
- det_labels_voted (Tensor): Label of remaining bboxes
after voting, with shape (num_anchors,).
"""
candidate_mask = mlvl_nms_scores > score_thr
candidate_mask_nozeros = candidate_mask.nonzero()
candidate_inds = candidate_mask_nozeros[:, 0]
candidate_labels = candidate_mask_nozeros[:, 1]
candidate_bboxes = mlvl_bboxes[candidate_inds]
candidate_scores = mlvl_nms_scores[candidate_mask]
det_bboxes_voted = []
det_labels_voted = []
for cls in range(self.cls_out_channels):
candidate_cls_mask = candidate_labels == cls
if not candidate_cls_mask.any():
continue
candidate_cls_scores = candidate_scores[candidate_cls_mask]
candidate_cls_bboxes = candidate_bboxes[candidate_cls_mask]
det_cls_mask = det_labels == cls
det_cls_bboxes = det_bboxes[det_cls_mask].view(
-1, det_bboxes.size(-1))
det_candidate_ious = bbox_overlaps(det_cls_bboxes[:, :4],
candidate_cls_bboxes)
for det_ind in range(len(det_cls_bboxes)):
single_det_ious = det_candidate_ious[det_ind]
pos_ious_mask = single_det_ious > 0.01
pos_ious = single_det_ious[pos_ious_mask]
pos_bboxes = candidate_cls_bboxes[pos_ious_mask]
pos_scores = candidate_cls_scores[pos_ious_mask]
pis = (torch.exp(-(1 - pos_ious)**2 / 0.025) *
pos_scores)[:, None]
voted_box = torch.sum(
pis * pos_bboxes, dim=0) / torch.sum(
pis, dim=0)
voted_score = det_cls_bboxes[det_ind][-1:][None, :]
det_bboxes_voted.append(
torch.cat((voted_box[None, :], voted_score), dim=1))
det_labels_voted.append(cls)
det_bboxes_voted = torch.cat(det_bboxes_voted, dim=0)
det_labels_voted = det_labels.new_tensor(det_labels_voted)
return det_bboxes_voted, det_labels_voted
| 1 | 21,418 | Like in atss_head | open-mmlab-mmdetection | py |
@@ -233,6 +233,16 @@ func (pool *TransactionPool) test(txgroup []transactions.SignedTxn) error {
// requires a flat MinTxnFee).
feePerByte = feePerByte * pool.feeThresholdMultiplier
+ // The threshold grows exponentially if there are multiple blocks
+ // pending in the pool.
+ if pool.numPendingWholeBlocks > 1 {
+ // golang has no convenient integer exponentiation, so we just
+ // do this in a loop
+ for i := 0; i < int(pool.numPendingWholeBlocks)-1; i++ {
+ feePerByte *= pool.expFeeFactor
+ }
+ }
+
for _, t := range txgroup {
feeThreshold := feePerByte * uint64(t.GetEncodedLength())
if t.Txn.Fee.Raw < feeThreshold { | 1 | // Copyright (C) 2019 Algorand, Inc.
// This file is part of go-algorand
//
// go-algorand is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// go-algorand is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
package pools
import (
"fmt"
"sync"
"time"
"github.com/algorand/go-deadlock"
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/ledger"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/logging/telemetryspec"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/util/condvar"
)
// TransactionPool is a struct maintaining a sanitized pool of transactions that are available for inclusion in
// a Block. We sanitize it by preventing duplicates and limiting the number of transactions retained for each account
type TransactionPool struct {
mu deadlock.Mutex
cond sync.Cond
expiredTxCount map[basics.Round]int
pendingBlockEvaluator *ledger.BlockEvaluator
numPendingWholeBlocks basics.Round
feeThresholdMultiplier uint64
ledger *ledger.Ledger
statusCache *statusCache
logStats bool
expFeeFactor uint64
// pendingMu protects pendingTxGroups and pendingTxids
pendingMu deadlock.RWMutex
pendingTxGroups [][]transactions.SignedTxn
pendingTxids map[transactions.Txid]transactions.SignedTxn
// Calls to remember() add transactions to rememberedTxGroups and
// rememberedTxids. Calling rememberCommit() adds them to the
// pendingTxGroups and pendingTxids. This allows us to batch the
// changes in OnNewBlock() without preventing a concurrent call
// to Pending() or Verified().
rememberedTxGroups [][]transactions.SignedTxn
rememberedTxids map[transactions.Txid]transactions.SignedTxn
// result of logic.Eval()
lsigCache *lsigEvalCache
lcmu deadlock.RWMutex
}
// MakeTransactionPool is the constructor, it uses Ledger to ensure that no account has pending transactions that together overspend.
//
// The pool also contains status information for the last transactionPoolStatusSize
// transactions that were removed from the pool without being committed.
func MakeTransactionPool(ledger *ledger.Ledger, cfg config.Local) *TransactionPool {
if cfg.TxPoolExponentialIncreaseFactor < 1 {
cfg.TxPoolExponentialIncreaseFactor = 1
}
pool := TransactionPool{
pendingTxids: make(map[transactions.Txid]transactions.SignedTxn),
rememberedTxids: make(map[transactions.Txid]transactions.SignedTxn),
expiredTxCount: make(map[basics.Round]int),
ledger: ledger,
statusCache: makeStatusCache(cfg.TxPoolSize),
logStats: cfg.EnableAssembleStats,
expFeeFactor: cfg.TxPoolExponentialIncreaseFactor,
lsigCache: makeLsigEvalCache(cfg.TxPoolSize),
}
pool.cond.L = &pool.mu
pool.recomputeBlockEvaluator()
return &pool
}
// TODO I moved this number to be a constant in the module, we should consider putting it in the local config
const expiredHistory = 10
// timeoutOnNewBlock determines how long Test() and Remember() wait for
// OnNewBlock() to process a new block that appears to be in the ledger.
const timeoutOnNewBlock = time.Second
// NumExpired returns the number of transactions that expired at the end of a round (only meaningful if cleanup has
// been called for that round)
func (pool *TransactionPool) NumExpired(round basics.Round) int {
pool.mu.Lock()
defer pool.mu.Unlock()
return pool.expiredTxCount[round]
}
// PendingTxIDs return the IDs of all pending transactions
func (pool *TransactionPool) PendingTxIDs() []transactions.Txid {
pool.pendingMu.RLock()
defer pool.pendingMu.RUnlock()
ids := make([]transactions.Txid, len(pool.pendingTxids))
i := 0
for txid := range pool.pendingTxids {
ids[i] = txid
i++
}
return ids
}
// Pending returns a list of transaction groups that should be proposed
// in the next block, in order.
func (pool *TransactionPool) Pending() [][]transactions.SignedTxn {
pool.pendingMu.RLock()
defer pool.pendingMu.RUnlock()
// note that this operation is safe for the sole reason that arrays in go are immutable.
// if the underlying array needs to be expanded, the actual underlying array would need
// to be reallocated.
return pool.pendingTxGroups
}
// rememberCommit() saves the changes added by remember to
// pendingTxGroups and pendingTxids. The caller is assumed to
// be holding pool.mu. flush indicates whether previous
// pendingTxGroups and pendingTxids should be flushed out and
// replaced altogether by rememberedTxGroups and rememberedTxids.
func (pool *TransactionPool) rememberCommit(flush bool) {
pool.pendingMu.Lock()
defer pool.pendingMu.Unlock()
if flush {
pool.pendingTxGroups = pool.rememberedTxGroups
pool.pendingTxids = pool.rememberedTxids
} else {
pool.pendingTxGroups = append(pool.pendingTxGroups, pool.rememberedTxGroups...)
for txid, txn := range pool.rememberedTxids {
pool.pendingTxids[txid] = txn
}
}
pool.rememberedTxGroups = nil
pool.rememberedTxids = make(map[transactions.Txid]transactions.SignedTxn)
}
// PendingCount returns the number of transactions currently pending in the pool.
func (pool *TransactionPool) PendingCount() int {
pool.pendingMu.RLock()
defer pool.pendingMu.RUnlock()
var count int
for _, txgroup := range pool.pendingTxGroups {
count += len(txgroup)
}
return count
}
// Test checks whether a transaction group could be remembered in the pool,
// but does not actually store this transaction in the pool.
func (pool *TransactionPool) Test(txgroup []transactions.SignedTxn) error {
for i := range txgroup {
txgroup[i].InitCaches()
}
pool.mu.Lock()
defer pool.mu.Unlock()
return pool.test(txgroup)
}
// test checks whether a transaction group could be remembered in the pool,
// but does not actually store this transaction in the pool.
//
// test assumes that pool.mu is locked. It might release the lock
// while it waits for OnNewBlock() to be called.
func (pool *TransactionPool) test(txgroup []transactions.SignedTxn) error {
if pool.pendingBlockEvaluator == nil {
return fmt.Errorf("TransactionPool.test: no pending block evaluator")
}
// Make sure that the latest block has been processed by OnNewBlock().
// If not, we might be in a race, so wait a little bit for OnNewBlock()
// to catch up to the ledger.
latest := pool.ledger.Latest()
waitExpires := time.Now().Add(timeoutOnNewBlock)
for pool.pendingBlockEvaluator.Round() <= latest && time.Now().Before(waitExpires) {
condvar.TimedWait(&pool.cond, timeoutOnNewBlock)
if pool.pendingBlockEvaluator == nil {
return fmt.Errorf("TransactionPool.test: no pending block evaluator")
}
}
tentativeRound := pool.pendingBlockEvaluator.Round() + pool.numPendingWholeBlocks
err := pool.pendingBlockEvaluator.TestTransactionGroup(txgroup)
if err == ledger.ErrNoSpace {
tentativeRound++
} else if err != nil {
return err
}
for _, t := range txgroup {
if t.Txn.LastValid < tentativeRound {
return transactions.TxnDeadError{
Round: tentativeRound,
FirstValid: t.Txn.FirstValid,
LastValid: t.Txn.LastValid,
}
}
}
// The baseline threshold fee per byte is 1, the smallest fee we can
// represent. This amounts to a fee of 100 for a 100-byte txn, which
// is well below MinTxnFee (1000). This means that, when the pool
// is not under load, the total MinFee dominates for small txns,
// but once the pool comes under load, the fee-per-byte will quickly
// come to dominate.
feePerByte := uint64(1)
// The threshold is multiplied by the feeThresholdMultiplier that
// tracks the load on the transaction pool over time. If the pool
// is mostly idle, feeThresholdMultiplier will be 0, and all txns
// are accepted (assuming the BlockEvaluator approves them, which
// requires a flat MinTxnFee).
feePerByte = feePerByte * pool.feeThresholdMultiplier
for _, t := range txgroup {
feeThreshold := feePerByte * uint64(t.GetEncodedLength())
if t.Txn.Fee.Raw < feeThreshold {
return fmt.Errorf("fee %d below threshold %d (%d per byte * %d bytes)",
t.Txn.Fee, feeThreshold, feePerByte, t.GetEncodedLength())
}
}
return nil
}
// RememberOne stores the provided transaction
// Precondition: Only RememberOne() properly-signed and well-formed transactions (i.e., ensure t.WellFormed())
func (pool *TransactionPool) RememberOne(t transactions.SignedTxn) error {
return pool.Remember([]transactions.SignedTxn{t})
}
// Remember stores the provided transaction group
// Precondition: Only Remember() properly-signed and well-formed transactions (i.e., ensure t.WellFormed())
func (pool *TransactionPool) Remember(txgroup []transactions.SignedTxn) error {
for i := range txgroup {
txgroup[i].InitCaches()
}
pool.mu.Lock()
defer pool.mu.Unlock()
err := pool.test(txgroup)
if err != nil {
return fmt.Errorf("TransactionPool.Remember: %v", err)
}
if pool.pendingBlockEvaluator == nil {
return fmt.Errorf("TransactionPool.Remember: no pending block evaluator")
}
err = pool.remember(txgroup)
if err != nil {
return fmt.Errorf("TransactionPool.Remember: %v", err)
}
pool.rememberCommit(false)
return nil
}
// remember tries to add the transaction to the pool, bypassing the fee priority checks.
func (pool *TransactionPool) remember(txgroup []transactions.SignedTxn) error {
err := pool.addToPendingBlockEvaluator(txgroup)
if err != nil {
return err
}
pool.rememberedTxGroups = append(pool.rememberedTxGroups, txgroup)
for _, t := range txgroup {
pool.rememberedTxids[t.ID()] = t
}
return nil
}
// Lookup returns the error associated with a transaction that used
// to be in the pool. If no status information is available (e.g., because
// it was too long ago, or the transaction committed successfully), then
// found is false. If the transaction is still in the pool, txErr is empty.
func (pool *TransactionPool) Lookup(txid transactions.Txid) (tx transactions.SignedTxn, txErr string, found bool) {
if pool == nil {
return transactions.SignedTxn{}, "", false
}
pool.mu.Lock()
defer pool.mu.Unlock()
pool.pendingMu.RLock()
defer pool.pendingMu.RUnlock()
tx, inPool := pool.pendingTxids[txid]
if inPool {
return tx, "", true
}
return pool.statusCache.check(txid)
}
// Verified returns whether a given SignedTxn is already in the
// pool, and, since only verified transactions should be added
// to the pool, whether that transaction is verified (i.e., Verify
// returned success). This is used as an optimization to avoid
// re-checking signatures on transactions that we have already
// verified.
func (pool *TransactionPool) Verified(txn transactions.SignedTxn) bool {
if pool == nil {
return false
}
pool.pendingMu.RLock()
defer pool.pendingMu.RUnlock()
pendingSigTxn, ok := pool.pendingTxids[txn.ID()]
if !ok {
return false
}
return pendingSigTxn.Sig == txn.Sig && pendingSigTxn.Msig.Equal(txn.Msig) && pendingSigTxn.Lsig.Equal(&txn.Lsig)
}
// EvalOk for LogicSig Eval of a txn by txid, returns the SignedTxn, error string, and found.
func (pool *TransactionPool) EvalOk(cvers protocol.ConsensusVersion, txid transactions.Txid) (found bool, err error) {
pool.lcmu.RLock()
defer pool.lcmu.RUnlock()
return pool.lsigCache.get(cvers, txid)
}
// EvalRemember sets an error string from LogicSig Eval for some SignedTxn
func (pool *TransactionPool) EvalRemember(cvers protocol.ConsensusVersion, txid transactions.Txid, err error) {
pool.lcmu.Lock()
defer pool.lcmu.Unlock()
pool.lsigCache.put(cvers, txid, err)
}
// OnNewBlock excises transactions from the pool that are included in the specified Block or if they've expired
func (pool *TransactionPool) OnNewBlock(block bookkeeping.Block) {
pool.mu.Lock()
defer pool.mu.Unlock()
defer pool.cond.Broadcast()
var stats telemetryspec.ProcessBlockMetrics
var knownCommitted uint
var unknownCommitted uint
payset, err := block.DecodePaysetFlat()
if err == nil {
pool.pendingMu.RLock()
for _, txad := range payset {
tx := txad.SignedTxn
txid := tx.ID()
_, ok := pool.pendingTxids[txid]
if ok {
knownCommitted++
} else {
unknownCommitted++
}
}
pool.pendingMu.RUnlock()
}
if pool.pendingBlockEvaluator == nil || block.Round() >= pool.pendingBlockEvaluator.Round() {
// Adjust the pool fee threshold. The rules are:
// - If there was less than one full block in the pool, reduce
// the multiplier by 2x. It will eventually go to 0, so that
// only the flat MinTxnFee matters if the pool is idle.
// - If there were less than two full blocks in the pool, keep
// the multiplier as-is.
// - If there were two or more full blocks in the pool, grow
// the multiplier by 2x (or increment by 1, if 0).
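// Worked example (illustrative numbers only): with expFeeFactor = 2 the
// multiplier decays 8 -> 4 -> 2 -> 1 -> 0 over consecutive near-empty
// blocks, and climbs 0 -> 1 -> 2 -> 4 -> 8 while the pool keeps holding
// two or more full blocks.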
switch pool.numPendingWholeBlocks {
case 0:
pool.feeThresholdMultiplier = pool.feeThresholdMultiplier / pool.expFeeFactor
case 1:
// Keep the fee multiplier the same.
default:
if pool.feeThresholdMultiplier == 0 {
pool.feeThresholdMultiplier = 1
} else {
pool.feeThresholdMultiplier = pool.feeThresholdMultiplier * pool.expFeeFactor
}
}
// Recompute the pool by starting from the new latest block.
// This has the side-effect of discarding transactions that
// have been committed (or that are otherwise no longer valid).
stats = pool.recomputeBlockEvaluator()
}
stats.KnownCommittedCount = knownCommitted
stats.UnknownCommittedCount = unknownCommitted
proto := config.Consensus[block.CurrentProtocol]
pool.expiredTxCount[block.Round()] = int(stats.ExpiredCount)
delete(pool.expiredTxCount, block.Round()-expiredHistory*basics.Round(proto.MaxTxnLife))
if pool.logStats {
var details struct {
Round uint64
}
details.Round = uint64(block.Round())
logging.Base().Metrics(telemetryspec.Transaction, stats, details)
}
}
// alwaysVerifiedPool implements ledger.VerifiedTxnCache and returns every
// transaction as verified.
type alwaysVerifiedPool struct {
pool *TransactionPool
}
func (*alwaysVerifiedPool) Verified(txn transactions.SignedTxn) bool {
return true
}
func (pool *alwaysVerifiedPool) EvalOk(cvers protocol.ConsensusVersion, txid transactions.Txid) (txfound bool, err error) {
return pool.pool.EvalOk(cvers, txid)
}
func (pool *alwaysVerifiedPool) EvalRemember(cvers protocol.ConsensusVersion, txid transactions.Txid, txErr error) {
pool.pool.EvalRemember(cvers, txid, txErr)
}
func (pool *TransactionPool) addToPendingBlockEvaluatorOnce(txgroup []transactions.SignedTxn) error {
r := pool.pendingBlockEvaluator.Round() + pool.numPendingWholeBlocks
for _, tx := range txgroup {
if tx.Txn.LastValid < r {
return transactions.TxnDeadError{
Round: r,
FirstValid: tx.Txn.FirstValid,
LastValid: tx.Txn.LastValid,
}
}
}
txgroupad := make([]transactions.SignedTxnWithAD, len(txgroup))
for i, tx := range txgroup {
txgroupad[i].SignedTxn = tx
}
return pool.pendingBlockEvaluator.TransactionGroup(txgroupad)
}
func (pool *TransactionPool) addToPendingBlockEvaluator(txgroup []transactions.SignedTxn) error {
err := pool.addToPendingBlockEvaluatorOnce(txgroup)
if err == ledger.ErrNoSpace {
pool.numPendingWholeBlocks++
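// Reviewer suggestion (sketch only; pendingBlockMultiplier is hypothetical and
// not a field of this struct): maintain a cached multiplier next to
// numPendingWholeBlocks, e.g.
//   pool.pendingBlockMultiplier *= pool.expFeeFactor
// here and reset it in recomputeBlockEvaluator, so test() can scale feePerByte
// with one constant-time multiplication instead of a loop.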
pool.pendingBlockEvaluator.ResetTxnBytes()
err = pool.addToPendingBlockEvaluatorOnce(txgroup)
}
return err
}
// recomputeBlockEvaluator constructs a new BlockEvaluator and feeds all
// in-pool transactions to it (removing any transactions that are rejected
// by the BlockEvaluator).
func (pool *TransactionPool) recomputeBlockEvaluator() (stats telemetryspec.ProcessBlockMetrics) {
pool.pendingBlockEvaluator = nil
latest := pool.ledger.Latest()
prev, err := pool.ledger.BlockHdr(latest)
if err != nil {
logging.Base().Warnf("TransactionPool.recomputeBlockEvaluator: cannot get prev header for %d: %v",
latest, err)
return
}
next := bookkeeping.MakeBlock(prev)
pool.numPendingWholeBlocks = 0
pool.pendingBlockEvaluator, err = pool.ledger.StartEvaluator(next.BlockHeader, &alwaysVerifiedPool{pool}, nil)
if err != nil {
logging.Base().Warnf("TransactionPool.recomputeBlockEvaluator: cannot start evaluator: %v", err)
return
}
// Feed the transactions in order.
pool.pendingMu.RLock()
txgroups := pool.pendingTxGroups
pool.pendingMu.RUnlock()
for _, txgroup := range txgroups {
err := pool.remember(txgroup)
if err != nil {
for _, tx := range txgroup {
pool.statusCache.put(tx, err.Error())
}
switch err.(type) {
case transactions.TxnDeadError:
stats.ExpiredCount++
default:
stats.RemovedInvalidCount++
}
}
}
pool.rememberCommit(true)
return
}
| 1 | 36,611 | nit : I think it would be better to have the multiplier being calculated along with `pool.numPendingWholeBlocks`, so that we could always multiply the feePerByte with that multiplier. Not only that it would be faster, but it would also have constant time. btw - Correct me if I'm wrong, but with this change, you're practically rolling back your previous change while making it `pool.expFeeFactor` adjustable, right ? | algorand-go-algorand | go |
@@ -1,4 +1,5 @@
class CommentsController < ApplicationController
+ before_filter :authenticate_user!
def index
@commentable = find_commentable | 1 | class CommentsController < ApplicationController
def index
@commentable = find_commentable
@comments = @commentable.comments
end
def create
@commentable = find_commentable
@comment = @commentable.comments.build(comment_params)
if @comment.save
flash[:notice] = "You successfully added a comment for #{@commentable.class.name} #{@commentable.id}"
redirect_to id: nil
else
raise 'something went wrong'
end
end
private
def find_commentable
params.each do |name, val|
if name =~ /^(.+)_id$/
return $1.classify.constantize.find(val)
end
end
nil
end
def comment_params
params.require(:comment).permit(:comment_text)
end
end | 1 | 12,335 | Is there a reason we _wouldn't_ want to do this? A bit of a security hole :grimacing: | 18F-C2 | rb |
@@ -0,0 +1,14 @@
+package azkaban.project;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileNotFoundException;
+import org.yaml.snakeyaml.Yaml;
+
+
+public class ProjectSpecLoader {
+
+ public ProjectSpec load(File projectSpecFile) throws FileNotFoundException {
+ return new Yaml().loadAs(new FileInputStream(projectSpecFile), ProjectSpec.class);
+ }
+} | 1 | 1 | 12,407 | Please add a copyright notice to all new files. Is there a tool to check and auto update copyright notices? | azkaban-azkaban | java |
|
@@ -132,7 +132,7 @@ class UserFormType extends AbstractType
new Email(['message' => 'Please enter valid e-mail']),
new UniqueEmail(['ignoredEmail' => $user !== null ? $user->getEmail() : null]),
],
- 'label' => t('Email'),
+ 'label' => t('E-mail'),
]);
$builderRegisteredCustomerGroup = $builder->create('registeredCustomer', GroupType::class, [ | 1 | <?php
namespace Shopsys\FrameworkBundle\Form\Admin\Customer;
use Shopsys\FrameworkBundle\Component\Constraints\Email;
use Shopsys\FrameworkBundle\Component\Constraints\FieldsAreNotIdentical;
use Shopsys\FrameworkBundle\Component\Constraints\NotIdenticalToEmailLocalPart;
use Shopsys\FrameworkBundle\Component\Constraints\UniqueEmail;
use Shopsys\FrameworkBundle\Form\DisplayOnlyDomainIconType;
use Shopsys\FrameworkBundle\Form\DisplayOnlyType;
use Shopsys\FrameworkBundle\Form\DomainType;
use Shopsys\FrameworkBundle\Form\GroupType;
use Shopsys\FrameworkBundle\Model\Customer\User;
use Shopsys\FrameworkBundle\Model\Customer\UserData;
use Shopsys\FrameworkBundle\Model\Pricing\Group\PricingGroupFacade;
use Shopsys\FrameworkBundle\Twig\DateTimeFormatterExtension;
use Symfony\Component\Form\AbstractType;
use Symfony\Component\Form\Extension\Core\Type\ChoiceType;
use Symfony\Component\Form\Extension\Core\Type\EmailType;
use Symfony\Component\Form\Extension\Core\Type\PasswordType;
use Symfony\Component\Form\Extension\Core\Type\RepeatedType;
use Symfony\Component\Form\Extension\Core\Type\TextType;
use Symfony\Component\Form\FormBuilderInterface;
use Symfony\Component\OptionsResolver\OptionsResolver;
use Symfony\Component\Validator\Constraints;
class UserFormType extends AbstractType
{
/**
* @var \Shopsys\FrameworkBundle\Model\Pricing\Group\PricingGroupFacade
*/
private $pricingGroupFacade;
/**
* @var \Shopsys\FrameworkBundle\Twig\DateTimeFormatterExtension
*/
private $dateTimeFormatterExtension;
public function __construct(
PricingGroupFacade $pricingGroupFacade,
DateTimeFormatterExtension $dateTimeFormatterExtension
) {
$this->pricingGroupFacade = $pricingGroupFacade;
$this->dateTimeFormatterExtension = $dateTimeFormatterExtension;
}
/**
* @param \Symfony\Component\Form\FormBuilderInterface $builder
* @param array $options
* @SuppressWarnings(PHPMD.ExcessiveMethodLength)
*/
public function buildForm(FormBuilderInterface $builder, array $options)
{
$user = $options['user'];
/* @var $user \Shopsys\FrameworkBundle\Model\Customer\User */
$builderSystemDataGroup = $builder->create('systemData', GroupType::class, [
'label' => t('System data'),
]);
if ($user instanceof User) {
$builderSystemDataGroup->add('formId', DisplayOnlyType::class, [
'label' => t('ID'),
'data' => $user->getId(),
]);
$builderSystemDataGroup->add('domainIcon', DisplayOnlyDomainIconType::class, [
'data' => $user->getDomainId(),
]);
$pricingGroups = $this->pricingGroupFacade->getByDomainId($options['domain_id']);
$groupPricingGroupsBy = null;
} else {
$builderSystemDataGroup
->add('domainId', DomainType::class, [
'required' => true,
'data' => $options['domain_id'],
'label' => t('Domain'),
'attr' => [
'class' => 'js-toggle-opt-group-control',
],
]);
$pricingGroups = $this->pricingGroupFacade->getAll();
$groupPricingGroupsBy = 'domainId';
}
$builderSystemDataGroup
->add('pricingGroup', ChoiceType::class, [
'required' => true,
'choices' => $pricingGroups,
'choice_label' => 'name',
'choice_value' => 'id',
'group_by' => $groupPricingGroupsBy,
'label' => t('Pricing group'),
'attr' => [
'class' => 'js-toggle-opt-group',
'data-js-toggle-opt-group-control' => '.js-toggle-opt-group-control',
],
]);
$builderPersonalDataGroup = $builder->create('personalData', GroupType::class, [
'label' => t('Personal data'),
]);
$builderPersonalDataGroup
->add('firstName', TextType::class, [
'constraints' => [
new Constraints\NotBlank(['message' => 'Please enter first name']),
new Constraints\Length([
'max' => 100,
'maxMessage' => 'First name cannot be longer then {{ limit }} characters',
]),
],
'label' => t('First name'),
])
->add('lastName', TextType::class, [
'constraints' => [
new Constraints\NotBlank(['message' => 'Please enter last name']),
new Constraints\Length([
'max' => 100,
'maxMessage' => 'Last name cannot be longer than {{ limit }} characters',
]),
],
'label' => t('Last name'),
])
->add('email', EmailType::class, [
'constraints' => [
new Constraints\NotBlank(['message' => 'Please enter e-mail']),
new Constraints\Length([
'max' => 255,
'maxMessage' => 'Email cannot be longer then {{ limit }} characters',
]),
new Email(['message' => 'Please enter valid e-mail']),
new UniqueEmail(['ignoredEmail' => $user !== null ? $user->getEmail() : null]),
],
'label' => t('Email'),
]);
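// Reviewer note (comment added for context, not in the original file): if this
// label changes from t('Email') to t('E-mail'), the translation catalogue has
// to be re-extracted so the old "Email" message id is dropped - in Shopsys this
// is typically done with the `translations-dump` Phing target (assumed here).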
$builderRegisteredCustomerGroup = $builder->create('registeredCustomer', GroupType::class, [
'label' => t('Registered cust.'),
]);
$builderRegisteredCustomerGroup
->add('password', RepeatedType::class, [
'type' => PasswordType::class,
'required' => $options['user'] === null,
'options' => [
'attr' => ['autocomplete' => 'off'],
],
'first_options' => [
'constraints' => $this->getFirstPasswordConstraints($options['user'] === null),
'label' => t('Password'),
],
'second_options' => [
'label' => t('Password again'),
],
'invalid_message' => 'Passwords do not match',
]);
if ($user instanceof User) {
$builderSystemDataGroup->add('createdAt', DisplayOnlyType::class, [
'label' => t('Date of registration and privacy policy agreement'),
'data' => $this->dateTimeFormatterExtension->formatDateTime($user->getCreatedAt()),
]);
$builderRegisteredCustomerGroup->add('lastLogin', DisplayOnlyType::class, [
'label' => t('Last login'),
'data' => $user->getLastLogin() !== null ? $this->dateTimeFormatterExtension->formatDateTime($user->getLastLogin()) : 'never',
]);
}
$builder
->add($builderSystemDataGroup)
->add($builderPersonalDataGroup)
->add($builderRegisteredCustomerGroup);
}
/**
* @param bool $isCreatingNewUser
* @return \Symfony\Component\Validator\Constraint[]
*/
private function getFirstPasswordConstraints($isCreatingNewUser)
{
$constraints = [
new Constraints\Length(['min' => 6, 'minMessage' => 'Password must be at least {{ limit }} characters long']),
];
if ($isCreatingNewUser) {
$constraints[] = new Constraints\NotBlank([
'message' => 'Please enter password',
]);
}
return $constraints;
}
/**
* @param \Symfony\Component\OptionsResolver\OptionsResolver $resolver
*/
public function configureOptions(OptionsResolver $resolver)
{
$resolver
->setRequired(['user', 'domain_id'])
->setAllowedTypes('user', [User::class, 'null'])
->setAllowedTypes('domain_id', 'int')
->setDefaults([
'data_class' => UserData::class,
'attr' => ['novalidate' => 'novalidate'],
'constraints' => [
new FieldsAreNotIdentical([
'field1' => 'email',
'field2' => 'password',
'errorPath' => 'password',
'message' => 'Password cannot be same as e-mail',
]),
new NotIdenticalToEmailLocalPart([
'password' => 'password',
'email' => 'email',
'errorPath' => 'password',
'message' => 'Password cannot be same as part of e-mail before at sign',
]),
],
]);
}
}
| 1 | 10,136 | A new translation dump should be made after this change (removing the "Email" message) | shopsys-shopsys | php |
@@ -9,7 +9,9 @@ function owns(node, virtualTree, role, ariaOwned) {
if (node === null) {
return false;
}
- var implicit = implicitNodes(role),
+ var implicit = implicitNodes(role).map(
+ implicitSelector => implicitSelector + ':not([role])'
+ ),
selector = ['[role="' + role + '"]'];
if (implicit) { | 1 | const requiredOwned = axe.commons.aria.requiredOwned;
const implicitNodes = axe.commons.aria.implicitNodes;
const matchesSelector = axe.utils.matchesSelector;
const idrefs = axe.commons.dom.idrefs;
const reviewEmpty =
options && Array.isArray(options.reviewEmpty) ? options.reviewEmpty : [];
function owns(node, virtualTree, role, ariaOwned) {
if (node === null) {
return false;
}
var implicit = implicitNodes(role),
selector = ['[role="' + role + '"]'];
if (implicit) {
selector = selector.concat(implicit);
}
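// Reviewer context (comment added here, not in the original source): the CI
// error quoted in the review shows implicitNodes(role) can return null, so the
// patched call at the top of this entry would need a guard before mapping, e.g.
//   var implicit = (implicitNodes(role) || []).map(s => s + ':not([role])');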
selector = selector.join(',');
return ariaOwned
? matchesSelector(node, selector) ||
!!axe.utils.querySelectorAll(virtualTree, selector)[0]
: !!axe.utils.querySelectorAll(virtualTree, selector)[0];
}
function ariaOwns(nodes, role) {
var index, length;
for (index = 0, length = nodes.length; index < length; index++) {
if (nodes[index] === null) {
continue;
}
const virtualTree = axe.utils.getNodeFromTree(nodes[index]);
if (owns(nodes[index], virtualTree, role, true)) {
return true;
}
}
return false;
}
function missingRequiredChildren(node, childRoles, all, role) {
var index,
length = childRoles.length,
missing = [],
ownedElements = idrefs(node, 'aria-owns');
for (index = 0; index < length; index++) {
var childRole = childRoles[index];
if (
owns(node, virtualNode, childRole) ||
ariaOwns(ownedElements, childRole)
) {
if (!all) {
return null;
}
} else {
if (all) {
missing.push(childRole);
}
}
}
// combobox exceptions
if (role === 'combobox') {
// remove 'textbox' from missing roles if combobox is a native text-type input
var textboxIndex = missing.indexOf('textbox');
var textTypeInputs = ['text', 'search', 'email', 'url', 'tel'];
if (
textboxIndex >= 0 &&
node.nodeName.toUpperCase() === 'INPUT' &&
textTypeInputs.includes(node.type)
) {
missing.splice(textboxIndex, 1);
}
// remove 'listbox' from missing roles if combobox is collapsed
var listboxIndex = missing.indexOf('listbox');
var expanded = node.getAttribute('aria-expanded');
if (listboxIndex >= 0 && (!expanded || expanded === 'false')) {
missing.splice(listboxIndex, 1);
}
}
if (missing.length) {
return missing;
}
if (!all && childRoles.length) {
return childRoles;
}
return null;
}
var role = node.getAttribute('role');
var required = requiredOwned(role);
if (!required) {
return true;
}
var all = false;
var childRoles = required.one;
if (!childRoles) {
var all = true;
childRoles = required.all;
}
var missing = missingRequiredChildren(node, childRoles, all, role);
if (!missing) {
return true;
}
this.data(missing);
// Only review empty nodes when a node is both empty and does not have an aria-owns relationship
if (
reviewEmpty.includes(role) &&
node.children.length === 0 &&
idrefs(node, 'aria-owns').length === 0
) {
return undefined;
} else {
return false;
}
| 1 | 14,811 | CI is failing saying `expected [TypeError: null is not an object (evaluating 'implicitNodes(role).map')] to equal null`. | dequelabs-axe-core | js |
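The CI quote in the review message above says `implicitNodes(role)` came back as `null`, so mapping over it directly throws. Below is a minimal sketch of the `owns` helper with the `:not([role])` mapping applied only when an array is actually returned — the surrounding names come from the file above, while the guard itself is an assumption about how the failure could be addressed:

```js
function owns(node, virtualTree, role, ariaOwned) {
	if (node === null) {
		return false;
	}
	// implicitNodes(role) may return null for roles without implicit elements,
	// so only derive the ':not([role])' variants when an array comes back.
	var implicit = implicitNodes(role);
	var selector = ['[role="' + role + '"]'];
	if (implicit) {
		selector = selector.concat(
			implicit.map(function(implicitSelector) {
				return implicitSelector + ':not([role])';
			})
		);
	}
	selector = selector.join(',');
	return ariaOwned
		? matchesSelector(node, selector) ||
				!!axe.utils.querySelectorAll(virtualTree, selector)[0]
		: !!axe.utils.querySelectorAll(virtualTree, selector)[0];
}
```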
@@ -572,12 +572,12 @@ describe('createRoot()', () => {
});
it('should avoid reapplying innerHTML when __html property of dangerouslySetInnerHTML attr remains unchanged', () => {
+ let thing;
+
class Thing extends Component {
- constructor(props) {
- super(props);
- props.ref(this);
- }
render() {
+ thing = this;
+
// eslint-disable-next-line react/no-danger
return (
<div dangerouslySetInnerHTML={{ __html: '<span>same</span>' }} /> | 1 | import { setupRerender } from 'preact/test-utils';
import {
createElement,
Component,
options,
createRoot,
Fragment
} from 'preact';
import {
setupScratch,
teardown,
getMixedArray,
mixedArrayHTML,
serializeHtml,
supportsDataList,
sortAttributes,
spyOnElementAttributes,
createEvent
} from '../_util/helpers';
import { clearLog, getLog, logCall } from '../_util/logCall';
import { useState } from 'preact/hooks';
import { ul, li, div } from '../_util/dom';
/** @jsx createElement */
function getAttributes(node) {
let attrs = {};
for (let i = node.attributes.length; i--; ) {
attrs[node.attributes[i].name] = node.attributes[i].value;
}
return attrs;
}
const isIE11 = /Trident\//.test(navigator.userAgent);
describe('createRoot()', () => {
describe('render', () => {
let scratch, rerender;
let resetAppendChild;
let resetInsertBefore;
let resetRemoveChild;
let resetRemove;
let render;
beforeEach(() => {
scratch = setupScratch();
rerender = setupRerender();
render = createRoot(scratch).render;
});
afterEach(() => {
teardown(scratch);
});
before(() => {
resetAppendChild = logCall(Element.prototype, 'appendChild');
resetInsertBefore = logCall(Element.prototype, 'insertBefore');
resetRemoveChild = logCall(Element.prototype, 'removeChild');
resetRemove = logCall(Element.prototype, 'remove');
});
after(() => {
resetAppendChild();
resetInsertBefore();
resetRemoveChild();
resetRemove();
});
it('should rerender when value from "" to 0', () => {
render('');
expect(scratch.innerHTML).to.equal('');
render(0);
expect(scratch.innerHTML).to.equal('0');
});
it('should render an empty text node given an empty string', () => {
render('');
let c = scratch.childNodes;
expect(c).to.have.length(1);
expect(c[0].data).to.equal('');
expect(c[0].nodeName).to.equal('#text');
});
it('should allow node type change with content', () => {
render(<span>Bad</span>);
render(<div>Good</div>);
expect(scratch.innerHTML).to.eql(`<div>Good</div>`);
});
it('should not render when detecting JSON-injection', () => {
const vnode = JSON.parse('{"type":"span","children":"Malicious"}');
render(vnode);
expect(scratch.innerHTML).to.equal('');
});
it('should create empty nodes (<* />)', () => {
render(<div />);
expect(scratch.childNodes).to.have.length(1);
expect(scratch.childNodes[0].nodeName).to.equal('DIV');
scratch.parentNode.removeChild(scratch);
scratch = document.createElement('div');
render = createRoot(scratch).render;
(document.body || document.documentElement).appendChild(scratch);
render(<span />);
expect(scratch.childNodes).to.have.length(1);
expect(scratch.childNodes[0].nodeName).to.equal('SPAN');
});
it('should not throw error in IE11 with type date', () => {
expect(() => render(<input type="date" />)).to.not.throw();
});
it('should support custom tag names', () => {
render(<foo />);
expect(scratch.childNodes).to.have.length(1);
expect(scratch.firstChild).to.have.property('nodeName', 'FOO');
scratch.parentNode.removeChild(scratch);
scratch = document.createElement('div');
(document.body || document.documentElement).appendChild(scratch);
render = createRoot(scratch).render;
render(<x-bar />);
expect(scratch.childNodes).to.have.length(1);
expect(scratch.firstChild).to.have.property('nodeName', 'X-BAR');
});
it('should support the form attribute', () => {
render(
<div>
<form id="myform" />
<button form="myform">test</button>
<input form="myform" />
</div>
);
const div = scratch.childNodes[0];
const form = div.childNodes[0];
const button = div.childNodes[1];
const input = div.childNodes[2];
// IE11 doesn't support the form attribute
if (!isIE11) {
expect(button).to.have.property('form', form);
expect(input).to.have.property('form', form);
}
});
it('should allow VNode reuse', () => {
let reused = <div class="reuse">Hello World!</div>;
render(
<div>
{reused}
<hr />
{reused}
</div>
);
expect(serializeHtml(scratch)).to.eql(
`<div><div class="reuse">Hello World!</div><hr><div class="reuse">Hello World!</div></div>`
);
render(
<div>
<hr />
{reused}
</div>
);
expect(serializeHtml(scratch)).to.eql(
`<div><hr><div class="reuse">Hello World!</div></div>`
);
});
it('should merge new elements when called multiple times', () => {
render(<div />);
expect(scratch.childNodes).to.have.length(1);
expect(scratch.firstChild).to.have.property('nodeName', 'DIV');
expect(scratch.innerHTML).to.equal('<div></div>');
render(<span />);
expect(scratch.childNodes).to.have.length(1);
expect(scratch.firstChild).to.have.property('nodeName', 'SPAN');
expect(scratch.innerHTML).to.equal('<span></span>');
render(<span class="hello">Hello!</span>);
expect(scratch.childNodes).to.have.length(1);
expect(scratch.firstChild).to.have.property('nodeName', 'SPAN');
expect(scratch.innerHTML).to.equal('<span class="hello">Hello!</span>');
});
it('should nest empty nodes', () => {
render(
<div>
<span />
<foo />
<x-bar />
</div>
);
expect(scratch.childNodes).to.have.length(1);
expect(scratch.childNodes[0].nodeName).to.equal('DIV');
let c = scratch.childNodes[0].childNodes;
expect(c).to.have.length(3);
expect(c[0].nodeName).to.equal('SPAN');
expect(c[1].nodeName).to.equal('FOO');
expect(c[2].nodeName).to.equal('X-BAR');
});
it('should not render falsy values', () => {
render(
<div>
{null},{undefined},{false},{0},{NaN}
</div>
);
expect(scratch.firstChild).to.have.property('innerHTML', ',,,0,NaN');
});
it('should not render null', () => {
render(null);
expect(scratch.innerHTML).to.equal('');
expect(scratch.childNodes).to.have.length(0);
});
it('should not render undefined', () => {
render(undefined);
expect(scratch.innerHTML).to.equal('');
expect(scratch.childNodes).to.have.length(0);
});
it('should not render boolean true', () => {
render(true);
expect(scratch.innerHTML).to.equal('');
expect(scratch.childNodes).to.have.length(0);
});
it('should not render boolean false', () => {
render(false);
expect(scratch.innerHTML).to.equal('');
expect(scratch.childNodes).to.have.length(0);
});
it('should not render children when using function children', () => {
render(<div>{() => {}}</div>);
expect(scratch.innerHTML).to.equal('<div></div>');
});
it('should render NaN as text content', () => {
render(NaN);
expect(scratch.innerHTML).to.equal('NaN');
});
it('should render numbers (0) as text content', () => {
render(0);
expect(scratch.innerHTML).to.equal('0');
});
it('should render numbers (42) as text content', () => {
render(42);
expect(scratch.innerHTML).to.equal('42');
});
it('should render bigint as text content', () => {
// Skip in browsers not supporting big integers
if (typeof BigInt === 'undefined') {
return;
}
// eslint-disable-next-line no-undef, new-cap
render(BigInt(4));
expect(scratch.innerHTML).to.equal('4');
});
it('should render strings as text content', () => {
render('Testing, huh! How is it going?');
expect(scratch.innerHTML).to.equal('Testing, huh! How is it going?');
});
it('should render arrays of mixed elements', () => {
render(getMixedArray());
expect(scratch.innerHTML).to.equal(mixedArrayHTML);
});
it('should clear falsy attributes', () => {
render(
<div
anull="anull"
aundefined="aundefined"
afalse="afalse"
anan="aNaN"
a0="a0"
/>
);
render(
<div
anull={null}
aundefined={undefined}
afalse={false}
anan={NaN}
a0={0}
/>
);
expect(
getAttributes(scratch.firstChild),
'from previous truthy values'
).to.eql({
a0: '0',
anan: 'NaN'
});
});
it('should not render falsy attributes on hydrate', () => {
render(
<div
anull={null}
aundefined={undefined}
afalse={false}
anan={NaN}
a0={0}
/>
);
expect(getAttributes(scratch.firstChild), 'initial render').to.eql({
a0: '0',
anan: 'NaN'
});
});
it('should clear falsy input values', () => {
// Note: this test just demonstrates the default browser behavior
render(
<div>
<input value={0} />
<input value={false} />
<input value={null} />
<input value={undefined} />
</div>
);
let root = scratch.firstChild;
expect(root.children[0]).to.have.property('value', '0');
expect(root.children[1]).to.have.property('value', 'false');
expect(root.children[2]).to.have.property('value', '');
expect(root.children[3]).to.have.property('value', '');
});
it('should set value inside the specified range', () => {
render(<input type="range" value={0.5} min="0" max="1" step="0.05" />);
expect(scratch.firstChild.value).to.equal('0.5');
});
// IE or IE Edge will throw when attribute values don't conform to the
// spec. That's the correct behaviour, but bad for this test...
if (!/(Edge|MSIE|Trident)/.test(navigator.userAgent)) {
it('should not clear falsy DOM properties', () => {
function test(val) {
render(
<div>
<input value={val} />
<table border={val} />
</div>,
scratch
);
}
test('2');
test(false);
expect(scratch.innerHTML).to.equal(
'<div><input><table border="false"></table></div>',
'for false'
);
test('3');
test(null);
expect(scratch.innerHTML).to.equal(
'<div><input><table border=""></table></div>',
'for null'
);
test('4');
test(undefined);
expect(scratch.innerHTML).to.equal(
'<div><input><table border=""></table></div>',
'for undefined'
);
});
}
// Test for preactjs/preact#651
it('should set enumerable boolean attribute', () => {
render(<input spellcheck={false} />);
expect(scratch.firstChild.spellcheck).to.equal(false);
});
it('should render download attribute', () => {
render(<a download="" />);
expect(scratch.firstChild.getAttribute('download')).to.equal('');
render(<a download={null} />);
expect(scratch.firstChild.getAttribute('download')).to.equal(null);
});
it('should not set tagName', () => {
expect(() => render(<input tagName="div" />)).not.to.throw();
});
it('should apply string attributes', () => {
render(<div foo="bar" data-foo="databar" />);
expect(serializeHtml(scratch)).to.equal(
'<div data-foo="databar" foo="bar"></div>'
);
});
it('should not serialize function props as attributes', () => {
render(<div click={function a() {}} ONCLICK={function b() {}} />);
let div = scratch.childNodes[0];
expect(div.attributes.length).to.equal(0);
});
it('should serialize object props as attributes', () => {
render(
<div
foo={{ a: 'b' }}
bar={{
toString() {
return 'abc';
}
}}
/>
);
let div = scratch.childNodes[0];
expect(div.attributes.length).to.equal(2);
// Normalize attribute order because it's different in various browsers
let normalized = {};
for (let i = 0; i < div.attributes.length; i++) {
let attr = div.attributes[i];
normalized[attr.name] = attr.value;
}
expect(normalized).to.deep.equal({
bar: 'abc',
foo: '[object Object]'
});
});
it('should apply class as String', () => {
render(<div class="foo" />);
expect(scratch.childNodes[0]).to.have.property('className', 'foo');
});
it('should alias className to class', () => {
render(<div className="bar" />);
expect(scratch.childNodes[0]).to.have.property('className', 'bar');
});
it('should support false aria-* attributes', () => {
render(<div aria-checked="false" />);
expect(scratch.firstChild.getAttribute('aria-checked')).to.equal('false');
});
it('should set checked attribute on custom elements without checked property', () => {
render(<o-checkbox checked />);
expect(scratch.innerHTML).to.equal(
'<o-checkbox checked="true"></o-checkbox>'
);
});
it('should set value attribute on custom elements without value property', () => {
render(<o-input value="test" />);
expect(scratch.innerHTML).to.equal('<o-input value="test"></o-input>');
});
it('should mask value on password input elements', () => {
render(<input value="xyz" type="password" />);
expect(scratch.innerHTML).to.equal('<input type="password">');
});
it('should unset href if null || undefined', () => {
render(
<pre>
<a href="#">href="#"</a>
<a href={undefined}>href="undefined"</a>
<a href={null}>href="null"</a>
<a href={''}>href="''"</a>
</pre>
);
const links = scratch.querySelectorAll('a');
expect(links[0].hasAttribute('href')).to.equal(true);
expect(links[1].hasAttribute('href')).to.equal(false);
expect(links[2].hasAttribute('href')).to.equal(false);
expect(links[3].hasAttribute('href')).to.equal(true);
});
describe('dangerouslySetInnerHTML', () => {
it('should support dangerouslySetInnerHTML', () => {
let html = '<b>foo & bar</b>';
// eslint-disable-next-line react/no-danger
render(<div dangerouslySetInnerHTML={{ __html: html }} />);
expect(scratch.firstChild, 'set').to.have.property('innerHTML', html);
expect(scratch.innerHTML).to.equal('<div>' + html + '</div>');
render(
<div>
a<strong>b</strong>
</div>,
scratch
);
expect(scratch, 'unset').to.have.property(
'innerHTML',
`<div>a<strong>b</strong></div>`
);
// eslint-disable-next-line react/no-danger
render(<div dangerouslySetInnerHTML={{ __html: html }} />);
expect(scratch.innerHTML, 're-set').to.equal('<div>' + html + '</div>');
});
it('should apply proper mutation for VNodes with dangerouslySetInnerHTML attr', () => {
let thing;
class Thing extends Component {
constructor(props, context) {
super(props, context);
this.state = { html: this.props.html };
thing = this;
}
render(props, { html }) {
// eslint-disable-next-line react/no-danger
return html ? (
<div dangerouslySetInnerHTML={{ __html: html }} />
) : (
<div />
);
}
}
render(<Thing html="<b><i>test</i></b>" />);
expect(scratch.innerHTML).to.equal('<div><b><i>test</i></b></div>');
thing.setState({ html: false });
rerender();
expect(scratch.innerHTML).to.equal('<div></div>');
thing.setState({ html: '<foo><bar>test</bar></foo>' });
rerender();
expect(scratch.innerHTML).to.equal(
'<div><foo><bar>test</bar></foo></div>'
);
});
it('should not mutative render with dangerouslySetInnerHTML', () => {
// In other words, if render is called with a container with existing
// children, dangerouslySetInnerHTML should leave the DOM intact
let html = '<b>foo & bar</b>';
scratch.innerHTML = `<div>${html}</div>`;
// eslint-disable-next-line react/no-danger
render(<div dangerouslySetInnerHTML={{ __html: html }} />);
expect(scratch.firstChild).to.have.property('innerHTML', html);
expect(scratch.innerHTML).to.equal(`<div>${html}</div>`);
});
it('should avoid reapplying innerHTML when __html property of dangerouslySetInnerHTML attr remains unchanged', () => {
class Thing extends Component {
constructor(props) {
super(props);
props.ref(this);
}
render() {
// eslint-disable-next-line react/no-danger
return (
<div dangerouslySetInnerHTML={{ __html: '<span>same</span>' }} />
);
}
}
let thing;
render(<Thing ref={r => (thing = r)} />);
let firstInnerHTMLChild = scratch.firstChild.firstChild;
// Re-render
thing.forceUpdate();
expect(firstInnerHTMLChild).to.equalNode(scratch.firstChild.firstChild);
});
it('should unmount dangerouslySetInnerHTML', () => {
let set;
const TextDiv = () => (
<div dangerouslySetInnerHTML={{ __html: '' }}>some text</div>
);
class App extends Component {
constructor(props) {
super(props);
set = this.setState.bind(this);
this.state = { show: true };
}
render() {
return this.state.show && <TextDiv />;
}
}
render(<App />);
expect(scratch.innerHTML).to.equal('<div></div>');
set({ show: false });
rerender();
expect(scratch.innerHTML).to.equal('');
});
});
it('should reconcile mutated checked property', () => {
let check = p => render(<input type="checkbox" checked={p} />),
value = () => scratch.lastChild.checked,
setValue = p => (scratch.lastChild.checked = p);
check(true);
expect(value()).to.equal(true);
check(false);
expect(value()).to.equal(false);
check(true);
expect(value()).to.equal(true);
setValue(true);
check(false);
expect(value()).to.equal(false);
setValue(false);
check(true);
expect(value()).to.equal(true);
});
it('should reorder child pairs', () => {
render(
<div>
<a>a</a>
<b>b</b>
</div>
);
let a = scratch.firstChild.firstChild;
let b = scratch.firstChild.lastChild;
expect(a).to.have.property('nodeName', 'A');
expect(b).to.have.property('nodeName', 'B');
render(
<div>
<b>b</b>
<a>a</a>
</div>
);
expect(scratch.firstChild.firstChild).to.equalNode(b);
expect(scratch.firstChild.lastChild).to.equalNode(a);
});
// Discussion: https://github.com/preactjs/preact/issues/287
// <datalist> is not supported in Safari, even though the element
// constructor is present
if (supportsDataList()) {
it('should allow <input list /> to pass through as an attribute', () => {
render(
<div>
<input type="range" min="0" max="100" list="steplist" />
<datalist id="steplist">
<option>0</option>
<option>50</option>
<option>100</option>
</datalist>
</div>,
scratch
);
let html = scratch.firstElementChild.firstElementChild.outerHTML;
expect(sortAttributes(html)).to.equal(
sortAttributes(
'<input type="range" min="0" max="100" list="steplist">'
)
);
});
}
// Issue #2284
it('should not throw when setting size to an invalid value', () => {
// These values are usually used to reset the `size` attribute to its
// initial state.
expect(() => render(<input size={undefined} />)).to.not.throw();
expect(() => render(<input size={null} />)).to.not.throw();
expect(() => render(<input size={0} />)).to.not.throw();
});
it('should not execute append operation when child is at last', () => {
// See preactjs/preact#717 for discussion about the issue this addresses
let todoText = 'new todo that I should complete';
let input;
let setText;
let addTodo;
const ENTER = 13;
class TodoList extends Component {
constructor(props) {
super(props);
this.state = { todos: [], text: '' };
setText = this.setText = this.setText.bind(this);
addTodo = this.addTodo = this.addTodo.bind(this);
}
setText(e) {
this.setState({ text: e.target.value });
}
addTodo(e) {
if (e.keyCode === ENTER) {
let { todos, text } = this.state;
todos = todos.concat({ text });
this.setState({ todos, text: '' });
}
}
render() {
const { todos, text } = this.state;
return (
<div onKeyDown={this.addTodo}>
{todos.map(todo => [
<span>{todo.text}</span>,
<span>
{' '}
[ <a href="javascript:;">Delete</a> ]
</span>,
<br />
])}
<input
value={text}
onInput={this.setText}
ref={i => (input = i)}
/>
</div>
);
}
}
render(<TodoList />);
// Simulate user typing
input.focus();
input.value = todoText;
setText({
target: input
});
// Commit the user typing setState call
rerender();
// Simulate user pressing enter
addTodo({
keyCode: ENTER
});
// Before Preact rerenders, focus should be on the input
expect(document.activeElement).to.equalNode(input);
rerender();
// After Preact rerenders, focus should remain on the input
expect(document.activeElement).to.equalNode(input);
expect(scratch.innerHTML).to.contain(`<span>${todoText}</span>`);
});
it('should keep value of uncontrolled inputs', () => {
render(<input value={undefined} />);
scratch.firstChild.value = 'foo';
render(<input value={undefined} />);
expect(scratch.firstChild.value).to.equal('foo');
});
it('should keep value of uncontrolled checkboxes', () => {
render(<input type="checkbox" checked={undefined} />);
scratch.firstChild.checked = true;
render(<input type="checkbox" checked={undefined} />);
expect(scratch.firstChild.checked).to.equal(true);
});
// #2756
it('should set progress value to 0', () => {
render(<progress value={0} max="100" />);
expect(scratch.firstChild.value).to.equal(0);
expect(scratch.firstChild.getAttribute('value')).to.equal('0');
});
it('should always diff `checked` and `value` properties against the DOM', () => {
// See https://github.com/preactjs/preact/issues/1324
let inputs;
let text;
let checkbox;
class Inputs extends Component {
constructor(props) {
super(props);
props.ref(this);
}
render() {
return (
<div>
<input value={'Hello'} ref={el => (text = el)} />
<input type="checkbox" checked ref={el => (checkbox = el)} />
</div>
);
}
}
render(<Inputs ref={x => (inputs = x)} />);
expect(text.value).to.equal('Hello');
expect(checkbox.checked).to.equal(true);
text.value = 'World';
checkbox.checked = false;
inputs.forceUpdate();
rerender();
expect(text.value).to.equal('Hello');
expect(checkbox.checked).to.equal(true);
});
it('should always diff `contenteditable` `innerHTML` against the DOM', () => {
// This tests that we do not cause any cursor jumps in contenteditable fields
// See https://github.com/preactjs/preact/issues/2691
function Editable() {
const [value, setValue] = useState('Hello');
return (
<div
contentEditable
dangerouslySetInnerHTML={{ __html: value }}
onInput={e => setValue(e.currentTarget.innerHTML)}
/>
);
}
render(<Editable />);
let editable = scratch.querySelector('[contenteditable]');
// modify the innerHTML and set the caret to character 2 to simulate a user typing
editable.innerHTML = 'World';
const range = document.createRange();
range.selectNodeContents(editable);
range.setStart(editable.childNodes[0], 2);
range.collapse(true);
const sel = window.getSelection();
sel.removeAllRanges();
sel.addRange(range);
// ensure we didn't mess up setting the cursor to position 2
expect(window.getSelection().getRangeAt(0).startOffset).to.equal(2);
// dispatch the input event to tell preact to re-render
editable.dispatchEvent(createEvent('input'));
rerender();
// ensure innerHTML is still correct (was not an issue before) and
// more importantly the caret is still at character 2
editable = scratch.querySelector('[contenteditable]');
expect(editable.innerHTML).to.equal('World');
expect(window.getSelection().getRangeAt(0).startOffset).to.equal(2);
});
it('should not re-render when a component returns undefined', () => {
let Dialog = () => undefined;
let updateState;
class App extends Component {
constructor(props) {
super(props);
this.state = { name: '' };
updateState = () => this.setState({ name: ', friend' });
}
render(props, { name }) {
return (
<div>
<Dialog />
<h1 class="fade-down">Hi{name}</h1>
</div>
);
}
}
render(<App />);
expect(scratch.innerHTML).to.equal(
'<div><h1 class="fade-down">Hi</h1></div>'
);
clearLog();
updateState();
rerender();
expect(scratch.innerHTML).to.equal(
'<div><h1 class="fade-down">Hi, friend</h1></div>'
);
// We don't log text updates
expect(getLog()).to.deep.equal([]);
});
it('should not lead to stale DOM nodes', () => {
let i = 0;
let updateApp;
class App extends Component {
render() {
updateApp = () => this.forceUpdate();
return <Parent />;
}
}
let updateParent;
function Parent() {
updateParent = () => this.forceUpdate();
i++;
return <Child i={i} />;
}
function Child({ i }) {
return i < 3 ? null : <div>foo</div>;
}
render(<App />);
updateApp();
rerender();
updateParent();
rerender();
updateApp();
rerender();
// Without a fix it would render: `<div>foo</div><div></div>`
expect(scratch.innerHTML).to.equal('<div>foo</div>');
});
// see preact/#1327
it('should not reuse unkeyed components', () => {
let forceUpdate;
class X extends Component {
constructor(props) {
super(props);
forceUpdate = this.update.bind(this);
this.state = { i: 0 };
}
update() {
this.setState(prev => ({ i: prev.i + 1 }));
}
componentWillUnmount() {
clearTimeout(this.id);
}
render() {
return <div>{this.state.i}</div>;
}
}
let updateApp;
class App extends Component {
constructor() {
super();
this.state = { i: 0 };
updateApp = () => this.setState(prev => ({ i: prev.i ^ 1 }));
}
render() {
return (
<div>
{this.state.i === 0 && <X />}
<X />
</div>
);
}
}
render(<App />);
expect(scratch.textContent).to.equal('00');
forceUpdate();
updateApp();
rerender();
expect(scratch.textContent).to.equal('1');
updateApp();
rerender();
expect(scratch.textContent).to.equal('01');
});
it('should not cause infinite loop with referentially equal props', () => {
let i = 0;
let prevDiff = options._diff;
options._diff = () => {
if (++i > 10) {
options._diff = prevDiff;
throw new Error('Infinite loop');
}
};
function App({ children, ...rest }) {
return (
<div {...rest}>
<div {...rest}>{children}</div>
</div>
);
}
render(<App>10</App>);
expect(scratch.textContent).to.equal('10');
options._diff = prevDiff;
});
it('should not call options.debounceRendering unnecessarily', () => {
let comp;
class A extends Component {
constructor(props) {
super(props);
this.state = { updates: 0 };
comp = this;
}
render() {
return <div>{this.state.updates}</div>;
}
}
render(<A />);
expect(scratch.innerHTML).to.equal('<div>0</div>');
const sandbox = sinon.createSandbox();
try {
sandbox.spy(options, 'debounceRendering');
comp.setState({ updates: 1 }, () => {
comp.setState({ updates: 2 });
});
rerender();
expect(scratch.innerHTML).to.equal('<div>2</div>');
expect(options.debounceRendering).to.have.been.calledOnce;
} finally {
sandbox.restore();
}
});
it('should remove attributes on pre-existing DOM', () => {
const div = document.createElement('div');
div.setAttribute('class', 'red');
const span = document.createElement('span');
const text = document.createTextNode('Hi');
span.appendChild(text);
div.appendChild(span);
scratch.appendChild(div);
const App = () => (
<div>
<span>Bye</span>
</div>
);
render(<App />);
expect(serializeHtml(scratch)).to.equal('<div><span>Bye</span></div>');
});
it('should remove class attributes', () => {
const App = props => (
<div className={props.class}>
<span>Bye</span>
</div>
);
render(<App class="hi" />);
expect(scratch.innerHTML).to.equal(
'<div class="hi"><span>Bye</span></div>'
);
render(<App />);
expect(serializeHtml(scratch)).to.equal('<div><span>Bye</span></div>');
});
it('should not read DOM attributes on render without existing DOM', () => {
const attributesSpy = spyOnElementAttributes();
render(
<div id="wrapper">
<div id="page1">Page 1</div>
</div>
);
expect(scratch.innerHTML).to.equal(
'<div id="wrapper"><div id="page1">Page 1</div></div>'
);
// IE11 doesn't allow modifying Element.prototype functions properly.
// Custom spies will never be called.
if (!isIE11) {
expect(attributesSpy.get).to.not.have.been.called;
}
render(
<div id="wrapper">
<div id="page2">Page 2</div>
</div>
);
expect(scratch.innerHTML).to.equal(
'<div id="wrapper"><div id="page2">Page 2</div></div>'
);
// IE11 doesn't allow modifying Element.prototype functions properly.
// Custom spies will never be called.
if (!isIE11) {
expect(attributesSpy.get).to.not.have.been.called;
}
});
// #2926
it('should not throw when changing contentEditable to undefined or null', () => {
render(<p contentEditable>foo</p>);
expect(() =>
render(<p contentEditable={undefined}>foo</p>)
).to.not.throw();
expect(scratch.firstChild.contentEditable).to.equal('inherit');
expect(() => render(<p contentEditable={null}>foo</p>)).to.not.throw();
expect(scratch.firstChild.contentEditable).to.equal('inherit');
});
// #2926 Part 2
it('should allow setting contentEditable to false', () => {
render(
<div contentEditable>
<span>editable</span>
<p contentEditable={false}>not editable</p>
</div>
);
expect(scratch.firstChild.contentEditable).to.equal('true');
expect(scratch.querySelector('p').contentEditable).to.equal('false');
});
// #3060
it('should reset tabindex on undefined/null', () => {
const defaultValue = isIE11 ? 0 : -1;
render(<div tabIndex={0} />);
expect(scratch.firstChild.tabIndex).to.equal(0);
render(<div tabIndex={undefined} />);
expect(scratch.firstChild.tabIndex).to.equal(defaultValue);
render(<div tabIndex={null} />);
expect(scratch.firstChild.tabIndex).to.equal(defaultValue);
render(<div tabindex={0} />);
expect(scratch.firstChild.tabIndex).to.equal(0);
render(<div tabindex={undefined} />);
expect(scratch.firstChild.tabIndex).to.equal(defaultValue);
render(<div tabindex={null} />);
expect(scratch.firstChild.tabIndex).to.equal(defaultValue);
});
it('should only remove the highest parent when unmounting a tree of DOM', () => {
render(
<ul>
<li>Hello</li>
<li>World</li>
</ul>
);
clearLog();
render(null);
expect(getLog()).to.deep.equal(['<ul>HelloWorld.remove()']);
});
it('should only remove the highest parent when unmounting a tree with components', () => {
const List = props => props.children;
const Item = props => <li>{props.children}</li>;
render(
<ul>
<List>
<Item>Hello</Item>
<Item>World</Item>
</List>
</ul>
);
const items = scratch.querySelectorAll('li');
clearLog();
render(null);
expect(getLog()).to.deep.equal(['<ul>HelloWorld.remove()']);
expect(items[0]).to.have.property('parentNode').that.should.exist;
expect(items[1]).to.have.property('parentNode').that.should.exist;
});
});
describe('hydrate', () => {
/** @type {HTMLElement} */
let scratch;
let attributesSpy;
let hydrate;
const List = ({ children }) => <ul>{children}</ul>;
const ListItem = ({ children, onClick = null }) => (
<li onClick={onClick}>{children}</li>
);
let resetAppendChild;
let resetInsertBefore;
let resetRemoveChild;
let resetRemove;
let resetSetAttribute;
let resetRemoveAttribute;
before(() => {
resetAppendChild = logCall(Element.prototype, 'appendChild');
resetInsertBefore = logCall(Element.prototype, 'insertBefore');
resetRemoveChild = logCall(Element.prototype, 'removeChild');
resetRemove = logCall(Element.prototype, 'remove');
resetSetAttribute = logCall(Element.prototype, 'setAttribute');
resetRemoveAttribute = logCall(Element.prototype, 'removeAttribute');
});
after(() => {
resetAppendChild();
resetInsertBefore();
resetRemoveChild();
resetRemove();
resetSetAttribute();
resetRemoveAttribute();
if (Element.prototype.addEventListener.restore)
Element.prototype.addEventListener.restore();
});
beforeEach(() => {
scratch = setupScratch();
attributesSpy = spyOnElementAttributes();
hydrate = createRoot(scratch).hydrate;
});
afterEach(() => {
teardown(scratch);
clearLog();
});
it('should reuse existing DOM', () => {
const onClickSpy = sinon.spy();
const html = ul([li('1'), li('2'), li('3')]);
scratch.innerHTML = html;
clearLog();
hydrate(
<ul>
<li>1</li>
<li>2</li>
<li onClick={onClickSpy}>3</li>
</ul>,
scratch
);
expect(scratch.innerHTML).to.equal(html);
expect(getLog()).to.deep.equal([]);
expect(onClickSpy).not.to.have.been.called;
scratch
.querySelector('li:last-child')
.dispatchEvent(createEvent('click'));
expect(onClickSpy).to.have.been.called.calledOnce;
});
it('should reuse existing DOM when given components', () => {
const onClickSpy = sinon.spy();
const html = ul([li('1'), li('2'), li('3')]);
scratch.innerHTML = html;
clearLog();
hydrate(
<List>
<ListItem>1</ListItem>
<ListItem>2</ListItem>
<ListItem onClick={onClickSpy}>3</ListItem>
</List>,
scratch
);
expect(scratch.innerHTML).to.equal(html);
expect(getLog()).to.deep.equal([]);
expect(onClickSpy).not.to.have.been.called;
scratch
.querySelector('li:last-child')
.dispatchEvent(createEvent('click'));
expect(onClickSpy).to.have.been.called.calledOnce;
});
it('should properly set event handlers to existing DOM when given components', () => {
const proto = Element.prototype;
sinon.spy(proto, 'addEventListener');
const clickHandlers = [sinon.spy(), sinon.spy(), sinon.spy()];
const html = ul([li('1'), li('2'), li('3')]);
scratch.innerHTML = html;
clearLog();
hydrate(
<List>
<ListItem onClick={clickHandlers[0]}>1</ListItem>
<ListItem onClick={clickHandlers[1]}>2</ListItem>
<ListItem onClick={clickHandlers[2]}>3</ListItem>
</List>,
scratch
);
expect(scratch.innerHTML).to.equal(html);
expect(getLog()).to.deep.equal([]);
expect(proto.addEventListener).to.have.been.calledThrice;
expect(clickHandlers[2]).not.to.have.been.called;
scratch
.querySelector('li:last-child')
.dispatchEvent(createEvent('click'));
expect(clickHandlers[2]).to.have.been.calledOnce;
});
it('should add missing nodes to existing DOM when hydrating', () => {
const html = ul([li('1')]);
scratch.innerHTML = html;
clearLog();
hydrate(
<List>
<ListItem>1</ListItem>
<ListItem>2</ListItem>
<ListItem>3</ListItem>
</List>,
scratch
);
expect(scratch.innerHTML).to.equal(ul([li('1'), li('2'), li('3')]));
expect(getLog()).to.deep.equal([
'<li>.insertBefore(#text, Null)',
'<ul>1.insertBefore(<li>2, Null)',
'<li>.insertBefore(#text, Null)',
'<ul>12.insertBefore(<li>3, Null)'
]);
});
it('should remove extra nodes from existing DOM when hydrating', () => {
const html = ul([li('1'), li('2'), li('3'), li('4')]);
scratch.innerHTML = html;
clearLog();
hydrate(
<List>
<ListItem>1</ListItem>
<ListItem>2</ListItem>
<ListItem>3</ListItem>
</List>,
scratch
);
expect(scratch.innerHTML).to.equal(ul([li('1'), li('2'), li('3')]));
expect(getLog()).to.deep.equal(['<li>4.remove()']);
});
it('should not update attributes on existing DOM', () => {
scratch.innerHTML =
'<div><span before-hydrate="test" same-value="foo" different-value="a">Test</span></div>';
let vnode = (
<div>
<span same-value="foo" different-value="b" new-value="c">
Test
</span>
</div>
);
clearLog();
hydrate(vnode, scratch);
// IE11 doesn't support spying on Element.prototype
if (!/Trident/.test(navigator.userAgent)) {
expect(attributesSpy.get).to.not.have.been.called;
}
expect(serializeHtml(scratch)).to.equal(
sortAttributes(
'<div><span before-hydrate="test" different-value="a" same-value="foo">Test</span></div>'
)
);
expect(getLog()).to.deep.equal([]);
});
it('should update class attribute via className prop', () => {
scratch.innerHTML = '<div class="foo">bar</div>';
hydrate(<div className="foo">bar</div>, scratch);
expect(scratch.innerHTML).to.equal('<div class="foo">bar</div>');
});
it('should correctly hydrate with Fragments', () => {
const html = ul([li('1'), li('2'), li('3'), li('4')]);
scratch.innerHTML = html;
clearLog();
const clickHandlers = [
sinon.spy(),
sinon.spy(),
sinon.spy(),
sinon.spy()
];
hydrate(
<List>
<ListItem onClick={clickHandlers[0]}>1</ListItem>
<Fragment>
<ListItem onClick={clickHandlers[1]}>2</ListItem>
<ListItem onClick={clickHandlers[2]}>3</ListItem>
</Fragment>
<ListItem onClick={clickHandlers[3]}>4</ListItem>
</List>,
scratch
);
expect(scratch.innerHTML).to.equal(html);
expect(getLog()).to.deep.equal([]);
expect(clickHandlers[2]).not.to.have.been.called;
scratch
.querySelector('li:nth-child(3)')
.dispatchEvent(createEvent('click'));
expect(clickHandlers[2]).to.have.been.called.calledOnce;
});
it('should correctly hydrate root Fragments', () => {
const html = [
ul([li('1'), li('2'), li('3'), li('4')]),
div('sibling')
].join('');
scratch.innerHTML = html;
clearLog();
const clickHandlers = [
sinon.spy(),
sinon.spy(),
sinon.spy(),
sinon.spy(),
sinon.spy()
];
hydrate(
<Fragment>
<List>
<Fragment>
<ListItem onClick={clickHandlers[0]}>1</ListItem>
<ListItem onClick={clickHandlers[1]}>2</ListItem>
</Fragment>
<ListItem onClick={clickHandlers[2]}>3</ListItem>
<ListItem onClick={clickHandlers[3]}>4</ListItem>
</List>
<div onClick={clickHandlers[4]}>sibling</div>
</Fragment>,
scratch
);
expect(scratch.innerHTML).to.equal(html);
expect(getLog()).to.deep.equal([]);
expect(clickHandlers[2]).not.to.have.been.called;
scratch
.querySelector('li:nth-child(3)')
.dispatchEvent(createEvent('click'));
expect(clickHandlers[2]).to.have.been.calledOnce;
expect(clickHandlers[4]).not.to.have.been.called;
scratch.querySelector('div').dispatchEvent(createEvent('click'));
expect(clickHandlers[2]).to.have.been.calledOnce;
expect(clickHandlers[4]).to.have.been.calledOnce;
});
// Failing because the following condition in mountDomElement doesn't evaluate to true
// when hydrating a dom node which is not correct
// dom===d && newVNode.text!==oldVNode.text
// We don't set `d` when hydrating. If we did, then newVNode.text would never equal
// oldVNode.text since oldVNode is always EMPTY_OBJ when hydrating
it.skip('should override incorrect pre-existing DOM with VNodes passed into render', () => {
const initialHtml = [
div('sibling'),
ul([li('1'), li('4'), li('3'), li('2')])
].join('');
scratch.innerHTML = initialHtml;
clearLog();
hydrate(
<Fragment>
<List>
<Fragment>
<ListItem>1</ListItem>
<ListItem>2</ListItem>
</Fragment>
<ListItem>3</ListItem>
<ListItem>4</ListItem>
</List>
<div>sibling</div>
</Fragment>,
scratch
);
const finalHtml = [
ul([li('1'), li('2'), li('3'), li('4')]),
div('sibling')
].join('');
expect(scratch.innerHTML).to.equal(finalHtml);
// TODO: Fill in with proper log once this test is passing
expect(getLog()).to.deep.equal([]);
});
it('should not merge attributes with node created by the DOM', () => {
const html = htmlString => {
const div = document.createElement('div');
div.innerHTML = htmlString;
return div.firstChild;
};
// prettier-ignore
const DOMElement = html`<div><a foo="bar"></a></div>`;
scratch.appendChild(DOMElement);
const preactElement = (
<div>
<a />
</div>
);
hydrate(preactElement, scratch);
// IE11 doesn't support spies on built-in prototypes
if (!/Trident/.test(navigator.userAgent)) {
expect(attributesSpy.get).to.not.have.been.called;
}
expect(scratch).to.have.property(
'innerHTML',
'<div><a foo="bar"></a></div>'
);
});
it('should attach event handlers', () => {
let spy = sinon.spy();
scratch.innerHTML = '<span>Test</span>';
let vnode = <span onClick={spy}>Test</span>;
hydrate(vnode, scratch);
scratch.firstChild.click();
expect(spy).to.be.calledOnce;
});
// #2237
it('should not redundantly add text nodes', () => {
scratch.innerHTML = '<div id="test"><p>hello bar</p></div>';
const element = document.getElementById('test');
const Component = props => <p>hello {props.foo}</p>;
hydrate(<Component foo="bar" />, element);
expect(element.innerHTML).to.equal('<p>hello bar</p>');
});
it('should not remove values', () => {
scratch.innerHTML =
'<select><option value="0">Zero</option><option selected value="2">Two</option></select>';
const App = () => {
const options = [
{
value: '0',
label: 'Zero'
},
{
value: '2',
label: 'Two'
}
];
return (
<select value="2">
{options.map(({ disabled, label, value }) => (
<option key={label} disabled={disabled} value={value}>
{label}
</option>
))}
</select>
);
};
hydrate(<App />, scratch);
expect(sortAttributes(scratch.innerHTML)).to.equal(
sortAttributes(
'<select><option value="0">Zero</option><option selected="" value="2">Two</option></select>'
)
);
});
it('should deopt for trees introduced in hydrate (append)', () => {
scratch.innerHTML = '<div id="test"><p class="hi">hello bar</p></div>';
const Component = props => <p class="hi">hello {props.foo}</p>;
const element = document.getElementById('test');
hydrate = createRoot(element).hydrate;
hydrate(
<Fragment>
<Component foo="bar" />
<Component foo="baz" />
</Fragment>
);
expect(element.innerHTML).to.equal(
'<p class="hi">hello bar</p><p class="hi">hello baz</p>'
);
});
it('should deopt for trees introduced in hydrate (insert before)', () => {
scratch.innerHTML = '<div id="test"><p class="hi">hello bar</p></div>';
const Component = props => <p class="hi">hello {props.foo}</p>;
const element = document.getElementById('test');
hydrate = createRoot(element).hydrate;
hydrate(
<Fragment>
<Component foo="baz" />
<Component foo="bar" />
</Fragment>
);
expect(element.innerHTML).to.equal(
'<p class="hi">hello baz</p><p class="hi">hello bar</p>'
);
});
it('should skip comment nodes between text nodes', () => {
scratch.innerHTML = '<p>hello <!-- c -->foo</p>';
hydrate(<p>hello {'foo'}</p>, scratch);
expect(scratch.innerHTML).to.equal('<p>hello foo</p>');
});
it('should skip comment nodes between dom nodes', () => {
scratch.innerHTML = '<p><i>0</i><!-- c --><b>1</b></p>';
hydrate(
<p>
<i>0</i>
<b>1</b>
</p>,
scratch
);
expect(scratch.innerHTML).to.equal('<p><i>0</i><b>1</b></p>');
});
it('should not hydrate with dangerouslySetInnerHTML', () => {
let html = '<b>foo & bar</b>';
scratch.innerHTML = `<div>${html}</div>`;
clearLog();
// eslint-disable-next-line react/no-danger
hydrate(<div dangerouslySetInnerHTML={{ __html: html }} />, scratch);
expect(scratch.firstChild).to.have.property('innerHTML', html);
expect(scratch.innerHTML).to.equal(`<div>${html}</div>`);
expect(getLog()).to.deep.equal([]);
});
});
describe('root', () => {
/** @type {HTMLElement} */
let scratch;
let root;
beforeEach(() => {
scratch = setupScratch();
root = createRoot(scratch);
});
afterEach(() => {
teardown(scratch);
});
it('can reuse a root', () => {
root.render(
<p>
<i>0</i>
<b>1</b>
</p>
);
expect(scratch.innerHTML).to.equal('<p><i>0</i><b>1</b></p>');
root.render(
<div>
<i>0</i>
<b>1</b>
</div>
);
expect(scratch.innerHTML).to.equal('<div><i>0</i><b>1</b></div>');
});
});
});
| 1 | 17,489 | @JoviDeCroock I replaced the refs here with simple assignments. | preactjs-preact | js |
@@ -84,7 +84,7 @@ public class TestHiveMetastore {
private ExecutorService executorService;
private TServer server;
private HiveMetaStore.HMSHandler baseHandler;
- private HiveClientPool clientPool;
+ protected HiveClientPool clientPool; // Exposed for testing.
/**
* Starts a TestHiveMetastore with the default connection pool size (5). | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.hive;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.Reader;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStore;
import org.apache.hadoop.hive.metastore.IHMSHandler;
import org.apache.hadoop.hive.metastore.RetryingHMSHandler;
import org.apache.hadoop.hive.metastore.TSetIpAddressProcessor;
import org.apache.iceberg.common.DynConstructors;
import org.apache.iceberg.common.DynMethods;
import org.apache.iceberg.hadoop.Util;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.server.TServer;
import org.apache.thrift.server.TThreadPoolServer;
import org.apache.thrift.transport.TServerSocket;
import org.apache.thrift.transport.TTransportFactory;
import static java.nio.file.Files.createTempDirectory;
import static java.nio.file.attribute.PosixFilePermissions.asFileAttribute;
import static java.nio.file.attribute.PosixFilePermissions.fromString;
public class TestHiveMetastore {
private static final String DEFAULT_DATABASE_NAME = "default";
private static final int DEFAULT_POOL_SIZE = 5;
// create the metastore handlers based on whether we're working with Hive2 or Hive3 dependencies
// we need to do this because there is a breaking API change between Hive2 and Hive3
private static final DynConstructors.Ctor<HiveMetaStore.HMSHandler> HMS_HANDLER_CTOR = DynConstructors.builder()
.impl(HiveMetaStore.HMSHandler.class, String.class, Configuration.class)
.impl(HiveMetaStore.HMSHandler.class, String.class, HiveConf.class)
.build();
private static final DynMethods.StaticMethod GET_BASE_HMS_HANDLER = DynMethods.builder("getProxy")
.impl(RetryingHMSHandler.class, Configuration.class, IHMSHandler.class, boolean.class)
.impl(RetryingHMSHandler.class, HiveConf.class, IHMSHandler.class, boolean.class)
.buildStatic();
// Hive3 introduces background metastore tasks (MetastoreTaskThread) for performing various cleanup duties. These
// threads are scheduled and executed in a static thread pool (org.apache.hadoop.hive.metastore.ThreadPool).
// This thread pool is shut down normally as part of the JVM shutdown hook, but since we're creating and tearing down
// multiple metastore instances within the same JVM, we have to call this cleanup method manually, otherwise
// threads from our previous test suite will be stuck in the pool with stale config, and keep on being scheduled.
// This can lead to issues, e.g. accidental Persistence Manager closure by ScheduledQueryExecutionsMaintTask.
private static final DynMethods.StaticMethod METASTORE_THREADS_SHUTDOWN = DynMethods.builder("shutdown")
.impl("org.apache.hadoop.hive.metastore.ThreadPool")
.orNoop()
.buildStatic();
private File hiveLocalDir;
private HiveConf hiveConf;
private ExecutorService executorService;
private TServer server;
private HiveMetaStore.HMSHandler baseHandler;
private HiveClientPool clientPool;
/**
* Starts a TestHiveMetastore with the default connection pool size (5).
*/
public void start() {
start(DEFAULT_POOL_SIZE);
}
/**
* Starts a TestHiveMetastore with a provided connection pool size.
* @param poolSize The number of threads in the executor pool
*/
public void start(int poolSize) {
try {
this.hiveLocalDir = createTempDirectory("hive", asFileAttribute(fromString("rwxrwxrwx"))).toFile();
File derbyLogFile = new File(hiveLocalDir, "derby.log");
System.setProperty("derby.stream.error.file", derbyLogFile.getAbsolutePath());
setupMetastoreDB("jdbc:derby:" + getDerbyPath() + ";create=true");
TServerSocket socket = new TServerSocket(0);
int port = socket.getServerSocket().getLocalPort();
this.hiveConf = newHiveConf(port);
this.server = newThriftServer(socket, poolSize, hiveConf);
this.executorService = Executors.newSingleThreadExecutor();
this.executorService.submit(() -> server.serve());
// in Hive3, setting this as a system prop ensures that it will be picked up whenever a new HiveConf is created
System.setProperty(HiveConf.ConfVars.METASTOREURIS.varname, hiveConf.getVar(HiveConf.ConfVars.METASTOREURIS));
this.clientPool = new HiveClientPool(1, hiveConf);
} catch (Exception e) {
throw new RuntimeException("Cannot start TestHiveMetastore", e);
}
}
public void stop() {
if (clientPool != null) {
clientPool.close();
}
if (server != null) {
server.stop();
}
if (executorService != null) {
executorService.shutdown();
}
if (hiveLocalDir != null) {
hiveLocalDir.delete();
}
if (baseHandler != null) {
baseHandler.shutdown();
}
METASTORE_THREADS_SHUTDOWN.invoke();
}
public HiveConf hiveConf() {
return hiveConf;
}
public HiveClientPool clientPool() {
return clientPool;
}
public String getDatabasePath(String dbName) {
File dbDir = new File(hiveLocalDir, dbName + ".db");
return dbDir.getPath();
}
public void reset() throws Exception {
for (String dbName : clientPool.run(client -> client.getAllDatabases())) {
for (String tblName : clientPool.run(client -> client.getAllTables(dbName))) {
clientPool.run(client -> {
client.dropTable(dbName, tblName, true, true, true);
return null;
});
}
if (!DEFAULT_DATABASE_NAME.equals(dbName)) {
// Drop cascade, functions dropped by cascade
clientPool.run(client -> {
client.dropDatabase(dbName, true, true, true);
return null;
});
}
}
Path warehouseRoot = new Path(hiveLocalDir.getAbsolutePath());
FileSystem fs = Util.getFs(warehouseRoot, hiveConf);
for (FileStatus fileStatus : fs.listStatus(warehouseRoot)) {
if (!fileStatus.getPath().getName().equals("derby.log") &&
!fileStatus.getPath().getName().equals("metastore_db")) {
fs.delete(fileStatus.getPath(), true);
}
}
}
private TServer newThriftServer(TServerSocket socket, int poolSize, HiveConf conf) throws Exception {
HiveConf serverConf = new HiveConf(conf);
serverConf.set(HiveConf.ConfVars.METASTORECONNECTURLKEY.varname, "jdbc:derby:" + getDerbyPath() + ";create=true");
baseHandler = HMS_HANDLER_CTOR.newInstance("new db based metaserver", serverConf);
IHMSHandler handler = GET_BASE_HMS_HANDLER.invoke(serverConf, baseHandler, false);
TThreadPoolServer.Args args = new TThreadPoolServer.Args(socket)
.processor(new TSetIpAddressProcessor<>(handler))
.transportFactory(new TTransportFactory())
.protocolFactory(new TBinaryProtocol.Factory())
.minWorkerThreads(poolSize)
.maxWorkerThreads(poolSize);
return new TThreadPoolServer(args);
}
private HiveConf newHiveConf(int port) {
HiveConf newHiveConf = new HiveConf(new Configuration(), TestHiveMetastore.class);
newHiveConf.set(HiveConf.ConfVars.METASTOREURIS.varname, "thrift://localhost:" + port);
newHiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, "file:" + hiveLocalDir.getAbsolutePath());
newHiveConf.set(HiveConf.ConfVars.METASTORE_TRY_DIRECT_SQL.varname, "false");
newHiveConf.set(HiveConf.ConfVars.METASTORE_DISALLOW_INCOMPATIBLE_COL_TYPE_CHANGES.varname, "false");
newHiveConf.set("iceberg.hive.client-pool-size", "2");
return newHiveConf;
}
private void setupMetastoreDB(String dbURL) throws SQLException, IOException {
Connection connection = DriverManager.getConnection(dbURL);
ScriptRunner scriptRunner = new ScriptRunner(connection, true, true);
ClassLoader classLoader = ClassLoader.getSystemClassLoader();
InputStream inputStream = classLoader.getResourceAsStream("hive-schema-3.1.0.derby.sql");
try (Reader reader = new InputStreamReader(inputStream)) {
scriptRunner.runScript(reader);
}
}
private String getDerbyPath() {
File metastoreDB = new File(hiveLocalDir, "metastore_db");
return metastoreDB.getPath();
}
}
| 1 | 29,601 | nit: Could we use VisibleForTesting annotation here? | apache-iceberg | java |
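A small sketch of the reviewer's nit: marking the widened field for test-only use with an annotation rather than the inline "Exposed for testing" comment. The `VisibleForTesting` import path is an assumption — Iceberg modules may pull it from a relocated Guava package instead:

```java
import com.google.common.annotations.VisibleForTesting; // assumed import path

public class TestHiveMetastore {
  // ... other fields and methods unchanged ...

  // Documents that the protected visibility exists only so tests can swap or
  // inspect the client pool.
  @VisibleForTesting
  protected HiveClientPool clientPool;
}
```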
@@ -24,6 +24,10 @@ class PostTest < ActiveSupport::TestCase
post.must :save
end
+ it "should sort 'by_unanswered'" do
+ Post.by_unanswered.must_equal Post.joins(:topic).where("posts_count = '1'").order('created_at desc')
+ end
+
it 'posts should have an associated topic' do
topic = create(:topic_with_posts)
topic.posts[0].topic.must_equal topic | 1 | # encoding: utf-8
require 'test_helper'
class PostTest < ActiveSupport::TestCase
before { create_must_and_wont_aliases(Post) }
let(:topic) { create(:topic) }
let(:post) { create(:post) }
it 'a post without a body should not be able to save' do
post.body = nil
post.wont_be :valid?
post.errors[:body].must_equal ["can't be blank"]
post.wont :save
end
it 'a post without an associated topic should not be able to save' do
post.topic_id = nil
post.wont_be :valid?
post.wont :save
end
it 'a valid post should be able to save' do
post.must_be :valid?
post.must :save
end
it 'posts should have an associated topic' do
topic = create(:topic_with_posts)
topic.posts[0].topic.must_equal topic
topic.posts[1].topic.must_equal topic
topic.posts[2].topic.must_equal topic
end
it 'gracefully handles weirdly encoded post bodies' do
post.body = "* oprava chyby 33731\n* \xFAprava podle Revize B anglick\xE9ho dokumentu\n"
post.body.split("\n")
.must_equal ['* oprava chyby 33731', '* �prava podle Revize B anglick�ho dokumentu']
end
  it 'strip tags method removes tags' do
post.body = "<p>Bad Tags</b>\n"
post.save
post.reload
post.body.must_equal 'Bad Tags'
end
end
 | 1 | 7,235 | Can we create some post records to assert the sort order? | blackducksoftware-ohloh-ui | rb
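One possible shape for what the reviewer is asking for: create an unanswered topic (one with a single post) and an answered one, then assert what `by_unanswered` returns. The factory names come from the spec above; the attributes and the `posts_count` counter-cache behaviour are assumptions, and an ordering assertion could be added once the records carry distinct `created_at` values:

```ruby
it "only returns posts whose topic is unanswered" do
  unanswered_post = create(:post)             # its topic holds a single post
  answered_topic  = create(:topic_with_posts) # topic with several replies

  Post.by_unanswered.must_include unanswered_post
  answered_topic.posts.each { |reply| Post.by_unanswered.wont_include reply }
end
```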
@@ -178,6 +178,7 @@ func New(ctx context.Context, o Options) (*Service, error) {
peerID := stream.Conn().RemotePeer()
i, err := s.handshakeService.Handle(NewStream(stream), peerID)
if err != nil {
+ _ = stream.Reset()
if err == handshake.ErrNetworkIDIncompatible {
s.logger.Warningf("peer %s has a different network id.", peerID)
} | 1 | // Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package libp2p
import (
"context"
"crypto/ecdsa"
"errors"
"fmt"
"net"
"github.com/ethersphere/bee/pkg/addressbook"
"github.com/ethersphere/bee/pkg/logging"
"github.com/ethersphere/bee/pkg/p2p"
handshake "github.com/ethersphere/bee/pkg/p2p/libp2p/internal/handshake"
"github.com/ethersphere/bee/pkg/swarm"
"github.com/ethersphere/bee/pkg/tracing"
"github.com/libp2p/go-libp2p"
autonat "github.com/libp2p/go-libp2p-autonat-svc"
crypto "github.com/libp2p/go-libp2p-core/crypto"
"github.com/libp2p/go-libp2p-core/helpers"
"github.com/libp2p/go-libp2p-core/host"
"github.com/libp2p/go-libp2p-core/network"
libp2ppeer "github.com/libp2p/go-libp2p-core/peer"
"github.com/libp2p/go-libp2p-core/peerstore"
protocol "github.com/libp2p/go-libp2p-core/protocol"
"github.com/libp2p/go-libp2p-peerstore/pstoremem"
libp2pquic "github.com/libp2p/go-libp2p-quic-transport"
"github.com/libp2p/go-tcp-transport"
ws "github.com/libp2p/go-ws-transport"
ma "github.com/multiformats/go-multiaddr"
"github.com/multiformats/go-multistream"
)
var _ p2p.Service = (*Service)(nil)
type Service struct {
ctx context.Context
host host.Host
libp2pPeerstore peerstore.Peerstore
metrics metrics
networkID int32
handshakeService *handshake.Service
addrssbook addressbook.Putter
peers *peerRegistry
peerHandler func(context.Context, swarm.Address) error
logger logging.Logger
tracer *tracing.Tracer
}
type Options struct {
PrivateKey *ecdsa.PrivateKey
Overlay swarm.Address
Addr string
DisableWS bool
DisableQUIC bool
NetworkID int32
Addressbook addressbook.Putter
Logger logging.Logger
Tracer *tracing.Tracer
}
func New(ctx context.Context, o Options) (*Service, error) {
host, port, err := net.SplitHostPort(o.Addr)
if err != nil {
return nil, fmt.Errorf("address: %w", err)
}
ip4Addr := "0.0.0.0"
ip6Addr := "::1"
if host != "" {
ip := net.ParseIP(host)
if ip4 := ip.To4(); ip4 != nil {
ip4Addr = ip4.String()
ip6Addr = ""
} else if ip6 := ip.To16(); ip6 != nil {
ip6Addr = ip6.String()
ip4Addr = ""
}
}
var listenAddrs []string
if ip4Addr != "" {
listenAddrs = append(listenAddrs, fmt.Sprintf("/ip4/%s/tcp/%s", ip4Addr, port))
if !o.DisableWS {
listenAddrs = append(listenAddrs, fmt.Sprintf("/ip4/%s/tcp/%s/ws", ip4Addr, port))
}
if !o.DisableQUIC {
listenAddrs = append(listenAddrs, fmt.Sprintf("/ip4/%s/udp/%s/quic", ip4Addr, port))
}
}
if ip6Addr != "" {
listenAddrs = append(listenAddrs, fmt.Sprintf("/ip6/%s/tcp/%s", ip6Addr, port))
if !o.DisableWS {
listenAddrs = append(listenAddrs, fmt.Sprintf("/ip6/%s/tcp/%s/ws", ip6Addr, port))
}
if !o.DisableQUIC {
listenAddrs = append(listenAddrs, fmt.Sprintf("/ip6/%s/udp/%s/quic", ip6Addr, port))
}
}
security := libp2p.DefaultSecurity
libp2pPeerstore := pstoremem.NewPeerstore()
opts := []libp2p.Option{
libp2p.ListenAddrStrings(listenAddrs...),
security,
// Attempt to open ports using uPNP for NATed hosts.
libp2p.NATPortMap(),
// Use dedicated peerstore instead the global DefaultPeerstore
libp2p.Peerstore(libp2pPeerstore),
}
if o.PrivateKey != nil {
opts = append(opts,
libp2p.Identity((*crypto.Secp256k1PrivateKey)(o.PrivateKey)),
)
}
transports := []libp2p.Option{
libp2p.Transport(tcp.NewTCPTransport),
}
if !o.DisableWS {
transports = append(transports, libp2p.Transport(ws.New))
}
if !o.DisableQUIC {
transports = append(transports, libp2p.Transport(libp2pquic.NewTransport))
}
opts = append(opts, transports...)
h, err := libp2p.New(ctx, opts...)
if err != nil {
return nil, err
}
// If you want to help other peers to figure out if they are behind
// NATs, you can launch the server-side of AutoNAT too (AutoRelay
// already runs the client)
if _, err = autonat.NewAutoNATService(ctx, h,
// Support same non default security and transport options as
// original host.
append(transports, security)...,
); err != nil {
return nil, fmt.Errorf("autonat: %w", err)
}
peerRegistry := newPeerRegistry()
s := &Service{
ctx: ctx,
host: h,
libp2pPeerstore: libp2pPeerstore,
metrics: newMetrics(),
networkID: o.NetworkID,
handshakeService: handshake.New(o.Overlay, o.NetworkID, o.Logger),
peers: peerRegistry,
addrssbook: o.Addressbook,
logger: o.Logger,
tracer: o.Tracer,
}
// Construct protocols.
id := protocol.ID(p2p.NewSwarmStreamName(handshake.ProtocolName, handshake.ProtocolVersion, handshake.StreamName))
matcher, err := s.protocolSemverMatcher(id)
if err != nil {
return nil, fmt.Errorf("protocol version match %s: %w", id, err)
}
// handshake
s.host.SetStreamHandlerMatch(id, matcher, func(stream network.Stream) {
peerID := stream.Conn().RemotePeer()
i, err := s.handshakeService.Handle(NewStream(stream), peerID)
if err != nil {
if err == handshake.ErrNetworkIDIncompatible {
s.logger.Warningf("peer %s has a different network id.", peerID)
}
if err == handshake.ErrHandshakeDuplicate {
s.logger.Warningf("handshake happened for already connected peer %s", peerID)
}
s.logger.Debugf("handshake: handle %s: %v", peerID, err)
s.logger.Errorf("unable to handshake with peer %v", peerID)
_ = s.disconnect(peerID)
return
}
if exists := s.peers.addIfNotExists(stream.Conn(), i.Address); exists {
return
}
remoteMultiaddr, err := ma.NewMultiaddr(fmt.Sprintf("%s/p2p/%s", stream.Conn().RemoteMultiaddr().String(), peerID.Pretty()))
if err != nil {
s.logger.Debugf("multiaddr error: handle %s: %v", peerID, err)
s.logger.Errorf("unable to connect with peer %v", peerID)
_ = s.disconnect(peerID)
return
}
err = s.addrssbook.Put(i.Address, remoteMultiaddr)
if err != nil {
s.logger.Debugf("handshake: addressbook put error %s: %v", peerID, err)
s.logger.Errorf("unable to persist peer %v", peerID)
_ = s.disconnect(peerID)
return
}
if s.peerHandler != nil {
if err := s.peerHandler(ctx, i.Address); err != nil {
s.logger.Debugf("peerhandler error: %s: %v", peerID, err)
}
}
s.metrics.HandledStreamCount.Inc()
s.logger.Infof("peer %s connected", i.Address)
})
h.Network().SetConnHandler(func(_ network.Conn) {
s.metrics.HandledConnectionCount.Inc()
})
h.Network().Notify(peerRegistry) // update peer registry on network events
h.Network().Notify(s.handshakeService) // update handshake service on network events
return s, nil
}
func (s *Service) AddProtocol(p p2p.ProtocolSpec) (err error) {
for _, ss := range p.StreamSpecs {
id := protocol.ID(p2p.NewSwarmStreamName(p.Name, p.Version, ss.Name))
matcher, err := s.protocolSemverMatcher(id)
if err != nil {
return fmt.Errorf("protocol version match %s: %w", id, err)
}
s.host.SetStreamHandlerMatch(id, matcher, func(streamlibp2p network.Stream) {
peerID := streamlibp2p.Conn().RemotePeer()
overlay, found := s.peers.overlay(peerID)
if !found {
// todo: this should never happen, should we disconnect in this case?
// todo: test connection close and refactor
_ = s.disconnect(peerID)
s.logger.Errorf("overlay address for peer %q not found", peerID)
return
}
stream := newStream(streamlibp2p)
// exchange headers
if err := handleHeaders(ss.Headler, stream); err != nil {
s.logger.Debugf("handle protocol %s/%s: stream %s: peer %s: handle headers: %v", p.Name, p.Version, ss.Name, overlay, err)
return
}
// tracing: get span tracing context and add it to the context
// silently ignore if the peer is not providing tracing
ctx, err := s.tracer.WithContextFromHeaders(s.ctx, stream.Headers())
if err != nil && !errors.Is(err, tracing.ErrContextNotFound) {
s.logger.Debugf("handle protocol %s/%s: stream %s: peer %s: get tracing context: %v", p.Name, p.Version, ss.Name, overlay, err)
return
}
logger := tracing.NewLoggerWithTraceID(ctx, s.logger)
logger.Tracef("handle protocol %s/%s: stream %s: peer %s", p.Name, p.Version, ss.Name, overlay)
s.metrics.HandledStreamCount.Inc()
if err := ss.Handler(ctx, p2p.Peer{Address: overlay}, stream); err != nil {
var e *p2p.DisconnectError
				if errors.As(err, &e) {
// todo: test connection close and refactor
_ = s.Disconnect(overlay)
}
logger.Debugf("handle protocol %s/%s: stream %s: peer %s: %v", p.Name, p.Version, ss.Name, overlay, err)
return
}
})
}
return nil
}
func (s *Service) Addresses() (addrs []ma.Multiaddr, err error) {
// Build host multiaddress
hostAddr, err := ma.NewMultiaddr(fmt.Sprintf("/p2p/%s", s.host.ID().Pretty()))
if err != nil {
return nil, err
}
// Now we can build a full multiaddress to reach this host
// by encapsulating both addresses:
for _, addr := range s.host.Addrs() {
addrs = append(addrs, addr.Encapsulate(hostAddr))
}
return addrs, nil
}
func (s *Service) Connect(ctx context.Context, addr ma.Multiaddr) (overlay swarm.Address, err error) {
// Extract the peer ID from the multiaddr.
info, err := libp2ppeer.AddrInfoFromP2pAddr(addr)
if err != nil {
return swarm.Address{}, err
}
if _, found := s.peers.overlay(info.ID); found {
return swarm.Address{}, p2p.ErrAlreadyConnected
}
if err := s.host.Connect(ctx, *info); err != nil {
return swarm.Address{}, err
}
stream, err := s.newStreamForPeerID(ctx, info.ID, handshake.ProtocolName, handshake.ProtocolVersion, handshake.StreamName)
if err != nil {
_ = s.disconnect(info.ID)
return swarm.Address{}, err
}
i, err := s.handshakeService.Handshake(NewStream(stream))
if err != nil {
_ = s.disconnect(info.ID)
return swarm.Address{}, fmt.Errorf("handshake: %w", err)
}
if err := helpers.FullClose(stream); err != nil {
return swarm.Address{}, err
}
if exists := s.peers.addIfNotExists(stream.Conn(), i.Address); exists {
return i.Address, nil
}
s.metrics.CreatedConnectionCount.Inc()
s.logger.Infof("peer %s connected", i.Address)
return i.Address, nil
}
func (s *Service) Disconnect(overlay swarm.Address) error {
peerID, found := s.peers.peerID(overlay)
if !found {
return p2p.ErrPeerNotFound
}
return s.disconnect(peerID)
}
func (s *Service) disconnect(peerID libp2ppeer.ID) error {
if err := s.host.Network().ClosePeer(peerID); err != nil {
return err
}
s.peers.remove(peerID)
return nil
}
func (s *Service) Peers() []p2p.Peer {
return s.peers.peers()
}
func (s *Service) SetPeerAddedHandler(h func(context.Context, swarm.Address) error) {
s.peerHandler = h
}
func (s *Service) NewStream(ctx context.Context, overlay swarm.Address, headers p2p.Headers, protocolName, protocolVersion, streamName string) (p2p.Stream, error) {
peerID, found := s.peers.peerID(overlay)
if !found {
return nil, p2p.ErrPeerNotFound
}
streamlibp2p, err := s.newStreamForPeerID(ctx, peerID, protocolName, protocolVersion, streamName)
if err != nil {
return nil, err
}
stream := newStream(streamlibp2p)
// tracing: add span context header
if headers == nil {
headers = make(p2p.Headers)
}
if err := s.tracer.AddContextHeader(ctx, headers); err != nil && !errors.Is(err, tracing.ErrContextNotFound) {
return nil, err
}
// exchange headers
if err := sendHeaders(ctx, headers, stream); err != nil {
return nil, fmt.Errorf("send headers: %w", err)
}
return stream, nil
}
func (s *Service) newStreamForPeerID(ctx context.Context, peerID libp2ppeer.ID, protocolName, protocolVersion, streamName string) (network.Stream, error) {
swarmStreamName := p2p.NewSwarmStreamName(protocolName, protocolVersion, streamName)
st, err := s.host.NewStream(ctx, peerID, protocol.ID(swarmStreamName))
if err != nil {
if err == multistream.ErrNotSupported || err == multistream.ErrIncorrectVersion {
return nil, p2p.NewIncompatibleStreamError(err)
}
return nil, fmt.Errorf("create stream %q to %q: %w", swarmStreamName, peerID, err)
}
s.metrics.CreatedStreamCount.Inc()
return st, nil
}
func (s *Service) Close() error {
if err := s.libp2pPeerstore.Close(); err != nil {
return err
}
return s.host.Close()
}
| 1 | 9,368 | is this necessary? | ethersphere-bee | go |
@@ -68,6 +68,8 @@ namespace Datadog.Trace.TestHelpers
/// </summary>
public int Port { get; }
+ public List<Func<Span, bool>> Filters { get; private set; } = new List<Func<Span, bool>>();
+
public IImmutableList<Span> Spans { get; private set; } = ImmutableList<Span>.Empty;
public IImmutableList<NameValueCollection> RequestHeaders { get; private set; } = ImmutableList<NameValueCollection>.Empty; | 1 | using System;
using System.Collections;
using System.Collections.Generic;
using System.Collections.Immutable;
using System.Collections.Specialized;
using System.Diagnostics;
using System.Linq;
using System.Net;
using System.Text;
using System.Threading;
using Datadog.Trace.ExtensionMethods;
using MessagePack;
namespace Datadog.Trace.TestHelpers
{
public class MockTracerAgent : IDisposable
{
private readonly HttpListener _listener;
private readonly Thread _listenerThread;
public MockTracerAgent(int port = 8126, int retries = 5)
{
// try up to 5 consecutive ports before giving up
while (true)
{
// seems like we can't reuse a listener if it fails to start,
// so create a new listener each time we retry
var listener = new HttpListener();
listener.Prefixes.Add($"http://localhost:{port}/");
try
{
listener.Start();
// successfully listening
Port = port;
_listener = listener;
_listenerThread = new Thread(HandleHttpRequests);
_listenerThread.Start();
return;
}
catch (HttpListenerException) when (retries > 0)
{
// only catch the exception if there are retries left
port++;
retries--;
}
// always close listener if exception is thrown,
// whether it was caught or not
listener.Close();
}
}
public event EventHandler<EventArgs<HttpListenerContext>> RequestReceived;
/// <summary>
        /// Gets or sets a value indicating whether incoming traces should be deserialized.
/// </summary>
public bool ShouldDeserializeTraces { get; set; } = true;
/// <summary>
/// Gets the TCP port that this Agent is listening on.
        /// Can be different from <see cref="MockTracerAgent(int, int)"/>'s <c>port</c>
/// parameter if listening on that port fails.
/// </summary>
public int Port { get; }
public IImmutableList<Span> Spans { get; private set; } = ImmutableList<Span>.Empty;
public IImmutableList<NameValueCollection> RequestHeaders { get; private set; } = ImmutableList<NameValueCollection>.Empty;
/// <summary>
/// Wait for the given number of spans to appear.
/// </summary>
/// <param name="count">The expected number of spans.</param>
/// <param name="timeoutInMilliseconds">The timeout</param>
/// <param name="operationName">The integration we're testing</param>
/// <param name="minDateTime">Minimum time to check for spans from</param>
/// <param name="returnAllOperations">When true, returns every span regardless of operation name</param>
/// <returns>The list of spans.</returns>
public IImmutableList<Span> WaitForSpans(
int count,
int timeoutInMilliseconds = 20000,
string operationName = null,
DateTimeOffset? minDateTime = null,
bool returnAllOperations = false)
{
var deadline = DateTime.Now.AddMilliseconds(timeoutInMilliseconds);
var minimumOffset = (minDateTime ?? DateTimeOffset.MinValue).ToUnixTimeNanoseconds();
IImmutableList<Span> relevantSpans = null;
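            // Poll the collected spans until enough matching spans arrive or the deadline passes.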
while (DateTime.Now < deadline)
{
relevantSpans =
Spans
.Where(s => s.Start > minimumOffset)
.ToImmutableList();
if (relevantSpans.Count(s => operationName == null || s.Name == operationName) >= count)
{
break;
}
Thread.Sleep(500);
}
foreach (var headers in RequestHeaders)
{
// This is the place to check against headers we expect
AssertHeader(
headers,
"X-Datadog-Trace-Count",
header =>
{
if (int.TryParse(header, out int traceCount))
{
return traceCount > 0;
}
return false;
});
}
if (!returnAllOperations)
{
relevantSpans =
relevantSpans
.Where(s => operationName == null || s.Name == operationName)
.ToImmutableList();
}
return relevantSpans;
}
public void Dispose()
{
_listener?.Stop();
}
protected virtual void OnRequestReceived(HttpListenerContext context)
{
RequestReceived?.Invoke(this, new EventArgs<HttpListenerContext>(context));
}
private static List<Span> ToSpans(dynamic data)
{
if (data is IDictionary dict)
{
var span = new Span
{
TraceId = dict.GetValueOrDefault<ulong>("trace_id"),
SpanId = dict.GetValueOrDefault<ulong>("span_id"),
Name = dict.GetValueOrDefault<string>("name"),
Resource = dict.GetValueOrDefault<string>("resource"),
Service = dict.GetValueOrDefault<string>("service"),
Type = dict.GetValueOrDefault<string>("type"),
Start = dict.GetValueOrDefault<long>("start"),
Duration = dict.GetValueOrDefault<ulong>("duration"),
Tags = dict.GetValueOrDefault<Dictionary<object, object>>("meta")
.ToDictionary(p => (string)p.Key, p => (string)p.Value),
};
return new List<Span> { span };
}
if (data is IEnumerable rawSpans)
{
var allSpans = new List<Span>();
foreach (var rawSpan in rawSpans)
{
allSpans.AddRange(ToSpans(rawSpan));
}
return allSpans;
}
return new List<Span>();
}
private void AssertHeader(
NameValueCollection headers,
string headerKey,
Func<string, bool> assertion)
{
var header = headers.Get(headerKey);
if (string.IsNullOrEmpty(header))
{
throw new Exception($"Every submission to the agent should have a {headerKey} header.");
}
if (!assertion(header))
{
throw new Exception($"Failed assertion for {headerKey} on {header}");
}
}
private void HandleHttpRequests()
{
while (_listener.IsListening)
{
try
{
var ctx = _listener.GetContext();
OnRequestReceived(ctx);
if (ShouldDeserializeTraces)
{
var rawSpans = MessagePackSerializer.Deserialize<dynamic>(ctx.Request.InputStream);
var spans = ToSpans(rawSpans);
lock (this)
{
// we only need to lock when replacing the span collection,
// not when reading it because it is immutable
Spans = Spans.AddRange(spans);
RequestHeaders = RequestHeaders.Add(new NameValueCollection(ctx.Request.Headers));
}
}
ctx.Response.ContentType = "application/json";
var buffer = Encoding.UTF8.GetBytes("{}");
ctx.Response.OutputStream.Write(buffer, 0, buffer.Length);
ctx.Response.Close();
}
catch (HttpListenerException)
{
// listener was stopped,
// ignore to let the loop end and the method return
}
}
}
[DebuggerDisplay("TraceId={TraceId}, SpanId={SpanId}, Service={Service}, Name={Name}, Resource={Resource}")]
public struct Span
{
public ulong TraceId { get; set; }
public ulong SpanId { get; set; }
public string Name { get; set; }
public string Resource { get; set; }
public string Service { get; set; }
public string Type { get; set; }
public long Start { get; set; }
public ulong Duration { get; set; }
public Dictionary<string, string> Tags { get; set; }
}
}
}
| 1 | 16,128 | What is the purpose of this? It seems like it is used to indicate whether there is a filter on a span. But what filter? Could there eventually be multiple filters per span? Just not understanding. | DataDog-dd-trace-dotnet | .cs |
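To make the reviewer's question about the new Filters property concrete, below is a hypothetical C# sketch (not taken from dd-trace-dotnet) of one way MockTracerAgent could consume a List<Func<Span, bool>>: each predicate acts as an acceptance filter, and a span is kept only when every registered filter returns true. The SpanFilters class, its method names, and the sample data are assumptions made purely for illustration.

using System;
using System.Collections.Generic;
using System.Linq;

namespace MockAgentFilterSketch
{
    // Minimal stand-in for the agent's Span type, reduced to the fields this sketch needs.
    public struct Span
    {
        public string Name { get; set; }
        public string Service { get; set; }
    }

    public static class SpanFilters
    {
        // A span is accepted only if every registered predicate returns true for it.
        public static bool IsAccepted(IReadOnlyList<Func<Span, bool>> filters, Span span)
        {
            return filters.All(filter => filter(span));
        }

        // Applies the filters to a batch of received spans, mirroring how a
        // WaitForSpans-style method could restrict the spans it counts.
        public static List<Span> Apply(IReadOnlyList<Func<Span, bool>> filters, IEnumerable<Span> spans)
        {
            return spans.Where(span => IsAccepted(filters, span)).ToList();
        }
    }

    public static class Program
    {
        public static void Main()
        {
            var filters = new List<Func<Span, bool>>
            {
                span => span.Service == "web",          // keep only spans from the "web" service
                span => span.Name.StartsWith("http."),  // ...whose operation name looks like an HTTP span
            };

            var received = new[]
            {
                new Span { Name = "http.request", Service = "web" },
                new Span { Name = "sql.query", Service = "db" },
            };

            foreach (var span in SpanFilters.Apply(filters, received))
            {
                Console.WriteLine($"kept: {span.Service}/{span.Name}");
            }
        }
    }
}

Under this reading, several filters can apply to every span, but they act as global acceptance criteria for the agent rather than per-span markers; whether that is the intended design is exactly what the review comment asks.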