| file_name (large_string, lengths 4–140) | prefix (large_string, lengths 0–12.1k) | suffix (large_string, lengths 0–12k) | middle (large_string, lengths 0–7.51k) | fim_type (large_string, 4 classes) |
---|---|---|---|---|
viewsets.py | Optional
import toml
from asciinema import asciicast
from asciinema.commands.cat import CatCommand
from django.conf import settings
from django.db.models import F, Q
from django.http import StreamingHttpResponse, Http404
from django.utils.functional import cached_property
from django.views.static import serve
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework import viewsets, filters, mixins, renderers, serializers
from rest_framework.decorators import action
from rest_framework.exceptions import ValidationError
from rest_framework.parsers import JSONParser
from rest_framework.renderers import JSONRenderer, BrowsableAPIRenderer
from rest_framework import parsers
from rest_framework.response import Response
from rest_framework.serializers import Serializer
from pentest_project.exceptions import ItemDoesNotExist
from pentest_project.plugins import PluginCollections, Plugins, PluginShortcuts, Plugin
from pentest_studio.api import BaseViewSetMixIn, StandardResultsSetPagination
from pentest_worker.models import Worker
from pentest_project.api.filters import ActionFilter, FileTomlSearchFilter
from pentest_project.api.serializers import ProjectSerializer, ActionSerializer, DetailProjectSerializer, \
DetailActionSerializer, PluginSetupSerializer, PluginSerializer, PluginShortcutArgSerializer, \
PluginShortcutEnvSerializer, PluginShortcutSerializer, RenderShortcutSerializer, PluginCollectionSerializer
from pentest_project.models import Project, Action
def serve_file(request, filepath):
return serve(request, os.path.basename(filepath), os.path.dirname(filepath))
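# Stream the recorded terminal output of an asciinema cast, yielding the text of each stdout event.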
def asciinema_cat(file):
with asciicast.open_from_url(file) as a:
for t, _type, text in a.stdout_events():
yield text
def filter_toml_file(file_id):
def filter_file(file):
return re.match(r'{}\.plugin\.to?ml$'.format(re.escape(file_id)), file, re.IGNORECASE)
return filter_file
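# Lazy, list-like wrapper over the files in a directory: names are listed once (cached property),
# optionally narrowed by filter_callable, and each item is materialised on demand via read_file.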
class FilesQuerySet:
def __init__(self, directory: str, read_file: Callable, filter_callable: Optional[Callable] = None):
self.directory = directory
self.read_file = read_file
self.filter_callable = filter_callable
@cached_property
def file_names(self):
file_names = os.listdir(self.directory)
if self.filter_callable is not None:
file_names = list(filter(self.filter_callable, file_names))
return file_names
def __iter__(self):
return map(self.read_file, self.file_names)
def __len__(self):
return len(self.file_names)
def __getitem__(self, item):
if not isinstance(item, slice):
raise ValueError(f'Unsupported slice type: {item}')
return map(self.read_file, self.file_names[item])
class PlainTextRenderer(renderers.BaseRenderer):
media_type = 'text/plain'
format = 'text'
def render(self, data, media_type=None, renderer_context=None):
return data
class PlainTextParser(parsers.FileUploadParser):
media_type = 'text/plain'
# TODO: take the file name from the request when the file itself is being created
def parse(self, stream, media_type=None, parser_context=None):
"""
Given a stream to read from, return the parsed representation.
Should return parsed data, or a `DataAndFiles` object consisting of the
parsed data and files.
"""
encoding = parser_context.get('encoding', settings.DEFAULT_CHARSET)
decoded_stream = codecs.getreader(encoding)(stream)
return decoded_stream
class TomlParser(parsers.FileUploadParser):
media_type = 'application/toml'
# TODO: take the file name from the request when the file itself is being created
def parse(self, stream, media_type=None, parser_context=None):
"""
Given a stream to read from, return the parsed representation.
Should return parsed data, or a `DataAndFiles` object consisting of the
parsed data and files.
"""
encoding = parser_context.get('encoding', settings.DEFAULT_CHARSET)
decoded_stream = codecs.getreader(encoding)(stream)
raw_body = decoded_stream.read()
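# Expose the raw TOML text and a normalised *.toml filename on the request object,
# in addition to returning the parsed document.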
request = parser_context.get('request')
setattr(request, 'raw_body', raw_body)
filename = self.get_filename(stream, media_type, parser_context)
if filename and (not filename.endswith('.toml') and not filename.endswith('.tml')):
filename = f'{filename}.toml'
setattr(request, 'filename', filename)
return toml.loads(raw_body)
class ProjectViewSet(BaseViewSetMixIn, viewsets.ModelViewSet):
"""
"""
queryset = Project.objects.all().order_by('-pk')
serializer_class = ProjectSerializer
filter_backends = (filters.OrderingFilter, filters.SearchFilter, DjangoFilterBackend)
search_fields = ('name',)
filter_fields = ('parent', 'created_at', 'updated_at')
ordering_fields = filter_fields
detail_serializer_class = DetailProjectSerializer
class ActionViewSet(BaseViewSetMixIn, viewsets.ModelViewSet):
"""
""" | filterset_class = ActionFilter
ordering_fields = ('parent', 'parent', 'created_at', 'updated_at')
detail_serializer_class = DetailActionSerializer
pagination_class = StandardResultsSetPagination
def get_queryset(self):
queryset = super(ActionViewSet, self).get_queryset()
if self.action == 'grouped':
# Filter by (plugin + is_last) or no plugin at all
queryset = queryset.filter(Q(plugin='') | Q(is_last=True))
return queryset
@action(methods=['put'], detail=True)
def worker_upload(self, request, pk):
file_obj = request.data['file']
instance: Action = self.get_object()
directory = os.path.dirname(instance.get_data_directory())
os.makedirs(directory, exist_ok=True)
t = tarfile.open(fileobj=file_obj.file)
t.extractall(directory)
return_code = instance.get_return_code()
if return_code is None:
instance.status = 'FINISHED'
else:
instance.status = 'SUCCESS' if return_code == 0 else 'ERROR'
instance.save()
return Response(status=204)
@action(methods=['get'], detail=True, url_path='asciinema.cast')
def download_cast(self, request, pk):
instance: Action = self.get_object()
return serve_file(request, instance.get_terminal_path())
@action(methods=['get'], detail=True)
def terminal_output(self, request, pk):
instance: Action = self.get_object()
file: str = instance.get_terminal_path()
if not os.path.lexists(file) or not os.path.getsize(file):
raise Http404
return StreamingHttpResponse(asciinema_cat(file))
@action(methods=['post'], detail=True)
def block_task(self, request, pk):
instance: Action = self.get_object()
try:
worker = Worker.objects.get(user=request.user)
except Worker.DoesNotExist:
raise ValidationError('User {} is not a worker'.format(request.user))
instance.block_task(worker)
instance.save()
return self.retrieve(request, pk)
@action(methods=['get'], detail=False)
def grouped(self, request, *args, **kwargs):
return self.list(request, *args, **kwargs)
class FileTomlViewSet(mixins.ListModelMixin, mixins.RetrieveModelMixin,
viewsets.GenericViewSet, viewsets.ViewSet):
pagination_class = StandardResultsSetPagination
serializer_class: Type[Serializer] = None
filter_backends = (FileTomlSearchFilter,)
parser_classes = (TomlParser,)
def get_renderers(self):
if self.action == 'text':
return [PlainTextRenderer()]
else:
return super(FileTomlViewSet, self).get_renderers()
def get_queryset(self):
raise NotImplementedError
def get_object(self):
lookup_url_kwarg = self.lookup_url_kwarg or self.lookup_field
try:
return self.get_queryset().from_name(self.kwargs[lookup_url_kwarg])
except ItemDoesNotExist:
raise Http404
@action(detail=True, methods=['get'])
def text(self, request, *args, **kwargs):
obj = self.get_object()
return Response(obj.text)
class PluginViewSet(mixins.UpdateModelMixin, mixins.CreateModelMixin, mixins.DestroyModelMixin, FileTomlViewSet):
serializer_class = PluginSerializer
parser_classes = (TomlParser, JSONParser)
def get_serializer_class(self):
if self.action == 'install':
return serializers.Serializer
else:
return super(PluginViewSet, self).get_serializer_class()
def get_queryset(self):
return Plugins()
@action(detail=True, methods=['post'])
def install(self, request, *args, **kwargs):
obj: Plugin = self.get_object()
actions = Worker.objects.active().run_command(f'Installing the {obj.name} plugin', obj.setup['install'])
serializer = ActionSerializer(many=True, instance=actions, context=self.get_serializer_context())
return Response(serializer.data)
class PluginShortcutViewSet(FileTomlViewSet):
serializer_class = PluginShortcutSerializer
lookup_value_regex = r'[a-zA-Z0-9.\-]+'
parser_classes = (JSONParser,)
renderer_classes = (JSONRenderer, BrowsableAPIRenderer, PlainTextRenderer)
def get_queryset(self):
return PluginShortcuts()
@action(methods=['post'], detail=True)
def render_command(self, request, pk=None):
obj = self.get_object()
serializer = RenderShortcutSerializer | queryset = Action.objects.order_by('-pk')
serializer_class = ActionSerializer
filter_backends = (filters.OrderingFilter, filters.SearchFilter, DjangoFilterBackend)
search_fields = ('name',) | random_line_split |
viewsets.py | Optional
import toml
from asciinema import asciicast
from asciinema.commands.cat import CatCommand
from django.conf import settings
from django.db.models import F, Q
from django.http import StreamingHttpResponse, Http404
from django.utils.functional import cached_property
from django.views.static import serve
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework import viewsets, filters, mixins, renderers, serializers
from rest_framework.decorators import action
from rest_framework.exceptions import ValidationError
from rest_framework.parsers import JSONParser
from rest_framework.renderers import JSONRenderer, BrowsableAPIRenderer
from rest_framework import parsers
from rest_framework.response import Response
from rest_framework.serializers import Serializer
from pentest_project.exceptions import ItemDoesNotExist
from pentest_project.plugins import PluginCollections, Plugins, PluginShortcuts, Plugin
from pentest_studio.api import BaseViewSetMixIn, StandardResultsSetPagination
from pentest_worker.models import Worker
from pentest_project.api.filters import ActionFilter, FileTomlSearchFilter
from pentest_project.api.serializers import ProjectSerializer, ActionSerializer, DetailProjectSerializer, \
DetailActionSerializer, PluginSetupSerializer, PluginSerializer, PluginShortcutArgSerializer, \
PluginShortcutEnvSerializer, PluginShortcutSerializer, RenderShortcutSerializer, PluginCollectionSerializer
from pentest_project.models import Project, Action
def serve_file(request, filepath):
return serve(request, os.path.basename(filepath), os.path.dirname(filepath))
def asciinema_cat(file):
with asciicast.open_from_url(file) as a:
for t, _type, text in a.stdout_events():
yield text
def filter_toml_file(file_id):
def filter_file(file):
return re.match(r'{}\.plugin\.to?ml$'.format(re.escape(file_id)), file, re.IGNORECASE)
return filter_file
class FilesQuerySet:
def __init__(self, directory: str, read_file: Callable, filter_callable: Optional[Callable] = None):
self.directory = directory
self.read_file = read_file
self.filter_callable = filter_callable
@cached_property
def file_names(self):
file_names = os.listdir(self.directory)
if self.filter_callable is not None:
file_names = list(filter(self.filter_callable, file_names))
return file_names
def __iter__(self):
return map(self.read_file, self.file_names)
def __len__(self):
return len(self.file_names)
def __getitem__(self, item):
if not isinstance(item, slice):
raise ValueError(f'Unsupported slice type: {item}')
return map(self.read_file, self.file_names[item])
class PlainTextRenderer(renderers.BaseRenderer):
media_type = 'text/plain'
format = 'text'
def render(self, data, media_type=None, renderer_context=None):
return data
class PlainTextParser(parsers.FileUploadParser):
media_type = 'text/plain'
# TODO: take the file name from the request when the file itself is being created
def parse(self, stream, media_type=None, parser_context=None):
"""
Given a stream to read from, return the parsed representation.
Should return parsed data, or a `DataAndFiles` object consisting of the
parsed data and files.
"""
encoding = parser_context.get('encoding', settings.DEFAULT_CHARSET)
decoded_stream = codecs.getreader(encoding)(stream)
return decoded_stream
class TomlParser(parsers.FileUploadParser):
media_type = 'application/toml'
# TODO: take the file name from the request when the file itself is being created
def parse(self, stream, media_type=None, parser_context=None):
"""
Given a stream to read from, return the parsed representation.
Should return parsed data, or a `DataAndFiles` object consisting of the
parsed data and files.
"""
encoding = parser_context.get('encoding', settings.DEFAULT_CHARSET)
decoded_stream = codecs.getreader(encoding)(stream)
raw_body = decoded_stream.read()
request = parser_context.get('request')
setattr(request, 'raw_body', raw_body)
filename = self.get_filename(stream, media_type, parser_context)
if filename and (not filename.endswith('.toml') and not filename.endswith('.tml')):
filename = f'{filename}.toml'
setattr(request, 'filename', filename)
return toml.loads(raw_body)
class ProjectViewSet(BaseViewSetMixIn, viewsets.ModelViewSet):
"""
"""
queryset = Project.objects.all().order_by('-pk')
serializer_class = ProjectSerializer
filter_backends = (filters.OrderingFilter, filters.SearchFilter, DjangoFilterBackend)
search_fields = ('name',)
filter_fields = ('parent', 'created_at', 'updated_at')
ordering_fields = filter_fields
detail_serializer_class = DetailProjectSerializer
class ActionViewSet(BaseViewSetMixIn, viewsets.ModelViewSet):
"""
"""
queryset = Action.objects.order_by('-pk')
serializer_class = ActionSerializer
filter_backends = (filters.OrderingFilter, filters.SearchFilter, DjangoFilterBackend)
search_fields = ('name',)
filterset_class = ActionFilter
ordering_fields = ('parent', 'parent', 'created_at', 'updated_at')
detail_serializer_class = DetailActionSerializer
pagination_class = StandardResultsSetPagination
def get_queryset(self):
queryset = super(ActionViewSet, self).get_queryset()
if self.action == 'grouped':
# Filter by (plugin + is_last) or no plugin at all
queryset = queryset.filter(Q(plugin='') | Q(is_last=True))
return queryset
@action(methods=['put'], detail=True)
def worker_upload(self, request, pk):
file_obj = request.data['file']
instance: Action = self.get_object()
directory = os.path.dirname(instance.get_data_directory())
os.makedirs(directory, exist_ok=True)
t = tarfile.open(fileobj=file_obj.file)
t.extractall(directory)
return_code = instance.get_return_code()
if return_code is None:
instance.status = 'FINISHED'
else:
instance.status = 'SUCCESS' if return_code == 0 else 'ERROR'
instance.save()
return Response(status=204)
@action(methods=['get'], detail=True, url_path='asciinema.cast')
def download_cast(self, request, pk):
in | @action(methods=['get'], detail=True)
def terminal_output(self, request, pk):
instance: Action = self.get_object()
file: str = instance.get_terminal_path()
if not os.path.lexists(file) or not os.path.getsize(file):
raise Http404
return StreamingHttpResponse(asciinema_cat(file))
@action(methods=['post'], detail=True)
def block_task(self, request, pk):
instance: Action = self.get_object()
try:
worker = Worker.objects.get(user=request.user)
except Worker.DoesNotExist:
raise ValidationError('User {} is not a worker'.format(request.user))
instance.block_task(worker)
instance.save()
return self.retrieve(request, pk)
@action(methods=['get'], detail=False)
def grouped(self, request, *args, **kwargs):
return self.list(request, *args, **kwargs)
class FileTomlViewSet(mixins.ListModelMixin, mixins.RetrieveModelMixin,
viewsets.GenericViewSet, viewsets.ViewSet):
pagination_class = StandardResultsSetPagination
serializer_class: Type[Serializer] = None
filter_backends = (FileTomlSearchFilter,)
parser_classes = (TomlParser,)
def get_renderers(self):
if self.action == 'text':
return [PlainTextRenderer()]
else:
return super(FileTomlViewSet, self).get_renderers()
def get_queryset(self):
raise NotImplementedError
def get_object(self):
lookup_url_kwarg = self.lookup_url_kwarg or self.lookup_field
try:
return self.get_queryset().from_name(self.kwargs[lookup_url_kwarg])
except ItemDoesNotExist:
raise Http404
@action(detail=True, methods=['get'])
def text(self, request, *args, **kwargs):
obj = self.get_object()
return Response(obj.text)
class PluginViewSet(mixins.UpdateModelMixin, mixins.CreateModelMixin, mixins.DestroyModelMixin, FileTomlViewSet):
serializer_class = PluginSerializer
parser_classes = (TomlParser, JSONParser)
def get_serializer_class(self):
if self.action == 'install':
return serializers.Serializer
else:
return super(PluginViewSet, self).get_serializer_class()
def get_queryset(self):
return Plugins()
@action(detail=True, methods=['post'])
def install(self, request, *args, **kwargs):
obj: Plugin = self.get_object()
actions = Worker.objects.active().run_command(f'Installing the {obj.name} plugin', obj.setup['install'])
serializer = ActionSerializer(many=True, instance=actions, context=self.get_serializer_context())
return Response(serializer.data)
class PluginShortcutViewSet(FileTomlViewSet):
serializer_class = PluginShortcutSerializer
lookup_value_regex = r'[a-zA-Z0-9.\-]+'
parser_classes = (JSONParser,)
renderer_classes = (JSONRenderer, BrowsableAPIRenderer, PlainTextRenderer)
def get_queryset(self):
return PluginShortcuts()
@action(methods=['post'], detail=True)
def render_command(self, request, pk=None):
obj = self.get_object()
serializer = RenderShortcutSerializer | stance: Action = self.get_object()
return serve_file(request, instance.get_terminal_path())
| identifier_body |
lib.rs | nalgebra_v0_32` | no | version 0.32 |
//! | `nalgebra_v0_32-serde` | no | version 0.32 + serde support |
//! | `nalgebra_v0_31` | no | version 0.31 |
//! | `nalgebra_v0_31-serde` | no | version 0.31 + serde support |
//! | `nalgebra_v0_30` | no | version 0.30 |
//! | `nalgebra_v0_30-serde` | no | version 0.30 + serde support |
//! | `nalgebra_v0_29` | no | version 0.29 |
//! | `nalgebra_v0_29-serde` | no | version 0.29 + serde support |
//!
//!
//! ## Choosing a backend
//!
//! It is not possible to activate two versions of the same backend.
//!
//! The features labeled `*latest*` are an alias for the most recent supported version of the
//! respective backend. It is however recommended to explicitly specify the desired version instead
//! of using any of the `*latest*` features (see section about semantic versioning below).
//!
//! Note that `argmin` by default compiles with `serde` support. Therefore, unless `serde` is
//! deliberately turned off in `argmin`, it is necessary to activate the `serde` support in
//! `argmin-math` as well.
//!
//! The default features `primitives` and `vec` can be turned off in order to only compile the
//! trait definitions. If another backend is chosen, `primitives` will automatically be turned on
//! again.
//!
//! ### Example
//!
//! Activate support for the latest supported `ndarray` version:
//!
//! ```toml
//! [dependencies]
#![doc = concat!("argmin-math = { version = \"", env!("CARGO_PKG_VERSION"), "\", features = [\"ndarray_latest-serde\"] }")]
//! ```
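//!
//! Conversely, a sketch of pulling in only the trait definitions by turning off the default
//! `primitives` and `vec` features:
//!
//! ```toml
//! [dependencies]
#![doc = concat!("argmin-math = { version = \"", env!("CARGO_PKG_VERSION"), "\", default-features = false }")]
//! ```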
//!
//! # Semantic versioning
//!
//! This crate follows semantic versioning. Adding a new backend or a new version of a backend is
//! not considered a breaking change. However, your code may still break if you use any of the
//! features containing `*latest*`. It is therefore recommended to specify the actual version of the
//! backend you are using.
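//!
//! For example, instead of `nalgebra_latest-serde` one would pin the backend explicitly, for
//! instance (a sketch assuming nalgebra 0.32 is the version actually used in your project):
//!
//! ```toml
//! [dependencies]
#![doc = concat!("argmin-math = { version = \"", env!("CARGO_PKG_VERSION"), "\", features = [\"nalgebra_v0_32-serde\"] }")]
//! ```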
//!
//! # Development
//!
//! For development and running the tests a backend for `ndarray-linalg` must be chosen. Normally
//! one would add those as dev dependencies (the features would then be unified with the regular
//! dependencies). However, linking somehow fails when the non-dev `ndarray-linalg` dependency is
//! missing (which is the case for the `*-nolinalg*` features of the ndarray backend). To fix that,
//! the `_dev_linalg_*` features were introduced. When testing and developing with one of the
//! ndarray features with linalg support on, the appropriate `_dev_linalg_*` feature must be turned
//! on as well. Note that the version number in `_dev_linalg_*` is always one below the `ndarray`
//! version. For instance, for ndarray 0.15, one would use the `_dev_linalg_0_14` feature.
//!
//! | Development Feature | Comment |
//! |-----------------------|----------------------------------------------|
//! | `_dev_linalg_latest` | latest `ndarray-linalg` for latest `ndarray` |
//! | `_dev_linalg_0_16` | `ndarray-linalg` v0.16 for `ndarray` v0.15 |
//! | `_dev_linalg_0_13` | `ndarray-linalg` v0.13 for `ndarray` v0.14 |
//! | `_dev_linalg_0_12` | `ndarray-linalg` v0.12 for `ndarray` v0.13 |
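//!
//! For instance, to test against the latest `ndarray` backend one would enable the matching pair,
//! e.g. `cargo test --features "ndarray_latest-serde,_dev_linalg_latest"` (a sketch; pick the pair
//! matching the versions you actually develop against).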
//!
//! # Contributing
//!
//! You found a bug? Your favorite backend is not supported? Feel free to open an issue or ideally
//! submit a PR.
//!
//! # License
//!
//! Licensed under either of
//!
//! * Apache License, Version 2.0,
//! ([LICENSE-APACHE](https://github.com/argmin-rs/argmin/blob/main/LICENSE-APACHE) or
//! <http://www.apache.org/licenses/LICENSE-2.0>)
//! * MIT License ([LICENSE-MIT](https://github.com/argmin-rs/argmin/blob/main/LICENSE-MIT) or
//! <http://opensource.org/licenses/MIT>)
//!
//! at your option.
//!
//! ## Contribution
//!
//! Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion
//! in the work by you, as defined in the Apache-2.0 license, shall be dual licensed as above,
//! without any additional terms or conditions.
#![warn(missing_docs)]
// Explicitly disallow EQ comparison of floats. (This clippy lint is denied by default; however,
// this is just to make sure that it will always stay this way.)
#![deny(clippy::float_cmp)]
cfg_if::cfg_if! {
if #[cfg(feature = "nalgebra_0_32")] {
extern crate nalgebra_0_32 as nalgebra;
} else if #[cfg(feature = "nalgebra_0_31")] {
extern crate nalgebra_0_31 as nalgebra;
} else if #[cfg(feature = "nalgebra_0_30")] {
extern crate nalgebra_0_30 as nalgebra;
} else if #[cfg(feature = "nalgebra_0_29")] {
extern crate nalgebra_0_29 as nalgebra;
}
}
cfg_if::cfg_if! {
if #[cfg(feature = "ndarray_0_15")] {
extern crate ndarray_0_15 as ndarray;
} else if #[cfg(feature = "ndarray_0_14")] {
extern crate ndarray_0_14 as ndarray;
} else if #[cfg(feature = "ndarray_0_13")] {
extern crate ndarray_0_13 as ndarray;
}
}
cfg_if::cfg_if! {
if #[cfg(feature = "ndarray-linalg_0_16")] {
extern crate ndarray_linalg_0_16 as ndarray_linalg;
} else if #[cfg(feature = "ndarray-linalg_0_13")] {
extern crate ndarray_linalg_0_13 as ndarray_linalg;
} else if #[cfg(feature = "ndarray-linalg_0_12")] {
extern crate ndarray_linalg_0_12 as ndarray_linalg;
}
}
cfg_if::cfg_if! {
if #[cfg(feature = "num-complex_0_2")] {
extern crate num_complex_0_2 as num_complex;
} else if #[cfg(feature = "num-complex_0_3")] {
extern crate num_complex_0_3 as num_complex;
} else if #[cfg(feature = "num-complex_0_4")] {
extern crate num_complex_0_4 as num_complex;
}
}
#[cfg(feature = "primitives")]
mod primitives;
#[cfg(feature = "primitives")]
pub use crate::primitives::*;
#[cfg(feature = "ndarray_all")]
mod ndarray_m;
#[cfg(feature = "ndarray_all")]
pub use crate::ndarray_m::*;
#[cfg(feature = "nalgebra_all")]
mod nalgebra_m;
#[cfg(feature = "nalgebra_all")]
pub use crate::nalgebra_m::*;
#[cfg(feature = "vec")]
mod vec;
#[cfg(feature = "vec")]
pub use crate::vec::*;
use anyhow::Error;
/// Dot/scalar product of `T` and `self`
pub trait ArgminDot<T, U> {
/// Dot/scalar product of `T` and `self`
fn dot(&self, other: &T) -> U;
}
/// Dot/scalar product of `T` and `self` weighted by W (p^TWv)
pub trait ArgminWeightedDot<T, U, V> {
/// Dot/scalar product of `T` and `self`
fn weighted_dot(&self, w: &V, vec: &T) -> U;
}
/// Return param vector of all zeros (for now, this is a hack. It should be done better)
pub trait ArgminZero {
/// Return zero(s)
fn zero() -> Self;
}
/// Return the conjugate
pub trait ArgminConj {
/// Return conjugate
#[must_use]
fn conj(&self) -> Self;
}
/// Zero for dynamically sized objects
pub trait ArgminZeroLike {
/// Return zero(s)
#[must_use]
fn zero_like(&self) -> Self;
}
/// Identity matrix
pub trait ArgminEye {
/// Identity matrix of size `n`
fn eye(n: usize) -> Self;
/// Identity matrix of same size as `self`
#[must_use]
fn eye_like(&self) -> Self;
}
/// Add a `T` to `self`
pub trait ArgminAdd<T, U> {
/// Add a `T` to `self` | fn add(&self, other: &T) -> U;
}
/// Subtract a `T` from `self` | random_line_split |
|
main.rs | _offset = {} = ({} - {}) * {}/2/{} + {}/2/2 - {}/2", h_offset, sprite_dir, player.a, fb.w, player.fov, fb.w, sprite_screen_size);
for i in 0..sprite_screen_size {
if h_offset+i<0 || h_offset+i >= screen_size { continue; }
if depth_buffer[(h_offset+i) as usize] < sprite.player_dist { continue; }
for j in 0..sprite_screen_size {
if v_offset+j<0 || v_offset+j >= fb.h as i32 { continue; }
let color = tex_sprites.get(i as u32*tex_sprites.size/sprite_screen_size as u32,
j as u32*tex_sprites.size/sprite_screen_size as u32, sprite.tex_id)
.unwrap();
let (r, g, b, a) = utils::unpack_color(color);
if a > 128 {
let color = utils::pack_color_bgra(b, g, r, a);
fb.set_pixel(fb.w/2 + (h_offset+i) as usize, (v_offset+j) as usize, color)?;
}
}
}
Ok(())
}
fn map_show_sprite(sprite: &Sprite, fb: &mut Framebuffer, map: &Map) -> Result<(), FrameError> {
//(rect_w, rect_h) == size of one map tile
let rect_w = (fb.w / (map.w as usize * 2)) as f32;
let rect_h = (fb.h / map.h as usize) as f32;
fb.draw_rectangle(
(sprite.x * rect_w - 3.0) as usize,
(sprite.y * rect_h - 3.0) as usize,
6,
6,
// utils::pack_color_rgb(255, 0, 0),
utils::pack_color_bgra(0, 0, 255, 255),
)
}
fn render(
fb: &mut Framebuffer,
map: &Map,
player: &Player,
sprites: &mut Vec<Sprite>, // will change order of sprites according to distance from player
tex_walls: &Texture,
tex_monsters: &Texture,
) -> Result<(), FrameError> {
// fb.clear(utils::pack_color_rgb(249, 209, 152));
fb.clear(utils::pack_color_bgra(152, 209, 249, 255));
let rect_w = fb.w / (map.w as usize * 2); //size of one map cell on the screen
let rect_h = fb.h / map.h as usize;
// draw overhead map
for j in 0..map.h {
for i in 0..map.w {
if map.is_empty(i, j) {
continue; //skip empty spaces
}
let rect_x = i as usize * rect_w;
let rect_y = j as usize * rect_h;
let texid = map.get(i, j).expect("i, j not in map range");
fb.draw_rectangle(
rect_x,
rect_y,
rect_w,
rect_h,
tex_walls.get(0, 0, texid).expect("no texture at texid"),
)?;
}
}
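// One depth value per column of the 3D half of the screen; draw_sprite() later skips
// sprite pixels that fall behind a nearer wall column.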
let mut depth_buffer = vec![1e3; (fb.w/2) as usize];
for i in 0..fb.w / 2 {
//cast field of vision on map AND generate 3D view
let angle: f32 = player.get_a() - player.fov / 2. + player.fov * i as f32 / (fb.w / 2) as f32;
for t in 0..2000 {
//since Rust doesn't allow step by float, remap so step==1
let t = t as f32 / 100.; //then transform back to original range
let x = player.x + t * angle.cos();
let y = player.y + t * angle.sin();
// draw the visibility cone on the map
fb.set_pixel(
(x * rect_w as f32) as usize,
(y * rect_h as f32) as usize,
// utils::pack_color_rgb(160, 160, 160),
utils::pack_color_bgra(160, 160, 160, 255),
)
.expect("Could not set pixel");
// if this map tile isn't empty, we've hit a wall
if map.is_empty(x as u32, y as u32) {
continue;
}
// hit a wall
let texid = map
.get(x as u32, y as u32)
.expect("Cannot index this map tile");
assert!(texid < tex_walls.count);
let distance = t * f32::cos(angle - player.get_a());
depth_buffer[i as usize] = distance;
let column_height = (fb.h as f32 / distance) as u32;
let x_texcoord = wall_x_texcoord(x, y, tex_walls);
let column = tex_walls
.get_scaled_column(texid, x_texcoord as u32, column_height)
.expect("Cannot retrieve scaled column");
let pix_x = i + fb.w / 2;
for j in 0..column_height {
let pix_y = j as usize + fb.h / 2 - column_height as usize / 2;
if pix_y < fb.h {
fb.set_pixel(pix_x, pix_y, column[j as usize])
.expect("Could not set pixel");
}
}
break;
}
}
// update distances from sprites to player
for sprite in sprites.iter_mut() {
sprite.player_dist = f32::sqrt(f32::powi(player.x - sprite.x, 2) + f32::powi(player.y - sprite.y, 2));
}
// sort sprites in reverse order of distance to player
sprites.sort_unstable_by(|lhs, rhs| rhs.player_dist.partial_cmp(&lhs.player_dist).unwrap());
// render sprites on map
for sprite in sprites.iter().take(sprites.len()) {
map_show_sprite(sprite, fb, &map)?;
draw_sprite(sprite, &depth_buffer, fb, &player, &tex_monsters)?;
}
Ok(())
}
fn main() -> std::io::Result<()> {
// TODO: unfuck colors
// TODO: create variable color schemes (RGBA vs BGRA)
// TODO: cleanup code
let mut fb = Framebuffer::new(1024, 512);
let mut window = minifb::Window::new("doom-iow", fb.w, fb.h, minifb::WindowOptions::default()).unwrap();
let mut player = Player::new (
3.456,
2.345,
1.523,
std::f32::consts::PI / 3.,
);
let map = match Map::init(16, 16) {
Ok(m) => m,
Err(_) => {
panic!("Could not open map");
}
};
let tex_walls = Texture::new("./walltex.png").expect("Could not open wall texture");
let tex_monsters = Texture::new("./monsters.png").expect("Could not open monster texture");
let mut sprites = vec![
Sprite::new(3.523, 3.812, 2, 0.0),
Sprite::new(1.834, 8.765, 0, 0.0),
Sprite::new(5.323, 5.365, 1, 0.0),
Sprite::new(4.123, 10.265, 1, 0.0),
];
make_gif(&mut player, &mut fb, &map, &mut sprites, &tex_walls, &tex_monsters).unwrap();
while window.is_open() && !window.is_key_down(minifb::Key::Escape) {
render(&mut fb, &map, &player, &mut sprites, &tex_walls, &tex_monsters).unwrap();
player.set_a(player.get_a() - 0.1 * (2. * std::f32::consts::PI / 360.));
window.update_with_buffer(fb.img.as_slice()).unwrap();
}
Ok(())
}
fn make_gif(player: &mut Player, fb: &mut Framebuffer, map: &Map, sprites: &mut Vec<Sprite>,
tex_walls: &Texture, tex_monsters: &Texture) -> Result<(), std::io::Error> {
//clear the /out folder
Command::new("rm")
.arg("-rf")
.arg("out/")
.output()
.expect("failed to clear out directory");
//create new /out folder
Command::new("mkdir") | .arg("out")
.output()
.expect("failed to create directory"); | random_line_split |
|
main.rs | ;
let texid = map.get(i, j).expect("i, j not in map range");
fb.draw_rectangle(
rect_x,
rect_y,
rect_w,
rect_h,
tex_walls.get(0, 0, texid).expect("no texture at texid"),
)?;
}
}
let mut depth_buffer = vec![1e3; (fb.w/2) as usize];
for i in 0..fb.w / 2 {
//cast field of vision on map AND generate 3D view
let angle: f32 = player.get_a() - player.fov / 2. + player.fov * i as f32 / (fb.w / 2) as f32;
for t in 0..2000 {
//since Rust doesn't allow step by float, remap so step==1
let t = t as f32 / 100.; //then transform back to original range
let x = player.x + t * angle.cos();
let y = player.y + t * angle.sin();
// draw the visibility cone on the map
fb.set_pixel(
(x * rect_w as f32) as usize,
(y * rect_h as f32) as usize,
// utils::pack_color_rgb(160, 160, 160),
utils::pack_color_bgra(160, 160, 160, 255),
)
.expect("Could not set pixel");
// if this map tile isn't empty, we've hit a wall
if map.is_empty(x as u32, y as u32) {
continue;
}
// hit a wall
let texid = map
.get(x as u32, y as u32)
.expect("Cannot index this map tile");
assert!(texid < tex_walls.count);
let distance = t * f32::cos(angle - player.get_a());
depth_buffer[i as usize] = distance;
let column_height = (fb.h as f32 / distance) as u32;
let x_texcoord = wall_x_texcoord(x, y, tex_walls);
let column = tex_walls
.get_scaled_column(texid, x_texcoord as u32, column_height)
.expect("Cannot retrieve scaled column");
let pix_x = i + fb.w / 2;
for j in 0..column_height {
let pix_y = j as usize + fb.h / 2 - column_height as usize / 2;
if pix_y < fb.h {
fb.set_pixel(pix_x, pix_y, column[j as usize])
.expect("Could not set pixel");
}
}
break;
}
}
// update distances from sprites to player
for sprite in sprites.iter_mut() {
sprite.player_dist = f32::sqrt(f32::powi(player.x - sprite.x, 2) + f32::powi(player.y - sprite.y, 2));
}
// sort sprites in reverse order of distance to player
sprites.sort_unstable_by(|lhs, rhs| rhs.player_dist.partial_cmp(&lhs.player_dist).unwrap());
// render sprites on map
for sprite in sprites.iter().take(sprites.len()) {
map_show_sprite(sprite, fb, &map)?;
draw_sprite(sprite, &depth_buffer, fb, &player, &tex_monsters)?;
}
Ok(())
}
fn main() -> std::io::Result<()> {
// TODO: unfuck colors
// TODO: create variable color schemes (RGBA vs BGRA)
// TODO: cleanup code
let mut fb = Framebuffer::new(1024, 512);
let mut window = minifb::Window::new("doom-iow", fb.w, fb.h, minifb::WindowOptions::default()).unwrap();
let mut player = Player::new (
3.456,
2.345,
1.523,
std::f32::consts::PI / 3.,
);
let map = match Map::init(16, 16) {
Ok(m) => m,
Err(_) => {
panic!("Could not open map");
}
};
let tex_walls = Texture::new("./walltex.png").expect("Could not open wall texture");
let tex_monsters = Texture::new("./monsters.png").expect("Could not open monster texture");
let mut sprites = vec![
Sprite::new(3.523, 3.812, 2, 0.0),
Sprite::new(1.834, 8.765, 0, 0.0),
Sprite::new(5.323, 5.365, 1, 0.0),
Sprite::new(4.123, 10.265, 1, 0.0),
];
make_gif(&mut player, &mut fb, &map, &mut sprites, &tex_walls, &tex_monsters).unwrap();
while window.is_open() && !window.is_key_down(minifb::Key::Escape) {
render(&mut fb, &map, &player, &mut sprites, &tex_walls, &tex_monsters).unwrap();
player.set_a(player.get_a() - 0.1 * (2. * std::f32::consts::PI / 360.));
window.update_with_buffer(fb.img.as_slice()).unwrap();
}
Ok(())
}
fn make_gif(player: &mut Player, fb: &mut Framebuffer, map: &Map, sprites: &mut Vec<Sprite>,
tex_walls: &Texture, tex_monsters: &Texture) -> Result<(), std::io::Error> {
//clear the /out folder
Command::new("rm")
.arg("-rf")
.arg("out/")
.output()
.expect("failed to clear out directory");
//create new /out folder
Command::new("mkdir")
.arg("out")
.output()
.expect("failed to create directory");
for frame in 0..360 {
// for frame in 0..5 {
// for frame in 0..1 {
let output_path = "./out/";
let ss = format!("{}{:05}.ppm", output_path, frame);
// player.a -= 2. * std::f32::consts::PI / 360.;
player.set_a( player.get_a() - (2. * std::f32::consts::PI / 360.) );
render(fb, &map, &player, sprites, &tex_walls, &tex_monsters).expect("Could not render image");
utils::drop_ppm_image(ss.as_str(), &fb.img, fb.w as usize, fb.h as usize)
.expect("Could not drop image");
}
println!("Rendered all frames, collecting into gif...");
let output = Command::new("convert")
.args(&["-delay", "10", "-loop", "0", "*.ppm", "rendered.gif"])
.current_dir("out/")
.output()
.expect("Could not start process");
println!("Status: {}", output.status);
println!("Stdout: {}", String::from_utf8_lossy(&output.stdout));
println!("Stderr: {}", String::from_utf8_lossy(&output.stderr));
println!("done");
//open results in Finder
Command::new("open")
.arg("out/")
.output()
.expect("Could not open folder");
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn packs_ints() {
let r = 2;
let g = 4;
let b = 8;
let a = 16;
let packed = utils::pack_color_rgba(r, g, b, a);
assert_eq!(packed, 0b0001_0000_0000_1000_0000_0100_0000_0010);
}
#[test]
fn unpacks_ints() {
let packed = 0b0001_0000_0000_1000_0000_0100_0000_0010;
let (r, g, b, a) = utils::unpack_color(packed);
assert_eq!(vec![2, 4, 8, 16], vec![r, g, b, a]);
}
#[test]
fn packs_ints_idempotently() | {
let r = 2;
let g = 4;
let b = 8;
let a = 255;
let color = utils::pack_color_rgba(r, g, b, a);
let (rc, gc, bc, ac) = utils::unpack_color(color);
assert_eq!(vec![r, g, b, a], vec![rc, gc, bc, ac]);
} | identifier_body |
|
main.rs | ::PI { sprite_dir += 2.0 * f32::consts::PI; }
//distance from player to sprite
// let sprite_dist =
// f32::sqrt(f32::powi(player.x - sprite.x, 2) + f32::powi(player.y - sprite.y, 2));
// let sprite_screen_size = f32::min(2000.0, fb.h as f32 / sprite_dist) as i32;
let sprite_screen_size = f32::min(1000.0, fb.h as f32/sprite.player_dist) as i32;
let screen_size = fb.w as i32 / 2;
let h_offset: i32 = ((sprite_dir - player.get_a()) * (fb.w as f32/2.0)/(player.fov) +
(fb.w as f32/2.0)/2.0 - (sprite_screen_size as f32)/2.0) as i32;
let v_offset: i32 = (fb.h as i32/2 - sprite_screen_size/2) as i32;
// println!("h_offset = {} = ({} - {}) * {}/2/{} + {}/2/2 - {}/2", h_offset, sprite_dir, player.a, fb.w, player.fov, fb.w, sprite_screen_size);
for i in 0..sprite_screen_size {
if h_offset+i<0 || h_offset+i >= screen_size { continue; }
if depth_buffer[(h_offset+i) as usize] < sprite.player_dist { continue; }
for j in 0..sprite_screen_size {
if v_offset+j<0 || v_offset+j >= fb.h as i32 { continue; }
let color = tex_sprites.get(i as u32*tex_sprites.size/sprite_screen_size as u32,
j as u32*tex_sprites.size/sprite_screen_size as u32, sprite.tex_id)
.unwrap();
let (r, g, b, a) = utils::unpack_color(color);
if a > 128 {
let color = utils::pack_color_bgra(b, g, r, a);
fb.set_pixel(fb.w/2 + (h_offset+i) as usize, (v_offset+j) as usize, color)?;
}
}
}
Ok(())
}
fn map_show_sprite(sprite: &Sprite, fb: &mut Framebuffer, map: &Map) -> Result<(), FrameError> {
//(rect_w, rect_h) == size of one map tile
let rect_w = (fb.w / (map.w as usize * 2)) as f32;
let rect_h = (fb.h / map.h as usize) as f32;
fb.draw_rectangle(
(sprite.x * rect_w - 3.0) as usize,
(sprite.y * rect_h - 3.0) as usize,
6,
6,
// utils::pack_color_rgb(255, 0, 0),
utils::pack_color_bgra(0, 0, 255, 255),
)
}
fn render(
fb: &mut Framebuffer,
map: &Map,
player: &Player,
sprites: &mut Vec<Sprite>, // will change order of sprites according to distance from player
tex_walls: &Texture,
tex_monsters: &Texture,
) -> Result<(), FrameError> {
// fb.clear(utils::pack_color_rgb(249, 209, 152));
fb.clear(utils::pack_color_bgra(152, 209, 249, 255));
let rect_w = fb.w / (map.w as usize * 2); //size of one map cell on the screen
let rect_h = fb.h / map.h as usize;
// draw overhead map
for j in 0..map.h {
for i in 0..map.w {
if map.is_empty(i, j) |
let rect_x = i as usize * rect_w;
let rect_y = j as usize * rect_h;
let texid = map.get(i, j).expect("i, j not in map range");
fb.draw_rectangle(
rect_x,
rect_y,
rect_w,
rect_h,
tex_walls.get(0, 0, texid).expect("no texture at texid"),
)?;
}
}
let mut depth_buffer = vec![1e3; (fb.w/2) as usize];
for i in 0..fb.w / 2 {
//cast field of vision on map AND generate 3D view
let angle: f32 = player.get_a() - player.fov / 2. + player.fov * i as f32 / (fb.w / 2) as f32;
for t in 0..2000 {
//since Rust doesn't allow step by float, remap so step==1
let t = t as f32 / 100.; //then transform back to original range
let x = player.x + t * angle.cos();
let y = player.y + t * angle.sin();
// draw the visibility cone on the map
fb.set_pixel(
(x * rect_w as f32) as usize,
(y * rect_h as f32) as usize,
// utils::pack_color_rgb(160, 160, 160),
utils::pack_color_bgra(160, 160, 160, 255),
)
.expect("Could not set pixel");
// if this map tile isn't empty, we've hit a wall
if map.is_empty(x as u32, y as u32) {
continue;
}
// hit a wall
let texid = map
.get(x as u32, y as u32)
.expect("Cannot index this map tile");
assert!(texid < tex_walls.count);
let distance = t * f32::cos(angle - player.get_a());
depth_buffer[i as usize] = distance;
let column_height = (fb.h as f32 / distance) as u32;
let x_texcoord = wall_x_texcoord(x, y, tex_walls);
let column = tex_walls
.get_scaled_column(texid, x_texcoord as u32, column_height)
.expect("Cannot retrieve scaled column");
let pix_x = i + fb.w / 2;
for j in 0..column_height {
let pix_y = j as usize + fb.h / 2 - column_height as usize / 2;
if pix_y < fb.h {
fb.set_pixel(pix_x, pix_y, column[j as usize])
.expect("Could not set pixel");
}
}
break;
}
}
// update distances from sprites to player
for sprite in sprites.iter_mut() {
sprite.player_dist = f32::sqrt(f32::powi(player.x - sprite.x, 2) + f32::powi(player.y - sprite.y, 2));
}
// sort sprites in reverse order of distance to player
sprites.sort_unstable_by(|lhs, rhs| rhs.player_dist.partial_cmp(&lhs.player_dist).unwrap());
// render sprites on map
for sprite in sprites.iter().take(sprites.len()) {
map_show_sprite(sprite, fb, &map)?;
draw_sprite(sprite, &depth_buffer, fb, &player, &tex_monsters)?;
}
Ok(())
}
fn main() -> std::io::Result<()> {
// TODO: unfuck colors
// TODO: create variable color schemes (RGBA vs BGRA)
// TODO: cleanup code
let mut fb = Framebuffer::new(1024, 512);
let mut window = minifb::Window::new("doom-iow", fb.w, fb.h, minifb::WindowOptions::default()).unwrap();
let mut player = Player::new (
3.456,
2.345,
1.523,
std::f32::consts::PI / 3.,
);
let map = match Map::init(16, 16) {
Ok(m) => m,
Err(_) => {
panic!("Could not open map");
}
};
let tex_walls = Texture::new("./walltex.png").expect("Could not open wall texture");
let tex_monsters = Texture::new("./monsters.png").expect("Could not open monster texture");
let mut sprites = vec![
Sprite::new(3.523, 3.812, 2, 0.0),
Sprite::new(1.834, 8.765, 0, 0.0),
Sprite::new(5.323, 5.365, 1, 0.0),
Sprite::new(4.123, 10.265, 1, 0. | {
continue; //skip empty spaces
} | conditional_block |
main.rs | : &mut Vec<Sprite>, // will change order of sprites according to distance from player
tex_walls: &Texture,
tex_monsters: &Texture,
) -> Result<(), FrameError> {
// fb.clear(utils::pack_color_rgb(249, 209, 152));
fb.clear(utils::pack_color_bgra(152, 209, 249, 255));
let rect_w = fb.w / (map.w as usize * 2); //size of one map cell on the screen
let rect_h = fb.h / map.h as usize;
// draw overhead map
for j in 0..map.h {
for i in 0..map.w {
if map.is_empty(i, j) {
continue; //skip empty spaces
}
let rect_x = i as usize * rect_w;
let rect_y = j as usize * rect_h;
let texid = map.get(i, j).expect("i, j not in map range");
fb.draw_rectangle(
rect_x,
rect_y,
rect_w,
rect_h,
tex_walls.get(0, 0, texid).expect("no texture at texid"),
)?;
}
}
let mut depth_buffer = vec![1e3; (fb.w/2) as usize];
for i in 0..fb.w / 2 {
//cast field of vision on map AND generate 3D view
let angle: f32 = player.get_a() - player.fov / 2. + player.fov * i as f32 / (fb.w / 2) as f32;
for t in 0..2000 {
//since Rust doesn't allow step by float, remap so step==1
let t = t as f32 / 100.; //then transform back to original range
let x = player.x + t * angle.cos();
let y = player.y + t * angle.sin();
// draw the visibility cone on the map
fb.set_pixel(
(x * rect_w as f32) as usize,
(y * rect_h as f32) as usize,
// utils::pack_color_rgb(160, 160, 160),
utils::pack_color_bgra(160, 160, 160, 255),
)
.expect("Could not set pixel");
// if this map tile isn't empty, we've hit a wall
if map.is_empty(x as u32, y as u32) {
continue;
}
// hit a wall
let texid = map
.get(x as u32, y as u32)
.expect("Cannot index this map tile");
assert!(texid < tex_walls.count);
let distance = t * f32::cos(angle - player.get_a());
depth_buffer[i as usize] = distance;
let column_height = (fb.h as f32 / distance) as u32;
let x_texcoord = wall_x_texcoord(x, y, tex_walls);
let column = tex_walls
.get_scaled_column(texid, x_texcoord as u32, column_height)
.expect("Cannot retrieve scaled column");
let pix_x = i + fb.w / 2;
for j in 0..column_height {
let pix_y = j as usize + fb.h / 2 - column_height as usize / 2;
if pix_y < fb.h {
fb.set_pixel(pix_x, pix_y, column[j as usize])
.expect("Could not set pixel");
}
}
break;
}
}
// update distances from sprites to player
for sprite in sprites.iter_mut() {
sprite.player_dist = f32::sqrt(f32::powi(player.x - sprite.x, 2) + f32::powi(player.y - sprite.y, 2));
}
// sort sprites in reverse order of distance to player
sprites.sort_unstable_by(|lhs, rhs| rhs.player_dist.partial_cmp(&lhs.player_dist).unwrap());
// render sprites on map
for sprite in sprites.iter().take(sprites.len()) {
map_show_sprite(sprite, fb, &map)?;
draw_sprite(sprite, &depth_buffer, fb, &player, &tex_monsters)?;
}
Ok(())
}
fn main() -> std::io::Result<()> {
// TODO: unfuck colors
// TODO: create variable color schemes (RGBA vs BGRA)
// TODO: cleanup code
let mut fb = Framebuffer::new(1024, 512);
let mut window = minifb::Window::new("doom-iow", fb.w, fb.h, minifb::WindowOptions::default()).unwrap();
let mut player = Player::new (
3.456,
2.345,
1.523,
std::f32::consts::PI / 3.,
);
let map = match Map::init(16, 16) {
Ok(m) => m,
Err(_) => {
panic!("Could not open map");
}
};
let tex_walls = Texture::new("./walltex.png").expect("Could not open wall texture");
let tex_monsters = Texture::new("./monsters.png").expect("Could not open monster texture");
let mut sprites = vec![
Sprite::new(3.523, 3.812, 2, 0.0),
Sprite::new(1.834, 8.765, 0, 0.0),
Sprite::new(5.323, 5.365, 1, 0.0),
Sprite::new(4.123, 10.265, 1, 0.0),
];
make_gif(&mut player, &mut fb, &map, &mut sprites, &tex_walls, &tex_monsters).unwrap();
while window.is_open() && !window.is_key_down(minifb::Key::Escape) {
render(&mut fb, &map, &player, &mut sprites, &tex_walls, &tex_monsters).unwrap();
player.set_a(player.get_a() - 0.1 * (2. * std::f32::consts::PI / 360.));
window.update_with_buffer(fb.img.as_slice()).unwrap();
}
Ok(())
}
fn make_gif(player: &mut Player, fb: &mut Framebuffer, map: &Map, sprites: &mut Vec<Sprite>,
tex_walls: &Texture, tex_monsters: &Texture) -> Result<(), std::io::Error> {
//clear the /out folder
Command::new("rm")
.arg("-rf")
.arg("out/")
.output()
.expect("failed to clear out directory");
//create new /out folder
Command::new("mkdir")
.arg("out")
.output()
.expect("failed to create directory");
for frame in 0..360 {
// for frame in 0..5 {
// for frame in 0..1 {
let output_path = "./out/";
let ss = format!("{}{:05}.ppm", output_path, frame);
// player.a -= 2. * std::f32::consts::PI / 360.;
player.set_a( player.get_a() - (2. * std::f32::consts::PI / 360.) );
render(fb, &map, &player, sprites, &tex_walls, &tex_monsters).expect("Could not render image");
utils::drop_ppm_image(ss.as_str(), &fb.img, fb.w as usize, fb.h as usize)
.expect("Could not drop image");
}
println!("Rendered all frames, collecting into gif...");
let output = Command::new("convert")
.args(&["-delay", "10", "-loop", "0", "*.ppm", "rendered.gif"])
.current_dir("out/")
.output()
.expect("Could not start process");
println!("Status: {}", output.status);
println!("Stdout: {}", String::from_utf8_lossy(&output.stdout));
println!("Stderr: {}", String::from_utf8_lossy(&output.stderr));
println!("done");
//open results in Finder
Command::new("open")
.arg("out/")
.output()
.expect("Could not open folder");
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn packs_ints() {
let r = 2;
let g = 4;
let b = 8;
let a = 16;
let packed = utils::pack_color_rgba(r, g, b, a);
assert_eq!(packed, 0b0001_0000_0000_1000_0000_0100_0000_0010);
}
#[test]
fn | unpacks_ints | identifier_name |
|
metrics.pb.go |
const (
SimpleMetric_COUNTER SimpleMetric_Type = 0
SimpleMetric_GAUGE SimpleMetric_Type = 1
)
var SimpleMetric_Type_name = map[int32]string{
0: "COUNTER",
1: "GAUGE",
}
var SimpleMetric_Type_value = map[string]int32{
"COUNTER": 0,
"GAUGE": 1,
}
func (x SimpleMetric_Type) String() string {
return proto.EnumName(SimpleMetric_Type_name, int32(x))
}
func (SimpleMetric_Type) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_f43fd25d8f6d2cfd, []int{0, 0}
}
// Proto representation of an Envoy Counter or Gauge value.
type SimpleMetric struct {
// Type of the metric represented.
Type SimpleMetric_Type `protobuf:"varint,1,opt,name=type,proto3,enum=envoy.admin.v3.SimpleMetric_Type" json:"type,omitempty"`
// Current metric value.
Value uint64 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"`
// Name of the metric.
Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *SimpleMetric) Reset() { *m = SimpleMetric{} }
func (m *SimpleMetric) String() string { return proto.CompactTextString(m) }
func (*SimpleMetric) ProtoMessage() {}
func (*SimpleMetric) Descriptor() ([]byte, []int) {
return fileDescriptor_f43fd25d8f6d2cfd, []int{0}
}
func (m *SimpleMetric) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *SimpleMetric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_SimpleMetric.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *SimpleMetric) XXX_Merge(src proto.Message) {
xxx_messageInfo_SimpleMetric.Merge(m, src)
}
func (m *SimpleMetric) XXX_Size() int {
return m.Size()
}
func (m *SimpleMetric) XXX_DiscardUnknown() {
xxx_messageInfo_SimpleMetric.DiscardUnknown(m)
}
var xxx_messageInfo_SimpleMetric proto.InternalMessageInfo
func (m *SimpleMetric) GetType() SimpleMetric_Type {
if m != nil {
return m.Type
}
return SimpleMetric_COUNTER
}
func (m *SimpleMetric) GetValue() uint64 {
if m != nil {
return m.Value
}
return 0
}
func (m *SimpleMetric) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func init() {
proto.RegisterEnum("envoy.admin.v3.SimpleMetric_Type", SimpleMetric_Type_name, SimpleMetric_Type_value)
proto.RegisterType((*SimpleMetric)(nil), "envoy.admin.v3.SimpleMetric")
}
func init() { proto.RegisterFile("envoy/admin/v3/metrics.proto", fileDescriptor_f43fd25d8f6d2cfd) }
var fileDescriptor_f43fd25d8f6d2cfd = []byte{
// 287 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x49, 0xcd, 0x2b, 0xcb,
0xaf, 0xd4, 0x4f, 0x4c, 0xc9, 0xcd, 0xcc, 0xd3, 0x2f, 0x33, 0xd6, 0xcf, 0x4d, 0x2d, 0x29, 0xca,
0x4c, 0x2e, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x03, 0xcb, 0xea, 0x81, 0x65, 0xf5,
0xca, 0x8c, 0xa5, 0x64, 0x4b, 0x53, 0x0a, 0x12, 0xf5, 0x13, 0xf3, 0xf2, 0xf2, 0x4b, 0x12, 0x4b,
0x32, 0xf3, 0xf3, 0x8a, 0xf5, 0x8b, 0x4b, 0x12, 0x4b, 0x4a, 0xa1, 0xca, 0xa5, 0x14, 0x31, 0xa4,
0xcb, 0x52, 0x8b, 0x8a, 0x33, 0xf3, 0xf3, 0x32, 0xf3, 0xd2, 0x21, 0x4a, 0x94, 0x76, 0x30, 0x72,
0xf1, 0x04, 0x67, 0xe6, 0x16, 0xe4, 0xa4, 0xfa, 0x82, 0x6d, 0x12, 0x32, 0xe5, 0x62, 0x29, 0xa9,
0x2c, 0x48, 0x95, 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x33, 0x52, 0xd4, 0x43, 0xb5, 0x51, 0x0f, 0x59,
0xad, 0x5e, 0x48, 0x65, 0x41, 0x6a, 0x10, 0x58, 0xb9, 0x90, 0x08, 0x17, 0x6b, 0x59, 0x62, 0x4e,
0x69, 0xaa, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x4b, 0x10, 0x84, 0x23, 0x24, 0xc4, 0xc5, 0x92, 0x97,
0x98, 0x9b, 0x2a, 0xc1, 0xac, 0xc0, 0xa8, 0xc1, 0x19, 0x04, 0x66, 0x2b, 0xc9, 0x71, 0xb1, 0x80,
0xf4, 0x09, 0x71, 0x73, 0xb1, 0x3b, 0xfb, 0x87, 0xfa, 0x85, 0xb8, 0x06, 0x09, 0x30, 0x08, 0x71,
0x72, 0xb1, 0xba, 0x3b, 0x86, 0xba, 0xbb, 0x0a, 0x30, 0x5a, 0xa9, 0xcf, 0x3a, 0xda, 0x21, 0xa7,
0xc4, 0xa5, 0x80, 0x62, 0xb1, 0x51, 0x62, 0x4e, 0x41, 0x46, 0x22, 0x8a, 0xed, 0x4e, 0x4e, 0x27,
0x1e, 0xc9, 0x31, | random_line_split |
||
metrics.pb.go | () SimpleMetric_Type {
if m != nil {
return m.Type
}
return SimpleMetric_COUNTER
}
func (m *SimpleMetric) GetValue() uint64 {
if m != nil {
return m.Value
}
return 0
}
func (m *SimpleMetric) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func init() {
proto.RegisterEnum("envoy.admin.v3.SimpleMetric_Type", SimpleMetric_Type_name, SimpleMetric_Type_value)
proto.RegisterType((*SimpleMetric)(nil), "envoy.admin.v3.SimpleMetric")
}
func init() { proto.RegisterFile("envoy/admin/v3/metrics.proto", fileDescriptor_f43fd25d8f6d2cfd) }
var fileDescriptor_f43fd25d8f6d2cfd = []byte{
// 287 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x49, 0xcd, 0x2b, 0xcb,
0xaf, 0xd4, 0x4f, 0x4c, 0xc9, 0xcd, 0xcc, 0xd3, 0x2f, 0x33, 0xd6, 0xcf, 0x4d, 0x2d, 0x29, 0xca,
0x4c, 0x2e, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x03, 0xcb, 0xea, 0x81, 0x65, 0xf5,
0xca, 0x8c, 0xa5, 0x64, 0x4b, 0x53, 0x0a, 0x12, 0xf5, 0x13, 0xf3, 0xf2, 0xf2, 0x4b, 0x12, 0x4b,
0x32, 0xf3, 0xf3, 0x8a, 0xf5, 0x8b, 0x4b, 0x12, 0x4b, 0x4a, 0xa1, 0xca, 0xa5, 0x14, 0x31, 0xa4,
0xcb, 0x52, 0x8b, 0x8a, 0x33, 0xf3, 0xf3, 0x32, 0xf3, 0xd2, 0x21, 0x4a, 0x94, 0x76, 0x30, 0x72,
0xf1, 0x04, 0x67, 0xe6, 0x16, 0xe4, 0xa4, 0xfa, 0x82, 0x6d, 0x12, 0x32, 0xe5, 0x62, 0x29, 0xa9,
0x2c, 0x48, 0x95, 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x33, 0x52, 0xd4, 0x43, 0xb5, 0x51, 0x0f, 0x59,
0xad, 0x5e, 0x48, 0x65, 0x41, 0x6a, 0x10, 0x58, 0xb9, 0x90, 0x08, 0x17, 0x6b, 0x59, 0x62, 0x4e,
0x69, 0xaa, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x4b, 0x10, 0x84, 0x23, 0x24, 0xc4, 0xc5, 0x92, 0x97,
0x98, 0x9b, 0x2a, 0xc1, 0xac, 0xc0, 0xa8, 0xc1, 0x19, 0x04, 0x66, 0x2b, 0xc9, 0x71, 0xb1, 0x80,
0xf4, 0x09, 0x71, 0x73, 0xb1, 0x3b, 0xfb, 0x87, 0xfa, 0x85, 0xb8, 0x06, 0x09, 0x30, 0x08, 0x71,
0x72, 0xb1, 0xba, 0x3b, 0x86, 0xba, 0xbb, 0x0a, 0x30, 0x5a, 0xa9, 0xcf, 0x3a, 0xda, 0x21, 0xa7,
0xc4, 0xa5, 0x80, 0x62, 0xb1, 0x51, 0x62, 0x4e, 0x41, 0x46, 0x22, 0x8a, 0xed, 0x4e, 0x4e, 0x27,
0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0xe3, 0xae, 0x86, 0x13, 0x17,
0xd9, 0x98, 0x04, 0x18, 0xb9, 0x64, 0x32, 0xf3, 0x21, 0x6e, 0x2e, 0x28, 0xca, 0xaf, 0xa8, 0x44,
0x73, 0xbe, 0x13, 0x0f, 0x44, 0x6f, 0x71, 0x00, 0xc8, 0xf3, 0x01, 0x8c, 0x49, 0x6c, 0xe0, 0x50,
0x30, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0x4b, 0x8a, 0x1c, 0x54, 0x77, 0x01, 0x00, 0x00,
}
func (m *SimpleMetric) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *SimpleMetric) | (dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *SimpleMetric) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.XXX_unrecognized != nil {
i -= len(m.XXX_unrecognized)
copy(dAtA[i:], m.XXX_unrecognized)
}
if len(m.Name) > 0 {
i -= len(m.Name)
copy(dAtA[i:], m.Name)
i = encodeVarintMetrics(dAtA, i, uint64(len(m.Name)))
i--
dAtA[i] = 0x1a
}
if m.Value != 0 {
i = encodeVarintMetrics(dAtA, i, uint | MarshalTo | identifier_name |
metrics.pb.go | x49, 0xcd, 0x2b, 0xcb,
0xaf, 0xd4, 0x4f, 0x4c, 0xc9, 0xcd, 0xcc, 0xd3, 0x2f, 0x33, 0xd6, 0xcf, 0x4d, 0x2d, 0x29, 0xca,
0x4c, 0x2e, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x03, 0xcb, 0xea, 0x81, 0x65, 0xf5,
0xca, 0x8c, 0xa5, 0x64, 0x4b, 0x53, 0x0a, 0x12, 0xf5, 0x13, 0xf3, 0xf2, 0xf2, 0x4b, 0x12, 0x4b,
0x32, 0xf3, 0xf3, 0x8a, 0xf5, 0x8b, 0x4b, 0x12, 0x4b, 0x4a, 0xa1, 0xca, 0xa5, 0x14, 0x31, 0xa4,
0xcb, 0x52, 0x8b, 0x8a, 0x33, 0xf3, 0xf3, 0x32, 0xf3, 0xd2, 0x21, 0x4a, 0x94, 0x76, 0x30, 0x72,
0xf1, 0x04, 0x67, 0xe6, 0x16, 0xe4, 0xa4, 0xfa, 0x82, 0x6d, 0x12, 0x32, 0xe5, 0x62, 0x29, 0xa9,
0x2c, 0x48, 0x95, 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x33, 0x52, 0xd4, 0x43, 0xb5, 0x51, 0x0f, 0x59,
0xad, 0x5e, 0x48, 0x65, 0x41, 0x6a, 0x10, 0x58, 0xb9, 0x90, 0x08, 0x17, 0x6b, 0x59, 0x62, 0x4e,
0x69, 0xaa, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x4b, 0x10, 0x84, 0x23, 0x24, 0xc4, 0xc5, 0x92, 0x97,
0x98, 0x9b, 0x2a, 0xc1, 0xac, 0xc0, 0xa8, 0xc1, 0x19, 0x04, 0x66, 0x2b, 0xc9, 0x71, 0xb1, 0x80,
0xf4, 0x09, 0x71, 0x73, 0xb1, 0x3b, 0xfb, 0x87, 0xfa, 0x85, 0xb8, 0x06, 0x09, 0x30, 0x08, 0x71,
0x72, 0xb1, 0xba, 0x3b, 0x86, 0xba, 0xbb, 0x0a, 0x30, 0x5a, 0xa9, 0xcf, 0x3a, 0xda, 0x21, 0xa7,
0xc4, 0xa5, 0x80, 0x62, 0xb1, 0x51, 0x62, 0x4e, 0x41, 0x46, 0x22, 0x8a, 0xed, 0x4e, 0x4e, 0x27,
0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0xe3, 0xae, 0x86, 0x13, 0x17,
0xd9, 0x98, 0x04, 0x18, 0xb9, 0x64, 0x32, 0xf3, 0x21, 0x6e, 0x2e, 0x28, 0xca, 0xaf, 0xa8, 0x44,
0x73, 0xbe, 0x13, 0x0f, 0x44, 0x6f, 0x71, 0x00, 0xc8, 0xf3, 0x01, 0x8c, 0x49, 0x6c, 0xe0, 0x50,
0x30, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0x4b, 0x8a, 0x1c, 0x54, 0x77, 0x01, 0x00, 0x00,
}
func (m *SimpleMetric) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *SimpleMetric) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *SimpleMetric) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.XXX_unrecognized != nil {
i -= len(m.XXX_unrecognized)
copy(dAtA[i:], m.XXX_unrecognized)
}
if len(m.Name) > 0 {
i -= len(m.Name)
copy(dAtA[i:], m.Name)
i = encodeVarintMetrics(dAtA, i, uint64(len(m.Name)))
i--
dAtA[i] = 0x1a
}
if m.Value != 0 {
i = encodeVarintMetrics(dAtA, i, uint64(m.Value))
i--
dAtA[i] = 0x10
}
if m.Type != 0 {
i = encodeVarintMetrics(dAtA, i, uint64(m.Type))
i--
dAtA[i] = 0x8
}
return len(dAtA) - i, nil
}
func encodeVarintMetrics(dAtA []byte, offset int, v uint64) int {
offset -= sovMetrics(v)
base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return base
}
func (m *SimpleMetric) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if m.Type != 0 {
n += 1 + sovMetrics(uint64(m.Type))
}
if m.Value != 0 {
n += 1 + sovMetrics(uint64(m.Value))
}
l = len(m.Name)
if l > 0 | {
n += 1 + l + sovMetrics(uint64(l))
} | conditional_block |
|
metrics.pb.go | 6, 0x22, 0x8a, 0xed, 0x4e, 0x4e, 0x27,
0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0xe3, 0xae, 0x86, 0x13, 0x17,
0xd9, 0x98, 0x04, 0x18, 0xb9, 0x64, 0x32, 0xf3, 0x21, 0x6e, 0x2e, 0x28, 0xca, 0xaf, 0xa8, 0x44,
0x73, 0xbe, 0x13, 0x0f, 0x44, 0x6f, 0x71, 0x00, 0xc8, 0xf3, 0x01, 0x8c, 0x49, 0x6c, 0xe0, 0x50,
0x30, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0x4b, 0x8a, 0x1c, 0x54, 0x77, 0x01, 0x00, 0x00,
}
func (m *SimpleMetric) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *SimpleMetric) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *SimpleMetric) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.XXX_unrecognized != nil {
i -= len(m.XXX_unrecognized)
copy(dAtA[i:], m.XXX_unrecognized)
}
if len(m.Name) > 0 {
i -= len(m.Name)
copy(dAtA[i:], m.Name)
i = encodeVarintMetrics(dAtA, i, uint64(len(m.Name)))
i--
dAtA[i] = 0x1a
}
if m.Value != 0 {
i = encodeVarintMetrics(dAtA, i, uint64(m.Value))
i--
dAtA[i] = 0x10
}
if m.Type != 0 {
i = encodeVarintMetrics(dAtA, i, uint64(m.Type))
i--
dAtA[i] = 0x8
}
return len(dAtA) - i, nil
}
func encodeVarintMetrics(dAtA []byte, offset int, v uint64) int {
offset -= sovMetrics(v)
base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return base
}
func (m *SimpleMetric) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if m.Type != 0 {
n += 1 + sovMetrics(uint64(m.Type))
}
if m.Value != 0 {
n += 1 + sovMetrics(uint64(m.Value))
}
l = len(m.Name)
if l > 0 {
n += 1 + l + sovMetrics(uint64(l))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func sovMetrics(x uint64) (n int) {
return (math_bits.Len64(x|1) + 6) / 7
}
func sozMetrics(x uint64) (n int) {
return sovMetrics(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (m *SimpleMetric) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: SimpleMetric: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: SimpleMetric: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
}
m.Type = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.Type |= SimpleMetric_Type(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 2:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
}
m.Value = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.Value |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthMetrics
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthMetrics
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Name = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipMetrics(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthMetrics
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthMetrics
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipMetrics(dAtA []byte) (n int, err error) | {
l := len(dAtA)
iNdEx := 0
depth := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowMetrics
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
} | identifier_body |
|
OceanFront_ViewshedNEW2.py |
if not os.path.exists(Final_Floor_Viewsheds) : os.makedirs(Final_Floor_Viewsheds)
if not os.path.exists(SummaryTables) : os.makedirs(SummaryTables)
if not os.path.exists(ElevAvgTables): os.makedirs(ElevAvgTables)
if not os.path.exists(ArcLengths): os.makedirs(ArcLengths)
def outName(input,post="_Output"):
"""Returns output name."""
outName=os.path.basename(input).split(".")[0]+post
return outName
def DegViewshed (FLOOR, HEIGHT):
"""Calculates a parcels viewshed, in degrees"""
#Select Record
arcpy.SelectLayerByAttribute_management(PointsFL,"NEW_SELECTION",SQL)
#Set Observer Height (OffSETA)
arcpy.CalculateField_management(PointsFL,"OFFSETA",HEIGHT,"PYTHON_9.3")
#perform viewshed analysis
arcpy.SetProgressorLabel("Performing Viewshed Analysis for point "+str(value))
outViewshed = IntermediateFiles+"\\vs_"+str(FLOOR)+"_"+str(value).split(".")[0]
arcpy.Viewshed_3d(outCon,PointsFL,outViewshed)
#convert viewshed to polygon
arcpy.SetProgressorLabel("Converting viewshed"+str(value)+" on floor "+str(FLOOR)+" to polygon.")
OutPoly = IntermediateFiles+"\\"+os.path.basename(outViewshed).split(".")[0]+"_poly.shp"
arcpy.RasterToPolygon_conversion(outViewshed,OutPoly)
#Intersect viewshed polygon with buffer clip
#This will allow the viewshed poly to inherit attribute fields needed for later analysis
FinalView = Final_Floor_Viewsheds+"\\FinalViewshed_"+str(FLOOR)+"_"+str(value)+".shp"
arcpy.Intersect_analysis([BufferClip,OutPoly],FinalView)
#Select features in viewshed polygon with Gridcode = 1
#If no records with grid = 1 exist, script will skip to setting viewshed in degrees to 0
#Convert viewshed polygon to layer
ViewshedLayer = outName(FinalView,"lyr")
arcpy.MakeFeatureLayer_management(FinalView,ViewshedLayer)
#Select records with gridcode = 1
arcpy.SelectLayerByAttribute_management(ViewshedLayer,"NEW_SELECTION","GRIDCODE ="+str(1)+"")
#Get count of the # of records selected in viewshed poly layer
VsLyrCount = int(arcpy.GetCount_management(ViewshedLayer).getOutput(0))
NoView = SummaryTables+"\\summary_"+str(FLOOR)+"_"+str(value)+".dbf"
YesView = SummaryTables+"\\summary_"+str(FLOOR)+"_"+str(value)+".dbf"
StatsField0 = [["GRIDCODE","SUM"]]
CaseField0 = ["ID","SPOT",FloorField]
StatsField1 = [["LENGTH","SUM"]]
CaseField1 = ["GRIDCODE","ID","SPOT",FloorField]
VsArcLengths = ArcLengths+"\\ArcLength_"+str(FLOOR)+"_"+str(value)+".shp"
if VsLyrCount == 0: #no viewable areas exist
arcpy.SelectLayerByAttribute_management(ViewshedLayer,"CLEAR_SELECTION")
arcpy.SetProgressorLabel("Calculating viewshed statistics for parcel "+str(value))
arcpy.Statistics_analysis(ViewshedLayer,NoView, StatsField0,CaseField0)
#Add field to summary table to hold viewshed value of 0
#Add field to note which floor viewshed corresponds to
arcpy.AddField_management(NoView, "FLR_RAN","SHORT")
arcpy.AddField_management(NoView, "VIEW_"+Year,"DOUBLE")
arcpy.AddField_management(NoView,"OFFSETA","SHORT")
arcpy.CalculateField_management(NoView,"FLR_RAN",FLOOR)
arcpy.CalculateField_management(NoView,"VIEW_"+Year,0)
arcpy.CalculateField_management(NoView,"OFFSETA",HEIGHT)
else: #Calculate viewshed, in degrees, for selected records
arcpy.SetProgressorLabel("Getting arc length for parcel"+str(value)+" at the "+str(FLOOR)+" floor.")
arcpy.Intersect_analysis([BufferLine,ViewshedLayer],VsArcLengths,"",10,"LINE")#Intersect with any line within 10 ft.
arcpy.AddField_management(VsArcLengths, "Length","DOUBLE")
arcpy.CalculateField_management(VsArcLengths,"Length","!SHAPE.length@miles!","PYTHON_9.3")
arcpy.Statistics_analysis(VsArcLengths,YesView,StatsField1,CaseField1)
#Add fields to output summary table
arcpy.AddField_management(YesView,"FLR_RAN","SHORT")
arcpy.AddField_management(YesView,"VIEW_"+Year,"DOUBLE")
arcpy.AddField_management(YesView,"OFFSETA","SHORT")
arcpy.CalculateField_management(YesView,"FLR_RAN",FLOOR)
arcpy.CalculateField_management(YesView,"OFFSETA",HEIGHT)
arcpy.CalculateField_management(YesView,"VIEW_"+Year,"((!SUM_LENGTH!/3.14)*180)","PYTHON_9.3")
arcpy.SelectLayerByAttribute_management(ViewshedLayer,"CLEAR_SELECTION")
#Open error log file
infile = open(outputWorkspace+"\\Error_Log_"+Year+".txt","w")
#Perform field check for viewshed parameters within the observation point attribute table.
#Script will add field to the attribute table if the field does not already exist.
#Needed fields are SPOT - used to define the surface elevations for the observation points.
#Azimuth -define the horizontal angle limits to the scan (start and end points in degrees).
#Radius - defines the search distance when identifying areas visible from each observation point.
#Cells that are beyond a certain distance can be excluded from the analysis.
VSFieldList = ["SPOT","OFFSETA","AZIMUTH1","AZIMUTH2","RADIUS1","RADIUS2"]
arcpy.SetProgressorLabel("Checking fields in observation point attribute table")
for FieldList in VSFieldList:
ObsPtsFieldList=arcpy.ListFields(ObsPts,FieldList)
fieldNames=[field.name for field in ObsPtsFieldList]
if FieldList in fieldNames:
print "Field", FieldList, "found in", ObsPts
else:
print"Field", FieldList, "NOT found in", ObsPts
arcpy.AddField_management(ObsPts,FieldList, "DOUBLE")
print FieldList, "created"
#Populate viewshed parameters with correct values for viewshed
Az1Cal = 0
Az2Cal = 180
Rad1Cal = 0
Rad2Cal = 5280
arcpy.CalculateField_management(ObsPts,"AZIMUTH1",Az1Cal)
arcpy.CalculateField_management(ObsPts,"AZIMUTH2",Az2Cal)
arcpy.CalculateField_management(ObsPts,"RADIUS1",Rad1Cal)
arcpy.CalculateField_management(ObsPts,"RADIUS2",Rad2Cal)
#Create Feature Layers
arcpy.SetProgressorLabel("Creating feature layers")
PointsFL = outName(ObsPts,"_Lyr")
footprintFL = outName(footprint,"_Lyr")
arcpy.MakeFeatureLayer_management(ObsPts, PointsFL)
arcpy.MakeFeatureLayer_management(footprint,footprintFL)
#Select observation points one by one
arcpy.SetProgressorLabel("Starting viewshed analysis...")
RangeCount = int(arcpy.GetCount_management(PointsFL).getOutput(0))
#Count number of parcels being processed
arcpy.AddMessage("\nCalculating viewshed for "+str(RangeCount)+" parcels")
sc = arcpy.SearchCursor(PointsFL)
for row in sc:
try:
#Get Parcel ID value
value = row.ID
count = row.FID+1
FlrCnt = row.getValue(FloorField)
#Get bare earth elevation of parcel
arcpy.SetProgressorLabel("Changing elevation footprint to bare earth elevation for point "+str(value))
SQL = "Id =" +str(value)+""
arcpy.SelectLayerByAttribute_management(PointsFL,"NEW_SELECTION",SQL)
arcpy.SelectLayerByLocation_management(footprintFL,"INTERSECT",PointsFL)
arcpy.env.workspace = IntermediateFiles #need to change workspace so that the .save files get saved correctly
outExtractByMask = ExtractByMask(BareElevation,footprintFL)
outExtractByMask.save(IntermediateFiles+"\\ebm_"+str(value))
ElevAvg = ElevAvgTables+"\\avgelev_"+str(value)+".dbf"
arcpy.Statistics_analysis(outExtractByMask,ElevAvg,[["VALUE","MEAN"]])
arcpy.AddField_management(ElevAvg,"Pt_ID","SHORT")
arcpy.CalculateField_management(ElevAvg,"Pt_ID",value)
arcpy.AddJoin_management(PointsFL,"Id",ElevAvg,"Pt_ID","KEEP_COMMON")
Field1 = os.path.basename(ObsPts).split(".")[0]+".SPOT"
Field2 = "!"+os.path.basename(ElevAvg).split(".")[0]+".MEAN_VALUE!"
arcpy.CalculateField_management(PointsFL,Field1,Field2,"PYTHON_9.3")
arcpy.RemoveJoin_management(PointsFL)
#Set parcel elevation to 0; this will be replaced by the SPOT value calculated above
RastFootprint = IntermediateFiles+"\\fp | os.makedirs(IntermediateFiles) | conditional_block |
|
OceanFront_ViewshedNEW2.py | ")
#Open error log file
infile = open(outputWorkspace+"\\Error_Log_"+Year+".txt","w")
#Perform field check for viewshed parameters within the observation point attribute table.
#Script will add field to the attribute table if the field does not already exist.
#Needed fields are SPOT - used to define the surface elevations for the observation points.
#Azimuth -define the horizontal angle limits to the scan (start and end points in degrees).
#Radius - defines the search distance when identifying areas visible from each observation point.
#Cells that are beyond a certain distance can be excluded from the analysis.
VSFieldList = ["SPOT","OFFSETA","AZIMUTH1","AZIMUTH2","RADIUS1","RADIUS2"]
arcpy.SetProgressorLabel("Checking fields in observation point attribute table")
for FieldList in VSFieldList:
ObsPtsFieldList=arcpy.ListFields(ObsPts,FieldList)
fieldNames=[field.name for field in ObsPtsFieldList]
if FieldList in fieldNames:
print "Field", FieldList, "found in", ObsPts
else:
print"Field", FieldList, "NOT found in", ObsPts
arcpy.AddField_management(ObsPts,FieldList, "DOUBLE")
print FieldList, "created"
#Populate viewshed parameters with correct values for viewshed
Az1Cal = 0
Az2Cal = 180
Rad1Cal = 0
Rad2Cal = 5280
arcpy.CalculateField_management(ObsPts,"AZIMUTH1",Az1Cal)
arcpy.CalculateField_management(ObsPts,"AZIMUTH2",Az2Cal)
arcpy.CalculateField_management(ObsPts,"RADIUS1",Rad1Cal)
arcpy.CalculateField_management(ObsPts,"RADIUS2",Rad2Cal)
#Create Feature Layers
arcpy.SetProgressorLabel("Creating feature layers")
PointsFL = outName(ObsPts,"_Lyr")
footprintFL = outName(footprint,"_Lyr")
arcpy.MakeFeatureLayer_management(ObsPts, PointsFL)
arcpy.MakeFeatureLayer_management(footprint,footprintFL)
#Select observation points one by one
arcpy.SetProgressorLabel("Starting viewshed analysis...")
RangeCount = int(arcpy.GetCount_management(PointsFL).getOutput(0))
#Count number of parcels being processed
arcpy.AddMessage("\nCalculating viewshed for "+str(RangeCount)+" parcels")
sc = arcpy.SearchCursor(PointsFL)
for row in sc:
try:
#Get Parcel ID value
value = row.ID
count = row.FID+1
FlrCnt = row.getValue(FloorField)
#Get bare earth elevation of parcel
arcpy.SetProgressorLabel("Changing elevation footprint to bare earth elevation for point "+str(value))
SQL = "Id =" +str(value)+""
arcpy.SelectLayerByAttribute_management(PointsFL,"NEW_SELECTION",SQL)
arcpy.SelectLayerByLocation_management(footprintFL,"INTERSECT",PointsFL)
arcpy.env.workspace = IntermediateFiles #need to change workspace so that the .save files get saved correctly
outExtractByMask = ExtractByMask(BareElevation,footprintFL)
outExtractByMask.save(IntermediateFiles+"\\ebm_"+str(value))
ElevAvg = ElevAvgTables+"\\avgelev_"+str(value)+".dbf"
arcpy.Statistics_analysis(outExtractByMask,ElevAvg,[["VALUE","MEAN"]])
arcpy.AddField_management(ElevAvg,"Pt_ID","SHORT")
arcpy.CalculateField_management(ElevAvg,"Pt_ID",value)
arcpy.AddJoin_management(PointsFL,"Id",ElevAvg,"Pt_ID","KEEP_COMMON")
Field1 = os.path.basename(ObsPts).split(".")[0]+".SPOT"
Field2 = "!"+os.path.basename(ElevAvg).split(".")[0]+".MEAN_VALUE!"
arcpy.CalculateField_management(PointsFL,Field1,Field2,"PYTHON_9.3")
arcpy.RemoveJoin_management(PointsFL)
#Set parcel elevation to 0; this will be replaced by the SPOT value calculated above
RastFootprint = IntermediateFiles+"\\fp_"+str(value).split(".")[0]
arcpy.PolygonToRaster_conversion(footprintFL,"FID",RastFootprint,"MAXIMUM_AREA","",6)
outIsNull = IsNull(RastFootprint) #Identify NoData Areas
outIsNull.save(IntermediateFiles+"\\null1_"+str(value))
outCon = Con(outIsNull,Elevation,0,"") #Use Con tool to change building footprint to elevation of 0 while leaving all other building footprints as is
outCon.save(IntermediateFiles+"\\con1_"+str(value)) #Final raster to be used in viewshed analysis
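#IsNull is 1 outside the selected footprint and 0 inside it, so Con keeps the original surface
#elevation everywhere except the selected parcel, which is flattened to 0; the observer is then
#placed at SPOT (bare-earth elevation) plus OFFSETA during the viewshed run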
#buffer selected viewpoint
arcpy.SetProgressorLabel("Buffering point "+str(value))
outBuffer = IntermediateFiles+"\\buffer_"+str(value)+".shp"
arcpy.Buffer_analysis(PointsFL,outBuffer,"1 mile")
#Convert buffer polygon to line
BufferLine = IntermediateFiles+"\\BufferLine_"+str(value)+".shp"
arcpy.FeatureToLine_management(outBuffer,BufferLine)
#Clip buffer to Ocean
arcpy.SetProgressorLabel("Clipping point "+str(value)+" buffer to ocean")
BufferClip = IntermediateFiles+"\\buffer_clipped_"+str(value).split(".")[0]+".shp"
arcpy.Clip_analysis(outBuffer, Ocean, BufferClip)
if FlrCnt ==1: #parcel floor count =1
arcpy.AddMessage("\nParcel "+str(value)+" has 1 story to process. Calculating viewshed now...")
print "\nParcel ",str(value)," has 1 story to process. Calculating viewshed now..."
DegViewshed(1,10) #Calculate the viewshed with an observer height of 10 feet then move to point
arcpy.AddMessage("First floor viewshed for parcel "+str(value)+" has been completed...")
print "First floor viewshed for parcel ",str(value)," has been completed..."
arcpy.AddMessage(str(count)+" of "+str(RangeCount)+" parcels have been completed.\n")
print str(count)," of "+str(RangeCount)," parcels has been processed.\n"
else: #if parcel has 1.5 floors or greater do this
arcpy.AddMessage("\nParcel "+str(value)+" has 2 stories to process. Calculating viewsheds now...")
print "\nParcel ",str(value)," has 2 stories to process. Calculating viewsheds now..."
DegViewshed(1,10) #Calculate first floor view, then
arcpy.AddMessage("First floor viewshed for parcel "+str(value)+" has been completed...")
print "First floor viewshed for parcel ",str(value)," has been completed..."
DegViewshed(2,20) #Calculate second floor view
arcpy.AddMessage("Second floor viewshed for parcel "+str(value)+" has been completed...")
print "Second floor viewshed for parcel ",str(value)," has been completed..."
arcpy.AddMessage("Viewsheds for "+str(count)+" of "+str(RangeCount)+" parcels have been processed.\n")
print "Viewsheds for",str(count)," of ",str(RangeCount)," parcels have been processed.\n"
except:
arcpy.AddMessage("***An error occured processing parcel "+str(value)+". Refer to error log for details.")
print "***An error occured processing parcel "+str(value)+". Refer to error log for details."
infile.write("An error occured processing parcel "+str(value)+".\n")
infile.write(arcpy.GetMessages()+"\n")
pass
del row
del sc
#Merge all summary tables into a single table
arcpy.SetProgressorLabel("Creating final viewshed table")
arcpy.env.workspace = SummaryTables
FinalTable = outputWorkspace+"\\Final_Viewsheds_"+Year+".dbf"
Tables = arcpy.ListTables()
arcpy.Merge_management(Tables,FinalTable)
#Delete uneeded fields from final table
arcpy.DeleteField_management(FinalTable,["FREQUENCY","SUM_GRIDCO"])
print "Final viewshed table for",Year,"is located in",outputWorkspace,"\n"
arcpy.AddMessage("Final viewshed table for "+Year+" is located in "+outputWorkspace+"\n")
#save copy of table to CSV format
##import win32com.client
##try:
##
## excel = win32com.client.Dispatch('Excel.Application')
##
## inDBF = FinalTable
## outCSV = FinalTable.split(".")[0]+".csv"
##
## workbook = excel.Workbooks.Open(inDBF)
## # 24 represents xlCSVMSDOS
## workbook.SaveAs(outCSV,FileFormat=24)
## workbook.Close(SaveChanges=0)
## excel.Quit()
##
## arcpy.AddMessage(FinalTable+" converted to a csv file, and saved in "+outputWorkspace+"\n")
## print FinalTable,"converted to a csv file and saved in",outputWorkspace,"\n"
##
##except:
## arcpy.AddMessage("\nERROR: Could not convert final viewshed table to csv file\n")
## arcpy.AddMessage(arcpy.GetMessages())
## infile.write("Could not convert final viewshed table to csv file\n")
## infile.write(arcpy.GetMessages()+"\n\n") | ##
#Delete individual summary tables
arcpy.SetProgressorLabel("Deleting Summary Tables... ")
try: | random_line_split |
|
OceanFront_ViewshedNEW2.py | maryTables) : os.makedirs(SummaryTables)
if not os.path.exists(ElevAvgTables): os.makedirs(ElevAvgTables)
if not os.path.exists(ArcLengths): os.makedirs(ArcLengths)
def | (input,post="_Output"):
"""Returns output name."""
outName=os.path.basename(input).split(".")[0]+post
return outName
def DegViewshed (FLOOR, HEIGHT):
"""Calculates a parcels viewshed, in degrees"""
#Select Record
arcpy.SelectLayerByAttribute_management(PointsFL,"NEW_SELECTION",SQL)
#Set Observer Height (OffSETA)
arcpy.CalculateField_management(PointsFL,"OFFSETA",HEIGHT,"PYTHON_9.3")
#perform viewshed analysis
arcpy.SetProgressorLabel("Performing Viewshed Analysis for point "+str(value))
outViewshed = IntermediateFiles+"\\vs_"+str(FLOOR)+"_"+str(value).split(".")[0]
arcpy.Viewshed_3d(outCon,PointsFL,outViewshed)
#convert viewshed to polygon
arcpy.SetProgressorLabel("Converting viewshed"+str(value)+" on floor "+str(FLOOR)+" to polygon.")
OutPoly = IntermediateFiles+"\\"+os.path.basename(outViewshed).split(".")[0]+"_poly.shp"
arcpy.RasterToPolygon_conversion(outViewshed,OutPoly)
#Intersect viewshed polygon with buffer clip
#This will allow the viewshed poly to inherit attribute fields needed for later analysis
FinalView = Final_Floor_Viewsheds+"\\FinalViewshed_"+str(FLOOR)+"_"+str(value)+".shp"
arcpy.Intersect_analysis([BufferClip,OutPoly],FinalView)
#Select features in viewshed polygon with Gridcode = 1
#If no records with grid = 1 exist, script will skip to setting viewshed in degrees to 0
#Convert viewshed polygon to layer
ViewshedLayer = outName(FinalView,"lyr")
arcpy.MakeFeatureLayer_management(FinalView,ViewshedLayer)
#Select records with gridcode = 1
arcpy.SelectLayerByAttribute_management(ViewshedLayer,"NEW_SELECTION","GRIDCODE ="+str(1)+"")
#Get count of the # of records selected in viewshed poly layer
VsLyrCount = int(arcpy.GetCount_management(ViewshedLayer).getOutput(0))
NoView = SummaryTables+"\\summary_"+str(FLOOR)+"_"+str(value)+".dbf"
YesView = SummaryTables+"\\summary_"+str(FLOOR)+"_"+str(value)+".dbf"
StatsField0 = [["GRIDCODE","SUM"]]
CaseField0 = ["ID","SPOT",FloorField]
StatsField1 = [["LENGTH","SUM"]]
CaseField1 = ["GRIDCODE","ID","SPOT",FloorField]
VsArcLengths = ArcLengths+"\\ArcLength_"+str(FLOOR)+"_"+str(value)+".shp"
if VsLyrCount == 0: #no viewable areas exist
arcpy.SelectLayerByAttribute_management(ViewshedLayer,"CLEAR_SELECTION")
arcpy.SetProgressorLabel("Calculating viewshed statistics for parcel "+str(value))
arcpy.Statistics_analysis(ViewshedLayer,NoView, StatsField0,CaseField0)
#Add field to summary table to hold viewshed value of 0
#Add field to note which floor viewshed corresponds to
arcpy.AddField_management(NoView, "FLR_RAN","SHORT")
arcpy.AddField_management(NoView, "VIEW_"+Year,"DOUBLE")
arcpy.AddField_management(NoView,"OFFSETA","SHORT")
arcpy.CalculateField_management(NoView,"FLR_RAN",FLOOR)
arcpy.CalculateField_management(NoView,"VIEW_"+Year,0)
arcpy.CalculateField_management(NoView,"OFFSETA",HEIGHT)
else: #Calculate viewshed, in degrees, for selected records
arcpy.SetProgressorLabel("Getting arc length for parcel"+str(value)+" at the "+str(FLOOR)+" floor.")
arcpy.Intersect_analysis([BufferLine,ViewshedLayer],VsArcLengths,"",10,"LINE")#Intersect with any line within 10 ft.
arcpy.AddField_management(VsArcLengths, "Length","DOUBLE")
arcpy.CalculateField_management(VsArcLengths,"Length","!SHAPE.length@miles!","PYTHON_9.3")
arcpy.Statistics_analysis(VsArcLengths,YesView,StatsField1,CaseField1)
#Add fields to output summary table
arcpy.AddField_management(YesView,"FLR_RAN","SHORT")
arcpy.AddField_management(YesView,"VIEW_"+Year,"DOUBLE")
arcpy.AddField_management(YesView,"OFFSETA","SHORT")
arcpy.CalculateField_management(YesView,"FLR_RAN",FLOOR)
arcpy.CalculateField_management(YesView,"OFFSETA",HEIGHT)
arcpy.CalculateField_management(YesView,"VIEW_"+Year,"((!SUM_LENGTH!/3.14)*180)","PYTHON_9.3")
arcpy.SelectLayerByAttribute_management(ViewshedLayer,"CLEAR_SELECTION")
#Open error log file
infile = open(outputWorkspace+"\\Error_Log_"+Year+".txt","w")
#Perform field check for viewshed parameters within the observation point attribute table.
#Script will add field to the attribute table if the field does not already exist.
#Needed fields are SPOT - used to define the surface elevations for the observation points.
#Azimuth -define the horizontal angle limits to the scan (start and end points in degrees).
#Radius - defines the search distance when identifying areas visible from each observation point.
#Cells that are beyond a certain distance can be excluded from the analysis.
VSFieldList = ["SPOT","OFFSETA","AZIMUTH1","AZIMUTH2","RADIUS1","RADIUS2"]
arcpy.SetProgressorLabel("Checking fields in observation point attribute table")
for FieldList in VSFieldList:
ObsPtsFieldList=arcpy.ListFields(ObsPts,FieldList)
fieldNames=[field.name for field in ObsPtsFieldList]
if FieldList in fieldNames:
print "Field", FieldList, "found in", ObsPts
else:
print"Field", FieldList, "NOT found in", ObsPts
arcpy.AddField_management(ObsPts,FieldList, "DOUBLE")
print FieldList, "created"
#Populate viewshed parameters with correct values for viewshed
Az1Cal = 0
Az2Cal = 180
Rad1Cal = 0
Rad2Cal = 5280
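#RADIUS2 = 5280 ft (1 mile) matches the 1-mile buffer drawn around each parcel below,
#assuming the surface raster's horizontal units are feet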
arcpy.CalculateField_management(ObsPts,"AZIMUTH1",Az1Cal)
arcpy.CalculateField_management(ObsPts,"AZIMUTH2",Az2Cal)
arcpy.CalculateField_management(ObsPts,"RADIUS1",Rad1Cal)
arcpy.CalculateField_management(ObsPts,"RADIUS2",Rad2Cal)
#Create Feature Layers
arcpy.SetProgressorLabel("Creating feature layers")
PointsFL = outName(ObsPts,"_Lyr")
footprintFL = outName(footprint,"_Lyr")
arcpy.MakeFeatureLayer_management(ObsPts, PointsFL)
arcpy.MakeFeatureLayer_management(footprint,footprintFL)
#Select observation points one by one
arcpy.SetProgressorLabel("Starting viewshed analysis...")
RangeCount = int(arcpy.GetCount_management(PointsFL).getOutput(0))
#Count number of parcels being processed
arcpy.AddMessage("\nCalculating viewshed for "+str(RangeCount)+" parcels")
sc = arcpy.SearchCursor(PointsFL)
for row in sc:
try:
#Get Parcel ID value
value = row.ID
count = row.FID+1
FlrCnt = row.getValue(FloorField)
#Get bare earth elevation of parcel
arcpy.SetProgressorLabel("Changing elevation footprint to bare earth elevation for point "+str(value))
SQL = "Id =" +str(value)+""
arcpy.SelectLayerByAttribute_management(PointsFL,"NEW_SELECTION",SQL)
arcpy.SelectLayerByLocation_management(footprintFL,"INTERSECT",PointsFL)
arcpy.env.workspace = IntermediateFiles #need to change workspace so that the .save files get saved correctly
outExtractByMask = ExtractByMask(BareElevation,footprintFL)
outExtractByMask.save(IntermediateFiles+"\\ebm_"+str(value))
ElevAvg = ElevAvgTables+"\\avgelev_"+str(value)+".dbf"
arcpy.Statistics_analysis(outExtractByMask,ElevAvg,[["VALUE","MEAN"]])
arcpy.AddField_management(ElevAvg,"Pt_ID","SHORT")
arcpy.CalculateField_management(ElevAvg,"Pt_ID",value)
arcpy.AddJoin_management(PointsFL,"Id",ElevAvg,"Pt_ID","KEEP_COMMON")
Field1 = os.path.basename(ObsPts).split(".")[0]+".SPOT"
Field2 = "!"+os.path.basename(ElevAvg).split(".")[0]+".MEAN_VALUE!"
arcpy.CalculateField_management(PointsFL,Field1,Field2,"PYTHON_9.3")
arcpy.RemoveJoin_management(PointsFL)
#Set parcel elevation to 0; this will be replaced by the SPOT value calculated above
RastFootprint = IntermediateFiles+"\\fp_"+str(value).split(".")[0]
arcpy.PolygonToRaster_conversion(footprintFL,"FID",RastFootprint,"MAXIMUM_AREA","",6)
out | outName | identifier_name |
OceanFront_ViewshedNEW2.py | maryTables) : os.makedirs(SummaryTables)
if not os.path.exists(ElevAvgTables): os.makedirs(ElevAvgTables)
if not os.path.exists(ArcLengths): os.makedirs(ArcLengths)
def outName(input,post="_Output"):
|
def DegViewshed (FLOOR, HEIGHT):
"""Calculates a parcels viewshed, in degrees"""
#Select Record
arcpy.SelectLayerByAttribute_management(PointsFL,"NEW_SELECTION",SQL)
#Set Observer Height (OffSETA)
arcpy.CalculateField_management(PointsFL,"OFFSETA",HEIGHT,"PYTHON_9.3")
#perform viewshed analysis
arcpy.SetProgressorLabel("Performing Viewshed Analysis for point "+str(value))
outViewshed = IntermediateFiles+"\\vs_"+str(FLOOR)+"_"+str(value).split(".")[0]
arcpy.Viewshed_3d(outCon,PointsFL,outViewshed)
#convert viewshed to polygon
arcpy.SetProgressorLabel("Converting viewshed"+str(value)+" on floor "+str(FLOOR)+" to polygon.")
OutPoly = IntermediateFiles+"\\"+os.path.basename(outViewshed).split(".")[0]+"_poly.shp"
arcpy.RasterToPolygon_conversion(outViewshed,OutPoly)
#Intersect viewshed polygon with buffer clip
#This will allow the viewshed poly to inherit attribute fields needed for later analysis
FinalView = Final_Floor_Viewsheds+"\\FinalViewshed_"+str(FLOOR)+"_"+str(value)+".shp"
arcpy.Intersect_analysis([BufferClip,OutPoly],FinalView)
#Select features in viewshed polygon with Gridcode = 1
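#In the Viewshed output raster, GRIDCODE 1 marks cells visible from the observation point and 0 marks cells that are not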
#If no records with grid = 1 exist, script will skip to setting viewshed in degrees to 0
#Convert viewshed polygon to layer
ViewshedLayer = outName(FinalView,"lyr")
arcpy.MakeFeatureLayer_management(FinalView,ViewshedLayer)
#Select records with gridcode = 1
arcpy.SelectLayerByAttribute_management(ViewshedLayer,"NEW_SELECTION","GRIDCODE ="+str(1)+"")
#Get count of the # of records selected in viewshed poly layer
VsLyrCount = int(arcpy.GetCount_management(ViewshedLayer).getOutput(0))
NoView = SummaryTables+"\\summary_"+str(FLOOR)+"_"+str(value)+".dbf"
YesView = SummaryTables+"\\summary_"+str(FLOOR)+"_"+str(value)+".dbf"
StatsField0 = [["GRIDCODE","SUM"]]
CaseField0 = ["ID","SPOT",FloorField]
StatsField1 = [["LENGTH","SUM"]]
CaseField1 = ["GRIDCODE","ID","SPOT",FloorField]
VsArcLengths = ArcLengths+"\\ArcLength_"+str(FLOOR)+"_"+str(value)+".shp"
if VsLyrCount == 0: #no viewable areas exist
arcpy.SelectLayerByAttribute_management(ViewshedLayer,"CLEAR_SELECTION")
arcpy.SetProgressorLabel("Calculating viewshed statistics for parcel "+str(value))
arcpy.Statistics_analysis(ViewshedLayer,NoView, StatsField0,CaseField0)
#Add field to summary table to hold viewshed value of 0
#Add field to note which floor viewshed corresponds to
arcpy.AddField_management(NoView, "FLR_RAN","SHORT")
arcpy.AddField_management(NoView, "VIEW_"+Year,"DOUBLE")
arcpy.AddField_management(NoView,"OFFSETA","SHORT")
arcpy.CalculateField_management(NoView,"FLR_RAN",FLOOR)
arcpy.CalculateField_management(NoView,"VIEW_"+Year,0)
arcpy.CalculateField_management(NoView,"OFFSETA",HEIGHT)
else: #Calculate viewshed, in degrees, for selected records
arcpy.SetProgressorLabel("Getting arc length for parcel"+str(value)+" at the "+str(FLOOR)+" floor.")
arcpy.Intersect_analysis([BufferLine,ViewshedLayer],VsArcLengths,"",10,"LINE")#Intersect with any line within 10 ft.
arcpy.AddField_management(VsArcLengths, "Length","DOUBLE")
arcpy.CalculateField_management(VsArcLengths,"Length","!SHAPE.length@miles!","PYTHON_9.3")
arcpy.Statistics_analysis(VsArcLengths,YesView,StatsField1,CaseField1)
#Add fields to output summary table
arcpy.AddField_management(YesView,"FLR_RAN","SHORT")
arcpy.AddField_management(YesView,"VIEW_"+Year,"DOUBLE")
arcpy.AddField_management(YesView,"OFFSETA","SHORT")
arcpy.CalculateField_management(YesView,"FLR_RAN",FLOOR)
arcpy.CalculateField_management(YesView,"OFFSETA",HEIGHT)
arcpy.CalculateField_management(YesView,"VIEW_"+Year,"((!SUM_LENGTH!/3.14)*180)","PYTHON_9.3")
arcpy.SelectLayerByAttribute_management(ViewshedLayer,"CLEAR_SELECTION")
#Open error log file
infile = open(outputWorkspace+"\\Error_Log_"+Year+".txt","w")
#Perform field check for viewshed parameters within the observation point attribute table.
#Script will add field to the attribute table if the field does not already exist.
#Needed fields are SPOT - used to define the surface elevations for the observation points.
#Azimuth -define the horizontal angle limits to the scan (start and end points in degrees).
#Radius - defines the search distance when identifying areas visible from each observation point.
#Cells that are beyond a certain distance can be excluded from the analysis.
VSFieldList = ["SPOT","OFFSETA","AZIMUTH1","AZIMUTH2","RADIUS1","RADIUS2"]
arcpy.SetProgressorLabel("Checking fields in observation point attribute table")
for FieldList in VSFieldList:
ObsPtsFieldList=arcpy.ListFields(ObsPts,FieldList)
fieldNames=[field.name for field in ObsPtsFieldList]
if FieldList in fieldNames:
print "Field", FieldList, "found in", ObsPts
else:
print"Field", FieldList, "NOT found in", ObsPts
arcpy.AddField_management(ObsPts,FieldList, "DOUBLE")
print FieldList, "created"
#Populate viewshed parameters with correct values for viewshed
Az1Cal = 0
Az2Cal = 180
Rad1Cal = 0
Rad2Cal = 5280
arcpy.CalculateField_management(ObsPts,"AZIMUTH1",Az1Cal)
arcpy.CalculateField_management(ObsPts,"AZIMUTH2",Az2Cal)
arcpy.CalculateField_management(ObsPts,"RADIUS1",Rad1Cal)
arcpy.CalculateField_management(ObsPts,"RADIUS2",Rad2Cal)
#Create Feature Layers
arcpy.SetProgressorLabel("Creating feature layers")
PointsFL = outName(ObsPts,"_Lyr")
footprintFL = outName(footprint,"_Lyr")
arcpy.MakeFeatureLayer_management(ObsPts, PointsFL)
arcpy.MakeFeatureLayer_management(footprint,footprintFL)
#Select observation points one by one
arcpy.SetProgressorLabel("Starting viewshed analysis...")
RangeCount = int(arcpy.GetCount_management(PointsFL).getOutput(0))
#Count number of parcels being processed
arcpy.AddMessage("\nCalculating viewshed for "+str(RangeCount)+" parcels")
sc = arcpy.SearchCursor(PointsFL)
for row in sc:
try:
#Get Parcel ID value
value = row.ID
count = row.FID+1
FlrCnt = row.getValue(FloorField)
#Get bare earth elevation of parcel
arcpy.SetProgressorLabel("Changing elevation footprint to bare earth elevation for point "+str(value))
SQL = "Id =" +str(value)+""
arcpy.SelectLayerByAttribute_management(PointsFL,"NEW_SELECTION",SQL)
arcpy.SelectLayerByLocation_management(footprintFL,"INTERSECT",PointsFL)
arcpy.env.workspace = IntermediateFiles #need to change workspace so that the .save files get saved correctly
outExtractByMask = ExtractByMask(BareElevation,footprintFL)
outExtractByMask.save(IntermediateFiles+"\\ebm_"+str(value))
ElevAvg = ElevAvgTables+"\\avgelev_"+str(value)+".dbf"
arcpy.Statistics_analysis(outExtractByMask,ElevAvg,[["VALUE","MEAN"]])
arcpy.AddField_management(ElevAvg,"Pt_ID","SHORT")
arcpy.CalculateField_management(ElevAvg,"Pt_ID",value)
arcpy.AddJoin_management(PointsFL,"Id",ElevAvg,"Pt_ID","KEEP_COMMON")
Field1 = os.path.basename(ObsPts).split(".")[0]+".SPOT"
Field2 = "!"+os.path.basename(ElevAvg).split(".")[0]+".MEAN_VALUE!"
arcpy.CalculateField_management(PointsFL,Field1,Field2,"PYTHON_9.3")
arcpy.RemoveJoin_management(PointsFL)
#Set parcel elevation to 0; this will be replaced by the SPOT value calculated above
RastFootprint = IntermediateFiles+"\\fp_"+str(value).split(".")[0]
arcpy.PolygonToRaster_conversion(footprintFL,"FID",RastFootprint,"MAXIMUM_AREA","",6)
outIsNull | """Returns output name."""
outName=os.path.basename(input).split(".")[0]+post
return outName | identifier_body |
container.rs | )? {
kill(pid, signal)?;
}
}
// For cgroup v1, killing a process in a frozen cgroup does nothing until it's thawed.
// Only thaw the cgroup for SIGKILL.
// Ref: https://github.com/opencontainers/runc/pull/3217
if !is_cgroup2_unified_mode() && self.state == ContainerState::Paused && signal == SIGKILL {
freeze(&self.cgroup, FreezerState::Thawed)?;
}
Ok(())
}
pub async fn delete(&self, force: bool, logger: &Logger) -> Result<()> {
let status = &self.status;
let spec = status
.config
.spec
.as_ref()
.ok_or_else(|| anyhow!("spec config was not present in the status"))?;
let oci_state = OCIState {
version: status.oci_version.clone(),
id: status.id.clone(),
status: self.state,
pid: status.pid,
bundle: status
.bundle
.to_str()
.ok_or_else(|| anyhow!("invalid bundle path"))?
.to_string(),
annotations: spec.annotations.clone(),
};
if let Some(hooks) = spec.hooks.as_ref() {
info!(&logger, "Poststop Hooks");
let mut poststop_hookstates = HookStates::new();
poststop_hookstates.execute_hooks(&hooks.poststop, Some(oci_state.clone()))?;
}
match oci_state.status {
ContainerState::Stopped => {
self.destroy()?;
}
ContainerState::Created => {
// Kill an init process
self.kill(SIGKILL, false)?;
self.destroy()?;
}
_ => {
if force {
self.kill(SIGKILL, true)?;
self.destroy()?;
} else {
return Err(anyhow!(
"cannot delete container {} that is not stopped",
&status.id
));
}
}
}
Ok(())
}
pub fn pause(&self) -> Result<()> {
if self.state != ContainerState::Running && self.state != ContainerState::Created {
return Err(anyhow!(
"failed to pause container: current status is: {:?}",
self.state
));
}
freeze(&self.cgroup, FreezerState::Frozen)?;
Ok(())
}
pub fn resume(&self) -> Result<()> {
if self.state != ContainerState::Paused {
return Err(anyhow!(
"failed to resume container: current status is: {:?}",
self.state
));
}
freeze(&self.cgroup, FreezerState::Thawed)?;
Ok(())
}
pub fn destroy(&self) -> Result<()> {
remove_cgroup_dir(&self.cgroup)?;
self.status.remove_dir()
}
}
/// Used to run a process. If init is set, it will create a container and run the process in it.
/// If init is not set, it will run the process in an existing container.
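/// Typical init flow (sketch): build a `LinuxContainer` via `create_linux_container`, wrap it
/// in a `ContainerLauncher` with `init = true`, then call `launch(ContainerAction::Create, &logger)`
/// (or `ContainerAction::Run` to create and start in one step).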
#[derive(Debug)]
pub struct ContainerLauncher {
pub id: String,
pub bundle: PathBuf,
pub state_root: PathBuf,
pub init: bool,
pub runner: LinuxContainer,
pub pid_file: Option<PathBuf>,
}
impl ContainerLauncher {
pub fn new(
id: &str,
bundle: &Path,
state_root: &Path,
init: bool,
runner: LinuxContainer,
pid_file: Option<PathBuf>,
) -> Self {
ContainerLauncher {
id: id.to_string(),
bundle: bundle.to_path_buf(),
state_root: state_root.to_path_buf(),
init,
runner,
pid_file,
}
}
/// Launch a process. For init containers, we will create a container. For non-init, it will join an existing container.
pub async fn launch(&mut self, action: ContainerAction, logger: &Logger) -> Result<()> {
if self.init {
self.spawn_container(action, logger).await?;
} else {
if action == ContainerAction::Create {
return Err(anyhow!(
"ContainerAction::Create is used for init-container only"
));
}
self.spawn_process(action, logger).await?;
}
if let Some(pid_file) = self.pid_file.as_ref() {
fs::write(
pid_file,
format!("{}", self.runner.get_process(self.id.as_str())?.pid()),
)?;
}
Ok(())
}
/// Create the container by invoking runner to spawn the first process and save status.
async fn spawn_container(&mut self, action: ContainerAction, logger: &Logger) -> Result<()> {
// State root path root/id has been created in LinuxContainer::new(),
// so we don't have to create it again.
// Spawn a new process in the container by using the agent's codes.
self.spawn_process(action, logger).await?;
let status = self.get_status()?;
status.save()?;
debug!(logger, "saved status is {:?}", status);
// Clean up the fifo file created by LinuxContainer, which is used to block the created process.
if action == ContainerAction::Run || action == ContainerAction::Start {
let fifo_path = get_fifo_path(&status);
if fifo_path.exists() {
unlink(&fifo_path)?;
}
}
Ok(())
}
/// Generate rustjail::Process from OCI::Process
fn get_process(&self, logger: &Logger) -> Result<Process> {
let spec = self.runner.config.spec.as_ref().unwrap();
if spec.process.is_some() {
Ok(Process::new(
logger,
spec.process
.as_ref()
.ok_or_else(|| anyhow!("process config was not present in the spec file"))?,
// rustjail::LinuxContainer use the exec_id to identify processes in a container,
// so we can get the spawned process by ctr.get_process(exec_id) later.
// Since LinuxContainer is temporarily created to spawn one process in each runk invocation,
// we can use arbitrary string as the exec_id. Here we choose the container id.
&self.id,
self.init,
0,
)?)
} else {
Err(anyhow!("no process configuration"))
}
}
/// Spawn a new process in the container by invoking runner.
async fn spawn_process(&mut self, action: ContainerAction, logger: &Logger) -> Result<()> {
// Agent will chdir to bundle_path before creating LinuxContainer. Just do the same as agent.
let current_dir = current_dir()?;
chdir(&self.bundle)?;
defer! {
chdir(¤t_dir).unwrap();
}
let process = self.get_process(logger)?;
match action {
ContainerAction::Create => {
self.runner.start(process).await?;
}
ContainerAction::Start => {
self.runner.exec().await?;
}
ContainerAction::Run => {
self.runner.run(process).await?;
}
}
Ok(())
}
/// Generate runk specified Status
fn get_status(&self) -> Result<Status> {
let oci_state = self.runner.oci_state()?;
// read start time from /proc/<pid>/stat
let proc = procfs::process::Process::new(self.runner.init_process_pid)?;
let process_start_time = proc.stat()?.starttime;
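// starttime is reported in clock ticks since boot (see proc(5)); it is stored in the
// status file as-is rather than converted to wall-clock time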
Status::new(
&self.state_root,
&self.bundle,
oci_state,
process_start_time,
self.runner.created,
self.runner
.cgroup_manager
.as_ref()
.as_any()?
.downcast_ref::<CgroupManager>()
.unwrap()
.clone(),
self.runner.config.clone(),
)
}
}
pub fn create_linux_container(
id: &str,
root: &Path,
config: CreateOpts,
console_socket: Option<PathBuf>,
logger: &Logger,
) -> Result<LinuxContainer> {
let mut container = LinuxContainer::new(
id,
root.to_str()
.map(|s| s.to_string())
.ok_or_else(|| anyhow!("failed to convert bundle path"))?
.as_str(),
config,
logger,
)?;
if let Some(socket_path) = console_socket.as_ref() {
container.set_console_socket(socket_path)?;
}
Ok(container)
}
// Load rustjail's Linux container.
// "uid_map_path" and "gid_map_path" are always empty, so they are not set.
pub fn load_linux_container(
status: &Status,
console_socket: Option<PathBuf>,
logger: &Logger,
) -> Result<LinuxContainer> {
let mut container = LinuxContainer::new(
&status.id,
&status
.root
.to_str()
.map(|s| s.to_string())
.ok_or_else(|| anyhow!("failed to convert a root path"))?,
status.config.clone(),
logger,
)?;
if let Some(socket_path) = console_socket.as_ref() {
container.set_console_socket(socket_path)?;
}
container.init_process_pid = status.pid;
container.init_process_start_time = status.process_start_time;
container.created = status.created.into();
Ok(container)
}
pub fn get_config_path<P: AsRef<Path>>(bundle: P) -> PathBuf {
bundle.as_ref().join(CONFIG_FILE_NAME)
}
pub fn get_fifo_path(status: &Status) -> PathBuf {
status.root.join(&status.id).join(EXEC_FIFO_FILENAME)
} |
#[cfg(test)]
mod tests {
use super::*;
use crate::utils::test_utils::*; | random_line_split |
|
container.rs | , Copy, Clone, PartialEq)]
pub enum ContainerAction {
Create,
Start,
Run,
}
#[derive(Debug)]
pub struct Container {
pub status: Status,
pub state: ContainerState,
pub cgroup: cgroups::Cgroup,
}
// Container represents a container that is created by the container runtime.
impl Container {
pub fn load(state_root: &Path, id: &str) -> Result<Self> {
let status = Status::load(state_root, id)?;
let spec = status
.config
.spec
.as_ref()
.ok_or_else(|| anyhow!("spec config was not present"))?;
let linux = spec
.linux
.as_ref()
.ok_or_else(|| anyhow!("linux config was not present"))?;
let cpath = if linux.cgroups_path.is_empty() {
id.to_string()
} else {
linux
.cgroups_path
.clone()
.trim_start_matches('/')
.to_string()
};
let cgroup = cgroups::Cgroup::load(cgroups::hierarchies::auto(), cpath);
let state = get_current_container_state(&status, &cgroup)?;
Ok(Self {
status,
state,
cgroup,
})
}
pub fn processes(&self) -> Result<Vec<Pid>> {
let pids = self.cgroup.tasks();
let result = pids.iter().map(|x| Pid::from_raw(x.pid as i32)).collect();
Ok(result)
}
pub fn kill(&self, signal: Signal, all: bool) -> Result<()> {
if all {
let pids = self.processes()?;
for pid in pids {
if !status::is_process_running(pid)? {
continue;
}
kill(pid, signal)?;
}
} else {
// If --all option is not specified and the container is stopped,
// kill operation generates an error in accordance with the OCI runtime spec.
if self.state == ContainerState::Stopped {
return Err(anyhow!(
"container {} can't be killed because it is {:?}",
self.status.id,
self.state
));
}
let pid = Pid::from_raw(self.status.pid);
if status::is_process_running(pid)? {
kill(pid, signal)?;
}
}
// For cgroup v1, killing a process in a frozen cgroup does nothing until it's thawed.
// Only thaw the cgroup for SIGKILL.
// Ref: https://github.com/opencontainers/runc/pull/3217
if !is_cgroup2_unified_mode() && self.state == ContainerState::Paused && signal == SIGKILL {
freeze(&self.cgroup, FreezerState::Thawed)?;
}
Ok(())
}
pub async fn delete(&self, force: bool, logger: &Logger) -> Result<()> {
let status = &self.status;
let spec = status
.config
.spec
.as_ref()
.ok_or_else(|| anyhow!("spec config was not present in the status"))?;
let oci_state = OCIState {
version: status.oci_version.clone(),
id: status.id.clone(),
status: self.state,
pid: status.pid,
bundle: status
.bundle
.to_str()
.ok_or_else(|| anyhow!("invalid bundle path"))?
.to_string(),
annotations: spec.annotations.clone(),
};
if let Some(hooks) = spec.hooks.as_ref() {
info!(&logger, "Poststop Hooks");
let mut poststop_hookstates = HookStates::new();
poststop_hookstates.execute_hooks(&hooks.poststop, Some(oci_state.clone()))?;
}
match oci_state.status {
ContainerState::Stopped => {
self.destroy()?;
}
ContainerState::Created => {
// Kill an init process
self.kill(SIGKILL, false)?;
self.destroy()?;
}
_ => {
if force {
self.kill(SIGKILL, true)?;
self.destroy()?;
} else {
return Err(anyhow!(
"cannot delete container {} that is not stopped",
&status.id
));
}
}
}
Ok(())
}
pub fn | (&self) -> Result<()> {
if self.state != ContainerState::Running && self.state != ContainerState::Created {
return Err(anyhow!(
"failed to pause container: current status is: {:?}",
self.state
));
}
freeze(&self.cgroup, FreezerState::Frozen)?;
Ok(())
}
pub fn resume(&self) -> Result<()> {
if self.state != ContainerState::Paused {
return Err(anyhow!(
"failed to resume container: current status is: {:?}",
self.state
));
}
freeze(&self.cgroup, FreezerState::Thawed)?;
Ok(())
}
pub fn destroy(&self) -> Result<()> {
remove_cgroup_dir(&self.cgroup)?;
self.status.remove_dir()
}
}
/// Used to run a process. If init is set, it will create a container and run the process in it.
/// If init is not set, it will run the process in an existing container.
#[derive(Debug)]
pub struct ContainerLauncher {
pub id: String,
pub bundle: PathBuf,
pub state_root: PathBuf,
pub init: bool,
pub runner: LinuxContainer,
pub pid_file: Option<PathBuf>,
}
impl ContainerLauncher {
pub fn new(
id: &str,
bundle: &Path,
state_root: &Path,
init: bool,
runner: LinuxContainer,
pid_file: Option<PathBuf>,
) -> Self {
ContainerLauncher {
id: id.to_string(),
bundle: bundle.to_path_buf(),
state_root: state_root.to_path_buf(),
init,
runner,
pid_file,
}
}
/// Launch a process. For init containers, we will create a container. For non-init, it will join an existing container.
pub async fn launch(&mut self, action: ContainerAction, logger: &Logger) -> Result<()> {
if self.init {
self.spawn_container(action, logger).await?;
} else {
if action == ContainerAction::Create {
return Err(anyhow!(
"ContainerAction::Create is used for init-container only"
));
}
self.spawn_process(action, logger).await?;
}
if let Some(pid_file) = self.pid_file.as_ref() {
fs::write(
pid_file,
format!("{}", self.runner.get_process(self.id.as_str())?.pid()),
)?;
}
Ok(())
}
/// Create the container by invoking runner to spawn the first process and save status.
async fn spawn_container(&mut self, action: ContainerAction, logger: &Logger) -> Result<()> {
// State root path root/id has been created in LinuxContainer::new(),
// so we don't have to create it again.
// Spawn a new process in the container by using the agent's codes.
self.spawn_process(action, logger).await?;
let status = self.get_status()?;
status.save()?;
debug!(logger, "saved status is {:?}", status);
// Clean up the fifo file created by LinuxContainer, which is used to block the created process.
if action == ContainerAction::Run || action == ContainerAction::Start {
let fifo_path = get_fifo_path(&status);
if fifo_path.exists() {
unlink(&fifo_path)?;
}
}
Ok(())
}
/// Generate rustjail::Process from OCI::Process
fn get_process(&self, logger: &Logger) -> Result<Process> {
let spec = self.runner.config.spec.as_ref().unwrap();
if spec.process.is_some() {
Ok(Process::new(
logger,
spec.process
.as_ref()
.ok_or_else(|| anyhow!("process config was not present in the spec file"))?,
// rustjail::LinuxContainer use the exec_id to identify processes in a container,
// so we can get the spawned process by ctr.get_process(exec_id) later.
// Since LinuxContainer is temporarily created to spawn one process in each runk invocation,
// we can use arbitrary string as the exec_id. Here we choose the container id.
&self.id,
self.init,
0,
)?)
} else {
Err(anyhow!("no process configuration"))
}
}
/// Spawn a new process in the container by invoking runner.
async fn spawn_process(&mut self, action: ContainerAction, logger: &Logger) -> Result<()> {
// Agent will chdir to bundle_path before creating LinuxContainer. Just do the same as agent.
let current_dir = current_dir()?;
chdir(&self.bundle)?;
defer! {
chdir(¤t_dir).unwrap();
}
let process = self.get_process(logger)?;
match action {
ContainerAction::Create => {
self.runner.start(process).await?;
}
ContainerAction::Start => {
self.runner.exec().await?;
}
ContainerAction::Run => {
self.runner.run(process).await?;
}
}
Ok(())
}
/// Generate runk specified Status
fn get_status(&self) -> Result<Status> {
let oci_state = self.runner.oci_state()?;
// read start time from /proc/<pid>/stat
let proc = procfs::process::Process::new(self.runner.init_process_pid)?;
| pause | identifier_name |
container.rs | , Copy, Clone, PartialEq)]
pub enum ContainerAction {
Create,
Start,
Run,
}
#[derive(Debug)]
pub struct Container {
pub status: Status,
pub state: ContainerState,
pub cgroup: cgroups::Cgroup,
}
// Container represents a container that is created by the container runtime.
impl Container {
pub fn load(state_root: &Path, id: &str) -> Result<Self> {
let status = Status::load(state_root, id)?;
let spec = status
.config
.spec
.as_ref()
.ok_or_else(|| anyhow!("spec config was not present"))?;
let linux = spec
.linux
.as_ref()
.ok_or_else(|| anyhow!("linux config was not present"))?;
let cpath = if linux.cgroups_path.is_empty() {
id.to_string()
} else {
linux
.cgroups_path
.clone()
.trim_start_matches('/')
.to_string()
};
let cgroup = cgroups::Cgroup::load(cgroups::hierarchies::auto(), cpath);
let state = get_current_container_state(&status, &cgroup)?;
Ok(Self {
status,
state,
cgroup,
})
}
pub fn processes(&self) -> Result<Vec<Pid>> {
let pids = self.cgroup.tasks();
let result = pids.iter().map(|x| Pid::from_raw(x.pid as i32)).collect();
Ok(result)
}
pub fn kill(&self, signal: Signal, all: bool) -> Result<()> {
if all {
let pids = self.processes()?;
for pid in pids {
if !status::is_process_running(pid)? {
continue;
}
kill(pid, signal)?;
}
} else {
// If --all option is not specified and the container is stopped,
// kill operation generates an error in accordance with the OCI runtime spec.
if self.state == ContainerState::Stopped {
return Err(anyhow!(
"container {} can't be killed because it is {:?}",
self.status.id,
self.state
));
}
let pid = Pid::from_raw(self.status.pid);
if status::is_process_running(pid)? {
kill(pid, signal)?;
}
}
// For cgroup v1, killing a process in a frozen cgroup does nothing until it's thawed.
// Only thaw the cgroup for SIGKILL.
// Ref: https://github.com/opencontainers/runc/pull/3217
if !is_cgroup2_unified_mode() && self.state == ContainerState::Paused && signal == SIGKILL {
freeze(&self.cgroup, FreezerState::Thawed)?;
}
Ok(())
}
pub async fn delete(&self, force: bool, logger: &Logger) -> Result<()> {
let status = &self.status;
let spec = status
.config
.spec
.as_ref()
.ok_or_else(|| anyhow!("spec config was not present in the status"))?;
let oci_state = OCIState {
version: status.oci_version.clone(),
id: status.id.clone(),
status: self.state,
pid: status.pid,
bundle: status
.bundle
.to_str()
.ok_or_else(|| anyhow!("invalid bundle path"))?
.to_string(),
annotations: spec.annotations.clone(),
};
if let Some(hooks) = spec.hooks.as_ref() {
info!(&logger, "Poststop Hooks");
let mut poststop_hookstates = HookStates::new();
poststop_hookstates.execute_hooks(&hooks.poststop, Some(oci_state.clone()))?;
}
match oci_state.status {
ContainerState::Stopped => {
self.destroy()?;
}
ContainerState::Created => {
// Kill an init process
self.kill(SIGKILL, false)?;
self.destroy()?;
}
_ => {
if force {
self.kill(SIGKILL, true)?;
self.destroy()?;
} else {
return Err(anyhow!(
"cannot delete container {} that is not stopped",
&status.id
));
}
}
}
Ok(())
}
pub fn pause(&self) -> Result<()> {
if self.state != ContainerState::Running && self.state != ContainerState::Created {
return Err(anyhow!(
"failed to pause container: current status is: {:?}",
self.state
));
}
freeze(&self.cgroup, FreezerState::Frozen)?;
Ok(())
}
pub fn resume(&self) -> Result<()> {
if self.state != ContainerState::Paused |
freeze(&self.cgroup, FreezerState::Thawed)?;
Ok(())
}
pub fn destroy(&self) -> Result<()> {
remove_cgroup_dir(&self.cgroup)?;
self.status.remove_dir()
}
}
/// Used to run a process. If init is set, it will create a container and run the process in it.
/// If init is not set, it will run the process in an existing container.
#[derive(Debug)]
pub struct ContainerLauncher {
pub id: String,
pub bundle: PathBuf,
pub state_root: PathBuf,
pub init: bool,
pub runner: LinuxContainer,
pub pid_file: Option<PathBuf>,
}
impl ContainerLauncher {
pub fn new(
id: &str,
bundle: &Path,
state_root: &Path,
init: bool,
runner: LinuxContainer,
pid_file: Option<PathBuf>,
) -> Self {
ContainerLauncher {
id: id.to_string(),
bundle: bundle.to_path_buf(),
state_root: state_root.to_path_buf(),
init,
runner,
pid_file,
}
}
/// Launch a process. For init containers, we will create a container. For non-init, it will join an existing container.
pub async fn launch(&mut self, action: ContainerAction, logger: &Logger) -> Result<()> {
if self.init {
self.spawn_container(action, logger).await?;
} else {
if action == ContainerAction::Create {
return Err(anyhow!(
"ContainerAction::Create is used for init-container only"
));
}
self.spawn_process(action, logger).await?;
}
if let Some(pid_file) = self.pid_file.as_ref() {
fs::write(
pid_file,
format!("{}", self.runner.get_process(self.id.as_str())?.pid()),
)?;
}
Ok(())
}
/// Create the container by invoking runner to spawn the first process and save status.
async fn spawn_container(&mut self, action: ContainerAction, logger: &Logger) -> Result<()> {
// State root path root/id has been created in LinuxContainer::new(),
// so we don't have to create it again.
// Spawn a new process in the container by using the agent's codes.
self.spawn_process(action, logger).await?;
let status = self.get_status()?;
status.save()?;
debug!(logger, "saved status is {:?}", status);
// Clean up the fifo file created by LinuxContainer, which is used to block the created process.
if action == ContainerAction::Run || action == ContainerAction::Start {
let fifo_path = get_fifo_path(&status);
if fifo_path.exists() {
unlink(&fifo_path)?;
}
}
Ok(())
}
/// Generate rustjail::Process from OCI::Process
fn get_process(&self, logger: &Logger) -> Result<Process> {
let spec = self.runner.config.spec.as_ref().unwrap();
if spec.process.is_some() {
Ok(Process::new(
logger,
spec.process
.as_ref()
.ok_or_else(|| anyhow!("process config was not present in the spec file"))?,
// rustjail::LinuxContainer use the exec_id to identify processes in a container,
// so we can get the spawned process by ctr.get_process(exec_id) later.
// Since LinuxContainer is temporarily created to spawn one process in each runk invocation,
// we can use arbitrary string as the exec_id. Here we choose the container id.
&self.id,
self.init,
0,
)?)
} else {
Err(anyhow!("no process configuration"))
}
}
/// Spawn a new process in the container by invoking runner.
async fn spawn_process(&mut self, action: ContainerAction, logger: &Logger) -> Result<()> {
// Agent will chdir to bundle_path before creating LinuxContainer. Just do the same as agent.
let current_dir = current_dir()?;
chdir(&self.bundle)?;
defer! {
chdir(¤t_dir).unwrap();
}
let process = self.get_process(logger)?;
match action {
ContainerAction::Create => {
self.runner.start(process).await?;
}
ContainerAction::Start => {
self.runner.exec().await?;
}
ContainerAction::Run => {
self.runner.run(process).await?;
}
}
Ok(())
}
/// Generate runk specified Status
fn get_status(&self) -> Result<Status> {
let oci_state = self.runner.oci_state()?;
// read start time from /proc/<pid>/stat
let proc = procfs::process::Process::new(self.runner.init_process_pid | {
return Err(anyhow!(
"failed to resume container: current status is: {:?}",
self.state
));
} | conditional_block |
InceptionResNet_Image_Captioning.py |
class RNN_Decoder(tf.keras.Model):
# def __init__(self, embedding_dim, units, vocab_size, embedding_matrix):
def __init__(self, embedding_dim, units, vocab_size):
super(RNN_Decoder, self).__init__()
self.units = units
# self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim,embeddings_initializer=tf.keras.initializers.Constant(embedding_matrix),
# trainable=False)
self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)
# self.lstm = tf.keras.layers.LSTM(self.units,
# return_sequences=True,
# return_state=True,
# recurrent_initializer='glorot_uniform')
self.gru = tf.keras.layers.GRU(self.units,
return_sequences=True,
return_state=True,
recurrent_initializer='glorot_uniform')
self.fc1 = tf.keras.layers.Dense(self.units)
self.fc2 = tf.keras.layers.Dense(vocab_size)
self.attention = BahdanauAttention(self.units)
def call(self, x, features, hidden):
# defining attention as a separate model
context_vector, attention_weights = self.attention(features, hidden)
# x shape after passing through embedding == (batch_size, 1, embedding_dim)
x = self.embedding(x)
# x shape after concatenation == (batch_size, 1, embedding_dim + hidden_size)
x = tf.concat([tf.expand_dims(context_vector, 1), x], axis=-1)
# passing the concatenated vector to the GRU
# output, state, carry_state = self.lstm(x)
output, state = self.gru(x)
# shape == (batch_size, max_length, hidden_size)
x = self.fc1(output)
# x shape == (batch_size * max_length, hidden_size)
x = tf.reshape(x, (-1, x.shape[2]))
# output shape == (batch_size * max_length, vocab)
x = self.fc2(x)
return x, state, attention_weights
def reset_state(self, batch_size):
return tf.zeros((batch_size, self.units))
#%%
train_image_paths2 = []
for a in range(82783):
# loc = 'E:\\neural\\finIm/im' + str(a) + ".png"
loc = trainim_dir + 'im' + str(a) + ".png"
if (os.path.exists(loc)):
train_image_paths2.append(loc)
test_image_paths = []
for a in range(40504):
loc = testim_dir + 'im' + str(a) + ".png"
# loc = 'C:\\Users\\BARAN/Desktop/Dersler/EEE/EEE443/phtyon/finIm/im' + str(a) + ".png"
if (os.path.exists(loc)):
test_image_paths.append(loc)
# tot=0
# for a in range(40504):
# loc = 'testIm/im' + str(a) + ".png"
# #loc = 'C:\\Users\\BARAN/Desktop/Dersler/EEE/EEE443/phtyon/finIm/im' + str(a) + ".png.npy"
# if (os.path.exists(loc)):
# tot +=1
#%%
f1 = h5py.File(eee443_dataset_dir + 'eee443_project_dataset_train.h5', "r")
print("Keys: %s" % f1.keys())
train_cap = np.array(f1["train_cap"])
train_imid = np.array(f1["train_imid"]) - 1
train_ims = np.array(f1["train_ims"])
train_url = np.array(f1["train_url"]) # URL of the image
word_code = np.array(f1["word_code"])
f2 = h5py.File(eee443_dataset_dir + 'eee443_project_dataset_test.h5', "r")
print("Keys: %s" % f2.keys())
test_cap = np.array(f2["test_caps"])
test_imid = np.array(f2["test_imid"])
test_ims = np.array(f2["test_ims"])
test_url = np.array(f2["test_url"]) # URL of the image
#%%
@tf.autograph.experimental.do_not_convert
def load_image(image_path):
img = tf.io.read_file(image_path)
img = tf.image.decode_png(img, channels=3)
# img = tf.image.resize(img, (224, 224))
img = tf.image.resize(img, (299, 299))
# img = tf.keras.applications.resnet_v2.preprocess_input(img)
img = tf.keras.applications.inception_resnet_v2.preprocess_input(img)
return img, image_path
# image_model2 = tf.keras.applications.ResNet50V2(include_top=False,weights='imagenet')
image_model2 = tf.keras.applications.InceptionResNetV2(include_top=False,weights='imagenet')
new_input2 = image_model2.input
hidden_layer2 = image_model2.layers[-1].output
image_features_extract_model2 = tf.keras.Model(new_input2, hidden_layer2)
#%%
# Get unique images
encode_train2 = train_image_paths2
# Feel free to change batch_size according to your system configuration
image_dataset2 = tf.data.Dataset.from_tensor_slices(encode_train2)
# image_dataset2 = image_dataset2.map(
# load_image, num_parallel_calls=tf.data.experimental.AUTOTUNE).batch(64)
image_dataset2 = image_dataset2.map(
load_image, num_parallel_calls=tf.data.experimental.AUTOTUNE).batch(32)
#%%
i = 0
for img, path in image_dataset2:
i +=1
if i%100 == 0:
print(i)
batch_features2 = image_features_extract_model2(img)
batch_features2 = tf.reshape(batch_features2,
(batch_features2.shape[0], -1, batch_features2.shape[3]))
for bf, p in zip(batch_features2, path):
path_of_feature = p.numpy().decode("utf-8")
np.save(path_of_feature, bf.numpy())
#%%
# Get unique images
encode_test2 = test_image_paths
# Feel free to change batch_size according to your system configuration
image_dataset3 = tf.data.Dataset.from_tensor_slices(encode_test2)
# image_dataset3 = image_dataset3.map(
# load_image, num_parallel_calls=tf.data.experimental.AUTOTUNE).batch(64)
image_dataset3 = image_dataset3.map(
load_image, num_parallel_calls=tf.data.experimental.AUTOTUNE).batch(32)
#%%
i = 0
for img, path in image_dataset3:
i +=1
if i%100 == 0:
print(i)
batch_features3 = image_features_extract_model2(img)
batch_features3 = tf.reshape(batch_features3,
(batch_features3.shape[0], -1, batch_features3.shape[3]))
for bf, p in zip(batch_features3, path):
path_of_feature = p.numpy().decode("utf-8")
np.save(path_of_feature, bf.numpy())
#%%
img_to_cap_vector2 = collections.defaultdict(list)
for i in range(len(train_imid)):
cap = train_cap[i]
imid = train_imid[i]
key = trainim_dir + 'im' + str(imid) + ".png"
if(os.path.exists(key)):
img_to_cap_vector2[key].append(cap)
img_to_cap_vector3 = collections.defaultdict(list)
for i in range(len(test_imid)):
cap = test_cap[i]
imid = test_imid[i]
key = testim_dir + 'im' + str(imid) + ".png"
if(os.path.exists(key)):
img_to_cap_vector3[key].append(cap)
#%%
# Create training and validation sets using an 85-15 split randomly.
img_keys2 = list(img_to_cap_vector2.keys())
random.shuffle(img_keys2)
slice_index2 = int(len(img_keys2)*0.85)
img_name_train_keys2, img_name_val_keys2 = img_keys2[:slice_index2], img_keys2[slice_index2:]
img_name_train2 = []
cap_train2 = []
for imgt in img_name_train_keys2:
capt_len = len(img_to_cap_vector2[imgt])
img_name_train2.extend([imgt] * capt_len)
cap_train2.extend(img_to_cap_vector2[imgt])
img_name_val2 = []
cap_val2 = []
for imgv in img_name_val_keys2:
capv_len = len(img_to_cap_vector2[imgv])
img_name_val2.extend([imgv] * capv_len)
cap_val2.extend(img_to_cap_vector2[imgv])
img_keys3 = list(img_to_cap_vector3.keys())
img_name_test2 = []
cap_test2 = []
for imgv in img_keys3:
capv_len = len(img_to_cap_vector3[imgv])
img_name_test2.extend([imgv] * capv_len)
cap_test2.extend(img_to_cap_vector3[imgv])
#%%
word_ind = np.asarray(np.asarray(word_code.tolist()))
words = np.asarray(np.asarray(word_code.dtype.names))
# np.squeeze reduces | x = self.fc(x)
x = tf.nn.relu(x)
return x | identifier_body |
|
InceptionResNet_Image_Captioning.py | finIm/im' + str(a) + ".png"
|
test_image_paths = []
for a in range(40504):
loc = testim_dir + 'im' + str(a) + ".png"
# loc = 'C:\\Users\\BARAN/Desktop/Dersler/EEE/EEE443/phtyon/finIm/im' + str(a) + ".png"
if (os.path.exists(loc)):
test_image_paths.append(loc)
# tot=0
# for a in range(40504):
# loc = 'testIm/im' + str(a) + ".png"
# #loc = 'C:\\Users\\BARAN/Desktop/Dersler/EEE/EEE443/phtyon/finIm/im' + str(a) + ".png.npy"
# if (os.path.exists(loc)):
# tot +=1
#%%
f1 = h5py.File(eee443_dataset_dir + 'eee443_project_dataset_train.h5', "r")
print("Keys: %s" % f1.keys())
train_cap = np.array(f1["train_cap"])
train_imid = np.array(f1["train_imid"]) - 1
train_ims = np.array(f1["train_ims"])
train_url = np.array(f1["train_url"]) # URL of the image
word_code = np.array(f1["word_code"])
f2 = h5py.File(eee443_dataset_dir + 'eee443_project_dataset_test.h5', "r")
print("Keys: %s" % f2.keys())
test_cap = np.array(f2["test_caps"])
test_imid = np.array(f2["test_imid"])
test_ims = np.array(f2["test_ims"])
test_url = np.array(f2["test_url"]) # URL of the image
#%%
@tf.autograph.experimental.do_not_convert
def load_image(image_path):
img = tf.io.read_file(image_path)
img = tf.image.decode_png(img, channels=3)
# img = tf.image.resize(img, (224, 224))
img = tf.image.resize(img, (299, 299))
# img = tf.keras.applications.resnet_v2.preprocess_input(img)
img = tf.keras.applications.inception_resnet_v2.preprocess_input(img)
return img, image_path
# image_model2 = tf.keras.applications.ResNet50V2(include_top=False,weights='imagenet')
image_model2 = tf.keras.applications.InceptionResNetV2(include_top=False,weights='imagenet')
new_input2 = image_model2.input
hidden_layer2 = image_model2.layers[-1].output
image_features_extract_model2 = tf.keras.Model(new_input2, hidden_layer2)
#%%
# Get unique images
encode_train2 = train_image_paths2
# Feel free to change batch_size according to your system configuration
image_dataset2 = tf.data.Dataset.from_tensor_slices(encode_train2)
# image_dataset2 = image_dataset2.map(
# load_image, num_parallel_calls=tf.data.experimental.AUTOTUNE).batch(64)
image_dataset2 = image_dataset2.map(
load_image, num_parallel_calls=tf.data.experimental.AUTOTUNE).batch(32)
#%%
i = 0
for img, path in image_dataset2:
i +=1
if i%100 == 0:
print(i)
batch_features2 = image_features_extract_model2(img)
batch_features2 = tf.reshape(batch_features2,
(batch_features2.shape[0], -1, batch_features2.shape[3]))
for bf, p in zip(batch_features2, path):
path_of_feature = p.numpy().decode("utf-8")
np.save(path_of_feature, bf.numpy())
#%%
# Get unique images
encode_test2 = test_image_paths
# Feel free to change batch_size according to your system configuration
image_dataset3 = tf.data.Dataset.from_tensor_slices(encode_test2)
# image_dataset3 = image_dataset3.map(
# load_image, num_parallel_calls=tf.data.experimental.AUTOTUNE).batch(64)
image_dataset3 = image_dataset3.map(
load_image, num_parallel_calls=tf.data.experimental.AUTOTUNE).batch(32)
#%%
i = 0
for img, path in image_dataset3:
i +=1
if i%100 == 0:
print(i)
batch_features3 = image_features_extract_model2(img)
batch_features3 = tf.reshape(batch_features3,
(batch_features3.shape[0], -1, batch_features3.shape[3]))
for bf, p in zip(batch_features3, path):
path_of_feature = p.numpy().decode("utf-8")
np.save(path_of_feature, bf.numpy())
#%%
img_to_cap_vector2 = collections.defaultdict(list)
for i in range(len(train_imid)):
cap = train_cap[i]
imid = train_imid[i]
key = trainim_dir + 'im' + str(imid) + ".png"
if(os.path.exists(key)):
img_to_cap_vector2[key].append(cap)
img_to_cap_vector3 = collections.defaultdict(list)
for i in range(len(test_imid)):
cap = test_cap[i]
imid = test_imid[i]
key = testim_dir + 'im' + str(imid) + ".png"
if(os.path.exists(key)):
img_to_cap_vector3[key].append(cap)
#%%
# Create training and validation sets using an 85-15 split randomly.
img_keys2 = list(img_to_cap_vector2.keys())
random.shuffle(img_keys2)
slice_index2 = int(len(img_keys2)*0.85)
img_name_train_keys2, img_name_val_keys2 = img_keys2[:slice_index2], img_keys2[slice_index2:]
img_name_train2 = []
cap_train2 = []
for imgt in img_name_train_keys2:
capt_len = len(img_to_cap_vector2[imgt])
img_name_train2.extend([imgt] * capt_len)
cap_train2.extend(img_to_cap_vector2[imgt])
img_name_val2 = []
cap_val2 = []
for imgv in img_name_val_keys2:
capv_len = len(img_to_cap_vector2[imgv])
img_name_val2.extend([imgv] * capv_len)
cap_val2.extend(img_to_cap_vector2[imgv])
img_keys3 = list(img_to_cap_vector3.keys())
img_name_test2 = []
cap_test2 = []
for imgv in img_keys3:
capv_len = len(img_to_cap_vector3[imgv])
img_name_test2.extend([imgv] * capv_len)
cap_test2.extend(img_to_cap_vector3[imgv])
#%%
word_ind = np.asarray(np.asarray(word_code.tolist()))
words = np.asarray(np.asarray(word_code.dtype.names))
# np.squeeze reduces the dimension by 1. These conversions should be made for sorting
word_ind = np.squeeze(word_ind.astype(int))
words = np.squeeze(np.reshape(words, (1, 1004)))
# arg sort returns the indices to make the sorting
sort_indices = np.argsort(word_ind) # use the argsort to sort both words and word_indices
words = np.array(words)[sort_indices]
word_ind = np.array(word_ind)[sort_indices]
#%%
# Feel free to change these parameters according to your system's configuration
# BATCH_SIZE = 32
# BATCH_SIZE = 256
BATCH_SIZE = 128
BUFFER_SIZE = 1000
embedding_dim = 300
units = 256
vocab_size = len(words)
num_steps = len(img_name_train2) // BATCH_SIZE
val_num_steps = len(img_name_val2) // BATCH_SIZE
# Shape of the vector extracted from InceptionV3 is (64, 2048)
# These two variables represent that vector shape
features_shape = 2048
attention_features_shape = 64
# attention_features_shape = 49
#%%
# Load the numpy files
def map_func(img_name, cap):
img_tensor = np.load(img_name.decode('utf-8')+'.npy')
return img_tensor, cap
dataset = tf.data.Dataset.from_tensor_slices((img_name_train2, cap_train2))
# Use map to load the numpy files in parallel
dataset = dataset.map(lambda item1, item2: tf.numpy_function(
map_func, [item1, item2], [tf.float32, tf.int32]),
num_parallel_calls=tf.data.experimental.AUTOTUNE)
# Shuffle and batch
dataset = dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE)
dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
val_dataset = tf.data.Dataset.from_tensor_slices((img_name_val2, cap_val2))
# Use map to load the numpy files in parallel
val_dataset = val_dataset.map(lambda item1, item2: tf.numpy_function(
map_func, [item1, item2], [tf.float32, tf.int32]),
num_parallel_calls=tf.data.experimental.AUTOTUNE)
# Shuffle and batch
val_dataset = val_dataset.shuffle(BUFFER_SIZE).batch(BATCH | loc = trainim_dir + 'im' + str(a) + ".png"
if (os.path.exists(loc)):
train_image_paths2.append(loc) | conditional_block |
InceptionResNet_Image_Captioning.py | tf.keras.applications.resnet_v2.preprocess_input(img)
img = tf.keras.applications.inception_resnet_v2.preprocess_input(img)
return img, image_path
# image_model2 = tf.keras.applications.ResNet50V2(include_top=False,weights='imagenet')
image_model2 = tf.keras.applications.InceptionResNetV2(include_top=False,weights='imagenet')
new_input2 = image_model2.input
hidden_layer2 = image_model2.layers[-1].output
image_features_extract_model2 = tf.keras.Model(new_input2, hidden_layer2)
#%%
# Get unique images
encode_train2 = train_image_paths2
# Feel free to change batch_size according to your system configuration
image_dataset2 = tf.data.Dataset.from_tensor_slices(encode_train2)
# image_dataset2 = image_dataset2.map(
# load_image, num_parallel_calls=tf.data.experimental.AUTOTUNE).batch(64)
image_dataset2 = image_dataset2.map(
load_image, num_parallel_calls=tf.data.experimental.AUTOTUNE).batch(32)
#%%
i = 0
for img, path in image_dataset2:
i +=1
if i%100 == 0:
print(i)
batch_features2 = image_features_extract_model2(img)
batch_features2 = tf.reshape(batch_features2,
(batch_features2.shape[0], -1, batch_features2.shape[3]))
for bf, p in zip(batch_features2, path):
path_of_feature = p.numpy().decode("utf-8")
np.save(path_of_feature, bf.numpy())
#%%
# Get unique images
encode_test2 = test_image_paths
# Feel free to change batch_size according to your system configuration
image_dataset3 = tf.data.Dataset.from_tensor_slices(encode_test2)
# image_dataset3 = image_dataset3.map(
# load_image, num_parallel_calls=tf.data.experimental.AUTOTUNE).batch(64)
image_dataset3 = image_dataset3.map(
load_image, num_parallel_calls=tf.data.experimental.AUTOTUNE).batch(32)
#%%
i = 0
for img, path in image_dataset3:
i +=1
if i%100 == 0:
print(i)
batch_features3 = image_features_extract_model2(img)
batch_features3 = tf.reshape(batch_features3,
(batch_features3.shape[0], -1, batch_features3.shape[3]))
for bf, p in zip(batch_features3, path):
path_of_feature = p.numpy().decode("utf-8")
np.save(path_of_feature, bf.numpy())
#%%
img_to_cap_vector2 = collections.defaultdict(list)
for i in range(len(train_imid)):
cap = train_cap[i]
imid = train_imid[i]
key = trainim_dir + 'im' + str(imid) + ".png"
if(os.path.exists(key)):
img_to_cap_vector2[key].append(cap)
img_to_cap_vector3 = collections.defaultdict(list)
for i in range(len(test_imid)):
cap = test_cap[i]
imid = test_imid[i]
key = testim_dir + 'im' + str(imid) + ".png"
if(os.path.exists(key)):
img_to_cap_vector3[key].append(cap)
#%%
# Create training and validation sets using an 85-15 split randomly.
img_keys2 = list(img_to_cap_vector2.keys())
random.shuffle(img_keys2)
slice_index2 = int(len(img_keys2)*0.85)
img_name_train_keys2, img_name_val_keys2 = img_keys2[:slice_index2], img_keys2[slice_index2:]
img_name_train2 = []
cap_train2 = []
for imgt in img_name_train_keys2:
capt_len = len(img_to_cap_vector2[imgt])
img_name_train2.extend([imgt] * capt_len)
cap_train2.extend(img_to_cap_vector2[imgt])
img_name_val2 = []
cap_val2 = []
for imgv in img_name_val_keys2:
capv_len = len(img_to_cap_vector2[imgv])
img_name_val2.extend([imgv] * capv_len)
cap_val2.extend(img_to_cap_vector2[imgv])
img_keys3 = list(img_to_cap_vector3.keys())
img_name_test2 = []
cap_test2 = []
for imgv in img_keys3:
capv_len = len(img_to_cap_vector3[imgv])
img_name_test2.extend([imgv] * capv_len)
cap_test2.extend(img_to_cap_vector3[imgv])
#%%
word_ind = np.asarray(np.asarray(word_code.tolist()))
words = np.asarray(np.asarray(word_code.dtype.names))
# np.squeeze reduces the dimension by 1. These conversions should be made for sorting
word_ind = np.squeeze(word_ind.astype(int))
words = np.squeeze(np.reshape(words, (1, 1004)))
# arg sort returns the indices to make the sorting
sort_indices = np.argsort(word_ind) # use the argsort to sort both words and word_indices
words = np.array(words)[sort_indices]
word_ind = np.array(word_ind)[sort_indices]
#%%
# Feel free to change these parameters according to your system's configuration
# BATCH_SIZE = 32
# BATCH_SIZE = 256
BATCH_SIZE = 128
BUFFER_SIZE = 1000
embedding_dim = 300
units = 256
vocab_size = len(words)
num_steps = len(img_name_train2) // BATCH_SIZE
val_num_steps = len(img_name_val2) // BATCH_SIZE
# Shape of the vector extracted from InceptionV3 is (64, 2048)
# These two variables represent that vector shape
features_shape = 2048
attention_features_shape = 64
# attention_features_shape = 49
#%%
# Load the numpy files
def map_func(img_name, cap):
img_tensor = np.load(img_name.decode('utf-8')+'.npy')
return img_tensor, cap
dataset = tf.data.Dataset.from_tensor_slices((img_name_train2, cap_train2))
# Use map to load the numpy files in parallel
dataset = dataset.map(lambda item1, item2: tf.numpy_function(
map_func, [item1, item2], [tf.float32, tf.int32]),
num_parallel_calls=tf.data.experimental.AUTOTUNE)
# Shuffle and batch
dataset = dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE)
dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
val_dataset = tf.data.Dataset.from_tensor_slices((img_name_val2, cap_val2))
# Use map to load the numpy files in parallel
val_dataset = val_dataset.map(lambda item1, item2: tf.numpy_function(
map_func, [item1, item2], [tf.float32, tf.int32]),
num_parallel_calls=tf.data.experimental.AUTOTUNE)
# Shuffle and batch
val_dataset = val_dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE)
val_dataset = val_dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
# #%%
# embeddings_index = dict()
# f = open(word_embed_dir + 'glove.6B.300d.txt', encoding="utf8")
# for line in f:
# values = line.split()
# word = values[0]
# coefs = np.asarray(values[1:], dtype='float32')
# embeddings_index[word] = coefs
# f.close()
# #%%
# print('Loaded %s word vectors.' % len(embeddings_index))
# # create a weight matrix for words in training docs
# embedding_matrix = np.zeros((1004, 300))
# unks = np.array([3, 55, 80, 492, 561, 621])
# i = 0
# for word in words:
# embedding_vector = embeddings_index.get(word)
# a = np.zeros(300)
# if i == 0:
# a[100] = 1
# embedding_matrix[i] = a
# elif i == 1:
# a[0] = 1
# embedding_matrix[i] = a
# elif i == 2:
# a[-1] = 1
# embedding_matrix[i] = a
# elif any(unks == i):
# a[200] = 1
# embedding_matrix[i] = a
# else:
# embedding_matrix[i] = embedding_vector
# i +=1
#%%
encoder = CNN_Encoder(embedding_dim)
# decoder = RNN_Decoder(embedding_dim, units, vocab_size, embedding_matrix)
decoder = RNN_Decoder(embedding_dim, units, vocab_size)
optimizer = tf.keras.optimizers.Adam()
loss_object = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=True, reduction='none')
def loss_function(real, pred):
mask = tf.math.logical_not(tf.math.equal(real, 0))
loss_ = loss_object(real, pred)
mask = tf.cast(mask, dtype=loss_.dtype)
loss_ *= mask
return tf.reduce_mean(loss_)
#%%
t_loss_plot = []
v_loss_plot = []
#%%
# @tf.function
def | train_step | identifier_name |
|
InceptionResNet_Image_Captioning.py | Im/im' + str(a) + ".png"
loc = trainim_dir + 'im' + str(a) + ".png"
if (os.path.exists(loc)):
train_image_paths2.append(loc)
test_image_paths = []
for a in range(40504):
loc = testim_dir + 'im' + str(a) + ".png"
# loc = 'C:\\Users\\BARAN/Desktop/Dersler/EEE/EEE443/phtyon/finIm/im' + str(a) + ".png"
if (os.path.exists(loc)):
test_image_paths.append(loc)
# tot=0
# for a in range(40504):
# loc = 'testIm/im' + str(a) + ".png"
# #loc = 'C:\\Users\\BARAN/Desktop/Dersler/EEE/EEE443/phtyon/finIm/im' + str(a) + ".png.npy"
# if (os.path.exists(loc)):
# tot +=1
#%%
f1 = h5py.File(eee443_dataset_dir + 'eee443_project_dataset_train.h5', "r")
print("Keys: %s" % f1.keys())
train_cap = np.array(f1["train_cap"])
train_imid = np.array(f1["train_imid"]) - 1
train_ims = np.array(f1["train_ims"])
train_url = np.array(f1["train_url"]) # URL of the image
word_code = np.array(f1["word_code"])
f2 = h5py.File(eee443_dataset_dir + 'eee443_project_dataset_test.h5', "r")
print("Keys: %s" % f2.keys())
test_cap = np.array(f2["test_caps"])
test_imid = np.array(f2["test_imid"])
test_ims = np.array(f2["test_ims"])
test_url = np.array(f2["test_url"]) # URL of the image
#%%
@tf.autograph.experimental.do_not_convert
def load_image(image_path):
img = tf.io.read_file(image_path)
img = tf.image.decode_png(img, channels=3)
# img = tf.image.resize(img, (224, 224))
img = tf.image.resize(img, (299, 299))
# img = tf.keras.applications.resnet_v2.preprocess_input(img)
img = tf.keras.applications.inception_resnet_v2.preprocess_input(img)
return img, image_path
# image_model2 = tf.keras.applications.ResNet50V2(include_top=False,weights='imagenet')
image_model2 = tf.keras.applications.InceptionResNetV2(include_top=False,weights='imagenet')
new_input2 = image_model2.input
hidden_layer2 = image_model2.layers[-1].output
image_features_extract_model2 = tf.keras.Model(new_input2, hidden_layer2)
#%%
# Get unique images
encode_train2 = train_image_paths2
# Feel free to change batch_size according to your system configuration
image_dataset2 = tf.data.Dataset.from_tensor_slices(encode_train2)
# image_dataset2 = image_dataset2.map(
# load_image, num_parallel_calls=tf.data.experimental.AUTOTUNE).batch(64)
image_dataset2 = image_dataset2.map(
load_image, num_parallel_calls=tf.data.experimental.AUTOTUNE).batch(32)
#%%
i = 0
for img, path in image_dataset2:
i +=1
if i%100 == 0:
| print(i)
batch_features2 = image_features_extract_model2(img)
batch_features2 = tf.reshape(batch_features2,
(batch_features2.shape[0], -1, batch_features2.shape[3]))
for bf, p in zip(batch_features2, path):
path_of_feature = p.numpy().decode("utf-8")
np.save(path_of_feature, bf.numpy())
#%%
# Get unique images
encode_test2 = test_image_paths
# Feel free to change batch_size according to your system configuration
image_dataset3 = tf.data.Dataset.from_tensor_slices(encode_test2)
# image_dataset3 = image_dataset3.map(
# load_image, num_parallel_calls=tf.data.experimental.AUTOTUNE).batch(64)
image_dataset3 = image_dataset3.map(
load_image, num_parallel_calls=tf.data.experimental.AUTOTUNE).batch(32)
#%%
i = 0
for img, path in image_dataset3:
i +=1
if i%100 == 0:
print(i)
batch_features3 = image_features_extract_model2(img)
batch_features3 = tf.reshape(batch_features3,
(batch_features3.shape[0], -1, batch_features3.shape[3]))
for bf, p in zip(batch_features3, path):
path_of_feature = p.numpy().decode("utf-8")
np.save(path_of_feature, bf.numpy())
#%%
img_to_cap_vector2 = collections.defaultdict(list)
for i in range(len(train_imid)):
cap = train_cap[i]
imid = train_imid[i]
key = trainim_dir + 'im' + str(imid) + ".png"
if(os.path.exists(key)):
img_to_cap_vector2[key].append(cap)
img_to_cap_vector3 = collections.defaultdict(list)
for i in range(len(test_imid)):
cap = test_cap[i]
imid = test_imid[i]
key = testim_dir + 'im' + str(imid) + ".png"
if(os.path.exists(key)):
img_to_cap_vector3[key].append(cap)
#%%
# Create training and validation sets using an 85-15 split randomly.
img_keys2 = list(img_to_cap_vector2.keys())
random.shuffle(img_keys2)
slice_index2 = int(len(img_keys2)*0.85)
img_name_train_keys2, img_name_val_keys2 = img_keys2[:slice_index2], img_keys2[slice_index2:]
img_name_train2 = []
cap_train2 = []
for imgt in img_name_train_keys2:
capt_len = len(img_to_cap_vector2[imgt])
img_name_train2.extend([imgt] * capt_len)
cap_train2.extend(img_to_cap_vector2[imgt])
img_name_val2 = []
cap_val2 = []
for imgv in img_name_val_keys2:
capv_len = len(img_to_cap_vector2[imgv])
img_name_val2.extend([imgv] * capv_len)
cap_val2.extend(img_to_cap_vector2[imgv])
img_keys3 = list(img_to_cap_vector3.keys())
img_name_test2 = []
cap_test2 = []
for imgv in img_keys3:
capv_len = len(img_to_cap_vector3[imgv])
img_name_test2.extend([imgv] * capv_len)
cap_test2.extend(img_to_cap_vector3[imgv])
#%%
word_ind = np.asarray(np.asarray(word_code.tolist()))
words = np.asarray(np.asarray(word_code.dtype.names))
# np.squeeze reduces the dimension by 1. These conversions should be made for sorting
word_ind = np.squeeze(word_ind.astype(int))
words = np.squeeze(np.reshape(words, (1, 1004)))
# arg sort returns the indices to make the sorting
sort_indices = np.argsort(word_ind) # use the argsort to sort both words and word_indices
words = np.array(words)[sort_indices]
word_ind = np.array(word_ind)[sort_indices]
#%%
# Feel free to change these parameters according to your system's configuration
# BATCH_SIZE = 32
# BATCH_SIZE = 256
BATCH_SIZE = 128
BUFFER_SIZE = 1000
embedding_dim = 300
units = 256
vocab_size = len(words)
num_steps = len(img_name_train2) // BATCH_SIZE
val_num_steps = len(img_name_val2) // BATCH_SIZE
# Shape of the vector extracted from InceptionV3 is (64, 2048)
# These two variables represent that vector shape
features_shape = 2048
attention_features_shape = 64
# attention_features_shape = 49
#%%
# Load the numpy files
def map_func(img_name, cap):
img_tensor = np.load(img_name.decode('utf-8')+'.npy')
return img_tensor, cap
dataset = tf.data.Dataset.from_tensor_slices((img_name_train2, cap_train2))
# Use map to load the numpy files in parallel
dataset = dataset.map(lambda item1, item2: tf.numpy_function(
map_func, [item1, item2], [tf.float32, tf.int32]),
num_parallel_calls=tf.data.experimental.AUTOTUNE)
# Shuffle and batch
dataset = dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE)
dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
val_dataset = tf.data.Dataset.from_tensor_slices((img_name_val2, cap_val2))
# Use map to load the numpy files in parallel
val_dataset = val_dataset.map(lambda item1, item2: tf.numpy_function(
map_func, [item1, item2], [tf.float32, tf.int32]),
num_parallel_calls=tf.data.experimental.AUTOTUNE)
# Shuffle and batch
val_dataset = val_dataset.shuffle(BUFFER_SIZE).batch(BATCH | random_line_split |
|
utils.go | USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package generator
import (
"bytes"
"fmt"
"log"
"regexp"
"strings"
"text/template"
"github.com/tcncloud/protoc-gen-persist/v5/persist"
descriptor "google.golang.org/protobuf/types/descriptorpb"
"golang.org/x/tools/imports"
)
var reduceEmptyLines = regexp.MustCompile("(\n)+")
// GetGoPath get a go import url under the following formats
// github.com/path/project/dir;package
// github.com/path/project/dir
// project/dir;package
// project/dir
// and will return the path portion from url:
// github.com/path/project/dir
// project/dir
func GetGoPath(url string) string {
idx := strings.LastIndex(url, ";")
switch {
case idx >= 0:
return url[0:idx]
default:
return url
}
}
// GetGoPackage get a go import url under the following formats
// github.com/path/project/dir;package
// github.com/path/project/dir
// project/dir;package
// project/dir
// and will return the package name from url
// package
// dir
// package
// dir
func GetGoPackage(url string) string {
switch {
case strings.Contains(url, ";"):
idx := strings.LastIndex(url, ";")
return url[idx+1:]
case strings.Contains(url, "/"):
idx := strings.LastIndex(url, "/")
return url[idx+1:]
default:
return url
}
}
func FormatCode(filename string, buffer []byte) []byte {
// reduce the empty lines
tmp := reduceEmptyLines.ReplaceAll(buffer, []byte{'\n'})
buf, err := imports.Process(filename, tmp, &imports.Options{FormatOnly: false})
if err != nil {
// logrus.WithError(err).Errorf("Error processing file %s", filename)
log.Panicf("error formatting file %s: %s", filename, err)
return tmp
}
return buf
}
func getGoNamesForTypeMapping(tm *persist.TypeMapping_TypeDescriptor, file *FileStruct) (string, string) {
titledName := ""
name := file.GetGoTypeName(tm.GetProtoTypeName())
if tm.GetProtoType() == descriptor.FieldDescriptorProto_TYPE_MESSAGE {
nameParts := strings.Split(name, ".")
for i, v := range nameParts {
nameParts[i] = strings.Title(v)
}
titledName = strings.Join(nameParts, "")
} else if typ := tm.GetProtoType(); typ != descriptor.FieldDescriptorProto_TYPE_GROUP &&
typ != descriptor.FieldDescriptorProto_TYPE_ENUM {
name, _ = defaultMapping(TmAsField{tm}, file)
titledName = strings.Title(name)
// we never want the slice parts
titledName = strings.Map(func(r rune) rune {
if r == ']' || r == '[' {
return -1
}
return r
}, titledName)
}
if tm.GetProtoLabel() == descriptor.FieldDescriptorProto_LABEL_REPEATED {
titledName += "Slice"
}
return name, titledName
}
func needsExtraStar(tm *persist.TypeMapping_TypeDescriptor) (bool, string) {
if tm.GetProtoType() == descriptor.FieldDescriptorProto_TYPE_MESSAGE {
return true, "*"
}
return false, ""
}
func convertedMsgTypeByProtoName(protoName string, f *FileStruct) string {
return f.GetGoTypeName(protoName)
}
// TmAsField (TypeMappingAsField) Implements GetLabel and GetType, returning results from their GetProto equivalents
type TmAsField struct {
tm *persist.TypeMapping_TypeDescriptor
}
func (t TmAsField) GetLabel() descriptor.FieldDescriptorProto_Label { return t.tm.GetProtoLabel() }
func (t TmAsField) GetType() descriptor.FieldDescriptorProto_Type { return t.tm.GetProtoType() }
func (t TmAsField) | () string { return t.tm.GetProtoTypeName() }
type HasLabelAndType interface {
GetLabel() descriptor.FieldDescriptorProto_Label
GetType() descriptor.FieldDescriptorProto_Type
GetTypeName() string
}
// usualy typ is a *descriptor.FieldDescriptorProto, but it also could be a *TmAsField
func defaultMapping(typ HasLabelAndType, file *FileStruct) (string, error) {
switch typ.GetType() {
case descriptor.FieldDescriptorProto_TYPE_GROUP:
return "__unsupported__type__", fmt.Errorf("one of is unsupported")
//logrus.Fatalf("we currently don't support groups/oneof structures %s", typ.GetName())
case descriptor.FieldDescriptorProto_TYPE_ENUM:
if ret := file.GetGoTypeName(typ.GetTypeName()); ret != "" {
if typ.GetLabel() == descriptor.FieldDescriptorProto_LABEL_REPEATED {
return "[]" + ret, nil
} else {
return ret, nil
}
}
case descriptor.FieldDescriptorProto_TYPE_MESSAGE:
if ret := file.GetGoTypeName(typ.GetTypeName()); ret != "" {
if typ.GetLabel() == descriptor.FieldDescriptorProto_LABEL_REPEATED {
return "[]*" + ret, nil
} else {
return "*" + ret, nil
}
}
case descriptor.FieldDescriptorProto_TYPE_BOOL:
if typ.GetLabel() == descriptor.FieldDescriptorProto_LABEL_REPEATED {
return "[]bool", nil
} else {
return "bool", nil
}
case descriptor.FieldDescriptorProto_TYPE_BYTES:
if typ.GetLabel() == descriptor.FieldDescriptorProto_LABEL_REPEATED {
return "[][]byte", nil
} else {
return "[]byte", nil
}
case descriptor.FieldDescriptorProto_TYPE_DOUBLE:
if typ.GetLabel() == descriptor.FieldDescriptorProto_LABEL_REPEATED {
return "[]float64", nil
} else {
return "float64", nil
}
case descriptor.FieldDescriptorProto_TYPE_FIXED32:
if typ.GetLabel() == descriptor.FieldDescriptorProto_LABEL_REPEATED {
return "[]uint32", nil
} else {
return "uint32", nil
}
case descriptor.FieldDescriptorProto_TYPE_FIXED64:
if typ.GetLabel() == descriptor.FieldDescriptorProto_LABEL_REPEATED {
return "[]uint64", nil
} else {
return "uint64", nil
}
case descriptor.FieldDescriptorProto_TYPE_FLOAT:
if typ.GetLabel() == descriptor.FieldDescriptorProto_LABEL_REPEATED {
return "[]float32", nil
} else {
return "float32", nil
}
case descriptor.FieldDescriptorProto_TYPE_INT32:
if typ.GetLabel() == descriptor.FieldDescriptorProto_LABEL_REPEATED {
return "[]int32", nil
} else {
return "int32", nil
}
case descriptor.FieldDescriptorProto_TYPE_INT64:
if typ.GetLabel() == descriptor.FieldDescriptorProto_LABEL_REPEATED {
return "[]int64", nil
} else {
return "int64", nil
}
case descriptor.FieldDescriptorProto_TYPE_SFIXED32:
if typ.GetLabel() == descriptor.FieldDescriptorProto_LABEL_REPEATED {
return "[]int32", nil
} else {
return "int32", nil
}
case descriptor.FieldDescriptorProto_TYPE_SFIXED64:
if typ.GetLabel() == descriptor.FieldDescriptorProto_LABEL_REPEATED {
return "[]int64", nil
} else {
return "int64", nil
}
case descriptor.FieldDescriptorProto_TYPE_SINT32:
if typ.GetLabel() == descriptor.FieldDescriptorProto_LABEL_REPEATED {
return "[]int32", nil
} else {
return "int32", nil
}
case descriptor.FieldDescriptorProto_TYPE_SINT64:
if typ.GetLabel() == descriptor.FieldDescriptorProto_LABEL_REPEATED {
return "[]int64", nil
} else {
return "int64", nil
}
case descriptor.FieldDescriptorProto_TYPE_STRING:
if typ.GetLabel() == descriptor.FieldDescriptorProto_LABEL_REPEATED {
return "[]string", nil
} else {
return "string", nil
}
case descriptor.FieldDescriptorProto_TYPE_UINT32:
if typ.GetLabel() == descriptor.FieldDescriptorProto_LABEL_REPEATED {
return "[]uint32", nil
} else {
return "uint32", nil
}
case descriptor.FieldDescriptorProto_TYPE_UINT64:
if typ.GetLabel() == descriptor.FieldDescriptorProto_LABEL_REPEATED {
return "[]uint64", nil
} else {
return "uint64", nil
}
}
return "__type__", fmt.Errorf("unknown type")
}
type Printer struct {
str string
}
func P(args ...string) string {
printer := &Printer{}
printer.Q(args...)
return printer.String()
}
func (p *Printer) P(formatString string, args ...interface{}) {
p.str += fmt.Sprintf(formatString, args...)
}
func (p *Printer) Q(args ...string) {
for _, arg := range args {
p.str += arg
}
}
func (p *Printer) PA(formatStrings []string, args ...interface{}) {
s := strings.Join(formatStrings, "")
p.P(s, args...)
}
func (p *Printer) | GetTypeName | identifier_name |
utils.go | USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package generator
import (
"bytes"
"fmt"
"log"
"regexp"
"strings"
"text/template"
"github.com/tcncloud/protoc-gen-persist/v5/persist"
descriptor "google.golang.org/protobuf/types/descriptorpb"
"golang.org/x/tools/imports"
)
var reduceEmptyLines = regexp.MustCompile("(\n)+")
// GetGoPath get a go import url under the following formats
// github.com/path/project/dir;package
// github.com/path/project/dir
// project/dir;package
// project/dir
// and will return the path portion from url:
// github.com/path/project/dir
// project/dir
func GetGoPath(url string) string {
idx := strings.LastIndex(url, ";")
switch {
case idx >= 0:
return url[0:idx]
default:
return url
}
}
// GetGoPackage get a go import url under the following formats
// github.com/path/project/dir;package
// github.com/path/project/dir
// project/dir;package
// project/dir
// and will return the package name from url
// package
// dir
// package
// dir
func GetGoPackage(url string) string {
switch {
case strings.Contains(url, ";"):
idx := strings.LastIndex(url, ";")
return url[idx+1:]
case strings.Contains(url, "/"):
idx := strings.LastIndex(url, "/")
return url[idx+1:]
default:
return url
}
}
func FormatCode(filename string, buffer []byte) []byte {
// reduce the empty lines
tmp := reduceEmptyLines.ReplaceAll(buffer, []byte{'\n'})
buf, err := imports.Process(filename, tmp, &imports.Options{FormatOnly: false})
if err != nil {
// logrus.WithError(err).Errorf("Error processing file %s", filename)
log.Panicf("error formatting file %s: %s", filename, err)
return tmp
}
return buf
}
func getGoNamesForTypeMapping(tm *persist.TypeMapping_TypeDescriptor, file *FileStruct) (string, string) {
titledName := ""
name := file.GetGoTypeName(tm.GetProtoTypeName())
if tm.GetProtoType() == descriptor.FieldDescriptorProto_TYPE_MESSAGE {
nameParts := strings.Split(name, ".")
for i, v := range nameParts {
nameParts[i] = strings.Title(v)
}
titledName = strings.Join(nameParts, "")
} else if typ := tm.GetProtoType(); typ != descriptor.FieldDescriptorProto_TYPE_GROUP &&
typ != descriptor.FieldDescriptorProto_TYPE_ENUM {
name, _ = defaultMapping(TmAsField{tm}, file)
titledName = strings.Title(name)
// we never want the slice parts
titledName = strings.Map(func(r rune) rune {
if r == ']' || r == '[' {
return -1
}
return r
}, titledName)
}
if tm.GetProtoLabel() == descriptor.FieldDescriptorProto_LABEL_REPEATED {
titledName += "Slice"
}
return name, titledName
}
func needsExtraStar(tm *persist.TypeMapping_TypeDescriptor) (bool, string) {
if tm.GetProtoType() == descriptor.FieldDescriptorProto_TYPE_MESSAGE {
return true, "*"
}
return false, ""
}
func convertedMsgTypeByProtoName(protoName string, f *FileStruct) string |
// TmAsField (TypeMappingAsField) Implements GetLabel and GetType, returning results from their GetProto equivalents
type TmAsField struct {
tm *persist.TypeMapping_TypeDescriptor
}
func (t TmAsField) GetLabel() descriptor.FieldDescriptorProto_Label { return t.tm.GetProtoLabel() }
func (t TmAsField) GetType() descriptor.FieldDescriptorProto_Type { return t.tm.GetProtoType() }
func (t TmAsField) GetTypeName() string { return t.tm.GetProtoTypeName() }
type HasLabelAndType interface {
GetLabel() descriptor.FieldDescriptorProto_Label
GetType() descriptor.FieldDescriptorProto_Type
GetTypeName() string
}
// usualy typ is a *descriptor.FieldDescriptorProto, but it also could be a *TmAsField
func defaultMapping(typ HasLabelAndType, file *FileStruct) (string, error) {
switch typ.GetType() {
case descriptor.FieldDescriptorProto_TYPE_GROUP:
return "__unsupported__type__", fmt.Errorf("one of is unsupported")
//logrus.Fatalf("we currently don't support groups/oneof structures %s", typ.GetName())
case descriptor.FieldDescriptorProto_TYPE_ENUM:
if ret := file.GetGoTypeName(typ.GetTypeName()); ret != "" {
if typ.GetLabel() == descriptor.FieldDescriptorProto_LABEL_REPEATED {
return "[]" + ret, nil
} else {
return ret, nil
}
}
case descriptor.FieldDescriptorProto_TYPE_MESSAGE:
if ret := file.GetGoTypeName(typ.GetTypeName()); ret != "" {
if typ.GetLabel() == descriptor.FieldDescriptorProto_LABEL_REPEATED {
return "[]*" + ret, nil
} else {
return "*" + ret, nil
}
}
case descriptor.FieldDescriptorProto_TYPE_BOOL:
if typ.GetLabel() == descriptor.FieldDescriptorProto_LABEL_REPEATED {
return "[]bool", nil
} else {
return "bool", nil
}
case descriptor.FieldDescriptorProto_TYPE_BYTES:
if typ.GetLabel() == descriptor.FieldDescriptorProto_LABEL_REPEATED {
return "[][]byte", nil
} else {
return "[]byte", nil
}
case descriptor.FieldDescriptorProto_TYPE_DOUBLE:
if typ.GetLabel() == descriptor.FieldDescriptorProto_LABEL_REPEATED {
return "[]float64", nil
} else {
return "float64", nil
}
case descriptor.FieldDescriptorProto_TYPE_FIXED32:
if typ.GetLabel() == descriptor.FieldDescriptorProto_LABEL_REPEATED {
return "[]uint32", nil
} else {
return "uint32", nil
}
case descriptor.FieldDescriptorProto_TYPE_FIXED64:
if typ.GetLabel() == descriptor.FieldDescriptorProto_LABEL_REPEATED {
return "[]uint64", nil
} else {
return "uint64", nil
}
case descriptor.FieldDescriptorProto_TYPE_FLOAT:
if typ.GetLabel() == descriptor.FieldDescriptorProto_LABEL_REPEATED {
return "[]float32", nil
} else {
return "float32", nil
}
case descriptor.FieldDescriptorProto_TYPE_INT32:
if typ.GetLabel() == descriptor.FieldDescriptorProto_LABEL_REPEATED {
return "[]int32", nil
} else {
return "int32", nil
}
case descriptor.FieldDescriptorProto_TYPE_INT64:
if typ.GetLabel() == descriptor.FieldDescriptorProto_LABEL_REPEATED {
return "[]int64", nil
} else {
return "int64", nil
}
case descriptor.FieldDescriptorProto_TYPE_SFIXED32:
if typ.GetLabel() == descriptor.FieldDescriptorProto_LABEL_REPEATED {
return "[]int32", nil
} else {
return "int32", nil
}
case descriptor.FieldDescriptorProto_TYPE_SFIXED64:
if typ.GetLabel() == descriptor.FieldDescriptorProto_LABEL_REPEATED {
return "[]int64", nil
} else {
return "int64", nil
}
case descriptor.FieldDescriptorProto_TYPE_SINT32:
if typ.GetLabel() == descriptor.FieldDescriptorProto_LABEL_REPEATED {
return "[]int32", nil
} else {
return "int32", nil
}
case descriptor.FieldDescriptorProto_TYPE_SINT64:
if typ.GetLabel() == descriptor.FieldDescriptorProto_LABEL_REPEATED {
return "[]int64", nil
} else {
return "int64", nil
}
case descriptor.FieldDescriptorProto_TYPE_STRING:
if typ.GetLabel() == descriptor.FieldDescriptorProto_LABEL_REPEATED {
return "[]string", nil
} else {
return "string", nil
}
case descriptor.FieldDescriptorProto_TYPE_UINT32:
if typ.GetLabel() == descriptor.FieldDescriptorProto_LABEL_REPEATED {
return "[]uint32", nil
} else {
return "uint32", nil
}
case descriptor.FieldDescriptorProto_TYPE_UINT64:
if typ.GetLabel() == descriptor.FieldDescriptorProto_LABEL_REPEATED {
return "[]uint64", nil
} else {
return "uint64", nil
}
}
return "__type__", fmt.Errorf("unknown type")
}
type Printer struct {
str string
}
func P(args ...string) string {
printer := &Printer{}
printer.Q(args...)
return printer.String()
}
func (p *Printer) P(formatString string, args ...interface{}) {
p.str += fmt.Sprintf(formatString, args...)
}
func (p *Printer) Q(args ...string) {
for _, arg := range args {
p.str += arg
}
}
func (p *Printer) PA(formatStrings []string, args ...interface{}) {
s := strings.Join(formatStrings, "")
p.P(s, args...)
}
func (p *Printer | {
return f.GetGoTypeName(protoName)
} | identifier_body |
utils.go | // * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of TCN Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package generator
import (
"bytes"
"fmt"
"log"
"regexp"
"strings"
"text/template"
"github.com/tcncloud/protoc-gen-persist/v5/persist"
descriptor "google.golang.org/protobuf/types/descriptorpb"
"golang.org/x/tools/imports"
)
var reduceEmptyLines = regexp.MustCompile("(\n)+")
// GetGoPath get a go import url under the following formats
// github.com/path/project/dir;package
// github.com/path/project/dir
// project/dir;package
// project/dir
// and will return the path portion from url:
// github.com/path/project/dir
// project/dir
func GetGoPath(url string) string {
idx := strings.LastIndex(url, ";")
switch {
case idx >= 0:
return url[0:idx]
default:
return url
}
}
// GetGoPackage get a go import url under the following formats
// github.com/path/project/dir;package
// github.com/path/project/dir
// project/dir;package
// project/dir
// and will return the package name from url
// package
// dir
// package
// dir
func GetGoPackage(url string) string {
switch {
case strings.Contains(url, ";"):
idx := strings.LastIndex(url, ";")
return url[idx+1:]
case strings.Contains(url, "/"):
idx := strings.LastIndex(url, "/")
return url[idx+1:]
default:
return url
}
}
func FormatCode(filename string, buffer []byte) []byte {
// reduce the empty lines
tmp := reduceEmptyLines.ReplaceAll(buffer, []byte{'\n'})
buf, err := imports.Process(filename, tmp, &imports.Options{FormatOnly: false})
if err != nil {
// logrus.WithError(err).Errorf("Error processing file %s", filename)
log.Panicf("error formatting file %s: %s", filename, err)
return tmp
}
return buf
}
func getGoNamesForTypeMapping(tm *persist.TypeMapping_TypeDescriptor, file *FileStruct) (string, string) {
titledName := ""
name := file.GetGoTypeName(tm.GetProtoTypeName())
if tm.GetProtoType() == descriptor.FieldDescriptorProto_TYPE_MESSAGE {
nameParts := strings.Split(name, ".")
for i, v := range nameParts {
nameParts[i] = strings.Title(v)
}
titledName = strings.Join(nameParts, "")
} else if typ := tm.GetProtoType(); typ != descriptor.FieldDescriptorProto_TYPE_GROUP &&
typ != descriptor.FieldDescriptorProto_TYPE_ENUM {
name, _ = defaultMapping(TmAsField{tm}, file)
titledName = strings.Title(name)
// we never want the slice parts
titledName = strings.Map(func(r rune) rune {
if r == ']' || r == '[' {
return -1
}
return r
}, titledName)
}
if tm.GetProtoLabel() == descriptor.FieldDescriptorProto_LABEL_REPEATED {
titledName += "Slice"
}
return name, titledName
}
func needsExtraStar(tm *persist.TypeMapping_TypeDescriptor) (bool, string) {
if tm.GetProtoType() == descriptor.FieldDescriptorProto_TYPE_MESSAGE {
return true, "*"
}
return false, ""
}
func convertedMsgTypeByProtoName(protoName string, f *FileStruct) string {
return f.GetGoTypeName(protoName)
}
// TmAsField (TypeMappingAsField) Implements GetLabel and GetType, returning results from their GetProto equivalents
type TmAsField struct {
tm *persist.TypeMapping_TypeDescriptor
}
func (t TmAsField) GetLabel() descriptor.FieldDescriptorProto_Label { return t.tm.GetProtoLabel() }
func (t TmAsField) GetType() descriptor.FieldDescriptorProto_Type { return t.tm.GetProtoType() }
func (t TmAsField) GetTypeName() string { return t.tm.GetProtoTypeName() }
type HasLabelAndType interface {
GetLabel() descriptor.FieldDescriptorProto_Label
GetType() descriptor.FieldDescriptorProto_Type
GetTypeName() string
}
// usualy typ is a *descriptor.FieldDescriptorProto, but it also could be a *TmAsField
func defaultMapping(typ HasLabelAndType, file *FileStruct) (string, error) {
switch typ.GetType() {
case descriptor.FieldDescriptorProto_TYPE_GROUP:
return "__unsupported__type__", fmt.Errorf("one of is unsupported")
//logrus.Fatalf("we currently don't support groups/oneof structures %s", typ.GetName())
case descriptor.FieldDescriptorProto_TYPE_ENUM:
if ret := file.GetGoTypeName(typ.GetTypeName()); ret != "" {
if typ.GetLabel() == descriptor.FieldDescriptorProto_LABEL_REPEATED {
return "[]" + ret, nil
} else {
return ret, nil
}
}
case descriptor.FieldDescriptorProto_TYPE_MESSAGE:
if ret := file.GetGoTypeName(typ.GetTypeName()); ret != "" {
if typ.GetLabel() == descriptor.FieldDescriptorProto_LABEL_REPEATED {
return "[]*" + ret, nil
} else {
return "*" + ret, nil
}
}
case descriptor.FieldDescriptorProto_TYPE_BOOL:
if typ.GetLabel() == descriptor.FieldDescriptorProto_LABEL_REPEATED {
return "[]bool", nil
} else {
return "bool", nil
}
case descriptor.FieldDescriptorProto_TYPE_BYTES:
if typ.GetLabel() == descriptor.FieldDescriptorProto_LABEL_REPEATED {
return "[][]byte", nil
} else {
return "[]byte", nil
}
case descriptor.FieldDescriptorProto_TYPE_DOUBLE:
if typ.GetLabel() == descriptor.FieldDescriptorProto_LABEL_REPEATED {
return "[]float64", nil
} else {
return "float64", nil
}
case descriptor.FieldDescriptorProto_TYPE_FIXED32:
if typ.GetLabel() == descriptor.FieldDescriptorProto_LABEL_REPEATED {
return "[]uint32", nil
} else {
return "uint32", nil
}
case descriptor.FieldDescriptorProto_TYPE_FIXED64:
if typ.GetLabel() == descriptor.FieldDescriptorProto_LABEL_REPEATED {
return "[]uint64", nil
} else {
return "uint64", nil
}
case descriptor.FieldDescriptorProto_TYPE_FLOAT:
if typ.GetLabel() == descriptor.FieldDescriptorProto_LABEL_REPEATED {
return "[]float32", nil
} else {
return "float32", nil
}
case descriptor.FieldDescriptorProto_TYPE_INT32:
if typ.GetLabel() == descriptor.FieldDescriptorProto_LABEL_REPEATED {
return "[]int32", nil
} else {
return "int32", nil
}
case descriptor.FieldDescriptorProto_TYPE_INT64:
if typ.GetLabel() == descriptor.FieldDescriptorProto_LABEL_REPEATED {
return "[]int64", nil
} else {
return "int64", nil
}
case descriptor.FieldDescriptorProto_TYPE_SFIXED32:
if typ.GetLabel() == descriptor.FieldDescriptorProto_LABEL_REPEATED {
return "[]int32", nil
} else {
return "int32", nil
}
case descriptor.FieldDescriptorProto_TYPE_SFIXED64:
if typ.GetLabel() == descriptor.FieldDescriptorProto_LABEL_REPEATED {
return "[]int64", nil
} else {
return "int64", nil
}
case descriptor.FieldDescriptorProto_TYPE_SINT32:
if typ.GetLabel() == descriptor.FieldDescriptorProto_LABEL_REPEATED {
return "[]int32", nil
} else {
return "int32", nil
}
case descriptor.FieldDescriptorProto_TYPE_SINT64:
if typ.GetLabel() == descriptor.FieldDescriptorProto_LABEL_REPEATED {
return "[]int64", nil
} else {
return "int64", nil
}
case descriptor.FieldDescriptorProto_TYPE_STRING:
if typ.GetLabel() == descriptor.FieldDescriptorProto_LABEL_REPEATED {
return "[]string", nil
} else {
return "string", nil
| random_line_split |
||
utils.go | USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package generator
import (
"bytes"
"fmt"
"log"
"regexp"
"strings"
"text/template"
"github.com/tcncloud/protoc-gen-persist/v5/persist"
descriptor "google.golang.org/protobuf/types/descriptorpb"
"golang.org/x/tools/imports"
)
var reduceEmptyLines = regexp.MustCompile("(\n)+")
// GetGoPath get a go import url under the following formats
// github.com/path/project/dir;package
// github.com/path/project/dir
// project/dir;package
// project/dir
// and will return the path portion from url:
// github.com/path/project/dir
// project/dir
func GetGoPath(url string) string {
idx := strings.LastIndex(url, ";")
switch {
case idx >= 0:
return url[0:idx]
default:
return url
}
}
// GetGoPackage get a go import url under the following formats
// github.com/path/project/dir;package
// github.com/path/project/dir
// project/dir;package
// project/dir
// and will return the package name from url
// package
// dir
// package
// dir
func GetGoPackage(url string) string {
switch {
case strings.Contains(url, ";"):
idx := strings.LastIndex(url, ";")
return url[idx+1:]
case strings.Contains(url, "/"):
idx := strings.LastIndex(url, "/")
return url[idx+1:]
default:
return url
}
}
func FormatCode(filename string, buffer []byte) []byte {
// reduce the empty lines
tmp := reduceEmptyLines.ReplaceAll(buffer, []byte{'\n'})
buf, err := imports.Process(filename, tmp, &imports.Options{FormatOnly: false})
if err != nil {
// logrus.WithError(err).Errorf("Error processing file %s", filename)
log.Panicf("error formatting file %s: %s", filename, err)
return tmp
}
return buf
}
func getGoNamesForTypeMapping(tm *persist.TypeMapping_TypeDescriptor, file *FileStruct) (string, string) {
titledName := ""
name := file.GetGoTypeName(tm.GetProtoTypeName())
if tm.GetProtoType() == descriptor.FieldDescriptorProto_TYPE_MESSAGE {
nameParts := strings.Split(name, ".")
for i, v := range nameParts {
nameParts[i] = strings.Title(v)
}
titledName = strings.Join(nameParts, "")
} else if typ := tm.GetProtoType(); typ != descriptor.FieldDescriptorProto_TYPE_GROUP &&
typ != descriptor.FieldDescriptorProto_TYPE_ENUM {
name, _ = defaultMapping(TmAsField{tm}, file)
titledName = strings.Title(name)
// we never want the slice parts
titledName = strings.Map(func(r rune) rune {
if r == ']' || r == '[' {
return -1
}
return r
}, titledName)
}
if tm.GetProtoLabel() == descriptor.FieldDescriptorProto_LABEL_REPEATED {
titledName += "Slice"
}
return name, titledName
}
func needsExtraStar(tm *persist.TypeMapping_TypeDescriptor) (bool, string) {
if tm.GetProtoType() == descriptor.FieldDescriptorProto_TYPE_MESSAGE {
return true, "*"
}
return false, ""
}
func convertedMsgTypeByProtoName(protoName string, f *FileStruct) string {
return f.GetGoTypeName(protoName)
}
// TmAsField (TypeMappingAsField) Implements GetLabel and GetType, returning results from their GetProto equivalents
type TmAsField struct {
tm *persist.TypeMapping_TypeDescriptor
}
func (t TmAsField) GetLabel() descriptor.FieldDescriptorProto_Label { return t.tm.GetProtoLabel() }
func (t TmAsField) GetType() descriptor.FieldDescriptorProto_Type { return t.tm.GetProtoType() }
func (t TmAsField) GetTypeName() string { return t.tm.GetProtoTypeName() }
type HasLabelAndType interface {
GetLabel() descriptor.FieldDescriptorProto_Label
GetType() descriptor.FieldDescriptorProto_Type
GetTypeName() string
}
// usualy typ is a *descriptor.FieldDescriptorProto, but it also could be a *TmAsField
func defaultMapping(typ HasLabelAndType, file *FileStruct) (string, error) {
switch typ.GetType() {
case descriptor.FieldDescriptorProto_TYPE_GROUP:
return "__unsupported__type__", fmt.Errorf("one of is unsupported")
//logrus.Fatalf("we currently don't support groups/oneof structures %s", typ.GetName())
case descriptor.FieldDescriptorProto_TYPE_ENUM:
if ret := file.GetGoTypeName(typ.GetTypeName()); ret != "" {
if typ.GetLabel() == descriptor.FieldDescriptorProto_LABEL_REPEATED {
return "[]" + ret, nil
} else {
return ret, nil
}
}
case descriptor.FieldDescriptorProto_TYPE_MESSAGE:
if ret := file.GetGoTypeName(typ.GetTypeName()); ret != "" {
if typ.GetLabel() == descriptor.FieldDescriptorProto_LABEL_REPEATED {
return "[]*" + ret, nil
} else {
return "*" + ret, nil
}
}
case descriptor.FieldDescriptorProto_TYPE_BOOL:
if typ.GetLabel() == descriptor.FieldDescriptorProto_LABEL_REPEATED {
return "[]bool", nil
} else {
return "bool", nil
}
case descriptor.FieldDescriptorProto_TYPE_BYTES:
if typ.GetLabel() == descriptor.FieldDescriptorProto_LABEL_REPEATED | else {
return "[]byte", nil
}
case descriptor.FieldDescriptorProto_TYPE_DOUBLE:
if typ.GetLabel() == descriptor.FieldDescriptorProto_LABEL_REPEATED {
return "[]float64", nil
} else {
return "float64", nil
}
case descriptor.FieldDescriptorProto_TYPE_FIXED32:
if typ.GetLabel() == descriptor.FieldDescriptorProto_LABEL_REPEATED {
return "[]uint32", nil
} else {
return "uint32", nil
}
case descriptor.FieldDescriptorProto_TYPE_FIXED64:
if typ.GetLabel() == descriptor.FieldDescriptorProto_LABEL_REPEATED {
return "[]uint64", nil
} else {
return "uint64", nil
}
case descriptor.FieldDescriptorProto_TYPE_FLOAT:
if typ.GetLabel() == descriptor.FieldDescriptorProto_LABEL_REPEATED {
return "[]float32", nil
} else {
return "float32", nil
}
case descriptor.FieldDescriptorProto_TYPE_INT32:
if typ.GetLabel() == descriptor.FieldDescriptorProto_LABEL_REPEATED {
return "[]int32", nil
} else {
return "int32", nil
}
case descriptor.FieldDescriptorProto_TYPE_INT64:
if typ.GetLabel() == descriptor.FieldDescriptorProto_LABEL_REPEATED {
return "[]int64", nil
} else {
return "int64", nil
}
case descriptor.FieldDescriptorProto_TYPE_SFIXED32:
if typ.GetLabel() == descriptor.FieldDescriptorProto_LABEL_REPEATED {
return "[]int32", nil
} else {
return "int32", nil
}
case descriptor.FieldDescriptorProto_TYPE_SFIXED64:
if typ.GetLabel() == descriptor.FieldDescriptorProto_LABEL_REPEATED {
return "[]int64", nil
} else {
return "int64", nil
}
case descriptor.FieldDescriptorProto_TYPE_SINT32:
if typ.GetLabel() == descriptor.FieldDescriptorProto_LABEL_REPEATED {
return "[]int32", nil
} else {
return "int32", nil
}
case descriptor.FieldDescriptorProto_TYPE_SINT64:
if typ.GetLabel() == descriptor.FieldDescriptorProto_LABEL_REPEATED {
return "[]int64", nil
} else {
return "int64", nil
}
case descriptor.FieldDescriptorProto_TYPE_STRING:
if typ.GetLabel() == descriptor.FieldDescriptorProto_LABEL_REPEATED {
return "[]string", nil
} else {
return "string", nil
}
case descriptor.FieldDescriptorProto_TYPE_UINT32:
if typ.GetLabel() == descriptor.FieldDescriptorProto_LABEL_REPEATED {
return "[]uint32", nil
} else {
return "uint32", nil
}
case descriptor.FieldDescriptorProto_TYPE_UINT64:
if typ.GetLabel() == descriptor.FieldDescriptorProto_LABEL_REPEATED {
return "[]uint64", nil
} else {
return "uint64", nil
}
}
return "__type__", fmt.Errorf("unknown type")
}
type Printer struct {
str string
}
func P(args ...string) string {
printer := &Printer{}
printer.Q(args...)
return printer.String()
}
func (p *Printer) P(formatString string, args ...interface{}) {
p.str += fmt.Sprintf(formatString, args...)
}
func (p *Printer) Q(args ...string) {
for _, arg := range args {
p.str += arg
}
}
func (p *Printer) PA(formatStrings []string, args ...interface{}) {
s := strings.Join(formatStrings, "")
p.P(s, args...)
}
func (p *Printer) | {
return "[][]byte", nil
} | conditional_block |
feature_tracking.py | x_curr = i
y_curr = j
else:
print("Please enter valid similarity poisson_mode")
return x_curr, y_curr
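# Illustrative sketch only (not called anywhere in this file): one simple similarity
# measure that compute_similarity can be based on is the sum of squared differences
# (SSD) between the stored descriptor block and a candidate block, where a smaller
# score means a better match. The function below is an assumption for illustration,
# not part of the original pipeline.
def ssd_similarity(descriptor_a, descriptor_b):
    # Both blocks must have the same shape; cast to float to avoid uint8 overflow.
    diff = descriptor_a.astype(np.float64) - descriptor_b.astype(np.float64)
    return np.sum(diff * diff)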
def find_points(prev_frame, curr_frame, prev_frame_points, detector, similarity_mode):
global curr_points, display_keypoints
curr_points = []
display_keypoints = []
for idx, (x_prev, y_prev) in enumerate(prev_frame_points):
# Create block of image intensities
# using neighboring pixels around each
# previously identified corner point
#20 works for bed scene
descriptor_offset = 20
search_offset = .5
# Get bounds of block
top, bottom, left, right = get_bounds(x_prev, y_prev, descriptor_offset, 1)
# Adjust the bounds
# top, bottom, left, right = adjust_bounds(top,bottom,left,right,prev_frame.shape[0], prev_frame.shape[1])
# Get descriptor for previous image
prev_frame_intensities = prev_frame[top:bottom, left:right]
prev_frame_descriptor = custom_descriptor(prev_frame_intensities)
print("SHAPE",prev_frame_descriptor.shape)
# Define bounds of search area
top, bottom, left, right = get_bounds(x_prev, y_prev, descriptor_offset, search_offset)
# Adjust the bounds
# top, bottom, left, right = adjust_bounds(top,bottom,left,right, prev_frame.shape[0], prev_frame.shape[1])
# Get search window
search_window = curr_frame[top:bottom, left:right]
# Compute keypoints
keypoints = None
if detector == 'harris':
harris_corners = compute_harris(search_window)
# Threshold harris corners
keypoints = np.argwhere(harris_corners > .7 * harris_corners.max())
# Recall numpy arrays use y,x indexing
keypoints = np.flip(keypoints, axis=1)
elif detector == 'orb':
keypoints = compute_orb(search_window)
if len(keypoints) == 0:
print("No keypoints could be found near ({},{})".format(x_prev, y_prev))
continue
keypoints_adjusted = np.zeros_like(keypoints)
keypoints_adjusted[:, 0] = x_prev - int(search_offset * descriptor_offset) + keypoints[:, 0]
keypoints_adjusted[:, 1] = y_prev - int(search_offset * descriptor_offset) + keypoints[:, 1]
# Visualize all keypoints
display_keypoints.extend(keypoints_adjusted.tolist())
# Slide window throughout search area of size equal
# to feature descriptor block
x_curr, y_curr = compute_similarity(x_prev, y_prev, curr_frame, keypoints_adjusted, descriptor_offset,
prev_frame_descriptor, similarity_mode)
curr_points.append([x_curr, y_curr])
return curr_points
def compute_harris(window):
gray_frame = cv2.cvtColor(window, cv2.COLOR_BGR2GRAY)
# result is dilated for marking the corners, not important
harris_frame = cv2.cornerHarris(gray_frame, 5, 3, 0.04)
harris_frame = cv2.dilate(harris_frame, None)
return harris_frame
def compute_orb(window):
detector = cv2.ORB_create(edgeThreshold=0)
keypoints_ = detector.detect(window, None) # list of keypoint objects, get raw indicies
keypoints = []
for kp in keypoints_:
x, y = kp.pt
keypoints.append([int(x), int(y)])
print("Number of ORB keypoins found: {}".format(len(keypoints)))
return np.array(keypoints)
def draw_point(frame, x, y, color, radius):
cv2.circle(frame, (x, y), radius, color, -1)
def | (frame, points, color, radius):
for (x, y) in points:
draw_point(frame, x, y, color, radius)
current_frame_gui = None
clicked_points = []
display_keypoints = []
class Modes:
MOVIE = 1
IMAGE = 2
OTHER = 3
# POINTS SHOULD BE ADDED IN THE FOLLOWING ORDER:
#
# TOP LEFT, TOP RIGHT, BOTTOM LEFT, BOTTOM RIGHT
def click(event, x, y, flags, param):
if event == cv2.EVENT_LBUTTONDOWN:
clicked_points.append((x, y))
draw_point(current_frame_gui, x, y, (0, 255, 0), 5)
def apply_point_offset(points):
offset = 20
points_offset = []
# top left
x, y = points[0]
x = x - offset
y = y - offset
points_offset.append([x, y])
# top right
x, y = points[1]
x = x + offset
y = y - offset
points_offset.append([x, y])
# bottom left
x, y = points[2]
x = x - offset
y = y + offset
points_offset.append([x, y])
# bottom rightharris-laplace
x, y = points[3]
x = x + offset
y = y + offset
points_offset.append([x, y])
return points_offset
def create_text_bubble(points, frame, bubble_text_queue, bubble_text_bin):
# Height and width
H = frame.shape[0]
W = frame.shape[1]
# Find centroid of points
c_x = 0
c_y = 0
for p in points:
c_x += p[0]
c_y += p[1]
c_x = c_x//len(points)
c_y = c_y//len(points)
cv2.circle(frame, (c_x,c_y), 20, (255,0,0), thickness=-1, lineType=8, shift=0)
# Ellipse size
ellipse_vertical_offset = -140
ellipse_horizontal_offset = -70
ellipse_major_axis_size = 200
ellipse_minor_axis_size = 100
# Centroid offset
c_x += ellipse_horizontal_offset
c_y += ellipse_vertical_offset
# Adjust bounds (if needed)
if c_x - ellipse_major_axis_size < 0:
c_x = ellipse_major_axis_size
elif c_x + ellipse_major_axis_size > W:
c_x = W - ellipse_major_axis_size
if c_y - ellipse_minor_axis_size < 0:
c_y = ellipse_minor_axis_size
elif c_y + ellipse_minor_axis_size > H:
c_y = H - ellipse_minor_axis_size
# ###### MANUALLY OVERRIDE CENTROID LOCATION
# # i.e. no tracking, text stays in fixed location
# c_x = 400
# c_y = 700
# Create overlay
overlay = frame.copy()
# https://docs.opencv.org/4.1.2/d6/d6e/group__imgproc__draw.html
cv2.circle(overlay, (c_x, c_y), 20, (0, 0, 255), -1)
# Change speaker bubble color based on who is speaking/texting
speaker = bubble_text_queue[bubble_text_bin][0]
message = bubble_text_queue[bubble_text_bin][1]
bubble_color = (255, 255, 51)
if(speaker == "John"):
bubble_color = (100,0,255)
cv2.ellipse(overlay, (c_x, c_y), (ellipse_major_axis_size, ellipse_minor_axis_size), 0, 0, 360, bubble_color, -1)
cv2.ellipse(overlay, (c_x, c_y), (ellipse_major_axis_size, ellipse_minor_axis_size), 0, 0, 360, (0, 0, 255), 4)
# https://stackoverflow.com/questions/27647424/opencv-puttext-new-line-character
text = "{}:\n{}".format(speaker,message)
text_vertical_offset = int(-ellipse_minor_axis_size * .55)
text_horizontal_offset = int(-ellipse_major_axis_size * .6)
fontFace = cv2.FONT_HERSHEY_SIMPLEX
fontScale = .7
thickness = 1
textHeight = cv2.getTextSize(text,fontFace,fontScale,thickness)[0][1]
# For simulating newlines
dy = textHeight + 10
# Insert text
c_x += text_horizontal_offset
c_y += text_vertical_offset
for i, line in enumerate(text.split('\n')):
cv2.putText(overlay, line, (c_x, c_y + i * dy), fontFace, fontScale, thickness)
# alpha blend overlay with frame
alpha = 0.8
frame = alpha * overlay + (1-alpha) * frame
return frame
def create_warp_comosite(composite_image,curr_frame_points_offset,current_frame):
# Create point correspondences for perspective transformation
curr_frame_points_offset_array = np.array(curr_frame_points_offset).astype(np.float32)
input_image_boundary_points_array = np.array(
[(0, 0), (composite_image.shape[1], 0 | draw_points | identifier_name |
feature_tracking.py | x_curr = i
y_curr = j
else:
print("Please enter valid similarity poisson_mode")
return x_curr, y_curr
def find_points(prev_frame, curr_frame, prev_frame_points, detector, similarity_mode):
| prev_frame_descriptor = custom_descriptor(prev_frame_intensities)
print("SHAPE",prev_frame_descriptor.shape)
# Define bounds of search area
top, bottom, left, right = get_bounds(x_prev, y_prev, descriptor_offset, search_offset)
# Adjust the bounds
# top, bottom, left, right = adjust_bounds(top,bottom,left,right, prev_frame.shape[0], prev_frame.shape[1])
# Get search window
search_window = curr_frame[top:bottom, left:right]
# Compute keypoints
keypoints = None
if detector == 'harris':
harris_corners = compute_harris(search_window)
# Threshold harris corners
keypoints = np.argwhere(harris_corners > .7 * harris_corners.max())
# Recall numpy arrays use y,x indexing
keypoints = np.flip(keypoints, axis=1)
elif detector == 'orb':
keypoints = compute_orb(search_window)
if len(keypoints) == 0:
print("No keypoints could be found near ({},{})".format(x_prev, y_prev))
continue
keypoints_adjusted = np.zeros_like(keypoints)
keypoints_adjusted[:, 0] = x_prev - int(search_offset * descriptor_offset) + keypoints[:, 0]
keypoints_adjusted[:, 1] = y_prev - int(search_offset * descriptor_offset) + keypoints[:, 1]
# Visualize all keypoints
display_keypoints.extend(keypoints_adjusted.tolist())
# Slide window throughout search area of size equal
# to feature descriptor block
x_curr, y_curr = compute_similarity(x_prev, y_prev, curr_frame, keypoints_adjusted, descriptor_offset,
prev_frame_descriptor, similarity_mode)
curr_points.append([x_curr, y_curr])
return curr_points
def compute_harris(window):
gray_frame = cv2.cvtColor(window, cv2.COLOR_BGR2GRAY)
# result is dilated for marking the corners, not important
harris_frame = cv2.cornerHarris(gray_frame, 5, 3, 0.04)
harris_frame = cv2.dilate(harris_frame, None)
return harris_frame
def compute_orb(window):
detector = cv2.ORB_create(edgeThreshold=0)
keypoints_ = detector.detect(window, None) # list of keypoint objects, get raw indicies
keypoints = []
for kp in keypoints_:
x, y = kp.pt
keypoints.append([int(x), int(y)])
print("Number of ORB keypoins found: {}".format(len(keypoints)))
return np.array(keypoints)
def draw_point(frame, x, y, color, radius):
cv2.circle(frame, (x, y), radius, color, -1)
def draw_points(frame, points, color, radius):
for (x, y) in points:
draw_point(frame, x, y, color, radius)
current_frame_gui = None
clicked_points = []
display_keypoints = []
class Modes:
MOVIE = 1
IMAGE = 2
OTHER = 3
# POINTS SHOULD BE ADDED IN THE FOLLOWING ORDER:
#
# TOP LEFT, TOP RIGHT, BOTTOM LEFT, BOTTOM RIGHT
def click(event, x, y, flags, param):
if event == cv2.EVENT_LBUTTONDOWN:
clicked_points.append((x, y))
draw_point(current_frame_gui, x, y, (0, 255, 0), 5)
def apply_point_offset(points):
offset = 20
points_offset = []
# top left
x, y = points[0]
x = x - offset
y = y - offset
points_offset.append([x, y])
# top right
x, y = points[1]
x = x + offset
y = y - offset
points_offset.append([x, y])
# bottom left
x, y = points[2]
x = x - offset
y = y + offset
points_offset.append([x, y])
# bottom rightharris-laplace
x, y = points[3]
x = x + offset
y = y + offset
points_offset.append([x, y])
return points_offset
def create_text_bubble(points, frame, bubble_text_queue, bubble_text_bin):
# Height and width
H = frame.shape[0]
W = frame.shape[1]
# Find centroid of points
c_x = 0
c_y = 0
for p in points:
c_x += p[0]
c_y += p[1]
c_x = c_x//len(points)
c_y = c_y//len(points)
cv2.circle(frame, (c_x,c_y), 20, (255,0,0), thickness=-1, lineType=8, shift=0)
# Ellipse size
ellipse_vertical_offset = -140
ellipse_horizontal_offset = -70
ellipse_major_axis_size = 200
ellipse_minor_axis_size = 100
# Centroid offset
c_x += ellipse_horizontal_offset
c_y += ellipse_vertical_offset
# Adjust bounds (if needed)
if c_x - ellipse_major_axis_size < 0:
c_x = ellipse_major_axis_size
elif c_x + ellipse_major_axis_size > W:
c_x = W - ellipse_major_axis_size
if c_y - ellipse_minor_axis_size < 0:
c_y = ellipse_minor_axis_size
elif c_y + ellipse_minor_axis_size > H:
c_y = H - ellipse_minor_axis_size
# ###### MANUALLY OVERRIDE CENTROID LOCATION
# # i.e. no tracking, text stays in fixed location
# c_x = 400
# c_y = 700
# Create overlay
overlay = frame.copy()
# https://docs.opencv.org/4.1.2/d6/d6e/group__imgproc__draw.html
cv2.circle(overlay, (c_x, c_y), 20, (0, 0, 255), -1)
# Change speaker bubble color based on who is speaking/texting
speaker = bubble_text_queue[bubble_text_bin][0]
message = bubble_text_queue[bubble_text_bin][1]
bubble_color = (255, 255, 51)
if(speaker == "John"):
bubble_color = (100,0,255)
cv2.ellipse(overlay, (c_x, c_y), (ellipse_major_axis_size, ellipse_minor_axis_size), 0, 0, 360, bubble_color, -1)
cv2.ellipse(overlay, (c_x, c_y), (ellipse_major_axis_size, ellipse_minor_axis_size), 0, 0, 360, (0, 0, 255), 4)
# https://stackoverflow.com/questions/27647424/opencv-puttext-new-line-character
text = "{}:\n{}".format(speaker,message)
text_vertical_offset = int(-ellipse_minor_axis_size * .55)
text_horizontal_offset = int(-ellipse_major_axis_size * .6)
fontFace = cv2.FONT_HERSHEY_SIMPLEX
fontScale = .7
thickness = 1
textHeight = cv2.getTextSize(text,fontFace,fontScale,thickness)[0][1]
# For simulating newlines
dy = textHeight + 10
# Insert text
c_x += text_horizontal_offset
c_y += text_vertical_offset
for i, line in enumerate(text.split('\n')):
cv2.putText(overlay, line, (c_x, c_y + i * dy), fontFace, fontScale, thickness)
# alpha blend overlay with frame
alpha = 0.8
frame = alpha * overlay + (1-alpha) * frame
return frame
def create_warp_comosite(composite_image,curr_frame_points_offset,current_frame):
# Create point correspondences for perspective transformation
curr_frame_points_offset_array = np.array(curr_frame_points_offset).astype(np.float32)
input_image_boundary_points_array = np.array(
[(0, 0), (composite_image.shape[1], 0), | global curr_points, display_keypoints
curr_points = []
display_keypoints = []
for idx, (x_prev, y_prev) in enumerate(prev_frame_points):
# Create block of image intensities
# using neighboring pixels around each
# previously identified corner point
#20 works for bed scene
descriptor_offset = 20
search_offset = .5
# Get bounds of block
top, bottom, left, right = get_bounds(x_prev, y_prev, descriptor_offset, 1)
# Adjust the bounds
# top, bottom, left, right = adjust_bounds(top,bottom,left,right,prev_frame.shape[0], prev_frame.shape[1])
# Get descriptor for previous image
prev_frame_intensities = prev_frame[top:bottom, left:right] | identifier_body |
feature_tracking.py | x_curr = i
y_curr = j
else:
print("Please enter valid similarity poisson_mode")
return x_curr, y_curr
def find_points(prev_frame, curr_frame, prev_frame_points, detector, similarity_mode):
global curr_points, display_keypoints
curr_points = []
display_keypoints = []
for idx, (x_prev, y_prev) in enumerate(prev_frame_points):
# Create block of image intensities
# using neighboring pixels around each
# previously identified corner point
#20 works for bed scene
descriptor_offset = 20
search_offset = .5
# Get bounds of block
top, bottom, left, right = get_bounds(x_prev, y_prev, descriptor_offset, 1)
# Adjust the bounds
# top, bottom, left, right = adjust_bounds(top,bottom,left,right,prev_frame.shape[0], prev_frame.shape[1])
# Get descriptor for previous image
prev_frame_intensities = prev_frame[top:bottom, left:right]
prev_frame_descriptor = custom_descriptor(prev_frame_intensities)
print("SHAPE",prev_frame_descriptor.shape)
# Define bounds of search area
top, bottom, left, right = get_bounds(x_prev, y_prev, descriptor_offset, search_offset)
# Adjust the bounds
# top, bottom, left, right = adjust_bounds(top,bottom,left,right, prev_frame.shape[0], prev_frame.shape[1])
# Get search window
search_window = curr_frame[top:bottom, left:right]
# Compute keypoints
keypoints = None
if detector == 'harris':
harris_corners = compute_harris(search_window)
# Threshold harris corners
keypoints = np.argwhere(harris_corners > .7 * harris_corners.max())
# Recall numpy arrays use y,x indexing
keypoints = np.flip(keypoints, axis=1)
elif detector == 'orb':
keypoints = compute_orb(search_window)
if len(keypoints) == 0:
print("No keypoints could be found near ({},{})".format(x_prev, y_prev))
continue
keypoints_adjusted = np.zeros_like(keypoints)
keypoints_adjusted[:, 0] = x_prev - int(search_offset * descriptor_offset) + keypoints[:, 0]
keypoints_adjusted[:, 1] = y_prev - int(search_offset * descriptor_offset) + keypoints[:, 1]
# Visualize all keypoints
display_keypoints.extend(keypoints_adjusted.tolist())
# Slide window throughout search area of size equal
# to feature descriptor block
x_curr, y_curr = compute_similarity(x_prev, y_prev, curr_frame, keypoints_adjusted, descriptor_offset,
prev_frame_descriptor, similarity_mode)
curr_points.append([x_curr, y_curr])
return curr_points
def compute_harris(window):
gray_frame = cv2.cvtColor(window, cv2.COLOR_BGR2GRAY)
# result is dilated for marking the corners, not important
harris_frame = cv2.cornerHarris(gray_frame, 5, 3, 0.04)
harris_frame = cv2.dilate(harris_frame, None)
return harris_frame
def compute_orb(window):
detector = cv2.ORB_create(edgeThreshold=0)
keypoints_ = detector.detect(window, None) # list of keypoint objects, get raw indicies
keypoints = []
for kp in keypoints_:
x, y = kp.pt
keypoints.append([int(x), int(y)])
print("Number of ORB keypoins found: {}".format(len(keypoints)))
return np.array(keypoints)
def draw_point(frame, x, y, color, radius):
cv2.circle(frame, (x, y), radius, color, -1)
def draw_points(frame, points, color, radius):
for (x, y) in points:
draw_point(frame, x, y, color, radius)
current_frame_gui = None
clicked_points = []
display_keypoints = []
class Modes:
MOVIE = 1
IMAGE = 2
OTHER = 3
# POINTS SHOULD BE ADDED IN THE FOLLOWING ORDER:
#
# TOP LEFT, TOP RIGHT, BOTTOM LEFT, BOTTOM RIGHT
def click(event, x, y, flags, param):
if event == cv2.EVENT_LBUTTONDOWN:
clicked_points.append((x, y))
draw_point(current_frame_gui, x, y, (0, 255, 0), 5)
def apply_point_offset(points):
offset = 20
points_offset = []
# top left
x, y = points[0]
x = x - offset
y = y - offset
points_offset.append([x, y])
# top right
x, y = points[1]
x = x + offset
y = y - offset
points_offset.append([x, y])
# bottom left
x, y = points[2]
x = x - offset
y = y + offset
points_offset.append([x, y])
# bottom rightharris-laplace
x, y = points[3]
x = x + offset
y = y + offset
points_offset.append([x, y])
return points_offset
def create_text_bubble(points, frame, bubble_text_queue, bubble_text_bin):
# Height and width
H = frame.shape[0]
W = frame.shape[1]
# Find centroid of points
c_x = 0
c_y = 0
for p in points:
c_x += p[0]
c_y += p[1]
c_x = c_x//len(points)
c_y = c_y//len(points)
cv2.circle(frame, (c_x,c_y), 20, (255,0,0), thickness=-1, lineType=8, shift=0)
# Ellipse size
ellipse_vertical_offset = -140
ellipse_horizontal_offset = -70
ellipse_major_axis_size = 200
ellipse_minor_axis_size = 100
# Centroid offset
c_x += ellipse_horizontal_offset
c_y += ellipse_vertical_offset
# Adjust bounds (if needed)
if c_x - ellipse_major_axis_size < 0:
c_x = ellipse_major_axis_size
elif c_x + ellipse_major_axis_size > W:
c_x = W - ellipse_major_axis_size
if c_y - ellipse_minor_axis_size < 0:
c_y = ellipse_minor_axis_size
elif c_y + ellipse_minor_axis_size > H:
c_y = H - ellipse_minor_axis_size
# ###### MANUALLY OVERRIDE CENTROID LOCATION
# # i.e. no tracking, text stays in fixed location
# c_x = 400
# c_y = 700
# Create overlay
overlay = frame.copy()
# https://docs.opencv.org/4.1.2/d6/d6e/group__imgproc__draw.html
cv2.circle(overlay, (c_x, c_y), 20, (0, 0, 255), -1)
# Change speaker bubble color based on who is speaking/texting
speaker = bubble_text_queue[bubble_text_bin][0]
message = bubble_text_queue[bubble_text_bin][1]
bubble_color = (255, 255, 51)
if(speaker == "John"):
bubble_color = (100,0,255)
cv2.ellipse(overlay, (c_x, c_y), (ellipse_major_axis_size, ellipse_minor_axis_size), 0, 0, 360, bubble_color, -1)
cv2.ellipse(overlay, (c_x, c_y), (ellipse_major_axis_size, ellipse_minor_axis_size), 0, 0, 360, (0, 0, 255), 4)
# https://stackoverflow.com/questions/27647424/opencv-puttext-new-line-character
text = "{}:\n{}".format(speaker,message)
text_vertical_offset = int(-ellipse_minor_axis_size * .55)
text_horizontal_offset = int(-ellipse_major_axis_size * .6)
fontFace = cv2.FONT_HERSHEY_SIMPLEX
fontScale = .7
thickness = 1
textHeight = cv2.getTextSize(text,fontFace,fontScale,thickness)[0][1]
# For simulating newlines
dy = textHeight + 10
# Insert text
c_x += text_horizontal_offset | cv2.putText(overlay, line, (c_x, c_y + i * dy), fontFace, fontScale, thickness)
# alpha blend overlay with frame
alpha = 0.8
frame = alpha * overlay + (1-alpha) * frame
return frame
def create_warp_comosite(composite_image,curr_frame_points_offset,current_frame):
# Create point correspondences for perspective transformation
curr_frame_points_offset_array = np.array(curr_frame_points_offset).astype(np.float32)
input_image_boundary_points_array = np.array(
[(0, 0), (composite_image.shape[1], 0), | c_y += text_vertical_offset
for i, line in enumerate(text.split('\n')): | random_line_split |
feature_tracking.py | )
return harris_frame
def compute_orb(window):
detector = cv2.ORB_create(edgeThreshold=0)
keypoints_ = detector.detect(window, None) # list of keypoint objects, get raw indicies
keypoints = []
for kp in keypoints_:
x, y = kp.pt
keypoints.append([int(x), int(y)])
print("Number of ORB keypoins found: {}".format(len(keypoints)))
return np.array(keypoints)
def draw_point(frame, x, y, color, radius):
cv2.circle(frame, (x, y), radius, color, -1)
def draw_points(frame, points, color, radius):
for (x, y) in points:
draw_point(frame, x, y, color, radius)
current_frame_gui = None
clicked_points = []
display_keypoints = []
class Modes:
MOVIE = 1
IMAGE = 2
OTHER = 3
# POINTS SHOULD BE ADDED IN THE FOLLOWING ORDER:
#
# TOP LEFT, TOP RIGHT, BOTTOM LEFT, BOTTOM RIGHT
def click(event, x, y, flags, param):
if event == cv2.EVENT_LBUTTONDOWN:
clicked_points.append((x, y))
draw_point(current_frame_gui, x, y, (0, 255, 0), 5)
def apply_point_offset(points):
offset = 20
points_offset = []
# top left
x, y = points[0]
x = x - offset
y = y - offset
points_offset.append([x, y])
# top right
x, y = points[1]
x = x + offset
y = y - offset
points_offset.append([x, y])
# bottom left
x, y = points[2]
x = x - offset
y = y + offset
points_offset.append([x, y])
# bottom rightharris-laplace
x, y = points[3]
x = x + offset
y = y + offset
points_offset.append([x, y])
return points_offset
def create_text_bubble(points, frame, bubble_text_queue, bubble_text_bin):
# Height and width
H = frame.shape[0]
W = frame.shape[1]
# Find centroid of points
c_x = 0
c_y = 0
for p in points:
c_x += p[0]
c_y += p[1]
c_x = c_x//len(points)
c_y = c_y//len(points)
cv2.circle(frame, (c_x,c_y), 20, (255,0,0), thickness=-1, lineType=8, shift=0)
# Ellipse size
ellipse_vertical_offset = -140
ellipse_horizontal_offset = -70
ellipse_major_axis_size = 200
ellipse_minor_axis_size = 100
# Centroid offset
c_x += ellipse_horizontal_offset
c_y += ellipse_vertical_offset
# Adjust bounds (if needed)
if c_x - ellipse_major_axis_size < 0:
c_x = ellipse_major_axis_size
elif c_x + ellipse_major_axis_size > W:
c_x = W - ellipse_major_axis_size
if c_y - ellipse_minor_axis_size < 0:
c_y = ellipse_minor_axis_size
elif c_y + ellipse_minor_axis_size > H:
c_y = H - ellipse_minor_axis_size
# ###### MANUALLY OVERRIDE CENTROID LOCATION
# # i.e. no tracking, text stays in fixed location
# c_x = 400
# c_y = 700
# Create overlay
overlay = frame.copy()
# https://docs.opencv.org/4.1.2/d6/d6e/group__imgproc__draw.html
cv2.circle(overlay, (c_x, c_y), 20, (0, 0, 255), -1)
# Change speaker bubble color based on who is speaking/texting
speaker = bubble_text_queue[bubble_text_bin][0]
message = bubble_text_queue[bubble_text_bin][1]
bubble_color = (255, 255, 51)
if(speaker == "John"):
bubble_color = (100,0,255)
cv2.ellipse(overlay, (c_x, c_y), (ellipse_major_axis_size, ellipse_minor_axis_size), 0, 0, 360, bubble_color, -1)
cv2.ellipse(overlay, (c_x, c_y), (ellipse_major_axis_size, ellipse_minor_axis_size), 0, 0, 360, (0, 0, 255), 4)
# https://stackoverflow.com/questions/27647424/opencv-puttext-new-line-character
text = "{}:\n{}".format(speaker,message)
text_vertical_offset = int(-ellipse_minor_axis_size * .55)
text_horizontal_offset = int(-ellipse_major_axis_size * .6)
fontFace = cv2.FONT_HERSHEY_SIMPLEX
fontScale = .7
thickness = 1
textHeight = cv2.getTextSize(text,fontFace,fontScale,thickness)[0][1]
# For simulating newlines
dy = textHeight + 10
# Insert text
c_x += text_horizontal_offset
c_y += text_vertical_offset
for i, line in enumerate(text.split('\n')):
cv2.putText(overlay, line, (c_x, c_y + i * dy), fontFace, fontScale, thickness)
# alpha blend overlay with frame
alpha = 0.8
frame = alpha * overlay + (1-alpha) * frame
return frame
def create_warp_comosite(composite_image,curr_frame_points_offset,current_frame):
# Create point correspondences for perspective transformation
curr_frame_points_offset_array = np.array(curr_frame_points_offset).astype(np.float32)
input_image_boundary_points_array = np.array(
[(0, 0), (composite_image.shape[1], 0), (0, composite_image.shape[0]),
(composite_image.shape[1], composite_image.shape[0])], dtype=np.float32)
M = cv2.getPerspectiveTransform(input_image_boundary_points_array, curr_frame_points_offset_array)
maxWidth = current_frame.shape[1]
maxHeight = current_frame.shape[0]
# Warp composite image using perspective transformation matrix
warped = cv2.warpPerspective(composite_image, M, (maxWidth, maxHeight))
# use warped as mask to superimpose warped on current background
# frame
mask = (warped == [0, 0, 0]).all(-1)
assert (current_frame.shape == composite_image.shape)
current_frame_output_composite = np.where(mask[..., None], current_frame, warped)
return current_frame_output_composite
def main():
# Open and save video files using unique path id
scene = "bed_scene"
warp_flag = False
bubble_flag = True
# Used for text scene
#bubble_text_queue = [("Hayley","Evil interdimensional\nmonsters are attacking\ncampus"),("Hayley","Snevy needs us to\ndefeat their boss,\nThe GOLIATH"),("Hayley","So the monsters can\ngo back to their\nown dimension"),("John","I'm in! (For Snevy)\n"),("Hayley","Great! Okay, run\nto the VCC! Be careful\n...monsters around")]
bubble_text_queue = [("Snevy","A giant scary\nmonster is attacking!\nCan you help me\ndefeat it?"),("Snevy","Thank you!")]
###### TRACKING VIDEO
# Open video stream to input movie
tracking_video = "inputs/tracking_videos/{}.MOV".format(scene)
track_cap = cv2.VideoCapture(tracking_video)
start_frame = 0
# Get metadata from input movie
track_cap.set(cv2.CAP_PROP_POS_FRAMES, start_frame)
fps = track_cap.get(cv2.CAP_PROP_FPS)
frame_count = int(track_cap.get(cv2.CAP_PROP_FRAME_COUNT))
frame_width = int(track_cap.get(3))
frame_height = int(track_cap.get(4))
###### OUTPUT VIDEO
# Define the codec and create VideoWriter object
# to write a new movie to disk
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out_composite = cv2.VideoWriter('outputs/video_output_composite_{}.MOV'.format(scene), fourcc, fps,
(frame_width, frame_height))
out_tracking = cv2.VideoWriter('outputs/video_output_tracking_{}.MOV'.format(scene), fourcc, fps,
(frame_width, frame_height))
###### Composite Input
mode = Modes.MOVIE
# Choose to composite a video or image into the tracked planar object
composite_cap = None
composite_image = None
if mode == Modes.IMAGE:
composite_image = cv2.imread("inputs/composite_images/brick_wall.JPG")
elif mode == Modes.MOVIE:
| composite_cap = cv2.VideoCapture("inputs/composite_videos/space.mp4") | conditional_block |
|
upload.py | ext = os.path.splitext(os.path.basename(filename))
is_gz = False
if ext in {'.gz', '.gzip', '.bz', '.bz2', '.bzip'}:
is_gz = True
new_filename, ext = os.path.splitext(new_filename)
final_filename = new_filename + ext
if validate or is_gz:
final_filename = final_filename + '.gz'
return final_filename, file_size
def _wrap_files(filename, logger=None, validate=True):
"""
A little helper to wrap a sequencing file (or join and wrap R1/R2 pairs)
and return a merged file_object
"""
if isinstance(filename, tuple):
if not validate:
raise UploadException('Validation is required in order to auto-interleave files.')
file_obj = FASTXTranslator(open(filename[0], 'rb'), pair=open(filename[1], 'rb'),
progress_callback=logger)
else:
if validate:
file_obj = FASTXTranslator(open(filename, 'rb'), progress_callback=logger)
else:
file_obj = FASTXReader(open(filename, 'rb'), progress_callback=logger)
return file_obj
def upload(files, session, samples_resource, server_url, threads=DEFAULT_UPLOAD_THREADS,
validate=True, log_to=None, metadata=None, tags=None):
"""
Uploads several files to the One Codex server, auto-detecting sizes and using the appropriate
downstream upload functions. Also, wraps the files with a streaming validator to ensure they
work.
"""
if threads is None:
threads = 1
filenames = []
file_sizes = []
for file_path in files:
normalized_filename, file_size = _file_stats(file_path, validate=validate)
filenames.append(normalized_filename)
file_sizes.append(file_size)
# set up the logging
bar_length = 20
if log_to is not None:
log_to.write('Uploading: Preparing upload(s)... ')
log_to.flush()
overall_size = sum(file_sizes)
validated_sizes = {filename: 0 for filename in filenames}
transferred_sizes = {filename: 0 for filename in filenames}
# TODO: we should use click.progressbar?
def progress_bar_display(file_id, bytes_transferred, validation=False):
validation_in_progress = sum(validated_sizes.values()) != overall_size
if validation and validation_in_progress:
# Validating mode
prev_progress = sum(validated_sizes.values()) / overall_size
validated_sizes[file_id] = bytes_transferred
progress = sum(validated_sizes.values()) / overall_size
else:
# Uploading mode
prev_progress = sum(transferred_sizes.values()) / overall_size
transferred_sizes[file_id] = bytes_transferred
progress = sum(transferred_sizes.values()) / overall_size
if floor(100 * prev_progress) == floor(100 * progress):
return
block = int(round(bar_length * progress))
bar = '#' * block + '-' * (bar_length - block)
if validation and validation_in_progress:
log_to.write('\rValidating: [{}] {:.0f}% '.format(bar, progress * 100))
elif progress != 1:
log_to.write('\rUploading: [{}] {:.0f}% '.format(bar, progress * 100))
else:
log_to.write('\rUploading: Finalizing upload... ')
log_to.flush()
progress_bar = None if log_to is None else progress_bar_display
# first, upload all the smaller files in parallel (if multiple threads are requested)
uploading_uuids = []
if threads > 1:
import ctypes
thread_error = Value(ctypes.c_wchar_p, '')
semaphore = BoundedSemaphore(threads)
upload_threads = []
def threaded_upload(*args):
def _wrapped(*wrapped_args):
semaphore.acquire()
try:
file_uuid = upload_file(*wrapped_args[:-1]) | if file_uuid:
uploading_uuids.append(file_uuid)
except Exception as e:
# handle inside the thread to prevent the exception message from leaking out
wrapped_args[-1].value = '{}'.format(e)
raise SystemExit
semaphore.release()
# the thread error message must be the last parameter
thread = Thread(target=_wrapped, args=args + (thread_error, ))
thread.daemon = True
thread.start()
upload_threads.append(thread)
else:
threaded_upload = upload_file
upload_threads = []
uploading_files = []
for file_path, filename, file_size in zip(files, filenames, file_sizes):
if file_size < MULTIPART_SIZE:
file_obj = _wrap_files(file_path, logger=progress_bar, validate=validate)
file_uuid = threaded_upload(file_obj, filename, session, samples_resource, log_to,
metadata, tags)
if file_uuid:
uploading_uuids.append(file_uuid)
uploading_files.append(file_obj)
if threads > 1:
# we need to do this funky wait loop to ensure threads get killed by ctrl-c
while True:
for thread in upload_threads:
# hopefully no one has a <5Gb file that takes longer than a week to upload
thread.join(604800)
if all(not thread.is_alive() for thread in upload_threads):
break
if thread_error.value != '':
raise UploadException(thread_error.value)
# lastly, upload all the very big files sequentially
for file_path, filename, file_size in zip(files, filenames, file_sizes):
if file_size >= MULTIPART_SIZE:
file_obj = _wrap_files(file_path, logger=progress_bar, validate=validate)
upload_large_file(file_obj, filename, session, samples_resource, server_url,
threads=threads, log_to=log_to)
file_obj.close()
if log_to is not None:
log_to.write('\rUploading: All complete.' + (bar_length - 3) * ' ' + '\n')
log_to.flush()
return uploading_uuids
def upload_large_file(file_obj, filename, session, samples_resource, server_url, threads=10,
log_to=None):
"""
Uploads a file to the One Codex server via an intermediate S3 bucket (and handles files >5Gb)
"""
import boto3
from boto3.s3.transfer import TransferConfig
from boto3.exceptions import S3UploadFailedError
# first check with the one codex server to get upload parameters
try:
upload_params = samples_resource.init_multipart_upload()
except requests.exceptions.HTTPError:
raise UploadException('Could not initiate upload with One Codex server')
callback_url = server_url.rstrip('/') + upload_params['callback_url']
access_key = upload_params['upload_aws_access_key_id']
secret_key = upload_params['upload_aws_secret_access_key']
# actually do the upload
client = boto3.client('s3', aws_access_key_id=access_key, aws_secret_access_key=secret_key)
# TODO: this automatically uses 10 threads, but we'd probably like it to be configurable
config = TransferConfig(max_concurrency=threads)
try:
client.upload_fileobj(file_obj, upload_params['s3_bucket'], upload_params['file_id'],
ExtraArgs={'ServerSideEncryption': 'AES256'}, Config=config)
except S3UploadFailedError:
raise UploadException("Upload of %s has failed. Please contact [email protected] "
"if you experience further issues" % filename)
# return completed status to the one codex server
s3_path = 's3://{}/{}'.format(upload_params['s3_bucket'], upload_params['file_id'])
req = session.post(callback_url, json={'s3_path': s3_path, 'filename': filename})
if req.status_code != 200:
raise UploadException("Upload confirmation of %s has failed. Please contact "
"[email protected] if you experience further issues" % filename)
if log_to is not None:
log_to.write('\rUploading: {} finished.\n'.format(filename))
log_to.flush()
def upload_file(file_obj, filename, session, samples_resource, log_to, metadata, tags):
"""
Uploads a file to the One Codex server directly to the users S3 bucket by self-signing
"""
upload_args = {
'filename': filename,
'size': 1, # because we don't have the actually uploaded size yet b/c we're gziping it
'upload_type': 'standard' # This is multipart form data
}
if metadata:
upload_args['metadata'] = metadata
if tags:
upload_args['tags'] = tags
try:
upload_info = samples_resource.init_upload(upload_args)
except requests.exceptions.HTTPError as e:
error_object = e[0]
process_api_error(error_object)
upload_url = upload_info['upload_url']
# Need a OrderedDict to preserve order for S3 (although this doesn't actually matter?)
multipart_fields = OrderedDict()
for k, v in upload_info['additional_fields'].items():
multipart_fields[str(k)] = str(v)
# First validate the file if a FASTXTranslator
if isinstance(file_obj, FASTXTranslator):
file_obj.validate()
# If it isn't being modified and is already compressed, don't bother re-parsing it
if not file_obj.modified and file_obj.is | random_line_split |
|
upload.py | = os.path.splitext(os.path.basename(filename))
is_gz = False
if ext in {'.gz', '.gzip', '.bz', '.bz2', '.bzip'}:
is_gz = True
new_filename, ext = os.path.splitext(new_filename)
final_filename = new_filename + ext
if validate or is_gz:
final_filename = final_filename + '.gz'
return final_filename, file_size
def _wrap_files(filename, logger=None, validate=True):
"""
A little helper to wrap a sequencing file (or join and wrap R1/R2 pairs)
and return a merged file_object
"""
if isinstance(filename, tuple):
if not validate:
raise UploadException('Validation is required in order to auto-interleave files.')
file_obj = FASTXTranslator(open(filename[0], 'rb'), pair=open(filename[1], 'rb'),
progress_callback=logger)
else:
if validate:
file_obj = FASTXTranslator(open(filename, 'rb'), progress_callback=logger)
else:
file_obj = FASTXReader(open(filename, 'rb'), progress_callback=logger)
return file_obj
def upload(files, session, samples_resource, server_url, threads=DEFAULT_UPLOAD_THREADS,
validate=True, log_to=None, metadata=None, tags=None):
"""
Uploads several files to the One Codex server, auto-detecting sizes and using the appropriate
downstream upload functions. Also, wraps the files with a streaming validator to ensure they
work.
"""
if threads is None:
threads = 1
filenames = []
file_sizes = []
for file_path in files:
normalized_filename, file_size = _file_stats(file_path, validate=validate)
filenames.append(normalized_filename)
file_sizes.append(file_size)
# set up the logging
bar_length = 20
if log_to is not None:
log_to.write('Uploading: Preparing upload(s)... ')
log_to.flush()
overall_size = sum(file_sizes)
validated_sizes = {filename: 0 for filename in filenames}
transferred_sizes = {filename: 0 for filename in filenames}
# TODO: we should use click.progressbar?
def progress_bar_display(file_id, bytes_transferred, validation=False):
validation_in_progress = sum(validated_sizes.values()) != overall_size
if validation and validation_in_progress:
# Validating mode
prev_progress = sum(validated_sizes.values()) / overall_size
validated_sizes[file_id] = bytes_transferred
progress = sum(validated_sizes.values()) / overall_size
else:
# Uploading mode
prev_progress = sum(transferred_sizes.values()) / overall_size
transferred_sizes[file_id] = bytes_transferred
progress = sum(transferred_sizes.values()) / overall_size
if floor(100 * prev_progress) == floor(100 * progress):
return
block = int(round(bar_length * progress))
bar = '#' * block + '-' * (bar_length - block)
if validation and validation_in_progress:
log_to.write('\rValidating: [{}] {:.0f}% '.format(bar, progress * 100))
elif progress != 1:
log_to.write('\rUploading: [{}] {:.0f}% '.format(bar, progress * 100))
else:
log_to.write('\rUploading: Finalizing upload... ')
log_to.flush()
progress_bar = None if log_to is None else progress_bar_display
# first, upload all the smaller files in parallel (if multiple threads are requested)
uploading_uuids = []
if threads > 1:
import ctypes
thread_error = Value(ctypes.c_wchar_p, '')
semaphore = BoundedSemaphore(threads)
upload_threads = []
def threaded_upload(*args):
def _wrapped(*wrapped_args):
semaphore.acquire()
try:
file_uuid = upload_file(*wrapped_args[:-1])
if file_uuid:
|
except Exception as e:
# handle inside the thread to prevent the exception message from leaking out
wrapped_args[-1].value = '{}'.format(e)
raise SystemExit
semaphore.release()
# the thread error message must be the last parameter
thread = Thread(target=_wrapped, args=args + (thread_error, ))
thread.daemon = True
thread.start()
upload_threads.append(thread)
else:
threaded_upload = upload_file
upload_threads = []
uploading_files = []
for file_path, filename, file_size in zip(files, filenames, file_sizes):
if file_size < MULTIPART_SIZE:
file_obj = _wrap_files(file_path, logger=progress_bar, validate=validate)
file_uuid = threaded_upload(file_obj, filename, session, samples_resource, log_to,
metadata, tags)
if file_uuid:
uploading_uuids.append(file_uuid)
uploading_files.append(file_obj)
if threads > 1:
# we need to do this funky wait loop to ensure threads get killed by ctrl-c
while True:
for thread in upload_threads:
# hopefully no one has a <5Gb file that takes longer than a week to upload
thread.join(604800)
if all(not thread.is_alive() for thread in upload_threads):
break
if thread_error.value != '':
raise UploadException(thread_error.value)
# lastly, upload all the very big files sequentially
for file_path, filename, file_size in zip(files, filenames, file_sizes):
if file_size >= MULTIPART_SIZE:
file_obj = _wrap_files(file_path, logger=progress_bar, validate=validate)
upload_large_file(file_obj, filename, session, samples_resource, server_url,
threads=threads, log_to=log_to)
file_obj.close()
if log_to is not None:
log_to.write('\rUploading: All complete.' + (bar_length - 3) * ' ' + '\n')
log_to.flush()
return uploading_uuids
def upload_large_file(file_obj, filename, session, samples_resource, server_url, threads=10,
log_to=None):
"""
Uploads a file to the One Codex server via an intermediate S3 bucket (and handles files >5Gb)
"""
import boto3
from boto3.s3.transfer import TransferConfig
from boto3.exceptions import S3UploadFailedError
# first check with the one codex server to get upload parameters
try:
upload_params = samples_resource.init_multipart_upload()
except requests.exceptions.HTTPError:
raise UploadException('Could not initiate upload with One Codex server')
callback_url = server_url.rstrip('/') + upload_params['callback_url']
access_key = upload_params['upload_aws_access_key_id']
secret_key = upload_params['upload_aws_secret_access_key']
# actually do the upload
client = boto3.client('s3', aws_access_key_id=access_key, aws_secret_access_key=secret_key)
# TODO: this automatically uses 10 threads, but we'd probably like it to be configurable
config = TransferConfig(max_concurrency=threads)
try:
client.upload_fileobj(file_obj, upload_params['s3_bucket'], upload_params['file_id'],
ExtraArgs={'ServerSideEncryption': 'AES256'}, Config=config)
except S3UploadFailedError:
raise UploadException("Upload of %s has failed. Please contact [email protected] "
"if you experience further issues" % filename)
# return completed status to the one codex server
s3_path = 's3://{}/{}'.format(upload_params['s3_bucket'], upload_params['file_id'])
req = session.post(callback_url, json={'s3_path': s3_path, 'filename': filename})
if req.status_code != 200:
raise UploadException("Upload confirmation of %s has failed. Please contact "
"[email protected] if you experience further issues" % filename)
if log_to is not None:
log_to.write('\rUploading: {} finished.\n'.format(filename))
log_to.flush()
def upload_file(file_obj, filename, session, samples_resource, log_to, metadata, tags):
"""
Uploads a file to the One Codex server directly to the users S3 bucket by self-signing
"""
upload_args = {
'filename': filename,
'size': 1, # because we don't have the actually uploaded size yet b/c we're gziping it
'upload_type': 'standard' # This is multipart form data
}
if metadata:
upload_args['metadata'] = metadata
if tags:
upload_args['tags'] = tags
try:
upload_info = samples_resource.init_upload(upload_args)
except requests.exceptions.HTTPError as e:
error_object = e[0]
process_api_error(error_object)
upload_url = upload_info['upload_url']
# Need a OrderedDict to preserve order for S3 (although this doesn't actually matter?)
multipart_fields = OrderedDict()
for k, v in upload_info['additional_fields'].items():
multipart_fields[str(k)] = str(v)
# First validate the file if a FASTXTranslator
if isinstance(file_obj, FASTXTranslator):
file_obj.validate()
# If it isn't being modified and is already compressed, don't bother re-parsing it
if not file_obj.modified and file_obj | uploading_uuids.append(file_uuid) | conditional_block |
upload.py | ext = os.path.splitext(os.path.basename(filename))
is_gz = False
if ext in {'.gz', '.gzip', '.bz', '.bz2', '.bzip'}:
is_gz = True
new_filename, ext = os.path.splitext(new_filename)
final_filename = new_filename + ext
if validate or is_gz:
final_filename = final_filename + '.gz'
return final_filename, file_size
def _wrap_files(filename, logger=None, validate=True):
"""
A little helper to wrap a sequencing file (or join and wrap R1/R2 pairs)
and return a merged file_object
"""
if isinstance(filename, tuple):
if not validate:
raise UploadException('Validation is required in order to auto-interleave files.')
file_obj = FASTXTranslator(open(filename[0], 'rb'), pair=open(filename[1], 'rb'),
progress_callback=logger)
else:
if validate:
file_obj = FASTXTranslator(open(filename, 'rb'), progress_callback=logger)
else:
file_obj = FASTXReader(open(filename, 'rb'), progress_callback=logger)
return file_obj
def upload(files, session, samples_resource, server_url, threads=DEFAULT_UPLOAD_THREADS,
validate=True, log_to=None, metadata=None, tags=None):
"""
Uploads several files to the One Codex server, auto-detecting sizes and using the appropriate
downstream upload functions. Also, wraps the files with a streaming validator to ensure they
work.
"""
if threads is None:
threads = 1
filenames = []
file_sizes = []
for file_path in files:
normalized_filename, file_size = _file_stats(file_path, validate=validate)
filenames.append(normalized_filename)
file_sizes.append(file_size)
# set up the logging
bar_length = 20
if log_to is not None:
log_to.write('Uploading: Preparing upload(s)... ')
log_to.flush()
overall_size = sum(file_sizes)
validated_sizes = {filename: 0 for filename in filenames}
transferred_sizes = {filename: 0 for filename in filenames}
# TODO: we should use click.progressbar?
def progress_bar_display(file_id, bytes_transferred, validation=False):
validation_in_progress = sum(validated_sizes.values()) != overall_size
if validation and validation_in_progress:
# Validating mode
prev_progress = sum(validated_sizes.values()) / overall_size
validated_sizes[file_id] = bytes_transferred
progress = sum(validated_sizes.values()) / overall_size
else:
# Uploading mode
prev_progress = sum(transferred_sizes.values()) / overall_size
transferred_sizes[file_id] = bytes_transferred
progress = sum(transferred_sizes.values()) / overall_size
if floor(100 * prev_progress) == floor(100 * progress):
return
block = int(round(bar_length * progress))
bar = '#' * block + '-' * (bar_length - block)
if validation and validation_in_progress:
log_to.write('\rValidating: [{}] {:.0f}% '.format(bar, progress * 100))
elif progress != 1:
log_to.write('\rUploading: [{}] {:.0f}% '.format(bar, progress * 100))
else:
log_to.write('\rUploading: Finalizing upload... ')
log_to.flush()
progress_bar = None if log_to is None else progress_bar_display
# first, upload all the smaller files in parallel (if multiple threads are requested)
uploading_uuids = []
if threads > 1:
import ctypes
thread_error = Value(ctypes.c_wchar_p, '')
semaphore = BoundedSemaphore(threads)
upload_threads = []
def threaded_upload(*args):
def _wrapped(*wrapped_args):
semaphore.acquire()
try:
file_uuid = upload_file(*wrapped_args[:-1])
if file_uuid:
uploading_uuids.append(file_uuid)
except Exception as e:
# handle inside the thread to prevent the exception message from leaking out
wrapped_args[-1].value = '{}'.format(e)
raise SystemExit
semaphore.release()
# the thread error message must be the last parameter
thread = Thread(target=_wrapped, args=args + (thread_error, ))
thread.daemon = True
thread.start()
upload_threads.append(thread)
else:
threaded_upload = upload_file
upload_threads = []
uploading_files = []
for file_path, filename, file_size in zip(files, filenames, file_sizes):
if file_size < MULTIPART_SIZE:
file_obj = _wrap_files(file_path, logger=progress_bar, validate=validate)
file_uuid = threaded_upload(file_obj, filename, session, samples_resource, log_to,
metadata, tags)
if file_uuid:
uploading_uuids.append(file_uuid)
uploading_files.append(file_obj)
if threads > 1:
# we need to do this funky wait loop to ensure threads get killed by ctrl-c
while True:
for thread in upload_threads:
# hopefully no one has a <5Gb file that takes longer than a week to upload
thread.join(604800)
if all(not thread.is_alive() for thread in upload_threads):
break
if thread_error.value != '':
raise UploadException(thread_error.value)
# lastly, upload all the very big files sequentially
for file_path, filename, file_size in zip(files, filenames, file_sizes):
if file_size >= MULTIPART_SIZE:
file_obj = _wrap_files(file_path, logger=progress_bar, validate=validate)
upload_large_file(file_obj, filename, session, samples_resource, server_url,
threads=threads, log_to=log_to)
file_obj.close()
if log_to is not None:
log_to.write('\rUploading: All complete.' + (bar_length - 3) * ' ' + '\n')
log_to.flush()
return uploading_uuids
def upload_large_file(file_obj, filename, session, samples_resource, server_url, threads=10,
log_to=None):
| config = TransferConfig(max_concurrency=threads)
try:
client.upload_fileobj(file_obj, upload_params['s3_bucket'], upload_params['file_id'],
ExtraArgs={'ServerSideEncryption': 'AES256'}, Config=config)
except S3UploadFailedError:
raise UploadException("Upload of %s has failed. Please contact [email protected] "
"if you experience further issues" % filename)
# return completed status to the one codex server
s3_path = 's3://{}/{}'.format(upload_params['s3_bucket'], upload_params['file_id'])
req = session.post(callback_url, json={'s3_path': s3_path, 'filename': filename})
if req.status_code != 200:
raise UploadException("Upload confirmation of %s has failed. Please contact "
"[email protected] if you experience further issues" % filename)
if log_to is not None:
log_to.write('\rUploading: {} finished.\n'.format(filename))
log_to.flush()
def upload_file(file_obj, filename, session, samples_resource, log_to, metadata, tags):
"""
Uploads a file to the One Codex server directly to the users S3 bucket by self-signing
"""
upload_args = {
'filename': filename,
'size': 1, # because we don't have the actually uploaded size yet b/c we're gziping it
'upload_type': 'standard' # This is multipart form data
}
if metadata:
upload_args['metadata'] = metadata
if tags:
upload_args['tags'] = tags
try:
upload_info = samples_resource.init_upload(upload_args)
except requests.exceptions.HTTPError as e:
error_object = e[0]
process_api_error(error_object)
upload_url = upload_info['upload_url']
# Need a OrderedDict to preserve order for S3 (although this doesn't actually matter?)
multipart_fields = OrderedDict()
for k, v in upload_info['additional_fields'].items():
multipart_fields[str(k)] = str(v)
# First validate the file if a FASTXTranslator
if isinstance(file_obj, FASTXTranslator):
file_obj.validate()
# If it isn't being modified and is already compressed, don't bother re-parsing it
if not file_obj.modified and file_obj.is_g | """
Uploads a file to the One Codex server via an intermediate S3 bucket (and handles files >5Gb)
"""
import boto3
from boto3.s3.transfer import TransferConfig
from boto3.exceptions import S3UploadFailedError
# first check with the one codex server to get upload parameters
try:
upload_params = samples_resource.init_multipart_upload()
except requests.exceptions.HTTPError:
raise UploadException('Could not initiate upload with One Codex server')
callback_url = server_url.rstrip('/') + upload_params['callback_url']
access_key = upload_params['upload_aws_access_key_id']
secret_key = upload_params['upload_aws_secret_access_key']
# actually do the upload
client = boto3.client('s3', aws_access_key_id=access_key, aws_secret_access_key=secret_key)
# TODO: this automatically uses 10 threads, but we'd probably like it to be configurable | identifier_body |
upload.py | ext = os.path.splitext(os.path.basename(filename))
is_gz = False
if ext in {'.gz', '.gzip', '.bz', '.bz2', '.bzip'}:
is_gz = True
new_filename, ext = os.path.splitext(new_filename)
final_filename = new_filename + ext
if validate or is_gz:
final_filename = final_filename + '.gz'
return final_filename, file_size
def | (filename, logger=None, validate=True):
"""
A little helper to wrap a sequencing file (or join and wrap R1/R2 pairs)
and return a merged file_object
"""
if isinstance(filename, tuple):
if not validate:
raise UploadException('Validation is required in order to auto-interleave files.')
file_obj = FASTXTranslator(open(filename[0], 'rb'), pair=open(filename[1], 'rb'),
progress_callback=logger)
else:
if validate:
file_obj = FASTXTranslator(open(filename, 'rb'), progress_callback=logger)
else:
file_obj = FASTXReader(open(filename, 'rb'), progress_callback=logger)
return file_obj
def upload(files, session, samples_resource, server_url, threads=DEFAULT_UPLOAD_THREADS,
validate=True, log_to=None, metadata=None, tags=None):
"""
Uploads several files to the One Codex server, auto-detecting sizes and using the appropriate
downstream upload functions. Also, wraps the files with a streaming validator to ensure they
work.
"""
if threads is None:
threads = 1
filenames = []
file_sizes = []
for file_path in files:
normalized_filename, file_size = _file_stats(file_path, validate=validate)
filenames.append(normalized_filename)
file_sizes.append(file_size)
# set up the logging
bar_length = 20
if log_to is not None:
log_to.write('Uploading: Preparing upload(s)... ')
log_to.flush()
overall_size = sum(file_sizes)
validated_sizes = {filename: 0 for filename in filenames}
transferred_sizes = {filename: 0 for filename in filenames}
# TODO: we should use click.progressbar?
def progress_bar_display(file_id, bytes_transferred, validation=False):
validation_in_progress = sum(validated_sizes.values()) != overall_size
if validation and validation_in_progress:
# Validating mode
prev_progress = sum(validated_sizes.values()) / overall_size
validated_sizes[file_id] = bytes_transferred
progress = sum(validated_sizes.values()) / overall_size
else:
# Uploading mode
prev_progress = sum(transferred_sizes.values()) / overall_size
transferred_sizes[file_id] = bytes_transferred
progress = sum(transferred_sizes.values()) / overall_size
if floor(100 * prev_progress) == floor(100 * progress):
return
block = int(round(bar_length * progress))
bar = '#' * block + '-' * (bar_length - block)
if validation and validation_in_progress:
log_to.write('\rValidating: [{}] {:.0f}% '.format(bar, progress * 100))
elif progress != 1:
log_to.write('\rUploading: [{}] {:.0f}% '.format(bar, progress * 100))
else:
log_to.write('\rUploading: Finalizing upload... ')
log_to.flush()
progress_bar = None if log_to is None else progress_bar_display
# first, upload all the smaller files in parallel (if multiple threads are requested)
uploading_uuids = []
if threads > 1:
import ctypes
thread_error = Value(ctypes.c_wchar_p, '')
semaphore = BoundedSemaphore(threads)
upload_threads = []
def threaded_upload(*args):
def _wrapped(*wrapped_args):
semaphore.acquire()
try:
file_uuid = upload_file(*wrapped_args[:-1])
if file_uuid:
uploading_uuids.append(file_uuid)
except Exception as e:
# handle inside the thread to prevent the exception message from leaking out
wrapped_args[-1].value = '{}'.format(e)
raise SystemExit
semaphore.release()
# the thread error message must be the last parameter
thread = Thread(target=_wrapped, args=args + (thread_error, ))
thread.daemon = True
thread.start()
upload_threads.append(thread)
else:
threaded_upload = upload_file
upload_threads = []
uploading_files = []
for file_path, filename, file_size in zip(files, filenames, file_sizes):
if file_size < MULTIPART_SIZE:
file_obj = _wrap_files(file_path, logger=progress_bar, validate=validate)
file_uuid = threaded_upload(file_obj, filename, session, samples_resource, log_to,
metadata, tags)
if file_uuid:
uploading_uuids.append(file_uuid)
uploading_files.append(file_obj)
if threads > 1:
# we need to do this funky wait loop to ensure threads get killed by ctrl-c
while True:
for thread in upload_threads:
# hopefully no one has a <5Gb file that takes longer than a week to upload
thread.join(604800)
if all(not thread.is_alive() for thread in upload_threads):
break
if thread_error.value != '':
raise UploadException(thread_error.value)
# lastly, upload all the very big files sequentially
for file_path, filename, file_size in zip(files, filenames, file_sizes):
if file_size >= MULTIPART_SIZE:
file_obj = _wrap_files(file_path, logger=progress_bar, validate=validate)
upload_large_file(file_obj, filename, session, samples_resource, server_url,
threads=threads, log_to=log_to)
file_obj.close()
if log_to is not None:
log_to.write('\rUploading: All complete.' + (bar_length - 3) * ' ' + '\n')
log_to.flush()
return uploading_uuids
def upload_large_file(file_obj, filename, session, samples_resource, server_url, threads=10,
log_to=None):
"""
Uploads a file to the One Codex server via an intermediate S3 bucket (and handles files >5Gb)
"""
import boto3
from boto3.s3.transfer import TransferConfig
from boto3.exceptions import S3UploadFailedError
# first check with the one codex server to get upload parameters
try:
upload_params = samples_resource.init_multipart_upload()
except requests.exceptions.HTTPError:
raise UploadException('Could not initiate upload with One Codex server')
callback_url = server_url.rstrip('/') + upload_params['callback_url']
access_key = upload_params['upload_aws_access_key_id']
secret_key = upload_params['upload_aws_secret_access_key']
# actually do the upload
client = boto3.client('s3', aws_access_key_id=access_key, aws_secret_access_key=secret_key)
# TODO: this automatically uses 10 threads, but we'd probably like it to be configurable
config = TransferConfig(max_concurrency=threads)
try:
client.upload_fileobj(file_obj, upload_params['s3_bucket'], upload_params['file_id'],
ExtraArgs={'ServerSideEncryption': 'AES256'}, Config=config)
except S3UploadFailedError:
raise UploadException("Upload of %s has failed. Please contact [email protected] "
"if you experience further issues" % filename)
# return completed status to the one codex server
s3_path = 's3://{}/{}'.format(upload_params['s3_bucket'], upload_params['file_id'])
req = session.post(callback_url, json={'s3_path': s3_path, 'filename': filename})
if req.status_code != 200:
raise UploadException("Upload confirmation of %s has failed. Please contact "
"[email protected] if you experience further issues" % filename)
if log_to is not None:
log_to.write('\rUploading: {} finished.\n'.format(filename))
log_to.flush()
def upload_file(file_obj, filename, session, samples_resource, log_to, metadata, tags):
"""
Uploads a file to the One Codex server directly to the users S3 bucket by self-signing
"""
upload_args = {
'filename': filename,
'size': 1, # because we don't have the actually uploaded size yet b/c we're gziping it
'upload_type': 'standard' # This is multipart form data
}
if metadata:
upload_args['metadata'] = metadata
if tags:
upload_args['tags'] = tags
try:
upload_info = samples_resource.init_upload(upload_args)
except requests.exceptions.HTTPError as e:
error_object = e[0]
process_api_error(error_object)
upload_url = upload_info['upload_url']
# Need a OrderedDict to preserve order for S3 (although this doesn't actually matter?)
multipart_fields = OrderedDict()
for k, v in upload_info['additional_fields'].items():
multipart_fields[str(k)] = str(v)
# First validate the file if a FASTXTranslator
if isinstance(file_obj, FASTXTranslator):
file_obj.validate()
# If it isn't being modified and is already compressed, don't bother re-parsing it
if not file_obj.modified and file_obj.is | _wrap_files | identifier_name |
event_queue.rs | queue
///
/// This handle gives you access to methods on an event queue
/// that are safe to do from within a callback.
///
/// They are also available on an `EventQueue` object via `Deref`.
pub struct EventQueueHandle {
state: State,
wlevq: Option<*mut wl_event_queue>,
}
impl EventQueueHandle {
/// Register a proxy to this event queue.
///
/// You are required to provide a valid implementation for this proxy
/// as well as some associated implementation data. This implementation
/// is expected to be a struct holding the various relevant
/// function pointers.
///
/// This implementation data can typically contain indexes to state value
/// that the implementation will need to work on.
///
/// This overwrites any precedently set implementation for this proxy.
///
/// Returns appropriately and does nothing if this proxy is dead or already managed by
/// something else than this library.
pub fn register<P, ID>(&mut self, proxy: &P, implementation: P::Implementation, idata: ID)
-> RegisterStatus
where
P: Proxy + Implementable<ID>,
ID: 'static,
{
match proxy.status() {
::Liveness::Dead => return RegisterStatus::Dead,
::Liveness::Unmanaged => return RegisterStatus::Unmanaged,
::Liveness::Alive => { /* ok, we can continue */ }
}
unsafe {
let data: *mut ProxyUserData =
ffi_dispatch!(WAYLAND_CLIENT_HANDLE, wl_proxy_get_user_data, proxy.ptr()) as *mut _;
// This cast from *const to *mut is legit because we enforce that a proxy
// can only be assigned to a single EventQueue.
// (this is actually the whole point of the design of this lib)
(&mut *data).0 = self as *const _ as *mut _;
(&mut *data).1 = Some(Box::new((implementation, idata)) as Box<Any>);
// even if this call fails, we updated the user_data, so the new implementation is in place.
ffi_dispatch!(
WAYLAND_CLIENT_HANDLE,
wl_proxy_add_dispatcher,
proxy.ptr(),
dispatch_func::<P, ID>,
&RUST_MANAGED as *const _ as *const _,
data as *mut c_void
);
ffi_dispatch!(
WAYLAND_CLIENT_HANDLE,
wl_proxy_set_queue,
proxy.ptr(),
match self.wlevq {
Some(ptr) => ptr,
None => ::std::ptr::null_mut(),
}
);
}
RegisterStatus::Registered
}
/// Get a handle to the internal state
///
/// The returned guard object allows you to get references
/// to the handler objects you previously inserted in this
/// event queue.
pub fn state(&mut self) -> &mut State {
&mut self.state
}
}
/// An event queue managing wayland events
///
/// Each wayland object can receive events from the server. To handle these events
/// you must associate to these objects an implementation: a struct defined in their
/// respective module, in which you provide a set of functions that will handle each event.
///
/// Your implementation can also access a shared state managed by the event queue. See
/// the `State` struct and the `state()` method on `EventQueueHandle`. If you need this,
/// the way to do it is:
///
/// - insert your state value in the event queue state store, your are then provided with a
/// token to access it
/// - provide this token (you can clone it) as implementation data to any wayland object
/// that need to access this state in its event callbacks.
///
/// The event queues also provides you control on the flow of the program, via the `dispatch()` and
/// `dispatch_pending()` methods.
pub struct EventQueue {
handle: Box<EventQueueHandle>,
display: *mut wl_display,
}
impl EventQueue {
/// Dispatches events from the internal buffer.
///
/// Dispatches all events to their appropriate handlers.
/// If not events were in the internal buffer, will block until
/// some events are read and dispatch them.
/// This process can insert events in the internal buffers of
/// other event queues.
///
/// If an error is returned, your connection with the wayland
/// compositor is probably lost.
pub fn dispatch(&mut self) -> IoResult<u32> {
let ret = match self.handle.wlevq {
Some(evq) => unsafe {
ffi_dispatch!(
WAYLAND_CLIENT_HANDLE,
wl_display_dispatch_queue,
self.display,
evq
)
},
None => unsafe { ffi_dispatch!(WAYLAND_CLIENT_HANDLE, wl_display_dispatch, self.display) },
};
if ret >= 0 {
Ok(ret as u32)
} else {
Err(IoError::last_os_error())
}
}
/// Dispatches pending events from the internal buffer.
///
/// Dispatches all events to their appropriate handlers.
/// Never blocks, if not events were pending, simply returns
/// `Ok(0)`.
///
/// If an error is returned, your connection with the wayland
/// compositor is probably lost.
pub fn dispatch_pending(&mut self) -> IoResult<u32> {
let ret = match self.handle.wlevq {
Some(evq) => unsafe {
ffi_dispatch!(
WAYLAND_CLIENT_HANDLE,
wl_display_dispatch_queue_pending,
self.display,
evq
)
},
None => unsafe {
ffi_dispatch!(
WAYLAND_CLIENT_HANDLE,
wl_display_dispatch_pending,
self.display
)
},
};
if ret >= 0 {
Ok(ret as u32)
} else {
Err(IoError::last_os_error())
}
}
/// Synchronous roundtrip
///
/// This call will cause a synchonous roundtrip with the wayland server. It will block until all
/// pending requests of this queue are sent to the server and it has processed all of them and
/// send the appropriate events.
///
/// Handlers are called as a consequence.
///
/// On success returns the number of dispatched events.
pub fn sync_roundtrip(&mut self) -> IoResult<i32> {
let ret = unsafe {
match self.handle.wlevq {
Some(evtq) => ffi_dispatch!(
WAYLAND_CLIENT_HANDLE,
wl_display_roundtrip_queue,
self.display,
evtq
),
None => ffi_dispatch!(WAYLAND_CLIENT_HANDLE, wl_display_roundtrip, self.display),
}
};
if ret >= 0 {
Ok(ret)
} else {
Err(IoError::last_os_error())
}
}
/// Prepare an conccurent read
///
/// Will declare your intention to read events from the server socket.
///
/// Will return `None` if there are still some events awaiting dispatch on this EventIterator.
/// In this case, you need to call `dispatch_pending()` before calling this method again.
///
/// As long as the returned guard is in scope, no events can be dispatched to any event iterator.
///
/// The guard can then be destroyed by two means:
///
/// - Calling its `cancel()` method (or letting it go out of scope): the read intention will
/// be cancelled
/// - Calling its `read_events()` method: will block until all existing guards are destroyed
/// by one of these methods, then events will be read and all blocked `read_events()` calls
/// will return.
///
/// This call will otherwise not block on the server socket if it is empty, and return
/// an io error `WouldBlock` in such cases.
pub fn prepare_read(&self) -> Option<ReadEventsGuard> {
let ret = unsafe { | Some(evtq) => ffi_dispatch!(
WAYLAND_CLIENT_HANDLE,
wl_display_prepare_read_queue,
self.display,
evtq
),
None => ffi_dispatch!(WAYLAND_CLIENT_HANDLE, wl_display_prepare_read, self.display),
}
};
if ret >= 0 {
Some(ReadEventsGuard {
display: self.display,
})
} else {
None
}
}
}
impl Deref for EventQueue {
type Target = EventQueueHandle;
fn deref(&self) -> &EventQueueHandle {
&*self.handle
}
}
impl DerefMut for EventQueue {
fn deref_mut(&mut self) -> &mut EventQueueHandle {
&mut *self.handle
}
}
/// A guard over a read intention.
///
/// See `EventQueue::prepare_read()` for details about its use.
pub struct ReadEventsGuard {
display: *mut wl_display,
}
impl ReadEventsGuard {
/// Read events
///
/// Reads events from the server socket. If other `ReadEventsGuard` exists, will block
/// until they are all consumed or destroyed.
pub fn read_events(self) -> IoResult<i32> {
let ret = unsafe { ffi_dispatch!(WAYLAND_CLIENT_HANDLE, wl_display_read_events, self.display) };
// Don't run destructor that would cancel the read intent
::std::mem::forget(self);
if ret >= 0 {
Ok(ret)
} else {
Err(IoError::last_os_error())
}
}
/// | match self.handle.wlevq { | random_line_split |
event_queue.rs | queue
///
/// This handle gives you access to methods on an event queue
/// that are safe to do from within a callback.
///
/// They are also available on an `EventQueue` object via `Deref`.
pub struct EventQueueHandle {
state: State,
wlevq: Option<*mut wl_event_queue>,
}
impl EventQueueHandle {
/// Register a proxy to this event queue.
///
/// You are required to provide a valid implementation for this proxy
/// as well as some associated implementation data. This implementation
/// is expected to be a struct holding the various relevant
/// function pointers.
///
/// This implementation data can typically contain indexes to state value
/// that the implementation will need to work on.
///
/// This overwrites any precedently set implementation for this proxy.
///
/// Returns appropriately and does nothing if this proxy is dead or already managed by
/// something else than this library.
pub fn register<P, ID>(&mut self, proxy: &P, implementation: P::Implementation, idata: ID)
-> RegisterStatus
where
P: Proxy + Implementable<ID>,
ID: 'static,
{
match proxy.status() {
::Liveness::Dead => return RegisterStatus::Dead,
::Liveness::Unmanaged => return RegisterStatus::Unmanaged,
::Liveness::Alive => { /* ok, we can continue */ }
}
unsafe {
let data: *mut ProxyUserData =
ffi_dispatch!(WAYLAND_CLIENT_HANDLE, wl_proxy_get_user_data, proxy.ptr()) as *mut _;
// This cast from *const to *mut is legit because we enforce that a proxy
// can only be assigned to a single EventQueue.
// (this is actually the whole point of the design of this lib)
(&mut *data).0 = self as *const _ as *mut _;
(&mut *data).1 = Some(Box::new((implementation, idata)) as Box<Any>);
// even if this call fails, we updated the user_data, so the new implementation is in place.
ffi_dispatch!(
WAYLAND_CLIENT_HANDLE,
wl_proxy_add_dispatcher,
proxy.ptr(),
dispatch_func::<P, ID>,
&RUST_MANAGED as *const _ as *const _,
data as *mut c_void
);
ffi_dispatch!(
WAYLAND_CLIENT_HANDLE,
wl_proxy_set_queue,
proxy.ptr(),
match self.wlevq {
Some(ptr) => ptr,
None => ::std::ptr::null_mut(),
}
);
}
RegisterStatus::Registered
}
/// Get a handle to the internal state
///
/// The returned guard object allows you to get references
/// to the handler objects you previously inserted in this
/// event queue.
pub fn state(&mut self) -> &mut State |
}
/// An event queue managing wayland events
///
/// Each wayland object can receive events from the server. To handle these events
/// you must associate to these objects an implementation: a struct defined in their
/// respective module, in which you provide a set of functions that will handle each event.
///
/// Your implementation can also access a shared state managed by the event queue. See
/// the `State` struct and the `state()` method on `EventQueueHandle`. If you need this,
/// the way to do it is:
///
/// - insert your state value in the event queue state store, your are then provided with a
/// token to access it
/// - provide this token (you can clone it) as implementation data to any wayland object
/// that need to access this state in its event callbacks.
///
/// The event queues also provides you control on the flow of the program, via the `dispatch()` and
/// `dispatch_pending()` methods.
pub struct EventQueue {
handle: Box<EventQueueHandle>,
display: *mut wl_display,
}
impl EventQueue {
/// Dispatches events from the internal buffer.
///
/// Dispatches all events to their appropriate handlers.
/// If not events were in the internal buffer, will block until
/// some events are read and dispatch them.
/// This process can insert events in the internal buffers of
/// other event queues.
///
/// If an error is returned, your connection with the wayland
/// compositor is probably lost.
pub fn dispatch(&mut self) -> IoResult<u32> {
let ret = match self.handle.wlevq {
Some(evq) => unsafe {
ffi_dispatch!(
WAYLAND_CLIENT_HANDLE,
wl_display_dispatch_queue,
self.display,
evq
)
},
None => unsafe { ffi_dispatch!(WAYLAND_CLIENT_HANDLE, wl_display_dispatch, self.display) },
};
if ret >= 0 {
Ok(ret as u32)
} else {
Err(IoError::last_os_error())
}
}
/// Dispatches pending events from the internal buffer.
///
/// Dispatches all events to their appropriate handlers.
/// Never blocks, if not events were pending, simply returns
/// `Ok(0)`.
///
/// If an error is returned, your connection with the wayland
/// compositor is probably lost.
pub fn dispatch_pending(&mut self) -> IoResult<u32> {
let ret = match self.handle.wlevq {
Some(evq) => unsafe {
ffi_dispatch!(
WAYLAND_CLIENT_HANDLE,
wl_display_dispatch_queue_pending,
self.display,
evq
)
},
None => unsafe {
ffi_dispatch!(
WAYLAND_CLIENT_HANDLE,
wl_display_dispatch_pending,
self.display
)
},
};
if ret >= 0 {
Ok(ret as u32)
} else {
Err(IoError::last_os_error())
}
}
/// Synchronous roundtrip
///
/// This call will cause a synchonous roundtrip with the wayland server. It will block until all
/// pending requests of this queue are sent to the server and it has processed all of them and
/// send the appropriate events.
///
/// Handlers are called as a consequence.
///
/// On success returns the number of dispatched events.
pub fn sync_roundtrip(&mut self) -> IoResult<i32> {
let ret = unsafe {
match self.handle.wlevq {
Some(evtq) => ffi_dispatch!(
WAYLAND_CLIENT_HANDLE,
wl_display_roundtrip_queue,
self.display,
evtq
),
None => ffi_dispatch!(WAYLAND_CLIENT_HANDLE, wl_display_roundtrip, self.display),
}
};
if ret >= 0 {
Ok(ret)
} else {
Err(IoError::last_os_error())
}
}
/// Prepare an conccurent read
///
/// Will declare your intention to read events from the server socket.
///
/// Will return `None` if there are still some events awaiting dispatch on this EventIterator.
/// In this case, you need to call `dispatch_pending()` before calling this method again.
///
/// As long as the returned guard is in scope, no events can be dispatched to any event iterator.
///
/// The guard can then be destroyed by two means:
///
/// - Calling its `cancel()` method (or letting it go out of scope): the read intention will
/// be cancelled
/// - Calling its `read_events()` method: will block until all existing guards are destroyed
/// by one of these methods, then events will be read and all blocked `read_events()` calls
/// will return.
///
/// This call will otherwise not block on the server socket if it is empty, and return
/// an io error `WouldBlock` in such cases.
pub fn prepare_read(&self) -> Option<ReadEventsGuard> {
let ret = unsafe {
match self.handle.wlevq {
Some(evtq) => ffi_dispatch!(
WAYLAND_CLIENT_HANDLE,
wl_display_prepare_read_queue,
self.display,
evtq
),
None => ffi_dispatch!(WAYLAND_CLIENT_HANDLE, wl_display_prepare_read, self.display),
}
};
if ret >= 0 {
Some(ReadEventsGuard {
display: self.display,
})
} else {
None
}
}
}
impl Deref for EventQueue {
type Target = EventQueueHandle;
fn deref(&self) -> &EventQueueHandle {
&*self.handle
}
}
impl DerefMut for EventQueue {
fn deref_mut(&mut self) -> &mut EventQueueHandle {
&mut *self.handle
}
}
/// A guard over a read intention.
///
/// See `EventQueue::prepare_read()` for details about its use.
pub struct ReadEventsGuard {
display: *mut wl_display,
}
impl ReadEventsGuard {
/// Read events
///
/// Reads events from the server socket. If other `ReadEventsGuard` exists, will block
/// until they are all consumed or destroyed.
pub fn read_events(self) -> IoResult<i32> {
let ret = unsafe { ffi_dispatch!(WAYLAND_CLIENT_HANDLE, wl_display_read_events, self.display) };
// Don't run destructor that would cancel the read intent
::std::mem::forget(self);
if ret >= 0 {
Ok(ret)
} else {
Err(IoError::last_os_error())
}
}
| {
&mut self.state
} | identifier_body |
event_queue.rs | queue
///
/// This handle gives you access to methods on an event queue
/// that are safe to do from within a callback.
///
/// They are also available on an `EventQueue` object via `Deref`.
pub struct EventQueueHandle {
state: State,
wlevq: Option<*mut wl_event_queue>,
}
impl EventQueueHandle {
/// Register a proxy to this event queue.
///
/// You are required to provide a valid implementation for this proxy
/// as well as some associated implementation data. This implementation
/// is expected to be a struct holding the various relevant
/// function pointers.
///
/// This implementation data can typically contain indexes to state value
/// that the implementation will need to work on.
///
/// This overwrites any precedently set implementation for this proxy.
///
/// Returns appropriately and does nothing if this proxy is dead or already managed by
/// something else than this library.
pub fn register<P, ID>(&mut self, proxy: &P, implementation: P::Implementation, idata: ID)
-> RegisterStatus
where
P: Proxy + Implementable<ID>,
ID: 'static,
{
match proxy.status() {
::Liveness::Dead => return RegisterStatus::Dead,
::Liveness::Unmanaged => return RegisterStatus::Unmanaged,
::Liveness::Alive => { /* ok, we can continue */ }
}
unsafe {
let data: *mut ProxyUserData =
ffi_dispatch!(WAYLAND_CLIENT_HANDLE, wl_proxy_get_user_data, proxy.ptr()) as *mut _;
// This cast from *const to *mut is legit because we enforce that a proxy
// can only be assigned to a single EventQueue.
// (this is actually the whole point of the design of this lib)
(&mut *data).0 = self as *const _ as *mut _;
(&mut *data).1 = Some(Box::new((implementation, idata)) as Box<Any>);
// even if this call fails, we updated the user_data, so the new implementation is in place.
ffi_dispatch!(
WAYLAND_CLIENT_HANDLE,
wl_proxy_add_dispatcher,
proxy.ptr(),
dispatch_func::<P, ID>,
&RUST_MANAGED as *const _ as *const _,
data as *mut c_void
);
ffi_dispatch!(
WAYLAND_CLIENT_HANDLE,
wl_proxy_set_queue,
proxy.ptr(),
match self.wlevq {
Some(ptr) => ptr,
None => ::std::ptr::null_mut(),
}
);
}
RegisterStatus::Registered
}
/// Get a handle to the internal state
///
/// The returned guard object allows you to get references
/// to the handler objects you previously inserted in this
/// event queue.
pub fn | (&mut self) -> &mut State {
&mut self.state
}
}
/// An event queue managing wayland events
///
/// Each wayland object can receive events from the server. To handle these events
/// you must associate to these objects an implementation: a struct defined in their
/// respective module, in which you provide a set of functions that will handle each event.
///
/// Your implementation can also access a shared state managed by the event queue. See
/// the `State` struct and the `state()` method on `EventQueueHandle`. If you need this,
/// the way to do it is:
///
/// - insert your state value in the event queue state store, your are then provided with a
/// token to access it
/// - provide this token (you can clone it) as implementation data to any wayland object
/// that need to access this state in its event callbacks.
///
/// The event queues also provides you control on the flow of the program, via the `dispatch()` and
/// `dispatch_pending()` methods.
pub struct EventQueue {
handle: Box<EventQueueHandle>,
display: *mut wl_display,
}
impl EventQueue {
/// Dispatches events from the internal buffer.
///
/// Dispatches all events to their appropriate handlers.
/// If not events were in the internal buffer, will block until
/// some events are read and dispatch them.
/// This process can insert events in the internal buffers of
/// other event queues.
///
/// If an error is returned, your connection with the wayland
/// compositor is probably lost.
pub fn dispatch(&mut self) -> IoResult<u32> {
let ret = match self.handle.wlevq {
Some(evq) => unsafe {
ffi_dispatch!(
WAYLAND_CLIENT_HANDLE,
wl_display_dispatch_queue,
self.display,
evq
)
},
None => unsafe { ffi_dispatch!(WAYLAND_CLIENT_HANDLE, wl_display_dispatch, self.display) },
};
if ret >= 0 {
Ok(ret as u32)
} else {
Err(IoError::last_os_error())
}
}
/// Dispatches pending events from the internal buffer.
///
/// Dispatches all events to their appropriate handlers.
/// Never blocks, if not events were pending, simply returns
/// `Ok(0)`.
///
/// If an error is returned, your connection with the wayland
/// compositor is probably lost.
pub fn dispatch_pending(&mut self) -> IoResult<u32> {
let ret = match self.handle.wlevq {
Some(evq) => unsafe {
ffi_dispatch!(
WAYLAND_CLIENT_HANDLE,
wl_display_dispatch_queue_pending,
self.display,
evq
)
},
None => unsafe {
ffi_dispatch!(
WAYLAND_CLIENT_HANDLE,
wl_display_dispatch_pending,
self.display
)
},
};
if ret >= 0 {
Ok(ret as u32)
} else {
Err(IoError::last_os_error())
}
}
/// Synchronous roundtrip
///
/// This call will cause a synchonous roundtrip with the wayland server. It will block until all
/// pending requests of this queue are sent to the server and it has processed all of them and
/// send the appropriate events.
///
/// Handlers are called as a consequence.
///
/// On success returns the number of dispatched events.
pub fn sync_roundtrip(&mut self) -> IoResult<i32> {
let ret = unsafe {
match self.handle.wlevq {
Some(evtq) => ffi_dispatch!(
WAYLAND_CLIENT_HANDLE,
wl_display_roundtrip_queue,
self.display,
evtq
),
None => ffi_dispatch!(WAYLAND_CLIENT_HANDLE, wl_display_roundtrip, self.display),
}
};
if ret >= 0 {
Ok(ret)
} else {
Err(IoError::last_os_error())
}
}
/// Prepare an conccurent read
///
/// Will declare your intention to read events from the server socket.
///
/// Will return `None` if there are still some events awaiting dispatch on this EventIterator.
/// In this case, you need to call `dispatch_pending()` before calling this method again.
///
/// As long as the returned guard is in scope, no events can be dispatched to any event iterator.
///
/// The guard can then be destroyed by two means:
///
/// - Calling its `cancel()` method (or letting it go out of scope): the read intention will
/// be cancelled
/// - Calling its `read_events()` method: will block until all existing guards are destroyed
/// by one of these methods, then events will be read and all blocked `read_events()` calls
/// will return.
///
/// This call will otherwise not block on the server socket if it is empty, and return
/// an io error `WouldBlock` in such cases.
pub fn prepare_read(&self) -> Option<ReadEventsGuard> {
let ret = unsafe {
match self.handle.wlevq {
Some(evtq) => ffi_dispatch!(
WAYLAND_CLIENT_HANDLE,
wl_display_prepare_read_queue,
self.display,
evtq
),
None => ffi_dispatch!(WAYLAND_CLIENT_HANDLE, wl_display_prepare_read, self.display),
}
};
if ret >= 0 {
Some(ReadEventsGuard {
display: self.display,
})
} else {
None
}
}
}
impl Deref for EventQueue {
type Target = EventQueueHandle;
fn deref(&self) -> &EventQueueHandle {
&*self.handle
}
}
impl DerefMut for EventQueue {
fn deref_mut(&mut self) -> &mut EventQueueHandle {
&mut *self.handle
}
}
/// A guard over a read intention.
///
/// See `EventQueue::prepare_read()` for details about its use.
pub struct ReadEventsGuard {
display: *mut wl_display,
}
impl ReadEventsGuard {
/// Read events
///
/// Reads events from the server socket. If other `ReadEventsGuard` exists, will block
/// until they are all consumed or destroyed.
pub fn read_events(self) -> IoResult<i32> {
let ret = unsafe { ffi_dispatch!(WAYLAND_CLIENT_HANDLE, wl_display_read_events, self.display) };
// Don't run destructor that would cancel the read intent
::std::mem::forget(self);
if ret >= 0 {
Ok(ret)
} else {
Err(IoError::last_os_error())
}
}
| state | identifier_name |
horovod_patches.py | 32',
initializer=tf.constant_initializer(0), trainable=False)
#global_step = tf.train.get_or_create_global_step()
loss = tf.get_collection(tf.GraphKeys.LOSSES)
if len(loss) == 0:
raise RuntimeError("No losses found in losses collection")
loss = tf.add_n(loss, name="loss")
if len(tf.get_collection(tf.GraphKeys.SUMMARIES)) > 0:
# Add any summaries client stored in SUMMARIES
summary_tensor = tf.summary.merge([[tf.summary.tensor_summary("loss", loss)] +
tf.get_collection(tf.GraphKeys.SUMMARIES)])
else:
summary_tensor = tf.summary.tensor_summary("loss", loss)
train_objective = loss
regularizers = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
if len(regularizers) > 0:
regularization_loss = tf.add_n(regularizers, name="regularization_loss")
if train_params.regularization_weight is not None:
train_objective = train_objective + regularization_loss * train_params.regularization_weight
else:
train_objective = train_objective + regularization_loss
else:
regularization_loss = None
opt = train_params.opt.get()
opt = hvd.DistributedOptimizer(opt)
#train_opt = opt.apply_gradients(opt.compute_gradients(train_objective), global_step=global_step)
train_opt = opt.minimize(train_objective, global_step=global_step)
if train_params.ema is not None:
ema = tf.train.ExponentialMovingAverage(decay=train_params.ema)
ema_op = ema.apply(tf.trainable_variables())
with tf.control_dependencies([train_opt]):
# Run the old training op, then update the averages.
train_opt = tf.group(ema_op)
else:
ema = None
# Any collections starting with "monitor" are also added as summaries
to_monitor = {}
for col in tf.get_default_graph().get_all_collection_keys():
if col.startswith("monitor"):
v = tf.get_collection(col)
if len(v) > 0:
print("Monitoring: " + col)
v = tf.add_n(v)
to_monitor[col] = v
if len(to_monitor) > 0:
monitor_ema = tf.train.ExponentialMovingAverage(decay=train_params.monitor_ema, name="MonitorEMA",
zero_debias=True)
train_opt = tf.group(train_opt, monitor_ema.apply(list(to_monitor.values())))
summary_tensor = tf.summary.merge(
[tf.summary.scalar(col, monitor_ema.average(v)) for col, v in to_monitor.items()] +
[summary_tensor])
# EMA for the loss and what we monitoring
if train_params.loss_ema is not None:
loss_ema = tf.train.ExponentialMovingAverage(decay=train_params.loss_ema, name="LossEMA", zero_debias=True)
if regularization_loss is None:
ema_op = loss_ema.apply([loss])
train_opt = tf.group(train_opt, ema_op)
ema_var = loss_ema.average(loss)
summary_tensor = tf.summary.merge([tf.summary.scalar("training-ema/loss", ema_var), summary_tensor])
else:
to_track = [loss, train_objective, regularization_loss]
ema_op = loss_ema.apply(to_track)
train_opt = tf.group(train_opt, ema_op)
tensor_vars = [
tf.summary.scalar("training-ema/loss", loss_ema.average(loss)),
tf.summary.scalar("training-ema/objective", loss_ema.average(train_objective)),
tf.summary.scalar("training-ema/regularization-loss",
loss_ema.average(regularization_loss))
]
summary_tensor = tf.summary.merge([tensor_vars, summary_tensor])
return loss, summary_tensor, train_opt, global_step, ema
def _train(model: Model,
data: TrainingData,
checkpoint: Union[str, None],
parameter_checkpoint: Union[str, None],
save_start: bool,
train_params: trainer.TrainParams,
evaluators: List[Evaluator],
out: ModelDir,
notes=None,
dry_run=False,
start_eval=False):
print('Horovod size: ', hvd.size())
print('Horovod rank: ', hvd.rank())
print('Horovod local rank: ', hvd.local_rank())
if train_params.async_encoding:
_train_async(model, data, checkpoint, parameter_checkpoint, save_start, train_params,
evaluators, out, notes, dry_run, start_eval)
return
else:
raise NotImplementedError('Syncronous training with Horovod not supported yet')
def _train_async(model: Model,
data: TrainingData,
checkpoint: Union[str, None],
parameter_checkpoint: Union[str, None],
save_start: bool,
train_params: trainer.TrainParams,
evaluators: List[Evaluator],
out: ModelDir,
notes=None,
dry_run=False,
start_eval=False):
| is_train = tf.placeholder(tf.bool, ())
input_tensors = tf.cond(is_train, lambda: train_queue.dequeue(),
lambda: evaluator_runner.eval_queue.dequeue())
# tensorfow can't infer the shape for an unsized queue, so set it manually
for input_tensor, pl in zip(input_tensors, placeholders):
input_tensor.set_shape(pl.shape)
bcast = hvd.broadcast_global_variables(0)
print("Init model...")
config = tf.ConfigProto(allow_soft_placement=True)
config.gpu_options.allow_growth = True
config.gpu_options.visible_device_list = str(hvd.local_rank())
sess = tf.Session(config=config)
with sess.as_default():
pred = model.get_predictions_for(dict(zip(placeholders, input_tensors)))
evaluator_runner.set_input(pred)
if parameter_checkpoint is not None:
print("Restoring parameters from %s" % parameter_checkpoint)
saver = tf.train.Saver()
saver.restore(sess, checkpoint)
saver = None
print("Setting up model prediction / tf...")
all_vars = tf.global_variables()
loss, summary_tensor, train_opt, global_step, weight_ema = _build_train_ops(train_params)
# Pre-compute tensors we need at evaluations time
eval_tensors = []
for ev in evaluators:
eval_tensors.append(ev.tensors_needed(pred))
if train_params.best_weights is not None:
lst = all_vars
if weight_ema is not None:
for x in lst:
v = weight_ema.average(x)
if v is not None:
lst.append(v)
best_weight_saver = tf.train.Saver(var_list=lst, max_to_keep=1)
cur_best = None
else:
best_weight_saver = None
cur_best = None
saver = tf.train.Saver(max_to_keep=train_params.max_checkpoints_to_keep)
summary_writer = tf.summary.FileWriter(out.log_dir)
# Load or initialize the model parameters
if checkpoint is not None:
print("Restoring from checkpoint...")
saver.restore(sess, checkpoint)
print("Loaded checkpoint: " + str(sess.run(global_step)))
else:
print("Initializing parameters...")
sess.run(tf.global_variables_initializer())
sess.run(bcast)
# Make sure no bugs occur that add to the graph in the train loop, that can cause (eventuall) OOMs
tf.get_default_graph().finalize()
if dry_run:
return
on_step = sess.run(global_step)
if save_start:
# summary_writer.add_graph(sess.graph, global_step=on_step)
if hvd.rank() == 0:
trainer.save_train_start(out.dir, data, sess.run(global_step), evaluators, train_params, notes)
def enqueue_train():
try:
# feed data from the dataset iterator -> encoder -> queue
for epoch in range(train_params.num_epochs):
for batch in train.get_epoch():
feed_dict = model.encode(batch, True)
sess.run(train_enqeue, feed_dict)
except tf.errors.CancelledError:
# The queue_close operator has been called, exit gracefully
return
except Exception as e:
# Crashes the main thread with a queue exception
sess.run(train_close)
raise e
train_enqueue_thread = Thread(target=enqueue_train)
train_enqueue_thread.daemon = True # Ensure we exit the program on an excpetion
print("Start training!")
batch_time = 0
train_dict = {is_train: True}
eval_dict = {is_train: False}
try:
train_enqueue_thread.start()
for epoch in | """ Train while encoding batches on a seperate thread and storing them in a tensorflow Queue, can
be much faster then using the feed_dict approach """
train = data.get_train()
eval_datasets = data.get_eval()
loader = data.get_resource_loader()
print("Training on %d batches" % len(train))
print("Evaluation datasets: " + " ".join("%s (%d)" % (name, len(data)) for name, data in eval_datasets.items()))
# spec the model for the given datasets
model.set_inputs([train] + list(eval_datasets.values()), loader)
placeholders = model.get_placeholders()
train_queue = tf.FIFOQueue(train_params.async_encoding, [x.dtype for x in placeholders], name="train_queue")
evaluator_runner = AysncEvaluatorRunner(evaluators, model, train_params.async_encoding)
train_enqeue = train_queue.enqueue(placeholders)
train_close = train_queue.close(True)
| identifier_body |
horovod_patches.py | 32',
initializer=tf.constant_initializer(0), trainable=False)
#global_step = tf.train.get_or_create_global_step()
loss = tf.get_collection(tf.GraphKeys.LOSSES)
if len(loss) == 0:
raise RuntimeError("No losses found in losses collection")
loss = tf.add_n(loss, name="loss")
if len(tf.get_collection(tf.GraphKeys.SUMMARIES)) > 0:
# Add any summaries client stored in SUMMARIES
summary_tensor = tf.summary.merge([[tf.summary.tensor_summary("loss", loss)] +
tf.get_collection(tf.GraphKeys.SUMMARIES)])
else:
summary_tensor = tf.summary.tensor_summary("loss", loss)
train_objective = loss
regularizers = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
if len(regularizers) > 0:
regularization_loss = tf.add_n(regularizers, name="regularization_loss")
if train_params.regularization_weight is not None:
train_objective = train_objective + regularization_loss * train_params.regularization_weight
else:
train_objective = train_objective + regularization_loss
else:
regularization_loss = None
opt = train_params.opt.get()
opt = hvd.DistributedOptimizer(opt)
#train_opt = opt.apply_gradients(opt.compute_gradients(train_objective), global_step=global_step)
train_opt = opt.minimize(train_objective, global_step=global_step)
if train_params.ema is not None:
ema = tf.train.ExponentialMovingAverage(decay=train_params.ema)
ema_op = ema.apply(tf.trainable_variables())
with tf.control_dependencies([train_opt]):
# Run the old training op, then update the averages.
train_opt = tf.group(ema_op)
else:
ema = None
# Any collections starting with "monitor" are also added as summaries
to_monitor = {}
for col in tf.get_default_graph().get_all_collection_keys():
if col.startswith("monitor"):
v = tf.get_collection(col)
if len(v) > 0:
print("Monitoring: " + col)
v = tf.add_n(v)
to_monitor[col] = v
if len(to_monitor) > 0:
monitor_ema = tf.train.ExponentialMovingAverage(decay=train_params.monitor_ema, name="MonitorEMA",
zero_debias=True)
train_opt = tf.group(train_opt, monitor_ema.apply(list(to_monitor.values())))
summary_tensor = tf.summary.merge(
[tf.summary.scalar(col, monitor_ema.average(v)) for col, v in to_monitor.items()] +
[summary_tensor])
# EMA for the loss and what we monitoring
if train_params.loss_ema is not None:
loss_ema = tf.train.ExponentialMovingAverage(decay=train_params.loss_ema, name="LossEMA", zero_debias=True)
if regularization_loss is None:
ema_op = loss_ema.apply([loss])
train_opt = tf.group(train_opt, ema_op)
ema_var = loss_ema.average(loss)
summary_tensor = tf.summary.merge([tf.summary.scalar("training-ema/loss", ema_var), summary_tensor])
else:
to_track = [loss, train_objective, regularization_loss]
ema_op = loss_ema.apply(to_track)
train_opt = tf.group(train_opt, ema_op)
tensor_vars = [
tf.summary.scalar("training-ema/loss", loss_ema.average(loss)),
tf.summary.scalar("training-ema/objective", loss_ema.average(train_objective)),
tf.summary.scalar("training-ema/regularization-loss",
loss_ema.average(regularization_loss))
]
summary_tensor = tf.summary.merge([tensor_vars, summary_tensor])
return loss, summary_tensor, train_opt, global_step, ema
def _train(model: Model,
data: TrainingData,
checkpoint: Union[str, None],
parameter_checkpoint: Union[str, None],
save_start: bool,
train_params: trainer.TrainParams,
evaluators: List[Evaluator],
out: ModelDir,
notes=None,
dry_run=False,
start_eval=False):
print('Horovod size: ', hvd.size())
print('Horovod rank: ', hvd.rank())
print('Horovod local rank: ', hvd.local_rank())
if train_params.async_encoding:
_train_async(model, data, checkpoint, parameter_checkpoint, save_start, train_params,
evaluators, out, notes, dry_run, start_eval)
return
else:
raise NotImplementedError('Syncronous training with Horovod not supported yet')
def _train_async(model: Model,
data: TrainingData,
checkpoint: Union[str, None],
parameter_checkpoint: Union[str, None],
save_start: bool,
train_params: trainer.TrainParams,
evaluators: List[Evaluator],
out: ModelDir,
notes=None,
dry_run=False,
start_eval=False):
""" Train while encoding batches on a seperate thread and storing them in a tensorflow Queue, can
be much faster then using the feed_dict approach """
train = data.get_train()
eval_datasets = data.get_eval()
loader = data.get_resource_loader()
print("Training on %d batches" % len(train))
print("Evaluation datasets: " + " ".join("%s (%d)" % (name, len(data)) for name, data in eval_datasets.items()))
# spec the model for the given datasets
model.set_inputs([train] + list(eval_datasets.values()), loader)
placeholders = model.get_placeholders()
train_queue = tf.FIFOQueue(train_params.async_encoding, [x.dtype for x in placeholders], name="train_queue")
evaluator_runner = AysncEvaluatorRunner(evaluators, model, train_params.async_encoding)
train_enqeue = train_queue.enqueue(placeholders)
train_close = train_queue.close(True)
is_train = tf.placeholder(tf.bool, ())
input_tensors = tf.cond(is_train, lambda: train_queue.dequeue(),
lambda: evaluator_runner.eval_queue.dequeue())
# tensorfow can't infer the shape for an unsized queue, so set it manually
for input_tensor, pl in zip(input_tensors, placeholders):
input_tensor.set_shape(pl.shape)
bcast = hvd.broadcast_global_variables(0)
print("Init model...")
config = tf.ConfigProto(allow_soft_placement=True)
config.gpu_options.allow_growth = True
config.gpu_options.visible_device_list = str(hvd.local_rank())
sess = tf.Session(config=config)
with sess.as_default():
pred = model.get_predictions_for(dict(zip(placeholders, input_tensors)))
evaluator_runner.set_input(pred)
if parameter_checkpoint is not None:
print("Restoring parameters from %s" % parameter_checkpoint)
saver = tf.train.Saver()
saver.restore(sess, checkpoint)
saver = None
print("Setting up model prediction / tf...")
all_vars = tf.global_variables()
loss, summary_tensor, train_opt, global_step, weight_ema = _build_train_ops(train_params)
# Pre-compute tensors we need at evaluations time
eval_tensors = []
for ev in evaluators:
eval_tensors.append(ev.tensors_needed(pred))
if train_params.best_weights is not None:
lst = all_vars
if weight_ema is not None:
for x in lst:
v = weight_ema.average(x)
if v is not None:
lst.append(v)
best_weight_saver = tf.train.Saver(var_list=lst, max_to_keep=1)
cur_best = None
else:
best_weight_saver = None
cur_best = None
saver = tf.train.Saver(max_to_keep=train_params.max_checkpoints_to_keep)
summary_writer = tf.summary.FileWriter(out.log_dir)
# Load or initialize the model parameters
if checkpoint is not None:
print("Restoring from checkpoint...")
saver.restore(sess, checkpoint)
print("Loaded checkpoint: " + str(sess.run(global_step)))
else: | tf.get_default_graph().finalize()
if dry_run:
return
on_step = sess.run(global_step)
if save_start:
# summary_writer.add_graph(sess.graph, global_step=on_step)
if hvd.rank() == 0:
trainer.save_train_start(out.dir, data, sess.run(global_step), evaluators, train_params, notes)
def enqueue_train():
try:
# feed data from the dataset iterator -> encoder -> queue
for epoch in range(train_params.num_epochs):
for batch in train.get_epoch():
feed_dict = model.encode(batch, True)
sess.run(train_enqeue, feed_dict)
except tf.errors.CancelledError:
# The queue_close operator has been called, exit gracefully
return
except Exception as e:
# Crashes the main thread with a queue exception
sess.run(train_close)
raise e
train_enqueue_thread = Thread(target=enqueue_train)
train_enqueue_thread.daemon = True # Ensure we exit the program on an excpetion
print("Start training!")
batch_time = 0
train_dict = {is_train: True}
eval_dict = {is_train: False}
try:
train_enqueue_thread.start()
for epoch in range | print("Initializing parameters...")
sess.run(tf.global_variables_initializer())
sess.run(bcast)
# Make sure no bugs occur that add to the graph in the train loop, that can cause (eventuall) OOMs | random_line_split |
horovod_patches.py | (train_params):
""" Bulid ops we should run during training, including learning, EMA, and summary ops"""
global_step = tf.get_variable('global_step', shape=[], dtype='int32',
initializer=tf.constant_initializer(0), trainable=False)
#global_step = tf.train.get_or_create_global_step()
loss = tf.get_collection(tf.GraphKeys.LOSSES)
if len(loss) == 0:
raise RuntimeError("No losses found in losses collection")
loss = tf.add_n(loss, name="loss")
if len(tf.get_collection(tf.GraphKeys.SUMMARIES)) > 0:
# Add any summaries client stored in SUMMARIES
summary_tensor = tf.summary.merge([[tf.summary.tensor_summary("loss", loss)] +
tf.get_collection(tf.GraphKeys.SUMMARIES)])
else:
summary_tensor = tf.summary.tensor_summary("loss", loss)
train_objective = loss
regularizers = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
if len(regularizers) > 0:
regularization_loss = tf.add_n(regularizers, name="regularization_loss")
if train_params.regularization_weight is not None:
train_objective = train_objective + regularization_loss * train_params.regularization_weight
else:
train_objective = train_objective + regularization_loss
else:
regularization_loss = None
opt = train_params.opt.get()
opt = hvd.DistributedOptimizer(opt)
#train_opt = opt.apply_gradients(opt.compute_gradients(train_objective), global_step=global_step)
train_opt = opt.minimize(train_objective, global_step=global_step)
if train_params.ema is not None:
ema = tf.train.ExponentialMovingAverage(decay=train_params.ema)
ema_op = ema.apply(tf.trainable_variables())
with tf.control_dependencies([train_opt]):
# Run the old training op, then update the averages.
train_opt = tf.group(ema_op)
else:
ema = None
# Any collections starting with "monitor" are also added as summaries
to_monitor = {}
for col in tf.get_default_graph().get_all_collection_keys():
if col.startswith("monitor"):
v = tf.get_collection(col)
if len(v) > 0:
print("Monitoring: " + col)
v = tf.add_n(v)
to_monitor[col] = v
if len(to_monitor) > 0:
monitor_ema = tf.train.ExponentialMovingAverage(decay=train_params.monitor_ema, name="MonitorEMA",
zero_debias=True)
train_opt = tf.group(train_opt, monitor_ema.apply(list(to_monitor.values())))
summary_tensor = tf.summary.merge(
[tf.summary.scalar(col, monitor_ema.average(v)) for col, v in to_monitor.items()] +
[summary_tensor])
# EMA for the loss and what we monitoring
if train_params.loss_ema is not None:
loss_ema = tf.train.ExponentialMovingAverage(decay=train_params.loss_ema, name="LossEMA", zero_debias=True)
if regularization_loss is None:
ema_op = loss_ema.apply([loss])
train_opt = tf.group(train_opt, ema_op)
ema_var = loss_ema.average(loss)
summary_tensor = tf.summary.merge([tf.summary.scalar("training-ema/loss", ema_var), summary_tensor])
else:
to_track = [loss, train_objective, regularization_loss]
ema_op = loss_ema.apply(to_track)
train_opt = tf.group(train_opt, ema_op)
tensor_vars = [
tf.summary.scalar("training-ema/loss", loss_ema.average(loss)),
tf.summary.scalar("training-ema/objective", loss_ema.average(train_objective)),
tf.summary.scalar("training-ema/regularization-loss",
loss_ema.average(regularization_loss))
]
summary_tensor = tf.summary.merge([tensor_vars, summary_tensor])
return loss, summary_tensor, train_opt, global_step, ema
def _train(model: Model,
data: TrainingData,
checkpoint: Union[str, None],
parameter_checkpoint: Union[str, None],
save_start: bool,
train_params: trainer.TrainParams,
evaluators: List[Evaluator],
out: ModelDir,
notes=None,
dry_run=False,
start_eval=False):
print('Horovod size: ', hvd.size())
print('Horovod rank: ', hvd.rank())
print('Horovod local rank: ', hvd.local_rank())
if train_params.async_encoding:
_train_async(model, data, checkpoint, parameter_checkpoint, save_start, train_params,
evaluators, out, notes, dry_run, start_eval)
return
else:
raise NotImplementedError('Syncronous training with Horovod not supported yet')
def _train_async(model: Model,
data: TrainingData,
checkpoint: Union[str, None],
parameter_checkpoint: Union[str, None],
save_start: bool,
train_params: trainer.TrainParams,
evaluators: List[Evaluator],
out: ModelDir,
notes=None,
dry_run=False,
start_eval=False):
""" Train while encoding batches on a seperate thread and storing them in a tensorflow Queue, can
be much faster then using the feed_dict approach """
train = data.get_train()
eval_datasets = data.get_eval()
loader = data.get_resource_loader()
print("Training on %d batches" % len(train))
print("Evaluation datasets: " + " ".join("%s (%d)" % (name, len(data)) for name, data in eval_datasets.items()))
# spec the model for the given datasets
model.set_inputs([train] + list(eval_datasets.values()), loader)
placeholders = model.get_placeholders()
train_queue = tf.FIFOQueue(train_params.async_encoding, [x.dtype for x in placeholders], name="train_queue")
evaluator_runner = AysncEvaluatorRunner(evaluators, model, train_params.async_encoding)
train_enqeue = train_queue.enqueue(placeholders)
train_close = train_queue.close(True)
is_train = tf.placeholder(tf.bool, ())
input_tensors = tf.cond(is_train, lambda: train_queue.dequeue(),
lambda: evaluator_runner.eval_queue.dequeue())
# tensorfow can't infer the shape for an unsized queue, so set it manually
for input_tensor, pl in zip(input_tensors, placeholders):
input_tensor.set_shape(pl.shape)
bcast = hvd.broadcast_global_variables(0)
print("Init model...")
config = tf.ConfigProto(allow_soft_placement=True)
config.gpu_options.allow_growth = True
config.gpu_options.visible_device_list = str(hvd.local_rank())
sess = tf.Session(config=config)
with sess.as_default():
pred = model.get_predictions_for(dict(zip(placeholders, input_tensors)))
evaluator_runner.set_input(pred)
if parameter_checkpoint is not None:
print("Restoring parameters from %s" % parameter_checkpoint)
saver = tf.train.Saver()
saver.restore(sess, checkpoint)
saver = None
print("Setting up model prediction / tf...")
all_vars = tf.global_variables()
loss, summary_tensor, train_opt, global_step, weight_ema = _build_train_ops(train_params)
# Pre-compute tensors we need at evaluations time
eval_tensors = []
for ev in evaluators:
eval_tensors.append(ev.tensors_needed(pred))
if train_params.best_weights is not None:
lst = all_vars
if weight_ema is not None:
for x in lst:
v = weight_ema.average(x)
if v is not None:
lst.append(v)
best_weight_saver = tf.train.Saver(var_list=lst, max_to_keep=1)
cur_best = None
else:
best_weight_saver = None
cur_best = None
saver = tf.train.Saver(max_to_keep=train_params.max_checkpoints_to_keep)
summary_writer = tf.summary.FileWriter(out.log_dir)
# Load or initialize the model parameters
if checkpoint is not None:
print("Restoring from checkpoint...")
saver.restore(sess, checkpoint)
print("Loaded checkpoint: " + str(sess.run(global_step)))
else:
print("Initializing parameters...")
sess.run(tf.global_variables_initializer())
sess.run(bcast)
# Make sure no bugs occur that add to the graph in the train loop, that can cause (eventuall) OOMs
tf.get_default_graph().finalize()
if dry_run:
return
on_step = sess.run(global_step)
if save_start:
# summary_writer.add_graph(sess.graph, global_step=on_step)
if hvd.rank() == 0:
trainer.save_train_start(out.dir, data, sess.run(global_step), evaluators, train_params, notes)
def enqueue_train():
try:
# feed data from the dataset iterator -> encoder -> queue
for epoch in range(train_params.num_epochs):
for batch in train.get_epoch():
feed_dict = model.encode(batch, True)
sess.run(train_enqeue, feed_dict)
except tf.errors.CancelledError:
# The queue_close operator has been called, exit gracefully
return
except Exception as e:
# Crashes the main thread with a queue exception
sess.run(train_close)
raise e
train_enqueue_thread = Thread(target=enqueue_train)
train_enqueue_thread.daemon = True # Ensure we exit the program on an excpetion
print(" | _build_train_ops | identifier_name |
|
horovod_patches.py | (list(to_monitor.values())))
summary_tensor = tf.summary.merge(
[tf.summary.scalar(col, monitor_ema.average(v)) for col, v in to_monitor.items()] +
[summary_tensor])
# EMA for the loss and what we monitoring
if train_params.loss_ema is not None:
loss_ema = tf.train.ExponentialMovingAverage(decay=train_params.loss_ema, name="LossEMA", zero_debias=True)
if regularization_loss is None:
ema_op = loss_ema.apply([loss])
train_opt = tf.group(train_opt, ema_op)
ema_var = loss_ema.average(loss)
summary_tensor = tf.summary.merge([tf.summary.scalar("training-ema/loss", ema_var), summary_tensor])
else:
to_track = [loss, train_objective, regularization_loss]
ema_op = loss_ema.apply(to_track)
train_opt = tf.group(train_opt, ema_op)
tensor_vars = [
tf.summary.scalar("training-ema/loss", loss_ema.average(loss)),
tf.summary.scalar("training-ema/objective", loss_ema.average(train_objective)),
tf.summary.scalar("training-ema/regularization-loss",
loss_ema.average(regularization_loss))
]
summary_tensor = tf.summary.merge([tensor_vars, summary_tensor])
return loss, summary_tensor, train_opt, global_step, ema
def _train(model: Model,
data: TrainingData,
checkpoint: Union[str, None],
parameter_checkpoint: Union[str, None],
save_start: bool,
train_params: trainer.TrainParams,
evaluators: List[Evaluator],
out: ModelDir,
notes=None,
dry_run=False,
start_eval=False):
print('Horovod size: ', hvd.size())
print('Horovod rank: ', hvd.rank())
print('Horovod local rank: ', hvd.local_rank())
if train_params.async_encoding:
_train_async(model, data, checkpoint, parameter_checkpoint, save_start, train_params,
evaluators, out, notes, dry_run, start_eval)
return
else:
raise NotImplementedError('Syncronous training with Horovod not supported yet')
def _train_async(model: Model,
data: TrainingData,
checkpoint: Union[str, None],
parameter_checkpoint: Union[str, None],
save_start: bool,
train_params: trainer.TrainParams,
evaluators: List[Evaluator],
out: ModelDir,
notes=None,
dry_run=False,
start_eval=False):
""" Train while encoding batches on a seperate thread and storing them in a tensorflow Queue, can
be much faster then using the feed_dict approach """
train = data.get_train()
eval_datasets = data.get_eval()
loader = data.get_resource_loader()
print("Training on %d batches" % len(train))
print("Evaluation datasets: " + " ".join("%s (%d)" % (name, len(data)) for name, data in eval_datasets.items()))
# spec the model for the given datasets
model.set_inputs([train] + list(eval_datasets.values()), loader)
placeholders = model.get_placeholders()
train_queue = tf.FIFOQueue(train_params.async_encoding, [x.dtype for x in placeholders], name="train_queue")
evaluator_runner = AysncEvaluatorRunner(evaluators, model, train_params.async_encoding)
train_enqeue = train_queue.enqueue(placeholders)
train_close = train_queue.close(True)
is_train = tf.placeholder(tf.bool, ())
input_tensors = tf.cond(is_train, lambda: train_queue.dequeue(),
lambda: evaluator_runner.eval_queue.dequeue())
# tensorfow can't infer the shape for an unsized queue, so set it manually
for input_tensor, pl in zip(input_tensors, placeholders):
input_tensor.set_shape(pl.shape)
bcast = hvd.broadcast_global_variables(0)
print("Init model...")
config = tf.ConfigProto(allow_soft_placement=True)
config.gpu_options.allow_growth = True
config.gpu_options.visible_device_list = str(hvd.local_rank())
sess = tf.Session(config=config)
with sess.as_default():
pred = model.get_predictions_for(dict(zip(placeholders, input_tensors)))
evaluator_runner.set_input(pred)
if parameter_checkpoint is not None:
print("Restoring parameters from %s" % parameter_checkpoint)
saver = tf.train.Saver()
saver.restore(sess, checkpoint)
saver = None
print("Setting up model prediction / tf...")
all_vars = tf.global_variables()
loss, summary_tensor, train_opt, global_step, weight_ema = _build_train_ops(train_params)
# Pre-compute tensors we need at evaluations time
eval_tensors = []
for ev in evaluators:
eval_tensors.append(ev.tensors_needed(pred))
if train_params.best_weights is not None:
lst = all_vars
if weight_ema is not None:
for x in lst:
v = weight_ema.average(x)
if v is not None:
lst.append(v)
best_weight_saver = tf.train.Saver(var_list=lst, max_to_keep=1)
cur_best = None
else:
best_weight_saver = None
cur_best = None
saver = tf.train.Saver(max_to_keep=train_params.max_checkpoints_to_keep)
summary_writer = tf.summary.FileWriter(out.log_dir)
# Load or initialize the model parameters
if checkpoint is not None:
print("Restoring from checkpoint...")
saver.restore(sess, checkpoint)
print("Loaded checkpoint: " + str(sess.run(global_step)))
else:
print("Initializing parameters...")
sess.run(tf.global_variables_initializer())
sess.run(bcast)
# Make sure no bugs occur that add to the graph in the train loop, that can cause (eventuall) OOMs
tf.get_default_graph().finalize()
if dry_run:
return
on_step = sess.run(global_step)
if save_start:
# summary_writer.add_graph(sess.graph, global_step=on_step)
if hvd.rank() == 0:
trainer.save_train_start(out.dir, data, sess.run(global_step), evaluators, train_params, notes)
def enqueue_train():
try:
# feed data from the dataset iterator -> encoder -> queue
for epoch in range(train_params.num_epochs):
for batch in train.get_epoch():
feed_dict = model.encode(batch, True)
sess.run(train_enqeue, feed_dict)
except tf.errors.CancelledError:
# The queue_close operator has been called, exit gracefully
return
except Exception as e:
# Crashes the main thread with a queue exception
sess.run(train_close)
raise e
train_enqueue_thread = Thread(target=enqueue_train)
train_enqueue_thread.daemon = True # Ensure we exit the program on an excpetion
print("Start training!")
batch_time = 0
train_dict = {is_train: True}
eval_dict = {is_train: False}
try:
train_enqueue_thread.start()
for epoch in range(train_params.num_epochs):
for batch_ix in range(len(train)):
t0 = time.perf_counter()
on_step = sess.run(global_step) + 1
get_summary = on_step % train_params.log_period == 0
if get_summary:
summary, _, batch_loss = sess.run([summary_tensor, train_opt, loss], feed_dict=train_dict)
else:
summary = None
_, batch_loss = sess.run([train_opt, loss], feed_dict=train_dict)
if np.isnan(batch_loss):
raise RuntimeError("NaN loss!")
batch_time += time.perf_counter() - t0
if hvd.rank() == 0:
if summary is not None:
print("on epoch=%d batch=%d step=%d, time=%.3f" %
(epoch, batch_ix + 1, on_step, batch_time))
summary_writer.add_summary(
tf.Summary(value=[tf.Summary.Value(tag="time", simple_value=batch_time)]), on_step)
summary_writer.add_summary(summary, on_step)
batch_time = 0
# occasional saving
if hvd.rank() == 0:
if on_step % train_params.save_period == 0:
print("Checkpointing")
saver.save(sess, join(out.save_dir, "checkpoint-" + str(on_step)), global_step=global_step)
# Occasional evaluation
if (on_step % train_params.eval_period == 0) or start_eval:
print("Running evaluation...")
start_eval = False
t0 = time.perf_counter()
for name, data in eval_datasets.items():
n_samples = train_params.eval_samples.get(name)
evaluation = evaluator_runner.run_evaluators(sess, data, name, n_samples, eval_dict)
if hvd.rank() == 0:
for s in evaluation.to_summaries(name + "-"):
summary_writer.add_summary(s, on_step)
# Maybe save as the best weights
if train_params.best_weights is not None and name == train_params.best_weights[0]:
val = evaluation.scalars[train_params.best_weights[1]]
if cur_best is None or val > cur_best:
| print("Save weights with current best weights (%s vs %.5f)" % (
"None" if cur_best is None else ("%.5f" % cur_best), val))
best_weight_saver.save(sess, join(out.best_weight_dir, "best"), global_step=global_step)
cur_best = val | conditional_block |
|
main.rs | Class {
Colony,
Scout,
Fighter
}
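/// A single ship; its stats are fixed per class in `Ship::new`.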
struct Ship {
class: ShipClass,
health: u32,
speed: u32,
damage: u32
}
impl Ship {
fn new(class: ShipClass) -> Ship {
match class {
ShipClass::Colony => Ship { class: class, health: 100, speed: 10, damage: 10},
ShipClass::Scout => Ship { class: class, health: 50, speed: 30, damage: 5},
ShipClass::Fighter => Ship { class: class, health: 150, speed: 10, damage: 100}
}
}
}
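/// Where a fleet currently is; the inline comments sketch the data each variant is meant to carry.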
enum FleetLocation {
Moving, // from -> to, turns/needed_turns
Somewhere // exact location
}
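/// A group of ships bucketed by class, together with the fleet's current location.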
struct Fleet {
ships: HashMap<ShipClass, Vec<Ship>>,
location: FleetLocation,
}
impl Fleet {
fn new() -> Fleet {
Fleet { ships: HashMap::new(), location: FleetLocation::Somewhere }
}
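/// Adds a ship, creating the bucket for its class on first use.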
fn add(&mut self, ship: Ship) {
match self.ships.get(&ship.class) {
None => { self.ships.insert(ship.class, Vec::new()); },
Some(_) => ()
}
self.ships.get_mut(&ship.class).unwrap().push(ship);
}
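/// Moves every ship out of `fleet` and into this one.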
fn merge(&mut self, fleet: Box<Fleet>) {
for (ship_class, ships) in fleet.ships.into_iter() {
for ship in ships.into_iter() {
self.add(ship);
}
}
}
fn size(&self) -> u32 {
let mut count = 0u32;
for ships in self.ships.values() {
count += ships.len() as u32;
}
count
}
fn count(&self, class: ShipClass) -> u32 {
match self.ships.get(&class) {
Some(ships) => ships.len() as u32,
None => 0u32
}
}
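/// Moves `number` ships of the given class into `fleet`, failing if this fleet does not have that many.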
fn move_to(
&mut self, fleet: &mut Fleet, number: u32, class: ShipClass
) -> Result<(), &'static str> {
if number > self.count(class) {
return Err("There are not enough ships");
}
let ships = match self.ships.get_mut(&class) {
Some(s) => s,
None => return Ok(())
};
for _ in 0..number {
fleet.add(ships.pop().unwrap());
}
Ok(())
}
}
#[derive(Eq, PartialEq, Hash, Copy)]
struct PlayerId(pub u32);
struct Player {
id: PlayerId,
resources: Resources
}
impl PartialEq for Player {
fn eq(&self, other : &Player) -> bool {
self.id == other.id
}
}
impl Player {
fn new(id: u32) -> Player {
Player {
id: PlayerId(id),
resources: Resources::new()
}
}
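/// Collects the production of every building in systems owned by this player into the player's resources.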
fn gather_resources(&mut self, stars: &Starmap) -> () {
let id = self.id;
let owned_systems = stars.systems.values().filter(|s| s.borrow().owner == Some(id));
let owned_buildings = owned_systems.filter_map(|s| s.borrow().building.clone());
let owned_production = owned_buildings.map(|b| b.produce());
self.resources = owned_production.fold(self.resources.clone(), |r, p| r + p );
}
fn create_players(num : u32) -> Vec<Player> {
(0..num).map(|i| Player::new(i)).collect()
}
}
#[derive(Eq, PartialEq, Hash, Copy, PartialOrd, Ord)]
struct SolarSystemId(pub u32);
struct SolarSystem {
id: SolarSystemId,
building: Option<Building>,
owner: Option<PlayerId>,
fleet: Option<Fleet>,
location: (i32, i32)
}
impl <H: Hasher + Writer> Hash<H> for SolarSystem {
fn hash(&self, state: &mut H) {
self.id.hash(state);
}
}
impl SolarSystem {
fn new(id: SolarSystemId) -> SolarSystem {
SolarSystem { id: id, building: None, owner: None, fleet: None, location: (0, 0) }
}
fn set_homeworld(&mut self, player: PlayerId) {
self.owner = Some(player);
self.build(BuildingClass::GoldMine);
}
fn build(&mut self, class: BuildingClass) {
let building = Building::new(class);
self.building = Some(building);
}
fn clear(&mut self) {
self.building = None;
self.owner = None;
self.fleet = None;
}
fn display(&self, drawer: &mut RenderDrawer) {
drawer.set_draw_color(Color::RGB(0, 0, 255));
let (x,y) = self.location;
let display_x = x.to_i32().unwrap()*80;
let display_y = y.to_i32().unwrap()*80;
drawer.draw_rect(&Rect::new(display_x, display_y, 50, 50));
}
fn display_location(&self) -> (i32, i32) {
let (x,y) = self.location;
(x*80, y*80)
}
fn center(&self) -> (i32, i32) {
let (x,y) = self.display_location();
(x+25, y+25)
}
}
type SolarSystemCell = Rc<RefCell<SolarSystem>>;
pub struct Starmap {
systems: HashMap<SolarSystemId, SolarSystemCell>,
neighbours: HashSet<SystemsConnection>
}
impl Starmap {
fn new() -> Starmap {
Starmap { systems: HashMap::new(), neighbours: HashSet::new() }
}
fn generate_universe() -> Starmap {
// 0 - 1 - 2
// | / |
// 3 4 5
// | / |
// 6 - 7 - 8
let neighbours = [
(0,1), (1,2), (2,5),
(5,8), (7,8), (6,7),
(3,6), (0,3), (4,6),
(2,4)
];
let mut starmap = Starmap::new();
for n in 0..9 {
let system = Rc::new(RefCell::new(SolarSystem::new(SolarSystemId(n))));
system.borrow_mut().location = ((n % 3).to_i32().unwrap(), (n / 3).to_i32().unwrap());
starmap.systems.insert(SolarSystemId(n), system);
}
for neighbour in neighbours.iter() {
let system_a = starmap.systems[SolarSystemId(neighbour.0)].clone();
let system_b = starmap.systems[SolarSystemId(neighbour.1)].clone();
starmap.neighbours.insert(SystemsConnection::new(system_a, system_b));
}
starmap
}
fn set_homeworlds(&mut self, players: &[PlayerId]) -> Result<(), &'static str> {
if players.len() != 2 {
return Err("Only two players are possible now!");
}
self.systems.get_mut(&SolarSystemId(0)).unwrap().borrow_mut().set_homeworld(players[0]);
self.systems.get_mut(&SolarSystemId(8)).unwrap().borrow_mut().set_homeworld(players[1]);
Ok(())
}
fn display(&self, drawer: &mut RenderDrawer) {
for system in self.systems.values() {
system.borrow().display(drawer);
}
for connection in self.neighbours.iter() {
connection.display(drawer);
}
}
}
struct SystemsConnection {
first: SolarSystemCell,
second: SolarSystemCell
}
impl <H: Hasher + Writer> Hash<H> for SystemsConnection {
fn hash(&self, state: &mut H) {
self.first.borrow().hash(state);
self.second.borrow().hash(state);
}
}
impl PartialEq for SystemsConnection {
fn eq(&self, other : &SystemsConnection) -> bool {
self.first.borrow().id == other.first.borrow().id &&
self.second.borrow().id == other.second.borrow().id
}
}
impl Eq for SystemsConnection {}
impl SystemsConnection {
fn new(system_a: SolarSystemCell, system_b: SolarSystemCell) -> SystemsConnection {
if system_a.borrow().id < system_b.borrow().id {
SystemsConnection{first: system_a, second: system_b}
} else {
SystemsConnection{first: system_b, second: system_a}
}
}
fn display(&self, drawer: &mut RenderDrawer) | {
let (x1, y1) = self.first.borrow().center();
let (x2, y2) = self.second.borrow().center();
drawer.draw_line(
Point{x: x1, y: y1},
Point{x: x2, y: y2});
} | identifier_body |
|
main.rs | Class {
Farm,
Laboratory,
GoldMine
}
#[derive(Debug, Clone)]
struct Building {
class: BuildingClass,
production: Resources
}
impl Building {
fn new(class: BuildingClass) -> Building {
let production = match class {
BuildingClass::Farm => Resources { food: 5, technology: 0, gold: 0 },
BuildingClass::Laboratory => Resources { food: 0, technology: 2, gold: 0 },
BuildingClass::GoldMine => Resources { food: 0, technology: 0, gold: 8 }
};
Building { class: class, production: production }
}
fn produce(&self) -> Resources {
self.production.clone()
}
}
#[derive(Hash, Eq, PartialEq, Copy)]
enum ShipClass {
Colony,
Scout,
Fighter
}
struct Ship {
class: ShipClass,
health: u32,
speed: u32,
damage: u32
}
impl Ship {
fn new(class: ShipClass) -> Ship {
match class {
ShipClass::Colony => Ship { class: class, health: 100, speed: 10, damage: 10},
ShipClass::Scout => Ship { class: class, health: 50, speed: 30, damage: 5},
ShipClass::Fighter => Ship { class: class, health: 150, speed: 10, damage: 100}
}
}
}
enum FleetLocation {
Moving, // from -> to, turns/needed_turns
Somewhere // exact location
}
struct Fleet {
ships: HashMap<ShipClass, Vec<Ship>>,
location: FleetLocation,
}
impl Fleet{
fn new() -> Fleet {
Fleet { ships: HashMap::new(), location: FleetLocation::Somewhere }
} | }
self.ships.get_mut(&ship.class).unwrap().push(ship);
}
fn merge(&mut self, fleet: Box<Fleet>) {
for (ship_class, ships) in fleet.ships.into_iter() {
for ship in ships.into_iter() {
self.add(ship);
}
}
}
fn size(&self) -> u32 {
let mut count = 0u32;
for ships in self.ships.values() {
count += ships.len() as u32;
}
count
}
fn count(&self, class: ShipClass) -> u32 {
match self.ships.get(&class) {
Some(ships) => ships.len() as u32,
None => 0u32
}
}
fn move_to(
&mut self, fleet: &mut Fleet, number: u32, class: ShipClass
) -> Result<(), &'static str> {
if number > self.count(class) {
return Err("There are no enough ships");
}
let ships = match self.ships.get_mut(&class) {
Some(s) => s,
None => return Ok(())
};
for _ in (0..number) {
fleet.add(ships.pop().unwrap());
}
Ok(())
}
}
#[derive(Eq, PartialEq, Hash, Copy)]
struct PlayerId(pub u32);
struct Player {
id: PlayerId,
resources: Resources
}
impl PartialEq for Player {
fn eq(&self, other : &Player) -> bool {
self.id == other.id
}
}
impl Player {
fn new(id: u32) -> Player {
Player {
id: PlayerId(id),
resources: Resources::new()
}
}
fn gather_resources(&mut self, stars: &Starmap) -> () {
let id = self.id;
let owned_systems = stars.systems.values().filter(|s| s.borrow().owner == Some(id));
let owned_buildings = owned_systems.filter_map(|s| s.borrow().building.clone());
let owned_production = owned_buildings.map(|b| b.produce());
self.resources = owned_production.fold(self.resources.clone(), |r, p| r + p );
}
fn create_players(num : u32) -> Vec<Player> {
(0..num).map(|i| Player::new(i)).collect()
}
}
#[derive(Eq, PartialEq, Hash, Copy, PartialOrd, Ord)]
struct SolarSystemId(pub u32);
struct SolarSystem {
id: SolarSystemId,
building: Option<Building>,
owner: Option<PlayerId>,
fleet: Option<Fleet>,
location: (i32, i32)
}
impl <H: Hasher + Writer> Hash<H> for SolarSystem {
fn hash(&self, state: &mut H) {
self.id.hash(state);
}
}
impl SolarSystem {
fn new(id: SolarSystemId) -> SolarSystem {
SolarSystem { id: id, building: None, owner: None, fleet: None, location: (0, 0) }
}
fn set_homeworld(&mut self, player: PlayerId) {
self.owner = Some(player);
self.build(BuildingClass::GoldMine);
}
fn build(&mut self, class: BuildingClass) {
let building = Building::new(class);
self.building = Some(building);
}
fn clear(&mut self) {
self.building = None;
self.owner = None;
self.fleet = None;
}
fn display(&self, drawer: &mut RenderDrawer) {
drawer.set_draw_color(Color::RGB(0, 0, 255));
let (x,y) = self.location;
let display_x = x.to_i32().unwrap()*80;
let display_y = y.to_i32().unwrap()*80;
drawer.draw_rect(&Rect::new(display_x, display_y, 50, 50));
}
fn display_location(&self) -> (i32, i32) {
let (x,y) = self.location;
(x*80, y*80)
}
fn center(&self) -> (i32, i32) {
let (x,y) = self.display_location();
(x+25, y+25)
}
}
type SolarSystemCell = Rc<RefCell<SolarSystem>>;
pub struct Starmap {
systems: HashMap<SolarSystemId, SolarSystemCell>,
neighbours: HashSet<SystemsConnection>
}
impl Starmap {
fn new() -> Starmap {
Starmap { systems: HashMap::new(), neighbours: HashSet::new() }
}
fn generate_universe() -> Starmap {
// 0 - 1 - 2
// | / |
// 3 4 5
// | / |
// 6 - 7 - 8
let neighbours = [
(0,1), (1,2), (2,5),
(5,8), (7,8), (6,7),
(3,6), (0,3), (4,6),
(2,4)
];
let mut starmap = Starmap::new();
for n in 0..9 {
let system = Rc::new(RefCell::new(SolarSystem::new(SolarSystemId(n))));
system.borrow_mut().location = ((n % 3).to_i32().unwrap(), (n / 3).to_i32().unwrap());
starmap.systems.insert(SolarSystemId(n), system);
}
for neighbour in neighbours.iter() {
let system_a = starmap.systems[SolarSystemId(neighbour.0)].clone();
let system_b = starmap.systems[SolarSystemId(neighbour.1)].clone();
starmap.neighbours.insert(SystemsConnection::new(system_a, system_b));
}
starmap
}
fn set_homeworlds(&mut self, players: &[PlayerId]) -> Result<(), &'static str> {
if players.len() != 2 {
return Err("Only two players are possible now!");
}
self.systems.get_mut(&SolarSystemId(0)).unwrap().borrow_mut().set_homeworld(players[0]);
self.systems.get_mut(&SolarSystemId(8)).unwrap().borrow_mut().set_homeworld(players[1]);
Ok(())
}
fn display(&self, drawer: &mut RenderDrawer) {
for system in self.systems.values() {
system.borrow().display(drawer);
}
for connection in self.neighbours.iter() {
connection.display(drawer);
}
}
}
struct SystemsConnection {
first: SolarSystemCell,
second: SolarSystemCell
}
impl <H: Hasher + Writer> Hash<H> for SystemsConnection {
fn hash(&self, state: &mut H) {
self.first.borrow().hash(state);
self.second.borrow().hash(state);
}
}
impl PartialEq for SystemsConnection {
fn eq(&self, other : &SystemsConnection) -> bool {
self.first.borrow().id == other.first.borrow().id &&
|
fn add(&mut self, ship: Ship) {
match self.ships.get(&ship.class) {
None => { self.ships.insert(ship.class, Vec::new()); },
Some(_) => () | random_line_split |
main.rs | (self, other:Resources) -> Resources {
Resources {
food: self.food + other.food,
technology: self.technology + other.technology,
gold: self.gold + other.gold
}
}
}
impl Resources {
fn new() -> Resources {
Resources{food: 0, technology: 0, gold: 0}
}
}
#[derive(Debug, Clone)]
enum BuildingClass {
Farm,
Laboratory,
GoldMine
}
#[derive(Debug, Clone)]
struct Building {
class: BuildingClass,
production: Resources
}
impl Building {
fn new(class: BuildingClass) -> Building {
let production = match class {
BuildingClass::Farm => Resources { food: 5, technology: 0, gold: 0 },
BuildingClass::Laboratory => Resources { food: 0, technology: 2, gold: 0 },
BuildingClass::GoldMine => Resources { food: 0, technology: 0, gold: 8 }
};
Building { class: class, production: production }
}
fn produce(&self) -> Resources {
self.production.clone()
}
}
#[derive(Hash, Eq, PartialEq, Copy)]
enum ShipClass {
Colony,
Scout,
Fighter
}
struct Ship {
class: ShipClass,
health: u32,
speed: u32,
damage: u32
}
impl Ship {
fn new(class: ShipClass) -> Ship {
match class {
ShipClass::Colony => Ship { class: class, health: 100, speed: 10, damage: 10},
ShipClass::Scout => Ship { class: class, health: 50, speed: 30, damage: 5},
ShipClass::Fighter => Ship { class: class, health: 150, speed: 10, damage: 100}
}
}
}
enum FleetLocation {
Moving, // from -> to, turns/needed_turns
Somewhere // exact location
}
struct Fleet {
ships: HashMap<ShipClass, Vec<Ship>>,
location: FleetLocation,
}
impl Fleet{
fn new() -> Fleet {
Fleet { ships: HashMap::new(), location: FleetLocation::Somewhere }
}
fn add(&mut self, ship: Ship) {
match self.ships.get(&ship.class) {
None => { self.ships.insert(ship.class, Vec::new()); },
Some(_) => ()
}
self.ships.get_mut(&ship.class).unwrap().push(ship);
}
fn merge(&mut self, fleet: Box<Fleet>) {
for (ship_class, ships) in fleet.ships.into_iter() {
for ship in ships.into_iter() {
self.add(ship);
}
}
}
fn size(&self) -> u32 {
let mut count = 0u32;
for ships in self.ships.values() {
count += ships.len() as u32;
}
count
}
fn count(&self, class: ShipClass) -> u32 {
match self.ships.get(&class) {
Some(ships) => ships.len() as u32,
None => 0u32
}
}
fn move_to(
&mut self, fleet: &mut Fleet, number: u32, class: ShipClass
) -> Result<(), &'static str> {
if number > self.count(class) {
return Err("There are no enough ships");
}
let ships = match self.ships.get_mut(&class) {
Some(s) => s,
None => return Ok(())
};
for _ in (0..number) {
fleet.add(ships.pop().unwrap());
}
Ok(())
}
}
#[derive(Eq, PartialEq, Hash, Copy)]
struct PlayerId(pub u32);
struct Player {
id: PlayerId,
resources: Resources
}
impl PartialEq for Player {
fn eq(&self, other : &Player) -> bool {
self.id == other.id
}
}
impl Player {
fn new(id: u32) -> Player {
Player {
id: PlayerId(id),
resources: Resources::new()
}
}
fn gather_resources(&mut self, stars: &Starmap) -> () {
let id = self.id;
let owned_systems = stars.systems.values().filter(|s| s.borrow().owner == Some(id));
let owned_buildings = owned_systems.filter_map(|s| s.borrow().building.clone());
let owned_production = owned_buildings.map(|b| b.produce());
self.resources = owned_production.fold(self.resources.clone(), |r, p| r + p );
}
fn create_players(num : u32) -> Vec<Player> {
(0..num).map(|i| Player::new(i)).collect()
}
}
#[derive(Eq, PartialEq, Hash, Copy, PartialOrd, Ord)]
struct SolarSystemId(pub u32);
struct SolarSystem {
id: SolarSystemId,
building: Option<Building>,
owner: Option<PlayerId>,
fleet: Option<Fleet>,
location: (i32, i32)
}
impl <H: Hasher + Writer> Hash<H> for SolarSystem {
fn hash(&self, state: &mut H) {
self.id.hash(state);
}
}
impl SolarSystem {
fn new(id: SolarSystemId) -> SolarSystem {
SolarSystem { id: id, building: None, owner: None, fleet: None, location: (0, 0) }
}
fn set_homeworld(&mut self, player: PlayerId) {
self.owner = Some(player);
self.build(BuildingClass::GoldMine);
}
fn build(&mut self, class: BuildingClass) {
let building = Building::new(class);
self.building = Some(building);
}
fn clear(&mut self) {
self.building = None;
self.owner = None;
self.fleet = None;
}
fn display(&self, drawer: &mut RenderDrawer) {
drawer.set_draw_color(Color::RGB(0, 0, 255));
let (x,y) = self.location;
let display_x = x.to_i32().unwrap()*80;
let display_y = y.to_i32().unwrap()*80;
drawer.draw_rect(&Rect::new(display_x, display_y, 50, 50));
}
fn display_location(&self) -> (i32, i32) {
let (x,y) = self.location;
(x*80, y*80)
}
fn center(&self) -> (i32, i32) {
let (x,y) = self.display_location();
(x+25, y+25)
}
}
type SolarSystemCell = Rc<RefCell<SolarSystem>>;
pub struct Starmap {
systems: HashMap<SolarSystemId, SolarSystemCell>,
neighbours: HashSet<SystemsConnection>
}
impl Starmap {
fn new() -> Starmap {
Starmap { systems: HashMap::new(), neighbours: HashSet::new() }
}
fn generate_universe() -> Starmap {
// 0 - 1 - 2
// | / |
// 3 4 5
// | / |
// 6 - 7 - 8
let neighbours = [
(0,1), (1,2), (2,5),
(5,8), (7,8), (6,7),
(3,6), (0,3), (4,6),
(2,4)
];
let mut starmap = Starmap::new();
for n in 0..9 {
let system = Rc::new(RefCell::new(SolarSystem::new(SolarSystemId(n))));
system.borrow_mut().location = ((n % 3).to_i32().unwrap(), (n / 3).to_i32().unwrap());
starmap.systems.insert(SolarSystemId(n), system);
}
for neighbour in neighbours.iter() {
let system_a = starmap.systems[SolarSystemId(neighbour.0)].clone();
let system_b = starmap.systems[SolarSystemId(neighbour.1)].clone();
starmap.neighbours.insert(SystemsConnection::new(system_a, system_b));
}
starmap
}
fn set_homeworlds(&mut self, players: &[PlayerId]) -> Result<(), &'static str> {
if players.len() != 2 {
return Err("Only two players are possible now!");
}
self.systems.get_mut(&SolarSystemId(0)).unwrap().borrow_mut().set_homeworld(players[0]);
self.systems.get_mut(&SolarSystemId(8)).unwrap().borrow_mut().set_homeworld(players[1]);
Ok(())
}
fn display(&self, drawer: &mut RenderDrawer) {
for system in self.systems.values() {
system.borrow().display(drawer);
}
for connection in self.neighbours.iter() {
connection.display(drawer);
}
}
}
struct SystemsConnection {
first: SolarSystemCell,
second: Solar | add | identifier_name |
|
class_loader_context.go | = android.InstallPathToOnDevicePath(ctx, installPath.(android.InstallPath))
}
// Nested class loader context shouldn't have conditional part (it is allowed only at the top level).
for ver, _ := range nestedClcMap {
if ver != AnySdkVersion {
clcStr, _ := ComputeClassLoaderContext(nestedClcMap)
return fmt.Errorf("nested class loader context shouldn't have conditional part: %s", clcStr)
}
}
subcontexts := nestedClcMap[AnySdkVersion]
// If the library with this name is already present as one of the unconditional top-level
// components, do not re-add it.
for _, clc := range clcMap[sdkVer] {
if clc.Name == lib {
return nil
}
}
clcMap[sdkVer] = append(clcMap[sdkVer], &ClassLoaderContext{
Name: lib,
Host: hostPath,
Device: devicePath,
Subcontexts: subcontexts,
})
return nil
}
// Add class loader context for the given SDK version. Don't fail on unknown build/install paths, as
// libraries with unknown paths still need to be processed by manifest_fixer (which doesn't care
// about paths). For the subset of libraries that are used in dexpreopt, their build/install paths
// are validated later before CLC is used (in validateClassLoaderContext).
func (clcMap ClassLoaderContextMap) AddContext(ctx android.ModuleInstallPathContext, sdkVer int,
lib string, hostPath, installPath android.Path, nestedClcMap ClassLoaderContextMap) {
err := clcMap.addContext(ctx, sdkVer, lib, hostPath, installPath, nestedClcMap)
if err != nil {
ctx.ModuleErrorf(err.Error())
}
}
// Merge the other class loader context map into this one, do not override existing entries.
// The implicitRootLib parameter is the name of the library for which the other class loader
// context map was constructed. If the implicitRootLib is itself a <uses-library>, it should be
// already present in the class loader context (with the other context as its subcontext) -- in
// that case do not re-add the other context. Otherwise add the other context at the top-level.
func (clcMap ClassLoaderContextMap) AddContextMap(otherClcMap ClassLoaderContextMap, implicitRootLib string) {
if otherClcMap == nil {
return
}
// If the implicit root of the merged map is already present as one of top-level subtrees, do
// not merge it second time.
for _, clc := range clcMap[AnySdkVersion] {
if clc.Name == implicitRootLib {
return
}
}
for sdkVer, otherClcs := range otherClcMap {
for _, otherClc := range otherClcs {
alreadyHave := false
for _, clc := range clcMap[sdkVer] {
if clc.Name == otherClc.Name {
alreadyHave = true
break
}
}
if !alreadyHave {
clcMap[sdkVer] = append(clcMap[sdkVer], otherClc)
}
}
}
}
// Returns top-level libraries in the CLC (conditional CLC, i.e. compatibility libraries are not
// included). This is the list of libraries that should be in the <uses-library> tags in the
// manifest. Some of them may be present in the source manifest, others are added by manifest_fixer.
func (clcMap ClassLoaderContextMap) UsesLibs() (ulibs []string) {
if clcMap != nil {
clcs := clcMap[AnySdkVersion]
ulibs = make([]string, 0, len(clcs))
for _, clc := range clcs {
ulibs = append(ulibs, clc.Name)
}
}
return ulibs
}
// Now that the full unconditional context is known, reconstruct conditional context.
// Apply filters for individual libraries, mirroring what the PackageManager does when it
// constructs class loader context on device.
//
// TODO(b/132357300): remove "android.hidl.manager" and "android.hidl.base" for non-system apps.
//
func fixClassLoaderContext(clcMap ClassLoaderContextMap) {
usesLibs := clcMap.UsesLibs()
for sdkVer, clcs := range clcMap {
if sdkVer == AnySdkVersion {
continue
}
fixedClcs := []*ClassLoaderContext{}
for _, clc := range clcs {
if android.InList(clc.Name, usesLibs) {
// skip compatibility libraries that are already included in unconditional context
} else if clc.Name == AndroidTestMock && !android.InList("android.test.runner", usesLibs) {
// android.test.mock is only needed as a compatibility library (in conditional class
// loader context) if android.test.runner is used, otherwise skip it
} else {
fixedClcs = append(fixedClcs, clc)
}
clcMap[sdkVer] = fixedClcs
}
}
}
// Return true if all build/install library paths are valid (including recursive subcontexts),
// otherwise return false. A build path is valid if it's not nil. An install path is valid if it's
// not equal to a special "error" value.
func validateClassLoaderContext(clcMap ClassLoaderContextMap) (bool, error) {
for sdkVer, clcs := range clcMap {
if valid, err := validateClassLoaderContextRec(sdkVer, clcs); !valid || err != nil {
return valid, err
}
}
return true, nil
}
// Helper function for validateClassLoaderContext() that handles recursion.
func validateClassLoaderContextRec(sdkVer int, clcs []*ClassLoaderContext) (bool, error) {
for _, clc := range clcs {
if clc.Host == nil || clc.Device == UnknownInstallLibraryPath {
if sdkVer == AnySdkVersion {
// Return error if dexpreopt doesn't know paths to one of the <uses-library>
// dependencies. In the future we may need to relax this and just disable dexpreopt.
if clc.Host == nil {
return false, fmt.Errorf("invalid build path for <uses-library> \"%s\"", clc.Name)
} else {
return false, fmt.Errorf("invalid install path for <uses-library> \"%s\"", clc.Name)
}
} else {
// No error for compatibility libraries, as Soong doesn't know if they are needed
// (this depends on the targetSdkVersion in the manifest), but the CLC is invalid.
return false, nil
}
}
if valid, err := validateClassLoaderContextRec(sdkVer, clc.Subcontexts); !valid || err != nil {
return valid, err
}
}
return true, nil
}
// Return the class loader context as a string, and a slice of build paths for all dependencies.
// Perform a depth-first preorder traversal of the class loader context tree for each SDK version.
// Return the resulting string and a slice of on-host build paths to all library dependencies.
func ComputeClassLoaderContext(clcMap ClassLoaderContextMap) (clcStr string, paths android.Paths) {
// CLC for different SDK versions should come in specific order that agrees with PackageManager.
// Since PackageManager processes SDK versions in ascending order and prepends compatibility
// libraries at the front, the required order is descending, except for AnySdkVersion that has
// numerically the largest order, but must be the last one. Example of correct order: [30, 29,
// 28, AnySdkVersion]. There are Soong tests to ensure that someone doesn't change this by
// accident, but there is no way to guard against changes in the PackageManager, except for
// grepping logcat on the first boot for absence of the following messages:
//
// `logcat | grep -E 'ClassLoaderContext [a-z ]+ mismatch`
//
versions := make([]int, 0, len(clcMap))
for ver, _ := range clcMap {
if ver != AnySdkVersion {
versions = append(versions, ver)
}
}
sort.Sort(sort.Reverse(sort.IntSlice(versions))) // descending order
versions = append(versions, AnySdkVersion)
for _, sdkVer := range versions {
sdkVerStr := fmt.Sprintf("%d", sdkVer)
if sdkVer == AnySdkVersion {
sdkVerStr = "any" // a special keyword that means any SDK version
}
hostClc, targetClc, hostPaths := computeClassLoaderContextRec(clcMap[sdkVer])
if hostPaths != nil {
clcStr += fmt.Sprintf(" --host-context-for-sdk %s %s", sdkVerStr, hostClc)
clcStr += fmt.Sprintf(" --target-context-for-sdk %s %s", sdkVerStr, targetClc)
}
paths = append(paths, hostPaths...)
}
return clcStr, android.FirstUniquePaths(paths)
}
// Helper function for ComputeClassLoaderContext() that handles recursion.
func computeClassLoaderContextRec(clcs []*ClassLoader | Context) (string, string, an | identifier_name |
|
class_loader_context.go | Clcs := range otherClcMap {
for _, otherClc := range otherClcs {
alreadyHave := false
for _, clc := range clcMap[sdkVer] {
if clc.Name == otherClc.Name {
alreadyHave = true
break
}
}
if !alreadyHave {
clcMap[sdkVer] = append(clcMap[sdkVer], otherClc)
}
}
}
}
// Returns top-level libraries in the CLC (conditional CLC, i.e. compatibility libraries are not
// included). This is the list of libraries that should be in the <uses-library> tags in the
// manifest. Some of them may be present in the source manifest, others are added by manifest_fixer.
func (clcMap ClassLoaderContextMap) UsesLibs() (ulibs []string) {
if clcMap != nil {
clcs := clcMap[AnySdkVersion]
ulibs = make([]string, 0, len(clcs))
for _, clc := range clcs {
ulibs = append(ulibs, clc.Name)
}
}
return ulibs
}
// Now that the full unconditional context is known, reconstruct conditional context.
// Apply filters for individual libraries, mirroring what the PackageManager does when it
// constructs class loader context on device.
//
// TODO(b/132357300): remove "android.hidl.manager" and "android.hidl.base" for non-system apps.
//
func fixClassLoaderContext(clcMap ClassLoaderContextMap) {
usesLibs := clcMap.UsesLibs()
for sdkVer, clcs := range clcMap {
if sdkVer == AnySdkVersion {
continue
}
fixedClcs := []*ClassLoaderContext{}
for _, clc := range clcs {
if android.InList(clc.Name, usesLibs) {
// skip compatibility libraries that are already included in unconditional context
} else if clc.Name == AndroidTestMock && !android.InList("android.test.runner", usesLibs) {
// android.test.mock is only needed as a compatibility library (in conditional class
// loader context) if android.test.runner is used, otherwise skip it
} else {
fixedClcs = append(fixedClcs, clc)
}
clcMap[sdkVer] = fixedClcs
}
}
}
// Return true if all build/install library paths are valid (including recursive subcontexts),
// otherwise return false. A build path is valid if it's not nil. An install path is valid if it's
// not equal to a special "error" value.
func validateClassLoaderContext(clcMap ClassLoaderContextMap) (bool, error) {
for sdkVer, clcs := range clcMap {
if valid, err := validateClassLoaderContextRec(sdkVer, clcs); !valid || err != nil {
return valid, err
}
}
return true, nil
}
// Helper function for validateClassLoaderContext() that handles recursion.
func validateClassLoaderContextRec(sdkVer int, clcs []*ClassLoaderContext) (bool, error) {
for _, clc := range clcs {
if clc.Host == nil || clc.Device == UnknownInstallLibraryPath {
if sdkVer == AnySdkVersion {
// Return error if dexpreopt doesn't know paths to one of the <uses-library>
// dependencies. In the future we may need to relax this and just disable dexpreopt.
if clc.Host == nil {
return false, fmt.Errorf("invalid build path for <uses-library> \"%s\"", clc.Name)
} else {
return false, fmt.Errorf("invalid install path for <uses-library> \"%s\"", clc.Name)
}
} else {
// No error for compatibility libraries, as Soong doesn't know if they are needed
// (this depends on the targetSdkVersion in the manifest), but the CLC is invalid.
return false, nil
}
}
if valid, err := validateClassLoaderContextRec(sdkVer, clc.Subcontexts); !valid || err != nil {
return valid, err
}
}
return true, nil
}
// Return the class loader context as a string, and a slice of build paths for all dependencies.
// Perform a depth-first preorder traversal of the class loader context tree for each SDK version.
// Return the resulting string and a slice of on-host build paths to all library dependencies.
func ComputeClassLoaderContext(clcMap ClassLoaderContextMap) (clcStr string, paths android.Paths) {
// CLC for different SDK versions should come in specific order that agrees with PackageManager.
// Since PackageManager processes SDK versions in ascending order and prepends compatibility
// libraries at the front, the required order is descending, except for AnySdkVersion that has
// numerically the largest order, but must be the last one. Example of correct order: [30, 29,
// 28, AnySdkVersion]. There are Soong tests to ensure that someone doesn't change this by
// accident, but there is no way to guard against changes in the PackageManager, except for
// grepping logcat on the first boot for absence of the following messages:
//
// `logcat | grep -E 'ClassLoaderContext [a-z ]+ mismatch`
//
versions := make([]int, 0, len(clcMap))
for ver, _ := range clcMap {
if ver != AnySdkVersion {
versions = append(versions, ver)
}
}
sort.Sort(sort.Reverse(sort.IntSlice(versions))) // descending order
versions = append(versions, AnySdkVersion)
for _, sdkVer := range versions {
sdkVerStr := fmt.Sprintf("%d", sdkVer)
if sdkVer == AnySdkVersion {
sdkVerStr = "any" // a special keyword that means any SDK version
}
hostClc, targetClc, hostPaths := computeClassLoaderContextRec(clcMap[sdkVer])
if hostPaths != nil {
clcStr += fmt.Sprintf(" --host-context-for-sdk %s %s", sdkVerStr, hostClc)
clcStr += fmt.Sprintf(" --target-context-for-sdk %s %s", sdkVerStr, targetClc)
}
paths = append(paths, hostPaths...)
}
return clcStr, android.FirstUniquePaths(paths)
}
// Helper function for ComputeClassLoaderContext() that handles recursion.
func computeClassLoaderContextRec(clcs []*ClassLoaderContext) (string, string, android.Paths) {
var paths android.Paths
var clcsHost, clcsTarget []string
for _, clc := range clcs {
subClcHost, subClcTarget, subPaths := computeClassLoaderContextRec(clc.Subcontexts)
if subPaths != nil {
subClcHost = "{" + subClcHost + "}"
subClcTarget = "{" + subClcTarget + "}"
}
clcsHost = append(clcsHost, "PCL["+clc.Host.String()+"]"+subClcHost)
clcsTarget = append(clcsTarget, "PCL["+clc.Device+"]"+subClcTarget)
paths = append(paths, clc.Host)
paths = append(paths, subPaths...)
}
clcHost := strings.Join(clcsHost, "#")
clcTarget := strings.Join(clcsTarget, "#")
return clcHost, clcTarget, paths
}
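// Illustrative sketch (added for clarity, not part of the original source): for a single
// unconditional <uses-library> "foo" that itself depends on "bar", the helper above produces
// strings of roughly this shape (all paths are hypothetical; several top-level entries would be
// joined with '#'):
//
//   host:   PCL[out/soong/.../foo.jar]{PCL[out/soong/.../bar.jar]}
//   target: PCL[/system/framework/foo.jar]{PCL[/system/framework/bar.jar]}
//
// ComputeClassLoaderContext then wraps them as
// "--host-context-for-sdk any <host> --target-context-for-sdk any <target>".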
// Class loader contexts that come from Make via JSON dexpreopt.config. JSON CLC representation is
// the same as Soong representation except that SDK versions and paths are represented with strings.
type jsonClassLoaderContext struct {
Name string
Host string
Device string
Subcontexts []*jsonClassLoaderContext
}
// A map from SDK version (represented with a JSON string) to JSON CLCs.
type jsonClassLoaderContextMap map[string][]*jsonClassLoaderContext
// Convert JSON CLC map to Soong representation.
func fromJsonClassLoaderContext(ctx android.PathContext, jClcMap jsonClassLoaderContextMap) ClassLoaderContextMap {
clcMap := make(ClassLoaderContextMap)
for sdkVerStr, clcs := range jClcMap {
sdkVer, ok := strconv.Atoi(sdkVerStr)
if ok != nil {
if sdkVerStr == "any" {
sdkVer = AnySdkVersion
} else {
android.ReportPathErrorf(ctx, "failed to parse SDK version in dexpreopt.config: '%s'", sdkVerStr)
}
}
clcMap[sdkVer] = fromJsonClassLoaderContextRec(ctx, clcs)
}
return clcMap
}
// Recursive helper for fromJsonClassLoaderContext.
func fromJsonClassLoaderContextRec(ctx android.PathContext, jClcs []*jsonClassLoaderContext) []*ClassLoaderContext {
clcs := make([]*ClassLoaderContext, 0, len(jC | lcs))
for _, clc := range jClcs {
clcs = append(clcs, &ClassLoaderContext{
Name: clc.Name,
Host: constructPath(ctx, clc.Host),
Device: clc.Device,
Subcontexts: fromJsonClassLoaderContextRec(ctx, clc.Subcontexts),
})
}
return clcs
}
// Convert Soong CLC map to JSON representatio | identifier_body |
|
class_loader_context.go | same thing, but computed in two different ways.
//
// It is important that build-time and run-time CLCs coincide, otherwise the AOT-compiled code
// created by dexpreopt will be rejected. In order to check the equality of build-time and
// run-time CLCs, the dex2oat compiler records build-time CLC in the *.odex files (in the
// "classpath" field of the OAT file header). To find the stored CLC, use the following command:
// `oatdump --oat-file=<FILE> | grep '^classpath = '`.
//
// Mismatch between build-time and run-time CLC is reported in logcat during boot (search with
// `logcat | grep -E 'ClassLoaderContext [a-z ]+ mismatch'`. Mismatch is bad for performance, as it
// forces the library/app to either be dexopted, or to run without any optimizations (e.g. the app's
// code may need to be extracted in memory from the APK, a very expensive operation).
//
// A <uses-library> can be either optional or required. From dexpreopt standpoint, required library
// must be present at build time (its absence is a build error). An optional library may be either
// present or absent at build time: if present, it will be added to the CLC, passed to dex2oat and
// recorded in the *.odex file; otherwise, if the library is absent, it will be skipped and not
// added to CLC. If there is a mismatch between built-time and run-time status (optional library is
// present in one case, but not the other), then the build-time and run-time CLCs won't match and
// the compiled code will be rejected. It is unknown at build time if the library will be present at
// runtime, therefore either including or excluding it may cause CLC mismatch.
//
// 4. Manifest fixer
// -----------------
//
// Sometimes <uses-library> tags are missing from the source manifest of a library/app. This may
// happen for example if one of the transitive dependencies of the library/app starts using another
// <uses-library>, and the library/app's manifest isn't updated to include it.
//
// Soong can compute some of the missing <uses-library> tags for a given library/app automatically
// as SDK libraries in the transitive dependency closure of the library/app. The closure is needed
// because a library/app may depend on a static library that may in turn depend on an SDK library,
// (possibly transitively via another library).
//
// Not all <uses-library> tags can be computed in this way, because some of the <uses-library>
// dependencies are not SDK libraries, or they are not reachable via transitive dependency closure.
// But when possible, allowing Soong to calculate the manifest entries is less prone to errors and
// simplifies maintenance. For example, consider a situation when many apps use some static library
// that adds a new <uses-library> dependency -- all the apps will have to be updated. That is
// difficult to maintain.
//
// Soong computes the libraries that need to be in the manifest as the top-level libraries in CLC.
// These libraries are passed to the manifest_fixer.
//
// All libraries added to the manifest should be "shared" libraries, so that PackageManager can look
// up their dependencies and reconstruct the nested subcontexts at runtime. There is no build check
// to ensure this, it is an assumption.
//
// 5. Build system support
// -----------------------
//
// In order to construct CLC for dexpreopt and manifest_fixer, the build system needs to know all
// <uses-library> dependencies of the dexpreopted library/app (including transitive dependencies).
// For each <uses-library> dependency it needs to know the following information:
//
// - the real name of the <uses-library> (it may be different from the module name)
// - build-time (on host) and run-time (on device) paths to the DEX jar file of the library
// - whether this library is optional or required
// - all <uses-library> dependencies
//
// Since the build system doesn't have access to the manifest contents (it cannot read manifests at
// the time of build rule generation), it is necessary to copy this information to the Android.bp
// and Android.mk files. For blueprints, the relevant properties are `uses_libs` and
// `optional_uses_libs`. For makefiles, relevant variables are `LOCAL_USES_LIBRARIES` and
// `LOCAL_OPTIONAL_USES_LIBRARIES`. It is preferable to avoid specifying these properties explicilty
// when they can be computed automatically by Soong (as the transitive closure of SDK library
// dependencies).
//
// Some of the Java libraries that are used as <uses-library> are not SDK libraries (they are
// defined as `java_library` rather than `java_sdk_library` in the Android.bp files). In order for
// the build system to handle them automatically like SDK libraries, it is possible to set a
// property `provides_uses_lib` or variable `LOCAL_PROVIDES_USES_LIBRARY` on the blueprint/makefile
// module of such library. This property can also be used to specify real library name in cases
// when it differs from the module name.
//
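// A minimal illustrative sketch of the blueprint side (module and library names here are
// hypothetical and the exact property syntax is an assumption, not taken from this file):
//
//   android_app {
//       name: "MyApp",
//       uses_libs: ["org.apache.http.legacy"],
//       optional_uses_libs: ["android.test.base"],
//   }
//
//   java_library {
//       name: "my-impl-lib",
//       provides_uses_lib: "com.example.sharedlib",
//   }
//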
// Because the information from the manifests has to be duplicated in the Android.bp/Android.mk
// files, there is a danger that it may get out of sync. To guard against that, the build system
// generates a rule that checks the metadata in the build files against the contents of a manifest
// (verify_uses_libraries). The manifest can be available as a source file, or as part of a prebuilt
// APK. Note that reading the manifests at the Ninja stage of the build is fine, unlike the build
// rule generation phase.
//
// ClassLoaderContext is a structure that represents CLC.
//
type ClassLoaderContext struct {
// The name of the library.
Name string
// On-host build path to the library dex file (used in dex2oat argument --class-loader-context).
Host android.Path
// On-device install path (used in dex2oat argument --stored-class-loader-context).
Device string
// Nested sub-CLC for dependencies.
Subcontexts []*ClassLoaderContext
}
// ClassLoaderContextMap is a map from SDK version to CLC. There is a special entry with key
// AnySdkVersion that stores unconditional CLC that is added regardless of the target SDK version.
//
// Conditional CLC is for compatibility libraries which didn't exist prior to a certain SDK version
// (say, N), but classes in them were in the bootclasspath jars, etc., and in version N they have
// been separated into a standalone <uses-library>. Compatibility libraries should only be in the
// CLC if the library/app that uses them has `targetSdkVersion` less than N in the manifest.
//
// Currently only apps (but not libraries) use conditional CLC.
//
// Target SDK version information is unavailable to the build system at rule generation time, so
// the build system doesn't know whether conditional CLC is needed for a given app or not. So it
// generates a build rule that includes conditional CLC for all versions, extracts the target SDK
// version from the manifest, and filters the CLCs based on that version. Exact final CLC that is
// passed to dex2oat is unknown to the build system, and gets known only at Ninja stage.
//
type ClassLoaderContextMap map[int][]*ClassLoaderContext
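// Illustrative sketch (added for clarity, not part of the original source): for an app that uses a
// hypothetical library "foo" unconditionally, the map could conceptually hold
//
//   29:            android.hidl.manager-V1.0-java, android.hidl.base-V1.0-java
//   AnySdkVersion: foo
//
// meaning the HIDL compatibility libraries are only included when the app's targetSdkVersion is
// below 29, while "foo" is included for any target SDK version.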
// Compatibility libraries. Some are optional, and some are required: this is the default that
// affects how they are handled by the Soong logic that automatically adds implicit SDK libraries
// to the manifest_fixer, but an explicit `uses_libs`/`optional_uses_libs` can override this.
var OrgApacheHttpLegacy = "org.apache.http.legacy"
var AndroidTestBase = "android.test.base"
var AndroidTestMock = "android.test.mock"
var AndroidHidlBase = "android.hidl.base-V1.0-java"
var AndroidHidlManager = "android.hidl.manager-V1.0-java"
// Compatibility libraries grouped by version/optionality (for convenience, to avoid repeating the
// same lists in multiple places).
var OptionalCompatUsesLibs28 = []string{
OrgApacheHttpLegacy,
}
var OptionalCompatUsesLibs30 = []string{
AndroidTestBase,
AndroidTestMock,
}
var CompatUsesLibs29 = []string{ | AndroidHidlBase,
}
var OptionalCompatUsesLibs = append(android.CopyOf(OptionalCompatUsesLibs28), OptionalCompatUsesLibs30...)
var CompatUsesLibs = android.CopyOf(CompatUsesLibs29)
const UnknownInstallLibraryPath = "error"
// AnySdkVersion means that the class loader context is needed regardless of the targetSdkVersion
// of the app. The numeric value affects the key order in the map and, as a result, the order of
// arguments passed to construct_context.py (high value means that the unconditional context goes
// last). We use the conventional "current" SDK level (10000), but any big number would do as well.
const AnySdkVersion int = android.FutureApiLevelInt
// Add class loader context for the given library to the map entry for the given SDK version.
func (clcMap ClassLoaderContextMap) addContext(ctx android.ModuleInstallPathContext, sdkVer int, lib string,
hostPath, installPath android | AndroidHidlManager, | random_line_split |
class_loader_context.go | is a map from SDK version to CLC. There is a special entry with key
// AnySdkVersion that stores unconditional CLC that is added regardless of the target SDK version.
//
// Conditional CLC is for compatibility libraries which didn't exist prior to a certain SDK version
// (say, N), but classes in them were in the bootclasspath jars, etc., and in version N they have
// been separated into a standalone <uses-library>. Compatibility libraries should only be in the
// CLC if the library/app that uses them has `targetSdkVersion` less than N in the manifest.
//
// Currently only apps (but not libraries) use conditional CLC.
//
// Target SDK version information is unavailable to the build system at rule generation time, so
// the build system doesn't know whether conditional CLC is needed for a given app or not. So it
// generates a build rule that includes conditional CLC for all versions, extracts the target SDK
// version from the manifest, and filters the CLCs based on that version. Exact final CLC that is
// passed to dex2oat is unknown to the build system, and gets known only at Ninja stage.
//
type ClassLoaderContextMap map[int][]*ClassLoaderContext
// Compatibility libraries. Some are optional, and some are required: this is the default that
// affects how they are handled by the Soong logic that automatically adds implicit SDK libraries
// to the manifest_fixer, but an explicit `uses_libs`/`optional_uses_libs` can override this.
var OrgApacheHttpLegacy = "org.apache.http.legacy"
var AndroidTestBase = "android.test.base"
var AndroidTestMock = "android.test.mock"
var AndroidHidlBase = "android.hidl.base-V1.0-java"
var AndroidHidlManager = "android.hidl.manager-V1.0-java"
// Compatibility libraries grouped by version/optionality (for convenience, to avoid repeating the
// same lists in multiple places).
var OptionalCompatUsesLibs28 = []string{
OrgApacheHttpLegacy,
}
var OptionalCompatUsesLibs30 = []string{
AndroidTestBase,
AndroidTestMock,
}
var CompatUsesLibs29 = []string{
AndroidHidlManager,
AndroidHidlBase,
}
var OptionalCompatUsesLibs = append(android.CopyOf(OptionalCompatUsesLibs28), OptionalCompatUsesLibs30...)
var CompatUsesLibs = android.CopyOf(CompatUsesLibs29)
const UnknownInstallLibraryPath = "error"
// AnySdkVersion means that the class loader context is needed regardless of the targetSdkVersion
// of the app. The numeric value affects the key order in the map and, as a result, the order of
// arguments passed to construct_context.py (high value means that the unconditional context goes
// last). We use the conventional "current" SDK level (10000), but any big number would do as well.
const AnySdkVersion int = android.FutureApiLevelInt
// Add class loader context for the given library to the map entry for the given SDK version.
func (clcMap ClassLoaderContextMap) addContext(ctx android.ModuleInstallPathContext, sdkVer int, lib string,
hostPath, installPath android.Path, nestedClcMap ClassLoaderContextMap) error {
// For prebuilts, library should have the same name as the source module.
lib = android.RemoveOptionalPrebuiltPrefix(lib)
devicePath := UnknownInstallLibraryPath
if installPath == nil {
if android.InList(lib, CompatUsesLibs) || android.InList(lib, OptionalCompatUsesLibs) {
// Assume that compatibility libraries are installed in /system/framework.
installPath = android.PathForModuleInstall(ctx, "framework", lib+".jar")
} else {
// For some stub libraries the only known thing is the name of their implementation
// library, but the library itself is unavailable (missing or part of a prebuilt). In
// such cases we still need to add the library to <uses-library> tags in the manifest,
// but we cannot use it for dexpreopt.
}
}
if installPath != nil {
devicePath = android.InstallPathToOnDevicePath(ctx, installPath.(android.InstallPath))
}
// Nested class loader context shouldn't have conditional part (it is allowed only at the top level).
for ver, _ := range nestedClcMap {
if ver != AnySdkVersion {
clcStr, _ := ComputeClassLoaderContext(nestedClcMap)
return fmt.Errorf("nested class loader context shouldn't have conditional part: %s", clcStr)
}
}
subcontexts := nestedClcMap[AnySdkVersion]
// If the library with this name is already present as one of the unconditional top-level
// components, do not re-add it.
for _, clc := range clcMap[sdkVer] {
if clc.Name == lib {
return nil
}
}
clcMap[sdkVer] = append(clcMap[sdkVer], &ClassLoaderContext{
Name: lib,
Host: hostPath,
Device: devicePath,
Subcontexts: subcontexts,
})
return nil
}
// Add class loader context for the given SDK version. Don't fail on unknown build/install paths, as
// libraries with unknown paths still need to be processed by manifest_fixer (which doesn't care
// about paths). For the subset of libraries that are used in dexpreopt, their build/install paths
// are validated later before CLC is used (in validateClassLoaderContext).
func (clcMap ClassLoaderContextMap) AddContext(ctx android.ModuleInstallPathContext, sdkVer int,
lib string, hostPath, installPath android.Path, nestedClcMap ClassLoaderContextMap) {
err := clcMap.addContext(ctx, sdkVer, lib, hostPath, installPath, nestedClcMap)
if err != nil {
ctx.ModuleErrorf(err.Error())
}
}
// Merge the other class loader context map into this one, do not override existing entries.
// The implicitRootLib parameter is the name of the library for which the other class loader
// context map was constructed. If the implicitRootLib is itself a <uses-library>, it should be
// already present in the class loader context (with the other context as its subcontext) -- in
// that case do not re-add the other context. Otherwise add the other context at the top-level.
func (clcMap ClassLoaderContextMap) AddContextMap(otherClcMap ClassLoaderContextMap, implicitRootLib string) {
if otherClcMap == nil {
return
}
// If the implicit root of the merged map is already present as one of top-level subtrees, do
// not merge it second time.
for _, clc := range clcMap[AnySdkVersion] {
if clc.Name == implicitRootLib {
return
}
}
for sdkVer, otherClcs := range otherClcMap {
for _, otherClc := range otherClcs {
alreadyHave := false
for _, clc := range clcMap[sdkVer] {
if clc.Name == otherClc.Name {
alreadyHave = true
break
}
}
if !alreadyHave {
clcMap[sdkVer] = append(clcMap[sdkVer], otherClc)
}
}
}
}
// Returns top-level libraries in the CLC (conditional CLC, i.e. compatibility libraries are not
// included). This is the list of libraries that should be in the <uses-library> tags in the
// manifest. Some of them may be present in the source manifest, others are added by manifest_fixer.
func (clcMap ClassLoaderContextMap) UsesLibs() (ulibs []string) {
if clcMap != nil {
clcs := clcMap[AnySdkVersion]
ulibs = make([]string, 0, len(clcs))
for _, clc := range clcs {
ulibs = append(ulibs, clc.Name)
}
}
return ulibs
}
// Now that the full unconditional context is known, reconstruct conditional context.
// Apply filters for individual libraries, mirroring what the PackageManager does when it
// constructs class loader context on device.
//
// TODO(b/132357300): remove "android.hidl.manager" and "android.hidl.base" for non-system apps.
//
func fixClassLoaderContext(clcMap ClassLoaderContextMap) {
usesLibs := clcMap.UsesLibs()
for sdkVer, clcs := range clcMap {
if sdkVer == AnySdkVersion {
continue
} |
fixedClcs := []*ClassLoaderContext{}
for _, clc := range clcs {
if android.InList(clc.Name, usesLibs) {
// skip compatibility libraries that are already included in unconditional context
} else if clc.Name == AndroidTestMock && !android.InList("android.test.runner", usesLibs) {
// android.test.mock is only needed as a compatibility library (in conditional class
// loader context) if android.test.runner is used, otherwise skip it
} else {
fixedClcs = append(fixedClcs, clc)
}
clcMap[sdkVer] = fixedClcs
}
}
}
// Return true if all build/install library | conditional_block |
|
train_DQN_vs_overlap_rnn_no_perception_new.py | #hidden_state = model_out[1][0]
#cell_state = model_out[1][1]
hidden_state = model_out[1]
#return action, hidden_state, cell_state
return action, hidden_state
def update(self, batch_size, time_step=5):
print('batch_size = {}'.format(batch_size))
#hidden_batch, cell_batch = self.actor.init_hidden_states(batch_size=batch_size)
hidden_batch = self.actor.init_hidden_states(batch_size=batch_size)
batches = self.memory.sample(batch_size, time_step=time_step)
#print('batches.shape = {}'.format(batches.shape))
states = torch.zeros((batch_size, time_step, 256), dtype=torch.float32).to(device)
actions = torch.zeros((batch_size, time_step), dtype=torch.long).to(device)
rewards = torch.zeros((batch_size, time_step), dtype=torch.float32).to(device)
next_states = torch.zeros((batch_size, time_step, 256), dtype=torch.float32).to(device)
done = torch.zeros((batch_size, time_step), dtype=torch.float32).to(device)
for i, b in enumerate(batches):
ac, rw, do = [], [], []
previous_pose = b[0][2][0]
goal_pose = b[0][2][2]
start_pose = b[0][2][3]
for j, elem in enumerate(b):
states[i, j] = torch.tensor(elem[0], dtype=torch.float32)
ac.append(elem[1])
#rw.append(elem[2])
#print('elem[3].shape = {}'.format(elem[3].shape))
#print('next_states[i,j].shape = {}'.format(next_states[i,j].shape))
current_pose = elem[2][1]
current_reward, current_done, _ = decide_reward_and_done(previous_pose, current_pose, goal_pose, start_pose)
next_states[i, j] = torch.tensor(elem[3], dtype=torch.float32)
rw.append(current_reward)
do.append(float(current_done))
#do.append(elem[4])
actions[i] = torch.tensor(ac, dtype=torch.long)
rewards[i] = torch.tensor(rw, dtype=torch.float32)
done[i] = torch.tensor(do, dtype=torch.float32)
# Critic loss (value function loss)
## Get predicted next-state actions and Q values from target models
## gather() accumulates values at given index
#Qvals, _ = self.actor.forward(states, hidden_batch, cell_batch, batch_size=batch_size, time_step=time_step) ## batch_size x action_space
#print('Qvals = {}'.format(Qvals))
Qvals, _ = self.actor.forward(states, hidden_batch, batch_size=batch_size, time_step=time_step) ## batch_size x action_space
Qvals = Qvals.gather(1, actions[:, time_step-1].unsqueeze(1)).squeeze(1) ## batch_size
#print('actions = {}'.format(actions))
#print('Qvals = {}'.format(Qvals))
#next_Q, _ = self.critic.forward(next_states, hidden_batch, cell_batch, batch_size=batch_size, time_step=time_step)##batch_size x action_space
#print('next_Q = {}'.format(next_Q))
next_Q, _ = self.critic.forward(next_states, hidden_batch, batch_size=batch_size, time_step=time_step)##batch_size x action_space
next_Q = next_Q.max(1)[0].detach() ##batch_size
#print('next_Q = {}'.format(next_Q))
# Compute Q targets for current states (y_i)
#print('rewards.shape = {}'.format(rewards[:, time_step-1].shape))
#print('rewards = {}'.format(rewards))
#print('done = {}'.format(done))
Qprime = rewards[:, time_step-1] + self.gamma * next_Q * (1-done[:, time_step-1])
#print('Qprime = {}'.format(Qprime))
loss = F.smooth_l1_loss(Qvals, Qprime)
#print('loss = {}'.format(loss))
#assert 1==2
# update networks
self.actor_optimizer.zero_grad()
loss.backward()
for param in self.actor.parameters():
param.grad.data.clamp_(-1, 1)
self.actor_optimizer.step()
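# Note added for clarity (not in the original script): the update above implements the standard
# one-step DQN target, y = r_T + gamma * max_a' Q_critic(s'_T, a') * (1 - done_T), evaluated only
# at the last step T of each sampled sub-sequence; self.critic acts as the target network here and
# its output is detached, so gradients from the smooth L1 loss flow only into self.actor.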
agent = DQN_vs_overlap_rnn_no_perception_new(trained_model_path=None, num_actions=action_space, input_channels=2)
#agent = DQN_vs_overlap_rnn_no_perception(trained_model_path=model_weights_save_path, num_actions=action_space, input_channels=2)
rewards = []
avg_rewards = []
for i_epoch in range(actual_episodes):
## go through each point folder
for point_idx in range(0, num_startPoints):
#for point_idx in range(0, 1):
print('point_idx = {}'.format(point_idx))
## read in start img and start pose
point_image_folder = '{}/{}/point_{}'.format(base_folder, scene_name, point_idx)
point_pose_npy_file = np.load('{}/{}/point_{}_poses.npy'.format(base_folder, scene_name, point_idx))
#start_img = cv2.imread('{}/{}.png'.format(point_image_folder, point_pose_npy_file[0]['img_name']))[:, :, ::-1]
start_pose = point_pose_npy_file[0]['pose']
start_img, start_depth = get_obs(start_pose)
start_depth = start_depth.copy()
## index 0 is the left image, so right_img_idx starts from index 1
for right_img_idx in range(1, len(point_pose_npy_file)):
#for right_img_idx in range(3, 4):
#print('right_img_idx = {}'.format(right_img_idx))
current_pose = start_pose
right_img_name = point_pose_npy_file[right_img_idx]['img_name']
goal_pose = point_pose_npy_file[right_img_idx]['pose']
#goal_img = cv2.imread('{}/{}.png'.format(point_image_folder, right_img_name), 1)[:,:,::-1]
goal_img, goal_depth = get_obs(goal_pose)
goal_img, goal_depth = goal_img.copy(), goal_depth.copy()
overlapArea = genGtDenseCorrespondenseFlowMap(start_depth, goal_depth, start_pose, goal_pose)[:,:,:2]
tensor_start = torch.tensor(overlapArea, dtype=torch.float32).to(device).unsqueeze(0).permute(0, 3, 1, 2)
ft_overlapArea = perception_model.perception(tensor_start).detach().cpu().numpy().squeeze(0)
state = ft_overlapArea
episode_reward = 0
local_memory = []
extend_action_done = False
#hidden_state, cell_state = agent.actor.init_hidden_states(batch_size=1)
hidden_state = agent.actor.init_hidden_states(batch_size=1)
for i_step in range(seq_len):
#action, hidden_state, cell_state = agent.select_action(state, hidden_state, cell_state)
action, hidden_state = agent.select_action(state, hidden_state)
print('action = {}'.format(action))
## update current_pose
vz, omegay = action_table[action.item()]
#print('vz = {:.2f}, omegay = {:.2f}'.format(vz, omegay))
vx = 0.0
vx = vx * lambda_action
vz = vz * lambda_action
omegay = omegay * pi * lambda_action
#print('actual velocity = {:.2f}, {:.2f}, {:.2f}'.format(vx, vz, omegay))
previous_pose = current_pose
current_pose = update_current_pose(current_pose, vx, vz, omegay)
## compute new_state
current_img, current_depth = get_obs(current_pose)
next_left_img, next_left_depth = current_img.copy(), current_depth.copy()
new_overlapArea = genGtDenseCorrespondenseFlowMap(next_left_depth, goal_depth, current_pose, goal_pose)[:,:,:2]
tensor_left = torch.tensor(new_overlapArea, dtype=torch.float32).to(device).unsqueeze(0).permute(0, 3, 1, 2)
ft_new_overlapArea = perception_model.perception(tensor_left).detach().cpu().numpy().squeeze(0)
new_state = ft_new_overlapArea
#print('new_state.shape = {}'.format(new_state.shape))
## visualize the state
'''
fig = plt.figure(figsize=(20, 5)) #cols, rows
r, c = 1, 5
ax = fig.add_subplot(r, c, 1)
ax.imshow(current_img)
ax = fig.add_subplot(r, c, 2)
ax.imshow(goal_img)
ax = fig.add_subplot(r, c, 3)
ax.imshow(new_overlapArea[:, :, 0])
ax = fig.add_subplot(r, c, 4)
ax.imshow(new_overlapArea[:, :, 1])
ax = fig.add_subplot(r, c, 5) | ax.imshow(ft_new_overlapArea.reshape(16, 16))
plt.show()
'''
## collision done only stops continuing the sequence, but won't affect reward computing | random_line_split |
|
train_DQN_vs_overlap_rnn_no_perception_new.py | goal_pose, start_pose)
next_states[i, j] = torch.tensor(elem[3], dtype=torch.float32)
rw.append(current_reward)
do.append(float(current_done))
#do.append(elem[4])
actions[i] = torch.tensor(ac, dtype=torch.long)
rewards[i] = torch.tensor(rw, dtype=torch.float32)
done[i] = torch.tensor(do, dtype=torch.float32)
# Critic loss (value function loss)
## Get predicted next-state actions and Q values from target models
## gather() accumulates values at given index
#Qvals, _ = self.actor.forward(states, hidden_batch, cell_batch, batch_size=batch_size, time_step=time_step) ## batch_size x action_space
#print('Qvals = {}'.format(Qvals))
Qvals, _ = self.actor.forward(states, hidden_batch, batch_size=batch_size, time_step=time_step) ## batch_size x action_space
Qvals = Qvals.gather(1, actions[:, time_step-1].unsqueeze(1)).squeeze(1) ## batch_size
#print('actions = {}'.format(actions))
#print('Qvals = {}'.format(Qvals))
#next_Q, _ = self.critic.forward(next_states, hidden_batch, cell_batch, batch_size=batch_size, time_step=time_step)##batch_size x action_space
#print('next_Q = {}'.format(next_Q))
next_Q, _ = self.critic.forward(next_states, hidden_batch, batch_size=batch_size, time_step=time_step)##batch_size x action_space
next_Q = next_Q.max(1)[0].detach() ##batch_size
#print('next_Q = {}'.format(next_Q))
# Compute Q targets for current states (y_i)
#print('rewards.shape = {}'.format(rewards[:, time_step-1].shape))
#print('rewards = {}'.format(rewards))
#print('done = {}'.format(done))
Qprime = rewards[:, time_step-1] + self.gamma * next_Q * (1-done[:, time_step-1])
#print('Qprime = {}'.format(Qprime))
loss = F.smooth_l1_loss(Qvals, Qprime)
#print('loss = {}'.format(loss))
#assert 1==2
# update networks
self.actor_optimizer.zero_grad()
loss.backward()
for param in self.actor.parameters():
param.grad.data.clamp_(-1, 1)
self.actor_optimizer.step()
agent = DQN_vs_overlap_rnn_no_perception_new(trained_model_path=None, num_actions=action_space, input_channels=2)
#agent = DQN_vs_overlap_rnn_no_perception(trained_model_path=model_weights_save_path, num_actions=action_space, input_channels=2)
rewards = []
avg_rewards = []
for i_epoch in range(actual_episodes):
## go through each point folder
for point_idx in range(0, num_startPoints):
#for point_idx in range(0, 1):
print('point_idx = {}'.format(point_idx))
## read in start img and start pose
point_image_folder = '{}/{}/point_{}'.format(base_folder, scene_name, point_idx)
point_pose_npy_file = np.load('{}/{}/point_{}_poses.npy'.format(base_folder, scene_name, point_idx))
#start_img = cv2.imread('{}/{}.png'.format(point_image_folder, point_pose_npy_file[0]['img_name']))[:, :, ::-1]
start_pose = point_pose_npy_file[0]['pose']
start_img, start_depth = get_obs(start_pose)
start_depth = start_depth.copy()
## index 0 is the left image, so right_img_idx starts from index 1
for right_img_idx in range(1, len(point_pose_npy_file)):
#for right_img_idx in range(3, 4):
#print('right_img_idx = {}'.format(right_img_idx))
current_pose = start_pose
right_img_name = point_pose_npy_file[right_img_idx]['img_name']
goal_pose = point_pose_npy_file[right_img_idx]['pose']
#goal_img = cv2.imread('{}/{}.png'.format(point_image_folder, right_img_name), 1)[:,:,::-1]
goal_img, goal_depth = get_obs(goal_pose)
goal_img, goal_depth = goal_img.copy(), goal_depth.copy()
overlapArea = genGtDenseCorrespondenseFlowMap(start_depth, goal_depth, start_pose, goal_pose)[:,:,:2]
tensor_start = torch.tensor(overlapArea, dtype=torch.float32).to(device).unsqueeze(0).permute(0, 3, 1, 2)
ft_overlapArea = perception_model.perception(tensor_start).detach().cpu().numpy().squeeze(0)
state = ft_overlapArea
episode_reward = 0
local_memory = []
extend_action_done = False
#hidden_state, cell_state = agent.actor.init_hidden_states(batch_size=1)
hidden_state = agent.actor.init_hidden_states(batch_size=1)
for i_step in range(seq_len):
#action, hidden_state, cell_state = agent.select_action(state, hidden_state, cell_state)
action, hidden_state = agent.select_action(state, hidden_state)
print('action = {}'.format(action))
## update current_pose
vz, omegay = action_table[action.item()]
#print('vz = {:.2f}, omegay = {:.2f}'.format(vz, omegay))
vx = 0.0
vx = vx * lambda_action
vz = vz * lambda_action
omegay = omegay * pi * lambda_action
#print('actual velocity = {:.2f}, {:.2f}, {:.2f}'.format(vx, vz, omegay))
previous_pose = current_pose
current_pose = update_current_pose(current_pose, vx, vz, omegay)
## compute new_state
current_img, current_depth = get_obs(current_pose)
next_left_img, next_left_depth = current_img.copy(), current_depth.copy()
new_overlapArea = genGtDenseCorrespondenseFlowMap(next_left_depth, goal_depth, current_pose, goal_pose)[:,:,:2]
tensor_left = torch.tensor(new_overlapArea, dtype=torch.float32).to(device).unsqueeze(0).permute(0, 3, 1, 2)
ft_new_overlapArea = perception_model.perception(tensor_left).detach().cpu().numpy().squeeze(0)
new_state = ft_new_overlapArea
#print('new_state.shape = {}'.format(new_state.shape))
## visualize the state
'''
fig = plt.figure(figsize=(20, 5)) #cols, rows
r, c = 1, 5
ax = fig.add_subplot(r, c, 1)
ax.imshow(current_img)
ax = fig.add_subplot(r, c, 2)
ax.imshow(goal_img)
ax = fig.add_subplot(r, c, 3)
ax.imshow(new_overlapArea[:, :, 0])
ax = fig.add_subplot(r, c, 4)
ax.imshow(new_overlapArea[:, :, 1])
ax = fig.add_subplot(r, c, 5)
ax.imshow(ft_new_overlapArea.reshape(16, 16))
plt.show()
'''
## collision done only stops continuing the sequence, but won't affect reward computing
reward, done, collision_done = decide_reward_and_done(previous_pose, current_pose, goal_pose, start_pose)
print('done = {}, collision_done = {}'.format(done, collision_done))
if i_step == seq_len-1:
print('used up all the steps ...')
done = 1
## execute one more action as the episode length is smaller than 5
if extend_action_done == True:
done = 1
if done:
extend_action_done = True
#local_memory.append((state, action, torch.tensor([reward], device=device), new_state, torch.tensor([done], device=device)))
local_memory.append((state, action, (previous_pose, current_pose, goal_pose, start_pose), new_state))
#assert 1==2
'''
if len(agent.memory) >= 300:
agent.update(batch_size, time_step=time_step)
agent.update(batch_size, time_step=8)
agent.update(batch_size//2, time_step=16)
agent.update(batch_size//4, time_step=32)
elif len(agent.memory) >= batch_size:
agent.update(batch_size, time_step)
elif len(agent.memory) >= 2:
agent.update(len(agent.memory), time_step)
'''
if len(agent.memory) >= 50:
agent.update(batch_size, 3)
agent.update(batch_size, 8)
#agent.update(batch_size//2, 16)
#agent.update(batch_size//2, 24)
#agent.update(batch_size//4, 32)
#agent.update(batch_size//4, 40)
elif len(agent.memory) >= 10:
agent.update(batch_size, 3)
elif len(agent.memory) >= 2:
| agent.update(len(agent.memory), 3) | conditional_block |
## train_DQN_vs_overlap_rnn_no_perception_new.py
_heading, location_angle))
if pho_dist >= 0.05:
## alpha angle in goToPose is the difference between location angle and left_pose_heading
a1, b1 = cos(location_angle), sin(location_angle)
a2, b2 = cos(left_pose_heading), sin(left_pose_heading)
alpha_dist = math.sqrt((a1-a2)**2 + (b1-b2)**2)
## beta angle in goToPose is the difference between right_pose_heading and location angle
a1, b1 = cos(right_pose_heading), sin(right_pose_heading)
a2, b2 = cos(location_angle), sin(location_angle)
beta_dist = math.sqrt((a1-a2)**2 + (b1-b2)**2)
else:
## when pho_dist is close to zero, alpha_dist is not important
alpha_dist = 0.0
## beta angle becomes the angle between left and right poses
a1, b1 = cos(right_pose_heading), sin(right_pose_heading)
a2, b2 = cos(left_pose_heading), sin(left_pose_heading)
beta_dist = math.sqrt((a1-a2)**2 + (b1-b2)**2)
#print('pho_dist = {:.2f}, alpha_dist = {:.2f}, beta_dist = {:.2f}'.format(pho_dist, alpha_dist, beta_dist))
return pho_dist + lamb_alpha * alpha_dist + lamb_beta * beta_dist
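## Added commentary: the metric above is d = pho + lamb_alpha * alpha_dist + lamb_beta * beta_dist,
## where pho is the Euclidean distance and each angular term is the chord length
## sqrt((cos a - cos b)^2 + (sin a - sin b)^2), which lies in [0, 2]. Illustrative values,
## assuming poses are (x, y, heading) tuples as used throughout this file:
## compute_distance((0.0, 0.0, 0.0), (1.0, 0.0, 0.0)) == 1.0    # pure translation, angle terms are 0
## compute_distance((0.0, 0.0, 0.0), (0.0, 0.0, pi/2)) ~= 0.28  # pure rotation: 0.2 * sqrt(2) from the beta term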
def decide_reward_and_done(previous_pose, current_pose, goal_pose, start_pose):
## check if the new step is on free space or not
reward = 0.0
done = 0
## check if current_pose is closer to goal_pose than previous_pose
#'''
dist_init = compute_distance(start_pose, goal_pose, lamb_alpha=0.2)
dist_current = compute_distance(current_pose, goal_pose, lamb_alpha=0.2)
dist_previous = compute_distance(previous_pose, goal_pose, lamb_alpha=0.2)
reward = max(0, min(dist_previous/dist_init, 1.0) - min(dist_current/dist_init, 1.0))
#print('dist_init = {:.2f}, dist_current = {:.2f}, dist_previous = {:.2f}, reward = {:.2f}'.format(dist_init, dist_current, dist_previous, reward))
#'''
## following Fereshteh's DiVIs paper
'''
dist_init = compute_distance(start_pose, goal_pose, lamb_alpha=0.2)
dist_current = compute_distance(current_pose, goal_pose, lamb_alpha=0.2)
reward = max(0, 1 - min(dist_init, dist_current)/(dist_init+0.00001))
'''
#print('dist_init = {:.2f}, dist_current = {:.2f}, reward = {:.2f}'.format(dist_init, dist_current, reward))
## check if current_pose is close to goal
## goal reward should be larger than all the previously accumulated reward
flag_close_to_goal = close_to_goal(current_pose, goal_pose)
if flag_close_to_goal:
reward = 50.0
done = 1
#print('current_pose = {}, goal_pose = {}, flag_close_to_goal = {}, reward = {}'.format(current_pose, goal_pose, flag_close_to_goal, reward))
#collision_done = 0
## if there is a collision, reward is -1 and the episode is done
left_pixel = path_finder.point_to_pixel((previous_pose[0], previous_pose[1]))
right_pixel = path_finder.point_to_pixel((current_pose[0], current_pose[1]))
## rrt.line_check returns True when there is no obstacle
if not rrt.line_check(left_pixel, right_pixel, free):
print('bumped into obstacle ....')
reward = 0.0
#collision_done = 1
done=1
#print('final reward = {}'.format(reward))
return float(reward), done, 0 #, collision_done
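## Added commentary: the shaping term above rewards normalized progress toward the goal,
## reward = max(0, min(d_prev/d_init, 1) - min(d_cur/d_init, 1)),
## so a monotone approach to the goal earns at most 1.0 in total shaping reward before the
## +50 bonus for arriving. Worked example (illustrative numbers): d_init = 2.0, d_prev = 1.5,
## d_cur = 1.0 gives 0.75 - 0.5 = 0.25, while moving away (d_cur = 1.8) is clipped to 0.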
##============================================================================================================
## import the trained perception module
device = torch.device('cuda:0')
perception = Perception_overlap(2).to(device)
perception_model = DQN_OVERLAP_Controller(perception, action_space, input_size=256).to(device)
approach = 'twentyseventh_try_opticalFlow_newDistMetric'
perception_model.load_state_dict(torch.load('{}/{}/dqn_epoch_{}_Uvalda.pt'.format('/home/reza/Datasets/GibsonEnv/my_code/vs_controller/trained_dqn', approach, 200000)))
##============================================================================================================
base_folder = '/home/reza/Datasets/GibsonEnv/my_code/visual_servoing/sample_image_pairs_{}'.format('train')
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from model_vs import *
from util_vscontroller import ReplayMemory_vs_dqn, ReplayMemory_overlap_dqn, ReplayMemory_overlap_dqn_recurrent
import random
import numpy as np
import math
class DQN_vs_overlap_rnn_no_perception_new:
def __init__(self, trained_model_path=None, num_actions=2, input_channels=2, actor_learning_rate=1e-5, critic_learning_rate=1e-4, gamma=0.97, tau=1e-2, max_memory_size=1000):
# Params
self.num_actions = num_actions
self.gamma = gamma
self.tau = tau
self.steps_done = 0
self.input_channels = input_channels
# Networks
self.actor = DQN_OVERLAP_Recurrent_Controller_no_perception(self.num_actions, input_size=256).to(device)
self.critic = DQN_OVERLAP_Recurrent_Controller_no_perception(self.num_actions, input_size=256).to(device)
if trained_model_path != None:
self.actor.load_state_dict(torch.load('{}/dqn_epoch_200000.pt'.format(trained_model_path)))
print('*********************************successfully read the model ...')
## copy params from actor.parameters to critic.parameters.
## when calling the copy_(), the argument param.data is the src.
for target_param, param in zip(self.critic.parameters(), self.actor.parameters()):
target_param.data.copy_(param.data)
self.critic.eval()
# Training
self.memory = ReplayMemory_overlap_dqn_recurrent(max_memory_size)
## only update weights of actor's linear layer
self.actor_optimizer = optim.RMSprop(self.actor.parameters(), lr=actor_learning_rate)
def | (self):
for target_param, param in zip(self.critic.parameters(), self.actor.parameters()):
target_param.data.copy_(param.data)
self.critic.eval()
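## Added commentary: select_action below is epsilon-greedy with an exponentially decaying
## exploration rate, eps = EPS_END + (EPS_START - EPS_END) * exp(-steps_done / EPS_DECAY).
## With the defaults (0.9 -> 0.05, EPS_DECAY = 10000) that is ~0.90 at step 0,
## ~0.36 after 10k steps (0.05 + 0.85/e), and it approaches 0.05 for long runs.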
## for collecting (state, action, next_state) tuples
#def select_action(self, state, hidden_state, cell_state, EPS_START=0.9, EPS_END=0.05, EPS_DECAY=10000):
def select_action(self, state, hidden_state, EPS_START=0.9, EPS_END=0.05, EPS_DECAY=10000):
sample = random.random()
eps_threshold = EPS_END + (EPS_START - EPS_END) * math.exp(-1. * self.steps_done / EPS_DECAY)
self.steps_done += 1
## take action with the maximum reward by using the actor
if sample > eps_threshold:
with torch.no_grad():
# t.max(1) will return largest column value of each row.
# second column on max result is index of where max element was
# found, so we pick action with the larger expected reward.
obs = state#[0] ## 256 x 256 x 1
print('obs.shape = {}'.format(obs.shape))
#print('obs.shape = {}'.format(obs.shape))
obs = torch.tensor(obs, dtype=torch.float32).to(device).unsqueeze(0).unsqueeze(0)
#print('obs.shape = {}'.format(obs.shape))
#model_out = self.actor.forward(obs, hidden_state, cell_state, batch_size=1, time_step=1)
model_out = self.actor.forward(obs, hidden_state, batch_size=1, time_step=1)
action = model_out[0].max(1)[1].view(1, 1)
#hidden_state = model_out[1][0]
#cell_state = model_out[1][1]
hidden_state = model_out[1]
#return action, hidden_state, cell_state
return action, hidden_state
else:
## take random actions, do exploration
with torch.no_grad():
# t.max(1) will return largest column value of each row.
# second column on max result is index of where max element was
# found, so we pick action with the larger expected reward.
obs = state#[0] ## 256 x 256 x 1
#print('obs.shape = {}'.format(obs.shape))
obs = torch.tensor(obs, dtype=torch.float32).to(device).unsqueeze(0).unsqueeze(0)
#print('obs.shape = {}'.format(obs.shape))
#model_out = self.actor.forward(obs, hidden_state, cell_state, batch_size=1, time_step=1)
model_out = self.actor.forward(obs, hidden_state, batch_size=1, time_step=1)
## train_DQN_vs_overlap_rnn_no_perception_new.py
def close_to_goal(pose1, pose2, thresh=0.15):
L2_dist = math.sqrt((pose1[0] - pose2[0])**2 + (pose1[1] - pose2[1])**2)
thresh_L2_dist = thresh
theta_change = abs(pose1[2] - pose2[2])/math.pi * 180
return (L2_dist < thresh_L2_dist) and (theta_change <= 30)
def compute_distance_old(left_pose, right_pose, lamb=0.5):
x1, y1 = left_pose[0], left_pose[1]
a1, b1 = cos(left_pose[2]), sin(left_pose[2])
x2, y2 = right_pose[0], right_pose[1]
a2, b2 = cos(right_pose[2]), sin(right_pose[2])
x_y_dist = math.sqrt((x1-x2)**2 + (y1-y2)**2)
theta_dist = math.sqrt((a1-a2)**2 + (b1-b2)**2)
return x_y_dist + lamb * theta_dist
def compute_distance(left_pose, right_pose, lamb_alpha=0.5, lamb_beta=0.2):
x1, y1 = left_pose[0], left_pose[1]
x2, y2 = right_pose[0], right_pose[1]
pho_dist = math.sqrt((x1-x2)**2 + (y1-y2)**2)
left_pose_heading = left_pose[2]
right_pose_heading = right_pose[2]
location_angle = atan2(y2-y1, x2-x1)
#print('left_pose_heading = {}, right_pose_heading = {}, location_angle = {}'.format(left_pose_heading, right_pose_heading, location_angle))
if pho_dist >= 0.05:
## alpha angle in goToPose is the difference between location angle and left_pose_heading
a1, b1 = cos(location_angle), sin(location_angle)
a2, b2 = cos(left_pose_heading), sin(left_pose_heading)
alpha_dist = math.sqrt((a1-a2)**2 + (b1-b2)**2)
## beta angle in goToPose is the difference between right_pose_heading and location angle
a1, b1 = cos(right_pose_heading), sin(right_pose_heading)
a2, b2 = cos(location_angle), sin(location_angle)
beta_dist = math.sqrt((a1-a2)**2 + (b1-b2)**2)
else:
## when pho_dist is close to zero, alpha_dist is not important
alpha_dist = 0.0
## beta angle becomes the angle between left and right poses
a1, b1 = cos(right_pose_heading), sin(right_pose_heading)
a2, b2 = cos(left_pose_heading), sin(left_pose_heading)
beta_dist = math.sqrt((a1-a2)**2 + (b1-b2)**2)
#print('pho_dist = {:.2f}, alpha_dist = {:.2f}, beta_dist = {:.2f}'.format(pho_dist, alpha_dist, beta_dist))
return pho_dist + lamb_alpha * alpha_dist + lamb_beta * beta_dist
def decide_reward_and_done(previous_pose, current_pose, goal_pose, start_pose):
## check if the new step is on free space or not
reward = 0.0
done = 0
## check if current_pose is closer to goal_pose than previous_pose
#'''
dist_init = compute_distance(start_pose, goal_pose, lamb_alpha=0.2)
dist_current = compute_distance(current_pose, goal_pose, lamb_alpha=0.2)
dist_previous = compute_distance(previous_pose, goal_pose, lamb_alpha=0.2)
reward = max(0, min(dist_previous/dist_init, 1.0) - min(dist_current/dist_init, 1.0))
#print('dist_init = {:.2f}, dist_current = {:.2f}, dist_previous = {:.2f}, reward = {:.2f}'.format(dist_init, dist_current, dist_previous, reward))
#'''
## following Fereshteh's DiVIs paper
'''
dist_init = compute_distance(start_pose, goal_pose, lamb_alpha=0.2)
dist_current = compute_distance(current_pose, goal_pose, lamb_alpha=0.2)
reward = max(0, 1 - min(dist_init, dist_current)/(dist_init+0.00001))
'''
#print('dist_init = {:.2f}, dist_current = {:.2f}, reward = {:.2f}'.format(dist_init, dist_current, reward))
## check if current_pose is close to goal
## goal reward should be larger than all the previously accumulated reward
flag_close_to_goal = close_to_goal(current_pose, goal_pose)
if flag_close_to_goal:
reward = 50.0
done = 1
#print('current_pose = {}, goal_pose = {}, flag_close_to_goal = {}, reward = {}'.format(current_pose, goal_pose, flag_close_to_goal, reward))
#collision_done = 0
## if there is a collision, reward is -1 and the episode is done
left_pixel = path_finder.point_to_pixel((previous_pose[0], previous_pose[1]))
right_pixel = path_finder.point_to_pixel((current_pose[0], current_pose[1]))
## rrt.line_check returns True when there is no obstacle
if not rrt.line_check(left_pixel, right_pixel, free):
print('bumped into obstacle ....')
reward = 0.0
#collision_done = 1
done=1
#print('final reward = {}'.format(reward))
return float(reward), done, 0 #, collision_done
##============================================================================================================
## import the trained perception module
device = torch.device('cuda:0')
perception = Perception_overlap(2).to(device)
perception_model = DQN_OVERLAP_Controller(perception, action_space, input_size=256).to(device)
approach = 'twentyseventh_try_opticalFlow_newDistMetric'
perception_model.load_state_dict(torch.load('{}/{}/dqn_epoch_{}_Uvalda.pt'.format('/home/reza/Datasets/GibsonEnv/my_code/vs_controller/trained_dqn', approach, 200000)))
##============================================================================================================
base_folder = '/home/reza/Datasets/GibsonEnv/my_code/visual_servoing/sample_image_pairs_{}'.format('train')
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from model_vs import *
from util_vscontroller import ReplayMemory_vs_dqn, ReplayMemory_overlap_dqn, ReplayMemory_overlap_dqn_recurrent
import random
import numpy as np
import math
class DQN_vs_overlap_rnn_no_perception_new:
def __init__(self, trained_model_path=None, num_actions=2, input_channels=2, actor_learning_rate=1e-5, critic_learning_rate=1e-4, gamma=0.97, tau=1e-2, max_memory_size=1000):
# Params
self.num_actions = num_actions
self.gamma = gamma
self.tau = tau
self.steps_done = 0
self.input_channels = input_channels
# Networks
self.actor = DQN_OVERLAP_Recurrent_Controller_no_perception(self.num_actions, input_size=256).to(device)
self.critic = DQN_OVERLAP_Recurrent_Controller_no_perception(self.num_actions, input_size=256).to(device)
if trained_model_path != None:
self.actor.load_state_dict(torch.load('{}/dqn_epoch_200000.pt'.format(trained_model_path)))
print('*********************************successfully read the model ...')
## copy params from actor.parameters to critic.parameters.
## when calling the copy_(), the argument param.data is the src.
for target_param, param in zip(self.critic.parameters(), self.actor.parameters()):
target_param.data.copy_(param.data)
self.critic.eval()
# Training
self.memory = ReplayMemory_overlap_dqn_recurrent(max_memory_size)
## only update weights of actor's linear layer
self.actor_optimizer = optim.RMSprop(self.actor.parameters(), lr=actor_learning_rate)
def update_critic(self):
for target_param, param in zip(self.critic.parameters(), self.actor.parameters()):
target_param.data.copy_(param.data)
self.critic.eval()
## for collecting (state, action, next_state) tuples
#def select_action(self, state, hidden_state, cell_state, EPS_START=0.9, EPS_END=0.05, EPS_DECAY=10000):
def select_action(self, state, hidden_state, EPS_START=0.9, EPS_END=0.05, EPS_DECAY=10000):
sample = random.random()
eps_threshold = EPS_END + (EPS_START - EPS_END) * math.exp(-1. * self.steps_done / EPS_DECAY)
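## Added note: the detached block below is the body of the observation helper called
## earlier as get_obs(pose); its header is lost at this point in the dump. Reconstructed
## signature (an assumption based on the call sites): def get_obs(current_pose):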
| pos, orn = func_pose2posAndorn(current_pose, mapper_scene2z[scene_name])
env.robot.reset_new_pose(pos, orn)
obs, _, _, _ = env.step(4)
obs_rgb = obs['rgb_filled']
obs_depth = obs['depth']
#obs_normal = obs['normal']
return obs_rgb, obs_depth#, obs_normal
// firebaseMap.js
;
ulList.appendChild(li).Trailer;
fahrerOnChange();
};
// chat option buttons (per driver)
$(document).ready( function (){
$('#mdb-select input').on('click', function(){
radioChanged += 1;
//alert($('input[name=group1]:checked', '#mdb-select').val());
selectedFahrer = $('input[name=group1]:checked', '#mdb-select').val();
document.getElementById('selectFahrer').innerHTML = selectedFahrer;
document.getElementById('selectFahrer').style.color = "green";
console.log(selectedFahrer);
console.log(databaseSend);
database.child(selectedFahrer).on('child_added', fahrerListe);
});
});
database.on('child_changed', sendChat);
database.on('child_changed', recieveChat);
// since I can connect from multiple devices or browser tabs, we store each connection instance separately
// any time that connectionsRef's value is null (i.e. has no children) I am offline
var myConnectionsRef = firebase.database().ref('users/Farzat/connections');
// stores the timestamp of my last disconnect (the last time I was seen online)
var lastOnlineRef = firebase.database().ref('users/Farzat/lastOnline');
var connectedRef = firebase.database().ref('.info/connected');
connectedRef.on('value', function(snap) {
if (snap.val() === true) {
// We're connected (or reconnected)! Do anything here that should happen only if online (or on reconnect)
var con = myConnectionsRef.push();
// When I disconnect, remove this device
con.onDisconnect().remove();
// Add this device to my connections list
// this value could contain info about the device or a timestamp too
con.set(true);
// When I disconnect, update the last time I was seen online
lastOnlineRef.onDisconnect().set(firebase.database.ServerValue.TIMESTAMP);
}
});
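// Added note: the block above is the usual Firebase Realtime Database "presence" pattern --
// one child per open tab/device under users/Farzat/connections, removed again by
// onDisconnect(), plus a server-side lastOnline timestamp written on disconnect.
// Minimal sketch of reading the status back (illustrative only, same refs as above):
// lastOnlineRef.on('value', function(snap) {
//   if (snap.val() !== null) console.log('last seen online at', new Date(snap.val()));
// });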
var usernameInput = document.querySelector('#konto');
var textInput = document.querySelector('#fahrerChat');
var postButton = document.querySelector('#post');
postButton.addEventListener("click", function(snapshot) {
var msgUser = usernameInput.value;
var msgText = textInput.value;
var recieve = '"' + msgText + '"';
var postData = {
username: msgUser,
recieve: msgText,
send: ''
};
if (radioChanged > 0) {
//alert('Change function occurred ' + radioChanged + ' times.');
/*database.set(msgUser + " says: " + msgText);*/
//database.push({username:msgUser, recieve:msgText});
if ( msgText == ""){
toastr.error('Bitte Text hinzufügen.', 'Textfeld ist Leer!')
}else{
// Get a key for a new Post.
//var newPostKey = database.push().key;
// Write the new post's data simultaneously in the posts list and the user's post list.
var updates = {};
updates['/' + selectedFahrer +'/recieve'] = recieve;
updates['/user-posts/' + /*newPostKey +*/ new Date()] = postData;
toastr.success('Die Nachricht wurde gesendet.' , 'Danke!', {timeOut: 4000});
textInput.value = "";
return database.update(updates);
}
} else {
alert('Bitte Radio button klicken.');
}
});
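// Added note: database.update() above is a multi-location ("fan-out") write -- every key in
// `updates` is a path relative to `database`, and all paths are committed atomically.
// Equivalent minimal sketch (illustrative names only):
// var updates = {};
// updates['Fahrer 1/recieve'] = '"Bitte melden"';
// updates['user-posts/' + Date.now()] = { username: 'Dispo', recieve: 'Bitte melden', send: '' };
// database.update(updates);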
// Or you can save a line of code by using an inline function
// and on()'s return value.
//var recieveChat = database.on('value', function(dataSnapshot) { ... });
// Sometime later...
//var query = database.orderByChild('recieve').equalTo('joe');
//database.child('recieve').on('child_removed', recieveChat);
//database.off('child_changed', recieveChat);
/*
database.on('child_changed', function (snapshot){
if (snapshot.ref.child('recieve').key) {
recieveChat();
console.log(snapshot.ref.child('recieve').key);
}
if (snapshot.ref.child('send').key) {
database.on('child_changed', sendChat);
console.log(snapshot.ref.child('send').key);
}
});
//database.child("Fahrer 1").on('child_changed', startChat);
*/
function u | unixtime) {
var timestamp = parseInt(data.Zeit);
var u = new Date(unixtime);
return ('0' + u.getUTCDate()).slice(-2) +
'.' + ('0' + u.getUTCMonth() + 11 ).slice(-2)+
'.' + u.getUTCFullYear() +
' ' + ('0' + u.getUTCHours()+13 ).slice(-2) +
':' + ('0' + u.getUTCMinutes()).slice(-2) +
':' + ('0' + u.getUTCSeconds()).slice(-2) +
'.' + (u.getUTCMilliseconds() / 1000).toFixed(3).slice(2, 5)
};
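// Added note: in the formatter above, '0' + u.getUTCMonth() + 11 concatenates before it adds,
// so .slice(-2) always yields "11" for the month (and the hours line always yields "13").
// If a zero-padded calendar month/hour is what is wanted, the usual form would be
// ('0' + (u.getUTCMonth() + 1)).slice(-2) and ('0' + u.getUTCHours()).slice(-2) --
// stated as an assumption here, since the +11/+13 offsets may be intentional timezone shims.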
// list (driver overview)
var fahrerOnChange = function(snapshot, prevChildKey) {
fahrerName = snapshot.key;
var data = snapshot.val();
var datum = unixTime(parseInt(data.Zeit))
var trailerList=
"<b>"+snapshot.child('Trailer').key+" "+ snapshot.child('1').key + ": " +"</b>"+ data.Trailer[1].replace(/"/gi, " ") +
"<b>"+snapshot.child('Trailer').key+" " + snapshot.child('2').key + ": " +"</b>"+ data.Trailer[2].replace(/"/gi, " ") +
"<b>"+snapshot.child('Trailer').key+" " + snapshot.child('3').key + ": " +"</b>"+ data.Trailer[3].replace(/"/gi, " ") +
"<b>"+snapshot.child('Trailer').key+" " + snapshot.child('4').key + ": " +"</b>"+ data.Trailer[4].replace(/"/gi, " ") +
"<b>"+snapshot.child('Trailer').key+" " + snapshot.child('5').key + ": " +"</b>"+ data.Trailer[5].replace(/"/gi, " ") ;
if (fahrerName == "" || fahrerName == null)
{
alert("You must enter a name for your fahrerName!");
return;
}
switch (fahrerName) {
case "Fahrer 1":
if ( data.onOff == 'true'){
fahrer1OnOff.className = "green accent-4 view admin-up";
fahrer1.innerHTML = snapshot.key + ' ist online';
}else{
fahrer1OnOff.className = "red accent-4 view admin-up";
fahrer1.innerHTML = snapshot.key + ' ist offline';
}
mafiF1.innerText = data.Mafi.replace(/"/gi, " ");
zeitF1.innerHTML = datum;
trailerF1.innerHTML = trailerList;
//console.log(data.Trailer);
//fahrer1OnOff.innerHTML = data.onOff;
var date = (data.Zeit).substring(0, 13) ;
//console.log(date.trim())
//var timestamp = parseInt(data.Zeit);
//var ts = new Date(timestamp);
//console.log(ts.toUTCString());
console.log(unixTime(parseInt(data.Zeit)))
break;
case "Fahrer 2":
if ( data.onOff == 'true'){
fahrer2OnOff.className = "green accent-4 view admin-up";
fahrer2.innerHTML = snapshot.key + ' ist online'; }else{
fahrer2OnOff.className = "red accent-4 view admin-up";
fahrer2.innerHTML = snapshot.key + ' ist offline';}
mafiF2.innerText = data.Mafi.replace(/"/gi, " ");
zeitF2.innerHTML = datum;
trailerF2.innerHTML = trailerList;
break;
case "Fahrer 3":
if ( data.onOff == 'true'){
fahrer3OnOff.className = "green accent-4 view admin-up";
fahrer3.innerHTML = snapshot.key + ' ist online'; }else{
fahrer3OnOff.className = "red accent-4 view admin-up";
fahrer3.innerHTML = snapshot.key + ' ist offline';}
mafiF3.innerText = data.Mafi.replace(/"/gi, " ");
zeitF3.innerHTML = datum;
trailerF3.innerHTML = trailerList;
break;
case "Fahrer 4":
if ( data.onOff == 'true'){
fahrer4OnOff.className = "green accent-4 view admin-up";
fahrer4.innerHTML = snapshot.key + ' ist online'; }else{
fahrer4OnOff.className = "red accent-4 view admin-up";
fahrer4.innerHTML = snapshot.key + ' ist offline';}
mafiF4.innerText = data.Mafi.replace(/"/gi, " ");
zeitF4.innerHTML = datum;
trailerF | nixTime( | identifier_name |
firebaseMap.js | onto');
var textInput = document.querySelector('#fahrerChat');
var postButton = document.querySelector('#post');
postButton.addEventListener("click", function(snapshot) {
var msgUser = usernameInput.value;
var msgText = textInput.value;
var recieve = '"' + msgText + '"';
var postData = {
username: msgUser,
recieve: msgText,
send: ''
};
if (radioChanged > 0) {
//alert('Change function occurred ' + radioChanged + ' times.');
/*database.set(msgUser + " says: " + msgText);*/
//database.push({username:msgUser, recieve:msgText});
if ( msgText == ""){
toastr.error('Bitte Text hinzufügen.', 'Textfeld ist Leer!')
}else{
// Get a key for a new Post.
//var newPostKey = database.push().key;
// Write the new post's data simultaneously in the posts list and the user's post list.
var updates = {};
updates['/' + selectedFahrer +'/recieve'] = recieve;
updates['/user-posts/' + /*newPostKey +*/ new Date()] = postData;
toastr.success('Die Nachricht wurde gesendet.' , 'Danke!', {timeOut: 4000});
textInput.value = "";
return database.update(updates);
}
} else {
alert('Bitte Radio button klicken.');
}
});
// Or you can save a line of code by using an inline function
// and on()'s return value.
//var recieveChat = database.on('value', function(dataSnapshot) { ... });
// Sometime later...
//var query = database.orderByChild('recieve').equalTo('joe');
//database.child('recieve').on('child_removed', recieveChat);
//database.off('child_changed', recieveChat);
/*
database.on('child_changed', function (snapshot){
if (snapshot.ref.child('recieve').key) {
recieveChat();
console.log(snapshot.ref.child('recieve').key);
}
if (snapshot.ref.child('send').key) {
database.on('child_changed', sendChat);
console.log(snapshot.ref.child('send').key);
}
});
//database.child("Fahrer 1").on('child_changed', startChat);
*/
function unixTime(unixtime) {
var timestamp = parseInt(data.Zeit);
var u = new Date(unixtime);
return ('0' + u.getUTCDate()).slice(-2) +
'.' + ('0' + u.getUTCMonth() + 11 ).slice(-2)+
'.' + u.getUTCFullYear() +
' ' + ('0' + u.getUTCHours()+13 ).slice(-2) +
':' + ('0' + u.getUTCMinutes()).slice(-2) +
':' + ('0' + u.getUTCSeconds()).slice(-2) +
'.' + (u.getUTCMilliseconds() / 1000).toFixed(3).slice(2, 5)
};
// list (driver overview)
var fahrerOnChange = function(snapshot, prevChildKey) {
fahrerName = snapshot.key;
var data = snapshot.val();
var datum = unixTime(parseInt(data.Zeit))
var trailerList=
"<b>"+snapshot.child('Trailer').key+" "+ snapshot.child('1').key + ": " +"</b>"+ data.Trailer[1].replace(/"/gi, " ") +
"<b>"+snapshot.child('Trailer').key+" " + snapshot.child('2').key + ": " +"</b>"+ data.Trailer[2].replace(/"/gi, " ") +
"<b>"+snapshot.child('Trailer').key+" " + snapshot.child('3').key + ": " +"</b>"+ data.Trailer[3].replace(/"/gi, " ") +
"<b>"+snapshot.child('Trailer').key+" " + snapshot.child('4').key + ": " +"</b>"+ data.Trailer[4].replace(/"/gi, " ") +
"<b>"+snapshot.child('Trailer').key+" " + snapshot.child('5').key + ": " +"</b>"+ data.Trailer[5].replace(/"/gi, " ") ;
if (fahrerName == "" || fahrerName == null)
{
alert("You must enter a name for your fahrerName!");
return;
}
switch (fahrerName) {
case "Fahrer 1":
if ( data.onOff == 'true'){
fahrer1OnOff.className = "green accent-4 view admin-up";
fahrer1.innerHTML = snapshot.key + ' ist online';
}else{
fahrer1OnOff.className = "red accent-4 view admin-up";
fahrer1.innerHTML = snapshot.key + ' ist offline';
}
mafiF1.innerText = data.Mafi.replace(/"/gi, " ");
zeitF1.innerHTML = datum;
trailerF1.innerHTML = trailerList;
//console.log(data.Trailer);
//fahrer1OnOff.innerHTML = data.onOff;
var date = (data.Zeit).substring(0, 13) ;
//console.log(date.trim())
//var timestamp = parseInt(data.Zeit);
//var ts = new Date(timestamp);
//console.log(ts.toUTCString());
console.log(unixTime(parseInt(data.Zeit)))
break;
case "Fahrer 2":
if ( data.onOff == 'true'){
fahrer2OnOff.className = "green accent-4 view admin-up";
fahrer2.innerHTML = snapshot.key + ' ist online'; }else{
fahrer2OnOff.className = "red accent-4 view admin-up";
fahrer2.innerHTML = snapshot.key + ' ist offline';}
mafiF2.innerText = data.Mafi.replace(/"/gi, " ");
zeitF2.innerHTML = datum;
trailerF2.innerHTML = trailerList;
break;
case "Fahrer 3":
if ( data.onOff == 'true'){
fahrer3OnOff.className = "green accent-4 view admin-up";
fahrer3.innerHTML = snapshot.key + ' ist online'; }else{
fahrer3OnOff.className = "red accent-4 view admin-up";
fahrer3.innerHTML = snapshot.key + ' ist offline';}
mafiF3.innerText = data.Mafi.replace(/"/gi, " ");
zeitF3.innerHTML = datum;
trailerF3.innerHTML = trailerList;
break;
case "Fahrer 4":
if ( data.onOff == 'true'){
fahrer4OnOff.className = "green accent-4 view admin-up";
fahrer4.innerHTML = snapshot.key + ' ist online'; }else{
fahrer4OnOff.className = "red accent-4 view admin-up";
fahrer4.innerHTML = snapshot.key + ' ist offline';}
mafiF4.innerText = data.Mafi.replace(/"/gi, " ");
zeitF4.innerHTML = datum;
trailerF4.innerHTML = trailerList;
}
//ulChange.innerText = snapshot.key;
};
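// Added note: the switch above repeats the same DOM updates once per driver; a lookup table
// keyed by driver name would remove the duplication. Sketch (assumes the element variables
// declared above):
// var panels = { 'Fahrer 1': { box: fahrer1OnOff, label: fahrer1, mafi: mafiF1, zeit: zeitF1, trailer: trailerF1 } /* , ... */ };
// var p = panels[fahrerName];
// p.box.className = (data.onOff === 'true' ? 'green' : 'red') + ' accent-4 view admin-up';
// p.label.innerHTML = snapshot.key + (data.onOff === 'true' ? ' ist online' : ' ist offline');
// p.mafi.innerText = data.Mafi.replace(/"/gi, ' ');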
database.on('child_changed', fahrerOnChange);
console.log(fahrerName);
// map start
function makeInfoBox(controlDiv, map) {
// Set CSS for the control border.
var controlUI = document.createElement('div');
controlUI.style.boxShadow = 'rgba(0, 0, 0, 0.298039) 0px 1px 4px -1px';
controlUI.style.backgroundColor = '#fff';
controlUI.style.border = '4px solid #78909c';
controlUI.style.borderRadius = '2px';
controlUI.style.marginBottom = '22px';
controlUI.style.marginTop = '15px';
controlUI.style.textAlign = 'center';
controlDiv.appendChild(controlUI);
// Set CSS for the control interior.
var controlText = document.createElement('div');
controlText.style.color = 'rgb(25,25,25)';
controlText.style.fontFamily = 'Roboto,Arial,sans-serif';
controlText.style.fontSize = '100%';
controlText.style.padding = '8px';
controlText.textContent = 'Fritz Gruppe Tourenplanung';
controlUI.appendChild(controlText);
}
function initMap() { |
var map = new google.maps.Map(document.getElementById('map'), {
center: {
lat: 49.1833848,
lng: 9.17934696
},
zoom: 18,
styles: [{
featureType: 'poi',
stylers: [{
visibility: 'off'
}] // Turn off POI.
},
{
featureType: 'transit.station',
stylers: [{
visibility: 'off'
}] // Turn off bus, train stations etc.
}
],
| identifier_body |
// firebaseMap.js
//fahrer 1
const fahrer1OnOff = document.getElementById('fahrer1OnOff');
var fahrer1 = document.querySelector('fahrer1');
var mafiF1 = document.querySelector('mafiF1');
var trailerF1 = document.querySelector('trailerF1');
var zeitF1 = document.querySelector('zeitF1');
//fahrer 2
const fahrer2OnOff = document.getElementById('fahrer2OnOff');
var fahrer2 = document.querySelector('fahrer2');
var mafiF2 = document.querySelector('mafiF2');
var trailerF2 = document.querySelector('trailerF2');
var zeitF2 = document.querySelector('zeitF2');
//fahrer 3
const fahrer3OnOff = document.getElementById('fahrer3OnOff');
var fahrer3 = document.querySelector('fahrer3');
var mafiF3 = document.querySelector('mafiF3');
var trailerF3 = document.querySelector('trailerF3');
var zeitF3 = document.querySelector('zeitF3');
//fahrer 4
const fahrer4OnOff = document.getElementById('fahrer4OnOff');
var fahrer4 = document.querySelector('fahrer4');
var mafiF4 = document.querySelector('mafiF4');
var trailerF4 = document.querySelector('trailerF4');
var zeitF4 = document.querySelector('zeitF4');
var fahrerName;
var selectedFahrer;
var radioChanged = 0;
//map
var markers = [];
var marker;
var heatmap;
var map;
var src = 'http://www.google.com/maps/d/kml?forcekml=1&mid=1UAmecXT0q4f7N-pYfbkhG5q9Euk';
var data = {
sender: null,
timestamp: null,
lat: null,
lng: null
};
firebase.auth().signInAnonymously().catch(function(error) {
// Handle Errors here.
var errorCode = error.code;
var errorMessage = error.message;
if (errorCode === 'auth/operation-not-allowed') {
alert('You must enable Anonymous auth in the Firebase Console.');
} else {
console.error(error);
}
});
firebase.auth().onAuthStateChanged(function(user) {
if (user) {
// User is signed in.
var isAnonymous = user.isAnonymous;
var uid = user.uid;
uid.name = 222;
console.log(uid);
// ...
} else {
// User is signed out.
// ...
}
// ...
});
//var Ref = firebase.database().ref('Fritz_Tourenplanung/Fahrer/' + fahrerName );
var databaseSend = firebase.database().ref('Fritz_Tourenplanung/Fahrer/' + 'Fahrer 1');
var databaseRecive = firebase.database().ref('Fritz_Tourenplanung/Fahrer/' + 'Fahrer 1');
// send message (dispatcher -> driver)
var sendChat = function (snapshot, prevChildKey) {
var msg = snapshot.val();
//if (snapshot.hasChild("recieve")){
var msgTextElement = document.createElement("p");
//msgTextElement.className = "white-text mb-0 p-2";
msgTextElement.textContent = msg.recieve; //snapshot.child('recieve').key + ' ' +
msgTextElement.className = "card mdb-color lighten-2 text-center z-depth-2 mb-2 white-text";
document.getElementById("sendChat").innerHTML = msg.recieve.replace(/"/gi, " ") ;
//document.getElementById("sendChat").appendChild(msgTextElement);
}
// message received (driver -> dispatcher)
var recieveChat = function (snapshot, prevChildKey) {
var msg = snapshot.val();
var msgUsernameElement = document.createElement("b");
var fahrerTextElement = document.createElement("p");
//msgUsernameElement.className = "white-text mb-0 p-2";
msgUsernameElement.textContent = msg.send;
fahrerTextElement.textContent = snapshot.key;
document.getElementById("recieveChat").innerHTML = snapshot.key + ':' + msg.send.replace(/"/gi, " ") //appendChild(msgUsernameElement);
//var msgElement = document.createElement("div");
//msgElement.appendChild(fahrerTextElement);
//msgElement.appendChild(msgUsernameElement);
//document.getElementById("recieveChat").appendChild(msgElement);
//msgElement.className = "card info-color lighten-2 text-center z-depth-2 mb-2 white-text";
}
// driver list --->
var fahrerListe = function (snapshot, prevChildKey){
const li = document.createElement('li');
li.className = "list-group-item z-depth-2 mb-1";
li.innerText = snapshot.key + ' : ' +snapshot.val();
li.id = snapshot.key;
ulList.appendChild(li).Trailer;
fahrerOnChange();
};
// chat option buttons (per driver)
$(document).ready( function (){
$('#mdb-select input').on('click', function(){
radioChanged += 1;
//alert($('input[name=group1]:checked', '#mdb-select').val());
selectedFahrer = $('input[name=group1]:checked', '#mdb-select').val();
document.getElementById('selectFahrer').innerHTML = selectedFahrer;
document.getElementById('selectFahrer').style.color = "green";
console.log(selectedFahrer);
console.log(databaseSend);
database.child(selectedFahrer).on('child_added', fahrerListe);
});
});
database.on('child_changed', sendChat);
database.on('child_changed', recieveChat);
// since I can connect from multiple devices or browser tabs, we store each connection instance separately
// any time that connectionsRef's value is null (i.e. has no children) I am offline
var myConnectionsRef = firebase.database().ref('users/Farzat/connections');
// stores the timestamp of my last disconnect (the last time I was seen online)
var lastOnlineRef = firebase.database().ref('users/Farzat/lastOnline');
var connectedRef = firebase.database().ref('.info/connected');
connectedRef.on('value', function(snap) {
if (snap.val() === true) {
// We're connected (or reconnected)! Do anything here that should happen only if online (or on reconnect)
var con = myConnectionsRef.push();
// When I disconnect, remove this device
con.onDisconnect().remove();
// Add this device to my connections list
// this value could contain info about the device or a timestamp too
con.set(true);
// When I disconnect, update the last time I was seen online
lastOnlineRef.onDisconnect().set(firebase.database.ServerValue.TIMESTAMP);
}
});
var usernameInput = document.querySelector('#konto');
var textInput = document.querySelector('#fahrerChat');
var postButton = document.querySelector('#post');
postButton.addEventListener("click", function(snapshot) {
var msgUser = usernameInput.value;
var msgText = textInput.value;
var recieve = '"' + msgText + '"';
var postData = {
username: msgUser,
recieve: msgText,
send: ''
};
if (radioChanged > 0) {
//alert('Change function occurred ' + radioChanged + ' times.');
/*database.set(msgUser + " says: " + msgText);*/
//database.push({username:msgUser, recieve:msgText});
if ( msgText == ""){
toastr.error('Bitte Text hinzufügen.', 'Textfeld ist Leer!')
}else{
// Get a key for a new Post.
//var newPostKey = database.push().key;
// Write the new post's data simultaneously in the posts list and the user's post list.
var updates = {};
updates['/' + selectedFahrer +'/recieve'] = recieve;
updates['/user-posts/' + /*newPostKey +*/ new Date()] = postData;
toastr.success('Die Nachricht wurde gesendet.' , 'Danke!', {timeOut: 4000});
textInput.value = "";
return database.update(updates);
}
} else {
alert('Bitte Radio button klicken.');
}
});
// Or you can save a line of code by using an inline function
// and on()'s return value.
//var recieveChat = database.on('value', function(dataSnapshot) { ... });
// Sometime later...
//var query = database.orderByChild('recieve').equalTo('joe');
//database.child('recieve').on('child_removed', recieveChat);
//database.off('child_changed', recieveChat);
/*
database.on('child_changed', function (snapshot){
if (snapshot.ref.child('recieve').key) {
recieveChat();
console.log(snapshot.ref.child('recieve').key);
}
if (snapshot.ref.child('send').key) {
database.on('child_changed', sendChat);
console.log(snapshot.ref.child('send').key);
}
});
//database.child("Fahrer 1").on('child_changed', startChat);
*/
function unixTime(unixtime) {
var timestamp = parseInt(data.Ze | const liList = document.getElementById('listLi');
| random_line_split |
|
value.rs | color value (and optionally, its source string).
Color(Color, Option<String>),
/// The null value.
Null,
/// The true boolean value.
True,
/// The false boolean value.
False,
/// A binary operation, two operands and an operator.
/// The booleans represents possible whitespace.
BinOp(Box<Value>, bool, Operator, bool, Box<Value>),
/// A unary operator and its operand.
UnaryOp(Operator, Box<Value>),
/// A map of values.
Map(ValueMap),
/// A unicode range for font selections. U+NN, U+N?, U+NN-MM.
/// The string is the entire value, including the "U+" tag.
UnicodeRange(String),
/// A value in parenthesis.
Paren(Box<Value>),
}
/// An OrderMap where both the keys and the values are css values.
pub type ValueMap = OrderMap<Value, Value>;
impl Value {
/// Create a numeric value with no unit.
pub fn scalar<T: Into<Number>>(v: T) -> Self {
Value::Numeric(Numeric::scalar(v), true)
}
/// Get the type name of this value.
pub fn type_name(&self) -> &'static str {
match *self {
Value::Color(..) => "color",
Value::Literal(..) => "string",
Value::Map(..) => "map",
Value::Numeric(..) => "number",
Value::List(..) => "list",
Value::Function(..) => "function",
Value::True | Value::False => "bool",
Value::Null => "null",
_ => "unknown",
}
}
/// Return true if this is a calculated value.
///
/// The return of functions or operators are calculated, verbatim
/// values are not.
pub fn is_calculated(&self) -> bool {
match *self {
Value::Numeric(.., calculated) => calculated,
Value::Color(_, None) => true,
_ => false,
}
}
/// Get this value, but marked as calculated.
pub fn into_calculated(self) -> Self {
match self {
Value::Numeric(num, _) => Value::Numeric(num, true),
Value::List(v, sep, bracketed) => Value::List(
v.into_iter().map(|i| i.into_calculated()).collect(),
sep,
bracketed,
),
other => other,
}
}
/// All values other than `False` and `Null` should be considered true.
pub fn is_true(&self) -> bool {
!matches!(self, Value::False | Value::Null)
}
/// Return true if this value is null.
///
/// Note that an empty unquoted string and a list containing no
/// non-null values is also considered null.
pub fn is_null(&self) -> bool {
match *self {
Value::Null => true,
Value::List(ref list, _, false) => {
list.iter().all(|v| v.is_null())
}
Value::Literal(ref s, Quotes::None) if s.is_empty() => true,
Value::Paren(ref v) => v.is_null(),
_ => false,
}
}
/// Check if this value is numeric.
///
/// If it is, get the number and unit, otherwise, get the value
/// itself as error.
pub fn numeric_value(self) -> Result<Numeric, Self> {
match self {
Value::Numeric(num, ..) => Ok(num),
v => Err(v),
}
}
/// Check that this value is an integer.
#[deprecated]
pub fn integer_value(&self) -> Result<i64, Error> {
match self {
&Value::Numeric(ref num, ..) => num
.value
.clone()
.into_integer()
.map_err(|_| Error::bad_value("an integer", self)),
v => Err(Error::bad_value("a number", v)),
}
}
/// Unquote this value.
///
/// If the value is a quoted string, the content is unquoted.
pub fn unquote(self) -> Value {
match self {
Value::Literal(s, Quotes::None) => {
Value::Literal(s, Quotes::None)
}
Value::Literal(s, _) => {
let mut result = String::new();
let mut iter = s.chars().peekable();
while let Some(c) = iter.next() {
if c == '\\' {
let mut val: u32 = 0;
let mut got_num = false;
let nextchar = loop {
match iter.peek() {
Some(&c) if c.is_ascii_hexdigit() => {
val = val * 10 + u32::from(hexvalue(c));
got_num = true;
iter.next();
}
Some(' ') if got_num => {
iter.next();
break (None);
}
Some(_) if !got_num => break (iter.next()),
_ => break (None),
}
};
if got_num {
if let Ok(c) = char::try_from(val) {
result.push(c);
} else {
result.push('\u{fffd}');
}
}
match nextchar {
Some('\n') => {
result.push('\\');
result.push('a');
}
Some(c) => {
result.push(c);
}
None => (),
}
} else {
result.push(c)
}
}
Value::Literal(result, Quotes::None)
}
Value::List(list, s, b) => Value::List(
list.into_iter().map(|v| v.unquote()).collect(),
s,
b,
),
Value::Paren(v) => *v,
v => v,
}
}
/// Get this value as iterable items.
///
/// Lists and maps have iterable items, which are returned as a
/// vector of values. Other values are returned as a vec
/// containing the value as a single item.
pub fn iter_items(self) -> Vec<Value> {
match self {
Value::List(v, _, _) => v,
Value::Map(map) => map
.iter()
.map(|&(ref k, ref v)| {
Value::List(
vec![k.clone(), v.clone()],
Some(ListSeparator::Space),
false,
)
})
.collect(),
Value::Paren(v) => v.iter_items(),
v => vec![v],
}
}
/// Get a reference to this `Value` bound to an output format.
///
/// The bound reference implements `Display`, so it can be written
/// with the rust `format!(...)` macros or converted with the
/// `to_string()` method.
///
/// # Example
///
/// ```
/// # use rsass::css::Value;
/// assert_eq!(
/// Value::scalar(42).format(Default::default()).to_string(),
/// "42",
/// );
/// ```
pub fn format(&self, format: Format) -> Formatted<Value> {
Formatted {
value: self,
format,
}
}
}
fn hexvalue(c: char) -> u8 {
if ('0'..='9').contains(&c) {
c as u8 - b'0'
} else if ('a'..='f').contains(&c) {
c as u8 - b'a' + 10
} else if ('A'..='F').contains(&c) {
c as u8 - b'A' + 10
} else {
0
}
}
/// Some Values are equal according to spec even with some
/// implementation differences.
impl PartialEq for Value {
fn eq(&self, other: &Value) -> bool {
match (&self, other) {
(Value::Bang(a), Value::Bang(b)) => a == b,
(Value::Numeric(a, _), Value::Numeric(b, _)) => a == b,
(Value::Literal(a, aq), Value::Literal(b, bq)) => {
if aq == bq {
a == b
} else {
let a = if aq.is_none() {
a.replace('\\', "\\\\")
} else {
a.clone()
};
let b = if bq.is_none() {
b.replace('\\', "\\\\")
} else {
b.clone()
};
a == b
}
}
(Value::Null, Value::Null) => true,
(Value::True, Value::True) => true,
(Value::False, Value::False) => true,
(Value::Color(a, _), Value::Color(b, _)) => a == b,
(Value::Call(af, aa), Value::Call(bf, ba)) => {
af == bf && aa == ba
}
(Value::Function(a, abody), Value::Function(b, bbody)) => {
a == b && abody == bbody
}
(Value::List(av, asep, ab), Value::List(bv, bsep, bb)) => {
av == bv && asep == bsep && ab == bb
}
(Value::Map(a), Value::Map(b)) => a == b, | random_line_split |
||
// value.rs
Option<ListSeparator>, bool),
/// A Numeric value is a rational value with a Unit (which may be
/// Unit::None) and flags.
///
/// The boolean flag is true for calculated values and false for
/// literal values.
Numeric(Numeric, bool),
/// A color value (and optionally, its source string).
Color(Color, Option<String>),
/// The null value.
Null,
/// The true boolean value.
True,
/// The false boolean value.
False,
/// A binary operation, two operands and an operator.
/// The booleans represents possible whitespace.
BinOp(Box<Value>, bool, Operator, bool, Box<Value>),
/// A unary operator and its operand.
UnaryOp(Operator, Box<Value>),
/// A map of values.
Map(ValueMap),
/// A unicode range for font selections. U+NN, U+N?, U+NN-MM.
/// The string is the entire value, including the "U+" tag.
UnicodeRange(String),
/// A value in parenthesis.
Paren(Box<Value>),
}
/// An OrderMap where both the keys and the values are css values.
pub type ValueMap = OrderMap<Value, Value>;
impl Value {
/// Create a numeric value with no unit.
pub fn scalar<T: Into<Number>>(v: T) -> Self {
Value::Numeric(Numeric::scalar(v), true)
}
/// Get the type name of this value.
pub fn type_name(&self) -> &'static str {
match *self {
Value::Color(..) => "color",
Value::Literal(..) => "string",
Value::Map(..) => "map",
Value::Numeric(..) => "number",
Value::List(..) => "list",
Value::Function(..) => "function",
Value::True | Value::False => "bool",
Value::Null => "null",
_ => "unknown",
}
}
/// Return true if this is a calculated value.
///
/// The return of functions or operators are calculated, verbatim
/// values are not.
pub fn is_calculated(&self) -> bool {
match *self {
Value::Numeric(.., calculated) => calculated,
Value::Color(_, None) => true,
_ => false,
}
}
/// Get this value, but marked as calculated.
pub fn into_calculated(self) -> Self {
match self {
Value::Numeric(num, _) => Value::Numeric(num, true),
Value::List(v, sep, bracketed) => Value::List(
v.into_iter().map(|i| i.into_calculated()).collect(),
sep,
bracketed,
),
other => other,
}
}
/// All values other than `False` and `Null` should be considered true.
pub fn is_true(&self) -> bool {
!matches!(self, Value::False | Value::Null)
}
/// Return true if this value is null.
///
/// Note that an empty unquoted string and a list containing no
/// non-null values is also considered null.
pub fn is_null(&self) -> bool {
match *self {
Value::Null => true,
Value::List(ref list, _, false) => {
list.iter().all(|v| v.is_null())
}
Value::Literal(ref s, Quotes::None) if s.is_empty() => true,
Value::Paren(ref v) => v.is_null(),
_ => false,
}
}
/// Check if this value is numeric.
///
/// If it is, get the number and unit, otherwise, get the value
/// itself as error.
pub fn numeric_value(self) -> Result<Numeric, Self> {
match self {
Value::Numeric(num, ..) => Ok(num),
v => Err(v),
}
}
/// Check that this value is an integer.
#[deprecated]
pub fn integer_value(&self) -> Result<i64, Error> {
match self {
&Value::Numeric(ref num, ..) => num
.value
.clone()
.into_integer()
.map_err(|_| Error::bad_value("an integer", self)),
v => Err(Error::bad_value("a number", v)),
}
}
/// Unquote this value.
///
/// If the value is a quoted string, the content is unquoted.
pub fn unquote(self) -> Value {
match self {
Value::Literal(s, Quotes::None) => {
Value::Literal(s, Quotes::None)
}
Value::Literal(s, _) => {
let mut result = String::new();
let mut iter = s.chars().peekable();
while let Some(c) = iter.next() {
if c == '\\' {
let mut val: u32 = 0;
let mut got_num = false;
let nextchar = loop {
match iter.peek() {
Some(&c) if c.is_ascii_hexdigit() => {
val = val * 10 + u32::from(hexvalue(c));
got_num = true;
iter.next();
}
Some(' ') if got_num => {
iter.next();
break (None);
}
Some(_) if !got_num => break (iter.next()),
_ => break (None),
}
};
if got_num {
if let Ok(c) = char::try_from(val) {
result.push(c);
} else {
result.push('\u{fffd}');
}
}
match nextchar {
Some('\n') => {
result.push('\\');
result.push('a');
}
Some(c) => {
result.push(c);
}
None => (),
}
} else {
result.push(c)
}
}
Value::Literal(result, Quotes::None)
}
Value::List(list, s, b) => Value::List(
list.into_iter().map(|v| v.unquote()).collect(),
s,
b,
),
Value::Paren(v) => *v,
v => v,
}
}
/// Get this value as iterable items.
///
/// Lists and maps have iterable items, which are returned as a
/// vector of values. Other values are returned as a vec
/// containing the value as a single item.
pub fn iter_items(self) -> Vec<Value> {
match self {
Value::List(v, _, _) => v,
Value::Map(map) => map
.iter()
.map(|&(ref k, ref v)| {
Value::List(
vec![k.clone(), v.clone()],
Some(ListSeparator::Space),
false,
)
})
.collect(),
Value::Paren(v) => v.iter_items(),
v => vec![v],
}
}
/// Get a reference to this `Value` bound to an output format.
///
/// The bound reference implements `Display`, so it can be written
/// with the rust `format!(...)` macros or converted with the
/// `to_string()` method.
///
/// # Example
///
/// ```
/// # use rsass::css::Value;
/// assert_eq!(
/// Value::scalar(42).format(Default::default()).to_string(),
/// "42",
/// );
/// ```
pub fn format(&self, format: Format) -> Formatted<Value> {
Formatted {
value: self,
format,
}
}
}
fn hexvalue(c: char) -> u8 {
if ('0'..='9').contains(&c) {
c as u8 - b'0'
} else if ('a'..='f').contains(&c) {
c as u8 - b'a' + 10
} else if ('A'..='F').contains(&c) {
c as u8 - b'A' + 10
} else {
0
}
}
/// Some Values are equal according to spec even with some
/// implementation differences.
impl PartialEq for Value {
fn | (&self, other: &Value) -> bool {
match (&self, other) {
(Value::Bang(a), Value::Bang(b)) => a == b,
(Value::Numeric(a, _), Value::Numeric(b, _)) => a == b,
(Value::Literal(a, aq), Value::Literal(b, bq)) => {
if aq == bq {
a == b
} else {
let a = if aq.is_none() {
a.replace('\\', "\\\\")
} else {
a.clone()
};
let b = if bq.is_none() {
b.replace('\\', "\\\\")
} else {
b.clone()
};
a == b
}
}
(Value::Null, Value::Null) => true,
(Value::True, Value::True) => true,
(Value::False, Value::False) => true,
(Value::Color(a, _), Value::Color(b, _)) => a == b,
(Value::Call(af, aa), Value::Call(bf, ba)) => {
af == bf && aa == ba
}
(Value::Function(a, abody), Value::Function(b, bbody)) => {
a == b && abody == bbody
}
// main.rs
mush, facet);
let questions = (1..facet_vals.len())
.flat_map(move |k| facet_vals.combination(k))
.map(move |combis| Question {
facet,
vals: combis.into_iter().cloned().collect(),
});
for question in questions {
let answer = question.answer(&mush);
match SPLIT_TYPE {
SplitType::Gini => {
let ans_imp = answer.impurity;
if let Some((min_i, _, _)) = min_impurinty {
if ans_imp < min_i {
min_impurinty = Some((ans_imp, question, answer));
}
} else {
min_impurinty = Some((ans_imp, question, answer));
}
}
SplitType::Twoing => {
let p_no = answer.no.rows.len() as f64 / mush.len() as f64;
let p_yes = answer.yes.rows.len() as f64 / mush.len() as f64;
let sum_poison = (answer.yes.poison_cnt as f64
/ answer.yes.rows.len() as f64)
- (answer.no.poison_cnt as f64 / answer.no.rows.len() as f64);
let sum_edible = (1.0
- answer.yes.poison_cnt as f64 / answer.yes.rows.len() as f64)
- (1.0 - answer.no.poison_cnt as f64 / answer.no.rows.len() as f64);
let sum = sum_edible.abs() + sum_poison.abs();
let twoing = p_no * p_yes * 0.25 * (sum * sum);
if let Some((max_two, _, _)) = max_twoing {
if max_two < twoing {
max_twoing = Some((twoing, question, answer));
}
} else {
max_twoing = Some((twoing, question, answer));
}
}
}
}
}
let (quest, ans) = match SPLIT_TYPE {
SplitType::Gini => {
let (_, quest, ans) = min_impurinty.expect("huh? no nodes or sumpin?");
(quest, ans)
}
SplitType::Twoing => {
let (_, quest, ans) = max_twoing.expect("Huh? no nodes?");
(quest, ans)
}
};
println!("page {}: {}", page, quest);
for (txt, node) in &[("yes", &ans.yes), ("no", &ans.no)] {
if node.impurity == 0.0 {
println!("\tif {}, done. {}", txt, node);
} else {
next_page += 1;
println!("\tif {}, {}, goto page {}", txt, node, next_page);
half_nodes.push_back((node.rows.clone(), next_page));
}
}
}
}
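// Added commentary: the two split criteria above are the usual CART choices. Gini keeps the
// question with the lowest weighted child impurity, while twoing maximises
// p_yes * p_no * 0.25 * (|p(poison|yes) - p(poison|no)| + |p(edible|yes) - p(edible|no)|)^2.
// Worked example with 100 rows: a split sending 30 poisonous + 10 edible to "yes" and
// 10 poisonous + 50 edible to "no" gives p_yes = 0.4, p_no = 0.6 and an absolute-difference
// sum of 7/6, so twoing = 0.4 * 0.6 * 0.25 * (7/6)^2 ~= 0.082.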
fn facet_vals(mushs: &[Mush], facet: usize) -> Vec<char> {
mushs
.iter()
.map(|m| m.attrs[facet])
.collect::<BTreeSet<_>>()
.into_iter()
.collect()
}
#[derive(Clone, Debug)]
struct Question {
facet: usize,
vals: BTreeSet<char>,
}
impl Question {
/// Applies the question to the group, separating it into two.
fn answer(&self, input: &[Mush]) -> Answer {
let (yes, no) = input
.iter()
.partition(|mush| self.vals.contains(&mush.attrs[self.facet]));
Answer::new(yes, no)
}
}
impl std::fmt::Display for Question {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
let (facet_name, facet_map) = &FACETS[self.facet];
let choices = facet_map
.iter()
.filter_map(|(k, v)| if self.vals.contains(k) { Some(v) } else { None })
.cloned()
.collect::<Vec<_>>()
.join(", ");
write!(
f,
"Examine '{}'. Is it {}{}?",
facet_name,
if self.vals.len() > 1 { "one of " } else { "" },
choices
)
}
}
#[test]
fn test_question_fmt() {
use std::iter::FromIterator;
let q = Question {
facet: 0,
vals: BTreeSet::from_iter(['b', 'c', 'x'].iter().cloned()),
};
format!("{}", q);
}
lazy_static::lazy_static! {
static ref FACETS: Vec<(&'static str,HashMap<char,&'static str>)> = {
let facet_data = [
("cap-shape" ,"bell=b,conical=c,convex=x,flat=f,knobbed=k,sunken=s"),
("cap-surface" ,"fibrous=f,grooves=g,scaly=y,smooth=s"),
("cap-color" ,"brown=n,buff=b,cinnamon=c,gray=g,green=r,pink=p,purple=u,red=e,white=w,yellow=y"),
("bruises?" ,"bruises=t,no=f"),
("odor" ,"almond=a,anise=l,creosote=c,fishy=y,foul=f,musty=m,none=n,pungent=p,spicy=s"),
("gill-attachment" ,"attached=a,descending=d,free=f,notched=n"),
("gill-spacing" ,"close=c,crowded=w,distant=d"),
("gill-size" ,"broad=b,narrow=n"),
("gill-color" ,"black=k,brown=n,buff=b,chocolate=h,gray=g,green=r,orange=o,pink=p,purple=u,red=e,white=w,yellow=y"),
("stalk-shape" ,"enlarging=e,tapering=t"),
("stalk-root" ,"bulbous=b,club=c,cup=u,equal=e,rhizomorphs=z,rooted=r,missing=?"),
("stalk-surface-above-ring" ,"fibrous=f,scaly=y,silky=k,smooth=s"),
("stalk-surface-below-ring" ,"fibrous=f,scaly=y,silky=k,smooth=s"),
("stalk-color-above-ring" ,"brown=n,buff=b,cinnamon=c,gray=g,orange=o,pink=p,red=e,white=w,yellow=y"),
("stalk-color-below-ring" ,"brown=n,buff=b,cinnamon=c,gray=g,orange=o,pink=p,red=e,white=w,yellow=y"),
("veil-type" ,"partial=p,universal=u"),
("veil-color" ,"brown=n,orange=o,white=w,yellow=y"),
("ring-number" ,"none=n,one=o,two=t"),
("ring-type" ,"cobwebby=c,evanescent=e,flaring=f,large=l,none=n,pendant=p,sheathing=s,zone=z"),
("spore-print-color" ,"black=k,brown=n,buff=b,chocolate=h,green=r,orange=o,purple=u,white=w,yellow=y"),
("population" ,"abundant=a,clustered=c,numerous=n,scattered=s,several=v,solitary=y"),
("habitat" ,"grasses=g,leaves=l,meadows=m,paths=p,urban=u,waste=w,woods=d"),
];
let mut result = Vec::new();
for (facet, cats) in &facet_data {
let mut facet_map = HashMap::new();
for cat in cats.split(',') {
let mut i = cat.splitn(2,'=');
if let (Some(name),Some(c)) = (i.next(), i.next()) {
facet_map.insert(c.chars().next().unwrap(), name);
} else {
panic!("Can't parse: {}", cat);
}
}
result.push((*facet,facet_map));
}
result
};
}
#[test]
fn | () {
let q = Question {
facet: 0,
vals: ['a', 'b', 'c'].iter().cloned().collect(),
};
let mushs = [
Mush {
poison: 'p',
attrs: ['a'; 22],
},
Mush {
poison: 'p',
attrs: ['b'; 22],
},
Mush {
poison: 'p',
attrs: ['c'; 22],
},
Mush {
poison: 'p',
attrs: ['d'; 22],
},
Mush {
poison: 'p',
attrs: ['e'; 22],
},
];
let a = q.answer(&mushs);
assert_eq!(a.yes.rows.len(), 3);
assert_eq!(a.no.rows.len(), 2);
}
#[derive(Debug)]
struct Answer {
yes: Node,
no: Node,
impurity: f64,
parent_idx: Option<usize>,
}
impl Answer {
fn new(yes: Vec<Mush>, no: Vec<Mush>) -> Answer {
let yes_node = Node::new(yes);
let no_node = Node::new(no);
let answer_impurity = | test_answer | identifier_name |
main.rs | mush, facet);
let questions = (1..facet_vals.len())
.flat_map(move |k| facet_vals.combination(k))
.map(move |combis| Question {
facet,
vals: combis.into_iter().cloned().collect(),
});
for question in questions {
let answer = question.answer(&mush);
match SPLIT_TYPE {
SplitType::Gini => {
let ans_imp = answer.impurity;
if let Some((min_i, _, _)) = min_impurinty {
if ans_imp < min_i {
min_impurinty = Some((ans_imp, question, answer));
}
} else {
min_impurinty = Some((ans_imp, question, answer));
}
}
SplitType::Twoing => {
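// Twoing criterion: score = p_yes * p_no / 4 * (|Δ poison| + |Δ edible|)^2,
// where each Δ is the difference in class proportion between the two branches;
// the question with the largest score is kept as the split.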
let p_no = answer.no.rows.len() as f64 / mush.len() as f64;
let p_yes = answer.yes.rows.len() as f64 / mush.len() as f64;
let sum_poison = (answer.yes.poison_cnt as f64
/ answer.yes.rows.len() as f64)
- (answer.no.poison_cnt as f64 / answer.no.rows.len() as f64);
let sum_edible = (1.0
- answer.yes.poison_cnt as f64 / answer.yes.rows.len() as f64)
- (1.0 - answer.no.poison_cnt as f64 / answer.no.rows.len() as f64);
let sum = sum_edible.abs() + sum_poison.abs();
let twoing = p_no * p_yes * 0.25 * (sum * sum);
if let Some((max_two, _, _)) = max_twoing {
if max_two < twoing {
max_twoing = Some((twoing, question, answer));
}
} else {
max_twoing = Some((twoing, question, answer));
}
}
}
}
}
let (quest, ans) = match SPLIT_TYPE {
SplitType::Gini => {
let (_, quest, ans) = min_impurinty.expect("huh? no nodes or sumpin?");
(quest, ans)
}
SplitType::Twoing => {
let (_, quest, ans) = max_twoing.expect("Huh? no nodes?");
(quest, ans)
}
};
println!("page {}: {}", page, quest);
for (txt, node) in &[("yes", &ans.yes), ("no", &ans.no)] {
if node.impurity == 0.0 {
println!("\tif {}, done. {}", txt, node);
} else {
next_page += 1;
println!("\tif {}, {}, goto page {}", txt, node, next_page);
half_nodes.push_back((node.rows.clone(), next_page));
}
}
}
}
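/// Distinct values of the given facet (attribute column) across the sample,
/// deduplicated and returned in sorted order via the intermediate `BTreeSet`.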
fn facet_vals(mushs: &[Mush], facet: usize) -> Vec<char> {
mushs
.iter()
.map(|m| m.attrs[facet])
.collect::<BTreeSet<_>>()
.into_iter()
.collect()
}
#[derive(Clone, Debug)]
struct Question {
facet: usize,
vals: BTreeSet<char>,
}
impl Question {
/// Applies the question to the group, separating it into two.
fn answer(&self, input: &[Mush]) -> Answer {
let (yes, no) = input
.iter()
.partition(|mush| self.vals.contains(&mush.attrs[self.facet]));
Answer::new(yes, no)
}
}
impl std::fmt::Display for Question {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
let (facet_name, facet_map) = &FACETS[self.facet];
let choices = facet_map
.iter()
.filter_map(|(k, v)| if self.vals.contains(k) { Some(v) } else { None })
.cloned()
.collect::<Vec<_>>()
.join(", ");
write!(
f,
"Examine '{}'. Is it {}{}?",
facet_name,
if self.vals.len() > 1 { "one of " } else { "" },
choices
)
}
}
#[test]
fn test_question_fmt() {
use std::iter::FromIterator;
let q = Question {
facet: 0,
vals: BTreeSet::from_iter(['b', 'c', 'x'].iter().cloned()),
};
format!("{}", q);
}
lazy_static::lazy_static! {
static ref FACETS: Vec<(&'static str,HashMap<char,&'static str>)> = {
let facet_data = [
("cap-shape" ,"bell=b,conical=c,convex=x,flat=f,knobbed=k,sunken=s"),
("cap-surface" ,"fibrous=f,grooves=g,scaly=y,smooth=s"), | ("gill-size" ,"broad=b,narrow=n"),
("gill-color" ,"black=k,brown=n,buff=b,chocolate=h,gray=g,green=r,orange=o,pink=p,purple=u,red=e,white=w,yellow=y"),
("stalk-shape" ,"enlarging=e,tapering=t"),
("stalk-root" ,"bulbous=b,club=c,cup=u,equal=e,rhizomorphs=z,rooted=r,missing=?"),
("stalk-surface-above-ring" ,"fibrous=f,scaly=y,silky=k,smooth=s"),
("stalk-surface-below-ring" ,"fibrous=f,scaly=y,silky=k,smooth=s"),
("stalk-color-above-ring" ,"brown=n,buff=b,cinnamon=c,gray=g,orange=o,pink=p,red=e,white=w,yellow=y"),
("stalk-color-below-ring" ,"brown=n,buff=b,cinnamon=c,gray=g,orange=o,pink=p,red=e,white=w,yellow=y"),
("veil-type" ,"partial=p,universal=u"),
("veil-color" ,"brown=n,orange=o,white=w,yellow=y"),
("ring-number" ,"none=n,one=o,two=t"),
("ring-type" ,"cobwebby=c,evanescent=e,flaring=f,large=l,none=n,pendant=p,sheathing=s,zone=z"),
("spore-print-color" ,"black=k,brown=n,buff=b,chocolate=h,green=r,orange=o,purple=u,white=w,yellow=y"),
("population" ,"abundant=a,clustered=c,numerous=n,scattered=s,several=v,solitary=y"),
("habitat" ,"grasses=g,leaves=l,meadows=m,paths=p,urban=u,waste=w,woods=d"),
];
let mut result = Vec::new();
for (facet, cats) in &facet_data {
let mut facet_map = HashMap::new();
for cat in cats.split(',') {
let mut i = cat.splitn(2,'=');
if let (Some(name),Some(c)) = (i.next(), i.next()) {
facet_map.insert(c.chars().next().unwrap(), name);
} else {
panic!("Can't parse: {}", cat);
}
}
result.push((*facet,facet_map));
}
result
};
}
#[test]
fn test_answer() {
let q = Question {
facet: 0,
vals: ['a', 'b', 'c'].iter().cloned().collect(),
};
let mushs = [
Mush {
poison: 'p',
attrs: ['a'; 22],
},
Mush {
poison: 'p',
attrs: ['b'; 22],
},
Mush {
poison: 'p',
attrs: ['c'; 22],
},
Mush {
poison: 'p',
attrs: ['d'; 22],
},
Mush {
poison: 'p',
attrs: ['e'; 22],
},
];
let a = q.answer(&mushs);
assert_eq!(a.yes.rows.len(), 3);
assert_eq!(a.no.rows.len(), 2);
}
#[derive(Debug)]
struct Answer {
yes: Node,
no: Node,
impurity: f64,
parent_idx: Option<usize>,
}
impl Answer {
fn new(yes: Vec<Mush>, no: Vec<Mush>) -> Answer {
let yes_node = Node::new(yes);
let no_node = Node::new(no);
let answer_impurity = yes | ("cap-color" ,"brown=n,buff=b,cinnamon=c,gray=g,green=r,pink=p,purple=u,red=e,white=w,yellow=y"),
("bruises?" ,"bruises=t,no=f"),
("odor" ,"almond=a,anise=l,creosote=c,fishy=y,foul=f,musty=m,none=n,pungent=p,spicy=s"),
("gill-attachment" ,"attached=a,descending=d,free=f,notched=n"),
("gill-spacing" ,"close=c,crowded=w,distant=d"), | random_line_split |
main.rs | mush, facet);
let questions = (1..facet_vals.len())
.flat_map(move |k| facet_vals.combination(k))
.map(move |combis| Question {
facet,
vals: combis.into_iter().cloned().collect(),
});
for question in questions {
let answer = question.answer(&mush);
match SPLIT_TYPE {
SplitType::Gini => {
let ans_imp = answer.impurity;
if let Some((min_i, _, _)) = min_impurinty {
if ans_imp < min_i {
min_impurinty = Some((ans_imp, question, answer));
}
} else {
min_impurinty = Some((ans_imp, question, answer));
}
}
SplitType::Twoing => {
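// Twoing criterion: score = p_yes * p_no / 4 * (|Δ poison| + |Δ edible|)^2,
// where each Δ is the difference in class proportion between the two branches;
// the question with the largest score is kept as the split.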
let p_no = answer.no.rows.len() as f64 / mush.len() as f64;
let p_yes = answer.yes.rows.len() as f64 / mush.len() as f64;
let sum_poison = (answer.yes.poison_cnt as f64
/ answer.yes.rows.len() as f64)
- (answer.no.poison_cnt as f64 / answer.no.rows.len() as f64);
let sum_edible = (1.0
- answer.yes.poison_cnt as f64 / answer.yes.rows.len() as f64)
- (1.0 - answer.no.poison_cnt as f64 / answer.no.rows.len() as f64);
let sum = sum_edible.abs() + sum_poison.abs();
let twoing = p_no * p_yes * 0.25 * (sum * sum);
if let Some((max_two, _, _)) = max_twoing {
if max_two < twoing {
max_twoing = Some((twoing, question, answer));
}
} else {
max_twoing = Some((twoing, question, answer));
}
}
}
}
}
let (quest, ans) = match SPLIT_TYPE {
SplitType::Gini => {
let (_, quest, ans) = min_impurinty.expect("huh? no nodes or sumpin?");
(quest, ans)
}
SplitType::Twoing => {
let (_, quest, ans) = max_twoing.expect("Huh? no nodes?");
(quest, ans)
}
};
println!("page {}: {}", page, quest);
for (txt, node) in &[("yes", &ans.yes), ("no", &ans.no)] {
if node.impurity == 0.0 {
println!("\tif {}, done. {}", txt, node);
} else {
next_page += 1;
println!("\tif {}, {}, goto page {}", txt, node, next_page);
half_nodes.push_back((node.rows.clone(), next_page));
}
}
}
}
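/// Distinct values of the given facet (attribute column) across the sample,
/// deduplicated and returned in sorted order via the intermediate `BTreeSet`.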
fn facet_vals(mushs: &[Mush], facet: usize) -> Vec<char> {
mushs
.iter()
.map(|m| m.attrs[facet])
.collect::<BTreeSet<_>>()
.into_iter()
.collect()
}
#[derive(Clone, Debug)]
struct Question {
facet: usize,
vals: BTreeSet<char>,
}
impl Question {
/// Applies the question to the group, separating it into two.
fn answer(&self, input: &[Mush]) -> Answer {
let (yes, no) = input
.iter()
.partition(|mush| self.vals.contains(&mush.attrs[self.facet]));
Answer::new(yes, no)
}
}
impl std::fmt::Display for Question {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
let (facet_name, facet_map) = &FACETS[self.facet];
let choices = facet_map
.iter()
.filter_map(|(k, v)| if self.vals.contains(k) | else { None })
.cloned()
.collect::<Vec<_>>()
.join(", ");
write!(
f,
"Examine '{}'. Is it {}{}?",
facet_name,
if self.vals.len() > 1 { "one of " } else { "" },
choices
)
}
}
#[test]
fn test_question_fmt() {
use std::iter::FromIterator;
let q = Question {
facet: 0,
vals: BTreeSet::from_iter(['b', 'c', 'x'].iter().cloned()),
};
format!("{}", q);
}
lazy_static::lazy_static! {
static ref FACETS: Vec<(&'static str,HashMap<char,&'static str>)> = {
let facet_data = [
("cap-shape" ,"bell=b,conical=c,convex=x,flat=f,knobbed=k,sunken=s"),
("cap-surface" ,"fibrous=f,grooves=g,scaly=y,smooth=s"),
("cap-color" ,"brown=n,buff=b,cinnamon=c,gray=g,green=r,pink=p,purple=u,red=e,white=w,yellow=y"),
("bruises?" ,"bruises=t,no=f"),
("odor" ,"almond=a,anise=l,creosote=c,fishy=y,foul=f,musty=m,none=n,pungent=p,spicy=s"),
("gill-attachment" ,"attached=a,descending=d,free=f,notched=n"),
("gill-spacing" ,"close=c,crowded=w,distant=d"),
("gill-size" ,"broad=b,narrow=n"),
("gill-color" ,"black=k,brown=n,buff=b,chocolate=h,gray=g,green=r,orange=o,pink=p,purple=u,red=e,white=w,yellow=y"),
("stalk-shape" ,"enlarging=e,tapering=t"),
("stalk-root" ,"bulbous=b,club=c,cup=u,equal=e,rhizomorphs=z,rooted=r,missing=?"),
("stalk-surface-above-ring" ,"fibrous=f,scaly=y,silky=k,smooth=s"),
("stalk-surface-below-ring" ,"fibrous=f,scaly=y,silky=k,smooth=s"),
("stalk-color-above-ring" ,"brown=n,buff=b,cinnamon=c,gray=g,orange=o,pink=p,red=e,white=w,yellow=y"),
("stalk-color-below-ring" ,"brown=n,buff=b,cinnamon=c,gray=g,orange=o,pink=p,red=e,white=w,yellow=y"),
("veil-type" ,"partial=p,universal=u"),
("veil-color" ,"brown=n,orange=o,white=w,yellow=y"),
("ring-number" ,"none=n,one=o,two=t"),
("ring-type" ,"cobwebby=c,evanescent=e,flaring=f,large=l,none=n,pendant=p,sheathing=s,zone=z"),
("spore-print-color" ,"black=k,brown=n,buff=b,chocolate=h,green=r,orange=o,purple=u,white=w,yellow=y"),
("population" ,"abundant=a,clustered=c,numerous=n,scattered=s,several=v,solitary=y"),
("habitat" ,"grasses=g,leaves=l,meadows=m,paths=p,urban=u,waste=w,woods=d"),
];
let mut result = Vec::new();
for (facet, cats) in &facet_data {
let mut facet_map = HashMap::new();
for cat in cats.split(',') {
let mut i = cat.splitn(2,'=');
if let (Some(name),Some(c)) = (i.next(), i.next()) {
facet_map.insert(c.chars().next().unwrap(), name);
} else {
panic!("Can't parse: {}", cat);
}
}
result.push((*facet,facet_map));
}
result
};
}
#[test]
fn test_answer() {
let q = Question {
facet: 0,
vals: ['a', 'b', 'c'].iter().cloned().collect(),
};
let mushs = [
Mush {
poison: 'p',
attrs: ['a'; 22],
},
Mush {
poison: 'p',
attrs: ['b'; 22],
},
Mush {
poison: 'p',
attrs: ['c'; 22],
},
Mush {
poison: 'p',
attrs: ['d'; 22],
},
Mush {
poison: 'p',
attrs: ['e'; 22],
},
];
let a = q.answer(&mushs);
assert_eq!(a.yes.rows.len(), 3);
assert_eq!(a.no.rows.len(), 2);
}
#[derive(Debug)]
struct Answer {
yes: Node,
no: Node,
impurity: f64,
parent_idx: Option<usize>,
}
impl Answer {
fn new(yes: Vec<Mush>, no: Vec<Mush>) -> Answer {
let yes_node = Node::new(yes);
let no_node = Node::new(no);
let answer_impurity = | { Some(v) } | conditional_block |
main.rs | ush, facet);
let questions = (1..facet_vals.len())
.flat_map(move |k| facet_vals.combination(k))
.map(move |combis| Question {
facet,
vals: combis.into_iter().cloned().collect(),
});
for question in questions {
let answer = question.answer(&mush);
match SPLIT_TYPE {
SplitType::Gini => {
let ans_imp = answer.impurity;
if let Some((min_i, _, _)) = min_impurinty {
if ans_imp < min_i {
min_impurinty = Some((ans_imp, question, answer));
}
} else {
min_impurinty = Some((ans_imp, question, answer));
}
}
SplitType::Twoing => {
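// Twoing criterion: score = p_yes * p_no / 4 * (|Δ poison| + |Δ edible|)^2,
// where each Δ is the difference in class proportion between the two branches;
// the question with the largest score is kept as the split.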
let p_no = answer.no.rows.len() as f64 / mush.len() as f64;
let p_yes = answer.yes.rows.len() as f64 / mush.len() as f64;
let sum_poison = (answer.yes.poison_cnt as f64
/ answer.yes.rows.len() as f64)
- (answer.no.poison_cnt as f64 / answer.no.rows.len() as f64);
let sum_edible = (1.0
- answer.yes.poison_cnt as f64 / answer.yes.rows.len() as f64)
- (1.0 - answer.no.poison_cnt as f64 / answer.no.rows.len() as f64);
let sum = sum_edible.abs() + sum_poison.abs();
let twoing = p_no * p_yes * 0.25 * (sum * sum);
if let Some((max_two, _, _)) = max_twoing {
if max_two < twoing {
max_twoing = Some((twoing, question, answer));
}
} else {
max_twoing = Some((twoing, question, answer));
}
}
}
}
}
let (quest, ans) = match SPLIT_TYPE {
SplitType::Gini => {
let (_, quest, ans) = min_impurinty.expect("huh? no nodes or sumpin?");
(quest, ans)
}
SplitType::Twoing => {
let (_, quest, ans) = max_twoing.expect("Huh? no nodes?");
(quest, ans)
}
};
println!("page {}: {}", page, quest);
for (txt, node) in &[("yes", &ans.yes), ("no", &ans.no)] {
if node.impurity == 0.0 {
println!("\tif {}, done. {}", txt, node);
} else {
next_page += 1;
println!("\tif {}, {}, goto page {}", txt, node, next_page);
half_nodes.push_back((node.rows.clone(), next_page));
}
}
}
}
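/// Distinct values of the given facet (attribute column) across the sample,
/// deduplicated and returned in sorted order via the intermediate `BTreeSet`.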
fn facet_vals(mushs: &[Mush], facet: usize) -> Vec<char> {
mushs
.iter()
.map(|m| m.attrs[facet])
.collect::<BTreeSet<_>>()
.into_iter()
.collect()
}
#[derive(Clone, Debug)]
struct Question {
facet: usize,
vals: BTreeSet<char>,
}
impl Question {
/// Applies the question to the group, separating it into two.
fn answer(&self, input: &[Mush]) -> Answer {
let (yes, no) = input
.iter()
.partition(|mush| self.vals.contains(&mush.attrs[self.facet]));
Answer::new(yes, no)
}
}
impl std::fmt::Display for Question {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result |
}
#[test]
fn test_question_fmt() {
use std::iter::FromIterator;
let q = Question {
facet: 0,
vals: BTreeSet::from_iter(['b', 'c', 'x'].iter().cloned()),
};
format!("{}", q);
}
lazy_static::lazy_static! {
static ref FACETS: Vec<(&'static str,HashMap<char,&'static str>)> = {
let facet_data = [
("cap-shape" ,"bell=b,conical=c,convex=x,flat=f,knobbed=k,sunken=s"),
("cap-surface" ,"fibrous=f,grooves=g,scaly=y,smooth=s"),
("cap-color" ,"brown=n,buff=b,cinnamon=c,gray=g,green=r,pink=p,purple=u,red=e,white=w,yellow=y"),
("bruises?" ,"bruises=t,no=f"),
("odor" ,"almond=a,anise=l,creosote=c,fishy=y,foul=f,musty=m,none=n,pungent=p,spicy=s"),
("gill-attachment" ,"attached=a,descending=d,free=f,notched=n"),
("gill-spacing" ,"close=c,crowded=w,distant=d"),
("gill-size" ,"broad=b,narrow=n"),
("gill-color" ,"black=k,brown=n,buff=b,chocolate=h,gray=g,green=r,orange=o,pink=p,purple=u,red=e,white=w,yellow=y"),
("stalk-shape" ,"enlarging=e,tapering=t"),
("stalk-root" ,"bulbous=b,club=c,cup=u,equal=e,rhizomorphs=z,rooted=r,missing=?"),
("stalk-surface-above-ring" ,"fibrous=f,scaly=y,silky=k,smooth=s"),
("stalk-surface-below-ring" ,"fibrous=f,scaly=y,silky=k,smooth=s"),
("stalk-color-above-ring" ,"brown=n,buff=b,cinnamon=c,gray=g,orange=o,pink=p,red=e,white=w,yellow=y"),
("stalk-color-below-ring" ,"brown=n,buff=b,cinnamon=c,gray=g,orange=o,pink=p,red=e,white=w,yellow=y"),
("veil-type" ,"partial=p,universal=u"),
("veil-color" ,"brown=n,orange=o,white=w,yellow=y"),
("ring-number" ,"none=n,one=o,two=t"),
("ring-type" ,"cobwebby=c,evanescent=e,flaring=f,large=l,none=n,pendant=p,sheathing=s,zone=z"),
("spore-print-color" ,"black=k,brown=n,buff=b,chocolate=h,green=r,orange=o,purple=u,white=w,yellow=y"),
("population" ,"abundant=a,clustered=c,numerous=n,scattered=s,several=v,solitary=y"),
("habitat" ,"grasses=g,leaves=l,meadows=m,paths=p,urban=u,waste=w,woods=d"),
];
let mut result = Vec::new();
for (facet, cats) in &facet_data {
let mut facet_map = HashMap::new();
for cat in cats.split(',') {
let mut i = cat.splitn(2,'=');
if let (Some(name),Some(c)) = (i.next(), i.next()) {
facet_map.insert(c.chars().next().unwrap(), name);
} else {
panic!("Can't parse: {}", cat);
}
}
result.push((*facet,facet_map));
}
result
};
}
#[test]
fn test_answer() {
let q = Question {
facet: 0,
vals: ['a', 'b', 'c'].iter().cloned().collect(),
};
let mushs = [
Mush {
poison: 'p',
attrs: ['a'; 22],
},
Mush {
poison: 'p',
attrs: ['b'; 22],
},
Mush {
poison: 'p',
attrs: ['c'; 22],
},
Mush {
poison: 'p',
attrs: ['d'; 22],
},
Mush {
poison: 'p',
attrs: ['e'; 22],
},
];
let a = q.answer(&mushs);
assert_eq!(a.yes.rows.len(), 3);
assert_eq!(a.no.rows.len(), 2);
}
#[derive(Debug)]
struct Answer {
yes: Node,
no: Node,
impurity: f64,
parent_idx: Option<usize>,
}
impl Answer {
fn new(yes: Vec<Mush>, no: Vec<Mush>) -> Answer {
let yes_node = Node::new(yes);
let no_node = Node::new(no);
let answer_impurity = | {
let (facet_name, facet_map) = &FACETS[self.facet];
let choices = facet_map
.iter()
.filter_map(|(k, v)| if self.vals.contains(k) { Some(v) } else { None })
.cloned()
.collect::<Vec<_>>()
.join(", ");
write!(
f,
"Examine '{}'. Is it {}{}?",
facet_name,
if self.vals.len() > 1 { "one of " } else { "" },
choices
)
} | identifier_body |
mod.rs | node.close();
}
}
cluster.set_nodes(vec![]);
}
fn tend(&self) -> Result<()> {
let mut nodes = self.nodes();
// All node additions/deletions are performed in tend thread.
// If active nodes don't exist, seed cluster.
if nodes.is_empty() {
debug!("No connections available; seeding...");
self.seed_nodes();
nodes = self.nodes();
}
let mut friend_list: Vec<Host> = vec![];
let mut refresh_count = 0;
// Refresh all known nodes.
for node in nodes {
let old_gen = node.partition_generation();
if node.is_active() {
match node.refresh(self.aliases()) {
Ok(friends) => {
refresh_count += 1;
if !friends.is_empty() {
friend_list.extend_from_slice(&friends);
}
if old_gen != node.partition_generation() {
self.update_partitions(node.clone())?;
}
}
Err(err) => {
node.increase_failures();
warn!("Node `{}` refresh failed: {}", node, err);
}
}
}
}
// Add nodes in a batch.
let add_list = self.find_new_nodes_to_add(friend_list);
self.add_nodes_and_aliases(&add_list);
// IMPORTANT: Remove must come after add to remove aliases
// Handle nodes changes determined from refreshes.
// Remove nodes in a batch.
let remove_list = self.find_nodes_to_remove(refresh_count);
self.remove_nodes_and_aliases(remove_list);
Ok(())
}
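/// Tend the cluster in a loop until the node count stops changing between passes,
/// or the client policy timeout (defaulting to 3 seconds) expires.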
fn wait_till_stabilized(cluster: Arc<Cluster>) -> Result<()> {
let timeout = cluster
.client_policy()
.timeout
.unwrap_or_else(|| Duration::from_secs(3));
let deadline = Instant::now() + timeout;
let sleep_between_tend = Duration::from_millis(1);
let handle = thread::spawn(move || {
let mut count: isize = -1;
loop {
if Instant::now() > deadline {
break;
}
if let Err(err) = cluster.tend() {
log_error_chain!(err, "Error during initial cluster tend");
}
let old_count = count;
count = cluster.nodes().len() as isize;
if count == old_count {
break;
}
thread::sleep(sleep_between_tend);
}
});
handle
.join()
.map_err(|err| format!("Error during initial cluster tend: {:?}", err).into())
}
pub const fn cluster_name(&self) -> &Option<String> {
&self.client_policy.cluster_name
}
pub const fn client_policy(&self) -> &ClientPolicy {
&self.client_policy
}
pub fn add_seeds(&self, new_seeds: &[Host]) -> Result<()> {
let mut seeds = self.seeds.write();
seeds.extend_from_slice(new_seeds);
Ok(())
}
pub fn alias_exists(&self, host: &Host) -> Result<bool> {
let aliases = self.aliases.read();
Ok(aliases.contains_key(host))
}
fn set_partitions(&self, partitions: HashMap<String, Vec<Arc<Node>>>) {
let mut partition_map = self.partition_write_map.write();
*partition_map = partitions;
}
fn partitions(&self) -> Arc<RwLock<HashMap<String, Vec<Arc<Node>>>>> {
self.partition_write_map.clone()
}
pub fn node_partitions(&self, node: &Node, namespace: &str) -> Vec<u16> {
let mut res = vec![];
let partitions = self.partitions();
let partitions = partitions.read();
if let Some(node_array) = partitions.get(namespace) {
let mut i = 0;
for tnode in node_array {
if node == tnode.as_ref() {
res.push(i);
}
i += 1;
}
}
res
}
pub fn update_partitions(&self, node: Arc<Node>) -> Result<()> {
let mut conn = node.get_connection(self.client_policy.timeout)?;
let tokens = PartitionTokenizer::new(&mut conn).map_err(|e| {
conn.invalidate();
e
})?;
let nmap = tokens.update_partition(self.partitions(), node)?;
self.set_partitions(nmap);
Ok(())
}
pub fn seed_nodes(&self) -> bool {
let seed_array = self.seeds.read();
info!("Seeding the cluster. Seeds count: {}", seed_array.len());
let mut list: Vec<Arc<Node>> = vec![];
for seed in &*seed_array {
let mut seed_node_validator = NodeValidator::new(self);
if let Err(err) = seed_node_validator.validate_node(self, seed) {
log_error_chain!(err, "Failed to validate seed host: {}", seed);
continue;
};
for alias in &*seed_node_validator.aliases() {
let nv = if *seed == *alias {
seed_node_validator.clone()
} else {
let mut nv2 = NodeValidator::new(self);
if let Err(err) = nv2.validate_node(self, seed) {
log_error_chain!(err, "Seeding host {} failed with error", alias);
continue;
};
nv2
};
if self.find_node_name(&list, &nv.name) {
continue;
}
let node = self.create_node(nv);
let node = Arc::new(node);
self.add_aliases(node.clone());
list.push(node);
}
}
self.add_nodes_and_aliases(&list);
!list.is_empty()
}
fn find_node_name(&self, list: &[Arc<Node>], name: &str) -> bool {
list.iter().any(|node| node.name() == name)
}
fn find_new_nodes_to_add(&self, hosts: Vec<Host>) -> Vec<Arc<Node>> {
let mut list: Vec<Arc<Node>> = vec![];
for host in hosts {
let mut nv = NodeValidator::new(self);
if let Err(err) = nv.validate_node(self, &host) {
log_error_chain!(err, "Adding node {} failed with error", host.name);
continue;
};
// Duplicate node name found. This usually occurs when the server
// services list contains both internal and external IP addresses
// for the same node. Add new host to list of alias filters
// and do not add new node.
let mut dup = false;
match self.get_node_by_name(&nv.name) {
Ok(node) => {
self.add_alias(host, node.clone());
dup = true;
}
Err(_) => {
if let Some(node) = list.iter().find(|n| n.name() == nv.name) {
self.add_alias(host, node.clone());
dup = true;
}
}
};
if !dup {
let node = self.create_node(nv);
list.push(Arc::new(node));
}
}
list
}
fn create_node(&self, nv: NodeValidator) -> Node {
Node::new(self.client_policy.clone(), Arc::new(nv))
}
fn find_nodes_to_remove(&self, refresh_count: usize) -> Vec<Arc<Node>> {
let nodes = self.nodes();
let mut remove_list: Vec<Arc<Node>> = vec![];
let cluster_size = nodes.len();
for node in nodes {
let tnode = node.clone();
if !node.is_active() {
remove_list.push(tnode);
continue;
}
match cluster_size {
// Single node clusters rely on whether it responded to info requests.
1 if node.failures() > 5 => {
// 5 consecutive info requests failed. Try seeds.
if self.seed_nodes() {
remove_list.push(tnode);
}
}
// Two node clusters require at least one successful refresh before removing.
2 if refresh_count == 1 && node.reference_count() == 0 && node.failures() > 0 => {
remove_list.push(node)
}
_ => {
// Multi-node clusters require two successful node refreshes before removing.
if refresh_count >= 2 && node.reference_count() == 0 {
// Node is not referenced by other nodes.
// Check if node responded to info request.
if node.failures() == 0 {
// Node is alive, but not referenced by other nodes. Check if mapped.
if !self.find_node_in_partition_map(node) {
remove_list.push(tnode);
}
} else {
// Node not responding. Remove it.
remove_list.push(tnode);
}
}
}
}
}
remove_list
}
fn add_nodes_and_aliases(&self, friend_list: &[Arc<Node>]) {
for node in friend_list {
self.add_aliases(node.clone());
}
self.add_nodes(friend_list);
}
fn remove_nodes_and_aliases(&self, mut nodes_to_remove: Vec<Arc<Node>>) {
for node in &mut nodes_to_remove {
for alias in node.aliases() {
self.remove_alias(&alias);
}
if let Some(node) = Arc::get_mut(node) {
node.close();
}
}
self.remove_nodes(&nodes_to_remove);
}
fn add_alias(&self, host: Host, node: Arc<Node>) | {
let mut aliases = self.aliases.write();
node.add_alias(host.clone());
aliases.insert(host, node);
} | identifier_body |
|
mod.rs | thread::spawn(move || Cluster::tend_thread(cluster_for_tend, rx));
debug!("New cluster initialized and ready to be used...");
Ok(cluster)
}
fn tend_thread(cluster: Arc<Cluster>, rx: Receiver<()>) {
let tend_interval = cluster.client_policy.tend_interval;
loop {
// try to read from the receive channel to see if it hung up
match rx.try_recv() {
Ok(_) => unreachable!(),
// signaled to end
Err(TryRecvError::Disconnected) => break,
Err(TryRecvError::Empty) => {
if let Err(err) = cluster.tend() {
log_error_chain!(err, "Error tending cluster");
}
thread::sleep(tend_interval);
}
}
}
// close all nodes
let nodes = cluster.nodes();
for mut node in nodes {
if let Some(node) = Arc::get_mut(&mut node) {
node.close();
}
}
cluster.set_nodes(vec![]);
}
fn tend(&self) -> Result<()> {
let mut nodes = self.nodes();
// All node additions/deletions are performed in tend thread.
// If active nodes don't exist, seed cluster.
if nodes.is_empty() {
debug!("No connections available; seeding...");
self.seed_nodes();
nodes = self.nodes();
}
let mut friend_list: Vec<Host> = vec![];
let mut refresh_count = 0;
// Refresh all known nodes.
for node in nodes {
let old_gen = node.partition_generation();
if node.is_active() {
match node.refresh(self.aliases()) {
Ok(friends) => {
refresh_count += 1;
if !friends.is_empty() {
friend_list.extend_from_slice(&friends);
}
if old_gen != node.partition_generation() {
self.update_partitions(node.clone())?;
}
}
Err(err) => {
node.increase_failures();
warn!("Node `{}` refresh failed: {}", node, err);
}
}
}
}
// Add nodes in a batch.
let add_list = self.find_new_nodes_to_add(friend_list);
self.add_nodes_and_aliases(&add_list);
// IMPORTANT: Remove must come after add to remove aliases
// Handle nodes changes determined from refreshes.
// Remove nodes in a batch.
let remove_list = self.find_nodes_to_remove(refresh_count);
self.remove_nodes_and_aliases(remove_list);
Ok(())
}
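/// Tend the cluster in a loop until the node count stops changing between passes,
/// or the client policy timeout (defaulting to 3 seconds) expires.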
fn wait_till_stabilized(cluster: Arc<Cluster>) -> Result<()> {
let timeout = cluster
.client_policy()
.timeout
.unwrap_or_else(|| Duration::from_secs(3));
let deadline = Instant::now() + timeout;
let sleep_between_tend = Duration::from_millis(1);
let handle = thread::spawn(move || {
let mut count: isize = -1;
loop {
if Instant::now() > deadline {
break;
}
if let Err(err) = cluster.tend() {
log_error_chain!(err, "Error during initial cluster tend");
}
let old_count = count;
count = cluster.nodes().len() as isize;
if count == old_count {
break;
}
thread::sleep(sleep_between_tend);
}
});
handle
.join()
.map_err(|err| format!("Error during initial cluster tend: {:?}", err).into())
}
pub const fn cluster_name(&self) -> &Option<String> {
&self.client_policy.cluster_name
}
pub const fn client_policy(&self) -> &ClientPolicy {
&self.client_policy
}
pub fn add_seeds(&self, new_seeds: &[Host]) -> Result<()> {
let mut seeds = self.seeds.write();
seeds.extend_from_slice(new_seeds);
Ok(())
}
pub fn alias_exists(&self, host: &Host) -> Result<bool> {
let aliases = self.aliases.read();
Ok(aliases.contains_key(host))
}
fn set_partitions(&self, partitions: HashMap<String, Vec<Arc<Node>>>) {
let mut partition_map = self.partition_write_map.write();
*partition_map = partitions;
}
fn partitions(&self) -> Arc<RwLock<HashMap<String, Vec<Arc<Node>>>>> {
self.partition_write_map.clone()
}
pub fn node_partitions(&self, node: &Node, namespace: &str) -> Vec<u16> {
let mut res = vec![];
let partitions = self.partitions();
let partitions = partitions.read();
if let Some(node_array) = partitions.get(namespace) {
let mut i = 0;
for tnode in node_array {
if node == tnode.as_ref() {
res.push(i);
}
i += 1;
}
}
res
}
pub fn update_partitions(&self, node: Arc<Node>) -> Result<()> {
let mut conn = node.get_connection(self.client_policy.timeout)?;
let tokens = PartitionTokenizer::new(&mut conn).map_err(|e| {
conn.invalidate();
e
})?;
let nmap = tokens.update_partition(self.partitions(), node)?;
self.set_partitions(nmap);
Ok(())
}
pub fn seed_nodes(&self) -> bool {
let seed_array = self.seeds.read();
info!("Seeding the cluster. Seeds count: {}", seed_array.len());
let mut list: Vec<Arc<Node>> = vec![];
for seed in &*seed_array {
let mut seed_node_validator = NodeValidator::new(self);
if let Err(err) = seed_node_validator.validate_node(self, seed) {
log_error_chain!(err, "Failed to validate seed host: {}", seed);
continue;
};
for alias in &*seed_node_validator.aliases() {
let nv = if *seed == *alias {
seed_node_validator.clone()
} else {
let mut nv2 = NodeValidator::new(self);
if let Err(err) = nv2.validate_node(self, seed) {
log_error_chain!(err, "Seeding host {} failed with error", alias);
continue;
};
nv2
};
if self.find_node_name(&list, &nv.name) {
continue;
}
let node = self.create_node(nv);
let node = Arc::new(node);
self.add_aliases(node.clone());
list.push(node);
}
}
self.add_nodes_and_aliases(&list);
!list.is_empty()
}
fn find_node_name(&self, list: &[Arc<Node>], name: &str) -> bool {
list.iter().any(|node| node.name() == name)
}
fn find_new_nodes_to_add(&self, hosts: Vec<Host>) -> Vec<Arc<Node>> {
let mut list: Vec<Arc<Node>> = vec![];
for host in hosts {
let mut nv = NodeValidator::new(self);
if let Err(err) = nv.validate_node(self, &host) {
log_error_chain!(err, "Adding node {} failed with error", host.name);
continue;
};
// Duplicate node name found. This usually occurs when the server
// services list contains both internal and external IP addresses
// for the same node. Add new host to list of alias filters
// and do not add new node.
let mut dup = false;
match self.get_node_by_name(&nv.name) {
Ok(node) => {
self.add_alias(host, node.clone());
dup = true;
}
Err(_) => {
if let Some(node) = list.iter().find(|n| n.name() == nv.name) {
self.add_alias(host, node.clone());
dup = true;
}
}
};
if !dup {
let node = self.create_node(nv);
list.push(Arc::new(node));
}
}
list
}
fn create_node(&self, nv: NodeValidator) -> Node {
Node::new(self.client_policy.clone(), Arc::new(nv))
}
fn find_nodes_to_remove(&self, refresh_count: usize) -> Vec<Arc<Node>> {
let nodes = self.nodes();
let mut remove_list: Vec<Arc<Node>> = vec![];
let cluster_size = nodes.len();
for node in nodes {
let tnode = node.clone();
if !node.is_active() {
remove_list.push(tnode);
continue;
}
match cluster_size {
// Single node clusters rely on whether it responded to info requests.
1 if node.failures() > 5 => {
// 5 consecutive info requests failed. Try seeds.
if self.seed_nodes() {
remove_list.push(tnode);
}
}
// Two node clusters require at least one successful refresh before removing.
2 if refresh_count == 1 && node.reference_count() == 0 && node.failures() > 0 => {
remove_list.push(node)
}
_ => {
// Multi-node clusters require two successful node refreshes before removing.
if refresh_count >= 2 && node.reference_count() == 0 {
// Node is not referenced by other nodes.
// Check if node responded to info request.
if node.failures() == 0 {
// Node is alive, but not referenced by other nodes. Check if mapped.
if !self.find_node_in_partition_map(node) | {
remove_list.push(tnode);
} | conditional_block |
|
mod.rs | old_gen != node.partition_generation() {
self.update_partitions(node.clone())?;
}
}
Err(err) => {
node.increase_failures();
warn!("Node `{}` refresh failed: {}", node, err);
}
}
}
}
// Add nodes in a batch.
let add_list = self.find_new_nodes_to_add(friend_list);
self.add_nodes_and_aliases(&add_list);
// IMPORTANT: Remove must come after add to remove aliases
// Handle nodes changes determined from refreshes.
// Remove nodes in a batch.
let remove_list = self.find_nodes_to_remove(refresh_count);
self.remove_nodes_and_aliases(remove_list);
Ok(())
}
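/// Tend the cluster in a loop until the node count stops changing between passes,
/// or the client policy timeout (defaulting to 3 seconds) expires.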
fn wait_till_stabilized(cluster: Arc<Cluster>) -> Result<()> {
let timeout = cluster
.client_policy()
.timeout
.unwrap_or_else(|| Duration::from_secs(3));
let deadline = Instant::now() + timeout;
let sleep_between_tend = Duration::from_millis(1);
let handle = thread::spawn(move || {
let mut count: isize = -1;
loop {
if Instant::now() > deadline {
break;
}
if let Err(err) = cluster.tend() {
log_error_chain!(err, "Error during initial cluster tend");
}
let old_count = count;
count = cluster.nodes().len() as isize;
if count == old_count {
break;
}
thread::sleep(sleep_between_tend);
}
});
handle
.join()
.map_err(|err| format!("Error during initial cluster tend: {:?}", err).into())
}
pub const fn cluster_name(&self) -> &Option<String> {
&self.client_policy.cluster_name
}
pub const fn client_policy(&self) -> &ClientPolicy {
&self.client_policy
}
pub fn add_seeds(&self, new_seeds: &[Host]) -> Result<()> {
let mut seeds = self.seeds.write();
seeds.extend_from_slice(new_seeds);
Ok(())
}
pub fn alias_exists(&self, host: &Host) -> Result<bool> {
let aliases = self.aliases.read();
Ok(aliases.contains_key(host))
}
fn set_partitions(&self, partitions: HashMap<String, Vec<Arc<Node>>>) {
let mut partition_map = self.partition_write_map.write();
*partition_map = partitions;
}
fn partitions(&self) -> Arc<RwLock<HashMap<String, Vec<Arc<Node>>>>> {
self.partition_write_map.clone()
}
pub fn node_partitions(&self, node: &Node, namespace: &str) -> Vec<u16> {
let mut res = vec![];
let partitions = self.partitions();
let partitions = partitions.read();
if let Some(node_array) = partitions.get(namespace) {
let mut i = 0;
for tnode in node_array {
if node == tnode.as_ref() {
res.push(i);
}
i += 1;
}
}
res
}
pub fn update_partitions(&self, node: Arc<Node>) -> Result<()> {
let mut conn = node.get_connection(self.client_policy.timeout)?;
let tokens = PartitionTokenizer::new(&mut conn).map_err(|e| {
conn.invalidate();
e
})?;
let nmap = tokens.update_partition(self.partitions(), node)?;
self.set_partitions(nmap);
Ok(())
}
pub fn seed_nodes(&self) -> bool {
let seed_array = self.seeds.read();
info!("Seeding the cluster. Seeds count: {}", seed_array.len());
let mut list: Vec<Arc<Node>> = vec![];
for seed in &*seed_array {
let mut seed_node_validator = NodeValidator::new(self);
if let Err(err) = seed_node_validator.validate_node(self, seed) {
log_error_chain!(err, "Failed to validate seed host: {}", seed);
continue;
};
for alias in &*seed_node_validator.aliases() {
let nv = if *seed == *alias {
seed_node_validator.clone()
} else {
let mut nv2 = NodeValidator::new(self);
if let Err(err) = nv2.validate_node(self, seed) {
log_error_chain!(err, "Seeding host {} failed with error", alias);
continue;
};
nv2
};
if self.find_node_name(&list, &nv.name) {
continue;
}
let node = self.create_node(nv);
let node = Arc::new(node);
self.add_aliases(node.clone());
list.push(node);
}
}
self.add_nodes_and_aliases(&list);
!list.is_empty()
}
fn find_node_name(&self, list: &[Arc<Node>], name: &str) -> bool {
list.iter().any(|node| node.name() == name)
}
fn find_new_nodes_to_add(&self, hosts: Vec<Host>) -> Vec<Arc<Node>> {
let mut list: Vec<Arc<Node>> = vec![];
for host in hosts {
let mut nv = NodeValidator::new(self);
if let Err(err) = nv.validate_node(self, &host) {
log_error_chain!(err, "Adding node {} failed with error", host.name);
continue;
};
// Duplicate node name found. This usually occurs when the server
// services list contains both internal and external IP addresses
// for the same node. Add new host to list of alias filters
// and do not add new node.
let mut dup = false;
match self.get_node_by_name(&nv.name) {
Ok(node) => {
self.add_alias(host, node.clone());
dup = true;
}
Err(_) => {
if let Some(node) = list.iter().find(|n| n.name() == nv.name) {
self.add_alias(host, node.clone());
dup = true;
}
}
};
if !dup {
let node = self.create_node(nv);
list.push(Arc::new(node));
}
}
list
}
fn create_node(&self, nv: NodeValidator) -> Node {
Node::new(self.client_policy.clone(), Arc::new(nv))
}
fn find_nodes_to_remove(&self, refresh_count: usize) -> Vec<Arc<Node>> {
let nodes = self.nodes();
let mut remove_list: Vec<Arc<Node>> = vec![];
let cluster_size = nodes.len();
for node in nodes {
let tnode = node.clone();
if !node.is_active() {
remove_list.push(tnode);
continue;
}
match cluster_size {
// Single node clusters rely on whether it responded to info requests.
1 if node.failures() > 5 => {
// 5 consecutive info requests failed. Try seeds.
if self.seed_nodes() {
remove_list.push(tnode);
}
}
// Two node clusters require at least one successful refresh before removing.
2 if refresh_count == 1 && node.reference_count() == 0 && node.failures() > 0 => {
remove_list.push(node)
}
_ => {
// Multi-node clusters require two successful node refreshes before removing.
if refresh_count >= 2 && node.reference_count() == 0 {
// Node is not referenced by other nodes.
// Check if node responded to info request.
if node.failures() == 0 {
// Node is alive, but not referenced by other nodes. Check if mapped.
if !self.find_node_in_partition_map(node) {
remove_list.push(tnode);
}
} else {
// Node not responding. Remove it.
remove_list.push(tnode);
}
}
}
}
}
remove_list
}
fn add_nodes_and_aliases(&self, friend_list: &[Arc<Node>]) {
for node in friend_list {
self.add_aliases(node.clone());
}
self.add_nodes(friend_list);
}
fn remove_nodes_and_aliases(&self, mut nodes_to_remove: Vec<Arc<Node>>) {
for node in &mut nodes_to_remove {
for alias in node.aliases() {
self.remove_alias(&alias);
}
if let Some(node) = Arc::get_mut(node) {
node.close();
}
}
self.remove_nodes(&nodes_to_remove);
}
fn add_alias(&self, host: Host, node: Arc<Node>) {
let mut aliases = self.aliases.write();
node.add_alias(host.clone());
aliases.insert(host, node);
}
fn remove_alias(&self, host: &Host) {
let mut aliases = self.aliases.write();
aliases.remove(host);
}
fn add_aliases(&self, node: Arc<Node>) {
let mut aliases = self.aliases.write();
for alias in node.aliases() {
aliases.insert(alias, node.clone());
}
}
fn find_node_in_partition_map(&self, filter: Arc<Node>) -> bool {
let partitions = self.partition_write_map.read();
(*partitions)
.values()
.any(|map| map.iter().any(|node| *node == filter))
}
fn add_nodes(&self, friend_list: &[Arc<Node>]) {
if friend_list.is_empty() {
return;
}
let mut nodes = self.nodes();
nodes.extend(friend_list.iter().cloned());
self.set_nodes(nodes) | } | random_line_split |
|
mod.rs | rx): (Sender<()>, Receiver<()>) = mpsc::channel();
let cluster = Arc::new(Cluster {
client_policy: policy,
seeds: Arc::new(RwLock::new(hosts.to_vec())),
aliases: Arc::new(RwLock::new(HashMap::new())),
nodes: Arc::new(RwLock::new(vec![])),
partition_write_map: Arc::new(RwLock::new(HashMap::new())),
node_index: AtomicIsize::new(0),
tend_channel: Mutex::new(tx),
closed: AtomicBool::new(false),
});
// try to seed connections for first use
Cluster::wait_till_stabilized(cluster.clone())?;
// apply policy rules
if cluster.client_policy.fail_if_not_connected && !cluster.is_connected() {
bail!(ErrorKind::Connection(
"Failed to connect to host(s). The network \
connection(s) to cluster nodes may have timed out, or \
the cluster may be in a state of flux."
.to_string()
));
}
let cluster_for_tend = cluster.clone();
thread::spawn(move || Cluster::tend_thread(cluster_for_tend, rx));
debug!("New cluster initialized and ready to be used...");
Ok(cluster)
}
fn tend_thread(cluster: Arc<Cluster>, rx: Receiver<()>) {
let tend_interval = cluster.client_policy.tend_interval;
loop {
// try to read from the receive channel to see if it hung up
match rx.try_recv() {
Ok(_) => unreachable!(),
// signaled to end
Err(TryRecvError::Disconnected) => break,
Err(TryRecvError::Empty) => {
if let Err(err) = cluster.tend() {
log_error_chain!(err, "Error tending cluster");
}
thread::sleep(tend_interval);
}
}
}
// close all nodes
let nodes = cluster.nodes();
for mut node in nodes {
if let Some(node) = Arc::get_mut(&mut node) {
node.close();
}
}
cluster.set_nodes(vec![]);
}
fn tend(&self) -> Result<()> {
let mut nodes = self.nodes();
// All node additions/deletions are performed in tend thread.
// If active nodes don't exist, seed cluster.
if nodes.is_empty() {
debug!("No connections available; seeding...");
self.seed_nodes();
nodes = self.nodes();
}
let mut friend_list: Vec<Host> = vec![];
let mut refresh_count = 0;
// Refresh all known nodes.
for node in nodes {
let old_gen = node.partition_generation();
if node.is_active() {
match node.refresh(self.aliases()) {
Ok(friends) => {
refresh_count += 1;
if !friends.is_empty() {
friend_list.extend_from_slice(&friends);
}
if old_gen != node.partition_generation() {
self.update_partitions(node.clone())?;
}
}
Err(err) => {
node.increase_failures();
warn!("Node `{}` refresh failed: {}", node, err);
}
}
}
}
// Add nodes in a batch.
let add_list = self.find_new_nodes_to_add(friend_list);
self.add_nodes_and_aliases(&add_list);
// IMPORTANT: Remove must come after add to remove aliases
// Handle nodes changes determined from refreshes.
// Remove nodes in a batch.
let remove_list = self.find_nodes_to_remove(refresh_count);
self.remove_nodes_and_aliases(remove_list);
Ok(())
}
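/// Tend the cluster in a loop until the node count stops changing between passes,
/// or the client policy timeout (defaulting to 3 seconds) expires.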
fn wait_till_stabilized(cluster: Arc<Cluster>) -> Result<()> {
let timeout = cluster
.client_policy()
.timeout
.unwrap_or_else(|| Duration::from_secs(3));
let deadline = Instant::now() + timeout;
let sleep_between_tend = Duration::from_millis(1);
let handle = thread::spawn(move || {
let mut count: isize = -1;
loop {
if Instant::now() > deadline {
break;
}
if let Err(err) = cluster.tend() {
log_error_chain!(err, "Error during initial cluster tend");
}
let old_count = count;
count = cluster.nodes().len() as isize;
if count == old_count {
break;
}
thread::sleep(sleep_between_tend);
}
});
handle
.join()
.map_err(|err| format!("Error during initial cluster tend: {:?}", err).into())
}
pub const fn cluster_name(&self) -> &Option<String> {
&self.client_policy.cluster_name
}
pub const fn client_policy(&self) -> &ClientPolicy {
&self.client_policy
}
pub fn add_seeds(&self, new_seeds: &[Host]) -> Result<()> {
let mut seeds = self.seeds.write();
seeds.extend_from_slice(new_seeds);
Ok(())
}
pub fn alias_exists(&self, host: &Host) -> Result<bool> {
let aliases = self.aliases.read();
Ok(aliases.contains_key(host))
}
fn set_partitions(&self, partitions: HashMap<String, Vec<Arc<Node>>>) {
let mut partition_map = self.partition_write_map.write();
*partition_map = partitions;
}
fn partitions(&self) -> Arc<RwLock<HashMap<String, Vec<Arc<Node>>>>> {
self.partition_write_map.clone()
}
pub fn node_partitions(&self, node: &Node, namespace: &str) -> Vec<u16> {
let mut res = vec![];
let partitions = self.partitions();
let partitions = partitions.read();
if let Some(node_array) = partitions.get(namespace) {
let mut i = 0;
for tnode in node_array {
if node == tnode.as_ref() {
res.push(i);
}
i += 1;
}
}
res
}
pub fn update_partitions(&self, node: Arc<Node>) -> Result<()> {
let mut conn = node.get_connection(self.client_policy.timeout)?;
let tokens = PartitionTokenizer::new(&mut conn).map_err(|e| {
conn.invalidate();
e
})?;
let nmap = tokens.update_partition(self.partitions(), node)?;
self.set_partitions(nmap);
Ok(())
}
pub fn | (&self) -> bool {
let seed_array = self.seeds.read();
info!("Seeding the cluster. Seeds count: {}", seed_array.len());
let mut list: Vec<Arc<Node>> = vec![];
for seed in &*seed_array {
let mut seed_node_validator = NodeValidator::new(self);
if let Err(err) = seed_node_validator.validate_node(self, seed) {
log_error_chain!(err, "Failed to validate seed host: {}", seed);
continue;
};
for alias in &*seed_node_validator.aliases() {
let nv = if *seed == *alias {
seed_node_validator.clone()
} else {
let mut nv2 = NodeValidator::new(self);
if let Err(err) = nv2.validate_node(self, seed) {
log_error_chain!(err, "Seeding host {} failed with error", alias);
continue;
};
nv2
};
if self.find_node_name(&list, &nv.name) {
continue;
}
let node = self.create_node(nv);
let node = Arc::new(node);
self.add_aliases(node.clone());
list.push(node);
}
}
self.add_nodes_and_aliases(&list);
!list.is_empty()
}
fn find_node_name(&self, list: &[Arc<Node>], name: &str) -> bool {
list.iter().any(|node| node.name() == name)
}
fn find_new_nodes_to_add(&self, hosts: Vec<Host>) -> Vec<Arc<Node>> {
let mut list: Vec<Arc<Node>> = vec![];
for host in hosts {
let mut nv = NodeValidator::new(self);
if let Err(err) = nv.validate_node(self, &host) {
log_error_chain!(err, "Adding node {} failed with error", host.name);
continue;
};
// Duplicate node name found. This usually occurs when the server
// services list contains both internal and external IP addresses
// for the same node. Add new host to list of alias filters
// and do not add new node.
let mut dup = false;
match self.get_node_by_name(&nv.name) {
Ok(node) => {
self.add_alias(host, node.clone());
dup = true;
}
Err(_) => {
if let Some(node) = list.iter().find(|n| n.name() == nv.name) {
self.add_alias(host, node.clone());
dup = true;
}
}
};
if !dup {
let node = self.create_node(nv);
list.push(Arc::new(node));
}
}
list
}
fn create_node(&self, nv: NodeValidator) -> Node {
Node::new(self.client_policy.clone(), Arc::new(nv))
}
fn find_nodes_to_remove(&self, refresh_count: usize) -> Vec<Arc<Node>> {
let nodes = self.nodes();
let mut remove_list: Vec<Arc<Node>> = vec![];
let cluster_size = nodes.len();
for node in nodes {
let | seed_nodes | identifier_name |
session.rs | )
}
}
/// State of game progression. Whether the game is on, over and what kind of over.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum Progression {
PlayOn(LastAttack),
GameOverWinner(Player),
GameOverStalemate(Vec<Player>), // Easier to calculate than a draw...
}
/// The state of the session.
#[derive(Debug, Clone, Getters)]
pub struct State {
/// Whether we continue play or not.
game: Progression,
/// If a bunch of single move turns needed to be made first. These include players
/// being knocked out or players only being able to pass their turn.
traversal: Vec<(Board, Choice)>,
/// State of game.
board: Board,
/// Choices available to current player.
choices: Vec<Choice>,
}
impl State {
fn new(
game: Progression,
traversal: &[(Board, Choice)],
board: Board,
choices: &[Choice],
) -> Self {
State {
game,
traversal: traversal
.into_iter()
.map(|(b, c)| (b.to_owned(), c.to_owned()))
.collect(),
board,
choices: choices
.into_iter()
.map(|c| c.to_owned())
.collect(),
}
}
}
/// Generate a `State` from a chosen `Board` consequence and the `Tree` where that `Board`
/// must exist. Runs inside a loop skipping over states that have only one turn left in
/// them except for Winning states. Uses some logic to detect draw states.
fn state_from_board(
mut current_board: Board, tree: &Tree, outcome: LastAttack,
) -> Result<State, usize> {
let mut traversal: Vec<(Board, Choice)> = Vec::new();
let mut depth: usize = 1;
let state = loop {
let choices = tree
.fetch_choices(¤t_board)
.ok_or(depth)?;
// If there's only one choice left, it may be a passing/gameover/win move. Or the
// last available attack.
if choices.len() == 1 {
depth += 1;
match choices[0].action() {
Action::Attack(_, _, _, _) => {
// There is one last attack to make. We won't execute this choice
// for the player as that'd be overstepping our bounds. Thus we jump
// out of this loop.
break State::new(
Progression::PlayOn(outcome),
traversal.as_slice(),
current_board,
choices,
);
},
Action::Pass => {
// It'd be cumbersome to manually pass a move. The player can't "do"
// anything. So let's just deal with it automatically.
// In order to do this, we need to figure out the passing consequence.
match choices[0].consequence() {
Consequence::Stalemate(next_board) => break State::new(
Progression::GameOverStalemate(next_board.players().playing()),
traversal.as_slice(),
next_board.to_owned(),
choices,
),
Consequence::Winner(next_board) => break State::new(
Progression::GameOverWinner(next_board.players().current()),
traversal.as_slice(),
next_board.to_owned(),
choices,
),
Consequence::GameOver(next_board) => {
// We need to iterate the progression.
traversal.push((current_board, choices[0].to_owned()));
current_board = next_board.to_owned();
continue;
},
Consequence::TurnOver(next_board) => {
// We need to iterate the progression.
traversal.push((current_board, choices[0].to_owned()));
current_board = next_board.to_owned();
continue;
},
Consequence::Continue(_) => unreachable!(),
}
},
}
}
// If we make it here, there are choices that need to be made.
break State::new(
Progression::PlayOn(outcome), | choices,
);
};
Ok(state)
}
/// A game in progress. The `traversals` indicate how many turns have passed. Maintains
/// all state of the game.
///
/// ## Invariants
/// 1. The `Tree` will always be valid.
/// 2. The first `State` in the `turns` is the starting position sans any initial traversals.
/// 3. There will always be at least one `State` in the `turns`.
#[derive(Debug, Clone, Getters)]
pub struct Session {
turns: Vec<State>,
tree: Option<Tree>,
move_limit: NonZeroU8,
rand: rngs::ThreadRng,
}
impl Session {
pub fn new(start: Board, tree: Tree, move_limit: NonZeroU8) -> Self {
// The start may contain a pass move. Cycle to get at the first true turn.
// This code is a copy of what's happening in `advance` below. TODO: Refactor me.
let mut tree = Some(tree);
let first_turn = loop {
match state_from_board(
start.clone(), tree.as_ref().unwrap(), LastAttack::default()
) {
Ok(state) => break state,
Err(depth) => {
let new_tree = game::start_tree_horizon_limited(
start.clone(), depth, move_limit.get(),
);
tree = Some(new_tree);
},
}
};
Session {
turns: vec![first_turn],
tree,
move_limit,
rand: rand::thread_rng(),
}
}
pub fn reset(self) -> Self {
let first = self.turns.first().unwrap().board.to_owned();
Session::new(
first.clone(),
game::start_tree_horizon_limited(first, 1, self.move_limit.get()),
self.move_limit,
)
}
pub fn current_turn(&self) -> &State {
self.turns.last().unwrap()
}
/// Take an `Action` and advance the game state. Advances the tree if necessary. Takes
/// an `index` of the `[Choice]`. The `Choice` will always be an attacking action.
pub fn advance(&mut self, index: usize) -> Result<&State, String> {
let choice = self
.current_turn()
.choices()
.get(index)
.ok_or("Index out of bounds.".to_owned())?
.to_owned();
let (attacker_coordinate, attacker_dice, defender_dice) = match choice.action() {
Action::Attack(ac, _, ad, dd) => (*ac, *ad, *dd),
Action::Pass => unreachable!(), // Must never happen. `Session` must always
// return with attack choices or game over.
};
let attacker_roll = roll_d6s(attacker_dice, &mut self.rand);
let defender_roll = roll_d6s(defender_dice, &mut self.rand);
let outcome = LastAttack::new(
attacker_dice, attacker_roll, defender_dice, defender_roll
);
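// The attack succeeds only on a strictly higher attacker total; ties favour the defender.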
let next_board = if attacker_roll > defender_roll {
// Board advances due to win.
choice.consequence().board().to_owned()
} else {
// Board stays the same sans one move due to loss and the losing hex frozen.
let current_board = &self.current_turn().board;
Board::new(
*current_board.players(),
current_board
.grid()
.fork_with(|coordinate, hold| {
// Freeze the losing hex til next turn.
if coordinate == &attacker_coordinate {
u8::new(hold.owner(), hold.dice(), false)
} else {
hold
}
}),
*current_board.captured_dice(),
*current_board.moved() + 1,
)
};
let state = loop {
match state_from_board(
next_board.clone(), &self.tree.as_ref().unwrap(), outcome,
) {
Ok(state) => break state,
Err(depth) => {
let new_tree = game::start_tree_horizon_limited(
next_board.to_owned(), depth, self.move_limit.get(),
);
self.tree = Some(new_tree);
},
}
};
self.turns.push(state);
Ok(self.current_turn())
}
/// Score the tree up to the depth specified in `horizon`. Will then edit current
/// `State` to put the scoring into the current choices. A deep horizon will cause the
/// system to lock up. High chance that an OOM error will follow.
pub fn score_with_depth_horizon(&mut self, horizon: usize) -> &State {
let current_board = self.current_turn().board.to_owned();
let tree = game::start_tree_horizon_limited(
current_board, horizon, self.move_limit.get(),
);
let _ = game::score_tree(&tree);
let choices = tree.fetch_choices(tree.root()).unwrap().to_owned();
let last_state = self.turns.last_mut().unwrap();
last_state.choices = choices;
self.tree = Some(tree);
last_state
}
/// Score the tree up to the board insert budget specified. The first tree layer
/// though will be computed without taking the budget into account; this way there will
/// always be all available choices for the turn.
pub fn score_with_insert_budget(&mut self, insert_budget: usize) -> &State {
let current_board | traversal.as_slice(),
current_board, | random_line_split |
session.rs | )
}
}
/// State of game progression. Whether the game is on, over and what kind of over.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum Progression {
PlayOn(LastAttack),
GameOverWinner(Player),
GameOverStalemate(Vec<Player>), // Easier to calculate than a draw...
}
/// The state of the session.
#[derive(Debug, Clone, Getters)]
pub struct State {
/// Whether we continue play or not.
game: Progression,
/// If a bunch of single move turns needed to be made first. These include players
/// being knocked out or players only being able to pass their turn.
traversal: Vec<(Board, Choice)>,
/// State of game.
board: Board,
/// Choices available to current player.
choices: Vec<Choice>,
}
impl State {
fn new(
game: Progression,
traversal: &[(Board, Choice)],
board: Board,
choices: &[Choice],
) -> Self {
State {
game,
traversal: traversal
.into_iter()
.map(|(b, c)| (b.to_owned(), c.to_owned()))
.collect(),
board,
choices: choices
.into_iter()
.map(|c| c.to_owned())
.collect(),
}
}
}
/// Generate a `State` from a chosen `Board` consequence and the `Tree` where that `Board`
/// must exist. Runs inside a loop skipping over states that have only one turn left in
/// them except for Winning states. Uses some logic to detect draw states.
fn state_from_board(
mut current_board: Board, tree: &Tree, outcome: LastAttack,
) -> Result<State, usize> {
let mut traversal: Vec<(Board, Choice)> = Vec::new();
let mut depth: usize = 1;
let state = loop {
let choices = tree
.fetch_choices(¤t_board)
.ok_or(depth)?;
// If there's only one choice left, it may be a passing/gameover/win move. Or the
// last available attack.
if choices.len() == 1 {
depth += 1;
match choices[0].action() {
Action::Attack(_, _, _, _) => {
// There is one last attack to make. We won't execute this choice
// for the player as that'd be overstepping our bounds. Thus we jump
// out of this loop.
break State::new(
Progression::PlayOn(outcome),
traversal.as_slice(),
current_board,
choices,
);
},
Action::Pass => {
// It'd be cumbersome to manually pass a move. The player can't "do"
// anything. So let's just deal with it automatically.
// In order to do this, we need to figure out the passing consequence.
match choices[0].consequence() {
Consequence::Stalemate(next_board) => break State::new(
Progression::GameOverStalemate(next_board.players().playing()),
traversal.as_slice(),
next_board.to_owned(),
choices,
),
Consequence::Winner(next_board) => break State::new(
Progression::GameOverWinner(next_board.players().current()),
traversal.as_slice(),
next_board.to_owned(),
choices,
),
Consequence::GameOver(next_board) => {
// We need to iterate the progression.
traversal.push((current_board, choices[0].to_owned()));
current_board = next_board.to_owned();
continue;
},
Consequence::TurnOver(next_board) => {
// We need to iterate the progression.
traversal.push((current_board, choices[0].to_owned()));
current_board = next_board.to_owned();
continue;
},
Consequence::Continue(_) => unreachable!(),
}
},
}
}
// If we make it here, there are choices that need to be made.
break State::new(
Progression::PlayOn(outcome),
traversal.as_slice(),
current_board,
choices,
);
};
Ok(state)
}
/// A game in progress. The `traversals` indicate how many turns have passed. Maintains
/// all state of the game.
///
/// ## Invariants
/// 1. The `Tree` will always be valid.
/// 2. The first `State` in the `turns` is the starting position sans any initial traversals.
/// 3. There will always be at least one `State` in the `turns`.
#[derive(Debug, Clone, Getters)]
pub struct Session {
turns: Vec<State>,
tree: Option<Tree>,
move_limit: NonZeroU8,
rand: rngs::ThreadRng,
}
impl Session {
pub fn new(start: Board, tree: Tree, move_limit: NonZeroU8) -> Self | turns: vec![first_turn],
tree,
move_limit,
rand: rand::thread_rng(),
}
}
pub fn reset(self) -> Self {
let first = self.turns.first().unwrap().board.to_owned();
Session::new(
first.clone(),
game::start_tree_horizon_limited(first, 1, self.move_limit.get()),
self.move_limit,
)
}
pub fn current_turn(&self) -> &State {
self.turns.last().unwrap()
}
/// Take an `Action` and advance the game state. Advances the tree if necessary. Takes
/// an `index` of the `[Choice]`. The `Choice` will always be an attacking action.
pub fn advance(&mut self, index: usize) -> Result<&State, String> {
let choice = self
.current_turn()
.choices()
.get(index)
.ok_or("Index out of bounds.".to_owned())?
.to_owned();
let (attacker_coordinate, attacker_dice, defender_dice) = match choice.action() {
Action::Attack(ac, _, ad, dd) => (*ac, *ad, *dd),
Action::Pass => unreachable!(), // Must never happen. `Session` must always
// return with attack choices or game over.
};
let attacker_roll = roll_d6s(attacker_dice, &mut self.rand);
let defender_roll = roll_d6s(defender_dice, &mut self.rand);
let outcome = LastAttack::new(
attacker_dice, attacker_roll, defender_dice, defender_roll
);
let next_board = if attacker_roll > defender_roll {
// Board advances due to win.
choice.consequence().board().to_owned()
} else {
// Board stays the same sans one move due to loss and the losing hex frozen.
let current_board = &self.current_turn().board;
Board::new(
*current_board.players(),
current_board
.grid()
.fork_with(|coordinate, hold| {
// Freeze the losing hex til next turn.
if coordinate == &attacker_coordinate {
u8::new(hold.owner(), hold.dice(), false)
} else {
hold
}
}),
*current_board.captured_dice(),
*current_board.moved() + 1,
)
};
let state = loop {
match state_from_board(
next_board.clone(), &self.tree.as_ref().unwrap(), outcome,
) {
Ok(state) => break state,
Err(depth) => {
let new_tree = game::start_tree_horizon_limited(
next_board.to_owned(), depth, self.move_limit.get(),
);
self.tree = Some(new_tree);
},
}
};
self.turns.push(state);
Ok(self.current_turn())
}
/// Score the tree up to the depth specified in `horizon`. Will then edit current
/// `State` to put the scoring into the current choices. A deep horizon will cause the
/// system to lock up. High chance that an OOM error will follow.
pub fn score_with_depth_horizon(&mut self, horizon: usize) -> &State {
let current_board = self.current_turn().board.to_owned();
let tree = game::start_tree_horizon_limited(
current_board, horizon, self.move_limit.get(),
);
let _ = game::score_tree(&tree);
let choices = tree.fetch_choices(tree.root()).unwrap().to_owned();
let last_state = self.turns.last_mut().unwrap();
last_state.choices = choices;
self.tree = Some(tree);
last_state
}
/// Score the tree up to the the board insert budget specified. The first tree layer
/// though will be computed without taking into account the budget, this way there will
/// always be all available choices for the turn.
pub fn score_with_insert_budget(&mut self, insert_budget: usize) -> &State {
let current | {
// The start may contain pass move. Cycle to get at the first true turn.
// This code is a copy of what's happening in `advance` below. TODO: Refactor me.
let mut tree = Some(tree);
let first_turn = loop {
match state_from_board(
start.clone(), tree.as_ref().unwrap(), LastAttack::default()
) {
Ok(state) => break state,
Err(depth) => {
let new_tree = game::start_tree_horizon_limited(
start.clone(), depth, move_limit.get(),
);
tree = Some(new_tree);
},
}
};
Session { | identifier_body |
session.rs | (
attacker_dice: u8, attacker_rolled: usize, defender_dice: u8, defender_rolled: usize
) -> Self {
LastAttack { attacker_dice, attacker_rolled, defender_dice, defender_rolled }
}
}
impl fmt::Display for LastAttack {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
if self.attacker_rolled == 0 && self.defender_rolled == 0 {
write!(f, "") // Sentinel value for first turn thus no preceding attacks.
} else {
if self.attacker_rolled > self.defender_rolled {
write!(
f,
"Attacker with {} dice rolled {} beating \
defender with {} dice who rolled {}.",
&self.attacker_dice,
&self.attacker_rolled,
&self.defender_dice,
&self.defender_rolled,
)
} else {
write!(
f,
"Defender with {} dice rolled {} holding against \
attacker with {} dice who rolled {}.",
&self.defender_dice,
&self.defender_rolled,
&self.attacker_dice,
&self.attacker_rolled,
)
}
}
}
}
impl Default for LastAttack {
fn default() -> Self {
LastAttack::new(0, 0, 0, 0)
}
}
/// State of game progression. Whether the game is on, over and what kind of over.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum Progression {
PlayOn(LastAttack),
GameOverWinner(Player),
GameOverStalemate(Vec<Player>), // Easier to calculate than a draw...
}
/// The state of the session.
#[derive(Debug, Clone, Getters)]
pub struct State {
/// Whether we continue play or not.
game: Progression,
/// If a bunch of single move turns needed to be made first. These include players
/// being knocked out or players only being able to pass their turn.
traversal: Vec<(Board, Choice)>,
/// State of game.
board: Board,
/// Choices available to current player.
choices: Vec<Choice>,
}
impl State {
fn new(
game: Progression,
traversal: &[(Board, Choice)],
board: Board,
choices: &[Choice],
) -> Self {
State {
game,
traversal: traversal
.into_iter()
.map(|(b, c)| (b.to_owned(), c.to_owned()))
.collect(),
board,
choices: choices
.into_iter()
.map(|c| c.to_owned())
.collect(),
}
}
}
/// Generate a `State` from a chosen `Board` consequence and the `Tree` where that `Board`
/// must exist. Runs inside a loop skipping over states that have only one turn left in
/// them except for Winning states. Uses some logic to detect draw states.
fn state_from_board(
mut current_board: Board, tree: &Tree, outcome: LastAttack,
) -> Result<State, usize> {
let mut traversal: Vec<(Board, Choice)> = Vec::new();
let mut depth: usize = 1;
let state = loop {
let choices = tree
.fetch_choices(¤t_board)
.ok_or(depth)?;
// If there's only one choice left, it may be a passing/gameover/win move. Or the
// last available attack.
if choices.len() == 1 {
depth += 1;
match choices[0].action() {
Action::Attack(_, _, _, _) => {
// There is one last attack to make. We won't execute this choice
// for the player as that'd be overstepping our bounds. Thus we jump
// out of this loop.
break State::new(
Progression::PlayOn(outcome),
traversal.as_slice(),
current_board,
choices,
);
},
Action::Pass => {
// It'd be cumbersome to manually pass a move. The player can't "do"
// anything. So let's just deal with it automatically.
// In order to do this, we need to figure out the passing consequence.
match choices[0].consequence() {
Consequence::Stalemate(next_board) => break State::new(
Progression::GameOverStalemate(next_board.players().playing()),
traversal.as_slice(),
next_board.to_owned(),
choices,
),
Consequence::Winner(next_board) => break State::new(
Progression::GameOverWinner(next_board.players().current()),
traversal.as_slice(),
next_board.to_owned(),
choices,
),
Consequence::GameOver(next_board) => {
// We need to iterate the progression.
traversal.push((current_board, choices[0].to_owned()));
current_board = next_board.to_owned();
continue;
},
Consequence::TurnOver(next_board) => {
// We need to iterate the progression.
traversal.push((current_board, choices[0].to_owned()));
current_board = next_board.to_owned();
continue;
},
Consequence::Continue(_) => unreachable!(),
}
},
}
}
// If we make it here, there are choices that need to be made.
break State::new(
Progression::PlayOn(outcome),
traversal.as_slice(),
current_board,
choices,
);
};
Ok(state)
}
/// A game in progress. The `traversals` indicate how many turns have passed. Maintains
/// all state of the game.
///
/// ## Invariants
/// 1. The `Tree` will always be valid.
/// 2. The first `State` in the `turns` is the starting position sans any inital traversals.
/// 3. There will always be at least one `State` in the `turns`.
#[derive(Debug, Clone, Getters)]
pub struct Session {
turns: Vec<State>,
tree: Option<Tree>,
move_limit: NonZeroU8,
rand: rngs::ThreadRng,
}
impl Session {
pub fn new(start: Board, tree: Tree, move_limit: NonZeroU8) -> Self {
// The start may contain pass move. Cycle to get at the first true turn.
// This code is a copy of what's happening in `advance` below. TODO: Refactor me.
let mut tree = Some(tree);
let first_turn = loop {
match state_from_board(
start.clone(), tree.as_ref().unwrap(), LastAttack::default()
) {
Ok(state) => break state,
Err(depth) => {
let new_tree = game::start_tree_horizon_limited(
start.clone(), depth, move_limit.get(),
);
tree = Some(new_tree);
},
}
};
Session {
turns: vec![first_turn],
tree,
move_limit,
rand: rand::thread_rng(),
}
}
pub fn reset(self) -> Self {
let first = self.turns.first().unwrap().board.to_owned();
Session::new(
first.clone(),
game::start_tree_horizon_limited(first, 1, self.move_limit.get()),
self.move_limit,
)
}
pub fn current_turn(&self) -> &State {
self.turns.last().unwrap()
}
/// Take an `Action` and advance the game state. Advances the tree if necessary. Takes
/// an `index` of the `[Choice]`. The `Choice` will always be an attacking action.
pub fn advance(&mut self, index: usize) -> Result<&State, String> {
let choice = self
.current_turn()
.choices()
.get(index)
.ok_or("Index out of bounds.".to_owned())?
.to_owned();
let (attacker_coordinate, attacker_dice, defender_dice) = match choice.action() {
Action::Attack(ac, _, ad, dd) => (*ac, *ad, *dd),
Action::Pass => unreachable!(), // Must never happen. `Session` must always
// return with attack choices or game over.
};
let attacker_roll = roll_d6s(attacker_dice, &mut self.rand);
let defender_roll = roll_d6s(defender_dice, &mut self.rand);
let outcome = LastAttack::new(
attacker_dice, attacker_roll, defender_dice, defender_roll
);
let next_board = if attacker_roll > defender_roll {
// Board advances due to win.
choice.consequence().board().to_owned()
} else {
// Board stays the same sans one move due to loss and the losing hex frozen.
let current_board = &self.current_turn().board;
Board::new(
*current_board.players(),
current_board
.grid()
.fork_with(|coordinate, hold| {
// Freeze the losing hex til next turn.
if coordinate == &attacker_coordinate {
u8::new(hold.owner(), hold.dice(), false)
} else {
hold
}
}),
*current_board.captured_dice(),
*current_board.moved() + 1,
)
};
let state = loop {
match state_from_board(
next_board.clone(), &self.tree.as_ref().unwrap(), outcome,
) {
Ok(state) => break state,
Err(depth) => {
let new_tree = game::start_tree_horizon_limited(
next | new | identifier_name |
|
global.js | 时,跳转到此
*/
onIconError : function(img) {
// 记录原有路径
var originalSrc = img.getAttribute('originalSrc');
var gender = img.getAttribute('gender');
var src = img.getAttribute('src');
if (!originalSrc) {
img.setAttribute('originalSrc', src);
originalSrc = src;
}
var noImgUrl = '/scfc/images/tpri/male.png';
if (gender == "2") {
noImgUrl = '/scfc/images/tpri/female.png';
}
var errorUrl = img.getAttribute('errorUrl');
var errorCount = parseInt(img.getAttribute('errorCount') || '0');
if (errorCount >= 1) {
img.src = noImgUrl;
return;
}
// 立即修改errorCount,img的src变了之后,如果还是加载失败,会立即调用onImgError,因此需要提前设置errorCount
img.setAttribute('errorCount', errorCount + 1);
if (errorCount == 0 && errorUrl) {
img.src = errorUrl;
} else {
img.src = noImgUrl;
}
},
// 判断浏览器
myBrowser : function() {
var userAgent = navigator.userAgent; // 取得浏览器的userAgent字符串
var isOpera = userAgent.indexOf("Opera") > -1;
// 判断是否Opera浏览器
if (isOpera) {
return "Opera";
}
// 判断是否Firefox浏览器
if (userAgent.indexOf("Firefox") > -1) {
return "Firefox";
}
// 判断是否Chrome浏览器
if (userAgent.indexOf("Chrome") > -1) {
return "Chrome";
}
// 判断是否Safari浏览器
if (userAgent.indexOf("Safari") > -1) {
return "Safari";
}
// 判断是否IE浏览器
if (userAgent.indexOf("compatible") > -1 && userAgent.indexOf("MSIE") > -1 && !isOpera) {
return "IE";
}
},
// 获取枚举值
getEnumName : function(enumId) {
if (!Global.enums) {
return "";
}
var enumName = Global.enums[enumId];
if (!enumName) {
return "";
}
return enumName;
},
// 获取代码名称
getCodeName : function(codeId) {
if (!Global.codes) {
return "";
}
var codeName = Global.codes[codeId];
if (!codeName) {
return "";
}
return codeName;
},
// 获取日期时间串中的日期
getDate : function(dateTime) {
if (!dateTime) {
return "";
}
return dateTime.substr(0, 10);
},
// 星期的中文字符
getDay : function(day) {
switch (day) {
case 0:
return "日";
case 1:
return "一";
case 2:
return "二";
case 3:
return "三";
case 4:
return "四";
case 5:
return "五";
case 6:
return "六";
default:
return "-";
}
},
// 获取cookie
getCookie : function(name) {
var arr = document.cookie.match(new RegExp("(^| )" + name + "=([^;]*)(;|$)"));
if (arr != null)
return decodeURIComponent(arr[2]);
return null;
},
// 设置cookie
setCookie : function(name, value, options) {
var expires = '', path = '', domain = '', secure = '';
if (options) {
if (options.expires && (typeof options.expires == 'number' || options.expires.toUTCString)) {
var exp;
if (typeof options.expires == 'number') {
exp = new Date();
exp.setTime(exp.getTime() + options.expires * 24 * 60 * 60 * 1000);
} else {
exp = options.expires;
}
expires = ';expires=' + exp.toUTCString();
}
path = options.path ? '; path=' + options.path : '';
domain = options.domain ? ';domain=' + options.domain : '';
secure = options.secure ? ';secure' : '';
}
document.cookie = [ name, '=', encodeURIComponent(value), expires, path, domain, secure ].join('');
},
initLoader : function(divId) {
//divId 加载的divId
if (divId == '' || divId == null) {
divId = '#loader';
}
$(divId).html('<div style="width:100px;margin:0 auto;"><i class="fa fa-spinner fa-spin fa-5x fa-fw" style="color:#de2810;"></i></div>');
},
// 设置url参数值,ref参数名,value新的参数值
changeURLPara : function(url, ref, value) {
var str = "";
if (url.indexOf('?') != -1) {
str = url.substr(url.indexOf('?') + 1);
} else {
return url + "?" + ref + "=" + value;
}
var returnurl = "";
var setparam = "";
var arr;
var modify = "0";
if (str.indexOf('&') != -1) {
arr = str.split('&');
for (i in arr) {
if (arr[i].split('=')[0] == ref) {
setparam = value;
modify = "1";
} else {
setparam = arr[i].split('=')[1];
}
returnurl = returnurl + arr[i].split('=')[0] + "=" + setparam + "&";
}
returnurl = returnurl.substr(0, returnurl.length - 1);
if (modify == "0") {
if (returnurl == str) {
returnurl = returnurl + "&" + ref + "=" + value;
}
}
} else {
if (str.indexOf('=') != -1) {
arr = str.split('=');
if (arr[0] == ref) {
setparam = value;
modify = "1";
} else {
setparam = arr[1];
}
returnurl = arr[0] + "=" + setparam;
if (modify == "0")
if (returnurl == str)
returnurl = returnurl + "&" + ref + "=" + value;
} else {
returnurl = ref + "=" + value;
}
}
return url.substr(0, url.indexOf('?')) + "?" + returnurl;
},
/**
* @description 浏览器打开弹出窗
* @author yiwenjun
*/
openWindow : function(url, name, iWidth, iHeight) {
var iTop = (window.screen.height - 30 - iHeight) / 2; //获得窗口的垂直位置;
var iLeft = (window.screen.width - 10 - iWidth) / 2; //获得窗口的水平位置;
var windowObject=window.open(url, name, 'height=' + iHeight + ',innerHeight=' + iHeight + ',width=' + iWidth + ',innerWidth=' + iWidth + ',top=' + iTop + ',left=' + iLeft + ',toolbar=no,menubar=no,scrollbars=yes,resizeable=no,location=no,status=no');
return windowObject;
},
/**
* @description 获取年份的select选项
* @author yiwenjun
* @since 2015-09-23
*
* @param type:
* 1当前年往后num年;2当前年份往前num年; 3当前年份前后num年,默认1
* @param num:年数,默认10
* @param order:0顺序;1倒序,默认0
* @param isFirstBlack:0显示否为空;1显示第一个;默认0
* @return select下的选项html
*/
getYearForSelectOption : function(type, num, order, isFirstBlack) {
var today = new Date();
var currentYear = today.getFullYear();
if (!num || num < 1) {
num = 10;
}
if (!isFirstBlack) {
isFirstBlack = 0;
}
var options = "";
if (isFirstBlack == 0) {
options += '<option value=""></option>';
}
if (type == 2) {
if (order == 1) {
for (var year = currentYear; year > currentYear - num; year--) {
options += '<option value=' + year + '>' + year + '</option>';
}
} else {
for (var year = currentYear - num + 1; year <= currentYear; year++) {
options += '<option value=' + year + '>' + year + '</option>';
}
}
} else if (type == 3) {
if (order == 1) {
for (var year = currentYear + num; year > currentYear - num; year--) {
options += '<option value=' + year + '>' + year + '</option>';
| }
} else {
for (var y | conditional_block |
|
global.js | ").each(function() {
$(this).addClass("span-hover");
});
} else if (event.type == "mouseout") {
$(this).find("span").each(function() {
$(this).removeClass("span-hover");
});
}
});
},
// 初始化点击样式为module-todo的元素的时间
initTodoClick : function() {
$(document).on("click", ".module-todo", function() {
t.todo();
});
},
todo : function() {
Notify.info("正在建设中...");
},
cloneAttributes : function(attributes) {
$.extend(Global, attributes);
},
/** 判断用户是否具有某个权限 */
hasPrivilege : function(privilegeId) {
for ( var index in Global.allPrivilegeIds) {
if (Global.allPrivilegeIds[index] == privilegeId) {
return true;
}
}
return false;
},
/** 字符串转json对象的方法 */
string2json : function(string) {
if (string == undefined || string == null) {
return null;
}
return $.parseJSON(string);
},
/** json对象转字符串的方法 */
json2string : function(obj) {
return JSON.stringify(obj);
},
/**
* 图片加载失败时的处理: PS:errorCount是计数器,避免进入死循环判断
*/
onImgError : function(img) {
// 记录原有路径
var originalSrc = img.getAttribute('originalSrc');
var src = img.getAttribute('src');
if (!originalSrc) {
img.setAttribute('originalSrc', src);
originalSrc = src;
}
var noImgUrl = '/scfc/images/tpri/404.jpg';
var errorUrl = img.getAttribute('errorUrl');
var errorCount = parseInt(img.getAttribute('errorCount') || '0');
if (errorCount >= 1) {
img.src = noImgUrl;
return;
}
// 立即修改errorCount,img的src变了之后,如果还是加载失败,会立即调用onImgError,因此需要提前设置errorCount
img.setAttribute('errorCount', errorCount + 1);
if (errorCount == 0 && errorUrl) {
img.src = errorUrl;
} else {
img.src = noImgUrl;
}
},
/**
* 找不到党员照片时,跳转到此
*/
onIconError : function(img) {
// 记录原有路径
var originalSrc = img.getAttribute('originalSrc');
var gender = img.getAttribute('gender');
var src = img.getAttribute('src');
if (!originalSrc) {
img.setAttribute('originalSrc', src);
originalSrc = src;
}
var noImgUrl = '/scfc/images/tpri/male.png';
if (gender == "2") {
noImgUrl = '/scfc/images/tpri/female.png';
}
var errorUrl = img.getAttribute('errorUrl');
var errorCount = parseInt(img.getAttribute('errorCount') || '0');
if (errorCount >= 1) {
img.src = noImgUrl;
return;
}
// 立即修改errorCount,img的src变了之后,如果还是加载失败,会立即调用onImgError,因此需要提前设置errorCount
img.setAttribute('errorCount', errorCount + 1);
if (errorCount == 0 && errorUrl) {
img.src = errorUrl;
} else {
img.src = noImgUrl;
}
},
// 判断浏览器
myBrowser : function() {
var userAgent = navigator.userAgent; // 取得浏览器的userAgent字符串
var isOpera = userAgent.indexOf("Opera") > -1;
// 判断是否Opera浏览器
if (isOpera) {
return "Opera";
}
// 判断是否Firefox浏览器
if (userAgent.indexOf("Firefox") > -1) {
return "Firefox";
}
// 判断是否Chrome浏览器
if (userAgent.indexOf("Chrome") > -1) {
return "Chrome";
}
// 判断是否Safari浏览器
if (userAgent.indexOf("Safari") > -1) {
return "Safari";
}
// 判断是否IE浏览器
if (userAgent.indexOf("compatible") > -1 && userAgent.indexOf("MSIE") > -1 && !isOpera) {
return "IE";
}
},
// 获取枚举值
getEnumName : function(enumId) {
if (!Global.enums) {
return "";
}
var enumName = Global.enums[enumId];
if (!enumName) {
return "";
}
return enumName;
},
// 获取代码名称
getCodeName : function(codeId) {
if (!Global.codes) {
return "";
}
var codeName = Global.codes[codeId];
if (!codeName) {
return "";
}
return codeName;
},
// 获取日期时间串中的日期
getDate : function(dateTime) {
if (!dateTime) {
return "";
}
return dateTime.substr(0, 10);
},
// 星期的中文字符
getDay : function(day) {
switch (day) {
case 0:
return "日";
case 1:
return "一";
case 2:
return "二";
case 3:
return "三";
case 4:
return "四";
case 5:
return "五";
case 6:
return "六";
default:
return "-";
}
},
// 获取cookie
getCookie : function(name) {
var arr = document.cookie.match(new RegExp("(^| )" + name + "=([^;]*)(;|$)"));
if (arr != null)
return decodeURIComponent(arr[2]);
return null;
},
// 设置cookie
setCookie : function(name, value, options) {
var expires = '', path = '', domain = '', secure = '';
if (options) {
if (options.expires && (typeof options.expires == 'number' || options.expires.toUTCString)) {
var exp;
| exp.setTime(exp.getTime() + options.expires * 24 * 60 * 60 * 1000);
} else {
exp = options.expires;
}
expires = ';expires=' + exp.toUTCString();
}
path = options.path ? '; path=' + options.path : '';
domain = options.domain ? ';domain=' + options.domain : '';
secure = options.secure ? ';secure' : '';
}
document.cookie = [ name, '=', encodeURIComponent(value), expires, path, domain, secure ].join('');
},
initLoader : function(divId) {
//divId 加载的divId
if (divId == '' || divId == null) {
divId = '#loader';
}
$(divId).html('<div style="width:100px;margin:0 auto;"><i class="fa fa-spinner fa-spin fa-5x fa-fw" style="color:#de2810;"></i></div>');
},
// 设置url参数值,ref参数名,value新的参数值
changeURLPara : function(url, ref, value) {
var str = "";
if (url.indexOf('?') != -1) {
str = url.substr(url.indexOf('?') + 1);
} else {
return url + "?" + ref + "=" + value;
}
var returnurl = "";
var setparam = "";
var arr;
var modify = "0";
if (str.indexOf('&') != -1) {
arr = str.split('&');
for (i in arr) {
if (arr[i].split('=')[0] == ref) {
setparam = value;
modify = "1";
} else {
setparam = arr[i].split('=')[1];
}
returnurl = returnurl + arr[i].split('=')[0] + "=" + setparam + "&";
}
returnurl = returnurl.substr(0, returnurl.length - 1);
if (modify == "0") {
if (returnurl == str) {
returnurl = returnurl + "&" + ref + "=" + value;
}
}
} else {
if (str.indexOf('=') != -1) {
arr = str.split('=');
if (arr[0] == ref) {
setparam = value;
modify = "1";
} else {
setparam = arr[1];
}
returnurl = arr[0] + "=" + setparam;
if (modify == "0")
if (returnurl == str)
returnurl = returnurl + "&" + ref + "=" + value;
} else {
returnurl = ref + "=" + value;
}
}
return url.substr(0, url.indexOf('?')) + "?" + returnurl;
},
/**
* @description 浏览器打开弹出窗
* @author yiwenjun
*/
openWindow : function(url, name, iWidth, iHeight) {
var iTop = (window.screen.height - 30 - iHeight) / | if (typeof options.expires == 'number') {
exp = new Date();
| random_line_split |
monitor.go | &MonitoringManager{
MonitorList: make([]*Monitor, 0),
LastMessageList: make(map[string]*[]byte),
Start: false,
}
}
// Return the global monitor manager object
func GetManager() *MonitoringManager {
if globalMonitor == nil {
fmt.Println("Monitor not initialized. Initializing")
Init() // Create monitor
}
return globalMonitor
}
// Mark that the client has received the monitor update message
// This will clear the message from the resend queue in the last message list
func (mgmt *MonitoringManager) MarkSubmit(msgObj messagesocket.Message) {
println(mgmt.LastMessageList)
// Set last message list to nil and remove it from the map
mgmt.LastMessageList[msgObj.Addr.String()] = nil
delete(mgmt.LastMessageList, msgObj.Addr.String())
println(mgmt.LastMessageList)
fmt.Printf("[MON] Marked %s as submitted\n", msgObj.Addr.String())
}
// Function to handle checking the last message list and resending them all if still unacknowledged
func (mgmt *MonitoringManager) Resend() {
fmt.Println("[MON] Resending unacknowledged messages")
// Iterate through all clients currently being monitored
for _,v := range mgmt.MonitorList {
// Check if client address/port combo matches that in the last message list
// Last message list contains all messages that are yet to be acknowledged as received
if value, ok := mgmt.LastMessageList[v.Message.Addr.String()]; ok {
// Check that there is a message being kept in the list
if mgmt.LastMessageList[v.Message.Addr.String()] != nil {
fmt.Printf("Broadcasting update to: %s\n", v.Message.Addr.String())
// Resend message to client
v.Message.Reply(*value)
}
}
}
}
// Multithreaded Goroutine to handle checking for unacknowledged messages
// Unacknowledged messages will be resent in a loop until all messages are acknowledged
func (mgmt *MonitoringManager) goStartCheckingResend() {
// Defers setting resend check to set back to false
// This check is used to ensure that only 1 go function is called at any time
defer func() {mgmt.Start = false}()
// Set resend check to true. This makes it such that we do not start anymore goroutine to do resend checks
mgmt.Start = true
for {
// Wait 5 seconds
time.Sleep(5 * time.Second)
// Check if there are still messages in the list
if len(mgmt.LastMessageList) <= 0 {
// No messages left in list, stop and exit goroutine
fmt.Println("Nothing to ack. bye")
break
}
// Resend all unacknowledged messages
mgmt.Resend()
}
}
// Adds an IP address to the list of clients currently monitoring a facility
func (mgmt *MonitoringManager) AddIP(msgObj messagesocket.Message, duration int64, facility facility.Facility) {
// Check that IP Combination current exists, replace if so
mgmt.RemoveIPIfExists(msgObj.Addr, facility)
// Get current time and end time for monitoring
start := time.Now()
dura := time.Duration(duration) * time.Second
end := start.Add(dura)
// Add to monitoring list
mgmt.MonitorList = append(mgmt.MonitorList, &Monitor{
Message: msgObj,
Start: start,
End: end,
Interval: dura,
Facility: facility,
})
fmt.Printf("Added %s to monitor list to monitor %s for %d seconds", msgObj.Addr.String(), facility, duration)
}
// Debug Helper function to print list of clients currently monitoring a facility
func (mgmt *MonitoringManager) PrintMonitoring() {
if len(mgmt.MonitorList) <= 0 {
fmt.Println("No IP monitoring facility")
return
}
fmt.Println("==========================================")
for _,v := range mgmt.MonitorList {
fmt.Printf("%s - %s - %s to %s\n", v.Message.Addr.String(), v.Facility, v.Start.Format("02/01/2006 15:04:05"), v.End.Format("02/01/2006 15:04:05"))
}
fmt.Println("==========================================")
}
// Broadcast function that is used by the Booking manager to send a monitor broadcast to clients monitoring the specific facility
// This function is called after the add/modify/delete booking functions are executed successfully
func (mgmt *MonitoringManager) Broadcast(facility facility.Facility, delType string, bm *BookingManager, name string) {
// Check if there are any clients currently on the monitoring list that has expired and should be removed. Remove if so
mgmt.CheckExpiry()
blastMsg := fmt.Sprintf("Booking %s for %s by %s", delType, facility, name)
fmt.Println(blastMsg)
// Get facility availability dates and generate bytestream message from it to be broadcasted to the client
days := []Day{Monday, Tuesday, Wednesday, Thursday, Friday, Saturday, Sunday}
dates := bm.GetAvailableDates(facility, days...)
byteArr, err := MarshalQueryAvailabilityMonitorMsg(dates, blastMsg, days)
// Get list of all clients to broadcast this message to
// toBc contains the list of clients that are currently monitoring this facility and have not expired
toBc := mgmt.GetClientsToBroadcast(facility)
messageList := make([]messagesocket.Message, len(toBc))
for i,bc := range toBc {
messageList[i] = bc.Message
}
var marshalled []byte
if err != nil {
fmt.Printf("%s\n", err.Error())
return // Don't send availability on error
} else {
marshalled = byteArr
}
// Broadcast the message to the clients
mgmt.BroadcastUsingMsgList(marshalled, messageList)
// Check if the unacknowledged messages handler is started
// If it is not started, start it up
if !mgmt.Start {
fmt.Println("Starting resend loop")
go mgmt.goStartCheckingResend()
} else {
fmt.Println("Existing routing ignoring")
}
}
// Takes a list of clients, and the data to broadcast in bytes and sends it to each of the clients
func (mgmt *MonitoringManager) BroadcastUsingMsgList(data []byte, list []messagesocket.Message){
for _, a := range list {
mgmt.LastMessageList[a.Addr.String()] = &data
fmt.Printf("Broadcasting update to: %s\n", a.Addr.String())
a.Reply(data)
}
}
// Obtain a list of clients that are currently listening to the specified facility from the monitoring list
func (mgmt *MonitoringManager) GetClientsToBroadcast(fac facility.Facility) []*Monitor {
inform := make([]*Monitor, 0)
// Iterate through the monitoring list to get the list of clients
// If the client is currently monitoring the facility, add it to the final list that would be returned
for _,v := range mgmt.MonitorList {
if v.Facility == fac {
inform = append(inform, v)
}
}
return inform
}
// Check if the clients in the monitoring list have expired
// Remove automatically if so
func (mgmt *MonitoringManager) CheckExpiry() {
// Get current time
curTime := time.Now
// Get new list to store all unexpired clients
unexpired := make([]*Monitor, 0)
// Iterate through current list of clients monitoring a facility
for _,v := range mgmt.MonitorList {
// If client's monitoring end time is after the current time, add to the new list
if v.End.After(curTime()) {
// Not expired
unexpired = append(unexpired, v)
}
}
// Overwrite the current list with the new list of monitoring clients
mgmt.MonitorList = unexpired
}
// Helper function to remove the IP/Port combo if exists from the monitoring list
func (mgmt *MonitoringManager) RemoveIPIfExists(address net.Addr, fac facility.Facility) {
// Check that IP exists. if Ind is -1, IP does not exist
ind := mgmt.CheckIPExistIndex(address, fac)
if ind > -1 {
// IP exists, remove it from the list
mgmt.RemoveIPWithIndex(ind)
}
}
// Remove IP address from monitoring list based on index
func (mgmt *MonitoringManager) RemoveIPWithIndex(index int) {
if index > -1 {
// We do not need to care about order, so lets do a O(1) removal
// We start by just swapping the last element of the list with the item specified in the index
// Then we truncate the list by 1 to reduce the size
mgmt.MonitorList[index] = mgmt.MonitorList[len(mgmt.MonitorList)-1]
mgmt.MonitorList[len(mgmt.MonitorList)-1] = nil
mgmt.MonitorList = mgmt.MonitorList[:len(mgmt.MonitorList)-1]
}
}
// Obtain the index of the IP address in the monitoring list if it exists
func (mgmt *MonitoringManager) | CheckIPExistIndex | identifier_name |
|
monitor.go | Message messagesocket.Message
// The start time of the monitor session
Start time.Time
// The end time of the monitor session
End time.Time
// The total duration the monitor session remain active
Interval time.Duration
// The facility the monitoring session is monitoring
Facility facility.Facility
}
// Initialize the global monitor manager (Used by the server to manage monitoring sessions)
func Init() {
globalMonitor = &MonitoringManager{
MonitorList: make([]*Monitor, 0),
LastMessageList: make(map[string]*[]byte),
Start: false,
}
}
// Return the global monitor manager object
func GetManager() *MonitoringManager {
if globalMonitor == nil {
fmt.Println("Monitor not initialized. Initializing")
Init() // Create monitor
}
return globalMonitor
}
// Mark that the client has received the monitor update message
// This will clear the message from the resend queue in the last message list
func (mgmt *MonitoringManager) MarkSubmit(msgObj messagesocket.Message) {
println(mgmt.LastMessageList)
// Set last message list to nil and remove it from the map
mgmt.LastMessageList[msgObj.Addr.String()] = nil
delete(mgmt.LastMessageList, msgObj.Addr.String())
println(mgmt.LastMessageList)
fmt.Printf("[MON] Marked %s as submitted\n", msgObj.Addr.String())
}
// Function to handle checking the last message list and resending them all if still unacknowledged
func (mgmt *MonitoringManager) Resend() {
fmt.Println("[MON] Resending unacknowledged messages")
// Iterate through all clients currently being monitored
for _,v := range mgmt.MonitorList {
// Check if client address/port combo matches that in the last message list
// Last message list contains all messages that are yet to be acknowledged as received
if value, ok := mgmt.LastMessageList[v.Message.Addr.String()]; ok {
// Check that there is a message being kept in the list
if mgmt.LastMessageList[v.Message.Addr.String()] != nil {
fmt.Printf("Broadcasting update to: %s\n", v.Message.Addr.String())
// Resend message to client
v.Message.Reply(*value)
}
}
}
}
// Multithreaded Goroutine to handle checking for unacknowledged messages
// Unacknowledged messages will be resent in a loop until all messages are acknowledged
func (mgmt *MonitoringManager) goStartCheckingResend() {
// Defers setting resend check to set back to false
// This check is used to ensure that only 1 go function is called at any time
defer func() {mgmt.Start = false}()
// Set resend check to true. This makes it such that we do not start anymore goroutine to do resend checks
mgmt.Start = true
for {
// Wait 5 seconds
time.Sleep(5 * time.Second)
// Check if there are still messages in the list
if len(mgmt.LastMessageList) <= 0 {
// No messages left in list, stop and exit goroutine
fmt.Println("Nothing to ack. bye")
break
}
// Resend all unacknowledged messages
mgmt.Resend()
}
}
// Adds an IP address to the list of clients currently monitoring a facility
func (mgmt *MonitoringManager) AddIP(msgObj messagesocket.Message, duration int64, facility facility.Facility) {
// Check that IP Combination current exists, replace if so
mgmt.RemoveIPIfExists(msgObj.Addr, facility)
// Get current time and end time for monitoring
start := time.Now()
dura := time.Duration(duration) * time.Second
end := start.Add(dura)
// Add to monitoring list
mgmt.MonitorList = append(mgmt.MonitorList, &Monitor{
Message: msgObj,
Start: start,
End: end,
Interval: dura,
Facility: facility,
})
fmt.Printf("Added %s to monitor list to monitor %s for %d seconds", msgObj.Addr.String(), facility, duration)
}
// Debug Helper function to print list of clients currently monitoring a facility
func (mgmt *MonitoringManager) PrintMonitoring() {
if len(mgmt.MonitorList) <= 0 {
fmt.Println("No IP monitoring facility")
return
}
fmt.Println("==========================================")
for _,v := range mgmt.MonitorList {
fmt.Printf("%s - %s - %s to %s\n", v.Message.Addr.String(), v.Facility, v.Start.Format("02/01/2006 15:04:05"), v.End.Format("02/01/2006 15:04:05"))
}
fmt.Println("==========================================")
}
// Broadcast function that is used by the Booking manager to send a monitor broadcast to clients monitoring the specific facility
// This function is called after the add/modify/delete booking functions are executed successfully
func (mgmt *MonitoringManager) Broadcast(facility facility.Facility, delType string, bm *BookingManager, name string) {
// Check if there are any clients currently on the monitoring list that has expired and should be removed. Remove if so
mgmt.CheckExpiry()
blastMsg := fmt.Sprintf("Booking %s for %s by %s", delType, facility, name)
fmt.Println(blastMsg)
// Get facility availability dates and generate bytestream message from it to be broadcasted to the client
days := []Day{Monday, Tuesday, Wednesday, Thursday, Friday, Saturday, Sunday}
dates := bm.GetAvailableDates(facility, days...)
byteArr, err := MarshalQueryAvailabilityMonitorMsg(dates, blastMsg, days)
// Get list of all clients to broadcast this message to
// toBc contains the list of clients that are currently monitoring this facility and have not expired
toBc := mgmt.GetClientsToBroadcast(facility)
messageList := make([]messagesocket.Message, len(toBc))
for i,bc := range toBc {
messageList[i] = bc.Message
}
var marshalled []byte
if err != nil {
fmt.Printf("%s\n", err.Error())
return // Don't send availability on error
} else {
marshalled = byteArr
}
// Broadcast the message to the clients
mgmt.BroadcastUsingMsgList(marshalled, messageList)
// Check if the unacknowledged messages handler is started
// If it is not started, start it up
if !mgmt.Start {
fmt.Println("Starting resend loop")
go mgmt.goStartCheckingResend()
} else {
fmt.Println("Existing routing ignoring")
}
}
// Takes a list of clients, and the data to broadcast in bytes and sends it to each of the clients
func (mgmt *MonitoringManager) BroadcastUsingMsgList(data []byte, list []messagesocket.Message){
for _, a := range list {
mgmt.LastMessageList[a.Addr.String()] = &data
fmt.Printf("Broadcasting update to: %s\n", a.Addr.String())
a.Reply(data)
}
}
// Obtain a list of clients that are currently listening to the specified facility from the monitoring list
func (mgmt *MonitoringManager) GetClientsToBroadcast(fac facility.Facility) []*Monitor {
inform := make([]*Monitor, 0)
// Iterate through the monitoring list to get the list of clients
// If the client is currently monitoring the facility, add it to the final list that would be returned
for _,v := range mgmt.MonitorList {
if v.Facility == fac {
inform = append(inform, v)
}
}
return inform
}
// Check if the clients in the monitoring list have expired
// Remove automatically if so
func (mgmt *MonitoringManager) CheckExpiry() {
// Get current time
curTime := time.Now
// Get new list to store all unexpired clients
unexpired := make([]*Monitor, 0)
// Iterate through current list of clients monitoring a facility
for _,v := range mgmt.MonitorList {
// If client's monitoring end time is after the current time, add to the new list
if v.End.After(curTime()) {
// Not expired
unexpired = append(unexpired, v)
}
}
// Overwrite the current list with the new list of monitoring clients
mgmt.MonitorList = unexpired
}
// Helper function to remove the IP/Port combo if exists from the monitoring list
func (mgmt *MonitoringManager) RemoveIPIfExists(address net.Addr, fac facility.Facility) {
// Check that IP exists. if Ind is -1, IP does not exist
ind := mgmt.CheckIPExistIndex(address, fac)
if ind > -1 {
// IP exists, remove it from the list
mgmt.RemoveIPWithIndex(ind)
}
}
// Remove IP address from monitoring list based on index
func (mgmt *MonitoringManager) RemoveIPWithIndex(index int) {
if index > -1 {
// We do not need to care about order, so lets do a O(1) removal
// We start | }
// Monitor is the implementaion of a monitoring session
type Monitor struct {
// Store the client StartMonitoringMessage to identiy the client that initiate the monitoring session | random_line_split |
|
monitor.go | time.Time
// The end time of the monitor session
End time.Time
// The total duration the monitor session remain active
Interval time.Duration
// The facility the monitoring session is monitoring
Facility facility.Facility
}
// Initialize the global monitor manager (Used by the server to manage monitoring sessions)
func Init() {
globalMonitor = &MonitoringManager{
MonitorList: make([]*Monitor, 0),
LastMessageList: make(map[string]*[]byte),
Start: false,
}
}
// Return the global monitor manager object
func GetManager() *MonitoringManager {
if globalMonitor == nil {
fmt.Println("Monitor not initialized. Initializing")
Init() // Create monitor
}
return globalMonitor
}
// Mark that the client has received the monitor update message
// This will clear the message from the resend queue in the last message list
func (mgmt *MonitoringManager) MarkSubmit(msgObj messagesocket.Message) {
println(mgmt.LastMessageList)
// Set last message list to nil and remove it from the map
mgmt.LastMessageList[msgObj.Addr.String()] = nil
delete(mgmt.LastMessageList, msgObj.Addr.String())
println(mgmt.LastMessageList)
fmt.Printf("[MON] Marked %s as submitted\n", msgObj.Addr.String())
}
// Function to handle checking the last message list and resending them all if still unacknowledged
func (mgmt *MonitoringManager) Resend() {
fmt.Println("[MON] Resending unacknowledged messages")
// Iterate through all clients currently being monitored
for _,v := range mgmt.MonitorList {
// Check if client address/port combo matches that in the last message list
// Last message list contains all messages that are yet to be acknowledged as received
if value, ok := mgmt.LastMessageList[v.Message.Addr.String()]; ok {
// Check that there is a message being kept in the list
if mgmt.LastMessageList[v.Message.Addr.String()] != nil {
fmt.Printf("Broadcasting update to: %s\n", v.Message.Addr.String())
// Resend message to client
v.Message.Reply(*value)
}
}
}
}
// Multithreaded Goroutine to handle checking for unacknowledged messages
// Unacknowledged messages will be resent in a loop until all messages are acknowledged
func (mgmt *MonitoringManager) goStartCheckingResend() {
// Defers setting resend check to set back to false
// This check is used to ensure that only 1 go function is called at any time
defer func() {mgmt.Start = false}()
// Set resend check to true. This makes it such that we do not start anymore goroutine to do resend checks
mgmt.Start = true
for {
// Wait 5 seconds
time.Sleep(5 * time.Second)
// Check if there are still messages in the list
if len(mgmt.LastMessageList) <= 0 {
// No messages left in list, stop and exit goroutine
fmt.Println("Nothing to ack. bye")
break
}
// Resend all unacknowledged messages
mgmt.Resend()
}
}
// Adds an IP address to the list of clients currently monitoring a facility
func (mgmt *MonitoringManager) AddIP(msgObj messagesocket.Message, duration int64, facility facility.Facility) {
// Check that IP Combination current exists, replace if so
mgmt.RemoveIPIfExists(msgObj.Addr, facility)
// Get current time and end time for monitoring
start := time.Now()
dura := time.Duration(duration) * time.Second
end := start.Add(dura)
// Add to monitoring list
mgmt.MonitorList = append(mgmt.MonitorList, &Monitor{
Message: msgObj,
Start: start,
End: end,
Interval: dura,
Facility: facility,
})
fmt.Printf("Added %s to monitor list to monitor %s for %d seconds", msgObj.Addr.String(), facility, duration)
}
// Debug Helper function to print list of clients currently monitoring a facility
func (mgmt *MonitoringManager) PrintMonitoring() {
if len(mgmt.MonitorList) <= 0 {
fmt.Println("No IP monitoring facility")
return
}
fmt.Println("==========================================")
for _,v := range mgmt.MonitorList {
fmt.Printf("%s - %s - %s to %s\n", v.Message.Addr.String(), v.Facility, v.Start.Format("02/01/2006 15:04:05"), v.End.Format("02/01/2006 15:04:05"))
}
fmt.Println("==========================================")
}
// Broadcast function that is used by the Booking manager to send a monitor broadcast to clients monitoring the specific facility
// This function is called after the add/modify/delete booking functions are executed successfully
func (mgmt *MonitoringManager) Broadcast(facility facility.Facility, delType string, bm *BookingManager, name string) {
// Check if there are any clients currently on the monitoring list that has expired and should be removed. Remove if so
mgmt.CheckExpiry()
blastMsg := fmt.Sprintf("Booking %s for %s by %s", delType, facility, name)
fmt.Println(blastMsg)
// Get facility availability dates and generate bytestream message from it to be broadcasted to the client
days := []Day{Monday, Tuesday, Wednesday, Thursday, Friday, Saturday, Sunday}
dates := bm.GetAvailableDates(facility, days...)
byteArr, err := MarshalQueryAvailabilityMonitorMsg(dates, blastMsg, days)
// Get list of all clients to broadcast this message to
// toBc contains the list of clients that are currently monitoring this facility and have not expired
toBc := mgmt.GetClientsToBroadcast(facility)
messageList := make([]messagesocket.Message, len(toBc))
for i,bc := range toBc {
messageList[i] = bc.Message
}
var marshalled []byte
if err != nil | else {
marshalled = byteArr
}
// Broadcast the message to the clients
mgmt.BroadcastUsingMsgList(marshalled, messageList)
// Check if the unacknowledged messages handler is started
// If it is not started, start it up
if !mgmt.Start {
fmt.Println("Starting resend loop")
go mgmt.goStartCheckingResend()
} else {
fmt.Println("Existing routing ignoring")
}
}
// Takes a list of clients, and the data to broadcast in bytes and sends it to each of the clients
func (mgmt *MonitoringManager) BroadcastUsingMsgList(data []byte, list []messagesocket.Message){
for _, a := range list {
mgmt.LastMessageList[a.Addr.String()] = &data
fmt.Printf("Broadcasting update to: %s\n", a.Addr.String())
a.Reply(data)
}
}
// Obtain a list of clients that are currently listening to the specified facility from the monitoring list
func (mgmt *MonitoringManager) GetClientsToBroadcast(fac facility.Facility) []*Monitor {
inform := make([]*Monitor, 0)
// Iterate through the monitoring list to get the list of clients
// If the client is currently monitoring the facility, add it to the final list that would be returned
for _,v := range mgmt.MonitorList {
if v.Facility == fac {
inform = append(inform, v)
}
}
return inform
}
// Check if the clients in the monitoring list have expired
// Remove automatically if so
func (mgmt *MonitoringManager) CheckExpiry() {
// Get current time
curTime := time.Now
// Get new list to store all unexpired clients
unexpired := make([]*Monitor, 0)
// Iterate through current list of clients monitoring a facility
for _,v := range mgmt.MonitorList {
// If client's monitoring end time is after the current time, add to the new list
if v.End.After(curTime()) {
// Not expired
unexpired = append(unexpired, v)
}
}
// Overwrite the current list with the new list of monitoring clients
mgmt.MonitorList = unexpired
}
// Helper function to remove the IP/Port combo if exists from the monitoring list
func (mgmt *MonitoringManager) RemoveIPIfExists(address net.Addr, fac facility.Facility) {
// Check that IP exists. if Ind is -1, IP does not exist
ind := mgmt.CheckIPExistIndex(address, fac)
if ind > -1 {
// IP exists, remove it from the list
mgmt.RemoveIPWithIndex(ind)
}
}
// Remove IP address from monitoring list based on index
func (mgmt *MonitoringManager) RemoveIPWithIndex(index int) {
if index > -1 {
// We do not need to care about order, so lets do a O(1) removal
// We start by just swapping the last element of the list with the item specified in the index
// Then we truncate the list by 1 to reduce the size
mgmt.MonitorList[index] = mgmt.MonitorList[len(mgmt.M | {
fmt.Printf("%s\n", err.Error())
return // Don't send availability on error
} | conditional_block |
monitor.go | ())
// Resend message to client
v.Message.Reply(*value)
}
}
}
}
// Multithreaded Goroutine to handle checking for unacknowledged messages
// Unacknowledged messages will be resent in a loop until all messages are acknowledged
func (mgmt *MonitoringManager) goStartCheckingResend() {
// Defers setting resend check to set back to false
// This check is used to ensure that only 1 go function is called at any time
defer func() {mgmt.Start = false}()
// Set resend check to true. This makes it such that we do not start anymore goroutine to do resend checks
mgmt.Start = true
for {
// Wait 5 seconds
time.Sleep(5 * time.Second)
// Check if there are still messages in the list
if len(mgmt.LastMessageList) <= 0 {
// No messages left in list, stop and exit goroutine
fmt.Println("Nothing to ack. bye")
break
}
// Resend all unacknowledged messages
mgmt.Resend()
}
}
// Adds an IP address to the list of clients currently monitoring a facility
func (mgmt *MonitoringManager) AddIP(msgObj messagesocket.Message, duration int64, facility facility.Facility) {
// Check that IP Combination current exists, replace if so
mgmt.RemoveIPIfExists(msgObj.Addr, facility)
// Get current time and end time for monitoring
start := time.Now()
dura := time.Duration(duration) * time.Second
end := start.Add(dura)
// Add to monitoring list
mgmt.MonitorList = append(mgmt.MonitorList, &Monitor{
Message: msgObj,
Start: start,
End: end,
Interval: dura,
Facility: facility,
})
fmt.Printf("Added %s to monitor list to monitor %s for %d seconds", msgObj.Addr.String(), facility, duration)
}
// Debug Helper function to print list of clients currently monitoring a facility
func (mgmt *MonitoringManager) PrintMonitoring() {
if len(mgmt.MonitorList) <= 0 {
fmt.Println("No IP monitoring facility")
return
}
fmt.Println("==========================================")
for _,v := range mgmt.MonitorList {
fmt.Printf("%s - %s - %s to %s\n", v.Message.Addr.String(), v.Facility, v.Start.Format("02/01/2006 15:04:05"), v.End.Format("02/01/2006 15:04:05"))
}
fmt.Println("==========================================")
}
// Broadcast function that is used by the Booking manager to send a monitor broadcast to clients monitoring the specific facility
// This function is called after the add/modify/delete booking functions are executed successfully
func (mgmt *MonitoringManager) Broadcast(facility facility.Facility, delType string, bm *BookingManager, name string) {
// Check if there are any clients currently on the monitoring list that has expired and should be removed. Remove if so
mgmt.CheckExpiry()
blastMsg := fmt.Sprintf("Booking %s for %s by %s", delType, facility, name)
fmt.Println(blastMsg)
// Get facility availability dates and generate bytestream message from it to be broadcasted to the client
days := []Day{Monday, Tuesday, Wednesday, Thursday, Friday, Saturday, Sunday}
dates := bm.GetAvailableDates(facility, days...)
byteArr, err := MarshalQueryAvailabilityMonitorMsg(dates, blastMsg, days)
// Get list of all clients to broadcast this message to
// toBc contains the list of clients that are currently monitoring this facility and have not expired
toBc := mgmt.GetClientsToBroadcast(facility)
messageList := make([]messagesocket.Message, len(toBc))
for i,bc := range toBc {
messageList[i] = bc.Message
}
var marshalled []byte
if err != nil {
fmt.Printf("%s\n", err.Error())
return // Don't send availability on error
} else {
marshalled = byteArr
}
// Broadcast the message to the clients
mgmt.BroadcastUsingMsgList(marshalled, messageList)
// Check if the unacknowledged messages handler is started
// If it is not started, start it up
if !mgmt.Start {
fmt.Println("Starting resend loop")
go mgmt.goStartCheckingResend()
} else {
fmt.Println("Existing routing ignoring")
}
}
// Takes a list of clients, and the data to broadcast in bytes and sends it to each of the clients
func (mgmt *MonitoringManager) BroadcastUsingMsgList(data []byte, list []messagesocket.Message){
for _, a := range list {
mgmt.LastMessageList[a.Addr.String()] = &data
fmt.Printf("Broadcasting update to: %s\n", a.Addr.String())
a.Reply(data)
}
}
// Obtain a list of clients that are currently listening to the specified facility from the monitoring list
func (mgmt *MonitoringManager) GetClientsToBroadcast(fac facility.Facility) []*Monitor {
inform := make([]*Monitor, 0)
// Iterate through the monitoring list to get the list of clients
// If the client is currently monitoring the facility, add it to the final list that would be returned
for _,v := range mgmt.MonitorList {
if v.Facility == fac {
inform = append(inform, v)
}
}
return inform
}
// Check if the clients in the monitoring list have expired
// Remove automatically if so
func (mgmt *MonitoringManager) CheckExpiry() {
// Get current time
curTime := time.Now
// Get new list to store all unexpired clients
unexpired := make([]*Monitor, 0)
// Iterate through current list of clients monitoring a facility
for _,v := range mgmt.MonitorList {
// If client's monitoring end time is after the current time, add to the new list
if v.End.After(curTime()) {
// Not expired
unexpired = append(unexpired, v)
}
}
// Overwrite the current list with the new list of monitoring clients
mgmt.MonitorList = unexpired
}
// Helper function to remove the IP/Port combo if exists from the monitoring list
func (mgmt *MonitoringManager) RemoveIPIfExists(address net.Addr, fac facility.Facility) {
// Check that IP exists. if Ind is -1, IP does not exist
ind := mgmt.CheckIPExistIndex(address, fac)
if ind > -1 {
// IP exists, remove it from the list
mgmt.RemoveIPWithIndex(ind)
}
}
// Remove IP address from monitoring list based on index
func (mgmt *MonitoringManager) RemoveIPWithIndex(index int) {
if index > -1 {
// We do not need to care about order, so lets do a O(1) removal
// We start by just swapping the last element of the list with the item specified in the index
// Then we truncate the list by 1 to reduce the size
mgmt.MonitorList[index] = mgmt.MonitorList[len(mgmt.MonitorList)-1]
mgmt.MonitorList[len(mgmt.MonitorList)-1] = nil
mgmt.MonitorList = mgmt.MonitorList[:len(mgmt.MonitorList)-1]
}
}
// Obtain the index of the IP address in the monitoring list if it exists
func (mgmt *MonitoringManager) CheckIPExistIndex(address net.Addr, fac facility.Facility) int {
exist := -1
// Iterate through monitoring list of clients to find the IP combination
for i,v := range mgmt.MonitorList {
// If IP/Port matches and the facility being tracked also matches, returns the index of this item
if v.Message.Addr.String() == address.String() && v.Facility == fac {
exist = i
break
}
}
// Return -1 if not found
return exist
}
// Marshals the query availability message that would be sent to the client
// This is identical to that of the query availbility message, however there are extra data that are used by monitoring that is appended to it
func MarshalQueryAvailabilityMonitorMsg(raw [][]DateRange, actionString string, dname []Day) ([]byte, error) | {
payload := make([]byte, 0)
// We place the action string in front first (length of string 1 byte, string x byte)
fmt.Println(len(actionString))
// We obtain the length of the action string
asLen, _ := hex.DecodeString(fmt.Sprintf("%02x", len(actionString)))
// We append the action string length to the beginning of the data payload
payload = append(payload, asLen...)
// We append the action string itself after
payload = append(payload, []byte(actionString)...)
// This is similar to query availbility function where we simply append the availability of the facility for each day of the week
for idx, x := range raw {
payload = append(payload, byte(dname[idx]))
payload = append(payload, byte(len(x)))
for _, v := range x {
temp := make([]byte, 6) | identifier_body |
|
scene.py | .models.get(parent).attachNewNode(actorNode)
# Attach the actor to the physics manager
self.app.physicsMgr.attachPhysicalNode(actorNode)
# reparent the model to the actor node
model.reparentTo(actorNodePath)
colliderNodePath = self.addColliderNode(parent)
if collider:
|
else:
# Parent the model to either the render tree or the parent
model.reparentTo(self.renderTree if parent is None else self.models.get(parent))
else:
# If the model is being instanced to an existing model
model = self.addInstance(pos, scale, instanceTo)
# Add the model to the scenes model dictionary
self.models[key if key is not None else len(self.models)] = model
# If the game is running under the RenderPipeline, initialise the model
if self.app.quality != 'super-low' and modelName.endswith('.bam'):
self.app.render_pipeline.prepare_scene(model)
# Return the model nodepath
return model
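# --- Illustrative usage (editor's sketch, not part of the original file) ---
# addObject is the main entry point for populating a scene. The calls below
# mirror the real ones made in IntroScene.__init__ further down; the full
# signature and its defaults are not visible in this excerpt, so the keyword
# names are inferred from the body above.
#
#   self.addObject("ground.bam", scale=(3.6, 3.6, 2), key="ground")   # static model
#   self.addObject("corn.egg", pos=(-62, -62, 0), key="corn")         # template for instancing
#   self.addObject("corn.egg", (10, 10, 0), instanceTo="corn")        # cheap instanced copy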
def addInstance(self, pos, scale, instanceTo):
'''
Adds an Instance of the chosen model to the scene.
'''
# Create a new empty node and attach it to the render tree
model = self.renderTree.attachNewNode("model_placeholder")
# Set the position and scale of the model
model.setPos(*pos)
model.setScale(*scale)
# Instance the model to the chosen object
self.models[instanceTo].instanceTo(model)
# Return the nodepath
return model
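# Note (editor's addition): addInstance is not normally called directly;
# addObject delegates to it whenever an instanceTo key is supplied, as the
# corn-field loop in IntroScene.__init__ does. Panda3D's instanceTo() shares
# the underlying geometry, so the heavy corn model is loaded once and only
# the placeholder node's transform differs between copies.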
def addColliderNode(self, parent=None):
'''
Add an empty colliderNode to the render tree
'''
if parent:
return self.models.get(parent).attachNewNode(CollisionNode('cnode'))
else:
return self.renderTree.attachNewNode(CollisionNode('cnode'))
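# Sketch (editor's addition, assumed usage): the returned NodePath wraps an
# empty CollisionNode, so callers attach their own solids, e.g.
#
#   cnp = self.addColliderNode("barn")                    # "barn" must be a key in self.models
#   cnp.node().addSolid(CollisionSphere(0, 0, 1, 2))      # CollisionSphere from panda3d.core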
def loadModel(self, modelName, isActor, anims):
'''
Load the model into the engine and return it.
'''
# Check if the model is an Actor or static model
if isActor:
# Add the model as an Actor with the required animations
return Actor(modelName, anims)
else:
# Add the model as a static model
return self.loader.loadModel(modelName)
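# Sketch (editor's addition): for an animated model, anims is the usual
# Panda3D Actor mapping of animation names to animation files, e.g.
#
#   chicken = self.loadModel("chicken.egg", True, {"peck": "chicken-peck.egg"})
#   chicken.loop("peck")
#
# "chicken.egg" and "chicken-peck.egg" are placeholder file names used for
# illustration only; they are not assets confirmed by this excerpt.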
def initScene(self):
'''
An event hook method for running events when the scene is first loaded
'''
pass
def exitScene(self):
'''
An event hook method for running events as the scene is exited
'''
pass
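# Scene itself is only a base class; concrete scenes such as IntroClipScene
# and IntroScene below override initScene/exitScene for enter/exit behaviour
# and eventRun for per-frame logic driven by Panda3D's task manager.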
class IntroClipScene(Scene):
'''
A subclass of the Scene class to handle the intro clip
and all of it's required tasks + events
'''
def __init__(self, app):
'''
Initialise and run any events BEFORE loading the scene
'''
self.app = app
self.isPlayerControlled = False
self.models = {}
self.loader = app.loader
self.renderTree = deepcopy(app.emptyRenderTree)
# Add the play button
# playButton = DirectButton(text=('normal','pressed','rollover','disabled'),
# pos=(0,0,0), frameSize=(-0.3, 0.3, -0.1, 0.1),
# text_scale=(0.3, 0.2))
# Add the options menu button
# Add the quit button
def initScene(self):
'''
Run events upon starting the scene
'''
# Unhide the mouse and allow it free movement
self.app.props.setMouseMode(WindowProperties.M_absolute)
self.app.props.setCursorHidden(False)
def exitScene(self):
'''
Run events upon exiting the scene
'''
# Hide the mouse and center it in the window
self.app.props.setCursorHidden(True)
self.app.props.setMouseMode(WindowProperties.M_relative)
def eventRun(self, task):
'''
Run constantly updating events
'''
return Task.cont
class IntroScene(Scene):
'''
A subclass of the Scene class to handle the main menu
and all of it's required tasks + events
'''
def __init__(self, app):
'''
Initialise and run any events BEFORE loading the scene
'''
self.app = app
self.isPlayerControlled = False
self.models = {}
self.loader = app.loader
self.renderTree = deepcopy(app.emptyRenderTree)
# Add the ground model
self.addObject("ground.bam", scale=(3.6,3.6,2), key="ground")
# Add the barn
barnModel = self.addObject("barn.bam", scale=(1, 1, 1))
# Create a corn model and add it in the bottom corner
self.addObject("corn.egg", pos=(-62, -62, 0), scale=(1, 1, 1.3), key="corn")
# Iterate a 25x25 square for the corn
for x in range(25):
for z in range(25):
# Use basic maths to create a 'lollypop' shape cutout
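# Corn is skipped inside a radius-5 circle centred on grid cell (12, 12) and in a 3-cell-wide strip below it, leaving the lollypop-shaped clearing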
if (x-12)**2+(z-12)**2 > 25 and (abs(x-12) > 1 or z > 12):
# Add a corn instance to the scene
self.addObject("corn.egg", (x*5, z*5, 0), instanceTo="corn")
# Add the AI World
self.AIworld = AIWorld(self.renderTree)
# Add generic exponential fog in super-low quality mode
if app.quality == 'super-low':
fog = Fog("corn_fog")
# Set the fog colour
fog.setColor(0.8, 0.8, 0.8)
# Set the density of the fog
fog.setExpDensity(0.005)
# Add the fog to the scene
self.renderTree.setFog(fog)
# Set the scene's background colour
base.setBackgroundColor(0.635, 0.454, 0.494)
# Create an empty ambient light
alight = AmbientLight('alight')
# Set the colour
alight.setColor(VBase4(0.2, 0.2, 0.2, 0.2))
# Attach the node to the render tree
alnp = render.attachNewNode(alight)
# Set the light to illuminate the scene
render.setLight(alnp)
# Add the two chickens and set the maximum velocity (force) on them
self.chickenOne = Chicken(self, (20, -50, 0))
self.chickenOne.aiChar.setMaxForce(70)
self.chickenTwo = Chicken(self, (-20, -40, 0))
self.chickenTwo.aiChar.setMaxForce(70)
# Add them to the AI World
self.AIworld.addAiChar(self.chickenOne.aiChar)
self.AIworld.addAiChar(self.chickenTwo.aiChar)
# Enable the pursue behaviour
self.chickenOne.aiBehaviour.pursue(self.app.camera)
self.chickenTwo.aiBehaviour.pursue(self.app.camera)
def eventRun(self, task):
'''
Run any constant events for the scene
'''
# If the movement controller has finished its path then
if self.app.controller and self.app.controller.clock_obj.get_frame_time() > self.app.controller.curve_time_end:
# delete the motion controller
self.app.controller = None
# Load the first scene of the gameplay
self.app.sceneMgr.loadScene(SceneOne(self.app))
# Update the AI world
self.AIworld.update()
return Task.cont
def initScene(self):
'''
Set up the movement controller and begin the motion path.
'''
# Make the motion path
motionPath = (
(Vec3(0, -63, 4), Vec3(0, 0, 0)),
(Vec3(0, -63, 4), Vec3(0, 0, 0)),
(Vec3(0, -63, 4), Vec3(0, 0, 0)),
(Vec3(0, -56, 4), Vec3(-90, -10, 0)),
(Vec3(0, -52, 4), Vec3(0, -70, 0)),
(Vec3(0, -46, 4), Vec3(90, 0, 20)),
(Vec3(0, -40, 4), Vec3(0, 0, 0)),
(Vec3(0, -30, 4), Vec3(0, 0, 0)),
(Vec3(0, -20, 4), Vec3(0, 0, 0)),
(Vec3(5, -10, 4), Vec3(-40, 0, -5)),
(Vec3(5, -9, 4), Vec | colliderNodePath.node().addSolid(collider) | conditional_block |
scene.py | self.models.get(parent).attachNewNode(actorNode)
# Attach the actor to the physics manager
self.app.physicsMgr.attachPhysicalNode(actorNode)
# reparent the model to the actor node
model.reparentTo(actorNodePath)
colliderNodePath = self.addColliderNode(parent) | model.reparentTo(self.renderTree if parent is None else self.models.get(parent))
else:
# If the model is being instanced to an existing model
model = self.addInstance(pos, scale, instanceTo)
# Add the model to the scene's model dictionary
self.models[key if key is not None else len(self.models)] = model
# If the game is running under the RenderPipeline, initialise the model
if self.app.quality != 'super-low' and modelName.endswith('.bam'):
self.app.render_pipeline.prepare_scene(model)
# Return the model nodepath
return model
def addInstance(self, pos, scale, instanceTo):
'''
Adds an Instance of the chosen model to the scene.
'''
# Create a new empty node and attach it to the render tree
model = self.renderTree.attachNewNode("model_placeholder")
# Set the position and scale of the model
model.setPos(*pos)
model.setScale(*scale)
# Instance the model to the chosen object
self.models[instanceTo].instanceTo(model)
# Return the nodepath
return model
def addColliderNode(self, parent=None):
'''
Add an empty colliderNode to the render tree
'''
if parent:
return self.models.get(parent).attachNewNode(CollisionNode('cnode'))
else:
return self.renderTree.attachNewNode(CollisionNode('cnode'))
def loadModel(self, modelName, isActor, anims):
'''
Load the model into the engine and return it.
'''
# Check if the model is an Actor or static model
if isActor:
# Add the model as an Actor with the required animations
return Actor(modelName, anims)
else:
# Add the model as a static model
return self.loader.loadModel(modelName)
def initScene(self):
'''
An event hook method for running events when the scene is first loaded
'''
pass
def exitScene(self):
'''
An event hook method for running events as the scene is exited
'''
pass
class IntroClipScene(Scene):
'''
A subclass of the Scene class to handle the intro clip
and all of its required tasks + events
'''
def __init__(self, app):
'''
Initialise and run any events BEFORE loading the scene
'''
self.app = app
self.isPlayerControlled = False
self.models = {}
self.loader = app.loader
self.renderTree = deepcopy(app.emptyRenderTree)
# Add the play button
# playButton = DirectButton(text=('normal','pressed','rollover','disabled'),
# pos=(0,0,0), frameSize=(-0.3, 0.3, -0.1, 0.1),
# text_scale=(0.3, 0.2))
# Add the options menu button
# Add the quit button
def initScene(self):
'''
Run events upon starting the scene
'''
# Unhide the mouse and allow it free movement
self.app.props.setMouseMode(WindowProperties.M_absolute)
self.app.props.setCursorHidden(False)
def exitScene(self):
'''
Run events upon exiting the scene
'''
# Hide the mouse and center it in the window
self.app.props.setCursorHidden(True)
self.app.props.setMouseMode(WindowProperties.M_relative)
def eventRun(self, task):
'''
Run constantly updating events
'''
return Task.cont
class IntroScene(Scene):
'''
A subclass of the Scene class to handle the main menu
and all of its required tasks + events
'''
def __init__(self, app):
'''
Initialise and run any events BEFORE loading the scene
'''
self.app = app
self.isPlayerControlled = False
self.models = {}
self.loader = app.loader
self.renderTree = deepcopy(app.emptyRenderTree)
# Add the ground model
self.addObject("ground.bam", scale=(3.6,3.6,2), key="ground")
# Add the barn
barnModel = self.addObject("barn.bam", scale=(1, 1, 1))
# Create a corn model and add it in the bottom corner
self.addObject("corn.egg", pos=(-62, -62, 0), scale=(1, 1, 1.3), key="corn")
# Iterate a 25x25 square for the corn
for x in range(25):
for z in range(25):
# Use basic maths to create a 'lollypop' shape cutout
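# Corn is skipped inside a radius-5 circle centred on grid cell (12, 12) and in a 3-cell-wide strip below it, leaving the lollypop-shaped clearing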
if (x-12)**2+(z-12)**2 > 25 and (abs(x-12) > 1 or z > 12):
# Add a corn instance to the scene
self.addObject("corn.egg", (x*5, z*5, 0), instanceTo="corn")
# Add the AI World
self.AIworld = AIWorld(self.renderTree)
# Add generic exponential fog in super-low quality mode
if app.quality == 'super-low':
fog = Fog("corn_fog")
# Set the fog colour
fog.setColor(0.8, 0.8, 0.8)
# Set the density of the fog
fog.setExpDensity(0.005)
# Add the fog to the scene
self.renderTree.setFog(fog)
# Set the scene's background colour
base.setBackgroundColor(0.635, 0.454, 0.494)
# Create an empty ambient light
alight = AmbientLight('alight')
# Set the colour
alight.setColor(VBase4(0.2, 0.2, 0.2, 0.2))
# Attach the node to the render tree
alnp = render.attachNewNode(alight)
# Set the light to illuminate the scene
render.setLight(alnp)
# Add the two chickens and set the maximum velocity (force) on them
self.chickenOne = Chicken(self, (20, -50, 0))
self.chickenOne.aiChar.setMaxForce(70)
self.chickenTwo = Chicken(self, (-20, -40, 0))
self.chickenTwo.aiChar.setMaxForce(70)
# Add them to the AI World
self.AIworld.addAiChar(self.chickenOne.aiChar)
self.AIworld.addAiChar(self.chickenTwo.aiChar)
# Enable the pursue behaviour
self.chickenOne.aiBehaviour.pursue(self.app.camera)
self.chickenTwo.aiBehaviour.pursue(self.app.camera)
def eventRun(self, task):
'''
Run any constant events for the scene
'''
# If the movement controller has finished its path then
if self.app.controller and self.app.controller.clock_obj.get_frame_time() > self.app.controller.curve_time_end:
# delete the motion controller
self.app.controller = None
# Load the first scene of the gameplay
self.app.sceneMgr.loadScene(SceneOne(self.app))
# Update the AI world
self.AIworld.update()
return Task.cont
def initScene(self):
'''
Set up the movement controller and begin the motion path.
'''
# Make the motion path
motionPath = (
(Vec3(0, -63, 4), Vec3(0, 0, 0)),
(Vec3(0, -63, 4), Vec3(0, 0, 0)),
(Vec3(0, -63, 4), Vec3(0, 0, 0)),
(Vec3(0, -56, 4), Vec3(-90, -10, 0)),
(Vec3(0, -52, 4), Vec3(0, -70, 0)),
(Vec3(0, -46, 4), Vec3(90, 0, 20)),
(Vec3(0, -40, 4), Vec3(0, 0, 0)),
(Vec3(0, -30, 4), Vec3(0, 0, 0)),
(Vec3(0, -20, 4), Vec3(0, 0, 0)),
(Vec3(5, -10, 4), Vec3(-40, 0, -5)),
(Vec3(5, -9, 4), Vec3 | if collider:
colliderNodePath.node().addSolid(collider)
else:
# Parent the model to either the render tree or the parent | random_line_split |
scene.py | .models.get(parent).attachNewNode(actorNode)
# Attach the actor to the physics manager
self.app.physicsMgr.attachPhysicalNode(actorNode)
# reparent the model to the actor node
model.reparentTo(actorNodePath)
colliderNodePath = self.addColliderNode(parent)
if collider:
colliderNodePath.node().addSolid(collider)
else:
# Parent the model to either the render tree or the parent
model.reparentTo(self.renderTree if parent is None else self.models.get(parent))
else:
# If the model is being instanced to an existing model
model = self.addInstance(pos, scale, instanceTo)
# Add the model to the scene's model dictionary
self.models[key if key is not None else len(self.models)] = model
# If the game is running under the RenderPipeline, initialise the model
if self.app.quality != 'super-low' and modelName.endswith('.bam'):
self.app.render_pipeline.prepare_scene(model)
# Return the model nodepath
return model
def | (self, pos, scale, instanceTo):
'''
Adds an Instance of the chosen model to the scene.
'''
# Create a new empty node and attach it to the render tree
model = self.renderTree.attachNewNode("model_placeholder")
# Set the position and scale of the model
model.setPos(*pos)
model.setScale(*scale)
# Instance the model to the chosen object
self.models[instanceTo].instanceTo(model)
# Return the nodepath
return model
def addColliderNode(self, parent=None):
'''
Add an empty colliderNode to the render tree
'''
if parent:
return self.models.get(parent).attachNewNode(CollisionNode('cnode'))
else:
return self.renderTree.attachNewNode(CollisionNode('cnode'))
def loadModel(self, modelName, isActor, anims):
'''
Load the model into the engine and return it.
'''
# Check if the model is an Actor or static model
if isActor:
# Add the model as an Actor with the required animations
return Actor(modelName, anims)
else:
# Add the model as a static model
return self.loader.loadModel(modelName)
def initScene(self):
'''
An event hook method for running events when the scene is first loaded
'''
pass
def exitScene(self):
'''
An event hook method for running events as the scene is exited
'''
pass
class IntroClipScene(Scene):
'''
A subclass of the Scene class to handle the intro clip
and all of its required tasks + events
'''
def __init__(self, app):
'''
Initialise and run any events BEFORE loading the scene
'''
self.app = app
self.isPlayerControlled = False
self.models = {}
self.loader = app.loader
self.renderTree = deepcopy(app.emptyRenderTree)
# Add the play button
# playButton = DirectButton(text=('normal','pressed','rollover','disabled'),
# pos=(0,0,0), frameSize=(-0.3, 0.3, -0.1, 0.1),
# text_scale=(0.3, 0.2))
# Add the options menu button
# Add the quit button
def initScene(self):
'''
Run events upon starting the scene
'''
# Unhide the mouse and allow it free movement
self.app.props.setMouseMode(WindowProperties.M_absolute)
self.app.props.setCursorHidden(False)
def exitScene(self):
'''
Run events upon exiting the scene
'''
# Hide the mouse and center it in the window
self.app.props.setCursorHidden(True)
self.app.props.setMouseMode(WindowProperties.M_relative)
def eventRun(self, task):
'''
Run constantly updating events
'''
return Task.cont
class IntroScene(Scene):
'''
A subclass of the Scene class to handle the main menu
and all of its required tasks + events
'''
def __init__(self, app):
'''
Initialise and run any events BEFORE loading the scene
'''
self.app = app
self.isPlayerControlled = False
self.models = {}
self.loader = app.loader
self.renderTree = deepcopy(app.emptyRenderTree)
# Add the ground model
self.addObject("ground.bam", scale=(3.6,3.6,2), key="ground")
# Add the barn
barnModel = self.addObject("barn.bam", scale=(1, 1, 1))
# Create a corn model and add it in the bottom corner
self.addObject("corn.egg", pos=(-62, -62, 0), scale=(1, 1, 1.3), key="corn")
# Iterate a 25x25 square for the corn
for x in range(25):
for z in range(25):
# Use basic maths to create a 'lollypop' shape cutout
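# Corn is skipped inside a radius-5 circle centred on grid cell (12, 12) and in a 3-cell-wide strip below it, leaving the lollypop-shaped clearing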
if (x-12)**2+(z-12)**2 > 25 and (abs(x-12) > 1 or z > 12):
# Add a corn instance to the scene
self.addObject("corn.egg", (x*5, z*5, 0), instanceTo="corn")
# Add the AI World
self.AIworld = AIWorld(self.renderTree)
# Add generic exponential fog in super-low quality mode
if app.quality == 'super-low':
fog = Fog("corn_fog")
# Set the fog colour
fog.setColor(0.8, 0.8, 0.8)
# Set the density of the fog
fog.setExpDensity(0.005)
# Add the fog to the scene
self.renderTree.setFog(fog)
# Set the scene's background colour
base.setBackgroundColor(0.635, 0.454, 0.494)
# Create an empty ambient light
alight = AmbientLight('alight')
# Set the colour
alight.setColor(VBase4(0.2, 0.2, 0.2, 0.2))
# Attach the node to the render tree
alnp = render.attachNewNode(alight)
# Set the light to illuminate the scene
render.setLight(alnp)
# Add the two chickens and set the maximum velocity (force) on them
self.chickenOne = Chicken(self, (20, -50, 0))
self.chickenOne.aiChar.setMaxForce(70)
self.chickenTwo = Chicken(self, (-20, -40, 0))
self.chickenTwo.aiChar.setMaxForce(70)
# Add them to the AI World
self.AIworld.addAiChar(self.chickenOne.aiChar)
self.AIworld.addAiChar(self.chickenTwo.aiChar)
# Enable the pursue behaviour
self.chickenOne.aiBehaviour.pursue(self.app.camera)
self.chickenTwo.aiBehaviour.pursue(self.app.camera)
def eventRun(self, task):
'''
Run any constant events for the scene
'''
# If the movement controller has finished its path then
if self.app.controller and self.app.controller.clock_obj.get_frame_time() > self.app.controller.curve_time_end:
# delete the motion controller
self.app.controller = None
# Load the first scene of the gameplay
self.app.sceneMgr.loadScene(SceneOne(self.app))
# Update the AI world
self.AIworld.update()
return Task.cont
def initScene(self):
'''
Set up the movement controller and begin the motion path.
'''
# Make the motion path
motionPath = (
(Vec3(0, -63, 4), Vec3(0, 0, 0)),
(Vec3(0, -63, 4), Vec3(0, 0, 0)),
(Vec3(0, -63, 4), Vec3(0, 0, 0)),
(Vec3(0, -56, 4), Vec3(-90, -10, 0)),
(Vec3(0, -52, 4), Vec3(0, -70, 0)),
(Vec3(0, -46, 4), Vec3(90, 0, 20)),
(Vec3(0, -40, 4), Vec3(0, 0, 0)),
(Vec3(0, -30, 4), Vec3(0, 0, 0)),
(Vec3(0, -20, 4), Vec3(0, 0, 0)),
(Vec3(5, -10, 4), Vec3(-40, 0, -5)),
(Vec3(5, -9, 4), Vec | addInstance | identifier_name |
scene.py | self.models.get(parent).attachNewNode(actorNode)
# Attach the actor to the physics manager
self.app.physicsMgr.attachPhysicalNode(actorNode)
# reparent the model to the actor node
model.reparentTo(actorNodePath)
colliderNodePath = self.addColliderNode(parent)
if collider:
colliderNodePath.node().addSolid(collider)
else:
# Parent the model to either the render tree or the parent
model.reparentTo(self.renderTree if parent is None else self.models.get(parent))
else:
# If the model is being instanced to an existing model
model = self.addInstance(pos, scale, instanceTo)
# Add the model to the scene's model dictionary
self.models[key if key is not None else len(self.models)] = model
# If the game is running under the RenderPipeline, initialise the model
if self.app.quality != 'super-low' and modelName.endswith('.bam'):
self.app.render_pipeline.prepare_scene(model)
# Return the model nodepath
return model
def addInstance(self, pos, scale, instanceTo):
'''
Adds an Instance of the chosen model to the scene.
'''
# Create a new empty node and attach it to the render tree
model = self.renderTree.attachNewNode("model_placeholder")
# Set the position and scale of the model
model.setPos(*pos)
model.setScale(*scale)
# Instance the model to the chosen object
self.models[instanceTo].instanceTo(model)
# Return the nodepath
return model
def addColliderNode(self, parent=None):
'''
Add an empty colliderNode to the render tree
'''
if parent:
return self.models.get(parent).attachNewNode(CollisionNode('cnode'))
else:
return self.renderTree.attachNewNode(CollisionNode('cnode'))
def loadModel(self, modelName, isActor, anims):
'''
Load the model into the engine and return it.
'''
# Check if the model is an Actor or static model
if isActor:
# Add the model as an Actor with the required animations
return Actor(modelName, anims)
else:
# Add the model as a static model
return self.loader.loadModel(modelName)
def initScene(self):
|
def exitScene(self):
'''
An event hook method for running events as the scene is exited
'''
pass
class IntroClipScene(Scene):
'''
A subclass of the Scene class to handle the intro clip
and all of its required tasks + events
'''
def __init__(self, app):
'''
Initialise and run any events BEFORE loading the scene
'''
self.app = app
self.isPlayerControlled = False
self.models = {}
self.loader = app.loader
self.renderTree = deepcopy(app.emptyRenderTree)
# Add the play button
# playButton = DirectButton(text=('normal','pressed','rollover','disabled'),
# pos=(0,0,0), frameSize=(-0.3, 0.3, -0.1, 0.1),
# text_scale=(0.3, 0.2))
# Add the options menu button
# Add the quit button
def initScene(self):
'''
Run events upon starting the scene
'''
# Unhide the mouse and allow it free movement
self.app.props.setMouseMode(WindowProperties.M_absolute)
self.app.props.setCursorHidden(False)
def exitScene(self):
'''
Run events upon exiting the scene
'''
# Hide the mouse and center it in the window
self.app.props.setCursorHidden(True)
self.app.props.setMouseMode(WindowProperties.M_relative)
def eventRun(self, task):
'''
Run constantly updating events
'''
return Task.cont
class IntroScene(Scene):
'''
A subclass of the Scene class to handle the main menu
and all of its required tasks + events
'''
def __init__(self, app):
'''
Initialise and run any events BEFORE loading the scene
'''
self.app = app
self.isPlayerControlled = False
self.models = {}
self.loader = app.loader
self.renderTree = deepcopy(app.emptyRenderTree)
# Add the ground model
self.addObject("ground.bam", scale=(3.6,3.6,2), key="ground")
# Add the barn
barnModel = self.addObject("barn.bam", scale=(1, 1, 1))
# Create a corn model and add it in the bottom corner
self.addObject("corn.egg", pos=(-62, -62, 0), scale=(1, 1, 1.3), key="corn")
# Iterate a 25x25 square for the corn
for x in range(25):
for z in range(25):
# Use basic maths to create a 'lollypop' shape cutout
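# Corn is skipped inside a radius-5 circle centred on grid cell (12, 12) and in a 3-cell-wide strip below it, leaving the lollypop-shaped clearing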
if (x-12)**2+(z-12)**2 > 25 and (abs(x-12) > 1 or z > 12):
# Add a corn instance to the scene
self.addObject("corn.egg", (x*5, z*5, 0), instanceTo="corn")
# Add the AI World
self.AIworld = AIWorld(self.renderTree)
# Add generic exponential fog in super-low quality mode
if app.quality == 'super-low':
fog = Fog("corn_fog")
# Set the fog colour
fog.setColor(0.8, 0.8, 0.8)
# Set the density of the fog
fog.setExpDensity(0.005)
# Add the fog to the scene
self.renderTree.setFog(fog)
# Set the scene's background colour
base.setBackgroundColor(0.635, 0.454, 0.494)
# Create an empty ambient light
alight = AmbientLight('alight')
# Set the colour
alight.setColor(VBase4(0.2, 0.2, 0.2, 0.2))
# Attach the node to the render tree
alnp = render.attachNewNode(alight)
# Set the light to illuminate the scene
render.setLight(alnp)
# Add the two chickens and set the maximum velocity (force) on them
self.chickenOne = Chicken(self, (20, -50, 0))
self.chickenOne.aiChar.setMaxForce(70)
self.chickenTwo = Chicken(self, (-20, -40, 0))
self.chickenTwo.aiChar.setMaxForce(70)
# Add them to the AI World
self.AIworld.addAiChar(self.chickenOne.aiChar)
self.AIworld.addAiChar(self.chickenTwo.aiChar)
# Enable the pursue behaviour
self.chickenOne.aiBehaviour.pursue(self.app.camera)
self.chickenTwo.aiBehaviour.pursue(self.app.camera)
def eventRun(self, task):
'''
Run any constant events for the scene
'''
# If the movement controller has finished its path then
if self.app.controller and self.app.controller.clock_obj.get_frame_time() > self.app.controller.curve_time_end:
# delete the motion controller
self.app.controller = None
# Load the first scene of the gameplay
self.app.sceneMgr.loadScene(SceneOne(self.app))
# Update the AI world
self.AIworld.update()
return Task.cont
def initScene(self):
'''
Set up the movement controller and begin the motion path.
'''
# Make the motion path
motionPath = (
(Vec3(0, -63, 4), Vec3(0, 0, 0)),
(Vec3(0, -63, 4), Vec3(0, 0, 0)),
(Vec3(0, -63, 4), Vec3(0, 0, 0)),
(Vec3(0, -56, 4), Vec3(-90, -10, 0)),
(Vec3(0, -52, 4), Vec3(0, -70, 0)),
(Vec3(0, -46, 4), Vec3(90, 0, 20)),
(Vec3(0, -40, 4), Vec3(0, 0, 0)),
(Vec3(0, -30, 4), Vec3(0, 0, 0)),
(Vec3(0, -20, 4), Vec3(0, 0, 0)),
(Vec3(5, -10, 4), Vec3(-40, 0, -5)),
(Vec3(5, -9, 4), Vec3 | '''
An event hook method for running events when the scene is first loaded
'''
pass | identifier_body |
villager_bot.py | _id: "lang"}
self.prefix_cache = dict[int, str]() # {guild_id: "prefix"}
self.disabled_commands = defaultdict[int, set[str]](
set
) # {guild_id: set({command, command,..})}
self.replies_cache = set[int]() # {guild_id, guild_id,..}
self.rcon_cache = dict[tuple[int, Any], Any]() # {(user_id, mc_server): rcon_client}
self.existing_users_cache = set[
int
]() # so the database doesn't have to make a query every time an econ command is run to ensure the user exists
self.existing_user_lbs_cache = set[
int
]() # for the same reason as above, but for leaderboards instead
self.support_server: Optional[discord.Guild] = None
self.error_channel: Optional[discord.TextChannel] = None
self.vote_channel: Optional[discord.TextChannel] = None
# counters and other things
self.command_count = 0
self.message_count = 0
self.error_count = 0
self.session_votes = 0
self.font_files = list[str]()
self.captcha_generator: ImageCaptcha | None = None
self.final_ready = asyncio.Event()
self.add_check(self.check_global) # register global check
self.before_invoke(
self.before_command_invoked
) # register self.before_command_invoked as a before_invoked event
self.after_invoke(
self.after_command_invoked
) # register self.after_command_invoked as a after_invoked event
self.event(self.on_app_command_completion)
@property
def embed_color(self) -> discord.Color:
return getattr(discord.Color, self.d.embed_color)()
async def start(self):
self.font_files = await FontHandler(
font_urls=self.d.font_urls, output_directory="fonts"
).retrieve()
self.captcha_generator = captcha.image.ImageCaptcha(fonts=self.font_files)
self.karen = KarenClient(self.k.karen, self.get_packet_handlers(), self.logger)
self.db = DatabaseProxy(self.karen)
await self.karen.connect()
cluster_info = await self.karen.fetch_cluster_init_info()
self.shard_count = cluster_info.shard_count
self.shard_ids = cluster_info.shard_ids
self.cluster_id = cluster_info.cluster_id
self.aiohttp = aiohttp.ClientSession()
for cog in self.cog_list:
await self.load_extension(f"bot.cogs.{cog}")
await super().start(self.k.discord_token)
async def close(self, *args, **kwargs):
if self.karen is not None:
await self.karen.disconnect()
if self.aiohttp is not None:
await self.aiohttp.close()
self.logger.info("Closed aiohttp ClientSession")
await super().close(*args, **kwargs)
async def get_prefix(self, message: discord.Message) -> str:
if message.guild:
return self.prefix_cache.get(message.guild.id, self.k.default_prefix)
return self.k.default_prefix
def get_language(self, ctx: CustomContext) -> Translation:
if ctx.guild:
return self.l[self.language_cache.get(ctx.guild.id, "en")]
return self.l["en"]
async def on_ready(self):
if self.cluster_id == 0:
try:
self.logger.info("Syncing slash commands...")
self.tree.copy_global_to(guild=await self.fetch_guild(self.k.support_server_id))
await self.tree.sync()
self.logger.info("Slash commands synced!")
except Exception:
self.logger.error(
"An error occurred in on_ready while syncing slash commands", exc_info=True
)
try:
self.logger.info("Syncing db item prices...")
item_prices = {
v.db_entry.item: v.db_entry.sell_price for k, v in self.d.shop_items.items()
}
item_prices.update(
{
self.d.farming.name_map[k]: v
for k, v in self.d.farming.emerald_yields.items()
}
)
item_prices.update({f.item: f.sell_price for f in self.d.fishing_findables})
await self.get_cog("Database").sync_item_prices(item_prices)
self.logger.info("Done syncing db item prices!")
except Exception:
self.logger.error(
"An error occurred in on_ready while syncing db item prices", exc_info=True
)
async def get_context(self, *args, **kwargs) -> CustomContext:
ctx = await super().get_context(*args, **kwargs, cls=CustomContext)
ctx.embed_color = self.embed_color
ctx.l = self.get_language(ctx)
return ctx
async def send_embed(self, location, message: str, *, ignore_exceptions: bool = False) -> None:
embed = discord.Embed(color=self.embed_color, description=message)
try:
await location.send(embed=embed)
except discord.errors.HTTPException:
if not ignore_exceptions:
raise
async def reply_embed(
self, location, message: str, ping: bool = False, *, ignore_exceptions: bool = False
) -> None:
embed = discord.Embed(color=self.embed_color, description=message)
try:
await location.reply(embed=embed, mention_author=ping)
except discord.errors.HTTPException as e:
if (
e.code == 50035
): # invalid form body, happens sometimes when the message to reply to can't be found?
await self.send_embed(location, message, ignore_exceptions=ignore_exceptions)
elif not ignore_exceptions:
raise
async def send_tip(self, ctx: CustomContext) -> None:
await asyncio.sleep(random.randint(100, 200) / 100)
await self.send_embed(
ctx, f"{random.choice(ctx.l.misc.tip_intros)} {random.choice(ctx.l.misc.tips)}"
)
async def check_global(self, ctx: CustomContext) -> bool: # the global command check
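# When a check below fails, ctx.failure_reason or ctx.custom_error is set before returning False so the caller can report why the command was blocked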
command_name = ctx.command.qualified_name
if ctx.author.id in self.botban_cache:
ctx.failure_reason = "bot_banned"
return False
if not self.is_ready():
ctx.failure_reason = "not_ready"
return False
if ctx.guild is not None and command_name in self.disabled_commands.get(ctx.guild.id, ()):
ctx.failure_reason = "disabled"
return False
# handle cooldowns that need to be synced between shard groups / processes (aka karen cooldowns)
if command_name in self.d.cooldown_rates:
cooldown_info = await self.karen.cooldown(command_name, ctx.author.id)
if not cooldown_info.can_run:
ctx.custom_error = CommandOnKarenCooldown(cooldown_info.remaining)
return False
if command_name in self.d.concurrency_limited:
if not await self.karen.check_concurrency(command_name, ctx.author.id):
ctx.custom_error = MaxKarenConcurrencyReached()
return False
if ctx.command.cog_name == "Econ":
# check if user has paused econ
if await self.karen.check_econ_paused(ctx.author.id):
ctx.failure_reason = "econ_paused"
return False
return True
async def before_command_invoked(self, ctx: CustomContext):
self.command_count += 1
if ctx.command.cog_name == "Econ":
# random chance to spawn mob
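# randint(0, mob_chance) == 0 hits roughly once every (mob_chance + 1) economy commands, and only commands with a cooldown rate of at least 2 can actually spawn a mob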
if random.randint(0, self.d.mob_chance) == 0:
if self.d.cooldown_rates.get(ctx.command.qualified_name, 0) >= 2:
asyncio.create_task(self.get_cog("MobSpawner").spawn_event(ctx))
elif random.randint(0, self.d.tip_chance) == 0: # random chance to send tip
asyncio.create_task(self.send_tip(ctx))
try:
if ctx.command.qualified_name in self.d.concurrency_limited:
await self.karen.acquire_concurrency(ctx.command.qualified_name, ctx.author.id)
except Exception:
self.logger.error(
"An error occurred while attempting to acquire a concurrency lock for command %s for user %s",
ctx.command,
ctx.author.id,
exc_info=True,
)
raise
if ctx.command.qualified_name in self.d.cooldown_rates:
|
await self.karen.command_execution(
ctx.author.id, getattr(ctx.guild, "id", None), ctx.command.qualified_name, False
)
async def after_command_invoked(self, ctx: CustomContext):
try:
if ctx.command.qualified_name in self.d.concurrency_limited:
await self.karen.release_concurrency(ctx.command.qualified_name, ctx.author.id)
except Exception:
self.logger.error(
"An error occurred while attempting to release a concurrency lock for command %s for user %s",
ctx.command,
ctx.author.id,
exc_info=True,
)
raise
async def on_app_command_completion(
self,
inter: discord.Interaction,
command: discord.app_commands.Command | discord.app_commands.ContextMenu,
):
if isinstance(command, discord.app_commands.Command):
await self.karen.command_execution(
inter.user.id, inter.guild_id, command.qualified_name, True
)
###### packet handlers #####################################################
@handle_packet(PacketType.EXEC_CODE)
async def packet_exec_code(self, code: str | await self.karen.lb_command_ran(ctx.author.id) | conditional_block |
villager_bot.py | _id: "lang"}
self.prefix_cache = dict[int, str]() # {guild_id: "prefix"}
self.disabled_commands = defaultdict[int, set[str]](
set
) # {guild_id: set({command, command,..})}
self.replies_cache = set[int]() # {guild_id, guild_id,..}
self.rcon_cache = dict[tuple[int, Any], Any]() # {(user_id, mc_server): rcon_client}
self.existing_users_cache = set[
int
]() # so the database doesn't have to make a query every time an econ command is run to ensure the user exists
self.existing_user_lbs_cache = set[
int
]() # for the same reason as above, but for leaderboards instead
self.support_server: Optional[discord.Guild] = None
self.error_channel: Optional[discord.TextChannel] = None
self.vote_channel: Optional[discord.TextChannel] = None
# counters and other things
self.command_count = 0
self.message_count = 0
self.error_count = 0
self.session_votes = 0
self.font_files = list[str]()
self.captcha_generator: ImageCaptcha | None = None
self.final_ready = asyncio.Event()
self.add_check(self.check_global) # register global check
self.before_invoke(
self.before_command_invoked
) # register self.before_command_invoked as a before_invoked event
self.after_invoke(
self.after_command_invoked
) # register self.after_command_invoked as a after_invoked event
self.event(self.on_app_command_completion)
@property
def embed_color(self) -> discord.Color:
|
async def start(self):
self.font_files = await FontHandler(
font_urls=self.d.font_urls, output_directory="fonts"
).retrieve()
self.captcha_generator = captcha.image.ImageCaptcha(fonts=self.font_files)
self.karen = KarenClient(self.k.karen, self.get_packet_handlers(), self.logger)
self.db = DatabaseProxy(self.karen)
await self.karen.connect()
cluster_info = await self.karen.fetch_cluster_init_info()
self.shard_count = cluster_info.shard_count
self.shard_ids = cluster_info.shard_ids
self.cluster_id = cluster_info.cluster_id
self.aiohttp = aiohttp.ClientSession()
for cog in self.cog_list:
await self.load_extension(f"bot.cogs.{cog}")
await super().start(self.k.discord_token)
async def close(self, *args, **kwargs):
if self.karen is not None:
await self.karen.disconnect()
if self.aiohttp is not None:
await self.aiohttp.close()
self.logger.info("Closed aiohttp ClientSession")
await super().close(*args, **kwargs)
async def get_prefix(self, message: discord.Message) -> str:
if message.guild:
return self.prefix_cache.get(message.guild.id, self.k.default_prefix)
return self.k.default_prefix
def get_language(self, ctx: CustomContext) -> Translation:
if ctx.guild:
return self.l[self.language_cache.get(ctx.guild.id, "en")]
return self.l["en"]
async def on_ready(self):
if self.cluster_id == 0:
try:
self.logger.info("Syncing slash commands...")
self.tree.copy_global_to(guild=await self.fetch_guild(self.k.support_server_id))
await self.tree.sync()
self.logger.info("Slash commands synced!")
except Exception:
self.logger.error(
"An error occurred in on_ready while syncing slash commands", exc_info=True
)
try:
self.logger.info("Syncing db item prices...")
item_prices = {
v.db_entry.item: v.db_entry.sell_price for k, v in self.d.shop_items.items()
}
item_prices.update(
{
self.d.farming.name_map[k]: v
for k, v in self.d.farming.emerald_yields.items()
}
)
item_prices.update({f.item: f.sell_price for f in self.d.fishing_findables})
await self.get_cog("Database").sync_item_prices(item_prices)
self.logger.info("Done syncing db item prices!")
except Exception:
self.logger.error(
"An error occurred in on_ready while syncing db item prices", exc_info=True
)
async def get_context(self, *args, **kwargs) -> CustomContext:
ctx = await super().get_context(*args, **kwargs, cls=CustomContext)
ctx.embed_color = self.embed_color
ctx.l = self.get_language(ctx)
return ctx
async def send_embed(self, location, message: str, *, ignore_exceptions: bool = False) -> None:
embed = discord.Embed(color=self.embed_color, description=message)
try:
await location.send(embed=embed)
except discord.errors.HTTPException:
if not ignore_exceptions:
raise
async def reply_embed(
self, location, message: str, ping: bool = False, *, ignore_exceptions: bool = False
) -> None:
embed = discord.Embed(color=self.embed_color, description=message)
try:
await location.reply(embed=embed, mention_author=ping)
except discord.errors.HTTPException as e:
if (
e.code == 50035
): # invalid form body, happens sometimes when the message to reply to can't be found?
await self.send_embed(location, message, ignore_exceptions=ignore_exceptions)
elif not ignore_exceptions:
raise
async def send_tip(self, ctx: CustomContext) -> None:
await asyncio.sleep(random.randint(100, 200) / 100)
await self.send_embed(
ctx, f"{random.choice(ctx.l.misc.tip_intros)} {random.choice(ctx.l.misc.tips)}"
)
async def check_global(self, ctx: CustomContext) -> bool: # the global command check
command_name = ctx.command.qualified_name
if ctx.author.id in self.botban_cache:
ctx.failure_reason = "bot_banned"
return False
if not self.is_ready():
ctx.failure_reason = "not_ready"
return False
if ctx.guild is not None and command_name in self.disabled_commands.get(ctx.guild.id, ()):
ctx.failure_reason = "disabled"
return False
# handle cooldowns that need to be synced between shard groups / processes (aka karen cooldowns)
if command_name in self.d.cooldown_rates:
cooldown_info = await self.karen.cooldown(command_name, ctx.author.id)
if not cooldown_info.can_run:
ctx.custom_error = CommandOnKarenCooldown(cooldown_info.remaining)
return False
if command_name in self.d.concurrency_limited:
if not await self.karen.check_concurrency(command_name, ctx.author.id):
ctx.custom_error = MaxKarenConcurrencyReached()
return False
if ctx.command.cog_name == "Econ":
# check if user has paused econ
if await self.karen.check_econ_paused(ctx.author.id):
ctx.failure_reason = "econ_paused"
return False
return True
async def before_command_invoked(self, ctx: CustomContext):
self.command_count += 1
if ctx.command.cog_name == "Econ":
# random chance to spawn mob
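# randint(0, mob_chance) == 0 hits roughly once every (mob_chance + 1) economy commands, and only commands with a cooldown rate of at least 2 can actually spawn a mob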
if random.randint(0, self.d.mob_chance) == 0:
if self.d.cooldown_rates.get(ctx.command.qualified_name, 0) >= 2:
asyncio.create_task(self.get_cog("MobSpawner").spawn_event(ctx))
elif random.randint(0, self.d.tip_chance) == 0: # random chance to send tip
asyncio.create_task(self.send_tip(ctx))
try:
if ctx.command.qualified_name in self.d.concurrency_limited:
await self.karen.acquire_concurrency(ctx.command.qualified_name, ctx.author.id)
except Exception:
self.logger.error(
"An error occurred while attempting to acquire a concurrency lock for command %s for user %s",
ctx.command,
ctx.author.id,
exc_info=True,
)
raise
if ctx.command.qualified_name in self.d.cooldown_rates:
await self.karen.lb_command_ran(ctx.author.id)
await self.karen.command_execution(
ctx.author.id, getattr(ctx.guild, "id", None), ctx.command.qualified_name, False
)
async def after_command_invoked(self, ctx: CustomContext):
try:
if ctx.command.qualified_name in self.d.concurrency_limited:
await self.karen.release_concurrency(ctx.command.qualified_name, ctx.author.id)
except Exception:
self.logger.error(
"An error occurred while attempting to release a concurrency lock for command %s for user %s",
ctx.command,
ctx.author.id,
exc_info=True,
)
raise
async def on_app_command_completion(
self,
inter: discord.Interaction,
command: discord.app_commands.Command | discord.app_commands.ContextMenu,
):
if isinstance(command, discord.app_commands.Command):
await self.karen.command_execution(
inter.user.id, inter.guild_id, command.qualified_name, True
)
###### packet handlers #####################################################
@handle_packet(PacketType.EXEC_CODE)
async def packet_exec_code(self, code: str | return getattr(discord.Color, self.d.embed_color)() | identifier_body |
villager_bot.py | _id: "lang"}
self.prefix_cache = dict[int, str]() # {guild_id: "prefix"}
self.disabled_commands = defaultdict[int, set[str]](
set
) # {guild_id: set({command, command,..})}
self.replies_cache = set[int]() # {guild_id, guild_id,..}
self.rcon_cache = dict[tuple[int, Any], Any]() # {(user_id, mc_server): rcon_client}
self.existing_users_cache = set[
int
]() # so the database doesn't have to make a query every time an econ command is run to ensure the user exists
self.existing_user_lbs_cache = set[
int
]() # for the same reason as above, but for leaderboards instead
self.support_server: Optional[discord.Guild] = None
self.error_channel: Optional[discord.TextChannel] = None
self.vote_channel: Optional[discord.TextChannel] = None
# counters and other things
self.command_count = 0
self.message_count = 0
self.error_count = 0
self.session_votes = 0
self.font_files = list[str]()
self.captcha_generator: ImageCaptcha | None = None
self.final_ready = asyncio.Event()
self.add_check(self.check_global) # register global check
self.before_invoke(
self.before_command_invoked
) # register self.before_command_invoked as a before_invoked event
self.after_invoke(
self.after_command_invoked
) # register self.after_command_invoked as a after_invoked event
self.event(self.on_app_command_completion)
@property
def embed_color(self) -> discord.Color:
return getattr(discord.Color, self.d.embed_color)()
async def start(self):
self.font_files = await FontHandler(
font_urls=self.d.font_urls, output_directory="fonts"
).retrieve()
self.captcha_generator = captcha.image.ImageCaptcha(fonts=self.font_files)
self.karen = KarenClient(self.k.karen, self.get_packet_handlers(), self.logger)
self.db = DatabaseProxy(self.karen)
await self.karen.connect()
cluster_info = await self.karen.fetch_cluster_init_info()
self.shard_count = cluster_info.shard_count
self.shard_ids = cluster_info.shard_ids
self.cluster_id = cluster_info.cluster_id
self.aiohttp = aiohttp.ClientSession()
for cog in self.cog_list:
await self.load_extension(f"bot.cogs.{cog}")
await super().start(self.k.discord_token)
async def close(self, *args, **kwargs):
if self.karen is not None:
await self.karen.disconnect()
if self.aiohttp is not None:
await self.aiohttp.close()
self.logger.info("Closed aiohttp ClientSession")
await super().close(*args, **kwargs)
async def get_prefix(self, message: discord.Message) -> str:
if message.guild:
return self.prefix_cache.get(message.guild.id, self.k.default_prefix)
return self.k.default_prefix
def get_language(self, ctx: CustomContext) -> Translation:
if ctx.guild:
return self.l[self.language_cache.get(ctx.guild.id, "en")]
return self.l["en"]
async def on_ready(self):
if self.cluster_id == 0:
try:
self.logger.info("Syncing slash commands...")
self.tree.copy_global_to(guild=await self.fetch_guild(self.k.support_server_id))
await self.tree.sync()
self.logger.info("Slash commands synced!")
except Exception:
self.logger.error(
"An error occurred in on_ready while syncing slash commands", exc_info=True
)
try:
self.logger.info("Syncing db item prices...")
item_prices = {
v.db_entry.item: v.db_entry.sell_price for k, v in self.d.shop_items.items()
}
item_prices.update(
{
self.d.farming.name_map[k]: v
for k, v in self.d.farming.emerald_yields.items()
}
)
item_prices.update({f.item: f.sell_price for f in self.d.fishing_findables})
await self.get_cog("Database").sync_item_prices(item_prices)
self.logger.info("Done syncing db item prices!")
except Exception:
self.logger.error(
"An error occurred in on_ready while syncing db item prices", exc_info=True
)
async def get_context(self, *args, **kwargs) -> CustomContext:
ctx = await super().get_context(*args, **kwargs, cls=CustomContext)
ctx.embed_color = self.embed_color
ctx.l = self.get_language(ctx)
return ctx
async def send_embed(self, location, message: str, *, ignore_exceptions: bool = False) -> None:
embed = discord.Embed(color=self.embed_color, description=message)
try:
await location.send(embed=embed)
except discord.errors.HTTPException:
if not ignore_exceptions: | ) -> None:
embed = discord.Embed(color=self.embed_color, description=message)
try:
await location.reply(embed=embed, mention_author=ping)
except discord.errors.HTTPException as e:
if (
e.code == 50035
): # invalid form body, happens sometimes when the message to reply to can't be found?
await self.send_embed(location, message, ignore_exceptions=ignore_exceptions)
elif not ignore_exceptions:
raise
async def send_tip(self, ctx: CustomContext) -> None:
await asyncio.sleep(random.randint(100, 200) / 100)
await self.send_embed(
ctx, f"{random.choice(ctx.l.misc.tip_intros)} {random.choice(ctx.l.misc.tips)}"
)
async def check_global(self, ctx: CustomContext) -> bool: # the global command check
command_name = ctx.command.qualified_name
if ctx.author.id in self.botban_cache:
ctx.failure_reason = "bot_banned"
return False
if not self.is_ready():
ctx.failure_reason = "not_ready"
return False
if ctx.guild is not None and command_name in self.disabled_commands.get(ctx.guild.id, ()):
ctx.failure_reason = "disabled"
return False
# handle cooldowns that need to be synced between shard groups / processes (aka karen cooldowns)
if command_name in self.d.cooldown_rates:
cooldown_info = await self.karen.cooldown(command_name, ctx.author.id)
if not cooldown_info.can_run:
ctx.custom_error = CommandOnKarenCooldown(cooldown_info.remaining)
return False
if command_name in self.d.concurrency_limited:
if not await self.karen.check_concurrency(command_name, ctx.author.id):
ctx.custom_error = MaxKarenConcurrencyReached()
return False
if ctx.command.cog_name == "Econ":
# check if user has paused econ
if await self.karen.check_econ_paused(ctx.author.id):
ctx.failure_reason = "econ_paused"
return False
return True
async def before_command_invoked(self, ctx: CustomContext):
self.command_count += 1
if ctx.command.cog_name == "Econ":
# random chance to spawn mob
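# randint(0, mob_chance) == 0 hits roughly once every (mob_chance + 1) economy commands, and only commands with a cooldown rate of at least 2 can actually spawn a mob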
if random.randint(0, self.d.mob_chance) == 0:
if self.d.cooldown_rates.get(ctx.command.qualified_name, 0) >= 2:
asyncio.create_task(self.get_cog("MobSpawner").spawn_event(ctx))
elif random.randint(0, self.d.tip_chance) == 0: # random chance to send tip
asyncio.create_task(self.send_tip(ctx))
try:
if ctx.command.qualified_name in self.d.concurrency_limited:
await self.karen.acquire_concurrency(ctx.command.qualified_name, ctx.author.id)
except Exception:
self.logger.error(
"An error occurred while attempting to acquire a concurrency lock for command %s for user %s",
ctx.command,
ctx.author.id,
exc_info=True,
)
raise
if ctx.command.qualified_name in self.d.cooldown_rates:
await self.karen.lb_command_ran(ctx.author.id)
await self.karen.command_execution(
ctx.author.id, getattr(ctx.guild, "id", None), ctx.command.qualified_name, False
)
async def after_command_invoked(self, ctx: CustomContext):
try:
if ctx.command.qualified_name in self.d.concurrency_limited:
await self.karen.release_concurrency(ctx.command.qualified_name, ctx.author.id)
except Exception:
self.logger.error(
"An error occurred while attempting to release a concurrency lock for command %s for user %s",
ctx.command,
ctx.author.id,
exc_info=True,
)
raise
async def on_app_command_completion(
self,
inter: discord.Interaction,
command: discord.app_commands.Command | discord.app_commands.ContextMenu,
):
if isinstance(command, discord.app_commands.Command):
await self.karen.command_execution(
inter.user.id, inter.guild_id, command.qualified_name, True
)
###### packet handlers #####################################################
@handle_packet(PacketType.EXEC_CODE)
async def packet_exec_code(self, code: str):
| raise
async def reply_embed(
self, location, message: str, ping: bool = False, *, ignore_exceptions: bool = False | random_line_split |
villager_bot.py | self.logger)
self.db = DatabaseProxy(self.karen)
await self.karen.connect()
cluster_info = await self.karen.fetch_cluster_init_info()
self.shard_count = cluster_info.shard_count
self.shard_ids = cluster_info.shard_ids
self.cluster_id = cluster_info.cluster_id
self.aiohttp = aiohttp.ClientSession()
for cog in self.cog_list:
await self.load_extension(f"bot.cogs.{cog}")
await super().start(self.k.discord_token)
async def close(self, *args, **kwargs):
if self.karen is not None:
await self.karen.disconnect()
if self.aiohttp is not None:
await self.aiohttp.close()
self.logger.info("Closed aiohttp ClientSession")
await super().close(*args, **kwargs)
async def get_prefix(self, message: discord.Message) -> str:
if message.guild:
return self.prefix_cache.get(message.guild.id, self.k.default_prefix)
return self.k.default_prefix
def get_language(self, ctx: CustomContext) -> Translation:
if ctx.guild:
return self.l[self.language_cache.get(ctx.guild.id, "en")]
return self.l["en"]
async def on_ready(self):
if self.cluster_id == 0:
try:
self.logger.info("Syncing slash commands...")
self.tree.copy_global_to(guild=await self.fetch_guild(self.k.support_server_id))
await self.tree.sync()
self.logger.info("Slash commands synced!")
except Exception:
self.logger.error(
"An error occurred in on_ready while syncing slash commands", exc_info=True
)
try:
self.logger.info("Syncing db item prices...")
item_prices = {
v.db_entry.item: v.db_entry.sell_price for k, v in self.d.shop_items.items()
}
item_prices.update(
{
self.d.farming.name_map[k]: v
for k, v in self.d.farming.emerald_yields.items()
}
)
item_prices.update({f.item: f.sell_price for f in self.d.fishing_findables})
await self.get_cog("Database").sync_item_prices(item_prices)
self.logger.info("Done syncing db item prices!")
except Exception:
self.logger.error(
"An error occurred in on_ready while syncing db item prices", exc_info=True
)
async def get_context(self, *args, **kwargs) -> CustomContext:
ctx = await super().get_context(*args, **kwargs, cls=CustomContext)
ctx.embed_color = self.embed_color
ctx.l = self.get_language(ctx)
return ctx
async def send_embed(self, location, message: str, *, ignore_exceptions: bool = False) -> None:
embed = discord.Embed(color=self.embed_color, description=message)
try:
await location.send(embed=embed)
except discord.errors.HTTPException:
if not ignore_exceptions:
raise
async def reply_embed(
self, location, message: str, ping: bool = False, *, ignore_exceptions: bool = False
) -> None:
embed = discord.Embed(color=self.embed_color, description=message)
try:
await location.reply(embed=embed, mention_author=ping)
except discord.errors.HTTPException as e:
if (
e.code == 50035
): # invalid form body, happens sometimes when the message to reply to can't be found?
await self.send_embed(location, message, ignore_exceptions=ignore_exceptions)
elif not ignore_exceptions:
raise
async def send_tip(self, ctx: CustomContext) -> None:
await asyncio.sleep(random.randint(100, 200) / 100)
await self.send_embed(
ctx, f"{random.choice(ctx.l.misc.tip_intros)} {random.choice(ctx.l.misc.tips)}"
)
async def check_global(self, ctx: CustomContext) -> bool: # the global command check
command_name = ctx.command.qualified_name
if ctx.author.id in self.botban_cache:
ctx.failure_reason = "bot_banned"
return False
if not self.is_ready():
ctx.failure_reason = "not_ready"
return False
if ctx.guild is not None and command_name in self.disabled_commands.get(ctx.guild.id, ()):
ctx.failure_reason = "disabled"
return False
# handle cooldowns that need to be synced between shard groups / processes (aka karen cooldowns)
if command_name in self.d.cooldown_rates:
cooldown_info = await self.karen.cooldown(command_name, ctx.author.id)
if not cooldown_info.can_run:
ctx.custom_error = CommandOnKarenCooldown(cooldown_info.remaining)
return False
if command_name in self.d.concurrency_limited:
if not await self.karen.check_concurrency(command_name, ctx.author.id):
ctx.custom_error = MaxKarenConcurrencyReached()
return False
if ctx.command.cog_name == "Econ":
# check if user has paused econ
if await self.karen.check_econ_paused(ctx.author.id):
ctx.failure_reason = "econ_paused"
return False
return True
async def before_command_invoked(self, ctx: CustomContext):
self.command_count += 1
if ctx.command.cog_name == "Econ":
# random chance to spawn mob
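# randint(0, mob_chance) == 0 hits roughly once every (mob_chance + 1) economy commands, and only commands with a cooldown rate of at least 2 can actually spawn a mob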
if random.randint(0, self.d.mob_chance) == 0:
if self.d.cooldown_rates.get(ctx.command.qualified_name, 0) >= 2:
asyncio.create_task(self.get_cog("MobSpawner").spawn_event(ctx))
elif random.randint(0, self.d.tip_chance) == 0: # random chance to send tip
asyncio.create_task(self.send_tip(ctx))
try:
if ctx.command.qualified_name in self.d.concurrency_limited:
await self.karen.acquire_concurrency(ctx.command.qualified_name, ctx.author.id)
except Exception:
self.logger.error(
"An error occurred while attempting to acquire a concurrency lock for command %s for user %s",
ctx.command,
ctx.author.id,
exc_info=True,
)
raise
if ctx.command.qualified_name in self.d.cooldown_rates:
await self.karen.lb_command_ran(ctx.author.id)
await self.karen.command_execution(
ctx.author.id, getattr(ctx.guild, "id", None), ctx.command.qualified_name, False
)
async def after_command_invoked(self, ctx: CustomContext):
try:
if ctx.command.qualified_name in self.d.concurrency_limited:
await self.karen.release_concurrency(ctx.command.qualified_name, ctx.author.id)
except Exception:
self.logger.error(
"An error occurred while attempting to release a concurrency lock for command %s for user %s",
ctx.command,
ctx.author.id,
exc_info=True,
)
raise
async def on_app_command_completion(
self,
inter: discord.Interaction,
command: discord.app_commands.Command | discord.app_commands.ContextMenu,
):
if isinstance(command, discord.app_commands.Command):
await self.karen.command_execution(
inter.user.id, inter.guild_id, command.qualified_name, True
)
###### packet handlers #####################################################
@handle_packet(PacketType.EXEC_CODE)
async def packet_exec_code(self, code: str):
result = await execute_code(
code,
{"bot": self, "db": self.db, "dbc": self.get_cog("Database"), "http": self.aiohttp},
)
if not isinstance(result, PACKET_DATA_TYPES):
result = repr(result)
return result
@handle_packet(PacketType.REMINDER)
async def packet_reminder(self, channel_id: int, user_id: int, message_id: int, reminder: str):
success = False
channel = self.get_channel(channel_id)
if channel is not None:
user = self.get_user(user_id)
if user is not None:
lang = self.get_language(channel)
try:
message = await channel.fetch_message(message_id)
await message.reply(
lang.useful.remind.reminder.format(user.mention, reminder),
mention_author=True,
)
success = True
except Exception:
try:
await channel.send(
lang.useful.remind.reminder.format(user.mention, reminder)
)
success = True
except Exception:
self.logger.error(
"An error occurred while sending a reminder", exc_info=True
)
return {"success": success}
@handle_packet(PacketType.FETCH_BOT_STATS)
async def packet_fetch_bot_stats(self):
return [
len(self.guilds),
len(self.users),
self.message_count,
self.command_count,
self.latency,
len(self.private_channels),
self.session_votes,
]
@handle_packet(PacketType.FETCH_SYSTEM_STATS)
async def packet_fetch_system_stats(self):
memory_info = psutil.virtual_memory()
return SystemStats(
identifier=f'Cluster {self.cluster_id} ({",".join(map(str, self.shard_ids))})',
cpu_usage_percent=psutil.getloadavg()[0],
memory_usage_bytes=(memory_info.total - memory_info.available),
memory_max_bytes=memory_info.total,
threads=psutil.Process().num_threads(),
asyncio_tasks=len(asyncio.all_tasks()),
start_time=self.start_time.datetime,
)
@handle_packet(PacketType.FETCH_GUILD_COUNT)
async def | packet_fetch_guild_count | identifier_name |
|
panel-page-manager.js | var LINKED_HASH_MAP = xutil.LinkedHashMap;
//------------------------------------------
// Type declarations
//------------------------------------------
var PANEL_PAGE_MANAGER = $namespace().PanelPageManager =
inheritsObject(
XDATASOURCE,
/**
* @param {Object} options
* {Object} adapter the adapter object
*/
function(options) {
// Queue recording page visit order; the most recently visited page is at the tail
this._oPanelPageSet = new LINKED_HASH_MAP();
this._oCurrPageWrap;
this._sCurrPageId;
// Attach the adapter's methods to this instance
extend(this, options.adapter);
}
);
var PANEL_PAGE_MANAGER_CLASS = PANEL_PAGE_MANAGER.prototype;
/**
* Initialize
*
* @public
*/
PANEL_PAGE_MANAGER_CLASS.init = function() {
this.$bind();
};
/**
* Open a page by URI: create the page if it does not exist, otherwise activate the existing one
*
* @public
* @param {string} uri e.g. di.some.SomePage?pageId=XXX&pageTitle=XXX&otherParam=XXX
* @param {Object} options extra parameters passed to the page (every parameter in the uri can be overridden here)
* @param {string} options.pageId
* @param {string} options.pageTitle
* @param {boolean} options.forceCreate force creation of a new page; when true the supplied pageId is ignored and a new pageId is generated
* @param {boolean} options.forceActive force activation, defaults to false
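* @example
* // Hypothetical page class and manager instance, shown only as a usage sketch:
* // pageManager.openByURI('di.some.SomePage?pageId=page1&pageTitle=Demo', { forceActive: true });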
*/
PANEL_PAGE_MANAGER_CLASS.openByURI = function(uri, options, oncreate) {
var arr = uri.split('?');
var pageClass = getByPath(arr[0]);
var param = parseParam(arr[1]);
options = options || {};
extend(param, options);
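// Values supplied in options take precedence over parameters parsed from the uri query string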
var forceCreate = param.forceCreate;
var pageId = forceCreate
? ('PANEL_PAGE_' + getUID('PANEL_PAGE'))
: param.pageId;
var pageTitle = param.pageTitle;
param.panelPageManager = this;
// Create a new tab page if it does not exist yet
var page = this.getPage(pageId);
if (!page || forceCreate) {
this.add(
function(opt) {
opt.el.appendChild(param.el = document.createElement('div'));
// pageClass here is always a subclass of di.shared.ui.PanelPage
page = new pageClass(param);
return page;
},
{
pageId: pageId,
title: pageTitle,
canClose: true
}
);
// Initialize the page
page.init();
oncreate && oncreate(page);
}
// Select and activate the page
this.select(pageId, param);
return page;
};
/**
* Add a panel page
*
* @public
* @param {ecui.ui.PanelPage|Function} panelPage the panel page to add,
* or a callback that creates the panel page.
* If a function, it is called as:
* @param {Object} options parameters
* {HTMLElement} el dom element to create the page in
* (may be empty depending on the implementation class)
* {ecui.ui.Control} parent parent control
* {string} pageId page ID
* @return {ecui.ui.PanelPage} the created page object
* @param {Object} options parameters
* {string} pageId page ID; generated automatically if omitted
* {string} title page title, optional
* {number} index position index; appended at the end if omitted
* {boolean} canClose whether the page can be closed
* @return {number} page instance ID
*/
PANEL_PAGE_MANAGER_CLASS.add = function(panelPage, options) {
var o, pageId;
options = options || {};
if (!panelPage) { return null; }
!hasValue(pageId = options.pageId)
&& (pageId = options.pageId = this.$genPageId());
if (this._oPanelPageSet.containsKey(pageId)) {
throw new Error('Duplicate panel page ID! id=' + pageId);
}
o = this.$addItem(panelPage, options);
this._oPanelPageSet.addFirst(
{ page: o.content, item: o.item },
pageId
);
return pageId;
};
/**
* Whether a panel page exists
*
* @public
* @param {string} pageId the page ID
* @return {boolean} whether it exists
*/
PANEL_PAGE_MANAGER_CLASS.exists = function(pageId) {
return !!this._oPanelPageSet.containsKey(pageId);
};
/**
* Select a panel page
*
* @public
* @param {string} nextPageId the page ID
* @param {Object} options extra parameters
* @param {boolean=} options.forceActive force activation (defaults to false)
*/
PANEL_PAGE_MANAGER_CLASS.select = function(nextPageId, options) {
options = options || {};
var forceActive = options.forceActive;
var nextPageWrap = this._oPanelPageSet.get(nextPageId);
if (nextPageWrap) {
var isChange = nextPageWrap != this._oCurrPageWrap;
if (isChange) {
// Deactivate the previous page
if (this._oCurrPageWrap) {
this._oCurrPageWrap.page.inactive();
this.notify('page.inactive', [this._sCurrPageId]);
}
// Switch the tab
this._oCurrPageWrap = nextPageWrap;
var lastPageId = this._sCurrPageId;
this._sCurrPageId = nextPageId;
this.$selectItem(nextPageWrap);
// Move the next page to the tail of the queue
this._oPanelPageSet.remove(nextPageId);
this._oPanelPageSet.addLast(nextPageWrap, nextPageId);
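// Removing and re-adding at the tail keeps the LinkedHashMap ordered from least to most recently visited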
this.notify('page.change', [nextPageId, lastPageId]);
}
if (forceActive || isChange) {
// Activate the next page
nextPageWrap.page.active(options);
this.notify('page.active', [nextPageId]);
}
}
};
/**
* Jump to a page in the stack
*
* @public
* @param {string} pageId the page ID
* @param {Object} options extra parameters
*/
PANEL_PAGE_MANAGER_CLASS.goTo = function(pageId, options) {
this.select(pageId, options);
};
/**
* Number of panel pages held
*
* @public
* @return {number} the count
*/
PANEL_PAGE_MANAGER_CLASS.size = function() {
return this._oPanelPageSet.size();
};
/**
* Get a page instance
*
* @public
* @param {string} pageId the page ID
* @retur | /**
* Get the current page instance
*
* @public
* @return {PanelPage} panelPage
*/
PANEL_PAGE_MANAGER_CLASS.getCurrentPage = function() {
return this._oCurrPageWrap ? this._oCurrPageWrap.page : null;
};
/**
* Get the current page ID
*
* @public
* @return {string} pageId
*/
PANEL_PAGE_MANAGER_CLASS.getCurrentPageId = function() {
return this._sCurrPageId;
};
/**
* Change the title
*
* @public
* @param {string} pageId Page ID
* @param {string} title Title
*/
PANEL_PAGE_MANAGER_CLASS.setTitle = function(pageId, title) {
return this.$setTitle(pageId, title);
};
/**
* Set a mark
*
* @public
* @param {string} pageId Page ID
* @param {string} mark The mark to set
*/
PANEL_PAGE_MANAGER_CLASS.mark = function(pageId, mark) {
return this.$mark(pageId, mark);
};
/**
* Handler for the page before-change event
*
* @protected
*/
PANEL_PAGE_MANAGER_CLASS.$pageBeforeChangeHandler = function() {
if (this._oCurrPageWrap) {
// Deactivate the previous page
this._oCurrPageWrap.page.inactive();
this.notify('page.inactive', [this._sCurrPageId]);
}
};
/**
* Handler for the page after-change event
*
* @protected
*/
PANEL_PAGE_MANAGER_CLASS.$pageAfterChangeHandler = function() {
var nextPageId = this.$retrievalPageId.apply(this, arguments);
var lastPageId = this._sCurrPageId;
var nextPageWrap;
if (nextPageWrap = this._oPanelPageSet.get(nextPageId)) {
// Move the current page to the end of the history list
this._oCurrPageWrap = nextPageWrap;
this._sCurrPageId | n {PanelPage} panelPage
*/
PANEL_PAGE_MANAGER_CLASS.getPage = function(pageId) {
return (this._oPanelPageSet.get(pageId) || {}).page;
};
| conditional_block |
panel-page-manager.js | ;
var LINKED_HASH_MAP = xutil.LinkedHashMap;
//------------------------------------------
// Type declaration
//------------------------------------------
var PANEL_PAGE_MANAGER = $namespace().PanelPageManager =
inheritsObject(
XDATASOURCE,
/**
* @param {Object} options
* {Object} adapter Adapter object
*/
function(options) {
// Queue recording the order in which pages are visited; the tail is the most recently visited
this._oPanelPageSet = new LINKED_HASH_MAP();
this._oCurrPageWrap;
this._sCurrPageId;
// Attach the adapter's methods
extend(this, options.adapter);
}
);
var PANEL_PAGE_MANAGER_CLASS = PANEL_PAGE_MANAGER.prototype;
/**
* Initialize
*
* @public
*/
PANEL_PAGE_MANAGER_CLASS.init = function() {
this.$bind();
};
/**
* Open by URI: create the page if it does not exist, otherwise open the existing one
*
* @public
* @param {string} uri e.g. di.some.SomePage?pageId=XXX&pageTitle=XXX&otherParam=XXX
* @param {Object} options Extra parameters passed to the page (any parameter in the uri can be overridden here)
* @param {string} options.pageId
* @param {string} options.pageTitle
* @param {boolean} options.forceCreate Force creation of a new page; if true the passed pageId is ignored and a new one is generated
* @param {boolean} options.forceActive Force activation, defaults to false
*/
PANEL_PAGE_MANAGER_CLASS.openByURI = function(uri, options, oncreate) {
var arr = uri.split('?');
var pageClass = getByPath(arr[0]);
var param = parseParam(arr[1]);
options = options || {};
extend(param, options);
var forceCreate = param.forceCreate;
var pageId = forceCreate
? ('PANEL_PAGE_' + getUID('PANEL_PAGE'))
: param.pageId;
var pageTitle = param.pageTitle;
param.panelPageManager = this;
// Create a new tab page if it does not exist
var page = this.getPage(pageId);
if (!page || forceCreate) {
this.add(
function(opt) {
opt.el.appendChild(param.el = document.createElement('div'));
// pageClass here is always a subclass of di.shared.ui.PanelPage
page = new pageClass(param);
return page;
},
{
pageId: pageId,
title: pageTitle,
canClose: true
}
);
// Initialize
page.init(); | }
// Select and activate
this.select(pageId, param);
return page;
};
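// Usage sketch (illustrative only, not part of the original source; the manager
// instance and the page class name are hypothetical):
//
// var page = panelPageManager.openByURI(
//     'di.shared.ui.SomePanelPage?pageId=demo&pageTitle=Demo',
//     { forceActive: true },
//     function (createdPage) { /* runs once, right after the page is created */ }
// );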
/**
* Add a panel page
*
* @public
* @param {ecui.ui.PanelPage|Function} panelPage The panel page to add,
* or a callback function that creates the panel page;
* if it is a function, it receives:
* @param {Object} options Parameters
* {HTMLElement} el DOM element in which the page is created
* (may be empty depending on the implementation class)
* {ecui.ui.Control} parent Parent control
* {string} pageId Page ID
* @return {ecui.ui.PanelPage} The page object
* @param {Object} options Parameters
* {string} pageId Page ID; auto-generated if not passed
* {string} title Page title, optional
* {number} index Position index; appended at the end if omitted
* {boolean} canClose Whether the page can be closed
* @return {number} Page instance ID
*/
PANEL_PAGE_MANAGER_CLASS.add = function(panelPage, options) {
var o, pageId;
options = options || {};
if (!panelPage) { return null; }
!hasValue(pageId = options.pageId)
&& (pageId = options.pageId = this.$genPageId());
if (this._oPanelPageSet.containsKey(pageId)) {
throw new Error('Duplicate panel page ID! id=' + pageId);
}
o = this.$addItem(panelPage, options);
this._oPanelPageSet.addFirst(
{ page: o.content, item: o.item },
pageId
);
return pageId;
};
/**
* Whether a panel page exists
*
* @public
* @param {string} pageId ID of the page
* @return {boolean} Whether it exists
*/
PANEL_PAGE_MANAGER_CLASS.exists = function(pageId) {
return !!this._oPanelPageSet.containsKey(pageId);
};
/**
* Select a panel page
*
* @public
* @param {string} nextPageId ID of the page
* @param {Object} options Extra parameters
* @param {boolean=} options.forceActive Force activation (defaults to false)
*/
PANEL_PAGE_MANAGER_CLASS.select = function(nextPageId, options) {
options = options || {};
var forceActive = options.forceActive;
var nextPageWrap = this._oPanelPageSet.get(nextPageId);
if (nextPageWrap) {
var isChange = nextPageWrap != this._oCurrPageWrap;
if (isChange) {
// Deactivate the previous page
if (this._oCurrPageWrap) {
this._oCurrPageWrap.page.inactive();
this.notify('page.inactive', [this._sCurrPageId]);
}
// Switch tabs
this._oCurrPageWrap = nextPageWrap;
var lastPageId = this._sCurrPageId;
this._sCurrPageId = nextPageId;
this.$selectItem(nextPageWrap);
// Move the next page to the tail of the queue
this._oPanelPageSet.remove(nextPageId);
this._oPanelPageSet.addLast(nextPageWrap, nextPageId);
this.notify('page.change', [nextPageId, lastPageId]);
}
if (forceActive || isChange) {
// Activate the next page
nextPageWrap.page.active(options);
this.notify('page.active', [nextPageId]);
}
}
};
/**
* Jump to a page in the stack
*
* @public
* @param {string} pageId Page ID
* @param {Object} options Extra parameters
*/
PANEL_PAGE_MANAGER_CLASS.goTo = function(pageId, options) {
this.select(pageId, options);
};
/**
* Number of panel pages held
*
* @public
* @return {number} The count
*/
PANEL_PAGE_MANAGER_CLASS.size = function() {
return this._oPanelPageSet.size();
};
/**
* Get a page instance
*
* @public
* @param {string} pageId Page ID
* @return {PanelPage} panelPage
*/
PANEL_PAGE_MANAGER_CLASS.getPage = function(pageId) {
return (this._oPanelPageSet.get(pageId) || {}).page;
};
/**
* Get the current page instance
*
* @public
* @return {PanelPage} panelPage
*/
PANEL_PAGE_MANAGER_CLASS.getCurrentPage = function() {
return this._oCurrPageWrap ? this._oCurrPageWrap.page : null;
};
/**
* Get the current page ID
*
* @public
* @return {string} pageId
*/
PANEL_PAGE_MANAGER_CLASS.getCurrentPageId = function() {
return this._sCurrPageId;
};
/**
* Change the title
*
* @public
* @param {string} pageId Page ID
* @param {string} title Title
*/
PANEL_PAGE_MANAGER_CLASS.setTitle = function(pageId, title) {
return this.$setTitle(pageId, title);
};
/**
* Set a mark
*
* @public
* @param {string} pageId Page ID
* @param {string} mark The mark to set
*/
PANEL_PAGE_MANAGER_CLASS.mark = function(pageId, mark) {
return this.$mark(pageId, mark);
};
/**
* Handler for the page before-change event
*
* @protected
*/
PANEL_PAGE_MANAGER_CLASS.$pageBeforeChangeHandler = function() {
if (this._oCurrPageWrap) {
// Deactivate the previous page
this._oCurrPageWrap.page.inactive();
this.notify('page.inactive', [this._sCurrPageId]);
}
};
/**
* Handler for the page after-change event
*
* @protected
*/
PANEL_PAGE_MANAGER_CLASS.$pageAfterChangeHandler = function() {
var nextPageId = this.$retrievalPageId.apply(this, arguments);
var lastPageId = this._sCurrPageId;
var nextPageWrap;
if (nextPageWrap = this._oPanelPageSet.get(nextPageId)) {
// Move the current page to the end of the history list
this._oCurrPageWrap = nextPageWrap;
this._sCurrPageId = nextPage | oncreate && oncreate(page); | random_line_split |
kvstore.go | (s, offset)
if nil == p {
return false
}
switch p.tt {
case proposal_lease:
this.rn.lease.update(this.rn, p.values[0].(int), p.values[1].(uint64))
case proposal_snapshot, proposal_update, proposal_kick:
unikey := p.values[0].(string)
if p.tt == proposal_kick {
logger.Debugln(unikey, "cache_kick")
kv, ok := this.elements[unikey]
if !ok {
return false
} else {
this.removeLRU(kv)
delete(this.elements, unikey)
}
} else {
kv, ok := this.elements[unikey]
if p.tt == proposal_update && !ok {
return false
}
version := p.values[1].(int64)
if !ok {
table, key := splitUniKey(unikey)
meta := this.storeMgr.dbmeta.GetTableMeta(table)
if nil == meta {
return false
}
kv = newkv(this, meta, key, unikey, false)
this.elements[unikey] = kv
}
if version == 0 {
kv.setStatus(cache_missing)
kv.fields = nil
logger.Debugln(p.tt, unikey, version, "cache_missing", kv.fields)
} else {
kv.setStatus(cache_ok)
kv.version = version
fields := p.values[2].([]*proto.Field)
logger.Debugln(p.tt, unikey, version, "cache_ok", kv.getStatus(), kv.isWriteBack(), fields)
if nil == kv.fields {
kv.fields = map[string]*proto.Field{}
}
for _, v := range fields {
// a mismatch means the database field type has changed; drop the old data
if !kv.meta.CheckFieldMeta(v) {
logger.Debugln("drop field", v.GetName())
} else {
kv.fields[v.GetName()] = v
}
}
}
kv.setSnapshoted(true)
this.updateLRU(kv)
}
default:
return false
}
}
return true
}
func (this *kvstore) readCommits(snapshotter *snap.Snapshotter, commitC <-chan interface{}, errorC <-chan error) {
for e := range commitC {
switch e.(type) {
case *commitedBatchProposal:
data := e.(*commitedBatchProposal)
if data == replaySnapshot {
// done replaying log; new data incoming
// OR signaled to load snapshot
snapshot, err := snapshotter.Load()
if err != nil {
logger.Fatalln(err)
} else {
logger.Infof("loading snapshot at term %d and index %d", snapshot.Metadata.Term, snapshot.Metadata.Index)
if !this.apply(snapshot.Data[8:], true) {
logger.Fatalln("recoverFromSnapshot failed")
}
}
} else if data == replayOK {
logger.Infoln("reply ok,keycount", len(this.elements))
return
} else {
data.apply(this)
}
case *readBatchSt:
e.(*readBatchSt).reply()
case leaseNotify:
this.gotLease()
}
}
if err, ok := <-errorC; ok {
logger.Fatalln(err)
}
}
func (this *kvstore) checkKvCount() bool {
MaxCachePerGroupSize := conf.GetConfig().MaxCachePerGroupSize
if len(this.elements) > MaxCachePerGroupSize {
return false
} else {
return true
}
}
type kvsnap struct {
uniKey string
fields map[string]*proto.Field
version int64
}
func (this *kvsnap) append2Str(s *str.Str) {
appendProposal2Str(s, proposal_snapshot, this.uniKey, this.version, this.fields)
}
func (this *kvstore) getSnapshot() [][]*kvsnap {
beg := time.Now()
ret := make([][]*kvsnap, 0, snapGroupSize)
snapGroup := make([][]*kv, snapGroupSize, snapGroupSize)
ch := make(chan []*kvsnap, snapGroupSize)
this.Lock()
defer this.Unlock()
// group the kvs by key
for k, v := range this.elements {
i := futil.StringHash(k) % snapGroupSize
snapGroup[i] = append(snapGroup[i], v)
}
// serialize the kvs of each group in parallel
for i := 0; i < snapGroupSize; i++ {
go func(i int) {
kvsnaps := make([]*kvsnap, 0, len(this.elements))
for _, v := range snapGroup[i] {
v.Lock()
status := v.getStatus()
if status == cache_ok || status == cache_missing {
snap := &kvsnap{
uniKey: v.uniKey,
version: v.version,
}
if v.fields != nil {
snap.fields = map[string]*proto.Field{}
for kk, vv := range v.fields {
snap.fields[kk] = vv
}
}
kvsnaps = append(kvsnaps, snap)
}
v.Unlock()
}
ch <- kvsnaps
}(i)
}
for i := 0; i < snapGroupSize; i++ {
v := <-ch
ret = append(ret, v)
}
logger.Infoln("clone time", time.Now().Sub(beg))
return ret
}
func (this *kvstore) gotLease() {
this.Lock()
defer this.Unlock()
// lease acquired: force the store to write every kv back to SQL once
for _, vv := range this.elements {
vv.Lock()
if !vv.isWriteBack() {
status := vv.getStatus()
if status == cache_ok || status == cache_missing {
vv.setWriteBack(true)
if status == cache_ok {
vv.setSqlFlag(sql_insert_update)
} else if status == cache_missing {
vv.setSqlFlag(sql_delete)
}
logger.Debugln("pushUpdateReq", vv.uniKey, status, vv.fields)
this.kvNode.sqlMgr.pushUpdateReq(vv)
}
}
vv.Unlock()
}
}
type storeMgr struct {
sync.RWMutex
stores map[int]*kvstore
mask int
dbmeta *dbmeta.DBMeta
}
func (this *storeMgr) getkvOnly(table string, key string, uniKey string) *kv {
store := this.getStore(uniKey)
if store != nil {
store.Lock()
defer store.Unlock()
return store.elements[uniKey]
} else {
return nil
}
}
func (this *storeMgr) getkv(table string, key string, uniKey string) (*kv, int32) {
var k *kv
var ok bool
var err int32 = errcode.ERR_OK
store := this.getStore(uniKey)
if nil != store {
store.Lock()
defer store.Unlock()
k, ok = store.elements[uniKey]
if ok {
if !this.dbmeta.CheckMetaVersion(k.meta.Version()) {
newMeta := this.dbmeta.GetTableMeta(table)
if newMeta != nil {
atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&k.meta)), unsafe.Pointer(newMeta))
} else {
//log error
err = errcode.ERR_INVAILD_TABLE
}
}
} else {
if !store.checkKvCount() {
// capacity limit reached: no new kv may be inserted
err = errcode.ERR_BUSY
} else {
meta := this.dbmeta.GetTableMeta(table)
if meta == nil {
err = errcode.ERR_INVAILD_TABLE
} else {
k = newkv(store, meta, key, uniKey, true)
store.elements[uniKey] = k
store.updateLRU(k)
}
}
}
} else {
fmt.Println("store == nil")
}
return k, err
}
func (this *storeMgr) getStoreByIndex(index int) *kvstore {
this.RLock()
defer this.RUnlock()
return this.stores[index]
}
func (this *storeMgr) getStore(uniKey string) *kvstore {
this.RLock()
defer this.RUnlock()
index := (futil.StringHash(uniKey) % this.mask) + 1
return this.stores[index]
}
func (this *storeMgr) addStore(index int, store *kvstore) bool {
if 0 == index || nil == store {
logger.Fatalln("0 == index || nil == store")
}
this.Lock()
defer this.Unlock()
_, ok := this.stores[index]
if ok {
return false
}
this.stores[index] = store
return true
}
func (this *storeMgr) stop() {
this.RLock()
defer this.RUnlock()
for _, v := range this.stores {
v.stop()
}
}
func newKVStore(storeMgr *storeMgr, kvNode *KVNode, proposeC *util.BlockQueue, readRe | qC * | identifier_name |
|
kvstore.go | oseC.AddNoWait(&asynTaskKick{kv: kv}); nil == err {
return true, removeDirect
} else {
kv.setKicking(false)
return false, removeDirect
}
}
func splitUniKey(s string) (table string, key string) {
i := -1
for k, v := range s {
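// 58 is the ASCII code of ':', the separator between table name and key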
if v == 58 {
i = k
break
}
}
if i >= 0 {
table = s[:i]
key = s[i+1:]
}
return
}
func (this *kvstore) apply(data []byte, snapshot bool) bool {
compressFlag := binary.BigEndian.Uint16(data[:2])
if compressFlag == compressMagic {
var err error
data, err = this.unCompressor.UnCompress(data[2:])
if nil != err {
logger.Errorln("uncompress error")
return false
}
} else {
data = data[2:]
}
s := str.NewStr(data, len(data))
offset := 0
this.Lock()
defer this.Unlock()
if snapshot {
this.elements = map[string]*kv{}
this.lruHead.nnext = &this.lruTail
this.lruTail.pprev = &this.lruHead
}
var p *proposal
for offset < s.Len() {
p, offset = readProposal(s, offset)
if nil == p {
return false
}
switch p.tt {
case proposal_lease:
this.rn.lease.update(this.rn, p.values[0].(int), p.values[1].(uint64))
case proposal_snapshot, proposal_update, proposal_kick:
unikey := p.values[0].(string)
if p.tt == proposal_kick {
logger.Debugln(unikey, "cache_kick")
kv, ok := this.elements[unikey]
if !ok {
return false
} else {
this.removeLRU(kv)
delete(this.elements, unikey)
}
} else {
kv, ok := this.elements[unikey]
if p.tt == proposal_update && !ok {
return false
}
version := p.values[1].(int64)
if !ok {
table, key := splitUniKey(unikey)
meta := this.storeMgr.dbmeta.GetTableMeta(table)
if nil == meta {
return false
}
kv = newkv(this, meta, key, unikey, false)
this.elements[unikey] = kv
}
if version == 0 {
kv.setStatus(cache_missing)
kv.fields = nil
logger.Debugln(p.tt, unikey, version, "cache_missing", kv.fields)
} else {
kv.setStatus(cache_ok)
kv.version = version
fields := p.values[2].([]*proto.Field)
logger.Debugln(p.tt, unikey, version, "cache_ok", kv.getStatus(), kv.isWriteBack(), fields)
if nil == kv.fields {
kv.fields = map[string]*proto.Field{}
}
for _, v := range fields {
// a mismatch means the database field type has changed; drop the old data
if !kv.meta.CheckFieldMeta(v) {
logger.Debugln("drop field", v.GetName())
} else {
kv.fields[v.GetName()] = v
}
}
}
kv.setSnapshoted(true)
this.updateLRU(kv)
}
default:
return false
}
}
return true
}
func (this *kvstore) readCommits(snapshotter *snap.Snapshotter, commitC <-chan interface{}, errorC <-chan error) {
for e := range commitC {
switch e.(type) {
case *commitedBatchProposal:
data := e.(*commitedBatchProposal)
if data == replaySnapshot {
// done replaying log; new data incoming
// OR signaled to load snapshot
snapshot, err := snapshotter.Load()
if err != nil {
logger.Fatalln(err)
} else {
logger.Infof("loading snapshot at term %d and index %d", snapshot.Metadata.Term, snapshot.Metadata.Index)
if !this.apply(snapshot.Data[8:], true) {
logger.Fatalln("recoverFromSnapshot failed")
}
}
} else if data == replayOK {
logger.Infoln("reply ok,keycount", len(this.elements))
return
} else {
data.apply(this)
}
case *readBatchSt:
e.(*readBatchSt).reply()
case leaseNotify:
this.gotLease()
}
}
if err, ok := <-errorC; ok {
logger.Fatalln(err)
}
}
func (this *kvstore) checkKvCount() bool {
MaxCachePerGroupSize := conf.GetConfig().MaxCachePerGroupSize
if len(this.elements) > MaxCachePerGroupSize {
return false
} else {
return true
}
}
type kvsnap struct {
uniKey string
fields map[string]*proto.Field
version int64
}
func (this *kvsnap) append2Str(s *str.Str) {
appendProposal2Str(s, proposal_snapshot, this.uniKey, this.version, this.fields)
}
func (this *kvstore) getSnapshot() [][]*kvsnap {
beg := time.Now()
ret := make([][]*kvsnap, 0, snapGroupSize)
snapGroup := make([][]*kv, snapGroupSize, snapGroupSize)
ch := make(chan []*kvsnap, snapGroupSize)
this.Lock()
defer this.Unlock()
// group the kvs by key
for k, v := range this.elements {
i := futil.StringHash(k) % snapGroupSize
snapGroup[i] = append(snapGroup[i], v)
}
// serialize the kvs of each group in parallel
for i := 0; i < snapGroupSize; i++ {
go func(i int) {
kvsnaps := make([]*kvsnap, 0, len(this.elements))
for _, v := range snapGroup[i] {
v.Lock()
status := v.getStatus()
if status == cache_ok || status == cache_missing {
snap := &kvsnap{
uniKey: v.uniKey,
version: v.version,
}
if v.fields != nil {
snap.fields = map[string]*proto.Field{}
for kk, vv := range v.fields {
snap.fields[kk] = vv
}
}
kvsnaps = append(kvsnaps, snap)
}
v.Unlock()
}
ch <- kvsnaps
}(i)
}
for i := 0; i < snapGroupSize; i++ {
v := <-ch
ret = append(ret, v)
}
logger.Infoln("clone time", time.Now().Sub(beg))
return ret
}
func (this *kvstore) gotLease() {
this.Lock()
defer this.Unlock()
// lease acquired: force the store to write every kv back to SQL once
for _, vv := range this.elements {
vv.Lock()
if !vv.isWriteBack() {
status := vv.getStatus()
if status == cache_ok || status == cache_missing {
vv.setWriteBack(true)
if status == cache_ok {
vv.setSqlFlag(sql_insert_update)
} else if status == cache_missing {
vv.setSqlFlag(sql_delete)
}
logger.Debugln("pushUpdateReq", vv.uniKey, status, vv.fields)
this.kvNode.sqlMgr.pushUpdateReq(vv)
}
}
vv.Unlock()
}
}
type storeMgr struct {
sync.RWMutex
stores map[int]*kvstore
mask int
dbmeta *dbmeta.DBMeta
}
func (this *storeMgr) getkvOnly(table string, key string, uniKey string) *kv {
store := this.getStore(uniKey)
if store != nil {
store.Lock()
defer store.Unlock()
return store.elements[uniKey]
} else {
return nil
}
}
func (this *storeMgr) getkv(table string, key string, uniKey string) (*kv, int32) {
var k *kv
var ok bool
var err int32 = errcode.ERR_OK
store := this.getStore(uniKey)
if nil != store {
store.Lock()
defer store.Unlock()
k, ok | = store.elements[uniKey]
if ok {
if !this.dbmeta.CheckMetaVersion(k.meta.Version()) {
newMeta := this.dbmeta.GetTableMeta(table)
if newMeta != nil {
atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&k.meta)), unsafe.Pointer(newMeta))
} else {
//log error
err = errcode.ERR_INVAILD_TABLE
}
}
} else {
if !store.checkKvCount() {
// capacity limit reached: no new kv may be inserted
err = errcode.ERR_BUSY
} else {
meta := this.dbmeta.GetTableMeta(table)
if meta == nil {
err = errcode.ERR_INVAILD_TABLE | identifier_body |
|
kvstore.go |
func (this *kvstore) getRaftNode() *raftNode {
return this.rn
}
func (this *kvstore) removeKv(k *kv) {
processAgain := false
this.Lock()
k.Lock()
if !k.cmdQueue.empty() {
k.resetStatus()
processAgain = true
} else {
k.setStatus(cache_remove)
this.removeLRU(k)
delete(this.elements, k.uniKey)
}
k.Unlock()
this.Unlock()
if processAgain {
k.processCmd(nil)
}
}
// issue a consistent-read request
func (this *kvstore) issueReadReq(task asynCmdTaskI) {
if err := this.readReqC.AddNoWait(task); nil != err {
task.onError(errcode.ERR_SERVER_STOPED)
}
}
func (this *kvstore) issueConfChange(task *asynTaskConfChange) {
this.confChangeC <- task
}
func (this *kvstore) updateLRU(kv *kv) {
if kv.nnext != nil || kv.pprev != nil {
// remove it first
kv.pprev.nnext = kv.nnext
kv.nnext.pprev = kv.pprev
kv.nnext = nil
kv.pprev = nil
}
// insert at the head
kv.nnext = this.lruHead.nnext
kv.nnext.pprev = kv
kv.pprev = &this.lruHead
this.lruHead.nnext = kv
}
func (this *kvstore) removeLRU(kv *kv) {
kv.pprev.nnext = kv.nnext
kv.nnext.pprev = kv.pprev
kv.nnext = nil
kv.pprev = nil
}
func (this *kvstore) doLRU() {
this.Lock()
defer this.Unlock()
// kick must not run while the lease is not held
if this.rn.hasLease() {
MaxCachePerGroupSize := conf.GetConfig().MaxCachePerGroupSize
if this.lruHead.nnext != &this.lruTail {
kv := this.lruTail.pprev
count := 0
for len(this.elements)-count > MaxCachePerGroupSize {
if kv == &this.lruHead {
return
}
ok, removeDirect := this.tryKick(kv)
if !ok {
return
}
prev := kv.pprev
if removeDirect {
this.removeLRU(kv)
kv.setStatus(cache_remove)
delete(this.elements, kv.uniKey)
} else {
count++
}
kv = prev
}
}
}
}
func (this *kvstore) kick(taskKick *asynCmdTaskKick) bool {
kv := taskKick.getKV()
kv.setKicking(true)
if err := this.proposeC.AddNoWait(taskKick); nil == err {
return true
} else {
return false
}
}
func (this *kvstore) tryKick(kv *kv) (bool, bool) {
removeDirect := false
kv.Lock()
defer kv.Unlock()
if kv.isKicking() {
return true, removeDirect
}
if kv.kickable() {
kv.setKicking(true)
} else {
return false, removeDirect
}
if kv.getStatus() == cache_new {
removeDirect = true
return true, removeDirect
}
if err := this.proposeC.AddNoWait(&asynTaskKick{kv: kv}); nil == err {
return true, removeDirect
} else {
kv.setKicking(false)
return false, removeDirect
}
}
func splitUniKey(s string) (table string, key string) {
i := -1
for k, v := range s {
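// 58 is the ASCII code of ':', the separator between table name and key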
if v == 58 {
i = k
break
}
}
if i >= 0 {
table = s[:i]
key = s[i+1:]
}
return
}
func (this *kvstore) apply(data []byte, snapshot bool) bool {
compressFlag := binary.BigEndian.Uint16(data[:2])
if compressFlag == compressMagic {
var err error
data, err = this.unCompressor.UnCompress(data[2:])
if nil != err {
logger.Errorln("uncompress error")
return false
}
} else {
data = data[2:]
}
s := str.NewStr(data, len(data))
offset := 0
this.Lock()
defer this.Unlock()
if snapshot {
this.elements = map[string]*kv{}
this.lruHead.nnext = &this.lruTail
this.lruTail.pprev = &this.lruHead
}
var p *proposal
for offset < s.Len() {
p, offset = readProposal(s, offset)
if nil == p {
return false
}
switch p.tt {
case proposal_lease:
this.rn.lease.update(this.rn, p.values[0].(int), p.values[1].(uint64))
case proposal_snapshot, proposal_update, proposal_kick:
unikey := p.values[0].(string)
if p.tt == proposal_kick {
logger.Debugln(unikey, "cache_kick")
kv, ok := this.elements[unikey]
if !ok {
return false
} else {
this.removeLRU(kv)
delete(this.elements, unikey)
}
} else {
kv, ok := this.elements[unikey]
if p.tt == proposal_update && !ok {
return false
}
version := p.values[1].(int64)
if !ok {
table, key := splitUniKey(unikey)
meta := this.storeMgr.dbmeta.GetTableMeta(table)
if nil == meta {
return false
}
kv = newkv(this, meta, key, unikey, false)
this.elements[unikey] = kv
}
if version == 0 {
kv.setStatus(cache_missing)
kv.fields = nil
logger.Debugln(p.tt, unikey, version, "cache_missing", kv.fields)
} else {
kv.setStatus(cache_ok)
kv.version = version
fields := p.values[2].([]*proto.Field)
logger.Debugln(p.tt, unikey, version, "cache_ok", kv.getStatus(), kv.isWriteBack(), fields)
if nil == kv.fields {
kv.fields = map[string]*proto.Field{}
}
for _, v := range fields {
// a mismatch means the database field type has changed; drop the old data
if !kv.meta.CheckFieldMeta(v) {
logger.Debugln("drop field", v.GetName())
} else {
kv.fields[v.GetName()] = v
}
}
}
kv.setSnapshoted(true)
this.updateLRU(kv)
}
default:
return false
}
}
return true
}
func (this *kvstore) readCommits(snapshotter *snap.Snapshotter, commitC <-chan interface{}, errorC <-chan error) {
for e := range commitC {
switch e.(type) {
case *commitedBatchProposal:
data := e.(*commitedBatchProposal)
if data == replaySnapshot {
// done replaying log; new data incoming
// OR signaled to load snapshot
snapshot, err := snapshotter.Load()
if err != nil {
logger.Fatalln(err)
} else {
logger.Infof("loading snapshot at term %d and index %d", snapshot.Metadata.Term, snapshot.Metadata.Index)
if !this.apply(snapshot.Data[8:], true) {
logger.Fatalln("recoverFromSnapshot failed")
}
}
} else if data == replayOK {
logger.Infoln("reply ok,keycount", len(this.elements))
return
} else {
data.apply(this)
}
case *readBatchSt:
e.(*readBatchSt).reply()
case leaseNotify:
this.gotLease()
}
}
if err, ok := <-errorC; ok {
logger.Fatalln(err)
}
}
func (this *kvstore) checkKvCount() bool {
MaxCachePerGroupSize := conf.GetConfig().MaxCachePerGroupSize
if len(this.elements) > MaxCachePerGroupSize {
return false
} else {
return true
}
}
type kvsnap struct {
uniKey string
fields map[string]*proto.Field
version int64
}
func (this *kvsnap) append2Str(s *str.Str) {
appendProposal2Str(s, proposal_snapshot, this.uniKey, this.version, this.fields)
}
func (this *kvstore) getSnapshot() [][]*kvsnap {
beg := time.Now()
ret := make([][]*kvsnap, 0, snapGroupSize)
snapGroup := make([][]*kv, snapGroupSize, snapGroupSize)
ch := make(chan []*kvsnap, snapGroupSize)
this.Lock()
defer this.Unlock()
// group the kvs by key
for k, v := range this.elements {
i := futil.StringHash(k) % snapGroupSize
snapGroup[i] = append | return this.kvNode
} | random_line_split |
|
kvstore.go | (this *kvstore) issueReadReq(task asynCmdTaskI) {
if err := this.readReqC.AddNoWait(task); nil != err {
task.onError(errcode.ERR_SERVER_STOPED)
}
}
func (this *kvstore) issueConfChange(task *asynTaskConfChange) {
this.confChangeC <- task
}
func (this *kvstore) updateLRU(kv *kv) {
if kv.nnext != nil || kv.pprev != nil {
// remove it first
kv.pprev.nnext = kv.nnext
kv.nnext.pprev = kv.pprev
kv.nnext = nil
kv.pprev = nil
}
// insert at the head
kv.nnext = this.lruHead.nnext
kv.nnext.pprev = kv
kv.pprev = &this.lruHead
this.lruHead.nnext = kv
}
func (this *kvstore) removeLRU(kv *kv) {
kv.pprev.nnext = kv.nnext
kv.nnext.pprev = kv.pprev
kv.nnext = nil
kv.pprev = nil
}
func (this *kvstore) doLRU() {
this.Lock()
defer this.Unlock()
// kick must not run while the lease is not held
if this.rn.hasLease() {
MaxCachePerGroupSize := conf.GetConfig().MaxCachePerGroupSize
if this.lruHead.nnext != &this.lruTail {
kv := this.lruTail.pprev
count := 0
for len(this.elements)-count > MaxCachePerGroupSize {
if kv == &this.lruHead {
return
}
ok, removeDirect := this.tryKick(kv)
if !ok {
return
}
prev := kv.pprev
if removeDirect {
this.removeLRU(kv)
kv.setStatus(cache_remove)
delete(this.elements, kv.uniKey)
} else {
count++
}
kv = prev
}
}
}
}
func (this *kvstore) kick(taskKick *asynCmdTaskKick) bool {
kv := taskKick.getKV()
kv.setKicking(true)
if err := this.proposeC.AddNoWait(taskKick); nil == err {
return true
} else {
return false
}
}
func (this *kvstore) tryKick(kv *kv) (bool, bool) {
removeDirect := false
kv.Lock()
defer kv.Unlock()
if kv.isKicking() {
return true, removeDirect
}
if kv.kickable() {
kv.setKicking(true)
} else {
return false, removeDirect
}
if kv.getStatus() == cache_new {
removeDirect = true
return true, removeDirect
}
if err := this.proposeC.AddNoWait(&asynTaskKick{kv: kv}); nil == err {
return true, removeDirect
} else {
kv.setKicking(false)
return false, removeDirect
}
}
func splitUniKey(s string) (table string, key string) {
i := -1
for k, v := range s {
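// 58 is the ASCII code of ':', the separator between table name and key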
if v == 58 {
i = k
break
}
}
if i >= 0 {
table = s[:i]
key = s[i+1:]
}
return
}
func (this *kvstore) apply(data []byte, snapshot bool) bool {
compressFlag := binary.BigEndian.Uint16(data[:2])
if compressFlag == compressMagic {
var err error
data, err = this.unCompressor.UnCompress(data[2:])
if nil != err {
logger.Errorln("uncompress error")
return false
}
} else {
data = data[2:]
}
s := str.NewStr(data, len(data))
offset := 0
this.Lock()
defer this.Unlock()
if snapshot {
this.elements = map[string]*kv{}
this.lruHead.nnext = &this.lruTail
this.lruTail.pprev = &this.lruHead
}
var p *proposal
for offset < s.Len() {
p, offset = readProposal(s, offset)
if nil == p {
return false
}
switch p.tt {
case proposal_lease:
this.rn.lease.update(this.rn, p.values[0].(int), p.values[1].(uint64))
case proposal_snapshot, proposal_update, proposal_kick:
unikey := p.values[0].(string)
if p.tt == proposal_kick {
logger.Debugln(unikey, "cache_kick")
kv, ok := this.elements[unikey]
if !ok {
return false
} else {
this.removeLRU(kv)
delete(this.elements, unikey)
}
} else {
kv, ok := this.elements[unikey]
if p.tt == proposal_update && !ok {
return false
}
version := p.valu | {
table, key := splitUniKey(unikey)
meta := this.storeMgr.dbmeta.GetTableMeta(table)
if nil == meta {
return false
}
kv = newkv(this, meta, key, unikey, false)
this.elements[unikey] = kv
}
if version == 0 {
kv.setStatus(cache_missing)
kv.fields = nil
logger.Debugln(p.tt, unikey, version, "cache_missing", kv.fields)
} else {
kv.setStatus(cache_ok)
kv.version = version
fields := p.values[2].([]*proto.Field)
logger.Debugln(p.tt, unikey, version, "cache_ok", kv.getStatus(), kv.isWriteBack(), fields)
if nil == kv.fields {
kv.fields = map[string]*proto.Field{}
}
for _, v := range fields {
// a mismatch means the database field type has changed; drop the old data
if !kv.meta.CheckFieldMeta(v) {
logger.Debugln("drop field", v.GetName())
} else {
kv.fields[v.GetName()] = v
}
}
}
kv.setSnapshoted(true)
this.updateLRU(kv)
}
default:
return false
}
}
return true
}
func (this *kvstore) readCommits(snapshotter *snap.Snapshotter, commitC <-chan interface{}, errorC <-chan error) {
for e := range commitC {
switch e.(type) {
case *commitedBatchProposal:
data := e.(*commitedBatchProposal)
if data == replaySnapshot {
// done replaying log; new data incoming
// OR signaled to load snapshot
snapshot, err := snapshotter.Load()
if err != nil {
logger.Fatalln(err)
} else {
logger.Infof("loading snapshot at term %d and index %d", snapshot.Metadata.Term, snapshot.Metadata.Index)
if !this.apply(snapshot.Data[8:], true) {
logger.Fatalln("recoverFromSnapshot failed")
}
}
} else if data == replayOK {
logger.Infoln("reply ok,keycount", len(this.elements))
return
} else {
data.apply(this)
}
case *readBatchSt:
e.(*readBatchSt).reply()
case leaseNotify:
this.gotLease()
}
}
if err, ok := <-errorC; ok {
logger.Fatalln(err)
}
}
func (this *kvstore) checkKvCount() bool {
MaxCachePerGroupSize := conf.GetConfig().MaxCachePerGroupSize
if len(this.elements) > MaxCachePerGroupSize {
return false
} else {
return true
}
}
type kvsnap struct {
uniKey string
fields map[string]*proto.Field
version int64
}
func (this *kvsnap) append2Str(s *str.Str) {
appendProposal2Str(s, proposal_snapshot, this.uniKey, this.version, this.fields)
}
func (this *kvstore) getSnapshot() [][]*kvsnap {
beg := time.Now()
ret := make([][]*kvsnap, 0, snapGroupSize)
snapGroup := make([][]*kv, snapGroupSize, snapGroupSize)
ch := make(chan []*kvsnap, snapGroupSize)
this.Lock()
defer this.Unlock()
// group the kvs by key
for k, v := range this.elements {
i := futil.StringHash(k) % snapGroupSize
snapGroup[i] = append(snapGroup[i], v)
}
// serialize the kvs of each group in parallel
for i := 0; i < snapGroupSize; i++ {
go func(i int) {
kvsnaps := make([]*kvsnap, 0, len(this.elements))
for _, v := range snapGroup[i] {
v.Lock()
status := v.getStatus()
if status == cache_ok || status == cache_missing {
snap := &kvsnap{
uniKey: v.uniKey,
version: v.version,
}
if v.fields != | es[1].(int64)
if !ok | conditional_block |
shortid.go | s
// KpTvcui99k
// KFTGcuiQ9p
// KFmGeu-Q9O
// tFTvcu-QQt
// tpTveu-99u
//
// Life span
//
// The package guarantees the generation of unique Ids with zero collisions for 34 years
// (1/1/2016-1/1/2050) using the same worker Id within a single (although concurrent) application if
// application restarts take longer than 1 millisecond. The package supports up to 32 workers, all
// providing unique sequences.
//
// Implementation details
//
// Although heavily inspired by the node.js shortid library this is
// not a simple Go port. In addition it
//
// - is safe for concurrent use;
// - does not require any yearly version/epoch resets;
// - provides stable Id size over a long period at the rate of 1ms;
// - guarantees no collisions (due to guaranteed fixed size of Ids between milliseconds and because
// multiple requests within the same ms lead to longer Ids with the prefix unique to the ms);
// - supports 32 workers rather than 16.
//
// The algorithm uses less randomness than the original node.js implementation, which permits to
// extend the life span as well as reduce and guarantee the length. In general terms, each Id
// has the following 3 pieces of information encoded: the millisecond (first 8 symbols), the worker
// Id (9th symbol), running concurrent counter within the same millisecond, only if required, over
// all remaining symbols. The element of randomness per symbol is 1/2 for the worker and the
// millisecond and 0 for the counter. Here 0 means no randomness, i.e. every value is encoded using
// a 64-base alphabet; 1/2 means one of two matching symbols of the supplied alphabet, 1/4 one of
// four matching symbols. The original algorithm of the node.js module uses 1/4 throughout.
//
// All methods accepting the parameters that govern the randomness are exported and can be used
// to directly implement an algorithm with e.g. more randomness, but with longer Ids and shorter
// life spans.
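//
// Usage (illustrative sketch, not part of the original source):
//
//	id, err := shortid.Generate()                          // package-level default generator
//	sid, err2 := shortid.New(1, shortid.DefaultABC, 2342)  // dedicated generator: worker 1, custom seed
//	id2 := sid.MustGenerate()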
package shortid
import (
randc "crypto/rand"
"errors"
"fmt"
"math"
randm "math/rand"
"sync"
"sync/atomic"
"time"
"unsafe"
)
// Version defines the library version.
const Version = 1.1
// DefaultABC is the default URL-friendly alphabet.
const DefaultABC = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_-"
// Abc represents a shuffled alphabet used to generate the Ids and provides methods to
// encode data.
type Abc struct {
alphabet []rune
}
// Shortid type represents a short Id generator working with a given alphabet.
type Shortid struct {
abc Abc
worker uint
epoch time.Time // ids can be generated for 34 years since this date
ms uint // ms since epoch for the last id
count uint // request count within the same ms
mx sync.Mutex // locks access to ms and count
}
var shortid *Shortid
func init() {
shortid = MustNew(0, DefaultABC, 1)
}
// GetDefault retrieves the default short Id generator initialised with the default alphabet,
// worker=0 and seed=1. The default can be overwritten using SetDefault.
func GetDefault() *Shortid {
return (*Shortid)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&shortid))))
}
// SetDefault overwrites the default generator.
func SetDefault(sid *Shortid) {
target := (*unsafe.Pointer)(unsafe.Pointer(&shortid))
source := unsafe.Pointer(sid)
atomic.SwapPointer(target, source)
}
// Generate generates an Id using the default generator.
func Generate() (string, error) {
return shortid.Generate()
}
// MustGenerate acts just like Generate, but panics instead of returning errors.
func MustGenerate() string {
id, err := Generate()
if err == nil {
return id
}
panic(err)
}
// New constructs an instance of the short Id generator for the given worker number [0,31], alphabet
// (64 unique symbols) and seed value (to shuffle the alphabet). The worker number should be
// different for multiple or distributed processes generating Ids into the same data space. The
// seed, on contrary, should be identical.
func New(worker uint8, alphabet string, seed uint64) (*Shortid, error) {
if worker > 31 {
return nil, errors.New("expected worker in the range [0,31]")
}
abc, err := NewAbc(alphabet, seed)
if err == nil {
sid := &Shortid{
abc: abc,
worker: uint(worker),
epoch: time.Date(2016, time.January, 1, 0, 0, 0, 0, time.UTC),
ms: 0,
count: 0,
}
return sid, nil
}
return nil, err
}
// MustNew acts just like New, but panics instead of returning errors.
func MustNew(worker uint8, alphabet string, seed uint64) *Shortid {
sid, err := New(worker, alphabet, seed)
if err == nil {
return sid
}
panic(err)
}
// Generate generates a new short Id.
func (sid *Shortid) Generate() (string, error) {
return sid.GenerateInternal(nil, sid.epoch)
}
// MustGenerate acts just like Generate, but panics instead of returning errors.
func (sid *Shortid) MustGenerate() string {
id, err := sid.Generate()
if err == nil {
return id
}
panic(err)
}
// GenerateInternal should only be used for testing purposes.
func (sid *Shortid) GenerateInternal(tm *time.Time, epoch time.Time) (string, error) {
ms, count := sid.getMsAndCounter(tm, epoch)
idrunes := make([]rune, 9)
if tmp, err := sid.abc.Encode(ms, 8, 5); err == nil {
copy(idrunes, tmp) // first 8 symbols
} else {
return "", err
}
if tmp, err := sid.abc.Encode(sid.worker, 1, 5); err == nil {
idrunes[8] = tmp[0]
} else {
return "", err
}
if count > 0 {
if countrunes, err := sid.abc.Encode(count, 0, 6); err == nil {
// only extend if really need it
idrunes = append(idrunes, countrunes...)
} else {
return "", err
}
}
return string(idrunes), nil
}
func (sid *Shortid) getMsAndCounter(tm *time.Time, epoch time.Time) (uint, uint) {
sid.mx.Lock()
defer sid.mx.Unlock()
var ms uint
if tm != nil {
ms = uint(tm.Sub(epoch).Nanoseconds() / 1000000)
} else {
ms = uint(time.Now().Sub(epoch).Nanoseconds() / 1000000)
}
if ms == sid.ms {
sid.count++
} else {
sid.count = 0
sid.ms = ms
}
return sid.ms, sid.count
}
// String returns a string representation of the short Id generator.
func (sid *Shortid) String() string {
return fmt.Sprintf("Shortid(worker=%v, epoch=%v, abc=%v)", sid.worker, sid.epoch, sid.abc)
}
// Abc returns the instance of alphabet used for representing the Ids.
func (sid *Shortid) Abc() Abc |
// Epoch returns the value of epoch used as the beginning of millisecond counting (normally
// 2016-01-01 00:00:00 local time)
func (sid *Shortid) Epoch() time.Time {
return sid.epoch
}
// Worker returns the value of worker for this short Id generator.
func (sid *Shortid) Worker() uint {
return sid.worker
}
// NewAbc constructs a new instance of shuffled alphabet to be used for Id representation.
func NewAbc(alphabet string, seed uint64) (Abc, error) {
runes := []rune(alphabet)
if len(runes) != len(DefaultABC) {
return Abc{}, fmt.Errorf("alphabet must contain %v unique characters", len(DefaultABC))
}
if nonUnique(runes) {
return Abc{}, errors.New("alphabet must contain unique characters only")
}
abc := Abc{alphabet: nil}
abc.shuffle(alphabet, seed)
return abc, nil
}
// MustNewAbc acts just like NewAbc, but panics instead of returning errors.
func MustNewAbc(alphabet string, seed uint64) Abc {
res, err := NewAbc(alphabet, seed)
if err == nil {
return res
}
panic(err)
}
func nonUnique(runes []rune) bool {
found := make(map[rune]struct{})
for _, r := range runes {
if _, seen := found[r]; | {
return sid.abc
} | identifier_body |
shortid.go | guarantees no collisions (due to guaranteed fixed size of Ids between milliseconds and because
// multiple requests within the same ms lead to longer Ids with the prefix unique to the ms);
// - supports 32 workers rather than 16.
//
// The algorithm uses less randomness than the original node.js implementation, which permits to
// extend the life span as well as reduce and guarantee the length. In general terms, each Id
// has the following 3 pieces of information encoded: the millisecond (first 8 symbols), the worker
// Id (9th symbol), running concurrent counter within the same millisecond, only if required, over
// all remaining symbols. The element of randomness per symbol is 1/2 for the worker and the
// millisecond and 0 for the counter. Here 0 means no randomness, i.e. every value is encoded using
// a 64-base alphabet; 1/2 means one of two matching symbols of the supplied alphabet, 1/4 one of
// four matching symbols. The original algorithm of the node.js module uses 1/4 throughout.
//
// All methods accepting the parameters that govern the randomness are exported and can be used
// to directly implement an algorithm with e.g. more randomness, but with longer Ids and shorter
// life spans.
package shortid
import (
randc "crypto/rand"
"errors"
"fmt"
"math"
randm "math/rand"
"sync"
"sync/atomic"
"time"
"unsafe"
)
// Version defines the library version.
const Version = 1.1
// DefaultABC is the default URL-friendly alphabet.
const DefaultABC = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_-"
// Abc represents a shuffled alphabet used to generate the Ids and provides methods to
// encode data.
type Abc struct {
alphabet []rune
}
// Shortid type represents a short Id generator working with a given alphabet.
type Shortid struct {
abc Abc
worker uint
epoch time.Time // ids can be generated for 34 years since this date
ms uint // ms since epoch for the last id
count uint // request count within the same ms
mx sync.Mutex // locks access to ms and count
}
var shortid *Shortid
func init() {
shortid = MustNew(0, DefaultABC, 1)
}
// GetDefault retrieves the default short Id generator initialised with the default alphabet,
// worker=0 and seed=1. The default can be overwritten using SetDefault.
func GetDefault() *Shortid {
return (*Shortid)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&shortid))))
}
// SetDefault overwrites the default generator.
func SetDefault(sid *Shortid) {
target := (*unsafe.Pointer)(unsafe.Pointer(&shortid))
source := unsafe.Pointer(sid)
atomic.SwapPointer(target, source)
}
// Generate generates an Id using the default generator.
func Generate() (string, error) {
return shortid.Generate()
}
// MustGenerate acts just like Generate, but panics instead of returning errors.
func MustGenerate() string {
id, err := Generate()
if err == nil {
return id
}
panic(err)
}
// New constructs an instance of the short Id generator for the given worker number [0,31], alphabet
// (64 unique symbols) and seed value (to shuffle the alphabet). The worker number should be
// different for multiple or distributed processes generating Ids into the same data space. The
// seed, on contrary, should be identical.
func New(worker uint8, alphabet string, seed uint64) (*Shortid, error) {
if worker > 31 {
return nil, errors.New("expected worker in the range [0,31]")
}
abc, err := NewAbc(alphabet, seed)
if err == nil {
sid := &Shortid{
abc: abc,
worker: uint(worker),
epoch: time.Date(2016, time.January, 1, 0, 0, 0, 0, time.UTC),
ms: 0,
count: 0,
}
return sid, nil
}
return nil, err
}
// MustNew acts just like New, but panics instead of returning errors.
func MustNew(worker uint8, alphabet string, seed uint64) *Shortid {
sid, err := New(worker, alphabet, seed)
if err == nil {
return sid
}
panic(err)
}
// Generate generates a new short Id.
func (sid *Shortid) Generate() (string, error) {
return sid.GenerateInternal(nil, sid.epoch)
}
// MustGenerate acts just like Generate, but panics instead of returning errors.
func (sid *Shortid) MustGenerate() string {
id, err := sid.Generate()
if err == nil {
return id
}
panic(err)
}
// GenerateInternal should only be used for testing purposes.
func (sid *Shortid) GenerateInternal(tm *time.Time, epoch time.Time) (string, error) {
ms, count := sid.getMsAndCounter(tm, epoch)
idrunes := make([]rune, 9)
if tmp, err := sid.abc.Encode(ms, 8, 5); err == nil {
copy(idrunes, tmp) // first 8 symbols
} else {
return "", err
}
if tmp, err := sid.abc.Encode(sid.worker, 1, 5); err == nil {
idrunes[8] = tmp[0]
} else {
return "", err
}
if count > 0 {
if countrunes, err := sid.abc.Encode(count, 0, 6); err == nil {
// only extend if really need it
idrunes = append(idrunes, countrunes...)
} else {
return "", err
}
}
return string(idrunes), nil
}
func (sid *Shortid) getMsAndCounter(tm *time.Time, epoch time.Time) (uint, uint) {
sid.mx.Lock()
defer sid.mx.Unlock()
var ms uint
if tm != nil {
ms = uint(tm.Sub(epoch).Nanoseconds() / 1000000)
} else {
ms = uint(time.Now().Sub(epoch).Nanoseconds() / 1000000)
}
if ms == sid.ms {
sid.count++
} else {
sid.count = 0
sid.ms = ms
}
return sid.ms, sid.count
}
// String returns a string representation of the short Id generator.
func (sid *Shortid) String() string {
return fmt.Sprintf("Shortid(worker=%v, epoch=%v, abc=%v)", sid.worker, sid.epoch, sid.abc)
}
// Abc returns the instance of alphabet used for representing the Ids.
func (sid *Shortid) Abc() Abc {
return sid.abc
}
// Epoch returns the value of epoch used as the beginning of millisecond counting (normally
// 2016-01-01 00:00:00 local time)
func (sid *Shortid) Epoch() time.Time {
return sid.epoch
}
// Worker returns the value of worker for this short Id generator.
func (sid *Shortid) Worker() uint {
return sid.worker
}
// NewAbc constructs a new instance of shuffled alphabet to be used for Id representation.
func NewAbc(alphabet string, seed uint64) (Abc, error) {
runes := []rune(alphabet)
if len(runes) != len(DefaultABC) {
return Abc{}, fmt.Errorf("alphabet must contain %v unique characters", len(DefaultABC))
}
if nonUnique(runes) {
return Abc{}, errors.New("alphabet must contain unique characters only")
}
abc := Abc{alphabet: nil}
abc.shuffle(alphabet, seed)
return abc, nil
}
// MustNewAbc acts just like NewAbc, but panics instead of returning errors.
func MustNewAbc(alphabet string, seed uint64) Abc {
res, err := NewAbc(alphabet, seed)
if err == nil {
return res
}
panic(err)
}
func nonUnique(runes []rune) bool {
found := make(map[rune]struct{})
for _, r := range runes {
if _, seen := found[r]; !seen {
found[r] = struct{}{}
}
}
return len(found) < len(runes)
}
func (abc *Abc) shuffle(alphabet string, seed uint64) {
source := []rune(alphabet)
for len(source) > 1 {
seed = (seed*9301 + 49297) % 233280
i := int(seed * uint64(len(source)) / 233280)
abc.alphabet = append(abc.alphabet, source[i])
source = append(source[:i], source[i+1:]...)
}
abc.alphabet = append(abc.alphabet, source[0])
} |
// Encode encodes a given value into a slice of runes of length nsymbols. In case nsymbols==0, the
// length of the result is automatically computed from data. Even if fewer symbols are required to
// encode the data than nsymbols, all positions are used encoding 0 where required to guarantee | random_line_split |
|
shortid.go | QQs
// KpTvcui99k
// KFTGcuiQ9p
// KFmGeu-Q9O
// tFTvcu-QQt
// tpTveu-99u
//
// Life span
//
// The package guarantees the generation of unique Ids with zero collisions for 34 years
// (1/1/2016-1/1/2050) using the same worker Id within a single (although concurrent) application if
// application restarts take longer than 1 millisecond. The package supports up to 32 workers, all
// providing unique sequences.
//
// Implementation details
//
// Although heavily inspired by the node.js shortid library this is
// not a simple Go port. In addition it
//
// - is safe for concurrent use;
// - does not require any yearly version/epoch resets;
// - provides stable Id size over a long period at the rate of 1ms;
// - guarantees no collisions (due to guaranteed fixed size of Ids between milliseconds and because
// multiple requests within the same ms lead to longer Ids with the prefix unique to the ms);
// - supports 32 workers rather than 16.
//
// The algorithm uses less randomness than the original node.js implementation, which permits to
// extend the life span as well as reduce and guarantee the length. In general terms, each Id
// has the following 3 pieces of information encoded: the millisecond (first 8 symbols), the worker
// Id (9th symbol), running concurrent counter within the same millisecond, only if required, over
// all remaining symbols. The element of randomness per symbol is 1/2 for the worker and the
// millisecond and 0 for the counter. Here 0 means no randomness, i.e. every value is encoded using
// a 64-base alphabet; 1/2 means one of two matching symbols of the supplied alphabet, 1/4 one of
// four matching symbols. The original algorithm of the node.js module uses 1/4 throughout.
//
// All methods accepting the parameters that govern the randomness are exported and can be used
// to directly implement an algorithm with e.g. more randomness, but with longer Ids and shorter
// life spans.
package shortid
import (
randc "crypto/rand"
"errors"
"fmt"
"math"
randm "math/rand"
"sync"
"sync/atomic"
"time"
"unsafe"
)
// Version defined the library version.
const Version = 1.1
// DefaultABC is the default URL-friendly alphabet.
const DefaultABC = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_-"
// Abc represents a shuffled alphabet used to generate the Ids and provides methods to
// encode data.
type Abc struct {
alphabet []rune
}
// Shortid type represents a short Id generator working with a given alphabet.
type Shortid struct {
abc Abc
worker uint
epoch time.Time // ids can be generated for 34 years since this date
ms uint // ms since epoch for the last id
count uint // request count within the same ms
mx sync.Mutex // locks access to ms and count
}
var shortid *Shortid
func init() {
shortid = MustNew(0, DefaultABC, 1)
}
// GetDefault retrieves the default short Id generator initialised with the default alphabet,
// worker=0 and seed=1. The default can be overwritten using SetDefault.
func GetDefault() *Shortid {
return (*Shortid)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&shortid))))
}
// SetDefault overwrites the default generator.
func SetDefault(sid *Shortid) {
target := (*unsafe.Pointer)(unsafe.Pointer(&shortid))
source := unsafe.Pointer(sid)
atomic.SwapPointer(target, source)
}
// Generate generates an Id using the default generator.
func Generate() (string, error) {
return shortid.Generate()
}
// MustGenerate acts just like Generate, but panics instead of returning errors.
func MustGenerate() string {
id, err := Generate()
if err == nil {
return id
}
panic(err)
}
// New constructs an instance of the short Id generator for the given worker number [0,31], alphabet
// (64 unique symbols) and seed value (to shuffle the alphabet). The worker number should be
// different for multiple or distributed processes generating Ids into the same data space. The
// seed, on contrary, should be identical.
func New(worker uint8, alphabet string, seed uint64) (*Shortid, error) {
if worker > 31 {
return nil, errors.New("expected worker in the range [0,31]")
}
abc, err := NewAbc(alphabet, seed)
if err == nil {
sid := &Shortid{
abc: abc,
worker: uint(worker),
epoch: time.Date(2016, time.January, 1, 0, 0, 0, 0, time.UTC),
ms: 0,
count: 0,
}
return sid, nil
}
return nil, err
}
// MustNew acts just like New, but panics instead of returning errors.
func MustNew(worker uint8, alphabet string, seed uint64) *Shortid {
sid, err := New(worker, alphabet, seed)
if err == nil {
return sid
}
panic(err)
}
// Generate generates a new short Id.
func (sid *Shortid) Generate() (string, error) {
return sid.GenerateInternal(nil, sid.epoch)
}
// MustGenerate acts just like Generate, but panics instead of returning errors.
func (sid *Shortid) MustGenerate() string {
id, err := sid.Generate()
if err == nil {
return id
}
panic(err)
}
// GenerateInternal should only be used for testing purposes.
func (sid *Shortid) GenerateInternal(tm *time.Time, epoch time.Time) (string, error) {
ms, count := sid.getMsAndCounter(tm, epoch)
idrunes := make([]rune, 9)
if tmp, err := sid.abc.Encode(ms, 8, 5); err == nil {
copy(idrunes, tmp) // first 8 symbols
} else {
return "", err
}
if tmp, err := sid.abc.Encode(sid.worker, 1, 5); err == nil {
idrunes[8] = tmp[0]
} else {
return "", err
}
if count > 0 {
if countrunes, err := sid.abc.Encode(count, 0, 6); err == nil {
// only extend if really need it
idrunes = append(idrunes, countrunes...)
} else {
return "", err
}
}
return string(idrunes), nil
}
func (sid *Shortid) getMsAndCounter(tm *time.Time, epoch time.Time) (uint, uint) {
sid.mx.Lock()
defer sid.mx.Unlock()
var ms uint
if tm != nil {
ms = uint(tm.Sub(epoch).Nanoseconds() / 1000000)
} else {
ms = uint(time.Now().Sub(epoch).Nanoseconds() / 1000000)
}
if ms == sid.ms {
sid.count++
} else {
sid.count = 0
sid.ms = ms
}
return sid.ms, sid.count
}
// String returns a string representation of the short Id generator.
func (sid *Shortid) String() string {
return fmt.Sprintf("Shortid(worker=%v, epoch=%v, abc=%v)", sid.worker, sid.epoch, sid.abc)
}
// Abc returns the instance of alphabet used for representing the Ids.
func (sid *Shortid) Abc() Abc {
return sid.abc
}
// Epoch returns the value of epoch used as the beginning of millisecond counting (normally
// 2016-01-01 00:00:00 local time)
func (sid *Shortid) Epoch() time.Time {
return sid.epoch
}
// Worker returns the value of worker for this short Id generator.
func (sid *Shortid) Worker() uint {
return sid.worker
}
// NewAbc constructs a new instance of shuffled alphabet to be used for Id representation.
func | (alphabet string, seed uint64) (Abc, error) {
runes := []rune(alphabet)
if len(runes) != len(DefaultABC) {
return Abc{}, fmt.Errorf("alphabet must contain %v unique characters", len(DefaultABC))
}
if nonUnique(runes) {
return Abc{}, errors.New("alphabet must contain unique characters only")
}
abc := Abc{alphabet: nil}
abc.shuffle(alphabet, seed)
return abc, nil
}
// MustNewAbc acts just like NewAbc, but panics instead of returning errors.
func MustNewAbc(alphabet string, seed uint64) Abc {
res, err := NewAbc(alphabet, seed)
if err == nil {
return res
}
panic(err)
}
func nonUnique(runes []rune) bool {
found := make(map[rune]struct{})
for _, r := range runes {
if _, seen := found[r]; | NewAbc | identifier_name |
shortid.go | }
// GetDefault retrieves the default short Id generator initialised with the default alphabet,
// worker=0 and seed=1. The default can be overwritten using SetDefault.
func GetDefault() *Shortid {
return (*Shortid)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&shortid))))
}
// SetDefault overwrites the default generator.
func SetDefault(sid *Shortid) {
target := (*unsafe.Pointer)(unsafe.Pointer(&shortid))
source := unsafe.Pointer(sid)
atomic.SwapPointer(target, source)
}
// Generate generates an Id using the default generator.
func Generate() (string, error) {
return shortid.Generate()
}
// MustGenerate acts just like Generate, but panics instead of returning errors.
func MustGenerate() string {
id, err := Generate()
if err == nil {
return id
}
panic(err)
}
// New constructs an instance of the short Id generator for the given worker number [0,31], alphabet
// (64 unique symbols) and seed value (to shuffle the alphabet). The worker number should be
// different for multiple or distributed processes generating Ids into the same data space. The
// seed, on contrary, should be identical.
func New(worker uint8, alphabet string, seed uint64) (*Shortid, error) {
if worker > 31 {
return nil, errors.New("expected worker in the range [0,31]")
}
abc, err := NewAbc(alphabet, seed)
if err == nil {
sid := &Shortid{
abc: abc,
worker: uint(worker),
epoch: time.Date(2016, time.January, 1, 0, 0, 0, 0, time.UTC),
ms: 0,
count: 0,
}
return sid, nil
}
return nil, err
}
// MustNew acts just like New, but panics instead of returning errors.
func MustNew(worker uint8, alphabet string, seed uint64) *Shortid {
sid, err := New(worker, alphabet, seed)
if err == nil {
return sid
}
panic(err)
}
// Generate generates a new short Id.
func (sid *Shortid) Generate() (string, error) {
return sid.GenerateInternal(nil, sid.epoch)
}
// MustGenerate acts just like Generate, but panics instead of returning errors.
func (sid *Shortid) MustGenerate() string {
id, err := sid.Generate()
if err == nil {
return id
}
panic(err)
}
// GenerateInternal should only be used for testing purposes.
func (sid *Shortid) GenerateInternal(tm *time.Time, epoch time.Time) (string, error) {
ms, count := sid.getMsAndCounter(tm, epoch)
idrunes := make([]rune, 9)
if tmp, err := sid.abc.Encode(ms, 8, 5); err == nil {
copy(idrunes, tmp) // first 8 symbols
} else {
return "", err
}
if tmp, err := sid.abc.Encode(sid.worker, 1, 5); err == nil {
idrunes[8] = tmp[0]
} else {
return "", err
}
if count > 0 {
if countrunes, err := sid.abc.Encode(count, 0, 6); err == nil {
// only extend if really need it
idrunes = append(idrunes, countrunes...)
} else {
return "", err
}
}
return string(idrunes), nil
}
func (sid *Shortid) getMsAndCounter(tm *time.Time, epoch time.Time) (uint, uint) {
sid.mx.Lock()
defer sid.mx.Unlock()
var ms uint
if tm != nil {
ms = uint(tm.Sub(epoch).Nanoseconds() / 1000000)
} else {
ms = uint(time.Now().Sub(epoch).Nanoseconds() / 1000000)
}
if ms == sid.ms {
sid.count++
} else {
sid.count = 0
sid.ms = ms
}
return sid.ms, sid.count
}
// String returns a string representation of the short Id generator.
func (sid *Shortid) String() string {
return fmt.Sprintf("Shortid(worker=%v, epoch=%v, abc=%v)", sid.worker, sid.epoch, sid.abc)
}
// Abc returns the instance of alphabet used for representing the Ids.
func (sid *Shortid) Abc() Abc {
return sid.abc
}
// Epoch returns the value of epoch used as the beginning of millisecond counting (normally
// 2016-01-01 00:00:00 local time)
func (sid *Shortid) Epoch() time.Time {
return sid.epoch
}
// Worker returns the value of worker for this short Id generator.
func (sid *Shortid) Worker() uint {
return sid.worker
}
// NewAbc constructs a new instance of shuffled alphabet to be used for Id representation.
func NewAbc(alphabet string, seed uint64) (Abc, error) {
runes := []rune(alphabet)
if len(runes) != len(DefaultABC) {
return Abc{}, fmt.Errorf("alphabet must contain %v unique characters", len(DefaultABC))
}
if nonUnique(runes) {
return Abc{}, errors.New("alphabet must contain unique characters only")
}
abc := Abc{alphabet: nil}
abc.shuffle(alphabet, seed)
return abc, nil
}
// MustNewAbc acts just like NewAbc, but panics instead of returning errors.
func MustNewAbc(alphabet string, seed uint64) Abc {
res, err := NewAbc(alphabet, seed)
if err == nil {
return res
}
panic(err)
}
func nonUnique(runes []rune) bool {
found := make(map[rune]struct{})
for _, r := range runes {
if _, seen := found[r]; !seen {
found[r] = struct{}{}
}
}
return len(found) < len(runes)
}
func (abc *Abc) shuffle(alphabet string, seed uint64) {
source := []rune(alphabet)
for len(source) > 1 {
seed = (seed*9301 + 49297) % 233280
i := int(seed * uint64(len(source)) / 233280)
abc.alphabet = append(abc.alphabet, source[i])
source = append(source[:i], source[i+1:]...)
}
abc.alphabet = append(abc.alphabet, source[0])
}
// Encode encodes a given value into a slice of runes of length nsymbols. In case nsymbols==0, the
// length of the result is automatically computed from data. Even if fewer symbols are required to
// encode the data than nsymbols, all positions are used, encoding 0 where required to guarantee
// uniqueness in case further data is added to the sequence. The value of digits [4,6] represents
// n in 2^n, which defines how much randomness flows into the algorithm: 4 -- every value
// can be represented by 4 symbols in the alphabet (permitting at most 16 values), 5 -- every value
// can be represented by 2 symbols in the alphabet (permitting at most 32 values), 6 -- every value
// is represented by exactly 1 symbol with no randomness (permitting 64 values).
func (abc *Abc) Encode(val, nsymbols, digits uint) ([]rune, error) {
if digits < 4 || 6 < digits {
return nil, fmt.Errorf("allowed digits range [4,6], found %v", digits)
}
var computedSize uint = 1
if val >= 1 {
computedSize = uint(math.Log2(float64(val)))/digits + 1
}
if nsymbols == 0 {
nsymbols = computedSize
} else if nsymbols < computedSize {
return nil, fmt.Errorf("cannot accommodate data, need %v digits, got %v", computedSize, nsymbols)
}
mask := 1<<digits - 1
random := make([]int, int(nsymbols))
// no random component if digits == 6
if digits < 6 {
copy(random, maskedRandomInts(len(random), 0x3f-mask))
}
res := make([]rune, int(nsymbols))
for i := range res {
shift := digits * uint(i)
index := (int(val>>shift) & mask) | random[i]
res[i] = abc.alphabet[index]
}
return res, nil
}
// MustEncode acts just like Encode, but panics instead of returning errors.
func (abc *Abc) MustEncode(val, size, digits uint) []rune {
res, err := abc.Encode(val, size, digits)
if err == nil {
return res
}
panic(err)
}
func maskedRandomInts(size, mask int) []int {
ints := make([]int, size)
bytes := make([]byte, size)
if _, err := randc.Read(bytes); err == nil {
for i, b := range bytes | {
ints[i] = int(b) & mask
} | conditional_block |
|
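The Go row above defines the shortid generator API (New/MustNew, Generate/MustGenerate, and the Abc alphabet helpers). A minimal usage sketch of that API follows; the import path, worker number and seed shown here are illustrative assumptions, not values taken from the row itself.

package main

import (
	"fmt"

	"github.com/teris-io/shortid" // assumed import path for the package shown above
)

func main() {
	// Package-level helper backed by a default generator.
	fmt.Println(shortid.MustGenerate())

	// Explicit generator: worker in [0,31], a 64-symbol alphabet, and a seed
	// that must be shared by every process generating ids into the same id space.
	sid := shortid.MustNew(1, shortid.DefaultABC, 2342)
	for i := 0; i < 3; i++ {
		fmt.Println(sid.MustGenerate())
	}
}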
circuit.rs | , thus PK is a point, sk is a scalar and derive(sk) = sk * B, for a predefined base point B
// - VRF_INPUT and VRF_OUTPUT are elliptic curve points, and vrf(sk, VRF_INPUT) = sk * VRF_INPUT
// - set //TODO
// These are the values that are required to construct the circuit and populate all the wires.
// They are defined as Options as for CRS generation only circuit structure is relevant,
// not the wires' assignments, so knowing the types is enough.
pub struct Ring<'a, E: JubjubEngine> { // TODO: name
// Jubjub curve parameters.
pub params: &'a E::Params,
// The secret key, an element of Jubjub scalar field.
pub sk: Option<E::Fs>,
// The VRF input, a point in Jubjub prime order subgroup.
pub vrf_input: Option<edwards::Point<E, PrimeOrder>>,
// The authentication path of the public key x-coordinate in the Merkle tree,
// the element of Jubjub base field.
// This is enough to build the root as the base point is hardcoded in the circuit in the lookup tables,
// so we can restore the public key from the secret key.
pub auth_path: Vec<Option<(E::Fr, bool)>>,
}
impl<'a, E: JubjubEngine> Circuit<E> for Ring<'a, E> {
fn synthesize<CS: ConstraintSystem<E>>(self, cs: &mut CS) -> Result<(), SynthesisError> {
// Binary representation of the secret key, a prover's private input.
// fs_bits wires and fs_bits booleanity constraints, where fs_bits = 252 is Jubjub scalar field size.
// It isn't (range-)constrained to be an element of the field, so small values will have duplicate representations.
// That doesn't matter for the following reasons: // TODO: double-check
// 1. Knowledge of a congruence of the secret key is equivalent to the knowledge of the secret key,
// and the check sk * G = PK passes for a congruent (sk + n|fs|) * G = sk * G + n|fs| * G == PK + O
// 2. Multiplication by a congruent secret key results in the same VRF output:
// (sk + n|fs|) * H == sk * H, if ord(H) == |fs|
let sk_bits = boolean::field_into_boolean_vec_le(cs.namespace(|| "sk"), self.sk)?;
// Derives the public key from the secret key using the hardcoded generator,
// that is guaranteed to be in the prime order subgroup,
// so no on-curve or subgroup checks are required //TODO: double-check
// 750 constraints according to Zcash spec A.3.3.7
let pk = ecc::fixed_base_multiplication(
cs.namespace(|| "PK = sk * G"),
FixedGenerators::SpendingKeyGenerator, //TODO: any NUMS point of full order
&sk_bits,
self.params,
)?;
//
// // Defines first 2 public input wires for the coordinates of the public key in Jubjub base field (~ BLS scalar field)
// // and assures their assignment matches the values calculated in the previous step in 2 constraints.
// // These 2 constraints are not strictly required, just Bellman is implemented this way.
// // TODO: x coordinate only
// pk.inputize(cs.namespace(|| "PK"))?;
// Allocates VRF_BASE on the circuit and checks that it is a point on the curve
// adds 4 constraints (A.3.3.1) to check that it is indeed a point on Jubjub
let vrf_input = ecc::EdwardsPoint::witness(
cs.namespace(|| "VRF_INPUT"),
self.vrf_input,
self.params,
)?;
// Checks that VRF_BASE lies in a proper subgroup of Jubjub. Not strictly required as it is the point provided
// externally as a public input, so MUST be previously checked by the verifier off-circuit.
// But why not double-check it in 16 = 3 * 5 (ec doubling) + 1 (!=0) constraints
// Moreover //TODO
vrf_input.assert_not_small_order(cs.namespace(|| "VRF_BASE not small order"), self.params)?;
// Defines the 3rd and the 4th input wires to be equal VRF_BASE coordinates,
// thus adding 2 more constraints
vrf_input.inputize(cs.namespace(|| "VRF_BASE input"))?;
// Produces VRF output = sk * VRF_BASE, it is a variable base multiplication, thus
// 3252 constraints (A.3.3.8)
// TODO: actually it is 13 more as it is full-length (252 bits) multiplication below
let vrf = vrf_input.mul(cs.namespace(|| "vrf = sk * VRF_BASE"), &sk_bits, self.params)?;
// And 2 more constraints to verify the output
vrf.inputize(cs.namespace(|| "vrf"))?;
// So the circuit is 6 (public inputs) + 252 (sk booleanity) + 750 (fixed-base mul)
// + 20 (on-curve + subgroup check) + 3252 (var-base mul)
// = 4280 constraints
// This is an injective encoding, as cur is a
// point in the prime order subgroup.
let mut cur = pk.get_x().clone();
// Ascend the merkle tree authentication path
for (i, e) in self.auth_path.into_iter().enumerate() {
let cs = &mut cs.namespace(|| format!("merkle tree hash {}", i));
// Determines if the current subtree is the "right" leaf at this
// depth of the tree.
let cur_is_right = boolean::Boolean::from(boolean::AllocatedBit::alloc(
cs.namespace(|| "position bit"),
e.map(|e| e.1),
)?);
// Witness the authentication path element adjacent
// at this depth.
let path_element =
num::AllocatedNum::alloc(cs.namespace(|| "path element"), || Ok(e.get()?.0))?;
// Swap the two if the current subtree is on the right
let (xl, xr) = num::AllocatedNum::conditionally_reverse(
cs.namespace(|| "conditional reversal of preimage"),
&cur,
&path_element,
&cur_is_right,
)?;
// We don't need to be strict, because the function is
// collision-resistant. If the prover witnesses a congruency,
// they will be unable to find an authentication path in the
// tree with high probability.
let mut preimage = vec![];
preimage.extend(xl.to_bits_le(cs.namespace(|| "xl into bits"))?);
preimage.extend(xr.to_bits_le(cs.namespace(|| "xr into bits"))?);
// Compute the new subtree value
cur = pedersen_hash::pedersen_hash(
cs.namespace(|| "computation of pedersen hash"),
pedersen_hash::Personalization::MerkleTree(i),
&preimage,
self.params,
)?
.get_x()
.clone(); // Injective encoding
}
cur.inputize(cs.namespace(|| "anchor"))?;
Ok(())
}
}
#[test]
fn test_ring() {
use bellman::gadgets::test::TestConstraintSystem;
use pairing::bls12_381::{Bls12, Fr,};
use zcash_primitives::pedersen_hash;
use zcash_primitives::jubjub::{JubjubBls12, fs, edwards,};
use rand_core::{RngCore, SeedableRng,};
use rand_xorshift::XorShiftRng;
let params = &JubjubBls12::new();
let rng = &mut XorShiftRng::from_seed([
0x58, 0x62, 0xbe, 0x3d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc,
0xe5,
]);
let sk = fs::Fs::random(rng);
let vrf_base = edwards::Point::rand(rng, params).mul_by_cofactor(params);
let base_point = params.generator(FixedGenerators::SpendingKeyGenerator);
let pk = base_point.mul(sk, params).to_xy();
let tree_depth = 10;
let auth_path = vec![Some((Fr::random(rng), rng.next_u32() % 2 != 0)); tree_depth];
let mut cur = pk.0;
for (i, val) in auth_path.clone().into_iter().enumerate() {
let (uncle, b) = val.unwrap();
let mut lhs = cur;
let mut rhs = uncle;
if b | {
::std::mem::swap(&mut lhs, &mut rhs);
} | conditional_block |
|
circuit.rs | the given vrf_output is valid for the given vrf_input under
// a key from the predefined set. It formalizes the following language:
// {(VRF_INPUT, VRF_OUTPUT, set) | VRF_OUTPUT = vrf(sk, VRF_INPUT), PK = derive(sk) and PK is in set }, where:
// - sk, PK is an elliptic curve keypair, thus PK is a point, sk is a scalar and derive(sk) = sk * B, for a predefined base point B
// - VRF_INPUT and VRF_OUTPUT are elliptic curve points, and vrf(sk, VRF_INPUT) = sk * VRF_INPUT
// - set //TODO
// These are the values that are required to construct the circuit and populate all the wires.
// They are defined as Options as for CRS generation only circuit structure is relevant,
// not the wires' assignments, so knowing the types is enough.
pub struct | <'a, E: JubjubEngine> { // TODO: name
// Jubjub curve parameters.
pub params: &'a E::Params,
// The secret key, an element of Jubjub scalar field.
pub sk: Option<E::Fs>,
// The VRF input, a point in Jubjub prime order subgroup.
pub vrf_input: Option<edwards::Point<E, PrimeOrder>>,
// The authentication path of the public key x-coordinate in the Merkle tree,
// the element of Jubjub base field.
// This is enough to build the root as the base point is hardcoded in the circuit in the lookup tables,
// so we can restore the public key from the secret key.
pub auth_path: Vec<Option<(E::Fr, bool)>>,
}
impl<'a, E: JubjubEngine> Circuit<E> for Ring<'a, E> {
fn synthesize<CS: ConstraintSystem<E>>(self, cs: &mut CS) -> Result<(), SynthesisError> {
// Binary representation of the secret key, a prover's private input.
// fs_bits wires and fs_bits booleanity constraints, where fs_bits = 252 is Jubjub scalar field size.
// It isn't (range-)constrained to be an element of the field, so small values will have duplicate representations.
// That doesn't matter for the following reasons: // TODO: double-check
// 1. Knowledge of a congruence of the secret key is equivalent to the knowledge of the secret key,
// and the check sk * G = PK passes for a congruent (sk + n|fs|) * G = sk * G + n|fs| * G == PK + O
// 2. Multiplication by a congruent secret key results in the same VRF output:
// (sk + n|fs|) * H == sk * H, if ord(H) == |fs|
let sk_bits = boolean::field_into_boolean_vec_le(cs.namespace(|| "sk"), self.sk)?;
// Derives the public key from the secret key using the hardcoded generator,
// that is guaranteed to be in the prime order subgroup,
// so no on-curve or subgroup checks are required //TODO: double-check
// 750 constraints according to Zcash spec A.3.3.7
let pk = ecc::fixed_base_multiplication(
cs.namespace(|| "PK = sk * G"),
FixedGenerators::SpendingKeyGenerator, //TODO: any NUMS point of full order
&sk_bits,
self.params,
)?;
//
// // Defines first 2 public input wires for the coordinates of the public key in Jubjub base field (~ BLS scalar field)
// // and assures their assignment matches the values calculated in the previous step in 2 constraints.
// // These 2 constraints are not strictly required, just Bellman is implemented this way.
// // TODO: x coordinate only
// pk.inputize(cs.namespace(|| "PK"))?;
// Allocates VRF_BASE on the circuit and checks that it is a point on the curve
// adds 4 constraints (A.3.3.1) to check that it is indeed a point on Jubjub
let vrf_input = ecc::EdwardsPoint::witness(
cs.namespace(|| "VRF_INPUT"),
self.vrf_input,
self.params,
)?;
// Checks that VRF_BASE lies in a proper subgroup of Jubjub. Not strictly required as it is the point provided
// externally as a public input, so MUST be previously checked by the verifier off-circuit.
// But why not double-check it in 16 = 3 * 5 (ec doubling) + 1 (!=0) constraints
// Moreover //TODO
vrf_input.assert_not_small_order(cs.namespace(|| "VRF_BASE not small order"), self.params)?;
// Defines the 3rd and the 4th input wires to be equal VRF_BASE coordinates,
// thus adding 2 more constraints
vrf_input.inputize(cs.namespace(|| "VRF_BASE input"))?;
// Produces VRF output = sk * VRF_BASE, it is a variable base multiplication, thus
// 3252 constraints (A.3.3.8)
// TODO: actually it is 13 more as it is full-length (252 bits) multiplication below
let vrf = vrf_input.mul(cs.namespace(|| "vrf = sk * VRF_BASE"), &sk_bits, self.params)?;
// And 2 more constraints to verify the output
vrf.inputize(cs.namespace(|| "vrf"))?;
// So the circuit is 6 (public inputs) + 252 (sk booleanity) + 750 (fixed-base mul)
// + 20 (on-curve + subgroup check) + 3252 (var-base mul)
// = 4280 constraints
// This is an injective encoding, as cur is a
// point in the prime order subgroup.
let mut cur = pk.get_x().clone();
// Ascend the merkle tree authentication path
for (i, e) in self.auth_path.into_iter().enumerate() {
let cs = &mut cs.namespace(|| format!("merkle tree hash {}", i));
// Determines if the current subtree is the "right" leaf at this
// depth of the tree.
let cur_is_right = boolean::Boolean::from(boolean::AllocatedBit::alloc(
cs.namespace(|| "position bit"),
e.map(|e| e.1),
)?);
// Witness the authentication path element adjacent
// at this depth.
let path_element =
num::AllocatedNum::alloc(cs.namespace(|| "path element"), || Ok(e.get()?.0))?;
// Swap the two if the current subtree is on the right
let (xl, xr) = num::AllocatedNum::conditionally_reverse(
cs.namespace(|| "conditional reversal of preimage"),
&cur,
&path_element,
&cur_is_right,
)?;
// We don't need to be strict, because the function is
// collision-resistant. If the prover witnesses a congruency,
// they will be unable to find an authentication path in the
// tree with high probability.
let mut preimage = vec![];
preimage.extend(xl.to_bits_le(cs.namespace(|| "xl into bits"))?);
preimage.extend(xr.to_bits_le(cs.namespace(|| "xr into bits"))?);
// Compute the new subtree value
cur = pedersen_hash::pedersen_hash(
cs.namespace(|| "computation of pedersen hash"),
pedersen_hash::Personalization::MerkleTree(i),
&preimage,
self.params,
)?
.get_x()
.clone(); // Injective encoding
}
cur.inputize(cs.namespace(|| "anchor"))?;
Ok(())
}
}
#[test]
fn test_ring() {
use bellman::gadgets::test::TestConstraintSystem;
use pairing::bls12_381::{Bls12, Fr,};
use zcash_primitives::pedersen_hash;
use zcash_primitives::jubjub::{JubjubBls12, fs, edwards,};
use rand_core::{RngCore, SeedableRng,};
use rand_xorshift::XorShiftRng;
let params = &JubjubBls12::new();
let rng = &mut XorShiftRng::from_seed([
0x58, 0x62, 0xbe, 0x3d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc,
0xe5,
]);
let sk = fs::Fs::random(rng);
let vrf_base = edwards::Point::rand(rng, params).mul_by_cofactor(params);
let base_point = params.generator(FixedGenerators::SpendingKeyGenerator);
let pk = base_point.mul(sk, params).to_xy();
let tree_depth = 10;
let auth_path = vec![Some((Fr::random(rng), rng.next_u32() % 2 | Ring | identifier_name |
circuit.rs | the given vrf_output is valid for the given vrf_input under
// a key from the predefined set. It formalizes the following language:
// {(VRF_INPUT, VRF_OUTPUT, set) | VRF_OUTPUT = vrf(sk, VRF_INPUT), PK = derive(sk) and PK is in set }, where:
// - sk, PK is an elliptic curve keypair, thus PK is a point, sk is a scalar and derive(sk) = sk * B, for a predefined base point B
// - VRF_INPUT and VRF_OUTPUT are elliptic curve points, and vrf(sk, VRF_INPUT) = sk * VRF_INPUT
// - set //TODO
// These are the values that are required to construct the circuit and populate all the wires.
// They are defined as Options as for CRS generation only circuit structure is relevant,
// not the wires' assignments, so knowing the types is enough.
pub struct Ring<'a, E: JubjubEngine> { // TODO: name
// Jubjub curve parameters.
pub params: &'a E::Params,
// The secret key, an element of Jubjub scalar field.
pub sk: Option<E::Fs>,
// The VRF input, a point in Jubjub prime order subgroup.
pub vrf_input: Option<edwards::Point<E, PrimeOrder>>,
// The authentication path of the public key x-coordinate in the Merkle tree,
// the element of Jubjub base field.
// This is enough to build the root as the base point is hardcoded in the circuit in the lookup tables,
// so we can restore the public key from the secret key.
pub auth_path: Vec<Option<(E::Fr, bool)>>,
}
impl<'a, E: JubjubEngine> Circuit<E> for Ring<'a, E> {
fn synthesize<CS: ConstraintSystem<E>>(self, cs: &mut CS) -> Result<(), SynthesisError> {
// Binary representation of the secret key, a prover's private input.
// fs_bits wires and fs_bits booleanity constraints, where fs_bits = 252 is Jubjub scalar field size.
// It isn't (range-)constrained to be an element of the field, so small values will have duplicate representations.
// That doesn't matter for the following reasons: // TODO: double-check
// 1. Knowledge of a congruence of the secret key is equivalent to the knowledge of the secret key,
// and the check sk * G = PK passes for a congruent (sk + n|fs|) * G = sk * G + n|fs| * G == PK + O
// 2. Multiplication by a congruent secret key results in the same VRF output:
// (sk + n|fs|) * H == sk * H, if ord(H) == |fs|
let sk_bits = boolean::field_into_boolean_vec_le(cs.namespace(|| "sk"), self.sk)?;
// Derives the public key from the secret key using the hardcoded generator,
// that is guaranteed to be in the prime order subgroup,
// so no on-curve or subgroup checks are required //TODO: double-check
// 750 constraints according to Zcash spec A.3.3.7
let pk = ecc::fixed_base_multiplication(
cs.namespace(|| "PK = sk * G"),
FixedGenerators::SpendingKeyGenerator, //TODO: any NUMS point of full order
&sk_bits,
self.params,
)?;
//
// // Defines first 2 public input wires for the coordinates of the public key in Jubjub base field (~ BLS scalar field)
// // and assures their assignment matches the values calculated in the previous step in 2 constraints.
// // These 2 constraints are not strictly required, just Bellman is implemented this way.
// // TODO: x coordinate only
// pk.inputize(cs.namespace(|| "PK"))?;
// Allocates VRF_BASE on the circuit and checks that it is a point on the curve
// adds 4 constraints (A.3.3.1) to check that it is indeed a point on Jubjub
let vrf_input = ecc::EdwardsPoint::witness(
cs.namespace(|| "VRF_INPUT"),
self.vrf_input,
self.params,
)?;
// Checks that VRF_BASE lies in a proper subgroup of Jubjub. Not strictly required as it is the point provided
// externally as a public input, so MUST be previously checked by the verifier off-circuit.
// But why not double-check it in 16 = 3 * 5 (ec doubling) + 1 (!=0) constraints
// Moreover //TODO
vrf_input.assert_not_small_order(cs.namespace(|| "VRF_BASE not small order"), self.params)?;
// Defines the 3rd and the 4th input wires to be equal VRF_BASE coordinates,
// thus adding 2 more constraints
vrf_input.inputize(cs.namespace(|| "VRF_BASE input"))?;
// Produces VRF output = sk * VRF_BASE, it is a variable base multiplication, thus
// 3252 constraints (A.3.3.8)
// TODO: actually it is 13 more as it is full-length (252 bits) multiplication below
let vrf = vrf_input.mul(cs.namespace(|| "vrf = sk * VRF_BASE"), &sk_bits, self.params)?;
// And 2 more constraints to verify the output
vrf.inputize(cs.namespace(|| "vrf"))?;
// So the circuit is 6 (public inputs) + 252 (sk booleanity) + 750 (fixed-base mul)
// + 20 (on-curve + subgroup check) + 3252 (var-base mul)
// = 4280 constraints
// This is an injective encoding, as cur is a
// point in the prime order subgroup.
let mut cur = pk.get_x().clone();
// Ascend the merkle tree authentication path
for (i, e) in self.auth_path.into_iter().enumerate() {
let cs = &mut cs.namespace(|| format!("merkle tree hash {}", i));
// Determines if the current subtree is the "right" leaf at this
// depth of the tree.
let cur_is_right = boolean::Boolean::from(boolean::AllocatedBit::alloc(
cs.namespace(|| "position bit"),
e.map(|e| e.1),
)?);
// Witness the authentication path element adjacent
// at this depth.
let path_element =
num::AllocatedNum::alloc(cs.namespace(|| "path element"), || Ok(e.get()?.0))?;
// Swap the two if the current subtree is on the right
let (xl, xr) = num::AllocatedNum::conditionally_reverse(
cs.namespace(|| "conditional reversal of preimage"),
&cur,
&path_element,
&cur_is_right,
)?;
// We don't need to be strict, because the function is
// collision-resistant. If the prover witnesses a congruency,
// they will be unable to find an authentication path in the
// tree with high probability.
let mut preimage = vec![];
preimage.extend(xl.to_bits_le(cs.namespace(|| "xl into bits"))?);
preimage.extend(xr.to_bits_le(cs.namespace(|| "xr into bits"))?);
// Compute the new subtree value
cur = pedersen_hash::pedersen_hash(
cs.namespace(|| "computation of pedersen hash"),
pedersen_hash::Personalization::MerkleTree(i),
&preimage,
self.params,
)?
.get_x()
.clone(); // Injective encoding
}
cur.inputize(cs.namespace(|| "anchor"))?;
Ok(())
}
} | use zcash_primitives::pedersen_hash;
use zcash_primitives::jubjub::{JubjubBls12, fs, edwards,};
use rand_core::{RngCore, SeedableRng,};
use rand_xorshift::XorShiftRng;
let params = &JubjubBls12::new();
let rng = &mut XorShiftRng::from_seed([
0x58, 0x62, 0xbe, 0x3d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc,
0xe5,
]);
let sk = fs::Fs::random(rng);
let vrf_base = edwards::Point::rand(rng, params).mul_by_cofactor(params);
let base_point = params.generator(FixedGenerators::SpendingKeyGenerator);
let pk = base_point.mul(sk, params).to_xy();
let tree_depth = 10;
let auth_path = vec![Some((Fr::random(rng), rng.next_u32() % 2 |
#[test]
fn test_ring() {
use bellman::gadgets::test::TestConstraintSystem;
use pairing::bls12_381::{Bls12, Fr,}; | random_line_split |
blackjack.js | then(m => m.delete({ timeout: 8000 }));
}
if (bet < 1 || !Number.isInteger(Number(bet))) {
if (bet && bet.toLowerCase().includes('k')) {
const givenKay = bet.replace(/k/g, '');
if (!Number.isInteger(Number(givenKay * 1000)) || isNaN(givenKay * 1000)) {
return message.reply('you have to bet a whole number');
} else {
bet = givenKay * 1000;
}
} else if (bet.toLowerCase() === 'all') {
bet = cash;
} else if (bet.toLowerCase() === 'max') {
bet = Math.min(cash, 5000000);
} else if (bet.toLowerCase() === 'half') {
bet = Math.round(cash / 2);
} else {
return message.reply('You have to bet cash');
}
}
if (cash === 0) {
return message.reply('You dont have any cash to bet');
}
if (bet > cash) {
return message.reply(`You only have ${formatNumber(cash)}. You cant bet more than that`);
}
if (bet > 5000000) {
return message.reply(`You can't bet more than **${formatNumber(5000000)} cash** at a time.`);
}
if (bet < 10000) {
return message.reply(`You cant bet less than ${formatNumber(10000)} cash`);
}
let winnings = 0;
const faces = ['A', '2', '3', '4', '5', '6', '7', '8', '9', '10', 'J', 'Q', 'K'];
const values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10];
const suits = ['spades', 'hearts', 'diamonds', 'clubs'];
let stood = false;
let first = true;
function deal () {
// eslint-disable-next-line prefer-const
let card = { face: faces[Math.floor(Math.random() * faces.length)], suit: suits[Math.floor(Math.random() * suits.length)] };
return card;
}
let cards = {
bot: [deal(), deal()],
user: [deal(), deal()]
};
while (addCards('user') === 21 || addCards('bot') === 21) { // redraw
cards = {
bot: [deal(), deal()],
user: [deal(), deal()]
};
}
function getRespectiveIcon (suit) | function getValue (card) {
return { value: values[faces.indexOf(card.face)], card };
}
function addCards (type) {
return cards[type].sort((a, b) => getValue(b).value - getValue(a).value).reduce((p, c) => {
let newCard = getValue(c).value;
if (newCard === 1) {
if (p + 11 <= 21) {
newCard = 11;
}
}
return p + newCard;
}, 0);
}
function score () {
if (addCards('user') > 21) { // User busted
return { result: false, emoji: 'nope', message: 'You lose! Busted!' };
} else if (addCards('bot') > 21) { // Bot busted
return { result: true, emoji: 'ok', message: 'You win! Your opponent busted!' };
} else if (addCards('user') === 21) { // User has exactly 21
return { result: true, emoji: 'ok', message: 'You win! You have 21!' };
} else if (addCards('bot') === 21) { // Bot has exactly 21
return { result: false, emoji: 'nope', message: 'You lose! Your opponent reached 21 before you!' };
} else if (addCards('bot') === addCards('user') && stood) { // Tie
return { result: null, emoji: 'empty', message: 'You tied with your opponent!' };
} else if (addCards('user') <= 21 && cards.user.length === 5) { // User took more than 5 cards without going over 21
return { result: true, emoji: 'ok', message: 'You win! You took 5 cards without going over 21.' };
} else if (addCards('bot') <= 21 && cards.bot.length === 5) { // Bot took more than 5 cards without going over 21
return { result: false, emoji: 'nope', message: 'You lose! Your opponent took 5 cards without going above 21.' };
} else if (addCards('bot') > addCards('user') && stood) {
// If the bot has a score of 17 or more and the user has less than the bot, and the user is also stood
return { result: false, emoji: 'nope', message: `You lose! You have ${addCards('user')}, Dealer has ${addCards('bot')}.` };
} else if (addCards('user') > addCards('bot') && stood) {
// If the user has a higher score than the bot and they are
return { result: true, emoji: 'nope', message: `You win! You have ${addCards('user')}, Dealer has ${addCards('bot')}.` };
} else {
return addCards('user'); // else
}
}
const gambed = async (final) => {
const status = score();
let desc = '';
if (status.constructor === Object) {
const coinCheck = await DB.GetCash;
if (bet > coinCheck.cash) {
await DB.SubCash(message.author.id, (bet / 2));
return message.reply(`You cant afford to pay your bet anymore. I took half your bet instead`);
}
let finalMsg = '';
// Win
if (status.result) {
winnings = Math.ceil(bet); // ceil here to avoid winnings being 0
winnings = Math.floor(bet)
await DB.AddCash(message.author.id, winnings);
const newCash = await DB.GetCash(message.author.id)
finalMsg += `\nYou won **${winnings.toLocaleString()}** cash. You now have ${(formatNumber(newCash.cash))}.`;
} else {
// Tie
if (status.result === null) {
const newCash = await DB.GetCash(message.author.id)
finalMsg += `Your cash hasn't changed! You have ${formatNumber(newCash.cash)} cash still.`;
} else {
// Loss
await DB.SubCash(message.author.id, bet);
const newCash = await DB.GetCash(message.author.id);
finalMsg += `You lost **${Number(bet).toLocaleString()}** cash. You now have ${(formatNumber(newCash.cash)).toLocaleString()}.`;
}
}
final = true;
desc = `**${status.message}** ${finalMsg}`;
}
const satisfied = final;
message.channel.send({ content: !final ? `${first ? 'What do you want to do?\n' : ''}Type \`h\` to **hit**, type \`s\` to **stand**, or type \`e\` to **end** the game.` : '',
embed: {
author:
{
name: `${user.username}'s blackjack game`,
icon_url: user.displayAvatarURL({ dynamic: true })
},
color: final ? status.result === null ? 16757504 : (winnings ? 5025616 : 15022389) : 2533018,
description: desc,
fields: [
{
name: message.author.username,
value: `Cards - **${cards.user.map(card => `[\`${getRespectiveIcon(card.suit)} ${card.face}\`](https://google.com)`).join(' ')}**\nTotal - \`${addCards('user')}\``,
inline: true
},
{ // Always show the first card, all other cards are unknown until stood or final is called
name: 'WOK',
value: `Cards - **${cards.bot.slice(0, satisfied ? cards.bot.length : 2).map((card, i) => (!i || satisfied) ? `[\`${getRespectiveIcon(card.suit)} ${card.face}\`](https://google.com)` : '`?`').join(' ')}**\nTotal - \`${
satisfied ? addCards('bot') : ' ? '}\``,
inline: true
| {
switch (suit) {
case 'spades':
return '♠';
case 'hearts':
return '♥';
case 'diamonds':
return '♦';
case 'clubs':
return '♣';
}
}
| identifier_body |
blackjack.js | (message) {
const DB = require('djs-economy');
const user = message.author;
const cash = await DB.GetCash(message.author.id).cash;
const formatNumber = require('../../functions/regex');
const args = message.content.slice(prefix.length).trim().split(/ /g);
let bet = args[1];
if (!bet) {
return message.reply(`Invalid Syntax! ${prefix}hl <bet>`).then(m => m.delete({ timeout: 8000 }));
}
if (bet < 1 || !Number.isInteger(Number(bet))) {
if (bet && bet.toLowerCase().includes('k')) {
const givenKay = bet.replace(/k/g, '');
if (!Number.isInteger(Number(givenKay * 1000)) || isNaN(givenKay * 1000)) {
return message.reply('you have to bet a whole number');
} else {
bet = givenKay * 1000;
}
} else if (bet.toLowerCase() === 'all') {
bet = cash;
} else if (bet.toLowerCase() === 'max') {
bet = Math.min(cash, 5000000);
} else if (bet.toLowerCase() === 'half') {
bet = Math.round(cash / 2);
} else {
return message.reply('You have to bet cash');
}
}
if (cash === 0) {
return message.reply('You dont have any cash to bet');
}
if (bet > cash) {
return message.reply(`You only have ${formatNumber(cash)}. You cant bet more than that`);
}
if (bet > 5000000) {
return message.reply(`You can't bet more than **${formatNumber(5000000)} cash** at a time.`);
}
if (bet < 10000) {
return message.reply(`You cant bet less than ${formatNumber(10000)} cash`);
}
let winnings = 0;
const faces = ['A', '2', '3', '4', '5', '6', '7', '8', '9', '10', 'J', 'Q', 'K'];
const values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10];
const suits = ['spades', 'hearts', 'diamonds', 'clubs'];
let stood = false;
let first = true;
function deal () {
// eslint-disable-next-line prefer-const
let card = { face: faces[Math.floor(Math.random() * faces.length)], suit: suits[Math.floor(Math.random() * suits.length)] };
return card;
}
let cards = {
bot: [deal(), deal()],
user: [deal(), deal()]
};
while (addCards('user') === 21 || addCards('bot') === 21) { // redraw
cards = {
bot: [deal(), deal()],
user: [deal(), deal()]
};
}
function getRespectiveIcon (suit) {
switch (suit) {
case 'spades':
return '♠';
case 'hearts':
return '♥';
case 'diamonds':
return '♦';
case 'clubs':
return '♣';
}
}
function getValue (card) {
return { value: values[faces.indexOf(card.face)], card };
}
function addCards (type) {
return cards[type].sort((a, b) => getValue(b).value - getValue(a).value).reduce((p, c) => {
let newCard = getValue(c).value;
if (newCard === 1) {
if (p + 11 <= 21) {
newCard = 11;
}
}
return p + newCard;
}, 0);
}
function score () {
if (addCards('user') > 21) { // User busted
return { result: false, emoji: 'nope', message: 'You lose! Busted!' };
} else if (addCards('bot') > 21) { // Bot busted
return { result: true, emoji: 'ok', message: 'You win! Your opponent busted!' };
} else if (addCards('user') === 21) { // User has exactly 21
return { result: true, emoji: 'ok', message: 'You win! You have 21!' };
} else if (addCards('bot') === 21) { // Bot has exactly 21
return { result: false, emoji: 'nope', message: 'You lose! Your opponent reached 21 before you!' };
} else if (addCards('bot') === addCards('user') && stood) { // Tie
return { result: null, emoji: 'empty', message: 'You tied with your opponent!' };
} else if (addCards('user') <= 21 && cards.user.length === 5) { // User took more than 5 cards without going over 21
return { result: true, emoji: 'ok', message: 'You win! You took 5 cards without going over 21.' };
} else if (addCards('bot') <= 21 && cards.bot.length === 5) { // Bot took more than 5 cards without going over 21
return { result: false, emoji: 'nope', message: 'You lose! Your opponent took 5 cards without going above 21.' };
} else if (addCards('bot') > addCards('user') && stood) {
// If the bot has a score of 17 or more and the user has less than the bot, and the user is also stood
return { result: false, emoji: 'nope', message: `You lose! You have ${addCards('user')}, Dealer has ${addCards('bot')}.` };
} else if (addCards('user') > addCards('bot') && stood) {
// If the user has a higher score than the bot and they are
return { result: true, emoji: 'nope', message: `You win! You have ${addCards('user')}, Dealer has ${addCards('bot')}.` };
} else {
return addCards('user'); // else
}
}
const gambed = async (final) => {
const status = score();
let desc = '';
if (status.constructor === Object) {
const coinCheck = await DB.GetCash;
if (bet > coinCheck.cash) {
await DB.SubCash(message.author.id, (bet / 2));
return message.reply(`You cant afford to pay your bet anymore. I took half your bet instead`);
}
let finalMsg = '';
// Win
if (status.result) {
winnings = Math.ceil(bet); // ceil here to avoid winnings being 0
winnings = Math.floor(bet)
await DB.AddCash(message.author.id, winnings);
const newCash = await DB.GetCash(message.author.id)
finalMsg += `\nYou won **${winnings.toLocaleString()}** cash. You now have ${(formatNumber(newCash.cash))}.`;
} else {
// Tie
if (status.result === null) {
const newCash = await DB.GetCash(message.author.id)
finalMsg += `Your cash hasn't changed! You have ${formatNumber(newCash.cash)} cash still.`;
} else {
// Loss
await DB.SubCash(message.author.id, bet);
const newCash = await DB.GetCash(message.author.id);
finalMsg += `You lost **${Number(bet).toLocaleString()}** cash. You now have ${(formatNumber(newCash.cash)).toLocaleString()}.`;
}
}
final = true;
desc = `**${status.message}** ${finalMsg}`;
}
const satisfied = final;
message.channel.send({ content: !final ? `${first ? 'What do you want to do?\n' : ''}Type \`h\` to **hit**, type \`s\` to **stand**, or type \`e\` to **end** the game.` : '',
embed: {
author:
{
name: `${user.username}'s blackjack game`,
icon_url: user.displayAvatarURL({ dynamic: true })
},
color: final ? status.result === null ? 16757504 : (winnings ? 5025616 : 15022389) : 2533018,
description: desc,
fields: [
{
name: message.author.username,
value: `Cards - **${cards.user.map(card => `[\`${getRespectiveIcon(card.suit)} ${card.face}\`](https://google.com)`).join(' ')}**\nTotal - \`${addCards('user')}\``,
inline: true
},
{ // Always show the first card, all other cards are unknown until stood or final is called
name: 'WOK',
value | execute | identifier_name |
|
blackjack.js | then(m => m.delete({ timeout: 8000 }));
}
if (bet < 1 || !Number.isInteger(Number(bet))) {
if (bet && bet.toLowerCase().includes('k')) {
const givenKay = bet.replace(/k/g, '');
if (!Number.isInteger(Number(givenKay * 1000)) || isNaN(givenKay * 1000)) {
return message.reply('you have to bet a whole number');
} else {
bet = givenKay * 1000;
}
} else if (bet.toLowerCase() === 'all') {
bet = cash;
} else if (bet.toLowerCase() === 'max') {
bet = Math.min(cash, 5000000);
} else if (bet.toLowerCase() === 'half') {
bet = Math.round(cash / 2);
} else {
return message.reply('You have to bet cash');
}
}
if (cash === 0) {
return message.reply('You dont have any cash to bet');
}
if (bet > cash) {
return message.reply(`You only have ${formatNumber(cash)}. You cant bet more than that`);
}
if (bet > 5000000) {
return message.reply(`You can't bet more than **${formatNumber(5000000)} cash** at a time.`);
}
if (bet < 10000) {
return message.reply(`You cant bet less than ${formatNumber(10000)} cash`);
}
let winnings = 0;
const faces = ['A', '2', '3', '4', '5', '6', '7', '8', '9', '10', 'J', 'Q', 'K'];
const values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10];
const suits = ['spades', 'hearts', 'diamonds', 'clubs'];
let stood = false;
let first = true;
function deal () {
// eslint-disable-next-line prefer-const
let card = { face: faces[Math.floor(Math.random() * faces.length)], suit: suits[Math.floor(Math.random() * suits.length)] };
return card;
}
let cards = {
bot: [deal(), deal()],
user: [deal(), deal()]
};
while (addCards('user') === 21 || addCards('bot') === 21) |
function getRespectiveIcon (suit) {
switch (suit) {
case 'spades':
return '♠';
case 'hearts':
return '♥';
case 'diamonds':
return '♦';
case 'clubs':
return '♣';
}
}
function getValue (card) {
return { value: values[faces.indexOf(card.face)], card };
}
function addCards (type) {
return cards[type].sort((a, b) => getValue(b).value - getValue(a).value).reduce((p, c) => {
let newCard = getValue(c).value;
if (newCard === 1) {
if (p + 11 <= 21) {
newCard = 11;
}
}
return p + newCard;
}, 0);
}
function score () {
if (addCards('user') > 21) { // User busted
return { result: false, emoji: 'nope', message: 'You lose! Busted!' };
} else if (addCards('bot') > 21) { // Bot busted
return { result: true, emoji: 'ok', message: 'You win! Your opponent busted!' };
} else if (addCards('user') === 21) { // User has exactly 21
return { result: true, emoji: 'ok', message: 'You win! You have 21!' };
} else if (addCards('bot') === 21) { // Bot has exactly 21
return { result: false, emoji: 'nope', message: 'You lose! Your opponent reached 21 before you!' };
} else if (addCards('bot') === addCards('user') && stood) { // Tie
return { result: null, emoji: 'empty', message: 'You tied with your opponent!' };
} else if (addCards('user') <= 21 && cards.user.length === 5) { // User took more than 5 cards without going over 21
return { result: true, emoji: 'ok', message: 'You win! You took 5 cards without going over 21.' };
} else if (addCards('bot') <= 21 && cards.bot.length === 5) { // Bot took more than 5 cards without going over 21
return { result: false, emoji: 'nope', message: 'You lose! Your opponent took 5 cards without going above 21.' };
} else if (addCards('bot') > addCards('user') && stood) {
// If the bot has a score of 17 or more and the user has less than the bot, and the user is also stood
return { result: false, emoji: 'nope', message: `You lose! You have ${addCards('user')}, Dealer has ${addCards('bot')}.` };
} else if (addCards('user') > addCards('bot') && stood) {
// If the user has a higher score than the bot and they are
return { result: true, emoji: 'nope', message: `You win! You have ${addCards('user')}, Dealer has ${addCards('bot')}.` };
} else {
return addCards('user'); // else
}
}
const gambed = async (final) => {
const status = score();
let desc = '';
if (status.constructor === Object) {
const coinCheck = await DB.GetCash;
if (bet > coinCheck.cash) {
await DB.SubCash(message.author.id, (bet / 2));
return message.reply(`You cant afford to pay your bet anymore. I took half your bet instead`);
}
let finalMsg = '';
// Win
if (status.result) {
winnings = Math.ceil(bet); // ceil here to avoid winnings being 0
winnings = Math.floor(bet)
await DB.AddCash(message.author.id, winnings);
const newCash = await DB.GetCash(message.author.id)
finalMsg += `\nYou won **${winnings.toLocaleString()}** cash. You now have ${(formatNumber(newCash.cash))}.`;
} else {
// Tie
if (status.result === null) {
const newCash = await DB.GetCash(message.author.id)
finalMsg += `Your cash hasn't changed! You have ${formatNumber(newCash.cash)} cash still.`;
} else {
// Loss
await DB.SubCash(message.author.id, bet);
const newCash = await DB.GetCash(message.author.id);
finalMsg += `You lost **${Number(bet).toLocaleString()}** cash. You now have ${(formatNumber(newCash.cash)).toLocaleString()}.`;
}
}
final = true;
desc = `**${status.message}** ${finalMsg}`;
}
const satisfied = final;
message.channel.send({ content: !final ? `${first ? 'What do you want to do?\n' : ''}Type \`h\` to **hit**, type \`s\` to **stand**, or type \`e\` to **end** the game.` : '',
embed: {
author:
{
name: `${user.username}'s blackjack game`,
icon_url: user.displayAvatarURL({ dynamic: true })
},
color: final ? status.result === null ? 16757504 : (winnings ? 5025616 : 15022389) : 2533018,
description: desc,
fields: [
{
name: message.author.username,
value: `Cards - **${cards.user.map(card => `[\`${getRespectiveIcon(card.suit)} ${card.face}\`](https://google.com)`).join(' ')}**\nTotal - \`${addCards('user')}\``,
inline: true
},
{ // Always show the first card, all other cards are unknown until stood or final is called
name: 'WOK',
value: `Cards - **${cards.bot.slice(0, satisfied ? cards.bot.length : 2).map((card, i) => (!i || satisfied) ? `[\`${getRespectiveIcon(card.suit)} ${card.face}\`](https://google.com)` : '`?`').join(' ')}**\nTotal - \`${
satisfied ? addCards('bot') : ' ? '}\``,
inline: true
| { // redraw
cards = {
bot: [deal(), deal()],
user: [deal(), deal()]
};
} | conditional_block |
blackjack.js | then(m => m.delete({ timeout: 8000 }));
}
if (bet < 1 || !Number.isInteger(Number(bet))) {
if (bet && bet.toLowerCase().includes('k')) {
const givenKay = bet.replace(/k/g, '');
if (!Number.isInteger(Number(givenKay * 1000)) || isNaN(givenKay * 1000)) {
return message.reply('you have to bet a whole number');
} else {
bet = givenKay * 1000;
}
} else if (bet.toLowerCase() === 'all') {
bet = cash;
} else if (bet.toLowerCase() === 'max') {
bet = Math.min(cash, 5000000);
} else if (bet.toLowerCase() === 'half') {
bet = Math.round(cash / 2);
} else {
return message.reply('You have to bet cash');
} | }
if (cash === 0) {
return message.reply('You dont have any cash to bet');
}
if (bet > cash) {
return message.reply(`You only have ${formatNumber(cash)}. You cant bet more than that`);
}
if (bet > 5000000) {
return message.reply(`You can't bet more than **${formatNumber(5000000)} cash** at a time.`);
}
if (bet < 10000) {
return message.reply(`You cant bet less than ${formatNumber(10000)} cash`);
}
let winnings = 0;
const faces = ['A', '2', '3', '4', '5', '6', '7', '8', '9', '10', 'J', 'Q', 'K'];
const values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10];
const suits = ['spades', 'hearts', 'diamonds', 'clubs'];
let stood = false;
let first = true;
function deal () {
// eslint-disable-next-line prefer-const
let card = { face: faces[Math.floor(Math.random() * faces.length)], suit: suits[Math.floor(Math.random() * suits.length)] };
return card;
}
let cards = {
bot: [deal(), deal()],
user: [deal(), deal()]
};
while (addCards('user') === 21 || addCards('bot') === 21) { // redraw
cards = {
bot: [deal(), deal()],
user: [deal(), deal()]
};
}
function getRespectiveIcon (suit) {
switch (suit) {
case 'spades':
return '♠';
case 'hearts':
return '♥';
case 'diamonds':
return '♦';
case 'clubs':
return '♣';
}
}
function getValue (card) {
return { value: values[faces.indexOf(card.face)], card };
}
function addCards (type) {
return cards[type].sort((a, b) => getValue(b).value - getValue(a).value).reduce((p, c) => {
let newCard = getValue(c).value;
if (newCard === 1) {
if (p + 11 <= 21) {
newCard = 11;
}
}
return p + newCard;
}, 0);
}
function score () {
if (addCards('user') > 21) { // User busted
return { result: false, emoji: 'nope', message: 'You lose! Busted!' };
} else if (addCards('bot') > 21) { // Bot busted
return { result: true, emoji: 'ok', message: 'You win! Your opponent busted!' };
} else if (addCards('user') === 21) { // User has exactly 21
return { result: true, emoji: 'ok', message: 'You win! You have 21!' };
} else if (addCards('bot') === 21) { // Bot has exactly 21
return { result: false, emoji: 'nope', message: 'You lose! Your opponent reached 21 before you!' };
} else if (addCards('bot') === addCards('user') && stood) { // Tie
return { result: null, emoji: 'empty', message: 'You tied with your opponent!' };
} else if (addCards('user') <= 21 && cards.user.length === 5) { // User took more than 5 cards without going over 21
return { result: true, emoji: 'ok', message: 'You win! You took 5 cards without going over 21.' };
} else if (addCards('bot') <= 21 && cards.bot.length === 5) { // Bot took more than 5 cards without going over 21
return { result: false, emoji: 'nope', message: 'You lose! Your opponent took 5 cards without going above 21.' };
} else if (addCards('bot') > addCards('user') && stood) {
// If the bot has a score of 17 or more and the user has less than the bot, and the user is also stood
return { result: false, emoji: 'nope', message: `You lose! You have ${addCards('user')}, Dealer has ${addCards('bot')}.` };
} else if (addCards('user') > addCards('bot') && stood) {
// If the user has a higher score than the bot and they are
return { result: true, emoji: 'nope', message: `You win! You have ${addCards('user')}, Dealer has ${addCards('bot')}.` };
} else {
return addCards('user'); // else
}
}
const gambed = async (final) => {
const status = score();
let desc = '';
if (status.constructor === Object) {
const coinCheck = await DB.GetCash;
if (bet > coinCheck.cash) {
await DB.SubCash(message.author.id, (bet / 2));
return message.reply(`You cant afford to pay your bet anymore. I took half your bet instead`);
}
let finalMsg = '';
// Win
if (status.result) {
winnings = Math.ceil(bet); // ceil here to avoid winnings being 0
winnings = Math.floor(bet)
await DB.AddCash(message.author.id, winnings);
const newCash = await DB.GetCash(message.author.id)
finalMsg += `\nYou won **${winnings.toLocaleString()}** cash. You now have ${(formatNumber(newCash.cash))}.`;
} else {
// Tie
if (status.result === null) {
const newCash = await DB.GetCash(message.author.id)
finalMsg += `Your cash hasn't changed! You have ${formatNumber(newCash.cash)} cash still.`;
} else {
// Loss
await DB.SubCash(message.author.id, bet);
const newCash = await DB.GetCash(message.author.id);
finalMsg += `You lost **${Number(bet).toLocaleString()}** cash. You now have ${(formatNumber(newCash.cash)).toLocaleString()}.`;
}
}
final = true;
desc = `**${status.message}** ${finalMsg}`;
}
const satisfied = final;
message.channel.send({ content: !final ? `${first ? 'What do you want to do?\n' : ''}Type \`h\` to **hit**, type \`s\` to **stand**, or type \`e\` to **end** the game.` : '',
embed: {
author:
{
name: `${user.username}'s blackjack game`,
icon_url: user.displayAvatarURL({ dynamic: true })
},
color: final ? status.result === null ? 16757504 : (winnings ? 5025616 : 15022389) : 2533018,
description: desc,
fields: [
{
name: message.author.username,
value: `Cards - **${cards.user.map(card => `[\`${getRespectiveIcon(card.suit)} ${card.face}\`](https://google.com)`).join(' ')}**\nTotal - \`${addCards('user')}\``,
inline: true
},
{ // Always show the first card, all other cards are unknown until stood or final is called
name: 'WOK',
value: `Cards - **${cards.bot.slice(0, satisfied ? cards.bot.length : 2).map((card, i) => (!i || satisfied) ? `[\`${getRespectiveIcon(card.suit)} ${card.face}\`](https://google.com)` : '`?`').join(' ')}**\nTotal - \`${
satisfied ? addCards('bot') : ' ? '}\``,
inline: true
}
| random_line_split |
|
azure_logcollector.go | err
}
// Machine pool can be an AzureManagedMachinePool for AKS clusters.
_, err = getAzureManagedMachinePool(ctx, managementClusterClient, mp)
if err != nil {
return err
}
} else {
isWindows = isAzureMachinePoolWindows(am)
}
cluster, err := util.GetClusterFromMetadata(ctx, managementClusterClient, mp.ObjectMeta)
if err != nil {
return err
}
for i, instance := range mp.Spec.ProviderIDList {
if mp.Status.NodeRefs != nil && len(mp.Status.NodeRefs) >= (i+1) {
hostname := mp.Status.NodeRefs[i].Name
if err := collectLogsFromNode(cluster, hostname, isWindows, filepath.Join(outputPath, hostname)); err != nil {
errs = append(errs, err)
}
if err := collectVMSSBootLog(ctx, instance, filepath.Join(outputPath, hostname)); err != nil {
errs = append(errs, errors.Wrap(err, "Unable to collect VMSS Boot Diagnostic logs"))
}
} else {
Logf("MachinePool instance %s does not have a corresponding NodeRef", instance)
Logf("Skipping log collection for MachinePool instance %s", instance)
}
}
return kinderrors.NewAggregate(errs)
}
// CollectInfrastructureLogs collects log from the infrastructure.
// This is currently a no-op implementation to satisfy the LogCollector interface.
func (k AzureLogCollector) CollectInfrastructureLogs(ctx context.Context, managementClusterClient client.Client, c *clusterv1.Cluster, outputPath string) error {
return nil
}
// collectLogsFromNode collects logs from various sources by ssh'ing into the node
func collectLogsFromNode(cluster *clusterv1.Cluster, hostname string, isWindows bool, outputPath string) error {
nodeOSType := azure.LinuxOS
if isWindows {
nodeOSType = azure.WindowsOS
}
Logf("Collecting logs for %s node %s in cluster %s in namespace %s\n", nodeOSType, hostname, cluster.Name, cluster.Namespace)
controlPlaneEndpoint := cluster.Spec.ControlPlaneEndpoint.Host
execToPathFn := func(outputFileName, command string, args ...string) func() error {
return func() error {
return retryWithTimeout(collectLogInterval, collectLogTimeout, func() error {
f, err := fileOnHost(filepath.Join(outputPath, outputFileName))
if err != nil {
return err
}
defer f.Close()
return execOnHost(controlPlaneEndpoint, hostname, sshPort, collectLogTimeout, f, command, args...)
})
}
}
if isWindows {
// if we initiate to many ssh connections they get dropped (default is 10) so split it up
var errors []error
errors = append(errors, kinderrors.AggregateConcurrent(windowsInfo(execToPathFn)))
errors = append(errors, kinderrors.AggregateConcurrent(windowsK8sLogs(execToPathFn)))
errors = append(errors, kinderrors.AggregateConcurrent(windowsNetworkLogs(execToPathFn)))
errors = append(errors, kinderrors.AggregateConcurrent(windowsCrashDumpLogs(execToPathFn)))
errors = append(errors, sftpCopyFile(controlPlaneEndpoint, hostname, sshPort, collectLogTimeout, "/c:/crashdumps.tar", filepath.Join(outputPath, "crashdumps.tar")))
return kinderrors.NewAggregate(errors)
}
return kinderrors.AggregateConcurrent(linuxLogs(execToPathFn))
}
func getHostname(m *clusterv1.Machine, isWindows bool) string {
hostname := m.Spec.InfrastructureRef.Name
if isWindows {
// Windows host name ends up being different than the infra machine name
// due to Windows name limitations in Azure so use ip address instead.
if len(m.Status.Addresses) > 0 {
hostname = m.Status.Addresses[0].Address
} else {
Logf("Unable to collect logs as node doesn't have addresses")
}
}
return hostname
}
func getAzureCluster(ctx context.Context, managementClusterClient client.Client, namespace, name string) (*infrav1.AzureCluster, error) {
key := client.ObjectKey{
Namespace: namespace,
Name: name,
}
azCluster := &infrav1.AzureCluster{}
err := managementClusterClient.Get(ctx, key, azCluster)
return azCluster, err
}
func getAzureManagedControlPlane(ctx context.Context, managementClusterClient client.Client, namespace, name string) (*infrav1.AzureManagedControlPlane, error) { |
azManagedControlPlane := &infrav1.AzureManagedControlPlane{}
err := managementClusterClient.Get(ctx, key, azManagedControlPlane)
return azManagedControlPlane, err
}
func getAzureMachine(ctx context.Context, managementClusterClient client.Client, m *clusterv1.Machine) (*infrav1.AzureMachine, error) {
key := client.ObjectKey{
Namespace: m.Spec.InfrastructureRef.Namespace,
Name: m.Spec.InfrastructureRef.Name,
}
azMachine := &infrav1.AzureMachine{}
err := managementClusterClient.Get(ctx, key, azMachine)
return azMachine, err
}
func getAzureMachinePool(ctx context.Context, managementClusterClient client.Client, mp *expv1.MachinePool) (*infrav1exp.AzureMachinePool, error) {
key := client.ObjectKey{
Namespace: mp.Spec.Template.Spec.InfrastructureRef.Namespace,
Name: mp.Spec.Template.Spec.InfrastructureRef.Name,
}
azMachinePool := &infrav1exp.AzureMachinePool{}
err := managementClusterClient.Get(ctx, key, azMachinePool)
return azMachinePool, err
}
func getAzureManagedMachinePool(ctx context.Context, managementClusterClient client.Client, mp *expv1.MachinePool) (*infrav1.AzureManagedMachinePool, error) {
key := client.ObjectKey{
Namespace: mp.Spec.Template.Spec.InfrastructureRef.Namespace,
Name: mp.Spec.Template.Spec.InfrastructureRef.Name,
}
azManagedMachinePool := &infrav1.AzureManagedMachinePool{}
err := managementClusterClient.Get(ctx, key, azManagedMachinePool)
return azManagedMachinePool, err
}
func linuxLogs(execToPathFn func(outputFileName string, command string, args ...string) func() error) []func() error {
return []func() error{
execToPathFn(
"journal.log",
"sudo", "journalctl", "--no-pager", "--output=short-precise",
),
execToPathFn(
"kern.log",
"sudo", "journalctl", "--no-pager", "--output=short-precise", "-k",
),
execToPathFn(
"kubelet-version.txt",
"PATH=/opt/bin:${PATH}", "kubelet", "--version",
),
execToPathFn(
"kubelet.log",
"sudo", "journalctl", "--no-pager", "--output=short-precise", "-u", "kubelet.service",
),
execToPathFn(
"containerd.log",
"sudo", "journalctl", "--no-pager", "--output=short-precise", "-u", "containerd.service",
),
execToPathFn(
"ignition.log",
"sudo", "journalctl", "--no-pager", "--output=short-precise", "-at", "ignition",
),
execToPathFn(
"cloud-init.log",
"cat", "/var/log/cloud-init.log",
),
execToPathFn(
"cloud-init-output.log",
"cat", "/var/log/cloud-init-output.log",
),
execToPathFn(
"sentinel-file-dir.txt",
"ls", "/run/cluster-api/",
),
execToPathFn(
"cni.log",
"cat", "/var/log/calico/cni/cni.log",
),
}
}
func windowsK8sLogs(execToPathFn func(outputFileName string, command string, args ...string) func() error) []func() error {
return []func() error{
execToPathFn(
"hyperv-operation.log",
"Get-WinEvent", "-LogName Microsoft-Windows-Hyper-V-Compute-Operational | Select-Object -Property TimeCreated, Id, LevelDisplayName, Message | Sort-Object TimeCreated | Format-Table -Wrap -Autosize",
),
execToPathFn(
"containerd-containers.log",
"ctr.exe", "-n k8s.io containers list",
),
execToPathFn(
"containerd-tasks.log",
"ctr.exe", "-n k8s.io tasks list",
),
execToPathFn(
"containers-hcs.log",
"hcsdiag", "list",
),
execToPathFn(
"kubelet.log",
`Get-ChildItem "C:\\var\\log\\kubelet\\" | ForEach-Object { if ($_ -match 'log.INFO|err.*.log') { write-output "$_";cat "c:\\var\\log\\kubelet\\$_" } }`,
),
execToPathFn(
| key := client.ObjectKey{
Namespace: namespace,
Name: name,
} | random_line_split |