File: Http_Delete.java

package tasslegro.base;
import java.io.IOException;
import java.net.URI;
import org.apache.http.HttpEntity;
import org.apache.http.HttpResponse;
import org.apache.http.ParseException;
import org.apache.http.annotation.NotThreadSafe;
import org.apache.http.client.ClientProtocolException;
import org.apache.http.client.methods.HttpEntityEnclosingRequestBase;
import org.apache.http.entity.StringEntity;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.DefaultHttpClient;
import org.apache.http.util.EntityUtils;
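// DELETE with a request body: the stock HttpDelete in Apache HttpClient does
// not accept an entity, so this helper defines its own request class that
// extends HttpEntityEnclosingRequestBase and reports "DELETE" as its method.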
public class Http_Delete {
@NotThreadSafe
class HttpDeleteWithBody extends HttpEntityEnclosingRequestBase {
public static final String METHOD_NAME = "DELETE";
public String getMethod() {
return METHOD_NAME;
}
public HttpDeleteWithBody(final String uri) {
super();
setURI(URI.create(uri));
}
public HttpDeleteWithBody(final URI uri) {
super();
setURI(uri);
}
public HttpDeleteWithBody() {
    super();
}
}
CloseableHttpClient httpClient = new DefaultHttpClient();
HttpDeleteWithBody delete = null;
HttpResponse response = null;
public Http_Delete(String url, String entity) throws ClientProtocolException, IOException {
this.delete = new HttpDeleteWithBody(url);
this.delete.setHeader("Content-type", "application/json");
StringEntity stringEntity = new StringEntity(entity.toString(), "UTF-8");
this.delete.setEntity(stringEntity);
this.response = this.httpClient.execute(this.delete);
}
public Http_Delete() {
}
public void setURL(String url, String entity) throws ClientProtocolException, IOException {
this.delete = new HttpDeleteWithBody(url);
this.delete.setHeader("Content-type", "application/json");
StringEntity stringEntity = new StringEntity(entity.toString(), "UTF-8");
this.delete.setEntity(stringEntity);
this.response = this.httpClient.execute(this.delete);
}
public int getStatusCode() {
if (response == null) {
return 0;
}
return this.response.getStatusLine().getStatusCode();
}
public String getStringResponse() throws ParseException, IOException {
if (response == null) {
return null;
}
HttpEntity entity = this.response.getEntity();
return EntityUtils.toString(entity, "UTF-8");
}
}

File: LoginManager.java

package com.libertacao.libertacao.manager;
import android.support.annotation.Nullable;
import com.libertacao.libertacao.persistence.UserPreferences;
import com.parse.ParseFacebookUtils;
import com.parse.ParseUser;
public class LoginManager {
private static final LoginManager ourInstance = new LoginManager();
public static LoginManager getInstance() {
return ourInstance;
}
private LoginManager() {
}
public boolean isLoggedIn(){
return ParseUser.getCurrentUser() != null;
}
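// The numeric "type" field on the Parse user encodes admin levels: 666 marks
// a super admin, and any value congruent to 6 modulo 66 (including 666)
// counts as an admin.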
public boolean isSuperAdmin() {
if(isLoggedIn()) {
int type = ParseUser.getCurrentUser().getInt("type");
if(type == 666) {
return true;
}
}
return false;
}
public boolean isAdmin() {
    if(isLoggedIn()) {
        int type = ParseUser.getCurrentUser().getInt("type");
        if(type % 66 == 6) {
            return true;
        }
    }
    return false;
}
public void logout() {
UserPreferences.clearSharedPreferences();
UserManager.getInstance().setCurrentLatLng(null);
}
@Nullable
public String getUsername() {
if(isLoggedIn()) {
ParseUser currentUser = ParseUser.getCurrentUser();
if(ParseFacebookUtils.isLinked(currentUser)) {
return currentUser.getString("name");
} else {
return currentUser.getUsername();
}
} else {
return null;
}
}
}

File: mod.rs

#![allow(dead_code)]
pub mod vec3;
pub mod vec4;
pub mod mat4;
pub mod transform;

File: authenticationradiuspolicy_systemglobal_binding.py

#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class authenticationradiuspolicy_systemglobal_binding(base_resource) :
""" Binding class showing the systemglobal that can be bound to authenticationradiuspolicy.
"""
def __init__(self) :
self._boundto = ""
self._priority = 0
self._activepolicy = 0
self._name = ""
self.___count = 0
@property
def name(self) :
"""Name of the RADIUS authentication policy.<br/>Minimum length = 1.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
"""Name of the RADIUS authentication policy.<br/>Minimum length = 1
"""
try :
self._name = name
except Exception as e:
raise e
@property
def boundto(self) :
"""The entity name to which policy is bound.
"""
try :
return self._boundto
except Exception as e:
raise e
@boundto.setter
def boundto(self, boundto) :
"""The entity name to which policy is bound.
"""
try :
self._boundto = boundto
except Exception as e:
raise e
@property
def priority(self) :
try :
return self._priority
except Exception as e:
raise e
@property
def activepolicy(self) :
try :
return self._activepolicy
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(authenticationradiuspolicy_systemglobal_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.authenticationradiuspolicy_systemglobal_binding
except Exception as e :
raise e
def _get_object_name(self) :
""" Returns the value of object identifier argument
"""
try :
if (self.name) :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def get(cls, service, name) :
""" Use this API to fetch authenticationradiuspolicy_systemglobal_binding resources.
"""
try :
obj = authenticationradiuspolicy_systemglobal_binding()
obj.name = name
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, name, filter_) :
""" Use this API to fetch filtered set of authenticationradiuspolicy_systemglobal_binding resources.
Filter string should be in JSON format, e.g., "port:80,servicetype:HTTP".
"""
try :
obj = authenticationradiuspolicy_systemglobal_binding()
obj.name = name
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service, name) :
""" Use this API to count authenticationradiuspolicy_systemglobal_binding resources configued on NetScaler.
"""
try :
obj = authenticationradiuspolicy_systemglobal_binding()
obj.name = name
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, name, filter_) :
""" Use this API to count the filtered set of authenticationradiuspolicy_systemglobal_binding resources.
Filter string should be in JSON format, e.g., "port:80,servicetype:HTTP".
"""
try :
obj = authenticationradiuspolicy_systemglobal_binding()
obj.name = name
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class authenticationradiuspolicy_systemglobal_binding_response(base_response) :
def __init__(self, length=1) :
    self.authenticationradiuspolicy_systemglobal_binding = []
    self.errorcode = 0
    self.message = ""
    self.severity = ""
    self.sessionid = ""
    self.authenticationradiuspolicy_systemglobal_binding = [authenticationradiuspolicy_systemglobal_binding() for _ in range(length)]

File: MAnalogProbe.cc

/*
 * This file is part of RHexLib,
*
* Copyright (c) 2001 The University of Michigan, its Regents,
* Fellows, Employees and Agents. All rights reserved, and distributed as
* free software under the following license.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* 1) Redistributions of source code must retain the above copyright
* notice, this list of conditions, the following disclaimer and the
* file called "CREDITS" which accompanies this distribution.
*
* 2) Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions, the following disclaimer and the file
* called "CREDITS" which accompanies this distribution in the
* documentation and/or other materials provided with the distribution.
*
* 3) Neither the name of the University of Michigan, Ann Arbor or the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*********************************************************************
* $Id: MAnalogProbe.cc,v 1.3 2001/08/05 18:13:24 ulucs Exp $
*
* The MAnalogProbe module. Handles the analog inputs in the Michigan Hardware.
*
* Created : Uluc Saranli, 01/19/2001
* Last Modified : Uluc Saranli, 06/27/2001
*
********************************************************************/
#include <stdio.h>
#include <math.h>
#include "io/mpc550.hh"
#include "MichiganHW.hh"
#include "MComponents.hh"
#include "MAnalogProbe.hh"
// MAnalogProbe::MAnalogProbe : Class constructor
MAnalogProbe::MAnalogProbe( uint numchan, bool polling )
: Module( "manalogprobe", 0, false, polling ) {
channels = numchan;
controlWord = 0x40 | ADIO_RANGE5V | ADIO_UNIPOLAR;
curChannel = lastChannel = 0;
stepMark = 0;
convCount = 0;
conversion = false;
memset( value, 0, 16 * sizeof( float ) );
memset( timestamp, 0, 16 * sizeof( CLOCK ) );
}
void MAnalogProbe::update( void ) {
if ( curChannel >= channels ) {
if ( MMGetStepCount() == stepMark ) {
// Done with all the channels and waiting for a new module manager step.
// meaningful only when the module is polling
return;
} else {
// New module manager step. We can restart acquisition of the channels
curChannel = 0;
stepMark = MMGetStepCount();
}
}
// Still not done with all the channels for the current run. Check pending
// conversions and issue a new one if necessary
if ( conversion ) {
// There is a pending conversion, so try to read the data.
if ( MPC550Card->checkADC( 1 ) ) {
// The conversion was from AD1
value[ lastChannel ] = 5.0 * MPC550Card->readADC( 1 ) / 0xfff;
conversion = false;
} else if ( MPC550Card->checkADC( 2 ) ) {
// The conversion was from AD2
value[ lastChannel ] = 5.0 * MPC550Card->readADC( 2 ) / 0xfff;
conversion = false;
}
if ( !conversion ) {
timestamp[ lastChannel ] = MMReadClock();
curChannel++;
}
}
if ( !conversion && ( curChannel < channels ) ) {
// If the previous conversion is done and there are still more channels
// to go, start another conversion
if ( curChannel < 8 ) {
MPC550Card->acquireADC( 1, controlWord, curChannel );
} else {
MPC550Card->acquireADC( 2, controlWord, curChannel - 8 );
}
// printf( "Starting conversion of channel %i\n", curChannel );
conversion = true;
lastChannel = curChannel;
}
}

File: test_country.py

test_records = [
    [{
        "doctype": "Country",
        "country_name": "_Test Country"
    }]
]

File: content_to_visible_time_reporter.cc

// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "third_party/blink/public/common/page/content_to_visible_time_reporter.h"
#include <utility>
#include "base/bind.h"
#include "base/callback_helpers.h"
#include "base/debug/dump_without_crashing.h"
#include "base/feature_list.h"
#include "base/metrics/histogram_functions.h"
#include "base/metrics/histogram_macros.h"
#include "base/strings/strcat.h"
#include "base/trace_event/trace_event.h"
#include "third_party/blink/public/common/features.h"
#include "third_party/blink/public/mojom/widget/record_content_to_visible_time_request.mojom.h"
#include "ui/gfx/presentation_feedback.h"
namespace blink {
namespace {
// Used to generate unique "TabSwitching::Latency" event ids. Note: The address
// of ContentToVisibleTimeReporter can't be used as an id because a single
// ContentToVisibleTimeReporter can generate multiple overlapping events.
int g_num_trace_events_in_process = 0;
const char* GetHistogramSuffix(
bool has_saved_frames,
const mojom::RecordContentToVisibleTimeRequest& start_state) {
if (has_saved_frames)
return "WithSavedFrames";
if (start_state.destination_is_loaded) {
return "NoSavedFrames_Loaded";
} else {
return "NoSavedFrames_NotLoaded";
}
}
void ReportUnOccludedMetric(const base::TimeTicks requested_time,
const gfx::PresentationFeedback& feedback) {
const base::TimeDelta delta = feedback.timestamp - requested_time;
UMA_HISTOGRAM_TIMES("Aura.WebContentsWindowUnOccludedTime", delta);
}
void RecordBackForwardCacheRestoreMetric(
const base::TimeTicks requested_time,
const gfx::PresentationFeedback& feedback) {
const base::TimeDelta delta = feedback.timestamp - requested_time;
// Histogram to record the content to visible duration after restoring a page
// from back-forward cache. Here min, max bucket size are same as the
// "PageLoad.PaintTiming.NavigationToFirstContentfulPaint" metric.
base::UmaHistogramCustomTimes(
"BackForwardCache.Restore.NavigationToFirstPaint", delta,
base::Milliseconds(10), base::Minutes(10), 100);
}
} // namespace
void UpdateRecordContentToVisibleTimeRequest(
mojom::RecordContentToVisibleTimeRequest const& from,
mojom::RecordContentToVisibleTimeRequest& to) {
to.event_start_time = std::min(to.event_start_time, from.event_start_time);
to.destination_is_loaded |= from.destination_is_loaded;
to.show_reason_tab_switching |= from.show_reason_tab_switching;
to.show_reason_unoccluded |= from.show_reason_unoccluded;
to.show_reason_bfcache_restore |= from.show_reason_bfcache_restore;
}
ContentToVisibleTimeReporter::ContentToVisibleTimeReporter()
: is_tab_switch_metric2_feature_enabled_(
base::FeatureList::IsEnabled(blink::features::kTabSwitchMetrics2)) {}
ContentToVisibleTimeReporter::~ContentToVisibleTimeReporter() = default;
base::OnceCallback<void(const gfx::PresentationFeedback&)>
ContentToVisibleTimeReporter::TabWasShown(
bool has_saved_frames,
mojom::RecordContentToVisibleTimeRequestPtr start_state,
base::TimeTicks widget_visibility_request_timestamp) {
DCHECK(!start_state->event_start_time.is_null());
DCHECK(!widget_visibility_request_timestamp.is_null());
DCHECK(!tab_switch_start_state_);
DCHECK(widget_visibility_request_timestamp_.is_null());
// Invalidate previously issued callbacks, to avoid accessing a null
// |tab_switch_start_state_|.
//
// TODO(https://crbug.com/1121339): Make sure that TabWasShown() is never
// called twice without a call to TabWasHidden() in-between, and remove this
// mitigation.
weak_ptr_factory_.InvalidateWeakPtrs();
has_saved_frames_ = has_saved_frames;
tab_switch_start_state_ = std::move(start_state);
widget_visibility_request_timestamp_ = widget_visibility_request_timestamp;
// |tab_switch_start_state_| is only reset by RecordHistogramsAndTraceEvents
// once the metrics have been emitted.
return base::BindOnce(
&ContentToVisibleTimeReporter::RecordHistogramsAndTraceEvents,
weak_ptr_factory_.GetWeakPtr(), false /* is_incomplete */,
tab_switch_start_state_->show_reason_tab_switching,
tab_switch_start_state_->show_reason_unoccluded,
tab_switch_start_state_->show_reason_bfcache_restore);
}
base::OnceCallback<void(const gfx::PresentationFeedback&)>
ContentToVisibleTimeReporter::TabWasShown(
bool has_saved_frames,
base::TimeTicks event_start_time,
bool destination_is_loaded,
bool show_reason_tab_switching,
bool show_reason_unoccluded,
bool show_reason_bfcache_restore,
base::TimeTicks widget_visibility_request_timestamp) {
return TabWasShown(
has_saved_frames,
mojom::RecordContentToVisibleTimeRequest::New(
event_start_time, destination_is_loaded, show_reason_tab_switching,
show_reason_unoccluded, show_reason_bfcache_restore),
widget_visibility_request_timestamp);
}
void ContentToVisibleTimeReporter::TabWasHidden() {
if (tab_switch_start_state_) {
RecordHistogramsAndTraceEvents(true /* is_incomplete */,
true /* show_reason_tab_switching */,
false /* show_reason_unoccluded */,
false /* show_reason_bfcache_restore */,
gfx::PresentationFeedback::Failure());
weak_ptr_factory_.InvalidateWeakPtrs();
}
}
void ContentToVisibleTimeReporter::RecordHistogramsAndTraceEvents(
bool is_incomplete,
bool show_reason_tab_switching,
bool show_reason_unoccluded,
bool show_reason_bfcache_restore,
const gfx::PresentationFeedback& feedback) {
DCHECK(tab_switch_start_state_);
DCHECK(!widget_visibility_request_timestamp_.is_null());
// If the DCHECK fails, make sure RenderWidgetHostImpl::WasShown was triggered
// for recording the event.
DCHECK(show_reason_bfcache_restore || show_reason_unoccluded ||
show_reason_tab_switching);
if (show_reason_bfcache_restore) {
RecordBackForwardCacheRestoreMetric(
tab_switch_start_state_->event_start_time, feedback);
}
if (show_reason_unoccluded) {
ReportUnOccludedMetric(tab_switch_start_state_->event_start_time, feedback);
}
if (!show_reason_tab_switching)
return;
// Tab switching has occurred.
auto tab_switch_result = TabSwitchResult::kSuccess;
if (is_incomplete)
tab_switch_result = TabSwitchResult::kIncomplete;
else if (feedback.flags & gfx::PresentationFeedback::kFailure)
tab_switch_result = TabSwitchResult::kPresentationFailure;
const auto tab_switch_duration =
feedback.timestamp - tab_switch_start_state_->event_start_time;
// Record trace events.
TRACE_EVENT_NESTABLE_ASYNC_BEGIN_WITH_TIMESTAMP0(
"latency", "TabSwitching::Latency",
TRACE_ID_LOCAL(g_num_trace_events_in_process),
tab_switch_start_state_->event_start_time);
TRACE_EVENT_NESTABLE_ASYNC_END_WITH_TIMESTAMP2(
"latency", "TabSwitching::Latency",
TRACE_ID_LOCAL(g_num_trace_events_in_process), feedback.timestamp,
"result", tab_switch_result, "latency",
tab_switch_duration.InMillisecondsF());
++g_num_trace_events_in_process;
const char* suffix =
GetHistogramSuffix(has_saved_frames_, *tab_switch_start_state_);
// Record result histogram.
if (is_tab_switch_metric2_feature_enabled_) {
base::UmaHistogramEnumeration(
base::StrCat({"Browser.Tabs.TabSwitchResult2.", suffix}),
tab_switch_result);
} else {
base::UmaHistogramEnumeration(
base::StrCat({"Browser.Tabs.TabSwitchResult.", suffix}),
tab_switch_result);
}
// Record latency histogram.
switch (tab_switch_result) {
case TabSwitchResult::kSuccess: {
if (is_tab_switch_metric2_feature_enabled_) {
base::UmaHistogramTimes(
base::StrCat({"Browser.Tabs.TotalSwitchDuration2.", suffix}),
tab_switch_duration);
} else {
base::UmaHistogramTimes(
base::StrCat({"Browser.Tabs.TotalSwitchDuration.", suffix}),
tab_switch_duration);
}
break;
}
case TabSwitchResult::kIncomplete: {
if (is_tab_switch_metric2_feature_enabled_) {
base::UmaHistogramTimes(
base::StrCat(
{"Browser.Tabs.TotalIncompleteSwitchDuration2.", suffix}),
tab_switch_duration);
} else {
base::UmaHistogramTimes(
base::StrCat(
{"Browser.Tabs.TotalIncompleteSwitchDuration.", suffix}),
tab_switch_duration);
}
break;
}
case TabSwitchResult::kPresentationFailure: {
break;
}
}
// Record legacy latency histogram.
UMA_HISTOGRAM_TIMES(
"MPArch.RWH_TabSwitchPaintDuration",
feedback.timestamp - widget_visibility_request_timestamp_);
// Reset tab switch information.
has_saved_frames_ = false;
tab_switch_start_state_.reset();
widget_visibility_request_timestamp_ = base::TimeTicks();
}
} // namespace blink

File: _i_x_n_annotation_format_8h.js

var _i_x_n_annotation_format_8h =
[
    [ "IXNAnnotationFormat", "_i_x_n_annotation_format_8h.html#a607464fe85fc01c26df0c51826fd4841", [
      [ "IXNAnnotationFormatPlainString", "_i_x_n_annotation_format_8h.html#a607464fe85fc01c26df0c51826fd4841a7b4fb91d85f0776a7abe6e0623f6b0fc", null ],
      [ "IXNAnnotationFormatJson", "_i_x_n_annotation_format_8h.html#a607464fe85fc01c26df0c51826fd4841a91b8ae8f1bcb5c68a714026c4674231d", null ],
      [ "IXNAnnotationFormatOsc", "_i_x_n_annotation_format_8h.html#a607464fe85fc01c26df0c51826fd4841a611da1ac0a3eccbea277aad5ca3d57f1", null ]
    ] ]
];

File: install_uninstall.py

from __future__ import print_function
from __future__ import unicode_literals
import io
import os.path
import pipes
import sys
from pre_commit import output
from pre_commit.util import make_executable
from pre_commit.util import mkdirp
from pre_commit.util import resource_filename
# This is used to identify the hook file we install
PRIOR_HASHES = (
'4d9958c90bc262f47553e2c073f14cfe',
'd8ee923c46731b42cd95cc869add4062',
'49fd668cb42069aa1b6048464be5d395',
'79f09a650522a87b0da915d0d983b2de',
'e358c9dae00eac5d06b38dfdb1e33a8c',
)
CURRENT_HASH = '138fd403232d2ddd5efb44317e38bf03'
def is_our_script(filename):
if not os.path.exists(filename):
return False
contents = io.open(filename).read()
return any(h in contents for h in (CURRENT_HASH,) + PRIOR_HASHES)
def install(
        runner, overwrite=False, hooks=False, hook_type='pre-commit',
        skip_on_missing_conf=False,
):
"""Install the pre-commit hooks."""
hook_path = runner.get_hook_path(hook_type)
legacy_path = hook_path + '.legacy'
mkdirp(os.path.dirname(hook_path))
# If we have an existing hook, move it to pre-commit.legacy
if os.path.lexists(hook_path) and not is_our_script(hook_path):
os.rename(hook_path, legacy_path)
# If we specify overwrite, we simply delete the legacy file
if overwrite and os.path.exists(legacy_path):
os.remove(legacy_path)
elif os.path.exists(legacy_path):
output.write_line(
'Running in migration mode with existing hooks at {}\n'
'Use -f to use only pre-commit.'.format(
legacy_path,
),
)
with io.open(hook_path, 'w') as pre_commit_file_obj:
if hook_type == 'pre-push':
with io.open(resource_filename('pre-push-tmpl')) as f:
hook_specific_contents = f.read()
elif hook_type == 'commit-msg':
with io.open(resource_filename('commit-msg-tmpl')) as f:
hook_specific_contents = f.read()
elif hook_type == 'pre-commit':
hook_specific_contents = ''
else:
raise AssertionError('Unknown hook type: {}'.format(hook_type))
skip_on_missing_conf = 'true' if skip_on_missing_conf else 'false'
contents = io.open(resource_filename('hook-tmpl')).read().format(
sys_executable=pipes.quote(sys.executable),
hook_type=hook_type,
hook_specific=hook_specific_contents,
config_file=runner.config_file,
skip_on_missing_conf=skip_on_missing_conf,
)
pre_commit_file_obj.write(contents)
make_executable(hook_path)
output.write_line('pre-commit installed at {}'.format(hook_path))
# If they requested we install all of the hooks, do so.
if hooks:
install_hooks(runner)
return 0
def install_hooks(runner):
for repository in runner.repositories:
repository.require_installed()
def uninstall(runner, hook_type='pre-commit'):
"""Uninstall the pre-commit hooks."""
hook_path = runner.get_hook_path(hook_type)
legacy_path = hook_path + '.legacy'
# If our file doesn't exist or it isn't ours, gtfo.
if not os.path.exists(hook_path) or not is_our_script(hook_path):
return 0
os.remove(hook_path)
output.write_line('{} uninstalled'.format(hook_type))
if os.path.exists(legacy_path):
os.rename(legacy_path, hook_path)
output.write_line('Restored previous hooks to {}'.format(hook_path))
    return 0
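# Usage sketch (illustrative, not part of pre-commit itself): is_our_script()
# is how install() and uninstall() above decide whether an existing hook file
# was written by pre-commit. The hook path below is an assumption.
if __name__ == '__main__':
    hook = '.git/hooks/pre-commit'  # hypothetical repository-local hook path
    output.write_line('ours' if is_our_script(hook) else 'not ours (or missing)')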

File: admin.py

#!/usr/bin/env python
###############################################################################
#
# SageMathCloud: A collaborative web-based interface to Sage, IPython, LaTeX and the Terminal.
#
# Copyright (C) 2014, William Stein
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
"""
Administration and Launch control of salvus components
"""
####################
# Standard imports
####################
import json, logging, os, shutil, signal, socket, stat, subprocess, sys, tempfile, time
from string import Template
import misc
############################################################
# Paths where data and configuration are stored
############################################################
SITENAME = 'cloud.sagemath.com'
DATA = 'data'
CONF = 'conf'
AGENT = os.path.join(os.environ['HOME'], '.ssh', 'agent')
PWD = os.path.abspath('.')
PIDS = os.path.join(DATA, 'pids') # preferred location for pid files
LOGS = os.path.join(DATA, 'logs') # preferred location for log files
BIN = os.path.join(DATA, 'local', 'bin')
PYTHON = os.path.join(BIN, 'python')
SECRETS = os.path.join(DATA,'secrets')
# Read in socket of ssh-agent, if there is an AGENT file.
# NOTE: I'm using this right now on my laptop, but it's not yet
# deployed on cloud.sagemath *yet*. When done, it will mean the
# ssh key used by the hub is password protected, which
# will be much more secure: someone who steals ~/.ssh gets nothing,
# though still if somebody logs in as the salvus user on one of
# these nodes, they can ssh to other nodes, though they can't
# change passwords, etc. Also, this means having the ssh private
# key on the compute vm's is no longer a security risk, since it
# is protected by a (very long, very random) passphrase.
if os.path.exists(AGENT):
for X in open(AGENT).readlines():
if 'SSH_AUTH_SOCK' in X:
# The AGENT file is as output by ssh-agent.
os.environ['SSH_AUTH_SOCK'] = X.split(';')[0][len('SSH_AUTH_SOCK='):]
# TODO: factor out all $HOME/salvus/salvus style stuff in code below and use BASE.
BASE = 'salvus/salvus/'
LOG_INTERVAL = 6
GIT_REPO='' # TODO
whoami = os.environ['USER']
# Default ports
HAPROXY_PORT = 8000
NGINX_PORT = 8080
HUB_PORT = 5000
HUB_PROXY_PORT = 5001
SYNCSTRING_PORT = 6001
# These are used by the firewall.
CASSANDRA_CLIENT_PORT = 9160
CASSANDRA_NATIVE_PORT = 9042
CASSANDRA_INTERNODE_PORTS = [7000, 7001]
CASSANDRA_PORTS = CASSANDRA_INTERNODE_PORTS + [CASSANDRA_CLIENT_PORT, CASSANDRA_NATIVE_PORT]
####################
# Sending an email (useful for monitoring script)
# See http://www.nixtutor.com/linux/send-mail-through-gmail-with-python/
####################
def email(msg= '', subject='ADMIN -- cloud.sagemath.com', toaddrs='[email protected],[email protected]', fromaddr='[email protected]'):
log.info("sending email to %s", toaddrs)
username = 'salvusmath'
password = open(os.path.join(os.environ['HOME'],'salvus/salvus/data/secrets/salvusmath_email_password')
).read().strip()
import smtplib
from email.mime.text import MIMEText
msg = MIMEText(msg)
server = smtplib.SMTP('smtp.gmail.com:587')
server.starttls()
server.login(username,password)
for x in toaddrs.split(','):
toaddr = x.strip()
msg['Subject'] = subject
msg['From'] = fromaddr
msg['To'] = toaddr
server.sendmail(fromaddr, toaddr, msg.as_string())
server.quit()
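# Usage sketch (hypothetical message): alert the default recipients from a
# monitoring job.
#   email(msg='haproxy on web1 is down', subject='ADMIN -- web1 alert')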
def zfs_size(s):
"""
Convert a zfs size string to gigabytes (float)
"""
if len(s) == 0:
return 0.0
u = s[-1]; q = float(s[:-1])
if u == 'M':
q /= 1000
elif u == 'T':
q *= 1000
elif u == 'K':
q /= 1000000
return q
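# For example (worked from the conversion rules above):
#   zfs_size('500M') == 0.5     # megabytes -> gigabytes
#   zfs_size('2T')   == 2000.0  # terabytes -> gigabytes
#   zfs_size('3G')   == 3.0     # any other suffix is already gigabytes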
####################
# Running a subprocess
####################
MAXTIME_S=300
def run(args, maxtime=MAXTIME_S, verbose=True, stderr=True):
"""
Run the command line specified by args (using subprocess.Popen)
and return the stdout and stderr, killing the subprocess if it
takes more than maxtime seconds to run.
If stderr is False, don't include stderr in the returned output.
If args is a list of lists, run each command in the list separately.
"""
if args and isinstance(args[0], list):
return '\n'.join([str(run(a, maxtime=maxtime, verbose=verbose)) for a in args])
args = [str(x) for x in args]
if maxtime:
def timeout(*a):
raise KeyboardInterrupt("running '%s' took more than %s seconds, so killed"%(' '.join(args), maxtime))
signal.signal(signal.SIGALRM, timeout)
signal.alarm(maxtime)
if verbose:
log.info("running '%s'", ' '.join(args))
try:
a = subprocess.Popen(args,
stdin = subprocess.PIPE,
stdout = subprocess.PIPE,
stderr = subprocess.PIPE)
if stderr:
out = a.stderr.read()
else:
out = ''
out += a.stdout.read()
if verbose:
log.info("output '%s'", out[:256])
return out
finally:
if maxtime:
signal.signal(signal.SIGALRM, signal.SIG_IGN) # cancel the alarm
# A convenience object "sh":
# sh['list', 'of', ..., 'arguments'] to run a shell command
class SH(object):
def __init__(self, maxtime=MAXTIME_S):
self.maxtime = maxtime
def __getitem__(self, args):
return run([args] if isinstance(args, str) else list(args), maxtime=self.maxtime)
sh = SH()
def process_status(pid, run):
"""
Return the status of a process, obtained using the ps command.
The run option is used to run the command (so it could run on
a remote machine). The result is a dictionary; it is empty if
the given process is not running.
"""
fields = ['%cpu', '%mem', 'etime', 'pid', 'start', 'cputime', 'rss', 'vsize']
v = run(['ps', '-p', str(int(pid)), '-o', ' '.join(fields)], verbose=False).splitlines()
if len(v) <= 1: return {}
return dict(zip(fields, v[-1].split()))
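# Sketch of typical use (the pid is illustrative):
#   st = process_status(1234, run)
#   if st: log.info("cpu=%s rss=%s", st['%cpu'], st['rss'])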
def dns(host, timeout=10):
"""
Return list of ip addresses of a given host. Errors out after timeout seconds.
"""
a = os.popen3("host -t A -W %s %s | awk '{print $4}'"%(timeout,host))
err = a[2].read().strip()
if err:
raise RuntimeError(err)
out = a[1].read()
if 'found' in out:
raise RuntimeError("unknown domain '%s'"%host)
else:
return out.split()
########################################
# Standard Python Logging
########################################
logging.basicConfig()
log = logging.getLogger('')
#log.setLevel(logging.DEBUG) # WARNING, INFO, etc.
log.setLevel(logging.WARNING) # WARNING, INFO, etc.
#log.setLevel(logging.INFO) # WARNING, INFO, etc.
def restrict(path):
#log.info("ensuring that '%s' has restrictive permissions", path)
if os.stat(path)[stat.ST_MODE] != 0o40700:
os.chmod(path, 0o40700)
def init_data_directory():
#log.info("ensuring that '%s' exist", DATA)
for path in [DATA, PIDS, LOGS]:
if not os.path.exists(path):
os.makedirs(path)
restrict(path)
#log.info("ensuring that PATH starts with programs in DATA directory")
os.environ['PATH'] = os.path.join(DATA, 'local/bin/') + ':' + os.environ['PATH']
init_data_directory()
########################################
# Misc operating system interaction
########################################
def system(args):
"""
Run the command line specified by args (using os.system) and return
the exit code.  If args is a list of lists, run each command in the
list separately and return the *sum* of the exit codes.
"""
if args and isinstance(args[0], list):
return sum([system(a) for a in args])
c = ' '.join([str(x) for x in args])
log.info("running '%s' via system", c)
return os.system(c)
def abspath(path='.'):
return os.path.abspath(path)
def kill(pid, signal=15):
"""Send signal to the process with pid."""
if pid is not None:
return run(['kill', '-%s'%signal, pid])
def copyfile(src, target):
return shutil.copyfile(src, target)
def readfile(filename):
"""Read the named file and return its contents."""
if not os.path.exists(filename):
raise IOError, "no such file or directory: '%s'"%filename
try:
return open(filename).read()
except IOError:
pass
def writefile(filename, content):
open(filename,'w').write(content)
def makedirs(path):
if not os.path.exists(path):
os.makedirs(path)
def unlink(filename):
os.unlink(filename)
def path_exists(path):
return os.path.exists(path)
def is_running(pid):
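    # Signal 0 does error checking only: it tests whether the pid exists
    # without delivering an actual signal.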
try:
os.kill(pid, 0)
return True
except OSError:
return False
########################################
# Component: named collection of Process objects
########################################
class Component(object):
def __init__(self, id, processes):
self._processes = processes
self._id = id
def __repr__(self):
return "Component %s with %s processes"%(self._id, len(self._processes))
def __getitem__(self, i):
return self._processes[i]
def _procs_with_id(self, ids):
return [p for p in self._processes if ids is None or p.id() in ids]
def start(self, ids=None):
return [p.start() for p in self._procs_with_id(ids)]
def stop(self, ids=None):
return [p.stop() for p in self._procs_with_id(ids)]
def reload(self, ids=None):
return [p.reload() for p in self._procs_with_id(ids)]
def restart(self, ids=None):
return [p.restart() for p in self._procs_with_id(ids)]
def status(self, ids=None):
return [p.status() for p in self._procs_with_id(ids)]
########################################
# Process: a daemon process
########################################
class Process(object):
def __init__(self, id, name, port,
pidfile, logfile=None, monitor_database=None,
start_cmd=None, stop_cmd=None, reload_cmd=None,
start_using_system = False,
service=None,
term_signal=15):
self._name = name
self._port = port
self._id = str(id)
assert len(self._id.split()) == 1
self._pidfile = pidfile
self._start_cmd = start_cmd
self._start_using_system = start_using_system
self._stop_cmd = stop_cmd
self._reload_cmd = reload_cmd
# Inferred initialization: pid(), status(), and _start_monitor() below
# rely on self._logfile, self._monitor_database, and self._pids.
self._logfile = logfile
self._monitor_database = monitor_database
self._pids = {}
self._monitor_pidfile = os.path.splitext(pidfile)[0] + '-log.pid'
self._term_signal = term_signal
def id(self):
return self._id
def log_tail(self):
if self._logfile is None:
raise NotImplementedError("the logfile is not known")
system(['tail', '-f', self._logfile])
def _parse_pidfile(self, contents):
return int(contents)
def _read_pid(self, file):
try:
return self._pids[file]
except KeyError:
try:
self._pids[file] = self._parse_pidfile(readfile(file).strip())
except: # no file
self._pids[file] = None
return self._pids[file]
def pid(self):
return self._read_pid(self._pidfile)
def is_running(self):
return len(self.status()) > 0
def _start_monitor(self):
# TODO: temporarily disabled -- they do no real good anyways.
return
if self._monitor_database and self._logfile:
run([PYTHON, 'monitor.py', '--logfile', self._logfile,
'--pidfile', self._monitor_pidfile, '--interval', LOG_INTERVAL,
'--database_nodes', self._monitor_database,
'--target_pidfile', self._pidfile,
'--target_name', self._name,
'--target_address', socket.gethostname(),
'--target_port', self._port])
def monitor_pid(self):
return self._read_pid(self._monitor_pidfile)
def _stop_monitor(self):
# NOTE: This function should never need to be called; the
# monitor stops automatically when the process it is
# monitoring stops and it has succeeded in recording this fact
# in the database.
if self._monitor_database and self._logfile and path_exists(self._monitor_pidfile):
try:
kill(self.monitor_pid())
unlink(self._monitor_pidfile)
except Exception, msg:
print msg
def _pre_start(self):
pass # overload to add extra config steps before start
def start(self):
if self.is_running(): return
self._pids = {}
self._pre_start()
if self._start_cmd is not None:
if self._start_using_system:
print system(self._start_cmd)
else:
print run(self._start_cmd)
print self._start_monitor()
def stop(self, force=False):
pid = self.pid()
if pid is None and not force: return
if self._stop_cmd is not None:
print run(self._stop_cmd)
else:
kill(pid, self._term_signal)
try:
if os.path.exists(self._pidfile): unlink(self._pidfile)
except Exception, msg:
print msg
if pid:
while True:
s = process_status(pid, run)
if not s:
break
print "waiting for %s to terminate"%pid
time.sleep(0.5)
self._pids = {}
def reload(self):
self._stop_monitor()
self._pids = {}
if self._reload_cmd is not None:
return run(self._reload_cmd)
else:
return 'reload not defined'
def status(self):
pid = self.pid()
if not pid: return {}
s = process_status(pid, run)
if not s:
self._stop_monitor()
self._pids = {}
if path_exists(self._pidfile):
unlink(self._pidfile)
return s
def restart(self):
self.stop()
self.start()
####################
# Nginx
####################
class Nginx(Process):
def __init__(self, id=0, port=NGINX_PORT, monitor_database=None, base_url=""):
self._base_url = base_url
self._port = port
self._log = 'nginx-%s.log'%id
self._pid = 'nginx-%s.pid'%id
self._nginx_conf = 'nginx-%s.conf'%id
nginx_cmd = ['nginx', '-c', '../' + self._nginx_conf]
Process.__init__(self, id, name='nginx', port=self._port,
monitor_database = monitor_database,
logfile = os.path.join(LOGS, self._log),
pidfile = os.path.join(PIDS, self._pid),
start_cmd = nginx_cmd,
stop_cmd = nginx_cmd + ['-s', 'stop'],
reload_cmd = nginx_cmd + ['-s', 'reload'])
def _pre_start(self):
# Create and write conf file
conf = Template(open(os.path.join(CONF, 'nginx.conf')).read())
conf = conf.substitute(logfile=self._log, pidfile=self._pid,
http_port=self._port, base_url=self._base_url,
ifbase='#' if not self._base_url else '',
ifnobase='' if not self._base_url else '#')
writefile(filename=os.path.join(DATA, self._nginx_conf), content=conf)
# Write base_url javascript file, so clients have access to it
s = "salvus_base_url='%s'; /* autogenerated on nginx startup by admin.py */"%self._base_url
open(os.path.join(PWD, 'static/salvus_base_url.js'), 'w').write(s)
def __repr__(self):
return "Nginx process %s"%self._id
####################
# HAproxy
####################
class Haproxy(Process):
def __init__(self, id=0,
sitename=SITENAME, # name of site, e.g., 'cloud.sagemath.com' if site is https://cloud.sagemath.com; used only if insecure_redirect is set
accept_proxy_port=HAPROXY_PORT, # port that stunnel sends decrypted traffic to
insecure_redirect_port=None, # if set to a port number (say 80), then all traffic to that port is immediately redirected to the secure site
insecure_testing_port=None, # if set to a port, then gives direct insecure access to full site
nginx_servers=None, # list of ip addresses
hub_servers=None, # list of ip addresses
proxy_servers=None, # list of ip addresses
monitor_database=None,
conf_file='conf/haproxy.conf',
base_url=''):
pidfile = os.path.join(PIDS, 'haproxy-%s.pid'%id)
# WARNING: this is now ignored since I don't want to patch haproxy; instead logging goes to syslog...
logfile = os.path.join(LOGS, 'haproxy-%s.log'%id)
# randomize the order of the servers to get better distribution between them by all the different
# haproxies that are running on the edge machines. (Users may hit any of them.)
import random
if nginx_servers:
random.shuffle(nginx_servers)
t = Template('server nginx$n $ip:$port maxconn $maxconn check ')
nginx_servers = ' ' + ('\n '.join([t.substitute(n=n, ip=x['ip'], port=x.get('port', NGINX_PORT), maxconn=x.get('maxconn',10000)) for
n, x in enumerate(nginx_servers)]))
if hub_servers:
random.shuffle(hub_servers)
t = Template('server hub$n $ip:$port cookie server:$ip:$port check inter 4000 maxconn $maxconn')
hub_servers = ' ' + ('\n '.join([t.substitute(n=n, ip=x['ip'], port=x.get('port', HUB_PORT), maxconn=x.get('maxconn',10000)) for
n, x in enumerate(hub_servers)]))
if proxy_servers:
random.shuffle(proxy_servers)
t = Template('server proxy$n $ip:$port cookie server:$ip:$cookie_port check inter 4000 maxconn $maxconn')
# WARNING: note that cookie_port -- that is the port in the cookie's value, is set to be 1 less, i.e., it is
# the corresponding hub port. This could cause bugs/confusion if that convention were changed!!!!!! ***
# We do this so we can use the same cookie for both the hub and proxy servers, for efficiency.
proxy_servers = ' ' + ('\n '.join([t.substitute(n = n,
ip = x['ip'],
port = x.get('proxy_port', HUB_PROXY_PORT),
cookie_port = str(int(x.get('proxy_port', HUB_PROXY_PORT))-1),
maxconn = x.get('maxconn',10000)) for
n, x in enumerate(proxy_servers)]))
if insecure_redirect_port:
insecure_redirect = Template(
"""
frontend unsecured *:$port
redirect location https://$sitename
""").substitute(port=insecure_redirect_port, sitename=sitename)
else:
insecure_redirect=''
conf = Template(open(conf_file).read()).substitute(
accept_proxy_port = accept_proxy_port,
insecure_testing_bind = 'bind *:%s'%insecure_testing_port if insecure_testing_port else '',
nginx_servers = nginx_servers,
hub_servers = hub_servers,
proxy_servers = proxy_servers,
insecure_redirect = insecure_redirect,
base_url = base_url
)
haproxy_conf = 'haproxy-%s.conf'%id
target_conf = os.path.join(DATA, haproxy_conf)
writefile(filename=target_conf, content=conf)
Process.__init__(self, id, name='haproxy', port=accept_proxy_port,
pidfile = pidfile,
logfile = logfile, monitor_database = monitor_database,
start_using_system = True,
start_cmd = ['HAPROXY_LOGFILE='+logfile, os.path.join(BIN, 'haproxy'), '-D', '-f', target_conf, '-p', pidfile])
def _parse_pidfile(self, contents):
return int(contents.splitlines()[0])
####################
# Hub
####################
class Hub(Process):
def __init__(self,
id = 0,
host = '',
port = HUB_PORT,
proxy_port = HUB_PROXY_PORT,
monitor_database = None,
keyspace = 'salvus',
debug = False,
logfile = None,
pidfile = None,
base_url = None,
local = False):
self._port = port
if pidfile is None:
pidfile = os.path.join(PIDS, 'hub-%s.pid'%id)
if logfile is None:
logfile = os.path.join(LOGS, 'hub-%s.log'%id)
extra = []
if debug:
extra.append('-g')
if base_url:
extra.append('--base_url')
extra.append(base_url)
if local:
extra.append('--local')
Process.__init__(self, id, name='hub', port=port,
pidfile = pidfile,
logfile = logfile, monitor_database=monitor_database,
start_cmd = [os.path.join(PWD, 'hub'), 'start',
'--id', id,
'--port', port,
'--proxy_port', proxy_port,
'--keyspace', keyspace,
'--host', host,
'--database_nodes', monitor_database,
'--pidfile', pidfile,
'--logfile', logfile] + extra,
stop_cmd = [os.path.join(PWD, 'hub'), 'stop', '--id', id],
reload_cmd = [os.path.join(PWD, 'hub'), 'restart', '--id', id])
def __repr__(self):
return "Hub server %s on port %s"%(self.id(), self._port)
########################################
# Rethinkdb database daemon
########################################
class Rethinkdb(Process):
def __init__(self, path = None, id=None, http_port=9000, monitor_database=None, **kwds):
"""
id -- arbitrary identifier
path -- path to where the rethinkdb files are stored.
"""
path = os.path.join(DATA, 'rethinkdb-%s'%id) if path is None else path
makedirs(path)
logs_path = os.path.join(path, 'logs'); makedirs(logs_path)
pidfile = os.path.abspath(os.path.join(PIDS, 'rethinkdb-%s.pid'%id))
log_link_path = os.path.join(os.environ['HOME'], 'logs')
makedirs(log_link_path)
log_link = os.path.join(log_link_path, 'rethinkdb.log')
if not os.path.exists(log_link):
os.symlink(os.path.abspath(os.path.join(path, 'log_file')), log_link)
Process.__init__(self, id=id, name='rethinkdb', port=9160,
logfile = '%s/system.log'%logs_path,
pidfile = pidfile,
start_cmd = ['rethinkdb', '--directory', path, '--http-port', http_port,
'--pid-file', pidfile, '--daemon'],
monitor_database=monitor_database)
########################################
# Compute server daemon
########################################
class Compute(Process):
def __init__(self, path = '/projects/conf', port=0, id=None, **kwds):
"""
id -- arbitrary identifier
path -- path to where the rethinkdb files are stored.
"""
logs_path = os.path.join(path, 'logs'); makedirs(logs_path)
pidfile = os.path.abspath(os.path.join(path, 'compute.pid'))
logfile =os.path.abspath(os.path.join(path, 'compute.log'))
log_link_path = os.path.join(os.environ['HOME'], 'logs')
makedirs(log_link_path)
log_link = os.path.join(log_link_path, 'compute.log')
if not os.path.exists(log_link):
os.symlink(logfile, log_link)
Process.__init__(self, id=id,
name='compute',
port=port,
logfile = logfile,
pidfile = pidfile,
stop_cmd = ['compute', 'stop'],
start_cmd = ['compute', 'start',
'--port', port,
'--pidfile', pidfile,
'--logfile', logfile])
##############################################
# A Virtual Machine running at UW
##############################################
class Vm(Process):
def __init__(self, ip_address, hostname=None, vcpus=2, ram=4, vnc=0, disk='', base='salvus', id=0, monitor_database=None, name='virtual_machine', fstab=''):
"""
INPUT:
- ip_address -- ip_address machine gets on the VPN
- hostname -- hostname to set on the machine itself (if
not given, sets to something based on the ip address)
- vcpus -- number of cpus
- ram -- number of gigabytes of ram (an integer)
- vnc -- port of vnc console (default: 0 for no vnc)
- disk -- string 'name1:size1,name2:size2,...' with size in gigabytes
- base -- string (default: 'salvus'); name of base vm image
- id -- optional, defaulta:0 (basically ignored)
- monitor_database -- default: None
- name -- default: "virtual_machine"
"""
self._ip_address = ip_address
self._hostname = hostname
self._vcpus = vcpus
self._ram = ram
self._vnc = vnc
self._base = base
self._disk = disk
self._fstab = fstab.strip()
pidfile = os.path.join(PIDS, 'vm-%s.pid'%ip_address)
logfile = os.path.join(LOGS, 'vm-%s.log'%ip_address)
start_cmd = [PYTHON, 'vm.py', '-d', '--ip_address', ip_address,
'--pidfile', pidfile, '--logfile', logfile,
'--vcpus', vcpus, '--ram', ram,
'--vnc', vnc,
'--base', base] + \
(['--fstab', fstab] if self._fstab else []) + \
(['--disk', disk] if self._disk else []) + \
(['--hostname', self._hostname] if self._hostname else [])
stop_cmd = [PYTHON, 'vm.py', '--stop', '--logfile', logfile, '--ip_address', ip_address] + (['--hostname', self._hostname] if self._hostname else [])
Process.__init__(self, id=id, name=name, port=0,
pidfile = pidfile, logfile = logfile,
start_cmd = start_cmd,
stop_cmd = stop_cmd,
monitor_database=monitor_database,
term_signal = 2 # must use 2 (=SIGINT) instead of 15 or 9 for proper cleanup!
)
def stop(self):
Process.stop(self, force=True)
##############################################
# A Virtual Machine instance running on Google Compute Engine
##############################################
class Vmgce(Process):
def __init__(self, ip_address='', hostname='', instance_type='n1-standard-1', base='', disk='', zone='', id=0, monitor_database=None, name='gce_virtual_machine'):
"""
INPUT:
- ip_address -- ip_address machine gets on the VPN
- hostname -- hostname to set on the machine itself
- instance_type -- see https://cloud.google.com/products/compute-engine/#pricing
- disk -- string 'name1:size1,name2:size2,...' with size in gigabytes
- base -- string (default: use newest); name of base snapshot
- id -- optional, defaulta:0 (basically ignored)
- monitor_database -- default: None
- name -- default: "gce_virtual_machine"
"""
if not ip_address:
raise RuntimeError("you must specify the ip_address")
if not hostname:
raise RuntimeError("you must specify the hostname")
self._ip_address = ip_address
self._hostname = hostname
self._instance_type = instance_type
self._base = base
self._disk = disk
self._zone = zone
pidfile = os.path.join(PIDS, 'vm_gce-%s.pid'%ip_address)
logfile = os.path.join(LOGS, 'vm_gce-%s.log'%ip_address)
start_cmd = ([PYTHON, 'vm_gce.py',
'--daemon', '--pidfile', pidfile, '--logfile', logfile] +
(['--zone', zone] if zone else []) +
['start',
'--ip_address', ip_address,
'--type', instance_type] +
(['--base', base] if base else []) +
(['--disk', disk] if disk else []) + [self._hostname])
stop_cmd = [PYTHON, 'vm_gce.py'] + (['--zone', zone] if zone else []) + ['stop', self._hostname]
Process.__init__(self, id=id, name=name, port=0,
pidfile = pidfile, logfile = logfile,
start_cmd = start_cmd,
stop_cmd = stop_cmd,
monitor_database=monitor_database,
term_signal = 2 # must use 2 (=SIGINT) instead of 15 or 9 for proper cleanup!
)
def stop(self):
Process.stop(self, force=True)
#################################
# Classical Sage Notebook Server
#################################
class Sagenb(Process):
def __init__(self, address, path, port, pool_size=16, monitor_database=None, debug=True, id=0):
self._address = address # to listen on
self._path = path
self._port = port
pidfile = os.path.join(PIDS, 'sagenb-%s.pid'%port)
logfile = os.path.join(LOGS, 'sagenb-%s.log'%port)
Process.__init__(self, id, name='sage', port=port,
pidfile = pidfile,
logfile = logfile,
monitor_database = monitor_database,
start_cmd = [PYTHON, 'sagenb_server.py', '--daemon',
'--path', path,
'--port', port,
'--address', address,
'--pool_size', pool_size,
'--pidfile', pidfile,
'--logfile', logfile]
)
########################################
# tinc VPN management
########################################
def ping(hostname, count=3, timeout=2):
"""
Try to ping hostname count times, timing out if we do not
finishing after timeout seconds.
Return False if the ping fails. If the ping succeeds, return
(min, average, max) ping times in milliseconds.
"""
p = subprocess.Popen(['ping', '-t', str(timeout), '-c', str(count), hostname],
stdin=subprocess.PIPE, stdout = subprocess.PIPE,
stderr=subprocess.PIPE)
if p.wait() == 0:
r = p.stdout.read()
i = r.rfind('=')
v = [float(t) for t in r[i+1:].strip().split()[0].split('/')]
return v[0], v[1], v[2]
else:
return False # fail
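# For example (sketch): ping('10.1.1.5', count=1, timeout=2) returns False if
# the host is unreachable, or a triple like (0.3, 0.4, 0.6) of min/avg/max
# round-trip times in milliseconds.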
def tinc_conf(ip_address):
"""
Configure tinc on this machine, so it can be part of the VPN.
-- ip_address -- address this machine gets on the vpn
"""
SALVUS = os.path.realpath(__file__)
os.chdir(os.path.split(SALVUS)[0])
# make sure the directories are there
TARGET = 'data/local/etc/tinc'
if os.path.exists(TARGET):
print "deleting '%s'"%TARGET
shutil.rmtree(TARGET)
for path in [TARGET, 'data/local/var/run']: # .../run used for pidfile
if not os.path.exists(path):
os.makedirs(path)
# create symbolic link to hosts directory in salvus git repo
os.symlink(os.path.join('../../../../conf/tinc_hosts'),
os.path.join(TARGET, 'hosts'))
# determine what our external ip address is
external_ip = misc.local_ip_address(dest='8.8.8.8')
# determine our hostname
hostname = socket.gethostname()
# Create the tinc-up script
tinc_up = os.path.join(TARGET, 'tinc-up')
open(tinc_up,'w').write(
"""#!/bin/sh
ifconfig $INTERFACE %s netmask 255.0.0.0
"""%ip_address)
os.chmod(tinc_up, stat.S_IRWXU)
# Create tinc.conf
tinc_conf = open(os.path.join(TARGET, 'tinc.conf'),'w')
tinc_conf.write('Name = %s\n'%hostname)
for h in os.listdir(os.path.join(TARGET, 'hosts')):
if "Address" in open(os.path.join(TARGET, 'hosts', h)).read():
tinc_conf.write('ConnectTo = %s\n'%h)
# on OS X, we need this, but otherwise we don't:
if os.uname()[0] == "Darwin":
tinc_conf.write('Device = /dev/tap0\n')
tinc_conf.close()
host_file = os.path.join(TARGET, 'hosts', hostname)
open(host_file,'w').write(
"""Address = %s
Subnet = %s/32"""%(external_ip, ip_address))
# generate keys
print sh['data/local/sbin/tincd', '-K']
# add file to git and checkin, then push to official repo
gitaddr = "[email protected]:williamstein/salvus.git"
print sh['git', 'pull', gitaddr]
print sh['git', 'add', os.path.join('conf/tinc_hosts', hostname)]
print sh['git', 'commit', '-a', '-m', 'tinc config for %s'%hostname]
print sh['git', 'push', gitaddr]
print "To join the vpn on startup,"
print "add this line to /etc/rc.local:\n"
print " nice --19 /home/salvus/salvus/salvus/data/local/sbin/tincd"
print "You *must* also pull the git repo on"
print "at least one of the ConnectTo machines to connect."
########################################
# Grouped collection of hosts
# See the files conf/hosts* for examples.
# The format is
# [group1]
# hostname1
# hostname2
# [group2]
# hostname3
# hostname1 # repeats allowed, comments allowed
########################################
def parse_groupfile(filename):
groups = {None:[]}
group = None
group_opts = []
ordered_group_names = []
namespace = {}
namespace['os'] = os
for r in open(filename).xreadlines():
line = r.split('#')[0].strip() # ignore comments and leading/trailing whitespace
if line: # ignore blank lines
if line.startswith('import ') or '=' in line:
# import modules for use in assignments below
print "exec ", line
exec line in namespace
continue
i = line.find(' ')
if i == -1:
opts = {}
name = line
else:
name = line[:i]
opts = eval(line[i+1:], namespace)
if name.startswith('['): # host group
group = name.strip(' []')
group_opts = opts
groups[group] = []
ordered_group_names.append(group)
else:
opts.update(group_opts)
groups[group].append((name, opts))
for k in sorted(namespace.keys()):
if not k.startswith('_') and k not in ['os']:
print "%-20s = %s"%(k, namespace[k])
return groups, ordered_group_names
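# Example (illustrative file contents): parsing
#   [web]
#   host1 {'maxconn':10000}
#   [db]
#   host2
# returns ({None: [], 'web': [('host1', {'maxconn': 10000})], 'db': [('host2', {})]},
#          ['web', 'db'])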
def parse_hosts_file(filename):
ip = {} # ip = dictionary mapping from hostname to a list of ip addresses
hn = {} # hn = canonical hostnames for each ip address
for r in open(filename).readlines():
line = r.split('#')[0].strip() # ignore comments and leading/trailing whitespace
v = line.split()
if len(v) == 0: continue
if len(v) <= 1:
raise ValueError("parsing hosts file -- invalid line '%s'"%r)
address = v[0]
hostnames = v[1:]
hn[address] = hostnames[-1]
for h in hostnames:
if len(h) < 1 or len(h) > 63 or not (h.replace('-','').isalnum()):
raise RuntimeError("invalid hostname: must be at most 63 characters from a-z, 0-9, or -")
if h in ip:
ip[h].append(address)
else:
ip[h] = [address]
# make ip address lists canonical
ip = dict([(host, list(sorted(set(addresses)))) for host, addresses in ip.iteritems()])
return ip, hn
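# Example (illustrative): the hosts-file line "10.1.1.5  web1 cloud1" yields
#   ip == {'web1': ['10.1.1.5'], 'cloud1': ['10.1.1.5']}
#   hn == {'10.1.1.5': 'cloud1'}   # the last hostname on a line is canonical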
class Hosts(object):
"""
Defines a set of hosts on a network and provides convenient tools
for running commands on them using ssh.
"""
def __init__(self, hosts_file, username=whoami, passwd=True, password=None):
"""
- passwd -- if False, don't ask for a password; in this case nothing must require sudo to
run, and all logins must work using ssh with keys
"""
self._ssh = {}
self._username = username
self._password = password
self._passwd = passwd
self._ip_addresses, self._canonical_hostnames = parse_hosts_file(hosts_file)
def __getitem__(self, hostname):
"""
Return the list of distinct ip addresses matching the given hostname. If the hostname
is an ip address defined in the hosts file, return [hostname].
"""
v = hostname.split()
if len(v) > 1:
return list(sorted(set(sum([self[q] for q in v], []))))
if hostname in self._canonical_hostnames.keys(): # it is already a known ip address
return [hostname]
if hostname == 'all': # return all ip addresses
return list(sorted(self._canonical_hostnames.keys()))
if hostname in self._ip_addresses:
return self._ip_addresses[hostname]
raise ValueError("unknown ip hostname or address '%s'"%hostname)
def hostname(self, ip):
return self._canonical_hostnames[ip]
def is_valid_hostname(self, hostname):
return hostname in self._canonical_hostnames # NB: the keys here are the known ip addresses (see parse_hosts_file)
def password(self, retry=False):
if not self._passwd:
log.info("Explicitly skipping asking for password, due to passwd=False option.")
return self._password
if self._password is None or retry:
import getpass
self._password = getpass.getpass("%s's password: "%self._username)
return self._password
def ssh(self, hostname, timeout=10, keepalive=None, use_cache=True, username=None):
if username is None:
username = self._username
key = (hostname, username)
if use_cache and key in self._ssh:
return self._ssh[key]
import paramiko
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(hostname=hostname, username=username, password=self._password, timeout=timeout)
if keepalive:
ssh.get_transport().set_keepalive(keepalive)
self._ssh[key] = ssh
return ssh
def _do_map(self, callable, address, **kwds):
log.info('%s (%s):', address, self.hostname(address))
x = callable(address, **kwds)
log.info(x)
return x
def map(self, callable, hostname, parallel=True, **kwds):
# ask for the password up front -- it cannot be prompted for inside worker threads
self.password()
def f(address, **kwds):
return ((address, self.hostname(address)), self._do_map(callable, address, **kwds))
if parallel:
return misc.thread_map(f, [((address,), kwds) for address in self[hostname]])
else:
return [f(address, **kwds) for address in self[hostname]]
def ping(self, hostname='all', timeout=3, count=3, parallel=True):
"""
Return list of pairs ((ip, hostname), ping_time) of those that succeed at pinging
and a list of pairs ((ip, hostname), False) for those that do not.
"""
v = self.map(ping, hostname, timeout=timeout, count=count, parallel=parallel)
return [x for x in v if x[1] is not False], [x for x in v if x[1] is False]
def ip_addresses(self, hostname):
return [socket.gethostbyname(h) for h in self[hostname]]
def exec_command(self, hostname, command, sudo=False, timeout=90, wait=True, parallel=True, username=None, verbose=True):
def f(hostname):
try:
return self._exec_command(command, hostname, sudo=sudo, timeout=timeout, wait=wait, username=username, verbose=verbose)
except Exception, msg:
return {'stdout':'', 'stderr':'Error connecting -- %s: %s'%(hostname, msg)}
return dict(self.map(f, hostname=hostname, parallel=parallel))
def __call__(self, *args, **kwds):
"""
>>> self(hostname, command)
"""
result = self.exec_command(*args, **kwds)
if kwds.get('verbose',True):
for h,v in result.iteritems():
print '%s :'%(h,),
print v.get('stdout',''),
print v.get('stderr',''),
print
return result
def _exec_command(self, command, hostname, sudo, timeout, wait, username=None, verbose=True):
if not self._passwd:
# never use sudo if self._passwd is false...
sudo = False
start = time.time()
ssh = self.ssh(hostname, username=username, timeout=timeout)
try:
chan = ssh.get_transport().open_session()
except:
# try again, in case the remote machine got rebooted or something...
chan = self.ssh(hostname, username=username, timeout=timeout, use_cache=False).get_transport().open_session()
stdin = chan.makefile('wb')
stdout = chan.makefile('rb')
stderr = chan.makefile_stderr('rb')
cmd = ('sudo -S bash -c "%s"' % command.replace('"', '\\"')) if sudo else command
log.info("hostname=%s, command='%s'", hostname, cmd)
chan.exec_command(cmd)
if sudo and not stdin.channel.closed:
try:
print "sending sudo password..."
stdin.write('%s\n' % self.password()); stdin.flush()
except:
pass # could have closed in the meantime if password cached
if not wait:
return {'stdout':None, 'stderr':None, 'exit_status':None, 'note':"wait=False: '%s'"%cmd}
while not stdout.channel.closed:
time.sleep(0.05)
if time.time() - start >= timeout:
raise RuntimeError("on %s@%s command '%s' timed out"%(self._username, hostname, command))
return {'stdout':stdout.read(), 'stderr':stderr.read(), 'exit_status':chan.recv_exit_status()}
def public_ssh_keys(self, hostname, timeout=5):
return '\n'.join([x['stdout'] for x in self.exec_command(hostname, 'cat .ssh/id_rsa.pub', timeout=timeout).values()])
def git_pull(self, hostname, repo=GIT_REPO, timeout=60):
return self(hostname, 'cd salvus && git pull %s'%repo, timeout=timeout)
def build(self, hostname, pkg_name, timeout=250):
return self(hostname, 'cd $HOME/salvus/salvus && . ./salvus-env && ./build.py --build_%s'%pkg_name, timeout=timeout)
def python_c(self, hostname, cmd, timeout=60, sudo=False, wait=True):
command = 'cd \"$HOME/salvus/salvus\" && . ./salvus-env && python -c "%s"'%cmd
log.info("python_c: %s", command)
return self(hostname, command, sudo=sudo, timeout=timeout, wait=wait)
def apt_upgrade(self, hostname):
# some nodes (e.g., sage nodes) have a firewall that disables upgrading via apt,
# so we temporarily disable it.
try:
return self(hostname,'ufw --force disable && apt-get update && apt-get -y upgrade', sudo=True, timeout=120)
# very important to re-enable the firewall, no matter what!
finally:
self(hostname,'ufw --force enable', sudo=True, timeout=120)
def apt_install(self, hostname, pkg):
# EXAMPLE: hosts.apt_install('cassandra', 'openjdk-7-jre')
try:
return self(hostname, 'ufw --force disable && apt-get -y --force-yes install %s'%pkg, sudo=True, timeout=120)
finally:
self(hostname,'ufw --force enable', sudo=True, timeout=120)
def reboot(self, hostname):
return self(hostname, 'reboot -h now', sudo=True, timeout=5)
def ufw(self, hostname, commands):
if self[hostname] == ['127.0.0.1']:
print "Not enabling firewall on 127.0.0.1"
return
cmd = ' && '.join(['/home/salvus/salvus/salvus/scripts/ufw_clear'] + ['ufw disable'] +
['ufw default allow incoming'] + ['ufw default allow outgoing'] + ['ufw --force reset']
+ ['ufw ' + c for c in commands] +
(['ufw --force enable'] if commands else []))
return self(hostname, cmd, sudo=True, timeout=10, wait=False)
def nodetool(self, args='', hostname='cassandra', wait=False, timeout=120, parallel=False):
for k, v in self(hostname, 'salvus/salvus/data/local/cassandra/bin/nodetool %s'%args, timeout=timeout, wait=wait, parallel=parallel).iteritems():
print k
print v.get('stdout','')
def nodetool_repair(self):
# timeout is long since each repair can take quite a while; also, we wait, since we're supposed to do one at a time.
self.nodetool('repair', wait=True, timeout=36*60*60)
def nodetool_snapshot(self):
# we are supposed to do snapshots all at once.
self.nodetool('snapshot', wait=False, timeout=120, parallel=True)
#########################################################
# SFTP support
#########################################################
def put(self, hostname, local_filename, remote_filename=None, timeout=5):
if remote_filename is None:
remote_filename = local_filename
for hostname in self[hostname]:
sftp = self.ssh(hostname, timeout=timeout).open_sftp()
log.info('put: %s --> %s:%s', local_filename, hostname, remote_filename)
sftp.put(local_filename, remote_filename)
def putdir(self, hostname, local_path, remote_containing_path='.', timeout=5):
# recursively copy over the local_path directory tree so that it is contained
# in remote_containing_path on the target
for hostname in self[hostname]:
sftp = self.ssh(hostname, timeout=timeout).open_sftp()
self._mkdir(sftp, remote_containing_path)
for dirpath, dirnames, filenames in os.walk(local_path):
print dirpath, dirnames, filenames
self._mkdir(sftp, os.path.join(remote_containing_path, dirpath))
for name in filenames:
local = os.path.join(dirpath, name)
remote = os.path.join(remote_containing_path, dirpath, name)
log.info('put: %s --> %s:%s', local, hostname, remote)
sftp.put(local, remote)
def get(self, hostname, remote_filename, local_filename=None, timeout=5):
if local_filename is None:
local_filename = remote_filename
ssh = self.ssh(hostname, timeout=timeout)
sftp = ssh.open_sftp()
sftp.get(remote_filename, local_filename)
# If I want to implement recursive get of directory: http://stackoverflow.com/questions/6674862/recursive-directory-download-with-paramiko
def rmdir(self, hostname, path, timeout=10):
# this is a very dangerous function!
self(hostname, 'rm -rf "%s"'%path, timeout=timeout)
def _mkdir(self, sftp, path, mode=0o40700):
try:
sftp.mkdir(path, mode)
except IOError:
from stat import S_ISDIR
if not S_ISDIR(sftp.stat(path).st_mode):
raise IOError("remote '%s' (on %s) exists and is not a path"%(path, hostname))
def mkdir(self, hostname, path, timeout=10, mode=0o40700): # default mode is restrictive=user only, on general principle.
for hostname in self[hostname]:
ssh = self.ssh(hostname, timeout=timeout)
sftp = ssh.open_sftp()
self._mkdir(sftp, path, mode)
def unlink(self, hostname, filename, timeout=10):
for hostname in self[hostname]:
ssh = self.ssh(hostname, timeout=timeout)
sftp = ssh.open_sftp()
try:
sftp.remove(filename)
except:
pass # file doesn't exist
class Monitor(object):
def __init__(self, hosts, services):
self._hosts = hosts
self._services = services # used for self-healing
def compute(self):
ans = []
c = 'nproc && uptime && free -g && ps -C node -o args=|grep "local_hub.js run" |wc -l && cd salvus/salvus; . salvus-env'
for k, v in self._hosts('compute', c, wait=True, parallel=True, timeout=120).iteritems():
d = {'host':k[0], 'service':'compute'}
stdout = v.get('stdout','')
m = stdout.splitlines()
if v.get('exit_status',1) != 0 or len(m) < 7:
d['status'] = 'down'
else:
d['status'] = 'up'
d['nproc'] = int(m[0])
z = m[1].replace(',','').split()
d['load1'] = float(z[-3]) / d['nproc']
d['load5'] = float(z[-2]) / d['nproc']
d['load15'] = float(z[-1]) / d['nproc']
z = m[3].split()
d['ram_used_GB'] = int(z[2])
d['ram_free_GB'] = int(z[3])
d['nprojects'] = int(m[6])
ans.append(d)
w = [(-d.get('load15',0), d) for d in ans]
w.sort()
return [y for x,y in w]
def database(self):
ans = []
c = 'pidof rethinkdb'
for k, v in self._hosts('database', c, wait=True, parallel=True, timeout=120).iteritems():
d = {'host':k[0], 'service':'database'}
if v.get('exit_status',1) != 0 :
d['status'] = 'down'
else:
d['status'] = 'up'
ans.append(d)
return ans
def hub(self):
ans = []
cmd = 'export TERM=vt100; cd salvus/salvus&& . salvus-env && check_hub && check_hub_block |tail -1'
for k, v in self._hosts('hub', cmd, wait=True, parallel=True, timeout=60).iteritems():
d = {'host':k[0], 'service':'hub'}
if v['exit_status'] != 0 or v['stderr']:
    d['status'] = 'down'
    ans.append(d)  # record the down hub rather than silently dropping it
    continue
for x in v['stdout'].splitlines()[:5]:
i = x.find(' ')
if i != -1:
d[x[:i]] = x[i:].strip()
if 'sign_in_timeouts' in d:
d['sign_in_timeouts'] = int(d['sign_in_timeouts'])
if 'db_errors' in d:
d['db_errors'] = int(d['db_errors'])
if 'concurrent_warn' in d:
d['concurrent_warn'] = int(d['concurrent_warn'])
d['status'] = 'up'
# use .get(): these fields are only present when check_hub reported them
if d.get('etime') == 'ELAPSED':
    d['status'] = 'down'
if d.get('sign_in_timeouts', 0) > 4:
    d['status'] = 'down' # demands attention!
if d.get('db_errors', 0) > 0:
    d['status'] = 'down' # demands attention!
if d.get('concurrent_warn', 0) > 0:
    d['status'] = 'down' # demands attention!
try:
d['block'] = int(v['stdout'].splitlines()[3].split()[-1].rstrip('ms'))
except: pass
ans.append(d)
def f(x,y):
if x['status'] == 'down':
return -1
if y['status'] == 'down':
return 1
if 'loadavg' in x and 'loadavg' in y:
return -cmp(float(x['loadavg'].split()[0]), float(y['loadavg'].split()[0]))
return -1
ans.sort(f)
return ans
def load(self):
"""
Return normalized load on *everything*, sorted by highest current load first.
"""
ans = []
for k, v in self._hosts('all', 'nproc && uptime', parallel=True, wait=True, timeout=80).iteritems():
d = {'host':k[0]}
m = v.get('stdout','').splitlines()
if v.get('exit_status',1) != 0 or len(m) < 2:
d['status'] = 'down'
else:
d['status'] = 'up'
d['nproc'] = int(m[0])
z = m[1].replace(',','').split()
d['load1'] = float(z[-3])/d['nproc']
d['load5'] = float(z[-2])/d['nproc']
d['load15'] = float(z[-1])/d['nproc']
ans.append(d)
w = [(-d['load15'], d) for d in ans]
w.sort()
return [y for x,y in w]
def pingall(self, hosts='all', on=None):
v = []
for x in hosts.split():
try:
v += self._hosts[x]
except ValueError:
v.append(x)
c = 'pingall ' + ' '.join(v)
if on is not None:
c = 'ssh %s "cd salvus/salvus && . salvus-env && %s"'%(on, c)
print c
s = os.popen(c).read()
print s
return json.loads(s)
def disk_usage(self, hosts='all', disk_threshold=96):
"""
Check that no disk is more than disk_threshold percent full (default: 96%).
"""
cmd = "df --output=pcent,source |grep -v fuse | sort -n|tail -1"
ans = []
for k, v in self._hosts(hosts, cmd, parallel=True, wait=True, timeout=30).iteritems():
d = {'host':k[0], 'service':'disk_usage'}
percent = int((v.get('stdout','100') + ' 0').split()[0].strip().strip('%'))
d['percent'] = percent
if percent > disk_threshold:
d['status'] = 'down'
print k,v
else:
d['status'] = 'up'
ans.append(d)
w = [((-d['percent'],d['host']),d) for d in ans]
w.sort()
return [y for x,y in w]
def dns(self, hosts='all', rounds=1):
"""
Verify that DNS is working well on all machines.
"""
cmd = '&&'.join(["host -v google.com > /dev/null"]*rounds) + "; echo $?"
ans = []
exclude = set([]) # set(self._hosts['cellserver']) # + self._hosts['webdev'])
h = ' '.join([host for host in self._hosts[hosts] if host not in exclude])
if not h:
return []
for k, v in self._hosts(h, cmd, parallel=True, wait=True, timeout=30).iteritems():
d = {'host':k[0], 'service':'dns'}
exit_code = v.get('stdout','').strip()
if exit_code == '':
exit_code = '1'
if exit_code=='1' or v.get('exit_status',1) != 0:
d['status'] = 'down'
print k,v
else:
d['status'] = 'up'
ans.append(d)
w = [((d.get('status','down'),d['host']),d) for d in ans]
w.sort()
return [y for x,y in w]
def stats(self, timeout=90):
"""
Get all ip addresses that SITENAME resolves to, then verify that https://ip_address/stats returns
valid data, for each ip. This tests that all stunnel and haproxy servers are running.
"""
ans = []
import urllib2, ssl
ctx = ssl.create_default_context() # see http://stackoverflow.com/questions/19268548/python-ignore-certicate-validation-urllib2
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
try:
for ip_address in dns(SITENAME, timeout):
entry = {'host':ip_address, 'service':'stats'}
ans.append(entry)
try:
# site must return and be valid json
json.loads(urllib2.urlopen('https://%s/stats'%ip_address, timeout=timeout, context=ctx).read())
entry['status'] = 'up'
except: # urllib2.URLError: # there are other possible errors
entry['status'] = 'down'
except (RuntimeError, ValueError):
ans = [{'host':SITENAME, 'service':'stats', 'status':'down'}]
w = [(d.get('status','down'),d) for d in ans]
w.sort()
return [y for x,y in w]
def all(self):
return {
'timestamp' : time.time(),
'disk_usage' : self.disk_usage(),
'dns' : self.dns(),
'load' : self.load(),
'hub' : self.hub(),
'stats' : self.stats(),
'compute' : self.compute(),
'database' : self.database()
}
def down(self, all):
# Make a list of down services
down = []
for service, v in all.iteritems():
if isinstance(v, list):
for x in v:
if x.get('status','') == 'down':
down.append(x)
return down
def print_status(self, all=None, n=9):
if all is None:
all = self.all( )
print "TIME: " + time.strftime("%Y-%m-%d %H:%M:%S")
#print "DNS"
#for x in all['dns'][:n]:
# print x
print "HUB"
for x in all['hub'][:n]:
print x
print "DATABASE"
for x in all['database'][:n]:
print x
print "DISK USAGE"
for x in all['disk_usage'][:n]:
print x
print "LOAD"
for x in all['load'][:n]:
print x
print "STATS"
for x in all['stats'][:n]:
print x
print "COMPUTE"
vcompute = all['compute']
print "%s projects running"%(sum([x.get('nprojects',0) for x in vcompute]))
for x in all['compute'][:n]:
print x
def _go(self):
all = self.all()
self.print_status(all=all)
down = self.down(all=all)
m = ''
if len(down) > 0:
m += "The following are down: %s"%down
for x in all['load']:
if x['load15'] > 400:
m += "A machine is going *crazy* with load!: %s"%x
#for x in all['zfs']:
# if x['nproc'] > 10000:
# m += "Large amount of ZFS: %s"%x
if m:
try:
email(m, subject="SMC issue")
except Exception, msg:
print "Failed to send email! -- %s\n%s"%(msg, m)
def go(self, interval=5, residue=0):
"""
Run a full monitor scan when the current time in *minutes* since the epoch
is congruent to residue modulo interval.
"""
self._services._hosts.password() # ensure known for self-healing
import time
last_time = 0
i = 0
while True:
now = int(time.time()/60) # minutes since epoch
if now != last_time:
#print "%s minutes since epoch"%now
if now % interval == residue:
last_time = now
try:
self._go()
except:
print sys.exc_info()[:2]
print "ERROR"
time.sleep(20)
class Services(object):
def __init__(self, path, username=whoami, keyspace='salvus', passwd=True, password=""):
"""
- passwd -- if False, don't ask for a password; in this case nothing must require sudo to
run, and all logins must work using ssh with keys
"""
self._keyspace = keyspace
self._path = path
self._username = username
self._hosts = Hosts(os.path.join(path, 'hosts'), username=username, passwd=passwd, password=password)
self._services, self._ordered_service_names = parse_groupfile(os.path.join(path, 'services'))
del self._services[None]
self.monitor = Monitor(Hosts(os.path.join(path, 'hosts'), username=username, passwd=False), services = self)
# this is the canonical list of options, expanded out by service and host.
def hostopts(service, query='all', copy=True):
"""Return list of pairs (hostname, options) defined in the services file, where
the hostname matches the given hostname/group"""
restrict = set(self._hosts[query])
return sum([[(h, dict(opts) if copy else opts) for h in self._hosts[query] if h in restrict]
for query, opts in self._services[service]], [])
self._options = dict([(service, hostopts(service)) for service in self._ordered_service_names])
def _all(self, callable, reverse=False):
names = self._ordered_service_names
return dict([(s, callable(s)) for s in (reversed(names) if reverse else names)])
def start(self, service, host='all', wait=True, parallel=False, **opts):
if service == 'all':
return self._all(lambda x: self.start(x, host=host, wait=wait, **opts), reverse=False)
return self._action(service, 'start', host, opts, wait=wait, parallel=parallel)
def stop(self, service, host='all', wait=True, parallel=False, **opts):
if service == 'all':
return self._all(lambda x: self.stop(x, host=host, wait=wait, **opts), reverse=True)
return self._action(service, 'stop', host, opts, wait, parallel=parallel)
def status(self, service, host='all', wait=True, parallel=False, **opts):
if service == 'all':
return self._all(lambda x: self.status(x, host=host, wait=True, **opts), reverse=False)
return self._action(service, 'status', host, opts, wait=True, parallel=parallel)
def restart(self, service, host='all', wait=True, reverse=True, parallel=False, **opts):
if service == 'all':
return self._all(lambda x: self.restart(x, host=host, reverse=reverse, wait=wait, **opts), reverse=reverse)
return self._action(service, 'restart', host, opts, wait, parallel=parallel)
def wait_until_up(self, host='all'):
while True:
v = self._hosts.ping(host)[1]
if not v: return
log.info("Waiting for %s"%(v,))
def _action(self, service, action, host, opts, wait, parallel):
if service not in self._services:
raise ValueError("unknown service '%s'"%service)
name = service.capitalize()
if name=='Compute' or not hasattr(self, '_cassandra'):
def db_string(address):
return ""
else:
def db_string(address):
dc = self.ip_address_to_dc(self._hosts[address][0])
if dc == -1:
return "monitor_database=''"
else:
return "monitor_database='%s'"%(','.join(self.cassandras_in_dc(dc)))
v = self._hostopts(service, host, opts)
self._hosts.password() # can't get password in thread
w = [((name, action, address, options, db_string(address), wait),{}) for address, options in v]
if parallel:
return misc.thread_map(self._do_action, w)
else:
return [self._do_action(*args, **kwds) for args, kwds in w]
def _hostopts(self, service, hostname, opts):
"""
Return copy of pairs (hostname, options_dict) for the given
service, restricted by the given hostname.
"""
hosts = set(self._hosts[hostname])
opts1 = set(opts.iteritems())
return [(h,dict(o)) for h,o in self._options[service] if h in hosts and opts1.issubset(set([(x,y) for x, y in o.iteritems() if x in opts]))]
def _do_action(self, name, action, address, options, db_string, wait):
if 'sudo' in options:
sudo = True
del options['sudo']
else:
sudo = False
if 'timeout' in options:
timeout = options['timeout']
del options['timeout']
else:
timeout = 60
for t in ['hub', 'nginx', 'proxy']:
s = '%s_servers'%t
if s in options:
# restrict to the subset of servers in the same data center
dc = self.ip_address_to_dc(address)
options[s] = [dict(x) for x in options[s] if self.ip_address_to_dc(x['ip']) == dc]
# turn the ip's into hostnames
for x in options[s]:
x['ip'] = self._hosts.hostname(x['ip'])
if 'id' not in options:
options['id'] = 0
if 'monitor_database' in options:
db_string = ''
elif db_string.strip():
db_string = db_string + ', '
cmd = "import admin; print admin.%s(%s**%r).%s()"%(name, db_string, options, action)
if name == "Cassandra":
self.cassandra_firewall(address, action)
ret = self._hosts.python_c(address, cmd, sudo=sudo, timeout=timeout, wait=wait)
if name == "Compute":
log.info("Recording compute server in database")
# TODO...
return (address, self._hosts.hostname(address), options, ret)<|fim▁end|> | self._pids = {}
self._logfile = logfile
self._monitor_database = monitor_database |
<|file_name|>map.py<|end_file_name|><|fim▁begin|>from flask import Response
from flask.views import View
from bson import json_util
from mcp import mongo
class Map(View):
def dispatch_request(self, komuna, viti):
json = mongo.db.procurements.aggregate([
{
"$match": {
"komuna.slug": komuna,
"viti": viti,
"kompania.selia.slug": {'$ne': ''}
}
},
{
"$group": {
"_id": {
"selia": "$kompania.selia.slug",
"emri": "$kompania.selia.emri",
"gjeresi": "$kompania.selia.kordinatat.gjeresi",
"gjatesi": "$kompania.selia.kordinatat.gjatesi",
},
"cmimi": {
"$sum": "$kontrata.qmimi"
},
"vlera": {
"$sum": "$kontrata.vlera"
},
"numriKontratave": {
"$sum": 1
}
}
},
{
"$sort": {
"_id.selia": 1
}
},
{
"$project": {
"selia": "$_id.selia",
"emri": "$_id.emri",
"gjeresia": "$_id.gjeresi",
"gjatesia": "$_id.gjatesi",
"cmimi": "$cmimi",
"vlera": "$vlera",
"numriKontratave": "$numriKontratave",
"_id": 0
}
}
])
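# The pipeline above groups contracts by the contractor's seat (selia) and sums
# price (cmimi), value (vlera) and contract count per seat. The second pipeline
# below reduces those per-seat sums to min/max bounds, used for map scaling.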
json_min_max = mongo.db.procurements.aggregate([
{
"$match": {
"komuna.slug": komuna,
"viti": viti,
"kompania.selia.slug": {'$ne': ''}
}
},
{
"$group": {
"_id": {
"selia": "$kompania.selia.slug",
"gjeresi": "$kompania.selia.kordinatat.gjeresi",
"gjatesi": "$kompania.selia.kordinatat.gjatesi",
},
"sumCmimi": {
"$sum": "$kontrata.qmimi"
},
"sumVlera": {
"$sum": "$kontrata.vlera"
},
"sumNumriKontratave": {
"$sum": 1
}
}
},
{
"$group": {
"_id": {},
"maxCmimi": {
"$max": "$sumCmimi"
},
"maxVlera": {
"$max": "$sumVlera"
},
"maxNumriKontratave": {
"$max": "$sumNumriKontratave"
},
"minCmimi": {
"$min": "$sumCmimi"
},
"minVlera": {
"$min": "$sumVlera"
},
"minNumriKontratave": {
"$min": "$sumNumriKontratave"
},
}
},
{
"$project": {
"_id": 0,
"vlera": {
"min": "$minVlera",
"max": "$maxVlera",
},
"cmimi": {
"min": "$minCmimi",
"max": "$maxCmimi",
},
"numriKontratave": {
"min": "$minNumriKontratave",
"max": "$maxNumriKontratave",
}
}
}
])
# store the returned response, converted to JSON via json_util.dumps(), in resp
<|fim▁hole|> result_json['bounds'] = json_min_max['result'][0]
result_json['result'] = json['result']
resp = Response(
response=json_util.dumps(result_json),
mimetype='application/json')
return resp<|fim▁end|> | result_json = {}; |
<|file_name|>pipeline_fragment.py<|end_file_name|><|fim▁begin|>#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Module to build pipeline fragment that produces given PCollections.
For internal use only; no backwards-compatibility guarantees.
"""
from __future__ import absolute_import
import apache_beam as beam
from apache_beam.pipeline import PipelineVisitor
from apache_beam.testing.test_stream import TestStream
class PipelineFragment(object):
"""A fragment of a pipeline definition.
A pipeline fragment is built from the original pipeline definition to include
only PTransforms that are necessary to produce the given PCollections.
"""
def __init__(self, pcolls, options=None):
"""Constructor of PipelineFragment.
Args:
pcolls: (List[PCollection]) a list of PCollections to build pipeline
fragment for.
options: (PipelineOptions) the pipeline options for the implicit
pipeline run.
"""
assert len(pcolls) > 0, (
'Need at least 1 PCollection as the target data to build a pipeline '
'fragment that produces it.')
for pcoll in pcolls:
assert isinstance(pcoll, beam.pvalue.PCollection), (
'{} is not an apache_beam.pvalue.PCollection.'.format(pcoll))
# No modification to self._user_pipeline is allowed.
self._user_pipeline = pcolls[0].pipeline
# These are user PCollections. Do not use them to deduce anything that
# will be executed by any runner. Instead, use
# `self._runner_pcolls_to_user_pcolls.keys()` to get copied PCollections.
self._pcolls = set(pcolls)
for pcoll in self._pcolls:
assert pcoll.pipeline is self._user_pipeline, (
'{} belongs to a different user pipeline than other PCollections '
'given and cannot be used to build a pipeline fragment that produces '
'the given PCollections.'.format(pcoll))
self._options = options
# A copied pipeline instance for modification without changing the user
# pipeline instance held by the end user. This instance can be processed
# into a pipeline fragment that later run by the underlying runner.
self._runner_pipeline = self._build_runner_pipeline()
_, self._context = self._runner_pipeline.to_runner_api(
return_context=True, use_fake_coders=True)
from apache_beam.runners.interactive import pipeline_instrument as instr
self._runner_pcoll_to_id = instr.pcolls_to_pcoll_id(
self._runner_pipeline, self._context)
# Correlate components in the runner pipeline to components in the user
# pipeline. The target pcolls are the pcolls given and defined in the user
# pipeline.
self._id_to_target_pcoll = self._calculate_target_pcoll_ids()
self._label_to_user_transform = self._calculate_user_transform_labels()
# Below will give us the 1:1 correlation between
# PCollections/AppliedPTransforms from the copied runner pipeline and
# PCollections/AppliedPTransforms from the user pipeline.
# (Dict[PCollection, PCollection])
(
self._runner_pcolls_to_user_pcolls,
# (Dict[AppliedPTransform, AppliedPTransform])
self._runner_transforms_to_user_transforms
) = self._build_correlation_between_pipelines(
self._runner_pcoll_to_id,
self._id_to_target_pcoll,
self._label_to_user_transform)
# Below are operated on the runner pipeline.
(self._necessary_transforms,
self._necessary_pcollections) = self._mark_necessary_transforms_and_pcolls(
self._runner_pcolls_to_user_pcolls)
self._runner_pipeline = self._prune_runner_pipeline_to_fragment(
self._runner_pipeline, self._necessary_transforms)
def deduce_fragment(self):
"""Deduce the pipeline fragment as an apache_beam.Pipeline instance."""
return beam.pipeline.Pipeline.from_runner_api(
self._runner_pipeline.to_runner_api(use_fake_coders=True),
self._runner_pipeline.runner,
self._options)
def run(self, display_pipeline_graph=False, use_cache=True, blocking=False):
"""Shorthand to run the pipeline fragment."""
try:
preserved_skip_display = self._runner_pipeline.runner._skip_display
preserved_force_compute = self._runner_pipeline.runner._force_compute
preserved_blocking = self._runner_pipeline.runner._blocking
self._runner_pipeline.runner._skip_display = not display_pipeline_graph
self._runner_pipeline.runner._force_compute = not use_cache
self._runner_pipeline.runner._blocking = blocking
return self.deduce_fragment().run()
finally:
self._runner_pipeline.runner._skip_display = preserved_skip_display
self._runner_pipeline.runner._force_compute = preserved_force_compute
self._runner_pipeline.runner._blocking = preserved_blocking
def _build_runner_pipeline(self):
return beam.pipeline.Pipeline.from_runner_api(
self._user_pipeline.to_runner_api(use_fake_coders=True),
self._user_pipeline.runner,
self._options)
def _calculate_target_pcoll_ids(self):
pcoll_id_to_target_pcoll = {}
for pcoll in self._pcolls:
pcoll_id_to_target_pcoll[self._runner_pcoll_to_id.get(str(pcoll),
'')] = pcoll
return pcoll_id_to_target_pcoll
def _calculate_user_transform_labels(self):
label_to_user_transform = {}
class UserTransformVisitor(PipelineVisitor):
def enter_composite_transform(self, transform_node):
self.visit_transform(transform_node)
def visit_transform(self, transform_node):
if transform_node is not None:
label_to_user_transform[transform_node.full_label] = transform_node
v = UserTransformVisitor()
self._runner_pipeline.visit(v)
return label_to_user_transform
def _build_correlation_between_pipelines(
self, runner_pcoll_to_id, id_to_target_pcoll, label_to_user_transform):
runner_pcolls_to_user_pcolls = {}
runner_transforms_to_user_transforms = {}
class CorrelationVisitor(PipelineVisitor):
def enter_composite_transform(self, transform_node):
self.visit_transform(transform_node)
def visit_transform(self, transform_node):
self._process_transform(transform_node)
for in_pcoll in transform_node.inputs:
self._process_pcoll(in_pcoll)
for out_pcoll in transform_node.outputs.values():
self._process_pcoll(out_pcoll)
def _process_pcoll(self, pcoll):
pcoll_id = runner_pcoll_to_id.get(str(pcoll), '')
if pcoll_id in id_to_target_pcoll:
runner_pcolls_to_user_pcolls[pcoll] = (id_to_target_pcoll[pcoll_id])
def _process_transform(self, transform_node):
if transform_node.full_label in label_to_user_transform:
runner_transforms_to_user_transforms[transform_node] = (
label_to_user_transform[transform_node.full_label])
v = CorrelationVisitor()
self._runner_pipeline.visit(v)
return runner_pcolls_to_user_pcolls, runner_transforms_to_user_transforms
def _mark_necessary_transforms_and_pcolls(self, runner_pcolls_to_user_pcolls):
necessary_transforms = set()
all_inputs = set()
updated_all_inputs = set(runner_pcolls_to_user_pcolls.keys())
# Iterate until no new PCollections are recorded.
while len(updated_all_inputs) != len(all_inputs):
all_inputs = set(updated_all_inputs)
for pcoll in all_inputs:
producer = pcoll.producer
while producer:
if producer in necessary_transforms:
break
# Mark the AppliedPTransform as necessary.
necessary_transforms.add(producer)
# Record all necessary input and side input PCollections.
updated_all_inputs.update(producer.inputs)
# pylint: disable=map-builtin-not-iterating
side_input_pvalues = set(
map(lambda side_input: side_input.pvalue, producer.side_inputs))
updated_all_inputs.update(side_input_pvalues)<|fim▁hole|>
def _prune_runner_pipeline_to_fragment(
self, runner_pipeline, necessary_transforms):
class PruneVisitor(PipelineVisitor):
def enter_composite_transform(self, transform_node):
if isinstance(transform_node.transform, TestStream):
return
pruned_parts = list(transform_node.parts)
for part in transform_node.parts:
if part not in necessary_transforms:
pruned_parts.remove(part)
transform_node.parts = tuple(pruned_parts)
self.visit_transform(transform_node)
def visit_transform(self, transform_node):
if transform_node not in necessary_transforms:
transform_node.parent = None
v = PruneVisitor()
runner_pipeline.visit(v)
return runner_pipeline<|fim▁end|> | # Go to its parent AppliedPTransform.
producer = producer.parent
return necessary_transforms, all_inputs |
<|file_name|>task.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
#
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import time
from threading import Thread
LOG = logging.getLogger(__name__)
_TASKS = {}
class _Task(Thread):
def __init__(self, tid, interval, repeat, func, *args, **kwargs):
super(_Task, self).__init__()
self.tid = tid
self.interval = interval
self.repeat = repeat
self.func = func
self.args = args
self.kwargs = kwargs
self._stop = False
_TASKS[tid] = self
self.start()
def run(self):
for dummy in range(self.repeat):
if self._stop:
break
retval = self.func(*self.args, **self.kwargs)
if retval is not None:
break
time.sleep(self.interval)
_TASKS.pop(self.tid, None)
def stop(self):
self._stop = True
def start(tid, interval, repeat, func, *args, **kwargs):
"""
Start a new task
"""
LOG.info('start(tid=%s, interval=%s, repeat=%s, func=%s, args=%s, '
'kwargs=%s)', tid, interval, repeat, func.__name__, args, kwargs)
_Task(tid, interval, repeat, func, *args, **kwargs)
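# Illustrative use (check_health is a hypothetical callback): poll a service
# every 30 seconds, at most 10 times; returning a non-None value from the
# callback ends the task early, as does stop(tid).
#   start('health-check', 30, 10, check_health, 'http://localhost:8080/ping')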
def stop(tid):
"""
Stop a running task
"""
LOG.info('stop(tid=%s)', tid)<|fim▁hole|> if t is not None:
t.stop()
def stop_all(wait=False):
"""
Stop all running tasks
"""
LOG.info('stop_all()')
# iterate over a snapshot: tasks remove themselves from _TASKS as they finish
for tid in list(_TASKS):
    stop(tid)
if wait:
while _TASKS:
time.sleep(0.5)<|fim▁end|> |
t = _TASKS.get(tid, None) |
<|file_name|>statement.rs<|end_file_name|><|fim▁begin|>use std::iter::IntoIterator;
use std::os::raw::{c_int, c_void};
#[cfg(feature = "array")]
use std::rc::Rc;
use std::slice::from_raw_parts;
use std::{convert, fmt, mem, ptr, str};
use super::ffi;
use super::{len_as_c_int, str_for_sqlite};
use super::{
AndThenRows, Connection, Error, MappedRows, Params, RawStatement, Result, Row, Rows, ValueRef,
};
use crate::types::{ToSql, ToSqlOutput};
#[cfg(feature = "array")]
use crate::vtab::array::{free_array, ARRAY_TYPE};
<|fim▁hole|> pub(crate) stmt: RawStatement,
}
impl Statement<'_> {
/// Execute the prepared statement.
///
/// On success, returns the number of rows that were changed or inserted or
/// deleted (via `sqlite3_changes`).
///
/// ## Example
///
/// ### Use with positional parameters
///
/// ```rust,no_run
/// # use rusqlite::{Connection, Result, params};
/// fn update_rows(conn: &Connection) -> Result<()> {
/// let mut stmt = conn.prepare("UPDATE foo SET bar = 'baz' WHERE qux = ?")?;
/// // The `rusqlite::params!` macro is mostly useful when the parameters do not
/// // all have the same type, or if there are more than 32 parameters
/// // at once.
/// stmt.execute(params![1i32])?;
/// // However, it's not required, many cases are fine as:
/// stmt.execute(&[&2i32])?;
/// // Or even:
/// stmt.execute([2i32])?;
/// Ok(())
/// }
/// ```
///
/// ### Use with named parameters
///
/// ```rust,no_run
/// # use rusqlite::{Connection, Result, named_params};
/// fn insert(conn: &Connection) -> Result<()> {
/// let mut stmt = conn.prepare("INSERT INTO test (key, value) VALUES (:key, :value)")?;
/// // The `rusqlite::named_params!` macro (like `params!`) is useful for heterogeneous
/// // sets of parameters (where all parameters are not the same type), or for queries
/// // with many (more than 32) statically known parameters.
/// stmt.execute(named_params!{ ":key": "one", ":value": 2 })?;
/// // However, named parameters can also be passed like:
/// stmt.execute(&[(":key", "three"), (":val", "four")])?;
/// // Or even: (note that a &T is required for the value type, currently)
/// stmt.execute(&[(":key", &100), (":val", &200)])?;
/// Ok(())
/// }
/// ```
///
/// ### Use without parameters
///
/// ```rust,no_run
/// # use rusqlite::{Connection, Result, params};
/// fn delete_all(conn: &Connection) -> Result<()> {
/// let mut stmt = conn.prepare("DELETE FROM users")?;
/// stmt.execute([])?;
/// Ok(())
/// }
/// ```
///
/// # Failure
///
/// Will return `Err` if binding parameters fails, the executed statement
/// returns rows (in which case `query` should be used instead), or the
/// underlying SQLite call fails.
#[inline]
pub fn execute<P: Params>(&mut self, params: P) -> Result<usize> {
params.__bind_in(self)?;
self.execute_with_bound_parameters()
}
/// Execute the prepared statement with named parameter(s).
///
/// Note: This function is deprecated in favor of [`Statement::execute`],
/// which can now take named parameters directly.
///
/// If any parameters that were in the prepared statement are not included
/// in `params`, they will continue to use the most-recently bound value
/// from a previous call to `execute_named`, or `NULL` if they have never
/// been bound.
///
/// On success, returns the number of rows that were changed or inserted or
/// deleted (via `sqlite3_changes`).
///
/// # Failure
///
/// Will return `Err` if binding parameters fails, the executed statement
/// returns rows (in which case `query` should be used instead), or the
/// underlying SQLite call fails.
#[deprecated = "You can use `execute` with named params now."]
#[inline]
pub fn execute_named(&mut self, params: &[(&str, &dyn ToSql)]) -> Result<usize> {
self.execute(params)
}
/// Execute an INSERT and return the ROWID.
///
/// # Note
///
/// This function is a convenience wrapper around [`execute()`](Statement::execute) intended for
/// queries that insert a single item. It is possible to misuse this
/// function in a way that it cannot detect, such as by calling it on a
/// statement which _updates_ a single
/// item rather than inserting one. Please don't do that.
///
/// # Failure
///
/// Will return `Err` if no row is inserted or many rows are inserted.
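///
/// # Example
///
/// ```rust,no_run
/// # use rusqlite::{Connection, Result};
/// // Sketch: `users(name TEXT)` is a hypothetical table; on success the
/// // returned value is the ROWID of the newly inserted row.
/// fn add_user(conn: &Connection, name: &str) -> Result<i64> {
///     let mut stmt = conn.prepare("INSERT INTO users (name) VALUES (?)")?;
///     stmt.insert([name])
/// }
/// ```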
#[inline]
pub fn insert<P: Params>(&mut self, params: P) -> Result<i64> {
let changes = self.execute(params)?;
match changes {
1 => Ok(self.conn.last_insert_rowid()),
_ => Err(Error::StatementChangedRows(changes)),
}
}
/// Execute the prepared statement, returning a handle to the resulting
/// rows.
///
/// Due to lifetime restrictions, the rows handle returned by `query` does not
/// implement the `Iterator` trait. Consider using [`query_map`](Statement::query_map) or
/// [`query_and_then`](Statement::query_and_then) instead, which do.
///
/// ## Example
///
/// ### Use without parameters
///
/// ```rust,no_run
/// # use rusqlite::{Connection, Result};
/// fn get_names(conn: &Connection) -> Result<Vec<String>> {
/// let mut stmt = conn.prepare("SELECT name FROM people")?;
/// let mut rows = stmt.query([])?;
///
/// let mut names = Vec::new();
/// while let Some(row) = rows.next()? {
/// names.push(row.get(0)?);
/// }
///
/// Ok(names)
/// }
/// ```
///
/// ### Use with positional parameters
///
/// ```rust,no_run
/// # use rusqlite::{Connection, Result};
/// fn query(conn: &Connection, name: &str) -> Result<()> {
/// let mut stmt = conn.prepare("SELECT * FROM test where name = ?")?;
/// let mut rows = stmt.query(rusqlite::params![name])?;
/// while let Some(row) = rows.next()? {
/// // ...
/// }
/// Ok(())
/// }
/// ```
///
/// Or, equivalently (but without the [`params!`] macro).
///
/// ```rust,no_run
/// # use rusqlite::{Connection, Result};
/// fn query(conn: &Connection, name: &str) -> Result<()> {
/// let mut stmt = conn.prepare("SELECT * FROM test where name = ?")?;
/// let mut rows = stmt.query([name])?;
/// while let Some(row) = rows.next()? {
/// // ...
/// }
/// Ok(())
/// }
/// ```
///
/// ### Use with named parameters
///
/// ```rust,no_run
/// # use rusqlite::{Connection, Result};
/// fn query(conn: &Connection) -> Result<()> {
/// let mut stmt = conn.prepare("SELECT * FROM test where name = :name")?;
/// let mut rows = stmt.query(&[(":name", "one")])?;
/// while let Some(row) = rows.next()? {
/// // ...
/// }
/// Ok(())
/// }
/// ```
///
/// Note, the `named_params!` macro is provided for syntactic convenience,
/// and so the above example could also be written as:
///
/// ```rust,no_run
/// # use rusqlite::{Connection, Result, named_params};
/// fn query(conn: &Connection) -> Result<()> {
/// let mut stmt = conn.prepare("SELECT * FROM test where name = :name")?;
/// let mut rows = stmt.query(named_params!{ ":name": "one" })?;
/// while let Some(row) = rows.next()? {
/// // ...
/// }
/// Ok(())
/// }
/// ```
///
/// ## Failure
///
/// Will return `Err` if binding parameters fails.
#[inline]
pub fn query<P: Params>(&mut self, params: P) -> Result<Rows<'_>> {
self.check_readonly()?;
params.__bind_in(self)?;
Ok(Rows::new(self))
}
/// Execute the prepared statement with named parameter(s), returning a
/// handle for the resulting rows.
///
/// Note: This function is deprecated in favor of [`Statement::query`],
/// which can now take named parameters directly.
///
/// If any parameters that were in the prepared statement are not included
/// in `params`, they will continue to use the most-recently bound value
/// from a previous call to `query_named`, or `NULL` if they have never been
/// bound.
///
/// # Failure
///
/// Will return `Err` if binding parameters fails.
#[deprecated = "You can use `query` with named params now."]
pub fn query_named(&mut self, params: &[(&str, &dyn ToSql)]) -> Result<Rows<'_>> {
self.query(params)
}
/// Executes the prepared statement and maps a function over the resulting
/// rows, returning an iterator over the mapped function results.
///
/// `f` is used to transform the _streaming_ iterator into a _standard_
/// iterator.
///
/// This is equivalent to `stmt.query(params)?.mapped(f)`.
///
/// ## Example
///
/// ### Use with positional params
///
/// ```rust,no_run
/// # use rusqlite::{Connection, Result};
/// fn get_names(conn: &Connection) -> Result<Vec<String>> {
/// let mut stmt = conn.prepare("SELECT name FROM people")?;
/// let rows = stmt.query_map([], |row| row.get(0))?;
///
/// let mut names = Vec::new();
/// for name_result in rows {
/// names.push(name_result?);
/// }
///
/// Ok(names)
/// }
/// ```
///
/// ### Use with named params
///
/// ```rust,no_run
/// # use rusqlite::{Connection, Result};
/// fn get_names(conn: &Connection) -> Result<Vec<String>> {
/// let mut stmt = conn.prepare("SELECT name FROM people WHERE id = :id")?;
/// let rows = stmt.query_map(&[(":id", &"one")], |row| row.get(0))?;
///
/// let mut names = Vec::new();
/// for name_result in rows {
/// names.push(name_result?);
/// }
///
/// Ok(names)
/// }
/// ```
/// ## Failure
///
/// Will return `Err` if binding parameters fails.
pub fn query_map<T, P, F>(&mut self, params: P, f: F) -> Result<MappedRows<'_, F>>
where
P: Params,
F: FnMut(&Row<'_>) -> Result<T>,
{
self.query(params).map(|rows| rows.mapped(f))
}
/// Execute the prepared statement with named parameter(s), returning an
/// iterator over the result of calling the mapping function over the
/// query's rows.
///
/// Note: This function is deprecated in favor of [`Statement::query_map`],
/// which can now take named parameters directly.
///
/// If any parameters that were in the prepared statement
/// are not included in `params`, they will continue to use the
/// most-recently bound value from a previous call to `query_named`,
/// or `NULL` if they have never been bound.
///
/// `f` is used to transform the _streaming_ iterator into a _standard_
/// iterator.
///
/// ## Failure
///
/// Will return `Err` if binding parameters fails.
#[deprecated = "You can use `query_map` with named params now."]
pub fn query_map_named<T, F>(
&mut self,
params: &[(&str, &dyn ToSql)],
f: F,
) -> Result<MappedRows<'_, F>>
where
F: FnMut(&Row<'_>) -> Result<T>,
{
self.query_map(params, f)
}
/// Executes the prepared statement and maps a function over the resulting
/// rows, where the function returns a `Result` with `Error` type
/// implementing `std::convert::From<Error>` (so errors can be unified).
///
/// This is equivalent to `stmt.query(params)?.and_then(f)`.
///
/// ## Example
///
/// ### Use with named params
///
/// ```rust,no_run
/// # use rusqlite::{Connection, Result};
/// struct Person {
/// name: String,
/// };
///
/// fn name_to_person(name: String) -> Result<Person> {
/// // ... check for valid name
/// Ok(Person { name: name })
/// }
///
/// fn get_names(conn: &Connection) -> Result<Vec<Person>> {
/// let mut stmt = conn.prepare("SELECT name FROM people WHERE id = :id")?;
/// let rows =
/// stmt.query_and_then(&[(":id", "one")], |row| name_to_person(row.get(0)?))?;
///
/// let mut persons = Vec::new();
/// for person_result in rows {
/// persons.push(person_result?);
/// }
///
/// Ok(persons)
/// }
/// ```
///
/// ### Use with positional params
///
/// ```rust,no_run
/// # use rusqlite::{Connection, Result};
/// fn get_names(conn: &Connection) -> Result<Vec<String>> {
/// let mut stmt = conn.prepare("SELECT name FROM people WHERE id = ?")?;
/// let rows = stmt.query_and_then(["one"], |row| row.get::<_, String>(0))?;
///
/// let mut persons = Vec::new();
/// for person_result in rows {
/// persons.push(person_result?);
/// }
///
/// Ok(persons)
/// }
/// ```
///
/// # Failure
///
/// Will return `Err` if binding parameters fails.
#[inline]
pub fn query_and_then<T, E, P, F>(&mut self, params: P, f: F) -> Result<AndThenRows<'_, F>>
where
P: Params,
E: convert::From<Error>,
F: FnMut(&Row<'_>) -> Result<T, E>,
{
self.query(params).map(|rows| rows.and_then(f))
}
/// Execute the prepared statement with named parameter(s), returning an
/// iterator over the result of calling the mapping function over the
/// query's rows.
///
/// Note: This function is deprecated in favor of [`Statement::query_and_then`],
/// which can now take named parameters directly.
///
/// If any parameters that were in the prepared statement are not included
/// in `params`, they will continue to use the most-recently bound value
/// from a previous call to `query_named`, or `NULL` if they have never been
/// bound.
///
/// ## Failure
///
/// Will return `Err` if binding parameters fails.
#[deprecated = "You can use `query_and_then` with named params now."]
pub fn query_and_then_named<T, E, F>(
&mut self,
params: &[(&str, &dyn ToSql)],
f: F,
) -> Result<AndThenRows<'_, F>>
where
E: convert::From<Error>,
F: FnMut(&Row<'_>) -> Result<T, E>,
{
self.query_and_then(params, f)
}
/// Return `true` if a query in the SQL statement it executes returns one
/// or more rows and `false` if the SQL returns an empty set.
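///
/// # Example
///
/// ```rust,no_run
/// # use rusqlite::{Connection, Result};
/// // Sketch, assuming a hypothetical table `people(name TEXT)`.
/// fn person_exists(conn: &Connection, name: &str) -> Result<bool> {
///     let mut stmt = conn.prepare("SELECT 1 FROM people WHERE name = ?")?;
///     stmt.exists([name])
/// }
/// ```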
#[inline]
pub fn exists<P: Params>(&mut self, params: P) -> Result<bool> {
let mut rows = self.query(params)?;
let exists = rows.next()?.is_some();
Ok(exists)
}
/// Convenience method to execute a query that is expected to return a
/// single row.
///
/// If the query returns more than one row, all rows except the first are
/// ignored.
///
/// Returns `Err(QueryReturnedNoRows)` if no results are returned. If the
/// query truly is optional, you can call [`.optional()`](crate::OptionalExtension::optional) on the result of
/// this to get a `Result<Option<T>>` (requires that the trait `rusqlite::OptionalExtension`
/// is imported).
///
/// # Failure
///
/// Will return `Err` if the underlying SQLite call fails.
pub fn query_row<T, P, F>(&mut self, params: P, f: F) -> Result<T>
where
P: Params,
F: FnOnce(&Row<'_>) -> Result<T>,
{
let mut rows = self.query(params)?;
rows.get_expected_row().and_then(|r| f(&r))
}
/// Convenience method to execute a query with named parameter(s) that is
/// expected to return a single row.
///
/// Note: This function is deprecated in favor of [`Statement::query_and_then`],
/// which can now take named parameters directly.
///
/// If the query returns more than one row, all rows except the first are
/// ignored.
///
/// Returns `Err(QueryReturnedNoRows)` if no results are returned. If the
/// query truly is optional, you can call [`.optional()`](crate::OptionalExtension::optional) on the result of
/// this to get a `Result<Option<T>>` (requires that the trait `rusqlite::OptionalExtension`
/// is imported).
///
/// # Failure
///
/// Will return `Err` if `sql` cannot be converted to a C-compatible string
/// or if the underlying SQLite call fails.
#[deprecated = "You can use `query_row` with named params now."]
pub fn query_row_named<T, F>(&mut self, params: &[(&str, &dyn ToSql)], f: F) -> Result<T>
where
F: FnOnce(&Row<'_>) -> Result<T>,
{
self.query_row(params, f)
}
/// Consumes the statement.
///
/// Functionally equivalent to the `Drop` implementation, but allows
/// callers to see any errors that occur.
///
/// # Failure
///
/// Will return `Err` if the underlying SQLite call fails.
#[inline]
pub fn finalize(mut self) -> Result<()> {
self.finalize_()
}
/// Return the (one-based) index of an SQL parameter given its name.
///
/// Note that the initial ":" or "$" or "@" or "?" used to specify the
/// parameter is included as part of the name.
///
/// ```rust,no_run
/// # use rusqlite::{Connection, Result};
/// fn example(conn: &Connection) -> Result<()> {
/// let stmt = conn.prepare("SELECT * FROM test WHERE name = :example")?;
/// let index = stmt.parameter_index(":example")?;
/// assert_eq!(index, Some(1));
/// Ok(())
/// }
/// ```
///
/// # Failure
///
/// Will return Err if `name` is invalid. Will return Ok(None) if the name
/// is valid but not a bound parameter of this statement.
#[inline]
pub fn parameter_index(&self, name: &str) -> Result<Option<usize>> {
Ok(self.stmt.bind_parameter_index(name))
}
#[inline]
pub(crate) fn bind_parameters<P>(&mut self, params: P) -> Result<()>
where
P: IntoIterator,
P::Item: ToSql,
{
let expected = self.stmt.bind_parameter_count();
let mut index = 0;
for p in params.into_iter() {
index += 1; // The leftmost SQL parameter has an index of 1.
if index > expected {
break;
}
self.bind_parameter(&p, index)?;
}
if index != expected {
Err(Error::InvalidParameterCount(index, expected))
} else {
Ok(())
}
}
#[inline]
pub(crate) fn bind_parameters_named<T: ?Sized + ToSql>(
&mut self,
params: &[(&str, &T)],
) -> Result<()> {
for &(name, value) in params {
if let Some(i) = self.parameter_index(name)? {
let ts: &dyn ToSql = &value;
self.bind_parameter(ts, i)?;
} else {
return Err(Error::InvalidParameterName(name.into()));
}
}
Ok(())
}
/// Return the number of parameters that can be bound to this statement.
#[inline]
pub fn parameter_count(&self) -> usize {
self.stmt.bind_parameter_count()
}
/// Low level API to directly bind a parameter to a given index.
///
/// Note that the index is one-based, that is, the first parameter index is
/// 1 and not 0. This is consistent with the SQLite API and the values given
/// to parameters bound as `?NNN`.
///
/// The valid values for `one_based_col_index` begin at `1`, and end at
/// [`Statement::parameter_count`], inclusive.
///
/// # Caveats
///
/// This should not generally be used, but is available for special cases
/// such as:
///
/// - binding parameters where a gap exists.
/// - binding named and positional parameters in the same query.
/// - separating parameter binding from query execution.
///
/// Statements that have had their parameters bound this way should be
/// queried or executed by [`Statement::raw_query`] or
/// [`Statement::raw_execute`]. Other functions are not guaranteed to work.
///
/// # Example
///
/// ```rust,no_run
/// # use rusqlite::{Connection, Result};
/// fn query(conn: &Connection) -> Result<()> {
/// let mut stmt = conn.prepare("SELECT * FROM test WHERE name = :name AND value > ?2")?;
/// let name_index = stmt.parameter_index(":name")?.expect("No such parameter");
/// stmt.raw_bind_parameter(name_index, "foo")?;
/// stmt.raw_bind_parameter(2, 100)?;
/// let mut rows = stmt.raw_query();
/// while let Some(row) = rows.next()? {
/// // ...
/// }
/// Ok(())
/// }
/// ```
#[inline]
pub fn raw_bind_parameter<T: ToSql>(
&mut self,
one_based_col_index: usize,
param: T,
) -> Result<()> {
// This is the same as `bind_parameter` but slightly more ergonomic and
// correctly takes `&mut self`.
self.bind_parameter(¶m, one_based_col_index)
}
/// Low level API to execute a statement given that all parameters were
/// bound explicitly with the [`Statement::raw_bind_parameter`] API.
///
/// # Caveats
///
/// Any unbound parameters will have `NULL` as their value.
///
/// This should not generally be used outside of special cases, and
/// functions in the [`Statement::execute`] family should be preferred.
///
/// # Failure
///
/// Will return `Err` if the executed statement returns rows (in which case
/// `query` should be used instead), or the underlying SQLite call fails.
#[inline]
pub fn raw_execute(&mut self) -> Result<usize> {
self.execute_with_bound_parameters()
}
/// Low level API to get `Rows` for this query given that all parameters
/// were bound explicitly with the [`Statement::raw_bind_parameter`] API.
///
/// # Caveats
///
/// Any unbound parameters will have `NULL` as their value.
///
/// This should not generally be used outside of special cases, and
/// functions in the [`Statement::query`] family should be preferred.
///
/// Note that if the SQL does not return results, [`Statement::raw_execute`]
/// should be used instead.
#[inline]
pub fn raw_query(&mut self) -> Rows<'_> {
Rows::new(self)
}
// generic because many of these branches can constant fold away.
fn bind_parameter<P: ?Sized + ToSql>(&self, param: &P, col: usize) -> Result<()> {
let value = param.to_sql()?;
let ptr = unsafe { self.stmt.ptr() };
let value = match value {
ToSqlOutput::Borrowed(v) => v,
ToSqlOutput::Owned(ref v) => ValueRef::from(v),
#[cfg(feature = "blob")]
ToSqlOutput::ZeroBlob(len) => {
return self
.conn
.decode_result(unsafe { ffi::sqlite3_bind_zeroblob(ptr, col as c_int, len) });
}
#[cfg(feature = "array")]
ToSqlOutput::Array(a) => {
return self.conn.decode_result(unsafe {
ffi::sqlite3_bind_pointer(
ptr,
col as c_int,
Rc::into_raw(a) as *mut c_void,
ARRAY_TYPE,
Some(free_array),
)
});
}
};
self.conn.decode_result(match value {
ValueRef::Null => unsafe { ffi::sqlite3_bind_null(ptr, col as c_int) },
ValueRef::Integer(i) => unsafe { ffi::sqlite3_bind_int64(ptr, col as c_int, i) },
ValueRef::Real(r) => unsafe { ffi::sqlite3_bind_double(ptr, col as c_int, r) },
ValueRef::Text(s) => unsafe {
let (c_str, len, destructor) = str_for_sqlite(s)?;
ffi::sqlite3_bind_text(ptr, col as c_int, c_str, len, destructor)
},
ValueRef::Blob(b) => unsafe {
let length = len_as_c_int(b.len())?;
if length == 0 {
ffi::sqlite3_bind_zeroblob(ptr, col as c_int, 0)
} else {
ffi::sqlite3_bind_blob(
ptr,
col as c_int,
b.as_ptr() as *const c_void,
length,
ffi::SQLITE_TRANSIENT(),
)
}
},
})
}
#[inline]
fn execute_with_bound_parameters(&mut self) -> Result<usize> {
self.check_update()?;
let r = self.stmt.step();
self.stmt.reset();
match r {
ffi::SQLITE_DONE => Ok(self.conn.changes()),
ffi::SQLITE_ROW => Err(Error::ExecuteReturnedResults),
_ => Err(self.conn.decode_result(r).unwrap_err()),
}
}
#[inline]
fn finalize_(&mut self) -> Result<()> {
let mut stmt = unsafe { RawStatement::new(ptr::null_mut(), 0) };
mem::swap(&mut stmt, &mut self.stmt);
self.conn.decode_result(stmt.finalize())
}
#[cfg(not(feature = "modern_sqlite"))]
#[inline]
fn check_readonly(&self) -> Result<()> {
Ok(())
}
#[cfg(feature = "modern_sqlite")]
#[inline]
fn check_readonly(&self) -> Result<()> {
/*if !self.stmt.readonly() { does not work for PRAGMA
return Err(Error::InvalidQuery);
}*/
Ok(())
}
#[cfg(all(feature = "modern_sqlite", feature = "extra_check"))]
#[inline]
fn check_update(&self) -> Result<()> {
// sqlite3_column_count works for DML but not for DDL (ie ALTER)
if self.column_count() > 0 && self.stmt.readonly() {
return Err(Error::ExecuteReturnedResults);
}
Ok(())
}
#[cfg(all(not(feature = "modern_sqlite"), feature = "extra_check"))]
#[inline]
fn check_update(&self) -> Result<()> {
// sqlite3_column_count works for DML but not for DDL (ie ALTER)
if self.column_count() > 0 {
return Err(Error::ExecuteReturnedResults);
}
Ok(())
}
#[cfg(not(feature = "extra_check"))]
#[inline]
fn check_update(&self) -> Result<()> {
Ok(())
}
/// Returns a string containing the SQL text of the prepared statement with
/// bound parameters expanded.
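///
/// ```rust,no_run
/// # use rusqlite::{Connection, Result};
/// // Sketch: inspect the SQL that would actually run. The exact string is
/// // produced by SQLite; the output shown here is illustrative only.
/// fn debug_sql(conn: &Connection) -> Result<()> {
///     let mut stmt = conn.prepare("SELECT ?")?;
///     stmt.raw_bind_parameter(1, 42)?;
///     println!("{:?}", stmt.expanded_sql()); // e.g. Some("SELECT 42")
///     Ok(())
/// }
/// ```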
#[cfg(feature = "modern_sqlite")]
pub fn expanded_sql(&self) -> Option<String> {
self.stmt
.expanded_sql()
.map(|s| s.to_string_lossy().to_string())
}
/// Get the value for one of the status counters for this statement.
#[inline]
pub fn get_status(&self, status: StatementStatus) -> i32 {
self.stmt.get_status(status, false)
}
/// Reset the value of one of the status counters for this statement,
/// returning the value it had before resetting.
#[inline]
pub fn reset_status(&self, status: StatementStatus) -> i32 {
self.stmt.get_status(status, true)
}
#[cfg(feature = "extra_check")]
#[inline]
pub(crate) fn check_no_tail(&self) -> Result<()> {
if self.stmt.has_tail() {
Err(Error::MultipleStatement)
} else {
Ok(())
}
}
#[cfg(not(feature = "extra_check"))]
#[inline]
pub(crate) fn check_no_tail(&self) -> Result<()> {
Ok(())
}
/// Safety: This is unsafe, because using `sqlite3_stmt` after the
/// connection has closed is illegal, but `RawStatement` does not enforce
/// this, as it loses our protective `'conn` lifetime bound.
#[inline]
pub(crate) unsafe fn into_raw(mut self) -> RawStatement {
let mut stmt = RawStatement::new(ptr::null_mut(), 0);
mem::swap(&mut stmt, &mut self.stmt);
stmt
}
}
impl fmt::Debug for Statement<'_> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let sql = if self.stmt.is_null() {
Ok("")
} else {
str::from_utf8(self.stmt.sql().unwrap().to_bytes())
};
f.debug_struct("Statement")
.field("conn", self.conn)
.field("stmt", &self.stmt)
.field("sql", &sql)
.finish()
}
}
impl Drop for Statement<'_> {
#[allow(unused_must_use)]
#[inline]
fn drop(&mut self) {
self.finalize_();
}
}
impl Statement<'_> {
#[inline]
pub(super) fn new(conn: &Connection, stmt: RawStatement) -> Statement<'_> {
Statement { conn, stmt }
}
pub(super) fn value_ref(&self, col: usize) -> ValueRef<'_> {
let raw = unsafe { self.stmt.ptr() };
match self.stmt.column_type(col) {
ffi::SQLITE_NULL => ValueRef::Null,
ffi::SQLITE_INTEGER => {
ValueRef::Integer(unsafe { ffi::sqlite3_column_int64(raw, col as c_int) })
}
ffi::SQLITE_FLOAT => {
ValueRef::Real(unsafe { ffi::sqlite3_column_double(raw, col as c_int) })
}
ffi::SQLITE_TEXT => {
let s = unsafe {
// Quoting from "Using SQLite" book:
// To avoid problems, an application should first extract the desired type using
// a sqlite3_column_xxx() function, and then call the
// appropriate sqlite3_column_bytes() function.
let text = ffi::sqlite3_column_text(raw, col as c_int);
let len = ffi::sqlite3_column_bytes(raw, col as c_int);
assert!(
!text.is_null(),
"unexpected SQLITE_TEXT column type with NULL data"
);
from_raw_parts(text as *const u8, len as usize)
};
ValueRef::Text(s)
}
ffi::SQLITE_BLOB => {
let (blob, len) = unsafe {
(
ffi::sqlite3_column_blob(raw, col as c_int),
ffi::sqlite3_column_bytes(raw, col as c_int),
)
};
assert!(
len >= 0,
"unexpected negative return from sqlite3_column_bytes"
);
if len > 0 {
assert!(
!blob.is_null(),
"unexpected SQLITE_BLOB column type with NULL data"
);
ValueRef::Blob(unsafe { from_raw_parts(blob as *const u8, len as usize) })
} else {
// The return value from sqlite3_column_blob() for a zero-length BLOB
// is a NULL pointer.
ValueRef::Blob(&[])
}
}
_ => unreachable!("sqlite3_column_type returned invalid value"),
}
}
#[inline]
pub(super) fn step(&self) -> Result<bool> {
match self.stmt.step() {
ffi::SQLITE_ROW => Ok(true),
ffi::SQLITE_DONE => Ok(false),
code => Err(self.conn.decode_result(code).unwrap_err()),
}
}
#[inline]
pub(super) fn reset(&self) -> c_int {
self.stmt.reset()
}
}
/// Prepared statement status counters.
///
/// See `https://www.sqlite.org/c3ref/c_stmtstatus_counter.html`
/// for explanations of each.
///
/// Note that depending on your version of SQLite, all of these
/// may not be available.
#[repr(i32)]
#[derive(Clone, Copy, PartialEq, Eq)]
#[non_exhaustive]
pub enum StatementStatus {
/// Equivalent to SQLITE_STMTSTATUS_FULLSCAN_STEP
FullscanStep = 1,
/// Equivalent to SQLITE_STMTSTATUS_SORT
Sort = 2,
/// Equivalent to SQLITE_STMTSTATUS_AUTOINDEX
AutoIndex = 3,
/// Equivalent to SQLITE_STMTSTATUS_VM_STEP
VmStep = 4,
/// Equivalent to SQLITE_STMTSTATUS_REPREPARE
RePrepare = 5,
/// Equivalent to SQLITE_STMTSTATUS_RUN
Run = 6,
/// Equivalent to SQLITE_STMTSTATUS_MEMUSED
MemUsed = 99,
}
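// Illustrative sketch (not from the original source): `get_status` reads a
// counter without clearing it, while `reset_status` returns it and clears it.
//
//     let mut stmt = conn.prepare("SELECT 1")?;
//     stmt.query_row([], |r| r.get::<_, i32>(0))?;
//     let steps = stmt.get_status(StatementStatus::VmStep);
//     assert_eq!(steps, stmt.reset_status(StatementStatus::VmStep));
//     assert_eq!(0, stmt.get_status(StatementStatus::VmStep));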
#[cfg(test)]
mod test {
use crate::types::ToSql;
use crate::{params_from_iter, Connection, Error, Result};
#[test]
#[allow(deprecated)]
fn test_execute_named() -> Result<()> {
let db = Connection::open_in_memory()?;
db.execute_batch("CREATE TABLE foo(x INTEGER)")?;
assert_eq!(
db.execute_named("INSERT INTO foo(x) VALUES (:x)", &[(":x", &1i32)])?,
1
);
assert_eq!(
db.execute("INSERT INTO foo(x) VALUES (:x)", &[(":x", &2i32)])?,
1
);
assert_eq!(
db.execute(
"INSERT INTO foo(x) VALUES (:x)",
crate::named_params! {":x": 3i32}
)?,
1
);
assert_eq!(
6i32,
db.query_row_named::<i32, _>(
"SELECT SUM(x) FROM foo WHERE x > :x",
&[(":x", &0i32)],
|r| r.get(0)
)?
);
assert_eq!(
5i32,
db.query_row::<i32, _, _>(
"SELECT SUM(x) FROM foo WHERE x > :x",
&[(":x", &1i32)],
|r| r.get(0)
)?
);
Ok(())
}
#[test]
#[allow(deprecated)]
fn test_stmt_execute_named() -> Result<()> {
let db = Connection::open_in_memory()?;
let sql = "CREATE TABLE test (id INTEGER PRIMARY KEY NOT NULL, name TEXT NOT NULL, flag \
INTEGER)";
db.execute_batch(sql)?;
let mut stmt = db.prepare("INSERT INTO test (name) VALUES (:name)")?;
stmt.execute_named(&[(":name", &"one")])?;
let mut stmt = db.prepare("SELECT COUNT(*) FROM test WHERE name = :name")?;
assert_eq!(
1i32,
stmt.query_row_named::<i32, _>(&[(":name", &"one")], |r| r.get(0))?
);
assert_eq!(
1i32,
stmt.query_row::<i32, _, _>(&[(":name", "one")], |r| r.get(0))?
);
Ok(())
}
#[test]
#[allow(deprecated)]
fn test_query_named() -> Result<()> {
let db = Connection::open_in_memory()?;
let sql = r#"
CREATE TABLE test (id INTEGER PRIMARY KEY NOT NULL, name TEXT NOT NULL, flag INTEGER);
INSERT INTO test(id, name) VALUES (1, "one");
"#;
db.execute_batch(sql)?;
let mut stmt = db.prepare("SELECT id FROM test where name = :name")?;
// legacy `_named` api
{
let mut rows = stmt.query_named(&[(":name", &"one")])?;
let id: Result<i32> = rows.next()?.unwrap().get(0);
assert_eq!(Ok(1), id);
}
// plain api
{
let mut rows = stmt.query(&[(":name", "one")])?;
let id: Result<i32> = rows.next()?.unwrap().get(0);
assert_eq!(Ok(1), id);
}
Ok(())
}
#[test]
#[allow(deprecated)]
fn test_query_map_named() -> Result<()> {
let db = Connection::open_in_memory()?;
let sql = r#"
CREATE TABLE test (id INTEGER PRIMARY KEY NOT NULL, name TEXT NOT NULL, flag INTEGER);
INSERT INTO test(id, name) VALUES (1, "one");
"#;
db.execute_batch(sql)?;
let mut stmt = db.prepare("SELECT id FROM test where name = :name")?;
// legacy `_named` api
{
let mut rows = stmt.query_map_named(&[(":name", &"one")], |row| {
let id: Result<i32> = row.get(0);
id.map(|i| 2 * i)
})?;
let doubled_id: i32 = rows.next().unwrap()?;
assert_eq!(2, doubled_id);
}
// plain api
{
let mut rows = stmt.query_map(&[(":name", "one")], |row| {
let id: Result<i32> = row.get(0);
id.map(|i| 2 * i)
})?;
let doubled_id: i32 = rows.next().unwrap()?;
assert_eq!(2, doubled_id);
}
Ok(())
}
#[test]
#[allow(deprecated)]
fn test_query_and_then_named() -> Result<()> {
let db = Connection::open_in_memory()?;
let sql = r#"
CREATE TABLE test (id INTEGER PRIMARY KEY NOT NULL, name TEXT NOT NULL, flag INTEGER);
INSERT INTO test(id, name) VALUES (1, "one");
INSERT INTO test(id, name) VALUES (2, "one");
"#;
db.execute_batch(sql)?;
let mut stmt = db.prepare("SELECT id FROM test where name = :name ORDER BY id ASC")?;
let mut rows = stmt.query_and_then_named(&[(":name", &"one")], |row| {
let id: i32 = row.get(0)?;
if id == 1 {
Ok(id)
} else {
Err(Error::SqliteSingleThreadedMode)
}
})?;
// first row should be Ok
let doubled_id: i32 = rows.next().unwrap()?;
assert_eq!(1, doubled_id);
// second row should be Err
#[allow(clippy::match_wild_err_arm)]
match rows.next().unwrap() {
Ok(_) => panic!("invalid Ok"),
Err(Error::SqliteSingleThreadedMode) => (),
Err(_) => panic!("invalid Err"),
}
Ok(())
}
#[test]
fn test_query_and_then_by_name() -> Result<()> {
let db = Connection::open_in_memory()?;
let sql = r#"
CREATE TABLE test (id INTEGER PRIMARY KEY NOT NULL, name TEXT NOT NULL, flag INTEGER);
INSERT INTO test(id, name) VALUES (1, "one");
INSERT INTO test(id, name) VALUES (2, "one");
"#;
db.execute_batch(sql)?;
let mut stmt = db.prepare("SELECT id FROM test where name = :name ORDER BY id ASC")?;
let mut rows = stmt.query_and_then(&[(":name", "one")], |row| {
let id: i32 = row.get(0)?;
if id == 1 {
Ok(id)
} else {
Err(Error::SqliteSingleThreadedMode)
}
})?;
// first row should be Ok
let doubled_id: i32 = rows.next().unwrap()?;
assert_eq!(1, doubled_id);
// second row should be Err
#[allow(clippy::match_wild_err_arm)]
match rows.next().unwrap() {
Ok(_) => panic!("invalid Ok"),
Err(Error::SqliteSingleThreadedMode) => (),
Err(_) => panic!("invalid Err"),
}
Ok(())
}
#[test]
#[allow(deprecated)]
fn test_unbound_parameters_are_null() -> Result<()> {
let db = Connection::open_in_memory()?;
let sql = "CREATE TABLE test (x TEXT, y TEXT)";
db.execute_batch(sql)?;
let mut stmt = db.prepare("INSERT INTO test (x, y) VALUES (:x, :y)")?;
stmt.execute_named(&[(":x", &"one")])?;
let result: Option<String> =
db.query_row("SELECT y FROM test WHERE x = 'one'", [], |row| row.get(0))?;
assert!(result.is_none());
Ok(())
}
#[test]
fn test_raw_binding() -> Result<()> {
let db = Connection::open_in_memory()?;
db.execute_batch("CREATE TABLE test (name TEXT, value INTEGER)")?;
{
let mut stmt = db.prepare("INSERT INTO test (name, value) VALUES (:name, ?3)")?;
let name_idx = stmt.parameter_index(":name")?.unwrap();
stmt.raw_bind_parameter(name_idx, "example")?;
stmt.raw_bind_parameter(3, 50i32)?;
let n = stmt.raw_execute()?;
assert_eq!(n, 1);
}
{
let mut stmt = db.prepare("SELECT name, value FROM test WHERE value = ?2")?;
stmt.raw_bind_parameter(2, 50)?;
let mut rows = stmt.raw_query();
{
let row = rows.next()?.unwrap();
let name: String = row.get(0)?;
assert_eq!(name, "example");
let value: i32 = row.get(1)?;
assert_eq!(value, 50);
}
assert!(rows.next()?.is_none());
}
Ok(())
}
#[test]
fn test_unbound_parameters_are_reused() -> Result<()> {
let db = Connection::open_in_memory()?;
let sql = "CREATE TABLE test (x TEXT, y TEXT)";
db.execute_batch(sql)?;
let mut stmt = db.prepare("INSERT INTO test (x, y) VALUES (:x, :y)")?;
stmt.execute(&[(":x", "one")])?;
stmt.execute(&[(":y", "two")])?;
let result: String =
db.query_row("SELECT x FROM test WHERE y = 'two'", [], |row| row.get(0))?;
assert_eq!(result, "one");
Ok(())
}
#[test]
fn test_insert() -> Result<()> {
let db = Connection::open_in_memory()?;
db.execute_batch("CREATE TABLE foo(x INTEGER UNIQUE)")?;
let mut stmt = db.prepare("INSERT OR IGNORE INTO foo (x) VALUES (?)")?;
assert_eq!(stmt.insert([1i32])?, 1);
assert_eq!(stmt.insert([2i32])?, 2);
match stmt.insert([1i32]).unwrap_err() {
Error::StatementChangedRows(0) => (),
err => panic!("Unexpected error {}", err),
}
let mut multi = db.prepare("INSERT INTO foo (x) SELECT 3 UNION ALL SELECT 4")?;
match multi.insert([]).unwrap_err() {
Error::StatementChangedRows(2) => (),
err => panic!("Unexpected error {}", err),
}
Ok(())
}
#[test]
fn test_insert_different_tables() -> Result<()> {
// Test for https://github.com/rusqlite/rusqlite/issues/171
let db = Connection::open_in_memory()?;
db.execute_batch(
r"
CREATE TABLE foo(x INTEGER);
CREATE TABLE bar(x INTEGER);
",
)?;
assert_eq!(db.prepare("INSERT INTO foo VALUES (10)")?.insert([])?, 1);
assert_eq!(db.prepare("INSERT INTO bar VALUES (10)")?.insert([])?, 1);
Ok(())
}
#[test]
fn test_exists() -> Result<()> {
let db = Connection::open_in_memory()?;
let sql = "BEGIN;
CREATE TABLE foo(x INTEGER);
INSERT INTO foo VALUES(1);
INSERT INTO foo VALUES(2);
END;";
db.execute_batch(sql)?;
let mut stmt = db.prepare("SELECT 1 FROM foo WHERE x = ?")?;
assert!(stmt.exists([1i32])?);
assert!(stmt.exists([2i32])?);
assert!(!stmt.exists([0i32])?);
Ok(())
}
#[test]
fn test_query_row() -> Result<()> {
let db = Connection::open_in_memory()?;
let sql = "BEGIN;
CREATE TABLE foo(x INTEGER, y INTEGER);
INSERT INTO foo VALUES(1, 3);
INSERT INTO foo VALUES(2, 4);
END;";
db.execute_batch(sql)?;
let mut stmt = db.prepare("SELECT y FROM foo WHERE x = ?")?;
let y: Result<i64> = stmt.query_row([1i32], |r| r.get(0));
assert_eq!(3i64, y?);
Ok(())
}
#[test]
fn test_query_by_column_name() -> Result<()> {
let db = Connection::open_in_memory()?;
let sql = "BEGIN;
CREATE TABLE foo(x INTEGER, y INTEGER);
INSERT INTO foo VALUES(1, 3);
END;";
db.execute_batch(sql)?;
let mut stmt = db.prepare("SELECT y FROM foo")?;
let y: Result<i64> = stmt.query_row([], |r| r.get("y"));
assert_eq!(3i64, y?);
Ok(())
}
#[test]
fn test_query_by_column_name_ignore_case() -> Result<()> {
let db = Connection::open_in_memory()?;
let sql = "BEGIN;
CREATE TABLE foo(x INTEGER, y INTEGER);
INSERT INTO foo VALUES(1, 3);
END;";
db.execute_batch(sql)?;
let mut stmt = db.prepare("SELECT y as Y FROM foo")?;
let y: Result<i64> = stmt.query_row([], |r| r.get("y"));
assert_eq!(3i64, y?);
Ok(())
}
#[test]
#[cfg(feature = "modern_sqlite")]
fn test_expanded_sql() -> Result<()> {
let db = Connection::open_in_memory()?;
let stmt = db.prepare("SELECT ?")?;
stmt.bind_parameter(&1, 1)?;
assert_eq!(Some("SELECT 1".to_owned()), stmt.expanded_sql());
Ok(())
}
#[test]
fn test_bind_parameters() -> Result<()> {
let db = Connection::open_in_memory()?;
// dynamic slice:
db.query_row(
"SELECT ?1, ?2, ?3",
&[&1u8 as &dyn ToSql, &"one", &Some("one")],
|row| row.get::<_, u8>(0),
)?;
// existing collection:
let data = vec![1, 2, 3];
db.query_row("SELECT ?1, ?2, ?3", params_from_iter(&data), |row| {
row.get::<_, u8>(0)
})?;
db.query_row(
"SELECT ?1, ?2, ?3",
params_from_iter(data.as_slice()),
|row| row.get::<_, u8>(0),
)?;
db.query_row("SELECT ?1, ?2, ?3", params_from_iter(data), |row| {
row.get::<_, u8>(0)
})?;
use std::collections::BTreeSet;
let data: BTreeSet<String> = ["one", "two", "three"]
.iter()
.map(|s| (*s).to_string())
.collect();
db.query_row("SELECT ?1, ?2, ?3", params_from_iter(&data), |row| {
row.get::<_, String>(0)
})?;
let data = [0; 3];
db.query_row("SELECT ?1, ?2, ?3", params_from_iter(&data), |row| {
row.get::<_, u8>(0)
})?;
db.query_row("SELECT ?1, ?2, ?3", params_from_iter(data.iter()), |row| {
row.get::<_, u8>(0)
})?;
Ok(())
}
#[test]
fn test_empty_stmt() -> Result<()> {
let conn = Connection::open_in_memory()?;
let mut stmt = conn.prepare("")?;
assert_eq!(0, stmt.column_count());
assert!(stmt.parameter_index("test").is_ok());
assert!(stmt.step().is_err());
stmt.reset();
assert!(stmt.execute([]).is_err());
Ok(())
}
#[test]
fn test_comment_stmt() -> Result<()> {
let conn = Connection::open_in_memory()?;
conn.prepare("/*SELECT 1;*/")?;
Ok(())
}
#[test]
fn test_comment_and_sql_stmt() -> Result<()> {
let conn = Connection::open_in_memory()?;
let stmt = conn.prepare("/*...*/ SELECT 1;")?;
assert_eq!(1, stmt.column_count());
Ok(())
}
#[test]
fn test_semi_colon_stmt() -> Result<()> {
let conn = Connection::open_in_memory()?;
let stmt = conn.prepare(";")?;
assert_eq!(0, stmt.column_count());
Ok(())
}
#[test]
fn test_utf16_conversion() -> Result<()> {
let db = Connection::open_in_memory()?;
db.pragma_update(None, "encoding", &"UTF-16le")?;
let encoding: String = db.pragma_query_value(None, "encoding", |row| row.get(0))?;
assert_eq!("UTF-16le", encoding);
db.execute_batch("CREATE TABLE foo(x TEXT)")?;
let expected = "テスト";
db.execute("INSERT INTO foo(x) VALUES (?)", &[&expected])?;
let actual: String = db.query_row("SELECT x FROM foo", [], |row| row.get(0))?;
assert_eq!(expected, actual);
Ok(())
}
#[test]
fn test_nul_byte() -> Result<()> {
let db = Connection::open_in_memory()?;
let expected = "a\x00b";
let actual: String = db.query_row("SELECT ?", [expected], |row| row.get(0))?;
assert_eq!(expected, actual);
Ok(())
}
}<|fim▁end|> | /// A prepared statement.
pub struct Statement<'conn> {
conn: &'conn Connection, |
<|file_name|>PropertiesCredentials.java<|end_file_name|><|fim▁begin|>/*
* Copyright 2010-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package com.amazonaws.auth;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.util.Properties;<|fim▁hole|> * Simple implementation of AWSCredentials that reads in AWS access keys from a
* properties file. The AWS access key is expected to be in the "accessKey"
* property and the AWS secret access key is expected to be in the "secretKey"
* property.
*/
public class PropertiesCredentials implements AWSCredentials {
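// Illustrative usage sketch (not part of the original SDK source; the file
// path below is hypothetical):
//
//   AWSCredentials credentials =
//       new PropertiesCredentials(new File("/home/user/aws.properties"));
//   String accessKeyId = credentials.getAWSAccessKeyId();
//   String secretKey = credentials.getAWSSecretKey();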
private final String accessKey;
private final String secretAccessKey;
/**
* Reads the specified file as a Java properties file and extracts the
* AWS access key from the "accessKey" property and AWS secret access
* key from the "secretKey" property. If the specified file doesn't
* contain the AWS access keys an IOException will be thrown.
*
* @param file
* The file from which to read the AWS credentials
* properties.
*
* @throws FileNotFoundException
* If the specified file isn't found.
* @throws IOException
* If any problems are encountered reading the AWS access
* keys from the specified file.
* @throws IllegalArgumentException
* If the specified properties file does not contain the
* required keys.
*/
public PropertiesCredentials(File file) throws FileNotFoundException, IOException, IllegalArgumentException {
if (!file.exists()) {
throw new FileNotFoundException("File doesn't exist: "
+ file.getAbsolutePath());
}
FileInputStream stream = new FileInputStream(file);
try {
Properties accountProperties = new Properties();
accountProperties.load(stream);
if (accountProperties.getProperty("accessKey") == null ||
accountProperties.getProperty("secretKey") == null) {
throw new IllegalArgumentException(
"The specified file (" + file.getAbsolutePath()
+ ") doesn't contain the expected properties 'accessKey' "
+ "and 'secretKey'."
);
}
accessKey = accountProperties.getProperty("accessKey");
secretAccessKey = accountProperties.getProperty("secretKey");
} finally {
try {
stream.close();
} catch (IOException e) {
}
}
}
/**
* Reads the specified input stream as a stream of Java properties file
* content and extracts the AWS access key ID and secret access key from the
* properties.
*
* @param inputStream
* The input stream containing the AWS credential properties.
*
* @throws IOException
* If any problems occur while reading from the input stream.
*/
public PropertiesCredentials(InputStream inputStream) throws IOException {
Properties accountProperties = new Properties();
try {
accountProperties.load(inputStream);
} finally {
try {inputStream.close();} catch (Exception e) {}
}
if (accountProperties.getProperty("accessKey") == null ||
accountProperties.getProperty("secretKey") == null) {
throw new IllegalArgumentException("The specified properties data " +
"doesn't contain the expected properties 'accessKey' and 'secretKey'.");
}
accessKey = accountProperties.getProperty("accessKey");
secretAccessKey = accountProperties.getProperty("secretKey");
}
/* (non-Javadoc)
* @see com.amazonaws.auth.AWSCredentials#getAWSAccessKeyId()
*/
public String getAWSAccessKeyId() {
return accessKey;
}
/* (non-Javadoc)
* @see com.amazonaws.auth.AWSCredentials#getAWSSecretKey()
*/
public String getAWSSecretKey() {
return secretAccessKey;
}
}<|fim▁end|> |
/** |
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>mod token;
use crate::error::Error;
use crate::io::Char;
use self::token::Tokens;
pub use self::token::Token;
<|fim▁hole|>) -> impl Iterator<Item = Result<Token, Error>> {
Tokens::from(iter)
}<|fim▁end|> | pub fn tokens<I: Iterator<Item = Result<Char, Error>>>(
iter: I, |
<|file_name|>Frustum.cpp<|end_file_name|><|fim▁begin|>/**
* @file Frustum.cpp
*
* @author Jan Dušek <[email protected]>
* @date 2013
*/
#include "Frustum.h"
<|fim▁hole|> auto m = glm::transpose(vp);
m_planes[TOP] = Plane::fromCoeficients(-m[1] + m[3]);
m_planes[BOTTOM] = Plane::fromCoeficients(m[1] + m[3]);
m_planes[LEFT] = Plane::fromCoeficients(m[0] + m[3]);
m_planes[RIGHT] = Plane::fromCoeficients(-m[0] + m[3]);
m_planes[NEAR] = Plane::fromCoeficients(m[2] + m[3]);
m_planes[FAR] = Plane::fromCoeficients(-m[2] + m[3]);
}
glm::vec3 getBBoxPositiveVertex(const BoundingBox& bbox, const glm::vec3& normal) {
glm::vec3 result = bbox.min();
if (normal.x >= 0)
result.x = bbox.max().x;
if (normal.y >= 0)
result.y = bbox.max().y;
if (normal.z >= 0)
result.z = bbox.max().z;
return result;
}
glm::vec3 getBBoxNegativeVertex(const BoundingBox& bbox, const glm::vec3& normal) {
glm::vec3 result = bbox.max();
if (normal.x >= 0)
result.x = bbox.min().x;
if (normal.y >= 0)
result.y = bbox.min().y;
if (normal.z >= 0)
result.z = bbox.min().z;
return result;
}
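// Illustrative usage sketch (not from the original file; the camera and mesh
// accessors below are hypothetical):
//
//   Frustum frustum(camera.viewProjection());
//   if (frustum.boundingBoxIntersetion(mesh.bounds()) != Frustum::Intersection::None)
//       draw(mesh); // fully or partially inside the frustum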
Frustum::Intersection Frustum::boundingBoxIntersetion(const BoundingBox& bbox) const {
Intersection result = Intersection::Inside;
//for each plane do ...
for(int i = 0; i < 6; i++) {
// is the positive vertex outside?
if (m_planes[i].distance(getBBoxPositiveVertex(bbox, m_planes[i].normal())) < 0)
return Intersection::None;
// is the negative vertex outside?
else if (m_planes[i].distance(getBBoxNegativeVertex(bbox, m_planes[i].normal())) < 0)
result = Intersection::Partialy;
}
return result;
}<|fim▁end|> | enum Planes { TOP, BOTTOM, LEFT, RIGHT, NEAR, FAR };
Frustum::Frustum(const glm::mat4& vp) {
// transpose matrix, so we can access rows via [] operator |
<|file_name|>suffix.ts<|end_file_name|><|fim▁begin|>// random suffixes
export const suffix = [
"-a-palooza",
"-a-thon",
"ability",
"able",
"ably",
"acea",
"aceous",
"acity",
"adelic",
"age",
"agogue",
"agogy",
"aholic",
"ality",
"ambulist",
"amine",
"aneous",
"anthropy",
"archy",
"aria",
"arian",
"arium",
"astic",
"ation",
"ative",
"atory",
"bility",
"biosis",
"biotic",
"bot",
"burger",
"burgh",
"cade",
"caine",
"centric",
"centrism",
"cidal",
"cide",
"ocracy",
"craft",
"cratic",
"cyte",
"drome",
"eaux",
"ectomy",
"escence",
"escent",
"esque",
"est",
"fest",
"ifier",
"gasm",
"gate",
"genesis",
"genic",
"ographer",
"ographical",
"ography",
"head",
"hedron",
"hood",
"icide",
"icism",
"icity",
"sicle",
"sicle",
"ienne",
"iferous",
"ification",
"ify",
"ile",
"ington",
"isation",
"ist",
"istic",
"ite",
"itis",
"itol",
"itude",
"ization",
"ize",
"izzle",
"kinesis",
"lalia",
"land",
"leptic",
"less",
"like",
"ling",
"lingual",
"logues",
"iloquy",
"mancy",
"mania",
"maniac",
"meister",
"metric",
"mobile",
"monger",
"morphic",
"morphism",
"morphous",
"naut",
"ness",
"nik",
"onomics",
"onomy",
"-o-rama",<|fim▁hole|> "oid",
"ologist",
"ology",
"onym",
"otomy",
"ous",
"pedia",
"philia",
"phobia",
"phonic",
" R Us",
"rama",
"scape",
"scope",
"ski",
"sky",
"soft",
"osophy",
"tastic",
"thermic",
"ville",
"ivorous",
"wise",
"worthy",
"zilla"
]<|fim▁end|> | "oholic", |
<|file_name|>encoder.rs<|end_file_name|><|fim▁begin|>// SPDX-License-Identifier: MIT
// Copyright [email protected]
// Copyright iced contributors
use super::constant_offsets::ConstantOffsets;
use super::ex_utils::to_js_error;
use super::instruction::Instruction;
use wasm_bindgen::prelude::*;
/// Encodes instructions decoded by the decoder or instructions created by other code.
/// See also [`BlockEncoder`] which can encode any number of instructions.
///
/// [`BlockEncoder`]: struct.BlockEncoder.html
///
/// ```js
/// const assert = require("assert").strict;
/// const { Decoder, DecoderOptions, Encoder } = require("iced-x86");
///
/// // xchg ah,[rdx+rsi+16h]
/// const bytes = new Uint8Array([0x86, 0x64, 0x32, 0x16]);
/// const decoder = new Decoder(64, bytes, DecoderOptions.None);
/// decoder.ipLo = 0x12345678;
/// decoder.ipHi = 0x00000000;
/// const instr = decoder.decode();
///
/// const encoder = new Encoder(64);
/// const len = encoder.encode(instr, 0x00000000, 0x55555555);
/// assert.equal(len, 4);
/// // We're done, take ownership of the buffer
/// const buffer = encoder.takeBuffer();
/// assert.equal(buffer.length, 4);
/// assert.equal(buffer[0], 0x86);
/// assert.equal(buffer[1], 0x64);
/// assert.equal(buffer[2], 0x32);
/// assert.equal(buffer[3], 0x16);
///
/// // Free wasm memory
/// decoder.free();
/// instr.free();
/// encoder.free();
/// ```
#[wasm_bindgen]
pub struct Encoder(iced_x86_rust::Encoder);
#[wasm_bindgen]
impl Encoder {
/// Creates an encoder
///
/// # Throws
///
/// Throws if `bitness` is not one of 16, 32, 64.
///
/// # Arguments
///
/// * `bitness`: 16, 32 or 64
#[wasm_bindgen(constructor)]
pub fn new(bitness: u32) -> Result<Encoder, JsValue> {
Self::with_capacity(bitness, 0)
}
/// Creates an encoder with an initial buffer capacity
///
/// # Throws
///
/// Throws if `bitness` is not one of 16, 32, 64.
///
/// # Arguments
///
/// * `bitness`: 16, 32 or 64
/// * `capacity`: Initial capacity of the `u8` buffer
#[wasm_bindgen(js_name = "withCapacity")]
pub fn with_capacity(bitness: u32, capacity: usize) -> Result<Encoder, JsValue> {
Ok(Self(iced_x86_rust::Encoder::try_with_capacity(bitness, capacity).map_err(to_js_error)?))
}
/// Encodes an instruction and returns the size of the encoded instruction.
///
/// Enable the `bigint` feature to use APIs with 64-bit numbers (requires `BigInt`).
///
/// # Throws
///
/// Throws an error on failure.
///
/// # Arguments
///
/// * `instruction`: Instruction to encode
/// * `ripHi`: High 32 bits of the `RIP` of the encoded instruction
/// * `ripLo`: Low 32 bits of the `RIP` of the encoded instruction
///
/// # Examples
///
/// ```js
/// const assert = require("assert").strict;
/// const { Decoder, DecoderOptions, Encoder } = require("iced-x86");<|fim▁hole|> /// const bytes = new Uint8Array([0x75, 0x02]);
/// const decoder = new Decoder(64, bytes, DecoderOptions.None);
/// decoder.ipLo = 0x12345678;
/// decoder.ipHi = 0x00000000;
/// const instr = decoder.decode();
///
/// const encoder = new Encoder(64);
/// // Use a different IP (orig rip + 0x10)
/// const len = encoder.encode(instr, 0x00000000, 0x12345688);
/// assert.equal(len, 2);
/// // We're done, take ownership of the buffer
/// const buffer = encoder.takeBuffer();
/// assert.equal(buffer.length, 2);
/// assert.equal(buffer[0], 0x75);
/// assert.equal(buffer[1], 0xF2);
///
/// // Free wasm memory
/// decoder.free();
/// encoder.free();
/// instr.free();
/// ```
#[cfg(not(feature = "bigint"))]
pub fn encode(
&mut self, instruction: &Instruction, #[allow(non_snake_case)] ripHi: u32, #[allow(non_snake_case)] ripLo: u32,
) -> Result<u32, JsValue> {
let rip = ((ripHi as u64) << 32) | (ripLo as u64);
self.encode_core(instruction, rip)
}
/// Encodes an instruction and returns the size of the encoded instruction
///
/// # Throws
///
/// Throws an error on failure.
///
/// # Arguments
///
/// * `instruction`: Instruction to encode
/// * `rip`: `RIP` of the encoded instruction
///
/// # Examples
///
/// ```js
/// const assert = require("assert").strict;
/// const { Decoder, DecoderOptions, Encoder } = require("iced-x86");
///
/// // je short $+4
/// const bytes = new Uint8Array([0x75, 0x02]);
/// const decoder = new Decoder(64, bytes, DecoderOptions.None);
/// decoder.ipLo = 0x12345678;
/// decoder.ipHi = 0x00000000;
/// const instr = decoder.decode();
///
/// const encoder = new Encoder(64);
/// // Use a different IP (orig rip + 0x10)
/// const len = encoder.encode(instr, 0x00000000, 0x12345688);
/// assert.equal(len, 2);
/// // We're done, take ownership of the buffer
/// const buffer = encoder.takeBuffer();
/// assert.equal(buffer.length, 2);
/// assert.equal(buffer[0], 0x75);
/// assert.equal(buffer[1], 0xF2);
///
/// // Free wasm memory
/// decoder.free();
/// encoder.free();
/// instr.free();
/// ```
#[cfg(feature = "bigint")]
pub fn encode(&mut self, instruction: &Instruction, rip: u64) -> Result<u32, JsValue> {
self.encode_core(instruction, rip)
}
fn encode_core(&mut self, instruction: &Instruction, rip: u64) -> Result<u32, JsValue> {
self.0.encode(&instruction.0, rip).map_or_else(
|error| {
#[cfg(any(feature = "gas", feature = "intel", feature = "masm", feature = "nasm"))]
{
Err(js_sys::Error::new(&format!("{} ({})", error, instruction.0)).into())
}
#[cfg(not(any(feature = "gas", feature = "intel", feature = "masm", feature = "nasm")))]
{
Err(js_sys::Error::new(&format!("{}", error)).into())
}
},
|size| Ok(size as u32),
)
}
/// Writes a byte to the output buffer
///
/// # Arguments
///
/// `value`: Value to write
///
/// # Examples
///
/// ```js
/// const assert = require("assert").strict;
/// const { Code, Encoder, Instruction, Register } = require("iced-x86");
///
/// const encoder = new Encoder(64);
/// const instr = Instruction.createRegReg(Code.Add_r64_rm64, Register.R8, Register.RBP);
/// encoder.writeU8(0x90);
/// const len = encoder.encode(instr, 0x00000000, 0x55555555);
/// assert.equal(len, 3);
/// encoder.writeU8(0xCC);
/// // We're done, take ownership of the buffer
/// const buffer = encoder.takeBuffer();
/// assert.equal(buffer.length, 5);
/// assert.equal(buffer[0], 0x90);
/// assert.equal(buffer[1], 0x4C);
/// assert.equal(buffer[2], 0x03);
/// assert.equal(buffer[3], 0xC5);
/// assert.equal(buffer[4], 0xCC);
///
/// // Free wasm memory
/// encoder.free();
/// instr.free();
/// ```
#[wasm_bindgen(js_name = "writeU8")]
pub fn write_u8(&mut self, value: u8) {
self.0.write_u8(value)
}
/// Returns the buffer and initializes the internal buffer to an empty vector. Should be called when
/// you've encoded all instructions and need the raw instruction bytes. See also [`setBuffer()`].
///
/// [`setBuffer()`]: #method.set_buffer
#[wasm_bindgen(js_name = "takeBuffer")]
pub fn take_buffer(&mut self) -> Vec<u8> {
self.0.take_buffer()
}
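// Illustrative sketch (not from the original source): from JS, `takeBuffer()`
// drains the wasm-side buffer, so the encoder can keep encoding afterwards.
//
//     const encoder = new Encoder(64);
//     encoder.encode(instr, 0x00000000, 0x55555555);
//     const first = encoder.takeBuffer();  // bytes so far; buffer is now empty
//     encoder.encode(instr, 0x00000000, 0x55555558);
//     const second = encoder.takeBuffer(); // only the later instruction's bytes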
/// Overwrites the buffer with a new vector. The old buffer is dropped. See also [`takeBuffer()`].
///
/// [`takeBuffer()`]: #method.take_buffer
#[wasm_bindgen(js_name = "setBuffer")]
pub fn set_buffer(&mut self, buffer: Vec<u8>) {
self.0.set_buffer(buffer)
}
/// Gets the offsets of the constants (memory displacement and immediate) in the encoded instruction.
/// The caller can use this information to add relocations if needed.
#[wasm_bindgen(js_name = "getConstantOffsets")]
pub fn get_constant_offsets(&self) -> ConstantOffsets {
ConstantOffsets(self.0.get_constant_offsets())
}
/// Disables 2-byte VEX encoding and encodes all VEX instructions with the 3-byte VEX encoding
#[wasm_bindgen(getter)]
#[wasm_bindgen(js_name = "preventVEX2")]
pub fn prevent_vex2(&self) -> bool {
self.0.prevent_vex2()
}
/// Disables 2-byte VEX encoding and encodes all VEX instructions with the 3-byte VEX encoding
///
/// # Arguments
///
/// * `new_value`: new value
#[wasm_bindgen(setter)]
#[wasm_bindgen(js_name = "preventVEX2")]
pub fn set_prevent_vex2(&mut self, new_value: bool) {
self.0.set_prevent_vex2(new_value)
}
/// Value of the `VEX.W` bit to use if it's an instruction that ignores the bit. Default is 0.
#[wasm_bindgen(getter)]
#[wasm_bindgen(js_name = "VEX_WIG")]
pub fn vex_wig(&self) -> u32 {
self.0.vex_wig()
}
/// Value of the `VEX.W` bit to use if it's an instruction that ignores the bit. Default is 0.
///
/// # Arguments
///
/// * `new_value`: new value (0 or 1)
#[wasm_bindgen(setter)]
#[wasm_bindgen(js_name = "VEX_WIG")]
pub fn set_vex_wig(&mut self, new_value: u32) {
self.0.set_vex_wig(new_value)
}
/// Value of the `VEX.L` bit to use if it's an instruction that ignores the bit. Default is 0.
#[wasm_bindgen(getter)]
#[wasm_bindgen(js_name = "VEX_LIG")]
pub fn vex_lig(&self) -> u32 {
self.0.vex_lig()
}
/// Value of the `VEX.L` bit to use if it's an instruction that ignores the bit. Default is 0.
///
/// # Arguments
///
/// * `new_value`: new value (0 or 1)
#[wasm_bindgen(setter)]
#[wasm_bindgen(js_name = "VEX_LIG")]
pub fn set_vex_lig(&mut self, new_value: u32) {
self.0.set_vex_lig(new_value)
}
/// Value of the `EVEX.W` bit to use if it's an instruction that ignores the bit. Default is 0.
#[wasm_bindgen(getter)]
#[wasm_bindgen(js_name = "EVEX_WIG")]
pub fn evex_wig(&self) -> u32 {
self.0.evex_wig()
}
/// Value of the `EVEX.W` bit to use if it's an instruction that ignores the bit. Default is 0.
///
/// # Arguments
///
/// * `new_value`: new value (0 or 1)
#[wasm_bindgen(setter)]
#[wasm_bindgen(js_name = "EVEX_WIG")]
pub fn set_evex_wig(&mut self, new_value: u32) {
self.0.set_evex_wig(new_value)
}
/// Value of the `EVEX.L'L` bits to use if it's an instruction that ignores the bits. Default is 0.
#[wasm_bindgen(getter)]
#[wasm_bindgen(js_name = "EVEX_LIG")]
pub fn evex_lig(&self) -> u32 {
self.0.evex_lig()
}
/// Value of the `EVEX.L'L` bits to use if it's an instruction that ignores the bits. Default is 0.
///
/// # Arguments
///
/// * `new_value`: new value (0 or 3)
#[wasm_bindgen(setter)]
#[wasm_bindgen(js_name = "EVEX_LIG")]
pub fn set_evex_lig(&mut self, new_value: u32) {
self.0.set_evex_lig(new_value)
}
/// Gets the bitness (16, 32 or 64)
#[wasm_bindgen(getter)]
pub fn bitness(&self) -> u32 {
self.0.bitness()
}
}<|fim▁end|> | ///
/// // je short $+4 |
<|file_name|>head-validation.js<|end_file_name|><|fim▁begin|>/**
* Copyright 2020 The AMP HTML Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS-IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import {Services} from '../../../src/services';
import {getMode} from '../../../src/mode';
import {includes} from '../../../src/string';
import {map} from '../../../src/utils/object';
import {parseExtensionUrl} from '../../../src/service/extension-script';
import {preloadFriendlyIframeEmbedExtensions} from '../../../src/friendly-iframe-embed';
import {removeElement, rootNodeFor} from '../../../src/dom';
import {urls} from '../../../src/config';
/**
* @typedef {{
* extensions: !Array<{extensionId: (string|undefined), extensionVersion: (string|undefined)}>,
* head: !Element
* }}
*/
export let ValidatedHeadDef;
// From validator/validator-main.protoascii
const ALLOWED_FONT_REGEX = new RegExp(
'https://cdn\\.materialdesignicons\\.com/' +
'([0-9]+\\.?)+/css/materialdesignicons\\.min\\.css|' +
'https://cloud\\.typography\\.com/' +
'[0-9]*/[0-9]*/css/fonts\\.css|' +
'https://fast\\.fonts\\.net/.*|' +
'https://fonts\\.googleapis\\.com/css2?\\?.*|' +
'https://fonts\\.googleapis\\.com/icon\\?.*|' +
'https://fonts\\.googleapis\\.com/earlyaccess/.*\\.css|' +
'https://maxcdn\\.bootstrapcdn\\.com/font-awesome/' +
'([0-9]+\\.?)+/css/font-awesome\\.min\\.css(\\?.*)?|' +
'https://(use|pro)\\.fontawesome\\.com/releases/v([0-9]+\\.?)+' +
'/css/[0-9a-zA-Z-]+\\.css|' +
'https://(use|pro)\\.fontawesome\\.com/[0-9a-zA-Z-]+\\.css|' +
'https://use\\.typekit\\.net/[\\w\\p{L}\\p{N}_]+\\.css'
);
// If editing please also change:
// extensions/amp-a4a/amp-a4a-format.md#allowed-amp-extensions-and-builtins
const EXTENSION_ALLOWLIST = map({
'amp-accordion': true,
'amp-ad-exit': true,
'amp-analytics': true,
'amp-anim': true,
'amp-animation': true,
'amp-audio': true,
'amp-bind': true,
'amp-carousel': true,
'amp-fit-text': true,
'amp-font': true,
'amp-form': true,
'amp-img': true,
'amp-layout': true,
'amp-lightbox': true,
'amp-mraid': true,
'amp-mustache': true,
'amp-pixel': true,
'amp-position-observer': true,
'amp-selector': true,
'amp-social-share': true,<|fim▁hole|> urls.cdn.replace(/\./g, '\\.') + '/v0/'
);
/**
* Sanitizes AMPHTML Ad head element and extracts extensions to be installed.
* @param {!Window} win
* @param {!Element} adElement
* @param {?Element} head
* @return {?ValidatedHeadDef}
*/
export function processHead(win, adElement, head) {
if (!head || !head.firstChild) {
return null;
}
const root = rootNodeFor(head);
const htmlTag = root.documentElement;
if (
!htmlTag ||
(!htmlTag.hasAttribute('amp4ads') &&
!htmlTag.hasAttribute('⚡️4ads') &&
!htmlTag.hasAttribute('⚡4ads')) // Unicode weirdness.
) {
return null;
}
const urlService = Services.urlForDoc(adElement);
/** @type {!Array<{extensionId: string, extensionVersion: string}>} */
const extensions = [];
const fonts = [];
const images = [];
let element = head.firstElementChild;
while (element) {
// Store next element here as the following code will remove
// certain elements from the detached DOM.
const nextElement = element.nextElementSibling;
switch (element.tagName.toUpperCase()) {
case 'SCRIPT':
handleScript(extensions, element);
break;
case 'STYLE':
handleStyle(element);
break;
case 'LINK':
handleLink(fonts, images, element);
break;
// Allow these without validation.
case 'META':
case 'TITLE':
break;
default:
removeElement(element);
break;
}
element = nextElement;
}
// Load any extensions; do not wait on their promises as this
// is just to prefetch.
preloadFriendlyIframeEmbedExtensions(win, extensions);
// Preload any fonts.
fonts.forEach((fontUrl) =>
Services.preconnectFor(win).preload(adElement.getAmpDoc(), fontUrl)
);
// Preload any AMP images.
images.forEach(
(imageUrl) =>
urlService.isSecure(imageUrl) &&
Services.preconnectFor(win).preload(adElement.getAmpDoc(), imageUrl)
);
return {
extensions,
head,
};
}
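// Illustrative sketch (not part of the AMP sources): vetting a creative's
// <head> before rendering; `adElement` and `creativeDoc` are assumed to come
// from the surrounding amp-ad code, and `collapseAd` is hypothetical.
//
//   const validated = processHead(win, adElement, creativeDoc.head);
//   if (!validated) {
//     collapseAd(); // head failed amp4ads validation
//   }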
/**
* Allows json scripts and allowlisted amp elements while removing others.
* @param {!Array<{extensionId: string, extensionVersion: string}>} extensions
* @param {!Element} script
*/
function handleScript(extensions, script) {
if (script.type === 'application/json') {
return;
}
const {src} = script;
const isTesting = getMode().test || getMode().localDev;
if (
EXTENSION_URL_PREFIX.test(src) ||
// Integration tests point to local files.
(isTesting && includes(src, '/dist/'))
) {
const extensionInfo = parseExtensionUrl(src);
if (extensionInfo && EXTENSION_ALLOWLIST[extensionInfo.extensionId]) {
extensions.push(extensionInfo);
}
}
removeElement(script);
}
/**
* Collect links that are from allowed font providers or used for image
* preloading. Remove other <link> elements.
* @param {!Array<string>} fonts
* @param {!Array<string>} images
* @param {!Element} link
*/
function handleLink(fonts, images, link) {
const {href, as, rel} = link;
if (rel === 'preload' && as === 'image') {
images.push(href);
return;
}
if (rel === 'stylesheet' && ALLOWED_FONT_REGEX.test(href)) {
fonts.push(href);
return;
}
removeElement(link);
}
/**
* Remove any non `amp-custom` or `amp-keyframe` styles.
* @param {!Element} style
*/
function handleStyle(style) {
if (
style.hasAttribute('amp-custom') ||
style.hasAttribute('amp-keyframes') ||
style.hasAttribute('amp4ads-boilerplate')
) {
return;
}
removeElement(style);
}<|fim▁end|> | 'amp-video': true,
});
const EXTENSION_URL_PREFIX = new RegExp( |
<|file_name|>render_particle.py<|end_file_name|><|fim▁begin|>import pygame
pygame.init()
#-- SCREEN CHARACTERISTICS ------------------------->>>
background_color = (255,255,255)
(width, height) = (300, 200)
class Particle:
def __init__(self, pos, radius):
# Python 3 removed tuple parameter unpacking, so unpack explicitly
self.x, self.y = pos
self.radius = radius
self.color = (255, 0, 0)
self.thickness = 1
<|fim▁hole|>
#-- RENDER SCREEN ---------------------------------->>>
screen = pygame.display.set_mode((width, height))
screen.fill(background_color)
#pygame.draw.circle(canvas, color, position(x,y), radius, thickness)
particle = Particle((150, 100), 20)
particle.display()
#-- RUN LOOP --------------------------------------->>>
pygame.display.flip()
running = True
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False<|fim▁end|> | def display(self):
pygame.draw.circle(screen, self.color, (self.x, self.y), self.radius, self.thickness) |
<|file_name|>0001_initial.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-08 15:45
from __future__ import unicode_literals
from django.db import migrations
import share.robot
<|fim▁hole|> ('share', '0001_initial'),
('djcelery', '0001_initial'),
]
operations = [
migrations.RunPython(
code=share.robot.RobotUserMigration('org.biorxiv'),
),
migrations.RunPython(
code=share.robot.RobotOauthTokenMigration('org.biorxiv'),
),
migrations.RunPython(
code=share.robot.RobotScheduleMigration('org.biorxiv'),
),
]<|fim▁end|> |
class Migration(migrations.Migration):
dependencies = [ |
<|file_name|>Blasticator.js<|end_file_name|><|fim▁begin|>var Blasticator = function() {
var init = function() {
registerSettings();
};
var showDialogue = function() {
new ModalDialogue({
message:'This will destroy EVERYTHING. FOREVER.',
buttons:[{
label:'Keep my data',
role:'secondary',
autoClose:true
},{
label:'BURN IT ALL',
role:'primary',
autoClose:true,
callback:function() {
App.persister.clear();
window.location.reload(true);
}
}]
});
};<|fim▁hole|> App.settings.register([{
section:'Data',
label:'Clear data',
type:'button',
iconClass:'fa-trash',
callback:showDialogue
}]);
};
init();
};<|fim▁end|> |
var registerSettings = function() { |
<|file_name|>use-as-where-use-ends-with-mod-sep.rs<|end_file_name|><|fim▁begin|><|fim▁hole|>//~^ ERROR: expected one of `::`, `;`, or `as`, found `foo`<|fim▁end|> | use std::any:: as foo; //~ ERROR expected identifier, found keyword `as` |
<|file_name|>f03.py<|end_file_name|><|fim▁begin|>"""engine.SCons.Tool.f03
Tool-specific initialization for the generic Posix f03 Fortran compiler.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001 - 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#<|fim▁hole|>
import SCons.Defaults
import SCons.Tool
import SCons.Util
import fortran
from SCons.Tool.FortranCommon import add_all_to_env, add_f03_to_env
compilers = ['f03']
def generate(env):
add_all_to_env(env)
add_f03_to_env(env)
fcomp = env.Detect(compilers) or 'f03'
env['F03'] = fcomp
env['SHF03'] = fcomp
env['FORTRAN'] = fcomp
env['SHFORTRAN'] = fcomp
def exists(env):
return env.Detect(compilers)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:<|fim▁end|> |
__revision__ = "src/engine/SCons/Tool/f03.py 2014/07/05 09:42:21 garyo" |
<|file_name|>local_gpus.py<|end_file_name|><|fim▁begin|>import os
from cosmos.api import Cosmos, py_call
from cosmos.util.helpers import environment_variables
def use_cuda_device(some_arg, num_gpus):
assert "CUDA_VISIBLE_DEVICES" in os.environ
print(("some_arg", some_arg))
print(("CUDA_VISIBLE_DEVICES", os.environ["CUDA_VISIBLE_DEVICES"]))
assert len(os.environ["CUDA_VISIBLE_DEVICES"].split(",")) == num_gpus
def main():
cosmos = Cosmos().initdb()
workflow = cosmos.start("gpu", skip_confirm=True)
for i, num_gpus in enumerate([1, 1, 2, 2, 3]):
task = workflow.add_task(
use_cuda_device, dict(some_arg=i, num_gpus=num_gpus), gpu_req=num_gpus, uid=str(i),
)
workflow.run(<|fim▁hole|> max_gpus=len(os.environ["COSMOS_LOCAL_GPU_DEVICES"].split(",")), cmd_wrapper=py_call,
)
if __name__ == "__main__":
with environment_variables(COSMOS_LOCAL_GPU_DEVICES="0,1,3"):
main()<|fim▁end|> | |
<|file_name|>managefiles.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Zomboided
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This module is a bunch of functions that are called from the settings
# menu to manage various file groups.
import xbmcaddon
import xbmcgui
import xbmcvfs
import datetime
import os
import sys
import xbmc
from libs.vpnproviders import removeGeneratedFiles, cleanPassFiles, providers, usesUserKeys, usesMultipleKeys, getUserKeys
from libs.vpnproviders import getUserCerts, getVPNDisplay, getVPNLocation, removeDownloadedFiles, isAlternative, resetAlternative
from libs.utility import debugTrace, errorTrace, infoTrace, newPrint, getID, getName
from libs.vpnplatform import getLogPath, getUserDataPath, writeVPNLog, copySystemdFiles, addSystemd, removeSystemd, generateVPNs
from libs.common import resetVPNConnections, isVPNConnected, disconnectVPN, suspendConfigUpdate, resumeConfigUpdate, dnsFix, getVPNRequestedProfile
from libs.common import resetVPNProvider, setAPICommand
from libs.ipinfo import resetIPServices
try:
from libs.generation import generateAll
except:
pass
action = sys.argv[1]<|fim▁hole|> addon = xbmcaddon.Addon(getID())
addon_name = getName()
# Reset the ovpn files
if action == "ovpn":
if getVPNRequestedProfile() == "":
if xbmcgui.Dialog().yesno(addon_name, "Resetting the VPN provider will disconnect and reset all VPN connections, and then remove any files that have been created. Continue?"):
suspendConfigUpdate()
# Disconnect so that live files are not being modified
resetVPNConnections(addon)
infoTrace("managefiles.py", "Resetting the VPN provider")
# Delete the generated files, and reset the locations so it can be selected again
removeGeneratedFiles()
# Delete any values that have previously been validated
vpn_provider = getVPNLocation(addon.getSetting("vpn_provider"))
if isAlternative(vpn_provider): resetAlternative(vpn_provider)
# Reset the IP service error counts, etc
resetIPServices()
addon = xbmcaddon.Addon(getID())
resetVPNProvider(addon)
addon = xbmcaddon.Addon(getID())
resumeConfigUpdate()
xbmcgui.Dialog().ok(addon_name, "Reset the VPN provider. Validate a connection to start using a VPN again.")
else:
xbmcgui.Dialog().ok(addon_name, "Connection to VPN being attempted and has been aborted. Try again in a few seconds.")
setAPICommand("Disconnect")
# Generate the VPN provider files
if action == "generate":
# Only used during development to create location files
generateAll()
xbmcgui.Dialog().ok(addon_name, "Regenerated some or all of the VPN location files.")
# Delete all of the downloaded VPN files
if action == "downloads":
debugTrace("Deleting all downloaded VPN files")
removeDownloadedFiles()
xbmcgui.Dialog().ok(addon_name, "Deleted all of the downloaded VPN files. They'll be downloaded again if required.")
# Copy the log file
elif action == "log":
log_path = ""
dest_path = ""
try:
log_path = getLogPath()
start_dir = ""
dest_folder = xbmcgui.Dialog().browse(0, "Select folder to copy log file into", "files", "", False, False, start_dir, False)
dest_path = "kodi " + datetime.datetime.now().strftime("%y-%m-%d %H-%M-%S") + ".log"
dest_path = dest_folder + dest_path.replace(" ", "_")
# Write VPN log to log before copying
writeVPNLog()
debugTrace("Copying " + log_path + " to " + dest_path)
addon = xbmcaddon.Addon(getID())
infoTrace("managefiles.py", "Copying log file to " + dest_path + ". Using version " + addon.getSetting("version_number"))
xbmcvfs.copy(log_path, dest_path)
if not xbmcvfs.exists(dest_path): raise IOError('Failed to copy log ' + log_path + " to " + dest_path)
dialog_message = "Copied log file to: " + dest_path
except:
errorTrace("managefiles.py", "Failed to copy log from " + log_path + " to " + dest_path)
if xbmcvfs.exists(log_path):
dialog_message = "Error copying log, try copying it to a different location."
else:
dialog_messsage = "Could not find the kodi.log file."
errorTrace("managefiles.py", dialog_message + " " + log_path + ", " + dest_path)
xbmcgui.Dialog().ok("Log Copy", dialog_message)
# Delete the user key and cert files
elif action == "user":
if addon.getSetting("1_vpn_validated") == "" or xbmcgui.Dialog().yesno(addon_name, "Deleting key and certificate files will disconnect and reset all VPN connections. Connections must be re-validated before use. Continue?"):
# Disconnect so that live files are not being modified
if isVPNConnected(): resetVPNConnections(addon)
# Select the provider
provider_list = []
for provider in providers:
if usesUserKeys(provider):
provider_list.append(getVPNDisplay(provider))
provider_list.sort()
index = xbmcgui.Dialog().select("Select VPN provider", provider_list)
provider_display = provider_list[index]
provider = getVPNLocation(provider_display)
# Get the key/cert pairs for that provider and offer up for deletion
user_keys = getUserKeys(provider)
user_certs = getUserCerts(provider)
if len(user_keys) > 0 or len(user_certs) > 0:
still_deleting = True
while still_deleting:
if len(user_keys) > 0 or len(user_certs) > 0:
# Build a list of things to display. We should always have pairs, but if
# something didn't copy or the user has messed with the dir this will cope
all_user = []
single_pair = "user [I](Same key and certificate used for all connections)[/I]"
for key in user_keys:
list_item = os.path.basename(key)
list_item = list_item.replace(".key", "")
if list_item == "user": list_item = single_pair
all_user.append(list_item)
for cert in user_certs:
list_item = os.path.basename(cert)
list_item = list_item.replace(".crt", "")
if list_item == "user": list_item = single_pair
if not list_item in all_user: all_user.append(list_item)
all_user.sort()
# Offer a delete all option if there are multiple keys
all_item = "[I]Delete all key and certificate files[/I]"
if usesMultipleKeys(provider):
all_user.append(all_item)
# Add in a finished option
finished_item = "[I]Finished[/I]"
all_user.append(finished_item)
# Get the pair to delete
index = xbmcgui.Dialog().select("Select key and certificate to delete, or [I]Finished[/I]", all_user)
if all_user[index] == finished_item:
still_deleting = False
else:
if all_user[index] == single_pair : all_user[index] = "user"
if all_user[index] == all_item:
if xbmcgui.Dialog().yesno(addon_name, "Are you sure you want to delete all key and certificate files for " + provider_display + "?"):
for item in all_user:
if not item == all_item and not item == finished_item:
path = getUserDataPath(provider + "/" + item)
try:
if xbmcvfs.exists(path + ".key"):
xbmcvfs.delete(path + ".key")
if xbmcvfs.exists(path + ".txt"):
xbmcvfs.delete(path + ".txt")
if xbmcvfs.exists(path + ".crt"):
xbmcvfs.delete(path + ".crt")
except:
xbmcgui.Dialog().ok(addon_name, "Couldn't delete one of the key or certificate files: " + path)
else:
path = getUserDataPath(provider + "/" + all_user[index])
try:
if xbmcvfs.exists(path+".key"):
xbmcvfs.delete(path + ".key")
if xbmcvfs.exists(path + ".txt"):
xbmcvfs.delete(path + ".txt")
if xbmcvfs.exists(path + ".crt"):
xbmcvfs.delete(path + ".crt")
except:
xbmcgui.Dialog().ok(addon_name, "Couldn't delete one of the key or certificate files: " + path)
# Fetch the directory list again
user_keys = getUserKeys(provider)
user_certs = getUserCerts(provider)
if len(user_keys) == 0 and len(user_certs) == 0:
xbmcgui.Dialog().ok(addon_name, "All key and certificate files for " + provider_display + " have been deleted.")
else:
still_deleting = False
else:
xbmcgui.Dialog().ok(addon_name, "No key and certificate files exist for " + provider_display + ".")
# Fix the user defined files with DNS goodness
if action == "dns":
dnsFix()
command = "Addon.OpenSettings(" + getID() + ")"
xbmc.executebuiltin(command)
else:
errorTrace("managefiles.py", "VPN service is not ready")
debugTrace("-- Exit managefiles.py --")<|fim▁end|> |
debugTrace("-- Entered managefiles.py with parameter " + action + " --")
if not getID() == "": |
<|file_name|>a02307.js<|end_file_name|><|fim▁begin|>var a02307 =
[
[ "GenericVector", "a02307.html#a60d42eebf02708482a8b506edd417990", null ],<|fim▁hole|> [ "~GenericVector", "a02307.html#a49840c8743a063b87839baef7e19b968", null ],
[ "back", "a02307.html#a48b82547ebbaa5fedecfdebe7e2f155a", null ],
[ "binary_search", "a02307.html#ad561e19e75a0fb30f0118774d7fa5621", null ],
[ "bool_binary_search", "a02307.html#a8c261f66a24da67aac1acca7aa8f650a", null ],
[ "choose_nth_item", "a02307.html#a5c4218ef833d0fe5db9b9749abd81ea5", null ],
[ "choose_nth_item", "a02307.html#ae1e555b0cdded2c36dd6cf15345f659f", null ],
[ "clear", "a02307.html#a9cdbff49b186574b83e43afba606fdd9", null ],
[ "compact", "a02307.html#a080f7786e007523bcaa3f69913a82882", null ],
[ "compact_sorted", "a02307.html#a8cb22ff55d6dd125d93cd03fd73bf8ad", null ],
[ "contains", "a02307.html#a997e0fcaaa6b6533401dc54c0691e2e5", null ],
[ "contains_index", "a02307.html#ac1aae0b1c22248f264dad02481123398", null ],
[ "delete_data_pointers", "a02307.html#a98f62dccd75224a60437c2761bd215cd", null ],
[ "DeSerialize", "a02307.html#aa4f5b1bc0d044fbd1fc77363b798c39c", null ],
[ "DeSerialize", "a02307.html#a2e4fca9599eff590b76affc0a0aa0a2b", null ],
[ "DeSerializeClasses", "a02307.html#a698ebd328d22f1edc114053ca2eba48e", null ],
[ "DeSerializeClasses", "a02307.html#ade729c7d5429fbd5be304e3493a8a95f", null ],
[ "dot_product", "a02307.html#a6f6dfbc607499173e7809656c6c505bc", null ],
[ "double_the_size", "a02307.html#af0214c8c21da9eb57dfebc78611d0cd6", null ],
[ "empty", "a02307.html#a172c4aa23ba397e24319ae095281cbcc", null ],
[ "get", "a02307.html#abd0a875f98a1d78613ed3521d96e5300", null ],
[ "get_index", "a02307.html#a6dee574daf4a3d4f0fc7048964f8f252", null ],
[ "init", "a02307.html#a5b010723588fe15f303e4f3474d8479e", null ],
[ "init_to_size", "a02307.html#a6751521fd3eb461d81fc83ef93a0def3", null ],
[ "insert", "a02307.html#a57ca5259541548a97bcfd4d0925a27ff", null ],
[ "length", "a02307.html#a6af4e0a2a30dda267d19bf783ae22eb7", null ],
[ "move", "a02307.html#abae057ce589be25aae9b80958f84e34c", null ],
[ "operator+=", "a02307.html#af73fadcdb08f0a12a5615f2bcf6fa6a8", null ],
[ "operator+=", "a02307.html#acc7df2256174b32632e4d5b6c8d05d29", null ],
[ "operator=", "a02307.html#af6fd5b3891b276c10add96f9411bec05", null ],
[ "operator[]", "a02307.html#afd51f3f981284adb20bdf3b0bfd1c1f7", null ],
[ "pop_back", "a02307.html#a0621dd57ce58dae3cb5f3d61e76bd233", null ],
[ "push_back", "a02307.html#a0dc89fe2a365b04a61017f9d78c1a303", null ],
[ "push_back_new", "a02307.html#a393f9f8dcc55ad759a5c7fbdc4840a89", null ],
[ "push_front", "a02307.html#ae08e7cece0097ad356b5e565cbb2cf0b", null ],
[ "read", "a02307.html#a10a273cab07e56c1654b2167f8aa9408", null ],
[ "remove", "a02307.html#a3fd37a240a42f1c3052e8d28614d3702", null ],
[ "reserve", "a02307.html#aa225ea3fc9374961482bc804028317eb", null ],
[ "resize_no_init", "a02307.html#a09005e8f2b51d033d60eb5690aa5d112", null ],
[ "reverse", "a02307.html#a58f6d73009cc3c56d0efb0d96ad35b5b", null ],
[ "Serialize", "a02307.html#a206a6fe71c3780d862d97ef7c5fc9546", null ],
[ "Serialize", "a02307.html#a3e994fd938468ff4fc4b4a902e970876", null ],
[ "SerializeClasses", "a02307.html#ad0e8164e4c5c82e9e367c8a6d9b755b1", null ],
[ "SerializeClasses", "a02307.html#a7d0060c687429049a0ea5cf21d067b8e", null ],
[ "set", "a02307.html#a067b7833ee66238b7b5e230404525fcb", null ],
[ "set_clear_callback", "a02307.html#af2bbca5b3258035a333b62679835a253", null ],
[ "set_compare_callback", "a02307.html#aa3ec670c7f68a95f84641a0ded8cb61f", null ],
[ "size", "a02307.html#a20cfad5c58c50cb85a9529d8ddbd96af", null ],
[ "size_reserved", "a02307.html#a1c273622446ec7b5a6669fa9c9fdd8e5", null ],
[ "sort", "a02307.html#a999bbd8ff336c81fe1198ea714c7936d", null ],
[ "sort", "a02307.html#a461142d4ff7c61f22119552b7c0b2755", null ],
[ "swap", "a02307.html#ac10b1de04fdfd4f5e4b90ac6d03f35b9", null ],
[ "truncate", "a02307.html#a980882b5ebc3e72fdedbdbe345196f21", null ],
[ "unsigned_size", "a02307.html#a47bd2385b28d536e8b6e87b689d61ede", null ],
[ "WithinBounds", "a02307.html#a367914d03777eef59176d48155d06b72", null ],
[ "write", "a02307.html#a8745d1d8394e852d12398d0458684dee", null ],
[ "clear_cb_", "a02307.html#a57a833bdcc07a53e9a7b57d07cac2131", null ],
[ "compare_cb_", "a02307.html#acd69761952fb39cbe7d7b43a6b06a432", null ],
[ "data_", "a02307.html#ab88657a46d06c175dcfc76c0fcdaac7d", null ],
[ "size_reserved_", "a02307.html#a4a02eb2a4ed31e8454cd8ae06eb8d7c5", null ],
[ "size_used_", "a02307.html#a99185b084a6ace7536818ce2f17b11fb", null ]
];<|fim▁end|> | [ "GenericVector", "a02307.html#a28a69767bcadb6058a2a9df4afecd5fc", null ],
[ "GenericVector", "a02307.html#a2b61cd1cd756770518f5ac30f817a9bf", null ], |
<|file_name|>service-763.service.ts<|end_file_name|><|fim▁begin|>/**
* @license
* Copyright Google Inc. All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.io/license
*/
import { Injectable } from '@angular/core';
@Injectable()
export class Service763Service {<|fim▁hole|><|fim▁end|> |
constructor() { }
} |
<|file_name|>OSQLFunctionSymmetricDifference.java<|end_file_name|><|fim▁begin|>/*
*
* * Copyright 2010-2016 OrientDB LTD (http://orientdb.com)
* *
* * Licensed under the Apache License, Version 2.0 (the "License");
* * you may not use this file except in compliance with the License.
* * You may obtain a copy of the License at
* *
* * http://www.apache.org/licenses/LICENSE-2.0
* *
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS,
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* * See the License for the specific language governing permissions and
* * limitations under the License.
* *
* * For more information: http://orientdb.com
*
*/
package com.orientechnologies.orient.core.sql.functions.coll;<|fim▁hole|>import com.orientechnologies.orient.core.command.OCommandContext;
import com.orientechnologies.orient.core.db.record.OIdentifiable;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
/**
* This operator can work as aggregate or inline. If only one argument is passed than aggregates,
* otherwise executes, and returns, the SYMMETRIC DIFFERENCE between the collections received as
* parameters. Works also with no collection values.
*
* @author Luca Garulli (l.garulli--(at)--orientdb.com)
*/
public class OSQLFunctionSymmetricDifference extends OSQLFunctionMultiValueAbstract<Set<Object>> {
public static final String NAME = "symmetricDifference";
private Set<Object> rejected;
public OSQLFunctionSymmetricDifference() {
super(NAME, 1, -1);
}
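  // An item seen once goes into "accepted"; a second sighting moves it to "rejected",
  // which keeps it (and any later duplicates) out of the accepted set for good.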
private static void addItemToResult(Object o, Set<Object> accepted, Set<Object> rejected) {
if (!accepted.contains(o) && !rejected.contains(o)) {
accepted.add(o);
} else {
accepted.remove(o);
rejected.add(o);
}
}
private static void addItemsToResult(
Collection<Object> co, Set<Object> accepted, Set<Object> rejected) {
for (Object o : co) {
addItemToResult(o, accepted, rejected);
}
}
@SuppressWarnings("unchecked")
public Object execute(
Object iThis,
OIdentifiable iCurrentRecord,
Object iCurrentResult,
final Object[] iParams,
OCommandContext iContext) {
if (iParams[0] == null) return null;
Object value = iParams[0];
if (iParams.length == 1) {
// AGGREGATION MODE (STATEFUL)
if (context == null) {
context = new HashSet<Object>();
rejected = new HashSet<Object>();
}
if (value instanceof Collection<?>) {
addItemsToResult((Collection<Object>) value, context, rejected);
} else {
addItemToResult(value, context, rejected);
}
return null;
} else {
// IN-LINE MODE (STATELESS)
final Set<Object> result = new HashSet<Object>();
final Set<Object> rejected = new HashSet<Object>();
for (Object iParameter : iParams) {
if (iParameter instanceof Collection<?>) {
addItemsToResult((Collection<Object>) iParameter, result, rejected);
} else {
addItemToResult(iParameter, result, rejected);
}
}
return result;
}
}
@Override
public Set<Object> getResult() {
if (returnDistributedResult()) {
final Map<String, Object> doc = new HashMap<String, Object>();
doc.put("result", context);
doc.put("rejected", rejected);
return Collections.<Object>singleton(doc);
} else {
return super.getResult();
}
}
public String getSyntax() {
return "difference(<field>*)";
}
@Override
public Object mergeDistributedResult(List<Object> resultsToMerge) {
if (returnDistributedResult()) {
final Set<Object> result = new HashSet<Object>();
final Set<Object> rejected = new HashSet<Object>();
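      // Two passes: first union every node's "rejected" set, then merge the per-node
      // results so that items rejected on any node stay excluded from the global result.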
for (Object item : resultsToMerge) {
rejected.addAll(unwrap(item, "rejected"));
}
for (Object item : resultsToMerge) {
addItemsToResult(unwrap(item, "result"), result, rejected);
}
return result;
}
if (!resultsToMerge.isEmpty()) return resultsToMerge.get(0);
return null;
}
@SuppressWarnings("unchecked")
private Set<Object> unwrap(Object obj, String field) {
final Set<Object> objAsSet = (Set<Object>) obj;
final Map<String, Object> objAsMap = (Map<String, Object>) objAsSet.iterator().next();
final Set<Object> objAsField = (Set<Object>) objAsMap.get(field);
return objAsField;
}
}<|fim▁end|> | |
<|file_name|>stickyMojo.js<|end_file_name|><|fim▁begin|>(function($) {
$.fn.extend({
stickyMojo: function(options) {
// Exit if there are no elements to avoid errors:
if (this.length === 0) {
return this;
}
var settings = $.extend({
'footerID': '',
'contentID': '',
'orientation': $(this).css('float')
}, options);
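      // Usage sketch (selectors are illustrative, not part of the plugin):
      // $('#sidebar').stickyMojo({ footerID: '#footer', contentID: '#content' });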
var sticky = {
'el': $(this),
        'stickyLeft': $(settings.contentID).outerWidth() + $(settings.contentID).offset().left,
'stickyTop2': $(this).offset().top,
'stickyHeight': $(this).outerHeight(true),
'contentHeight': $(settings.contentID).outerHeight(true),
'win': $(window),
'breakPoint': $(this).outerWidth(true) + $(settings.contentID).outerWidth(true),
'marg': parseInt($(this).css('margin-top'), 10)
};
var errors = checkSettings();
cacheElements();
return this.each(function() {
buildSticky();
});
<|fim▁hole|> function buildSticky() {
if (!errors.length) {
sticky.el.css('left', sticky.stickyLeft);
sticky.win.bind({
'load': stick,
'scroll': stick,
'resize': function() {
sticky.el.css('left', sticky.stickyLeft);
stick();
}
});
} else {
if (console && console.warn) {
console.warn(errors);
} else {
alert(errors);
}
}
}
// Caches the footer and content elements into jquery objects
function cacheElements() {
settings.footerID = $(settings.footerID);
settings.contentID = $(settings.contentID);
}
// Calcualtes the limits top and bottom limits for the sidebar
function calculateLimits() {
return {
limit: settings.footerID.offset().top - sticky.stickyHeight,
windowTop: sticky.win.scrollTop(),
stickyTop: sticky.stickyTop2 - sticky.marg
}
}
// Sets sidebar to fixed position
function setFixedSidebar() {
sticky.el.css({
position: 'fixed',
top: 0
});
}
// Determines the sidebar orientation and sets margins accordingly
function checkOrientation() {
if (settings.orientation === "left") {
settings.contentID.css('margin-left', sticky.el.outerWidth(true));
} else {
sticky.el.css('margin-left', settings.contentID.outerWidth(true));
}
}
// sets sidebar to a static positioned element
function setStaticSidebar() {
sticky.el.css({
'position': 'static',
'margin-left': '0px'
});
settings.contentID.css('margin-left', '0px');
}
// initiated to stop the sidebar from intersecting the footer
function setLimitedSidebar(diff) {
sticky.el.css({
top: diff
});
}
//determines whether sidebar should stick and applies appropriate settings to make it stick
function stick() {
var tops = calculateLimits();
var hitBreakPoint = tops.stickyTop < tops.windowTop && (sticky.win.width() >= sticky.breakPoint);
if (hitBreakPoint) {
setFixedSidebar();
checkOrientation();
} else {
setStaticSidebar();
}
if (tops.limit < tops.windowTop) {
var diff = tops.limit - tops.windowTop;
setLimitedSidebar(diff);
}
}
// verifies that all settings are correct
function checkSettings() {
var errors = [];
for (var key in settings) {
if (!settings[key]) {
errors.push(settings[key]);
}
}
ieVersion() && errors.push("NO IE 7");
return errors;
}
function ieVersion() {
if(document.querySelector) {
return false;
}
else {
return true;
}
}
}
});
})(jQuery);<|fim▁end|> | |
<|file_name|>JpaApplicationDataRepository.java<|end_file_name|><|fim▁begin|>/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.rave.portal.repository.impl;
import org.apache.rave.exception.NotSupportedException;
import org.apache.commons.lang3.StringUtils;
import org.apache.rave.exception.DataSerializationException;
import org.apache.rave.model.ApplicationData;
import org.apache.rave.portal.model.JpaApplicationData;
import org.apache.rave.portal.model.conversion.JpaApplicationDataConverter;
import org.apache.rave.portal.repository.ApplicationDataRepository;
import org.apache.rave.util.CollectionUtils;
import org.apache.rave.util.JsonUtils;
import org.json.JSONException;
import org.json.JSONObject;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Repository;
import org.springframework.transaction.annotation.Transactional;
import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.EntityManager;
import javax.persistence.Lob;
import javax.persistence.PersistenceContext;
import javax.persistence.TypedQuery;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import static org.apache.rave.persistence.jpa.util.JpaUtil.getSingleResult;
import static org.apache.rave.persistence.jpa.util.JpaUtil.saveOrUpdate;
@Repository
public class JpaApplicationDataRepository implements ApplicationDataRepository {
@PersistenceContext
private EntityManager manager;
@Autowired
private JpaApplicationDataConverter converter;
@Override
public Class<? extends ApplicationData> getType() {
return JpaApplicationData.class;
}
@Override
public ApplicationData get(String id) {
JpaSerializableApplicationData applicationData = (JpaSerializableApplicationData) manager.find(JpaApplicationData.class, Long.parseLong(id));
if (applicationData != null) {
applicationData.deserializeData();
}
return applicationData;
}
@Override
@Transactional
public JpaApplicationData save(ApplicationData item) {
JpaApplicationData jpaAppData = converter.convert(item);
JpaSerializableApplicationData jpaSerializableApplicationData = getJpaSerializableApplicationData(jpaAppData);<|fim▁hole|>
@Override
public void delete(ApplicationData item) {
manager.remove(item instanceof JpaApplicationData ? item : get(item.getId()));
}
@Override
public List<ApplicationData> getAll() {
throw new NotSupportedException("This function is not yet implemented for this class.");
}
@Override
public List<ApplicationData> getLimitedList(int offset, int limit) {
throw new NotSupportedException("This function is not yet implemented for this class.");
}
@Override
public int getCountAll() {
throw new NotSupportedException("This function is not yet implemented for this class.");
}
@Override
public List<ApplicationData> getApplicationData(List<String> userIds, String appId) {
        // if the call is only looking for data for a single user, use the more efficient single-user variant transparently
if (userIds.size() == 1) {
List<ApplicationData> data = new ArrayList<ApplicationData>();
ApplicationData applicationData = getApplicationData(userIds.get(0), appId);
if (applicationData != null) {
data.add(applicationData);
}
return data;
}
TypedQuery<JpaSerializableApplicationData> query = manager.createNamedQuery(JpaApplicationData.FIND_BY_USER_IDS_AND_APP_ID,
JpaSerializableApplicationData.class);
query.setParameter(JpaApplicationData.USER_IDS_PARAM, userIds);
query.setParameter(JpaApplicationData.APP_URL_PARAM, appId);
List<JpaSerializableApplicationData> results = query.getResultList();
for (JpaSerializableApplicationData applicationData : results) {
applicationData.deserializeData();
}
return CollectionUtils.<ApplicationData>toBaseTypedList(results);
}
@Override
public JpaApplicationData getApplicationData(String personId, String appId) {
TypedQuery<JpaSerializableApplicationData> query = manager.createNamedQuery(JpaApplicationData.FIND_BY_USER_ID_AND_APP_ID,
JpaSerializableApplicationData.class);
query.setParameter(JpaApplicationData.USER_ID_PARAM, personId);
query.setParameter(JpaApplicationData.APP_URL_PARAM, appId);
JpaSerializableApplicationData applicationData = getSingleResult(query.getResultList());
if (applicationData != null) {
applicationData.deserializeData();
}
return applicationData;
}
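    // Upgrades a plain JpaApplicationData into the serializable subclass below, so its
    // data map can be persisted as a JSON string without exposing that detail publicly.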
private JpaSerializableApplicationData getJpaSerializableApplicationData(JpaApplicationData applicationData) {
if (applicationData instanceof JpaSerializableApplicationData) {
return (JpaSerializableApplicationData) applicationData;
}
return new JpaSerializableApplicationData(applicationData.getEntityId(), applicationData.getUserId(),
applicationData.getAppUrl(), applicationData.getData());
}
/**
     * This class is here so that the details of the persistence strategy in use for serializing the appdata map to a
     * JSON string don't end up being reflected in any public API of the ApplicationData object itself.
* <p/>
* This allows the public API of this repository to deal in clean ApplicationData models, but under the covers it
* uses this model for the actual persistence to the database.
*/
@Entity
public static class JpaSerializableApplicationData extends JpaApplicationData {
@Lob
@Column(name = "serialized_data")
private String serializedData;
public JpaSerializableApplicationData() {
super();
}
public JpaSerializableApplicationData(Long entityId, String userId, String appUrl, Map<String, Object> data) {
super(entityId, userId, appUrl, data);
}
public void serializeData() {
Map<String, Object> data = this.getData();
if (data != null) {
serializedData = JsonUtils.stringify(data);
}
}
@SuppressWarnings("unchecked")
public void deserializeData() {
if (serializedData != null && StringUtils.isNotBlank(serializedData)) {
this.setData(JsonUtils.parse(serializedData, Map.class));
}
}
}
}<|fim▁end|> | jpaSerializableApplicationData.serializeData();
return saveOrUpdate(jpaSerializableApplicationData.getEntityId(), manager, jpaSerializableApplicationData);
} |
<|file_name|>managePage.js<|end_file_name|><|fim▁begin|>$(document).ready(function () {
//$('#pageListDt').DataTable();
/*
$("#publish").click(function () {
NccPageMask.ShowLoadingMask();
$.ajax({
type: "post",
url: form.action,<|fim▁hole|> contentType: "application/x-www-form-urlencoded",
success: function (rsp, textStatus, jqXHR) {
NccPageMask.HideLoadingMask();
if (rsp.isSuccess) {
NccAlert.ShowSuccess(rsp.message);
ClearForm();
}
else {
NccAlert.ShowError("Error: " + rsp.message);
}
},
error: function (jqXHR, textStatus, errorThrown) {
NccPageMask.HideLoadingMask();
NccAlert.ShowError("Error. Please try again.");
}
})
});
*/
});<|fim▁end|> | data: form.serialize(), |
<|file_name|>benches.rs<|end_file_name|><|fim▁begin|>#![deny(warnings)]
#![feature(test)]
extern crate shove;
extern crate test;
use test::Bencher;
#[bench]<|fim▁hole|>fn foo(_bencher: &mut Bencher) {
    test::black_box(3 + 7); // keep the result "used" so #![deny(warnings)] doesn't reject the unused arithmetic
}<|fim▁end|> | |
<|file_name|>test_field.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""Test CLR field support."""
import System
import pytest
from Python.Test import FieldTest
def test_public_instance_field():
"""Test public instance fields."""
ob = FieldTest()
assert ob.PublicField == 0
ob.PublicField = 1
assert ob.PublicField == 1
with pytest.raises(TypeError):
del FieldTest().PublicField
def test_public_static_field():
"""Test public static fields."""
ob = FieldTest()
assert FieldTest.PublicStaticField == 0
FieldTest.PublicStaticField = 1
assert FieldTest.PublicStaticField == 1
assert ob.PublicStaticField == 1
ob.PublicStaticField = 0
assert ob.PublicStaticField == 0
with pytest.raises(TypeError):
del FieldTest.PublicStaticField
with pytest.raises(TypeError):
del FieldTest().PublicStaticField
def test_protected_instance_field():
"""Test protected instance fields."""
ob = FieldTest()
assert ob.ProtectedField == 0
ob.ProtectedField = 1
assert ob.ProtectedField == 1
with pytest.raises(TypeError):
del FieldTest().ProtectedField
def test_protected_static_field():
"""Test protected static fields."""
ob = FieldTest()
assert FieldTest.ProtectedStaticField == 0
FieldTest.ProtectedStaticField = 1<|fim▁hole|> assert ob.ProtectedStaticField == 1
ob.ProtectedStaticField = 0
assert ob.ProtectedStaticField == 0
with pytest.raises(TypeError):
del FieldTest.ProtectedStaticField
with pytest.raises(TypeError):
del FieldTest().ProtectedStaticField
def test_read_only_instance_field():
"""Test readonly instance fields."""
assert FieldTest().ReadOnlyField == 0
with pytest.raises(TypeError):
FieldTest().ReadOnlyField = 1
with pytest.raises(TypeError):
del FieldTest().ReadOnlyField
def test_read_only_static_field():
"""Test readonly static fields."""
ob = FieldTest()
assert FieldTest.ReadOnlyStaticField == 0
assert ob.ReadOnlyStaticField == 0
with pytest.raises(TypeError):
FieldTest.ReadOnlyStaticField = 1
with pytest.raises(TypeError):
FieldTest().ReadOnlyStaticField = 1
with pytest.raises(TypeError):
del FieldTest.ReadOnlyStaticField
with pytest.raises(TypeError):
del FieldTest().ReadOnlyStaticField
def test_constant_field():
"""Test const fields."""
ob = FieldTest()
assert FieldTest.ConstField == 0
assert ob.ConstField == 0
with pytest.raises(TypeError):
FieldTest().ConstField = 1
with pytest.raises(TypeError):
FieldTest.ConstField = 1
with pytest.raises(TypeError):
del FieldTest().ConstField
with pytest.raises(TypeError):
del FieldTest.ConstField
def test_internal_field():
"""Test internal fields."""
with pytest.raises(AttributeError):
_ = FieldTest().InternalField
with pytest.raises(AttributeError):
_ = FieldTest().InternalStaticField
with pytest.raises(AttributeError):
_ = FieldTest.InternalStaticField
def test_private_field():
"""Test private fields."""
with pytest.raises(AttributeError):
_ = FieldTest().PrivateField
with pytest.raises(AttributeError):
_ = FieldTest().PrivateStaticField
with pytest.raises(AttributeError):
_ = FieldTest.PrivateStaticField
def test_field_descriptor_get_set():
"""Test field descriptor get / set."""
# This test ensures that setting an attribute implemented with
# a descriptor actually goes through the descriptor (rather than
# silently replacing the descriptor in the instance or type dict.
ob = FieldTest()
assert FieldTest.PublicStaticField == 0
assert ob.PublicStaticField == 0
descriptor = FieldTest.__dict__['PublicStaticField']
assert type(descriptor) != int
ob.PublicStaticField = 0
descriptor = FieldTest.__dict__['PublicStaticField']
assert type(descriptor) != int
FieldTest.PublicStaticField = 0
descriptor = FieldTest.__dict__['PublicStaticField']
assert type(descriptor) != int
def test_field_descriptor_wrong_type():
"""Test setting a field using a value of the wrong type."""
with pytest.raises(ValueError):
FieldTest().PublicField = "spam"
def test_field_descriptor_abuse():
"""Test field descriptor abuse."""
desc = FieldTest.__dict__['PublicField']
with pytest.raises(TypeError):
desc.__get__(0, 0)
with pytest.raises(TypeError):
desc.__set__(0, 0)
def test_boolean_field():
"""Test boolean fields."""
# change this to true / false later for Python 2.3?
ob = FieldTest()
assert ob.BooleanField is False
ob.BooleanField = True
assert ob.BooleanField is True
ob.BooleanField = False
assert ob.BooleanField is False
ob.BooleanField = 1
assert ob.BooleanField is True
ob.BooleanField = 0
assert ob.BooleanField is False
def test_sbyte_field():
"""Test sbyte fields."""
ob = FieldTest()
assert ob.SByteField == 0
ob.SByteField = 1
assert ob.SByteField == 1
def test_byte_field():
"""Test byte fields."""
ob = FieldTest()
assert ob.ByteField == 0
ob.ByteField = 1
assert ob.ByteField == 1
def test_char_field():
"""Test char fields."""
ob = FieldTest()
assert ob.CharField == u'A'
assert ob.CharField == 'A'
ob.CharField = 'B'
assert ob.CharField == u'B'
assert ob.CharField == 'B'
ob.CharField = u'C'
assert ob.CharField == u'C'
assert ob.CharField == 'C'
def test_int16_field():
"""Test int16 fields."""
ob = FieldTest()
assert ob.Int16Field == 0
ob.Int16Field = 1
assert ob.Int16Field == 1
def test_int32_field():
"""Test int32 fields."""
ob = FieldTest()
assert ob.Int32Field == 0
ob.Int32Field = 1
assert ob.Int32Field == 1
def test_int64_field():
"""Test int64 fields."""
ob = FieldTest()
assert ob.Int64Field == 0
ob.Int64Field = 1
assert ob.Int64Field == 1
def test_uint16_field():
"""Test uint16 fields."""
ob = FieldTest()
assert ob.UInt16Field == 0
ob.UInt16Field = 1
assert ob.UInt16Field == 1
def test_uint32_field():
"""Test uint32 fields."""
ob = FieldTest()
assert ob.UInt32Field == 0
ob.UInt32Field = 1
assert ob.UInt32Field == 1
def test_uint64_field():
"""Test uint64 fields."""
ob = FieldTest()
assert ob.UInt64Field == 0
ob.UInt64Field = 1
assert ob.UInt64Field == 1
def test_single_field():
"""Test single fields."""
ob = FieldTest()
assert ob.SingleField == 0.0
ob.SingleField = 1.1
assert ob.SingleField == 1.1
def test_double_field():
"""Test double fields."""
ob = FieldTest()
assert ob.DoubleField == 0.0
ob.DoubleField = 1.1
assert ob.DoubleField == 1.1
def test_decimal_field():
"""Test decimal fields."""
ob = FieldTest()
assert ob.DecimalField == System.Decimal(0)
ob.DecimalField = System.Decimal(1)
assert ob.DecimalField == System.Decimal(1)
def test_string_field():
"""Test string fields."""
ob = FieldTest()
assert ob.StringField == "spam"
ob.StringField = "eggs"
assert ob.StringField == "eggs"
def test_interface_field():
"""Test interface fields."""
from Python.Test import Spam, ISpam
ob = FieldTest()
assert ISpam(ob.SpamField).GetValue() == "spam"
assert ob.SpamField.GetValue() == "spam"
ob.SpamField = Spam("eggs")
assert ISpam(ob.SpamField).GetValue() == "eggs"
assert ob.SpamField.GetValue() == "eggs"
def test_object_field():
"""Test ob fields."""
ob = FieldTest()
assert ob.ObjectField is None
ob.ObjectField = System.String("spam")
assert ob.ObjectField == "spam"
ob.ObjectField = System.Int32(1)
assert ob.ObjectField == 1
ob.ObjectField = None
assert ob.ObjectField is None
def test_enum_field():
"""Test enum fields."""
from Python.Test import ShortEnum
ob = FieldTest()
assert ob.EnumField == ShortEnum.Zero
ob.EnumField = ShortEnum.One
assert ob.EnumField == ShortEnum.One
def test_nullable_field():
"""Test nullable fields."""
ob = FieldTest()
ob.StringField = None
assert ob.StringField is None
ob.ObjectField = None
assert ob.ObjectField is None
ob.SpamField = None
assert ob.SpamField is None
# Primitive types and enums should not be set to null.
with pytest.raises(TypeError):
FieldTest().Int32Field = None
with pytest.raises(TypeError):
FieldTest().EnumField = None<|fim▁end|> | assert FieldTest.ProtectedStaticField == 1
|
<|file_name|>commandtest.py<|end_file_name|><|fim▁begin|><|fim▁hole|>
import unittest
class TestCommand(unittest.TestCase):
def test_auth(self):
c = command.ConnectionAuth()
assert c.get_id() == 'AUTH'
if __name__ == '__main__':
unittest.main()<|fim▁end|> | from redispy import command |
<|file_name|>popcorn-api-notes.js<|end_file_name|><|fim▁begin|>// http://www.w3.org/2010/05/video/mediaevents.html
var poppy = popcorn( [ vid_elem_ref | 'id_string' ] );
poppy
// pass-through video control methods
.load()
.play()
.pause()
// property setters
.currentTime( time ) // skip forward or backwards `time` seconds
.playbackRate( rate )
.volume( delta )
.mute( [ state ] )
// sugar?
.rewind() // to beginning + stop??
.loop( [ state ] ) // toggle looping
// queuing (maybe unnecessary):
// enqueue method w/ optional args
.queue( 'method', args )
// enqueue arbitrary callback
.queue(function(next){ /* do stuff */ next(); })
// clear the queue
.clearQueue()
// execute arbitrary code @ time
poppy.exec( 1.23, function(){
// exec code
});
// plugin factory sample
popcorn.plugin( 'myPlugin' [, super_plugin ], init_options );
// call plugin (defined above)
poppy.myPlugin( time, options );
// define subtitle plugin
popcorn.plugin( 'subtitle', {
});
poppy
.subtitle( 1.5, {
html: '<p>SUPER AWESOME MEANING</p>',
duration: 5
})
.subtitle({
start: 1.5,
end: 6.5,
html: '<p>SUPER AWESOME MEANING</p>'
})
.subtitle([
{
start: 1.5,
html: '<p>SUPER AWESOME MEANING</p>'
},
{
start: 2.5,
end: 3.5,
html: '<p>OTHER NEAT TEXT</p>'
}
])
.data([
{
subtitle: [
{
start: 1.5,
html: '<p>SUPER AWESOME MEANING</p>'
},
{
start: 2.5,
end: 3.5,
html: '<p>OTHER NEAT TEXT</p>'
}
]
}
]);
// jQuery-dependent plugin, using $.ajax - extend popcorn.data
popcorn.plugin( 'data', popcorn.data, {
_setup: function( options ) {
// called when plugin is first registered (?)
},
_add: function( options ) {
// called when popcorn.data is called
// this == plugin (?)
if ( typeof options === 'string' ) {
$.ajax({
url: options
// stuff
});
} else {
return this.super.data.apply( this, arguments );
}
}
});
poppy.data( '/data.php' ) // data.php returns JSON?
/*
poppy.twitter( dom_elem | 'id_of_dom_elem', options ); // multiple twitters?? FAIL<|fim▁hole|>
var widget1 = $(dom_elem).twitter( options ); // ui widget factory initializes twitter widget
poppy.jQuery( 5.9, {
elem: widget1,
method: 'twitter',
args: [ 'search', '@cowboy' ]
})
poppy.jQuery( time, selector, methodname [, args... ] );
poppy.jQuery( 5.9, widget1, 'twitter', 'search', '@cowboy' );
poppy.jQuery( 5.9, '.div', 'css', 'color', 'red' );
poppy.jQuery( 5.9, '#form', 'submit' );
// sugar methods for jQuery
$(selector).popcorn( time, methodname [, args... ] );
$(selector).popcorn( time, fn );
// another idea, using jQuery special events api
$(selector).bind( 'popcorn', { time: 5.9 }, function(e){
$(this).css( 'color', 'red' );
});
// does $.fn[ methodname ].apply( $(selector), args );<|fim▁end|> |
poppy.twitter( 'load', options );
*/ |
<|file_name|>VCRBitrateLimitingReceiver.java<|end_file_name|><|fim▁begin|>/*
* Decompiled with CFR 0_115.
*
* Could not load the following classes:<|fim▁hole|> */
package com.buzzfeed.android.vcr.util;
import android.content.BroadcastReceiver;
import android.content.Context;
import android.content.Intent;
import android.content.IntentFilter;
import android.support.annotation.NonNull;
import com.buzzfeed.android.vcr.VCRConfig;
import com.buzzfeed.toolkit.util.EZUtil;
public final class VCRBitrateLimitingReceiver {
private final Context mContext;
private final BroadcastReceiver mReceiver;
public VCRBitrateLimitingReceiver(@NonNull Context context) {
this.mContext = EZUtil.checkNotNull(context, "Context must not be null.");
this.mReceiver = new ConnectivityReceiver();
}
public void register() {
this.mContext.registerReceiver(this.mReceiver, new IntentFilter("android.net.conn.CONNECTIVITY_CHANGE"));
}
public void unregister() {
this.mContext.unregisterReceiver(this.mReceiver);
}
private final class ConnectivityReceiver
extends BroadcastReceiver {
private ConnectivityReceiver() {
}
public void onReceive(Context context, Intent intent) {
VCRConfig.getInstance().setAdaptiveBitrateLimitForConnection(context);
}
}
}<|fim▁end|> | * android.content.BroadcastReceiver
* android.content.Context
* android.content.Intent
* android.content.IntentFilter |
<|file_name|>SessionAuthenticationFilterTest.java<|end_file_name|><|fim▁begin|>package com.turalt.openmentor.security;
import static org.easymock.EasyMock.*;
import java.io.UnsupportedEncodingException;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.apache.shiro.authc.AuthenticationException;
import org.apache.shiro.authc.AuthenticationInfo;
import org.apache.shiro.authc.AuthenticationToken;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.springframework.mock.web.MockHttpServletRequest;
import org.springframework.mock.web.MockHttpServletResponse;
import org.apache.shiro.realm.Realm;
import org.apache.shiro.subject.PrincipalCollection;
import org.apache.shiro.subject.SimplePrincipalCollection;
import org.apache.shiro.subject.Subject;
import org.apache.shiro.web.mgt.DefaultWebSecurityManager;
import org.apache.shiro.web.session.mgt.DefaultWebSessionManager;
import com.google.gson.Gson;
import com.google.gson.JsonObject;
import org.hamcrest.core.StringContains;
import com.turalt.openmentor.test.AbstractShiroTest;
public class SessionAuthenticationFilterTest extends AbstractShiroTest {
private SessionAuthenticationFilter filter;
private DefaultWebSecurityManager manager;
@Before
public void initialize() {
filter = new SessionAuthenticationFilter();
DefaultWebSessionManager sessions = new DefaultWebSessionManager();
manager = new DefaultWebSecurityManager();
manager.setSessionManager(sessions);
setSecurityManager(manager);
}
@After
public void tearDown() {
manager = null;
setSecurityManager(null);
clearSubject();
}
@Test
public void testGetSetApplicationName() {
Assert.assertEquals("application", filter.getApplicationName());
filter.setApplicationName("awesome");
Assert.assertEquals("awesome", filter.getApplicationName());
}
@Test
public void testGetSetUsernameParam() {
Assert.assertEquals("username", filter.getUsernameParam());
filter.setUsernameParam("awesome");
Assert.assertEquals("awesome", filter.getUsernameParam());
}
@Test
public void testGetSetPasswordParam() {
Assert.assertEquals("password", filter.getPasswordParam());
filter.setPasswordParam("awesome");
Assert.assertEquals("awesome", filter.getPasswordParam());
}
@Test
public void testGetSetAuthcScheme() {
Assert.assertEquals("session", filter.getAuthcScheme());
filter.setAuthcScheme("awesome");
Assert.assertEquals("awesome", filter.getAuthcScheme());
}
@Test
public void testGetSetAuthzScheme() {
Assert.assertEquals("session", filter.getAuthzScheme());
filter.setAuthzScheme("awesome");
Assert.assertEquals("awesome", filter.getAuthzScheme());
}
@Test
public void testGetSetFailureKeyAttribute() {
Assert.assertEquals("shiroLoginFailure", filter.getFailureKeyAttribute());
filter.setFailureKeyAttribute("awesome");
Assert.assertEquals("awesome", filter.getFailureKeyAttribute());
}
@Test
public void testGetSetFailureAttribute() {
MockHttpServletRequest request = new MockHttpServletRequest();
AuthenticationException e = new AuthenticationException("Aaargh");
filter.setFailureAttribute(request, e);
Assert.assertEquals(e.getClass().getName(), request.getAttribute("shiroLoginFailure"));
}
@Test
public void testGetUsername() {
HttpServletRequest request = createMock(HttpServletRequest.class);
expect(request.getParameter("username")).andStubReturn("admin");
replay(request);
Assert.assertEquals("admin", filter.getUsername(request));
}
@Test
public void testGetPassword() {
HttpServletRequest request = createMock(HttpServletRequest.class);
expect(request.getParameter("password")).andStubReturn("junk");
replay(request);
Assert.assertEquals("junk", filter.getPassword(request));
}
@Test
public void testOnLoginFailure() throws UnsupportedEncodingException {
AuthenticationToken token = createMock(AuthenticationToken.class);
replay(token);
AuthenticationException e = createMock(AuthenticationException.class);
expect(e.getMessage()).andStubReturn("Oops: my bad");<|fim▁hole|>
MockHttpServletRequest request = new MockHttpServletRequest();
MockHttpServletResponse response = new MockHttpServletResponse();
Assert.assertEquals(false, filter.onLoginFailure(token, e, request, response));
Assert.assertEquals("application/json", response.getContentType());
Assert.assertEquals(HttpServletResponse.SC_FORBIDDEN, response.getStatus());
Assert.assertEquals("{\"message\":\"Oops: my bad\"}", response.getContentAsString());
}
@Test
public void testOnAccessDeniedWithoutSession() throws Exception {
MockHttpServletRequest request = new MockHttpServletRequest();
MockHttpServletResponse response = new MockHttpServletResponse();
request.setContextPath("http://localhost:8666/");
request.setRequestURI("/api/studies");
Assert.assertEquals(false, filter.onAccessDenied(request, response));
Assert.assertEquals(HttpServletResponse.SC_UNAUTHORIZED, response.getStatus());
}
/**
* Checks the main login process. This requires a security manager to be set up,
* as it actually goes through the login process.
* @throws Exception
*/
@Test
public void testOnAccessDeniedIncorrectMethod() throws Exception {
MockHttpServletRequest request = new MockHttpServletRequest();
MockHttpServletResponse response = new MockHttpServletResponse();
request.setContextPath("http://localhost:8666/");
request.setRequestURI("/login.jsp");
request.setMethod("GET");
Assert.assertEquals(false, filter.onAccessDenied(request, response));
Assert.assertEquals(HttpServletResponse.SC_UNAUTHORIZED, response.getStatus());
}
/**
* Checks the main login process. This requires a security manager to be set up,
* as it actually goes through the login process.
* @throws Exception
*/
@Test
public void testOnAccessDeniedEmptyLoginRequest() throws Exception {
MockHttpServletRequest request = new MockHttpServletRequest();
MockHttpServletResponse response = new MockHttpServletResponse();
request.setContextPath("http://localhost:8666/");
request.setRequestURI("/login.jsp");
request.setMethod("POST");
Assert.assertEquals(false, filter.onAccessDenied(request, response));
Assert.assertEquals(HttpServletResponse.SC_FORBIDDEN, response.getStatus());
Assert.assertEquals("application/json", response.getContentType());
Gson gson = new Gson();
JsonObject data = gson.fromJson(response.getContentAsString(), JsonObject.class);
Assert.assertTrue(data.isJsonObject());
Assert.assertTrue(data.has("message"));
Assert.assertTrue(data.get("message").isJsonPrimitive());
Assert.assertThat(data.get("message").getAsString(), StringContains.containsString("Authentication failed"));
}
/**
* Checks the main login process with a real username and password.
* @throws Exception
*/
@Test
public void testOnAccessDeniedValidLoginRequest() throws Exception {
MockHttpServletRequest request = new MockHttpServletRequest();
MockHttpServletResponse response = new MockHttpServletResponse();
request.setContextPath("http://localhost:8666/");
request.setRequestURI("/login.jsp");
request.setMethod("POST");
request.setParameter("username", "user");
request.setParameter("password", "password");
PrincipalCollection principals = new SimplePrincipalCollection("user", "mock");
AuthenticationInfo info = createMock(AuthenticationInfo.class);
expect(info.getPrincipals()).andStubReturn(principals);
replay(info);
Realm realm = createMock(Realm.class);
expect(realm.supports(anyObject(AuthenticationToken.class))).andStubReturn(true);
expect(realm.getAuthenticationInfo(anyObject(AuthenticationToken.class))).andStubReturn(info);
replay(realm);
manager.setRealm(realm);
Subject subjectUnderTest = new Subject.Builder(manager).buildSubject();
setSubject(subjectUnderTest);
Assert.assertEquals(false, filter.onAccessDenied(request, response));
Assert.assertEquals(HttpServletResponse.SC_MOVED_TEMPORARILY, response.getStatus());
}
/**
     * Checks the main login process with credentials that do not match any account.
* @throws Exception
*/
@Test
public void testOnAccessDeniedInvalidLoginRequest() throws Exception {
MockHttpServletRequest request = new MockHttpServletRequest();
MockHttpServletResponse response = new MockHttpServletResponse();
request.setContextPath("http://localhost:8666/");
request.setRequestURI("/login.jsp");
request.setMethod("POST");
request.setParameter("username", "user");
request.setParameter("password", "password");
// Null is the correct expected response for a missing account, which is enough for now
Realm realm = createMock(Realm.class);
expect(realm.supports(anyObject(AuthenticationToken.class))).andStubReturn(true);
expect(realm.getAuthenticationInfo(anyObject(AuthenticationToken.class))).andStubReturn(null);
replay(realm);
manager.setRealm(realm);
Subject subjectUnderTest = new Subject.Builder(manager).buildSubject();
setSubject(subjectUnderTest);
Assert.assertEquals(false, filter.onAccessDenied(request, response));
Assert.assertEquals(HttpServletResponse.SC_FORBIDDEN, response.getStatus());
}
}<|fim▁end|> | replay(e); |
<|file_name|>vpmullq.rs<|end_file_name|><|fim▁begin|>use ::{BroadcastMode, Instruction, MaskReg, MergeMode, Mnemonic, OperandSize, Reg, RoundingMode};
use ::RegType::*;
use ::instruction_def::*;
use ::Operand::*;
use ::Reg::*;
use ::RegScale::*;
fn vpmullq_1() {
run_test(&Instruction { mnemonic: Mnemonic::VPMULLQ, operand1: Some(Direct(XMM3)), operand2: Some(Direct(XMM7)), operand3: Some(Direct(XMM6)), operand4: None, lock: false, rounding_mode: None, merge_mode: Some(MergeMode::Zero), sae: false, mask: Some(MaskReg::K4), broadcast: None }, &[98, 242, 197, 140, 64, 222], OperandSize::Dword)
}
fn vpmullq_2() {
run_test(&Instruction { mnemonic: Mnemonic::VPMULLQ, operand1: Some(Direct(XMM7)), operand2: Some(Direct(XMM6)), operand3: Some(IndirectScaledIndexedDisplaced(ESI, EDX, Four, 1758422147, Some(OperandSize::Xmmword), None)), operand4: None, lock: false, rounding_mode: None, merge_mode: Some(MergeMode::Zero), sae: false, mask: Some(MaskReg::K5), broadcast: None }, &[98, 242, 205, 141, 64, 188, 150, 131, 100, 207, 104], OperandSize::Dword)
}
fn vpmullq_3() {
run_test(&Instruction { mnemonic: Mnemonic::VPMULLQ, operand1: Some(Direct(XMM0)), operand2: Some(Direct(XMM3)), operand3: Some(IndirectScaledDisplaced(EDX, Two, 877498906, Some(OperandSize::Qword), None)), operand4: None, lock: false, rounding_mode: None, merge_mode: Some(MergeMode::Zero), sae: false, mask: Some(MaskReg::K5), broadcast: Some(BroadcastMode::Broadcast1To2) }, &[98, 242, 229, 157, 64, 4, 85, 26, 146, 77, 52], OperandSize::Dword)
}
fn vpmullq_4() {
run_test(&Instruction { mnemonic: Mnemonic::VPMULLQ, operand1: Some(Direct(XMM0)), operand2: Some(Direct(XMM20)), operand3: Some(Direct(XMM31)), operand4: None, lock: false, rounding_mode: None, merge_mode: Some(MergeMode::Zero), sae: false, mask: Some(MaskReg::K2), broadcast: None }, &[98, 146, 221, 130, 64, 199], OperandSize::Qword)
}<|fim▁hole|>
fn vpmullq_6() {
run_test(&Instruction { mnemonic: Mnemonic::VPMULLQ, operand1: Some(Direct(XMM20)), operand2: Some(Direct(XMM25)), operand3: Some(IndirectDisplaced(RAX, 804121059, Some(OperandSize::Qword), None)), operand4: None, lock: false, rounding_mode: None, merge_mode: Some(MergeMode::Zero), sae: false, mask: Some(MaskReg::K2), broadcast: Some(BroadcastMode::Broadcast1To2) }, &[98, 226, 181, 146, 64, 160, 227, 233, 237, 47], OperandSize::Qword)
}
fn vpmullq_7() {
run_test(&Instruction { mnemonic: Mnemonic::VPMULLQ, operand1: Some(Direct(YMM2)), operand2: Some(Direct(YMM6)), operand3: Some(Direct(YMM2)), operand4: None, lock: false, rounding_mode: None, merge_mode: Some(MergeMode::Zero), sae: false, mask: Some(MaskReg::K3), broadcast: None }, &[98, 242, 205, 171, 64, 210], OperandSize::Dword)
}
fn vpmullq_8() {
run_test(&Instruction { mnemonic: Mnemonic::VPMULLQ, operand1: Some(Direct(YMM6)), operand2: Some(Direct(YMM4)), operand3: Some(IndirectScaledIndexed(EDX, EAX, Two, Some(OperandSize::Ymmword), None)), operand4: None, lock: false, rounding_mode: None, merge_mode: Some(MergeMode::Zero), sae: false, mask: Some(MaskReg::K1), broadcast: None }, &[98, 242, 221, 169, 64, 52, 66], OperandSize::Dword)
}
fn vpmullq_9() {
run_test(&Instruction { mnemonic: Mnemonic::VPMULLQ, operand1: Some(Direct(YMM5)), operand2: Some(Direct(YMM0)), operand3: Some(IndirectDisplaced(EAX, 276073328, Some(OperandSize::Qword), None)), operand4: None, lock: false, rounding_mode: None, merge_mode: Some(MergeMode::Zero), sae: false, mask: Some(MaskReg::K6), broadcast: Some(BroadcastMode::Broadcast1To4) }, &[98, 242, 253, 190, 64, 168, 112, 139, 116, 16], OperandSize::Dword)
}
fn vpmullq_10() {
run_test(&Instruction { mnemonic: Mnemonic::VPMULLQ, operand1: Some(Direct(YMM11)), operand2: Some(Direct(YMM2)), operand3: Some(Direct(YMM24)), operand4: None, lock: false, rounding_mode: None, merge_mode: Some(MergeMode::Zero), sae: false, mask: Some(MaskReg::K4), broadcast: None }, &[98, 18, 237, 172, 64, 216], OperandSize::Qword)
}
fn vpmullq_11() {
run_test(&Instruction { mnemonic: Mnemonic::VPMULLQ, operand1: Some(Direct(YMM17)), operand2: Some(Direct(YMM6)), operand3: Some(IndirectScaledDisplaced(RDX, Two, 500874551, Some(OperandSize::Ymmword), None)), operand4: None, lock: false, rounding_mode: None, merge_mode: Some(MergeMode::Zero), sae: false, mask: Some(MaskReg::K6), broadcast: None }, &[98, 226, 205, 174, 64, 12, 85, 55, 189, 218, 29], OperandSize::Qword)
}
fn vpmullq_12() {
run_test(&Instruction { mnemonic: Mnemonic::VPMULLQ, operand1: Some(Direct(YMM4)), operand2: Some(Direct(YMM21)), operand3: Some(IndirectDisplaced(RBX, 612952646, Some(OperandSize::Qword), None)), operand4: None, lock: false, rounding_mode: None, merge_mode: Some(MergeMode::Zero), sae: false, mask: Some(MaskReg::K5), broadcast: Some(BroadcastMode::Broadcast1To4) }, &[98, 242, 213, 181, 64, 163, 70, 234, 136, 36], OperandSize::Qword)
}
fn vpmullq_13() {
run_test(&Instruction { mnemonic: Mnemonic::VPMULLQ, operand1: Some(Direct(ZMM5)), operand2: Some(Direct(ZMM2)), operand3: Some(Direct(ZMM5)), operand4: None, lock: false, rounding_mode: None, merge_mode: Some(MergeMode::Zero), sae: false, mask: Some(MaskReg::K2), broadcast: None }, &[98, 242, 237, 202, 64, 237], OperandSize::Dword)
}
fn vpmullq_14() {
run_test(&Instruction { mnemonic: Mnemonic::VPMULLQ, operand1: Some(Direct(ZMM3)), operand2: Some(Direct(ZMM1)), operand3: Some(IndirectDisplaced(EDX, 1125759716, Some(OperandSize::Zmmword), None)), operand4: None, lock: false, rounding_mode: None, merge_mode: Some(MergeMode::Zero), sae: false, mask: Some(MaskReg::K2), broadcast: None }, &[98, 242, 245, 202, 64, 154, 228, 186, 25, 67], OperandSize::Dword)
}
fn vpmullq_15() {
run_test(&Instruction { mnemonic: Mnemonic::VPMULLQ, operand1: Some(Direct(ZMM6)), operand2: Some(Direct(ZMM1)), operand3: Some(IndirectDisplaced(EAX, 107712143, Some(OperandSize::Qword), None)), operand4: None, lock: false, rounding_mode: None, merge_mode: Some(MergeMode::Zero), sae: false, mask: Some(MaskReg::K4), broadcast: Some(BroadcastMode::Broadcast1To8) }, &[98, 242, 245, 220, 64, 176, 143, 142, 107, 6], OperandSize::Dword)
}
fn vpmullq_16() {
run_test(&Instruction { mnemonic: Mnemonic::VPMULLQ, operand1: Some(Direct(ZMM8)), operand2: Some(Direct(ZMM23)), operand3: Some(Direct(ZMM27)), operand4: None, lock: false, rounding_mode: None, merge_mode: Some(MergeMode::Zero), sae: false, mask: Some(MaskReg::K1), broadcast: None }, &[98, 18, 197, 193, 64, 195], OperandSize::Qword)
}
fn vpmullq_17() {
run_test(&Instruction { mnemonic: Mnemonic::VPMULLQ, operand1: Some(Direct(ZMM25)), operand2: Some(Direct(ZMM9)), operand3: Some(IndirectScaledIndexed(RBX, RCX, Two, Some(OperandSize::Zmmword), None)), operand4: None, lock: false, rounding_mode: None, merge_mode: Some(MergeMode::Zero), sae: false, mask: Some(MaskReg::K6), broadcast: None }, &[98, 98, 181, 206, 64, 12, 75], OperandSize::Qword)
}
fn vpmullq_18() {
run_test(&Instruction { mnemonic: Mnemonic::VPMULLQ, operand1: Some(Direct(ZMM19)), operand2: Some(Direct(ZMM28)), operand3: Some(Indirect(RCX, Some(OperandSize::Qword), None)), operand4: None, lock: false, rounding_mode: None, merge_mode: Some(MergeMode::Zero), sae: false, mask: Some(MaskReg::K1), broadcast: Some(BroadcastMode::Broadcast1To8) }, &[98, 226, 157, 209, 64, 25], OperandSize::Qword)
}<|fim▁end|> |
fn vpmullq_5() {
run_test(&Instruction { mnemonic: Mnemonic::VPMULLQ, operand1: Some(Direct(XMM5)), operand2: Some(Direct(XMM7)), operand3: Some(IndirectScaledDisplaced(RSI, Two, 1959530681, Some(OperandSize::Xmmword), None)), operand4: None, lock: false, rounding_mode: None, merge_mode: Some(MergeMode::Zero), sae: false, mask: Some(MaskReg::K2), broadcast: None }, &[98, 242, 197, 138, 64, 44, 117, 185, 16, 204, 116], OperandSize::Qword)
} |
<|file_name|>packet_recv_server.rs<|end_file_name|><|fim▁begin|>#![no_main]
#[macro_use]
extern crate libfuzzer_sys;
#[macro_use]
extern crate lazy_static;
use std::net::SocketAddr;
use std::sync::Mutex;
lazy_static! {
static ref CONFIG: Mutex<quiche::Config> = {
let crt_path = std::env::var("QUICHE_FUZZ_CRT")
.unwrap_or_else(|_| "fuzz/cert.crt".to_string());
let key_path = std::env::var("QUICHE_FUZZ_KEY")
.unwrap_or_else(|_| "fuzz/cert.key".to_string());
let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION).unwrap();
config.load_cert_chain_from_pem_file(&crt_path).unwrap();
config.load_priv_key_from_pem_file(&key_path).unwrap();
config
.set_application_protos(b"\x05hq-23\x08http/0.9")
.unwrap();
config.set_initial_max_data(30);
config.set_initial_max_stream_data_bidi_local(15);
config.set_initial_max_stream_data_bidi_remote(15);
config.set_initial_max_stream_data_uni(10);
config.set_initial_max_streams_bidi(3);
config.set_initial_max_streams_uni(3);
Mutex::new(config)<|fim▁hole|>static SCID: quiche::ConnectionId<'static> =
quiche::ConnectionId::from_ref(&[0; quiche::MAX_CONN_ID_LEN]);
fuzz_target!(|data: &[u8]| {
let from: SocketAddr = "127.0.0.1:1234".parse().unwrap();
let mut buf = data.to_vec();
let mut conn =
quiche::accept(&SCID, None, from, &mut CONFIG.lock().unwrap()).unwrap();
let info = quiche::RecvInfo { from };
conn.recv(&mut buf, info).ok();
});<|fim▁end|> | };
}
|
<|file_name|>graycode.py<|end_file_name|><|fim▁begin|>from __future__ import print_function, division
from sympy.core import Basic
from sympy.core.compatibility import xrange
import random
class GrayCode(Basic):
"""
A Gray code is essentially a Hamiltonian walk on
    an n-dimensional cube with edge length of one.
The vertices of the cube are represented by vectors
whose values are binary. The Hamilton walk visits
each vertex exactly once. The Gray code for a 3d
cube is ['000','100','110','010','011','111','101',
'001'].
A Gray code solves the problem of sequentially
generating all possible subsets of n objects in such
a way that each subset is obtained from the previous
one by either deleting or adding a single object.
In the above example, 1 indicates that the object is
    present, and 0 indicates that it's absent.
Gray codes have applications in statistics as well when
we want to compute various statistics related to subsets
in an efficient manner.
References:
[1] Nijenhuis,A. and Wilf,H.S.(1978).
Combinatorial Algorithms. Academic Press.
[2] Knuth, D. (2011). The Art of Computer Programming, Vol 4
Addison Wesley
Examples
========
>>> from sympy.combinatorics.graycode import GrayCode
>>> a = GrayCode(3)
>>> list(a.generate_gray())
['000', '001', '011', '010', '110', '111', '101', '100']
>>> a = GrayCode(4)
>>> list(a.generate_gray())
['0000', '0001', '0011', '0010', '0110', '0111', '0101', '0100', \
'1100', '1101', '1111', '1110', '1010', '1011', '1001', '1000']
"""
_skip = False
_current = 0
_rank = None
def __new__(cls, n, *args, **kw_args):
"""
Default constructor.
It takes a single argument ``n`` which gives the dimension of the Gray
code. The starting Gray code string (``start``) or the starting ``rank``
may also be given; the default is to start at rank = 0 ('0...0').
Examples
========
>>> from sympy.combinatorics.graycode import GrayCode
>>> a = GrayCode(3)
>>> a
GrayCode(3)
>>> a.n
3
>>> a = GrayCode(3, start='100')
>>> a.current
'100'
>>> a = GrayCode(4, rank=4)
>>> a.current
'0110'
>>> a.rank
4
"""
if n < 1 or int(n) != n:
raise ValueError(
'Gray code dimension must be a positive integer, not %i' % n)
n = int(n)
args = (n,) + args
obj = Basic.__new__(cls, *args)
if 'start' in kw_args:
obj._current = kw_args["start"]
if len(obj._current) > n:
raise ValueError('Gray code start has length %i but '
'should not be greater than %i' % (len(obj._current), n))
elif 'rank' in kw_args:
if int(kw_args["rank"]) != kw_args["rank"]:
raise ValueError('Gray code rank must be a positive integer, '
'not %i' % kw_args["rank"])
obj._rank = int(kw_args["rank"]) % obj.selections
obj._current = obj.unrank(n, obj._rank)
return obj
def next(self, delta=1):
"""
Returns the Gray code a distance ``delta`` (default = 1) from the
current value in canonical order.
Examples
========
>>> from sympy.combinatorics.graycode import GrayCode
>>> a = GrayCode(3, start='110')
>>> a.next().current
'111'
>>> a.next(-1).current
'010'
"""
return GrayCode(self.n, rank=(self.rank + delta) % self.selections)
@property
def selections(self):
"""
Returns the number of bit vectors in the Gray code.
Examples
========
>>> from sympy.combinatorics.graycode import GrayCode
>>> a = GrayCode(3)
>>> a.selections
8
"""
return 2**self.n
@property
def n(self):
"""
Returns the dimension of the Gray code.
Examples
========
>>> from sympy.combinatorics.graycode import GrayCode
>>> a = GrayCode(5)
>>> a.n
5
"""
return self.args[0]
def generate_gray(self, **hints):
"""
Generates the sequence of bit vectors of a Gray Code.
[1] Knuth, D. (2011). The Art of Computer Programming,
Vol 4, Addison Wesley
Examples
========
>>> from sympy.combinatorics.graycode import GrayCode
>>> a = GrayCode(3)
>>> list(a.generate_gray())
['000', '001', '011', '010', '110', '111', '101', '100']
>>> list(a.generate_gray(start='011'))
['011', '010', '110', '111', '101', '100']
>>> list(a.generate_gray(rank=4))
['110', '111', '101', '100']
See Also
========
skip
"""
bits = self.n
start = None
if "start" in hints:
start = hints["start"]
elif "rank" in hints:
start = GrayCode.unrank(self.n, hints["rank"])
if start is not None:
self._current = start
current = self.current
graycode_bin = gray_to_bin(current)
if len(graycode_bin) > self.n:
raise ValueError('Gray code start has length %i but should '
'not be greater than %i' % (len(graycode_bin), bits))
self._current = int(current, 2)
graycode_int = int(''.join(graycode_bin), 2)
for i in xrange(graycode_int, 1 << bits):
if self._skip:
self._skip = False
else:
yield self.current
            bbtc = (i ^ (i + 1))  # run of low-order bits that flip when incrementing i
            gbtc = (bbtc ^ (bbtc >> 1))  # isolates the single bit that changes in the Gray code
            self._current = (self._current ^ gbtc)
self._current = 0
def skip(self):
"""
Skips the bit generation.
Examples
========
>>> from sympy.combinatorics.graycode import GrayCode
>>> a = GrayCode(3)
>>> for i in a.generate_gray():
... if i == '010':
... a.skip()
... print(i)
...
000
001
011
010
111
101
100
See Also
========
generate_gray
"""
self._skip = True
@property
def rank(self):
"""
Ranks the Gray code.
A ranking algorithm determines the position (or rank)
of a combinatorial object among all the objects w.r.t.
a given order. For example, the 4 bit binary reflected
Gray code (BRGC) '0101' has a rank of 6 as it appears in
the 6th position in the canonical ordering of the family
of 4 bit Gray codes.
References:
[1] http://www-stat.stanford.edu/~susan/courses/s208/node12.html
Examples
========
>>> from sympy.combinatorics.graycode import GrayCode
>>> a = GrayCode(3)
>>> list(a.generate_gray())
['000', '001', '011', '010', '110', '111', '101', '100']
>>> GrayCode(3, start='100').rank
7
>>> GrayCode(3, rank=7).current
'100'
See Also
========
unrank
"""
if self._rank is None:
self._rank = int(gray_to_bin(self.current), 2)
return self._rank
@property
def current(self):
"""
Returns the currently referenced Gray code as a bit string.
Examples
========
>>> from sympy.combinatorics.graycode import GrayCode
>>> GrayCode(3, start='100').current
'100'
"""
rv = self._current or '0'
if type(rv) is not str:
rv = bin(rv)[2:]
return rv.rjust(self.n, '0')
@classmethod
def unrank(self, n, rank):
"""
Unranks an n-bit sized Gray code of rank k. This method exists
so that a derivative GrayCode class can define its own code of
a given rank.
The string here is generated in reverse order to allow for tail-call
optimization.
Examples
========
>>> from sympy.combinatorics.graycode import GrayCode
>>> GrayCode(5, rank=3).current
'00010'
>>> GrayCode.unrank(5, 3)
'00010'
See Also
========
rank
"""
def _unrank(k, n):
if n == 1:
return str(k % 2)
m = 2**(n - 1)
if k < m:
return '0' + _unrank(k, n - 1)
return '1' + _unrank(m - (k % m) - 1, n - 1)
return _unrank(rank, n)
def random_bitstring(n):
"""
Generates a random bitlist of length n.
Examples
========
>>> from sympy.combinatorics.graycode import random_bitstring
>>> random_bitstring(3) # doctest: +SKIP
100
"""
return ''.join([random.choice('01') for i in xrange(n)])
def gray_to_bin(bin_list):
"""
Convert from Gray coding to binary coding.
We assume big endian encoding.
Examples
========
>>> from sympy.combinatorics.graycode import gray_to_bin
>>> gray_to_bin('100')
'111'
See Also
========
bin_to_gray
"""
b = [bin_list[0]]
for i in xrange(1, len(bin_list)):
b += str(int(b[i - 1] != bin_list[i]))
return ''.join(b)
def bin_to_gray(bin_list):
"""
Convert from binary coding to gray coding.
We assume big endian encoding.
Examples
========
>>> from sympy.combinatorics.graycode import bin_to_gray
>>> bin_to_gray('111')
'100'
See Also
========
gray_to_bin
"""
b = [bin_list[0]]
for i in xrange(0, len(bin_list) - 1):<|fim▁hole|>
def get_subset_from_bitstring(super_set, bitstring):
"""
Gets the subset defined by the bitstring.
Examples
========
>>> from sympy.combinatorics.graycode import get_subset_from_bitstring
>>> get_subset_from_bitstring(['a','b','c','d'], '0011')
['c', 'd']
>>> get_subset_from_bitstring(['c','a','c','c'], '1100')
['c', 'a']
See Also
========
graycode_subsets
"""
if len(super_set) != len(bitstring):
raise ValueError("The sizes of the lists are not equal")
return [super_set[i] for i, j in enumerate(bitstring)
if bitstring[i] == '1']
def graycode_subsets(gray_code_set):
"""
Generates the subsets as enumerated by a Gray code.
Examples
========
>>> from sympy.combinatorics.graycode import graycode_subsets
>>> list(graycode_subsets(['a','b','c']))
[[], ['c'], ['b', 'c'], ['b'], ['a', 'b'], ['a', 'b', 'c'], \
['a', 'c'], ['a']]
>>> list(graycode_subsets(['a','b','c','c']))
[[], ['c'], ['c', 'c'], ['c'], ['b', 'c'], ['b', 'c', 'c'], \
['b', 'c'], ['b'], ['a', 'b'], ['a', 'b', 'c'], ['a', 'b', 'c', 'c'], \
['a', 'b', 'c'], ['a', 'c'], ['a', 'c', 'c'], ['a', 'c'], ['a']]
See Also
========
get_subset_from_bitstring
"""
for bitstring in list(GrayCode(len(gray_code_set)).generate_gray()):
        yield get_subset_from_bitstring(gray_code_set, bitstring)<|fim▁end|> | b += str(int(bin_list[i]) ^ int(bin_list[i + 1]))
return ''.join(b) |
<|file_name|>dummy.py<|end_file_name|><|fim▁begin|># Online training of a logistic regression model
# using Assumed Density Filtering (ADF).
# We compare the ADF result with MCMC sampling
# of the posterior distribution
# Dependencies:
# !pip install git+https://github.com/blackjax-devs/blackjax.git
# !pip install jax_cosmo
# Author: Gerardo Durán-Martín (@gerdm)
import superimport
import jax
import jax.numpy as jnp
import blackjax.rwmh as mh
import matplotlib.pyplot as plt
import pyprobml_utils as pml
from sklearn.datasets import make_biclusters
from jax import random
from jax.scipy.optimize import minimize
from jax_cosmo.scipy import integrate
from functools import partial
def sigmoid(z): return jnp.exp(z) / (1 + jnp.exp(z))
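# Note: exp(z) / (1 + exp(z)) can overflow for large positive z; if that becomes an
# issue, jax.nn.sigmoid is the numerically stabler drop-in replacement.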
def log_sigmoid(z): return z - jnp.log(1 + jnp.exp(z))
def inference_loop(rng_key, kernel, initial_state, num_samples):
def one_step(state, rng_key):
state, _ = kernel(rng_key, state)
return state, state
keys = jax.random.split(rng_key, num_samples)
_, states = jax.lax.scan(one_step, initial_state, keys)
return states
def E_base(w, Phi, y, alpha):
"""
    Energy (negative unnormalized log-posterior) of a logistic regression
    model with an isotropic Gaussian prior of precision alpha.
"""
an = Phi @ w
log_an = log_sigmoid(an)
log_likelihood_term = y * log_an + (1 - y) * jnp.log(1 - sigmoid(an))
prior_term = alpha * w @ w / 2
return prior_term - log_likelihood_term.sum()
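# Zt_func / mt_func / vt_func integrate the one-dimensional "tilted" distribution
# p(eta) proportional to Bernoulli(y | sigmoid(eta)) * N(eta | mu, v^2): its normalizer
# and its first and second moments, which adf_step then matches with a Gaussian.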
def Zt_func(eta, y, mu, v):
log_term = y * log_sigmoid(eta) + (1 - y) * jnp.log(1 - sigmoid(eta))
log_term = log_term - (eta - mu) ** 2 / (2 * v ** 2)
return jnp.exp(log_term) / jnp.sqrt(2 * jnp.pi * v ** 2)
def mt_func(eta, y, mu, v):
log_term = y * log_sigmoid(eta) + (1 - y) * jnp.log(1 - sigmoid(eta))
log_term = log_term - (eta - mu) ** 2 / (2 * v ** 2)
return eta * jnp.exp(log_term) / jnp.sqrt(2 * jnp.pi * v ** 2)
def vt_func(eta, y, mu, v):
log_term = y * log_sigmoid(eta) + (1 - y) * jnp.log(1 - sigmoid(eta))
log_term = log_term - (eta - mu) ** 2 / (2 * v ** 2)
return eta ** 2 * jnp.exp(log_term) / jnp.sqrt(2 * jnp.pi * v ** 2)
def adf_step(state, xs, q, lbound, ubound):
mu_t, tau_t = state
Phi_t, y_t = xs
mu_t_cond = mu_t
tau_t_cond = tau_t + q
# prior predictive distribution
m_t_cond = (Phi_t * mu_t_cond).sum()
v_t_cond = (Phi_t ** 2 * tau_t_cond).sum()
v_t_cond_sqrt = jnp.sqrt(v_t_cond)
# Moment-matched Gaussian approximation elements
Zt = integrate.romb(lambda eta: Zt_func(eta, y_t, m_t_cond, v_t_cond_sqrt), lbound, ubound)
mt = integrate.romb(lambda eta: mt_func(eta, y_t, m_t_cond, v_t_cond_sqrt), lbound, ubound)
mt = mt / Zt
vt = integrate.romb(lambda eta: vt_func(eta, y_t, m_t_cond, v_t_cond_sqrt), lbound, ubound)
vt = vt / Zt - mt ** 2
# Posterior estimation
delta_m = mt - m_t_cond
delta_v = vt - v_t_cond
a = Phi_t * tau_t_cond / jnp.power(Phi_t * tau_t_cond, 2).sum()
mu_t = mu_t_cond + a * delta_m
tau_t = tau_t_cond + a ** 2 * delta_v
return (mu_t, tau_t), (mu_t, tau_t)
def plot_posterior_predictive(ax, X, Z, title, colors, cmap="RdBu_r"):
ax.contourf(*Xspace, Z, cmap=cmap, alpha=0.5, levels=20)
ax.scatter(*X.T, c=colors)
ax.set_title(title)
ax.axis("off")
plt.tight_layout()
# ** Generating training data **
key = random.PRNGKey(314)
n_datapoints, m = 20, 2
X, rows, cols = make_biclusters((n_datapoints, m), 2, noise=0.6,
random_state=314, minval=-3, maxval=3)
y = rows[0] * 1.0
alpha = 1.0
init_noise = 1.0
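# Design matrix with a leading column of ones for the bias term.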
Phi = jnp.c_[jnp.ones(n_datapoints)[:, None], X]
N, M = Phi.shape
# ** MCMC Sampling with BlackJAX **
sigma_mcmc = 0.8
w0 = random.multivariate_normal(key, jnp.zeros(M), jnp.eye(M) * init_noise)
E = partial(E_base, Phi=Phi, y=y, alpha=alpha)
initial_state = mh.new_state(w0, E)
mcmc_kernel = mh.kernel(E, jnp.ones(M) * sigma_mcmc)
mcmc_kernel = jax.jit(mcmc_kernel)
n_samples = 5_000
burnin = 300
key_init = jax.random.PRNGKey(0)
states = inference_loop(key_init, mcmc_kernel, initial_state, n_samples)
chains = states.position[burnin:, :]
nsamp, _ = chains.shape
# ** Laplace approximation **
res = minimize(lambda x: E(x) / len(y), w0, method="BFGS")
w_map = res.x
# The Laplace posterior covariance is the inverse Hessian of the energy at the MAP estimate.
SN = jnp.linalg.inv(jax.hessian(E)(w_map))
# ** ADF inference **
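# q is the per-step variance inflation (state drift) added to the diagonal
# posterior before each update.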
q = 0.14
lbound, ubound = -10, 10
mu_t = jnp.zeros(M)
tau_t = jnp.ones(M) * q
init_state = (mu_t, tau_t)
xs = (Phi, y)
adf_loop = partial(adf_step, q=q, lbound=lbound, ubound=ubound)
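# Single online pass over the data: scan threads (mu_t, tau_t) through adf_step
# and records the per-step history of both.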
(mu_t, tau_t), (mu_t_hist, tau_t_hist) = jax.lax.scan(adf_loop, init_state, xs)
# ** Estimating posterior predictive distribution **
xmin, ymin = X.min(axis=0) - 0.1
xmax, ymax = X.max(axis=0) + 0.1
step = 0.1
Xspace = jnp.mgrid[xmin:xmax:step, ymin:ymax:step]
_, nx, ny = Xspace.shape
Phispace = jnp.concatenate([jnp.ones((1, nx, ny)), Xspace])
# MCMC posterior predictive distribution
Z_mcmc = sigmoid(jnp.einsum("mij,sm->sij", Phispace, chains))
Z_mcmc = Z_mcmc.mean(axis=0)
# Laplace posterior predictive distribution
key = random.PRNGKey(314)
laplace_samples = random.multivariate_normal(key, w_map, SN, (n_samples,))
Z_laplace = sigmoid(jnp.einsum("mij,sm->sij", Phispace, laplace_samples))
Z_laplace = Z_laplace.mean(axis=0)
# ADF posterior predictive distribution
adf_samples = random.multivariate_normal(key, mu_t, jnp.diag(tau_t), (n_samples,))
Z_adf = sigmoid(jnp.einsum("mij,sm->sij", Phispace, adf_samples))
Z_adf = Z_adf.mean(axis=0)
# ** Plotting predictive distribution **
plt.rcParams["axes.spines.right"] = False
plt.rcParams["axes.spines.top"] = False
colors = ["tab:red" if el else "tab:blue" for el in y]
fig, ax = plt.subplots()
title = "(MCMC) Predictive distribution"
plot_posterior_predictive(ax, X, Z_mcmc, title, colors)
pml.savefig("mcmc-logreg-predictive-surface.pdf")
fig, ax = plt.subplots()
title = "(Laplace) Predictive distribution"
plot_posterior_predictive(ax, X, Z_laplace, title, colors)
pml.savefig("laplace-logreg-predictive-surface.pdf")
fig, ax = plt.subplots()
title = "(ADF) Predictive distribution"
plot_posterior_predictive(ax, X, Z_adf, title, colors)
pml.savefig("adf-logreg-predictive-surface.pdf")
# ** Plotting training history **
w_batch_all = chains.mean(axis=0)
w_batch_std_all = chains.std(axis=0)
timesteps = jnp.arange(n_datapoints)
lcolors = ["black", "tab:blue", "tab:red"]<|fim▁hole|>fig, ax = plt.subplots(figsize=(6, 3))
elements = zip(mu_t_hist.T, tau_t_hist.T, w_batch_all, w_batch_std_all, lcolors)
for i, (w_online, w_err_online, w_batch, w_batch_err, c) in enumerate(elements):
ax.errorbar(timesteps, w_online, jnp.sqrt(w_err_online), c=c, label=f"$w_{i}$ online")
ax.axhline(y=w_batch, c=lcolors[i], linestyle="--", label=f"$w_{i}$ batch (mcmc)")
ax.fill_between(timesteps, w_batch - w_batch_err, w_batch + w_batch_err, color=c, alpha=0.1)
ax.legend(bbox_to_anchor=(1.05, 1))
ax.set_xlim(0, n_datapoints - 0.9)
ax.set_xlabel("number samples")
ax.set_ylabel("weights")
plt.tight_layout()
pml.savefig("adf-mcmc-online-hist.pdf")
plt.show()<|fim▁end|> | |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import logging
from lxml import etree
from superdesk.metadata.item import ITEM_TYPE, CONTENT_TYPE, FORMATS, FORMAT
from superdesk.etree import parse_html
from superdesk.text_utils import get_text
from superdesk.publish import registered_transmitters
formatters = []
logger = logging.getLogger(__name__)
class FormatterRegistry(type):
"""Registry metaclass for formatters."""
def __init__(cls, name, bases, attrs):
"""Register sub-classes of Formatter class when defined."""
super(FormatterRegistry, cls).__init__(name, bases, attrs)
if name != "Formatter":
formatters.append(cls)
class Formatter(metaclass=FormatterRegistry):
"""Base Formatter class for all types of Formatters like News ML 1.2, News ML G2, NITF, etc."""
def __init__(self):
self.can_preview = False
self.can_export = False
self.destination = None<|fim▁hole|> """Formats the article and returns the transformed string"""
raise NotImplementedError()
def export(self, article, subscriber, codes=None):
"""Formats the article and returns the output string for export"""
raise NotImplementedError()
def can_format(self, format_type, article):
"""Test if formatter can format for given article."""
raise NotImplementedError()
def append_body_footer(self, article):
"""
Checks if the article has any Public Service Announcements and if available appends each of them to the body.
:return: body with public service announcements.
"""
try:
article["body_html"] = article["body_html"].replace("<br>", "<br/>")
except KeyError:
pass
body = ""
if article[ITEM_TYPE] in [CONTENT_TYPE.TEXT, CONTENT_TYPE.PREFORMATTED]:
body = article.get("body_html", "")
elif article[ITEM_TYPE] in [CONTENT_TYPE.AUDIO, CONTENT_TYPE.PICTURE, CONTENT_TYPE.VIDEO]:
body = article.get("description", "")
if body and article.get(FORMAT, "") == FORMATS.PRESERVED:
body = body.replace("\n", "\r\n").replace("\r\r", "\r")
parsed = parse_html(body, content="html")
for br in parsed.xpath("//br"):
br.tail = "\r\n" + br.tail if br.tail else "\r\n"
etree.strip_elements(parsed, "br", with_tail=False)
body = etree.tostring(parsed, encoding="unicode")
if body and article.get("body_footer"):
footer = article.get("body_footer")
if article.get(FORMAT, "") == FORMATS.PRESERVED:
body = "{}\r\n{}".format(body, get_text(footer))
else:
body = "{}{}".format(body, footer)
return body
def append_legal(self, article, truncate=False):
"""
Checks if the article has the legal flag on and adds 'Legal:' to the slugline
:param article: article having the slugline
:param truncate: truncates the slugline to 24 characters
:return: updated slugline
"""
slugline = article.get("slugline", "") or ""
if article.get("flags", {}).get("marked_for_legal", False):
slugline = "{}: {}".format("Legal", slugline)
if truncate:
slugline = slugline[:24]
return slugline
def map_html_to_xml(self, element, html):
"""
Map the html text tags to xml
:param etree.Element element: The xml element to populate
:param str html: the html to parse the text from
:return:
"""
root = parse_html(html, content="html")
        # if there are no p tags, just br tags
if not len(root.xpath("//p")) and len(root.xpath("//br")):
para = etree.SubElement(element, "p")
for br in root.xpath("//br"):
etree.SubElement(para, "br").text = br.text
for p in root.xpath("//p"):
para = etree.SubElement(element, "p")
if len(p.xpath(".//br")) > 0:
for br in p.xpath(".//br"):
etree.SubElement(para, "br").text = br.text
para.text = etree.tostring(p, encoding="unicode", method="text")
        # there are neither p tags nor br tags
if len(list(element)) == 0:
etree.SubElement(element, "p").text = etree.tostring(root, encoding="unicode", method="text")
def set_destination(self, destination=None, subscriber=None):
self.destination = destination
self.subscriber = subscriber
def get_formatter(format_type, article):
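    """Return an instance of the first registered formatter that can handle
    the given format type and article, or None if there is no match."""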
for formatter_cls in formatters:
formatter_instance = formatter_cls()
if formatter_instance.can_format(format_type, article):
return formatter_instance
def get_all_formatters():
"""Return all formatters registered."""
return [formatter_cls() for formatter_cls in formatters]
from .nitf_formatter import NITFFormatter # NOQA
from .ninjs_formatter import NINJSFormatter, NINJS2Formatter # NOQA
from .newsml_1_2_formatter import NewsML12Formatter # NOQA
from .newsml_g2_formatter import NewsMLG2Formatter # NOQA
from .email_formatter import EmailFormatter # NOQA
from .ninjs_newsroom_formatter import NewsroomNinjsFormatter # NOQA
from .idml_formatter import IDMLFormatter # NOQA
from .ninjs_ftp_formatter import FTPNinjsFormatter # NOQA
from .imatrics import IMatricsFormatter # NOQA<|fim▁end|> | self.subscriber = None
def format(self, article, subscriber, codes=None): |
<|file_name|>IEnumOptionsGroupEditpart.java<|end_file_name|><|fim▁begin|>/**
* Copyright (c) 2011 - 2015, Lunifera GmbH (Gross Enzersdorf), Loetz KG (Heidelberg)
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* Contributors:
* Florian Pirchner - Initial implementation
*/
package org.lunifera.ecview.core.ui.core.editparts.extension;
import org.lunifera.ecview.core.common.editpart.IFieldEditpart;
<|fim▁hole|>
}<|fim▁end|> | /**
* An edit part for optionGroup.
*/
public interface IEnumOptionsGroupEditpart extends IFieldEditpart { |
<|file_name|>expr.rs<|end_file_name|><|fim▁begin|>// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! # Translation of Expressions
//!
//! The expr module handles translation of expressions. The most general
//! translation routine is `trans()`, which will translate an expression
//! into a datum. `trans_into()` is also available, which will translate
//! an expression and write the result directly into memory, sometimes
//! avoiding the need for a temporary stack slot. Finally,
//! `trans_to_lvalue()` is available if you'd like to ensure that the
//! result has cleanup scheduled.
//!
//! Internally, each of these functions dispatches to various other
//! expression functions depending on the kind of expression. We divide
//! up expressions into:
//!
//! - **Datum expressions:** Those that most naturally yield values.
//! Examples would be `22`, `box x`, or `a + b` (when not overloaded).
//! - **DPS expressions:** Those that most naturally write into a location
//! in memory. Examples would be `foo()` or `Point { x: 3, y: 4 }`.
//! - **Statement expressions:** Those that do not generate a meaningful
//! result. Examples would be `while { ... }` or `return 44`.
//!
//! Public entry points:
//!
//! - `trans_into(bcx, expr, dest) -> bcx`: evaluates an expression,
//! storing the result into `dest`. This is the preferred form, if you
//! can manage it.
//!
//! - `trans(bcx, expr) -> DatumBlock`: evaluates an expression, yielding
//! `Datum` with the result. You can then store the datum, inspect
//! the value, etc. This may introduce temporaries if the datum is a
//! structural type.
//!
//! - `trans_to_lvalue(bcx, expr, "...") -> DatumBlock`: evaluates an
//! expression and ensures that the result has a cleanup associated with it,
//! creating a temporary stack slot if necessary.
//!
//! - `trans_local_var -> Datum`: looks up a local variable or upvar.
#![allow(non_camel_case_types)]
pub use self::cast_kind::*;
pub use self::Dest::*;
use self::lazy_binop_ty::*;
use back::abi;
use llvm::{self, ValueRef};
use middle::check_const;
use middle::def;
use middle::mem_categorization::Typer;
use middle::subst::{self, Substs};
use trans::{_match, adt, asm, base, callee, closure, consts, controlflow};
use trans::base::*;
use trans::build::*;
use trans::cleanup::{self, CleanupMethods};
use trans::common::*;
use trans::datum::*;
use trans::debuginfo::{self, DebugLoc, ToDebugLoc};
use trans::glue;
use trans::machine;
use trans::meth;
use trans::monomorphize;
use trans::tvec;
use trans::type_of;
use middle::ty::{struct_fields, tup_fields};
use middle::ty::{AdjustDerefRef, AdjustReifyFnPointer, AdjustUnsafeFnPointer};
use middle::ty::{self, Ty};
use middle::ty::MethodCall;
use util::common::indenter;
use util::ppaux::Repr;
use trans::machine::{llsize_of, llsize_of_alloc};
use trans::type_::Type;
use syntax::{ast, ast_util, codemap};
use syntax::parse::token::InternedString;
use syntax::ptr::P;
use syntax::parse::token;
use std::iter::repeat;
use std::mem;
use std::rc::Rc;
// Destinations
// These are passed around by the code generating functions to track the
// destination of a computation's value.
#[derive(Copy, Clone, PartialEq)]
pub enum Dest {
SaveIn(ValueRef),
Ignore,
}
impl Dest {
pub fn to_string(&self, ccx: &CrateContext) -> String {
match *self {
SaveIn(v) => format!("SaveIn({})", ccx.tn().val_to_string(v)),
Ignore => "Ignore".to_string()
}
}
}
/// This function is equivalent to `trans(bcx, expr).store_to_dest(dest)` but it may generate
/// better optimized LLVM code.
pub fn trans_into<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
expr: &ast::Expr,
dest: Dest)
-> Block<'blk, 'tcx> {
let mut bcx = bcx;
debuginfo::set_source_location(bcx.fcx, expr.id, expr.span);
if bcx.tcx().adjustments.borrow().contains_key(&expr.id) {
// use trans, which may be less efficient but
// which will perform the adjustments:
let datum = unpack_datum!(bcx, trans(bcx, expr));
return datum.store_to_dest(bcx, dest, expr.id);
}
let qualif = *bcx.tcx().const_qualif_map.borrow().get(&expr.id).unwrap();
if !qualif.intersects(check_const::NOT_CONST | check_const::NEEDS_DROP) {
if !qualif.intersects(check_const::PREFER_IN_PLACE) {
if let SaveIn(lldest) = dest {
let global = consts::get_const_expr_as_global(bcx.ccx(), expr, qualif,
bcx.fcx.param_substs);
// Cast pointer to destination, because constants
// have different types.
let lldest = PointerCast(bcx, lldest, val_ty(global));
memcpy_ty(bcx, lldest, global, expr_ty_adjusted(bcx, expr));
}
// Don't do anything in the Ignore case, consts don't need drop.
return bcx;
} else {
// The only way we're going to see a `const` at this point is if
// it prefers in-place instantiation, likely because it contains
// `[x; N]` somewhere within.
match expr.node {
ast::ExprPath(..) => {
match bcx.def(expr.id) {
def::DefConst(did) => {
let const_expr = consts::get_const_expr(bcx.ccx(), did, expr);
// Temporarily get cleanup scopes out of the way,
// as they require sub-expressions to be contained
// inside the current AST scope.
// These should record no cleanups anyways, `const`
// can't have destructors.
let scopes = mem::replace(&mut *bcx.fcx.scopes.borrow_mut(),
vec![]);
// Lock emitted debug locations to the location of
// the constant reference expression.
debuginfo::with_source_location_override(bcx.fcx,
expr.debug_loc(),
|| {
bcx = trans_into(bcx, const_expr, dest)
});
let scopes = mem::replace(&mut *bcx.fcx.scopes.borrow_mut(),
scopes);
assert!(scopes.is_empty());
return bcx;
}
_ => {}
}
}
_ => {}
}
}
}
debug!("trans_into() expr={}", expr.repr(bcx.tcx()));
let cleanup_debug_loc = debuginfo::get_cleanup_debug_loc_for_ast_node(bcx.ccx(),
expr.id,
expr.span,
false);
bcx.fcx.push_ast_cleanup_scope(cleanup_debug_loc);
let kind = ty::expr_kind(bcx.tcx(), expr);
bcx = match kind {
ty::LvalueExpr | ty::RvalueDatumExpr => {
trans_unadjusted(bcx, expr).store_to_dest(dest, expr.id)
}
ty::RvalueDpsExpr => {
trans_rvalue_dps_unadjusted(bcx, expr, dest)
}
ty::RvalueStmtExpr => {
trans_rvalue_stmt_unadjusted(bcx, expr)
}
};
bcx.fcx.pop_and_trans_ast_cleanup_scope(bcx, expr.id)
}
/// Translates an expression, returning a datum (and new block) encapsulating the result. When
/// possible, it is preferred to use `trans_into`, as that may avoid creating a temporary on the
/// stack.
pub fn trans<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
expr: &ast::Expr)
-> DatumBlock<'blk, 'tcx, Expr> {
debug!("trans(expr={})", bcx.expr_to_string(expr));
let mut bcx = bcx;
let fcx = bcx.fcx;
let qualif = *bcx.tcx().const_qualif_map.borrow().get(&expr.id).unwrap();
let adjusted_global = !qualif.intersects(check_const::NON_STATIC_BORROWS);
let global = if !qualif.intersects(check_const::NOT_CONST | check_const::NEEDS_DROP) {
let global = consts::get_const_expr_as_global(bcx.ccx(), expr, qualif,
bcx.fcx.param_substs);
if qualif.intersects(check_const::HAS_STATIC_BORROWS) {
// Is borrowed as 'static, must return lvalue.
// Cast pointer to global, because constants have different types.
let const_ty = expr_ty_adjusted(bcx, expr);
let llty = type_of::type_of(bcx.ccx(), const_ty);
let global = PointerCast(bcx, global, llty.ptr_to());
let datum = Datum::new(global, const_ty, Lvalue);
return DatumBlock::new(bcx, datum.to_expr_datum());
}
// Otherwise, keep around and perform adjustments, if needed.
let const_ty = if adjusted_global {
expr_ty_adjusted(bcx, expr)
} else {
expr_ty(bcx, expr)
};
// This could use a better heuristic.
Some(if type_is_immediate(bcx.ccx(), const_ty) {
// Cast pointer to global, because constants have different types.
let llty = type_of::type_of(bcx.ccx(), const_ty);
let global = PointerCast(bcx, global, llty.ptr_to());
// Maybe just get the value directly, instead of loading it?
immediate_rvalue(load_ty(bcx, global, const_ty), const_ty)
} else {
let llty = type_of::type_of(bcx.ccx(), const_ty);
// HACK(eddyb) get around issues with lifetime intrinsics.
let scratch = alloca_no_lifetime(bcx, llty, "const");
let lldest = if !ty::type_is_structural(const_ty) {
// Cast pointer to slot, because constants have different types.
PointerCast(bcx, scratch, val_ty(global))
} else {
// In this case, memcpy_ty calls llvm.memcpy after casting both
// source and destination to i8*, so we don't need any casts.
scratch
};
memcpy_ty(bcx, lldest, global, const_ty);
Datum::new(scratch, const_ty, Rvalue::new(ByRef))
})
} else {
None
};
let cleanup_debug_loc = debuginfo::get_cleanup_debug_loc_for_ast_node(bcx.ccx(),
expr.id,
expr.span,
false);
fcx.push_ast_cleanup_scope(cleanup_debug_loc);
let datum = match global {
Some(rvalue) => rvalue.to_expr_datum(),
None => unpack_datum!(bcx, trans_unadjusted(bcx, expr))
};
let datum = if adjusted_global {
datum // trans::consts already performed adjustments.
} else {
unpack_datum!(bcx, apply_adjustments(bcx, expr, datum))
};
bcx = fcx.pop_and_trans_ast_cleanup_scope(bcx, expr.id);
return DatumBlock::new(bcx, datum);
}
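// Note: `get_len` and `get_dataptr` return pointers to the components of a
// fat pointer; callers `Load` through them (see `copy_fat_ptr` below).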
pub fn get_len(bcx: Block, fat_ptr: ValueRef) -> ValueRef {
GEPi(bcx, fat_ptr, &[0, abi::FAT_PTR_EXTRA])
}
pub fn get_dataptr(bcx: Block, fat_ptr: ValueRef) -> ValueRef {
GEPi(bcx, fat_ptr, &[0, abi::FAT_PTR_ADDR])
}
pub fn copy_fat_ptr(bcx: Block, src_ptr: ValueRef, dst_ptr: ValueRef) {
Store(bcx, Load(bcx, get_dataptr(bcx, src_ptr)), get_dataptr(bcx, dst_ptr));
Store(bcx, Load(bcx, get_len(bcx, src_ptr)), get_len(bcx, dst_ptr));
}
/// Retrieve the information we are losing (making dynamic) in an unsizing
/// adjustment.
///
/// The `old_info` argument is a bit funny. It is intended for use
/// in an upcast, where the new vtable for an object will be drived
/// from the old one.
pub fn unsized_info<'ccx, 'tcx>(ccx: &CrateContext<'ccx, 'tcx>,
source: Ty<'tcx>,
target: Ty<'tcx>,
old_info: Option<ValueRef>,
param_substs: &'tcx subst::Substs<'tcx>)
-> ValueRef {
let (source, target) = ty::struct_lockstep_tails(ccx.tcx(), source, target);
match (&source.sty, &target.sty) {
(&ty::ty_vec(_, Some(len)), &ty::ty_vec(_, None)) => C_uint(ccx, len),
(&ty::ty_trait(_), &ty::ty_trait(_)) => {
// For now, upcasts are limited to changes in marker
// traits, and hence never actually require an actual
// change to the vtable.
old_info.expect("unsized_info: missing old info for trait upcast")
}
(_, &ty::ty_trait(box ty::TyTrait { ref principal, .. })) => {
// Note that we preserve binding levels here:
let substs = principal.0.substs.with_self_ty(source).erase_regions();
let substs = ccx.tcx().mk_substs(substs);
let trait_ref = ty::Binder(Rc::new(ty::TraitRef { def_id: principal.def_id(),
substs: substs }));
consts::ptrcast(meth::get_vtable(ccx, trait_ref, param_substs),
Type::vtable_ptr(ccx))
}
_ => ccx.sess().bug(&format!("unsized_info: invalid unsizing {} -> {}",
source.repr(ccx.tcx()),
target.repr(ccx.tcx())))
}
}
/// Helper for trans that applies adjustments from `expr` to `datum`, which should be the unadjusted
/// translation of `expr`.
fn apply_adjustments<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
expr: &ast::Expr,
datum: Datum<'tcx, Expr>)
-> DatumBlock<'blk, 'tcx, Expr>
{
let mut bcx = bcx;
let mut datum = datum;
let adjustment = match bcx.tcx().adjustments.borrow().get(&expr.id).cloned() {
None => {
return DatumBlock::new(bcx, datum);
}
Some(adj) => { adj }
};
debug!("unadjusted datum for expr {}: {} adjustment={:?}",
expr.repr(bcx.tcx()),
datum.to_string(bcx.ccx()),
adjustment);
match adjustment {
AdjustReifyFnPointer => {
// FIXME(#19925) once fn item types are
// zero-sized, we'll need to do something here
}
AdjustUnsafeFnPointer => {
// purely a type-level thing
}
AdjustDerefRef(ref adj) => {
let skip_reborrows = if adj.autoderefs == 1 && adj.autoref.is_some() {
// We are a bit paranoid about adjustments and thus might have a re-
// borrow here which merely derefs and then refs again (it might have
// a different region or mutability, but we don't care here).
match datum.ty.sty {
// Don't skip a conversion from Box<T> to &T, etc.
ty::ty_rptr(..) => {
let method_call = MethodCall::autoderef(expr.id, 0);
if bcx.tcx().method_map.borrow().contains_key(&method_call) {
// Don't skip an overloaded deref.
0
} else {
1
}
}
_ => 0
}
} else {
0
};
if adj.autoderefs > skip_reborrows {
// Schedule cleanup.
let lval = unpack_datum!(bcx, datum.to_lvalue_datum(bcx, "auto_deref", expr.id));
datum = unpack_datum!(bcx, deref_multiple(bcx, expr,
lval.to_expr_datum(),
adj.autoderefs - skip_reborrows));
}
// (You might think there is a more elegant way to do this than a
// skip_reborrows bool, but then you remember that the borrow checker exists).
if skip_reborrows == 0 && adj.autoref.is_some() {
datum = unpack_datum!(bcx, apply_autoref(bcx, expr, datum));
}
if let Some(target) = adj.unsize {
datum = unpack_datum!(bcx, unsize_pointer(bcx, datum,
bcx.monomorphize(&target)));
}
}
}
debug!("after adjustments, datum={}", datum.to_string(bcx.ccx()));
return DatumBlock::new(bcx, datum);
fn apply_autoref<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
expr: &ast::Expr,
datum: Datum<'tcx, Expr>)
-> DatumBlock<'blk, 'tcx, Expr> {
let mut bcx = bcx;
if !type_is_sized(bcx.tcx(), datum.ty) {
// Arrange cleanup
let lval = unpack_datum!(bcx,
datum.to_lvalue_datum(bcx, "ref_fat_ptr", expr.id));
ref_fat_ptr(bcx, lval)
} else {
auto_ref(bcx, datum, expr)
}
}
fn unsize_pointer<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
datum: Datum<'tcx, Expr>,
target: Ty<'tcx>)
-> DatumBlock<'blk, 'tcx, Expr> {
let mut bcx = bcx;
let unsized_ty = ty::deref(target, true)
.expect("expr::unsize got non-pointer target type").ty;
debug!("unsize_lvalue(unsized_ty={})", unsized_ty.repr(bcx.tcx()));
// We do not arrange cleanup ourselves; if we already are an
// L-value, then cleanup will have already been scheduled (and
// the `datum.to_rvalue_datum` call below will emit code to zero
// the drop flag when moving out of the L-value). If we are an
// R-value, then we do not need to schedule cleanup.
let datum = unpack_datum!(bcx, datum.to_rvalue_datum(bcx, "__unsize_ref"));
let pointee_ty = ty::deref(datum.ty, true)
.expect("expr::unsize got non-pointer datum type").ty;
let (base, old_info) = if !type_is_sized(bcx.tcx(), pointee_ty) {
// Normally, the source is a thin pointer and we are
// adding extra info to make a fat pointer. The exception
// is when we are upcasting an existing object fat pointer
// to use a different vtable. In that case, we want to
// load out the original data pointer so we can repackage
// it.
(Load(bcx, get_dataptr(bcx, datum.val)),
Some(Load(bcx, get_len(bcx, datum.val))))
} else {
(datum.val, None)
};
let info = unsized_info(bcx.ccx(), pointee_ty, unsized_ty,
old_info, bcx.fcx.param_substs);
// Compute the base pointer. This doesn't change the pointer value,
// but merely its type.
let ptr_ty = type_of::in_memory_type_of(bcx.ccx(), unsized_ty).ptr_to();
let base = PointerCast(bcx, base, ptr_ty);
let llty = type_of::type_of(bcx.ccx(), target);
// HACK(eddyb) get around issues with lifetime intrinsics.
let scratch = alloca_no_lifetime(bcx, llty, "__fat_ptr");
Store(bcx, base, get_dataptr(bcx, scratch));
Store(bcx, info, get_len(bcx, scratch));
DatumBlock::new(bcx, Datum::new(scratch, target, RvalueExpr(Rvalue::new(ByRef))))
}
}
/// Translates an expression in "lvalue" mode -- meaning that it returns a reference to the memory
/// that the expr represents.
///
/// If this expression is an rvalue, this implies introducing a temporary. In other words,
/// something like `x().f` is translated into roughly the equivalent of
///
/// { tmp = x(); tmp.f }
pub fn trans_to_lvalue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
expr: &ast::Expr,
name: &str)
-> DatumBlock<'blk, 'tcx, Lvalue> {
let mut bcx = bcx;
let datum = unpack_datum!(bcx, trans(bcx, expr));
return datum.to_lvalue_datum(bcx, name, expr.id);
}
/// A version of `trans` that ignores adjustments. You almost certainly do not want to call this
/// directly.
fn trans_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
expr: &ast::Expr)
-> DatumBlock<'blk, 'tcx, Expr> {
let mut bcx = bcx;
debug!("trans_unadjusted(expr={})", bcx.expr_to_string(expr));
let _indenter = indenter();
debuginfo::set_source_location(bcx.fcx, expr.id, expr.span);
return match ty::expr_kind(bcx.tcx(), expr) {
ty::LvalueExpr | ty::RvalueDatumExpr => {
let datum = unpack_datum!(bcx, {
trans_datum_unadjusted(bcx, expr)
});
DatumBlock {bcx: bcx, datum: datum}
}
ty::RvalueStmtExpr => {
bcx = trans_rvalue_stmt_unadjusted(bcx, expr);
nil(bcx, expr_ty(bcx, expr))
}
ty::RvalueDpsExpr => {
let ty = expr_ty(bcx, expr);
if type_is_zero_size(bcx.ccx(), ty) {
bcx = trans_rvalue_dps_unadjusted(bcx, expr, Ignore);
nil(bcx, ty)
} else {
let scratch = rvalue_scratch_datum(bcx, ty, "");
bcx = trans_rvalue_dps_unadjusted(
bcx, expr, SaveIn(scratch.val));
// Note: this is not obviously a good idea. It causes
// immediate values to be loaded immediately after a
// return from a call or other similar expression,
// which in turn leads to alloca's having shorter
// lifetimes and hence larger stack frames. However,
// in turn it can lead to more register pressure.
// Still, in practice it seems to increase
// performance, since we have fewer problems with
// morestack churn.
let scratch = unpack_datum!(
bcx, scratch.to_appropriate_datum(bcx));
DatumBlock::new(bcx, scratch.to_expr_datum())
}
}
};
fn nil<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ty: Ty<'tcx>)
-> DatumBlock<'blk, 'tcx, Expr> {
let llval = C_undef(type_of::type_of(bcx.ccx(), ty));
let datum = immediate_rvalue(llval, ty);
DatumBlock::new(bcx, datum.to_expr_datum())
}
}
fn trans_datum_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
expr: &ast::Expr)
-> DatumBlock<'blk, 'tcx, Expr> {
let mut bcx = bcx;
let fcx = bcx.fcx;
let _icx = push_ctxt("trans_datum_unadjusted");
match expr.node {
ast::ExprParen(ref e) => {
trans(bcx, &**e)
}
ast::ExprPath(..) => {
trans_def(bcx, expr, bcx.def(expr.id))
}
ast::ExprField(ref base, ident) => {
trans_rec_field(bcx, &**base, ident.node.name)
}
ast::ExprTupField(ref base, idx) => {
trans_rec_tup_field(bcx, &**base, idx.node)
}
ast::ExprIndex(ref base, ref idx) => {
trans_index(bcx, expr, &**base, &**idx, MethodCall::expr(expr.id))
}
ast::ExprBox(_, ref contents) => {
// Special case for `Box<T>`
let box_ty = expr_ty(bcx, expr);
let contents_ty = expr_ty(bcx, &**contents);
match box_ty.sty {
ty::ty_uniq(..) => {
trans_uniq_expr(bcx, expr, box_ty, &**contents, contents_ty)
}
_ => bcx.sess().span_bug(expr.span,
"expected unique box")
}
}
ast::ExprLit(ref lit) => trans_immediate_lit(bcx, expr, &**lit),
ast::ExprBinary(op, ref lhs, ref rhs) => {
trans_binary(bcx, expr, op, &**lhs, &**rhs)
}
ast::ExprUnary(op, ref x) => {
trans_unary(bcx, expr, op, &**x)
}
ast::ExprAddrOf(_, ref x) => {
match x.node {
ast::ExprRepeat(..) | ast::ExprVec(..) => {
// Special case for slices.
let cleanup_debug_loc =
debuginfo::get_cleanup_debug_loc_for_ast_node(bcx.ccx(),
x.id,
x.span,
false);
fcx.push_ast_cleanup_scope(cleanup_debug_loc);
let datum = unpack_datum!(
bcx, tvec::trans_slice_vec(bcx, expr, &**x));
bcx = fcx.pop_and_trans_ast_cleanup_scope(bcx, x.id);
DatumBlock::new(bcx, datum)
}
_ => {
trans_addr_of(bcx, expr, &**x)
}
}
}
ast::ExprCast(ref val, _) => {
// Datum output mode means this is a scalar cast:
trans_imm_cast(bcx, &**val, expr.id)
}
_ => {
bcx.tcx().sess.span_bug(
expr.span,
&format!("trans_rvalue_datum_unadjusted reached \
fall-through case: {:?}",
expr.node));
}
}
}<|fim▁hole|> -> DatumBlock<'blk, 'tcx, Expr> where
F: FnOnce(&'blk ty::ctxt<'tcx>, &[ty::field<'tcx>]) -> usize,
{
let mut bcx = bcx;
let _icx = push_ctxt("trans_rec_field");
let base_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, base, "field"));
let bare_ty = base_datum.ty;
let repr = adt::represent_type(bcx.ccx(), bare_ty);
with_field_tys(bcx.tcx(), bare_ty, None, move |discr, field_tys| {
let ix = get_idx(bcx.tcx(), field_tys);
let d = base_datum.get_element(
bcx,
field_tys[ix].mt.ty,
|srcval| adt::trans_field_ptr(bcx, &*repr, srcval, discr, ix));
if type_is_sized(bcx.tcx(), d.ty) {
DatumBlock { datum: d.to_expr_datum(), bcx: bcx }
} else {
let scratch = rvalue_scratch_datum(bcx, d.ty, "");
Store(bcx, d.val, get_dataptr(bcx, scratch.val));
let info = Load(bcx, get_len(bcx, base_datum.val));
Store(bcx, info, get_len(bcx, scratch.val));
DatumBlock::new(bcx, scratch.to_expr_datum())
}
})
}
/// Translates `base.field`.
fn trans_rec_field<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
base: &ast::Expr,
field: ast::Name)
-> DatumBlock<'blk, 'tcx, Expr> {
trans_field(bcx, base, |tcx, field_tys| ty::field_idx_strict(tcx, field, field_tys))
}
/// Translates `base.<idx>`.
fn trans_rec_tup_field<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
base: &ast::Expr,
idx: usize)
-> DatumBlock<'blk, 'tcx, Expr> {
trans_field(bcx, base, |_, _| idx)
}
fn trans_index<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
index_expr: &ast::Expr,
base: &ast::Expr,
idx: &ast::Expr,
method_call: MethodCall)
-> DatumBlock<'blk, 'tcx, Expr> {
//! Translates `base[idx]`.
let _icx = push_ctxt("trans_index");
let ccx = bcx.ccx();
let mut bcx = bcx;
let index_expr_debug_loc = index_expr.debug_loc();
// Check for overloaded index.
let method_ty = ccx.tcx()
.method_map
.borrow()
.get(&method_call)
.map(|method| method.ty);
let elt_datum = match method_ty {
Some(method_ty) => {
let method_ty = monomorphize_type(bcx, method_ty);
let base_datum = unpack_datum!(bcx, trans(bcx, base));
// Translate index expression.
let ix_datum = unpack_datum!(bcx, trans(bcx, idx));
let ref_ty = // invoked methods have LB regions instantiated:
ty::no_late_bound_regions(
bcx.tcx(), &ty::ty_fn_ret(method_ty)).unwrap().unwrap();
let elt_ty = match ty::deref(ref_ty, true) {
None => {
bcx.tcx().sess.span_bug(index_expr.span,
"index method didn't return a \
dereferenceable type?!")
}
Some(elt_tm) => elt_tm.ty,
};
// Overloaded. Evaluate `trans_overloaded_op`, which will
// invoke the user's index() method, which basically yields
// a `&T` pointer. We can then proceed down the normal
// path (below) to dereference that `&T`.
let scratch = rvalue_scratch_datum(bcx, ref_ty, "overloaded_index_elt");
unpack_result!(bcx,
trans_overloaded_op(bcx,
index_expr,
method_call,
base_datum,
vec![(ix_datum, idx.id)],
Some(SaveIn(scratch.val)),
false));
let datum = scratch.to_expr_datum();
if type_is_sized(bcx.tcx(), elt_ty) {
Datum::new(datum.to_llscalarish(bcx), elt_ty, LvalueExpr)
} else {
Datum::new(datum.val, elt_ty, LvalueExpr)
}
}
None => {
let base_datum = unpack_datum!(bcx, trans_to_lvalue(bcx,
base,
"index"));
// Translate index expression and cast to a suitable LLVM integer.
// Rust is less strict than LLVM in this regard.
let ix_datum = unpack_datum!(bcx, trans(bcx, idx));
let ix_val = ix_datum.to_llscalarish(bcx);
let ix_size = machine::llbitsize_of_real(bcx.ccx(),
val_ty(ix_val));
let int_size = machine::llbitsize_of_real(bcx.ccx(),
ccx.int_type());
let ix_val = {
if ix_size < int_size {
if ty::type_is_signed(expr_ty(bcx, idx)) {
SExt(bcx, ix_val, ccx.int_type())
} else { ZExt(bcx, ix_val, ccx.int_type()) }
} else if ix_size > int_size {
Trunc(bcx, ix_val, ccx.int_type())
} else {
ix_val
}
};
let unit_ty = ty::sequence_element_type(bcx.tcx(), base_datum.ty);
let (base, len) = base_datum.get_vec_base_and_len(bcx);
debug!("trans_index: base {}", bcx.val_to_string(base));
debug!("trans_index: len {}", bcx.val_to_string(len));
let bounds_check = ICmp(bcx,
llvm::IntUGE,
ix_val,
len,
index_expr_debug_loc);
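            // llvm.expect marks the bounds check as almost always passing,
            // so LLVM keeps the failure path cold.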
let expect = ccx.get_intrinsic(&("llvm.expect.i1"));
let expected = Call(bcx,
expect,
&[bounds_check, C_bool(ccx, false)],
None,
index_expr_debug_loc);
bcx = with_cond(bcx, expected, |bcx| {
controlflow::trans_fail_bounds_check(bcx,
expr_info(index_expr),
ix_val,
len)
});
let elt = InBoundsGEP(bcx, base, &[ix_val]);
let elt = PointerCast(bcx, elt, type_of::type_of(ccx, unit_ty).ptr_to());
Datum::new(elt, unit_ty, LvalueExpr)
}
};
DatumBlock::new(bcx, elt_datum)
}
fn trans_def<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
ref_expr: &ast::Expr,
def: def::Def)
-> DatumBlock<'blk, 'tcx, Expr> {
//! Translates a reference to a path.
let _icx = push_ctxt("trans_def_lvalue");
match def {
def::DefFn(..) | def::DefMethod(..) |
def::DefStruct(_) | def::DefVariant(..) => {
let datum = trans_def_fn_unadjusted(bcx.ccx(), ref_expr, def,
bcx.fcx.param_substs);
DatumBlock::new(bcx, datum.to_expr_datum())
}
def::DefStatic(did, _) => {
// There are two things that may happen here:
// 1) If the static item is defined in this crate, it will be
// translated using `get_item_val`, and we return a pointer to
// the result.
// 2) If the static item is defined in another crate then we add
// (or reuse) a declaration of an external global, and return a
// pointer to that.
let const_ty = expr_ty(bcx, ref_expr);
// For external constants, we don't inline.
let val = if did.krate == ast::LOCAL_CRATE {
// Case 1.
// The LLVM global has the type of its initializer,
// which may not be equal to the enum's type for
// non-C-like enums.
let val = base::get_item_val(bcx.ccx(), did.node);
let pty = type_of::type_of(bcx.ccx(), const_ty).ptr_to();
PointerCast(bcx, val, pty)
} else {
// Case 2.
base::get_extern_const(bcx.ccx(), did, const_ty)
};
DatumBlock::new(bcx, Datum::new(val, const_ty, LvalueExpr))
}
def::DefConst(_) => {
bcx.sess().span_bug(ref_expr.span,
"constant expression should not reach expr::trans_def")
}
_ => {
DatumBlock::new(bcx, trans_local_var(bcx, def).to_expr_datum())
}
}
}
fn trans_rvalue_stmt_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
expr: &ast::Expr)
-> Block<'blk, 'tcx> {
let mut bcx = bcx;
let _icx = push_ctxt("trans_rvalue_stmt");
if bcx.unreachable.get() {
return bcx;
}
debuginfo::set_source_location(bcx.fcx, expr.id, expr.span);
match expr.node {
ast::ExprParen(ref e) => {
trans_into(bcx, &**e, Ignore)
}
ast::ExprBreak(label_opt) => {
controlflow::trans_break(bcx, expr, label_opt)
}
ast::ExprAgain(label_opt) => {
controlflow::trans_cont(bcx, expr, label_opt)
}
ast::ExprRet(ref ex) => {
// Check to see if the return expression itself is reachable.
// This can occur when the inner expression contains a return
let reachable = if let Some(ref cfg) = bcx.fcx.cfg {
cfg.node_is_reachable(expr.id)
} else {
true
};
if reachable {
controlflow::trans_ret(bcx, expr, ex.as_ref().map(|e| &**e))
} else {
// If it's not reachable, just translate the inner expression
// directly. This avoids having to manage a return slot when
// it won't actually be used anyway.
if let &Some(ref x) = ex {
bcx = trans_into(bcx, &**x, Ignore);
}
// Mark the end of the block as unreachable. Once we get to
                // a return expression, there's nothing more we should be doing
// after this.
Unreachable(bcx);
bcx
}
}
ast::ExprWhile(ref cond, ref body, _) => {
controlflow::trans_while(bcx, expr, &**cond, &**body)
}
ast::ExprLoop(ref body, _) => {
controlflow::trans_loop(bcx, expr, &**body)
}
ast::ExprAssign(ref dst, ref src) => {
let src_datum = unpack_datum!(bcx, trans(bcx, &**src));
let dst_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, &**dst, "assign"));
if bcx.fcx.type_needs_drop(dst_datum.ty) {
// If there are destructors involved, make sure we
                // are copying from an rvalue, since that cannot possibly
// alias an lvalue. We are concerned about code like:
//
// a = a
//
// but also
//
// a = a.b
//
// where e.g. a : Option<Foo> and a.b :
// Option<Foo>. In that case, freeing `a` before the
// assignment may also free `a.b`!
//
// We could avoid this intermediary with some analysis
// to determine whether `dst` may possibly own `src`.
debuginfo::set_source_location(bcx.fcx, expr.id, expr.span);
let src_datum = unpack_datum!(
bcx, src_datum.to_rvalue_datum(bcx, "ExprAssign"));
bcx = glue::drop_ty(bcx,
dst_datum.val,
dst_datum.ty,
expr.debug_loc());
src_datum.store_to(bcx, dst_datum.val)
} else {
src_datum.store_to(bcx, dst_datum.val)
}
}
ast::ExprAssignOp(op, ref dst, ref src) => {
trans_assign_op(bcx, expr, op, &**dst, &**src)
}
ast::ExprInlineAsm(ref a) => {
asm::trans_inline_asm(bcx, a)
}
_ => {
bcx.tcx().sess.span_bug(
expr.span,
&format!("trans_rvalue_stmt_unadjusted reached \
fall-through case: {:?}",
expr.node));
}
}
}
fn trans_rvalue_dps_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
expr: &ast::Expr,
dest: Dest)
-> Block<'blk, 'tcx> {
let _icx = push_ctxt("trans_rvalue_dps_unadjusted");
let mut bcx = bcx;
let tcx = bcx.tcx();
debuginfo::set_source_location(bcx.fcx, expr.id, expr.span);
match expr.node {
ast::ExprParen(ref e) => {
trans_into(bcx, &**e, dest)
}
ast::ExprPath(..) => {
trans_def_dps_unadjusted(bcx, expr, bcx.def(expr.id), dest)
}
ast::ExprIf(ref cond, ref thn, ref els) => {
controlflow::trans_if(bcx, expr.id, &**cond, &**thn, els.as_ref().map(|e| &**e), dest)
}
ast::ExprMatch(ref discr, ref arms, _) => {
_match::trans_match(bcx, expr, &**discr, &arms[..], dest)
}
ast::ExprBlock(ref blk) => {
controlflow::trans_block(bcx, &**blk, dest)
}
ast::ExprStruct(_, ref fields, ref base) => {
trans_struct(bcx,
&fields[..],
base.as_ref().map(|e| &**e),
expr.span,
expr.id,
node_id_type(bcx, expr.id),
dest)
}
ast::ExprRange(ref start, ref end) => {
// FIXME it is just not right that we are synthesising ast nodes in
// trans. Shudder.
fn make_field(field_name: &str, expr: P<ast::Expr>) -> ast::Field {
ast::Field {
ident: codemap::dummy_spanned(token::str_to_ident(field_name)),
expr: expr,
span: codemap::DUMMY_SP,
}
}
// A range just desugars into a struct.
// Note that the type of the start and end may not be the same, but
// they should only differ in their lifetime, which should not matter
// in trans.
let (did, fields, ty_params) = match (start, end) {
(&Some(ref start), &Some(ref end)) => {
// Desugar to Range
let fields = vec![make_field("start", start.clone()),
make_field("end", end.clone())];
(tcx.lang_items.range_struct(), fields, vec![node_id_type(bcx, start.id)])
}
(&Some(ref start), &None) => {
// Desugar to RangeFrom
let fields = vec![make_field("start", start.clone())];
(tcx.lang_items.range_from_struct(), fields, vec![node_id_type(bcx, start.id)])
}
(&None, &Some(ref end)) => {
// Desugar to RangeTo
let fields = vec![make_field("end", end.clone())];
(tcx.lang_items.range_to_struct(), fields, vec![node_id_type(bcx, end.id)])
}
_ => {
// Desugar to RangeFull
(tcx.lang_items.range_full_struct(), vec![], vec![])
}
};
if let Some(did) = did {
let substs = Substs::new_type(ty_params, vec![]);
trans_struct(bcx,
&fields,
None,
expr.span,
expr.id,
ty::mk_struct(tcx, did, tcx.mk_substs(substs)),
dest)
} else {
tcx.sess.span_bug(expr.span,
"No lang item for ranges (how did we get this far?)")
}
}
ast::ExprTup(ref args) => {
let numbered_fields: Vec<(usize, &ast::Expr)> =
args.iter().enumerate().map(|(i, arg)| (i, &**arg)).collect();
trans_adt(bcx,
expr_ty(bcx, expr),
0,
&numbered_fields[..],
None,
dest,
expr.debug_loc())
}
ast::ExprLit(ref lit) => {
match lit.node {
ast::LitStr(ref s, _) => {
tvec::trans_lit_str(bcx, expr, (*s).clone(), dest)
}
_ => {
bcx.tcx()
.sess
.span_bug(expr.span,
"trans_rvalue_dps_unadjusted shouldn't be \
translating this type of literal")
}
}
}
ast::ExprVec(..) | ast::ExprRepeat(..) => {
tvec::trans_fixed_vstore(bcx, expr, dest)
}
ast::ExprClosure(_, ref decl, ref body) => {
let dest = match dest {
SaveIn(lldest) => closure::Dest::SaveIn(bcx, lldest),
Ignore => closure::Dest::Ignore(bcx.ccx())
};
closure::trans_closure_expr(dest, &**decl, &**body, expr.id, bcx.fcx.param_substs)
.unwrap_or(bcx)
}
ast::ExprCall(ref f, ref args) => {
if bcx.tcx().is_method_call(expr.id) {
trans_overloaded_call(bcx,
expr,
&**f,
&args[..],
Some(dest))
} else {
callee::trans_call(bcx,
expr,
&**f,
callee::ArgExprs(&args[..]),
dest)
}
}
ast::ExprMethodCall(_, _, ref args) => {
callee::trans_method_call(bcx,
expr,
&*args[0],
callee::ArgExprs(&args[..]),
dest)
}
ast::ExprBinary(op, ref lhs, ref rhs) => {
// if not overloaded, would be RvalueDatumExpr
let lhs = unpack_datum!(bcx, trans(bcx, &**lhs));
let rhs_datum = unpack_datum!(bcx, trans(bcx, &**rhs));
trans_overloaded_op(bcx, expr, MethodCall::expr(expr.id), lhs,
vec![(rhs_datum, rhs.id)], Some(dest),
!ast_util::is_by_value_binop(op.node)).bcx
}
ast::ExprUnary(op, ref subexpr) => {
// if not overloaded, would be RvalueDatumExpr
let arg = unpack_datum!(bcx, trans(bcx, &**subexpr));
trans_overloaded_op(bcx, expr, MethodCall::expr(expr.id),
arg, Vec::new(), Some(dest), !ast_util::is_by_value_unop(op)).bcx
}
ast::ExprIndex(ref base, ref idx) => {
// if not overloaded, would be RvalueDatumExpr
let base = unpack_datum!(bcx, trans(bcx, &**base));
let idx_datum = unpack_datum!(bcx, trans(bcx, &**idx));
trans_overloaded_op(bcx, expr, MethodCall::expr(expr.id), base,
vec![(idx_datum, idx.id)], Some(dest), true).bcx
}
ast::ExprCast(..) => {
// Trait casts used to come this way, now they should be coercions.
bcx.tcx().sess.span_bug(expr.span, "DPS expr_cast (residual trait cast?)")
}
ast::ExprAssignOp(op, ref dst, ref src) => {
trans_assign_op(bcx, expr, op, &**dst, &**src)
}
_ => {
bcx.tcx().sess.span_bug(
expr.span,
&format!("trans_rvalue_dps_unadjusted reached fall-through \
case: {:?}",
expr.node));
}
}
}
fn trans_def_dps_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
ref_expr: &ast::Expr,
def: def::Def,
dest: Dest)
-> Block<'blk, 'tcx> {
let _icx = push_ctxt("trans_def_dps_unadjusted");
let lldest = match dest {
SaveIn(lldest) => lldest,
Ignore => { return bcx; }
};
match def {
def::DefVariant(tid, vid, _) => {
let variant_info = ty::enum_variant_with_id(bcx.tcx(), tid, vid);
if variant_info.args.len() > 0 {
// N-ary variant.
let llfn = callee::trans_fn_ref(bcx.ccx(), vid,
ExprId(ref_expr.id),
bcx.fcx.param_substs).val;
Store(bcx, llfn, lldest);
return bcx;
} else {
// Nullary variant.
let ty = expr_ty(bcx, ref_expr);
let repr = adt::represent_type(bcx.ccx(), ty);
adt::trans_set_discr(bcx, &*repr, lldest,
variant_info.disr_val);
return bcx;
}
}
def::DefStruct(_) => {
let ty = expr_ty(bcx, ref_expr);
match ty.sty {
ty::ty_struct(did, _) if ty::has_dtor(bcx.tcx(), did) => {
let repr = adt::represent_type(bcx.ccx(), ty);
adt::trans_set_discr(bcx, &*repr, lldest, 0);
}
_ => {}
}
bcx
}
_ => {
bcx.tcx().sess.span_bug(ref_expr.span, &format!(
"Non-DPS def {:?} referened by {}",
def, bcx.node_id_to_string(ref_expr.id)));
}
}
}
pub fn trans_def_fn_unadjusted<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
ref_expr: &ast::Expr,
def: def::Def,
param_substs: &'tcx subst::Substs<'tcx>)
-> Datum<'tcx, Rvalue> {
let _icx = push_ctxt("trans_def_datum_unadjusted");
match def {
def::DefFn(did, _) |
def::DefStruct(did) | def::DefVariant(_, did, _) |
def::DefMethod(did, def::FromImpl(_)) => {
callee::trans_fn_ref(ccx, did, ExprId(ref_expr.id), param_substs)
}
def::DefMethod(impl_did, def::FromTrait(trait_did)) => {
meth::trans_static_method_callee(ccx, impl_did,
trait_did, ref_expr.id,
param_substs)
}
_ => {
ccx.tcx().sess.span_bug(ref_expr.span, &format!(
"trans_def_fn_unadjusted invoked on: {:?} for {}",
def,
ref_expr.repr(ccx.tcx())));
}
}
}
/// Translates a reference to a local variable or argument. This always results in an lvalue datum.
pub fn trans_local_var<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
def: def::Def)
-> Datum<'tcx, Lvalue> {
let _icx = push_ctxt("trans_local_var");
match def {
def::DefUpvar(nid, _) => {
// Can't move upvars, so this is never a ZeroMemLastUse.
let local_ty = node_id_type(bcx, nid);
match bcx.fcx.llupvars.borrow().get(&nid) {
Some(&val) => Datum::new(val, local_ty, Lvalue),
None => {
bcx.sess().bug(&format!(
"trans_local_var: no llval for upvar {} found",
nid));
}
}
}
def::DefLocal(nid) => {
let datum = match bcx.fcx.lllocals.borrow().get(&nid) {
Some(&v) => v,
None => {
bcx.sess().bug(&format!(
"trans_local_var: no datum for local/arg {} found",
nid));
}
};
debug!("take_local(nid={}, v={}, ty={})",
nid, bcx.val_to_string(datum.val), bcx.ty_to_string(datum.ty));
datum
}
_ => {
bcx.sess().unimpl(&format!(
"unsupported def type in trans_local_var: {:?}",
def));
}
}
}
/// Helper for enumerating the field types of structs, enums, or records. The optional node ID here
/// is the node ID of the path identifying the enum variant in use. If none, this cannot possibly
/// be an enum variant (so, if it is an enum variant and `node_id_opt` is none, this function panics).
pub fn with_field_tys<'tcx, R, F>(tcx: &ty::ctxt<'tcx>,
ty: Ty<'tcx>,
node_id_opt: Option<ast::NodeId>,
op: F)
-> R where
F: FnOnce(ty::Disr, &[ty::field<'tcx>]) -> R,
{
match ty.sty {
ty::ty_struct(did, substs) => {
let fields = struct_fields(tcx, did, substs);
let fields = monomorphize::normalize_associated_type(tcx, &fields);
op(0, &fields[..])
}
ty::ty_tup(ref v) => {
op(0, &tup_fields(&v[..]))
}
ty::ty_enum(_, substs) => {
// We want the *variant* ID here, not the enum ID.
match node_id_opt {
None => {
tcx.sess.bug(&format!(
"cannot get field types from the enum type {} \
without a node ID",
ty.repr(tcx)));
}
Some(node_id) => {
let def = tcx.def_map.borrow().get(&node_id).unwrap().full_def();
match def {
def::DefVariant(enum_id, variant_id, _) => {
let variant_info = ty::enum_variant_with_id(tcx, enum_id, variant_id);
let fields = struct_fields(tcx, variant_id, substs);
let fields = monomorphize::normalize_associated_type(tcx, &fields);
op(variant_info.disr_val, &fields[..])
}
_ => {
tcx.sess.bug("resolve didn't map this expr to a \
variant ID")
}
}
}
}
}
_ => {
tcx.sess.bug(&format!(
"cannot get field types from the type {}",
ty.repr(tcx)));
}
}
}
fn trans_struct<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
fields: &[ast::Field],
base: Option<&ast::Expr>,
expr_span: codemap::Span,
expr_id: ast::NodeId,
ty: Ty<'tcx>,
dest: Dest) -> Block<'blk, 'tcx> {
let _icx = push_ctxt("trans_rec");
let tcx = bcx.tcx();
with_field_tys(tcx, ty, Some(expr_id), |discr, field_tys| {
let mut need_base: Vec<bool> = repeat(true).take(field_tys.len()).collect();
let numbered_fields = fields.iter().map(|field| {
let opt_pos =
field_tys.iter().position(|field_ty|
field_ty.name == field.ident.node.name);
let result = match opt_pos {
Some(i) => {
need_base[i] = false;
(i, &*field.expr)
}
None => {
tcx.sess.span_bug(field.span,
"Couldn't find field in struct type")
}
};
result
}).collect::<Vec<_>>();
let optbase = match base {
Some(base_expr) => {
let mut leftovers = Vec::new();
for (i, b) in need_base.iter().enumerate() {
if *b {
leftovers.push((i, field_tys[i].mt.ty));
}
}
Some(StructBaseInfo {expr: base_expr,
fields: leftovers })
}
None => {
if need_base.iter().any(|b| *b) {
tcx.sess.span_bug(expr_span, "missing fields and no base expr")
}
None
}
};
trans_adt(bcx,
ty,
discr,
&numbered_fields,
optbase,
dest,
DebugLoc::At(expr_id, expr_span))
})
}
/// Information that `trans_adt` needs in order to fill in the fields
/// of a struct copied from a base struct (e.g., from an expression
/// like `Foo { a: b, ..base }`).
///
/// Note that `fields` may be empty; the base expression must always be
/// evaluated for side-effects.
pub struct StructBaseInfo<'a, 'tcx> {
/// The base expression; will be evaluated after all explicit fields.
expr: &'a ast::Expr,
/// The indices of fields to copy paired with their types.
fields: Vec<(usize, Ty<'tcx>)>
}
/// Constructs an ADT instance:
///
/// - `fields` should be a list of field indices paired with the
/// expression to store into that field. The initializers will be
/// evaluated in the order specified by `fields`.
///
/// - `optbase` contains information on the base struct (if any) from
/// which remaining fields are copied; see comments on `StructBaseInfo`.
pub fn trans_adt<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
ty: Ty<'tcx>,
discr: ty::Disr,
fields: &[(usize, &ast::Expr)],
optbase: Option<StructBaseInfo<'a, 'tcx>>,
dest: Dest,
debug_location: DebugLoc)
-> Block<'blk, 'tcx> {
let _icx = push_ctxt("trans_adt");
let fcx = bcx.fcx;
let repr = adt::represent_type(bcx.ccx(), ty);
debug_location.apply(bcx.fcx);
// If we don't care about the result, just make a
// temporary stack slot
let addr = match dest {
SaveIn(pos) => pos,
Ignore => alloc_ty(bcx, ty, "temp"),
};
// This scope holds intermediates that must be cleaned should
// panic occur before the ADT as a whole is ready.
let custom_cleanup_scope = fcx.push_custom_cleanup_scope();
if ty::type_is_simd(bcx.tcx(), ty) {
// Issue 23112: The original logic appeared vulnerable to same
// order-of-eval bug. But, SIMD values are tuple-structs;
// i.e. functional record update (FRU) syntax is unavailable.
//
// To be safe, double-check that we did not get here via FRU.
assert!(optbase.is_none());
// This is the constructor of a SIMD type, such types are
// always primitive machine types and so do not have a
// destructor or require any clean-up.
let llty = type_of::type_of(bcx.ccx(), ty);
        // keep the vector in a register, running through the fields and
        // `insertelement`ing each value directly into that register
        // (i.e. avoid GEPi and `store`s to an alloca).
let mut vec_val = C_undef(llty);
for &(i, ref e) in fields {
let block_datum = trans(bcx, &**e);
bcx = block_datum.bcx;
let position = C_uint(bcx.ccx(), i);
let value = block_datum.datum.to_llscalarish(bcx);
vec_val = InsertElement(bcx, vec_val, value, position);
}
Store(bcx, vec_val, addr);
} else if let Some(base) = optbase {
// Issue 23112: If there is a base, then order-of-eval
// requires field expressions eval'ed before base expression.
// First, trans field expressions to temporary scratch values.
let scratch_vals: Vec<_> = fields.iter().map(|&(i, ref e)| {
let datum = unpack_datum!(bcx, trans(bcx, &**e));
(i, datum)
}).collect();
debug_location.apply(bcx.fcx);
// Second, trans the base to the dest.
assert_eq!(discr, 0);
match ty::expr_kind(bcx.tcx(), &*base.expr) {
ty::RvalueDpsExpr | ty::RvalueDatumExpr if !bcx.fcx.type_needs_drop(ty) => {
bcx = trans_into(bcx, &*base.expr, SaveIn(addr));
},
ty::RvalueStmtExpr => bcx.tcx().sess.bug("unexpected expr kind for struct base expr"),
_ => {
let base_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, &*base.expr, "base"));
for &(i, t) in &base.fields {
let datum = base_datum.get_element(
bcx, t, |srcval| adt::trans_field_ptr(bcx, &*repr, srcval, discr, i));
assert!(type_is_sized(bcx.tcx(), datum.ty));
let dest = adt::trans_field_ptr(bcx, &*repr, addr, discr, i);
bcx = datum.store_to(bcx, dest);
}
}
}
// Finally, move scratch field values into actual field locations
for (i, datum) in scratch_vals.into_iter() {
let dest = adt::trans_field_ptr(bcx, &*repr, addr, discr, i);
bcx = datum.store_to(bcx, dest);
}
} else {
// No base means we can write all fields directly in place.
for &(i, ref e) in fields {
let dest = adt::trans_field_ptr(bcx, &*repr, addr, discr, i);
let e_ty = expr_ty_adjusted(bcx, &**e);
bcx = trans_into(bcx, &**e, SaveIn(dest));
let scope = cleanup::CustomScope(custom_cleanup_scope);
fcx.schedule_lifetime_end(scope, dest);
fcx.schedule_drop_mem(scope, dest, e_ty);
}
}
adt::trans_set_discr(bcx, &*repr, addr, discr);
fcx.pop_custom_cleanup_scope(custom_cleanup_scope);
// If we don't care about the result drop the temporary we made
match dest {
SaveIn(_) => bcx,
Ignore => {
bcx = glue::drop_ty(bcx, addr, ty, debug_location);
base::call_lifetime_end(bcx, addr);
bcx
}
}
}
fn trans_immediate_lit<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
expr: &ast::Expr,
lit: &ast::Lit)
-> DatumBlock<'blk, 'tcx, Expr> {
// must not be a string constant, that is a RvalueDpsExpr
let _icx = push_ctxt("trans_immediate_lit");
let ty = expr_ty(bcx, expr);
let v = consts::const_lit(bcx.ccx(), expr, lit);
immediate_rvalue_bcx(bcx, v, ty).to_expr_datumblock()
}
fn trans_unary<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
expr: &ast::Expr,
op: ast::UnOp,
sub_expr: &ast::Expr)
-> DatumBlock<'blk, 'tcx, Expr> {
let ccx = bcx.ccx();
let mut bcx = bcx;
let _icx = push_ctxt("trans_unary_datum");
let method_call = MethodCall::expr(expr.id);
// The only overloaded operator that is translated to a datum
    // is an overloaded deref, since it always yields a `&T`.
// Otherwise, we should be in the RvalueDpsExpr path.
assert!(
op == ast::UnDeref ||
!ccx.tcx().method_map.borrow().contains_key(&method_call));
let un_ty = expr_ty(bcx, expr);
let debug_loc = expr.debug_loc();
match op {
ast::UnNot => {
let datum = unpack_datum!(bcx, trans(bcx, sub_expr));
let llresult = Not(bcx, datum.to_llscalarish(bcx), debug_loc);
immediate_rvalue_bcx(bcx, llresult, un_ty).to_expr_datumblock()
}
ast::UnNeg => {
let datum = unpack_datum!(bcx, trans(bcx, sub_expr));
let val = datum.to_llscalarish(bcx);
let llneg = {
if ty::type_is_fp(un_ty) {
FNeg(bcx, val, debug_loc)
} else {
Neg(bcx, val, debug_loc)
}
};
immediate_rvalue_bcx(bcx, llneg, un_ty).to_expr_datumblock()
}
ast::UnUniq => {
trans_uniq_expr(bcx, expr, un_ty, sub_expr, expr_ty(bcx, sub_expr))
}
ast::UnDeref => {
let datum = unpack_datum!(bcx, trans(bcx, sub_expr));
deref_once(bcx, expr, datum, method_call)
}
}
}
fn trans_uniq_expr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
box_expr: &ast::Expr,
box_ty: Ty<'tcx>,
contents: &ast::Expr,
contents_ty: Ty<'tcx>)
-> DatumBlock<'blk, 'tcx, Expr> {
let _icx = push_ctxt("trans_uniq_expr");
let fcx = bcx.fcx;
assert!(type_is_sized(bcx.tcx(), contents_ty));
let llty = type_of::type_of(bcx.ccx(), contents_ty);
let size = llsize_of(bcx.ccx(), llty);
let align = C_uint(bcx.ccx(), type_of::align_of(bcx.ccx(), contents_ty));
let llty_ptr = llty.ptr_to();
let Result { bcx, val } = malloc_raw_dyn(bcx,
llty_ptr,
box_ty,
size,
align,
box_expr.debug_loc());
// Unique boxes do not allocate for zero-size types. The standard library
// may assume that `free` is never called on the pointer returned for
// `Box<ZeroSizeType>`.
let bcx = if llsize_of_alloc(bcx.ccx(), llty) == 0 {
trans_into(bcx, contents, SaveIn(val))
} else {
let custom_cleanup_scope = fcx.push_custom_cleanup_scope();
fcx.schedule_free_value(cleanup::CustomScope(custom_cleanup_scope),
val, cleanup::HeapExchange, contents_ty);
let bcx = trans_into(bcx, contents, SaveIn(val));
fcx.pop_custom_cleanup_scope(custom_cleanup_scope);
bcx
};
immediate_rvalue_bcx(bcx, val, box_ty).to_expr_datumblock()
}
fn ref_fat_ptr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
lval: Datum<'tcx, Lvalue>)
-> DatumBlock<'blk, 'tcx, Expr> {
let dest_ty = ty::mk_imm_rptr(bcx.tcx(), bcx.tcx().mk_region(ty::ReStatic), lval.ty);
let scratch = rvalue_scratch_datum(bcx, dest_ty, "__fat_ptr");
memcpy_ty(bcx, scratch.val, lval.val, scratch.ty);
DatumBlock::new(bcx, scratch.to_expr_datum())
}
fn trans_addr_of<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
expr: &ast::Expr,
subexpr: &ast::Expr)
-> DatumBlock<'blk, 'tcx, Expr> {
let _icx = push_ctxt("trans_addr_of");
let mut bcx = bcx;
let sub_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, subexpr, "addr_of"));
if !type_is_sized(bcx.tcx(), sub_datum.ty) {
// DST lvalue, close to a fat pointer
ref_fat_ptr(bcx, sub_datum)
} else {
// Sized value, ref to a thin pointer
let ty = expr_ty(bcx, expr);
immediate_rvalue_bcx(bcx, sub_datum.val, ty).to_expr_datumblock()
}
}
// Important to get types for both lhs and rhs, because one might be _|_
// and the other not.
fn trans_eager_binop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
binop_expr: &ast::Expr,
binop_ty: Ty<'tcx>,
op: ast::BinOp,
lhs_t: Ty<'tcx>,
lhs: ValueRef,
rhs_t: Ty<'tcx>,
rhs: ValueRef)
-> DatumBlock<'blk, 'tcx, Expr> {
let _icx = push_ctxt("trans_eager_binop");
let tcx = bcx.tcx();
let is_simd = ty::type_is_simd(tcx, lhs_t);
let intype = if is_simd {
ty::simd_type(tcx, lhs_t)
} else {
lhs_t
};
let is_float = ty::type_is_fp(intype);
let is_signed = ty::type_is_signed(intype);
let info = expr_info(binop_expr);
let binop_debug_loc = binop_expr.debug_loc();
let mut bcx = bcx;
let val = match op.node {
ast::BiAdd => {
if is_float {
FAdd(bcx, lhs, rhs, binop_debug_loc)
} else if is_simd {
Add(bcx, lhs, rhs, binop_debug_loc)
} else {
let (newbcx, res) = with_overflow_check(
bcx, OverflowOp::Add, info, lhs_t, lhs, rhs, binop_debug_loc);
bcx = newbcx;
res
}
}
ast::BiSub => {
if is_float {
FSub(bcx, lhs, rhs, binop_debug_loc)
} else if is_simd {
Sub(bcx, lhs, rhs, binop_debug_loc)
} else {
let (newbcx, res) = with_overflow_check(
bcx, OverflowOp::Sub, info, lhs_t, lhs, rhs, binop_debug_loc);
bcx = newbcx;
res
}
}
ast::BiMul => {
if is_float {
FMul(bcx, lhs, rhs, binop_debug_loc)
} else if is_simd {
Mul(bcx, lhs, rhs, binop_debug_loc)
} else {
let (newbcx, res) = with_overflow_check(
bcx, OverflowOp::Mul, info, lhs_t, lhs, rhs, binop_debug_loc);
bcx = newbcx;
res
}
}
ast::BiDiv => {
if is_float {
FDiv(bcx, lhs, rhs, binop_debug_loc)
} else {
// Only zero-check integers; fp /0 is NaN
bcx = base::fail_if_zero_or_overflows(bcx,
expr_info(binop_expr),
op,
lhs,
rhs,
rhs_t);
if is_signed {
SDiv(bcx, lhs, rhs, binop_debug_loc)
} else {
UDiv(bcx, lhs, rhs, binop_debug_loc)
}
}
}
ast::BiRem => {
if is_float {
FRem(bcx, lhs, rhs, binop_debug_loc)
} else {
// Only zero-check integers; fp %0 is NaN
bcx = base::fail_if_zero_or_overflows(bcx,
expr_info(binop_expr),
op, lhs, rhs, rhs_t);
if is_signed {
SRem(bcx, lhs, rhs, binop_debug_loc)
} else {
URem(bcx, lhs, rhs, binop_debug_loc)
}
}
}
ast::BiBitOr => Or(bcx, lhs, rhs, binop_debug_loc),
ast::BiBitAnd => And(bcx, lhs, rhs, binop_debug_loc),
ast::BiBitXor => Xor(bcx, lhs, rhs, binop_debug_loc),
ast::BiShl => {
let (newbcx, res) = with_overflow_check(
bcx, OverflowOp::Shl, info, lhs_t, lhs, rhs, binop_debug_loc);
bcx = newbcx;
res
}
ast::BiShr => {
let (newbcx, res) = with_overflow_check(
bcx, OverflowOp::Shr, info, lhs_t, lhs, rhs, binop_debug_loc);
bcx = newbcx;
res
}
ast::BiEq | ast::BiNe | ast::BiLt | ast::BiGe | ast::BiLe | ast::BiGt => {
if is_simd {
base::compare_simd_types(bcx, lhs, rhs, intype, op.node, binop_debug_loc)
} else {
base::compare_scalar_types(bcx, lhs, rhs, intype, op.node, binop_debug_loc)
}
}
_ => {
bcx.tcx().sess.span_bug(binop_expr.span, "unexpected binop");
}
};
immediate_rvalue_bcx(bcx, val, binop_ty).to_expr_datumblock()
}
// refinement types would obviate the need for this
enum lazy_binop_ty {
lazy_and,
lazy_or,
}
fn trans_lazy_binop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
binop_expr: &ast::Expr,
op: lazy_binop_ty,
a: &ast::Expr,
b: &ast::Expr)
-> DatumBlock<'blk, 'tcx, Expr> {
let _icx = push_ctxt("trans_lazy_binop");
let binop_ty = expr_ty(bcx, binop_expr);
let fcx = bcx.fcx;
let DatumBlock {bcx: past_lhs, datum: lhs} = trans(bcx, a);
let lhs = lhs.to_llscalarish(past_lhs);
if past_lhs.unreachable.get() {
return immediate_rvalue_bcx(past_lhs, lhs, binop_ty).to_expr_datumblock();
}
let join = fcx.new_id_block("join", binop_expr.id);
let before_rhs = fcx.new_id_block("before_rhs", b.id);
match op {
lazy_and => CondBr(past_lhs, lhs, before_rhs.llbb, join.llbb, DebugLoc::None),
lazy_or => CondBr(past_lhs, lhs, join.llbb, before_rhs.llbb, DebugLoc::None)
}
let DatumBlock {bcx: past_rhs, datum: rhs} = trans(before_rhs, b);
let rhs = rhs.to_llscalarish(past_rhs);
if past_rhs.unreachable.get() {
return immediate_rvalue_bcx(join, lhs, binop_ty).to_expr_datumblock();
}
Br(past_rhs, join.llbb, DebugLoc::None);
let phi = Phi(join, Type::i1(bcx.ccx()), &[lhs, rhs],
&[past_lhs.llbb, past_rhs.llbb]);
return immediate_rvalue_bcx(join, phi, binop_ty).to_expr_datumblock();
}
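// Editorial sketch, not part of the original source: for `a && b` the code
// above builds roughly this control-flow graph (block names follow the
// `new_id_block` calls; the IR below is illustrative, not verbatim output):
//
//     past_lhs:   %lhs = ...                   ; evaluate `a`
//                 br i1 %lhs, label %before_rhs, label %join
//     before_rhs: %rhs = ...                   ; evaluate `b`, ends in %past_rhs
//     past_rhs:   br label %join
//     join:       %res = phi i1 [ %lhs, %past_lhs ], [ %rhs, %past_rhs ]
//
// For `||` the two CondBr targets are swapped, so `b` is only evaluated
// when `a` is false.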
fn trans_binary<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
expr: &ast::Expr,
op: ast::BinOp,
lhs: &ast::Expr,
rhs: &ast::Expr)
-> DatumBlock<'blk, 'tcx, Expr> {
let _icx = push_ctxt("trans_binary");
let ccx = bcx.ccx();
// if overloaded, would be RvalueDpsExpr
assert!(!ccx.tcx().method_map.borrow().contains_key(&MethodCall::expr(expr.id)));
match op.node {
ast::BiAnd => {
trans_lazy_binop(bcx, expr, lazy_and, lhs, rhs)
}
ast::BiOr => {
trans_lazy_binop(bcx, expr, lazy_or, lhs, rhs)
}
_ => {
let mut bcx = bcx;
let lhs_datum = unpack_datum!(bcx, trans(bcx, lhs));
let rhs_datum = unpack_datum!(bcx, trans(bcx, rhs));
let binop_ty = expr_ty(bcx, expr);
debug!("trans_binary (expr {}): lhs_datum={}",
expr.id,
lhs_datum.to_string(ccx));
let lhs_ty = lhs_datum.ty;
let lhs = lhs_datum.to_llscalarish(bcx);
debug!("trans_binary (expr {}): rhs_datum={}",
expr.id,
rhs_datum.to_string(ccx));
let rhs_ty = rhs_datum.ty;
let rhs = rhs_datum.to_llscalarish(bcx);
trans_eager_binop(bcx, expr, binop_ty, op,
lhs_ty, lhs, rhs_ty, rhs)
}
}
}
fn trans_overloaded_op<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
expr: &ast::Expr,
method_call: MethodCall,
lhs: Datum<'tcx, Expr>,
rhs: Vec<(Datum<'tcx, Expr>, ast::NodeId)>,
dest: Option<Dest>,
autoref: bool)
-> Result<'blk, 'tcx> {
let method_ty = bcx.tcx().method_map.borrow().get(&method_call).unwrap().ty;
callee::trans_call_inner(bcx,
expr.debug_loc(),
monomorphize_type(bcx, method_ty),
|bcx, arg_cleanup_scope| {
meth::trans_method_callee(bcx,
method_call,
None,
arg_cleanup_scope)
},
callee::ArgOverloadedOp(lhs, rhs, autoref),
dest)
}
fn trans_overloaded_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
expr: &ast::Expr,
callee: &'a ast::Expr,
args: &'a [P<ast::Expr>],
dest: Option<Dest>)
-> Block<'blk, 'tcx> {
let method_call = MethodCall::expr(expr.id);
let method_type = bcx.tcx()
.method_map
.borrow()
.get(&method_call)
.unwrap()
.ty;
let mut all_args = vec!(callee);
all_args.extend(args.iter().map(|e| &**e));
unpack_result!(bcx,
callee::trans_call_inner(bcx,
expr.debug_loc(),
monomorphize_type(bcx,
method_type),
|bcx, arg_cleanup_scope| {
meth::trans_method_callee(
bcx,
method_call,
None,
arg_cleanup_scope)
},
callee::ArgOverloadedCall(all_args),
dest));
bcx
}
fn int_cast(bcx: Block,
lldsttype: Type,
llsrctype: Type,
llsrc: ValueRef,
signed: bool)
-> ValueRef {
let _icx = push_ctxt("int_cast");
let srcsz = llsrctype.int_width();
let dstsz = lldsttype.int_width();
return if dstsz == srcsz {
BitCast(bcx, llsrc, lldsttype)
} else if srcsz > dstsz {
TruncOrBitCast(bcx, llsrc, lldsttype)
} else if signed {
SExtOrBitCast(bcx, llsrc, lldsttype)
} else {
ZExtOrBitCast(bcx, llsrc, lldsttype)
}
}
fn float_cast(bcx: Block,
lldsttype: Type,
llsrctype: Type,
llsrc: ValueRef)
-> ValueRef {
let _icx = push_ctxt("float_cast");
let srcsz = llsrctype.float_width();
let dstsz = lldsttype.float_width();
return if dstsz > srcsz {
FPExt(bcx, llsrc, lldsttype)
} else if srcsz > dstsz {
FPTrunc(bcx, llsrc, lldsttype)
} else { llsrc };
}
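// Illustrative aside, not part of the original source: the two helpers above
// choose the LLVM cast purely from the relative bit widths. Assuming the
// usual widths:
//
//     i64 -> i32 : TruncOrBitCast              (srcsz > dstsz)
//     i32 -> i64 : SExtOrBitCast / ZExtOrBitCast, depending on `signed`
//     f32 -> f64 : FPExt
//     f64 -> f32 : FPTrunc
//     same width : BitCast (ints) / no-op (floats)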
#[derive(Copy, Clone, PartialEq, Debug)]
pub enum cast_kind {
cast_pointer,
cast_integral,
cast_float,
cast_enum,
cast_other,
}
pub fn cast_type_kind<'tcx>(tcx: &ty::ctxt<'tcx>, t: Ty<'tcx>) -> cast_kind {
match t.sty {
ty::ty_char => cast_integral,
ty::ty_float(..) => cast_float,
ty::ty_rptr(_, mt) | ty::ty_ptr(mt) => {
if type_is_sized(tcx, mt.ty) {
cast_pointer
} else {
cast_other
}
}
ty::ty_bare_fn(..) => cast_pointer,
ty::ty_int(..) => cast_integral,
ty::ty_uint(..) => cast_integral,
ty::ty_bool => cast_integral,
ty::ty_enum(..) => cast_enum,
_ => cast_other
}
}
pub fn cast_is_noop<'tcx>(t_in: Ty<'tcx>, t_out: Ty<'tcx>) -> bool {
match (ty::deref(t_in, true), ty::deref(t_out, true)) {
(Some(ty::mt{ ty: t_in, .. }), Some(ty::mt{ ty: t_out, .. })) => {
t_in == t_out
}
_ => false
}
}
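// Illustrative aside, not part of the original source: `cast_is_noop` returns
// true whenever both sides dereference to the same pointee, for example
// `&T as *const T` or `Box<T> as *mut T` -- the value is a single pointer
// either way, so no instruction needs to be emitted for the cast.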
fn trans_imm_cast<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
expr: &ast::Expr,
id: ast::NodeId)
-> DatumBlock<'blk, 'tcx, Expr> {
let _icx = push_ctxt("trans_cast");
let mut bcx = bcx;
let ccx = bcx.ccx();
let t_in = expr_ty_adjusted(bcx, expr);
let t_out = node_id_type(bcx, id);
let k_in = cast_type_kind(bcx.tcx(), t_in);
let k_out = cast_type_kind(bcx.tcx(), t_out);
let s_in = k_in == cast_integral && ty::type_is_signed(t_in);
let ll_t_in = type_of::arg_type_of(ccx, t_in);
let ll_t_out = type_of::arg_type_of(ccx, t_out);
// Convert the value to be cast into a ValueRef, either by-ref or
// by-value as appropriate given its type:
let mut datum = unpack_datum!(bcx, trans(bcx, expr));
let datum_ty = monomorphize_type(bcx, datum.ty);
if cast_is_noop(datum_ty, t_out) {
datum.ty = t_out;
return DatumBlock::new(bcx, datum);
}
let newval = match (k_in, k_out) {
(cast_integral, cast_integral) => {
let llexpr = datum.to_llscalarish(bcx);
int_cast(bcx, ll_t_out, ll_t_in, llexpr, s_in)
}
(cast_float, cast_float) => {
let llexpr = datum.to_llscalarish(bcx);
float_cast(bcx, ll_t_out, ll_t_in, llexpr)
}
(cast_integral, cast_float) => {
let llexpr = datum.to_llscalarish(bcx);
if s_in {
SIToFP(bcx, llexpr, ll_t_out)
} else { UIToFP(bcx, llexpr, ll_t_out) }
}
(cast_float, cast_integral) => {
let llexpr = datum.to_llscalarish(bcx);
if ty::type_is_signed(t_out) {
FPToSI(bcx, llexpr, ll_t_out)
} else { FPToUI(bcx, llexpr, ll_t_out) }
}
(cast_integral, cast_pointer) => {
let llexpr = datum.to_llscalarish(bcx);
IntToPtr(bcx, llexpr, ll_t_out)
}
(cast_pointer, cast_integral) => {
let llexpr = datum.to_llscalarish(bcx);
PtrToInt(bcx, llexpr, ll_t_out)
}
(cast_pointer, cast_pointer) => {
let llexpr = datum.to_llscalarish(bcx);
PointerCast(bcx, llexpr, ll_t_out)
}
(cast_enum, cast_integral) |
(cast_enum, cast_float) => {
let mut bcx = bcx;
let repr = adt::represent_type(ccx, t_in);
let datum = unpack_datum!(
bcx, datum.to_lvalue_datum(bcx, "trans_imm_cast", expr.id));
let llexpr_ptr = datum.to_llref();
let lldiscrim_a =
adt::trans_get_discr(bcx, &*repr, llexpr_ptr, Some(Type::i64(ccx)));
match k_out {
cast_integral => int_cast(bcx, ll_t_out,
val_ty(lldiscrim_a),
lldiscrim_a, true),
cast_float => SIToFP(bcx, lldiscrim_a, ll_t_out),
_ => {
ccx.sess().bug(&format!("translating unsupported cast: \
{} ({:?}) -> {} ({:?})",
t_in.repr(bcx.tcx()),
k_in,
t_out.repr(bcx.tcx()),
k_out))
}
}
}
_ => ccx.sess().bug(&format!("translating unsupported cast: \
{} ({:?}) -> {} ({:?})",
t_in.repr(bcx.tcx()),
k_in,
t_out.repr(bcx.tcx()),
k_out))
};
return immediate_rvalue_bcx(bcx, newval, t_out).to_expr_datumblock();
}
fn trans_assign_op<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
expr: &ast::Expr,
op: ast::BinOp,
dst: &ast::Expr,
src: &ast::Expr)
-> Block<'blk, 'tcx> {
let _icx = push_ctxt("trans_assign_op");
let mut bcx = bcx;
debug!("trans_assign_op(expr={})", bcx.expr_to_string(expr));
// User-defined operator methods cannot be used with `+=` etc right now
assert!(!bcx.tcx().method_map.borrow().contains_key(&MethodCall::expr(expr.id)));
// Evaluate LHS (destination), which should be an lvalue
let dst_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, dst, "assign_op"));
assert!(!bcx.fcx.type_needs_drop(dst_datum.ty));
let dst_ty = dst_datum.ty;
let dst = load_ty(bcx, dst_datum.val, dst_datum.ty);
// Evaluate RHS
let rhs_datum = unpack_datum!(bcx, trans(bcx, &*src));
let rhs_ty = rhs_datum.ty;
let rhs = rhs_datum.to_llscalarish(bcx);
// Perform computation and store the result
let result_datum = unpack_datum!(
bcx, trans_eager_binop(bcx, expr, dst_datum.ty, op,
dst_ty, dst, rhs_ty, rhs));
return result_datum.store_to(bcx, dst_datum.val);
}
fn auto_ref<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
datum: Datum<'tcx, Expr>,
expr: &ast::Expr)
-> DatumBlock<'blk, 'tcx, Expr> {
let mut bcx = bcx;
// Ensure cleanup of `datum` if not already scheduled and obtain
// a "by ref" pointer.
let lv_datum = unpack_datum!(bcx, datum.to_lvalue_datum(bcx, "autoref", expr.id));
// Compute final type. Note that we are loose with the region and
// mutability, since those things don't matter in trans.
let referent_ty = lv_datum.ty;
let ptr_ty = ty::mk_imm_rptr(bcx.tcx(), bcx.tcx().mk_region(ty::ReStatic), referent_ty);
// Get the pointer.
let llref = lv_datum.to_llref();
// Construct the resulting datum, using what was the "by ref"
// ValueRef of type `referent_ty` to be the "by value" ValueRef
// of type `&referent_ty`.
DatumBlock::new(bcx, Datum::new(llref, ptr_ty, RvalueExpr(Rvalue::new(ByValue))))
}
fn deref_multiple<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
expr: &ast::Expr,
datum: Datum<'tcx, Expr>,
times: usize)
-> DatumBlock<'blk, 'tcx, Expr> {
let mut bcx = bcx;
let mut datum = datum;
for i in 0..times {
let method_call = MethodCall::autoderef(expr.id, i as u32);
datum = unpack_datum!(bcx, deref_once(bcx, expr, datum, method_call));
}
DatumBlock { bcx: bcx, datum: datum }
}
fn deref_once<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
expr: &ast::Expr,
datum: Datum<'tcx, Expr>,
method_call: MethodCall)
-> DatumBlock<'blk, 'tcx, Expr> {
let ccx = bcx.ccx();
debug!("deref_once(expr={}, datum={}, method_call={:?})",
expr.repr(bcx.tcx()),
datum.to_string(ccx),
method_call);
let mut bcx = bcx;
// Check for overloaded deref.
let method_ty = ccx.tcx().method_map.borrow()
.get(&method_call).map(|method| method.ty);
let datum = match method_ty {
Some(method_ty) => {
let method_ty = monomorphize_type(bcx, method_ty);
// Overloaded. Evaluate `trans_overloaded_op`, which will
// invoke the user's deref() method, which basically
// converts from the `Smaht<T>` pointer that we have into
// a `&T` pointer. We can then proceed down the normal
// path (below) to dereference that `&T`.
let datum = if method_call.autoderef == 0 {
datum
} else {
// Always perform an AutoPtr when applying an overloaded auto-deref
unpack_datum!(bcx, auto_ref(bcx, datum, expr))
};
let ref_ty = // invoked methods have their LB regions instantiated
ty::no_late_bound_regions(
ccx.tcx(), &ty::ty_fn_ret(method_ty)).unwrap().unwrap();
let scratch = rvalue_scratch_datum(bcx, ref_ty, "overloaded_deref");
unpack_result!(bcx, trans_overloaded_op(bcx, expr, method_call,
datum, Vec::new(), Some(SaveIn(scratch.val)),
false));
scratch.to_expr_datum()
}
None => {
// Not overloaded. We already have a pointer we know how to deref.
datum
}
};
let r = match datum.ty.sty {
ty::ty_uniq(content_ty) => {
if type_is_sized(bcx.tcx(), content_ty) {
deref_owned_pointer(bcx, expr, datum, content_ty)
} else {
// A fat pointer and a DST lvalue have the same representation,
// just different types. Since there is no temporary for `*e`
// here (because it is unsized), we cannot emulate the sized
// object code path for running drop glue and free. Instead,
// we schedule cleanup for `e`, turning it into an lvalue.
let datum = unpack_datum!(
bcx, datum.to_lvalue_datum(bcx, "deref", expr.id));
let datum = Datum::new(datum.val, content_ty, LvalueExpr);
DatumBlock::new(bcx, datum)
}
}
ty::ty_ptr(ty::mt { ty: content_ty, .. }) |
ty::ty_rptr(_, ty::mt { ty: content_ty, .. }) => {
if type_is_sized(bcx.tcx(), content_ty) {
let ptr = datum.to_llscalarish(bcx);
// Always generate an lvalue datum, even if datum.mode is
// an rvalue. This is because datum.mode is only an
// rvalue for non-owning pointers like &T or *T, in which
// case cleanup *is* scheduled elsewhere, by the true
// owner (or, in the case of *T, by the user).
DatumBlock::new(bcx, Datum::new(ptr, content_ty, LvalueExpr))
} else {
// A fat pointer and a DST lvalue have the same representation,
// just different types.
DatumBlock::new(bcx, Datum::new(datum.val, content_ty, LvalueExpr))
}
}
_ => {
bcx.tcx().sess.span_bug(
expr.span,
&format!("deref invoked on expr of illegal type {}",
datum.ty.repr(bcx.tcx())));
}
};
debug!("deref_once(expr={}, method_call={:?}, result={})",
expr.id, method_call, r.datum.to_string(ccx));
return r;
/// We micro-optimize derefs of owned pointers a bit here. Basically, the idea is to make the
/// deref of an rvalue result in an rvalue. This helps to avoid intermediate stack slots in the
/// resulting LLVM. The idea here is that, if the `Box<T>` pointer is an rvalue, then we can
/// schedule a *shallow* free of the `Box<T>` pointer, and then return a ByRef rvalue into the
/// pointer. Because the free is shallow, it is legit to return an rvalue, because we know that
/// the contents are not yet scheduled to be freed. The language rules ensure that the contents
/// will be used (or moved) before the free occurs.
fn deref_owned_pointer<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
expr: &ast::Expr,
datum: Datum<'tcx, Expr>,
content_ty: Ty<'tcx>)
-> DatumBlock<'blk, 'tcx, Expr> {
match datum.kind {
RvalueExpr(Rvalue { mode: ByRef }) => {
let scope = cleanup::temporary_scope(bcx.tcx(), expr.id);
let ptr = Load(bcx, datum.val);
if !type_is_zero_size(bcx.ccx(), content_ty) {
bcx.fcx.schedule_free_value(scope, ptr, cleanup::HeapExchange, content_ty);
}
}
RvalueExpr(Rvalue { mode: ByValue }) => {
let scope = cleanup::temporary_scope(bcx.tcx(), expr.id);
if !type_is_zero_size(bcx.ccx(), content_ty) {
bcx.fcx.schedule_free_value(scope, datum.val, cleanup::HeapExchange,
content_ty);
}
}
LvalueExpr => { }
}
// If we had an rvalue in, we produce an rvalue out.
let (llptr, kind) = match datum.kind {
LvalueExpr => {
(Load(bcx, datum.val), LvalueExpr)
}
RvalueExpr(Rvalue { mode: ByRef }) => {
(Load(bcx, datum.val), RvalueExpr(Rvalue::new(ByRef)))
}
RvalueExpr(Rvalue { mode: ByValue }) => {
(datum.val, RvalueExpr(Rvalue::new(ByRef)))
}
};
let datum = Datum { ty: content_ty, val: llptr, kind: kind };
DatumBlock { bcx: bcx, datum: datum }
}
}
#[derive(Debug)]
enum OverflowOp {
Add,
Sub,
Mul,
Shl,
Shr,
}
impl OverflowOp {
fn codegen_strategy(&self) -> OverflowCodegen {
use self::OverflowCodegen::{ViaIntrinsic, ViaInputCheck};
match *self {
OverflowOp::Add => ViaIntrinsic(OverflowOpViaIntrinsic::Add),
OverflowOp::Sub => ViaIntrinsic(OverflowOpViaIntrinsic::Sub),
OverflowOp::Mul => ViaIntrinsic(OverflowOpViaIntrinsic::Mul),
OverflowOp::Shl => ViaInputCheck(OverflowOpViaInputCheck::Shl),
OverflowOp::Shr => ViaInputCheck(OverflowOpViaInputCheck::Shr),
}
}
}
enum OverflowCodegen {
ViaIntrinsic(OverflowOpViaIntrinsic),
ViaInputCheck(OverflowOpViaInputCheck),
}
enum OverflowOpViaInputCheck { Shl, Shr, }
#[derive(Debug)]
enum OverflowOpViaIntrinsic { Add, Sub, Mul, }
impl OverflowOpViaIntrinsic {
fn to_intrinsic<'blk, 'tcx>(&self, bcx: Block<'blk, 'tcx>, lhs_ty: Ty) -> ValueRef {
let name = self.to_intrinsic_name(bcx.tcx(), lhs_ty);
bcx.ccx().get_intrinsic(&name)
}
fn to_intrinsic_name(&self, tcx: &ty::ctxt, ty: Ty) -> &'static str {
use syntax::ast::IntTy::*;
use syntax::ast::UintTy::*;
use middle::ty::{ty_int, ty_uint};
let new_sty = match ty.sty {
ty_int(TyIs) => match &tcx.sess.target.target.target_pointer_width[..] {
"32" => ty_int(TyI32),
"64" => ty_int(TyI64),
_ => panic!("unsupported target word size")
},
ty_uint(TyUs) => match &tcx.sess.target.target.target_pointer_width[..] {
"32" => ty_uint(TyU32),
"64" => ty_uint(TyU64),
_ => panic!("unsupported target word size")
},
ref t @ ty_uint(_) | ref t @ ty_int(_) => t.clone(),
_ => panic!("tried to get overflow intrinsic for {:?} applied to non-int type",
*self)
};
match *self {
OverflowOpViaIntrinsic::Add => match new_sty {
ty_int(TyI8) => "llvm.sadd.with.overflow.i8",
ty_int(TyI16) => "llvm.sadd.with.overflow.i16",
ty_int(TyI32) => "llvm.sadd.with.overflow.i32",
ty_int(TyI64) => "llvm.sadd.with.overflow.i64",
ty_uint(TyU8) => "llvm.uadd.with.overflow.i8",
ty_uint(TyU16) => "llvm.uadd.with.overflow.i16",
ty_uint(TyU32) => "llvm.uadd.with.overflow.i32",
ty_uint(TyU64) => "llvm.uadd.with.overflow.i64",
_ => unreachable!(),
},
OverflowOpViaIntrinsic::Sub => match new_sty {
ty_int(TyI8) => "llvm.ssub.with.overflow.i8",
ty_int(TyI16) => "llvm.ssub.with.overflow.i16",
ty_int(TyI32) => "llvm.ssub.with.overflow.i32",
ty_int(TyI64) => "llvm.ssub.with.overflow.i64",
ty_uint(TyU8) => "llvm.usub.with.overflow.i8",
ty_uint(TyU16) => "llvm.usub.with.overflow.i16",
ty_uint(TyU32) => "llvm.usub.with.overflow.i32",
ty_uint(TyU64) => "llvm.usub.with.overflow.i64",
_ => unreachable!(),
},
OverflowOpViaIntrinsic::Mul => match new_sty {
ty_int(TyI8) => "llvm.smul.with.overflow.i8",
ty_int(TyI16) => "llvm.smul.with.overflow.i16",
ty_int(TyI32) => "llvm.smul.with.overflow.i32",
ty_int(TyI64) => "llvm.smul.with.overflow.i64",
ty_uint(TyU8) => "llvm.umul.with.overflow.i8",
ty_uint(TyU16) => "llvm.umul.with.overflow.i16",
ty_uint(TyU32) => "llvm.umul.with.overflow.i32",
ty_uint(TyU64) => "llvm.umul.with.overflow.i64",
_ => unreachable!(),
},
}
}
fn build_intrinsic_call<'blk, 'tcx>(&self, bcx: Block<'blk, 'tcx>,
info: NodeIdAndSpan,
lhs_t: Ty<'tcx>, lhs: ValueRef,
rhs: ValueRef,
binop_debug_loc: DebugLoc)
-> (Block<'blk, 'tcx>, ValueRef) {
let llfn = self.to_intrinsic(bcx, lhs_t);
let val = Call(bcx, llfn, &[lhs, rhs], None, binop_debug_loc);
let result = ExtractValue(bcx, val, 0); // iN operation result
let overflow = ExtractValue(bcx, val, 1); // i1 "did it overflow?"
let cond = ICmp(bcx, llvm::IntEQ, overflow, C_integral(Type::i1(bcx.ccx()), 1, false),
binop_debug_loc);
let expect = bcx.ccx().get_intrinsic(&"llvm.expect.i1");
Call(bcx, expect, &[cond, C_integral(Type::i1(bcx.ccx()), 0, false)],
None, binop_debug_loc);
let bcx =
base::with_cond(bcx, cond, |bcx|
controlflow::trans_fail(bcx, info,
InternedString::new("arithmetic operation overflowed")));
(bcx, result)
}
}
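// Editorial aside -- a sketch of the IR shape the call above produces for an
// `i32` addition (an assumption about typical output, not captured output):
//
//     %pair     = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %lhs, i32 %rhs)
//     %result   = extractvalue { i32, i1 } %pair, 0   ; the iN result
//     %overflow = extractvalue { i32, i1 } %pair, 1   ; did it overflow?
//     ; llvm.expect.i1 then hints that the overflow branch is unlikely,
//     ; and with_cond guards the panic call on %overflow.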
impl OverflowOpViaInputCheck {
fn build_with_input_check<'blk, 'tcx>(&self,
bcx: Block<'blk, 'tcx>,
info: NodeIdAndSpan,
lhs_t: Ty<'tcx>,
lhs: ValueRef,
rhs: ValueRef,
binop_debug_loc: DebugLoc)
-> (Block<'blk, 'tcx>, ValueRef)
{
let lhs_llty = val_ty(lhs);
let rhs_llty = val_ty(rhs);
// Panic if any bits are set outside of bits that we always
// mask in.
//
// Note that the mask's value is derived from the LHS type
// (since that is where the 32/64 distinction is relevant) but
// the mask's type must match the RHS type (since they will
// both be fed into an and-binop)
let invert_mask = !shift_mask_val(lhs_llty);
let invert_mask = C_integral(rhs_llty, invert_mask, true);
let outer_bits = And(bcx, rhs, invert_mask, binop_debug_loc);
let cond = ICmp(bcx, llvm::IntNE, outer_bits,
C_integral(rhs_llty, 0, false), binop_debug_loc);
let result = match *self {
OverflowOpViaInputCheck::Shl =>
build_unchecked_lshift(bcx, lhs, rhs, binop_debug_loc),
OverflowOpViaInputCheck::Shr =>
build_unchecked_rshift(bcx, lhs_t, lhs, rhs, binop_debug_loc),
};
let bcx =
base::with_cond(bcx, cond, |bcx|
controlflow::trans_fail(bcx, info,
InternedString::new("shift operation overflowed")));
(bcx, result)
}
}
fn shift_mask_val(llty: Type) -> u64 {
// i8/u8 can shift by at most 7, i16/u16 by at most 15, etc.
llty.int_width() - 1
}
// To avoid UB from LLVM, these two functions mask RHS with an
// appropriate mask unconditionally (i.e. the fallback behavior for
// all shifts). For 32- and 64-bit types, this matches the semantics
// of Java. (See related discussion on #1877 and #10183.)
fn build_unchecked_lshift<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
lhs: ValueRef,
rhs: ValueRef,
binop_debug_loc: DebugLoc) -> ValueRef {
let rhs = base::cast_shift_expr_rhs(bcx, ast::BinOp_::BiShl, lhs, rhs);
// #1877, #10183: Ensure that input is always valid
let rhs = shift_mask_rhs(bcx, rhs, binop_debug_loc);
Shl(bcx, lhs, rhs, binop_debug_loc)
}
fn build_unchecked_rshift<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
lhs_t: Ty<'tcx>,
lhs: ValueRef,
rhs: ValueRef,
binop_debug_loc: DebugLoc) -> ValueRef {
let rhs = base::cast_shift_expr_rhs(bcx, ast::BinOp_::BiShr, lhs, rhs);
// #1877, #10183: Ensure that input is always valid
let rhs = shift_mask_rhs(bcx, rhs, binop_debug_loc);
let is_signed = ty::type_is_signed(lhs_t);
if is_signed {
AShr(bcx, lhs, rhs, binop_debug_loc)
} else {
LShr(bcx, lhs, rhs, binop_debug_loc)
}
}
fn shift_mask_rhs<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
rhs: ValueRef,
debug_loc: DebugLoc) -> ValueRef {
let rhs_llty = val_ty(rhs);
let mask = shift_mask_val(rhs_llty);
And(bcx, rhs, C_integral(rhs_llty, mask, false), debug_loc)
}
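// Illustrative aside, not part of the original source: at the source level the
// masking above amounts to the following, assuming a 32-bit operand. `37 & 31`
// is `5`, so an out-of-range shift wraps instead of being LLVM undefined
// behavior, matching Java's shift semantics:
//
//     fn masked_shl(lhs: u32, rhs: u32) -> u32 {
//         let width = 32;               // bit width of the operand type
//         lhs << (rhs & (width - 1))    // mask keeps the shift in 0..width
//     }
//     // masked_shl(1, 37) == 1 << 5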
fn with_overflow_check<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, oop: OverflowOp, info: NodeIdAndSpan,
lhs_t: Ty<'tcx>, lhs: ValueRef,
rhs: ValueRef,
binop_debug_loc: DebugLoc)
-> (Block<'blk, 'tcx>, ValueRef) {
if bcx.unreachable.get() { return (bcx, _Undef(lhs)); }
if bcx.ccx().check_overflow() {
match oop.codegen_strategy() {
OverflowCodegen::ViaIntrinsic(oop) =>
oop.build_intrinsic_call(bcx, info, lhs_t, lhs, rhs, binop_debug_loc),
OverflowCodegen::ViaInputCheck(oop) =>
oop.build_with_input_check(bcx, info, lhs_t, lhs, rhs, binop_debug_loc),
}
} else {
let res = match oop {
OverflowOp::Add => Add(bcx, lhs, rhs, binop_debug_loc),
OverflowOp::Sub => Sub(bcx, lhs, rhs, binop_debug_loc),
OverflowOp::Mul => Mul(bcx, lhs, rhs, binop_debug_loc),
OverflowOp::Shl =>
build_unchecked_lshift(bcx, lhs, rhs, binop_debug_loc),
OverflowOp::Shr =>
build_unchecked_rshift(bcx, lhs_t, lhs, rhs, binop_debug_loc),
};
(bcx, res)
}
}<|fim▁end|> |
fn trans_field<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>,
base: &ast::Expr,
get_idx: F) |
<|file_name|>fake_env.rs<|end_file_name|><|fim▁begin|>use compile::ExecutableDevEnv;
use foxbox_taxonomy::api::{ API, Error, User };
use foxbox_taxonomy::manager::*;
use foxbox_taxonomy::services::*;
use foxbox_taxonomy::values::*;
use std::cmp::{ Ord, PartialOrd, Ordering as OrdOrdering };
use std::fmt;
use std::collections::{ BinaryHeap, HashMap };
use std::sync::{ Arc, Mutex };
use std::sync::atomic::{ AtomicBool, Ordering as AtomicOrdering };
use std::thread;
use transformable_channels::mpsc::*;
use chrono::{ DateTime, UTC };
use serde::ser::{Serialize, Serializer};
use serde::de::{Deserialize, Deserializer};
static VERSION : [u32;4] = [0, 0, 0, 0];
/// A back-end holding values and watchers, shared by all the virtual adapters of this simulator.
struct TestSharedAdapterBackend {
/// The latest known value for each getter.
getter_values: HashMap<Id<Getter>, Result<Value, Error>>,
setter_errors: HashMap<Id<Setter>, Error>,
/// A channel to inform when things happen.
on_event: Box<ExtSender<FakeEnvEvent>>,
/// All watchers. 100% counter-optimized
counter: usize,
timers: BinaryHeap<Timer>,
trigger_timers_until: Option<DateTime<UTC>>,
watchers: HashMap<usize, (Id<Getter>, Option<Box<Range>>, Box<ExtSender<WatchEvent>>)>,
}
impl TestSharedAdapterBackend {
fn new(on_event: Box<ExtSender<FakeEnvEvent>>) -> Self {
TestSharedAdapterBackend {
getter_values: HashMap::new(),
setter_errors: HashMap::new(),
on_event: on_event,
counter: 0,
watchers: HashMap::new(),
timers: BinaryHeap::new(),
trigger_timers_until: None,
}
}
fn fetch_values(&self, mut getters: Vec<Id<Getter>>, _: User) -> ResultMap<Id<Getter>, Option<Value>, Error> {
getters.drain(..).map(|id| {
let got = self.getter_values.get(&id).cloned();
match got {
None => (id, Ok(None)),
Some(Ok(value)) => (id, Ok(Some(value))),
Some(Err(err)) => (id, Err(err)),
}
}).collect()
}
fn send_values(&self, mut values: HashMap<Id<Setter>, Value>, _: User) -> ResultMap<Id<Setter>, (), Error> {
values.drain().map(|(id, value)| {
match self.setter_errors.get(&id) {
None => {
let event = FakeEnvEvent::Send {
id: id.clone(),
value: value
};
let _ = self.on_event.send(event);
(id, Ok(()))
}
Some(error) => {
(id, Err(error.clone()))
}
}
}).collect()
}
fn register_watch(&mut self, mut source: Vec<(Id<Getter>, Option<Value>, Box<ExtSender<WatchEvent>>)>) ->
Vec<(Id<Getter>, Result<usize, Error>)>
{
let results = source.drain(..).filter_map(|(id, range, tx)| {
let range = match range {
Some(Value::Range(range)) => Some(range),
None => None,
_ => return None, // FIXME: Log
};
let result = (id.clone(), Ok(self.counter));
self.watchers.insert(self.counter, (id, range, tx));
self.counter += 1;
Some(result)
}).collect();
results
}
fn remove_watch(&mut self, key: usize) {
let _ = self.watchers.remove(&key);
}
fn inject_setter_errors(&mut self, mut errors: Vec<(Id<Setter>, Option<Error>)>)
{
for (id, error) in errors.drain(..) {
match error {
None => {
self.setter_errors.remove(&id);
},
Some(error) => {
self.setter_errors.insert(id, error);
}
}
}
}
fn inject_getter_values(&mut self, mut values: Vec<(Id<Getter>, Result<Value, Error>)>)
{
for (id, value) in values.drain(..) {
let old = self.getter_values.insert(id.clone(), value.clone());
match value {
Err(_) => continue,
Ok(value) => {
for watcher in self.watchers.values() {
let (ref watched_id, ref range, ref cb) = *watcher;
if *watched_id != id {
continue;
}
if let Some(ref range) = *range {
let was_met = if let Some(Ok(ref value)) = old {
range.contains(value)
} else {
false
};
match (was_met, range.contains(&value)) {
(false, true) => {
let _ = cb.send(WatchEvent::Enter {
id: id.clone(),
value: value.clone()
});
}
(true, false) => {
let _ = cb.send(WatchEvent::Exit {
id: id.clone(),
value: value.clone()
});
}
_ => continue
}
} else {
let _ = cb.send(WatchEvent::Enter {
id: id.clone(),
value: value.clone()
});
}
}
}
}
}
}
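// Worked example (editorial; getter id and values are hypothetical): suppose
// a watcher was registered on getter `g` with range `[10, 20]`.
//
//     inject(g, 5);   // old: none,     5 not in range  -> no event
//     inject(g, 15);  // old: 5 (out),  15 in range     -> WatchEvent::Enter
//     inject(g, 18);  // old: 15 (in),  18 in range     -> no event
//     inject(g, 25);  // old: 18 (in),  25 out of range -> WatchEvent::Exit
//
// A watcher registered without a range instead receives `Enter` for every
// successful injection, per the `else` branch above.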
fn trigger_timers_until(&mut self, date: DateTime<UTC>) {
self.trigger_timers_until = Some(date);
loop {
if let Some(ref timer) = self.timers.peek() {
if timer.date > date {
break;
}
} else {
break;
}
self.timers.pop().unwrap().trigger();
}
}
fn execute(&mut self, op: AdapterOp) {
use self::AdapterOp::*;
match op {
FetchValues { getters, tx } => {
let _ = tx.send(self.fetch_values(getters, User::None));
}
SendValues { values, tx } => {
let _ = tx.send(self.send_values(values, User::None));
}
Watch { source, tx } => {
let _ = tx.send(self.register_watch(source));
}
Unwatch(key) => {
let _ = self.remove_watch(key);
}
InjectGetterValues(values, tx) => {
let _ = self.inject_getter_values(values);
let _ = tx.send(FakeEnvEvent::Done);
}
InjectSetterErrors(errors, tx) => {
let _ = self.inject_setter_errors(errors);
let _ = tx.send(FakeEnvEvent::Done);
}
TriggerTimersUntil(date, tx) => {
let _ = self.trigger_timers_until(date.into());
let _ = tx.send(FakeEnvEvent::Done);
}
ResetTimers(tx) => {
self.trigger_timers_until = None;
self.timers.clear();
let _ = tx.send(FakeEnvEvent::Done);
}
AddTimer(timer) => {
match self.trigger_timers_until {
None => {
self.timers.push(timer);
}
Some(ref date) if *date < timer.date => {
self.timers.push(timer);
}
Some(_) => {
timer.trigger()
}
}
}
}
}
}
/// Events of interest, that should be displayed to the user of the simulator.
#[derive(Debug)]
pub enum FakeEnvEvent {
/// Some value was sent to a setter channel.
Send {
id: Id<Setter>,
value: Value
},
/// Some error took place.
Error(Error),
/// Handling of an instruction is complete.
Done,
}
/// An adapter for the simulator.
struct TestAdapter {
id: Id<AdapterId>,
/// The back-end holding the state of this adapter. Shared between all adapters.
back_end: Arc<Mutex<Box<ExtSender<AdapterOp>>>>,
}
impl TestAdapter {
fn new(id: Id<AdapterId>, back_end: Box<ExtSender<AdapterOp>>) -> Self {
TestAdapter {
id: id,
back_end: Arc::new(Mutex::new(back_end)),
}
}
}
impl Adapter for TestAdapter {
/// An id unique to this adapter. This id must persist between
/// reboots/reconnections.
fn id(&self) -> Id<AdapterId> {
self.id.clone()
}
/// The name of the adapter.
fn name(&self) -> &str {
"Test Adapter"
}
fn vendor(&self) -> &str {
"test@foxlink"
}
fn version(&self) -> &[u32;4] {
&VERSION
}
// ... more metadata
/// Request values from a group of channels.
///
/// The AdapterManager always attempts to group calls to `fetch_values` by `Adapter`, and then
/// expects the adapter to attempt to minimize the connections with the actual devices.
///
/// The AdapterManager is in charge of keeping track of the age of values.
fn fetch_values(&self, getters: Vec<Id<Getter>>, _: User) -> ResultMap<Id<Getter>, Option<Value>, Error> {
let (tx, rx) = channel();
self.back_end.lock().unwrap().send(AdapterOp::FetchValues {
getters: getters,
tx: Box::new(tx),
}).unwrap();
rx.recv().unwrap()
}
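// Editorial note: every adapter method below follows the same
// request/response pattern over the back-end thread -- a sketch:
//
//     let (tx, rx) = channel();                          // one-shot reply channel
//     back_end.send(AdapterOp::FetchValues { getters, tx: Box::new(tx) });
//     rx.recv().unwrap()                                 // block for the reply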
/// Request that values be sent to channels.
///
/// The AdapterManager always attempts to group calls to `send_values` by `Adapter`, and then
/// expects the adapter to attempt to minimize the connections with the actual devices.
fn send_values(&self, values: HashMap<Id<Setter>, Value>, _: User) -> ResultMap<Id<Setter>, (), Error> {
let (tx, rx) = channel();
self.back_end.lock().unwrap().send(AdapterOp::SendValues {
values: values,
tx: Box::new(tx),
}).unwrap();
rx.recv().unwrap()
}
/// Watch a bunch of getters as they change.
///
/// The `AdapterManager` always attempts to group calls to `fetch_values` by `Adapter`, and
/// then expects the adapter to attempt to minimize the connections with the actual devices.
/// The Adapter should however be ready to handle concurrent `register_watch` on the same
/// devices, possibly with distinct `Option<Range>` options.
///
/// If a `Range` option is set, the watcher expects to receive `EnterRange`/`ExitRange` events
/// whenever the value available on the device enters/exits the range. If the `Range` is
/// a `Range::Eq(x)`, the adapter may decide to reject the request or to interpret it as
/// a `Range::BetweenEq { min: x, max: x }`.
///
/// If no `Range` option is set, the watcher expects to receive `EnterRange` events whenever
/// a new value is available on the device. The adapter may decide to reject the request if
/// this is clearly not the expected usage for a device, or to throttle it.
fn register_watch(&self, source: Vec<(Id<Getter>, Option<Value>, Box<ExtSender<WatchEvent>>)>) ->
Vec<(Id<Getter>, Result<Box<AdapterWatchGuard>, Error>)>
{
let (tx, rx) = channel();
self.back_end.lock().unwrap().send(AdapterOp::Watch {
source: source,
tx: Box::new(tx),
}).unwrap();
let received = rx.recv().unwrap();
let tx_unregister = self.back_end.lock().unwrap().clone();
received.iter().map(|&(ref id, ref result)| {
let tx_unregister = tx_unregister.clone();
(id.clone(), match *result {
Err(ref err) => Err(err.clone()),
Ok(ref key) => {
let guard = TestAdapterWatchGuard {
tx: Arc::new(Mutex::new(Box::new(tx_unregister))),
key: key.clone()
};
Ok(Box::new(guard) as Box<AdapterWatchGuard>)
}
})
}).collect()
}
}
/// A watchguard for a TestAdapter
#[derive(Clone)]
struct TestAdapterWatchGuard {
tx: Arc<Mutex<Box<ExtSender<AdapterOp>>>>,
key: usize,
}
impl AdapterWatchGuard for TestAdapterWatchGuard {}
impl Drop for TestAdapterWatchGuard {
fn drop(&mut self) {
let _ = self.tx.lock().unwrap().send(AdapterOp::Unwatch(self.key));
}
}
#[derive(Clone)]
struct Timer {
is_dropped: Arc<AtomicBool>,
date: DateTime<UTC>,
on_triggered: Box<ExtSender<()>>,
}
impl Timer {
fn trigger(&self) {
if self.is_dropped.load(AtomicOrdering::Relaxed) {
return;
}
let _ = self.on_triggered.send(());
}
}
impl Eq for Timer {}
impl Ord for Timer {
fn cmp(&self, other: &Self) -> OrdOrdering {
self.date.cmp(&other.date).reverse()
}
}
impl PartialEq for Timer {
fn eq(&self, other: &Self) -> bool {
self.date == other.date
}
}
impl PartialOrd for Timer {
fn partial_cmp(&self, other: &Self) -> Option<OrdOrdering> {
Some(self.cmp(other))
}
}
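// Editorial note: `std::collections::BinaryHeap` is a max-heap, so the
// `reverse()` in `Ord::cmp` above deliberately inverts the date ordering,
// turning the heap into a min-heap where `peek()`/`pop()` yield the timer
// with the *earliest* deadline first -- exactly what `trigger_timers_until`
// relies on. A minimal sketch (fields elided):
//
//     heap.push(Timer { date: later,   .. });
//     heap.push(Timer { date: earlier, .. });
//     assert_eq!(heap.pop().unwrap().date, earlier);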
pub struct TimerGuard(Arc<AtomicBool>);
impl Drop for TimerGuard {
fn drop(&mut self) {
self.0.store(true, AtomicOrdering::Relaxed)
}
}
/// The test environment.
#[derive(Clone)]
pub struct FakeEnv {
/// The manager in charge of all adapters.
manager: Arc<AdapterManager>,
///<|fim▁hole|> fn fmt(&self, _: &mut fmt::Formatter) -> Result<(), fmt::Error> {
Ok(())
}
}
impl ExecutableDevEnv for FakeEnv {
// Don't bother stopping watches.
type WatchGuard = <AdapterManager as API>::WatchGuard;
type API = AdapterManager;
fn api(&self) -> &Self::API {
&self.manager
}
type TimerGuard = TimerGuard;
fn start_timer(&self, duration: Duration, timer: Box<ExtSender<()>>) -> Self::TimerGuard {
let is_dropped = Arc::new(AtomicBool::new(false));
let trigger = Timer {
date: UTC::now() + duration.into(),
on_triggered: timer,
is_dropped: is_dropped.clone()
};
let _ = self.back_end.send(AdapterOp::AddTimer(trigger));
TimerGuard(is_dropped)
}
}
impl FakeEnv {
pub fn new(on_event: Box<ExtSender<FakeEnvEvent>>) -> Self {
let (tx, rx) = channel();
let on_event_clone = on_event.clone();
thread::spawn(move || {
let mut back_end = TestSharedAdapterBackend::new(on_event_clone);
for msg in rx {
back_end.execute(msg);
}
});
FakeEnv {
on_event: on_event,
manager: Arc::new(AdapterManager::new(None)),
back_end: Box::new(tx),
}
}
fn report_error<T>(&self, result: Result<T, Error>) {
match result {
Ok(_) => {},
Err(err) => {
let _ = self.on_event.send(FakeEnvEvent::Error(err));
}
}
}
/// Execute instructions issued by the user.
pub fn execute(&self, instruction: Instruction) {
use self::Instruction::*;
match instruction {
AddAdapters(vec) => {
for id in vec {
let id = Id::new(&id);
let adapter = Arc::new(TestAdapter::new(id, self.back_end.clone()));
let result = self.manager.add_adapter(adapter);
self.report_error(result);
}
let _ = self.on_event.send(FakeEnvEvent::Done);
},
AddServices(vec) => {
for service in vec {
let result = self.manager.add_service(service);
self.report_error(result);
}
let _ = self.on_event.send(FakeEnvEvent::Done);
},
AddGetters(vec) => {
for getter in vec {
let result = self.manager.add_getter(getter);
self.report_error(result);
}
let _ = self.on_event.send(FakeEnvEvent::Done);
},
RemoveGetters(vec) => {
for getter in vec {
let result = self.manager.remove_getter(&getter);
self.report_error(result);
}
let _ = self.on_event.send(FakeEnvEvent::Done);
},
AddSetters(vec) => {
for setter in vec {
let result = self.manager.add_setter(setter);
self.report_error(result);
}
let _ = self.on_event.send(FakeEnvEvent::Done);
},
RemoveSetters(vec) => {
for setter in vec {
let result = self.manager.remove_setter(&setter);
self.report_error(result);
}
let _ = self.on_event.send(FakeEnvEvent::Done);
},
InjectGetterValues(vec) => {
self.back_end.send(AdapterOp::InjectGetterValues(vec, self.on_event.clone())).unwrap();
},
InjectSetterErrors(vec) => {
self.back_end.send(AdapterOp::InjectSetterErrors(vec, self.on_event.clone())).unwrap();
}
TriggerTimersUntil(date) => {
self.back_end.send(AdapterOp::TriggerTimersUntil(date, self.on_event.clone())).unwrap();
}
ResetTimers => {
self.back_end.send(AdapterOp::ResetTimers(self.on_event.clone())).unwrap();
}
// _ => unimplemented!()
}
}
}
impl Serialize for FakeEnv {
fn serialize<S>(&self, _: &mut S) -> Result<(), S::Error> where S: Serializer {
panic!("Why are we attempting to serialize the env?")
}
}
impl Deserialize for FakeEnv {
fn deserialize<D>(_: &mut D) -> Result<Self, D::Error> where D: Deserializer {
panic!("Why are we attempting to deserialize the env?")
}
}
#[derive(Deserialize, Debug)]
/// Instructions given to the simulator by the user.
pub enum Instruction {
AddAdapters(Vec<String>),
AddServices(Vec<Service>),
AddGetters(Vec<Channel<Getter>>),
AddSetters(Vec<Channel<Setter>>),
RemoveGetters(Vec<Id<Getter>>),
RemoveSetters(Vec<Id<Setter>>),
InjectGetterValues(Vec<(Id<Getter>, Result<Value, Error>)>),
InjectSetterErrors(Vec<(Id<Setter>, Option<Error>)>),
TriggerTimersUntil(TimeStamp),
ResetTimers,
}
/// Operations internal to a TestAdapter.
enum AdapterOp {
FetchValues {
getters: Vec<Id<Getter>>,
tx: Box<ExtSender<ResultMap<Id<Getter>, Option<Value>, Error>>>
},
SendValues {
values: HashMap<Id<Setter>, Value>,
tx: Box<ExtSender<ResultMap<Id<Setter>, (), Error>>>
},
Watch {
source: Vec<(Id<Getter>, Option<Value>, Box<ExtSender<WatchEvent>>)>,
tx: Box<ExtSender<Vec<(Id<Getter>, Result<usize, Error>)>>>
},
AddTimer(Timer),
Unwatch(usize),
InjectGetterValues(Vec<(Id<Getter>, Result<Value, Error>)>, Box<ExtSender<FakeEnvEvent>>),
InjectSetterErrors(Vec<(Id<Setter>, Option<Error>)>, Box<ExtSender<FakeEnvEvent>>),
TriggerTimersUntil(TimeStamp, Box<ExtSender<FakeEnvEvent>>),
ResetTimers(Box<ExtSender<FakeEnvEvent>>),
}<|fim▁end|> | on_event: Box<ExtSender<FakeEnvEvent>>,
back_end: Box<ExtSender<AdapterOp>>,
}
impl fmt::Debug for FakeEnv { |
<|file_name|>IN.py<|end_file_name|><|fim▁begin|># Generated by h2py from /usr/include/netinet/in.h
# Included from sys/feature_tests.h
# Included from sys/isa_defs.h
_CHAR_ALIGNMENT = 1
_SHORT_ALIGNMENT = 2
_INT_ALIGNMENT = 4
_LONG_ALIGNMENT = 8
_LONG_LONG_ALIGNMENT = 8
_DOUBLE_ALIGNMENT = 8
_LONG_DOUBLE_ALIGNMENT = 16
_POINTER_ALIGNMENT = 8
_MAX_ALIGNMENT = 16
_ALIGNMENT_REQUIRED = 1
_CHAR_ALIGNMENT = 1
_SHORT_ALIGNMENT = 2
_INT_ALIGNMENT = 4
_LONG_ALIGNMENT = 4
_LONG_LONG_ALIGNMENT = 4
_DOUBLE_ALIGNMENT = 4
_LONG_DOUBLE_ALIGNMENT = 4
_POINTER_ALIGNMENT = 4
_MAX_ALIGNMENT = 4
_ALIGNMENT_REQUIRED = 0
_CHAR_ALIGNMENT = 1
_SHORT_ALIGNMENT = 2
_INT_ALIGNMENT = 4
_LONG_LONG_ALIGNMENT = 8
_DOUBLE_ALIGNMENT = 8
_ALIGNMENT_REQUIRED = 1
_LONG_ALIGNMENT = 4
_LONG_DOUBLE_ALIGNMENT = 8
_POINTER_ALIGNMENT = 4
_MAX_ALIGNMENT = 8
_LONG_ALIGNMENT = 8
_LONG_DOUBLE_ALIGNMENT = 16
_POINTER_ALIGNMENT = 8
_MAX_ALIGNMENT = 16
_POSIX_C_SOURCE = 1
_LARGEFILE64_SOURCE = 1
_LARGEFILE_SOURCE = 1
_FILE_OFFSET_BITS = 64
_FILE_OFFSET_BITS = 32
_POSIX_C_SOURCE = 199506L
_POSIX_PTHREAD_SEMANTICS = 1
_XOPEN_VERSION = 500
_XOPEN_VERSION = 4
_XOPEN_VERSION = 3
from TYPES import *
# Included from sys/stream.h
# Included from sys/vnode.h
from TYPES import *
# Included from sys/t_lock.h
# Included from sys/machlock.h
from TYPES import *
LOCK_HELD_VALUE = 0xff
def SPIN_LOCK(pl): return ((pl) > ipltospl(LOCK_LEVEL))
def LOCK_SAMPLE_INTERVAL(i): return (((i) & 0xff) == 0)
CLOCK_LEVEL = 10
LOCK_LEVEL = 10
DISP_LEVEL = (LOCK_LEVEL + 1)
PTR24_LSB = 5
PTR24_MSB = (PTR24_LSB + 24)
PTR24_ALIGN = 32
PTR24_BASE = 0xe0000000
# Included from sys/param.h
from TYPES import *
_POSIX_VDISABLE = 0
MAX_INPUT = 512
MAX_CANON = 256
UID_NOBODY = 60001
GID_NOBODY = UID_NOBODY
UID_NOACCESS = 60002
MAX_TASKID = 999999
MAX_MAXPID = 999999
DEFAULT_MAXPID = 999999
DEFAULT_JUMPPID = 100000
DEFAULT_MAXPID = 30000
DEFAULT_JUMPPID = 0
MAXUID = 2147483647
MAXPROJID = MAXUID
MAXLINK = 32767
NMOUNT = 40
CANBSIZ = 256
NOFILE = 20
NGROUPS_UMIN = 0
NGROUPS_UMAX = 32
NGROUPS_MAX_DEFAULT = 16
NZERO = 20
NULL = 0L
NULL = 0
CMASK = 022
CDLIMIT = (1L<<11)
NBPS = 0x20000
NBPSCTR = 512
UBSIZE = 512
SCTRSHFT = 9
SYSNAME = 9
PREMOTE = 39
MAXPATHLEN = 1024
MAXSYMLINKS = 20
MAXNAMELEN = 256
NADDR = 13
PIPE_BUF = 5120
PIPE_MAX = 5120
NBBY = 8
MAXBSIZE = 8192
DEV_BSIZE = 512
DEV_BSHIFT = 9
MAXFRAG = 8
MAXOFF32_T = 0x7fffffff
MAXOFF_T = 0x7fffffffffffffffl
MAXOFFSET_T = 0x7fffffffffffffffl
MAXOFF_T = 0x7fffffffl
MAXOFFSET_T = 0x7fffffff
def btodb(bytes): return \
def dbtob(db): return \
def lbtodb(bytes): return \
def ldbtob(db): return \
NCARGS32 = 0x100000
NCARGS64 = 0x200000
NCARGS = NCARGS64
NCARGS = NCARGS32
FSHIFT = 8
FSCALE = (1<<FSHIFT)
def DELAY(n): return drv_usecwait(n)
def mmu_ptob(x): return ((x) << MMU_PAGESHIFT)
def mmu_btop(x): return (((x)) >> MMU_PAGESHIFT)
def mmu_btopr(x): return ((((x) + MMU_PAGEOFFSET) >> MMU_PAGESHIFT))
def mmu_ptod(x): return ((x) << (MMU_PAGESHIFT - DEV_BSHIFT))
def ptod(x): return ((x) << (PAGESHIFT - DEV_BSHIFT))
def ptob(x): return ((x) << PAGESHIFT)
def btop(x): return (((x) >> PAGESHIFT))
def btopr(x): return ((((x) + PAGEOFFSET) >> PAGESHIFT))
def dtop(DD): return (((DD) + NDPP - 1) >> (PAGESHIFT - DEV_BSHIFT))
def dtopt(DD): return ((DD) >> (PAGESHIFT - DEV_BSHIFT))
_AIO_LISTIO_MAX = (4096)
_AIO_MAX = (-1)
_MQ_OPEN_MAX = (32)
_MQ_PRIO_MAX = (32)
_SEM_NSEMS_MAX = INT_MAX
_SEM_VALUE_MAX = INT_MAX
# Included from sys/unistd.h
_CS_PATH = 65
_CS_LFS_CFLAGS = 68
_CS_LFS_LDFLAGS = 69
_CS_LFS_LIBS = 70
_CS_LFS_LINTFLAGS = 71
_CS_LFS64_CFLAGS = 72
_CS_LFS64_LDFLAGS = 73
_CS_LFS64_LIBS = 74
_CS_LFS64_LINTFLAGS = 75
_CS_XBS5_ILP32_OFF32_CFLAGS = 700
_CS_XBS5_ILP32_OFF32_LDFLAGS = 701
_CS_XBS5_ILP32_OFF32_LIBS = 702
_CS_XBS5_ILP32_OFF32_LINTFLAGS = 703
_CS_XBS5_ILP32_OFFBIG_CFLAGS = 705
_CS_XBS5_ILP32_OFFBIG_LDFLAGS = 706
_CS_XBS5_ILP32_OFFBIG_LIBS = 707
_CS_XBS5_ILP32_OFFBIG_LINTFLAGS = 708
_CS_XBS5_LP64_OFF64_CFLAGS = 709
_CS_XBS5_LP64_OFF64_LDFLAGS = 710
_CS_XBS5_LP64_OFF64_LIBS = 711
_CS_XBS5_LP64_OFF64_LINTFLAGS = 712
_CS_XBS5_LPBIG_OFFBIG_CFLAGS = 713
_CS_XBS5_LPBIG_OFFBIG_LDFLAGS = 714
_CS_XBS5_LPBIG_OFFBIG_LIBS = 715
_CS_XBS5_LPBIG_OFFBIG_LINTFLAGS = 716
_SC_ARG_MAX = 1
_SC_CHILD_MAX = 2
_SC_CLK_TCK = 3
_SC_NGROUPS_MAX = 4
_SC_OPEN_MAX = 5
_SC_JOB_CONTROL = 6
_SC_SAVED_IDS = 7
_SC_VERSION = 8
_SC_PASS_MAX = 9
_SC_LOGNAME_MAX = 10
_SC_PAGESIZE = 11
_SC_XOPEN_VERSION = 12
_SC_NPROCESSORS_CONF = 14
_SC_NPROCESSORS_ONLN = 15
_SC_STREAM_MAX = 16
_SC_TZNAME_MAX = 17
_SC_AIO_LISTIO_MAX = 18
_SC_AIO_MAX = 19
_SC_AIO_PRIO_DELTA_MAX = 20
_SC_ASYNCHRONOUS_IO = 21
_SC_DELAYTIMER_MAX = 22
_SC_FSYNC = 23
_SC_MAPPED_FILES = 24
_SC_MEMLOCK = 25
_SC_MEMLOCK_RANGE = 26
_SC_MEMORY_PROTECTION = 27
_SC_MESSAGE_PASSING = 28
_SC_MQ_OPEN_MAX = 29
_SC_MQ_PRIO_MAX = 30
_SC_PRIORITIZED_IO = 31
_SC_PRIORITY_SCHEDULING = 32
_SC_REALTIME_SIGNALS = 33
_SC_RTSIG_MAX = 34
_SC_SEMAPHORES = 35
_SC_SEM_NSEMS_MAX = 36
_SC_SEM_VALUE_MAX = 37
_SC_SHARED_MEMORY_OBJECTS = 38
_SC_SIGQUEUE_MAX = 39
_SC_SIGRT_MIN = 40
_SC_SIGRT_MAX = 41
_SC_SYNCHRONIZED_IO = 42
_SC_TIMERS = 43
_SC_TIMER_MAX = 44
_SC_2_C_BIND = 45
_SC_2_C_DEV = 46
_SC_2_C_VERSION = 47
_SC_2_FORT_DEV = 48
_SC_2_FORT_RUN = 49
_SC_2_LOCALEDEF = 50
_SC_2_SW_DEV = 51
_SC_2_UPE = 52
_SC_2_VERSION = 53
_SC_BC_BASE_MAX = 54
_SC_BC_DIM_MAX = 55
_SC_BC_SCALE_MAX = 56
_SC_BC_STRING_MAX = 57
_SC_COLL_WEIGHTS_MAX = 58
_SC_EXPR_NEST_MAX = 59
_SC_LINE_MAX = 60
_SC_RE_DUP_MAX = 61
_SC_XOPEN_CRYPT = 62
_SC_XOPEN_ENH_I18N = 63
_SC_XOPEN_SHM = 64
_SC_2_CHAR_TERM = 66
_SC_XOPEN_XCU_VERSION = 67
_SC_ATEXIT_MAX = 76
_SC_IOV_MAX = 77
_SC_XOPEN_UNIX = 78
_SC_PAGE_SIZE = _SC_PAGESIZE
_SC_T_IOV_MAX = 79
_SC_PHYS_PAGES = 500
_SC_AVPHYS_PAGES = 501
_SC_COHER_BLKSZ = 503
_SC_SPLIT_CACHE = 504
_SC_ICACHE_SZ = 505
_SC_DCACHE_SZ = 506
_SC_ICACHE_LINESZ = 507
_SC_DCACHE_LINESZ = 508
_SC_ICACHE_BLKSZ = 509
_SC_DCACHE_BLKSZ = 510
_SC_DCACHE_TBLKSZ = 511
_SC_ICACHE_ASSOC = 512
_SC_DCACHE_ASSOC = 513
_SC_MAXPID = 514
_SC_STACK_PROT = 515
_SC_THREAD_DESTRUCTOR_ITERATIONS = 568
_SC_GETGR_R_SIZE_MAX = 569
_SC_GETPW_R_SIZE_MAX = 570
_SC_LOGIN_NAME_MAX = 571
_SC_THREAD_KEYS_MAX = 572
_SC_THREAD_STACK_MIN = 573
_SC_THREAD_THREADS_MAX = 574
_SC_TTY_NAME_MAX = 575
_SC_THREADS = 576
_SC_THREAD_ATTR_STACKADDR = 577
_SC_THREAD_ATTR_STACKSIZE = 578
_SC_THREAD_PRIORITY_SCHEDULING = 579
_SC_THREAD_PRIO_INHERIT = 580
_SC_THREAD_PRIO_PROTECT = 581
_SC_THREAD_PROCESS_SHARED = 582
_SC_THREAD_SAFE_FUNCTIONS = 583
_SC_XOPEN_LEGACY = 717
_SC_XOPEN_REALTIME = 718
_SC_XOPEN_REALTIME_THREADS = 719
_SC_XBS5_ILP32_OFF32 = 720
_SC_XBS5_ILP32_OFFBIG = 721
_SC_XBS5_LP64_OFF64 = 722
_SC_XBS5_LPBIG_OFFBIG = 723
_PC_LINK_MAX = 1
_PC_MAX_CANON = 2
_PC_MAX_INPUT = 3
_PC_NAME_MAX = 4
_PC_PATH_MAX = 5
_PC_PIPE_BUF = 6
_PC_NO_TRUNC = 7
_PC_VDISABLE = 8
_PC_CHOWN_RESTRICTED = 9
_PC_ASYNC_IO = 10
_PC_PRIO_IO = 11
_PC_SYNC_IO = 12
_PC_FILESIZEBITS = 67
_PC_LAST = 67
_POSIX_VERSION = 199506L
_POSIX2_VERSION = 199209L
_POSIX2_C_VERSION = 199209L
_XOPEN_XCU_VERSION = 4
_XOPEN_REALTIME = 1
_XOPEN_ENH_I18N = 1
_XOPEN_SHM = 1
_POSIX2_C_BIND = 1
_POSIX2_CHAR_TERM = 1
_POSIX2_LOCALEDEF = 1
_POSIX2_C_DEV = 1
_POSIX2_SW_DEV = 1
_POSIX2_UPE = 1
# Included from sys/mutex.h
from TYPES import *
def MUTEX_HELD(x): return (mutex_owned(x))
# Included from sys/rwlock.h
from TYPES import *
def RW_READ_HELD(x): return (rw_read_held((x)))
def RW_WRITE_HELD(x): return (rw_write_held((x)))
def RW_LOCK_HELD(x): return (rw_lock_held((x)))
def RW_ISWRITER(x): return (rw_iswriter(x))
# Included from sys/semaphore.h
# Included from sys/thread.h
from TYPES import *
# Included from sys/klwp.h
from TYPES import *
# Included from sys/condvar.h
from TYPES import *
# Included from sys/time.h
# Included from sys/types32.h
# Included from sys/int_types.h
TIME32_MAX = INT32_MAX
TIME32_MIN = INT32_MIN
def TIMEVAL_OVERFLOW(tv): return \
from TYPES import *
DST_NONE = 0
DST_USA = 1
DST_AUST = 2
DST_WET = 3
DST_MET = 4
DST_EET = 5
DST_CAN = 6
DST_GB = 7
DST_RUM = 8
DST_TUR = 9
DST_AUSTALT = 10
ITIMER_REAL = 0
ITIMER_VIRTUAL = 1
ITIMER_PROF = 2
ITIMER_REALPROF = 3
def ITIMERVAL_OVERFLOW(itv): return \
SEC = 1
MILLISEC = 1000
MICROSEC = 1000000
NANOSEC = 1000000000
# Included from sys/time_impl.h
def TIMESPEC_OVERFLOW(ts): return \
def ITIMERSPEC_OVERFLOW(it): return \
__CLOCK_REALTIME0 = 0
CLOCK_VIRTUAL = 1
CLOCK_PROF = 2
__CLOCK_REALTIME3 = 3
CLOCK_HIGHRES = 4
CLOCK_MAX = 5
CLOCK_REALTIME = __CLOCK_REALTIME3
CLOCK_REALTIME = __CLOCK_REALTIME0
TIMER_RELTIME = 0x0
TIMER_ABSTIME = 0x1
def TICK_TO_SEC(tick): return ((tick) / hz)
def SEC_TO_TICK(sec): return ((sec) * hz)
def TICK_TO_MSEC(tick): return \
def MSEC_TO_TICK(msec): return \
def MSEC_TO_TICK_ROUNDUP(msec): return \
def TICK_TO_USEC(tick): return ((tick) * usec_per_tick)
def USEC_TO_TICK(usec): return ((usec) / usec_per_tick)
def USEC_TO_TICK_ROUNDUP(usec): return \
def TICK_TO_NSEC(tick): return ((tick) * nsec_per_tick)
def NSEC_TO_TICK(nsec): return ((nsec) / nsec_per_tick)
def NSEC_TO_TICK_ROUNDUP(nsec): return \
def TIMEVAL_TO_TICK(tvp): return \
def TIMESTRUC_TO_TICK(tsp): return \
# Included from time.h
from TYPES import *
# Included from iso/time_iso.h
NULL = 0L
NULL = 0
CLOCKS_PER_SEC = 1000000
# Included from sys/select.h
FD_SETSIZE = 65536
FD_SETSIZE = 1024
_NBBY = 8
NBBY = _NBBY
def FD_ZERO(p): return bzero((p), sizeof (*(p)))
# Included from sys/signal.h
# Included from sys/iso/signal_iso.h
SIGHUP = 1
SIGINT = 2
SIGQUIT = 3
SIGILL = 4
SIGTRAP = 5
SIGIOT = 6
SIGABRT = 6
SIGEMT = 7
SIGFPE = 8
SIGKILL = 9
SIGBUS = 10
SIGSEGV = 11
SIGSYS = 12
SIGPIPE = 13
SIGALRM = 14
SIGTERM = 15
SIGUSR1 = 16
SIGUSR2 = 17
SIGCLD = 18
SIGCHLD = 18
SIGPWR = 19
SIGWINCH = 20
SIGURG = 21
SIGPOLL = 22
SIGIO = SIGPOLL
SIGSTOP = 23
SIGTSTP = 24
SIGCONT = 25
SIGTTIN = 26
SIGTTOU = 27
SIGVTALRM = 28
SIGPROF = 29
SIGXCPU = 30
SIGXFSZ = 31
SIGWAITING = 32
SIGLWP = 33
SIGFREEZE = 34
SIGTHAW = 35
SIGCANCEL = 36
SIGLOST = 37
_SIGRTMIN = 38
_SIGRTMAX = 45
SIG_BLOCK = 1
SIG_UNBLOCK = 2
SIG_SETMASK = 3
SIGNO_MASK = 0xFF
SIGDEFER = 0x100
SIGHOLD = 0x200
SIGRELSE = 0x400
SIGIGNORE = 0x800
SIGPAUSE = 0x1000
# Included from sys/siginfo.h
from TYPES import *
SIGEV_NONE = 1
SIGEV_SIGNAL = 2
SIGEV_THREAD = 3
SI_NOINFO = 32767
SI_USER = 0
SI_LWP = (-1)
SI_QUEUE = (-2)
SI_TIMER = (-3)
SI_ASYNCIO = (-4)
SI_MESGQ = (-5)
# Included from sys/machsig.h
ILL_ILLOPC = 1
ILL_ILLOPN = 2
ILL_ILLADR = 3
ILL_ILLTRP = 4
ILL_PRVOPC = 5
ILL_PRVREG = 6
ILL_COPROC = 7
ILL_BADSTK = 8
NSIGILL = 8
EMT_TAGOVF = 1
EMT_CPCOVF = 2
NSIGEMT = 2
FPE_INTDIV = 1
FPE_INTOVF = 2
FPE_FLTDIV = 3
FPE_FLTOVF = 4
FPE_FLTUND = 5
FPE_FLTRES = 6
FPE_FLTINV = 7
FPE_FLTSUB = 8
NSIGFPE = 8
SEGV_MAPERR = 1
SEGV_ACCERR = 2
NSIGSEGV = 2
BUS_ADRALN = 1
BUS_ADRERR = 2
BUS_OBJERR = 3
NSIGBUS = 3
TRAP_BRKPT = 1
TRAP_TRACE = 2
TRAP_RWATCH = 3
TRAP_WWATCH = 4
TRAP_XWATCH = 5
NSIGTRAP = 5
CLD_EXITED = 1
CLD_KILLED = 2
CLD_DUMPED = 3
CLD_TRAPPED = 4
CLD_STOPPED = 5
CLD_CONTINUED = 6
NSIGCLD = 6
POLL_IN = 1
POLL_OUT = 2
POLL_MSG = 3
POLL_ERR = 4
POLL_PRI = 5
POLL_HUP = 6
NSIGPOLL = 6
PROF_SIG = 1
NSIGPROF = 1
SI_MAXSZ = 256
SI_MAXSZ = 128
# Included from sys/time_std_impl.h
from TYPES import *
SI32_MAXSZ = 128
def SI_CANQUEUE(c): return ((c) <= SI_QUEUE)
SA_NOCLDSTOP = 0x00020000
SA_ONSTACK = 0x00000001
SA_RESETHAND = 0x00000002
SA_RESTART = 0x00000004
SA_SIGINFO = 0x00000008
SA_NODEFER = 0x00000010
SA_NOCLDWAIT = 0x00010000
SA_WAITSIG = 0x00010000
NSIG = 46
MAXSIG = 45
S_SIGNAL = 1
S_SIGSET = 2
S_SIGACTION = 3
S_NONE = 4
MINSIGSTKSZ = 2048
SIGSTKSZ = 8192
SS_ONSTACK = 0x00000001
SS_DISABLE = 0x00000002
SN_PROC = 1
SN_CANCEL = 2
SN_SEND = 3
# Included from sys/ucontext.h
from TYPES import *
# Included from sys/regset.h
REG_CCR = (0)
REG_PSR = (0)
REG_PSR = (0)
REG_PC = (1)
REG_nPC = (2)
REG_Y = (3)
REG_G1 = (4)
REG_G2 = (5)
REG_G3 = (6)
REG_G4 = (7)
REG_G5 = (8)
REG_G6 = (9)
REG_G7 = (10)
REG_O0 = (11)
REG_O1 = (12)
REG_O2 = (13)
REG_O3 = (14)
REG_O4 = (15)
REG_O5 = (16)
REG_O6 = (17)
REG_O7 = (18)
REG_ASI = (19)
REG_FPRS = (20)
REG_PS = REG_PSR
REG_SP = REG_O6
REG_R0 = REG_O0
REG_R1 = REG_O1
_NGREG = 21
_NGREG = 19
NGREG = _NGREG
_NGREG32 = 19
_NGREG64 = 21
SPARC_MAXREGWINDOW = 31
MAXFPQ = 16
XRS_ID = 0x78727300
# Included from v7/sys/privregs.h
# Included from v7/sys/psr.h
PSR_CWP = 0x0000001F
PSR_ET = 0x00000020
PSR_PS = 0x00000040
PSR_S = 0x00000080
PSR_PIL = 0x00000F00
PSR_EF = 0x00001000
PSR_EC = 0x00002000
PSR_RSV = 0x000FC000
PSR_ICC = 0x00F00000
PSR_C = 0x00100000
PSR_V = 0x00200000
PSR_Z = 0x00400000
PSR_N = 0x00800000
PSR_VER = 0x0F000000
PSR_IMPL = 0xF0000000
PSL_ALLCC = PSR_ICC
PSL_USER = (PSR_S)
PSL_USERMASK = (PSR_ICC)
PSL_UBITS = (PSR_ICC|PSR_EF)
def USERMODE(ps): return (((ps) & PSR_PS) == 0)
# Included from sys/fsr.h
FSR_CEXC = 0x0000001f
FSR_AEXC = 0x000003e0
FSR_FCC = 0x00000c00
FSR_PR = 0x00001000
FSR_QNE = 0x00002000
FSR_FTT = 0x0001c000
FSR_VER = 0x000e0000
FSR_TEM = 0x0f800000
FSR_RP = 0x30000000
FSR_RD = 0xc0000000
FSR_VER_SHIFT = 17
FSR_FCC1 = 0x00000003
FSR_FCC2 = 0x0000000C
FSR_FCC3 = 0x00000030
FSR_CEXC_NX = 0x00000001
FSR_CEXC_DZ = 0x00000002
FSR_CEXC_UF = 0x00000004
FSR_CEXC_OF = 0x00000008
FSR_CEXC_NV = 0x00000010
FSR_AEXC_NX = (0x1 << 5)
FSR_AEXC_DZ = (0x2 << 5)
FSR_AEXC_UF = (0x4 << 5)
FSR_AEXC_OF = (0x8 << 5)
FSR_AEXC_NV = (0x10 << 5)
FTT_NONE = 0
FTT_IEEE = 1
FTT_UNFIN = 2
FTT_UNIMP = 3
FTT_SEQ = 4
FTT_ALIGN = 5
FTT_DFAULT = 6
FSR_FTT_SHIFT = 14
FSR_FTT_IEEE = (FTT_IEEE << FSR_FTT_SHIFT)
FSR_FTT_UNFIN = (FTT_UNFIN << FSR_FTT_SHIFT)
FSR_FTT_UNIMP = (FTT_UNIMP << FSR_FTT_SHIFT)
FSR_FTT_SEQ = (FTT_SEQ << FSR_FTT_SHIFT)
FSR_FTT_ALIGN = (FTT_ALIGN << FSR_FTT_SHIFT)
FSR_FTT_DFAULT = (FTT_DFAULT << FSR_FTT_SHIFT)
FSR_TEM_NX = (0x1 << 23)
FSR_TEM_DZ = (0x2 << 23)
FSR_TEM_UF = (0x4 << 23)
FSR_TEM_OF = (0x8 << 23)
FSR_TEM_NV = (0x10 << 23)
RP_DBLEXT = 0
RP_SINGLE = 1
RP_DOUBLE = 2
RP_RESERVED = 3
RD_NEAR = 0
RD_ZER0 = 1
RD_POSINF = 2
RD_NEGINF = 3
FPRS_DL = 0x1
FPRS_DU = 0x2
FPRS_FEF = 0x4
PIL_MAX = 0xf
def SAVE_GLOBALS(RP): return \
def RESTORE_GLOBALS(RP): return \
def SAVE_OUTS(RP): return \
def RESTORE_OUTS(RP): return \
def SAVE_WINDOW(SBP): return \
def RESTORE_WINDOW(SBP): return \
def STORE_FPREGS(FP): return \
def LOAD_FPREGS(FP): return \
_SPARC_MAXREGWINDOW = 31
_XRS_ID = 0x78727300
GETCONTEXT = 0
SETCONTEXT = 1
UC_SIGMASK = 001
UC_STACK = 002
UC_CPU = 004
UC_MAU = 010
UC_FPU = UC_MAU
UC_INTR = 020
UC_ASR = 040
UC_MCONTEXT = (UC_CPU|UC_FPU|UC_ASR)
UC_ALL = (UC_SIGMASK|UC_STACK|UC_MCONTEXT)
_SIGQUEUE_MAX = 32
_SIGNOTIFY_MAX = 32
# Included from sys/pcb.h
INSTR_VALID = 0x02
NORMAL_STEP = 0x04
WATCH_STEP = 0x08
CPC_OVERFLOW = 0x10
ASYNC_HWERR = 0x20
STEP_NONE = 0
STEP_REQUESTED = 1
STEP_ACTIVE = 2
STEP_WASACTIVE = 3
# Included from sys/msacct.h
LMS_USER = 0
LMS_SYSTEM = 1
LMS_TRAP = 2
LMS_TFAULT = 3
LMS_DFAULT = 4
LMS_KFAULT = 5
LMS_USER_LOCK = 6
LMS_SLEEP = 7
LMS_WAIT_CPU = 8
LMS_STOPPED = 9
NMSTATES = 10
# Included from sys/lwp.h
# Included from sys/synch.h
from TYPES import *
USYNC_THREAD = 0x00
USYNC_PROCESS = 0x01
LOCK_NORMAL = 0x00
LOCK_ERRORCHECK = 0x02
LOCK_RECURSIVE = 0x04
USYNC_PROCESS_ROBUST = 0x08
LOCK_PRIO_NONE = 0x00
LOCK_PRIO_INHERIT = 0x10
LOCK_PRIO_PROTECT = 0x20
LOCK_STALL_NP = 0x00
LOCK_ROBUST_NP = 0x40
LOCK_OWNERDEAD = 0x1
LOCK_NOTRECOVERABLE = 0x2
LOCK_INITED = 0x4
LOCK_UNMAPPED = 0x8
LWP_DETACHED = 0x00000040
LWP_SUSPENDED = 0x00000080
__LWP_ASLWP = 0x00000100
MAXSYSARGS = 8
NORMALRETURN = 0
JUSTRETURN = 1
LWP_USER = 0x01
LWP_SYS = 0x02
TS_FREE = 0x00
TS_SLEEP = 0x01
TS_RUN = 0x02
TS_ONPROC = 0x04
TS_ZOMB = 0x08
TS_STOPPED = 0x10
T_INTR_THREAD = 0x0001
T_WAKEABLE = 0x0002
T_TOMASK = 0x0004
T_TALLOCSTK = 0x0008
T_WOULDBLOCK = 0x0020
T_DONTBLOCK = 0x0040
T_DONTPEND = 0x0080
T_SYS_PROF = 0x0100
T_WAITCVSEM = 0x0200
T_WATCHPT = 0x0400
T_PANIC = 0x0800
TP_HOLDLWP = 0x0002
TP_TWAIT = 0x0004
TP_LWPEXIT = 0x0008
TP_PRSTOP = 0x0010
TP_CHKPT = 0x0020
TP_EXITLWP = 0x0040
TP_PRVSTOP = 0x0080
TP_MSACCT = 0x0100
TP_STOPPING = 0x0200
TP_WATCHPT = 0x0400
TP_PAUSE = 0x0800
TP_CHANGEBIND = 0x1000
TS_LOAD = 0x0001
TS_DONT_SWAP = 0x0002
TS_SWAPENQ = 0x0004
TS_ON_SWAPQ = 0x0008
TS_CSTART = 0x0100
TS_UNPAUSE = 0x0200
TS_XSTART = 0x0400
TS_PSTART = 0x0800
TS_RESUME = 0x1000
TS_CREATE = 0x2000
TS_ALLSTART = \
(TS_CSTART|TS_UNPAUSE|TS_XSTART|TS_PSTART|TS_RESUME|TS_CREATE)
def CPR_VSTOPPED(t): return \
def THREAD_TRANSITION(tp): return thread_transition(tp);
def THREAD_STOP(tp): return \
def THREAD_ZOMB(tp): return THREAD_SET_STATE(tp, TS_ZOMB, NULL)
def SEMA_HELD(x): return (sema_held((x)))
NO_LOCKS_HELD = 1
NO_COMPETING_THREADS = 1
# Included from sys/cred.h
# Included from sys/uio.h
from TYPES import *
# Included from sys/resource.h
from TYPES import *
PRIO_PROCESS = 0
PRIO_PGRP = 1
PRIO_USER = 2
RLIMIT_CPU = 0
RLIMIT_FSIZE = 1
RLIMIT_DATA = 2
RLIMIT_STACK = 3
RLIMIT_CORE = 4
RLIMIT_NOFILE = 5
RLIMIT_VMEM = 6
RLIMIT_AS = RLIMIT_VMEM
RLIM_NLIMITS = 7
RLIM_INFINITY = (-3l)
RLIM_SAVED_MAX = (-2l)
RLIM_SAVED_CUR = (-1l)
RLIM_INFINITY = 0x7fffffff
RLIM_SAVED_MAX = 0x7ffffffe
RLIM_SAVED_CUR = 0x7ffffffd
RLIM32_INFINITY = 0x7fffffff
RLIM32_SAVED_MAX = 0x7ffffffe
RLIM32_SAVED_CUR = 0x7ffffffd
# Included from sys/model.h
# Included from sys/debug.h
def ASSERT64(x): return ASSERT(x)
def ASSERT32(x): return ASSERT(x)
DATAMODEL_MASK = 0x0FF00000
DATAMODEL_ILP32 = 0x00100000
DATAMODEL_LP64 = 0x00200000
DATAMODEL_NONE = 0
DATAMODEL_NATIVE = DATAMODEL_LP64
DATAMODEL_NATIVE = DATAMODEL_ILP32
def STRUCT_SIZE(handle): return \
def STRUCT_BUF(handle): return ((handle).ptr.m64)
<|fim▁hole|>def STRUCT_BUF(handle): return ((handle).ptr)
def SIZEOF_PTR(umodel): return sizeof (caddr_t)
def lwp_getdatamodel(t): return DATAMODEL_ILP32
RUSAGE_SELF = 0
RUSAGE_CHILDREN = -1
# Included from vm/seg_enum.h
# Included from sys/buf.h
# Included from sys/kstat.h
from TYPES import *
KSTAT_STRLEN = 31
def KSTAT_ENTER(k): return \
def KSTAT_EXIT(k): return \
KSTAT_TYPE_RAW = 0
KSTAT_TYPE_NAMED = 1
KSTAT_TYPE_INTR = 2
KSTAT_TYPE_IO = 3
KSTAT_TYPE_TIMER = 4
KSTAT_NUM_TYPES = 5
KSTAT_FLAG_VIRTUAL = 0x01
KSTAT_FLAG_VAR_SIZE = 0x02
KSTAT_FLAG_WRITABLE = 0x04
KSTAT_FLAG_PERSISTENT = 0x08
KSTAT_FLAG_DORMANT = 0x10
KSTAT_FLAG_INVALID = 0x20
KSTAT_READ = 0
KSTAT_WRITE = 1
KSTAT_DATA_CHAR = 0
KSTAT_DATA_INT32 = 1
KSTAT_DATA_UINT32 = 2
KSTAT_DATA_INT64 = 3
KSTAT_DATA_UINT64 = 4
KSTAT_DATA_LONG = KSTAT_DATA_INT32
KSTAT_DATA_ULONG = KSTAT_DATA_UINT32
KSTAT_DATA_LONG = KSTAT_DATA_INT64
KSTAT_DATA_ULONG = KSTAT_DATA_UINT64
KSTAT_DATA_LONG = 7
KSTAT_DATA_ULONG = 8
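# KSTAT_DATA_LONG/ULONG are assigned several times above because mutually
# exclusive #ifdef branches in the C header were flattened; in Python the
# last assignment wins.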
KSTAT_DATA_LONGLONG = KSTAT_DATA_INT64
KSTAT_DATA_ULONGLONG = KSTAT_DATA_UINT64
KSTAT_DATA_FLOAT = 5
KSTAT_DATA_DOUBLE = 6
KSTAT_INTR_HARD = 0
KSTAT_INTR_SOFT = 1
KSTAT_INTR_WATCHDOG = 2
KSTAT_INTR_SPURIOUS = 3
KSTAT_INTR_MULTSVC = 4
KSTAT_NUM_INTRS = 5
B_BUSY = 0x0001
B_DONE = 0x0002
B_ERROR = 0x0004
B_PAGEIO = 0x0010
B_PHYS = 0x0020
B_READ = 0x0040
B_WRITE = 0x0100
B_KERNBUF = 0x0008
B_WANTED = 0x0080
B_AGE = 0x000200
B_ASYNC = 0x000400
B_DELWRI = 0x000800
B_STALE = 0x001000
B_DONTNEED = 0x002000
B_REMAPPED = 0x004000
B_FREE = 0x008000
B_INVAL = 0x010000
B_FORCE = 0x020000
B_HEAD = 0x040000
B_NOCACHE = 0x080000
B_TRUNC = 0x100000
B_SHADOW = 0x200000
B_RETRYWRI = 0x400000
def notavail(bp): return \
def BWRITE(bp): return \
def BWRITE2(bp): return \
VROOT = 0x01
VNOCACHE = 0x02
VNOMAP = 0x04
VDUP = 0x08
VNOSWAP = 0x10
VNOMOUNT = 0x20
VISSWAP = 0x40
VSWAPLIKE = 0x80
VVFSLOCK = 0x100
VVFSWAIT = 0x200
VVMLOCK = 0x400
VDIROPEN = 0x800
VVMEXEC = 0x1000
VPXFS = 0x2000
AT_TYPE = 0x0001
AT_MODE = 0x0002
AT_UID = 0x0004
AT_GID = 0x0008
AT_FSID = 0x0010
AT_NODEID = 0x0020
AT_NLINK = 0x0040
AT_SIZE = 0x0080
AT_ATIME = 0x0100
AT_MTIME = 0x0200
AT_CTIME = 0x0400
AT_RDEV = 0x0800
AT_BLKSIZE = 0x1000
AT_NBLOCKS = 0x2000
AT_VCODE = 0x4000
AT_ALL = (AT_TYPE|AT_MODE|AT_UID|AT_GID|AT_FSID|AT_NODEID|\
AT_NLINK|AT_SIZE|AT_ATIME|AT_MTIME|AT_CTIME|\
AT_RDEV|AT_BLKSIZE|AT_NBLOCKS|AT_VCODE)
AT_STAT = (AT_MODE|AT_UID|AT_GID|AT_FSID|AT_NODEID|AT_NLINK|\
AT_SIZE|AT_ATIME|AT_MTIME|AT_CTIME|AT_RDEV)
AT_TIMES = (AT_ATIME|AT_MTIME|AT_CTIME)
AT_NOSET = (AT_NLINK|AT_RDEV|AT_FSID|AT_NODEID|AT_TYPE|\
AT_BLKSIZE|AT_NBLOCKS|AT_VCODE)
VSUID = 04000
VSGID = 02000
VSVTX = 01000
VREAD = 00400
VWRITE = 00200
VEXEC = 00100
MODEMASK = 07777
PERMMASK = 00777
def MANDMODE(mode): return (((mode) & (VSGID|(VEXEC>>3))) == VSGID)
VSA_ACL = 0x0001
VSA_ACLCNT = 0x0002
VSA_DFACL = 0x0004
VSA_DFACLCNT = 0x0008
LOOKUP_DIR = 0x01
DUMP_ALLOC = 0
DUMP_FREE = 1
DUMP_SCAN = 2
ATTR_UTIME = 0x01
ATTR_EXEC = 0x02
ATTR_COMM = 0x04
ATTR_HINT = 0x08
ATTR_REAL = 0x10
# Included from sys/poll.h
POLLIN = 0x0001
POLLPRI = 0x0002
POLLOUT = 0x0004
POLLRDNORM = 0x0040
POLLWRNORM = POLLOUT
POLLRDBAND = 0x0080
POLLWRBAND = 0x0100
POLLNORM = POLLRDNORM
POLLERR = 0x0008
POLLHUP = 0x0010
POLLNVAL = 0x0020
POLLREMOVE = 0x0800
POLLRDDATA = 0x0200
POLLNOERR = 0x0400
POLLCLOSED = 0x8000
# Included from sys/strmdep.h
def str_aligned(X): return (((ulong_t)(X) & (sizeof (long) - 1)) == 0)
# Included from sys/strft.h
tdelta_t_sz = 12
FTEV_MASK = 0x1FFF
FTEV_ISWR = 0x8000
FTEV_CS = 0x4000
FTEV_PS = 0x2000
FTEV_QMASK = 0x1F00
FTEV_ALLOCMASK = 0x1FF8
FTEV_ALLOCB = 0x0000
FTEV_ESBALLOC = 0x0001
FTEV_DESBALLOC = 0x0002
FTEV_ESBALLOCA = 0x0003
FTEV_DESBALLOCA = 0x0004
FTEV_ALLOCBIG = 0x0005
FTEV_ALLOCBW = 0x0006
FTEV_FREEB = 0x0008
FTEV_DUPB = 0x0009
FTEV_COPYB = 0x000A
FTEV_CALLER = 0x000F
FTEV_PUT = 0x0100
FTEV_FSYNCQ = 0x0103
FTEV_DSYNCQ = 0x0104
FTEV_PUTQ = 0x0105
FTEV_GETQ = 0x0106
FTEV_RMVQ = 0x0107
FTEV_INSQ = 0x0108
FTEV_PUTBQ = 0x0109
FTEV_FLUSHQ = 0x010A
FTEV_REPLYQ = 0x010B
FTEV_PUTNEXT = 0x010D
FTEV_RWNEXT = 0x010E
FTEV_QWINNER = 0x010F
FTEV_GEWRITE = 0x0101
def FTFLW_HASH(h): return (((unsigned)(h))%ftflw_hash_sz)
FTBLK_EVNTS = 0x9
QENAB = 0x00000001
QWANTR = 0x00000002
QWANTW = 0x00000004
QFULL = 0x00000008
QREADR = 0x00000010
QUSE = 0x00000020
QNOENB = 0x00000040
QBACK = 0x00000100
QHLIST = 0x00000200
QPAIR = 0x00000800
QPERQ = 0x00001000
QPERMOD = 0x00002000
QMTSAFE = 0x00004000
QMTOUTPERIM = 0x00008000
QMT_TYPEMASK = (QPAIR|QPERQ|QPERMOD|QMTSAFE|QMTOUTPERIM)
QINSERVICE = 0x00010000
QWCLOSE = 0x00020000
QEND = 0x00040000
QWANTWSYNC = 0x00080000
QSYNCSTR = 0x00100000
QISDRV = 0x00200000
QHOT = 0x00400000
QNEXTHOT = 0x00800000
_QINSERTING = 0x04000000
_QREMOVING = 0x08000000
Q_SQQUEUED = 0x01
Q_SQDRAINING = 0x02
QB_FULL = 0x01
QB_WANTW = 0x02
QB_BACK = 0x04
NBAND = 256
STRUIOT_NONE = -1
STRUIOT_DONTCARE = 0
STRUIOT_STANDARD = 1
STRUIOT_IP = 2
DBLK_REFMIN = 0x01
STRUIO_SPEC = 0x01
STRUIO_DONE = 0x02
STRUIO_IP = 0x04
STRUIO_ZC = 0x08
STRUIO_ICK = 0x10
MSGMARK = 0x01
MSGNOLOOP = 0x02
MSGDELIM = 0x04
MSGNOGET = 0x08
MSGMARKNEXT = 0x10
MSGNOTMARKNEXT = 0x20
M_DATA = 0x00
M_PROTO = 0x01
M_BREAK = 0x08
M_PASSFP = 0x09
M_EVENT = 0x0a
M_SIG = 0x0b
M_DELAY = 0x0c
M_CTL = 0x0d
M_IOCTL = 0x0e
M_SETOPTS = 0x10
M_RSE = 0x11
M_IOCACK = 0x81
M_IOCNAK = 0x82
M_PCPROTO = 0x83
M_PCSIG = 0x84
M_READ = 0x85
M_FLUSH = 0x86
M_STOP = 0x87
M_START = 0x88
M_HANGUP = 0x89
M_ERROR = 0x8a
M_COPYIN = 0x8b
M_COPYOUT = 0x8c
M_IOCDATA = 0x8d
M_PCRSE = 0x8e
M_STOPI = 0x8f
M_STARTI = 0x90
M_PCEVENT = 0x91
M_UNHANGUP = 0x92
QNORM = 0x00
QPCTL = 0x80
IOC_MODELS = DATAMODEL_MASK
IOC_ILP32 = DATAMODEL_ILP32
IOC_LP64 = DATAMODEL_LP64
IOC_NATIVE = DATAMODEL_NATIVE
IOC_NONE = DATAMODEL_NONE
STRCANON = 0x01
RECOPY = 0x02
SO_ALL = 0x003f
SO_READOPT = 0x0001
SO_WROFF = 0x0002
SO_MINPSZ = 0x0004
SO_MAXPSZ = 0x0008
SO_HIWAT = 0x0010
SO_LOWAT = 0x0020
SO_MREADON = 0x0040
SO_MREADOFF = 0x0080
SO_NDELON = 0x0100
SO_NDELOFF = 0x0200
SO_ISTTY = 0x0400
SO_ISNTTY = 0x0800
SO_TOSTOP = 0x1000
SO_TONSTOP = 0x2000
SO_BAND = 0x4000
SO_DELIM = 0x8000
SO_NODELIM = 0x010000
SO_STRHOLD = 0x020000
SO_ERROPT = 0x040000
SO_COPYOPT = 0x080000
SO_MAXBLK = 0x100000
DEF_IOV_MAX = 16
INFOD_FIRSTBYTES = 0x02
INFOD_BYTES = 0x04
INFOD_COUNT = 0x08
INFOD_COPYOUT = 0x10
MODOPEN = 0x1
CLONEOPEN = 0x2
CONSOPEN = 0x4
OPENFAIL = -1
BPRI_LO = 1
BPRI_MED = 2
BPRI_HI = 3
BPRI_FT = 4
INFPSZ = -1
FLUSHALL = 1
FLUSHDATA = 0
STRHIGH = 5120
STRLOW = 1024
MAXIOCBSZ = 1024
PERIM_INNER = 1
PERIM_OUTER = 2
def datamsg(type): return \
def straln(a): return (caddr_t)((intptr_t)(a) & ~(sizeof (int)-1))
# Included from sys/byteorder.h
def ntohl(x): return (x)
def ntohs(x): return (x)
def htonl(x): return (x)
def htons(x): return (x)
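# SPARC is big-endian (network byte order), so the byte-order conversions
# above are identity functions on this platform.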
IPPROTO_IP = 0
IPPROTO_HOPOPTS = 0
IPPROTO_ICMP = 1
IPPROTO_IGMP = 2
IPPROTO_GGP = 3
IPPROTO_ENCAP = 4
IPPROTO_TCP = 6
IPPROTO_EGP = 8
IPPROTO_PUP = 12
IPPROTO_UDP = 17
IPPROTO_IDP = 22
IPPROTO_IPV6 = 41
IPPROTO_ROUTING = 43
IPPROTO_FRAGMENT = 44
IPPROTO_RSVP = 46
IPPROTO_ESP = 50
IPPROTO_AH = 51
IPPROTO_ICMPV6 = 58
IPPROTO_NONE = 59
IPPROTO_DSTOPTS = 60
IPPROTO_HELLO = 63
IPPROTO_ND = 77
IPPROTO_EON = 80
IPPROTO_PIM = 103
IPPROTO_RAW = 255
IPPROTO_MAX = 256
IPPORT_ECHO = 7
IPPORT_DISCARD = 9
IPPORT_SYSTAT = 11
IPPORT_DAYTIME = 13
IPPORT_NETSTAT = 15
IPPORT_FTP = 21
IPPORT_TELNET = 23
IPPORT_SMTP = 25
IPPORT_TIMESERVER = 37
IPPORT_NAMESERVER = 42
IPPORT_WHOIS = 43
IPPORT_MTP = 57
IPPORT_BOOTPS = 67
IPPORT_BOOTPC = 68
IPPORT_TFTP = 69
IPPORT_RJE = 77
IPPORT_FINGER = 79
IPPORT_TTYLINK = 87
IPPORT_SUPDUP = 95
IPPORT_EXECSERVER = 512
IPPORT_LOGINSERVER = 513
IPPORT_CMDSERVER = 514
IPPORT_EFSSERVER = 520
IPPORT_BIFFUDP = 512
IPPORT_WHOSERVER = 513
IPPORT_ROUTESERVER = 520
IPPORT_RESERVED = 1024
IPPORT_USERRESERVED = 5000
IMPLINK_IP = 155
IMPLINK_LOWEXPER = 156
IMPLINK_HIGHEXPER = 158
IN_CLASSA_NSHIFT = 24
IN_CLASSA_MAX = 128
IN_CLASSB_NSHIFT = 16
IN_CLASSB_MAX = 65536
IN_CLASSC_NSHIFT = 8
IN_CLASSD_NSHIFT = 28
def IN_MULTICAST(i): return IN_CLASSD(i)
IN_LOOPBACKNET = 127
def IN_SET_LOOPBACK_ADDR(a): return \
def IN6_IS_ADDR_UNSPECIFIED(addr): return \
def IN6_IS_ADDR_LOOPBACK(addr): return \
def IN6_IS_ADDR_LOOPBACK(addr): return \
def IN6_IS_ADDR_MULTICAST(addr): return \
def IN6_IS_ADDR_MULTICAST(addr): return \
def IN6_IS_ADDR_LINKLOCAL(addr): return \
def IN6_IS_ADDR_LINKLOCAL(addr): return \
def IN6_IS_ADDR_SITELOCAL(addr): return \
def IN6_IS_ADDR_SITELOCAL(addr): return \
def IN6_IS_ADDR_V4MAPPED(addr): return \
def IN6_IS_ADDR_V4MAPPED(addr): return \
def IN6_IS_ADDR_V4MAPPED_ANY(addr): return \
def IN6_IS_ADDR_V4MAPPED_ANY(addr): return \
def IN6_IS_ADDR_V4COMPAT(addr): return \
def IN6_IS_ADDR_V4COMPAT(addr): return \
def IN6_IS_ADDR_MC_RESERVED(addr): return \
def IN6_IS_ADDR_MC_RESERVED(addr): return \
def IN6_IS_ADDR_MC_NODELOCAL(addr): return \
def IN6_IS_ADDR_MC_NODELOCAL(addr): return \
def IN6_IS_ADDR_MC_LINKLOCAL(addr): return \
def IN6_IS_ADDR_MC_LINKLOCAL(addr): return \
def IN6_IS_ADDR_MC_SITELOCAL(addr): return \
def IN6_IS_ADDR_MC_SITELOCAL(addr): return \
def IN6_IS_ADDR_MC_ORGLOCAL(addr): return \
def IN6_IS_ADDR_MC_ORGLOCAL(addr): return \
def IN6_IS_ADDR_MC_GLOBAL(addr): return \
def IN6_IS_ADDR_MC_GLOBAL(addr): return \
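# Each IN6_IS_ADDR_* macro appears twice above, apparently one copy per
# alternate C implementation of the address test; as with the other
# duplicates in this module, the later definition is the one in effect.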
IP_OPTIONS = 1
IP_HDRINCL = 2
IP_TOS = 3
IP_TTL = 4
IP_RECVOPTS = 5
IP_RECVRETOPTS = 6
IP_RECVDSTADDR = 7
IP_RETOPTS = 8
IP_MULTICAST_IF = 0x10
IP_MULTICAST_TTL = 0x11
IP_MULTICAST_LOOP = 0x12
IP_ADD_MEMBERSHIP = 0x13
IP_DROP_MEMBERSHIP = 0x14
IP_SEC_OPT = 0x22
IPSEC_PREF_NEVER = 0x01
IPSEC_PREF_REQUIRED = 0x02
IPSEC_PREF_UNIQUE = 0x04
IP_ADD_PROXY_ADDR = 0x40
IP_BOUND_IF = 0x41
IP_UNSPEC_SRC = 0x42
IP_REUSEADDR = 0x104
IP_DONTROUTE = 0x105
IP_BROADCAST = 0x106
IP_DEFAULT_MULTICAST_TTL = 1
IP_DEFAULT_MULTICAST_LOOP = 1
IPV6_RTHDR_TYPE_0 = 0
IPV6_UNICAST_HOPS = 0x5
IPV6_MULTICAST_IF = 0x6
IPV6_MULTICAST_HOPS = 0x7
IPV6_MULTICAST_LOOP = 0x8
IPV6_JOIN_GROUP = 0x9
IPV6_LEAVE_GROUP = 0xa
IPV6_ADD_MEMBERSHIP = 0x9
IPV6_DROP_MEMBERSHIP = 0xa
IPV6_PKTINFO = 0xb
IPV6_HOPLIMIT = 0xc
IPV6_NEXTHOP = 0xd
IPV6_HOPOPTS = 0xe
IPV6_DSTOPTS = 0xf
IPV6_RTHDR = 0x10
IPV6_RTHDRDSTOPTS = 0x11
IPV6_RECVPKTINFO = 0x12
IPV6_RECVHOPLIMIT = 0x13
IPV6_RECVHOPOPTS = 0x14
IPV6_RECVDSTOPTS = 0x15
IPV6_RECVRTHDR = 0x16
IPV6_RECVRTHDRDSTOPTS = 0x17
IPV6_CHECKSUM = 0x18
IPV6_BOUND_IF = 0x41
IPV6_UNSPEC_SRC = 0x42
INET_ADDRSTRLEN = 16
INET6_ADDRSTRLEN = 46
IPV6_PAD1_OPT = 0<|fim▁end|> | def SIZEOF_PTR(umodel): return \
def STRUCT_SIZE(handle): return (sizeof (*(handle).ptr))
|
<|file_name|>database.go<|end_file_name|><|fim▁begin|>package greatspacerace
import (
"bufio"
"fmt"
"github.com/TSavo/go.firebase"
"os"
)
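// init loads the Firebase URL (first line) and auth secret (second line)
// from a local firebase.secret file and opens the database connection.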
func init() {
f, err := os.Open("firebase.secret")
if err != nil {
fmt.Printf("error opening firebase.secret: %v\n", err)
}
r := bufio.NewReader(f)
url, e := Readln(r)
if e != nil {
panic(e)
}
secret, e := Readln(r)
if e != nil {
panic(e)
}
db = firebase.New(url, secret)
}
var db *firebase.FirebaseRoot
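// Readln returns one full logical line, stitching together the fragments
// that bufio.Reader.ReadLine emits when a line exceeds its buffer.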
func Readln(r *bufio.Reader) (string, error) {
var (
isPrefix bool = true
err error = nil
<|fim▁hole|> line, isPrefix, err = r.ReadLine()
ln = append(ln, line...)
}
return string(ln), err
}<|fim▁end|> | line, ln []byte
)
for isPrefix && err == nil {
|
<|file_name|>confirmation.component.spec.ts<|end_file_name|><|fim▁begin|>import { async, ComponentFixture, TestBed } from '@angular/core/testing';
import { ConfirmationComponent } from './confirmation.component';
describe('ConfirmationComponent', () => {
let component: ConfirmationComponent;
let fixture: ComponentFixture<ConfirmationComponent>;
beforeEach(async(() => {
TestBed.configureTestingModule({
declarations: [ ConfirmationComponent ]
})
.compileComponents();
}));
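  // Create the component fixture and run initial change detection before each spec.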
beforeEach(() => {
fixture = TestBed.createComponent(ConfirmationComponent);
component = fixture.componentInstance;
fixture.detectChanges();
});
it('should be created', () => {
expect(component).toBeTruthy();
});<|fim▁hole|><|fim▁end|> | }); |
<|file_name|>MessageQueueBackend.py<|end_file_name|><|fim▁begin|>"""
Message Queue wrapper
"""
__RCSID__ = "$Id$"
from DIRAC.FrameworkSystem.private.standardLogging.Handler.MessageQueueHandler import MessageQueueHandler
from DIRAC.Resources.LogBackends.AbstractBackend import AbstractBackend
from DIRAC.FrameworkSystem.private.standardLogging.Formatter.JsonFormatter import JsonFormatter
class MessageQueueBackend(AbstractBackend):
"""
MessageQueueBackend is used to create an abstraction of the handler and the formatter concepts from logging.
Here, we have:
- MessageQueueHandler: which is a custom handler created in DIRAC to send
    log records to a Message Queue server. You can find it in: FrameworkSystem/private/standardLogging/Handler
- BaseFormatter: is a custom Formatter object, created for DIRAC in order to get the appropriate display.
You can find it in FrameworkSystem/private/standardLogging/Formatter
"""
def __init__(self):
"""
Initialization of the MessageQueueBackend<|fim▁hole|> self.__queue = ''
def createHandler(self, parameters=None):
"""
Each backend can initialize its attributes and create its handler with them.
:params parameters: dictionary of parameters. ex: {'FileName': file.log}
"""
if parameters is not None:
self.__queue = parameters.get("MsgQueue", self.__queue)
self._handler = MessageQueueHandler(self.__queue)
def setLevel(self, level):
"""
No possibility to set the level of the MessageQueue handler.
It is not set by default so it can send all Log Records of all levels to the MessageQueue.
"""
pass<|fim▁end|> | """
super(MessageQueueBackend, self).__init__(None, JsonFormatter) |
<|file_name|>empty-concept-id.ts<|end_file_name|><|fim▁begin|>import {EMPTY_CONCEPT_ID} from '../registry';
import {DdfDataSet} from '../../ddf-definitions/ddf-data-set';
import {Issue} from '../issue';
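// Flags every concept record whose "concept" id cell is empty, keeping the
// source file and line number for the issue report.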
export const rule = {
rule: (ddfDataSet: DdfDataSet) => ddfDataSet
.getConcept()
.getAllData()
.filter(conceptRecord => !conceptRecord.concept)
.map(conceptRecordWithEmptyId =>
new Issue(EMPTY_CONCEPT_ID)<|fim▁hole|> .setPath(conceptRecordWithEmptyId.$$source)
.setData({line: conceptRecordWithEmptyId.$$lineNumber})
)
};<|fim▁end|> | |
<|file_name|>power-save-blocker.js<|end_file_name|><|fim▁begin|>var powerSaveBlocker;<|fim▁hole|>
module.exports = powerSaveBlocker;<|fim▁end|> |
powerSaveBlocker = process.atomBinding('power_save_blocker').powerSaveBlocker; |
<|file_name|>models.py<|end_file_name|><|fim▁begin|>import os
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.sites.models import Site
from django.core.urlresolvers import reverse
from django.test import TestCase
from mock import patch, Mock
import re
import rdflib
from rdflib import RDF
from urllib import urlencode, unquote
from eulxml.xmlmap import load_xmlobject_from_file, XmlObject
from eulfedora.server import Repository
from piffle import iiif
from readux.annotations.models import Annotation
from readux.books import abbyyocr
from readux.books.models import SolrVolume, Volume, VolumeV1_0, Book, BIBO, \
DC, Page, PageV1_1
FIXTURE_DIR = os.path.join(settings.BASE_DIR, 'readux', 'books', 'fixtures')
class SolrVolumeTest(TestCase):
# primarily testing BaseVolume logic here
def test_properties(self):
ocm = 'ocn460678076'
vol = 'V.1'
noid = '1234'
volume = SolrVolume(label='%s_%s' % (ocm, vol),
pid='testpid:%s' % noid)
self.assertEqual(ocm, volume.control_key)
self.assertEqual(vol, volume.volume)
self.assertEqual(noid, volume.noid)
# don't display volume zero
vol = 'V.0'
volume.data['label'] = '%s_%s' % (ocm, vol)
self.assertEqual('', volume.volume)
# should also work without volume info
volume.data['label'] = ocm
self.assertEqual(ocm, volume.control_key)
self.assertEqual('', volume.volume)
def test_fulltext_absolute_url(self):
volume = SolrVolume(label='ocn460678076_V.1',
pid='testpid:1234')
url = volume.fulltext_absolute_url()
self.assert_(url.startswith('https://'))
self.assert_(url.endswith(reverse('books:text', kwargs={'pid': volume.pid})))
current_site = Site.objects.get_current()
self.assert_(current_site.domain in url)
def test_voyant_url(self):
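        # The Voyant link must embed the corpus pid and fulltext URL; the English
        # stopword list should be appended only for English-language volumes.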
# Volume with English Lang
volume1 = SolrVolume(label='ocn460678076_V.1',
pid='testpid:1234', language='eng')
url = volume1.voyant_url()
self.assert_(urlencode({'corpus': volume1.pid}) in url,
'voyant url should include volume pid as corpus identifier')
self.assert_(urlencode({'archive': volume1.fulltext_absolute_url()}) in url,
'voyant url should include volume fulltext url as archive')
self.assert_(urlencode({'stopList': 'stop.en.taporware.txt'}) in url,
                     'voyant url should include english stopword list when volume is in english')
# volume language is French
volume2 = SolrVolume(label='ocn460678076_V.1',
pid='testpid:1235', language='fra')
url_fra = volume2.voyant_url()
self.assert_(urlencode({'stopList': 'stop.en.taporware.txt'}) not in url_fra,
'voyant url should not include english stopword list when language is not english')
def test_pdf_url(self):
# no start page set
vol = SolrVolume(pid='vol:123')
pdf_url = vol.pdf_url()
self.assertEqual(unquote(reverse('books:pdf', kwargs={'pid': vol.pid})), pdf_url)
# start page
vol = SolrVolume(pid='vol:123', start_page=6)
pdf_url = vol.pdf_url()
self.assert_(pdf_url.startswith(unquote(reverse('books:pdf', kwargs={'pid': vol.pid}))))
self.assert_('#page=6' in pdf_url)
class VolumeTest(TestCase):
# borrowing fixture & test accounts from readux.annotations.tests
fixtures = ['test_annotation_data.json']
user_credentials = {
'user': {'username': 'testuser', 'password': 'testing'},
'superuser': {'username': 'testsuper', 'password': 'superme'}
}
def test_annotations(self):
# find annotations associated with a volume, optionally filtered
# by user
User = get_user_model()
testuser = User.objects.create(username='tester')
testadmin = User.objects.create(username='super', is_superuser=True)
mockapi = Mock()
vol = Volume(mockapi, 'vol:1')
# create annotations to test finding
p1 = Annotation.objects.create(user=testuser, text='testuser p1',
uri=reverse('books:page', kwargs={'vol_pid': vol.pid, 'pid': 'p:1'}),
volume_uri=vol.absolute_url)
p2 = Annotation.objects.create(user=testuser, text='testuser p2',
uri=reverse('books:page', kwargs={'vol_pid': vol.pid, 'pid': 'p:2'}),
volume_uri=vol.absolute_url)
p3 = Annotation.objects.create(user=testuser, text='testuser p3',
uri=reverse('books:page', kwargs={'vol_pid': vol.pid, 'pid': 'p:3'}),
volume_uri=vol.absolute_url)
v2p1 = Annotation.objects.create(user=testuser, text='testuser vol2 p1',
uri=reverse('books:page', kwargs={'vol_pid': 'vol:2', 'pid': 'p:1'}),
volume_uri='http://example.com/books/vol:2/')
sup2 = Annotation.objects.create(user=testadmin, text='testsuper p2',
uri=reverse('books:page', kwargs={'vol_pid': vol.pid, 'pid': 'p:2'}),
volume_uri=vol.absolute_url)
annotations = vol.annotations()
self.assertEqual(4, annotations.count())
self.assert_(v2p1 not in annotations)
# filter by user
annotations = vol.annotations().visible_to(testuser)
self.assertEqual(3, annotations.count())
self.assert_(sup2 not in annotations)
annotations = vol.annotations().visible_to(testadmin)
self.assertEqual(4, annotations.count())
self.assert_(sup2 in annotations)
# annotation counts per page
annotation_count = vol.page_annotation_count()
self.assertEqual(1, annotation_count[p1.uri])
self.assertEqual(2, annotation_count[p2.uri])
self.assertEqual(1, annotation_count[p3.uri])
# by user
annotation_count = vol.page_annotation_count(testuser)
self.assertEqual(1, annotation_count[p2.uri])
annotation_count = vol.page_annotation_count(testadmin)
self.assertEqual(2, annotation_count[p2.uri])
# total for a volume
self.assertEqual(4, vol.annotation_count())
self.assertEqual(3, vol.annotation_count(testuser))
self.assertEqual(4, vol.annotation_count(testadmin))
# total for all volumes
totals = Volume.volume_annotation_count()
self.assertEqual(1, totals['http://example.com/books/vol:2/'])
self.assertEqual(4, totals[vol.absolute_url])
totals = Volume.volume_annotation_count(testuser)
self.assertEqual(3, totals[vol.absolute_url])
def test_has_pages(self):
mockapi = Mock()
vol = Volume(mockapi, 'vol:1')
vol.pages = []
self.assertFalse(vol.has_pages)
# one page (i.e. cover image) is not enough to count as having pages
vol.pages = [Mock(spec=Page)]
self.assertFalse(vol.has_pages)
vol.pages = [Mock(spec=Page), Mock(spec=Page)]
self.assertTrue(vol.has_pages)
def test_has_tei(self):
mockapi = Mock()
vol = Volume(mockapi, 'vol:1')
p1 = Mock(spec=Page)
p1.tei.exists = False
p2 = Mock(spec=Page)
p2.tei.exists = False
vol.pages = [p1, p2]
self.assertFalse(vol.has_tei)
p2.tei.exists = True
self.assertTrue(vol.has_tei)
class VolumeV1_0Test(TestCase):
def setUp(self):
# use uningested objects for testing purposes
repo = Repository()
self.vol = repo.get_object(type=VolumeV1_0)
self.vol.label = 'ocn460678076_V.1'
self.vol.pid = 'rdxtest:4606'
def test_ark_uri(self):
ark_uri = 'http://pid.co/ark:/12345/ba45'
self.vol.dc.content.identifier_list.extend([ark_uri, 'pid:ba45', 'otherid'])
self.assertEqual(ark_uri, self.vol.ark_uri)
def test_rdf_dc(self):
# add metadata to test rdf generated
ark_uri = 'http://pid.co/ark:/12345/ba45'
self.vol.dc.content.identifier_list.append(ark_uri)
self.vol.dc.content.title = 'Sunset, a novel'
self.vol.dc.content.format = 'application/pdf'
self.vol.dc.content.language = 'eng'
self.vol.dc.content.rights = 'public domain'
# NOTE: patching on class instead of instance because related object is a descriptor
with patch.object(Volume, 'book', new=Mock(spec=Book)) as mockbook:
mockbook.dc.content.creator_list = ['Author, Joe']
mockbook.dc.content.date_list = ['1801', '2010']
mockbook.dc.content.description_list = ['digitized edition', 'mystery novel']
mockbook.dc.content.publisher = 'Nashville, Tenn. : Barbee & Smith'
mockbook.dc.content.relation_list = [
'http://pid.co/ark:/12345/book',
'http://pid.co/ark:/12345/volpdf'
]
graph = self.vol.rdf_dc_graph()
lit = rdflib.Literal
uri = rdflib.URIRef(self.vol.ark_uri)
self.assert_((uri, RDF.type, BIBO.book) in graph,
'rdf graph type should be bibo:book')
self.assert_((uri, DC.title, lit(self.vol.dc.content.title)) in graph,
'title should be set as dc:title')
self.assert_((uri, BIBO.volume, lit(self.vol.volume)) in graph,
'volume label should be set as bibo:volume')
self.assert_((uri, DC['format'], lit(self.vol.dc.content.format)) in graph,
'format should be set as dc:format')
self.assert_((uri, DC.language, lit(self.vol.dc.content.language)) in graph,
'language should be set as dc:language')
self.assert_((uri, DC.rights, lit(self.vol.dc.content.rights)) in graph,
'rights should be set as dc:rights')
for rel in self.vol.dc.content.relation_list:
self.assert_((uri, DC.relation, lit(rel)) in graph,
'related item %s should be set as dc:relation' % rel)
# metadata pulled from book obj because not present in volume
self.assert_((uri, DC.creator, lit(mockbook.dc.content.creator_list[0])) in graph,
'creator from book metadata should be set as dc:creator when not present in volume metadata')
self.assert_((uri, DC.publisher, lit(mockbook.dc.content.publisher)) in graph,
'publisher from book metadata should be set as dc:publisher when not present in volume metadata')
# earliest date only
self.assert_((uri, DC.date, lit('1801')) in graph,
'earliest date 1801 from book metadata should be set as dc:date when not present in volume metadata')
for d in mockbook.dc.content.description_list:
self.assert_((uri, DC.description, lit(d)) in graph,
'description from book metadata should be set as dc:description when not present in volume metadata')
# volume-level metadata should be used when present instead of book
self.vol.dc.content.creator_list = ['Writer, Jane']
self.vol.dc.content.date_list = ['1832', '2012']
self.vol.dc.content.description_list = ['digital edition']
self.vol.dc.content.publisher = 'So & So Publishers'
graph = self.vol.rdf_dc_graph()
self.assert_((uri, DC.creator, lit(self.vol.dc.content.creator_list[0])) in graph,
'creator from volume metadata should be set as dc:creator when present')
self.assert_((uri, DC.publisher, lit(self.vol.dc.content.publisher)) in graph,
'publisher from volume metadata should be set as dc:publisher when present')
# earliest date *only* should be present
self.assert_((uri, DC.date, lit('1832')) in graph,
'earliest date 1832 from volume metadata should be set as dc:date when present')
for d in self.vol.dc.content.description_list:
self.assert_((uri, DC.description, lit(d)) in graph,
'description from volume metadata should be set as dc:description when present')
def test_index_data(self):
self.vol.owner = ''
self.vol.dc.content.date = 1842
# NOTE: patching on class instead of instance because related object is a descriptor
with patch.object(Volume, 'book', new=Mock(spec=Book)) as mockbook:
mockbook.pid = 'book:123'
            mockbook.collection.pid = 'coll:123'
mockbook.collection.short_label = 'Pile O\' Books'
mockbook.dc.content.creator_list = ['Author, Joe']
mockbook.dc.content.date_list = ['1801', '2010']
mockbook.dc.content.description_list = ['digitized edition', 'mystery novel']
mockbook.dc.content.publisher = 'Nashville, Tenn. : Barbee & Smith'
mockbook.dc.content.relation_list = [
'http://pid.co/ark:/12345/book',
'http://pid.co/ark:/12345/volpdf'
]
mockbook.dc.content.subject_list = []
data = self.vol.index_data()
self.assert_('fulltext' not in data,
'fulltext should not be set in index data when volume has no ocr')
self.assert_('hasPrimaryImage' not in data,
'hasPrimaryImage should not be set in index data when volume has no cover')
self.assertEqual(mockbook.pid, data['book_id'],
'associated book pid should be set as book id')
self.assertEqual(mockbook.collection.pid, data['collection_id'],
'associated collection pid should be set as collection id')
self.assertEqual(mockbook.collection.short_label, data['collection_label'],
'associated collection label short label should be set as collection label')
self.assertEqual(mockbook.dc.content.creator_list, data['creator'],
'creator should be set from book DC creator')
self.assertEqual(self.vol.dc.content.date_list, data['date'],
'date should be set from earliest volume DC date')
self.assert_('subject' not in data,
'subject should not be set in index data when book has no subjects')
self.assertEqual(0, data['page_count'],
'page count should be set to zero when volume has no pages loaded')
# test hasPrimaryImage
mockpage = Mock(spec=Page)
mockpage.pid = 'page:1234'
mockpage.uriref = rdflib.URIRef('info:fedora/%s' % mockpage.pid)
self.vol.primary_image = mockpage
data = self.vol.index_data()
self.assertEqual(mockpage.pid, data['hasPrimaryImage'],
'hasPrimaryImage should be set to cover page pid, when present')
# test subjects
mockbook.dc.content.subject_list = ['subj1', 'subj2']
data = self.vol.index_data()
self.assertEqual(mockbook.dc.content.subject_list, data['subject'],
'subject should be set when present in book DC')
# test full-text
with patch.object(self.vol, 'ocr') as mockocr:
mockocr.exists = True
ocr_xml = load_xmlobject_from_file(os.path.join(FIXTURE_DIR,
'abbyyocr_fr8v2.xml'))
mockocr.content = ocr_xml
data = self.vol.index_data()
self.assert_('fulltext' in data,
'fulltext should be set in index data when OCR is available')
# use mock to test pdf size indexing
with patch.object(self.vol, 'pdf') as mockpdf:
mockpdf.size = 1234567
data = self.vol.index_data()
self.assertEqual(mockpdf.size, data['pdf_size'],
'pdf_size should be set from pdf size, when available')
def test_voyant_url(self):
# NOTE: this test is semi-redundant with the same test for the SolrVolume,
# but since the method is implemented in BaseVolume and depends on
# properties set on the subclasses, testing here to ensure it works
# in both cases
# no language
self.vol.pid = 'vol:1234'
url = self.vol.voyant_url()
self.assert_(urlencode({'corpus': self.vol.pid}) in url,
'voyant url should include volume pid as corpus identifier')
self.assert_(urlencode({'archive': self.vol.fulltext_absolute_url()}) in url,
'voyant url should include volume fulltext url as archive')
self.assert_(urlencode({'stopList': 'stop.en.taporware.txt'}) not in url,
'voyant url should not include english stopword list when volume is not in english')
# english
self.vol.dc.content.language = 'eng'
url = self.vol.voyant_url()
self.assert_(urlencode({'stopList': 'stop.en.taporware.txt'}) in url,
'voyant url should include english stopword list when volume is in english')
def test_get_fulltext(self):
with patch.object(self.vol, 'ocr') as mockocr:
mockocr.exists = True
# abbyy finereader v8
ocr_xml = load_xmlobject_from_file(os.path.join(FIXTURE_DIR,
'abbyyocr_fr8v2.xml'))
mockocr.content = ocr_xml
text = self.vol.get_fulltext()
# check for arbitrary text content
self.assert_('In presenting this, the initial volume of the' in text,
'ocr text content should be present in plain text')
self.assert_('Now, kind reader, we ask that you do not crit' in text,
'ocr text content should be present in plain text')
self.assert_(re.search(r'Baldwin\s+Dellinger\s+Brice', text),
'table row content should be displayed on a single line')
# abbyy finereader v6
ocr_xml = load_xmlobject_from_file(os.path.join(FIXTURE_DIR,
'abbyyocr_fr6v1.xml'))
mockocr.content = ocr_xml<|fim▁hole|> 'ocr text content should be present in plain text')
self.assert_('walked up the steps. The lady had not moved, and made' in text,
'ocr text content should be present in plain text')
self.assert_(re.search(r'Modern\.\s+New Standard\.\s+Popular\.', text),
'table row content should be displayed on a single line')
def test_ocr_ids(self):
# pach in fixture ocr content
with patch.object(self.vol, 'ocr') as mockocr:
mockocr.exists = True
ocr_xml = load_xmlobject_from_file(os.path.join(FIXTURE_DIR,
'abbyyocr_fr8v2.xml'))
mockocr.content = ocr_xml
self.assertFalse(self.vol.ocr_has_ids)
self.vol.add_ocr_ids()
self.assertTrue(self.vol.ocr_has_ids)
class PageV1_1Test(TestCase):
metsalto_doc = os.path.join(FIXTURE_DIR, 'mets_alto.xml')
def setUp(self):
self.mets_alto = load_xmlobject_from_file(self.metsalto_doc, XmlObject)
def test_ocr_ids(self):
page = PageV1_1(Mock()) # use mock for fedora api, since we won't make any calls
page.pid = 'rdxtest:4607'
with patch.object(page, 'ocr') as mockocr:
mockocr.exists = True
mockocr.content = self.mets_alto
self.assertFalse(page.ocr_has_ids)
page.add_ocr_ids()
self.assertTrue(page.ocr_has_ids)
class AbbyyOCRTestCase(TestCase):
fr6v1_doc = os.path.join(FIXTURE_DIR, 'abbyyocr_fr6v1.xml')
fr8v2_doc = os.path.join(FIXTURE_DIR, 'abbyyocr_fr8v2.xml')
# language code
eng = 'EnglishUnitedStates'
def setUp(self):
self.fr6v1 = load_xmlobject_from_file(self.fr6v1_doc, abbyyocr.Document)
self.fr8v2 = load_xmlobject_from_file(self.fr8v2_doc, abbyyocr.Document)
def test_document(self):
# top-level document properties
# finereader 6 v1
self.assertEqual(132, self.fr6v1.page_count)
self.assertEqual(self.eng, self.fr6v1.language)
self.assertEqual(self.eng, self.fr6v1.languages)
self.assert_(self.fr6v1.pages, 'page list should be non-empty')
self.assertEqual(132, len(self.fr6v1.pages),
'number of pages should match page count')
self.assert_(isinstance(self.fr6v1.pages[0], abbyyocr.Page))
# finereader 8 v2
self.assertEqual(186, self.fr8v2.page_count)
self.assertEqual(self.eng, self.fr8v2.language)
self.assertEqual(self.eng, self.fr8v2.languages)
self.assert_(self.fr8v2.pages, 'page list should be non-empty')
self.assertEqual(186, len(self.fr8v2.pages),
'number of pages should match page count')
self.assert_(isinstance(self.fr8v2.pages[0], abbyyocr.Page))
def test_page(self):
# finereader 6 v1
self.assertEqual(1500, self.fr6v1.pages[0].width)
self.assertEqual(2174, self.fr6v1.pages[0].height)
self.assertEqual(300, self.fr6v1.pages[0].resolution)
# second page has picture block, no text
self.assertEqual(1, len(self.fr6v1.pages[1].blocks))
self.assertEqual(1, len(self.fr6v1.pages[1].picture_blocks))
self.assertEqual(0, len(self.fr6v1.pages[1].text_blocks))
self.assert_(isinstance(self.fr6v1.pages[1].blocks[0], abbyyocr.Block))
# fourth page has paragraph text
self.assert_(self.fr6v1.pages[3].paragraphs)
self.assert_(isinstance(self.fr6v1.pages[3].paragraphs[0],
abbyyocr.Paragraph))
# finereader 8 v2
self.assertEqual(2182, self.fr8v2.pages[0].width)
self.assertEqual(3093, self.fr8v2.pages[0].height)
self.assertEqual(300, self.fr8v2.pages[0].resolution)
# first page has multiple text/pic blocks
self.assert_(self.fr8v2.pages[0].blocks)
self.assert_(self.fr8v2.pages[0].picture_blocks)
self.assert_(self.fr8v2.pages[0].text_blocks)
self.assert_(isinstance(self.fr8v2.pages[0].blocks[0], abbyyocr.Block))
# first page has paragraph text
self.assert_(self.fr8v2.pages[0].paragraphs)
self.assert_(isinstance(self.fr8v2.pages[0].paragraphs[0],
abbyyocr.Paragraph))
def test_block(self):
# finereader 6 v1
# - basic block attributes
b = self.fr6v1.pages[1].blocks[0]
self.assertEqual('Picture', b.type)
self.assertEqual(144, b.left)
self.assertEqual(62, b.top)
self.assertEqual(1358, b.right)
self.assertEqual(2114, b.bottom)
# - block with text
b = self.fr6v1.pages[3].blocks[0]
self.assert_(b.paragraphs)
self.assert_(isinstance(b.paragraphs[0], abbyyocr.Paragraph))
# finereader 8 v2
b = self.fr8v2.pages[0].blocks[0]
self.assertEqual('Text', b.type)
self.assertEqual(282, b.left)
self.assertEqual(156, b.top)
self.assertEqual(384, b.right)
self.assertEqual(228, b.bottom)
self.assert_(b.paragraphs)
self.assert_(isinstance(b.paragraphs[0], abbyyocr.Paragraph))
def test_paragraph_line(self):
# finereader 6 v1
para = self.fr6v1.pages[3].paragraphs[0]
# untested: align, left/right/start indent
self.assert_(para.lines)
self.assert_(isinstance(para.lines[0], abbyyocr.Line))
line = para.lines[0]
self.assertEqual(283, line.baseline)
self.assertEqual(262, line.left)
self.assertEqual(220, line.top)
self.assertEqual(1220, line.right)
self.assertEqual(294, line.bottom)
# line text available via unicode
self.assertEqual(u'MABEL MEREDITH;', unicode(line))
# also mapped as formatted text (could repeat/segment)
self.assert_(line.formatted_text) # should be non-empty
self.assert_(isinstance(line.formatted_text[0], abbyyocr.Formatting))
self.assertEqual(self.eng, line.formatted_text[0].language)
self.assertEqual(u'MABEL MEREDITH;', line.formatted_text[0].text) # not normalized
# finereader 8 v2
para = self.fr8v2.pages[1].paragraphs[0]
self.assert_(para.lines)
self.assert_(isinstance(para.lines[0], abbyyocr.Line))
line = para.lines[0]
self.assertEqual(1211, line.baseline)
self.assertEqual(845, line.left)
self.assertEqual(1160, line.top)
self.assertEqual(1382, line.right)
self.assertEqual(1213, line.bottom)
self.assertEqual(u'EMORY UNIVERSITY', unicode(line))
self.assert_(line.formatted_text) # should be non-empty
self.assert_(isinstance(line.formatted_text[0], abbyyocr.Formatting))
self.assertEqual(self.eng, line.formatted_text[0].language)
self.assertEqual(u'EMORY UNIVERSITY', line.formatted_text[0].text)
def test_frns(self):
self.assertEqual('fr6v1:par|fr8v2:par', abbyyocr.frns('par'))
self.assertEqual('fr6v1:text/fr6v1:par|fr8v2:text/fr8v2:par',
abbyyocr.frns('text/par'))<|fim▁end|> |
text = self.vol.get_fulltext()
# check for arbitrary text content
self.assert_('was late in the autumn, the vines yet kept their leaves,' in text, |
<|file_name|>graticule.test.ts<|end_file_name|><|fim▁begin|>import {GraticuleParams} from '../../../src/data';
import {assembleRootData} from '../../../src/compile/data/assemble';
import {GraticuleNode} from '../../../src/compile/data/graticule';
import {parseUnitModelWithScaleAndLayoutSize} from '../../util';
describe('compile/data/graticule', () => {
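  // A graticule source may be configured with a params object or the literal
  // `true`; both forms should assemble into a Vega "graticule" transform.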
describe('GraticuleNode', () => {
describe('assemble', () => {
it('should return a proper vg transform with object param', () => {
const params = {
stepMinor: [15, 10],<|fim▁hole|> expect(graticule.assemble()).toEqual({
type: 'graticule',
stepMinor: [15, 10],
precision: 2
});
});
it('should return a proper vg transform with true param', () => {
const params = true;
const graticule = new GraticuleNode(null, params);
expect(graticule.assemble()).toEqual({
type: 'graticule'
});
});
});
describe('dependentFields', () => {
it('should return empty set', () => {
const sequence = new GraticuleNode(null, true);
expect(sequence.dependentFields()).toEqual(new Set());
});
});
describe('producedFields', () => {
it('should return undefined', () => {
const sequence = new GraticuleNode(null, true);
expect(sequence.producedFields()).toBeUndefined();
});
});
});
it('should parse and add generator transform correctly', () => {
const model = parseUnitModelWithScaleAndLayoutSize({
data: {
graticule: true
},
mark: 'geoshape'
});
model.parseData();
const data = assembleRootData(model.component.data, {});
expect(data[0].transform[0]).toEqual({type: 'graticule'});
});
});<|fim▁end|> | precision: 2
} as GraticuleParams;
const graticule = new GraticuleNode(null, params); |
<|file_name|>env.rs<|end_file_name|><|fim▁begin|>// Copyright 2017 Jeremy Wall <[email protected]>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Contains code for converting a UCG Val into the environment variable output target.
use std::io::Write as IOWrite;
use std::rc::Rc;
use crate::build::Val;
use crate::convert::traits::{ConvertResult, Converter};
/// EnvConverter implements the conversion logic for converting a Val into a
/// set of environment variables.
pub struct EnvConverter {}<|fim▁hole|> EnvConverter {}
}
fn convert_tuple(&self, flds: &Vec<(String, Rc<Val>)>, w: &mut dyn IOWrite) -> ConvertResult {
for &(ref name, ref val) in flds.iter() {
if val.is_tuple() {
eprintln!("Skipping embedded tuple...");
                continue;
}
if let &Val::Empty = val.as_ref() {
eprintln!("Skipping empty variable: {}", name);
                continue;
}
write!(w, "{}=", name)?;
self.write(&val, w)?;
}
Ok(())
}
fn convert_list(&self, _items: &Vec<Rc<Val>>, _w: &mut dyn IOWrite) -> ConvertResult {
eprintln!("Skipping List...");
Ok(())
}
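    // Env output is a flat list of KEY=value lines, so write() only serialises
    // scalar variants; compound values are skipped by the helpers above.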
fn write(&self, v: &Val, w: &mut dyn IOWrite) -> ConvertResult {
match v {
&Val::Empty => {
// Empty is a noop.
return Ok(());
}
&Val::Boolean(b) => {
write!(w, "{}\n", if b { "true" } else { "false" })?;
}
&Val::Float(ref f) => {
write!(w, "{}\n", f)?;
}
&Val::Int(ref i) => {
write!(w, "{}\n", i)?;
}
&Val::Str(ref s) => {
write!(w, "'{}'\n", s)?;
}
&Val::List(ref items) => {
self.convert_list(items, w)?;
}
&Val::Tuple(ref flds) => {
self.convert_tuple(flds, w)?;
}
&Val::Env(ref _fs) => {
// This is ignored
eprintln!("Skipping env...");
}
}
Ok(())
}
}
impl Converter for EnvConverter {
fn convert(&self, v: Rc<Val>, mut w: &mut dyn IOWrite) -> ConvertResult {
self.write(&v, &mut w)
}
fn file_ext(&self) -> String {
String::from("env")
}
fn description(&self) -> String {
"Convert ucg Vals into environment variables.".to_string()
}
#[allow(unused_must_use)]
fn help(&self) -> String {
include_str!("env_help.txt").to_string()
}
}<|fim▁end|> |
impl EnvConverter {
pub fn new() -> Self { |
<|file_name|>MaxTextureSizeTest.cpp<|end_file_name|><|fim▁begin|>//
// Copyright 2015 The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
#include "test_utils/ANGLETest.h"
using namespace angle;
class MaxTextureSizeTest : public ANGLETest
{
protected:
MaxTextureSizeTest()
{
setWindowWidth(512);
setWindowHeight(512);
setConfigRedBits(8);
setConfigGreenBits(8);
setConfigBlueBits(8);
setConfigAlphaBits(8);
}
virtual void SetUp()
{
ANGLETest::SetUp();
const std::string vsSource = SHADER_SOURCE
(
precision highp float;
attribute vec4 position;
varying vec2 texcoord;
void main()
{
gl_Position = position;
texcoord = (position.xy * 0.5) + 0.5;
}
);
const std::string textureFSSource = SHADER_SOURCE
(
precision highp float;
uniform sampler2D tex;
varying vec2 texcoord;
void main()
{
gl_FragColor = texture2D(tex, texcoord);<|fim▁hole|> }
);
const std::string blueFSSource = SHADER_SOURCE
(
precision highp float;
void main()
{
gl_FragColor = vec4(0.0, 0.0, 1.0, 1.0);
}
);
mTextureProgram = CompileProgram(vsSource, textureFSSource);
mBlueProgram = CompileProgram(vsSource, blueFSSource);
if (mTextureProgram == 0 || mBlueProgram == 0)
{
FAIL() << "shader compilation failed.";
}
mTextureUniformLocation = glGetUniformLocation(mTextureProgram, "tex");
glGetIntegerv(GL_MAX_TEXTURE_SIZE, &mMaxTexture2DSize);
glGetIntegerv(GL_MAX_CUBE_MAP_TEXTURE_SIZE, &mMaxTextureCubeSize);
glGetIntegerv(GL_MAX_RENDERBUFFER_SIZE, &mMaxRenderbufferSize);
ASSERT_GL_NO_ERROR();
}
virtual void TearDown()
{
glDeleteProgram(mTextureProgram);
glDeleteProgram(mBlueProgram);
ANGLETest::TearDown();
}
GLuint mTextureProgram;
GLint mTextureUniformLocation;
GLuint mBlueProgram;
GLint mMaxTexture2DSize;
GLint mMaxTextureCubeSize;
GLint mMaxRenderbufferSize;
};
TEST_P(MaxTextureSizeTest, SpecificationTexImage)
{
GLuint tex;
glGenTextures(1, &tex);
glBindTexture(GL_TEXTURE_2D, tex);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
GLsizei textureWidth = mMaxTexture2DSize;
GLsizei textureHeight = 64;
std::vector<GLubyte> data(textureWidth * textureHeight * 4);
for (int y = 0; y < textureHeight; y++)
{
for (int x = 0; x < textureWidth; x++)
{
GLubyte* pixel = &data[0] + ((y * textureWidth + x) * 4);
// Draw a gradient, red in direction, green in y direction
pixel[0] = static_cast<GLubyte>((float(x) / textureWidth) * 255);
pixel[1] = static_cast<GLubyte>((float(y) / textureHeight) * 255);
pixel[2] = 0;
pixel[3] = 255;
}
}
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, textureWidth, textureHeight, 0, GL_RGBA, GL_UNSIGNED_BYTE, &data[0]);
EXPECT_GL_NO_ERROR();
glUseProgram(mTextureProgram);
glUniform1i(mTextureUniformLocation, 0);
drawQuad(mTextureProgram, "position", 0.5f);
std::vector<GLubyte> pixels(getWindowWidth() * getWindowHeight() * 4);
glReadPixels(0, 0, getWindowWidth(), getWindowHeight(), GL_RGBA, GL_UNSIGNED_BYTE, &pixels[0]);
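    // Walk the readback diagonally: along the gradient red and green must be
    // non-decreasing, while blue and alpha stay constant.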
for (int y = 1; y < getWindowHeight(); y++)
{
for (int x = 1; x < getWindowWidth(); x++)
{
const GLubyte* prevPixel = &pixels[0] + (((y - 1) * getWindowWidth() + (x - 1)) * 4);
const GLubyte* curPixel = &pixels[0] + ((y * getWindowWidth() + x) * 4);
EXPECT_GE(curPixel[0], prevPixel[0]);
EXPECT_GE(curPixel[1], prevPixel[1]);
EXPECT_EQ(curPixel[2], prevPixel[2]);
EXPECT_EQ(curPixel[3], prevPixel[3]);
}
}
}
TEST_P(MaxTextureSizeTest, SpecificationTexStorage)
{
if (getClientVersion() < 3 && (!extensionEnabled("GL_EXT_texture_storage") || !extensionEnabled("GL_OES_rgb8_rgba8")))
{
return;
}
GLuint tex;
glGenTextures(1, &tex);
glBindTexture(GL_TEXTURE_2D, tex);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
GLsizei textureWidth = 64;
GLsizei textureHeight = mMaxTexture2DSize;
std::vector<GLubyte> data(textureWidth * textureHeight * 4);
for (int y = 0; y < textureHeight; y++)
{
for (int x = 0; x < textureWidth; x++)
{
GLubyte* pixel = &data[0] + ((y * textureWidth + x) * 4);
// Draw a gradient, red in direction, green in y direction
pixel[0] = static_cast<GLubyte>((float(x) / textureWidth) * 255);
pixel[1] = static_cast<GLubyte>((float(y) / textureHeight) * 255);
pixel[2] = 0;
pixel[3] = 255;
}
}
if (getClientVersion() < 3)
{
glTexStorage2DEXT(GL_TEXTURE_2D, 1, GL_RGBA8_OES, textureWidth, textureHeight);
}
else
{
glTexStorage2D(GL_TEXTURE_2D, 1, GL_RGBA8_OES, textureWidth, textureHeight);
}
EXPECT_GL_NO_ERROR();
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, textureWidth, textureHeight, GL_RGBA, GL_UNSIGNED_BYTE, &data[0]);
EXPECT_GL_NO_ERROR();
glUseProgram(mTextureProgram);
glUniform1i(mTextureUniformLocation, 0);
drawQuad(mTextureProgram, "position", 0.5f);
std::vector<GLubyte> pixels(getWindowWidth() * getWindowHeight() * 4);
glReadPixels(0, 0, getWindowWidth(), getWindowHeight(), GL_RGBA, GL_UNSIGNED_BYTE, &pixels[0]);
for (int y = 1; y < getWindowHeight(); y++)
{
for (int x = 1; x < getWindowWidth(); x++)
{
const GLubyte* prevPixel = &pixels[0] + (((y - 1) * getWindowWidth() + (x - 1)) * 4);
const GLubyte* curPixel = &pixels[0] + ((y * getWindowWidth() + x) * 4);
EXPECT_GE(curPixel[0], prevPixel[0]);
EXPECT_GE(curPixel[1], prevPixel[1]);
EXPECT_EQ(curPixel[2], prevPixel[2]);
EXPECT_EQ(curPixel[3], prevPixel[3]);
}
}
}
TEST_P(MaxTextureSizeTest, RenderToTexture)
{
if (getClientVersion() < 3 && (!extensionEnabled("GL_ANGLE_framebuffer_blit")))
{
std::cout << "Test skipped due to missing glBlitFramebuffer[ANGLE] support." << std::endl;
return;
}
GLuint fbo = 0;
GLuint textureId = 0;
// create a 1-level texture at maximum size
glGenTextures(1, &textureId);
glBindTexture(GL_TEXTURE_2D, textureId);
GLsizei textureWidth = 64;
GLsizei textureHeight = mMaxTexture2DSize;
// texture setup code
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexImage2D(GL_TEXTURE_2D, 0, GL_BGRA_EXT, textureWidth, textureHeight, 0, GL_BGRA_EXT, GL_UNSIGNED_BYTE, NULL);
EXPECT_GL_NO_ERROR();
// create an FBO and attach the texture
glGenFramebuffers(1, &fbo);
glBindFramebuffer(GL_FRAMEBUFFER, fbo);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, textureId, 0);
EXPECT_GL_NO_ERROR();
EXPECT_GLENUM_EQ(GL_FRAMEBUFFER_COMPLETE, glCheckFramebufferStatus(GL_FRAMEBUFFER));
const int frameCount = 64;
for (int i = 0; i < frameCount; i++)
{
// clear the screen
glBindFramebuffer(GL_FRAMEBUFFER, 0);
GLubyte clearRed = static_cast<GLubyte>((float(i) / frameCount) * 255);
GLubyte clearGreen = 255 - clearRed;
GLubyte clearBlue = 0;
glClearColor(clearRed / 255.0f, clearGreen / 255.0f, clearBlue / 255.0f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT);
// render blue into the texture
glBindFramebuffer(GL_FRAMEBUFFER, fbo);
drawQuad(mBlueProgram, "position", 0.5f);
// copy corner of texture to LL corner of window
glBindFramebuffer(GL_DRAW_FRAMEBUFFER_ANGLE, 0);
glBindFramebuffer(GL_READ_FRAMEBUFFER_ANGLE, fbo);
glBlitFramebufferANGLE(0, 0, textureWidth - 1, getWindowHeight() - 1,
0, 0, textureWidth - 1, getWindowHeight() - 1,
GL_COLOR_BUFFER_BIT, GL_NEAREST);
glBindFramebuffer(GL_READ_FRAMEBUFFER_ANGLE, 0);
EXPECT_GL_NO_ERROR();
EXPECT_PIXEL_EQ(textureWidth / 2, getWindowHeight() / 2, 0, 0, 255, 255);
EXPECT_PIXEL_EQ(textureWidth + 10, getWindowHeight() / 2, clearRed, clearGreen, clearBlue, 255);
swapBuffers();
}
glBindFramebuffer(GL_FRAMEBUFFER, 0);
glBindTexture(GL_TEXTURE_2D, 0);
glDeleteFramebuffers(1, &fbo);
glDeleteTextures(1, &textureId);
}
// Use this to select which configurations (e.g. which renderer, which GLES major version) these tests should be run against.
ANGLE_INSTANTIATE_TEST(MaxTextureSizeTest, ES2_D3D9(), ES2_D3D11(), ES2_D3D11_FL9_3());<|fim▁end|> | |
<|file_name|>item-attributes.rs<|end_file_name|><|fim▁begin|>// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// These are attributes of the implicit crate. Really this just needs to parse
// for completeness since .rs files linked from .rc files support this
// notation to specify their module's attributes
#![allow(unused_attribute)]
#![attr1 = "val"]
#![attr2 = "val"]
#![attr3]
#![attr4(attr5)]
#![crate_id="foobar#0.1"]
// These are attributes of the following mod
#[attr1 = "val"]
#[attr2 = "val"]
mod test_first_item_in_file_mod {}
mod test_single_attr_outer {
#[attr = "val"]
pub static x: int = 10;
#[attr = "val"]
pub fn f() { }
#[attr = "val"]
pub mod mod1 {}
pub mod rustrt {
#[attr = "val"]
extern {}
}
}
mod test_multi_attr_outer {
#[attr1 = "val"]
#[attr2 = "val"]
pub static x: int = 10;
#[attr1 = "val"]
#[attr2 = "val"]
pub fn f() { }
#[attr1 = "val"]
#[attr2 = "val"]
pub mod mod1 {}
pub mod rustrt {
#[attr1 = "val"]
#[attr2 = "val"]
extern {}
}
#[attr1 = "val"]
#[attr2 = "val"]
struct t {x: int}
}
mod test_stmt_single_attr_outer {
pub fn f() {
#[attr = "val"]
static x: int = 10;
#[attr = "val"]
fn f() { }
#[attr = "val"]
mod mod1 {
}
mod rustrt {
#[attr = "val"]
extern {
}
}
}
}
mod test_stmt_multi_attr_outer {
pub fn f() {
#[attr1 = "val"]
#[attr2 = "val"]
static x: int = 10;
#[attr1 = "val"]
#[attr2 = "val"]
fn f() { }
#[attr1 = "val"]
#[attr2 = "val"]
mod mod1 {
}
mod rustrt {
#[attr1 = "val"]
#[attr2 = "val"]
extern {
}
}
}
}
mod test_attr_inner {
pub mod m {
// This is an attribute of mod m
#![attr = "val"]
}
}<|fim▁hole|> pub mod m {
// This is an attribute of mod m
#![attr = "val"]
// This is an attribute of fn f
#[attr = "val"]
fn f() { }
}
}
mod test_attr_inner_then_outer_multi {
pub mod m {
// This is an attribute of mod m
#![attr1 = "val"]
#![attr2 = "val"]
// This is an attribute of fn f
#[attr1 = "val"]
#[attr2 = "val"]
fn f() { }
}
}
mod test_distinguish_syntax_ext {
pub fn f() {
format!("test{}", "s");
#[attr = "val"]
fn g() { }
}
}
mod test_other_forms {
#[attr]
#[attr(word)]
#[attr(attr(word))]
#[attr(key1 = "val", key2 = "val", attr)]
pub fn f() { }
}
mod test_foreign_items {
pub mod rustrt {
extern crate libc;
extern {
#![attr]
#[attr]
fn rust_get_test_int() -> libc::intptr_t;
}
}
}
// FIXME #623 - these aren't supported yet
/*mod test_literals {
#![str = "s"]
#![char = 'c']
#![int = 100]
#![uint = 100u]
#![mach_int = 100u32]
#![float = 1.0]
#![mach_float = 1.0f32]
#![nil = ()]
#![bool = true]
mod m {}
}*/
fn test_fn_inner() {
#![inner_fn_attr]
}
pub fn main() { }<|fim▁end|> |
mod test_attr_inner_then_outer { |
<|file_name|>issue-2190-1.rs<|end_file_name|><|fim▁begin|>// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// xfail-test
static generations: uint = 1024+256+128+49;
fn child_no(x: uint) -> proc() {
|| {
if x < generations {
task::spawn(child_no(x+1));
}
}<|fim▁hole|>}
pub fn main() {
task::spawn(child_no(0));
}<|fim▁end|> | |
<|file_name|>adder.go<|end_file_name|><|fim▁begin|>package add
import (
"errors"
"fmt"
"io"
"log"
"os"
"path/filepath"
"sort"
"time"
"github.com/fhs/gompd/mpd"
"hawx.me/code/aoide/data"
"hawx.me/code/aoide/song"
)
func New(musicDir string, db *data.Database) *Adder {
conn, err := mpd.Dial("tcp", ":6600")<|fim▁hole|> return &Adder{musicDir, db, conn}
}
type Adder struct {
musicDir string
db *data.Database
client *mpd.Client
}
type addRecord struct {
from, to string
s data.Song
}
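// byPath orders records by destination path so the confirmation listing
// groups files under their target album directory.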
type byPath []addRecord
func (a byPath) Len() int { return len(a) }
func (a byPath) Less(i, j int) bool { return a[i].to < a[j].to }
func (a byPath) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a *Adder) Add(queue bool, paths ...string) (err error) {
recs := []addRecord{}
for _, path := range paths {
s, err := song.Read(path)
if err != nil {
log.Println(err)
continue
}
recs = append(recs, addRecord{
from: path,
to: song.PathFor(a.musicDir, s),
s: s,
})
}
sort.Sort(byPath(recs))
lastDir := ""
for _, rec := range recs {
thisDir := filepath.Dir(rec.to)
if thisDir != lastDir {
fmt.Println(thisDir)
lastDir = thisDir
}
fmt.Println(" ", filepath.Base(rec.to))
}
fmt.Print("Add? [yn] ")
var response string
_, err = fmt.Scanln(&response)
if err != nil {
log.Fatal(err)
}
if response == "y" || response == "Y" {
log.Println("adding...")
a.doAdd(queue, recs)
} else {
log.Println("skipping...")
}
return
}
func (a *Adder) doAdd(queue bool, recs []addRecord) error {
var toQueue []string
for _, rec := range recs {
if err := os.MkdirAll(filepath.Dir(rec.to), 0755); err != nil {
log.Printf("Could not add %s: %s\n", rec.from, err)
continue
}
if err := copyFile(rec.from, rec.to); err != nil {
log.Printf("Could not add %s: %s\n", rec.from, err)
continue
}
s, err := song.Read(rec.to)
if err != nil {
log.Printf("Could not add %s to db: %s\n", rec.from, err)
continue
}
if err := a.db.Insert(s); err != nil {
log.Printf("Could not add %s to db: %s\n", rec.from, err)
continue
}
if a.client != nil {
p, _ := filepath.Rel(a.musicDir, rec.to)
if _, err := a.client.Update(p); err != nil {
log.Printf("Could not add %s to mpd: %s\n", rec.from, err)
continue
}
if queue {
toQueue = append(toQueue, p)
}
}
}
if len(toQueue) > 0 {
time.Sleep(time.Second) // ugh
for _, p := range toQueue {
if err := a.client.Add(p); err != nil {
log.Printf("Could not add %s to playlist: %s\n", p, err)
continue
}
}
}
return nil
}
func copyFile(src, dst string) error {
if _, err := os.Stat(dst); err == nil || os.IsExist(err) {
return errors.New("destination already exists")
}
in, err := os.Open(src)
if err != nil {
return err
}
defer in.Close()
out, err := os.Create(dst)
if err != nil {
return err
}
defer out.Close()
if _, err = io.Copy(out, in); err != nil {
return err
}
return out.Close()
}<|fim▁end|> | if err != nil {
conn = nil
}
|
<|file_name|>list.rs<|end_file_name|><|fim▁begin|>/* Copyright 2016 Jordan Miner
*
* Licensed under the MIT license <LICENSE or
* http://opensource.org/licenses/MIT>. This file may not be copied,
* modified, or distributed except according to those terms.
*/
use super::control_prelude::*;
use attributes::set_attribute_ptr;
use std::ffi::CStr;
#[derive(Clone)]
pub struct List(HandleRc);
impl List {
pub fn new() -> Self {
unsafe {
::iup_open();
let ih = IupList(ptr::null_mut());
List(HandleRc::new(ih))
}
}
pub fn dropdown(&self) -> bool {
unsafe {
get_str_attribute_slice(self.handle(), "DROPDOWN\0") == "YES"
}
}
pub fn set_dropdown(&self, dropdown: bool) -> &Self {
set_str_attribute(self.handle(), "DROPDOWN\0", if dropdown { "YES\0" } else { "NO\0" });
self
}
pub fn edit_box(&self) -> bool {
unsafe {
get_str_attribute_slice(self.handle(), "EDITBOX\0") == "YES"
}
}
pub fn set_edit_box(&self, edit_box: bool) -> &Self {
set_str_attribute(self.handle(), "EDITBOX\0", if edit_box { "YES\0" } else { "NO\0" });
self
}
pub fn multiple(&self) -> bool {
unsafe {
get_str_attribute_slice(self.handle(), "MULTIPLE\0") == "YES"
}
}
pub fn set_multiple(&self, multiple: bool) -> &Self {
set_str_attribute(self.handle(), "MULTIPLE\0", if multiple { "YES\0" } else { "NO\0" });
self
}
// An `index` of 0 is the first item.
pub fn item(&self, index: usize) -> String {
get_str_attribute(self.handle(), &format!("{}\0", index + 1))
}
// An `index` of 0 is the first item.
pub fn set_item(&self, index: usize, text: &str) -> &Self {
set_str_attribute(self.handle(), &format!("{}\0", index + 1), text);
self
}
pub fn set_items<'a, I, T>(&self, items: I) -> &Self
where I: IntoIterator<Item=T>, T: AsRef<str> {
let mut index = 0;
for item in items {
self.set_item(index, item.as_ref());
index += 1;
}
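        // Clearing the attribute one past the last written index truncates any
        // items left over from a previously longer list.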
unsafe {
set_attribute_ptr(self.handle(), &format!("{}\0", index + 1), ptr::null_mut());
}
self
}
pub fn append_item(&self, text: &str) -> &Self {
set_str_attribute(self.handle(), "APPENDITEM\0", text);
self
}
// An `index` of 0 is the first item.
pub fn insert_item(&self, index: usize, text: &str) -> &Self {
set_str_attribute(self.handle(), &format!("INSERTITEM{}\0", index + 1), text);
self
}
// An `index` of 0 is the first item.
pub fn remove_item(&self, index: usize) -> &Self {
set_str_attribute(self.handle(), "REMOVEITEM\0", &format!("{}\0", index + 1));
self
}
pub fn clear(&self) -> &Self {
set_str_attribute(self.handle(), "REMOVEITEM\0", "ALL\0");
self
}
pub fn count(&self) -> usize {
unsafe {
let s = get_str_attribute_slice(self.handle(), "COUNT\0");
s.parse().unwrap()
}
}
/// Panics if `edit_box` is false.
pub fn value_text(&self) -> String {
assert!(self.edit_box());
get_str_attribute(self.handle(), "VALUE\0")
}
/// Returns the index of the selected item or `None` if no item is selected.
///
/// Panics if `edit_box` is true or `multiple` is true.
pub fn value_single(&self) -> Option<usize> {
assert!(!self.edit_box());
assert!(!self.multiple());
unsafe {
let s = get_str_attribute_slice(self.handle(), "VALUE\0");<|fim▁hole|>
pub fn set_value_single(&self, index: Option<usize>) -> &Self {
assert!(!self.edit_box());
assert!(!self.multiple());
if let Some(index) = index {
assert!(index < self.count());
set_str_attribute(self.handle(), "VALUE\0", &format!("{}\0", index + 1));
} else {
unsafe {
set_attribute_ptr(self.handle(), "VALUE\0", ptr::null_mut());
}
}
self
}
/// Returns the indexes of all selected items.
///
/// Panics if `edit_box` is true or `multiple` is false.
pub fn value_multiple(&self) -> Vec<usize> {
assert!(!self.edit_box());
assert!(self.multiple());
unsafe {
let s = get_str_attribute_slice(self.handle(), "VALUE\0");
let sel = s.as_bytes().iter().enumerate().filter(|&(_, c)| *c == b'+').map(|(i, _)| i);
sel.collect()
}
}
// visible_items
// visible_columns
// visible_lines
}
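// Illustrative usage sketch (not part of the original file): exercises the
// item and selection API above. The function name and item strings are
// arbitrary example data.
#[allow(dead_code)]
fn example_single_selection_list() -> List {
    let list = List::new();
    // Replace the whole item list, then select the second entry ("green").
    list.set_items(&["red", "green", "blue"])
        .set_value_single(Some(1));
    list
}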
impl_control_traits!(List);
impl ActiveAttribute for List {}
impl ExpandAttribute for List {}
impl MinMaxSizeAttribute for List {}
impl VisibleAttribute for List {}
impl VisibleColumnsLinesAttribute for List {}
impl MenuCommonCallbacks for List {}
#[derive(Clone)]
pub struct ListActionArgs<'a> {
pub text: &'a str,
pub item_index: usize,
pub selected: bool,
_dummy: (),
}
impl_callbacks! {
List {
"ACTION\0" => action_event {
ACTION_CALLBACKS<FnMut(&ListActionArgs), ListActionCallbackToken>
}
unsafe extern fn list_action_cb(ih: *mut Ihandle, text: *mut c_char, item: c_int, state: c_int) -> c_int {
with_callbacks(ih, &ACTION_CALLBACKS, |cbs| {
let text_str = CStr::from_ptr(text).to_string_lossy();
let args = ListActionArgs {
text: &*text_str,
item_index: (item - 1) as usize,
selected: state == 1,
_dummy: (),
};
for cb in cbs {
(&mut *cb.1.borrow_mut())(&args);
}
IUP_DEFAULT
})
}
}
}<|fim▁end|> | s.parse::<usize>().ok().into_iter().filter(|i| *i != 0).next().map(|i| i - 1)
}
} |
<|file_name|>containerRemove.js<|end_file_name|><|fim▁begin|>containerRemoveSchemas = new SimpleSchema({
force: {
type: Boolean,
optional:true,
label: "Force"
},
link: {
type: Boolean,
optional:true,
label: "Link"
},
v: {
type: Boolean,
optional:true,
label: "Volumes"
}
,id: {
type: String,
autoform: {
type: 'hidden',
label:false
}
}
,host: {
type: String,
autoform: {
type: 'hidden',
label:false
}
}
});
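// (force/link/v mirror the query parameters of Docker's
// DELETE /containers/{id} endpoint: force-kill the container, remove the
// link only, and remove associated volumes.)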
if (Meteor.isClient){<|fim▁hole|> }});
}<|fim▁end|> | AutoForm.hooks({
containerRemoveForm: {
after: {
'method': formNotifier('rm','containers')} |
<|file_name|>test-expire-tiles.cpp<|end_file_name|><|fim▁begin|>#include "expire-tiles.hpp"
#include "options.hpp"
#include <iterator>
#include <stdio.h>
#include <string.h>
#include <stdarg.h>
#include <stdlib.h>
#include <stdexcept>
#include <boost/format.hpp>
#include <set>
#define EARTH_CIRCUMFERENCE (40075016.68)
namespace {
void run_test(const char* test_name, void (*testfunc)())
{
try
{
fprintf(stderr, "%s\n", test_name);
testfunc();
}
catch(const std::exception& e)
{
fprintf(stderr, "%s\n", e.what());
fprintf(stderr, "FAIL\n");
exit(EXIT_FAILURE);
}
fprintf(stderr, "PASS\n");
}
#define RUN_TEST(x) run_test(#x, &(x))
#define ASSERT_EQ(a, b) { if (!((a) == (b))) { throw std::runtime_error((boost::format("Expecting %1% == %2%, but %3% != %4%") % #a % #b % (a) % (b)).str()); } }
struct xyz {
int z, x, y;
xyz(int z_, int x_, int y_) : z(z_), x(x_), y(y_) {}
bool operator==(const xyz &other) const {
return ((z == other.z) &&
(x == other.x) &&
(y == other.y));
}
bool operator<(const xyz &other) const {
return ((z < other.z) ||
((z == other.z) &&
((x < other.x) ||
((x == other.x) &&
(y < other.y)))));
}
void to_bbox(double &x0, double &y0,
double &x1, double &y1) const {
const double datum = 0.5 * (1 << z);
const double scale = EARTH_CIRCUMFERENCE / (1 << z);
x0 = (x - datum) * scale;
y0 = (datum - (y + 1)) * scale;
x1 = ((x + 1) - datum) * scale;
y1 = (datum - y) * scale;
}
void to_centroid(double &x0, double &y0) const {
const double datum = 0.5 * (1 << z);
const double scale = EARTH_CIRCUMFERENCE / (1 << z);
x0 = ((x + 0.5) - datum) * scale;
y0 = (datum - (y + 0.5)) * scale;
}
};
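// Worked example (illustrative): at z=1, datum = 1 and
// scale = EARTH_CIRCUMFERENCE / 2 ~= 20037508.34 m, so tile (z=1, x=1, y=0)
// maps to the bbox (0, 0) -> (20037508.34, 20037508.34), i.e. the
// north-east quadrant of the web-mercator world.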
std::ostream &operator<<(std::ostream &out, const xyz &tile) {
out << tile.z << "/" << tile.x << "/" << tile.y;
return out;
}
struct tile_output_set : public expire_tiles::tile_output {
tile_output_set() {}
virtual ~tile_output_set() {}
virtual void output_dirty_tile(int x, int y, int zoom, int min_zoom) {
int y_min, x_iter, y_iter, x_max, y_max, out_zoom, zoom_diff;
if (zoom > min_zoom) out_zoom = zoom;
else out_zoom = min_zoom;
zoom_diff = out_zoom - zoom;
y_min = y << zoom_diff;
x_max = (x + 1) << zoom_diff;
y_max = (y + 1) << zoom_diff;
for (x_iter = x << zoom_diff; x_iter < x_max; x_iter++) {
for (y_iter = y_min; y_iter < y_max; y_iter++) {
m_tiles.insert(xyz(out_zoom, x_iter, y_iter));
}
}
}
std::set<xyz> m_tiles;
};
void test_expire_simple_z1() {
options_t opt;
opt.expire_tiles_zoom = 1;
opt.expire_tiles_zoom_min = 1;
expire_tiles et(&opt);
tile_output_set set;
// as big a bbox as possible at the origin to dirty all four
// quadrants of the world.
et.from_bbox(-10000, -10000, 10000, 10000);
et.output_and_destroy(&set);
ASSERT_EQ(set.m_tiles.size(), 4);
std::set<xyz>::iterator itr = set.m_tiles.begin();
ASSERT_EQ(*itr, xyz(1, 0, 0)); ++itr;
ASSERT_EQ(*itr, xyz(1, 0, 1)); ++itr;
ASSERT_EQ(*itr, xyz(1, 1, 0)); ++itr;
ASSERT_EQ(*itr, xyz(1, 1, 1)); ++itr;
}
void test_expire_simple_z3() {
options_t opt;
opt.expire_tiles_zoom = 3;
opt.expire_tiles_zoom_min = 3;
expire_tiles et(&opt);
tile_output_set set;
// as big a bbox as possible at the origin to dirty all four
// quadrants of the world.
et.from_bbox(-10000, -10000, 10000, 10000);
et.output_and_destroy(&set);
ASSERT_EQ(set.m_tiles.size(), 4);
std::set<xyz>::iterator itr = set.m_tiles.begin();
ASSERT_EQ(*itr, xyz(3, 3, 3)); ++itr;
ASSERT_EQ(*itr, xyz(3, 3, 4)); ++itr;
ASSERT_EQ(*itr, xyz(3, 4, 3)); ++itr;
ASSERT_EQ(*itr, xyz(3, 4, 4)); ++itr;
}
void test_expire_simple_z18() {
options_t opt;
opt.expire_tiles_zoom = 18;
opt.expire_tiles_zoom_min = 18;
expire_tiles et(&opt);
tile_output_set set;
// dirty a smaller bbox this time, as at z18 the scale is
// pretty small.
et.from_bbox(-1, -1, 1, 1);
et.output_and_destroy(&set);
ASSERT_EQ(set.m_tiles.size(), 4);
std::set<xyz>::iterator itr = set.m_tiles.begin();
ASSERT_EQ(*itr, xyz(18, 131071, 131071)); ++itr;
ASSERT_EQ(*itr, xyz(18, 131071, 131072)); ++itr;
ASSERT_EQ(*itr, xyz(18, 131072, 131071)); ++itr;
ASSERT_EQ(*itr, xyz(18, 131072, 131072)); ++itr;
}
std::set<xyz> generate_random(int zoom, size_t count) {
size_t num = 0;
std::set<xyz> set;
const int coord_mask = (1 << zoom) - 1;
while (num < count) {
xyz item(zoom, rand() & coord_mask, rand() & coord_mask);
if (set.count(item) == 0) {
set.insert(item);
++num;
}
}
return set;
}
void assert_tilesets_equal(const std::set<xyz> &a,
const std::set<xyz> &b) {
ASSERT_EQ(a.size(), b.size());
std::set<xyz>::const_iterator a_itr = a.begin();
std::set<xyz>::const_iterator b_itr = b.begin();
while ((a_itr != a.end()) &&
(b_itr != b.end())) {
ASSERT_EQ(*a_itr, *b_itr);
++a_itr;
++b_itr;
}
}
void expire_centroids(const std::set<xyz> &check_set,
expire_tiles &et) {
for (std::set<xyz>::const_iterator itr = check_set.begin();
itr != check_set.end(); ++itr) {
double x0 = 0.0, y0 = 0.0;
itr->to_centroid(x0, y0);
et.from_bbox(x0, y0, x0, y0);
}
}
// tests that expiring a set of tile centroids means that
// those tiles get expired.
void test_expire_set() {
options_t opt;
int zoom = 18;
opt.expire_tiles_zoom = zoom;
opt.expire_tiles_zoom_min = zoom;
for (int i = 0; i < 100; ++i) {
expire_tiles et(&opt);
tile_output_set set;
std::set<xyz> check_set = generate_random(zoom, 100);
expire_centroids(check_set, et);
et.output_and_destroy(&set);
assert_tilesets_equal(set.m_tiles, check_set);
}
}
// this tests that, after expiring a random set of tiles
// in one expire_tiles object and a different set in
// another, when they are merged together they are the
// same as if the union of the sets of tiles had been
// expired.
void test_expire_merge() {
options_t opt;
int zoom = 18;
opt.expire_tiles_zoom = zoom;
opt.expire_tiles_zoom_min = zoom;
for (int i = 0; i < 100; ++i) {
expire_tiles et(&opt), et1(&opt), et2(&opt);
tile_output_set set;
std::set<xyz> check_set1 = generate_random(zoom, 100);
expire_centroids(check_set1, et1);
std::set<xyz> check_set2 = generate_random(zoom, 100);
expire_centroids(check_set2, et2);
et.merge_and_destroy(et1);
et.merge_and_destroy(et2);
std::set<xyz> check_set;
std::set_union(check_set1.begin(), check_set1.end(),
check_set2.begin(), check_set2.end(),
std::inserter(check_set, check_set.end()));
et.output_and_destroy(&set);
assert_tilesets_equal(set.m_tiles, check_set);
}
}
// tests that merging two identical sets results in
// the same set. this guarantees that we check some
// pathways of the merging which possibly could be
// skipped by the random tile set in the previous<|fim▁hole|> int zoom = 18;
opt.expire_tiles_zoom = zoom;
opt.expire_tiles_zoom_min = zoom;
for (int i = 0; i < 100; ++i) {
expire_tiles et(&opt), et1(&opt), et2(&opt);
tile_output_set set;
std::set<xyz> check_set = generate_random(zoom, 100);
expire_centroids(check_set, et1);
expire_centroids(check_set, et2);
et.merge_and_destroy(et1);
et.merge_and_destroy(et2);
et.output_and_destroy(&set);
assert_tilesets_equal(set.m_tiles, check_set);
}
}
// makes sure that we're testing the case where some
// tiles are in both.
void test_expire_merge_overlap() {
options_t opt;
int zoom = 18;
opt.expire_tiles_zoom = zoom;
opt.expire_tiles_zoom_min = zoom;
for (int i = 0; i < 100; ++i) {
expire_tiles et(&opt), et1(&opt), et2(&opt);
tile_output_set set;
std::set<xyz> check_set1 = generate_random(zoom, 100);
expire_centroids(check_set1, et1);
std::set<xyz> check_set2 = generate_random(zoom, 100);
expire_centroids(check_set2, et2);
std::set<xyz> check_set3 = generate_random(zoom, 100);
expire_centroids(check_set3, et1);
expire_centroids(check_set3, et2);
et.merge_and_destroy(et1);
et.merge_and_destroy(et2);
std::set<xyz> check_set;
std::set_union(check_set1.begin(), check_set1.end(),
check_set2.begin(), check_set2.end(),
std::inserter(check_set, check_set.end()));
std::set_union(check_set1.begin(), check_set1.end(),
check_set3.begin(), check_set3.end(),
std::inserter(check_set, check_set.end()));
et.output_and_destroy(&set);
assert_tilesets_equal(set.m_tiles, check_set);
}
}
// checks that the set union still works when we expire
// large contiguous areas of tiles (i.e: ensure that we
// handle the "complete" flag correctly).
void test_expire_merge_complete() {
options_t opt;
int zoom = 18;
opt.expire_tiles_zoom = zoom;
opt.expire_tiles_zoom_min = zoom;
for (int i = 0; i < 100; ++i) {
expire_tiles et(&opt), et1(&opt), et2(&opt), et0(&opt);
tile_output_set set, set0;
// et1&2 are two halves of et0's box
et0.from_bbox(-10000, -10000, 10000, 10000);
et1.from_bbox(-10000, -10000, 0, 10000);
et2.from_bbox( 0, -10000, 10000, 10000);
et.merge_and_destroy(et1);
et.merge_and_destroy(et2);
et.output_and_destroy(&set);
et0.output_and_destroy(&set0);
assert_tilesets_equal(set.m_tiles, set0.m_tiles);
}
}
} // anonymous namespace
int main(int argc, char *argv[])
{
srand(0);
//try each test if any fail we will exit
RUN_TEST(test_expire_simple_z1);
RUN_TEST(test_expire_simple_z3);
RUN_TEST(test_expire_simple_z18);
RUN_TEST(test_expire_set);
RUN_TEST(test_expire_merge);
RUN_TEST(test_expire_merge_same);
RUN_TEST(test_expire_merge_overlap);
RUN_TEST(test_expire_merge_complete);
//passed
return 0;
}<|fim▁end|> | // test.
void test_expire_merge_same() {
options_t opt; |
<|file_name|>random_mac.py<|end_file_name|><|fim▁begin|>#! /usr/bin/env python<|fim▁hole|>Inspired by http://www.linux-kvm.com/sites/default/files/macgen.py
"""
from sys import argv
import random
DEFAULT_OUI = '00-16-3e' # Xensource, Inc.
def random_mac(oui=None):
"""returns a random MAC address, with optional +oui+ override"""
mac_parts = [oui or DEFAULT_OUI]
for limit in [0x7f, 0xff, 0xff]:
mac_parts.append("%02x" % random.randint(0x00, limit))
return '-'.join(mac_parts)
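# Example output (illustrative values): random_mac() -> '00-16-3e-1a-2b-3c';
# random_mac('52-54-00') keeps that OUI and randomizes the last three octets.
# Note the first random octet is capped at 0x7f by the limits above.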
if __name__ == '__main__':
print random_mac(argv[1] if len(argv) > 1 else None)<|fim▁end|> | """
Generate random MAC address |
<|file_name|>quit.go<|end_file_name|><|fim▁begin|>package module
import (
"fmt"
"github.com/davidscholberg/irkbot/lib/configure"
"github.com/davidscholberg/irkbot/lib/message"
)
func helpQuit() []string {
s := "quit - ragequit IRC (requires owner privilege)"
return []string{s}
}
<|fim▁hole|> }
actions.quit()
}<|fim▁end|> | func quit(cfg *configure.Config, in *message.InboundMsg, actions *actions) {
if in.Event.Nick != cfg.Admin.Owner {
actions.say(fmt.Sprintf("%s: %s", in.Event.Nick, cfg.Admin.DenyMessage))
return |
<|file_name|>slice-mut-2.rs<|end_file_name|><|fim▁begin|>// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or<|fim▁hole|>// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test mutability and slicing syntax.
fn main() {
let x: &[isize] = &[1, 2, 3, 4, 5];
// Can't mutably slice an immutable slice
let slice: &mut [isize] = &mut [0, 1];
let _ = &mut x[2..4]; //~ERROR cannot borrow immutable borrowed content `*x` as mutable
}<|fim▁end|> | // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license |
<|file_name|>bunyanConfig.test.js<|end_file_name|><|fim▁begin|>var bunyanConfig = require('../index');
var bunyan = require('bunyan');
var path = require('path');
describe('bunyan-config', function () {
it('should not convert things it does not understand', function () {
bunyanConfig({
name: 'test',
streams: [{
path: '/tmp/log.log'
}, {
type: 'raw',
stream: 'unknown'
}],
serializers: 5
}).should.deep.equal({
name: 'test',
streams: [{
path: '/tmp/log.log'
}, {
type: 'raw',
stream: 'unknown'
}],
serializers: 5
});
});
describe('streams', function () {
it('should convert stdout and stderr', function () {
bunyanConfig({
streams: [{
level: 'info',
stream: 'stdout'
}, {
stream: {name: 'stderr'}
}]
}).should.deep.equal({
streams: [{
level: 'info',
stream: process.stdout
}, {
stream: process.stderr
}]
});
});
it('should convert bunyan-logstash', function () {<|fim▁hole|> bunyanConfig({
streams: [{
level: 'error',
type: 'raw',
stream: {
name: 'bunyan-logstash',
params: {
host: 'example.com',
port: 1234
}
}
}]
}).should.deep.equal({
streams: [{
level: 'error',
type: 'raw',
stream: require('bunyan-logstash').createStream({
host: 'example.com',
port: 1234
})
}]
});
});
it('should convert bunyan-redis stream', function () {
var config = bunyanConfig({
streams: [{
type: 'raw',
stream: {
name: 'bunyan-redis',
params: {
host: 'example.com',
port: 1234
}
}
}]
});
config.streams[0].stream.should.be.an.instanceof(require('events').EventEmitter);
config.streams[0].stream._client.host.should.equal('example.com');
config.streams[0].stream._client.port.should.equal(1234);
config.streams[0].stream._client.end();
});
});
describe('serializers', function () {
it('should convert serializers property, if it is a string', function () {
bunyanConfig({
serializers: 'bunyan:stdSerializers'
}).should.deep.equal({
serializers: bunyan.stdSerializers
});
});
it('should not convert serializers, if it is an empty string', function () {
bunyanConfig({
serializers: ''
}).should.deep.equal({
serializers: ''
});
});
it('should convert serializers object', function () {
var absolutePathWithProps = path.resolve(__dirname, './fixtures/dummySerializerWithProps');
var relativePathWithProps = './' + path.relative(process.cwd(), absolutePathWithProps);
var absolutePathWithoutProps = path.resolve(__dirname, './fixtures/dummySerializerWithoutProps');
var relativePathWithoutProps = './' + path.relative(process.cwd(), absolutePathWithoutProps);
bunyanConfig({
serializers: {
moduleWithProps: 'bunyan:stdSerializers.req',
moduleWithoutProps: 'bunyan',
absoluteWithProps: relativePathWithProps + ':c',
relativeWithProps: relativePathWithProps + ':a.b',
absoluteWithoutProps: absolutePathWithoutProps,
relativeWithoutProps: relativePathWithoutProps,
empty: '',
noModuleId: ':abc'
}
}).should.deep.equal({
serializers: {
moduleWithProps: bunyan.stdSerializers.req,
moduleWithoutProps: bunyan,
absoluteWithProps: require('./fixtures/dummySerializerWithProps').c,
relativeWithProps: require('./fixtures/dummySerializerWithProps').a.b,
absoluteWithoutProps: require('./fixtures/dummySerializerWithoutProps'),
relativeWithoutProps: require('./fixtures/dummySerializerWithoutProps'),
empty: '',
noModuleId: ':abc'
}
});
});
});
});<|fim▁end|> | |
<|file_name|>GardenDropGame.py<|end_file_name|><|fim▁begin|>from panda3d.core import *
from panda3d.direct import *
from toontown.toonbase.ToonBaseGlobal import *
from direct.gui.DirectGui import *
from panda3d.core import *
from panda3d.direct import *
from direct.gui.DirectScrolledList import *
from direct.distributed.ClockDelta import *
from toontown.toontowngui import TTDialog
import math
from direct.task.Task import Task
from toontown.toonbase import ToontownGlobals
from direct.distributed import DistributedObject
from direct.directnotify import DirectNotifyGlobal
from direct.fsm import ClassicFSM
from direct.fsm import State
from toontown.toon import Toon
from direct.showbase import RandomNumGen
from toontown.toonbase import TTLocalizer
import random
import cPickle
from direct.showbase import PythonUtil
import GameSprite
from math import pi
import GardenProgressMeter
class GardenDropGame(DirectObject.DirectObject):
def __init__(self):
self.acceptErrorDialog = None
self.doneEvent = 'game Done'
self.sprites = []
self.load()
thing = self.model.find('**/item_board')
self.block = self.model1.find('**/minnieCircle')
self.colorRed = (1, 0, 0, 1)
self.colorBlue = (0, 0, 1, 1)
self.colorGreen = (0, 1, 0, 1)
self.colorGhostRed = (1, 0, 0, 0.5)
self.colorGhostBlue = (0, 0, 1, 0.5)
self.colorGhostGreen = (0, 1, 0, 0.5)
self.colorWhite = (1, 1, 1, 1)
self.colorBlack = (0, 0, 0, 1.0)
self.colorShadow = (0, 0, 0, 0.5)
self.lastTime = None
self.running = 0
self.massCount = 0
self.foundCount = 0
self.maxX = 0.47
self.minX = -0.47
self.maxZ = 0.65
self.minZ = -0.1
self.newBallX = 0.0
self.newBallZ = 0.6
self.rangeX = self.maxX - self.minX
self.rangeZ = self.maxZ - self.minZ
size = 0.085
sizeZ = size * 0.8
gX = int(self.rangeX / size)
gZ = int(self.rangeZ / sizeZ)
self.maxX = self.minX + gX * size
self.maxZ = self.minZ + gZ * sizeZ
self.controlOffsetX = 0.0
self.controlOffsetZ = 0.0
self.queExtent = 3
print 'Grid Dimensions X%s Z%s' % (gX, gZ)
self.grid = []
self.gridDimX = gX
self.gridDimZ = gZ
self.gridBrick = False
base.gardenGame = self
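        # Each grid cell is [occupant, worldX, worldZ]; rows with an even Z
        # index are shifted half a cell in X, giving the staggered hex-style
        # packing that matches the six-neighbor adjacency hasNeighbor() tests.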
for countX in range(self.gridDimX):
newRow = []
for countZ in range(self.gridDimZ):
offset = 0
if countZ % 2 == 0:
offset = size / 2
newRow.append([None, countX * size + self.minX + offset, countZ * sizeZ + self.minZ])
self.grid.append(newRow)
self.controlSprite = None
self.cogSprite = self.addUnSprite(self.block, posX=0.25, posZ=0.5)
self.cogSprite.setColor(self.colorShadow)
for ball in range(0, 3):
place = random.random() * self.rangeX
newSprite = self.addSprite(self.block, size=0.5, posX=self.minX + place, posZ=0.0, found=1)
self.stickInGrid(newSprite, 1)
self.queBall = self.addSprite(self.block, posX=0.25, posZ=0.5, found=0)
self.queBall.setColor(self.colorWhite)
self.queBall.isQue = 1
self.matchList = []
self.newBallTime = 1.0
self.newBallCountUp = 0.0
self.cogX = 0
self.cogZ = 0
self.__run()
return
def findGrid(self, x, z, force = 0):
currentClosest = None
currentDist = 10000000
for countX in range(self.gridDimX):
for countZ in range(self.gridDimZ):
testDist = self.testPointDistanceSquare(x, z, self.grid[countX][countZ][1], self.grid[countX][countZ][2])
if self.grid[countX][countZ][0] == None and testDist < currentDist and (force or self.hasNeighbor(countX, countZ)):
currentClosest = self.grid[countX][countZ]<|fim▁hole|> return currentClosest
def hasNeighbor(self, cellX, cellZ):
gotNeighbor = 0
if cellZ % 2 == 0:
if self.testGridfull(self.getValidGrid(cellX - 1, cellZ)):
gotNeighbor = 1
elif self.testGridfull(self.getValidGrid(cellX + 1, cellZ)):
gotNeighbor = 1
elif self.testGridfull(self.getValidGrid(cellX, cellZ + 1)):
gotNeighbor = 1
elif self.testGridfull(self.getValidGrid(cellX + 1, cellZ + 1)):
gotNeighbor = 1
elif self.testGridfull(self.getValidGrid(cellX, cellZ - 1)):
gotNeighbor = 1
elif self.testGridfull(self.getValidGrid(cellX + 1, cellZ - 1)):
gotNeighbor = 1
elif self.testGridfull(self.getValidGrid(cellX - 1, cellZ)):
gotNeighbor = 1
elif self.testGridfull(self.getValidGrid(cellX + 1, cellZ)):
gotNeighbor = 1
elif self.testGridfull(self.getValidGrid(cellX, cellZ + 1)):
gotNeighbor = 1
elif self.testGridfull(self.getValidGrid(cellX - 1, cellZ + 1)):
gotNeighbor = 1
elif self.testGridfull(self.getValidGrid(cellX, cellZ - 1)):
gotNeighbor = 1
elif self.testGridfull(self.getValidGrid(cellX - 1, cellZ - 1)):
gotNeighbor = 1
return gotNeighbor
def clearMatchList(self):
for entry in self.matchList:
gridEntry = self.grid[entry[0]][entry[1]]
sprite = gridEntry[0]
gridEntry[0] = None
sprite.markedForDeath = 1
return
def createMatchList(self, x, z):
self.matchList = []
self.fillMatchList(x, z)
def fillMatchList(self, cellX, cellZ):
if (cellX, cellZ) in self.matchList:
return
self.matchList.append((cellX, cellZ))
colorType = self.grid[cellX][cellZ][0].colorType
if cellZ % 2 == 0:
if self.getColorType(cellX - 1, cellZ) == colorType:
self.fillMatchList(cellX - 1, cellZ)
if self.getColorType(cellX + 1, cellZ) == colorType:
self.fillMatchList(cellX + 1, cellZ)
if self.getColorType(cellX, cellZ + 1) == colorType:
self.fillMatchList(cellX, cellZ + 1)
if self.getColorType(cellX + 1, cellZ + 1) == colorType:
self.fillMatchList(cellX + 1, cellZ + 1)
if self.getColorType(cellX, cellZ - 1) == colorType:
self.fillMatchList(cellX, cellZ - 1)
if self.getColorType(cellX + 1, cellZ - 1) == colorType:
self.fillMatchList(cellX + 1, cellZ - 1)
else:
if self.getColorType(cellX - 1, cellZ) == colorType:
self.fillMatchList(cellX - 1, cellZ)
if self.getColorType(cellX + 1, cellZ) == colorType:
self.fillMatchList(cellX + 1, cellZ)
if self.getColorType(cellX, cellZ + 1) == colorType:
self.fillMatchList(cellX, cellZ + 1)
if self.getColorType(cellX - 1, cellZ + 1) == colorType:
self.fillMatchList(cellX - 1, cellZ + 1)
if self.getColorType(cellX, cellZ - 1) == colorType:
self.fillMatchList(cellX, cellZ - 1)
if self.getColorType(cellX - 1, cellZ - 1) == colorType:
self.fillMatchList(cellX - 1, cellZ - 1)
def testGridfull(self, cell):
if not cell:
return 0
elif cell[0] != None:
return 1
else:
return 0
return
def getValidGrid(self, x, z):
if x < 0 or x >= self.gridDimX:
return None
elif z < 0 or z >= self.gridDimZ:
return None
else:
return self.grid[x][z]
return None
def getColorType(self, x, z):
if x < 0 or x >= self.gridDimX:
return -1
elif z < 0 or z >= self.gridDimZ:
return -1
elif self.grid[x][z][0] == None:
return -1
else:
return self.grid[x][z][0].colorType
return
def findGridCog(self):
self.cogX = 0
self.cogZ = 0
self.massCount = 0
for row in self.grid:
for cell in row:
if cell[0] != None:
self.cogX += cell[1]
self.cogZ += cell[2]
self.massCount += 1
if self.massCount > 0:
self.cogX = self.cogX / self.massCount
self.cogZ = self.cogZ / self.massCount
self.cogSprite.setX(self.cogX)
self.cogSprite.setZ(self.cogZ)
else:
self.doOnClearGrid()
return
def doOnClearGrid(self):
secondSprite = self.addSprite(self.block, posX=self.newBallX, posZ=0.0, found=1)
secondSprite.addForce(0, 1.55 * pi)
self.stickInGrid(secondSprite, 1)
def findGrid2(self, x, z):
rangeX = self.maxX - self.minX
rangeZ = self.maxZ - self.minZ
framedX = x - self.minX
framedZ = z - self.minZ
tileDimX = rangeX / self.gridDimX
tileDimZ = rangeZ / self.gridDimZ
tileX = int(framedX / tileDimX)
tileZ = int(framedZ / tileDimZ)
print 'find Grid tileX%s tileZ%s' % (tileX, tileZ)
return (tileX, tileZ)
def findPos(self, x, z):
rangeX = self.maxX - self.minX
rangeZ = self.maxZ - self.minZ
tileDimX = rangeX / self.gridDimX
tileDimZ = rangeZ / self.gridDimZ
posX = tileDimX * x + self.minX
posZ = tileDimZ * z + self.minZ
print 'find Pos X%s Z%s' % (posX, posZ)
return (posX, posZ)
def placeIntoGrid(self, sprite, x, z):
if self.grid[x][z][0] == None:
self.grid[x][z][0] = sprite
sprite.setActive(0)
newX, newZ = self.findPos(x, z)
sprite.setX(newX)
sprite.setZ(newZ)
print 'Setting Final Pos X%s Z%s' % (newX, newZ)
else:
self.placeIntoGrid(sprite, x + 1, z - 1)
return
def stickInGrid(self, sprite, force = 0):
if sprite.isActive and not sprite.isQue:
gridCell = self.findGrid(sprite.getX(), sprite.getZ(), force)
if gridCell:
gridCell[0] = sprite
sprite.setActive(0)
sprite.setX(gridCell[1])
sprite.setZ(gridCell[2])
self.createMatchList(self.closestX, self.closestZ)
if len(self.matchList) >= 3:
self.clearMatchList()
self.findGridCog()
def stickInGrid2(self, sprite):
if sprite.isActive and not sprite.isQue:
tileX, tileZ = self.findGrid(sprite.getX(), sprite.getZ())
self.placeIntoGrid(sprite, tileX, tileZ)
sprite.isActive = 0
def load(self):
model = loader.loadModel('phase_5.5/models/gui/package_delivery_panel')
model1 = loader.loadModel('phase_3.5/models/gui/matching_game_gui')
self.model = model
self.model1 = model1
background = model.find('**/bg')
itemBoard = model.find('**/item_board')
self.frame = DirectFrame(scale=1.1, relief=DGG.FLAT, frameSize=(-0.5,
0.5,
-0.45,
-0.05), frameColor=(0.737, 0.573, 0.345, 1.0))
self.background = DirectFrame(self.frame, image=background, image_scale=0.05, relief=None, pos=(0, 1, 0))
self.itemBoard = DirectFrame(parent=self.frame, image=itemBoard, image_scale=0.05, image_color=(0.922, 0.922, 0.753, 1), relief=None, pos=(0, 1, 0))
gui2 = loader.loadModel('phase_3/models/gui/quit_button')
self.quitButton = DirectButton(parent=self.frame, relief=None, image=(gui2.find('**/QuitBtn_UP'), gui2.find('**/QuitBtn_DN'), gui2.find('**/QuitBtn_RLVR')), pos=(0.5, 1.0, -0.42), scale=0.9, text='Exit Mini Game', text_font=ToontownGlobals.getSignFont(), text0_fg=(1, 1, 1, 1), text1_fg=(1, 1, 1, 1), text2_fg=(1, 1, 1, 1), text_scale=0.045, text_pos=(0, -0.01), command=self.__handleExit)
return
def unload(self):
self.frame.destroy()
del self.frame
if self.acceptErrorDialog:
self.acceptErrorDialog.cleanup()
self.acceptErrorDialog = None
taskMgr.remove('gameTask')
self.ignoreAll()
return
def show(self):
self.frame.show()
def hide(self):
self.frame.hide()
def __handleExit(self):
self.__acceptExit()
def __acceptExit(self, buttonValue = None):
if hasattr(self, 'frame'):
self.hide()
self.unload()
messenger.send(self.doneEvent)
def addSprite(self, image, size = 0.5, posX = 0, posZ = 0, found = 0):
nodeObj = DirectLabel(parent=self.frame, relief=None, image=image, pos=(posX, 0.0, posZ), scale=size, image_color=(1.0, 1.0, 1.0, 1))
colorChoice = random.choice(range(0, 3))
newSprite = GameSprite.GameSprite(nodeObj, colorChoice, found)
self.sprites.append(newSprite)
if found:
self.foundCount += 1
return newSprite
def addUnSprite(self, image, size = 0.5, posX = 0, posZ = 0):
nodeObj = DirectLabel(parent=self.frame, relief=None, image=image, pos=(posX, 0.0, posZ), scale=size, image_color=(1.0, 1.0, 1.0, 1))
        newSprite = GameSprite.GameSprite(nodeObj)
return newSprite
def __run(self, cont = 1):
if self.lastTime == None:
self.lastTime = globalClock.getRealTime()
timeDelta = globalClock.getRealTime() - self.lastTime
self.lastTime = globalClock.getRealTime()
self.newBallCountUp += timeDelta
if base.mouseWatcherNode.hasMouse():
x = base.mouseWatcherNode.getMouseX()
y = base.mouseWatcherNode.getMouseY()
self.queBall.setX(x)
self.queBall.setZ(y)
for sprite in self.sprites:
sprite.run(timeDelta)
if sprite.getX() > self.maxX:
sprite.setX(self.maxX)
sprite.velX = -sprite.velX
if sprite.getX() < self.minX:
sprite.setX(self.minX)
sprite.velX = -sprite.velX
if sprite.getZ() > self.maxZ:
sprite.setZ(self.maxZ)
sprite.velZ = -sprite.velZ
if sprite.getZ() < self.minZ:
self.stickInGrid(sprite, 1)
if sprite.isActive:
sprite.addForce(timeDelta * 0.9, pi * 1.5)
self.queBall.velX = (self.queBall.getX() - self.queBall.prevX) / timeDelta
self.queBall.velZ = (self.queBall.getZ() - self.queBall.prevZ) / timeDelta
self.__colTest()
for sprite in self.sprites:
if sprite.markedForDeath:
if sprite.foundation:
self.foundCount -= 1
self.sprites.remove(sprite)
sprite.delete()
if self.controlSprite == None:
self.addControlSprite(self.newBallX, self.newBallZ)
self.newBallCountUp = 0.0
if self.newBallCountUp >= self.newBallTime:
self.addControlSprite(self.newBallX, self.newBallZ)
self.newBallCountUp = 0.0
if not self.controlSprite.isActive:
self.controlSprite = None
if self.foundCount <= 0:
self.__handleWin()
if cont and not self.running:
taskMgr.add(self.__run, 'gameTask')
self.running = 1
return Task.cont
def __handleWin(self):
GardenProgressMeter.GardenProgressMeter()
self.__handleExit()
def addControlSprite(self, x = 0.0, z = 0.0):
newSprite = self.addSprite(self.block, posX=x, posZ=z)
self.controlSprite = newSprite
def __colTest(self):
if not hasattr(self, 'tick'):
self.tick = 0
self.tick += 1
if self.tick > 5:
self.tick = 0
sizeSprites = len(self.sprites)
for movingSpriteIndex in range(len(self.sprites)):
for testSpriteIndex in range(movingSpriteIndex, len(self.sprites)):
movingSprite = self.getSprite(movingSpriteIndex)
testSprite = self.getSprite(testSpriteIndex)
if testSprite and movingSprite:
if movingSpriteIndex != testSpriteIndex and (movingSprite.isActive or testSprite.isActive):
if movingSprite.isQue or testSprite.isQue:
if self.testDistance(movingSprite.nodeObj, testSprite.nodeObj) < self.queExtent * (movingSprite.size + testSprite.size):
self.push(movingSprite, testSprite)
elif self.testDistance(movingSprite.nodeObj, testSprite.nodeObj) < movingSprite.size + testSprite.size:
if not (movingSprite.isActive and testSprite.isActive):
self.__collide(movingSprite, testSprite)
if self.tick == 5:
pass
def getSprite(self, spriteIndex):
if spriteIndex >= len(self.sprites) or self.sprites[spriteIndex].markedForDeath:
return None
else:
return self.sprites[spriteIndex]
return None
def testDistance(self, nodeA, nodeB):
distX = nodeA.getX() - nodeB.getX()
distZ = nodeA.getZ() - nodeB.getZ()
distC = distX * distX + distZ * distZ
dist = math.sqrt(distC)
return dist
def testPointDistance(self, x1, z1, x2, z2):
distX = x1 - x2
distZ = z1 - z2
distC = distX * distX + distZ * distZ
dist = math.sqrt(distC)
if dist == 0:
dist = 1e-10
return dist
def testPointDistanceSquare(self, x1, z1, x2, z2):
distX = x1 - x2
distZ = z1 - z2
distC = distX * distX + distZ * distZ
if distC == 0:
distC = 1e-10
return distC
def angleTwoSprites(self, sprite1, sprite2):
x1 = sprite1.getX()
z1 = sprite1.getZ()
x2 = sprite2.getX()
z2 = sprite2.getZ()
x = x2 - x1
z = z2 - z1
angle = math.atan2(-x, z)
return angle + pi * 0.5
def angleTwoPoints(self, x1, z1, x2, z2):
x = x2 - x1
z = z2 - z1
angle = math.atan2(-x, z)
return angle + pi * 0.5
def __collide(self, move, test):
queHit = 0
if move.isQue:
que = move
hit = test
queHit = 1
elif test.isQue:
que = test
hit = move
queHit = 1
else:
test.velX = 0
test.velZ = 0
move.velX = 0
move.velZ = 0
test.collide()
move.collide()
self.stickInGrid(move)
self.stickInGrid(test)
if queHit:
forceM = 0.1
distX = que.getX() - hit.getX()
distZ = que.getZ() - hit.getZ()
def push(self, move, test):
queHit = 0
if move.isQue:
que = move
hit = test
queHit = 1
elif test.isQue:
que = test
hit = move
queHit = 1
if queHit:
forceM = 0.1
dist = self.testDistance(move.nodeObj, test.nodeObj)
if abs(dist) < self.queExtent * que.size and abs(dist) > 0:
scaleSize = self.queExtent * que.size * 0.5
distFromPara = abs(abs(dist) - scaleSize)
force = (scaleSize - distFromPara) / scaleSize * (dist / abs(dist))
angle = self.angleTwoSprites(que, hit)
if angle < 0:
angle = angle + 2 * pi
if angle > pi * 2.0:
angle = angle - 2 * pi
newAngle = pi * 1.0
if angle > pi * 1.5 or angle < pi * 0.5:
newAngle = pi * 0.0
hit.addForce(forceM * force, newAngle)<|fim▁end|> | self.closestX = countX
self.closestZ = countZ
currentDist = testDist
|
<|file_name|>gesture_drag.rs<|end_file_name|><|fim▁begin|>// This file was generated by gir (https://github.com/gtk-rs/gir)
// from gir-files (https://github.com/gtk-rs/gir-files)
// DO NOT EDIT
use EventController;
use Gesture;
use GestureSingle;
#[cfg(any(feature = "v3_14", feature = "dox"))]
use Widget;
use ffi;
use glib;
use glib::object::Downcast;
use glib::object::IsA;
#[cfg(any(feature = "v3_14", feature = "dox"))]
use glib::signal::SignalHandlerId;
#[cfg(any(feature = "v3_14", feature = "dox"))]
use glib::signal::connect;
use glib::translate::*;
use glib_ffi;
use gobject_ffi;
#[cfg(any(feature = "v3_14", feature = "dox"))]
use libc;
#[cfg(any(feature = "v3_14", feature = "dox"))]
use std::boxed::Box as Box_;
use std::mem;
#[cfg(any(feature = "v3_14", feature = "dox"))]
use std::mem::transmute;
use std::ptr;
glib_wrapper! {
pub struct GestureDrag(Object<ffi::GtkGestureDrag, ffi::GtkGestureDragClass>): GestureSingle, Gesture, EventController;
match fn {
get_type => || ffi::gtk_gesture_drag_get_type(),
}
}
impl GestureDrag {
#[cfg(any(feature = "v3_14", feature = "dox"))]
pub fn new<P: IsA<Widget>>(widget: &P) -> GestureDrag {
skip_assert_initialized!();
unsafe {
Gesture::from_glib_full(ffi::gtk_gesture_drag_new(widget.to_glib_none().0)).downcast_unchecked()
}
}
}
pub trait GestureDragExt {
#[cfg(any(feature = "v3_14", feature = "dox"))]
fn get_offset(&self) -> Option<(f64, f64)>;
#[cfg(any(feature = "v3_14", feature = "dox"))]
fn get_start_point(&self) -> Option<(f64, f64)>;
#[cfg(any(feature = "v3_14", feature = "dox"))]
fn connect_drag_begin<F: Fn(&Self, f64, f64) + 'static>(&self, f: F) -> SignalHandlerId;
#[cfg(any(feature = "v3_14", feature = "dox"))]
fn connect_drag_end<F: Fn(&Self, f64, f64) + 'static>(&self, f: F) -> SignalHandlerId;
#[cfg(any(feature = "v3_14", feature = "dox"))]
fn connect_drag_update<F: Fn(&Self, f64, f64) + 'static>(&self, f: F) -> SignalHandlerId;
}
impl<O: IsA<GestureDrag> + IsA<glib::object::Object>> GestureDragExt for O {
#[cfg(any(feature = "v3_14", feature = "dox"))]
fn get_offset(&self) -> Option<(f64, f64)> {
unsafe {
let mut x = mem::uninitialized();
let mut y = mem::uninitialized();
let ret = from_glib(ffi::gtk_gesture_drag_get_offset(self.to_glib_none().0, &mut x, &mut y));
if ret { Some((x, y)) } else { None }
}
}
#[cfg(any(feature = "v3_14", feature = "dox"))]
fn get_start_point(&self) -> Option<(f64, f64)> {
unsafe {
let mut x = mem::uninitialized();
let mut y = mem::uninitialized();
let ret = from_glib(ffi::gtk_gesture_drag_get_start_point(self.to_glib_none().0, &mut x, &mut y));
if ret { Some((x, y)) } else { None }
}
}
#[cfg(any(feature = "v3_14", feature = "dox"))]
fn connect_drag_begin<F: Fn(&Self, f64, f64) + 'static>(&self, f: F) -> SignalHandlerId {
unsafe {
let f: Box_<Box_<Fn(&Self, f64, f64) + 'static>> = Box_::new(Box_::new(f));<|fim▁hole|> connect(self.to_glib_none().0, "drag-begin",
transmute(drag_begin_trampoline::<Self> as usize), Box_::into_raw(f) as *mut _)
}
}
#[cfg(any(feature = "v3_14", feature = "dox"))]
fn connect_drag_end<F: Fn(&Self, f64, f64) + 'static>(&self, f: F) -> SignalHandlerId {
unsafe {
let f: Box_<Box_<Fn(&Self, f64, f64) + 'static>> = Box_::new(Box_::new(f));
connect(self.to_glib_none().0, "drag-end",
transmute(drag_end_trampoline::<Self> as usize), Box_::into_raw(f) as *mut _)
}
}
#[cfg(any(feature = "v3_14", feature = "dox"))]
fn connect_drag_update<F: Fn(&Self, f64, f64) + 'static>(&self, f: F) -> SignalHandlerId {
unsafe {
let f: Box_<Box_<Fn(&Self, f64, f64) + 'static>> = Box_::new(Box_::new(f));
connect(self.to_glib_none().0, "drag-update",
transmute(drag_update_trampoline::<Self> as usize), Box_::into_raw(f) as *mut _)
}
}
}
#[cfg(any(feature = "v3_14", feature = "dox"))]
unsafe extern "C" fn drag_begin_trampoline<P>(this: *mut ffi::GtkGestureDrag, start_x: libc::c_double, start_y: libc::c_double, f: glib_ffi::gpointer)
where P: IsA<GestureDrag> {
let f: &&(Fn(&P, f64, f64) + 'static) = transmute(f);
f(&GestureDrag::from_glib_borrow(this).downcast_unchecked(), start_x, start_y)
}
#[cfg(any(feature = "v3_14", feature = "dox"))]
unsafe extern "C" fn drag_end_trampoline<P>(this: *mut ffi::GtkGestureDrag, offset_x: libc::c_double, offset_y: libc::c_double, f: glib_ffi::gpointer)
where P: IsA<GestureDrag> {
let f: &&(Fn(&P, f64, f64) + 'static) = transmute(f);
f(&GestureDrag::from_glib_borrow(this).downcast_unchecked(), offset_x, offset_y)
}
#[cfg(any(feature = "v3_14", feature = "dox"))]
unsafe extern "C" fn drag_update_trampoline<P>(this: *mut ffi::GtkGestureDrag, offset_x: libc::c_double, offset_y: libc::c_double, f: glib_ffi::gpointer)
where P: IsA<GestureDrag> {
let f: &&(Fn(&P, f64, f64) + 'static) = transmute(f);
f(&GestureDrag::from_glib_borrow(this).downcast_unchecked(), offset_x, offset_y)
}<|fim▁end|> | |
<|file_name|>cask.rs<|end_file_name|><|fim▁begin|>use std::collections::{BTreeSet, HashMap};
use std::collections::hash_map::{Entry as HashMapEntry, Keys};
use std::default::Default;
use std::path::PathBuf;
use std::result::Result::Ok;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Mutex, RwLock};
use std::thread;
use std::time::Duration;
use std::vec::Vec;
use time;
use data::{Entry, Hint, SequenceNumber};
use errors::Result;
use log::{Log, LogWrite};
use stats::Stats;
use util::human_readable_byte_count;
#[derive(Debug)]
pub struct IndexEntry {
pub file_id: u32,
entry_pos: u64,
pub entry_size: u64,
sequence: SequenceNumber,
}
struct Index {
map: HashMap<Vec<u8>, IndexEntry>,
stats: Stats,
}
impl Index {
fn new() -> Index {
Index {
map: HashMap::new(),
stats: Stats::new(),
}
}
fn get(&self, key: &[u8]) -> Option<&IndexEntry> {
self.map.get(key)
}
fn insert(&mut self, key: Vec<u8>, index_entry: IndexEntry) -> Option<IndexEntry> {
self.stats.add_entry(&index_entry);
self.map.insert(key, index_entry).map(|entry| {
self.stats.remove_entry(&entry);
entry
})
}
fn remove(&mut self, key: &[u8]) -> Option<IndexEntry> {
self.map.remove(key).map(|entry| {
self.stats.remove_entry(&entry);
entry
})
}
fn update(&mut self, hint: Hint, file_id: u32) {
let index_entry = IndexEntry {
file_id: file_id,
entry_pos: hint.entry_pos,
entry_size: hint.entry_size(),
sequence: hint.sequence,
};
match self.map.entry(hint.key.to_vec()) {
HashMapEntry::Occupied(mut o) => {
if o.get().sequence <= hint.sequence {
self.stats.remove_entry(o.get());
if hint.deleted {
o.remove();
} else {
self.stats.add_entry(&index_entry);
o.insert(index_entry);
}
} else {
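                    // The hint is stale (its sequence is older than the live
                    // entry's), but its record still occupies space in its
                    // file; adding and then removing it presumably makes the
                    // per-file stats count that space as dead bytes rather
                    // than ignoring it.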
self.stats.add_entry(&index_entry);
self.stats.remove_entry(&index_entry);
}
}
HashMapEntry::Vacant(e) => {
if !hint.deleted {
self.stats.add_entry(&index_entry);
e.insert(index_entry);
}
}
}
}
pub fn keys(&self) -> Keys<Vec<u8>, IndexEntry> {
self.map.keys()
}
}
struct CaskInner {
current_sequence: SequenceNumber,
index: Index,
log: Log,
}
impl CaskInner {
fn get(&self, key: &[u8]) -> Result<Option<Vec<u8>>> {
let value = match self.index.get(key) {
Some(index_entry) => {
let entry = self.log.read_entry(
index_entry.file_id,
index_entry.entry_pos,
)?;
if entry.deleted {
warn!(
"Index pointed to dead entry: Entry {{ key: {:?}, sequence: {} }} at \
file: {}",
entry.key,
entry.sequence,
index_entry.file_id
);
None
} else {
Some(entry.value.into_owned())
}
}
_ => None,
};
Ok(value)
}
fn put(&mut self, key: Vec<u8>, value: &[u8]) -> Result<()> {
let index_entry = {
let entry = Entry::new(self.current_sequence, &*key, value)?;
let (file_id, file_pos) = self.log.append_entry(&entry)?;
self.current_sequence += 1;
IndexEntry {
file_id: file_id,
entry_pos: file_pos,
entry_size: entry.size(),
sequence: entry.sequence,
}
};
self.index.insert(key, index_entry);
Ok(())
}
fn delete(&mut self, key: &[u8]) -> Result<()> {
if self.index.remove(key).is_some() {
let entry = Entry::deleted(self.current_sequence, key);
self.log.append_entry(&entry)?;
self.current_sequence += 1;
}
Ok(())
}
pub fn keys(&self) -> Keys<Vec<u8>, IndexEntry> {
self.index.keys()
}
}
/// A handle to a `Cask` database.
///
/// This handle can be "cheaply" cloned and safely shared between threads. `Cask`s cannot be used
/// concurrently by separate processes; this is enforced by a file lock in the `Cask` dir.
#[derive(Clone)]
pub struct Cask {
path: PathBuf,
options: CaskOptions,
dropped: Arc<AtomicBool>,
inner: Arc<RwLock<CaskInner>>,
compaction: Arc<Mutex<()>>,
}
/// `Cask` configuration. Provides control over the properties and behavior of the `Cask` instance.
///
/// # Examples
///
/// ```rust,no_run
/// use cask::{CaskOptions, SyncStrategy};
///
/// let cask = CaskOptions::default()
/// .compaction_check_frequency(1200)
/// .sync(SyncStrategy::Never)
/// .max_file_size(1024 * 1024 * 1024)
/// .open("cask.db")
/// .unwrap();
/// ```
#[derive(Clone)]
pub struct CaskOptions {
create: bool,
sync: SyncStrategy,
max_file_size: usize,
file_pool_size: usize,
compaction: bool,
compaction_check_frequency: u64,
compaction_window: (usize, usize),
fragmentation_trigger: f64,
dead_bytes_trigger: u64,
fragmentation_threshold: f64,
dead_bytes_threshold: u64,
small_file_threshold: u64,
}
/// Strategy used to synchronize writes to disk.
#[derive(Clone, PartialEq)]
pub enum SyncStrategy {
/// Never explicitly synchronize writes (the OS manages it).
Never,
/// Always synchronize writes.
Always,
/// Synchronize writes in the background every `n` milliseconds.
Interval(usize),
}
impl Default for CaskOptions {
fn default() -> CaskOptions {
CaskOptions {
create: true,
sync: SyncStrategy::Interval(1000),
max_file_size: 2 * 1024 * 1024 * 1024,
file_pool_size: 2048,
compaction: true,
compaction_check_frequency: 3600,
compaction_window: (0, 23),
fragmentation_trigger: 0.6,
dead_bytes_trigger: 512 * 1024 * 1024,
fragmentation_threshold: 0.4,
dead_bytes_threshold: 128 * 1024 * 1024,
small_file_threshold: 10 * 1024 * 1024,
}
}
}
#[allow(dead_code)]
impl CaskOptions {
/// Generates the base configuration for opening a `Cask`, from which configuration methods can
/// be chained.
pub fn new() -> CaskOptions {
CaskOptions::default()
}
/// Sets the strategy used to synchronize writes to disk. Defaults to
/// `SyncStrategy::Interval(1000)`.
pub fn sync(&mut self, sync: SyncStrategy) -> &mut CaskOptions {
self.sync = sync;
self
}
/// Sets the maximum file size. Defaults to `2GB`.
pub fn max_file_size(&mut self, max_file_size: usize) -> &mut CaskOptions {
self.max_file_size = max_file_size;
self
}
/// Sets the maximum size of the file descriptor cache. Defaults to `2048`.
pub fn file_pool_size(&mut self, file_pool_size: usize) -> &mut CaskOptions {
self.file_pool_size = file_pool_size;
self
}
/// Enable or disable background compaction. Defaults to `true`.
pub fn compaction(&mut self, compaction: bool) -> &mut CaskOptions {
self.compaction = compaction;
self
}
/// Create `Cask` if it doesn't exist. Defaults to `true`.
pub fn create(&mut self, create: bool) -> &mut CaskOptions {
self.create = create;
self
}
/// Sets the frequency of compaction, in seconds. Defaults to `3600`.
pub fn compaction_check_frequency(
&mut self,
compaction_check_frequency: u64,
) -> &mut CaskOptions {
self.compaction_check_frequency = compaction_check_frequency;
self
}
/// Sets the time window during which compaction can run. Defaults to `[0, 23]`.
pub fn compaction_window(&mut self, start: usize, end: usize) -> &mut CaskOptions {
self.compaction_window = (start, end);
self
}
/// Sets the ratio of dead entries to total entries in a file that will trigger compaction.
/// Defaults to `0.6`.
pub fn fragmentation_trigger(&mut self, fragmentation_trigger: f64) -> &mut CaskOptions {
self.fragmentation_trigger = fragmentation_trigger;
self
}
/// Sets the minimum amount of data occupied by dead entries in a single file that will trigger
/// compaction. Defaults to `512MB`.
pub fn dead_bytes_trigger(&mut self, dead_bytes_trigger: u64) -> &mut CaskOptions {
self.dead_bytes_trigger = dead_bytes_trigger;
self
}
/// Sets the ratio of dead entries to total entries in a file that will cause it to be included
/// in a compaction. Defaults to `0.4`.
pub fn fragmentation_threshold(&mut self, fragmentation_threshold: f64) -> &mut CaskOptions {
self.fragmentation_threshold = fragmentation_threshold;
self
}
/// Sets the minimum amount of data occupied by dead entries in a single file that will cause it
/// to be included in a compaction. Defaults to `128MB`.
pub fn dead_bytes_threshold(&mut self, dead_bytes_threshold: u64) -> &mut CaskOptions {
self.dead_bytes_threshold = dead_bytes_threshold;
self
}
/// Sets the minimum size a file must have to be excluded from compaction. Defaults to `10MB`.
pub fn small_file_threshold(&mut self, small_file_threshold: u64) -> &mut CaskOptions {
self.small_file_threshold = small_file_threshold;
self
}
/// Opens/creates a `Cask` at `path`.
pub fn open(&self, path: &str) -> Result<Cask> {
Cask::open(path, self.clone())
}
}
impl Cask {
/// Opens/creates a new `Cask`.
pub fn open(path: &str, options: CaskOptions) -> Result<Cask> {
info!("Opening database: {:?}", &path);
let mut log = Log::open(
path,
options.create,
options.sync == SyncStrategy::Always,
options.max_file_size,
options.file_pool_size,
)?;
let mut index = Index::new();
let mut sequence = 0;
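        // Rebuild the in-memory index from each data file's hint file,
        // falling back to a full scan of the data file when hints are
        // missing; track the highest sequence number seen so new writes
        // continue after it.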
for file_id in log.files() {
let mut f = |hint: Hint| {
if hint.sequence > sequence {
sequence = hint.sequence;
}
index.update(hint, file_id);
};
match log.hints(file_id)? {
Some(hints) => {
for hint in hints {
f(hint?);
}
}
None => {
for hint in log.recreate_hints(file_id)? {
f(hint?);
}
}
};
}
info!("Opened database: {:?}", &path);
info!("Current sequence number: {:?}", sequence);
let cask = Cask {
path: log.path.clone(),
options: options,
dropped: Arc::new(AtomicBool::new(false)),
inner: Arc::new(RwLock::new(CaskInner {
current_sequence: sequence + 1,
log: log,
index: index,
})),
compaction: Arc::new(Mutex::new(())),
};
if let SyncStrategy::Interval(millis) = cask.options.sync {
let cask = cask.clone();
thread::spawn(move || {
let duration = Duration::from_millis(millis as u64);
loop {
if cask.dropped.load(Ordering::SeqCst) {
info!(
"Cask has been dropped, background file sync \
thread is exiting"
);
break;
}
debug!("Background file sync");
cask.inner.read().unwrap().log.sync().unwrap();
thread::sleep(duration);
}
});
};
if cask.options.compaction {
let cask = cask.clone();
thread::spawn(move || {
let duration = Duration::from_secs(cask.options.compaction_check_frequency);
loop {
if cask.dropped.load(Ordering::SeqCst) {
info!(
"Cask has been dropped, background compaction \
thread is exiting"
);
break;
}
info!("Compaction thread wake up");
let current_hour = time::now().tm_hour as usize;
let (window_start, window_end) = cask.options.compaction_window;
let in_window = if window_start <= window_end {
current_hour >= window_start && current_hour <= window_end
                    } else {
                        // the window wraps past midnight, e.g. (22, 3)
                        current_hour >= window_start || current_hour <= window_end
};
if !in_window {
info!(
"Compaction outside defined window {:?}",
cask.options.compaction_window
);
                        // (no `continue` here: it would skip the sleep below and busy-loop)
                    } else if let Err(err) = cask.compact() {
warn!("Error during compaction: {}", err);
}
thread::sleep(duration);
}
});
}
Ok(cask)
}
fn compact_files_aux(&self, files: &[u32]) -> Result<(Vec<u32>, Vec<u32>)> {
let active_file_id = {
self.inner.read().unwrap().log.active_file_id
};
let compacted_files_hints = files.iter().flat_map(|&file_id| {
if active_file_id.is_some() && active_file_id.unwrap() == file_id {
None
} else {
self.inner
.read()
.unwrap()
.log
.hints(file_id)
.ok() // FIXME: log the error?
.and_then(|hints| hints.map(|h| (file_id, h)))
}
});
let mut compacted_files = Vec::new();
let mut new_files = Vec::new();
let mut deletes = HashMap::new();
let mut log_writer = {
// FIXME: turn into error
self.inner.read().unwrap().log.writer()
};
for (file_id, hints) in compacted_files_hints {
let mut inserts = Vec::new();
for hint in hints {
let hint = hint?;
let inner = self.inner.read().unwrap();
let index_entry = inner.index.get(&*hint.key);
if hint.deleted {
if index_entry.is_none() {
match deletes.entry(hint.key.to_vec()) {
HashMapEntry::Occupied(mut o) => {
if *o.get() < hint.sequence {
o.insert(hint.sequence);
}
}
HashMapEntry::Vacant(e) => {
e.insert(hint.sequence);
}
}
}
} else if index_entry.is_some() && index_entry.unwrap().sequence == hint.sequence {
inserts.push(hint)
}
}
for hint in inserts {
// FIXME: turn into error
let log = &self.inner.read().unwrap().log;
let log_write = log_writer.write(&log.read_entry(file_id, hint.entry_pos)?)?;
if let LogWrite::NewFile(file_id) = log_write {
new_files.push(file_id);
}
}
compacted_files.push(file_id);
}
for (key, sequence) in deletes {
log_writer.write(&Entry::deleted(sequence, key))?;
}
Ok((compacted_files, new_files))
}
fn compact_files(&self, files: &[u32]) -> Result<()> {
info!("Compacting data files: {:?}", files);
let (ref compacted_files, ref new_files) = self.compact_files_aux(files)?;
for &file_id in new_files {
let hints = {
self.inner.read().unwrap().log.hints(file_id)?
};
if let Some(hints) = hints {
for hint in hints {
let hint = hint?;
self.inner.write().unwrap().index.update(hint, file_id);
}
};
}
self.inner.write().unwrap().index.stats.remove_files(
compacted_files,
);
self.inner.write().unwrap().log.swap_files(
compacted_files,
new_files,
)?;
// FIXME: print files not compacted
info!(
"Finished compacting data files: {:?} into: {:?}",
compacted_files,
new_files
);
Ok(())
}
/// Trigger `Cask` log compaction.
pub fn compact(&self) -> Result<()> {
let _lock = self.compaction.lock().unwrap();
let active_file_id = {
self.inner.read().unwrap().log.active_file_id
};
let file_stats = {
self.inner.read().unwrap().index.stats.file_stats()
};
let mut files = BTreeSet::new();
let mut triggered = false;
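        // A file crossing a "trigger" starts a compaction run; once
        // triggered, files past the lower "threshold" values, plus small
        // files, are swept into the same run.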
for (file_id, fragmentation, dead_bytes) in file_stats {
if active_file_id.is_some() && file_id == active_file_id.unwrap() {
continue;
}<|fim▁hole|>
if !triggered {
if fragmentation >= self.options.fragmentation_trigger {
info!(
"File {} has fragmentation factor of {:.1}%, triggered compaction",
file_id,
fragmentation * 100.0
);
triggered = true;
files.insert(file_id);
} else if dead_bytes >= self.options.dead_bytes_trigger &&
!files.contains(&file_id)
{
info!(
"File {} has {} of dead data, triggered compaction",
file_id,
human_readable_byte_count(dead_bytes as usize, true)
);
triggered = true;
files.insert(file_id);
}
}
if fragmentation >= self.options.fragmentation_threshold && !files.contains(&file_id) {
info!(
"File {} has fragmentation factor of {:.1}%, adding for compaction",
file_id,
fragmentation * 100.0
);
files.insert(file_id);
} else if dead_bytes >= self.options.dead_bytes_threshold && !files.contains(&file_id) {
info!(
"File {} has {} of dead data, adding for compaction",
file_id,
human_readable_byte_count(dead_bytes as usize, true)
);
files.insert(file_id);
}
if !files.contains(&file_id) {
let file_size = {
self.inner.read().unwrap().log.file_size(file_id).ok()
};
if let Some(file_size) = file_size {
if file_size <= self.options.small_file_threshold {
info!(
"File {} has total size of {}, adding for compaction",
file_id,
human_readable_byte_count(file_size as usize, true)
);
files.insert(file_id);
}
};
}
}
if triggered {
let files: Vec<_> = files.into_iter().collect();
self.compact_files(&files)?;
} else if !files.is_empty() {
info!(
"Compaction of files {:?} aborted due to missing trigger",
&files
);
} else {
info!("No files eligible for compaction")
}
Ok(())
}
/// Returns the value corresponding to the key, if any.
pub fn get<K: AsRef<[u8]>>(&self, key: K) -> Result<Option<Vec<u8>>> {
self.inner.read().unwrap().get(key.as_ref())
}
/// Inserts a key-value pair into the map.
pub fn put<K: Into<Vec<u8>>, V: AsRef<[u8]>>(&self, key: K, value: V) -> Result<()> {
self.inner.write().unwrap().put(key.into(), value.as_ref())
}
/// Removes a key from the map.
pub fn delete<K: AsRef<[u8]>>(&self, key: K) -> Result<()> {
self.inner.write().unwrap().delete(key.as_ref())
}
/// Returns all keys stored in the map.
pub fn keys(&self) -> Vec<Vec<u8>> {
self.inner.read().unwrap().keys().cloned().collect()
}
}
impl Drop for Cask {
fn drop(&mut self) {
self.dropped.store(true, Ordering::SeqCst);
let _lock = self.compaction.lock().unwrap();
}
}
#[cfg(test)]
mod tests {
use cask::CaskOptions;
use std::fs;
#[test]
fn test_keys() {
let cask_result = CaskOptions::default()
.compaction_check_frequency(1)
.max_file_size(50 * 1024 * 1024)
.open("test.db");
assert!(cask_result.is_ok());
let cask = cask_result.unwrap();
let key1: &[u8] = &[0];
let key2: &[u8] = &[1];
let key3: &[u8] = &[2];
let val: &[u8] = &[0];
assert!(cask.put(key1, val).is_ok());
assert!(cask.put(key2, val).is_ok());
assert!(cask.put(key3, val).is_ok());
assert!(cask.delete(key3).is_ok());
let mut keys = cask.keys();
        // Keys are not guaranteed to be in order.
keys.sort();
assert_eq!(keys.len(), 2);
assert_eq!(keys[0], key1);
assert_eq!(keys[1], key2);
assert!(fs::remove_dir_all("test.db").is_ok());
}
}<|fim▁end|> | |
<|file_name|>align.rs<|end_file_name|><|fim▁begin|>macro_rules! expand_align {
() => {
s! {
#[cfg_attr(any(target_pointer_width = "32",
target_arch = "x86_64",
target_arch = "powerpc64",
target_arch = "mips64",
target_arch = "s390x",
target_arch = "sparc64"),
repr(align(4)))]
#[cfg_attr(not(any(target_pointer_width = "32",
target_arch = "x86_64",
target_arch = "powerpc64",
target_arch = "mips64",
target_arch = "s390x",
target_arch = "sparc64")),
repr(align(8)))]
pub struct pthread_mutexattr_t {
size: [u8; ::__SIZEOF_PTHREAD_MUTEXATTR_T],
}
#[repr(align(4))]
pub struct pthread_condattr_t {
size: [u8; ::__SIZEOF_PTHREAD_CONDATTR_T],
}
}
s_no_extra_traits! {
#[repr(align(8))]
#[allow(missing_debug_implementations)]
pub struct pthread_cond_t {
size: [u8; ::__SIZEOF_PTHREAD_COND_T],
}
#[cfg_attr(all(target_pointer_width = "32",<|fim▁hole|> repr(align(4)))]
#[cfg_attr(any(target_pointer_width = "64",
not(any(target_arch = "mips",
target_arch = "arm",
target_arch = "powerpc"))),
repr(align(8)))]
#[allow(missing_debug_implementations)]
pub struct pthread_mutex_t {
size: [u8; ::__SIZEOF_PTHREAD_MUTEX_T],
}
#[cfg_attr(all(target_pointer_width = "32",
any(target_arch = "mips",
target_arch = "arm",
target_arch = "powerpc")),
repr(align(4)))]
#[cfg_attr(any(target_pointer_width = "64",
not(any(target_arch = "mips",
target_arch = "arm",
target_arch = "powerpc"))),
repr(align(8)))]
#[allow(missing_debug_implementations)]
pub struct pthread_rwlock_t {
size: [u8; ::__SIZEOF_PTHREAD_RWLOCK_T],
}
}
}
}<|fim▁end|> | any(target_arch = "mips",
target_arch = "arm",
target_arch = "powerpc")), |
<|file_name|>ibm2.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Natural Language Toolkit: IBM Model 2
#
# Copyright (C) 2001-2013 NLTK Project
# Authors: Chin Yee Lee, Hengfeng Li, Ruxin Hou, Calvin Tanujaya Lim
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
Lexical translation model that considers word order.
IBM Model 2 improves on Model 1 by accounting for word order.
An alignment probability is introduced, a(i | j,l,m), which predicts
a source word position, given its aligned target word's position.
The EM algorithm used in Model 2 is:
E step - In the training data, collect counts, weighted by prior
probabilities.
(a) count how many times a source language word is translated
into a target language word
(b) count how many times a particular position in the source
sentence is aligned to a particular position in the target
sentence
M step - Estimate new probabilities based on the counts from the E step
Notations:
i: Position in the source sentence
Valid values are 0 (for NULL), 1, 2, ..., length of source sentence
j: Position in the target sentence
Valid values are 1, 2, ..., length of target sentence
l: Number of words in the source sentence, excluding NULL
m: Number of words in the target sentence
s: A word in the source language
t: A word in the target language
References:
Philipp Koehn. 2010. Statistical Machine Translation.
Cambridge University Press, New York.
Peter E Brown, Stephen A. Della Pietra, Vincent J. Della Pietra, and
Robert L. Mercer. 1993. The Mathematics of Statistical Machine
Translation: Parameter Estimation. Computational Linguistics, 19 (2),
263-311.
"""
from __future__ import division
from collections import defaultdict
from nltk.translate import AlignedSent
from nltk.translate import Alignment
from nltk.translate import IBMModel
from nltk.translate import IBMModel1
from nltk.translate.ibm_model import Counts
import warnings
class IBMModel2(IBMModel):
"""
Lexical translation model that considers word order
>>> bitext = []
>>> bitext.append(AlignedSent(['klein', 'ist', 'das', 'haus'], ['the', 'house', 'is', 'small']))
>>> bitext.append(AlignedSent(['das', 'haus', 'ist', 'ja', 'groß'], ['the', 'house', 'is', 'big']))
>>> bitext.append(AlignedSent(['das', 'buch', 'ist', 'ja', 'klein'], ['the', 'book', 'is', 'small']))
>>> bitext.append(AlignedSent(['das', 'haus'], ['the', 'house']))
>>> bitext.append(AlignedSent(['das', 'buch'], ['the', 'book']))
>>> bitext.append(AlignedSent(['ein', 'buch'], ['a', 'book']))
>>> ibm2 = IBMModel2(bitext, 5)
>>> print(round(ibm2.translation_table['buch']['book'], 3))
1.0
>>> print(round(ibm2.translation_table['das']['book'], 3))
0.0
>>> print(round(ibm2.translation_table['buch'][None], 3))
0.0
>>> print(round(ibm2.translation_table['ja'][None], 3))
0.0
>>> print(ibm2.alignment_table[1][1][2][2])
0.938...
>>> print(round(ibm2.alignment_table[1][2][2][2], 3))
0.0
>>> print(round(ibm2.alignment_table[2][2][4][5], 3))
1.0
>>> test_sentence = bitext[2]
>>> test_sentence.words
['das', 'buch', 'ist', 'ja', 'klein']
>>> test_sentence.mots
['the', 'book', 'is', 'small']
>>> test_sentence.alignment
Alignment([(0, 0), (1, 1), (2, 2), (3, 2), (4, 3)])
"""
def __init__(self, sentence_aligned_corpus, iterations,
probability_tables=None):
"""
Train on ``sentence_aligned_corpus`` and create a lexical
translation model and an alignment model.
Translation direction is from ``AlignedSent.mots`` to
``AlignedSent.words``.
:param sentence_aligned_corpus: Sentence-aligned parallel corpus
:type sentence_aligned_corpus: list(AlignedSent)
:param iterations: Number of iterations to run training algorithm
:type iterations: int
:param probability_tables: Optional. Use this to pass in custom
probability values. If not specified, probabilities will be
set to a uniform distribution, or some other sensible value.
If specified, all the following entries must be present:
``translation_table``, ``alignment_table``.
See ``IBMModel`` for the type and purpose of these tables.
:type probability_tables: dict[str]: object
"""
super(IBMModel2, self).__init__(sentence_aligned_corpus)
if probability_tables is None:
# Get translation probabilities from IBM Model 1
# Run more iterations of training for Model 1, since it is
# faster than Model 2
ibm1 = IBMModel1(sentence_aligned_corpus, 2 * iterations)
self.translation_table = ibm1.translation_table
self.set_uniform_probabilities(sentence_aligned_corpus)
else:
# Set user-defined probabilities
self.translation_table = probability_tables['translation_table']
self.alignment_table = probability_tables['alignment_table']
for n in range(0, iterations):
self.train(sentence_aligned_corpus)
self.__align_all(sentence_aligned_corpus)
def set_uniform_probabilities(self, sentence_aligned_corpus):
# a(i | j,l,m) = 1 / (l+1) for all i, j, l, m
l_m_combinations = set()
for aligned_sentence in sentence_aligned_corpus:
l = len(aligned_sentence.mots)
m = len(aligned_sentence.words)
if (l, m) not in l_m_combinations:
l_m_combinations.add((l, m))
initial_prob = 1 / (l + 1)
if initial_prob < IBMModel.MIN_PROB:
warnings.warn("A source sentence is too long (" + str(l) +
" words). Results may be less accurate.")
for i in range(0, l + 1):
for j in range(1, m + 1):
self.alignment_table[i][j][l][m] = initial_prob
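    # Editor's sketch (hypothetical numbers): with the uniform initialization
    # above, a source sentence of l = 4 gives each of the five candidates
    # (NULL plus positions 1..4) a(i | j, 4, m) = 1 / (4 + 1) = 0.2, so the
    # first E step is driven entirely by the Model 1 translation
    # probabilities.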
def train(self, parallel_corpus):
counts = Model2Counts()
for aligned_sentence in parallel_corpus:
src_sentence = [None] + aligned_sentence.mots
trg_sentence = ['UNUSED'] + aligned_sentence.words # 1-indexed
l = len(aligned_sentence.mots)
m = len(aligned_sentence.words)
# E step (a): Compute normalization factors to weigh counts
total_count = self.prob_all_alignments(src_sentence, trg_sentence)
# E step (b): Collect counts
for j in range(1, m + 1):
t = trg_sentence[j]
for i in range(0, l + 1):
s = src_sentence[i]
count = self.prob_alignment_point(
i, j, src_sentence, trg_sentence)
normalized_count = count / total_count[t]
counts.update_lexical_translation(normalized_count, s, t)
counts.update_alignment(normalized_count, i, j, l, m)
# M step: Update probabilities with maximum likelihood estimates
self.maximize_lexical_translation_probabilities(counts)
self.maximize_alignment_probabilities(counts)
def maximize_alignment_probabilities(self, counts):
MIN_PROB = IBMModel.MIN_PROB
for i, j_s in counts.alignment.items():
for j, src_sentence_lengths in j_s.items():
for l, trg_sentence_lengths in src_sentence_lengths.items():
for m in trg_sentence_lengths:
estimate = (counts.alignment[i][j][l][m] /
counts.alignment_for_any_i[j][l][m])
self.alignment_table[i][j][l][m] = max(estimate,
MIN_PROB)
<|fim▁hole|>
Each entry in the return value represents the contribution to
the total alignment probability by the target word t.
To obtain probability(alignment | src_sentence, trg_sentence),
simply sum the entries in the return value.
:return: Probability of t for all s in ``src_sentence``
:rtype: dict(str): float
"""
alignment_prob_for_t = defaultdict(lambda: 0.0)
for j in range(1, len(trg_sentence)):
t = trg_sentence[j]
for i in range(0, len(src_sentence)):
alignment_prob_for_t[t] += self.prob_alignment_point(
i, j, src_sentence, trg_sentence)
return alignment_prob_for_t
def prob_alignment_point(self, i, j, src_sentence, trg_sentence):
"""
Probability that position j in ``trg_sentence`` is aligned to
position i in the ``src_sentence``
"""
l = len(src_sentence) - 1
m = len(trg_sentence) - 1
s = src_sentence[i]
t = trg_sentence[j]
return self.translation_table[t][s] * self.alignment_table[i][j][l][m]
def prob_t_a_given_s(self, alignment_info):
"""
Probability of target sentence and an alignment given the
source sentence
"""
prob = 1.0
l = len(alignment_info.src_sentence) - 1
m = len(alignment_info.trg_sentence) - 1
for j, i in enumerate(alignment_info.alignment):
if j == 0:
                continue  # skip the dummy zeroth element
trg_word = alignment_info.trg_sentence[j]
src_word = alignment_info.src_sentence[i]
prob *= (self.translation_table[trg_word][src_word] *
self.alignment_table[i][j][l][m])
return max(prob, IBMModel.MIN_PROB)
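    # Editor's sketch: for alignment [0, 2, 1] (index 0 is the dummy element),
    # the product above is
    #   t(trg[1] | src[2]) * a(2 | 1, l, m) * t(trg[2] | src[1]) * a(1 | 2, l, m),
    # floored at IBMModel.MIN_PROB.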
def __align_all(self, parallel_corpus):
for sentence_pair in parallel_corpus:
self.__align(sentence_pair)
def __align(self, sentence_pair):
"""
Determines the best word alignment for one sentence pair from
the corpus that the model was trained on.
The best alignment will be set in ``sentence_pair`` when the
method returns. In contrast with the internal implementation of
IBM models, the word indices in the ``Alignment`` are zero-
indexed, not one-indexed.
:param sentence_pair: A sentence in the source language and its
counterpart sentence in the target language
:type sentence_pair: AlignedSent
"""
best_alignment = []
l = len(sentence_pair.mots)
m = len(sentence_pair.words)
for j, trg_word in enumerate(sentence_pair.words):
# Initialize trg_word to align with the NULL token
best_prob = (self.translation_table[trg_word][None] *
self.alignment_table[0][j + 1][l][m])
best_prob = max(best_prob, IBMModel.MIN_PROB)
best_alignment_point = None
for i, src_word in enumerate(sentence_pair.mots):
align_prob = (self.translation_table[trg_word][src_word] *
self.alignment_table[i + 1][j + 1][l][m])
if align_prob >= best_prob:
best_prob = align_prob
best_alignment_point = i
best_alignment.append((j, best_alignment_point))
sentence_pair.alignment = Alignment(best_alignment)
class Model2Counts(Counts):
"""
Data object to store counts of various parameters during training.
Includes counts for alignment.
"""
def __init__(self):
super(Model2Counts, self).__init__()
self.alignment = defaultdict(
lambda: defaultdict(lambda: defaultdict(lambda: defaultdict(
lambda: 0.0))))
self.alignment_for_any_i = defaultdict(
lambda: defaultdict(lambda: defaultdict(lambda: 0.0)))
def update_lexical_translation(self, count, s, t):
self.t_given_s[t][s] += count
self.any_t_given_s[s] += count
def update_alignment(self, count, i, j, l, m):
self.alignment[i][j][l][m] += count
self.alignment_for_any_i[j][l][m] += count<|fim▁end|> | def prob_all_alignments(self, src_sentence, trg_sentence):
"""
Computes the probability of all possible word alignments,
expressed as a marginal distribution over target words t |
<|file_name|>charm.go<|end_file_name|><|fim▁begin|>package charm
import (
"errors"
"fmt"
"os"
)
// The Charm interface is implemented by any type that
// may be handled as a charm.
type Charm interface {
Meta() *Meta
Config() *Config
Revision() int
}
// Read reads a Charm from path, which can point to either a charm bundle or a
// charm directory.
func Read(path string) (Charm, error) {
info, err := os.Stat(path)
if err != nil {
return nil, err
}
if info.IsDir() {
return ReadDir(path)
}
return ReadBundle(path)
}
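// Editor's sketch (hypothetical path, not part of the original package):
//
//	ch, err := Read("/path/to/mycharm")
//	if err == nil {
//		_ = ch.Meta() // charm metadata, via the Charm interface above
//	}
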
// InferRepository returns a charm repository inferred from
// the provided URL. Local URLs will use the provided path.
func InferRepository(curl *URL, localRepoPath string) (repo Repository, err error) {
switch curl.Schema {
case "cs":
repo = Store()
case "local":
if localRepoPath == "" {
return nil, errors.New("path to local repository not specified")
}
repo = &LocalRepository{localRepoPath}<|fim▁hole|> default:
return nil, fmt.Errorf("unknown schema for charm URL %q", curl)
}
return
}<|fim▁end|> | |
<|file_name|>FileParser.java<|end_file_name|><|fim▁begin|>package softuni.io;
import org.springframework.stereotype.Component;<|fim▁hole|>@Component
public class FileParser {
public String readFile(String path) throws IOException {
StringBuilder stringBuilder = new StringBuilder();
try (InputStream is = this.getClass().getResourceAsStream(path);
BufferedReader bfr = new BufferedReader(new InputStreamReader(is))) {
String line = bfr.readLine();
while (line != null) {
stringBuilder.append(line);
line = bfr.readLine();
}
}
return stringBuilder.toString();
}
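    // Editor's sketch (hypothetical helper, not in the original class):
    // readFile above concatenates lines without separators, so "a\nb" comes
    // back as "ab". A variant that preserves line breaks, if that behaviour
    // is wanted:
    public String readFileKeepingNewlines(String path) throws IOException {
        StringBuilder sb = new StringBuilder();
        try (InputStream is = this.getClass().getResourceAsStream(path);
             BufferedReader bfr = new BufferedReader(new InputStreamReader(is))) {
            String line;
            while ((line = bfr.readLine()) != null) {
                sb.append(line).append(System.lineSeparator());
            }
        }
        return sb.toString();
    }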
public void writeFile(String path, String content) throws IOException {
File file = new File(System.getProperty("user.dir") + File.separator + path);
if (!file.exists()) {
file.createNewFile();
}
try (OutputStream os = new FileOutputStream(System.getProperty("user.dir")+ File.separator + path);
BufferedWriter bfw = new BufferedWriter(new OutputStreamWriter(os))) {
bfw.write(content);
}
}
}<|fim▁end|> |
import java.io.*;
|
<|file_name|>calculate_intermediate_sums.py<|end_file_name|><|fim▁begin|>"""
Post processing (subset of columns) to calculate intermediate sum edit counts
and other variables. Date sorted.
Usage:
calculate_intermediate_sums (-h|--help)
calculate_intermediate_sums <input> <output>
[--debug]
[--verbose]
Options:
-h, --help This help message is printed
<input> Path to file to process.
    <output>                Where revision results
                            will be written
--debug Print debug logging to stderr
--verbose Print dots and stuff to stderr
"""
import docopt
import sys
import logging
import operator
from collections import defaultdict
import mysqltsv
logger = logging.getLogger(__name__)
def main(argv=None):<|fim▁hole|> logging.basicConfig(
level=logging.INFO if not args['--debug'] else logging.DEBUG,
format='%(asctime)s %(levelname)s:%(name)s -- %(message)s'
)
input_file = mysqltsv.Reader(open(args['<input>'], "r"), headers=True,
types=[int, float, int, int, int, int])
output_file = mysqltsv.Writer(open(args['<output>'], "w"), headers=[
'yyyymm', 'aligned_entities', 'difference_in_alignment_with_previous',
'bot_edits', 'semi_automated_edits', 'non_bot_edits', 'anon_edits',
'current_bot_edits_count', 'current_semi_automated_edits_count',
'current_non_bot_edits_count', 'current_anon_edits_count'])
verbose = args['--verbose']
run(input_file, output_file, verbose)
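# Editor's sketch (hypothetical file names): a typical invocation per the
# docopt usage block above:
#
#   python calculate_intermediate_sums.py monthly_edits.tsv sums.tsv --verbose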
def run(input_file, output_file, verbose):
current_bot_edits_count = 0
semi_automated_edits_count = 0
non_bot_edits_count = 0
anon_edits_count = 0
all_edits_count = 0
previous_alignment = 0
for i, line in enumerate(input_file):
current_bot_edits_count += line['bot_edits']
semi_automated_edits_count += line['semi_automated_edits']
non_bot_edits_count += line['non_bot_edits']
anon_edits_count += line['anon_edits']
output_file.write([line['yyyymm'],
line['aligned_entities'],
line['aligned_entities'] - previous_alignment,
line['bot_edits'],
line['semi_automated_edits'],
line['non_bot_edits'],
line['anon_edits'],
current_bot_edits_count,
semi_automated_edits_count,
non_bot_edits_count,
anon_edits_count])
previous_alignment = line['aligned_entities']
main()<|fim▁end|> | args = docopt.docopt(__doc__) |
<|file_name|>conn_sp.go<|end_file_name|><|fim▁begin|>package freetds
import (
"errors"
"fmt"
"unsafe"
)
/*
#cgo LDFLAGS: -lsybdb
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <errno.h>
#include <unistd.h>
#include <libgen.h>
#include <sybfront.h>
#include <sybdb.h>
*/
import "C"
//Execute stored procedure by name and list of params.
//
//Example:
// conn.ExecSp("sp_help", "authors")
func (conn *Conn) ExecSp(spName string, params ...interface{}) (*SpResult, error) {
if conn.isDead() || conn.isMirrorSlave() {
if err := conn.reconnect(); err != nil {
return nil, err
}
}
	//hold references to data sent to the C code until the end of this function;
	//without this the GC could free memory the C side still uses and we would get a SIGSEGV
refHolder := make([]*[]byte, 0)
conn.clearMessages()
name := C.CString(spName)
defer C.free(unsafe.Pointer(name))
if C.dbrpcinit(conn.dbproc, name, 0) == C.FAIL {
return nil, conn.raiseError("dbrpcinit failed")
}
//input params
spParams, err := conn.getSpParams(spName)
if err != nil {
return nil, err
}
for i, spParam := range spParams {
		//get datavalue for the supplied stored procedure parameter
var datavalue *C.BYTE
datalen := 0
if i < len(params) {
param := params[i]
if param != nil {
data, sqlDatalen, err := typeToSqlBuf(int(spParam.UserTypeId), param, conn.freetdsVersionGte095)
if err != nil {
conn.Close() //close the connection
return nil, err
}
if len(data) > 0 {
datalen = sqlDatalen
datavalue = (*C.BYTE)(unsafe.Pointer(&data[0]))
refHolder = append(refHolder, &data)
}
}
}
		//set parameter values, call dbrpcparam
if i < len(params) || spParam.IsOutput {
maxOutputSize := C.DBINT(-1)
status := C.BYTE(0)
if spParam.IsOutput {
status = C.DBRPCRETURN
maxOutputSize = C.DBINT(spParam.MaxLength)
if maxOutputSize == -1 {
maxOutputSize = 8000
}
}
paramname := C.CString(spParam.Name)
defer C.free(unsafe.Pointer(paramname))
if C.dbrpcparam(conn.dbproc, paramname, status,
C.int(spParam.UserTypeId), maxOutputSize, C.DBINT(datalen), datavalue) == C.FAIL {
return nil, errors.New("dbrpcparam failed")
}
}
}
//execute
if C.dbrpcsend(conn.dbproc) == C.FAIL {
return nil, conn.raiseError("dbrpcsend failed")
}
//results
result := NewSpResult()
result.results, err = conn.fetchResults()
if err != nil {
return nil, conn.raise(err)
}
//return status
if C.dbhasretstat(conn.dbproc) == C.TRUE {
result.status = int(C.dbretstatus(conn.dbproc))
}
//read output params
numOutParams := int(C.dbnumrets(conn.dbproc))
result.outputParams = make([]*SpOutputParam, numOutParams)
for i := 1; i <= numOutParams; i++ {
j := C.int(i)
len := C.dbretlen(conn.dbproc, j)
name := C.GoString(C.dbretname(conn.dbproc, j))
typ := int(C.dbrettype(conn.dbproc, j))
data := C.GoBytes(unsafe.Pointer(C.dbretdata(conn.dbproc, j)), len)
value := sqlBufToType(typ, data)
param := &SpOutputParam{Name: name, Value: value}
result.outputParams[i-1] = param
}
return result, nil
}
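//Editor's sketch (not part of the original package): ExecSp resolves the
//parameter list itself via getSpParams below, so callers pass positional
//values only, e.g.
//
//	result, err := conn.ExecSp("sp_help", "authors")
//
//Output parameters and the return status end up in the returned SpResult.
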
func (conn *Conn) raise(err error) error {
if len(conn.Error) != 0 {
return errors.New(fmt.Sprintf("%s\n%s", conn.Error, conn.Message))
}
return err
}
func (conn *Conn) raiseError(errMsg string) error {
return conn.raise(errors.New(errMsg))
}
// func toRpcParam(datatype int, value interface{}) (datalen C.DBINT, datavalue *C.BYTE, err error) {
// data, err := typeToSqlBuf(datatype, value)
// if err != nil {
// return
// }
// datalen = C.DBINT(len(data))
// if len(data) > 0 {
// datavalue = (*C.BYTE)(unsafe.Pointer(&data[0]))
// }
// //fmt.Printf("\ndatavalue: %v, datalen: %v, data: %v %s\n", datavalue, datalen, data, data)
// return
// }
//Stored procedure parameter definition
type spParam struct {
Name string
ParameterId int32
UserTypeId int32
IsOutput bool
MaxLength int16
Precision uint8
Scale uint8
}
//Read stored procedure parameters.
//Will cache params in the connection or pool and reuse them.
func (conn *Conn) getSpParams(spName string) ([]*spParam, error) {<|fim▁hole|> sql := conn.getSpParamsSql(spName)
results, err := conn.exec(sql)
if err != nil {
return nil, err
}
r := results[0]
spParams := make([]*spParam, len(r.Rows))
for i := 0; r.Next(); i++ {
p := &spParam{}
err := r.Scan(&p.Name, &p.ParameterId, &p.UserTypeId, &p.IsOutput, &p.MaxLength, &p.Precision, &p.Scale)
//fixme: mapping uniqueidentifier, datetimeoffset, date, time, datetime2 to string
if p.UserTypeId == 0x24 || p.UserTypeId == 0x2B || p.UserTypeId == 0x28 || p.UserTypeId == 0x29 || p.UserTypeId == 0x2A {
p.UserTypeId = 0x27
}
if err != nil {
return nil, err
}
spParams[i] = p
}
conn.spParamsCache.Set(spName, spParams)
return spParams, nil
}
const msSqlGetSpParamsSql string = `
select name, parameter_id, user_type_id, is_output, max_length, precision, scale
from sys.all_parameters
where object_id = (select object_id from sys.all_objects where object_id = object_id('%s'))
order by parameter_id
`
const sybaseAseGetSpParamsSql string = `
select name = c.name,
parameter_id = c.id,
user_type_id = c.type,
is_output = case
when c.status2 = 2 or c.status2 = 4 then 1
else 0
end,
max_length = c.length,
precision = isnull(c.prec,0),
scale = isnull(c.scale,0)
from sysobjects o
join syscolumns c
on c.id = o.id
where o.name = '%s'
order by c.id, c.colid
`
func (conn *Conn) getSpParamsSql(spName string) string {
if conn.sybaseMode() || conn.sybaseMode125() {
return fmt.Sprintf(sybaseAseGetSpParamsSql, spName)
}
return fmt.Sprintf(msSqlGetSpParamsSql, spName)
}<|fim▁end|> | if spParams, ok := conn.spParamsCache.Get(spName); ok {
return spParams, nil
}
|
<|file_name|>core.py<|end_file_name|><|fim▁begin|># (c) 2012, Jeroen Hoekx <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import base64
import crypt
import glob
import hashlib
import itertools
import json
import ntpath
import os.path
import re
import string
import sys
import uuid
from collections import MutableMapping, MutableSequence
from datetime import datetime
from functools import partial
from random import Random, SystemRandom, shuffle
import yaml
from jinja2.filters import environmentfilter, do_groupby as _do_groupby
try:
import passlib.hash
HAS_PASSLIB = True
except:
HAS_PASSLIB = False
from ansible import errors
from ansible.module_utils.six import iteritems, string_types, integer_types
from ansible.module_utils.six.moves import reduce, shlex_quote
from ansible.module_utils._text import to_bytes, to_text
from ansible.parsing.yaml.dumper import AnsibleDumper
from ansible.utils.hashing import md5s, checksum_s
from ansible.utils.unicode import unicode_wrap
from ansible.utils.vars import merge_hash
from ansible.vars.hostvars import HostVars
UUID_NAMESPACE_ANSIBLE = uuid.UUID('361E6D51-FAEC-444A-9079-341386DA8E2E')
class AnsibleJSONEncoder(json.JSONEncoder):
'''
Simple encoder class to deal with JSON encoding of internal
types like HostVars
'''
def default(self, o):
if isinstance(o, HostVars):
return dict(o)
else:
return super(AnsibleJSONEncoder, self).default(o)
def to_yaml(a, *args, **kw):
'''Make verbose, human readable yaml'''
transformed = yaml.dump(a, Dumper=AnsibleDumper, allow_unicode=True, **kw)
return to_text(transformed)
def to_nice_yaml(a, indent=4, *args, **kw):
'''Make verbose, human readable yaml'''
transformed = yaml.dump(a, Dumper=AnsibleDumper, indent=indent, allow_unicode=True, default_flow_style=False, **kw)
return to_text(transformed)
def to_json(a, *args, **kw):
''' Convert the value to JSON '''
return json.dumps(a, cls=AnsibleJSONEncoder, *args, **kw)
def to_nice_json(a, indent=4, *args, **kw):
'''Make verbose, human readable JSON'''
# python-2.6's json encoder is buggy (can't encode hostvars)
if sys.version_info < (2, 7):
try:
import simplejson
except ImportError:
pass
else:
try:
major = int(simplejson.__version__.split('.')[0])
except:
pass
else:
if major >= 2:
return simplejson.dumps(a, indent=indent, sort_keys=True, *args, **kw)
try:
return json.dumps(a, indent=indent, sort_keys=True, cls=AnsibleJSONEncoder, *args, **kw)
except:
# Fallback to the to_json filter
return to_json(a, *args, **kw)
def to_bool(a):
''' return a bool for the arg '''
if a is None or isinstance(a, bool):
return a
if isinstance(a, string_types):
a = a.lower()
if a in ('yes', 'on', '1', 'true', 1):
return True
return False
def to_datetime(string, format="%Y-%d-%m %H:%M:%S"):
return datetime.strptime(string, format)
def quote(a):
''' return its argument quoted for shell usage '''
return shlex_quote(a)
def fileglob(pathname):
''' return list of matched regular files for glob '''
return [ g for g in glob.glob(pathname) if os.path.isfile(g) ]
def regex_replace(value='', pattern='', replacement='', ignorecase=False):
''' Perform a `re.sub` returning a string '''
value = to_text(value, errors='surrogate_or_strict', nonstring='simplerepr')
if ignorecase:
flags = re.I
else:
flags = 0
_re = re.compile(pattern, flags=flags)
return _re.sub(replacement, value)
def regex_findall(value, regex, multiline=False, ignorecase=False):
''' Perform re.findall and return the list of matches '''
flags = 0
if ignorecase:
flags |= re.I
if multiline:
flags |= re.M
return re.findall(regex, value, flags)
def regex_search(value, regex, *args, **kwargs):
''' Perform re.search and return the list of matches or a backref '''
groups = list()
for arg in args:
if arg.startswith('\\g'):
match = re.match(r'\\g<(\S+)>', arg).group(1)
groups.append(match)
elif arg.startswith('\\'):
match = int(re.match(r'\\(\d+)', arg).group(1))
groups.append(match)
else:
raise errors.AnsibleFilterError('Unknown argument')
flags = 0
if kwargs.get('ignorecase'):
flags |= re.I
if kwargs.get('multiline'):
flags |= re.M
match = re.search(regex, value, flags)
if match:
if not groups:
return match.group()
else:
items = list()
for item in groups:
items.append(match.group(item))
return items
def ternary(value, true_val, false_val):
''' value ? true_val : false_val '''
if value:
return true_val
else:
return false_val
def regex_escape(string):
'''Escape all regular expressions special characters from STRING.'''
return re.escape(string)
def from_yaml(data):
if isinstance(data, string_types):
return yaml.safe_load(data)
return data
@environmentfilter
def rand(environment, end, start=None, step=None, seed=None):
if seed is None:
r = SystemRandom()
else:
r = Random(seed)
if isinstance(end, integer_types):
if not start:
start = 0
if not step:
step = 1
return r.randrange(start, end, step)
elif hasattr(end, '__iter__'):
if start or step:
raise errors.AnsibleFilterError('start and step can only be used with integer values')
return r.choice(end)
else:
raise errors.AnsibleFilterError('random can only be used on sequences and integers')
def randomize_list(mylist, seed=None):
try:
mylist = list(mylist)
if seed:
r = Random(seed)
r.shuffle(mylist)
else:
shuffle(mylist)
except:
pass
return mylist
def get_hash(data, hashtype='sha1'):
try: # see if hash is supported
h = hashlib.new(hashtype)
except:
return None
h.update(to_bytes(data, errors='surrogate_then_strict'))
return h.hexdigest()
def get_encrypted_password(password, hashtype='sha512', salt=None):
# TODO: find a way to construct dynamically from system
cryptmethod= {
'md5': '1',
'blowfish': '2a',
'sha256': '5',
'sha512': '6',
}
if hashtype in cryptmethod:
if salt is None:
r = SystemRandom()
if hashtype in ['md5']:
saltsize = 8
else:
saltsize = 16
saltcharset = string.ascii_letters + string.digits + '/.'
salt = ''.join([r.choice(saltcharset) for _ in range(saltsize)])
if not HAS_PASSLIB:
if sys.platform.startswith('darwin'):
raise errors.AnsibleFilterError('|password_hash requires the passlib python module to generate password hashes on Mac OS X/Darwin')
saltstring = "$%s$%s" % (cryptmethod[hashtype],salt)
encrypted = crypt.crypt(password, saltstring)
else:
if hashtype == 'blowfish':
cls = passlib.hash.bcrypt
else:
cls = getattr(passlib.hash, '%s_crypt' % hashtype)
encrypted = cls.encrypt(password, salt=salt)
return encrypted
return None
def to_uuid(string):
return str(uuid.uuid5(UUID_NAMESPACE_ANSIBLE, str(string)))
def mandatory(a):
    ''' Make a variable mandatory '''
    from jinja2.runtime import Undefined
if isinstance(a, Undefined):
raise errors.AnsibleFilterError('Mandatory variable not defined.')
return a
def combine(*terms, **kwargs):
recursive = kwargs.get('recursive', False)
if len(kwargs) > 1 or (len(kwargs) == 1 and 'recursive' not in kwargs):
raise errors.AnsibleFilterError("'recursive' is the only valid keyword argument")
for t in terms:
if not isinstance(t, dict):
raise errors.AnsibleFilterError("|combine expects dictionaries, got " + repr(t))
if recursive:
return reduce(merge_hash, terms)
else:
return dict(itertools.chain(*map(iteritems, terms)))
def comment(text, style='plain', **kw):
# Predefined comment types
comment_styles = {
'plain': {
'decoration': '# '
},
'erlang': {
'decoration': '% '
},
'c': {
'decoration': '// '
},
'cblock': {
'beginning': '/*',
'decoration': ' * ',
'end': ' */'
},
'xml': {
'beginning': '<!--',
'decoration': ' - ',
'end': '-->'
}
}
# Pointer to the right comment type
style_params = comment_styles[style]
if 'decoration' in kw:
prepostfix = kw['decoration']
else:
prepostfix = style_params['decoration']
# Default params
p = {
'newline': '\n',
'beginning': '',
'prefix': (prepostfix).rstrip(),
'prefix_count': 1,
'decoration': '',
'postfix': (prepostfix).rstrip(),
'postfix_count': 1,
'end': ''
}
# Update default params
p.update(style_params)
p.update(kw)
# Compose substrings for the final string
str_beginning = ''
if p['beginning']:
str_beginning = "%s%s" % (p['beginning'], p['newline'])
str_prefix = ''
if p['prefix']:
if p['prefix'] != p['newline']:
str_prefix = str(
"%s%s" % (p['prefix'], p['newline'])) * int(p['prefix_count'])
else:
str_prefix = str(
"%s" % (p['newline'])) * int(p['prefix_count'])
str_text = ("%s%s" % (
p['decoration'],
# Prepend each line of the text with the decorator
text.replace(
p['newline'], "%s%s" % (p['newline'], p['decoration'])))).replace(
# Remove trailing spaces when only decorator is on the line
"%s%s" % (p['decoration'], p['newline']),
"%s%s" % (p['decoration'].rstrip(), p['newline']))
str_postfix = p['newline'].join(
[''] + [p['postfix'] for x in range(p['postfix_count'])])
str_end = ''
if p['end']:
str_end = "%s%s" % (p['newline'], p['end'])
# Return the final string
return "%s%s%s%s%s" % (
str_beginning,
str_prefix,
str_text,
str_postfix,
str_end)
def extract(item, container, morekeys=None):
from jinja2.runtime import Undefined
value = container[item]
if value is not Undefined and morekeys is not None:
if not isinstance(morekeys, list):
morekeys = [morekeys]
try:
value = reduce(lambda d, k: d[k], morekeys, value)
except KeyError:
value = Undefined()
return value
def failed(*a, **kw):
''' Test if task result yields failed '''
item = a[0]
if not isinstance(item, MutableMapping):
raise errors.AnsibleFilterError("|failed expects a dictionary")
rc = item.get('rc', 0)
failed = item.get('failed', False)
if rc != 0 or failed:
return True
else:
return False
def success(*a, **kw):
''' Test if task result yields success '''
return not failed(*a, **kw)
def changed(*a, **kw):
''' Test if task result yields changed '''
item = a[0]
if not isinstance(item, MutableMapping):
raise errors.AnsibleFilterError("|changed expects a dictionary")
    if 'changed' not in item:
changed = False
if ('results' in item # some modules return a 'results' key
and isinstance(item['results'], MutableSequence)
and isinstance(item['results'][0], MutableMapping)):
for result in item['results']:
changed = changed or result.get('changed', False)
else:
changed = item.get('changed', False)
return changed
def skipped(*a, **kw):
''' Test if task result yields skipped '''
item = a[0]
if not isinstance(item, MutableMapping):
raise errors.AnsibleFilterError("|skipped expects a dictionary")
skipped = item.get('skipped', False)
return skipped
@environmentfilter
def do_groupby(environment, value, attribute):
"""Overridden groupby filter for jinja2, to address an issue with
jinja2>=2.9.0,<2.9.5 where a namedtuple was returned which
has repr that prevents ansible.template.safe_eval.safe_eval from being
able to parse and eval the data.
jinja2<2.9.0,>=2.9.5 is not affected, as <2.9.0 uses a tuple, and
>=2.9.5 uses a standard tuple repr on the namedtuple.
The adaptation here, is to run the jinja2 `do_groupby` function, and
cast all of the namedtuples to a regular tuple.
See https://github.com/ansible/ansible/issues/20098
We may be able to remove this in the future.
"""
return [tuple(t) for t in _do_groupby(environment, value, attribute)]
def b64encode(string):
return to_text(base64.b64encode(to_bytes(string, errors='surrogate_then_strict')))
def b64decode(string):
return to_text(base64.b64decode(to_bytes(string, errors='surrogate_then_strict')))
<|fim▁hole|>
def filters(self):
return {
# jinja2 overrides
'groupby': do_groupby,
# base 64
'b64decode': b64decode,
'b64encode': b64encode,
# uuid
'to_uuid': to_uuid,
# json
'to_json': to_json,
'to_nice_json': to_nice_json,
'from_json': json.loads,
# yaml
'to_yaml': to_yaml,
'to_nice_yaml': to_nice_yaml,
'from_yaml': from_yaml,
#date
'to_datetime': to_datetime,
# path
'basename': partial(unicode_wrap, os.path.basename),
'dirname': partial(unicode_wrap, os.path.dirname),
'expanduser': partial(unicode_wrap, os.path.expanduser),
'realpath': partial(unicode_wrap, os.path.realpath),
'relpath': partial(unicode_wrap, os.path.relpath),
'splitext': partial(unicode_wrap, os.path.splitext),
'win_basename': partial(unicode_wrap, ntpath.basename),
'win_dirname': partial(unicode_wrap, ntpath.dirname),
'win_splitdrive': partial(unicode_wrap, ntpath.splitdrive),
# value as boolean
'bool': to_bool,
# quote string for shell usage
'quote': quote,
# hash filters
# md5 hex digest of string
'md5': md5s,
# sha1 hex digeset of string
'sha1': checksum_s,
# checksum of string as used by ansible for checksuming files
'checksum': checksum_s,
# generic hashing
'password_hash': get_encrypted_password,
'hash': get_hash,
# file glob
'fileglob': fileglob,
# regex
'regex_replace': regex_replace,
'regex_escape': regex_escape,
'regex_search': regex_search,
'regex_findall': regex_findall,
# ? : ;
'ternary': ternary,
# list
# random stuff
'random': rand,
'shuffle': randomize_list,
# undefined
'mandatory': mandatory,
# merge dicts
'combine': combine,
# comment-style decoration
'comment': comment,
# array and dict lookups
'extract': extract,
# failure testing
'failed' : failed,
'failure' : failed,
'success' : success,
'succeeded' : success,
# changed testing
'changed' : changed,
'change' : changed,
# skip testing
'skipped' : skipped,
'skip' : skipped,
# debug
'type_debug': lambda o: o.__class__.__name__,
}<|fim▁end|> |
class FilterModule(object):
''' Ansible core jinja2 filters ''' |
<|file_name|>components.rs<|end_file_name|><|fim▁begin|>use crate::dist::prefix::InstallPrefix;
use crate::errors::*;
/// The representation of the installed toolchain and its components.
/// `Components` and `DirectoryPackage` are the two sides of the
/// installation / uninstallation process.
use crate::utils::utils;
use crate::dist::component::package::{INSTALLER_VERSION, VERSION_FILE};
use crate::dist::component::transaction::Transaction;
use std::fs::File;
use std::path::{Path, PathBuf};
const COMPONENTS_FILE: &str = "components";
#[derive(Clone, Debug)]
pub struct Components {
prefix: InstallPrefix,
}
impl Components {
pub fn open(prefix: InstallPrefix) -> Result<Self> {
let c = Self { prefix };
// Validate that the metadata uses a format we know
if let Some(v) = c.read_version()? {
if v != INSTALLER_VERSION {
return Err(ErrorKind::BadInstalledMetadataVersion(v).into());
}
}
Ok(c)
}
fn rel_components_file(&self) -> PathBuf {
self.prefix.rel_manifest_file(COMPONENTS_FILE)
}
fn rel_component_manifest(&self, name: &str) -> PathBuf {
self.prefix.rel_manifest_file(&format!("manifest-{}", name))
}
fn read_version(&self) -> Result<Option<String>> {
let p = self.prefix.manifest_file(VERSION_FILE);
if utils::is_file(&p) {
Ok(Some(utils::read_file(VERSION_FILE, &p)?.trim().to_string()))
} else {
Ok(None)
}
}
fn write_version(&self, tx: &mut Transaction<'_>) -> Result<()> {
tx.modify_file(self.prefix.rel_manifest_file(VERSION_FILE))?;
utils::write_file(
VERSION_FILE,
&self.prefix.manifest_file(VERSION_FILE),
INSTALLER_VERSION,
)?;
Ok(())
}
pub fn list(&self) -> Result<Vec<Component>> {
let path = self.prefix.abs_path(self.rel_components_file());
if !utils::is_file(&path) {
return Ok(Vec::new());
}
let content = utils::read_file("components", &path)?;
Ok(content
.lines()
.map(|s| Component {
components: self.clone(),
name: s.to_owned(),
})
.collect())
}
pub fn add<'a>(&self, name: &str, tx: Transaction<'a>) -> ComponentBuilder<'a> {
ComponentBuilder {
components: self.clone(),
name: name.to_owned(),
parts: Vec::new(),
tx,
}
}
pub fn find(&self, name: &str) -> Result<Option<Component>> {
let result = self.list()?;
Ok(result.into_iter().find(|c| (c.name() == name)))
}
pub fn prefix(&self) -> InstallPrefix {
self.prefix.clone()
}
}
pub struct ComponentBuilder<'a> {
components: Components,
name: String,
parts: Vec<ComponentPart>,
tx: Transaction<'a>,
}
impl<'a> ComponentBuilder<'a> {
pub fn add_file(&mut self, path: PathBuf) -> Result<File> {
self.parts
.push(ComponentPart("file".to_owned(), path.clone()));
self.tx.add_file(&self.name, path)
}
pub fn copy_file(&mut self, path: PathBuf, src: &Path) -> Result<()> {
self.parts
.push(ComponentPart("file".to_owned(), path.clone()));
self.tx.copy_file(&self.name, path, src)
}
pub fn copy_dir(&mut self, path: PathBuf, src: &Path) -> Result<()> {
self.parts
.push(ComponentPart("dir".to_owned(), path.clone()));
self.tx.copy_dir(&self.name, path, src)
}
pub fn move_file(&mut self, path: PathBuf, src: &Path) -> Result<()> {
self.parts
.push(ComponentPart("file".to_owned(), path.clone()));
self.tx.move_file(&self.name, path, src)
}
pub fn move_dir(&mut self, path: PathBuf, src: &Path) -> Result<()> {
self.parts
.push(ComponentPart("dir".to_owned(), path.clone()));
self.tx.move_dir(&self.name, path, src)
}
pub fn finish(mut self) -> Result<Transaction<'a>> {
// Write component manifest
let path = self.components.rel_component_manifest(&self.name);
let abs_path = self.components.prefix.abs_path(&path);
let mut file = self.tx.add_file(&self.name, path)?;
for part in self.parts {
// FIXME: This writes relative paths to the component manifest,
// but rust-installer writes absolute paths.
utils::write_line("component", &mut file, &abs_path, &part.encode())?;
}
// Add component to components file
let path = self.components.rel_components_file();
let abs_path = self.components.prefix.abs_path(&path);
self.tx.modify_file(path)?;
utils::append_file("components", &abs_path, &self.name)?;
// Drop in the version file for future use
self.components.write_version(&mut self.tx)?;
Ok(self.tx)
}
}
#[derive(Debug)]
pub struct ComponentPart(pub String, pub PathBuf);
impl ComponentPart {
pub fn encode(&self) -> String {
format!("{}:{}", &self.0, &self.1.to_string_lossy())
}
pub fn decode(line: &str) -> Option<Self> {
line.find(':')
.map(|pos| Self(line[0..pos].to_owned(), PathBuf::from(&line[(pos + 1)..])))
}
}
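// Editor's sketch (hypothetical path, not in the original crate): a
// round-trip through the encoding above:
//
//     let part = ComponentPart("file".to_owned(), PathBuf::from("bin/rustc"));
//     assert_eq!(part.encode(), "file:bin/rustc");
//     assert_eq!(ComponentPart::decode("file:bin/rustc").unwrap().0, "file");
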
#[derive(Clone, Debug)]
pub struct Component {
components: Components,
name: String,
}
impl Component {
pub fn manifest_name(&self) -> String {
format!("manifest-{}", &self.name)
}
pub fn manifest_file(&self) -> PathBuf {
self.components.prefix.manifest_file(&self.manifest_name())
}
pub fn rel_manifest_file(&self) -> PathBuf {<|fim▁hole|> self.components
.prefix
.rel_manifest_file(&self.manifest_name())
}
pub fn name(&self) -> &str {
&self.name
}
pub fn parts(&self) -> Result<Vec<ComponentPart>> {
let mut result = Vec::new();
for line in utils::read_file("component", &self.manifest_file())?.lines() {
result.push(
ComponentPart::decode(line)
.ok_or_else(|| ErrorKind::CorruptComponent(self.name.clone()))?,
);
}
Ok(result)
}
pub fn uninstall<'a>(&self, mut tx: Transaction<'a>) -> Result<Transaction<'a>> {
// Update components file
let path = self.components.rel_components_file();
let abs_path = self.components.prefix.abs_path(&path);
let temp = tx.temp().new_file()?;
utils::filter_file("components", &abs_path, &temp, |l| (l != self.name))?;
tx.modify_file(path)?;
utils::rename_file("components", &temp, &abs_path, tx.notify_handler())?;
// TODO: If this is the last component remove the components file
// and the version file.
// Track visited directories
use std::collections::hash_set::IntoIter;
use std::collections::HashSet;
use std::fs::read_dir;
// dirs will contain the set of longest disjoint directory paths seen
// ancestors help in filtering seen paths and constructing dirs
// All seen paths must be relative to avoid surprises
struct PruneSet {
dirs: HashSet<PathBuf>,
ancestors: HashSet<PathBuf>,
prefix: PathBuf,
}
impl PruneSet {
fn seen(&mut self, mut path: PathBuf) {
if !path.is_relative() || !path.pop() {
return;
}
if self.dirs.contains(&path) || self.ancestors.contains(&path) {
return;
}
self.dirs.insert(path.clone());
while path.pop() {
if path.file_name().is_none() {
break;
}
if self.dirs.contains(&path) {
self.dirs.remove(&path);
}
if self.ancestors.contains(&path) {
break;
}
self.ancestors.insert(path.clone());
}
}
}
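        // Editor's note (worked example, not in the original source): after
        // seen("a/b/c.txt") and then seen("a/b/d/e.txt"), `dirs` holds only
        // {"a/b/d"} and `ancestors` holds {"a", "a/b"}: only the deepest
        // disjoint directories stay candidates for pruning.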
struct PruneIter {
iter: IntoIter<PathBuf>,
path_buf: Option<PathBuf>,
prefix: PathBuf,
}
impl IntoIterator for PruneSet {
type Item = PathBuf;
type IntoIter = PruneIter;
fn into_iter(self) -> Self::IntoIter {
PruneIter {
iter: self.dirs.into_iter(),
path_buf: None,
prefix: self.prefix,
}
}
}
// Returns only empty directories
impl Iterator for PruneIter {
type Item = PathBuf;
fn next(&mut self) -> Option<Self::Item> {
self.path_buf = match self.path_buf {
None => self.iter.next(),
Some(_) => {
let mut path_buf = self.path_buf.take().unwrap();
match path_buf.file_name() {
Some(_) => {
if path_buf.pop() {
Some(path_buf)
} else {
None
}
}
None => self.iter.next(),
}
}
};
self.path_buf.as_ref()?;
let full_path = self.prefix.join(self.path_buf.as_ref().unwrap());
let empty = match read_dir(full_path) {
Ok(dir) => dir.count() == 0,
Err(_) => false,
};
if empty {
self.path_buf.clone()
} else {
// No dir above can be empty, go to next path in dirs
self.path_buf = None;
self.next()
}
}
}
// Remove parts
let mut pset = PruneSet {
dirs: HashSet::new(),
ancestors: HashSet::new(),
prefix: self.components.prefix.abs_path(""),
};
for part in self.parts()?.into_iter().rev() {
match &*part.0 {
"file" => tx.remove_file(&self.name, part.1.clone())?,
"dir" => tx.remove_dir(&self.name, part.1.clone())?,
_ => return Err(ErrorKind::CorruptComponent(self.name.clone()).into()),
}
pset.seen(part.1);
}
for empty_dir in pset {
tx.remove_dir(&self.name, empty_dir)?;
}
// Remove component manifest
tx.remove_file(&self.name, self.rel_manifest_file())?;
Ok(tx)
}
}<|fim▁end|> | |
<|file_name|>config_validation.py<|end_file_name|><|fim▁begin|>"""Helpers for config validation using voluptuous."""
import inspect
import logging
import os
import re
from datetime import (timedelta, datetime as datetime_sys,
time as time_sys, date as date_sys)
from socket import _GLOBAL_DEFAULT_TIMEOUT
from typing import Any, Union, TypeVar, Callable, Sequence, Dict, Optional
from urllib.parse import urlparse
import voluptuous as vol
from pkg_resources import parse_version
import homeassistant.util.dt as dt_util
from homeassistant.const import (
CONF_PLATFORM, CONF_SCAN_INTERVAL, TEMP_CELSIUS, TEMP_FAHRENHEIT,
CONF_ALIAS, CONF_ENTITY_ID, CONF_VALUE_TEMPLATE, WEEKDAYS,
CONF_CONDITION, CONF_BELOW, CONF_ABOVE, CONF_TIMEOUT, SUN_EVENT_SUNSET,
SUN_EVENT_SUNRISE, CONF_UNIT_SYSTEM_IMPERIAL, CONF_UNIT_SYSTEM_METRIC,
ENTITY_MATCH_ALL, CONF_ENTITY_NAMESPACE, __version__)
from homeassistant.core import valid_entity_id, split_entity_id
from homeassistant.exceptions import TemplateError
from homeassistant.helpers import template as template_helper
from homeassistant.helpers.logging import KeywordStyleAdapter
from homeassistant.util import slugify as util_slugify
# pylint: disable=invalid-name
TIME_PERIOD_ERROR = "offset {} should be format 'HH:MM' or 'HH:MM:SS'"
OLD_SLUG_VALIDATION = r'^[a-z0-9_]+$'
OLD_ENTITY_ID_VALIDATION = r"^(\w+)\.(\w+)$"
# Keep track of invalid slugs and entity ids found so we can create a
# persistent notification. Rare temporary exception to use a global.
INVALID_SLUGS_FOUND = {}
INVALID_ENTITY_IDS_FOUND = {}
INVALID_EXTRA_KEYS_FOUND = []
# Home Assistant types
byte = vol.All(vol.Coerce(int), vol.Range(min=0, max=255))
small_float = vol.All(vol.Coerce(float), vol.Range(min=0, max=1))
positive_int = vol.All(vol.Coerce(int), vol.Range(min=0))
latitude = vol.All(vol.Coerce(float), vol.Range(min=-90, max=90),
msg='invalid latitude')
longitude = vol.All(vol.Coerce(float), vol.Range(min=-180, max=180),
msg='invalid longitude')
gps = vol.ExactSequence([latitude, longitude])
sun_event = vol.All(vol.Lower, vol.Any(SUN_EVENT_SUNSET, SUN_EVENT_SUNRISE))
port = vol.All(vol.Coerce(int), vol.Range(min=1, max=65535))
# typing typevar
T = TypeVar('T')
# Adapted from:
# https://github.com/alecthomas/voluptuous/issues/115#issuecomment-144464666
def has_at_least_one_key(*keys: str) -> Callable:
"""Validate that at least one key exists."""
def validate(obj: Dict) -> Dict:
"""Test keys exist in dict."""
if not isinstance(obj, dict):
raise vol.Invalid('expected dictionary')
for k in obj.keys():
if k in keys:
return obj
raise vol.Invalid('must contain one of {}.'.format(', '.join(keys)))
return validate
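# Editor's sketch (hypothetical keys): has_at_least_one_key('host', 'port')
# accepts {'host': 'example'} or {'port': 80}, but raises vol.Invalid for {}.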
def has_at_most_one_key(*keys: str) -> Callable:
"""Validate that zero keys exist or one key exists."""
def validate(obj: Dict) -> Dict:
"""Test zero keys exist or one key exists in dict."""
if not isinstance(obj, dict):
raise vol.Invalid('expected dictionary')
if len(set(keys) & set(obj)) > 1:
raise vol.Invalid(
'must contain at most one of {}.'.format(', '.join(keys))
)
return obj
return validate
def boolean(value: Any) -> bool:
"""Validate and coerce a boolean value."""
if isinstance(value, str):
value = value.lower()
if value in ('1', 'true', 'yes', 'on', 'enable'):
return True
if value in ('0', 'false', 'no', 'off', 'disable'):
return False
raise vol.Invalid('invalid boolean value {}'.format(value))
return bool(value)
def isdevice(value):
"""Validate that value is a real device."""
try:
os.stat(value)
return str(value)
except OSError:
raise vol.Invalid('No device at {} found'.format(value))
def matches_regex(regex):
"""Validate that the value is a string that matches a regex."""
regex = re.compile(regex)
def validator(value: Any) -> str:
"""Validate that value matches the given regex."""
if not isinstance(value, str):
raise vol.Invalid('not a string value: {}'.format(value))
if not regex.match(value):
raise vol.Invalid('value {} does not match regular expression {}'
.format(value, regex.pattern))
return value
return validator
def is_regex(value):
"""Validate that a string is a valid regular expression."""
try:
r = re.compile(value)
return r
except TypeError:
raise vol.Invalid("value {} is of the wrong type for a regular "
"expression".format(value))
except re.error:
raise vol.Invalid("value {} is not a valid regular expression".format(
value))
def isfile(value: Any) -> str:
"""Validate that the value is an existing file."""
if value is None:
raise vol.Invalid('None is not file')
file_in = os.path.expanduser(str(value))
if not os.path.isfile(file_in):
raise vol.Invalid('not a file')
if not os.access(file_in, os.R_OK):
raise vol.Invalid('file not readable')
return file_in
def isdir(value: Any) -> str:
"""Validate that the value is an existing dir."""
if value is None:
raise vol.Invalid('not a directory')
dir_in = os.path.expanduser(str(value))
if not os.path.isdir(dir_in):
raise vol.Invalid('not a directory')
if not os.access(dir_in, os.R_OK):
raise vol.Invalid('directory not readable')
return dir_in
def ensure_list(value: Union[T, Sequence[T]]) -> Sequence[T]:
"""Wrap value in list if it is not one."""
if value is None:
return []
return value if isinstance(value, list) else [value]
def entity_id(value: Any) -> str:
"""Validate Entity ID."""
value = string(value).lower()
if valid_entity_id(value):
return value
if re.match(OLD_ENTITY_ID_VALIDATION, value):
# To ease the breaking change, we allow old slugs for now
# Remove after 0.94 or 1.0
fixed = '.'.join(util_slugify(part) for part in value.split('.', 1))
INVALID_ENTITY_IDS_FOUND[value] = fixed
logging.getLogger(__name__).warning(
"Found invalid entity_id %s, please update with %s. This "
"will become a breaking change.",
value, fixed
)
return value
raise vol.Invalid('Entity ID {} is an invalid entity id'.format(value))
def entity_ids(value: Union[str, Sequence]) -> Sequence[str]:
"""Validate Entity IDs."""
if value is None:
raise vol.Invalid('Entity IDs can not be None')
if isinstance(value, str):
value = [ent_id.strip() for ent_id in value.split(',')]
return [entity_id(ent_id) for ent_id in value]
comp_entity_ids = vol.Any(
vol.All(vol.Lower, ENTITY_MATCH_ALL),
entity_ids
)
def entity_domain(domain: str):
"""Validate that entity belong to domain."""
def validate(value: Any) -> str:
"""Test if entity domain is domain."""
ent_domain = entities_domain(domain)
return ent_domain(value)[0]
return validate
def entities_domain(domain: str):
"""Validate that entities belong to domain."""
def validate(values: Union[str, Sequence]) -> Sequence[str]:
"""Test if entity domain is domain."""
values = entity_ids(values)
for ent_id in values:
if split_entity_id(ent_id)[0] != domain:
raise vol.Invalid(
"Entity ID '{}' does not belong to domain '{}'"
.format(ent_id, domain))
return values
return validate
def enum(enumClass):
"""Create validator for specified enum."""
return vol.All(vol.In(enumClass.__members__), enumClass.__getitem__)
def icon(value):
"""Validate icon."""
value = str(value)
if ':' in value:
return value
    raise vol.Invalid('Icons should be specified in the form "prefix:name"')
time_period_dict = vol.All(
dict, vol.Schema({
'days': vol.Coerce(int),
'hours': vol.Coerce(int),
'minutes': vol.Coerce(int),
'seconds': vol.Coerce(int),
'milliseconds': vol.Coerce(int),
}),
has_at_least_one_key('days', 'hours', 'minutes',
'seconds', 'milliseconds'),
lambda value: timedelta(**value))
def time(value) -> time_sys:
"""Validate and transform a time."""
if isinstance(value, time_sys):
return value
try:
time_val = dt_util.parse_time(value)
except TypeError:
raise vol.Invalid('Not a parseable type')
if time_val is None:
raise vol.Invalid('Invalid time specified: {}'.format(value))
return time_val
def date(value) -> date_sys:
"""Validate and transform a date."""
if isinstance(value, date_sys):
return value
try:
date_val = dt_util.parse_date(value)
except TypeError:
raise vol.Invalid('Not a parseable type')
if date_val is None:
raise vol.Invalid("Could not parse date")
return date_val
def time_period_str(value: str) -> timedelta:
"""Validate and transform time offset."""
if isinstance(value, int):
raise vol.Invalid('Make sure you wrap time values in quotes')
if not isinstance(value, str):
raise vol.Invalid(TIME_PERIOD_ERROR.format(value))
negative_offset = False
if value.startswith('-'):
negative_offset = True
value = value[1:]
elif value.startswith('+'):
value = value[1:]
try:
parsed = [int(x) for x in value.split(':')]
except ValueError:
raise vol.Invalid(TIME_PERIOD_ERROR.format(value))
if len(parsed) == 2:
hour, minute = parsed
second = 0
elif len(parsed) == 3:
hour, minute, second = parsed
else:
raise vol.Invalid(TIME_PERIOD_ERROR.format(value))
offset = timedelta(hours=hour, minutes=minute, seconds=second)
if negative_offset:
offset *= -1
return offset
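# Editor's sketch: time_period_str('1:30') -> timedelta(hours=1, minutes=30),
# and time_period_str('-1:30') -> that period negated; a bare '90' raises,
# since plain seconds are handled by time_period_seconds below.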
def time_period_seconds(value: Union[int, str]) -> timedelta:
"""Validate and transform seconds to a time offset."""
try:
return timedelta(seconds=int(value))
except (ValueError, TypeError):
raise vol.Invalid('Expected seconds, got {}'.format(value))
time_period = vol.Any(time_period_str, time_period_seconds, timedelta,
time_period_dict)
def match_all(value):
"""Validate that matches all values."""
return value
def positive_timedelta(value: timedelta) -> timedelta:
"""Validate timedelta is positive."""
if value < timedelta(0):
raise vol.Invalid('Time period should be positive')
return value
def service(value):
"""Validate service."""
# Services use same format as entities so we can use same helper.
if valid_entity_id(value):
return value
raise vol.Invalid('Service {} does not match format <domain>.<name>'
.format(value))
def schema_with_slug_keys(value_schema: Union[T, Callable]) -> Callable:
"""Ensure dicts have slugs as keys.
Replacement of vol.Schema({cv.slug: value_schema}) to prevent misleading
"Extra keys" errors from voluptuous.
"""
schema = vol.Schema({str: value_schema})
def verify(value: Dict) -> Dict:
"""Validate all keys are slugs and then the value_schema."""
if not isinstance(value, dict):
raise vol.Invalid('expected dictionary')
for key in value.keys():
try:
slug(key)
except vol.Invalid:
# To ease the breaking change, we allow old slugs for now
# Remove after 0.94 or 1.0
if re.match(OLD_SLUG_VALIDATION, key):
fixed = util_slugify(key)
INVALID_SLUGS_FOUND[key] = fixed
logging.getLogger(__name__).warning(
"Found invalid slug %s, please update with %s. This "
"will be come a breaking change.",
key, fixed
)
else:
raise
return schema(value)
return verify
def slug(value: Any) -> str:
"""Validate value is a valid slug."""
if value is None:
raise vol.Invalid('Slug should not be None')
value = str(value)
slg = util_slugify(value)
if value == slg:
return value
raise vol.Invalid('invalid slug {} (try {})'.format(value, slg))
def slugify(value: Any) -> str:
"""Coerce a value to a slug."""
if value is None:
raise vol.Invalid('Slug should not be None')
slg = util_slugify(str(value))
if slg:
return slg
raise vol.Invalid('Unable to slugify {}'.format(value))
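# Editor's sketch: slug('my_id') validates an already-slugged value unchanged,
# while slugify('My Id') coerces it (via the project's util_slugify); both
# raise vol.Invalid when given None.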
def string(value: Any) -> str:
"""Coerce value to string, except for None."""
if value is None:
raise vol.Invalid('string value is None')
if isinstance(value, (list, dict)):
raise vol.Invalid('value should be a string')
return str(value)
def temperature_unit(value) -> str:
"""Validate and transform temperature unit."""
value = str(value).upper()
if value == 'C':
return TEMP_CELSIUS
if value == 'F':
return TEMP_FAHRENHEIT
raise vol.Invalid('invalid temperature unit (expected C or F)')
unit_system = vol.All(vol.Lower, vol.Any(CONF_UNIT_SYSTEM_METRIC,
CONF_UNIT_SYSTEM_IMPERIAL))
def template(value):
"""Validate a jinja2 template."""
if value is None:
raise vol.Invalid('template value is None')
if isinstance(value, (list, dict, template_helper.Template)):
raise vol.Invalid('template value should be a string')
value = template_helper.Template(str(value))
try:
value.ensure_valid()
return value
except TemplateError as ex:
raise vol.Invalid('invalid template ({})'.format(ex))
def template_complex(value):
"""Validate a complex jinja2 template."""
if isinstance(value, list):
return_value = value.copy()
for idx, element in enumerate(return_value):
return_value[idx] = template_complex(element)
return return_value
if isinstance(value, dict):
return_value = value.copy()
for key, element in return_value.items():
return_value[key] = template_complex(element)
return return_value
return template(value)
def datetime(value):
"""Validate datetime."""
if isinstance(value, datetime_sys):
return value
try:
date_val = dt_util.parse_datetime(value)
except TypeError:
date_val = None
if date_val is None:
raise vol.Invalid('Invalid datetime specified: {}'.format(value))
return date_val
def time_zone(value):
"""Validate timezone."""
if dt_util.get_time_zone(value) is not None:
return value
raise vol.Invalid(
'Invalid time zone passed in. Valid options can be found here: '
'http://en.wikipedia.org/wiki/List_of_tz_database_time_zones')
weekdays = vol.All(ensure_list, [vol.In(WEEKDAYS)])
def socket_timeout(value):
"""Validate timeout float > 0.0.
    None is coerced to the socket._GLOBAL_DEFAULT_TIMEOUT sentinel object.
"""
if value is None:
return _GLOBAL_DEFAULT_TIMEOUT
try:
float_value = float(value)
if float_value > 0.0:
return float_value
raise vol.Invalid('Invalid socket timeout value.'
' float > 0.0 required.')
except Exception as _:
raise vol.Invalid('Invalid socket timeout: {err}'.format(err=_))
# pylint: disable=no-value-for-parameter
def url(value: Any) -> str:
"""Validate an URL."""
url_in = str(value)
if urlparse(url_in).scheme in ['http', 'https']:
return vol.Schema(vol.Url())(url_in)
raise vol.Invalid('invalid url')
def x10_address(value):
"""Validate an x10 address."""
regex = re.compile(r'([A-Pa-p]{1})(?:[2-9]|1[0-6]?)$')
if not regex.match(value):
raise vol.Invalid('Invalid X10 Address')
return str(value).lower()
def ensure_list_csv(value: Any) -> Sequence:
"""Ensure that input is a list or make one from comma-separated string."""
if isinstance(value, str):
return [member.strip() for member in value.split(',')]
return ensure_list(value)
def deprecated(key: str,
replacement_key: Optional[str] = None,
invalidation_version: Optional[str] = None,
default: Optional[Any] = None):
"""
Log key as deprecated and provide a replacement (if exists).
Expected behavior:
- Outputs the appropriate deprecation warning if key is detected
- Processes schema moving the value from key to replacement_key
- Processes schema changing nothing if only replacement_key provided
- No warning if only replacement_key provided
- No warning if neither key nor replacement_key are provided
- Adds replacement_key with default value in this case
- Once the invalidation_version is crossed, raises vol.Invalid if key
is detected
"""
module_name = inspect.getmodule(inspect.stack()[1][0]).__name__
if replacement_key and invalidation_version:
warning = ("The '{key}' option (with value '{value}') is"
" deprecated, please replace it with '{replacement_key}'."
" This option will become invalid in version"
" {invalidation_version}")
elif replacement_key:
warning = ("The '{key}' option (with value '{value}') is"
" deprecated, please replace it with '{replacement_key}'")
elif invalidation_version:
warning = ("The '{key}' option (with value '{value}') is"
" deprecated, please remove it from your configuration."
" This option will become invalid in version"
" {invalidation_version}")
else:
warning = ("The '{key}' option (with value '{value}') is"
" deprecated, please remove it from your configuration")
def check_for_invalid_version(value: Optional[Any]):
"""Raise error if current version has reached invalidation."""
if not invalidation_version:
return
if parse_version(__version__) >= parse_version(invalidation_version):
raise vol.Invalid(
warning.format(
key=key,
value=value,
replacement_key=replacement_key,
invalidation_version=invalidation_version
)
)
def validator(config: Dict):
"""Check if key is in config and log warning."""
if key in config:
value = config[key]
check_for_invalid_version(value)
KeywordStyleAdapter(logging.getLogger(module_name)).warning(
warning,
key=key,
value=value,
replacement_key=replacement_key,
invalidation_version=invalidation_version
)
if replacement_key:
config.pop(key)
else:
value = default
if (replacement_key
and (replacement_key not in config
or default == config.get(replacement_key))
and value is not None):
config[replacement_key] = value
return has_at_most_one_key(key, replacement_key)(config)
return validator
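# Editor's sketch (hypothetical option names): a typical composition,
#
#   vol.All(deprecated('username', replacement_key='user'),
#           vol.Schema({vol.Optional('user'): string}))
#
# which logs the warning, moves the legacy value under 'user', and enforces
# that at most one of the two keys is present.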
# Validator helpers
def key_dependency(key, dependency):
"""Validate that all dependencies exist for key."""
def validator(value):
"""Test dependencies."""
if not isinstance(value, dict):
raise vol.Invalid('key dependencies require a dict')
if key in value and dependency not in value:
raise vol.Invalid('dependency violation - key "{}" requires '
'key "{}" to exist'.format(key, dependency))
return value
return validator
# Schemas
class HASchema(vol.Schema):
"""Schema class that allows us to mark PREVENT_EXTRA errors as warnings."""
def __call__(self, data):
"""Override __call__ to mark PREVENT_EXTRA as warning."""
try:
return super().__call__(data)
except vol.Invalid as orig_err:
if self.extra != vol.PREVENT_EXTRA:
raise
# orig_error is of type vol.MultipleInvalid (see super __call__)
assert isinstance(orig_err, vol.MultipleInvalid)
# pylint: disable=no-member
# If it fails with PREVENT_EXTRA, try with ALLOW_EXTRA
self.extra = vol.ALLOW_EXTRA
# In case it still fails the following will raise
try:
validated = super().__call__(data)
finally:
self.extra = vol.PREVENT_EXTRA
# This is a legacy config, print warning
extra_key_errs = [err for err in orig_err.errors
if err.error_message == 'extra keys not allowed']
if extra_key_errs:
msg = "Your configuration contains extra keys " \
"that the platform does not support.\n" \
"Please remove "
submsg = ', '.join('[{}]'.format(err.path[-1]) for err in
extra_key_errs)
submsg += '. '
if hasattr(data, '__config_file__'):
submsg += " (See {}, line {}). ".format(
data.__config_file__, data.__line__)
msg += submsg
logging.getLogger(__name__).warning(msg)
INVALID_EXTRA_KEYS_FOUND.append(submsg)
else:
# This should not happen (all errors should be extra key
# errors). Let's raise the original error anyway.
raise orig_err
# Return legacy validated config
return validated
def extend(self, schema, required=None, extra=None):
"""Extend this schema and convert it to HASchema if necessary."""
ret = super().extend(schema, required=required, extra=extra)
if extra is not None:
return ret
return HASchema(ret.schema, required=required, extra=self.extra)
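# Behavior sketch (for illustration): with the default PREVENT_EXTRA, an
# unknown key no longer aborts validation outright:
#   schema = HASchema({vol.Required(CONF_PLATFORM): string})
#   schema({'platform': 'demo', 'unknown': 1})
# logs "Your configuration contains extra keys ..." and returns the
# ALLOW_EXTRA-validated config instead of raising.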
PLATFORM_SCHEMA = HASchema({
vol.Required(CONF_PLATFORM): string,
vol.Optional(CONF_ENTITY_NAMESPACE): string,
vol.Optional(CONF_SCAN_INTERVAL): time_period
})
PLATFORM_SCHEMA_BASE = PLATFORM_SCHEMA.extend({
}, extra=vol.ALLOW_EXTRA)
EVENT_SCHEMA = vol.Schema({
vol.Optional(CONF_ALIAS): string,
vol.Required('event'): string,
vol.Optional('event_data'): dict,
vol.Optional('event_data_template'): {match_all: template_complex}
})
SERVICE_SCHEMA = vol.All(vol.Schema({
vol.Optional(CONF_ALIAS): string,
vol.Exclusive('service', 'service name'): service,<|fim▁hole|>}), has_at_least_one_key('service', 'service_template'))
NUMERIC_STATE_CONDITION_SCHEMA = vol.All(vol.Schema({
vol.Required(CONF_CONDITION): 'numeric_state',
vol.Required(CONF_ENTITY_ID): entity_id,
CONF_BELOW: vol.Coerce(float),
CONF_ABOVE: vol.Coerce(float),
vol.Optional(CONF_VALUE_TEMPLATE): template,
}), has_at_least_one_key(CONF_BELOW, CONF_ABOVE))
STATE_CONDITION_SCHEMA = vol.All(vol.Schema({
vol.Required(CONF_CONDITION): 'state',
vol.Required(CONF_ENTITY_ID): entity_id,
vol.Required('state'): str,
vol.Optional('for'): vol.All(time_period, positive_timedelta),
# To support use_trigger_value in automation
# Deprecated 2016/04/25
vol.Optional('from'): str,
}), key_dependency('for', 'state'))
SUN_CONDITION_SCHEMA = vol.All(vol.Schema({
vol.Required(CONF_CONDITION): 'sun',
vol.Optional('before'): sun_event,
vol.Optional('before_offset'): time_period,
vol.Optional('after'): vol.All(vol.Lower, vol.Any(
SUN_EVENT_SUNSET, SUN_EVENT_SUNRISE)),
vol.Optional('after_offset'): time_period,
}), has_at_least_one_key('before', 'after'))
TEMPLATE_CONDITION_SCHEMA = vol.Schema({
vol.Required(CONF_CONDITION): 'template',
vol.Required(CONF_VALUE_TEMPLATE): template,
})
TIME_CONDITION_SCHEMA = vol.All(vol.Schema({
vol.Required(CONF_CONDITION): 'time',
'before': time,
'after': time,
'weekday': weekdays,
}), has_at_least_one_key('before', 'after', 'weekday'))
ZONE_CONDITION_SCHEMA = vol.Schema({
vol.Required(CONF_CONDITION): 'zone',
vol.Required(CONF_ENTITY_ID): entity_id,
'zone': entity_id,
# To support use_trigger_value in automation
# Deprecated 2016/04/25
vol.Optional('event'): vol.Any('enter', 'leave'),
})
AND_CONDITION_SCHEMA = vol.Schema({
vol.Required(CONF_CONDITION): 'and',
vol.Required('conditions'): vol.All(
ensure_list,
# pylint: disable=unnecessary-lambda
[lambda value: CONDITION_SCHEMA(value)],
)
})
OR_CONDITION_SCHEMA = vol.Schema({
vol.Required(CONF_CONDITION): 'or',
vol.Required('conditions'): vol.All(
ensure_list,
# pylint: disable=unnecessary-lambda
[lambda value: CONDITION_SCHEMA(value)],
)
})
CONDITION_SCHEMA = vol.Any(
NUMERIC_STATE_CONDITION_SCHEMA,
STATE_CONDITION_SCHEMA,
SUN_CONDITION_SCHEMA,
TEMPLATE_CONDITION_SCHEMA,
TIME_CONDITION_SCHEMA,
ZONE_CONDITION_SCHEMA,
AND_CONDITION_SCHEMA,
OR_CONDITION_SCHEMA,
)
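# For illustration, a minimal dict accepted by CONDITION_SCHEMA (it matches
# the numeric_state branch):
#   CONDITION_SCHEMA({
#       'condition': 'numeric_state',
#       'entity_id': 'sensor.temperature',
#       'above': 20,
#   })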
_SCRIPT_DELAY_SCHEMA = vol.Schema({
vol.Optional(CONF_ALIAS): string,
vol.Required("delay"): vol.Any(
vol.All(time_period, positive_timedelta),
template, template_complex)
})
_SCRIPT_WAIT_TEMPLATE_SCHEMA = vol.Schema({
vol.Optional(CONF_ALIAS): string,
vol.Required("wait_template"): template,
vol.Optional(CONF_TIMEOUT): vol.All(time_period, positive_timedelta),
vol.Optional("continue_on_timeout"): boolean,
})
SCRIPT_SCHEMA = vol.All(
ensure_list,
[vol.Any(SERVICE_SCHEMA, _SCRIPT_DELAY_SCHEMA,
_SCRIPT_WAIT_TEMPLATE_SCHEMA, EVENT_SCHEMA, CONDITION_SCHEMA)],
)<|fim▁end|> | vol.Exclusive('service_template', 'service name'): template,
vol.Optional('data'): dict,
vol.Optional('data_template'): {match_all: template_complex},
vol.Optional(CONF_ENTITY_ID): comp_entity_ids, |
<|file_name|>test_modified_huber_loss_op.py<|end_file_name|><|fim▁begin|># Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
from op_test import OpTest
<|fim▁hole|> if val < -1:
return -4. * val
elif val < 1:
return (1. - val) * (1. - val)
else:
return 0.
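# Sanity check of the branches above (illustrative), with val = x * (2*y - 1):
#   val = -2.0 -> loss = 8.0   (linear tail, -4 * val)
#   val =  0.5 -> loss = 0.25  (quadratic zone, (1 - val)**2)
#   val =  1.5 -> loss = 0.0   (margin satisfied)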
class TestModifiedHuberLossOp(OpTest):
def setUp(self):
self.op_type = 'modified_huber_loss'
samples_num = 32
x_np = np.random.uniform(-2., 2., (samples_num, 1)).astype('float32')
y_np = np.random.choice([0, 1], samples_num).reshape(
(samples_num, 1)).astype('float32')
product_res = x_np * (2. * y_np - 1.)
# keep away from the junction of piecewise function
for pos, val in np.ndenumerate(product_res):
while abs(val - 1.) < 0.05:
x_np[pos] = np.random.uniform(-2., 2.)
y_np[pos] = np.random.choice([0, 1])
product_res[pos] = x_np[pos] * (2 * y_np[pos] - 1)
val = product_res[pos]
self.inputs = {'X': x_np, 'Y': y_np}
loss = np.vectorize(modified_huber_loss_forward)(product_res)
self.outputs = {
'IntermediateVal': product_res.astype('float32'),
'Out': loss.reshape((samples_num, 1)).astype('float32')
}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out', max_relative_error=0.01)
if __name__ == '__main__':
unittest.main()<|fim▁end|> | def modified_huber_loss_forward(val): |
<|file_name|>epochGrapher.py<|end_file_name|><|fim▁begin|>import sys,numpy,matplotlib
import matplotlib.pyplot, scipy.stats
import library
def colorDefiner(epoch):
if epoch == '0':
theColor='blue'
elif epoch == '0.5':
theColor='red'<|fim▁hole|> theColor='green'
elif epoch == '1.5':
theColor='orange'
else:
print 'error from colorDefiner. exiting...'
sys.exit()
return theColor
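# e.g. colorDefiner('0') -> 'blue', colorDefiner('1.5') -> 'orange';
# any unrecognized epoch label prints an error and exits (illustrative note).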
def dataGrapherEpochs(dataStructure,figureLabel):
resolution=1000
figureFile='results/figure_%s.pdf'%figureLabel
for epochLabel in dataStructure:
epoch=epochLabel.split('_')[0]
localTime=numpy.array(dataStructure[epochLabel][0])
shiftedTime=localTime-min(localTime)
localCells=dataStructure[epochLabel][1]
highResolutionTime=numpy.linspace(min(shiftedTime),max(shiftedTime),resolution)
epochColor=colorDefiner(epoch)
# plotting the data
if len(localCells) > 1:
matplotlib.pyplot.plot(localTime,localCells,'o',color=epochColor,markeredgecolor='None',ms=4)
# plotting the model if there is growth, otherwise plot a best model straight line
if len(localCells) <= 2:
matplotlib.pyplot.plot([localTime[0],localTime[-1]],[localCells[0],localCells[-1]],'-',color=epochColor)
elif localCells[0] > localCells[-1]:
slope, intercept, temp0, temp1, temp2 = scipy.stats.linregress(shiftedTime,localCells)
matplotlib.pyplot.plot([localTime[0],localTime[-1]],[intercept,slope*shiftedTime[-1]+intercept],'-',color=epochColor)
else:
fittedTrajectory=library.dataFitter(shiftedTime,localCells)
b=library.peval(highResolutionTime,fittedTrajectory[0])
matplotlib.pyplot.plot(highResolutionTime+min(localTime),b,'-',color=epochColor)
matplotlib.pyplot.xlim([-0.5,20])
matplotlib.pyplot.ylim([-0.5e5,18e5])
matplotlib.pyplot.xlabel('time (days)')
matplotlib.pyplot.ylabel('number of cells (x 1e5)')
matplotlib.pyplot.title('%s ppm'%figureLabel)
matplotlib.pyplot.yticks((0,2e5,4e5,6e5,8e5,10e5,12e5,14e5,16e5,18e5),('0','2','4','6','8','10','12','14','16','18'))
matplotlib.pyplot.savefig(figureFile)
matplotlib.pyplot.clf()
return None
### MAIN
# 1. data reading
data300=library.dataReader('data/300ppmSetsLight.v2.txt')
data1000=library.dataReader('data/1000ppmSetsLight.v2.txt')
# 2. fitting the data to sigmoidal function
print 'fitting data for 300 ppm...'
dataGrapherEpochs(data300,'300')
print
print 'fitting data for 1000 ppm...'
dataGrapherEpochs(data1000,'1000')
print '... graphs completed.'<|fim▁end|> | elif epoch == '1': |
<|file_name|>setup.py<|end_file_name|><|fim▁begin|>import os
from setuptools import find_packages
from setuptools import setup
version = '1.0'
install_requires = [<|fim▁hole|>tests_require = install_requires + ['Sphinx', 'docutils',
'virtualenv', 'nose', 'coverage']
here = os.path.abspath(os.path.dirname(__file__))
try:
README = open(os.path.join(here, 'README.md')).read()
CHANGES = open(os.path.join(here, 'CHANGES.md')).read()
except IOError:
README = CHANGES = ''
kwargs = dict(
version=version,
name='strava',
description='Python wrapper for the Strava (http://www.strava.com) API',
long_description=README + '\n\n' + CHANGES,
classifiers=[
'Intended Audience :: Developers',
'Programming Language :: Python',
'License :: OSI Approved :: Apache Software License',
],
install_requires=install_requires,
license='Apache',
packages=find_packages(),
include_package_data=True,
zip_safe=True,
tests_require=tests_require,
test_suite='strava.tests',
url='https://github.com/Packetslave/strava',
author='Brian Landers',
author_email='[email protected]',
entry_points="""\
"""
)
setup(**kwargs)<|fim▁end|> | ]
|
<|file_name|>english.js<|end_file_name|><|fim▁begin|>export default {
navigator: {
doc: 'Docs',
demo: 'Demo',
started: 'Get Started'
},
features: {
userExperience: {
title: 'Optimize Experience',
desc: 'To make scroll more smoothly, We support flexible configurations about inertial scrolling, rebound, fade scrollbar, etc. which could optimize user experience obviously.'
},
application: {
title: 'Rich Features',
desc: 'It can be applied to normal scroll list, picker, slide, index list, start guidance, etc. What\'s more, some complicated needs like pull down refresh and pull up load can be implemented much easier.'
},
dependence: {
title: 'Dependence Free',
desc: 'As a plain JavaScript library, BetterScroll doesn\'t depend on any framework, you could use it alone, or with any other MVVM frameworks.'
}
},
examples: {
normalScrollList: 'Normal Scroll List',
indexList: 'Index List',
picker: 'Picker',
slide: 'Slide',
startGuidance: 'Start Guidance',
freeScroll: 'Free Scroll',
formList: 'Form List',
verticalScrollImg: 'vertical-scroll-en.jpeg',<|fim▁hole|> pickerImg: 'picker-en.jpeg',
slideImg: 'slide.jpeg',
startGuidanceImg: 'full-page-slide.jpeg',
freeScrollImg: 'free-scroll.jpeg',
formListImg: 'form-list-en.jpeg'
},
normalScrollListPage: {
desc: 'Nomal scroll list based on BetterScroll',
scrollbar: 'Scrollbar',
pullDownRefresh: 'Pull Down Refresh',
pullUpLoad: 'Pull Up Load',
previousTxt: 'I am the No.',
followingTxt: ' line',
newDataTxt: 'I am new data: '
},
scrollComponent: {
defaultLoadTxtMore: 'Load more',
defaultLoadTxtNoMore: 'There is no more data',
defaultRefreshTxt: 'Refresh success'
},
indexListPage: {
title: 'Current City: Beijing'
},
pickerPage: {
desc: 'Picker is a typical choose component at mobile end. And it could dynamic change the data of every column to realize linkage picker.',
picker: ' picker',
pickerDemo: ' picker demo ...',
oneColumn: 'One column',
twoColumn: 'Two column',
threeColumn: 'Three column',
linkage: 'Linkage',
confirmTxt: 'confirm | ok',
cancelTxt: 'cancel | close'
},
slidePage: {
desc: 'Slide is a typical component at mobile end, support horizontal move.'
},
fullPageSlideComponent: {
buttonTxt: 'Start Use'
},
freeScrollPage: {
desc: 'Free scroll supports horizontal and vertical move at the same time.'
},
formListPage: {
desc: 'To use form in better-scroll, you need to make sure the option click is configured as false, since some native element events will be prevented when click is true. And in this situation, we recommend to handle click by listening tap event.',
previousTxt: 'No.',
followingTxt: ' option'
}
}<|fim▁end|> | indexListImg: 'index-list.jpeg', |
<|file_name|>cast-region-to-uint.rs<|end_file_name|><|fim▁begin|>// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.<|fim▁hole|>// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub fn main() {
let x: isize = 3;
println!("&x={:x}", (&x as *const isize as usize));
}<|fim▁end|> | //
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your |
<|file_name|>bitcoinunits.cpp<|end_file_name|><|fim▁begin|>#include "bitcoinunits.h"
#include <QStringList>
BitcoinUnits::BitcoinUnits(QObject *parent):
QAbstractListModel(parent),
unitlist(availableUnits())
{
}
QList<BitcoinUnits::Unit> BitcoinUnits::availableUnits()
{
QList<BitcoinUnits::Unit> unitlist;
unitlist.append(BTC);
unitlist.append(mBTC);
unitlist.append(uBTC);
return unitlist;
}
bool BitcoinUnits::valid(int unit)
{
switch(unit)
{
case BTC:
case mBTC:
case uBTC:
return true;
default:
return false;
}
}
QString BitcoinUnits::name(int unit)
{
switch(unit)
{
case BTC: return QString("BDOG");
case mBTC: return QString("mBDOG");
case uBTC: return QString::fromUtf8("μBDOG");
default: return QString("???");
}
}
QString BitcoinUnits::description(int unit)
{
switch(unit)
{
case BTC: return QString("blackdogcoins");
case mBTC: return QString("Milli-blackdogcoins (1 / 1,000)");
case uBTC: return QString("Micro-blackdogcoins (1 / 1,000,000)");
default: return QString("???");
}
}
qint64 BitcoinUnits::factor(int unit)
{
switch(unit)
{
case BTC: return 100000000;
case mBTC: return 100000;
case uBTC: return 100;
default: return 100000000;
}
}
int BitcoinUnits::amountDigits(int unit)
{
switch(unit)
{
case BTC: return 8; // 21,000,000 (# digits, without commas)
case mBTC: return 11; // 21,000,000,000
case uBTC: return 14; // 21,000,000,000,000
default: return 0;
}
}
int BitcoinUnits::decimals(int unit)
{
switch(unit)
{
case BTC: return 8;
case mBTC: return 5;
case uBTC: return 2;
default: return 0;
}
}
QString BitcoinUnits::format(int unit, qint64 n, bool fPlus)
{
// Note: not using straight sprintf here because we do NOT want
// localized number formatting.
if(!valid(unit))
return QString(); // Refuse to format invalid unit
qint64 coin = factor(unit);
int num_decimals = decimals(unit);
qint64 n_abs = (n > 0 ? n : -n);
qint64 quotient = n_abs / coin;
qint64 remainder = n_abs % coin;
QString quotient_str = QString::number(quotient);
QString remainder_str = QString::number(remainder).rightJustified(num_decimals, '0');
// Right-trim excess zeros after the decimal point
int nTrim = 0;
for (int i = remainder_str.size()-1; i>=2 && (remainder_str.at(i) == '0'); --i)
++nTrim;
remainder_str.chop(nTrim);
if (n < 0)
quotient_str.insert(0, '-');
else if (fPlus && n > 0)
quotient_str.insert(0, '+');
return quotient_str + QString(".") + remainder_str;
}<|fim▁hole|> return format(unit, amount, plussign) + QString(" ") + name(unit);
}
bool BitcoinUnits::parse(int unit, const QString &value, qint64 *val_out)
{
if(!valid(unit) || value.isEmpty())
return false; // Refuse to parse invalid unit or empty string
int num_decimals = decimals(unit);
QStringList parts = value.split(".");
if(parts.size() > 2)
{
return false; // More than one dot
}
QString whole = parts[0];
QString decimals;
if(parts.size() > 1)
{
decimals = parts[1];
}
if(decimals.size() > num_decimals)
{
return false; // Exceeds max precision
}
bool ok = false;
QString str = whole + decimals.leftJustified(num_decimals, '0');
if(str.size() > 18)
{
return false; // Longer numbers will exceed 63 bits
}
qint64 retvalue = str.toLongLong(&ok);
if(val_out)
{
*val_out = retvalue;
}
return ok;
}
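// Worked example (illustrative, not in the original file): parse(BTC, "1.5", &v)
// left-justifies "5" to "50000000", so v == 150000000 base units
// (factor(BTC) == 100000000) and the function returns true.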
int BitcoinUnits::rowCount(const QModelIndex &parent) const
{
Q_UNUSED(parent);
return unitlist.size();
}
QVariant BitcoinUnits::data(const QModelIndex &index, int role) const
{
int row = index.row();
if(row >= 0 && row < unitlist.size())
{
Unit unit = unitlist.at(row);
switch(role)
{
case Qt::EditRole:
case Qt::DisplayRole:
return QVariant(name(unit));
case Qt::ToolTipRole:
return QVariant(description(unit));
case UnitRole:
return QVariant(static_cast<int>(unit));
}
}
return QVariant();
}<|fim▁end|> |
QString BitcoinUnits::formatWithUnit(int unit, qint64 amount, bool plussign)
{ |
<|file_name|>readline.js<|end_file_name|><|fim▁begin|>/*
 * Readline example (console input)
*
* @author André Ferreira <[email protected]>
*/<|fim▁hole|> input: process.stdin,
output: process.stdout
});
rl.question('What is your age? ', (idade) => {
console.log('Your age is:', idade);
rl.close();
});<|fim▁end|> |
const readline = require('readline');
const rl = readline.createInterface({ |
<|file_name|>extendablemessageevent.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::codegen::Bindings::ExtendableMessageEventBinding;
use crate::dom::bindings::codegen::Bindings::ExtendableMessageEventBinding::ExtendableMessageEventMethods;
use crate::dom::bindings::error::Fallible;
use crate::dom::bindings::inheritance::Castable;
use crate::dom::bindings::reflector::reflect_dom_object;
use crate::dom::bindings::root::DomRoot;
use crate::dom::bindings::str::DOMString;
use crate::dom::bindings::trace::RootedTraceableBox;<|fim▁hole|>use crate::dom::eventtarget::EventTarget;
use crate::dom::extendableevent::ExtendableEvent;
use crate::dom::globalscope::GlobalScope;
use crate::dom::serviceworkerglobalscope::ServiceWorkerGlobalScope;
use dom_struct::dom_struct;
use js::jsapi::{Heap, JSContext};
use js::jsval::JSVal;
use js::rust::HandleValue;
use servo_atoms::Atom;
#[dom_struct]
pub struct ExtendableMessageEvent {
event: ExtendableEvent,
data: Heap<JSVal>,
origin: DOMString,
lastEventId: DOMString,
}
impl ExtendableMessageEvent {
pub fn new(
global: &GlobalScope,
type_: Atom,
bubbles: bool,
cancelable: bool,
data: HandleValue,
origin: DOMString,
lastEventId: DOMString,
) -> DomRoot<ExtendableMessageEvent> {
let ev = Box::new(ExtendableMessageEvent {
event: ExtendableEvent::new_inherited(),
data: Heap::default(),
origin: origin,
lastEventId: lastEventId,
});
let ev = reflect_dom_object(ev, global, ExtendableMessageEventBinding::Wrap);
{
let event = ev.upcast::<Event>();
event.init_event(type_, bubbles, cancelable);
}
ev.data.set(data.get());
ev
}
pub fn Constructor(
worker: &ServiceWorkerGlobalScope,
type_: DOMString,
init: RootedTraceableBox<ExtendableMessageEventBinding::ExtendableMessageEventInit>,
) -> Fallible<DomRoot<ExtendableMessageEvent>> {
let global = worker.upcast::<GlobalScope>();
let ev = ExtendableMessageEvent::new(
global,
Atom::from(type_),
init.parent.parent.bubbles,
init.parent.parent.cancelable,
init.data.handle(),
init.origin.clone().unwrap(),
init.lastEventId.clone().unwrap(),
);
Ok(ev)
}
}
impl ExtendableMessageEvent {
pub fn dispatch_jsval(target: &EventTarget, scope: &GlobalScope, message: HandleValue) {
let extendable_message_event = ExtendableMessageEvent::new(
scope,
atom!("message"),
false,
false,
message,
DOMString::new(),
DOMString::new(),
);
extendable_message_event.upcast::<Event>().fire(target);
}
}
impl ExtendableMessageEventMethods for ExtendableMessageEvent {
#[allow(unsafe_code)]
// https://w3c.github.io/ServiceWorker/#extendablemessage-event-data-attribute
unsafe fn Data(&self, _cx: *mut JSContext) -> JSVal {
self.data.get()
}
// https://w3c.github.io/ServiceWorker/#extendablemessage-event-origin-attribute
fn Origin(&self) -> DOMString {
self.origin.clone()
}
// https://w3c.github.io/ServiceWorker/#extendablemessage-event-lasteventid-attribute
fn LastEventId(&self) -> DOMString {
self.lastEventId.clone()
}
// https://dom.spec.whatwg.org/#dom-event-istrusted
fn IsTrusted(&self) -> bool {
self.event.IsTrusted()
}
}<|fim▁end|> | use crate::dom::event::Event; |
<|file_name|>Main1122.cpp<|end_file_name|><|fim▁begin|>#include <iostream>
#include <cstdio>
using namespace std;
<|fim▁hole|>int main() {
cout<<"\n";
printf("\n");
// your code goes here
return 0;
}<|fim▁end|> | |
<|file_name|>cast_kernels.rs<|end_file_name|><|fim▁begin|>// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#[macro_use]
extern crate criterion;
use criterion::Criterion;
use rand::distributions::{Distribution, Standard};
use rand::prelude::random;
use std::sync::Arc;
extern crate arrow;
use arrow::array::*;
use arrow::compute::cast;
use arrow::datatypes::*;
fn build_array<FROM>(size: usize) -> ArrayRef
where
FROM: ArrowNumericType,
Standard: Distribution<FROM::Native>,
PrimitiveArray<FROM>: std::convert::From<Vec<Option<FROM::Native>>>,
{
let values = (0..size)
.map(|_| {
// 10% nulls, i.e. dense.
if random::<f64>() < 0.1 {
None
} else {
Some(random::<FROM::Native>())
}
})
.collect();
Arc::new(PrimitiveArray::<FROM>::from(values))
}
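// Example call (hypothetical): a 512-element Int32 array with ~10% nulls:
//   let arr = build_array::<Int32Type>(512);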
fn build_timestamp_array<FROM>(size: usize) -> ArrayRef
where
FROM: ArrowTimestampType,
Standard: Distribution<FROM::Native>,
{
let values = (0..size)
.map(|_| {
if random::<f64>() < 0.5 {
None
} else {
Some(random::<i64>())
}
})
.collect::<Vec<Option<i64>>>();
Arc::new(PrimitiveArray::<FROM>::from_opt_vec(values, None))
}
// cast array from specified primitive array type to desired data type
fn cast_array(array: &ArrayRef, to_type: DataType) {
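    // black_box forces the result to be materialized so the optimizer cannot
    // elide the cast being measured.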
criterion::black_box(cast(array, &to_type).unwrap());
}
fn add_benchmark(c: &mut Criterion) {
let i32_array = build_array::<Int32Type>(512);
let i64_array = build_array::<Int64Type>(512);
let f32_array = build_array::<Float32Type>(512);
let f64_array = build_array::<Float64Type>(512);
let date64_array = build_array::<Date64Type>(512);
let date32_array = build_array::<Date32Type>(512);
let time32s_array = build_array::<Time32SecondType>(512);
let time64ns_array = build_array::<Time64NanosecondType>(512);
let time_ns_array = build_timestamp_array::<TimestampNanosecondType>(512);
let time_ms_array = build_timestamp_array::<TimestampMillisecondType>(512);
c.bench_function("cast int32 to int32 512", |b| {
b.iter(|| cast_array(&i32_array, DataType::Int32))
});
c.bench_function("cast int32 to uint32 512", |b| {
b.iter(|| cast_array(&i32_array, DataType::UInt32))
});
c.bench_function("cast int32 to float32 512", |b| {
b.iter(|| cast_array(&i32_array, DataType::Float32))
});
c.bench_function("cast int32 to float64 512", |b| {
b.iter(|| cast_array(&i32_array, DataType::Float64))
});
c.bench_function("cast int32 to int64 512", |b| {<|fim▁hole|> b.iter(|| cast_array(&f32_array, DataType::Int32))
});
c.bench_function("cast float64 to float32 512", |b| {
b.iter(|| cast_array(&f64_array, DataType::Float32))
});
c.bench_function("cast float64 to uint64 512", |b| {
b.iter(|| cast_array(&f64_array, DataType::UInt64))
});
c.bench_function("cast int64 to int32 512", |b| {
b.iter(|| cast_array(&i64_array, DataType::Int32))
});
c.bench_function("cast date64 to date32 512", |b| {
b.iter(|| cast_array(&date64_array, DataType::Date32(DateUnit::Day)))
});
c.bench_function("cast date32 to date64 512", |b| {
b.iter(|| cast_array(&date32_array, DataType::Date64(DateUnit::Millisecond)))
});
c.bench_function("cast time32s to time32ms 512", |b| {
b.iter(|| cast_array(&time32s_array, DataType::Time32(TimeUnit::Millisecond)))
});
c.bench_function("cast time32s to time64us 512", |b| {
b.iter(|| cast_array(&time32s_array, DataType::Time64(TimeUnit::Microsecond)))
});
c.bench_function("cast time64ns to time32s 512", |b| {
b.iter(|| cast_array(&time64ns_array, DataType::Time32(TimeUnit::Second)))
});
c.bench_function("cast timestamp_ns to timestamp_s 512", |b| {
b.iter(|| {
cast_array(
&time_ns_array,
DataType::Timestamp(TimeUnit::Second, None),
)
})
});
c.bench_function("cast timestamp_ms to timestamp_ns 512", |b| {
b.iter(|| {
cast_array(
&time_ms_array,
DataType::Timestamp(TimeUnit::Nanosecond, None),
)
})
});
c.bench_function("cast timestamp_ms to i64 512", |b| {
b.iter(|| cast_array(&time_ms_array, DataType::Int64))
});
}
criterion_group!(benches, add_benchmark);
criterion_main!(benches);<|fim▁end|> | b.iter(|| cast_array(&i32_array, DataType::Int64))
});
c.bench_function("cast float32 to int32 512", |b| { |
<|file_name|>builtintypes.py<|end_file_name|><|fim▁begin|>"""
Flow's list of built-in types available in 0.44.0 (Apr 13, 2017).
Related to
https://flow.org/en/docs/types/
and
http://www.saltycrane.com/blog/2016/06/flow-type-cheat-sheet/
"""
def print_type_format(trigger, content=None, description=None):
"""Format output for autocompletion for a given trigger text."""
return ("%s\t%s" % (trigger, description), "%s" % (content or trigger))
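# For example (illustrative):
#   print_type_format("Array", "Array<${1}>", "Flow core type")
#   -> ("Array\tFlow core type", "Array<${1}>")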
builtintypes = [
# Built-in types
print_type_format("any", description="Flow built-in type"),
print_type_format("boolean", description="Flow built-in type"),
print_type_format("null", description="Flow built-in type"),
print_type_format("number", description="Flow built-in type"),
print_type_format("mixed", description="Flow built-in type"),
print_type_format("string", description="Flow built-in type"),
print_type_format("void", description="Flow built-in type"),
print_type_format("Class", "Class<${1}>", "Flow built-in type"),
# Built-in 'private' types
print_type_format(
"Abstract", "\$Abstract<${1}>", "Flow built-in private type"
),
print_type_format(
"Diff", "\$Diff<${1}, ${2}>", "Flow built-in private type"
),
print_type_format("Exact", "\$Exact<${1}>", "Flow built-in private type"),
print_type_format("Keys", "\$Keys<${1}>", "Flow built-in private type"),
print_type_format(
"ObjMap", "\$ObjMap<${1}, ${2}>", "Flow built-in private type"
),
print_type_format(
"PropertyType",
"\$PropertyType<${1}, ${2}>",
"Flow built-in private type",
),
print_type_format(
"Subtype", "\$Subtype<${1}, ${2}>", "Flow built-in private type"
),
print_type_format(
"Supertype", "\$Supertype<${1}, ${2}>", "Flow built-in private type"
),
# Core types
print_type_format("Array", "Array<${1}>", "Flow core type"),
print_type_format("ArrayBuffer", description="Flow core type"),
print_type_format(
"AsyncIterable", "AsyncIterable<${1}>", "Flow core type"
),
print_type_format(
"AsyncIterator", "AsyncIterator<${1}>", "Flow core type"
),
print_type_format("Boolean", description="Flow core type"),
print_type_format("CallSite", description="Flow core type"),
print_type_format("DataView", description="Flow core type"),
print_type_format("Date", description="Flow core type"),
print_type_format("Error", description="Flow core type"),
print_type_format("EvalError", description="Flow core type"),
print_type_format("Float32Array", description="Flow core type"),
print_type_format("Float64Array", description="Flow core type"),
print_type_format("Function", description="Flow core type"),
print_type_format("global", description="Flow core type"),
print_type_format("Infinity", description="Flow core type"),
print_type_format("Int16Array", description="Flow core type"),
print_type_format("Int32Array", description="Flow core type"),
print_type_format("Int8Array", description="Flow core type"),
print_type_format("Iterable", "Iterable<${1}>", "Flow core type"),
print_type_format("Iterator", "Iterator<${1}>", "Flow core type"),
print_type_format(
"IteratorResult", "IteratorResult<${1}, ${2}>", "Flow core type"
),
print_type_format("JSON", description="Flow core type"),
print_type_format("Map", "Map<${1}, ${2}>", "Flow core type"),
print_type_format("NaN", description="Flow core type"),
print_type_format("Number", description="Flow core type"),
print_type_format("Object", description="Flow core type"),
print_type_format("Promise", "Promise<${1}>", "Flow core type"),
print_type_format("Proxy", "Proxy<${1}>", "Flow core type"),
print_type_format("RangeError", description="Flow core type"),
print_type_format("ReferenceError", description="Flow core type"),
print_type_format("Reflect", description="Flow core type"),
print_type_format("RegExp", description="Flow core type"),
print_type_format("Set", "Set<${1}>", "Flow core type"),
print_type_format("String", description="Flow core type"),
print_type_format("Symbol", description="Flow core type"),
print_type_format("SyntaxError", description="Flow core type"),
print_type_format("TypeError", description="Flow core type"),
print_type_format("Uint16Array", description="Flow core type"),
print_type_format("Uint32Array", description="Flow core type"),
print_type_format("Uint8Array", description="Flow core type"),
print_type_format("Uint8ClampedArray", description="Flow core type"),
print_type_format("URIError", description="Flow core type"),
print_type_format("WeakMap", "WeakMap<${1}, ${2}>", "Flow core type"),
print_type_format("WeakSet", "WeakSet<${1}>", "Flow core type"),
# Core 'private' types
print_type_format(
"ArrayBufferView",
"\$ArrayBufferView",
description="Flow core private type",
),
print_type_format(
"ReadOnlyArray",
"\$ReadOnlyArray<${1}>",
description="Flow core private type",
),
print_type_format(
"SymboIsConcatSpreadable",
"\$SymboIsConcatSpreadable",
description="Flow core private type",
),
print_type_format(
"SymbolHasInstance",
"\$SymbolHasInstance",
description="Flow core private type",
),
print_type_format(
"SymbolIterator",
"\$SymbolIterator",
description="Flow core private type",
),
print_type_format(
"SymbolMatch", "\$SymbolMatch", description="Flow core private type"
),
print_type_format(
"SymbolReplace",
"\$SymbolReplace",
description="Flow core private type",
),
print_type_format(
"SymbolSearch", "\$SymbolSearch", description="Flow core private type"
),
print_type_format(
"SymbolSpecies",
"\$SymbolSpecies",
description="Flow core private type",
),
print_type_format(
"SymbolSplit", "\$SymbolSplit", description="Flow core private type"
),
print_type_format(
"SymbolToPrimitive",
"\$SymbolToPrimitive",
description="Flow core private type",
),
print_type_format(
"SymbolToStringTag",
"\$SymbolToStringTag",
description="Flow core private type",
),
print_type_format(
"SymbolUnscopables",
"\$SymbolUnscopables",
description="Flow core private type",
),
print_type_format(
"TypedArray", "\$TypedArray", description="Flow core private type"
),
print_type_format(
"Proxyrevocable",
"Proxy\$revocable<${1}>",
description="Flow core private type",
),
print_type_format(
"Proxytraps",
"Proxy\$traps<${1}>",
description="Flow core private type",
),
print_type_format(
"RegExpflags", "RegExp\$flags", description="Flow core private type"
),
# React types
print_type_format(
"_ReactClass", "_ReactClass<${1}, ${2}, ${3}, ${4}>", "Flow React type"
),
print_type_format(
"LegacyReactComponent",
"LegacyReactComponent<${1}, ${2}, ${3}>",
"Flow React type",
),
print_type_format("ReactClass", "ReactClass<${1}>", "Flow React type"),
print_type_format(
"ReactPropsChainableTypeChecker", description="Flow React type"
),
print_type_format("ReactPropsCheckType", description="Flow React type"),
print_type_format("ReactPropTypes", description="Flow React type"),
print_type_format(
"SyntheticClipboardEvent", description="Flow React type"
),
print_type_format(
"SyntheticCompositionEvent", description="Flow React type"
),
print_type_format("SyntheticDragEvent", description="Flow React type"),
print_type_format("SyntheticEvent", description="Flow React type"),
print_type_format("SyntheticFocusEvent", description="Flow React type"),
print_type_format("SyntheticInputEvent", description="Flow React type"),
print_type_format("SyntheticKeyboardEvent", description="Flow React type"),
print_type_format("SyntheticMouseEvent", description="Flow React type"),
print_type_format("SyntheticTouchEvent", description="Flow React type"),
print_type_format("SyntheticUIEvent", description="Flow React type"),
print_type_format("SyntheticWheelEvent", description="Flow React type"),
# React 'private' types
print_type_format(
"DefaultPropsOf", "\$DefaultPropsOf<${1}>", "Flow React type"
),
print_type_format("JSXIntrinsics", "\$JSXIntrinsics", "Flow React type"),
print_type_format("PropsOf", "\$PropsOf<${1}>", "Flow React type"),
print_type_format(
"ReactComponent",
"React\$Component<${1}, ${2}, ${3}>",
"Flow React type",
),
print_type_format(
"ReactElement", "React\$Element<${1}>", "Flow React type"
),
print_type_format(
"ReactPropTypesarrayOf", "React\$PropTypes\$arrayOf", "Flow React type"
),
print_type_format(
"ReactPropTypesinstanceOf",
"React\$PropTypes\$instanceOf",
"Flow React type",
),
print_type_format(
"ReactPropTypesobjectOf",
"React\$PropTypes\$objectOf",
"Flow React type",
),
print_type_format(
"ReactPropTypesoneOf", "React\$PropTypes\$oneOf", "Flow React type"
),
print_type_format(
"ReactPropTypesoneOfType",
"React\$PropTypes\$oneOfType",
"Flow React type",
),
print_type_format(
"ReactPropTypesshape", "React\$PropTypes\$shape", "Flow React type"
),
print_type_format(
"React$PureComponent",
"React\$PureComponent<${1}, ${2}, ${3}>",
"Flow React type",
),
# Document Object Model types
print_type_format("AnimationEvent", description="Flow DOM type"),
print_type_format("AnimationEventHandler", description="Flow DOM type"),
print_type_format("AnimationEventListener", description="Flow DOM type"),
print_type_format("AnimationEventTypes", description="Flow DOM type"),
print_type_format("Attr", description="Flow DOM type"),
print_type_format("AudioTrack", description="Flow DOM type"),
print_type_format("AudioTrackList", description="Flow DOM type"),
print_type_format("Blob", description="Flow DOM type"),
print_type_format("BufferDataSource", description="Flow DOM type"),
print_type_format("CanvasDrawingStyles", description="Flow DOM type"),
print_type_format("CanvasFillRule", description="Flow DOM type"),
print_type_format("CanvasGradient", description="Flow DOM type"),
print_type_format("CanvasImageSource", description="Flow DOM type"),
print_type_format("CanvasPattern", description="Flow DOM type"),
print_type_format("CanvasRenderingContext2D", description="Flow DOM type"),
print_type_format("CharacterData", description="Flow DOM type"),
print_type_format("ClientRect", description="Flow DOM type"),
print_type_format("ClientRectList", description="Flow DOM type"),
print_type_format("Comment", description="Flow DOM type"),
print_type_format("CustomElementRegistry", description="Flow DOM type"),
print_type_format("CustomEvent", description="Flow DOM type"),
print_type_format("DataTransfer", description="Flow DOM type"),
print_type_format("DataTransferItem", description="Flow DOM type"),
print_type_format("DataTransferItemList", description="Flow DOM type"),
print_type_format("Document", description="Flow DOM type"),
print_type_format("DocumentFragment", description="Flow DOM type"),
print_type_format("DocumentType", description="Flow DOM type"),
print_type_format("DOMError", description="Flow DOM type"),
print_type_format("DOMImplementation", description="Flow DOM type"),
print_type_format("DOMTokenList", description="Flow DOM type"),
print_type_format("DragEvent", description="Flow DOM type"),
print_type_format("DragEventHandler", description="Flow DOM type"),
print_type_format("DragEventListener", description="Flow DOM type"),
print_type_format("DragEventTypes", description="Flow DOM type"),
print_type_format("Element", description="Flow DOM type"),
print_type_format(
"ElementRegistrationOptions", description="Flow DOM type"
),
print_type_format("Event", description="Flow DOM type"),
print_type_format("EventHandler", description="Flow DOM type"),
print_type_format("EventListener", description="Flow DOM type"),
print_type_format(
"EventListenerOptionsOrUseCapture", description="Flow DOM type"
),
print_type_format("EventTarget", description="Flow DOM type"),
print_type_format("File", description="Flow DOM type"),
print_type_format("FileList", description="Flow DOM type"),
print_type_format("FileReader", description="Flow DOM type"),
print_type_format("HitRegionOptions", description="Flow DOM type"),
print_type_format("HTMLAnchorElement", description="Flow DOM type"),
print_type_format("HTMLAppletElement", description="Flow DOM type"),
print_type_format("HTMLAudioElement", description="Flow DOM type"),
print_type_format("HTMLBaseElement", description="Flow DOM type"),
print_type_format("HTMLButtonElement", description="Flow DOM type"),
print_type_format("HTMLCanvasElement", description="Flow DOM type"),
print_type_format(
"HTMLCollection", "HTMLCollection<${1}>", "Flow DOM type"
),
print_type_format("HTMLDivElement", description="Flow DOM type"),
print_type_format("HTMLElement", description="Flow DOM type"),
print_type_format("HTMLEmbedElement", description="Flow DOM type"),
print_type_format("HTMLFormElement", description="Flow DOM type"),
print_type_format("HTMLIFrameElement", description="Flow DOM type"),
print_type_format("HTMLImageElement", description="Flow DOM type"),
print_type_format("HTMLInputElement", description="Flow DOM type"),
print_type_format("HTMLLabelElement", description="Flow DOM type"),
print_type_format("HTMLLinkElement", description="Flow DOM type"),
print_type_format("HTMLMediaElement", description="Flow DOM type"),
print_type_format("HTMLMenuElement", description="Flow DOM type"),
print_type_format("HTMLMetaElement", description="Flow DOM type"),
print_type_format("HTMLOptGroupElement", description="Flow DOM type"),
print_type_format("HTMLOptionElement", description="Flow DOM type"),
print_type_format("HTMLOptionsCollection", description="Flow DOM type"),
print_type_format("HTMLParagraphElement", description="Flow DOM type"),
print_type_format("HTMLScriptElement", description="Flow DOM type"),
print_type_format("HTMLSelectElement", description="Flow DOM type"),
print_type_format("HTMLSlotElement", description="Flow DOM type"),
print_type_format("HTMLSourceElement", description="Flow DOM type"),
print_type_format("HTMLSpanElement", description="Flow DOM type"),
print_type_format("HTMLStyleElement", description="Flow DOM type"),
print_type_format("HTMLTableCaptionElement", description="Flow DOM type"),
print_type_format("HTMLTableCellElement", description="Flow DOM type"),
print_type_format("HTMLTableElement", description="Flow DOM type"),
print_type_format("HTMLTableRowElement", description="Flow DOM type"),
print_type_format("HTMLTableSectionElement", description="Flow DOM type"),
print_type_format("HTMLTemplateElement", description="Flow DOM type"),
print_type_format("HTMLTextAreaElement", description="Flow DOM type"),
print_type_format("HTMLVideoElement", description="Flow DOM type"),
print_type_format("Image", description="Flow DOM type"),
print_type_format("ImageBitmap", description="Flow DOM type"),
print_type_format("ImageData", description="Flow DOM type"),
print_type_format("KeyboardEvent", description="Flow DOM type"),
print_type_format("KeyboardEventHandler", description="Flow DOM type"),
print_type_format("KeyboardEventListener", description="Flow DOM type"),
print_type_format("KeyboardEventTypes", description="Flow DOM type"),
print_type_format("MediaError", description="Flow DOM type"),
print_type_format("MediaSource", description="Flow DOM type"),
print_type_format("MessageEvent", description="Flow DOM type"),
print_type_format("MouseEvent", description="Flow DOM type"),
print_type_format("MouseEventHandler", description="Flow DOM type"),
print_type_format("MouseEventListener", description="Flow DOM type"),
print_type_format("MouseEventTypes", description="Flow DOM type"),
print_type_format("NamedNodeMap", description="Flow DOM type"),
print_type_format("Node", description="Flow DOM type"),
print_type_format("NodeFilter", description="Flow DOM type"),
print_type_format("NodeFilterCallback", description="Flow DOM type"),
print_type_format("NodeFilterInterface", description="Flow DOM type"),
print_type_format(
"NodeIterator", "NodeIterator<${1}, ${2}>", "Flow DOM type"
),
print_type_format("NodeList", "NodeList<${1}>", "Flow DOM type"),
print_type_format("Path2D", description="Flow DOM type"),
print_type_format("ProgressEvent", description="Flow DOM type"),
print_type_format("ProgressEventHandler", description="Flow DOM type"),
print_type_format("ProgressEventListener", description="Flow DOM type"),
print_type_format("ProgressEventTypes", description="Flow DOM type"),
print_type_format("PromiseRejectionEvent", description="Flow DOM type"),
print_type_format("Range", description="Flow DOM type"),
print_type_format("RenderingContext", description="Flow DOM type"),
print_type_format("Selection", description="Flow DOM type"),
print_type_format("SelectionDirection", description="Flow DOM type"),
print_type_format("SelectionMode", description="Flow DOM type"),
print_type_format("ShadowRoot", description="Flow DOM type"),
print_type_format("SourceBuffer", description="Flow DOM type"),
print_type_format("SourceBufferList", description="Flow DOM type"),
print_type_format("Storage", description="Flow DOM type"),
print_type_format("SVGMatrix", description="Flow DOM type"),
print_type_format("TexImageSource", description="Flow DOM type"),
print_type_format("Text", description="Flow DOM type"),
print_type_format("TextMetrics", description="Flow DOM type"),
print_type_format("TextRange", description="Flow DOM type"),
print_type_format("TextTrack", description="Flow DOM type"),
print_type_format("TextTrackCue", description="Flow DOM type"),
print_type_format("TextTrackCueList", description="Flow DOM type"),
print_type_format("TextTrackList", description="Flow DOM type"),
print_type_format("TimeRanges", description="Flow DOM type"),
print_type_format("Touch", description="Flow DOM type"),
print_type_format("TouchEvent", description="Flow DOM type"),
print_type_format("TouchEventHandler", description="Flow DOM type"),
print_type_format("TouchEventListener", description="Flow DOM type"),
print_type_format("TouchEventTypes", description="Flow DOM type"),
print_type_format("TouchList", description="Flow DOM type"),
print_type_format("TrackDefault", description="Flow DOM type"),
print_type_format("TrackDefaultList", description="Flow DOM type"),
print_type_format("TreeWalker", "TreeWalker<${1}, ${2}>", "Flow DOM type"),
print_type_format("UIEvent", description="Flow DOM type"),
print_type_format("URL", description="Flow DOM type"),
print_type_format("ValidityState", description="Flow DOM type"),
print_type_format("VertexAttribFVSource", description="Flow DOM type"),
print_type_format("VideoTrack", description="Flow DOM type"),
print_type_format("VideoTrackList", description="Flow DOM type"),
print_type_format("WebGLContextAttributes", description="Flow DOM type"),
print_type_format("WebGLContextEvent", description="Flow DOM type"),
print_type_format("WebGLRenderingContext", description="Flow DOM type"),
print_type_format("WheelEvent", description="Flow DOM type"),
print_type_format("WheelEventHandler", description="Flow DOM type"),
print_type_format("WheelEventListener", description="Flow DOM type"),
print_type_format("WheelEventTypes", description="Flow DOM type"),
# Document Object Model 'private' types
print_type_format(
"CustomEventInit", "CustomEvent\$Init", "Flow DOM private type"
),
print_type_format("EventInit", "Event\$Init", "Flow DOM private type"),
# Browser Object Model types
print_type_format("AnalyserNode", description="Flow BOM type"),
print_type_format("AudioBuffer", description="Flow BOM type"),
print_type_format("AudioBufferSourceNode", description="Flow BOM type"),
print_type_format("AudioContext", description="Flow BOM type"),
print_type_format("AudioDestinationNode", description="Flow BOM type"),
print_type_format("AudioListener", description="Flow BOM type"),
print_type_format("AudioNode", description="Flow BOM type"),
print_type_format("AudioParam", description="Flow BOM type"),
print_type_format("BatteryManager", description="Flow BOM type"),
print_type_format("BiquadFilterNode", description="Flow BOM type"),
print_type_format("BodyInit", description="Flow BOM type"),
print_type_format("CacheType", description="Flow BOM type"),
print_type_format("ChannelMergerNode", description="Flow BOM type"),
print_type_format("ChannelSplitterNode", description="Flow BOM type"),
print_type_format("CloseEvent", description="Flow BOM type"),
print_type_format("ConvolverNode", description="Flow BOM type"),
print_type_format("Coordinates", description="Flow BOM type"),
print_type_format("CredentialsType", description="Flow BOM type"),
print_type_format(
"DedicatedWorkerGlobalScope", description="Flow BOM type"
),
print_type_format("DelayNode", description="Flow BOM type"),
print_type_format("DOMParser", description="Flow BOM type"),
print_type_format("DynamicsCompressorNode", description="Flow BOM type"),
print_type_format("FormData", description="Flow BOM type"),
print_type_format("FormDataEntryValue", description="Flow BOM type"),
print_type_format("GainNode", description="Flow BOM type"),
print_type_format("Gamepad", description="Flow BOM type"),
print_type_format("GamepadButton", description="Flow BOM type"),
print_type_format("Geolocation", description="Flow BOM type"),
print_type_format("Headers", description="Flow BOM type"),
print_type_format("HeadersInit", description="Flow BOM type"),
print_type_format("History", description="Flow BOM type"),
print_type_format("Location", description="Flow BOM type"),
print_type_format(
"MediaElementAudioSourceNode", description="Flow BOM type"
),
print_type_format("MediaStream", description="Flow BOM type"),
print_type_format(
"MediaStreamAudioSourceNode", description="Flow BOM type"
),
print_type_format("MediaStreamTrack", description="Flow BOM type"),
print_type_format("MessageChannel", description="Flow BOM type"),
print_type_format("MessagePort", description="Flow BOM type"),
print_type_format("MethodType", description="Flow BOM type"),
print_type_format("MimeType", description="Flow BOM type"),
print_type_format("MimeTypeArray", description="Flow BOM type"),
print_type_format("ModeType", description="Flow BOM type"),
print_type_format("MutationObserver", description="Flow BOM type"),
print_type_format(
"MutationObserverInitRequired", description="Flow BOM type"
),
print_type_format("MutationRecord", description="Flow BOM type"),
print_type_format("Navigator", description="Flow BOM type"),
print_type_format("NavigatorCommon", description="Flow BOM type"),
print_type_format("OscillatorNode", description="Flow BOM type"),
print_type_format("PannerNode", description="Flow BOM type"),
print_type_format("Performance", description="Flow BOM type"),
print_type_format("PerformanceEntry", description="Flow BOM type"),
print_type_format(
"PerformanceEntryFilterOptions", description="Flow BOM type"
),
print_type_format("PerformanceNavigation", description="Flow BOM type"),
print_type_format(
"PerformanceNavigationTiming", description="Flow BOM type"
),
print_type_format(
"PerformanceResourceTiming", description="Flow BOM type"
),
print_type_format("PerformanceTiming", description="Flow BOM type"),
print_type_format("PeriodicWave", description="Flow BOM type"),
print_type_format("Plugin", description="Flow BOM type"),
print_type_format("PluginArray", description="Flow BOM type"),
print_type_format("Position", description="Flow BOM type"),
print_type_format("PositionError", description="Flow BOM type"),
print_type_format("PositionOptions", description="Flow BOM type"),
print_type_format("RedirectType", description="Flow BOM type"),
print_type_format("ReferrerPolicyType", description="Flow BOM type"),
print_type_format("Request", description="Flow BOM type"),
print_type_format("RequestOptions", description="Flow BOM type"),
print_type_format("Response", description="Flow BOM type"),
print_type_format("ResponseOptions", description="Flow BOM type"),
print_type_format("ResponseType", description="Flow BOM type"),
print_type_format("Screen", description="Flow BOM type"),
print_type_format("ScriptProcessorNode", description="Flow BOM type"),
print_type_format("SharedWorker", description="Flow BOM type"),
print_type_format("SharedWorkerGlobalScope", description="Flow BOM type"),
print_type_format("TextDecoder", description="Flow BOM type"),
print_type_format("TextEncoder", description="Flow BOM type"),
print_type_format("URLSearchParams", description="Flow BOM type"),
print_type_format("WaveShaperNode", description="Flow BOM type"),
print_type_format("WebSocket", description="Flow BOM type"),
print_type_format("Worker", description="Flow BOM type"),
print_type_format("WorkerGlobalScope", description="Flow BOM type"),
print_type_format("WorkerLocation", description="Flow BOM type"),
print_type_format("WorkerNavigator", description="Flow BOM type"),
print_type_format("XDomainRequest", description="Flow BOM type"),
print_type_format("XMLHttpRequest", description="Flow BOM type"),
print_type_format(
"XMLHttpRequestEventTarget", description="Flow BOM type"
),
print_type_format("XMLSerializer", description="Flow BOM type"),
# Browser Object Model 'private' types
print_type_format(
"TextDecoderavailableEncodings",
"TextDecoder\$availableEncodings",
"Flow BOM private type",
),
print_type_format(
"TextEncoderavailableEncodings",
"TextEncoder\$availableEncodings",
"Flow BOM private type",
),
# CSS Object Model types
print_type_format("CSSRule", description="Flow CSSOM type"),
print_type_format("CSSRuleList", description="Flow CSSOM type"),
print_type_format("CSSStyleDeclaration", description="Flow CSSOM type"),
print_type_format("CSSStyleSheet", description="Flow CSSOM type"),
print_type_format("MediaList", description="Flow CSSOM type"),
print_type_format("StyleSheet", description="Flow CSSOM type"),
print_type_format("StyleSheetList", description="Flow CSSOM type"),
print_type_format("TransitionEvent", description="Flow CSSOM type"),
# indexedDB types
print_type_format("IDBCursor", description="Flow indexedDB type"),
print_type_format("IDBCursorWithValue", description="Flow indexedDB type"),
print_type_format("IDBDatabase", description="Flow indexedDB type"),
print_type_format("IDBDirection", description="Flow indexedDB type"),
print_type_format("IDBEnvironment", description="Flow indexedDB type"),
print_type_format("IDBFactory", description="Flow indexedDB type"),
print_type_format("IDBIndex", description="Flow indexedDB type"),
print_type_format("IDBKeyRange", description="Flow indexedDB type"),
print_type_format("IDBObjectStore", description="Flow indexedDB type"),
print_type_format("IDBOpenDBRequest", description="Flow indexedDB type"),
print_type_format("IDBRequest", description="Flow indexedDB type"),
print_type_format("IDBTransaction", description="Flow indexedDB type"),
# Node.js types
print_type_format("AssertionError", description="Flow Node.js type"),
print_type_format("Buffer", description="Flow Node.js type"),
print_type_format("Sign", description="Flow Node.js type"),
print_type_format("Verify", description="Flow Node.js type"),
print_type_format("duplexStreamOptions", description="Flow Node.js type"),
print_type_format("EventEmitter", description="Flow Node.js type"),
print_type_format("FSWatcher", description="Flow Node.js type"),
print_type_format("ReadStream", description="Flow Node.js type"),
print_type_format("Stats", description="Flow Node.js type"),
print_type_format("WriteStream", description="Flow Node.js type"),
print_type_format("ClientRequest", description="Flow Node.js type"),
print_type_format("IncomingMessage", description="Flow Node.js type"),
print_type_format("Server", description="Flow Node.js type"),
print_type_format("ServerResponse", description="Flow Node.js type"),
print_type_format("ClientRequest", description="Flow Node.js type"),
print_type_format("IncomingMessage", description="Flow Node.js type"),
print_type_format("Server", description="Flow Node.js type"),
print_type_format("ServerResponse", description="Flow Node.js type"),
print_type_format("Server", description="Flow Node.js type"),
print_type_format("Socket", description="Flow Node.js type"),
print_type_format("Process", description="Flow Node.js type"),
print_type_format(<|fim▁hole|> print_type_format(
"writableStreamOptions", description="Flow Node.js type"
),
print_type_format("Deflate", description="Flow Node.js type"),
print_type_format("DeflateRaw", description="Flow Node.js type"),
print_type_format("Gunzip", description="Flow Node.js type"),
print_type_format("Gzip", description="Flow Node.js type"),
print_type_format("Inflate", description="Flow Node.js type"),
print_type_format("InflateRaw", description="Flow Node.js type"),
print_type_format("Unzip", description="Flow Node.js type"),
print_type_format("Zlib", description="Flow Node.js type"),
# Node.js private types
print_type_format(
"bufferEncoding", "buffer\$Encoding", "Flow Node.js private type"
),
print_type_format(
"bufferNonBufferEncoding",
"buffer\$NonBufferEncoding",
"Flow Node.js private type",
),
print_type_format(
"bufferToJSONRet", "buffer\$ToJSONRet", "Flow Node.js private type"
),
print_type_format(
"child_processChildProcess",
"child_process\$ChildProcess",
"Flow Node.js private type",
),
print_type_format(
"child_processError",
"child_process\$Error",
"Flow Node.js private type",
),
print_type_format(
"child_processexecCallback",
"child_process\$execCallback",
"Flow Node.js private type",
),
print_type_format(
"child_processexecFileCallback",
"child_process\$execFileCallback",
"Flow Node.js private type",
),
print_type_format(
"child_processexecFileOpts",
"child_process\$execFileOpts",
"Flow Node.js private type",
),
print_type_format(
"child_processexecFileSyncOpts",
"child_process\$execFileSyncOpts",
"Flow Node.js private type",
),
print_type_format(
"child_processexecOpts",
"child_process\$execOpts",
"Flow Node.js private type",
),
print_type_format(
"child_processexecSyncOpts",
"child_process\$execSyncOpts",
"Flow Node.js private type",
),
print_type_format(
"child_processforkOpts",
"child_process\$forkOpts",
"Flow Node.js private type",
),
print_type_format(
"child_processHandle",
"child_process\$Handle",
"Flow Node.js private type",
),
print_type_format(
"child_processspawnOpts",
"child_process\$spawnOpts",
"Flow Node.js private type",
),
print_type_format(
"child_processspawnRet",
"child_process\$spawnRet",
"Flow Node.js private type",
),
print_type_format(
"child_processspawnSyncOpts",
"child_process\$spawnSyncOpts",
"Flow Node.js private type",
),
print_type_format(
"child_processspawnSyncRet",
"child_process\$spawnSyncRet",
"Flow Node.js private type",
),
print_type_format(
"clustersetupMasterOpts",
"cluster\$setupMasterOpts",
"Flow Node.js private type",
),
print_type_format(
"clusterWorker", "cluster\$Worker", "Flow Node.js private type"
),
print_type_format(
"cryptoCipher", "crypto\$Cipher", "Flow Node.js private type"
),
print_type_format(
"cryptocreateCredentialsDetails",
"crypto\$createCredentialsDetails",
"Flow Node.js private type",
),
print_type_format(
"cryptoCredentials", "crypto\$Credentials", "Flow Node.js private type"
),
print_type_format(
"cryptoDecipher", "crypto\$Decipher", "Flow Node.js private type"
),
print_type_format(
"cryptoDiffieHellman",
"crypto\$DiffieHellman",
"Flow Node.js private type",
),
print_type_format(
"cryptoHash", "crypto\$Hash", "Flow Node.js private type"
),
print_type_format(
"cryptoHmac", "crypto\$Hmac", "Flow Node.js private type"
),
print_type_format(
"cryptoSign", "crypto\$Sign", "Flow Node.js private type"
),
print_type_format(
"cryptoSignprivate_key",
"crypto\$Sign\$private_key",
"Flow Node.js private type",
),
print_type_format(
"cryptoVerify", "crypto\$Verify", "Flow Node.js private type"
),
print_type_format(
"dgramSocket", "dgram\$Socket", "Flow Node.js private type"
),
print_type_format(
"domainDomain", "domain\$Domain", "Flow Node.js private type"
),
print_type_format(
"eventsEventEmitter",
"events\$EventEmitter",
"Flow Node.js private type",
),
print_type_format(
"httpClientRequest", "http\$ClientRequest", "Flow Node.js private type"
),
print_type_format(
"httpIncomingMessage",
"http\$IncomingMessage",
"Flow Node.js private type",
),
print_type_format(
"httpServerResponse",
"http\$ServerResponse",
"Flow Node.js private type",
),
print_type_format(
"netconnectOptions", "net\$connectOptions", "Flow Node.js private type"
),
print_type_format("netServer", "net\$Server", "Flow Node.js private type"),
print_type_format("netSocket", "net\$Socket", "Flow Node.js private type"),
print_type_format(
"netSocketaddress", "net\$Socket\$address", "Flow Node.js private type"
),
print_type_format("osCPU", "os\$CPU", "Flow Node.js private type"),
print_type_format(
"osNetIFAddr", "os\$NetIFAddr", "Flow Node.js private type"
),
print_type_format(
"osUserInfobuffer", "os\$UserInfo\$buffer", "Flow Node.js private type"
),
print_type_format(
"osUserInfostring", "os\$UserInfo\$string", "Flow Node.js private type"
),
print_type_format(
"readlineInterface", "readline\$Interface", "Flow Node.js private type"
),
print_type_format(
"streamDuplex", "stream\$Duplex", "Flow Node.js private type"
),
print_type_format(
"streamPassThrough", "stream\$PassThrough", "Flow Node.js private type"
),
print_type_format(
"streamReadable", "stream\$Readable", "Flow Node.js private type"
),
print_type_format(
"streamStream", "stream\$Stream", "Flow Node.js private type"
),
print_type_format(
"streamTransform", "stream\$Transform", "Flow Node.js private type"
),
print_type_format(
"streamWritable", "stream\$Writable", "Flow Node.js private type"
),
print_type_format(
"string_decoderStringDecoder",
"string_decoder\$StringDecoder",
"Flow Node.js private type",
),
print_type_format("tlsServer", "tls\$Server", "Flow Node.js private type"),
print_type_format(
"tlsTLSSocket", "tls\$TLSSocket", "Flow Node.js private type"
),
print_type_format(
"ttyReadStream", "tty\$ReadStream", "Flow Node.js private type"
),
print_type_format(
"ttyWriteStream", "tty\$WriteStream", "Flow Node.js private type"
),
print_type_format("vmContext", "vm\$Context", "Flow Node.js private type"),
print_type_format("vmScript", "vm\$Script", "Flow Node.js private type"),
print_type_format(
"vmScriptOptions", "vm\$ScriptOptions", "Flow Node.js private type"
),
print_type_format(
"zlibasyncFn", "zlib\$asyncFn", "Flow Node.js private type"
),
print_type_format(
"zliboptions", "zlib\$options", "Flow Node.js private type"
),
print_type_format(
"zlibsyncFn", "zlib\$syncFn", "Flow Node.js private type"
),
# Service Workers types
print_type_format("Cache", description="Flow service worker type"),
print_type_format(
"CacheQueryOptions", description="Flow service worker type"
),
print_type_format("CacheStorage", description="Flow service worker type"),
print_type_format("Client", description="Flow service worker type"),
print_type_format(
"ClientQueryOptions", description="Flow service worker type"
),
print_type_format("Clients", description="Flow service worker type"),
print_type_format("ClientType", description="Flow service worker type"),
print_type_format(
"ExtendableEvent", description="Flow service worker type"
),
print_type_format("FetchEvent", description="Flow service worker type"),
print_type_format(
"ForeignFetchOptions", description="Flow service worker type"
),
print_type_format("FrameType", description="Flow service worker type"),
print_type_format("InstallEvent", description="Flow service worker type"),
print_type_format(
"NavigationPreloadManager", description="Flow service worker type"
),
print_type_format(
"NavigationPreloadState", description="Flow service worker type"
),
print_type_format(
"RegistrationOptions", description="Flow service worker type"
),
print_type_format("RequestInfo", description="Flow service worker type"),
print_type_format("ServiceWorker", description="Flow service worker type"),
print_type_format(
"ServiceWorkerContainer", description="Flow service worker type"
),
print_type_format(
"ServiceWorkerMessageEvent", description="Flow service worker type"
),
print_type_format(
"ServiceWorkerRegistration", description="Flow service worker type"
),
print_type_format(
"ServiceWorkerState", description="Flow service worker type"
),
print_type_format(
"VisibilityState", description="Flow service worker type"
),
print_type_format("WindowClient", description="Flow service worker type"),
print_type_format("WorkerType", description="Flow service worker type"),
# Streams types
print_type_format("PipeToOptions", description="Flow streams type"),
print_type_format("QueuingStrategy", description="Flow streams type"),
print_type_format(
"ReadableByteStreamController", description="Flow streams type"
),
print_type_format("ReadableStream", description="Flow streams type"),
print_type_format(
"ReadableStreamBYOBRequest", description="Flow streams type"
),
print_type_format(
"ReadableStreamController", description="Flow streams type"
),
print_type_format("ReadableStreamReader", description="Flow streams type"),
print_type_format("TextEncodeOptions", description="Flow streams type"),
print_type_format("TextEncoder", description="Flow streams type"),
print_type_format("TransformStream", description="Flow streams type"),
print_type_format("UnderlyingSink", description="Flow streams type"),
print_type_format("UnderlyingSource", description="Flow streams type"),
print_type_format("WritableStream", description="Flow streams type"),
print_type_format(
"WritableStreamController", description="Flow streams type"
),
print_type_format("WritableStreamWriter", description="Flow streams type"),
]
<|file_name|>vrepConst.py<|end_file_name|>
# This file is part of the REMOTE API
#
# Copyright 2006-2016 Coppelia Robotics GmbH. All rights reserved.
# [email protected]
# www.coppeliarobotics.com
#
# The REMOTE API is licensed under the terms of GNU GPL:
#
# -------------------------------------------------------------------
# The REMOTE API is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# THE REMOTE API IS DISTRIBUTED "AS IS", WITHOUT ANY EXPRESS OR IMPLIED
# WARRANTY. THE USER WILL USE IT AT HIS/HER OWN RISK. THE ORIGINAL
# AUTHORS AND COPPELIA ROBOTICS GMBH WILL NOT BE LIABLE FOR DATA LOSS,
# DAMAGES, LOSS OF PROFITS OR ANY OTHER KIND OF LOSS WHILE USING OR
# MISUSING THIS SOFTWARE.
#
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with the REMOTE API. If not, see <http://www.gnu.org/licenses/>.
# -------------------------------------------------------------------
#
# This file was automatically created for V-REP release V3.3.2 on August 29th 2016
#constants
#Scene object types. Values are serialized
sim_object_shape_type =0
sim_object_joint_type =1
sim_object_graph_type =2
sim_object_camera_type =3
sim_object_dummy_type =4
sim_object_proximitysensor_type =5
sim_object_reserved1 =6
sim_object_reserved2 =7
sim_object_path_type =8
sim_object_visionsensor_type =9
sim_object_volume_type =10
sim_object_mill_type =11
sim_object_forcesensor_type =12
sim_object_light_type =13
sim_object_mirror_type =14
#General object types. Values are serialized
sim_appobj_object_type =109
sim_appobj_collision_type =110
sim_appobj_distance_type =111
sim_appobj_simulation_type =112
sim_appobj_ik_type =113
sim_appobj_constraintsolver_type=114
sim_appobj_collection_type =115
sim_appobj_ui_type =116
sim_appobj_script_type =117
sim_appobj_pathplanning_type =118
sim_appobj_RESERVED_type =119
sim_appobj_texture_type =120
# Ik calculation methods. Values are serialized
sim_ik_pseudo_inverse_method =0
sim_ik_damped_least_squares_method =1
sim_ik_jacobian_transpose_method =2
# Ik constraints. Values are serialized
sim_ik_x_constraint =1
sim_ik_y_constraint =2
sim_ik_z_constraint =4
sim_ik_alpha_beta_constraint=8
sim_ik_gamma_constraint =16
sim_ik_avoidance_constraint =64
# Ik calculation results
sim_ikresult_not_performed =0
sim_ikresult_success =1
sim_ikresult_fail =2
# Scene object sub-types. Values are serialized
# Light sub-types
sim_light_omnidirectional_subtype =1
sim_light_spot_subtype =2
sim_light_directional_subtype =3
# Joint sub-types
sim_joint_revolute_subtype =10
sim_joint_prismatic_subtype =11
sim_joint_spherical_subtype =12
# Shape sub-types
sim_shape_simpleshape_subtype =20
sim_shape_multishape_subtype =21
# Proximity sensor sub-types
sim_proximitysensor_pyramid_subtype =30
sim_proximitysensor_cylinder_subtype=31
sim_proximitysensor_disc_subtype =32
sim_proximitysensor_cone_subtype =33
sim_proximitysensor_ray_subtype =34
# Mill sub-types
sim_mill_pyramid_subtype =40
sim_mill_cylinder_subtype =41
sim_mill_disc_subtype =42
sim_mill_cone_subtype =43
# No sub-type
sim_object_no_subtype =200
#Scene object main properties (serialized)
sim_objectspecialproperty_collidable =0x0001
sim_objectspecialproperty_measurable =0x0002
#reserved =0x0004
#reserved =0x0008
sim_objectspecialproperty_detectable_ultrasonic =0x0010
sim_objectspecialproperty_detectable_infrared =0x0020
sim_objectspecialproperty_detectable_laser =0x0040
sim_objectspecialproperty_detectable_inductive =0x0080
sim_objectspecialproperty_detectable_capacitive =0x0100
sim_objectspecialproperty_renderable =0x0200
sim_objectspecialproperty_detectable_all =sim_objectspecialproperty_detectable_ultrasonic|sim_objectspecialproperty_detectable_infrared|sim_objectspecialproperty_detectable_laser|sim_objectspecialproperty_detectable_inductive|sim_objectspecialproperty_detectable_capacitive
sim_objectspecialproperty_cuttable =0x0400
sim_objectspecialproperty_pathplanning_ignored =0x0800
# Model properties (serialized)
sim_modelproperty_not_collidable =0x0001
sim_modelproperty_not_measurable =0x0002
sim_modelproperty_not_renderable =0x0004
sim_modelproperty_not_detectable =0x0008
sim_modelproperty_not_cuttable =0x0010
sim_modelproperty_not_dynamic =0x0020
sim_modelproperty_not_respondable =0x0040 # cannot be selected if sim_modelproperty_not_dynamic is not selected
sim_modelproperty_not_reset =0x0080 # Model is not reset at simulation end. This flag is cleared at simulation end
sim_modelproperty_not_visible =0x0100 # Whole model is invisible independent of local visibility settings
sim_modelproperty_not_model =0xf000 # object is not a model
# Check the documentation instead of comments below!!
# Following messages are dispatched to the Lua-message container
sim_message_ui_button_state_change =0 # a UI button slider etc. changed (due to a user's action). aux[0]=UI handle aux[1]=button handle aux[2]=button attributes aux[3]=slider position (if slider)
sim_message_reserved9 =1 # Do not use
sim_message_object_selection_changed=2
sim_message_reserved10 =3 # do not use
sim_message_model_loaded =4
sim_message_reserved11 =5 # do not use
sim_message_keypress =6 # a key was pressed while the focus was on a page (aux[0]=key aux[1]=ctrl and shift key state)
sim_message_bannerclicked =7 # a banner was clicked (aux[0]=banner ID)
# Following messages are dispatched only to the C-API (not available from Lua)
sim_message_for_c_api_only_start =0x100 # Do not use
sim_message_reserved1 =0x101 # Do not use
sim_message_reserved2 =0x102 # Do not use
sim_message_reserved3 =0x103 # Do not use
sim_message_eventcallback_scenesave =0x104 # about to save a scene
sim_message_eventcallback_modelsave =0x105 # about to save a model (current selection will be saved)
sim_message_eventcallback_moduleopen =0x106 # called when simOpenModule in Lua is called
sim_message_eventcallback_modulehandle =0x107 # called when simHandleModule in Lua is called with argument false
sim_message_eventcallback_moduleclose =0x108 # called when simCloseModule in Lua is called
sim_message_reserved4 =0x109 # Do not use
sim_message_reserved5 =0x10a # Do not use
sim_message_reserved6 =0x10b # Do not use
sim_message_reserved7 =0x10c # Do not use
sim_message_eventcallback_instancepass =0x10d # Called once every main application loop pass. auxiliaryData[0] contains event flags of events that happened since last time
sim_message_eventcallback_broadcast =0x10e
sim_message_eventcallback_imagefilter_enumreset =0x10f
sim_message_eventcallback_imagefilter_enumerate =0x110
sim_message_eventcallback_imagefilter_adjustparams =0x111
sim_message_eventcallback_imagefilter_reserved =0x112
sim_message_eventcallback_imagefilter_process =0x113
sim_message_eventcallback_reserved1 =0x114 # do not use
sim_message_eventcallback_reserved2 =0x115 # do not use
sim_message_eventcallback_reserved3 =0x116 # do not use
sim_message_eventcallback_reserved4 =0x117 # do not use
sim_message_eventcallback_abouttoundo =0x118 # the undo button was hit and a previous state is about to be restored
sim_message_eventcallback_undoperformed =0x119 # the undo button was hit and a previous state restored
sim_message_eventcallback_abouttoredo =0x11a # the redo button was hit and a future state is about to be restored
sim_message_eventcallback_redoperformed =0x11b # the redo button was hit and a future state restored
sim_message_eventcallback_scripticondblclick =0x11c # script icon was double clicked. (aux[0]=object handle associated with script set replyData[0] to 1 if script should not be opened)
sim_message_eventcallback_simulationabouttostart =0x11d
sim_message_eventcallback_simulationended =0x11e
sim_message_eventcallback_reserved5 =0x11f # do not use
sim_message_eventcallback_keypress =0x120 # a key was pressed while the focus was on a page (aux[0]=key aux[1]=ctrl and shift key state)
sim_message_eventcallback_modulehandleinsensingpart =0x121 # called when simHandleModule in Lua is called with argument true
sim_message_eventcallback_renderingpass =0x122 # called just before the scene is rendered
sim_message_eventcallback_bannerclicked =0x123 # called when a banner was clicked (aux[0]=banner ID)
sim_message_eventcallback_menuitemselected =0x124 # auxiliaryData[0] indicates the handle of the item auxiliaryData[1] indicates the state of the item
sim_message_eventcallback_refreshdialogs =0x125 # aux[0]=refresh degree (0=light 1=medium 2=full)
sim_message_eventcallback_sceneloaded =0x126
sim_message_eventcallback_modelloaded =0x127
sim_message_eventcallback_instanceswitch =0x128
sim_message_eventcallback_guipass =0x129
sim_message_eventcallback_mainscriptabouttobecalled =0x12a
sim_message_eventcallback_rmlposition =0x12b #the command simRMLPosition was called. The appropriate plugin should handle the call
sim_message_eventcallback_rmlvelocity =0x12c # the command simRMLVelocity was called. The appropriate plugin should handle the call
sim_message_simulation_start_resume_request =0x1000
sim_message_simulation_pause_request =0x1001
sim_message_simulation_stop_request =0x1002
# Scene object properties. Combine with the | operator
sim_objectproperty_reserved1 =0x0000
sim_objectproperty_reserved2 =0x0001
sim_objectproperty_reserved3 =0x0002
sim_objectproperty_reserved4 =0x0003
sim_objectproperty_reserved5 =0x0004 # formerly sim_objectproperty_visible
sim_objectproperty_reserved6 =0x0008 # formerly sim_objectproperty_wireframe
sim_objectproperty_collapsed =0x0010
sim_objectproperty_selectable =0x0020
sim_objectproperty_reserved7 =0x0040
sim_objectproperty_selectmodelbaseinstead =0x0080
sim_objectproperty_dontshowasinsidemodel =0x0100
# reserved =0x0200
sim_objectproperty_canupdatedna =0x0400
sim_objectproperty_selectinvisible =0x0800
sim_objectproperty_depthinvisible =0x1000
# type of arguments (input and output) for custom lua commands
sim_lua_arg_nil =0
sim_lua_arg_bool =1
sim_lua_arg_int =2
sim_lua_arg_float =3
sim_lua_arg_string =4
sim_lua_arg_invalid =5
sim_lua_arg_table =8
# custom user interface properties. Values are serialized.
sim_ui_property_visible =0x0001
sim_ui_property_visibleduringsimulationonly =0x0002
sim_ui_property_moveable =0x0004
sim_ui_property_relativetoleftborder =0x0008
sim_ui_property_relativetotopborder =0x0010
sim_ui_property_fixedwidthfont =0x0020
sim_ui_property_systemblock =0x0040
sim_ui_property_settocenter =0x0080
sim_ui_property_rolledup =0x0100
sim_ui_property_selectassociatedobject =0x0200
sim_ui_property_visiblewhenobjectselected =0x0400
# button properties. Values are serialized.
sim_buttonproperty_button =0x0000
sim_buttonproperty_label =0x0001
sim_buttonproperty_slider =0x0002
sim_buttonproperty_editbox =0x0003
sim_buttonproperty_staydown =0x0008
sim_buttonproperty_enabled =0x0010
sim_buttonproperty_borderless =0x0020
sim_buttonproperty_horizontallycentered =0x0040
sim_buttonproperty_ignoremouse =0x0080
sim_buttonproperty_isdown =0x0100
sim_buttonproperty_transparent =0x0200
sim_buttonproperty_nobackgroundcolor =0x0400
sim_buttonproperty_rollupaction =0x0800
sim_buttonproperty_closeaction =0x1000
sim_buttonproperty_verticallycentered =0x2000
sim_buttonproperty_downupevent =0x4000
# Simulation status
sim_simulation_stopped =0x00 # Simulation is stopped
sim_simulation_paused =0x08 # Simulation is paused
sim_simulation_advancing =0x10 # Simulation is advancing
sim_simulation_advancing_firstafterstop =sim_simulation_advancing|0x00 # First simulation pass (1x)
sim_simulation_advancing_running =sim_simulation_advancing|0x01 # Normal simulation pass (>=1x)
# reserved =sim_simulation_advancing|0x02
sim_simulation_advancing_lastbeforepause =sim_simulation_advancing|0x03 # Last simulation pass before pause (1x)
sim_simulation_advancing_firstafterpause =sim_simulation_advancing|0x04 # First simulation pass after pause (1x)
sim_simulation_advancing_abouttostop =sim_simulation_advancing|0x05 # "Trying to stop" simulation pass (>=1x)
sim_simulation_advancing_lastbeforestop =sim_simulation_advancing|0x06 # Last simulation pass (1x)
# Script execution result (first return value)
sim_script_no_error =0
sim_script_main_script_nonexistent =1
sim_script_main_script_not_called =2
sim_script_reentrance_error =4
sim_script_lua_error =8
sim_script_call_error =16
# Script types (serialized!)
sim_scripttype_mainscript =0
sim_scripttype_childscript =1
sim_scripttype_jointctrlcallback =4
sim_scripttype_contactcallback =5
sim_scripttype_customizationscript =6
sim_scripttype_generalcallback =7
# API call error messages
sim_api_errormessage_ignore =0 # does not memorize nor output errors
sim_api_errormessage_report =1 # memorizes errors (default for C-API calls)
sim_api_errormessage_output =2 # memorizes and outputs errors (default for Lua-API calls)
# special argument of some functions
sim_handle_all =-2
sim_handle_all_except_explicit =-3
sim_handle_self =-4
sim_handle_main_script =-5
sim_handle_tree =-6
sim_handle_chain =-7
sim_handle_single =-8
sim_handle_default =-9
sim_handle_all_except_self =-10
sim_handle_parent =-11
# special handle flags
sim_handleflag_assembly =0x400000
sim_handleflag_model =0x800000
# distance calculation methods (serialized)
sim_distcalcmethod_dl =0
sim_distcalcmethod_dac =1
sim_distcalcmethod_max_dl_dac =2
sim_distcalcmethod_dl_and_dac =3
sim_distcalcmethod_sqrt_dl2_and_dac2=4
sim_distcalcmethod_dl_if_nonzero =5
sim_distcalcmethod_dac_if_nonzero =6
# Generic dialog styles
sim_dlgstyle_message =0
sim_dlgstyle_input =1
sim_dlgstyle_ok =2
sim_dlgstyle_ok_cancel =3
sim_dlgstyle_yes_no =4
sim_dlgstyle_dont_center =32 # can be combined with one of above values. Only with this flag can the position of the related UI be set just after dialog creation
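# Illustrative example (not part of the original constants): an OK/Cancel
# dialog that keeps its creation-time position would combine the two values:
#   sim_dlgstyle_ok_cancel|sim_dlgstyle_dont_center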
# Generic dialog return values
sim_dlgret_still_open =0
sim_dlgret_ok =1
sim_dlgret_cancel =2
sim_dlgret_yes =3
sim_dlgret_no =4
# Path properties
sim_pathproperty_show_line =0x0001
sim_pathproperty_show_orientation =0x0002
sim_pathproperty_closed_path =0x0004
sim_pathproperty_automatic_orientation =0x0008
sim_pathproperty_invert_velocity =0x0010
sim_pathproperty_infinite_acceleration =0x0020
sim_pathproperty_flat_path =0x0040
sim_pathproperty_show_position =0x0080
sim_pathproperty_auto_velocity_profile_translation =0x0100
sim_pathproperty_auto_velocity_profile_rotation =0x0200
sim_pathproperty_endpoints_at_zero =0x0400
sim_pathproperty_keep_x_up =0x0800
# drawing objects
# following are mutually exclusive
sim_drawing_points =0 # 3 values per point (point size in pixels)
sim_drawing_lines =1 # 6 values per line (line size in pixels)
sim_drawing_triangles =2 # 9 values per triangle
sim_drawing_trianglepoints =3 # 6 values per point (3 for triangle position 3 for triangle normal vector) (triangle size in meters)
sim_drawing_quadpoints =4 # 6 values per point (3 for quad position 3 for quad normal vector) (quad size in meters)
sim_drawing_discpoints =5 # 6 values per point (3 for disc position 3 for disc normal vector) (disc size in meters)
sim_drawing_cubepoints =6 # 6 values per point (3 for cube position 3 for cube normal vector) (cube size in meters)
sim_drawing_spherepoints =7 # 3 values per point (sphere size in meters)
# following can be or-combined
sim_drawing_itemcolors =0x00020 # +3 values per item (each item has its own ambient color (rgb values)).
# Mutually exclusive with sim_drawing_vertexcolors
sim_drawing_vertexcolors =0x00040 # +3 values per vertex (each vertex has its own ambient color (rgb values). Only for sim_drawing_lines (+6) and for sim_drawing_triangles(+9)). Mutually exclusive with sim_drawing_itemcolors
sim_drawing_itemsizes =0x00080 # +1 value per item (each item has its own size). Not for sim_drawing_triangles
sim_drawing_backfaceculling =0x00100 # back faces are not displayed for all items
sim_drawing_wireframe =0x00200 # all items displayed in wireframe
sim_drawing_painttag =0x00400 # all items are tagged as paint (for additional processing at a later stage)
sim_drawing_followparentvisibility =0x00800 # if the object is associated with a scene object then it follows that visibility otherwise it is always visible
sim_drawing_cyclic =0x01000 # if the max item count was reached then the first items are overwritten.
sim_drawing_50percenttransparency =0x02000 # the drawing object will be 50% transparent
sim_drawing_25percenttransparency =0x04000 # the drawing object will be 25% transparent
sim_drawing_12percenttransparency =0x08000 # the drawing object will be 12.5% transparent
sim_drawing_emissioncolor =0x10000 # When used in combination with sim_drawing_itemcolors or sim_drawing_vertexcolors then the specified colors will be for the emissive component
sim_drawing_facingcamera =0x20000 # Only for trianglepoints quadpoints discpoints and cubepoints. If specified the normal vector is calculated to face the camera (each item data requires 3 values less)
sim_drawing_overlay =0x40000 # When specified objects are always drawn on top of "regular objects"
sim_drawing_itemtransparency =0x80000 # +1 value per item (each item has its own transparency value (0-1)). Not compatible with sim_drawing_vertexcolors
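# Illustrative example (not part of the original constants): a cyclic overlay
# line buffer with per-item colors would or-combine one base type with flags:
#   sim_drawing_lines|sim_drawing_itemcolors|sim_drawing_cyclic|sim_drawing_overlay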
# banner values
# following can be or-combined
sim_banner_left =0x00001 # Banners display on the left of the specified point
sim_banner_right =0x00002 # Banners display on the right of the specified point
sim_banner_nobackground =0x00004 # Banners have no background rectangle
sim_banner_overlay =0x00008 # When specified banners are always drawn on top of "regular objects"
sim_banner_followparentvisibility =0x00010 # if the object is associated with a scene object then it follows that visibility otherwise it is always visible
sim_banner_clickselectsparent =0x00020 # if the object is associated with a scene object then clicking the banner will select the scene object
sim_banner_clicktriggersevent =0x00040 # if the banner is clicked an event is triggered (sim_message_eventcallback_bannerclicked and sim_message_bannerclicked are generated)
sim_banner_facingcamera =0x00080 # If specified the banner will always face the camera by rotating around the banner's vertical axis (y-axis)
sim_banner_fullyfacingcamera =0x00100 # If specified the banner will always fully face the camera (the banner's orientation is same as the camera looking at it)
sim_banner_backfaceculling =0x00200 # If specified the banner will only be visible from one side
sim_banner_keepsamesize =0x00400 # If specified the banner will always appear in the same size. In that case size represents the character height in pixels
sim_banner_bitmapfont =0x00800 # If specified a fixed-size bitmap font is used. The text will also always fully face the camera and sit to the right
# of the specified position. Bitmap fonts are not clickable
# particle objects following are mutually exclusive
sim_particle_points1 =0 # 6 values per point (pt1 and pt2. Pt1 is start position pt2-pt1 is the initial velocity vector).
#Point is 1 pixel big. Only appearance is a point internally handled as a perfect sphere
sim_particle_points2 =1 # 6 values per point. Point is 2 pixel big. Only appearance is a point internally handled as a perfect sphere
sim_particle_points4 =2 # 6 values per point. Point is 4 pixel big. Only appearance is a point internally handled as a perfect sphere
sim_particle_roughspheres =3 # 6 values per sphere. Only appearance is rough. Internally a perfect sphere
sim_particle_spheres =4 # 6 values per sphere. Internally a perfect sphere
# following can be or-combined
sim_particle_respondable1to4 =0x0020 # the particles are respondable against shapes (against all objects that have at least one bit 1-4 activated in the global respondable mask)
sim_particle_respondable5to8 =0x0040 # the particles are respondable against shapes (against all objects that have at least one bit 5-8 activated in the global respondable mask)
sim_particle_particlerespondable =0x0080 # the particles are respondable against each other
sim_particle_ignoresgravity =0x0100 # the particles ignore the effect of gravity. Not compatible with sim_particle_water
sim_particle_invisible =0x0200 # the particles are invisible
sim_particle_itemsizes =0x0400 # +1 value per particle (each particle can have a different size)
sim_particle_itemdensities =0x0800 # +1 value per particle (each particle can have a different density)
sim_particle_itemcolors =0x1000 # +3 values per particle (each particle can have a different color)
sim_particle_cyclic =0x2000 # if the max item count was reached then the first items are overwritten.
sim_particle_emissioncolor =0x4000 # When used in combination with sim_particle_itemcolors then the specified colors will be for the emissive component
sim_particle_water =0x8000 # the particles are water particles (no weight in the water (i.e. when z<0)). Not compatible with sim_particle_ignoresgravity
sim_particle_painttag =0x10000 # The particles can be seen by vision sensors (sim_particle_invisible must not be set)
# custom user interface menu attributes
sim_ui_menu_title =1
sim_ui_menu_minimize =2
sim_ui_menu_close =4
sim_ui_menu_systemblock =8
# Boolean parameters
sim_boolparam_hierarchy_visible =0
sim_boolparam_console_visible =1
sim_boolparam_collision_handling_enabled =2
sim_boolparam_distance_handling_enabled =3
sim_boolparam_ik_handling_enabled =4
sim_boolparam_gcs_handling_enabled =5
sim_boolparam_dynamics_handling_enabled =6
sim_boolparam_joint_motion_handling_enabled =7
sim_boolparam_path_motion_handling_enabled =8
sim_boolparam_proximity_sensor_handling_enabled =9
sim_boolparam_vision_sensor_handling_enabled =10
sim_boolparam_mill_handling_enabled =11
sim_boolparam_browser_visible =12
sim_boolparam_scene_and_model_load_messages =13
sim_reserved0 =14
sim_boolparam_shape_textures_are_visible =15
sim_boolparam_display_enabled =16
sim_boolparam_infotext_visible =17
sim_boolparam_statustext_open =18
sim_boolparam_fog_enabled =19
sim_boolparam_rml2_available =20
sim_boolparam_rml4_available =21
sim_boolparam_mirrors_enabled =22
sim_boolparam_aux_clip_planes_enabled =23
sim_boolparam_full_model_copy_from_api =24
sim_boolparam_realtime_simulation =25
sim_boolparam_force_show_wireless_emission =27
sim_boolparam_force_show_wireless_reception =28
sim_boolparam_video_recording_triggered =29
sim_boolparam_threaded_rendering_enabled =32
sim_boolparam_fullscreen =33
sim_boolparam_headless =34
sim_boolparam_hierarchy_toolbarbutton_enabled =35
sim_boolparam_browser_toolbarbutton_enabled =36
sim_boolparam_objectshift_toolbarbutton_enabled =37
sim_boolparam_objectrotate_toolbarbutton_enabled=38
sim_boolparam_force_calcstruct_all_visible =39
sim_boolparam_force_calcstruct_all =40
sim_boolparam_exit_request =41
sim_boolparam_play_toolbarbutton_enabled =42
sim_boolparam_pause_toolbarbutton_enabled =43
sim_boolparam_stop_toolbarbutton_enabled =44
sim_boolparam_waiting_for_trigger =45
# Integer parameters
sim_intparam_error_report_mode =0 # Check sim_api_errormessage_... constants above for valid values
sim_intparam_program_version =1 # e.g. Version 2.1.4 --> 20104. Can only be read
sim_intparam_instance_count =2 # do not use anymore (always returns 1 since V-REP 2.5.11)
sim_intparam_custom_cmd_start_id =3 # can only be read
sim_intparam_compilation_version =4 # 0=evaluation version 1=full version 2=player version. Can only be read
sim_intparam_current_page =5
sim_intparam_flymode_camera_handle =6 # can only be read
sim_intparam_dynamic_step_divider =7 # can only be read
sim_intparam_dynamic_engine =8 # 0=Bullet 1=ODE. 2=Vortex.
sim_intparam_server_port_start =9 # can only be read
sim_intparam_server_port_range =10 # can only be read
sim_intparam_visible_layers =11
sim_intparam_infotext_style =12
sim_intparam_settings =13
sim_intparam_edit_mode_type =14 # can only be read
sim_intparam_server_port_next =15 # is initialized at sim_intparam_server_port_start
sim_intparam_qt_version =16 # version of the used Qt framework
sim_intparam_event_flags_read =17 # can only be read
sim_intparam_event_flags_read_clear =18 # can only be read
sim_intparam_platform =19 # can only be read
sim_intparam_scene_unique_id =20 # can only be read
sim_intparam_work_thread_count =21
sim_intparam_mouse_x =22
sim_intparam_mouse_y =23
sim_intparam_core_count =24
sim_intparam_work_thread_calc_time_ms =25
sim_intparam_idle_fps =26
sim_intparam_prox_sensor_select_down =27
sim_intparam_prox_sensor_select_up =28
sim_intparam_stop_request_counter =29
sim_intparam_program_revision =30
sim_intparam_mouse_buttons =31
sim_intparam_dynamic_warning_disabled_mask =32
sim_intparam_simulation_warning_disabled_mask =33
sim_intparam_scene_index =34
sim_intparam_motionplanning_seed =35
sim_intparam_speedmodifier =36
# Float parameters
sim_floatparam_rand=0 # random value (0.0-1.0)
sim_floatparam_simulation_time_step =1
sim_floatparam_stereo_distance =2
# String parameters
sim_stringparam_application_path=0 # path of V-REP's executable
sim_stringparam_video_filename=1
sim_stringparam_app_arg1 =2
sim_stringparam_app_arg2 =3
sim_stringparam_app_arg3 =4
sim_stringparam_app_arg4 =5
sim_stringparam_app_arg5 =6
sim_stringparam_app_arg6 =7
sim_stringparam_app_arg7 =8
sim_stringparam_app_arg8 =9
sim_stringparam_app_arg9 =10
sim_stringparam_scene_path_and_name =13
# Array parameters
sim_arrayparam_gravity =0
sim_arrayparam_fog =1
sim_arrayparam_fog_color =2
sim_arrayparam_background_color1=3
sim_arrayparam_background_color2=4
sim_arrayparam_ambient_light =5
sim_arrayparam_random_euler =6
sim_objintparam_visibility_layer= 10
sim_objfloatparam_abs_x_velocity= 11
sim_objfloatparam_abs_y_velocity= 12
sim_objfloatparam_abs_z_velocity= 13
sim_objfloatparam_abs_rot_velocity= 14
sim_objfloatparam_objbbox_min_x= 15
sim_objfloatparam_objbbox_min_y= 16
sim_objfloatparam_objbbox_min_z= 17
sim_objfloatparam_objbbox_max_x= 18
sim_objfloatparam_objbbox_max_y= 19
sim_objfloatparam_objbbox_max_z= 20
sim_objfloatparam_modelbbox_min_x= 21
sim_objfloatparam_modelbbox_min_y= 22
sim_objfloatparam_modelbbox_min_z= 23
sim_objfloatparam_modelbbox_max_x= 24
sim_objfloatparam_modelbbox_max_y= 25
sim_objfloatparam_modelbbox_max_z= 26
sim_objintparam_collection_self_collision_indicator= 27
sim_objfloatparam_transparency_offset= 28
sim_objintparam_child_role= 29
sim_objintparam_parent_role= 30
sim_objintparam_manipulation_permissions= 31
sim_objintparam_illumination_handle= 32
sim_visionfloatparam_near_clipping= 1000
sim_visionfloatparam_far_clipping= 1001
sim_visionintparam_resolution_x= 1002
sim_visionintparam_resolution_y= 1003
sim_visionfloatparam_perspective_angle= 1004
sim_visionfloatparam_ortho_size= 1005
sim_visionintparam_disabled_light_components= 1006
sim_visionintparam_rendering_attributes= 1007
sim_visionintparam_entity_to_render= 1008
sim_visionintparam_windowed_size_x= 1009
sim_visionintparam_windowed_size_y= 1010
sim_visionintparam_windowed_pos_x= 1011
sim_visionintparam_windowed_pos_y= 1012
sim_visionintparam_pov_focal_blur= 1013
sim_visionfloatparam_pov_blur_distance= 1014
sim_visionfloatparam_pov_aperture= 1015
sim_visionintparam_pov_blur_sampled= 1016
sim_visionintparam_render_mode= 1017
sim_jointintparam_motor_enabled= 2000
sim_jointintparam_ctrl_enabled= 2001
sim_jointfloatparam_pid_p= 2002
sim_jointfloatparam_pid_i= 2003
sim_jointfloatparam_pid_d= 2004
sim_jointfloatparam_intrinsic_x= 2005
sim_jointfloatparam_intrinsic_y= 2006
sim_jointfloatparam_intrinsic_z= 2007
sim_jointfloatparam_intrinsic_qx= 2008
sim_jointfloatparam_intrinsic_qy= 2009
sim_jointfloatparam_intrinsic_qz= 2010
sim_jointfloatparam_intrinsic_qw= 2011
sim_jointfloatparam_velocity= 2012
sim_jointfloatparam_spherical_qx= 2013
sim_jointfloatparam_spherical_qy= 2014
sim_jointfloatparam_spherical_qz= 2015
sim_jointfloatparam_spherical_qw= 2016
sim_jointfloatparam_upper_limit= 2017
sim_jointfloatparam_kc_k= 2018
sim_jointfloatparam_kc_c= 2019
sim_jointfloatparam_ik_weight= 2021
sim_jointfloatparam_error_x= 2022
sim_jointfloatparam_error_y= 2023
sim_jointfloatparam_error_z= 2024
sim_jointfloatparam_error_a= 2025
sim_jointfloatparam_error_b= 2026
sim_jointfloatparam_error_g= 2027
sim_jointfloatparam_error_pos= 2028
sim_jointfloatparam_error_angle= 2029
sim_jointintparam_velocity_lock= 2030
sim_jointintparam_vortex_dep_handle= 2031
sim_jointfloatparam_vortex_dep_multiplication= 2032
sim_jointfloatparam_vortex_dep_offset= 2033
sim_shapefloatparam_init_velocity_x= 3000
sim_shapefloatparam_init_velocity_y= 3001
sim_shapefloatparam_init_velocity_z= 3002
sim_shapeintparam_static= 3003
sim_shapeintparam_respondable= 3004
sim_shapefloatparam_mass= 3005
sim_shapefloatparam_texture_x= 3006
sim_shapefloatparam_texture_y= 3007
sim_shapefloatparam_texture_z= 3008
sim_shapefloatparam_texture_a= 3009
sim_shapefloatparam_texture_b= 3010
sim_shapefloatparam_texture_g= 3011
sim_shapefloatparam_texture_scaling_x= 3012
sim_shapefloatparam_texture_scaling_y= 3013
sim_shapeintparam_culling= 3014
sim_shapeintparam_wireframe= 3015
sim_shapeintparam_compound= 3016
sim_shapeintparam_convex= 3017
sim_shapeintparam_convex_check= 3018
sim_shapeintparam_respondable_mask= 3019
sim_shapefloatparam_init_velocity_a= 3020
sim_shapefloatparam_init_velocity_b= 3021
sim_shapefloatparam_init_velocity_g= 3022
sim_shapestringparam_color_name= 3023
sim_shapeintparam_edge_visibility= 3024
sim_shapefloatparam_shading_angle= 3025
sim_shapefloatparam_edge_angle= 3026
sim_shapeintparam_edge_borders_hidden= 3027
sim_proxintparam_ray_invisibility= 4000
sim_forcefloatparam_error_x= 5000
sim_forcefloatparam_error_y= 5001
sim_forcefloatparam_error_z= 5002
sim_forcefloatparam_error_a= 5003
sim_forcefloatparam_error_b= 5004
sim_forcefloatparam_error_g= 5005
sim_forcefloatparam_error_pos= 5006
sim_forcefloatparam_error_angle= 5007
sim_lightintparam_pov_casts_shadows= 8000
sim_cameraintparam_disabled_light_components= 9000
sim_camerafloatparam_perspective_angle= 9001
sim_camerafloatparam_ortho_size= 9002
sim_cameraintparam_rendering_attributes= 9003
sim_cameraintparam_pov_focal_blur= 9004
sim_camerafloatparam_pov_blur_distance= 9005
sim_camerafloatparam_pov_aperture= 9006
sim_cameraintparam_pov_blur_samples= 9007
sim_dummyintparam_link_type= 10000
sim_mirrorfloatparam_width= 12000
sim_mirrorfloatparam_height= 12001
sim_mirrorfloatparam_reflectance= 12002
sim_mirrorintparam_enable= 12003
sim_pplanfloatparam_x_min= 20000
sim_pplanfloatparam_x_range= 20001
sim_pplanfloatparam_y_min= 20002
sim_pplanfloatparam_y_range= 20003
sim_pplanfloatparam_z_min= 20004
sim_pplanfloatparam_z_range= 20005
sim_pplanfloatparam_delta_min= 20006
sim_pplanfloatparam_delta_range= 20007
sim_mplanintparam_nodes_computed= 25000
sim_mplanintparam_prepare_nodes= 25001
sim_mplanintparam_clear_nodes= 25002
# User interface elements
sim_gui_menubar =0x0001
sim_gui_popups =0x0002
sim_gui_toolbar1 =0x0004
sim_gui_toolbar2 =0x0008
sim_gui_hierarchy =0x0010
sim_gui_infobar =0x0020
sim_gui_statusbar =0x0040
sim_gui_scripteditor =0x0080
sim_gui_scriptsimulationparameters =0x0100
sim_gui_dialogs =0x0200
sim_gui_browser =0x0400
sim_gui_all =0xffff
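# Illustrative example (not part of the original constants): since these are
# bit flags, "everything except the browser" is plain bit arithmetic:
#   sim_gui_all&~sim_gui_browser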
# Joint modes
sim_jointmode_passive =0
sim_jointmode_motion =1
sim_jointmode_ik =2
sim_jointmode_ikdependent =3
sim_jointmode_dependent =4
sim_jointmode_force =5
# Navigation and selection modes with the mouse. Lower byte values are mutually exclusive; upper byte bits can be combined
sim_navigation_passive =0x0000
sim_navigation_camerashift =0x0001
sim_navigation_camerarotate =0x0002
sim_navigation_camerazoom =0x0003
sim_navigation_cameratilt =0x0004
sim_navigation_cameraangle =0x0005
sim_navigation_camerafly =0x0006
sim_navigation_objectshift =0x0007
sim_navigation_objectrotate =0x0008
sim_navigation_reserved2 =0x0009
sim_navigation_reserved3 =0x000A
sim_navigation_jointpathtest =0x000B
sim_navigation_ikmanip =0x000C
sim_navigation_objectmultipleselection =0x000D
# Bit-combine following values and add them to one of the above values for a valid navigation mode
sim_navigation_reserved4 =0x0100
sim_navigation_clickselection =0x0200
sim_navigation_ctrlselection =0x0400
sim_navigation_shiftselection =0x0800
sim_navigation_camerazoomwheel =0x1000
sim_navigation_camerarotaterightbutton =0x2000
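# Illustrative example (not part of the original constants): per the comment
# above, a full mode value is one lower-byte mode plus or-combined upper bits:
#   sim_navigation_camerashift|sim_navigation_clickselection|sim_navigation_camerazoomwheel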
#Remote API constants
SIMX_VERSION =0
# Remote API message header structure
SIMX_HEADER_SIZE =18
simx_headeroffset_crc =0 # 1 simxUShort. Generated by the client or server. The CRC for the message
simx_headeroffset_version =2 # 1 byte. Generated by the client or server. The version of the remote API software
simx_headeroffset_message_id =3 # 1 simxInt. Generated by the client (and used in a reply by the server)
simx_headeroffset_client_time =7 # 1 simxInt. Client time stamp generated by the client (and sent back by the server)
simx_headeroffset_server_time =11 # 1 simxInt. Generated by the server when a reply is generated. The server timestamp
simx_headeroffset_scene_id =15 # 1 simxUShort. Generated by the server. A unique ID identifying the scene currently displayed
simx_headeroffset_server_state =17 # 1 byte. Generated by the server. Bit coded 0 set --> simulation not stopped 1 set --> simulation paused 2 set --> real-time switch on 3-5 edit mode type (0=no edit mode 1=triangle 2=vertex 3=edge 4=path 5=UI)
# Remote API command header
SIMX_SUBHEADER_SIZE =26
simx_cmdheaderoffset_mem_size =0 # 1 simxInt. Generated by the client or server. The buffer size of the command.
simx_cmdheaderoffset_full_mem_size =4 # 1 simxInt. Generated by the client or server. The full buffer size of the command (applies to split chunks).
simx_cmdheaderoffset_pdata_offset0 =8 # 1 simxUShort. Generated by the client or server. The amount of data that is part of the command identification.
simx_cmdheaderoffset_pdata_offset1 =10 # 1 simxInt. Generated by the client or server. The amount of shift of the pure data buffer (applies to split chunks).
simx_cmdheaderoffset_cmd=14 # 1 simxInt. Generated by the client (and used in a reply by the server). The command combined with the operation mode of the command.
simx_cmdheaderoffset_delay_or_split =18 # 1 simxUShort. Generated by the client or server. The amount of delay in ms of a continuous command or the max. pure data size to send at once (applies to split commands).
simx_cmdheaderoffset_sim_time =20 # 1 simxInt. Generated by the server. The simulation time (in ms) when the command was executed (or 0 if simulation is not running)
simx_cmdheaderoffset_status =24 # 1 byte. Generated by the server. (1 bit 0 is set --> error in function execution on server side). The client writes bit 1 if command cannot be overwritten
simx_cmdheaderoffset_reserved =25 # 1 byte. Not yet used
# Regular operation modes
simx_opmode_oneshot =0x000000 # sends command as one chunk. Reply will also come as one chunk. Doesn't wait for the reply.
simx_opmode_blocking =0x010000 # sends command as one chunk. Reply will also come as one chunk. Waits for the reply (_REPLY_WAIT_TIMEOUT_IN_MS is the timeout).
simx_opmode_oneshot_wait =0x010000 # sends command as one chunk. Reply will also come as one chunk. Waits for the reply (_REPLY_WAIT_TIMEOUT_IN_MS is the timeout).
simx_opmode_continuous =0x020000
simx_opmode_streaming =0x020000 # sends command as one chunk. Command will be stored on the server and always executed
#(every x ms (as far as possible) where x can be 0-65535. just add x to opmode_continuous).
# A reply will be sent continuously each time as one chunk. Doesn't wait for the reply.
# Operation modes for heavy data
simx_opmode_oneshot_split =0x030000 # sends command as several chunks (max chunk size is x bytes where x can be _MIN_SPLIT_AMOUNT_IN_BYTES-65535. Just add x to opmode_oneshot_split). Reply will also come as several chunks. Doesn't wait for the reply.
simx_opmode_continuous_split =0x040000
simx_opmode_streaming_split =0x040000 # sends command as several chunks (max chunk size is x bytes where x can be _MIN_SPLIT_AMOUNT_IN_BYTES-65535. Just add x to opmode_continuous_split). Command will be stored on the server and always executed. A reply will be sent continuously each time as several chunks. Doesn't wait for the reply.
# Special operation modes
simx_opmode_discontinue =0x050000 # removes and cancels all commands stored on the client or server side (also continuous commands)
simx_opmode_buffer =0x060000 # doesn't send anything but checks if a reply for the given command is available in the input buffer (i.e. previously received from the server)
simx_opmode_remove =0x070000 # doesn't send anything and doesn't return any specific value. It just erases a similar command reply in the inbox (to free some memory)
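# Illustrative usage sketch (not part of the original constants file; clientID
# and handle are assumed to come from simxStart/simxGetObjectHandle elsewhere).
# A client typically starts a stream once, then polls its local buffer; the 50
# below is an assumed refresh delay in ms, added directly to the opmode as
# described above:
#   vrep.simxGetObjectPosition(clientID, handle, -1, simx_opmode_streaming+50)
#   ...later, on each control step...
#   vrep.simxGetObjectPosition(clientID, handle, -1, simx_opmode_buffer)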
# Command return codes
simx_return_ok =0x000000
simx_return_novalue_flag =0x000001 # input buffer doesn't contain the specified command
simx_return_timeout_flag =0x000002 # command reply not received in time for opmode_oneshot_wait operation mode
simx_return_illegal_opmode_flag =0x000004 # command doesn't support the specified operation mode
simx_return_remote_error_flag =0x000008 # command caused an error on the server side
simx_return_split_progress_flag =0x000010 # previous similar command not yet fully processed (applies to opmode_oneshot_split operation modes)
simx_return_local_error_flag =0x000020 # command caused an error on the client side
simx_return_initialize_error_flag =0x000040 # simxStart was not yet called
# Following for backward compatibility (same as above)
simx_error_noerror =0x000000
simx_error_novalue_flag =0x000001 # input buffer doesn't contain the specified command
simx_error_timeout_flag =0x000002 # command reply not received in time for opmode_oneshot_wait operation mode
simx_error_illegal_opmode_flag =0x000004 # command doesn't support the specified operation mode
simx_error_remote_error_flag =0x000008 # command caused an error on the server side
simx_error_split_progress_flag =0x000010 # previous similar command not yet fully processed (applies to opmode_oneshot_split operation modes)
simx_error_local_error_flag =0x000020 # command caused an error on the client side
simx_error_initialize_error_flag =0x000040 # simxStart was not yet called
|
<|file_name|>ajax.service.ts<|end_file_name|>
import { Injectable } from '@angular/core';
import { Http, Response, Headers, RequestOptions } from '@angular/http';
import { Observable } from 'rxjs/Observable';
import 'rxjs/add/operator/map';
import 'rxjs/add/operator/catch';
import 'rxjs/add/operator/delay';
import 'rxjs/add/operator/do';
import { SpinnerService } from './spinner.service';
@Injectable()
export class AjaxService {
private apiUrl = '/assets/api';
constructor(private http: Http, private spinner: SpinnerService) { }
public fetch (url): Observable<any> {
this.showSpinner();
return this.http.get(this.buildUrl(url))
.map(this.extractData)
.catch(this.handleError);
}
public post (url, data): Observable<any> {
let headers = new Headers({ 'Content-Type': 'application/json' });
let options = new RequestOptions({ headers: headers });
this.showSpinner();
return this.http.post(this.buildUrl(url), data, options)
.map(this.extractData)
.catch(this.handleError);
}
public put (url, data): Observable<any> {
let headers = new Headers({ 'Content-Type': 'application/json' });
let options = new RequestOptions({ headers: headers });
this.showSpinner();
return this.http.put(this.buildUrl(url), data, options)
.map(this.extractData)
.catch(this.handleError);
}
public delete (url, data): Observable<any> {
this.showSpinner();
return this.http.delete(this.buildUrl(url))
.map(this.extractData)
.catch(this.handleError);
}
private buildUrl (url) {
return this.apiUrl + url;
}
private extractData = (res: Response) => {
let body = res.json();
this.showSpinner();
return body || { };
}
private handleError = (error: Response | any) => {
let errMsg: string;
if (error instanceof Response) {
const body = error.json() || '';
const err = body.error || JSON.stringify(body);
errMsg = `${error.status} - ${error.statusText || ''} ${err}`;
} else {
errMsg = error.message ? error.message : error.toString();
}
console.error(errMsg);
this.hideSpinner();
return Observable.throw(errMsg, error);
}
private showSpinner () {
this.spinner.isLoading = true;
}
private hideSpinner () {
this.spinner.isLoading = false;
}
}
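// Usage sketch (illustrative, not part of this file): assumes AjaxService is
// registered in an NgModule's providers and injected into a component; the
// component fields below are hypothetical.
//
//   constructor(private ajax: AjaxService) {}
//
//   ngOnInit() {
//     this.ajax.fetch('/users.json').subscribe(
//       users => this.users = users,
//       err => console.error(err));
//   }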
<|file_name|>TestDataFormatterObjCNSError.py<|end_file_name|>
# encoding: utf-8
"""
Test lldb data formatter subsystem.
"""
from __future__ import print_function
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
from ObjCDataFormatterTestCase import ObjCDataFormatterTestCase
class ObjCDataFormatterNSError(ObjCDataFormatterTestCase):
@skipUnlessDarwin
def test_nserror_with_run_command(self):
"""Test formatters for NSError."""
self.appkit_tester_impl(self.nserror_data_formatter_commands)
def nserror_data_formatter_commands(self):
self.expect(
'frame variable nserror', substrs=['domain: @"Foobar" - code: 12'])
self.expect(
'frame variable nserrorptr',
substrs=['domain: @"Foobar" - code: 12'])
self.expect(
'frame variable nserror->_userInfo', substrs=['2 key/value pairs'])
self.expect(
'frame variable nserror->_userInfo --ptr-depth 1 -d run-target',
substrs=['@"a"', '@"b"', "1", "2"])
<|file_name|>io.hpp<|end_file_name|>
#ifndef HAVE_spiral_io_hpp
#define HAVE_spiral_io_hpp
#include <cstdio>
#include "spiral/core.hpp"
#include "spiral/string.hpp"
namespace spiral {
struct IoObj {
const ObjTable* otable;
std::FILE* stdio;
bool close_on_drop;
};
auto io_from_val(Bg* bg, Val val) -> IoObj*;
auto io_from_obj_ptr(void* obj_ptr) -> IoObj*;
auto io_new_from_stdio(Bg* bg, void* sp, std::FILE* stdio, bool close_on_drop) -> IoObj*;
void io_stringify(Bg* bg, Buffer* buf, void* obj_ptr);
auto io_length(void* obj_ptr) -> uint32_t;
auto io_evacuate(GcCtx* gc_ctx, void* obj_ptr) -> Val;
void io_scavenge(GcCtx* gc_ctx, void* obj_ptr);
void io_drop(Bg* bg, void* obj_ptr);
auto io_eqv(Bg* bg, void* l_ptr, void* r_ptr) -> bool;
extern const ObjTable io_otable;
auto io_open_file(Bg* bg, void* sp, StrObj* path, const char* mode) -> Val;
extern "C" {
auto spiral_std_io_file_open(Bg* bg, void* sp, uint32_t path) -> uint32_t;
auto spiral_std_io_file_create(Bg* bg, void* sp, uint32_t path) -> uint32_t;
auto spiral_std_io_file_append(Bg* bg, void* sp, uint32_t path) -> uint32_t;
auto spiral_std_io_close(Bg* bg, void* sp, uint32_t io) -> uint32_t;
auto spiral_std_io_flush(Bg* bg, void* sp, uint32_t io) -> uint32_t;
auto spiral_std_is_io(Bg* bg, void* sp, uint32_t io) -> uint32_t;
auto spiral_std_io_is_eof(Bg* bg, void* sp, uint32_t io) -> uint32_t;
auto spiral_std_io_is_error(Bg* bg, void* sp, uint32_t io) -> uint32_t;
auto spiral_std_io_stdin(Bg* bg, void* sp) -> uint32_t;
auto spiral_std_io_stdout(Bg* bg, void* sp) -> uint32_t;
auto spiral_std_io_stderr(Bg* bg, void* sp) -> uint32_t;
auto spiral_std_io_write_byte(Bg* bg, void* sp, uint32_t io, uint32_t byte) -> uint32_t;
auto spiral_std_io_write(Bg* bg, void* sp, uint32_t io, uint32_t str) -> uint32_t;
auto spiral_std_io_write_line(Bg* bg, void* sp, uint32_t io, uint32_t str) -> uint32_t;
auto spiral_std_io_read_byte(Bg* bg, void* sp, uint32_t io) -> uint32_t;
auto spiral_std_io_read_str(Bg* bg, void* sp, uint32_t io, uint32_t len) -> uint32_t;
auto spiral_std_io_read_all_str(Bg* bg, void* sp, uint32_t io) -> uint32_t;
auto spiral_std_io_read_line(Bg* bg, void* sp, uint32_t io) -> uint32_t;
auto spiral_std_io_read_word(Bg* bg, void* sp, uint32_t io) -> uint32_t;
auto spiral_std_io_read_int(Bg* bg, void* sp, uint32_t io) -> uint32_t;
auto spiral_std_io_read_number(Bg* bg, void* sp, uint32_t io) -> uint32_t;
}
}
#endif
<|file_name|>conditionalGroupProvider.spec.js<|end_file_name|>
/**
* Copyright 2014 Daniel Furtlehner
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
describe('Conditionalgroup Provider', function() {
var crossfilterUtils,
groupData = {
type: 'conditional',
parameters: {
init: {
a: 0,
b: 0,
c: 0,
d: 0
},
conditions: {
isA: 'v.type === "A"',
isB: 'v.type === "B"',
isC: 'v.type === "C"'
},
handlers: {
add: {
isA: {
'p.a': 'p.a + v.val'
},
isB: {
'p.b': 'p.b + v.val'
},
isC: {
'p.c': 'p.c + v.val',
'p.d': 'p.d + 1'
}
},
remove: {
isA: {
'p.a': 'p.a - v.val'
},
isB: {
'p.b': 'p.b - v.val'
},
isC: {
'p.c': 'p.c - v.val',
'p.d': 'p.d - 1'
}
}
}
}
};
var preGroupData = {
type: 'conditional',
parameters: {
conditions: {
aCheck: 'v.type === "C"'
},
handlers: {
add: {
pre: {
'p.c': 'p.c * v.val'
},
aCheck: {
'p.c': 'p.c + v.val'
}
},
remove: {
pre: {
'p.c': 'p.c - v.val'
},
aCheck: {
'p.c': 'p.c / v.val'
}
}
}
}
};
var postGroupData = {
type: 'conditional',
parameters: {
conditions: {
aCheck: 'v.type === "C"'
},
handlers: {
add: {
aCheck: {
'p.c': 'p.c + v.val'
},
post: {
'p.c': 'p.c * v.val'
}
},
remove: {
aCheck: {
'p.c': 'p.c / v.val'
},
post: {
'p.c': 'p.c - v.val'
}
}
}
}
};
beforeEach(function() {
module('ngDashboard');
inject(function(_crossfilterUtils_) {
crossfilterUtils = _crossfilterUtils_;
});
});
it('Init function defined', function() {
var groupFunctions = crossfilterUtils.groupFunctions(groupData);
expect(groupFunctions.init).toBeDefined();
});
it('Add function defined', function() {
var groupFunctions = crossfilterUtils.groupFunctions(groupData);
expect(groupFunctions.add).toBeDefined();
});
it('Remove function defined', function() {
var groupFunctions = crossfilterUtils.groupFunctions(groupData);
expect(groupFunctions.remove).toBeDefined();
});
it('Init should return object', function() {
var groupFunctions = crossfilterUtils.groupFunctions(groupData);
expect(groupFunctions.init()).toEqual({
a: 0,
b: 0,
c: 0,
d: 0
});
});
it('Add a should increment a', function() {
var groupFunctions = crossfilterUtils.groupFunctions(groupData);
var p = {
a: 1,
b: 10,
c: 5,
d: 0
};
var v = {
val: 10,
type: 'A'
};
expect(groupFunctions.add(p, v)).toEqual({
a: 11,
b: 10,
c: 5,
d: 0
});
});
it('Add b should increment b', function() {
var groupFunctions = crossfilterUtils.groupFunctions(groupData);
var p = {
a: 1,
b: 10,
c: 5,
d: 0
};
var v = {
val: 5,
type: 'B'
};
expect(groupFunctions.add(p, v)).toEqual({
a: 1,
b: 15,
c: 5,
d: 0
});
});
it('Add c should increment c and d', function() {
var groupFunctions = crossfilterUtils.groupFunctions(groupData);
var p = {
a: 1,
b: 10,
c: 5,
d: 0
};
var v = {
val: 1,
type: 'C'
};
expect(groupFunctions.add(p, v)).toEqual({
a: 1,
b: 10,
c: 6,
d: 1
});
});
it('Remove a should decrement a', function() {
var groupFunctions = crossfilterUtils.groupFunctions(groupData);
var p = {
a: 5,
b: 10,
c: 5,
d: 0
};
var v = {
val: 3,
type: 'A'
};
expect(groupFunctions.remove(p, v)).toEqual({
a: 2,
b: 10,
c: 5,
d: 0
});
});
it('Remove b should decrement b', function() {
var groupFunctions = crossfilterUtils.groupFunctions(groupData);
var p = {
a: 1,
b: 10,
c: 5,
d: 0
};
var v = {
val: 5,
type: 'B'
};
expect(groupFunctions.remove(p, v)).toEqual({
a: 1,
b: 5,
c: 5,
d: 0
});
});
it('Remove c should decrement c and d', function() {
var groupFunctions = crossfilterUtils.groupFunctions(groupData);
var p = {
a: 1,
b: 10,
c: 5,
d: 5
};
var v = {
val: 1,
type: 'C'
};
expect(groupFunctions.remove(p, v)).toEqual({
a: 1,
b: 10,
c: 4,
d: 4
});
});
it('Pre handlers should be called before others when adding', function() {
var groupFunctions = crossfilterUtils.groupFunctions(preGroupData);
var p = {
a: 1,
b: 10,
c: 6,
d: 5
};
var v = {
val: 2,
type: 'C'
};
expect(groupFunctions.add(p, v)).toEqual({
a: 1,
b: 10,
c: 14,
d: 5
});
});
it('Pre handlers should be called before others when removing', function() {
var groupFunctions = crossfilterUtils.groupFunctions(preGroupData);
var p = {
a: 1,
b: 10,
c: 14,
d: 5
};
var v = {
val: 2,
type: 'C'
};
expect(groupFunctions.remove(p, v)).toEqual({
a: 1,
b: 10,
c: 6,
d: 5
});
});
it('post handlers should be called after others when adding', function() {
var groupFunctions = crossfilterUtils.groupFunctions(postGroupData);
var p = {
a: 1,
b: 10,
c: 5,
d: 5
};
var v = {
val: 3,
type: 'C'
};
expect(groupFunctions.add(p, v)).toEqual({
a: 1,
b: 10,
c: 24,
d: 5
});
});
it('post handlers should be called after others when removing', function() {
var groupFunctions = crossfilterUtils.groupFunctions(postGroupData);
var p = {
a: 1,
b: 10,
c: 24,
d: 5
};
var v = {
val: 3,
type: 'C'
};
expect(groupFunctions.remove(p, v)).toEqual({
a: 1,
b: 10,
c: 5,
d: 5
});
});
});
<|file_name|>conf.py<|end_file_name|>
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# pycasso documentation build configuration file, created by
# sphinx-quickstart on Sun Sep 24 01:54:19 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import shutil
sys.path.insert(0, os.path.abspath('../../'))
# copy README
shutil.copy('../../README.rst', './README.rst')
import sphinx_rtd_theme
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.mathjax',
'sphinx.ext.githubpages']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['APItemplates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'pycasso'
copyright = '2017, Haoming Jiang, Jason Ge'
author = 'Haoming Jiang, Jason Ge'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
VERSION_PATH = os.path.join(os.path.dirname(__file__), '../../pycasso/VERSION')
# The full version, including alpha/beta/rc tags.
release = open(VERSION_PATH).read().strip()
# The short X.Y version.
version = release
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'default'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'pycassodoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'pycasso.tex', 'pycasso Documentation',
'Haoming Jiang, Jian Ge', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pycasso', 'pycasso Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'pycasso', 'pycasso Documentation',
author, 'pycasso', 'One line description of project.',
'Miscellaneous'),
]<|fim▁end|> | #
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be |
<|file_name|>krpc.rs<|end_file_name|><|fim▁begin|>// Copyright 2014 Dmitry "Divius" Tantsur <[email protected]>
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//
//! KRPC DHT service as described in
//! [BEP 0005](http://www.bittorrent.org/beps/bep_0005.html).
use std::old_io::{IoErrorKind, IoResult};
use std::sync;
use std::thread;
use num::traits::ToPrimitive;
use super::super::base;
use super::super::knodetable;
use super::udpwrapper;
/// Generic implementation of basic KRPC DHT on which BitTorrent DHT is based.
///
/// No peer retrieval is supported: just finding and pinging nodes.
///
/// Usually you will be creating it as:
/// ```ignore
/// let service = dht::bt::KRpcService::new_default(this_node);
/// ```
pub struct KRpcService<TNodeTable: base::GenericNodeTable,
TSocket: udpwrapper::GenericSocketWrapper> {
this_node: base::Node,
node_table: sync::Arc<sync::RwLock<TNodeTable>>,
socket: TSocket,
// this is for tests only, proper cancelling will follow later
active: sync::Arc<sync::RwLock<bool>>,
}
/// Default kind of KRpc service.
pub type DefaultKRpcService = KRpcService<knodetable::KNodeTable<base::Node>, udpwrapper::UdpSocketWrapper>;
// This can't be derived, compiler is confused because of Arc.
impl<TNodeTable: base::GenericNodeTable,
TSocket: udpwrapper::GenericSocketWrapper>
Clone for KRpcService<TNodeTable, TSocket> {
fn clone(&self) -> KRpcService<TNodeTable, TSocket> {
KRpcService {
this_node: self.this_node.clone(),
node_table: self.node_table.clone(),
socket: self.socket.clone(),
active: self.active.clone(),
}
}
}
fn handle_incoming<TNodeTable: base::GenericNodeTable,
TSocket: udpwrapper::GenericSocketWrapper>
(mut service: KRpcService<TNodeTable, TSocket>) {
while *service.active.read().unwrap() {
match service.socket.receive() {
Ok((package, addr)) =>
// TODO(divius): implement
debug!("Received {:?} from {:?}", package, addr),
Err(e) =>
if e.kind != IoErrorKind::TimedOut {
debug!("Error during receiving {}", e);
}
}
}
}
impl KRpcService<knodetable::KNodeTable<base::Node>, udpwrapper::UdpSocketWrapper> {
/// New service with default node table.
pub fn new_default(this_node: base::Node) -> IoResult<DefaultKRpcService> {
let node_table = knodetable::KNodeTable::new(this_node.id.clone());
let socket = try!(udpwrapper::UdpSocketWrapper::new(&this_node));
KRpcService::new(this_node, node_table, socket)
}
}
impl<TNodeTable: base::GenericNodeTable + 'static,
TSocket: udpwrapper::GenericSocketWrapper + 'static>
KRpcService<TNodeTable, TSocket> {
/// New service with given node table and socket.
pub fn new(this_node: base::Node, node_table: TNodeTable, socket: TSocket)
-> IoResult<KRpcService<TNodeTable, TSocket>> {
let self_ = KRpcService {
this_node: this_node,
node_table: sync::Arc::new(sync::RwLock::new(node_table)),
socket: socket,
active: sync::Arc::new(sync::RwLock::new(true)),
};
let self_clone = self_.clone();
//thread::Thread::spawn(move || handle_incoming(self_clone));
Ok(self_)
}
/// Get lock guarding access to a node table.
pub fn node_table_lock(&self) -> &sync::RwLock<TNodeTable> {
&(*self.node_table)
}
/// Get reference to a socket wrapper.
///
/// Clone it if you want to get mutable copy.
pub fn socket_ref(&self) -> &TSocket {
&self.socket
}
}
#[unsafe_destructor]
impl<TNodeTable: base::GenericNodeTable,
TSocket: udpwrapper::GenericSocketWrapper>
Drop for KRpcService<TNodeTable, TSocket> {
fn drop(&mut self) {
*self.active.write().unwrap() = false;
}
}
#[cfg(test)]
mod test {
use std::old_io::IoResult;
use std::old_io::net::ip;
<|fim▁hole|> use num::traits::ToPrimitive;
use num;
use super::super::super::base::{self, GenericNodeTable};
use super::super::super::utils::test;
use super::super::protocol;
use super::super::udpwrapper::GenericSocketWrapper;
use super::KRpcService;
struct DummyNodeTable {
last_node: Option<base::Node>,
}
impl GenericNodeTable for DummyNodeTable {
type P = base::Node;
fn random_id(&self) -> num::BigUint {
// This number is random, I promise :)
test::usize_to_id(42)
}
fn update(&mut self, node: &base::Node) -> bool {
self.last_node = Some(node.clone());
true
}
#[allow(unused_variables)]
fn find(&self, id: &num::BigUint, count: usize) -> Vec<base::Node> {
match self.last_node {
Some(ref n) if n.id == *id => vec![n.clone()],
_ => vec![]
}
}
fn pop_oldest(&mut self) -> Vec<base::Node> {
vec![]
}
        #[allow(unused_variables)]
        fn remove(&mut self, id: &num::BigUint) -> bool {
true
}
}
#[derive(Clone)]
struct DummySocketWrapper {
last_package: Option<protocol::Package>,
last_node: Option<base::Node>
}
impl GenericSocketWrapper for DummySocketWrapper {
fn send(&mut self, package: &protocol::Package, node: &base::Node)
-> IoResult<()> {
self.last_package = Some(package.clone());
self.last_node = Some(node.clone());
Ok(())
}
fn receive(&mut self) -> IoResult<(protocol::Package, ip::SocketAddr)> {
Ok((self.last_package.as_ref().unwrap().clone(),
self.last_node.as_ref().unwrap().address.clone()))
}
}
#[test]
fn test_new() {
let n = test::new_node_with_port(42, 8007);
let sock = DummySocketWrapper { last_package: None, last_node: None };
let s = KRpcService::new(n.clone(),
DummyNodeTable { last_node: None },
sock).unwrap();
assert_eq!(n.id, s.this_node.id);
let mut nt = s.node_table_lock().write().unwrap();
nt.update(&test::new_node(1));
assert_eq!(nt.random_id().to_u8().unwrap(), 42);
}
#[test]
fn test_new_default() {
let n = test::new_node_with_port(42, 8009);
let s = KRpcService::new_default(n.clone()).unwrap();
assert_eq!(n.id, s.this_node.id);
let nt = s.node_table_lock().read().unwrap();
assert_eq!(0, nt.find(&test::usize_to_id(1), 1).len());
}
}<|fim▁end|> | |
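For orientation, the wire protocol this service targets is KRPC as specified in BEP 0005: bencoded dictionaries over UDP. A ping exchange looks like the following, shown as plain Python dictionaries before bencoding; the transaction id and node ids are arbitrary example values, not anything produced by this crate.

# KRPC ping per BEP 0005. "t" is an opaque transaction id echoed by the
# responder; node ids are 160-bit (20-byte) opaque strings.
ping_query = {
    "t": "aa",                  # transaction id chosen by the querier
    "y": "q",                   # message type: query
    "q": "ping",                # method name
    "a": {"id": b"\xab" * 20},  # arguments: querying node's id
}
ping_response = {
    "t": "aa",                  # same transaction id as the query
    "y": "r",                   # message type: response
    "r": {"id": b"\xcd" * 20},  # return values: responding node's id
}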
<|file_name|>DataFile.cpp<|end_file_name|><|fim▁begin|>/* DataFile.cpp
Copyright (c) 2014 by Michael Zahniser
Endless Sky is free software: you can redistribute it and/or modify it under the
terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later version.
Endless Sky is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the GNU General Public License for more details.
*/
#include "DataFile.h"
#include <fstream>
using namespace std;
DataFile::DataFile(const string &path)
{
Load(path);
}
DataFile::DataFile(istream &in)
{
Load(in);
}
void DataFile::Load(const string &path)
{
// Check if the file exists before doing anything with it.
ifstream in(path);
if(!in.is_open())
return;
// Find out how big the file is.
in.seekg(0, ios_base::end);
size_t size = in.tellg();
in.seekg(0, ios_base::beg);
// Allocate one extra character for the sentinel '\n' character.
vector<char> data(size + 1);
in.read(&*data.begin(), size);
// As a sentinel, make sure the file always ends in a newline.
data.back() = '\n';
Load(&*data.begin(), &*data.end());
}
void DataFile::Load(istream &in)
{
vector<char> data;
static const size_t BLOCK = 4096;
while(in)
{
size_t currentSize = data.size();
data.resize(currentSize + BLOCK);
in.read(&*data.begin() + currentSize, BLOCK);
data.resize(currentSize + in.gcount());
}
// As a sentinel, make sure the file always ends in a newline.
if(data.back() != '\n')
data.push_back('\n');
Load(&*data.begin(), &*data.end());
}
list<DataNode>::const_iterator DataFile::begin() const
{
return root.begin();
}
list<DataNode>::const_iterator DataFile::end() const
{
return root.end();
}
void DataFile::Load(const char *it, const char *end)
{
vector<DataNode *> stack(1, &root);
vector<int> whiteStack(1, -1);
for( ; it != end; ++it)
{
// Find the first non-white character in this line.
int white = 0;
for( ; *it <= ' ' && *it != '\n'; ++it)
++white;
// If the line is a comment, skip to the end of the line.
if(*it == '#')
{
while(*it != '\n')
++it;
}
// Skip empty lines (including comment lines).
if(*it == '\n')
continue;
// Determine where in the node tree we are inserting this node, based on
// whether it has more indentation than the previous node, less, or the same.
while(whiteStack.back() >= white)
{
whiteStack.pop_back();
stack.pop_back();
}
// Add this node as a child of the proper node.
list<DataNode> &children = stack.back()->children;
children.push_back(DataNode());
DataNode &node = children.back();
// Remember where in the tree we are.
stack.push_back(&node);
whiteStack.push_back(white);
// Tokenize the line. Skip comments and empty lines.
while(*it != '\n')
{<|fim▁hole|> bool isQuoted = (endQuote == '"' || endQuote == '`');
it += isQuoted;
const char *start = it;
// Find the end of this token.
while(*it != '\n' && (isQuoted ? (*it != endQuote) : (*it > ' ')))
++it;
node.tokens.emplace_back(start, it);
if(*it != '\n')
{
it += isQuoted;
while(*it != '\n' && *it <= ' ' && *it != '#')
++it;
// If a comment is encountered outside of a token, skip the rest
// of this line of the file.
if(*it == '#')
{
while(*it != '\n')
++it;
}
}
}
}
}<|fim▁end|> | char endQuote = *it; |
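The character-pointer Load above builds the node tree from indentation alone, keeping a stack of open nodes alongside a stack of their leading-whitespace counts and popping both until the new line is deeper than the node on top. Here is a rough Python rendition of that stack discipline, with quoting and in-line comment handling simplified away; the class and function names are illustrative only.

# Indentation-driven tree build, mirroring DataFile::Load's two stacks.
class Node:
    def __init__(self, tokens=()):
        self.tokens = list(tokens)
        self.children = []

def load(text):
    root = Node()
    stack = [root]      # chain of currently open nodes, innermost last
    white_stack = [-1]  # indentation column owned by each open node
    for line in text.splitlines():
        stripped = line.lstrip()
        if not stripped or stripped.startswith("#"):
            continue    # skip blank lines and whole-line comments
        white = len(line) - len(stripped)
        # Unwind to the node this line is actually nested under.
        while white_stack[-1] >= white:
            white_stack.pop()
            stack.pop()
        node = Node(stripped.split())
        stack[-1].children.append(node)
        stack.append(node)
        white_stack.append(white)
    return root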
<|file_name|>common_type-N3843.cpp<|end_file_name|><|fim▁begin|>// Copyright Louis Dionne 2015
// Distributed under the Boost Software License, Version 1.0.
#include <type_traits>
#include <utility> // for std::declval
template <typename T>
using void_t = std::conditional_t<true, void, T>;
// sample(common_type-N3843)
template <typename T, typename U>
using builtin_common_t = std::decay_t<decltype(
true ? std::declval<T>() : std::declval<U>()
)>;
template <typename, typename ...>
struct ct { };
template <typename T>
struct ct<void, T> : std::decay<T> { };
template <typename T, typename U, typename ...V>
struct ct<void_t<builtin_common_t<T, U>>, T, U, V...>
: ct<void, builtin_common_t<T, U>, V...>
{ };
template <typename ...T>
struct common_type : ct<void, T...> { };
// end-sample
template <typename ...Ts>
using common_type_t = typename common_type<Ts...>::type;
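// How the reduction above proceeds: the recursive specialization peels two
// types at a time, turning ct<void, T, U, V...> into
// ct<void, builtin_common_t<T, U>, V...>. For example,
// common_type_t<char, double, short> goes through
// builtin_common_t<char, double> = double, then
// builtin_common_t<double, short> = double. If some step has no common type,
// void_t<builtin_common_t<T, U>> fails to substitute, the primary template
// (which defines no ::type) is selected instead, and the trait stays
// SFINAE-friendly, as the tests below check.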
//////////////////////////////////////////////////////////////////////////////
// Tests
//////////////////////////////////////////////////////////////////////////////
template <typename T, typename = void>
struct has_type : std::false_type { };
template <typename T>
struct has_type<T, void_t<typename T::type>> : std::true_type { };
struct A { }; struct B { }; struct C { };
// Ensure proper behavior in normal cases
static_assert(std::is_same<
common_type_t<char>,
char
>{}, "");
static_assert(std::is_same<
common_type_t<A, A>,
A
>{}, "");
static_assert(std::is_same<
common_type_t<char, short, char, short>,
int
>{}, "");
static_assert(std::is_same<
common_type_t<char, double, short, char, short, double>,
double
>{}, "");
static_assert(std::is_same<
common_type_t<char, short, float, short>,
float<|fim▁hole|>// Ensure SFINAE-friendliness
static_assert(!has_type<common_type<>>{}, "");
static_assert(!has_type<common_type<int, void>>{}, "");
int main() { }<|fim▁end|> | >{}, "");
|
<|file_name|>Star.js<|end_file_name|><|fim▁begin|>"use strict";
var _interopRequireDefault = require("@babel/runtime/helpers/interopRequireDefault");
<|fim▁hole|>});
exports.default = void 0;
var React = _interopRequireWildcard(require("react"));
var _createSvgIcon = _interopRequireDefault(require("../../utils/createSvgIcon"));
var _jsxRuntime = require("react/jsx-runtime");
/**
* @ignore - internal component.
*/
var _default = (0, _createSvgIcon.default)( /*#__PURE__*/(0, _jsxRuntime.jsx)("path", {
d: "M12 17.27L18.18 21l-1.64-7.03L22 9.24l-7.19-.61L12 2 9.19 8.63 2 9.24l5.46 4.73L5.82 21z"
}), 'Star');
exports.default = _default;<|fim▁end|> | var _interopRequireWildcard = require("@babel/runtime/helpers/interopRequireWildcard");
Object.defineProperty(exports, "__esModule", {
value: true |