max_stars_count
int64 301
224k
| text
stringlengths 6
1.05M
| token_count
int64 3
727k
|
---|---|---|
32,544 | <reponame>DBatOWL/tutorials<filename>spring-boot-modules/spring-boot-config-jpa-error/data-jpa-application/src/main/java/com/baeldung/data/jpa/ApplicationFound.java
package com.baeldung.data.jpa;
import org.springframework.boot.autoconfigure.SpringBootApplication;
/**
 * Empty Spring Boot application class. The {@code @SpringBootApplication}
 * annotation enables auto-configuration and component scanning for this
 * package; the class declares no members of its own.
 */
@SpringBootApplication
public class ApplicationFound {
}
| 109 |
2,706 | import pytest
def test_core_import():
    """Smoke test: verify that the compiled ``mgba.core`` extension is importable.

    Raises:
        AssertionError: if the import fails, chained to the original error so
            the underlying loader problem stays visible in the traceback.
    """
    try:
        import mgba.core  # noqa: F401 -- imported only to prove availability
    except ImportError as exc:
        # Narrow except: a bare `except:` would also swallow KeyboardInterrupt
        # and SystemExit, and the original version discarded the cause.
        raise AssertionError("could not import mgba.core") from exc
| 51 |
335 | <reponame>Safal08/Hacktoberfest-1
{
"word": "Quisquilious",
"definitions": [
"= quisquilian."
],
"parts-of-speech": "Adjective"
} | 79 |
7,158 | <reponame>ptelang/opencv_contrib
package org.opencv.test.barcode;
import java.util.List;
import org.opencv.core.Mat;
import org.opencv.barcode.BarcodeDetector;
import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.test.OpenCVTestCase;
import java.util.ArrayList;
import static org.opencv.barcode.Barcode.EAN_13;
/**
 * Tests for {@code BarcodeDetector}: detects and decodes four EAN-13
 * barcodes from a shared OpenCV test image.
 */
public class BarcodeDetectorTest extends OpenCVTestCase {

    private final static String ENV_OPENCV_TEST_DATA_PATH = "OPENCV_TEST_DATA_PATH";
    private String testDataPath;

    @Override
    protected void setUp() throws Exception {
        super.setUp();
        // Test data lives outside the repository; the environment variable is mandatory.
        testDataPath = System.getenv(ENV_OPENCV_TEST_DATA_PATH);
        if (testDataPath == null)
            throw new Exception(ENV_OPENCV_TEST_DATA_PATH + " has to be defined!");
    }

    public void testDetectAndDecode() {
        Mat img = Imgcodecs.imread(testDataPath + "/cv/barcode/multiple/4_barcodes.jpg");
        assertFalse(img.empty());
        BarcodeDetector detector = new BarcodeDetector();
        assertNotNull(detector);
        List<String> infos = new ArrayList<String>();
        List<Integer> types = new ArrayList<Integer>();
        boolean result = detector.detectAndDecode(img, infos, types);
        assertTrue(result);
        // JUnit convention: expected value first, actual second (fixed from the
        // original reversed order, which produced misleading failure messages).
        assertEquals(4, infos.size());
        assertEquals(4, types.size());
        final String[] correctResults = {"9787122276124", "9787118081473", "9787564350840", "9783319200064"};
        for (int i = 0; i < 4; i++) {
            assertEquals(EAN_13, types.get(i).intValue());
            // Detection order is unspecified, so each decoded string may match
            // any one of the expected codes.
            result = false;
            for (int j = 0; j < 4; j++) {
                if (correctResults[j].equals(infos.get(i))) {
                    result = true;
                    break;
                }
            }
            assertTrue(result);
        }
    }
}
| 811 |
2,206 | /*
*
* Copyright (c) 2006-2020, Speedment, Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); You may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package com.speedment.common.injector.internal;
import com.speedment.common.injector.Injector;
import com.speedment.common.injector.InjectorBuilder;
import com.speedment.common.injector.MissingArgumentStrategy;
import com.speedment.common.injector.State;
import com.speedment.common.injector.annotation.ExecuteBefore;
import com.speedment.common.injector.annotation.Inject;
import com.speedment.common.injector.annotation.InjectKey;
import com.speedment.common.injector.annotation.WithState;
import com.speedment.common.injector.exception.InjectorException;
import com.speedment.common.logger.Level;
import com.speedment.common.logger.Logger;
import com.speedment.common.logger.LoggerManager;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Disabled;
import org.junit.jupiter.api.Test;
import java.util.*;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import static java.util.Arrays.asList;
import static java.util.Collections.singletonList;
import static java.util.stream.Collectors.*;
import static org.junit.jupiter.api.Assertions.*;
/**
 * Unit tests for {@code InjectorImpl}: instance lookup and streaming,
 * ordered ("after") retrieval, field injection, STOPPED-phase lifecycle
 * handling, and builder creation.
 */
final class InjectorImplTest {
    private InjectorImpl injector;
    // Fixture types: Foo is the injection key; Bar and Baz implement it.
    @InjectKey(Foo.class) public interface Foo {};
    public static final class Bar implements Foo {};
    public static final class Baz implements Foo {
        @Inject public Bar bar;
        public boolean stopped;
        // Flags the instance so stop() tests can observe that it ran.
        @ExecuteBefore(State.STOPPED) public void stop() {
            stopped = true;
        }
    };
    // stop() requires a Foo argument that the injector cannot supply, so
    // stopping an injector holding only this component fails (see stopFailed).
    public static final class Unstoppable {
        @ExecuteBefore(State.STOPPED) void stop(Foo foo){}
    }
    // Same missing-argument situation, but SKIP_INVOCATION makes stop a no-op.
    public static final class Stoppable {
        @ExecuteBefore(value = State.STOPPED, missingArgument = MissingArgumentStrategy.SKIP_INVOCATION) void stop(Foo foo){}
    }
    @BeforeEach
    void setup() throws InstantiationException {
        // Fresh injector with Bar and Baz registered before every test.
        injector = (InjectorImpl) Injector.builder()
            .withComponent(Bar.class)
            .withComponent(Baz.class)
            .build();
        /* final Injectable<Integer> integerInjectable0 = new Injectable<>(Integer.class, () -> 0);
        final Injectable<Integer> integerInjectable1 = new Injectable<>(Integer.class, () -> 1);
        final Injectable<String> stringInjectable = new Injectable<>(String.class, () -> "A");
        final Set<Injectable<?>> injectables = Stream.of(integerInjectable0, integerInjectable1, stringInjectable).collect(toCollection(LinkedHashSet::new));
        final List<Object> instances = injectables.stream().map(i -> i.supplier().get()).collect(toList());
        final Properties properties = new Properties();
        final ClassLoader classLoader = InjectorImplTest.class.getClassLoader();
        injector = new InjectorImpl(injectables, instances, properties, classLoader);*/
    }
    // stream(Foo) yields implementations newest-first; unknown types yield nothing.
    @Test
    void stream() {
        assertEquals(asList(Baz.class, Bar.class), injector.stream(Foo.class).map(Object::getClass).collect(toList()));
        assertEquals(singletonList(Bar.class), injector.stream(Bar.class).map(Object::getClass).collect(toList()));
        assertEquals(0, injector.stream(Integer.class).count());
    }
    // Looking up an unregistered type must throw rather than return null.
    @Test
    void getOrThrow() {
        assertThrows(IllegalArgumentException.class, () -> injector.getOrThrow(Integer.class));
    }
    // getAfterOrThrow(Foo, baz) returns the Foo registered before baz (Bar);
    // asking for something after the earliest registration throws.
    @Test
    void getAfterOrThrow() {
        final Foo expected = injector.getOrThrow(Bar.class);
        final Baz baz = injector.getOrThrow(Baz.class);
        final Foo actual = injector.getAfterOrThrow(Foo.class, baz);
        assertEquals(expected, actual);
        final Bar bar = injector.getOrThrow(Bar.class);
        assertThrows(IllegalArgumentException.class, () -> injector.getAfterOrThrow(Foo.class, bar));
    }
    // Optional-returning lookup: present for registered keys, empty otherwise.
    @Test
    void get() {
        assertTrue(injector.get(Foo.class).isPresent());
        assertFalse(injector.get(Integer.class).isPresent());
    }
    // Optional-returning variant of getAfterOrThrow.
    @Test
    void getAfter() {
        final Foo expected = injector.getOrThrow(Bar.class);
        final Baz baz = injector.getOrThrow(Baz.class);
        final Foo actual = injector.getAfter(Foo.class, baz).orElseThrow(NoSuchElementException::new);
        assertEquals(expected, actual);
        final Bar bar = injector.getOrThrow(Bar.class);
        assertFalse(injector.getAfter(Foo.class, bar).isPresent());
    }
    // injectables() reports exactly the classes registered in setup().
    @Test
    void injectables() {
        final Set<Class<?>> injectables = injector.injectables().collect(toSet());
        assertEquals(new HashSet<>(asList(Bar.class, Baz.class)), injectables);
    }
    // inject() populates @Inject fields on an externally created instance.
    @Test
    void inject() {
        final Baz newBaz = new Baz();
        assertNull(newBaz.bar);
        injector.inject(newBaz);
        assertNotNull(newBaz.bar);
    }
    // Injection into a private field is rejected.
    @Test
    void injectIllegal() {
        final class Bez {
            @Inject private int a; // private field
        }
        final Bez bez = new Bez();
        assertThrows(IllegalArgumentException.class, () -> injector.inject(bez));
    }
    @Test
    void classLoader() {
        assertSame(Injector.class.getClassLoader(),injector.classLoader());
    }
    // stop() must run Baz's @ExecuteBefore(STOPPED) handler.
    // DEBUG level is forced (and restored) to exercise the logging path.
    @Test
    void stop() {
        final Logger logger = LoggerManager.getLogger(InjectorImpl.class);
        final Level level = logger.getLevel();
        logger.setLevel(Level.DEBUG);
        try {
            final Baz baz = injector.getOrThrow(Baz.class);
            injector.stop();
            assertTrue(baz.stopped);
        } finally {
            logger.setLevel(level);
        }
    }
    // A stop handler with an unsatisfiable argument aborts the shutdown.
    @Test
    void stopFailed() throws InstantiationException {
        final Logger logger = LoggerManager.getLogger(InjectorImpl.class);
        final Level level = logger.getLevel();
        logger.setLevel(Level.DEBUG);
        final Injector newInjector = Injector.builder().withComponent(Unstoppable.class).build();
        try {
            assertThrows(InjectorException.class, newInjector::stop);
        } finally {
            logger.setLevel(level);
        }
    }
    // With SKIP_INVOCATION the unsatisfiable handler is skipped silently.
    @Test
    void stopSkipInvocation() throws InstantiationException {
        final Logger logger = LoggerManager.getLogger(InjectorImpl.class);
        final Level level = logger.getLevel();
        logger.setLevel(Level.DEBUG);
        final Injector newInjector = Injector.builder().withComponent(Stoppable.class).build();
        try {
            assertDoesNotThrow(newInjector::stop);
        } finally {
            logger.setLevel(level);
        }
    }
    @Test
    void newBuilder() {
        assertNotNull(injector.newBuilder());
        injector.newBuilder();
    }
    // Disabled: newBuilder() currently returns a shared instance (issue #853).
    @Test
    @Disabled("https://github.com/speedment/speedment/issues/853")
    void newBuilderIsNew() {
        final InjectorBuilder builder1 = injector.newBuilder();
        final InjectorBuilder builder2 = injector.newBuilder();
        assertNotSame(builder1, builder2);
    }
}
1,056 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.netbeans.modules.gradle.tooling;
/**
*
* @author <NAME>
*/
/**
 * Base implementation of {@code Model} that carries an optional Gradle
 * exception description captured while the model was being built.
 */
public class BaseModel implements Model {
    // Exception description, or null when no exception occurred (see hasException()).
    String gradleException;
    // Package-private on purpose: only the tooling code in this package may set it.
    void setGradleException(String gradleException) {
        this.gradleException = gradleException;
    }
    /** Returns the recorded exception description, or {@code null} if none. */
    @Override
    public final String getGradleException() {
        return gradleException;
    }
    /** Returns {@code true} iff an exception description has been recorded. */
    @Override
    public final boolean hasException() {
        return gradleException != null;
    }
}
| 368 |
13,937 | <reponame>ChiQuang98/NodeRedTest
{
"info": {
"tip0": "Sie können die ausgewählten Nodes oder Verbindungen mit {{ core:delete-selection }} entfernen",
"tip1": "Sie können nach Nodes mit {{ core:search }} suchen",
"tip2": "{{ core:toggle-sidebar }} blendet die Seitenleiste ein/aus",
"tip3": "Sie können Ihre Node-Palette mit {{ core:manage-palette }} verwalten",
"tip4": "Ihre Flow-Konfigurationsnodes werden in der Seitenleiste angezeigt, die über das Menü oder mit {{ core:show-config-tab }} angezeigt werden kann",
"tip5": "Aktiviere oder deaktiviere diese Tipps in den Einstellungen im Tab 'Ansicht'",
"tip6": "Sie können die ausgewählten Nodes mit den [left]/[up]/[down]/[right]-Tasten verschieben. Wenn Sie dabei [Shift] gedrückt halten, können Sie den Fensterausschnitt verschieben.",
"tip7": "Wenn Sie ein Node auf eine Verbindung ziehen, wird es in die Verbindung eingefügt",
"tip8": "Sie können die ausgewählten Nodes oder den aktuellen Flow-Tab mit {{ core:show-export-dialog }} exportieren",
"tip9": "Sie können einen Flow importieren, indem Sie sein JSON in den Editor ziehen oder mittels {{ core:show-import-dialog }}",
"tip10": "Halten Sie [Shift] beim [Klicken] auf ein Node gedrückt, um auch alle verbundenen Nodes mit zu verschieben",
"tip11": "Sie können den Tab 'Info' mit {{ core:show-info-tab }} oder den Tab 'Debug' mit {{ core:show-debug-tab }} anzeigen lassen",
"tip12": "Halten Sie [Strg] beim [Klicken] in den Arbeitsbereich gedrückt, um den Schnellhinzufügedialog zu öffnen",
"tip13": "Halten Sie [Strg] beim [Klicken] auf einen Node-Anschluss gedrückt, um eine Verbindung nur durch kurzes [Klicken] (ohne Halten) zu verlegen",
"tip14": "Halten Sie [Shift] beim [Klicken] auf ein Node gedrückt, um auch alle verbundenen Nodes mit auszuwählen",
"tip15": "Halten Sie [Strg] beim [Klicken] auf ein Node gedrückt, um es zu der aktuellen Auswahl hinzuzufügen oder aus ihr zu entfernen",
"tip16": "Sie können die Flow-Tabs mit {{ core:show-previous-tab }} und {{ core:show-next-tab }} wechseln",
"tip17": "Sie können die Änderungen im Node-Editor mit {{ core:confirm-edit-tray }} bestätigen oder sie mit {{ core:cancel-edit-tray }} verwerfen",
"tip18": "Sie können mit {{ core:edit-selected-node }} den ersten Node in der aktuellen Auswahl bearbeiten"
}
}
| 1,075 |
301 | //******************************************************************
//
// Copyright 2017 Samsung Mobile Communications GmbH All Rights Reserved.
//
//-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
//-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
#include "iotivity_config.h"
#include "trace.h"
#if (defined(__ANDROID__)) || (defined(__TIZEN__) && defined(OIC_SUPPORT_TIZEN_TRACE))
#define MAX_BUFFER_SIZE 8
#define MAX_LINE_LEN ((MAX_BUFFER_SIZE) * 2) + 1
/*
 * Emits a trace event whose label is "<name>:<hex dump>" of up to
 * MAX_BUFFER_SIZE leading bytes of `buffer`.  No-op on NULL/empty input.
 */
void oic_trace_buffer(const char *name, const uint8_t * buffer, size_t bufferSize)
{
    if (!name || !buffer || (0 == bufferSize))
    {
        return;
    }
    /* MAX_LINE_LEN = 2 hex chars per byte + terminating NUL. */
    char lineBuffer[MAX_LINE_LEN] = {0};
    size_t count = (MAX_BUFFER_SIZE > bufferSize) ? bufferSize : MAX_BUFFER_SIZE;
    size_t remainSize = MAX_LINE_LEN;
    int writtenSize = 0;
    char* buf = &lineBuffer[0];
    for (size_t i = 0; i < count; i++)
    {
        writtenSize = snprintf(buf, remainSize, "%02x", buffer[i]);
        /* Each byte must format to exactly two chars; anything else means
         * truncation or an encoding error, so stop appending. */
        if (2 != writtenSize)
        {
            break;
        }
        buf += writtenSize;
        remainSize -= 2;
    }
    /* OIC_TRACE_BEGIN takes the label tokens unquoted by design (macro API). */
    OIC_TRACE_BEGIN(%s:%s, name, lineBuffer);
    OIC_TRACE_END();
}
#endif
#ifndef __TIZEN__
#include "experimental/logger.h"
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#ifdef HAVE_FCNTL_H
#include <fcntl.h>
#endif
#ifdef HAVE_STRING_H
#include <string.h>
#elif defined(HAVE_STRINGS_H)
#include <strings.h>
#endif
#define FD_INITIAL_VALUE -1
#define FD_NOT_EXIST -2
#define MAX_BUF_SIZE 4096
#define MAX_TRACE_LEN 524
#define MAX_HEAD_LEN 8
#define MAX_TAIL_LEN 13
#define POS_LABEL_ST ((MAX_TRACE_LEN - MAX_HEAD_LEN))
#define POS_LABEL_ED ((MAX_TRACE_LEN - MAX_TAIL_LEN))
#define TAG "OIC_TRACER"
#ifdef __ANDROID__
/*
* Currently android api level 21 is used for building iotivity project.
* Since Atrace (aka. android trace) API has been provided by NDK above android api level 23,
* we use ftrace directly as workaround to Atrace API until android build level is upgraded
*/
int g_trace_marker_hd=FD_INITIAL_VALUE;
/*
 * Lazily opens the ftrace trace_marker file.  Scans /proc/mounts for the
 * debugfs mountpoint, then opens <debugfs>/tracing/trace_marker for writing.
 * The descriptor is cached in g_trace_marker_hd.
 *
 * Returns the (possibly cached) file descriptor on success, -1 on failure.
 */
int oic_trace_init()
{
    OIC_LOG(INFO, TAG, "entering oic_trace_init");
    int mounts = -1;
    char buf[MAX_BUF_SIZE] = {0};
    ssize_t buflen = -1;
    char *line = NULL, *tmp1 = NULL, *path = NULL;
    if(g_trace_marker_hd == FD_INITIAL_VALUE)
    {
        mounts = open("/proc/mounts", O_RDONLY);
        if (mounts < 0)
        {
            OIC_LOG(INFO, TAG, "no /proc/mounts");
            return -1;
        }
        buflen = read(mounts, buf, sizeof(buf) - 1);
        close(mounts);
        if (buflen < 0)
        {
            OIC_LOG(INFO, TAG, "failed to read /proc/mounts");
            return -1;
        }
        line = strtok_r(buf, "\n", &tmp1);
        while (line)
        {
            char *tmp2 = NULL, *tmp_path = NULL, *fstype = NULL;
            /* "<dev> <mountpoint> <fs type> ..." */
            strtok_r(line, " ", &tmp2);
            tmp_path = strtok_r(NULL, " ", &tmp2);
            fstype = strtok_r(NULL, " ", &tmp2);
            /* Guard: a malformed line yields NULL from strtok_r, and the
             * original unconditional strcmp would dereference it. */
            if ((NULL != fstype) && (strcmp(fstype, "debugfs") == 0))
            {
                path = tmp_path;
                break;
            }
            line = strtok_r(NULL, "\n", &tmp1);
        }
        if (NULL == path)
        {
            OIC_LOG(INFO, TAG, "debugfs mountpoint not found");
            return -1;
        }
        snprintf(buf, sizeof(buf) - 1, "%s/tracing/trace_marker", path);
        g_trace_marker_hd = open(buf, O_WRONLY);
        if (g_trace_marker_hd < 0)
        {
            OIC_LOG_V(INFO, TAG, "failed to open trace_marker file: %s (%d)",
                      strerror(errno), errno);
            return -1;
        }
    }
    OIC_LOG_V(INFO, TAG, "exit oic_trace_init with: %d", g_trace_marker_hd);
    return g_trace_marker_hd;
}
/*
 * Writes a "B|<pid>|<formatted name>" begin-slice record to trace_marker,
 * lazily initializing the descriptor on first use.  `name` is a printf-style
 * format consumed with the trailing varargs.
 */
void oic_trace_begin(const char *name, ...)
{
    if (g_trace_marker_hd == FD_INITIAL_VALUE)
    {
        oic_trace_init();
    }
    if (g_trace_marker_hd > 0)
    {
        char buf[MAX_TRACE_LEN]={0};
        int len = MAX_HEAD_LEN, ret = 0;
        va_list ap;
        va_start(ap, name);
        /* Header "B|%5d|" is laid out to occupy the first MAX_HEAD_LEN bytes.
         * NOTE(review): a pid wider than 5 digits would overflow that layout
         * and be partially overwritten by the label below -- confirm intent. */
        snprintf(buf, MAX_TRACE_LEN, "B|%5d|", getpid());
        len += vsnprintf(buf + MAX_HEAD_LEN, POS_LABEL_ST, name, ap);
        va_end(ap);
        /* vsnprintf returns the would-be length, so clamp when truncated. */
        if (len > MAX_TRACE_LEN)
        {
            len = MAX_TRACE_LEN - 1;
        }
        ret = write(g_trace_marker_hd, buf, len);
        if (ret < 0)
        {
            OIC_LOG_V(INFO, TAG, "error writing, len: %d, ret: %d, errno: %d at oic_trace_begin",
                      len, ret, errno);
        }
    }
    else
    {
        OIC_LOG_V(INFO, TAG, "oic_trace_begin: invalid fd: %d", g_trace_marker_hd);
    }
}
/*
 * Writes the single-character 'E' end-slice record to trace_marker,
 * lazily initializing the descriptor on first use.
 */
void oic_trace_end()
{
    if (FD_INITIAL_VALUE == g_trace_marker_hd)
    {
        /* Lazily open the trace fd, mirroring oic_trace_begin. */
        oic_trace_init();
    }
    if (g_trace_marker_hd <= 0)
    {
        OIC_LOG_V(INFO, TAG, "oic_trace_end: invalid fd: %d", g_trace_marker_hd);
        return;
    }
    const char endMarker = 'E';
    const int markerLen = 1;
    int written = write(g_trace_marker_hd, &endMarker, markerLen);
    if (written < 0)
    {
        OIC_LOG_V(INFO, TAG, "error writing, len: %d, ret: %d, errno: %d at oic_trace_end",
                  markerLen, written, errno);
    }
}
#endif // #ifdef __ANDROID__
#endif // #ifndef __TIZEN__
| 2,809 |
1,240 | package com.eventyay.organizer.data.repository;
import com.eventyay.organizer.common.Constants;
import com.eventyay.organizer.data.AbstractObservable;
import com.eventyay.organizer.data.Repository;
import com.eventyay.organizer.data.session.Session;
import com.eventyay.organizer.data.session.SessionApi;
import com.eventyay.organizer.data.session.SessionRepositoryImpl;
import com.eventyay.organizer.data.tracks.Track;
import com.raizlabs.android.dbflow.sql.language.SQLOperator;
import org.junit.After;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.mockito.Mock;
import org.mockito.junit.MockitoJUnit;
import org.mockito.junit.MockitoRule;
import java.util.ArrayList;
import java.util.List;
import io.reactivex.Completable;
import io.reactivex.Observable;
import io.reactivex.android.plugins.RxAndroidPlugins;
import io.reactivex.plugins.RxJavaPlugins;
import io.reactivex.schedulers.Schedulers;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
@SuppressWarnings("PMD.TooManyMethods")
/**
 * Unit tests for {@code SessionRepositoryImpl}.  Each test stubs the generic
 * {@code Repository} and the remote {@code SessionApi} and verifies which of
 * the two a call is routed to, depending on connectivity and on whether data
 * is already cached locally.
 */
@SuppressWarnings("PMD.TooManyMethods")
public class SessionRepositoryTest {
    @Rule
    public MockitoRule mockitoRule = MockitoJUnit.rule();
    private SessionRepositoryImpl sessionRepository;
    // Shared fixtures reused across all tests.
    private static final Session SESSION = Session.builder().id(10L).title("a").build();
    private static final Track TRACK = new Track();
    private static final long ID = 10L;
    @Mock
    private SessionApi sessionApi;
    @Mock
    private Repository repository;
    static {
        // Attach the track fixture to the shared session before any test runs.
        SESSION.setTrack(TRACK);
    }
    @Before
    public void setUp() {
        when(repository.observableOf(Session.class)).thenReturn(new AbstractObservable.AbstractObservableBuilder<>(repository));
        sessionRepository = new SessionRepositoryImpl(sessionApi, repository);
        // Run Rx work on the trampoline so assertions can follow subscribe() directly.
        RxJavaPlugins.setIoSchedulerHandler(scheduler -> Schedulers.trampoline());
        RxAndroidPlugins.setInitMainThreadSchedulerHandler(schedulerCallable -> Schedulers.trampoline());
    }
    @After
    public void tearDown() {
        // Restore the default schedulers so other test classes are unaffected.
        RxJavaPlugins.reset();
        RxAndroidPlugins.reset();
    }
    // Offline + forced reload: the stream must fail with NO_NETWORK.
    @Test
    public void shouldReturnConnectionErrorOnGetSessionsWithReload() {
        when(repository.isConnected()).thenReturn(false);
        Observable<Session> sessionObservable = sessionRepository.getSessions(ID, true);
        sessionObservable
            .test()
            .assertError(throwable -> throwable.getMessage().equals(Constants.NO_NETWORK));
    }
    // Offline + nothing cached locally: also NO_NETWORK.
    @Test
    public void shouldReturnConnectionErrorOnGetSessionsWithNoneSaved() {
        when(repository.isConnected()).thenReturn(false);
        when(repository.getItems(eq(Session.class), any(SQLOperator.class))).thenReturn(Observable.empty());
        Observable<Session> sessionObservable = sessionRepository.getSessions(ID, false);
        sessionObservable
            .test()
            .assertError(throwable -> throwable.getMessage().equals(Constants.NO_NETWORK));
    }
    // Online + forced reload: the remote API must be hit.
    @Test
    public void shouldCallGetSessionsServiceOnReload() {
        when(repository.isConnected()).thenReturn(true);
        when(sessionApi.getSessions(ID)).thenReturn(Observable.empty());
        sessionRepository.getSessions(ID, true).subscribe();
        verify(sessionApi).getSessions(ID);
    }
    // Online + empty cache: falls through to the remote API.
    @Test
    public void shouldCallGetSessionsServiceWithNoneSaved() {
        when(repository.isConnected()).thenReturn(true);
        when(sessionApi.getSessions(ID)).thenReturn(Observable.empty());
        when(repository.getItems(eq(Session.class), any(SQLOperator.class))).thenReturn(Observable.empty());
        sessionRepository.getSessions(ID, false).subscribe();
        verify(sessionApi).getSessions(ID);
    }
    // Sessions fetched from the API must be persisted via syncSave.
    @Test
    public void shouldSaveSessionsOnGet() {
        List<Session> sessions = new ArrayList<>();
        sessions.add(SESSION);
        when(repository.isConnected()).thenReturn(true);
        when(sessionApi.getSessions(ID)).thenReturn(Observable.just(sessions));
        when(repository.syncSave(eq(Session.class), eq(sessions), any(), any())).thenReturn(Completable.complete());
        sessionRepository.getSessions(ID, true).subscribe();
        verify(repository).syncSave(eq(Session.class), eq(sessions), any(), any());
    }
    // Session update tests
    @Test
    public void shouldCallUpdateSessionService() {
        when(repository.isConnected()).thenReturn(true);
        when(sessionApi.updateSession(ID, SESSION)).thenReturn(Observable.empty());
        sessionRepository.updateSession(SESSION).subscribe();
        verify(sessionApi).updateSession(ID, SESSION);
    }
    // The entity returned by the API (not the one sent) must be written locally.
    @Test
    public void shouldUpdateUpdatedSession() {
        Session updated = mock(Session.class);
        when(repository.isConnected()).thenReturn(true);
        when(sessionApi.updateSession(ID, SESSION)).thenReturn(Observable.just(updated));
        when(repository.update(eq(Session.class), eq(updated))).thenReturn(Completable.complete());
        sessionRepository.updateSession(SESSION).subscribe();
        verify(repository).update(Session.class, updated);
    }
    // Session delete tests
    @Test
    public void shouldCallDeleteSessionService() {
        when(repository.isConnected()).thenReturn(true);
        when(sessionApi.deleteSession(ID)).thenReturn(Completable.complete());
        sessionRepository.deleteSession(ID).subscribe();
        verify(sessionApi).deleteSession(ID);
    }
}
| 2,074 |
937 |
{
"mpConfigIssuer": "farmshop",
"mpConfigurationFolder": ".",
"privateKey": "<KEY>",
"publicKey": "<KEY>"
} | 53 |
1,825 | <filename>unidbg-android/src/main/java/com/github/unidbg/linux/file/NetworkIF.java
package com.github.unidbg.linux.file;
import java.net.Inet4Address;
/**
 * Immutable description of one emulated network interface: its index, its
 * (normalized) name, and optional IPv4/broadcast addresses.  macOS-style
 * names are translated to their Android equivalents on construction
 * ({@code lo0} becomes {@code lo}, {@code en0} becomes {@code wlan0}).
 */
public class NetworkIF {

    public final int index;
    public final String ifName;
    public final Inet4Address ipv4;
    public final Inet4Address broadcast;

    /** Convenience constructor for an interface without a broadcast address. */
    public NetworkIF(int index, String ifName, Inet4Address ipv4) {
        this(index, ifName, ipv4, null);
    }

    public NetworkIF(int index, String ifName, Inet4Address ipv4, Inet4Address broadcast) {
        this.index = index;
        this.ifName = normalizeName(ifName);
        this.ipv4 = ipv4;
        this.broadcast = broadcast;
    }

    /** Maps macOS-style interface names onto their Android counterparts. */
    private static String normalizeName(String rawName) {
        if ("lo0".equals(rawName)) {
            return "lo";
        }
        return "en0".equals(rawName) ? "wlan0" : rawName;
    }

    /** True when the (normalized) name designates a loopback interface. */
    public boolean isLoopback() {
        return ifName.startsWith("lo");
    }

    @Override
    public String toString() {
        return ifName;
    }
}
| 455 |
12,940 | <reponame>dciborow/azure-quickstart-templates
{
"$schema": "https://aka.ms/azure-quickstart-templates-metadata-schema#",
"type": "QuickStart",
"itemDisplayName": "JBoss EAP on RHEL (clustered, multi-VM)",
"description": "This template allows you to create multiple RHEL 8.4 VM running JBoss EAP 7.3/ EAP 7.4 cluster and also deploy a web application called eap-session-replication, you can log into the admin console using the JBoss EAP username and password configured at the time of the deployment.",
"summary": "This template deploys an eap-session-replication web application on JBoss EAP EAP 7.3/ EAP 7.4 cluster running on multiple RHEL 8.4 VM.",
"githubUsername": "SpektraSystems",
"dateUpdated": "2021-08-16",
"validationType": "Manual"
} | 242 |
638 | // Copyright 2017, Additive Regularization of Topic Models.
#include <memory>
#include <vector>
#include "gtest/gtest.h"
#include "artm/cpp_interface.h"
#include "artm/core/common.h"
#include "artm/core/instance.h"
#include "artm_tests/test_mother.h"
#include "artm_tests/api.h"
// artm_tests.exe --gtest_filter=Regularizers.TopicSelection
// Verifies that the TopicSelectionTheta regularizer, with per-topic values
// increasing linearly, produces the expected (decreasing) theta column.
TEST(Regularizers, TopicSelection) {
  int nTopics = 10;
  // create master (theta cache enabled so GetThetaMatrix works)
  ::artm::MasterModelConfig master_config = ::artm::test::TestMother::GenerateMasterModelConfig(nTopics);
  master_config.set_cache_theta(true);
  // create regularizer with topic_value[i] = i / nTopics
  ::artm::RegularizerConfig* regularizer_config = master_config.add_regularizer_config();
  regularizer_config->set_name("TopicSelectionRegularizer");
  regularizer_config->set_type(::artm::RegularizerType_TopicSelectionTheta);
  regularizer_config->set_tau(0.5f);
  ::artm::TopicSelectionThetaConfig internal_config;
  for (int i = 0; i < nTopics; ++i) {
    internal_config.add_topic_value(static_cast<float>(i) / nTopics);
  }
  regularizer_config->set_config(internal_config.SerializeAsString());
  artm::MasterModel master(master_config);
  ::artm::test::Api api(master);
  // iterations: three offline fit passes over one generated batch
  auto batches = ::artm::test::TestMother::GenerateBatches(1, 5);
  auto offline_args = api.Initialize(batches);
  for (int iter = 0; iter < 3; ++iter) {
    master.FitOfflineModel(offline_args);
  }
  // get and check theta (removed an unused GetThetaMatrixArgs local)
  ::artm::ThetaMatrix theta_matrix = master.GetThetaMatrix();
  // Uncomment to dump actual results
  // for (int i = 0; i <= 9; ++i)
  //   std::cout << theta_matrix.item_weights(0).value(i) << std::endl;
  float expected_values[] = { 0.41836f, 0.262486f, 0.160616f, 0.0845677f, 0.032849f,
                              0.022987f, 0.0103793f, 0.0040327f, 0.00267936f, 0.00104289f };
  for (int i = 0; i < nTopics; ++i) {
    ASSERT_NEAR(theta_matrix.item_weights(0).value(i), expected_values[i], 0.00001);
  }
}
// artm_tests.exe --gtest_filter=Regularizers.SmoothSparseTheta
// Verifies the SmoothSparseTheta regularizer in two configurations:
// part 1 sparsifies whole items (item_0, item_2 driven to zero);
// part 2 smooths selected topics per item via item_topic_multiplier.
TEST(Regularizers, SmoothSparseTheta) {
  int nTopics = 4;
  int nTokens = 5;
  int nDocs = 3;
  // generate batch: nDocs items, each containing every token with weight 1
  std::shared_ptr<::artm::Batch> batch(new ::artm::Batch());
  batch->set_id(artm::test::Helpers::getUniqueString());
  for (int i = 0; i < nTokens; i++) {
    std::stringstream str;
    str << "token" << i;
    batch->add_token(str.str());
  }
  for (int i = 0; i < nDocs; ++i) {
    artm::Item* item = batch->add_item();
    std::stringstream str;
    str << "item_" << i;
    item->set_title(str.str());
    for (int iToken = 0; iToken < nTokens; ++iToken) {
      item->add_token_id(iToken);
      item->add_transaction_start_index(item->transaction_start_index_size());
      item->add_token_weight(1.0);
    }
    item->add_transaction_start_index(item->transaction_start_index_size());
  }
  // part 1: strong negative tau on item_0 and item_2 zeroes their theta rows
  // create master
  ::artm::MasterModelConfig master_config = ::artm::test::TestMother::GenerateMasterModelConfig(nTopics);
  master_config.set_cache_theta(true);
  // create regularizer
  ::artm::RegularizerConfig* regularizer_config = master_config.add_regularizer_config();
  regularizer_config->set_name("SSTRegularizer_1");
  regularizer_config->set_type(::artm::RegularizerType_SmoothSparseTheta);
  regularizer_config->set_tau(-100.0f);
  ::artm::SmoothSparseThetaConfig internal_config;
  internal_config.add_item_title("item_0");
  internal_config.add_item_title("item_2");
  regularizer_config->set_config(internal_config.SerializeAsString());
  artm::MasterModel master(master_config);
  ::artm::test::Api api(master);
  auto offline_args = api.Initialize({ batch });
  master.FitOfflineModel(offline_args);
  // get and check theta
  ::artm::ThetaMatrix theta_matrix = master.GetThetaMatrix();
  // nDocs x nTopics
  std::vector<std::vector<float> > expected_values = {
    { 0.0f, 0.0f, 0.0f, 0.0f },
    { 0.265f, 0.224f, 0.247f, 0.264f },
    { 0.0f, 0.0f, 0.0f, 0.0f }
  };
  for (int i = 0; i < nDocs; ++i) {
    for (int j = 0; j < nTopics; ++j) {
      ASSERT_NEAR(theta_matrix.item_weights(i).value(j), expected_values[i][j], 0.001);
    }
  }
  // Uncomment to dump actual results (debug output removed from the normal
  // test run for consistency with the other regularizer tests)
  // for (int i = 0; i < nDocs; ++i) {
  //   for (int j = 0; j < nTopics; ++j)
  //     std::cout << theta_matrix.item_weights(i).value(j) << " ";
  //   std::cout << std::endl;
  // }
  // part 2: positive tau with per-item topic multipliers; item_0 keeps only
  // topics 0 and 2, item_2 is fully suppressed via a -1 multiplier
  // create master
  master_config = ::artm::test::TestMother::GenerateMasterModelConfig(nTopics);
  master_config.set_opt_for_avx(false);
  master_config.set_cache_theta(true);
  // create regularizer
  regularizer_config = master_config.add_regularizer_config();
  regularizer_config->set_name("SSTRegularizer_1");
  regularizer_config->set_type(::artm::RegularizerType_SmoothSparseTheta);
  regularizer_config->set_tau(100.0f);
  ::artm::SmoothSparseThetaConfig internal_config_2;
  internal_config_2.add_item_title("item_0");
  auto values = internal_config_2.add_item_topic_multiplier();
  values->add_value(1.0);
  values->add_value(0.0);
  values->add_value(1.0);
  values->add_value(0.0);
  internal_config_2.add_item_title("item_2");
  values = internal_config_2.add_item_topic_multiplier();
  for (int i = 0; i < nTopics; ++i) {
    values->add_value(-1.0f);
  }
  regularizer_config->set_config(internal_config_2.SerializeAsString());
  master.Reconfigure(master_config);
  ::artm::test::Api api_2(master);
  offline_args = api_2.Initialize({ batch });
  master.FitOfflineModel(offline_args);
  // get and check theta
  theta_matrix = master.GetThetaMatrix();
  // nDocs x nTopics
  expected_values = {
    { 0.5f, 0.0f, 0.5f, 0.0f },
    { 0.265f, 0.224f, 0.247f, 0.264f },
    { 0.0f, 0.0f, 0.0f, 0.0f }
  };
  for (int i = 0; i < nDocs; ++i) {
    for (int j = 0; j < nTopics; ++j) {
      ASSERT_NEAR(theta_matrix.item_weights(i).value(j), expected_values[i][j], 0.001);
    }
  }
  // Uncomment to dump actual results
  // for (int i = 0; i < nDocs; ++i) {
  //   for (int j = 0; j < nTopics; ++j)
  //     std::cout << theta_matrix.item_weights(i).value(j) << " ";
  //   std::cout << std::endl;
  // }
}
// artm_tests.exe --gtest_filter=Regularizers.NetPlsa
// Verifies the NetPlsaPhi regularizer: two instances with opposite tau are
// configured over a two-vertex time graph, and the resulting theta column for
// topic 2 is checked against reference values.
TEST(Regularizers, NetPlsa) {
  int nTopics = 8;
  int nTokens = 10;
  int nDocs = 5;
  // create master (theta cache enabled so GetThetaMatrix works)
  ::artm::MasterModelConfig master_config = ::artm::test::TestMother::GenerateMasterModelConfig(nTopics);
  master_config.set_cache_theta(true);
  // create regularizers
  // first instance: symmetric edge weights, explicit vertex weights
  ::artm::RegularizerConfig* regularizer_config = master_config.add_regularizer_config();
  regularizer_config->set_name("NetPlsaRegularizer_1");
  regularizer_config->set_type(::artm::RegularizerType_NetPlsaPhi);
  regularizer_config->set_tau(2.0f);
  ::artm::NetPlsaPhiConfig internal_config;
  internal_config.set_class_id("@time_class");
  internal_config.add_vertex_name("time_1");
  internal_config.add_vertex_name("time_2");
  internal_config.add_vertex_weight(2.0);
  internal_config.add_vertex_weight(1.0);
  internal_config.add_first_vertex_index(0);
  internal_config.add_second_vertex_index(1);
  internal_config.add_edge_weight(3.0);
  internal_config.set_symmetric_edge_weights(true);
  regularizer_config->set_config(internal_config.SerializeAsString());
  // second instance: negative tau, asymmetric edges (both directions listed)
  regularizer_config = master_config.add_regularizer_config();
  regularizer_config->set_name("NetPlsaRegularizer_2");
  regularizer_config->set_type(::artm::RegularizerType_NetPlsaPhi);
  regularizer_config->set_tau(-2.0f);
  ::artm::NetPlsaPhiConfig internal_config_2;
  internal_config_2.set_class_id("@time_class");
  internal_config_2.add_vertex_name("time_1");
  internal_config_2.add_vertex_name("time_2");
  internal_config_2.add_first_vertex_index(0);
  internal_config_2.add_second_vertex_index(1);
  internal_config_2.add_edge_weight(-3.0);
  internal_config_2.add_first_vertex_index(1);
  internal_config_2.add_second_vertex_index(0);
  internal_config_2.add_edge_weight(8.0);
  internal_config_2.set_symmetric_edge_weights(false);
  regularizer_config->set_config(internal_config_2.SerializeAsString());
  artm::MasterModel master(master_config);
  ::artm::test::Api api(master);
  // generate data: nTokens default-class tokens plus two @time_class tokens
  artm::Batch batch;
  batch.set_id("11972762-6a23-4524-b089-7122816aff72");
  for (int i = 0; i < nTokens; i++) {
    std::stringstream str;
    str << "token" << i;
    batch.add_token(str.str());
    batch.add_class_id("@default_class");
  }
  batch.add_token("time_1");
  batch.add_class_id("@time_class");
  batch.add_token("time_2");
  batch.add_class_id("@time_class");
  for (int iDoc = 0; iDoc < nDocs; iDoc++) {
    artm::Item* item = batch.add_item();
    item->set_id(iDoc);
    for (int iToken = 0; iToken < nTokens; ++iToken) {
      item->add_token_id(iToken);
      item->add_transaction_start_index(item->transaction_start_index_size());
      // NOTE(review): with nTokens == 10 the condition iToken > 40 never
      // holds, so background_count is always 0 (and rand() is never called).
      // Looks copied from a larger fixture -- confirm intent.
      int background_count = (iToken > 40) ? (1 + rand() % 5) : 0;  // NOLINT
      int topical_count = ((iToken < 40) && ((iToken % 10) == (iDoc % 10))) ? 10 : 0;
      item->add_token_weight(static_cast<float>(background_count + topical_count));
    }
    // Documents 0-1 carry the time_1 vertex token, document 2 carries time_2.
    if (iDoc < 2) {
      item->add_token_id(nTokens);
      item->add_transaction_start_index(item->transaction_start_index_size());
      item->add_token_weight(1.0f);
    } else if (iDoc == 2) {
      item->add_token_id(nTokens + 1);
      item->add_transaction_start_index(item->transaction_start_index_size());
      item->add_token_weight(1.0f);
    }
    item->add_transaction_start_index(item->transaction_start_index_size());
  }
  // iterations: two offline fit passes
  auto offline_args = api.Initialize({ std::make_shared<artm::Batch>(batch) });
  for (int iter = 0; iter < 2; ++iter)
    master.FitOfflineModel(offline_args);
  // get and check theta: only topic 2's column is compared to the reference
  ::artm::ThetaMatrix theta_matrix = master.GetThetaMatrix();
  std::vector<float> real_values;
  for (int j = 0; j < nDocs; ++j) {
    real_values.push_back(theta_matrix.item_weights(j).value(2));
  }
  std::vector<float> expected_values = { 0.000f, 0.000f, 0.000f, 0.000f, 0.999f };
  for (int i = 0; i < nDocs; ++i) {
    ASSERT_NEAR(real_values[i], expected_values[i], 1.0e-3);
  }
}
// artm_tests.exe --gtest_filter=Regularizers.RelativeRegularization
TEST(Regularizers, RelativeRegularization) {
  int nTopics = 50;
  int nTokens = 50;
  int nDocs = 100;

  // generate batch: nTokens distinct tokens; every item contains every token
  // exactly once with weight 1.0 (uniform corpus).
  std::shared_ptr<::artm::Batch> batch(new ::artm::Batch());
  batch->set_id(artm::test::Helpers::getUniqueString());
  for (int i = 0; i < nTokens; i++) {
    std::stringstream str;
    str << "token" << i;
    batch->add_token(str.str());
  }
  for (int i = 0; i < nDocs; ++i) {
    artm::Item* item = batch->add_item();
    std::stringstream str;
    str << "item_" << i;
    item->set_title(str.str());
    for (int iToken = 0; iToken < nTokens; ++iToken) {
      item->add_token_id(iToken);
      item->add_transaction_start_index(item->transaction_start_index_size());
      item->add_token_weight(1.0);
    }
    item->add_transaction_start_index(item->transaction_start_index_size());
  }

  // part 1
  // create master
  ::artm::MasterModelConfig master_config = ::artm::test::TestMother::GenerateMasterModelConfig(nTopics);
  master_config.set_cache_theta(true);

  // create regularizer: sparsing SmoothSparsePhi (negative tau) with
  // gamma = 0.5, i.e. relative (adaptive) regularization.
  ::artm::RegularizerConfig* regularizer_config = master_config.add_regularizer_config();
  regularizer_config->set_name("SparsePhi");
  regularizer_config->set_type(::artm::RegularizerType_SmoothSparsePhi);
  regularizer_config->set_tau(-0.5);
  regularizer_config->set_gamma(0.5);
  // NOTE(review): the payload is a DecorrelatorPhiConfig although the type is
  // SmoothSparsePhi — confirm this is intentional (both may be empty messages).
  regularizer_config->set_config(::artm::DecorrelatorPhiConfig().SerializeAsString());

  // create sparsity score
  ::artm::ScoreConfig* score_config = master_config.add_score_config();
  score_config->set_name("SparsityPhi");
  score_config->set_type(::artm::ScoreType_SparsityPhi);
  score_config->set_config(::artm::SparsityPhiScore().SerializeAsString());

  artm::MasterModel master(master_config);
  ::artm::test::Api api(master);

  // Expected Phi sparsity after each of the 20 fit iterations (monotonically
  // increasing because the regularizer keeps sparsifying Phi).
  std::vector<double> true_score = { 0.244, 0.380, 0.478, 0.544, 0.588,
                                     0.627, 0.665, 0.694, 0.716, 0.734,
                                     0.750, 0.768, 0.781, 0.790, 0.804,
                                     0.814, 0.824, 0.830, 0.836, 0.839 };

  auto offline_args = api.Initialize({ batch });
  for (int i = 0; i < 20; ++i) {
    master.FitOfflineModel(offline_args);

    ::artm::GetScoreArrayArgs args;
    args.set_score_name("SparsityPhi");
    auto sparsity_scores = master.GetScoreArrayAs< ::artm::SparsityPhiScore>(args);
    // One score entry is appended per completed iteration.
    ASSERT_EQ(sparsity_scores.size(), (i + 1));
    ASSERT_NEAR(sparsity_scores.back().value(), true_score[i], 1e-3);
  }
}
| 5,038 |
347 | package org.ovirt.engine.ui.webadmin.section.main.presenter.popup;
import org.ovirt.engine.ui.common.presenter.AbstractModelBoundPopupPresenterWidget;
import org.ovirt.engine.ui.uicommonweb.models.ConfirmationModel;
import com.google.gwt.event.shared.EventBus;
import com.google.inject.Inject;
/**
 * The Presenter Widget for the System Permissions removal pop-up.
 * <p>
 * The pop-up is backed by a {@link ConfirmationModel}; this class only wires
 * the model-bound view to the event bus through the generic base class and
 * adds no behavior of its own.
 */
public class SystemPermissionsRemoveConfirmationPopupPresenterWidget extends
        AbstractModelBoundPopupPresenterWidget<ConfirmationModel,
        SystemPermissionsRemoveConfirmationPopupPresenterWidget.ViewDef> {

    /**
     * The view definition interface for this pop-up.
     */
    public interface ViewDef extends AbstractModelBoundPopupPresenterWidget.ViewDef<ConfirmationModel> {
    }

    /**
     * Constructor.
     * @param eventBus The GWT event bus.
     * @param view The view.
     */
    @Inject
    public SystemPermissionsRemoveConfirmationPopupPresenterWidget(EventBus eventBus, ViewDef view) {
        super(eventBus, view);
    }
}
| 333 |
2,219 | <reponame>lazymartin/naiveproxy
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "net/cert/internal/trust_store_in_memory.h"
namespace net {

TrustStoreInMemory::TrustStoreInMemory() = default;
TrustStoreInMemory::~TrustStoreInMemory() = default;

// Returns true when no certificates (trusted or otherwise) are stored.
bool TrustStoreInMemory::IsEmpty() const {
  return entries_.empty();
}

// Removes every stored certificate together with its trust setting.
void TrustStoreInMemory::Clear() {
  entries_.clear();
}

// Registers |cert| as a trust anchor whose embedded constraints are NOT
// enforced during verification.
void TrustStoreInMemory::AddTrustAnchor(scoped_refptr<ParsedCertificate> cert) {
  AddCertificate(std::move(cert), CertificateTrust::ForTrustAnchor());
}

// Registers |cert| as a trust anchor whose embedded constraints ARE enforced.
void TrustStoreInMemory::AddTrustAnchorWithConstraints(
    scoped_refptr<ParsedCertificate> cert) {
  AddCertificate(std::move(cert),
                 CertificateTrust::ForTrustAnchorEnforcingConstraints());
}

// Test-only helper: marks |cert| as explicitly distrusted.
void TrustStoreInMemory::AddDistrustedCertificateForTest(
    scoped_refptr<ParsedCertificate> cert) {
  AddCertificate(std::move(cert), CertificateTrust::ForDistrusted());
}

// Stores |cert| for issuer lookup without assigning it any trust.
void TrustStoreInMemory::AddCertificateWithUnspecifiedTrust(
    scoped_refptr<ParsedCertificate> cert) {
  AddCertificate(std::move(cert), CertificateTrust::ForUnspecified());
}

// Appends to |issuers| every stored certificate whose normalized subject
// equals |cert|'s normalized issuer.
void TrustStoreInMemory::SyncGetIssuersOf(const ParsedCertificate* cert,
                                          ParsedCertificateList* issuers) {
  auto range = entries_.equal_range(cert->normalized_issuer().AsStringPiece());
  for (auto it = range.first; it != range.second; ++it)
    issuers->push_back(it->second.cert);
}

// Looks up the trust assigned to |cert|, matching by pointer identity or by
// exact DER equality; yields "unspecified" when the certificate is unknown.
void TrustStoreInMemory::GetTrust(const scoped_refptr<ParsedCertificate>& cert,
                                  CertificateTrust* trust,
                                  base::SupportsUserData* debug_data) const {
  auto range = entries_.equal_range(cert->normalized_subject().AsStringPiece());
  for (auto it = range.first; it != range.second; ++it) {
    if (cert.get() == it->second.cert.get() ||
        cert->der_cert() == it->second.cert->der_cert()) {
      *trust = it->second.trust;
      // NOTE: ambiguity when there are duplicate entries.
      return;
    }
  }
  *trust = CertificateTrust::ForUnspecified();
}

// Returns true if a certificate with the same DER encoding as |cert| is
// stored, regardless of its trust setting. Linear scan over all entries.
bool TrustStoreInMemory::Contains(const ParsedCertificate* cert) const {
  for (const auto& it : entries_) {
    if (cert->der_cert() == it.second.cert->der_cert())
      return true;
  }
  return false;
}

TrustStoreInMemory::Entry::Entry() = default;
TrustStoreInMemory::Entry::Entry(const Entry& other) = default;
TrustStoreInMemory::Entry::~Entry() = default;

// Shared implementation of the Add* methods: keys the entry by the cert's
// normalized subject so SyncGetIssuersOf()/GetTrust() can find it later.
void TrustStoreInMemory::AddCertificate(scoped_refptr<ParsedCertificate> cert,
                                        const CertificateTrust& trust) {
  Entry entry;
  entry.cert = std::move(cert);
  entry.trust = trust;
  // TODO(mattm): should this check for duplicate certificates?
  entries_.insert(
      std::make_pair(entry.cert->normalized_subject().AsStringPiece(), entry));
}

}  // namespace net
| 1,083 |
776 | <reponame>Diffblue-benchmarks/actframework
package testapp.endpoint;
import act.controller.Controller;
import act.controller.annotation.UrlContext;
import org.osgl.mvc.annotation.GetAction;
import testapp.model.VersionedModel;
@UrlContext("/etag")
public class ETagTestBed extends Controller.Util {

    /**
     * Test-bed endpoint for ETag handling: looks up a {@link VersionedModel}
     * by its identifier.
     *
     * @param id the model identifier, bound from the URL path segment
     * @return the model with the given id (the framework derives the ETag
     *         from the returned versioned model)
     */
    @GetAction("{id}")
    public VersionedModel get(String id) {
        return VersionedModel.getById(id);
    }
}
| 145 |
2,603 | <filename>FreeRTOS/Demo/ColdFire_MCF52259_CodeWarrior/Freescale_Headers/MCF52259_PMM.h
/* Coldfire C Header File
* Copyright Freescale Semiconductor Inc
* All rights reserved.
*
* 2008/04/17 Revision: 0.2
*
* (c) Copyright UNIS, spol. s r.o. 1997-2008
* UNIS, spol. s r.o.
* Jundrovska 33
* 624 00 Brno
* Czech Republic
* http : www.processorexpert.com
* mail : <EMAIL>
*/
#ifndef __MCF52259_PMM_H__
#define __MCF52259_PMM_H__

/*********************************************************************
*
* Power Management (PMM)
*
*********************************************************************/

/* Register read/write macros */
/* LPICR: Low-Power Interrupt Control Register */
#define MCF_PMM_LPICR (*(vuint8 *)(0x40000012))
/* LPCR: Low-Power Control Register */
#define MCF_PMM_LPCR (*(vuint8 *)(0x40110007))

/* Bit definitions and macros for MCF_PMM_LPICR */
#define MCF_PMM_LPICR_XLPM_IPL(x) (((x)&0x7)<<0x4)  /* 3-bit field, bits 4..6 */
#define MCF_PMM_LPICR_ENBSTOP (0x80)                /* bit 7 */

/* Bit definitions and macros for MCF_PMM_LPCR */
#define MCF_PMM_LPCR_LVDSE (0x2)
#define MCF_PMM_LPCR_STPMD(x) (((x)&0x3)<<0x3)      /* 2-bit field, bits 3..4 */
#define MCF_PMM_LPCR_STPMD_SYS_DISABLED (0)
#define MCF_PMM_LPCR_STPMD_SYS_CLKOUT_DISABLED (0x8)
#define MCF_PMM_LPCR_STPMD_ONLY_OSC_ENABLED (0x10)
#define MCF_PMM_LPCR_STPMD_ALL_DISABLED (0x18)
#define MCF_PMM_LPCR_LPMD(x) (((x)&0x3)<<0x6)       /* 2-bit field, bits 6..7 */
#define MCF_PMM_LPCR_LPMD_RUN (0)
#define MCF_PMM_LPCR_LPMD_DOZE (0x40)
#define MCF_PMM_LPCR_LPMD_WAIT (0x80)
#define MCF_PMM_LPCR_LPMD_STOP (0xC0)

#endif /* __MCF52259_PMM_H__ */
| 925 |
575 | // Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef FUCHSIA_ENGINE_FAKE_CONTEXT_H_
#define FUCHSIA_ENGINE_FAKE_CONTEXT_H_
#include <fuchsia/web/cpp/fidl.h>
#include <fuchsia/web/cpp/fidl_test_base.h>
#include <lib/fidl/cpp/binding.h>
#include <lib/fidl/cpp/binding_set.h>
#include <utility>
#include "base/callback.h"
#include "base/macros.h"
// A fake Frame implementation that manages its own lifetime.
class FakeFrame : public fuchsia::web::testing::Frame_TestBase {
 public:
  explicit FakeFrame(fidl::InterfaceRequest<fuchsia::web::Frame> request);
  ~FakeFrame() override;

  // Registers a callback to run when a NavigationEventListener is set
  // (a base::OnceClosure, so it fires at most once).
  void set_on_set_listener_callback(base::OnceClosure callback) {
    on_set_listener_callback_ = std::move(callback);
  }

  // Tests can provide e.g. a mock NavigationController, to which the FakeFrame
  // will forward GetNavigationController() bind requests.
  void set_navigation_controller(
      fuchsia::web::NavigationController* controller) {
    navigation_controller_ = controller;
  }

  // The most recently set NavigationEventListener, or null.
  fuchsia::web::NavigationEventListener* listener() { return listener_.get(); }

  // fuchsia::web::Frame implementation.
  void GetNavigationController(
      fidl::InterfaceRequest<fuchsia::web::NavigationController> controller)
      override;
  void SetNavigationEventListener(
      fidl::InterfaceHandle<fuchsia::web::NavigationEventListener> listener)
      override;

  // fuchsia::web::testing::Frame_TestBase implementation.
  void NotImplemented_(const std::string& name) override;

 private:
  fidl::Binding<fuchsia::web::Frame> binding_;
  fuchsia::web::NavigationEventListenerPtr listener_;
  base::OnceClosure on_set_listener_callback_;
  // Not owned; optional target for GetNavigationController() requests.
  fuchsia::web::NavigationController* navigation_controller_ = nullptr;
  fidl::BindingSet<fuchsia::web::NavigationController>
      navigation_controller_bindings_;

  DISALLOW_COPY_AND_ASSIGN(FakeFrame);
};
// An implementation of Context that creates and binds FakeFrames.
class FakeContext : public fuchsia::web::testing::Context_TestBase {
 public:
  using CreateFrameCallback = base::RepeatingCallback<void(FakeFrame*)>;

  FakeContext();
  ~FakeContext() override;

  // Sets a callback that is invoked whenever new Frames are bound.
  void set_on_create_frame_callback(CreateFrameCallback callback) {
    on_create_frame_callback_ = callback;
  }

  // fuchsia::web::Context implementation.
  void CreateFrame(
      fidl::InterfaceRequest<fuchsia::web::Frame> frame_request) override;

  // fuchsia::web::testing::Context_TestBase implementation.
  void NotImplemented_(const std::string& name) override;

 private:
  CreateFrameCallback on_create_frame_callback_;

  DISALLOW_COPY_AND_ASSIGN(FakeContext);
};
#endif // FUCHSIA_ENGINE_FAKE_CONTEXT_H_
| 910 |
454 | package io.vertx.up.util;
import java.time.Instant;

import org.junit.Test;

import static org.junit.Assert.assertNotNull;
public class PeriodT {

    /**
     * Verifies that {@link Period#parseAt(String)} parses a weekly
     * "W,HH:mm,offset" expression into a non-null {@link Instant}.
     * <p>
     * The original test only printed the result; without an assertion it
     * could never fail, so a null-check is asserted before the debug output.
     */
    @Test
    public void testD() {
        final Instant instant = Period.parseAt("W,17:00,2");
        assertNotNull("Period.parseAt(\"W,17:00,2\") must not return null", instant);
        System.out.println(Ut.toDateTime(instant));
    }
}
| 108 |
1,545 | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.bookkeeper.bookie;
import java.io.File;
import java.io.FileDescriptor;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.nio.channels.FileChannel;
/**
* A FileChannel for the JournalChannel read and write, we can use this interface to extend the FileChannel
* which we use in the JournalChannel.
*/
interface BookieFileChannel {

    /**
     * Obtains the underlying {@link FileChannel} from the provider.
     *
     * @return the file channel used for journal reads and writes
     * @throws FileNotFoundException if the backing file does not exist
     * @throws IOException if the channel cannot be opened
     */
    FileChannel getFileChannel() throws FileNotFoundException, IOException;

    /**
     * Checks whether the given file exists.
     *
     * @param file the file to test
     * @return true if the file exists, false otherwise
     */
    boolean fileExists(File file);

    /**
     * Gets the file descriptor of the opened file.
     *
     * @return the open file's descriptor
     * @throws IOException if the descriptor cannot be obtained
     */
    FileDescriptor getFD() throws IOException;

    /**
     * Closes the file channel and releases all resources.
     */
    void close() throws IOException;
}
| 524 |
677 | <reponame>hugmyndakassi/hvmi<gh_stars>100-1000
/*
* Copyright (c) 2020 Bitdefender
* SPDX-License-Identifier: Apache-2.0
*/
#include "hnd_loggather.h"
#include "alerts.h"
#include "guests.h"
INTSTATUS
IntAgentHandleLogGatherVmcall(
    _In_opt_ void *Reserved,
    _In_ PIG_ARCH_REGS Registers
    )
///
/// @brief Handle a VMCALL issued by a log gather agent.
///
/// This handler will forward all the info reported by the in-guest agent to the integrator. The log gather agent
/// collects log data from the guest and reports it back to the integrator.
///
/// @param[in]  Reserved    Reserved.
/// @param[in]  Registers   The general purpose registers state.
///
/// @retval #INT_STATUS_SUCCESS On success.
/// @retval #INT_STATUS_INVALID_PARAMETER If an invalid parameter is supplied.
/// @retval #INT_STATUS_NOT_FOUND If the agent did not provide any data.
/// @retval #INT_STATUS_NOT_SUPPORTED If the OS version is not supported or if the VMCALL interface version mismatched.
///
{
    INTSTATUS status;
    QWORD dataAddr;
    DWORD retLen;
    PEVENT_AGENT_EVENT agentEvent;
    AGENT_LGT_EVENT_HEADER header;

    UNREFERENCED_PARAMETER(Reserved);

    if (NULL == Registers)
    {
        return INT_STATUS_INVALID_PARAMETER_2;
    }

    retLen = 0;
    agentEvent = &gAlert.Agent;

    memset(agentEvent, 0, sizeof(*agentEvent));
    memset(&header, 0, sizeof(header));

    // Data address will be in RBX on x64 and ESI on x86.
    dataAddr = gGuest.Guest64 ? Registers->Rbx : (Registers->Rsi & 0xFFFFFFFF);
    if (0 == dataAddr)
    {
        ERROR("[ERROR] Data address is 0!\n");
        return INT_STATUS_NOT_FOUND;
    }

    // Read the event structure.
    agentEvent->AgentTag = IG_AGENT_TAG_LOG_GATHER_TOOL;
    agentEvent->Event = agentMessage;
    agentEvent->ErrorCode = 0;

    // Fill in the process that issued the VMCALL, depending on the guest OS.
    if (gGuest.OSType == introGuestWindows)
    {
        IntAlertFillWinProcessCurrent(&agentEvent->CurrentProcess);
    }
    else if (gGuest.OSType == introGuestLinux)
    {
        IntAlertFillLixCurrentProcess(&agentEvent->CurrentProcess);
    }
    else
    {
        return INT_STATUS_NOT_SUPPORTED;
    }

    // Pause the VCPUs while we read.
    IntPauseVcpus();

    status = IntVirtMemRead(dataAddr, sizeof(AGENT_LGT_EVENT_HEADER), Registers->Cr3, &header, &retLen);
    if (!INT_SUCCESS(status))
    {
        ERROR("[ERROR] IntVirtMemRead failed: 0x%08x\n", status);
        goto resume_and_exit;
    }

    // Validate the in-guest agent's ABI before trusting the payload layout.
    if (header.Version != LGT_EVENT_VERSION)
    {
        ERROR("[ERROR] Version mismatch: %x (read) vs %x (known).\n", header.Version, LGT_EVENT_VERSION);
        status = INT_STATUS_NOT_SUPPORTED;
        goto resume_and_exit;
    }

    if (header.Size != LGT_EVENT_SIZE)
    {
        ERROR("[ERROR] Size mismatch: %d (read) vs %lu (known).\n", header.Size, LGT_EVENT_SIZE);
        status = INT_STATUS_NOT_SUPPORTED;
        goto resume_and_exit;
    }

    // Re-read the full event (header included) into the alert buffer.
    status = IntVirtMemRead(dataAddr, sizeof(AGENT_LGT_EVENT), Registers->Cr3, &agentEvent->LogGatherEvent, &retLen);
    if (!INT_SUCCESS(status))
    {
        ERROR("[ERROR] IntVirtMemRead failed: 0x%08x\n", status);
        goto resume_and_exit;
    }

    if (retLen != sizeof(AGENT_LGT_EVENT))
    {
        ERROR("[ERROR] Read only %d bytes, needed %zu!\n", retLen, sizeof(AGENT_LGT_EVENT));
        status = INT_STATUS_INVALID_DATA_SIZE;
        goto resume_and_exit;
    }

    // Log
    switch (header.EventType)
    {
    case lgtEventData:
        TRACE("[LOGTOOL] Data from %s: %d bytes\n",
              utf16_for_log(agentEvent->LogGatherEvent.DataEvent.FileName),
              agentEvent->LogGatherEvent.DataEvent.DataSize);
        break;
    case lgtEventError:
        TRACE("[LOGTOOL] Error: 0x%08x\n", agentEvent->LogGatherEvent.ErrorEvent.ErrorCode);
        break;
    }

    status = INT_STATUS_SUCCESS;

resume_and_exit:
    // Always resume the VCPUs, then notify the integrator only on success.
    IntResumeVcpus();

    if (INT_SUCCESS(status))
    {
        status = IntNotifyIntroEvent(introEventAgentEvent, agentEvent, sizeof(*agentEvent));
        if (!INT_SUCCESS(status))
        {
            ERROR("[ERROR] IntNotifyIntroEvent failed: 0x%08x\n", status);
            return status;
        }
    }

    return status;
}
| 1,784 |
5,355 | /**
* WARNING: All code in this package is a work in progress for a new execution engine.
* It is not really "wired up" and can't be really used and should not be used yet!
*/
package graphql.execution.nextgen; | 57 |
12,252 | <reponame>rmartinc/keycloak
/*
* Copyright 2019 Red Hat, Inc. and/or its affiliates
* and other contributors as indicated by the @author tags.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.keycloak.credential;
import org.keycloak.models.KeycloakSession;
import org.keycloak.models.UserModel;
/**
* @author <a href="mailto:<EMAIL>"><NAME></a>
*/
public class CredentialTypeMetadataContext {

    private UserModel user;

    // Instances are created only through the builder below.
    private CredentialTypeMetadataContext() {
    }

    /**
     * @return user, for which we create metadata. Could be null
     */
    public UserModel getUser() {
        return user;
    }

    /**
     * @return a new builder for assembling a {@link CredentialTypeMetadataContext}
     */
    public static CredentialTypeMetadataContext.CredentialTypeMetadataContextBuilder builder() {
        return new CredentialTypeMetadataContext.CredentialTypeMetadataContextBuilder();
    }

    // BUILDER

    /**
     * Builder for {@link CredentialTypeMetadataContext}.
     */
    public static class CredentialTypeMetadataContextBuilder {

        private CredentialTypeMetadataContext instance = new CredentialTypeMetadataContext();

        /**
         * Sets the user for which credential-type metadata is created.
         *
         * @param user the user; may be null
         * @return this builder for chaining
         */
        public CredentialTypeMetadataContext.CredentialTypeMetadataContextBuilder user(UserModel user) {
            instance.user = user;
            return this;
        }

        /**
         * Builds the context.
         *
         * @param session the Keycloak session (accepted for API symmetry; not used here)
         * @return the assembled context
         */
        public CredentialTypeMetadataContext build(KeycloakSession session) {
            // Possible to have null user
            return instance;
        }
    }
}
| 603 |
642 | <reponame>YugN17/People-MVVM
package io.github.erikjhordanrey.people_mvvm.viewmodel;
import android.widget.ImageView;
import androidx.databinding.BindingAdapter;
import com.bumptech.glide.Glide;
public class Binders {

    /**
     * Data-binding adapter for the {@code imageUrl} XML attribute: loads the
     * image at {@code url} into {@code imageView} using Glide.
     *
     * @param imageView the target view
     * @param url the image URL to load
     */
    @BindingAdapter("imageUrl")
    public static void setImageUrl(ImageView imageView, String url) {
        Glide.with(imageView.getContext()).load(url).into(imageView);
    }
}
| 150 |
310 | <filename>gear/software/p/psychonauts.json
{
"name": "Psychonauts",
"description": "A psychic platformer game.",
"url": "https://en.wikipedia.org/wiki/Psychonauts"
} | 60 |
1,552 | # -*- coding: utf-8 -*
"""
ERNIE 使用的学习率设置
"""
import logging
from paddle.optimizer.lr import LRScheduler
import numpy as np
class LinearWarmupDecay(LRScheduler):
    """Learning-rate schedule used for ERNIE training: linear warmup up to
    ``base_lr`` followed by polynomial decay down to ``end_lr``.
    """
    def __init__(self, base_lr, end_lr, warmup_steps, decay_steps, num_train_steps, power=1.0, verbose=False,
                 cycle=False):
        """Warm the LR up linearly from 0 to ``base_lr``, then decay it
        polynomially from ``base_lr`` down to ``end_lr``.

        :param base_lr: peak learning rate reached at the end of warmup
        :param end_lr: final learning rate after decay
        :param warmup_steps: number of linear warmup steps
        :param decay_steps: number of decay steps (expected to equal num_train_steps)
        :param num_train_steps: total number of training steps
        :param power: exponent of the polynomial decay
        :param verbose: passed through to the base LRScheduler
        :param cycle: whether the decay restarts (cycles) after decay_steps
        """
        self.base_lr = base_lr
        self.warmup_steps = warmup_steps
        self.num_train_steps = num_train_steps
        self.decay_steps = decay_steps  # expected to match num_train_steps
        self.end_lr = end_lr
        self.power = power
        self.cycle = cycle
        # tip: the base-class __init__ must be called last, because it
        # immediately invokes get_lr() once, which needs the fields above.
        LRScheduler.__init__(self, learning_rate=base_lr, last_epoch=-1, verbose=verbose)

    def get_lr(self):
        """Compute the learning rate for the current step (``self.last_epoch``)."""
        if self.last_epoch < self.warmup_steps:
            # Warmup phase: scale linearly from 0 up to base_lr.
            return self.base_lr * (self.last_epoch / self.warmup_steps)
        else:
            return self._polynomial_decay(learning_rate=self.base_lr,
                                          decay_steps=self.decay_steps,
                                          end_learning_rate=self.end_lr,
                                          power=self.power,
                                          cycle=self.cycle)

    def _polynomial_decay(self, learning_rate, decay_steps, end_learning_rate=0.0001, power=1.0, cycle=False):
        """
        The same algorithm as
        paddle/fluid/layers/learning_rate_scheduler.py:
        polynomial_decay
        """
        global_step = self.last_epoch
        if cycle:
            # Restart the decay every decay_steps by stretching the horizon.
            div_res = np.ceil(self.last_epoch / decay_steps)
            if self.last_epoch == 0:
                div_res = 1.0
            decay_steps = decay_steps * div_res
        else:
            # Clamp so the LR stays at end_learning_rate once decay finishes.
            global_step = min(decay_steps, self.last_epoch)
        decayed_lr = (learning_rate - end_learning_rate) * \
                     ((1.0 - global_step / decay_steps) ** power) + end_learning_rate
        return decayed_lr
def exclude_from_weight_decay(name):
    """Return True if the named parameter should be excluded from weight decay.

    LayerNorm parameters and bias parameters (conventionally suffixed with
    "_bias", "_b" or ".b_0") are excluded, the standard practice for
    BERT/ERNIE-style training.

    :param name: a parameter name string, or a parameter object exposing a
        ``name`` attribute (e.g. a paddle Parameter).
    :return: True when the parameter must not receive weight decay.
    """
    if not isinstance(name, str):
        name = name.name
    if "layer_norm" in name:
        return True
    # str.endswith accepts a tuple of candidate suffixes.
    return name.endswith(("_bias", "_b", ".b_0"))
def lr_decay_fn(param, decay_rate, n_layers, server_layers):
    """Per-parameter layer-wise learning-rate decay factor.

    Maps each parameter (via its name) to a depth in the network and returns
    ``decay_rate ** (n_layers + 2 - depth)``, so parameters closer to the
    input (smaller depth) get a smaller LR multiplier.

    NOTE(review): branch order matters — e.g. "nlu_encoder_layer_..." also
    contains "encoder_layer", so the ``index(...) == 0`` checks keep the
    branches disjoint. Assumes the project's naming scheme (e.g.
    "encoder_layer_<k>_...") — confirm against the model definition.
    """
    if "encoder_layer" in param.name and param.name.index("encoder_layer") == 0:
        # Shared-encoder layer k -> depth k + 1.
        depth = int(param.name.split("_")[2]) + 1
    elif "server_post_encoder_layer" in param.name or "sharing_to_task_fc.w_0" in param.name:
        depth = server_layers
    elif "nlu_encoder_layer" in param.name and param.name.index("nlu_encoder_layer") == 0:
        depth = int(param.name.split("_")[3]) + 1
    elif "nlg_encoder_layer" in param.name and param.name.index("nlg_encoder_layer") == 0:
        depth = int(param.name.split("_")[3]) + 1
    elif 'nlu_post_encoder_layer' in param.name or "nlg_post_encoder_layer" in param.name:
        depth = n_layers + 1
    elif "embedding" in param.name or "emb_hidden_mapping" in param.name or 'pre_encoder_layer' in param.name:
        # Input-side parameters sit at depth 0 (strongest decay).
        depth = 0
    else:
        # Task heads / everything else: exponent 0, i.e. no decay.
        depth = n_layers + 2
    return decay_rate ** (n_layers + 2 - depth)
def lr_decay_freeze_fn(param, decay_rate, n_layers, server_layers):
    """Like :func:`lr_decay_fn`, but freezes the shared/server parameters.

    Branches belonging to the shared encoder / embeddings override
    ``decay_rate`` with 0, zeroing their LR multiplier (freeze); the
    NLU/NLG-specific branches override it with 1 (no layer-wise decay).
    The branch ordering and name parsing mirror :func:`lr_decay_fn`.
    """
    if "encoder_layer" in param.name and param.name.index("encoder_layer") == 0:
        depth = int(param.name.split("_")[2]) + 1
        decay_rate = 0  # freeze shared encoder layers
    elif "server_post_encoder_layer" in param.name or "sharing_to_task_fc.w_0" in param.name:
        depth = server_layers
        decay_rate = 0  # freeze server-side shared parameters
    elif "nlu_encoder_layer" in param.name and param.name.index("nlu_encoder_layer") == 0:
        depth = int(param.name.split("_")[3]) + 1
        decay_rate = 1
    elif "nlg_encoder_layer" in param.name and param.name.index("nlg_encoder_layer") == 0:
        depth = int(param.name.split("_")[3]) + 1
        decay_rate = 1
    elif 'nlu_post_encoder_layer' in param.name or "nlg_post_encoder_layer" in param.name:
        depth = n_layers + 1
        decay_rate = 1
    elif "embedding" in param.name or "emb_hidden_mapping" in param.name or 'pre_encoder_layer' in param.name:
        depth = 0
        decay_rate = 0  # freeze embeddings / input-side parameters
    else:
        depth = n_layers + 2
        decay_rate = 1
    return decay_rate ** (n_layers + 2 - depth)
| 2,412 |
777 | // Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "modules/serviceworkers/WaitUntilObserver.h"
#include "bindings/core/v8/ScriptFunction.h"
#include "bindings/core/v8/ScriptPromise.h"
#include "bindings/core/v8/ScriptValue.h"
#include "bindings/core/v8/V8Binding.h"
#include "core/dom/ExceptionCode.h"
#include "core/dom/ExecutionContext.h"
#include "modules/serviceworkers/ServiceWorkerGlobalScope.h"
#include "platform/LayoutTestSupport.h"
#include "public/platform/Platform.h"
#include "public/platform/modules/serviceworker/WebServiceWorkerEventResult.h"
#include "wtf/Assertions.h"
#include <v8.h>
namespace blink {
namespace {

// Timeout before a service worker that was given window interaction
// permission loses them. The unit is seconds.
const unsigned kWindowInteractionTimeout = 10;
const unsigned kWindowInteractionTimeoutForTest = 1;

unsigned windowInteractionTimeout() {
  return LayoutTestSupport::isRunningLayoutTest()
             ? kWindowInteractionTimeoutForTest
             : kWindowInteractionTimeout;
}

}  // anonymous namespace

// Adapter attached to both sides of a waitUntil() promise: it reports a
// rejection as an error and balances the pending-activity count when the
// promise settles.
class WaitUntilObserver::ThenFunction final : public ScriptFunction {
 public:
  enum ResolveType {
    Fulfilled,
    Rejected,
  };

  static v8::Local<v8::Function> createFunction(ScriptState* scriptState,
                                                WaitUntilObserver* observer,
                                                ResolveType type) {
    ThenFunction* self = new ThenFunction(scriptState, observer, type);
    return self->bindToV8Function();
  }

  DEFINE_INLINE_VIRTUAL_TRACE() {
    visitor->trace(m_observer);
    ScriptFunction::trace(visitor);
  }

 private:
  ThenFunction(ScriptState* scriptState,
               WaitUntilObserver* observer,
               ResolveType type)
      : ScriptFunction(scriptState),
        m_observer(observer),
        m_resolveType(type) {}

  ScriptValue call(ScriptValue value) override {
    ASSERT(m_observer);
    ASSERT(m_resolveType == Fulfilled || m_resolveType == Rejected);
    if (m_resolveType == Rejected) {
      m_observer->reportError(value);
      // Keep the rejection observable by propagating a rejected promise.
      value =
          ScriptPromise::reject(value.getScriptState(), value).getScriptValue();
    }
    m_observer->decrementPendingActivity();
    m_observer = nullptr;
    return value;
  }

  Member<WaitUntilObserver> m_observer;
  ResolveType m_resolveType;
};

WaitUntilObserver* WaitUntilObserver::create(ExecutionContext* context,
                                             EventType type,
                                             int eventID) {
  return new WaitUntilObserver(context, type, eventID);
}

void WaitUntilObserver::willDispatchEvent() {
  m_eventDispatchTime = WTF::currentTime();
  // When handling a notificationclick event, we want to allow one window to
  // be focused or opened. These calls are allowed between the call to
  // willDispatchEvent() and the last call to decrementPendingActivity(). If
  // waitUntil() isn't called, that means between willDispatchEvent() and
  // didDispatchEvent().
  if (m_type == NotificationClick)
    m_executionContext->allowWindowInteraction();

  incrementPendingActivity();
}

void WaitUntilObserver::didDispatchEvent(bool errorOccurred) {
  if (errorOccurred)
    m_hasError = true;
  decrementPendingActivity();
  m_eventDispatched = true;
}

void WaitUntilObserver::waitUntil(ScriptState* scriptState,
                                  ScriptPromise scriptPromise,
                                  ExceptionState& exceptionState) {
  // Per spec, waitUntil() may not be called after the event handler returned.
  if (m_eventDispatched) {
    exceptionState.throwDOMException(InvalidStateError,
                                     "The event handler is already finished.");
    return;
  }

  if (!m_executionContext)
    return;

  // When handling a notificationclick event, we want to allow one window to
  // be focused or opened. See comments in ::willDispatchEvent(). When
  // waitUntil() is being used, opening or closing a window must happen in a
  // timeframe specified by windowInteractionTimeout(), otherwise the calls
  // will fail.
  if (m_type == NotificationClick)
    m_consumeWindowInteractionTimer.startOneShot(windowInteractionTimeout(),
                                                 BLINK_FROM_HERE);

  incrementPendingActivity();
  scriptPromise.then(
      ThenFunction::createFunction(scriptState, this, ThenFunction::Fulfilled),
      ThenFunction::createFunction(scriptState, this, ThenFunction::Rejected));
}

WaitUntilObserver::WaitUntilObserver(ExecutionContext* context,
                                     EventType type,
                                     int eventID)
    : m_executionContext(context),
      m_type(type),
      m_eventID(eventID),
      m_consumeWindowInteractionTimer(
          Platform::current()->currentThread()->getWebTaskRunner(),
          this,
          &WaitUntilObserver::consumeWindowInteraction) {}

void WaitUntilObserver::reportError(const ScriptValue& value) {
  // FIXME: Propagate error message to the client for onerror handling.
  NOTIMPLEMENTED();
  m_hasError = true;
}

void WaitUntilObserver::incrementPendingActivity() {
  ++m_pendingActivity;
}

void WaitUntilObserver::decrementPendingActivity() {
  ASSERT(m_pendingActivity > 0);
  // Keep waiting while there is no error and activity remains outstanding.
  // NOTE: when m_hasError is true the counter is intentionally not
  // decremented (short-circuit) and the result is reported immediately.
  if (!m_executionContext || (!m_hasError && --m_pendingActivity))
    return;

  // All pending activity settled (or an error occurred): tell the embedder
  // how this event finished, then sever the context so this runs only once.
  ServiceWorkerGlobalScopeClient* client =
      ServiceWorkerGlobalScopeClient::from(m_executionContext);
  WebServiceWorkerEventResult result =
      m_hasError ? WebServiceWorkerEventResultRejected
                 : WebServiceWorkerEventResultCompleted;
  switch (m_type) {
    case Activate:
      client->didHandleActivateEvent(m_eventID, result, m_eventDispatchTime);
      break;
    case Fetch:
      client->didHandleFetchEvent(m_eventID, result, m_eventDispatchTime);
      break;
    case Install:
      client->didHandleInstallEvent(m_eventID, result, m_eventDispatchTime);
      break;
    case Message:
      client->didHandleExtendableMessageEvent(m_eventID, result,
                                              m_eventDispatchTime);
      break;
    case NotificationClick:
      client->didHandleNotificationClickEvent(m_eventID, result,
                                              m_eventDispatchTime);
      m_consumeWindowInteractionTimer.stop();
      consumeWindowInteraction(nullptr);
      break;
    case NotificationClose:
      client->didHandleNotificationCloseEvent(m_eventID, result,
                                              m_eventDispatchTime);
      break;
    case Push:
      client->didHandlePushEvent(m_eventID, result, m_eventDispatchTime);
      break;
    case Sync:
      client->didHandleSyncEvent(m_eventID, result, m_eventDispatchTime);
      break;
    case PaymentRequest:
      client->didHandlePaymentRequestEvent(m_eventID, result,
                                           m_eventDispatchTime);
      break;
  }
  m_executionContext = nullptr;
}

void WaitUntilObserver::consumeWindowInteraction(TimerBase*) {
  if (!m_executionContext)
    return;
  m_executionContext->consumeWindowInteraction();
}

DEFINE_TRACE(WaitUntilObserver) {
  visitor->trace(m_executionContext);
}

}  // namespace blink
| 2,829 |
22,688 | <filename>modules/canbus/vehicle/ge3/protocol/pc_epb_203.cc
/******************************************************************************
* Copyright 2019 The Apollo Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*****************************************************************************/
#include "modules/canbus/vehicle/ge3/protocol/pc_epb_203.h"
#include "modules/drivers/canbus/common/byte.h"
namespace apollo {
namespace canbus {
namespace ge3 {

using ::apollo::drivers::canbus::Byte;

// CAN message ID of the PC->EPB command frame.
const int32_t Pcepb203::ID = 0x203;

// public
Pcepb203::Pcepb203() { Reset(); }

uint32_t Pcepb203::GetPeriod() const {
  // modify every protocol's period manually
  // 20 * 1000 — presumably microseconds, i.e. a 20 ms cycle; confirm against
  // the other Apollo protocol implementations.
  static const uint32_t PERIOD = 20 * 1000;
  return PERIOD;
}

// Serializes the cached EPB request/enable values into the CAN frame bytes.
void Pcepb203::UpdateData(uint8_t* data) {
  set_p_pc_epbreq(data, pc_epbreq_);
  set_p_pc_epbenable(data, pc_epbenable_);
}

// Restores the safe defaults: request INVALID, control DISABLED.
void Pcepb203::Reset() {
  // you should check this manually
  pc_epbreq_ = Pc_epb_203::PC_EPBREQ_INVALID;
  pc_epbenable_ = Pc_epb_203::PC_EPBENABLE_DISABLE;
}

// Caches the EPB request; written to the frame on the next UpdateData().
Pcepb203* Pcepb203::set_pc_epbreq(Pc_epb_203::Pc_epbreqType pc_epbreq) {
  pc_epbreq_ = pc_epbreq;
  return this;
}

// config detail: {'description': 'EPB request', 'enum': {0:
// 'PC_EPBREQ_INVALID', 1: 'PC_EPBREQ_RELEASE', 2: 'PC_EPBREQ_APPLY'},
// 'precision': 1.0, 'len': 2, 'name': 'PC_EpbReq', 'is_signed_var': False,
// 'offset': 0.0, 'physical_range': '[0|1]', 'bit': 1, 'type': 'enum', 'order':
// 'motorola', 'physical_unit': ''}
void Pcepb203::set_p_pc_epbreq(uint8_t* data,
                               Pc_epb_203::Pc_epbreqType pc_epbreq) {
  int x = pc_epbreq;

  Byte to_set(data + 0);
  to_set.set_value(static_cast<uint8_t>(x), 0, 2);
}

// Caches the EPB enable flag; written to the frame on the next UpdateData().
Pcepb203* Pcepb203::set_pc_epbenable(
    Pc_epb_203::Pc_epbenableType pc_epbenable) {
  pc_epbenable_ = pc_epbenable;
  return this;
}

// config detail: {'description': 'EPB control enable', 'enum': {0:
// 'PC_EPBENABLE_DISABLE', 1: 'PC_EPBENABLE_ENABLE'}, 'precision': 1.0, 'len':
// 1, 'name': 'PC_EpbEnable', 'is_signed_var': False, 'offset': 0.0,
// 'physical_range': '[0|1]', 'bit': 7, 'type': 'enum', 'order': 'motorola',
// 'physical_unit': ''}
void Pcepb203::set_p_pc_epbenable(uint8_t* data,
                                  Pc_epb_203::Pc_epbenableType pc_epbenable) {
  int x = pc_epbenable;

  Byte to_set(data + 0);
  to_set.set_value(static_cast<uint8_t>(x), 7, 1);
}

}  // namespace ge3
}  // namespace canbus
}  // namespace apollo
| 1,175 |
1,078 | /**
* OpenAL cross platform audio library
* Copyright (C) 2013 by <NAME>
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
* Or go to http://www.gnu.org/copyleft/lgpl.html
*/
#include "config.h"
#include <math.h>
#include <stdlib.h>
#include "alMain.h"
#include "alFilter.h"
#include "alAuxEffectSlot.h"
#include "alError.h"
#include "alu.h"
typedef struct ALflangerState {
    DERIVE_FROM_TYPE(ALeffectState);

    /* Per-channel delay lines. Both point into one allocation rooted at
     * SampleBuffer[0] (see deviceUpdate/Destruct). */
    ALfloat *SampleBuffer[2];
    ALuint BufferLength;  /* power of two, so (index & (len-1)) wraps */
    ALuint offset;        /* current write position in the delay lines */
    ALuint lfo_range;     /* LFO period, in samples */
    ALfloat lfo_scale;    /* maps sample position to LFO phase */
    ALint lfo_disp;       /* LFO phase displacement of the right channel, in samples */

    /* Gains for left and right sides */
    ALfloat Gain[2][MaxChannels];

    /* effect parameters */
    ALint waveform;   /* AL_FLANGER_WAVEFORM_TRIANGLE or _SINUSOID */
    ALint delay;      /* base delay, in samples */
    ALfloat depth;
    ALfloat feedback;
} ALflangerState;

static ALvoid ALflangerState_Destruct(ALflangerState *state)
{
    /* Both channels share a single allocation; free only the root. */
    free(state->SampleBuffer[0]);
    state->SampleBuffer[0] = NULL;
    state->SampleBuffer[1] = NULL;
}

/* (Re)allocates and clears the delay lines for the device sample rate.
 * Returns AL_FALSE on allocation failure (previous buffers stay valid). */
static ALboolean ALflangerState_deviceUpdate(ALflangerState *state, ALCdevice *Device)
{
    ALuint maxlen;
    ALuint it;

    /* 3x the maximum delay leaves headroom for depth modulation; rounded
     * up to a power of two so masked indexing works. */
    maxlen = fastf2u(AL_FLANGER_MAX_DELAY * 3.0f * Device->Frequency) + 1;
    maxlen = NextPowerOf2(maxlen);

    if(maxlen != state->BufferLength)
    {
        void *temp;

        /* One allocation holds both channels back to back. */
        temp = realloc(state->SampleBuffer[0], maxlen * sizeof(ALfloat) * 2);
        if(!temp) return AL_FALSE;
        state->SampleBuffer[0] = temp;
        state->SampleBuffer[1] = state->SampleBuffer[0] + maxlen;

        state->BufferLength = maxlen;
    }

    /* Silence any stale delay-line content. */
    for(it = 0;it < state->BufferLength;it++)
    {
        state->SampleBuffer[0][it] = 0.0f;
        state->SampleBuffer[1][it] = 0.0f;
    }

    return AL_TRUE;
}

/* Recomputes the derived DSP state from the effect slot properties. */
static ALvoid ALflangerState_update(ALflangerState *state, ALCdevice *Device, const ALeffectslot *Slot)
{
    ALfloat frequency = (ALfloat)Device->Frequency;
    ALfloat rate;
    ALint phase;

    state->waveform = Slot->EffectProps.Flanger.Waveform;
    state->depth = Slot->EffectProps.Flanger.Depth;
    state->feedback = Slot->EffectProps.Flanger.Feedback;
    /* Convert the delay property from seconds to samples. */
    state->delay = fastf2i(Slot->EffectProps.Flanger.Delay * frequency);

    /* Gains for left and right sides */
    ComputeAngleGains(Device, atan2f(-1.0f, 0.0f), 0.0f, Slot->Gain, state->Gain[0]);
    ComputeAngleGains(Device, atan2f(+1.0f, 0.0f), 0.0f, Slot->Gain, state->Gain[1]);

    phase = Slot->EffectProps.Flanger.Phase;
    rate = Slot->EffectProps.Flanger.Rate;
    if(!(rate > 0.0f))
    {
        /* Rate 0 disables modulation: a degenerate one-sample LFO period. */
        state->lfo_scale = 0.0f;
        state->lfo_range = 1;
        state->lfo_disp = 0;
    }
    else
    {
        /* Calculate LFO coefficient */
        state->lfo_range = fastf2u(frequency/rate + 0.5f);
        switch(state->waveform)
        {
            case AL_FLANGER_WAVEFORM_TRIANGLE:
                state->lfo_scale = 4.0f / state->lfo_range;
                break;
            case AL_FLANGER_WAVEFORM_SINUSOID:
                state->lfo_scale = F_2PI / state->lfo_range;
                break;
        }

        /* Calculate lfo phase displacement */
        state->lfo_disp = fastf2i(state->lfo_range * (phase/360.0f));
    }
}
/* Derives the per-channel delay amounts from a triangle-shaped LFO.
 * The right channel is evaluated lfo_disp samples ahead of the left. */
static inline void Triangle(ALint *delay_left, ALint *delay_right, ALuint offset, const ALflangerState *state)
{
    ALint *dest[2] = { delay_left, delay_right };
    ALuint phase = offset;
    ALuint chan;

    for(chan = 0;chan < 2;chan++)
    {
        ALfloat lfo = 2.0f - fabsf(2.0f - state->lfo_scale*(phase%state->lfo_range));
        lfo *= state->depth * state->delay;
        *dest[chan] = fastf2i(lfo) + state->delay;

        /* Shift the phase for the right channel's evaluation. */
        phase += state->lfo_disp;
    }
}
/* Derives the per-channel delay amounts from a sinusoidal LFO.
 * The right channel is evaluated lfo_disp samples ahead of the left. */
static inline void Sinusoid(ALint *delay_left, ALint *delay_right, ALuint offset, const ALflangerState *state)
{
    ALint *dest[2] = { delay_left, delay_right };
    ALuint phase = offset;
    ALuint chan;

    for(chan = 0;chan < 2;chan++)
    {
        ALfloat lfo = 1.0f + sinf(state->lfo_scale*(phase%state->lfo_range));
        lfo *= state->depth * state->delay;
        *dest[chan] = fastf2i(lfo) + state->delay;

        /* Shift the phase for the right channel's evaluation. */
        phase += state->lfo_disp;
    }
}
/* Expands into ProcessTriangle/ProcessSinusoid: reads mono input, writes
 * stereo delay-modulated output, and feeds (output + input) * feedback
 * back into each delay line. Buffer indices wrap via the power-of-two
 * mask set up in deviceUpdate. */
#define DECL_TEMPLATE(Func)                                                   \
static void Process##Func(ALflangerState *state, const ALuint SamplesToDo,    \
  const ALfloat *restrict SamplesIn, ALfloat (*restrict out)[2])              \
{                                                                             \
    const ALuint bufmask = state->BufferLength-1;                             \
    ALfloat *restrict leftbuf = state->SampleBuffer[0];                       \
    ALfloat *restrict rightbuf = state->SampleBuffer[1];                      \
    ALuint offset = state->offset;                                            \
    const ALfloat feedback = state->feedback;                                 \
    ALuint it;                                                                \
                                                                              \
    for(it = 0;it < SamplesToDo;it++)                                         \
    {                                                                         \
        ALint delay_left, delay_right;                                        \
        Func(&delay_left, &delay_right, offset, state);                       \
                                                                              \
        out[it][0] = leftbuf[(offset-delay_left)&bufmask];                    \
        leftbuf[offset&bufmask] = (out[it][0]+SamplesIn[it]) * feedback;      \
                                                                              \
        out[it][1] = rightbuf[(offset-delay_right)&bufmask];                  \
        rightbuf[offset&bufmask] = (out[it][1]+SamplesIn[it]) * feedback;     \
                                                                              \
        offset++;                                                             \
    }                                                                         \
    state->offset = offset;                                                   \
}

DECL_TEMPLATE(Triangle)
DECL_TEMPLATE(Sinusoid)

#undef DECL_TEMPLATE

/* Renders the effect in 64-sample chunks, then mixes the stereo result
 * into the output channels with the per-side panning gains. */
static ALvoid ALflangerState_process(ALflangerState *state, ALuint SamplesToDo, const ALfloat *restrict SamplesIn, ALfloat (*restrict SamplesOut)[BUFFERSIZE])
{
    ALuint it, kt;
    ALuint base;

    for(base = 0;base < SamplesToDo;)
    {
        ALfloat temps[64][2];
        ALuint td = minu(SamplesToDo-base, 64);

        if(state->waveform == AL_FLANGER_WAVEFORM_TRIANGLE)
            ProcessTriangle(state, td, SamplesIn+base, temps);
        else if(state->waveform == AL_FLANGER_WAVEFORM_SINUSOID)
            ProcessSinusoid(state, td, SamplesIn+base, temps);

        for(kt = 0;kt < MaxChannels;kt++)
        {
            /* Skip channels whose contribution would be inaudible. */
            ALfloat gain = state->Gain[0][kt];
            if(gain > GAIN_SILENCE_THRESHOLD)
            {
                for(it = 0;it < td;it++)
                    SamplesOut[kt][it+base] += temps[it][0] * gain;
            }

            gain = state->Gain[1][kt];
            if(gain > GAIN_SILENCE_THRESHOLD)
            {
                for(it = 0;it < td;it++)
                    SamplesOut[kt][it+base] += temps[it][1] * gain;
            }
        }

        base += td;
    }
}

static void ALflangerState_Delete(ALflangerState *state)
{
    free(state);
}

DEFINE_ALEFFECTSTATE_VTABLE(ALflangerState);


typedef struct ALflangerStateFactory {
    DERIVE_FROM_TYPE(ALeffectStateFactory);
} ALflangerStateFactory;

/* Allocates a new flanger state. Buffers are deferred to deviceUpdate;
 * lfo_range starts at 1 so the modulo in the LFO helpers is safe. */
ALeffectState *ALflangerStateFactory_create(ALflangerStateFactory *UNUSED(factory))
{
    ALflangerState *state;

    state = malloc(sizeof(*state));
    if(!state) return NULL;
    SET_VTABLE2(ALflangerState, ALeffectState, state);

    state->BufferLength = 0;
    state->SampleBuffer[0] = NULL;
    state->SampleBuffer[1] = NULL;
    state->offset = 0;
    state->lfo_range = 1;

    return STATIC_CAST(ALeffectState, state);
}

DEFINE_ALEFFECTSTATEFACTORY_VTABLE(ALflangerStateFactory);

/* Returns the process-wide singleton factory for flanger states. */
ALeffectStateFactory *ALflangerStateFactory_getFactory(void)
{
    static ALflangerStateFactory FlangerFactory = { { GET_VTABLE2(ALflangerStateFactory, ALeffectStateFactory) } };

    return STATIC_CAST(ALeffectStateFactory, &FlangerFactory);
}
/* Validates and stores the integer flanger properties. Out-of-range
 * values raise AL_INVALID_VALUE; unknown params raise AL_INVALID_ENUM. */
void ALflanger_setParami(ALeffect *effect, ALCcontext *context, ALenum param, ALint val)
{
    ALeffectProps *props = &effect->Props;

    if(param == AL_FLANGER_WAVEFORM)
    {
        if(val < AL_FLANGER_MIN_WAVEFORM || val > AL_FLANGER_MAX_WAVEFORM)
            SET_ERROR_AND_RETURN(context, AL_INVALID_VALUE);
        props->Flanger.Waveform = val;
    }
    else if(param == AL_FLANGER_PHASE)
    {
        if(val < AL_FLANGER_MIN_PHASE || val > AL_FLANGER_MAX_PHASE)
            SET_ERROR_AND_RETURN(context, AL_INVALID_VALUE);
        props->Flanger.Phase = val;
    }
    else
        SET_ERROR_AND_RETURN(context, AL_INVALID_ENUM);
}

/* Vector form: only the first element is meaningful for these params. */
void ALflanger_setParamiv(ALeffect *effect, ALCcontext *context, ALenum param, const ALint *vals)
{
    ALflanger_setParami(effect, context, param, vals[0]);
}
/* Returns nonzero only when val lies inside [minval,maxval]; a NaN input
 * fails both comparisons and is rejected, matching the original
 * !(val >= min && val <= max) semantics. */
static int flanger_param_in_range(ALfloat val, ALfloat minval, ALfloat maxval)
{
    return val >= minval && val <= maxval;
}

/* Validates and stores the floating-point flanger properties. */
void ALflanger_setParamf(ALeffect *effect, ALCcontext *context, ALenum param, ALfloat val)
{
    ALeffectProps *props = &effect->Props;
    switch(param)
    {
        case AL_FLANGER_RATE:
            if(!flanger_param_in_range(val, AL_FLANGER_MIN_RATE, AL_FLANGER_MAX_RATE))
                SET_ERROR_AND_RETURN(context, AL_INVALID_VALUE);
            props->Flanger.Rate = val;
            break;

        case AL_FLANGER_DEPTH:
            if(!flanger_param_in_range(val, AL_FLANGER_MIN_DEPTH, AL_FLANGER_MAX_DEPTH))
                SET_ERROR_AND_RETURN(context, AL_INVALID_VALUE);
            props->Flanger.Depth = val;
            break;

        case AL_FLANGER_FEEDBACK:
            if(!flanger_param_in_range(val, AL_FLANGER_MIN_FEEDBACK, AL_FLANGER_MAX_FEEDBACK))
                SET_ERROR_AND_RETURN(context, AL_INVALID_VALUE);
            props->Flanger.Feedback = val;
            break;

        case AL_FLANGER_DELAY:
            if(!flanger_param_in_range(val, AL_FLANGER_MIN_DELAY, AL_FLANGER_MAX_DELAY))
                SET_ERROR_AND_RETURN(context, AL_INVALID_VALUE);
            props->Flanger.Delay = val;
            break;

        default:
            SET_ERROR_AND_RETURN(context, AL_INVALID_ENUM);
    }
}

/* Vector form: only the first element is meaningful for these params. */
void ALflanger_setParamfv(ALeffect *effect, ALCcontext *context, ALenum param, const ALfloat *vals)
{
    ALflanger_setParamf(effect, context, param, vals[0]);
}
/* Reads back an integer flanger property; unknown params raise
 * AL_INVALID_ENUM and leave *val untouched. */
void ALflanger_getParami(const ALeffect *effect, ALCcontext *context, ALenum param, ALint *val)
{
    const ALeffectProps *props = &effect->Props;

    if(param == AL_FLANGER_WAVEFORM)
        *val = props->Flanger.Waveform;
    else if(param == AL_FLANGER_PHASE)
        *val = props->Flanger.Phase;
    else
        SET_ERROR_AND_RETURN(context, AL_INVALID_ENUM);
}

/* Vector form: these params are all single-valued. */
void ALflanger_getParamiv(const ALeffect *effect, ALCcontext *context, ALenum param, ALint *vals)
{
    ALflanger_getParami(effect, context, param, vals);
}
/* Reads back a floating-point flanger property; unknown params raise
 * AL_INVALID_ENUM and leave *val untouched. */
void ALflanger_getParamf(const ALeffect *effect, ALCcontext *context, ALenum param, ALfloat *val)
{
    const ALeffectProps *props = &effect->Props;

    if(param == AL_FLANGER_RATE)
        *val = props->Flanger.Rate;
    else if(param == AL_FLANGER_DEPTH)
        *val = props->Flanger.Depth;
    else if(param == AL_FLANGER_FEEDBACK)
        *val = props->Flanger.Feedback;
    else if(param == AL_FLANGER_DELAY)
        *val = props->Flanger.Delay;
    else
        SET_ERROR_AND_RETURN(context, AL_INVALID_ENUM);
}

/* Vector form: these params are all single-valued. */
void ALflanger_getParamfv(const ALeffect *effect, ALCcontext *context, ALenum param, ALfloat *vals)
{
    ALflanger_getParamf(effect, context, param, vals);
}

DEFINE_ALEFFECT_VTABLE(ALflanger);
| 6,281 |
372 | /*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
/*
* This code was generated by https://github.com/googleapis/google-api-java-client-services/
* Modify at your own risk.
*/
package com.google.api.services.fitness.model;
/**
 * Model definition for BucketBySession.
 *
 * <p> This is the Java data model class that specifies how to parse/serialize into the JSON that is
 * transmitted over HTTP when working with the Fitness API. For a detailed explanation see:
 * <a href="https://developers.google.com/api-client-library/java/google-http-java-client/json">https://developers.google.com/api-client-library/java/google-http-java-client/json</a>
 * </p>
 *
 * @author Google, Inc.
 */
@SuppressWarnings("javadoc")
public final class BucketBySession extends com.google.api.client.json.GenericJson {

  /**
   * Specifies that only sessions of duration longer than minDurationMillis are considered and used
   * as a container for aggregated data.
   * Mapped from a JSON string value (see the {@code @JsonString} annotation).
   * The value may be {@code null}.
   */
  @com.google.api.client.util.Key @com.google.api.client.json.JsonString
  private java.lang.Long minDurationMillis;

  /**
   * Specifies that only sessions of duration longer than minDurationMillis are considered and used
   * as a container for aggregated data.
   * @return value or {@code null} for none
   */
  public java.lang.Long getMinDurationMillis() {
    return minDurationMillis;
  }

  /**
   * Specifies that only sessions of duration longer than minDurationMillis are considered and used
   * as a container for aggregated data.
   * @param minDurationMillis minDurationMillis or {@code null} for none
   */
  public BucketBySession setMinDurationMillis(java.lang.Long minDurationMillis) {
    this.minDurationMillis = minDurationMillis;
    return this;
  }

  @Override
  public BucketBySession set(String fieldName, Object value) {
    return (BucketBySession) super.set(fieldName, value);
  }

  @Override
  public BucketBySession clone() {
    return (BucketBySession) super.clone();
  }

}
| 728 |
2,610 | from __future__ import print_function, unicode_literals
import csv
import os
import shutil
import sys
import time
import boto3
import pyspark
from awsglue.utils import getResolvedOptions
from mleap.pyspark.spark_support import SimpleSparkSerializer
from pyspark.ml import Pipeline
from pyspark.ml.feature import OneHotEncoder, StringIndexer, VectorAssembler, VectorIndexer
from pyspark.sql import SparkSession
from pyspark.sql.functions import *
from pyspark.sql.types import DoubleType, StringType, StructField, StructType
def csv_line(data):
    """Format a (label, features) pair as one CSV line, label first."""
    label, features = data
    feature_part = ",".join(str(f) for f in features)
    return ",".join([str(label), feature_part])
def main():
    """AWS Glue job entry point for the abalone dataset.

    Reads the raw CSV from S3, featurizes it with a Spark ML pipeline
    (string-index + one-hot encode ``sex``, assemble all features into a
    vector), writes an 80/20 train/validation split back to S3 as CSV,
    and exports the fitted feature pipeline as an MLeap bundle packaged
    as ``model.tar.gz`` for later use with SageMaker.
    """
    spark = SparkSession.builder.appName("PySparkAbalone").getOrCreate()

    args = getResolvedOptions(
        sys.argv,
        [
            "S3_INPUT_BUCKET",
            "S3_INPUT_KEY_PREFIX",
            "S3_OUTPUT_BUCKET",
            "S3_OUTPUT_KEY_PREFIX",
            "S3_MODEL_BUCKET",
            "S3_MODEL_KEY_PREFIX",
        ],
    )

    # This is needed to save RDDs which is the only way to write nested Dataframes into CSV format
    spark.sparkContext._jsc.hadoopConfiguration().set(
        "mapred.output.committer.class", "org.apache.hadoop.mapred.FileOutputCommitter"
    )

    # Defining the schema corresponding to the input data. The input data does not contain the headers
    schema = StructType(
        [
            StructField("sex", StringType(), True),
            StructField("length", DoubleType(), True),
            StructField("diameter", DoubleType(), True),
            StructField("height", DoubleType(), True),
            StructField("whole_weight", DoubleType(), True),
            StructField("shucked_weight", DoubleType(), True),
            StructField("viscera_weight", DoubleType(), True),
            StructField("shell_weight", DoubleType(), True),
            StructField("rings", DoubleType(), True),
        ]
    )

    # Downloading the data from S3 into a Dataframe
    total_df = spark.read.csv(
        (
            "s3://"
            + os.path.join(args["S3_INPUT_BUCKET"], args["S3_INPUT_KEY_PREFIX"], "abalone.csv")
        ),
        header=False,
        schema=schema,
    )

    # StringIndexer on the sex column which has categorical value
    sex_indexer = StringIndexer(inputCol="sex", outputCol="indexed_sex")

    # one-hot-encoding is being performed on the string-indexed sex column (indexed_sex)
    sex_encoder = OneHotEncoder(inputCol="indexed_sex", outputCol="sex_vec")

    # vector-assembler will bring all the features to a 1D vector for us to save easily into CSV format
    assembler = VectorAssembler(
        inputCols=[
            "sex_vec",
            "length",
            "diameter",
            "height",
            "whole_weight",
            "shucked_weight",
            "viscera_weight",
            "shell_weight",
        ],
        outputCol="features",
    )

    # The pipeline comprises of the steps added above
    pipeline = Pipeline(stages=[sex_indexer, sex_encoder, assembler])

    # This step trains the feature transformers. We need to serialize this model with MLeap and save to S3
    model = pipeline.fit(total_df)

    # This step transforms the dataset with information obtained from the previous fit
    transformed_total_df = model.transform(total_df)

    # Split the overall dataset into 80-20 training and validation
    (train_df, validation_df) = transformed_total_df.randomSplit([0.8, 0.2])

    # Convert the train dataframe to RDD to save in CSV format and upload to S3
    train_rdd = train_df.rdd.map(lambda x: (x.rings, x.features))
    train_lines = train_rdd.map(csv_line)
    train_lines.saveAsTextFile(
        "s3://" + os.path.join(args["S3_OUTPUT_BUCKET"], args["S3_OUTPUT_KEY_PREFIX"], "train")
    )

    # Convert the validation dataframe to RDD to save in CSV format and upload to S3
    validation_rdd = validation_df.rdd.map(lambda x: (x.rings, x.features))
    validation_lines = validation_rdd.map(csv_line)
    validation_lines.saveAsTextFile(
        "s3://" + os.path.join(args["S3_OUTPUT_BUCKET"], args["S3_OUTPUT_KEY_PREFIX"], "validation")
    )

    # Serialize and store the model via MLeap
    SimpleSparkSerializer().serializeToBundle(model, "jar:file:/tmp/model.zip", validation_df)

    # Unzip the model as SageMaker expects a .tar.gz file but MLeap produces a .zip file
    import zipfile

    with zipfile.ZipFile("/tmp/model.zip") as zf:
        zf.extractall("/tmp/model")

    # Write back the content as a .tar.gz file
    import tarfile

    with tarfile.open("/tmp/model.tar.gz", "w:gz") as tar:
        tar.add("/tmp/model/bundle.json", arcname="bundle.json")
        tar.add("/tmp/model/root", arcname="root")

    # Upload the model in tar.gz format to S3 so that it can be used with SageMaker for inference later
    s3 = boto3.resource("s3")
    file_name = os.path.join(args["S3_MODEL_KEY_PREFIX"], "model.tar.gz")
    s3.Bucket(args["S3_MODEL_BUCKET"]).upload_file("/tmp/model.tar.gz", file_name)


if __name__ == "__main__":
    main()
| 2,050 |
12,278 | <reponame>rajeev02101987/arangodb<filename>3rdParty/boost/1.71.0/libs/geometry/test/algorithms/overlay/get_turns_linear_linear_geo.cpp
// Boost.Geometry
// Unit Test
// Copyright (c) 2017, Oracle and/or its affiliates.
// Contributed and/or modified by <NAME>, on behalf of Oracle
// Use, modification and distribution is subject to the Boost Software License,
// Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#include "test_get_turns.hpp"
#include <boost/geometry/geometries/geometries.hpp>
// Exercises get_turns for a linestring vs. multi-linestring expressed in
// geographic coordinates (radians), using geodesic segment intersection.
template <typename T>
void test_radian()
{
    typedef bg::model::point<T, 2, bg::cs::geographic<bg::radian> > pt;
    typedef bg::model::linestring<pt> ls;
    typedef bg::model::multi_linestring<ls> mls;

    // WGS84 semi-major / semi-minor axes in meters.
    bg::srs::spheroid<double> sph_wgs84(6378137.0, 6356752.3142451793);
    boost::geometry::strategy::intersection::geographic_segments<> wgs84(sph_wgs84);

    // The expected() strings encode turn method/operation codes — see the
    // test DSL in test_get_turns.hpp for their meaning.
    test_geometry<ls, mls>(
        "LINESTRING(0 0, -3.14159265358979 0)",
        "MULTILINESTRING((-2.1467549799530232 -0.12217304763960295,"
                         "-2.5481807079117185 -0.90757121103705041,"
                         "-2.6529004630313784 0.85521133347722067,"
                         " 0.92502450355699373 0.62831853071795796,"
                         "-2.5307274153917754 0,"
                         " 2.8099800957108676 1.0646508437165401,"
                         "-1.6057029118347816 -1.5009831567151219,"
                         " 0.2268928027592626 1.0646508437165401,"
                         "-2.199114857512853 -0.017453292519943278,"
                         " 0 0.31415926535897898,"
                         " 0 0.57595865315812822,"
                         " 1.0471975511965967 -0.73303828583761765,"
                         " 2.1118483949131366 -0.54105206811824158))",
        expected("mii++")("muu==")("iuu++")("iuu++")("iuu++")("iuu++"),
        wgs84);
}

int test_main(int, char* [])
{
    test_radian<double>();
    return 0;
}
| 1,085 |
1,102 | <filename>src/lib/boost/process/terminate.hpp
// Copyright (c) 2006, 2007 <NAME>
// Copyright (c) 2008 <NAME>, <NAME>
// Copyright (c) 2009 <NAME>
// Copyright (c) 2010 <NAME>, <NAME>
// Copyright (c) 2011, 2012 <NAME>, <NAME>
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
/**
* \file boost/process/terminate.hpp
*
* Defines a function to terminate a process.
*/
#ifndef BOOST_PROCESS_TERMINATE_HPP
#define BOOST_PROCESS_TERMINATE_HPP
#include <boost/process/config.hpp>
#include BOOST_PROCESS_PLATFORM_PROMOTE_PATH(terminate)
BOOST_PROCESS_PLATFORM_PROMOTE_NAMESPACE(terminate)
#if defined(BOOST_PROCESS_DOXYGEN)
namespace boost { namespace process {

/**
 * Terminates a process.
 *
 * \warning Call this function only as a last resort. The process
 *          is terminated immediately and forcefully and has no
 *          chance to close or clean up resources properly.
 *
 * \param p the process to terminate
 *
 * \throws boost::system::system_error in case of an error
 */
template <class Process>
void terminate(const Process &p);

/**
 * Terminates a process.
 *
 * \warning Call this function only as a last resort. The process
 *          is terminated immediately and forcefully and has no
 *          chance to close or clean up resources properly.
 *
 * \param p the process to terminate
 * \param ec receives the error that occurred, if any, instead of
 *           it being thrown
 */
template <class Process>
void terminate(const Process &p, boost::system::error_code &ec);

}}
#endif
#endif
| 495 |
51,887 | {"name":"node-html-parser","main":"index.js","author":"<NAME> <<EMAIL>>","license":"MIT"}
| 30 |
544 | from pprint import pprint
from office365.sharepoint.client_context import ClientContext
from office365.sharepoint.listitems.listitem import ListItem
from tests import test_team_site_url, test_client_credentials
# Authenticate against the team site with app-only client credentials.
ctx = ClientContext(test_team_site_url).with_credentials(test_client_credentials)
file_url = '/sites/team/Shared Documents/big_buck_bunny.mp4'
# Resolve the file by its server-relative URL, then load only the
# EffectiveBasePermissions property of its backing list item.
file = ctx.web.get_file_by_server_relative_url(file_url)
file_item = file.listItemAllFields.select(["EffectiveBasePermissions"]).get().execute_query()  # type: ListItem
# Print the permission level names resolved from the base-permissions mask.
pprint(file_item.effective_base_permissions.permission_levels)
| 188 |
652 | """Tests for distutils.command.bdist_dumb."""
import os
import imp
import sys
import zipfile
import unittest
from test.support import run_unittest
from distutils.core import Distribution
from distutils.command.bdist_dumb import bdist_dumb
from distutils.tests import support
# Minimal setup script written into the temporary package under test.
SETUP_PY = """\
from distutils.core import setup
import foo

setup(name='foo', version='0.1', py_modules=['foo'],
      url='xxx', author='xxx', author_email='xxx')
"""

# bdist_dumb's zip format needs zlib; skip the test when it's unavailable.
try:
    import zlib
    ZLIB_SUPPORT = True
except ImportError:
    ZLIB_SUPPORT = False


class BuildDumbTestCase(support.TempdirManager,
                        support.LoggingSilencer,
                        support.EnvironGuard,
                        unittest.TestCase):
    """Tests for the bdist_dumb command."""

    def setUp(self):
        # Save the working directory and sys.argv; the test chdirs into a
        # temp package dir and rewrites argv to mimic a setup.py run.
        super(BuildDumbTestCase, self).setUp()
        self.old_location = os.getcwd()
        self.old_sys_argv = sys.argv, sys.argv[:]

    def tearDown(self):
        # Restore cwd and both the sys.argv binding and its contents.
        os.chdir(self.old_location)
        sys.argv = self.old_sys_argv[0]
        sys.argv[:] = self.old_sys_argv[1]
        super(BuildDumbTestCase, self).tearDown()

    @unittest.skipUnless(ZLIB_SUPPORT, 'Need zlib support to run')
    def test_simple_built(self):
        # let's create a simple package
        tmp_dir = self.mkdtemp()
        pkg_dir = os.path.join(tmp_dir, 'foo')
        os.mkdir(pkg_dir)
        self.write_file((pkg_dir, 'setup.py'), SETUP_PY)
        self.write_file((pkg_dir, 'foo.py'), '#')
        self.write_file((pkg_dir, 'MANIFEST.in'), 'include foo.py')
        self.write_file((pkg_dir, 'README'), '')

        dist = Distribution({'name': 'foo', 'version': '0.1',
                             'py_modules': ['foo'],
                             'url': 'xxx', 'author': 'xxx',
                             'author_email': 'xxx'})
        dist.script_name = 'setup.py'
        os.chdir(pkg_dir)

        sys.argv = ['setup.py']
        cmd = bdist_dumb(dist)

        # so the output is the same no matter
        # what is the platform
        cmd.format = 'zip'

        cmd.ensure_finalized()
        cmd.run()

        # see what we have
        dist_created = os.listdir(os.path.join(pkg_dir, 'dist'))
        base = "%s.%s.zip" % (dist.get_fullname(), cmd.plat_name)
        if os.name == 'os2':
            base = base.replace(':', '-')

        self.assertEqual(dist_created, [base])

        # now let's check what we have in the zip file
        fp = zipfile.ZipFile(os.path.join('dist', base))
        try:
            contents = fp.namelist()
        finally:
            fp.close()

        contents = sorted(os.path.basename(fn) for fn in contents)
        wanted = ['foo-0.1-py%s.%s.egg-info' % sys.version_info[:2], 'foo.py']
        if not sys.dont_write_bytecode:
            wanted.append('foo.%s.pyc' % imp.get_tag())
        self.assertEqual(contents, sorted(wanted))

def test_suite():
    return unittest.makeSuite(BuildDumbTestCase)

if __name__ == '__main__':
    run_unittest(test_suite())
| 1,425 |
548 | <reponame>timgates42/kivy-ios
"""
Icon and LaunchImage generator for iOS
======================================
.. author:: <NAME> <<EMAIL>>
"""
# flake8: noqa (E121 mainly)
__all__ = ["launchimage"]
import sh
import json
from PIL import Image
from os.path import join, exists
from os import makedirs
appicon_json = {
"images": [
{
"idiom": "iphone",
"size": "20x20",
"scale": "2x",
"filename": "Icon40.png"
},
{
"idiom": "iphone",
"size": "20x20",
"scale": "3x",
"filename": "Icon60.png"
},
{
"size": "29x29",
"idiom": "iphone",
"filename": "Icon29.png",
"scale": "1x"
},
{
"size": "29x29",
"idiom": "iphone",
"filename": "Icon58.png",
"scale": "2x"
},
{
"size": "29x29",
"idiom": "iphone",
"filename": "Icon87.png",
"scale": "3x"
},
{
"size": "40x40",
"idiom": "iphone",
"filename": "Icon80.png",
"scale": "2x"
},
{
"size": "40x40",
"idiom": "iphone",
"filename": "Icon120.png",
"scale": "3x"
},
{
"size": "57x57",
"idiom": "iphone",
"filename": "Icon57.png",
"scale": "1x"
},
{
"size": "57x57",
"idiom": "iphone",
"filename": "Icon114.png",
"scale": "2x"
},
{
"size": "60x60",
"idiom": "iphone",
"filename": "Icon120.png",
"scale": "2x"
},
{
"size": "60x60",
"idiom": "iphone",
"filename": "Icon180.png",
"scale": "3x"
},
{
"idiom": "ipad",
"size": "20x20",
"filename": "Icon20.png",
"scale": "1x"
},
{
"idiom": "ipad",
"size": "20x20",
"filename": "Icon40.png",
"scale": "2x"
},
{
"size": "29x29",
"idiom": "ipad",
"filename": "Icon29.png",
"scale": "1x"
},
{
"size": "29x29",
"idiom": "ipad",
"filename": "Icon58.png",
"scale": "2x"
},
{
"size": "40x40",
"idiom": "ipad",
"filename": "Icon40.png",
"scale": "1x"
},
{
"size": "40x40",
"idiom": "ipad",
"filename": "Icon80.png",
"scale": "2x"
},
{
"size": "50x50",
"idiom": "ipad",
"filename": "Icon50.png",
"scale": "1x"
},
{
"size": "50x50",
"idiom": "ipad",
"filename": "Icon100.png",
"scale": "2x"
},
{
"size": "72x72",
"idiom": "ipad",
"filename": "Icon72.png",
"scale": "1x"
},
{
"size": "72x72",
"idiom": "ipad",
"filename": "Icon144.png",
"scale": "2x"
},
{
"size": "76x76",
"idiom": "ipad",
"filename": "Icon76.png",
"scale": "1x"
},
{
"size": "76x76",
"idiom": "ipad",
"filename": "Icon152.png",
"scale": "2x"
},
# If activated, we got a submission error:
# "Error ITMS-9000: Invalid Image Path - No image found at the path
# referenced under key 'CFBundleIcons': 'AppIcon120x120'"
# {
# "size": "120x120",
# "idiom": "car",
# "filename": "Icon120.png",
# "scale": "1x"
# },
{
"size": "24x24",
"idiom": "watch",
"scale": "2x",
"filename": "Icon48.png",
"role": "notificationCenter",
"subtype": "38mm"
},
{
"size": "27.5x27.5",
"idiom": "watch",
"scale": "2x",
"filename": "Icon55.png",
"role": "notificationCenter",
"subtype": "42mm"
},
{
"size": "29x29",
"idiom": "watch",
"filename": "Icon58.png",
"role": "companionSettings",
"scale": "2x"
},
{
"size": "29x29",
"idiom": "watch",
"filename": "Icon87.png",
"role": "companionSettings",
"scale": "3x"
},
{
"size": "40x40",
"idiom": "watch",
"scale": "2x",
"filename": "Icon80.png",
"role": "appLauncher",
"subtype": "38mm"
},
{
"size": "44x44",
"idiom": "watch",
"scale": "2x",
"filename": "Icon88.png",
"role": "longLook",
"subtype": "42mm"
},
{
"size": "86x86",
"idiom": "watch",
"scale": "2x",
"filename": "Icon172.png",
"role": "quickLook",
"subtype": "38mm"
},
{
"size": "44x44",
"idiom": "watch",
"scale": "2x",
"filename": "Icon88.png",
"role": "appLauncher",
"subtype": "40mm"
},
{
"size": "50x50",
"idiom": "watch",
"scale": "2x",
"filename": "Icon100.png",
"role": "appLauncher",
"subtype": "44mm"
},
{
"size": "98x98",
"idiom": "watch",
"scale": "2x",
"filename": "Icon196.png",
"role": "quickLook",
"subtype": "42mm"
},
{
"size": "108x108",
"idiom": "watch",
"scale": "2x",
"filename": "Icon216.png",
"role": "quickLook",
"subtype": "44mm"
},
{
"size": "16x16",
"idiom": "mac",
"filename": "Icon16.png",
"scale": "1x"
},
{
"size": "16x16",
"idiom": "mac",
"filename": "Icon32.png",
"scale": "2x"
},
{
"size": "32x32",
"idiom": "mac",
"filename": "Icon32.png",
"scale": "1x"
},
{
"size": "32x32",
"idiom": "mac",
"filename": "Icon64.png",
"scale": "2x"
},
{
"size": "128x128",
"idiom": "mac",
"filename": "Icon128.png",
"scale": "1x"
},
{
"size": "128x128",
"idiom": "mac",
"filename": "Icon256.png",
"scale": "2x"
},
{
"size": "256x256",
"idiom": "mac",
"filename": "Icon256.png",
"scale": "1x"
},
{
"size": "256x256",
"idiom": "mac",
"filename": "Icon512.png",
"scale": "2x"
},
{
"size": "512x512",
"idiom": "mac",
"filename": "Icon512.png",
"scale": "1x"
},
{
"size": "512x512",
"idiom": "mac",
"filename": "Icon1024.png",
"scale": "2x"
},
{
"size": "83.5x83.5",
"idiom": "ipad",
"filename": "Icon167.png",
"scale": "2x"
},
{
"idiom": "ios-marketing",
"size": "1024x1024",
"scale": "1x",
"filename": "Icon1024.png"
},
{
"idiom": "watch-marketing",
"size": "1024x1024",
"scale": "1x",
"filename": "Icon1024.png"
},
],
"info": {
"version": 1,
"author": "xcode"
},
# "properties": {
# "pre-rendered": True
# }
}
launchimage_json = {
"images": [
{
"extent": "full-screen",
"idiom": "iphone",
"subtype": "736h",
"filename": "Default1242x2208.png",
"minimum-system-version": "8.0",
"orientation": "portrait",
"scale": "3x"
},
{
"extent": "full-screen",
"idiom": "iphone",
"subtype": "736h",
"filename": "Default2208x1242.png",
"minimum-system-version": "8.0",
"orientation": "landscape",
"scale": "3x"
},
{
"extent": "full-screen",
"idiom": "iphone",
"subtype": "667h",
"filename": "Default750x1334.png",
"minimum-system-version": "8.0",
"orientation": "portrait",
"scale": "2x"
},
{
"orientation": "portrait",
"idiom": "iphone",
"extent": "full-screen",
"minimum-system-version": "7.0",
"filename": "Default640x960.png",
"scale": "2x"
},
{
"extent": "full-screen",
"idiom": "iphone",
"subtype": "retina4",
"filename": "Default640x1136.png",
"minimum-system-version": "7.0",
"orientation": "portrait",
"scale": "2x"
},
{
"orientation": "portrait",
"idiom": "ipad",
"extent": "full-screen",
"minimum-system-version": "7.0",
"filename": "Default768x1024.png",
"scale": "1x"
},
{
"orientation": "landscape",
"idiom": "ipad",
"extent": "full-screen",
"minimum-system-version": "7.0",
"filename": "Default1024x768.png",
"scale": "1x"
},
{
"orientation": "portrait",
"idiom": "ipad",
"extent": "full-screen",
"minimum-system-version": "7.0",
"filename": "Default1536x2048.png",
"scale": "2x"
},
{
"orientation": "landscape",
"idiom": "ipad",
"extent": "full-screen",
"minimum-system-version": "7.0",
"filename": "Default2048x1536.png",
"scale": "2x"
},
{
"orientation": "portrait",
"idiom": "iphone",
"extent": "full-screen",
"filename": "Default320x480.png",
"scale": "1x"
},
{
"orientation": "portrait",
"idiom": "iphone",
"extent": "full-screen",
"filename": "Default640x960.png",
"scale": "2x"
},
{
"orientation": "portrait",
"idiom": "iphone",
"extent": "full-screen",
"filename": "Default640x1136.png",
"subtype": "retina4",
"scale": "2x"
},
{
"orientation": "portrait",
"idiom": "ipad",
"extent": "full-screen",
"filename": "Default768x1024.png",
"scale": "1x"
},
{
"orientation": "landscape",
"idiom": "ipad",
"extent": "full-screen",
"filename": "Default1024x768.png",
"scale": "1x"
},
{
"orientation": "portrait",
"idiom": "ipad",
"extent": "full-screen",
"filename": "Default1536x2048.png",
"scale": "2x"
},
{
"orientation": "landscape",
"idiom": "ipad",
"extent": "full-screen",
"filename": "Default2048x1536.png",
"scale": "2x"
},
],
"info": {
"version": 1,
"author": "xcode"
}
}
def icon(image_xcassets, image_fn):
    """Generate all the possible Icon from a single image_fn

    Writes Contents.json into <image_xcassets>/AppIcon.appiconset, then
    renders every icon size listed in `options` via the macOS `sips`
    tool (see _generate). Entries with a non-None input reuse a larger
    icon generated by an earlier entry, so order matters.

    :param image_xcassets: path of the Images.xcassets directory.
    :param image_fn: path of the source icon image.
    """
    appicon_dir = join(image_xcassets, "AppIcon.appiconset")
    if not exists(appicon_dir):
        makedirs(appicon_dir)
    with open(join(appicon_dir, "Contents.json"), "w") as fd:
        json.dump(appicon_json, fd)

    # Each entry is (pixel size, input icon or None for image_fn, output name).
    options = (
        # iPhone
        # Spotlight - iOS 5,6
        # Settings - iOS 5-8
        # 29pt - 1x,2x,3x
        ("87", None, "Icon87.png"),
        ("58", None, "Icon58.png"),
        ("29", "Icon58.png", "Icon29.png"),

        # iPhone notification
        # 20pt - 2x,3x
        # ("40", None, "Icon40.png"),
        ("60", None, "Icon60.png"),

        # iPhone
        # Spotlight - iOS 7-8
        # 40pt 2x,3x
        ("120", None, "Icon120.png"),
        ("80", None, "Icon80.png"),

        # iPhone
        # App - iOS 5,6
        # 57pt 1x,2x
        ("114", None, "Icon114.png"),
        ("57", "Icon114.png", "Icon57.png"),

        # iPhone
        # App - iOS 7,8
        # 60pt 2x,3x
        ("180", None, "Icon180.png"),
        # ("120", None, "Icon120.png # duplicate"),

        # iPad
        # Notifications
        # 20pt 1x,2x
        ("20", "Icon80.png", "Icon20.png"),
        ("40", "Icon80.png", "Icon40.png"),

        # iPad
        # Settings iOS 5-8
        # ("58", None, "Icon58.png # duplicate"),
        # ("29", "Icon58.png", "Icon29.png # duplicate"),

        # iPad
        # Spotlight iOS 7,8
        # 40pt 1x,2x
        # ("80", None, "Icon80.png # duplicate"),
        ("40", "Icon80.png", "Icon40.png"),

        # iPad
        # Spotlight iOS 5,6
        # 50pt 1x,2x
        ("100", None, "Icon100.png"),
        ("50", "Icon100.png", "Icon50.png"),

        # iPad
        # App iOS 5,6
        # 72pt 1x,2x
        ("144", None, "Icon144.png"),
        ("72", "Icon144.png", "Icon72.png"),

        # iPad
        # App iOS 7,8
        # 76pt 1x,2x
        ("152", None, "Icon152.png"),
        ("76", "Icon152.png", "Icon76.png"),

        # iPad
        # App iOS 9
        # 83.5pt 2x
        ("167", None, "Icon167.png"),

        # CarPlay
        # App iOS 8
        # 120pt 1x
        # ("120", None, "Icon120.png # duplicate"),

        # Apple Watch
        # Notification Center
        # 38mm, 42mm
        ("48", None, "Icon48.png"),
        ("55", None, "Icon55.png"),

        # Apple Watch
        # Companion Settings
        # 29pt 2x,3x
        # ("58", None, "Icon58.png # duplicate"),
        # ("87", None, "Icon87.png # duplicate"),

        # Apple Watch
        # Home Screen (All)
        # Long Look (38mm)
        # ("80", None, "Icon80.png # duplicate"),

        # Apple Watch
        # Long Look (42mm)
        ("88", None, "Icon88.png"),

        # Apple Watch
        # Short Look
        # 38mm, 42mm, 44mm
        ("172", None, "Icon172.png"),
        ("196", None, "Icon196.png"),
        ("216", None, "Icon216.png"),

        # OS X
        # 512pt 1x,2x
        ("1024", None, "Icon1024.png"),
        ("512", "Icon1024.png", "Icon512.png"),

        # OS X
        # 256pt 1x,2x
        # ("512", "Icon1024.png", "Icon512.png # duplicate"),
        ("256", "Icon512.png", "Icon256.png"),

        # OS X
        # 128pt 1x,2x
        # ("256", "Icon512.png", "Icon256.png # duplicate"),
        ("128", "Icon256.png", "Icon128.png"),

        # OS X
        # 32pt 1x,2x
        ("64", "Icon128.png", "Icon64.png"),
        ("32", "Icon64.png", "Icon32.png"),

        # OS X
        # 16pt 1x,2x
        # ("32", "Icon64.png", "Icon32.png # duplicate"),
        ("16", "Icon32.png", "Icon16.png"))

    _generate("AppIcon.appiconset", image_xcassets, image_fn, options, icon=True)
def launchimage(image_xcassets, image_fn):
    """Generate every required Launch Image variant from the single
    source image ``image_fn`` into the LaunchImage.launchimage set.
    """
    target_dir = join(image_xcassets, "LaunchImage.launchimage")
    if not exists(target_dir):
        makedirs(target_dir)
    with open(join(target_dir, "Contents.json"), "w") as contents_fd:
        json.dump(launchimage_json, contents_fd)
    # Each entry is ("W H" pixel size, input image, output filename);
    # a None input means the master image_fn is used as the source.
    variants = (
        ("640 960", None, "Default640x960.png"),      # iPhone 3.5" @2x
        ("320 480", None, "Default320x480.png"),      # iPhone 3.5" @1x
        ("640 1136", None, "Default640x1136.png"),    # iPhone 4.0" @2x
        ("2208 1242", None, "Default2208x1242.png"),  # iPhone 5.5" @3x landscape
        ("1242 2208", None, "Default1242x2208.png"),  # iPhone 5.5" @3x portrait
        ("750 1334", None, "Default750x1334.png"),    # iPhone 4.7" @2x
        ("2048 1536", None, "Default2048x1536.png"),  # iPad @2x landscape
        ("1536 2048", None, "Default1536x2048.png"),  # iPad @2x portrait
        ("1024 768", None, "Default1024x768.png"),    # iPad @1x landscape
        ("768 1024", None, "Default768x1024.png"),    # iPad @1x portrait
    )
    _generate("LaunchImage.launchimage", image_xcassets, image_fn, variants)
def _buildimage(in_fn, out_fn, size, padcolor=None):
    """Fit the image from `in_fn` into a canvas of `size` and save it.

    The source image is shrunk (aspect ratio preserved) if it exceeds the
    target size, then centred on a solid background.

    :param in_fn: path of the source image.
    :param out_fn: path the composed RGB image is written to.
    :param size: (width, height) of the output canvas, in pixels.
    :param padcolor: background fill; defaults to the source image's
        top-left pixel so the padding blends with the artwork.
    """
    im = Image.open(in_fn)
    # honour the caller-supplied padding color; previously this parameter
    # was accepted but silently ignored
    if padcolor is None:
        # read the first left/bottom pixel as the background color
        # (assumes getpixel returns an RGB(A) tuple -- a palette/grayscale
        # source would yield a bare int and break the [:3] slice below)
        padcolor = im.getpixel((0, 0))
    # ensure the image fits in the destination size
    if im.size[0] > size[0] or im.size[1] > size[1]:
        # float() keeps this correct under Python 2 as well: int/int there
        # truncates, which could compute a scale factor of 1 for any
        # overshoot below 2x and leave the image larger than the canvas
        f = max(im.size[0] / float(size[0]), im.size[1] / float(size[1]))
        newsize = int(im.size[0] / f), int(im.size[1] / f)
        im = im.resize(newsize)
    # create final image and centre the (possibly resized) source on it
    outim = Image.new("RGB", size, padcolor[:3])
    x = (size[0] - im.size[0]) // 2
    y = (size[1] - im.size[1]) // 2
    outim.paste(im, (x, y))
    # save the image
    outim.save(out_fn)
def _generate(d, image_xcassets, image_fn, options, icon=False):
    """Produce every image listed in `options` inside the asset set `d`.

    Each option is (size, input, output).  A None input means the master
    `image_fn` is the source; otherwise the named, previously generated
    file inside the set is reused.  Icons are scaled with the `sips`
    command-line tool, launch images are composed with `_buildimage`.
    """
    for size_spec, src, dest in options:
        source = image_fn if src is None else join(image_xcassets, d, src)
        target = join(image_xcassets, d, dest)
        if icon:
            sips_args = [source, "-Z", size_spec, "--out", target]
            print("sips", " ".join(sips_args))
            sh.sips(*sips_args)
        else:
            # size_spec is "W H"; _buildimage wants integer dimensions
            dimensions = [int(part) for part in size_spec.split()]
            _buildimage(source, target, dimensions)
| 8,461 |
6,181 | /******************************************************************************
* The MIT License (MIT)
*
* Copyright (c) 2019-2021 <NAME>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
******************************************************************************/
#include "spirv_common.h"
#include "api/replay/replay_enums.h"
#include "common/common.h"
#include "common/formatting.h"
template <>
rdcstr DoStringise(const rdcspv::Id &el)
{
  // Ids are opaque wrappers around a 32-bit word; print the raw number.
  uint32_t numeric = 0;
  RDCCOMPILE_ASSERT(sizeof(el) == sizeof(numeric), "SPIR-V Id isn't 32-bit!");
  memcpy(&numeric, &el, sizeof(numeric));
  return StringFormat::Fmt("%u", numeric);
}
void rdcspv::Iter::nopRemove(size_t idx, size_t count)
{
  // In-place removal of `count` operand words starting at word `idx`:
  // the length encoded in the header word is reduced, trailing words are
  // shuffled down over the removed range, and the now-dead tail is
  // overwritten with OpNop words so the SPIR-V stream stays well formed.
  // count == 0 means "remove everything from idx to the end of the op".
  RDCASSERT(idx >= 1);    // word 0 is the opcode+length header and can't be removed
  size_t oldSize = size();
  if(count == 0)
    count = oldSize - idx;
  // reduce the size of this op
  word(0) = rdcspv::Operation::MakeHeader(opcode(), oldSize - count);
  if(idx + count < oldSize)
  {
    // move any words on the end into the middle, then nop them
    // NOTE(review): this loop relocates at most `count` trailing words; if
    // more than `count` words follow the removed range they are not all
    // shifted down.  Presumably callers only remove ranges at/near the
    // tail -- confirm against the call sites before relying on this for
    // arbitrary mid-op removal.
    for(size_t i = 0; i < count; i++)
    {
      word(idx + i) = word(idx + count + i);
      word(oldSize - i - 1) = OpNopWord;
    }
  }
  else
  {
    // nothing follows the removed range: just nop the removed words
    for(size_t i = 0; i < count; i++)
    {
      word(idx + i) = OpNopWord;
    }
  }
}
void rdcspv::Iter::nopRemove()
{
for(size_t i = 0, sz = size(); i < sz; i++)
word(i) = OpNopWord;
}
rdcspv::Iter &rdcspv::Iter::operator=(const Operation &op)
{
  // In-place overwrite of the current operation with `op`.  Growing is
  // impossible without relocating the rest of the stream, so it is
  // rejected with an error; shrinking pads the leftover words with NOPs.
  const size_t incoming = op.size();
  const size_t existing = size();
  if(incoming > existing)
  {
    RDCERR("Can't resize up from %zu to %zu", existing, incoming);
    return *this;
  }
  memcpy(&cur(), &op[0], sizeof(uint32_t) * RDCMIN(existing, incoming));
  // set remaining words to NOP if we reduced the size
  for(size_t w = incoming; w < existing; w++)
    word(w) = OpNopWord;
  return *this;
}
// Maps a SPIR-V execution model onto RenderDoc's ShaderStage enum.
// Models with no RenderDoc equivalent (kernel, mesh/task, ray tracing)
// fall through and return ShaderStage::Count as an "unknown" sentinel.
ShaderStage MakeShaderStage(rdcspv::ExecutionModel model)
{
  switch(model)
  {
    case rdcspv::ExecutionModel::Vertex: return ShaderStage::Vertex;
    case rdcspv::ExecutionModel::TessellationControl: return ShaderStage::Tess_Control;
    case rdcspv::ExecutionModel::TessellationEvaluation: return ShaderStage::Tess_Eval;
    case rdcspv::ExecutionModel::Geometry: return ShaderStage::Geometry;
    case rdcspv::ExecutionModel::Fragment: return ShaderStage::Fragment;
    case rdcspv::ExecutionModel::GLCompute: return ShaderStage::Compute;
    case rdcspv::ExecutionModel::Kernel:
    case rdcspv::ExecutionModel::TaskNV:
    case rdcspv::ExecutionModel::MeshNV:
    case rdcspv::ExecutionModel::RayGenerationNV:
    case rdcspv::ExecutionModel::IntersectionNV:
    case rdcspv::ExecutionModel::AnyHitNV:
    case rdcspv::ExecutionModel::ClosestHitNV:
    case rdcspv::ExecutionModel::MissNV:
    case rdcspv::ExecutionModel::CallableNV:
      // all of these are currently unsupported
      break;
    case rdcspv::ExecutionModel::Invalid:
    case rdcspv::ExecutionModel::Max: break;
  }
  // sentinel for unsupported/invalid models
  return ShaderStage::Count;
}
// Translates a SPIR-V BuiltIn decoration into RenderDoc's stage-agnostic
// ShaderBuiltin enum.  `stage` disambiguates built-ins whose meaning
// depends on the shader stage (currently only InvocationId).  Unmapped
// built-ins log a warning and come back as Undefined.
ShaderBuiltin MakeShaderBuiltin(ShaderStage stage, const rdcspv::BuiltIn el)
{
  // not complete, might need to expand system attribute list
  switch(el)
  {
    case rdcspv::BuiltIn::Position: return ShaderBuiltin::Position;
    case rdcspv::BuiltIn::PointSize: return ShaderBuiltin::PointSize;
    case rdcspv::BuiltIn::ClipDistance: return ShaderBuiltin::ClipDistance;
    case rdcspv::BuiltIn::CullDistance: return ShaderBuiltin::CullDistance;
    case rdcspv::BuiltIn::VertexId: return ShaderBuiltin::VertexIndex;
    case rdcspv::BuiltIn::InstanceId: return ShaderBuiltin::InstanceIndex;
    case rdcspv::BuiltIn::PrimitiveId: return ShaderBuiltin::PrimitiveIndex;
    case rdcspv::BuiltIn::InvocationId:
    {
      // same SPIR-V built-in, different meaning per stage: GS instance
      // index in geometry shaders, output control point in tessellation
      if(stage == ShaderStage::Geometry)
        return ShaderBuiltin::GSInstanceIndex;
      else
        return ShaderBuiltin::OutputControlPointIndex;
    }
    case rdcspv::BuiltIn::Layer: return ShaderBuiltin::RTIndex;
    case rdcspv::BuiltIn::ViewportIndex: return ShaderBuiltin::ViewportIndex;
    case rdcspv::BuiltIn::TessLevelOuter: return ShaderBuiltin::OuterTessFactor;
    case rdcspv::BuiltIn::TessLevelInner: return ShaderBuiltin::InsideTessFactor;
    case rdcspv::BuiltIn::PatchVertices: return ShaderBuiltin::PatchNumVertices;
    // FragCoord deliberately shares Position: both are "the position
    // input" for their respective stages.
    case rdcspv::BuiltIn::FragCoord: return ShaderBuiltin::Position;
    case rdcspv::BuiltIn::FrontFacing: return ShaderBuiltin::IsFrontFace;
    case rdcspv::BuiltIn::SampleId: return ShaderBuiltin::MSAASampleIndex;
    case rdcspv::BuiltIn::SamplePosition: return ShaderBuiltin::MSAASamplePosition;
    case rdcspv::BuiltIn::SampleMask: return ShaderBuiltin::MSAACoverage;
    case rdcspv::BuiltIn::FragDepth: return ShaderBuiltin::DepthOutput;
    case rdcspv::BuiltIn::VertexIndex: return ShaderBuiltin::VertexIndex;
    case rdcspv::BuiltIn::InstanceIndex: return ShaderBuiltin::InstanceIndex;
    case rdcspv::BuiltIn::BaseVertex: return ShaderBuiltin::BaseVertex;
    case rdcspv::BuiltIn::BaseInstance: return ShaderBuiltin::BaseInstance;
    case rdcspv::BuiltIn::DrawIndex: return ShaderBuiltin::DrawIndex;
    // NOTE(review): ViewIndex (multiview view index) is collapsed onto the
    // same value as ViewportIndex above -- confirm this is intentional and
    // not a copy/paste slip, since the two built-ins are semantically
    // distinct in SPIR-V.
    case rdcspv::BuiltIn::ViewIndex: return ShaderBuiltin::ViewportIndex;
    case rdcspv::BuiltIn::FragStencilRefEXT: return ShaderBuiltin::StencilReference;
    case rdcspv::BuiltIn::NumWorkgroups: return ShaderBuiltin::DispatchSize;
    case rdcspv::BuiltIn::GlobalInvocationId: return ShaderBuiltin::DispatchThreadIndex;
    case rdcspv::BuiltIn::WorkgroupId: return ShaderBuiltin::GroupIndex;
    case rdcspv::BuiltIn::WorkgroupSize: return ShaderBuiltin::GroupSize;
    case rdcspv::BuiltIn::LocalInvocationIndex: return ShaderBuiltin::GroupFlatIndex;
    case rdcspv::BuiltIn::LocalInvocationId: return ShaderBuiltin::GroupThreadIndex;
    case rdcspv::BuiltIn::TessCoord: return ShaderBuiltin::DomainLocation;
    case rdcspv::BuiltIn::PointCoord: return ShaderBuiltin::PointCoord;
    case rdcspv::BuiltIn::HelperInvocation: return ShaderBuiltin::IsHelper;
    case rdcspv::BuiltIn::SubgroupSize: return ShaderBuiltin::SubgroupSize;
    case rdcspv::BuiltIn::NumSubgroups: return ShaderBuiltin::NumSubgroups;
    case rdcspv::BuiltIn::SubgroupId: return ShaderBuiltin::SubgroupIndexInWorkgroup;
    case rdcspv::BuiltIn::SubgroupLocalInvocationId: return ShaderBuiltin::IndexInSubgroup;
    case rdcspv::BuiltIn::SubgroupEqMask: return ShaderBuiltin::SubgroupEqualMask;
    case rdcspv::BuiltIn::SubgroupGeMask: return ShaderBuiltin::SubgroupGreaterEqualMask;
    case rdcspv::BuiltIn::SubgroupGtMask: return ShaderBuiltin::SubgroupGreaterMask;
    case rdcspv::BuiltIn::SubgroupLeMask: return ShaderBuiltin::SubgroupLessEqualMask;
    case rdcspv::BuiltIn::SubgroupLtMask: return ShaderBuiltin::SubgroupLessMask;
    case rdcspv::BuiltIn::DeviceIndex: return ShaderBuiltin::DeviceIndex;
    case rdcspv::BuiltIn::FullyCoveredEXT: return ShaderBuiltin::IsFullyCovered;
    case rdcspv::BuiltIn::FragSizeEXT: return ShaderBuiltin::FragAreaSize;
    case rdcspv::BuiltIn::FragInvocationCountEXT: return ShaderBuiltin::FragInvocationCount;
    default: break;
  }
  RDCWARN("Couldn't map SPIR-V built-in %s to known built-in", ToStr(el).c_str());
  return ShaderBuiltin::Undefined;
}
| 2,879 |
5,821 | from metaflow_test import MetaflowTest, ExpectationFailed, steps, tag
class BasicUnboundedForeachTest(MetaflowTest):
    """Exercise the internal "unbounded foreach" plumbing end to end.

    ``split`` fans out over an ``InternalTestUnboundedForeachInput`` of two
    elements, each ``inner`` task checks that its foreach index and input
    stay consistent across steps, and ``join`` verifies both branches
    produced a result.  ``assert_equals`` is injected into the namespace
    by the metaflow_test harness, not imported here.
    """

    # Ordering priority within the generated test suite.
    PRIORITY = 1

    @steps(0, ['foreach-split-small'], required=True)
    def split(self):
        # Filled in lazily by the first inner step; used below to check
        # that the index does not change between steps.
        self.my_index = None
        from metaflow.plugins import InternalTestUnboundedForeachInput
        # "Unbounded" foreach input: its cardinality is opaque to the
        # scheduler, unlike a plain list.
        self.arr = InternalTestUnboundedForeachInput(range(2))

    @tag('unbounded_test_foreach_internal')
    @steps(0, ['foreach-inner-small'], required=True)
    def inner(self):
        # index must stay constant over multiple steps inside foreach
        if self.my_index is None:
            self.my_index = self.index
        assert_equals(self.my_index, self.index)
        # the task's input must be the arr element selected by its index
        assert_equals(self.input, self.arr[self.index])
        self.my_input = self.input

    @steps(0, ['foreach-join-small'], required=True)
    def join(self, inputs):
        # both foreach branches must arrive here exactly once
        got = sorted([inp.my_input for inp in inputs])
        assert_equals(list(range(2)), got)

    @steps(1, ['all'])
    def step_all(self):
        pass

    def check_results(self, flow, checker):
        run = checker.get_run()
        if type(checker).__name__ == 'CliCheck':
            # CliCheck doesn't support enlisting of tasks.
            assert(run is None)
        else:
            assert(run is not None)
            tasks = run['foreach_inner'].tasks()
            task_list = list(tasks)
            # two data tasks, plus exactly one control task for the foreach
            assert_equals(2, len(task_list))
            assert_equals(1, len(list(run['foreach_inner'].control_tasks())))
| 657 |
468 | <filename>ssd/demo_tensorflow.py<gh_stars>100-1000
import tensorflow as tf
import cv2
import numpy as np
def graph_create(graphpath):
    """Load a frozen TensorFlow inference graph and return the tensors
    needed for SSD detection.

    :param graphpath: path to a frozen_inference_graph.pb file.
    :return: [image_tensor, detection_boxes, detection_scores,
        detection_classes] tensors imported into the default graph.
    """
    # 'rb', not 'r': the frozen graph is a binary protobuf, and reading it
    # in text mode corrupts the bytes on some platforms and fails outright
    # under Python 3.
    with tf.gfile.FastGFile(graphpath, 'rb') as graphfile:
        graphdef = tf.GraphDef()
        graphdef.ParseFromString(graphfile.read())
    return tf.import_graph_def(graphdef, name='', return_elements=[
        'image_tensor:0', 'detection_boxes:0', 'detection_scores:0', 'detection_classes:0'])
image_tensor, box, score, cls = graph_create("ssd_mobilenet_v2_coco_2018_03_29/frozen_inference_graph.pb")
image_file = "images/004545.jpg"
with tf.Session() as sess:
    image = cv2.imread(image_file)
    image_data = np.expand_dims(image, axis=0).astype(np.uint8)
    b, s, c = sess.run([box, score, cls], {image_tensor: image_data})
    # batch size is 1: take the first (only) entry of each output
    boxes = b[0]
    conf = s[0]
    clses = c[0]
    #writer = tf.summary.FileWriter('debug', sess.graph)
    # only the first 8 detections are inspected/drawn
    for i in range(8):
        bx = boxes[i]
        # print() calls (not Python-2 print statements) so the script runs
        # on both Python 2 and Python 3
        print(boxes[i])
        print(conf[i])
        print(clses[i])
        if conf[i] < 0.5:
            continue
        h = image.shape[0]
        w = image.shape[1]
        # boxes are normalized [ymin, xmin, ymax, xmax]; scale to pixels
        p1 = (int(w * bx[1]), int(h * bx[0]))
        p2 = (int(w * bx[3]), int(h * bx[2]))
        cv2.rectangle(image, p1, p2, (0, 255, 0))
    cv2.imshow("mobilenet-ssd", image)
    cv2.waitKey(0)
| 669 |
namespace Envoy {
// Minimal translation unit that exercises Message::SerializeAsString() on a
// well-known protobuf type (FieldMask).
// NOTE(review): presumably this file exists as a compile/lint target for
// tooling that keys on this API usage -- confirm against the surrounding
// build rules before changing or removing it.
void use_serialize_as_string() {
  google::protobuf::FieldMask mask;
  const std::string key = mask.SerializeAsString();
}
} // namespace Envoy
| 55 |
5,169 | <filename>Specs/1/9/b/JPButtonCategory/1.0.1/JPButtonCategory.podspec.json<gh_stars>1000+
{
"name": "JPButtonCategory",
"version": "1.0.1",
"summary": "button category.",
"homepage": "https://github.com/baiyidjp/JPButtonCategory",
"license": "MIT",
"authors": "baiyi",
"platforms": {
"ios": "8.0"
},
"source": {
"git": "https://github.com/baiyidjp/JPButtonCategory.git",
"tag": "1.0.1"
},
"source_files": "JPButtonCategory/*.{h,m}",
"requires_arc": true
}
| 212 |
1,299 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.tika.pipes.fetcher.gcs;
import static org.apache.tika.config.TikaConfig.mustNotBeEmpty;
import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Path;
import java.util.Map;
import com.google.cloud.storage.Blob;
import com.google.cloud.storage.BlobId;
import com.google.cloud.storage.Storage;
import com.google.cloud.storage.StorageOptions;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.tika.config.Field;
import org.apache.tika.config.Initializable;
import org.apache.tika.config.InitializableProblemHandler;
import org.apache.tika.config.Param;
import org.apache.tika.exception.TikaConfigException;
import org.apache.tika.exception.TikaException;
import org.apache.tika.io.TemporaryResources;
import org.apache.tika.io.TikaInputStream;
import org.apache.tika.metadata.Metadata;
import org.apache.tika.pipes.fetcher.AbstractFetcher;
/**
 * Fetches files from google cloud storage. Must set projectId and bucket via the config.
 *
 * <p>Configuration is applied once via the {@code @Field} setters before
 * {@link #initialize(Map)} builds the {@link Storage} client; the client
 * itself is thread-safe for concurrent fetches.
 */
public class GCSFetcher extends AbstractFetcher implements Initializable {

    /** Prefix for metadata keys copied from the GCS object's user metadata. */
    private static final String PREFIX = "gcs";
    private static final Logger LOGGER = LoggerFactory.getLogger(GCSFetcher.class);
    private String projectId;
    private String bucket;
    private boolean extractUserMetadata = true;
    private Storage storage;
    private boolean spoolToTemp = true;

    /**
     * Fetches the object named {@code fetchKey} from the configured bucket.
     *
     * @param fetchKey object name within the bucket
     * @param metadata decorated with the object's user metadata (keys
     *                 prefixed {@code gcs:}) when extractUserMetadata is on
     * @return the object's content, spooled to a local temp file unless
     *         spoolToTemp has been disabled
     * @throws IOException if the object does not exist or the storage call fails
     */
    @Override
    public InputStream fetch(String fetchKey, Metadata metadata) throws TikaException, IOException {
        LOGGER.debug("about to fetch fetchkey={} from bucket ({})", fetchKey, bucket);

        Blob blob;
        try {
            blob = storage.get(BlobId.of(bucket, fetchKey));
        } catch (Exception e) {
            throw new IOException("gcs storage exception", e);
        }
        //Storage.get() returns null (rather than throwing) for a missing
        //object; fail with a descriptive exception instead of the
        //NullPointerException the code below would otherwise hit.
        if (blob == null) {
            throw new IOException(
                    "gcs object not found: bucket=" + bucket + " fetchKey=" + fetchKey);
        }

        try {
            if (extractUserMetadata) {
                if (blob.getMetadata() != null) {
                    for (Map.Entry<String, String> e : blob.getMetadata().entrySet()) {
                        metadata.add(PREFIX + ":" + e.getKey(), e.getValue());
                    }
                }
            }
            if (!spoolToTemp) {
                return TikaInputStream.get(blob.getContent());
            } else {
                //spool to a local temp file; it is deleted when the returned
                //stream's TemporaryResources are closed
                long start = System.currentTimeMillis();
                TemporaryResources tmpResources = new TemporaryResources();
                Path tmp = tmpResources.createTempFile();
                blob.downloadTo(tmp);
                TikaInputStream tis = TikaInputStream.get(tmp, metadata, tmpResources);
                long elapsed = System.currentTimeMillis() - start;
                LOGGER.debug("took {} ms to copy to local tmp file", elapsed);
                return tis;
            }
        } catch (Exception e) {
            throw new IOException("gcs storage exception", e);
        }
    }

    @Field
    public void setSpoolToTemp(boolean spoolToTemp) {
        this.spoolToTemp = spoolToTemp;
    }

    @Field
    public void setProjectId(String projectId) {
        this.projectId = projectId;
    }

    @Field
    public void setBucket(String bucket) {
        this.bucket = bucket;
    }

    /**
     * Whether or not to extract user metadata from the S3Object
     *
     * @param extractUserMetadata
     */
    @Field
    public void setExtractUserMetadata(boolean extractUserMetadata) {
        this.extractUserMetadata = extractUserMetadata;
    }

    //TODO: parameterize extracting other blob metadata, eg. md5, crc, etc.

    /**
     * This initializes the gcs storage client.
     *
     * @param params params to use for initialization
     * @throws TikaConfigException
     */
    @Override
    public void initialize(Map<String, Param> params) throws TikaConfigException {
        //params have already been set...ignore them
        //TODO -- add other params to the builder as needed
        storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService();
    }

    @Override
    public void checkInitialization(InitializableProblemHandler problemHandler)
            throws TikaConfigException {
        mustNotBeEmpty("bucket", this.bucket);
        mustNotBeEmpty("projectId", this.projectId);
    }
}
| 1,787 |
376 | #pragma once
#include <Chip/CM4/Freescale/MK70F15/FTFE_FlashConfig.hpp>
#include <Chip/CM4/Freescale/MK70F15/AIPS0.hpp>
#include <Chip/CM4/Freescale/MK70F15/AIPS1.hpp>
#include <Chip/CM4/Freescale/MK70F15/AXBS.hpp>
#include <Chip/CM4/Freescale/MK70F15/DMA.hpp>
#include <Chip/CM4/Freescale/MK70F15/FB.hpp>
#include <Chip/CM4/Freescale/MK70F15/MPU.hpp>
#include <Chip/CM4/Freescale/MK70F15/FMC.hpp>
#include <Chip/CM4/Freescale/MK70F15/FTFE.hpp>
#include <Chip/CM4/Freescale/MK70F15/DMAMUX0.hpp>
#include <Chip/CM4/Freescale/MK70F15/DMAMUX1.hpp>
#include <Chip/CM4/Freescale/MK70F15/CAN0.hpp>
#include <Chip/CM4/Freescale/MK70F15/CAN1.hpp>
#include <Chip/CM4/Freescale/MK70F15/SPI0.hpp>
#include <Chip/CM4/Freescale/MK70F15/SPI1.hpp>
#include <Chip/CM4/Freescale/MK70F15/SPI2.hpp>
#include <Chip/CM4/Freescale/MK70F15/I2S0.hpp>
#include <Chip/CM4/Freescale/MK70F15/I2S1.hpp>
#include <Chip/CM4/Freescale/MK70F15/CRC.hpp>
#include <Chip/CM4/Freescale/MK70F15/USBHS.hpp>
#include <Chip/CM4/Freescale/MK70F15/USBDCD.hpp>
#include <Chip/CM4/Freescale/MK70F15/PDB0.hpp>
#include <Chip/CM4/Freescale/MK70F15/PIT.hpp>
#include <Chip/CM4/Freescale/MK70F15/FTM0.hpp>
#include <Chip/CM4/Freescale/MK70F15/FTM1.hpp>
#include <Chip/CM4/Freescale/MK70F15/FTM2.hpp>
#include <Chip/CM4/Freescale/MK70F15/FTM3.hpp>
#include <Chip/CM4/Freescale/MK70F15/ADC0.hpp>
#include <Chip/CM4/Freescale/MK70F15/ADC1.hpp>
#include <Chip/CM4/Freescale/MK70F15/ADC2.hpp>
#include <Chip/CM4/Freescale/MK70F15/ADC3.hpp>
#include <Chip/CM4/Freescale/MK70F15/RTC.hpp>
#include <Chip/CM4/Freescale/MK70F15/RFVBAT.hpp>
#include <Chip/CM4/Freescale/MK70F15/LPTMR0.hpp>
#include <Chip/CM4/Freescale/MK70F15/RFSYS.hpp>
#include <Chip/CM4/Freescale/MK70F15/TSI0.hpp>
#include <Chip/CM4/Freescale/MK70F15/SIM.hpp>
#include <Chip/CM4/Freescale/MK70F15/PORTA.hpp>
#include <Chip/CM4/Freescale/MK70F15/PORTB.hpp>
#include <Chip/CM4/Freescale/MK70F15/PORTC.hpp>
#include <Chip/CM4/Freescale/MK70F15/PORTD.hpp>
#include <Chip/CM4/Freescale/MK70F15/PORTE.hpp>
#include <Chip/CM4/Freescale/MK70F15/PORTF.hpp>
#include <Chip/CM4/Freescale/MK70F15/WDOG.hpp>
#include <Chip/CM4/Freescale/MK70F15/EWM.hpp>
#include <Chip/CM4/Freescale/MK70F15/CMT.hpp>
#include <Chip/CM4/Freescale/MK70F15/MCG.hpp>
#include <Chip/CM4/Freescale/MK70F15/OSC0.hpp>
#include <Chip/CM4/Freescale/MK70F15/OSC1.hpp>
#include <Chip/CM4/Freescale/MK70F15/I2C0.hpp>
#include <Chip/CM4/Freescale/MK70F15/I2C1.hpp>
#include <Chip/CM4/Freescale/MK70F15/UART0.hpp>
#include <Chip/CM4/Freescale/MK70F15/UART1.hpp>
#include <Chip/CM4/Freescale/MK70F15/UART2.hpp>
#include <Chip/CM4/Freescale/MK70F15/UART3.hpp>
#include <Chip/CM4/Freescale/MK70F15/UART4.hpp>
#include <Chip/CM4/Freescale/MK70F15/UART5.hpp>
#include <Chip/CM4/Freescale/MK70F15/USB0.hpp>
#include <Chip/CM4/Freescale/MK70F15/CMP0.hpp>
#include <Chip/CM4/Freescale/MK70F15/CMP1.hpp>
#include <Chip/CM4/Freescale/MK70F15/CMP2.hpp>
#include <Chip/CM4/Freescale/MK70F15/CMP3.hpp>
#include <Chip/CM4/Freescale/MK70F15/VREF.hpp>
#include <Chip/CM4/Freescale/MK70F15/LLWU.hpp>
#include <Chip/CM4/Freescale/MK70F15/PMC.hpp>
#include <Chip/CM4/Freescale/MK70F15/SMC.hpp>
#include <Chip/CM4/Freescale/MK70F15/RCM.hpp>
#include <Chip/CM4/Freescale/MK70F15/RNG.hpp>
#include <Chip/CM4/Freescale/MK70F15/NFC.hpp>
#include <Chip/CM4/Freescale/MK70F15/DDR.hpp>
#include <Chip/CM4/Freescale/MK70F15/SDHC.hpp>
#include <Chip/CM4/Freescale/MK70F15/LCDC.hpp>
#include <Chip/CM4/Freescale/MK70F15/ENET.hpp>
#include <Chip/CM4/Freescale/MK70F15/DAC0.hpp>
#include <Chip/CM4/Freescale/MK70F15/DAC1.hpp>
#include <Chip/CM4/Freescale/MK70F15/PTA.hpp>
#include <Chip/CM4/Freescale/MK70F15/PTB.hpp>
#include <Chip/CM4/Freescale/MK70F15/PTC.hpp>
#include <Chip/CM4/Freescale/MK70F15/PTD.hpp>
#include <Chip/CM4/Freescale/MK70F15/PTE.hpp>
#include <Chip/CM4/Freescale/MK70F15/PTF.hpp>
#include <Chip/CM4/Freescale/MK70F15/SystemControl.hpp>
#include <Chip/CM4/Freescale/MK70F15/SysTick.hpp>
#include <Chip/CM4/Freescale/MK70F15/NVIC.hpp>
#include <Chip/CM4/Freescale/MK70F15/MCM.hpp>
#include <Chip/CM4/Freescale/MK70F15/CAU.hpp>
#include <Chip/CM4/Freescale/MK70F15/LMEM.hpp>
| 2,144 |
// Currently highlighted "best selling" node, or NULL.  Shares the node
// layout used by book_list (see add_book for the byte offsets).
void *best_selling_book;
// Head of the singly linked list of book nodes; the first 8 bytes of each
// node are its `next` pointer.
int64_t book_list;
// Decompiler output: one-time process setup -- disable stdio buffering,
// arm a watchdog alarm, print the banner passed via a1, then the credits.
int initialize(int64_t a1)
{
  setvbuf(stdout, 0LL, 2, 0LL);   // 2 == _IONBF: unbuffered stdout
  setvbuf(stderr, 0LL, 2, 0LL);   // unbuffered stderr
  alarm(0x1F4u);                  // SIGALRM after 500s ends the process
  printf("%s", a1);               // a1 is a char* smuggled through int64_t
  return puts("Designed by <NAME>(quangnh89), a member of PiggyBird.\nMy blog: https://develbranch.com");
}
// Prompt (a1, when non-NULL, is a char* label) and read a decimal integer
// from stdin.  Any scanf failure terminates the whole process.
int64_t get_number(int64_t a1)
{
  unsigned int v2; // [rsp+14h] [rbp-Ch]
  uint64_t v3; // [rsp+18h] [rbp-8h]  (stack-canary slot; unused below)
  if ( a1 )
    printf("%s:", a1);
  _fpurge(stdin);                  // discard any pending unread input first
  if ( scanf("%d%*c", &v2) <= 0 )  // %*c consumes the trailing newline
  {
    puts("invalid number");
    exit(0);
  }
  return v2;                       // zero-extended into the 64-bit return
}
// Print the top-level menu and return the user's numeric selection.
int64_t show_menu()
{
  puts("=====================");
  puts("==    Book Store   ==");
  puts("1. Add");
  puts("2. Edit");
  puts("3. Remove");
  puts("4. List");
  puts("5. Exit");
  return get_number((int64_t)"Your choice");
}
// Wrap the string a1 in ANSI yellow escape codes (best-seller styling)
// and return a heap copy.  NOTE(review): the sprintf into the stack
// buffer is unbounded; the decompiled frame offsets suggest roughly 0x400
// bytes for `s`, so a sufficiently long brief would smash the stack.
char *print_best_selling_book(int64_t a1)
{
  char s; // [rsp+10h] [rbp-410h]
  uint64_t v3; // [rsp+418h] [rbp-8h]
  sprintf(&s, "\x1B[33m%s\x1B[0m", a1);
  return strdup(&s);   // caller is expected to free() the result
}
// Same as print_best_selling_book but with ANSI green (regular styling);
// the same unbounded-sprintf caveat applies.
char *print_best_selling_book_0(int64_t a1)
{
  char s; // [rsp+10h] [rbp-410h]
  uint64_t v3; // [rsp+418h] [rbp-8h]
  sprintf(&s, "\x1B[32m%s\x1B[0m", a1);
  return strdup(&s);   // caller is expected to free() the result
}
// Make `new_book` the highlighted best seller.  Node byte offsets used
// here: +8 brief pointer, +49 one-byte reference count, +50 unaligned
// 8-byte pointer to the colorizing print function.  The previous best
// seller is demoted to the green printer, its refcount dropped, and it is
// freed outright when the count reaches zero.
int64_t set_best_selling_book(int64_t new_book)
{
  int64_t result; // rax
  if ( best_selling_book )
  {
    --*((char *)best_selling_book + 49);
    *(int64_t *)((char *)best_selling_book + 50) = print_best_selling_book_0;
    if ( !*((char *)best_selling_book + 49) )
    {
      // refcount hit zero: release the brief and the node itself.  The
      // node is NOT unlinked from book_list here -- if it is still on the
      // list, the list now holds a dangling pointer.
      free(*((void **)best_selling_book + 1));
      free(best_selling_book);
    }
  }
  best_selling_book = (void *)new_book;
  ++*(char *)(new_book + 49);   // the best-seller slot holds a reference
  result = new_book;
  *(int64_t *)(new_book + 50) = print_best_selling_book;   // yellow printer
  return result;
}
// Read up to a2 characters (until newline or EOF) into the buffer at a1
// and NUL-terminate; returns a pointer to the terminator.  EOF aborts the
// process.  Note: the terminator may be written at a1 + a2, so the buffer
// must hold a2 + 1 bytes -- callers that allocate exactly a2 bytes (see
// edit_book) get a one-byte heap overflow.  Also, because v3 is a plain
// char, an input byte of 0xFF compares equal to -1 and is treated as EOF.
char *get_string(int64_t a1, unsigned int a2)
{
  char *result; // rax
  char v3; // [rsp+1Bh] [rbp-5h]
  unsigned int i; // [rsp+1Ch] [rbp-4h]
  _fpurge(stdin);   // drop pending input before reading
  for ( i = 0; i < a2; ++i )
  {
    v3 = fgetc(stdin);
    if ( v3 == -1 )
      exit(0);
    if ( v3 == 10 )   // newline terminates the read (and is discarded)
      break;
    *(char *)(a1 + i) = v3;
  }
  result = (char *)(i + a1);
  *result = 0;
  return result;
}
// Create a new book node and append it to book_list.  Node layout: +0
// next pointer, +8 brief pointer, +16 title (31 bytes copied), +48
// one-byte index of the referenced book, +49 one-byte refcount, +50
// unaligned pointer to the colorizing print function (node is 58 bytes).
uint64_t add_book()
{
  int64_t v0; // rsi
  char v2; // [rsp+8h] [rbp-248h]
  int64_t brief_size; // [rsp+Ch] [rbp-244h]
  int64_t i; // [rsp+10h] [rbp-240h]
  int64_t book; // [rsp+18h] [rbp-238h]
  int64_t *j; // [rsp+20h] [rbp-230h]
  int64_t *new_book; // [rsp+28h] [rbp-228h]
  char s; // [rsp+30h] [rbp-220h]
  char temp; // [rsp+40h] [rbp-210h]
  uint64_t v10; // [rsp+248h] [rbp-8h]
  new_book = malloc(58uLL);
  if ( new_book )
  {
    *((char *)new_book + 48) = -1;   // -1 == no referenced book
    *((char *)new_book + 49) = 1;    // initial refcount: the list itself
    printf("Title:");
    get_string((int64_t)&temp, 511u);
    // duplicate-title scan.  NOTE(review): on a duplicate the node is
    // free()d but there is no return -- execution falls through and every
    // write to new_book below is a use-after-free, and the freed chunk is
    // still linked into the list at the end.
    for ( i = book_list; i; i = *(int64_t *)i )
    {
      if ( !strncmp(&temp, (const char *)(i + 16), 32uLL) )
      {
        puts("Item is duplicated.");
        free(new_book);
      }
    }
    // copies 31 bytes while comparisons above use 32 -- the title field
    // is not guaranteed NUL-terminated by strncpy for 31-char titles
    strncpy((char *)new_book + 16, &temp, 31uLL);
    brief_size = (unsigned int)get_number((int64_t)"Enter brief size");
    if ( (signed int)brief_size <= 0 || (signed int)brief_size > 1024 )
      exit(0);
    new_book[1] = malloc((signed int)brief_size + 1);   // +1 for the NUL
    printf("Enter brief:", &temp);
    get_string(new_book[1], brief_size);
    printf("Reference book title:", (unsigned int)brief_size);
    v0 = 511LL;
    get_string((int64_t)&temp, 511u);
    if ( temp )
    {
      // record the referenced book as its positional index in the list
      // (a single byte at +48) and bump that book's refcount
      book = book_list;
      v2 = 0;
      while ( book )
      {
        v0 = book + 16;
        if ( !strncmp(&temp, (const char *)(book + 16), 32uLL) )
        {
          *((char *)new_book + 48) = v2;
          ++*(char *)(book + 49);
          break;
        }
        ++v2;
        book = *(int64_t *)book;
      }
    }
    *new_book = 0LL;   // new node terminates the list
    if ( book_list )
    {
      // walk to the tail and append
      for ( j = (int64_t *)book_list; *j; j = (int64_t *)*j )
        ;
      *j = new_book;
    }
    else
    {
      book_list = (int64_t)new_book;
    }
    *(int64_t *)((char *)new_book + 50) = print_best_selling_book_0;
    printf("Best Selling? (Y/N)", v0);
    fgets(&s, 5, stdin);
    if ( s == 'Y' || s == 'y' )
      set_best_selling_book((int64_t)new_book);
    puts("a book is added.");
  }
}
// Look a book up by title and let the user rewrite its title and brief.
uint64_t edit_book()
{
  int64_t v0; // rsi
  int brief_size; // [rsp+4h] [rbp-23Ch]
  int64_t book; // [rsp+8h] [rbp-238h]
  char s; // [rsp+10h] [rbp-230h]
  char temp; // [rsp+20h] [rbp-220h]
  uint64_t v6; // [rsp+228h] [rbp-18h]
  printf("Old title:");
  get_string((int64_t)&temp, 0x1FFu);
  // NOTE(review): when the title is not found, "Book not found." is
  // printed but there is no break/return -- the next iteration
  // dereferences book == 0 in the strncmp and crashes.
  for ( book = book_list; ; book = *(int64_t *)book )
  {
    if ( !book )
    {
      puts("Book not found.");
    }
    if ( !strncmp(&temp, (const char *)(book + 16), 32uLL) )
      break;
  }
  printf("New title:", book + 16);
  v0 = 32LL;
  get_string(book + 16, 0x20u);   // writes up to 32 chars + NUL into a 32-byte field
  brief_size = get_number((int64_t)"Enter brief size");
  if ( brief_size < 0 || brief_size > 1024 )
    exit(0);
  if ( brief_size > 0 )
  {
    // NOTE(review): the comparison is against strlen of the current
    // brief, not the allocation size, and the replacement is
    // malloc(brief_size) while get_string below writes brief_size bytes
    // plus a NUL -- a one-byte heap overflow.  (The int/size_t compare is
    // also signed/unsigned mixed.)
    if ( brief_size > strlen(*(const char **)(book + 8)) )
    {
      free(*(void **)(book + 8));
      *(int64_t *)(book + 8) = malloc(brief_size);
    }
    printf("Enter brief:", 32LL);
    v0 = (unsigned int)brief_size;
    get_string(*(int64_t *)(book + 8), brief_size);
  }
  printf("Best Selling? (Y/N)", v0);
  fgets(&s, 5, stdin);
  if ( s == 'Y' || s == 'y' )
    set_best_selling_book(book);
  puts("Entry is edited.");
}
// Drop one reference from the node a1; when its refcount (byte +49)
// reaches zero, unlink it from book_list and free the brief (+8) and the
// node.  NOTE(review): best_selling_book is never cleared here, so
// deleting the highlighted node leaves a dangling pointer that list_book
// will later dereference (use-after-free).
void delete_book(void **a1)
{
  void **v1; // [rsp+10h] [rbp-10h]   predecessor node while walking
  void **ptr; // [rsp+18h] [rbp-8h]
  v1 = 0LL;
  for ( ptr = (void **)book_list; ptr; ptr = (void **)*ptr )
  {
    if ( ptr == a1 )
    {
      if ( !-- * ((char *)ptr + 49) )
      {
        // unlink: fix the predecessor's next, or advance the list head
        if ( v1 )
          *v1 = *ptr;
        else
          book_list = *(int64_t *)book_list;
        free(ptr[1]);   // brief buffer
        free(ptr);
      }
      return;
    }
    v1 = ptr;
  }
}
// Remove a book by title, also dropping the reference it holds on its
// "reference book" (located by the positional index byte at +48).
uint64_t remove_book()
{
  int v1; // [rsp+4h] [rbp-22Ch]
  void **v2; // [rsp+8h] [rbp-228h]    referenced node, if any
  char *i; // [rsp+10h] [rbp-220h]
  void **v4; // [rsp+18h] [rbp-218h]
  char temp; // [rsp+20h] [rbp-210h]
  uint64_t v6; // [rsp+228h] [rbp-8h]
  v2 = 0LL;
  printf("Title:");
  get_string((int64_t)&temp, 0x1FFu);
  // NOTE(review): same missing-return pattern as edit_book -- after
  // printing "Book not found." the loop continues and dereferences
  // i == NULL in the strncmp.
  for ( i = (char *)book_list; ; i = *(char **)i )
  {
    if ( !i )
    {
      puts("Book not found.");
    }
    if ( !strncmp(&temp, i + 16, 32uLL) )
      break;
  }
  // resolve the referenced book from the index byte stored at +48
  // (0xFF/-1 means "none" and will simply never match, leaving v2 NULL)
  v4 = (void **)book_list;
  v1 = 0;
  while ( v4 )
  {
    if ( (unsigned __int8)i[48] == v1 )
    {
      v2 = v4;
      break;
    }
    ++v1;
    v4 = (void **)*v4;
  }
  delete_book((void **)i);
  delete_book(v2);   // delete_book(NULL) walks the list and is a no-op
  puts("Entry is removed.");
}
// Print a table of all books, colorizing each brief through the print
// function pointer stored at node offset +50, then print the current
// best seller (if any).  NOTE(review): if the best-seller node was freed
// via delete_book, the trailing section dereferences freed memory.
int list_book()
{
  void *ptr; // ST10_8
  int result; // eax
  void *v2; // ST18_8
  int v3; // [rsp+4h] [rbp-1Ch]   running row number
  int64_t i; // [rsp+8h] [rbp-18h]
  v3 = 0;
  puts("|----+-------------------------------+-----------------------------------------|");
  printf("|%4s|%32s|%-40s\n", "ID", "Title", "Brief");
  puts("|----+-------------------------------+-----------------------------------------|");
  for ( i = book_list; i; i = *(int64_t *)i )
  {
    // call the per-node colorizer on the brief; it returns a strdup'd
    // string that must be freed here
    ptr = (void *)(*(int64_t (* *)(int64_t))(i + 50))(*(int64_t *)(i + 8));
    printf("|%04d|%32s|%s", (unsigned int)++v3, i + 16, ptr);
    puts(&byte_401977);
    free(ptr);
  }
  puts("|----+-------------------------------+-----------------------------------------|");
  result = (signed int)best_selling_book;   // decompiler artifact of the NULL test
  if ( best_selling_book )
  {
    puts("| The best selling book today |");
    puts("|----+-------------------------------+-----------------------------------------|");
    v2 = (void *)(*(int64_t (* *)(int64_t))((char *)best_selling_book + 50))(*((int64_t *)best_selling_book + 1));
    printf("|%04d|%32s|%s", (unsigned int)(v3 + 1), (char *)best_selling_book + 16, v2);
    puts(&byte_401977);
    free(v2);
    result = puts("|----+-------------------------------+-----------------------------------------|");
  }
  return result;
}
// Menu-driven main loop: dispatch on the user's choice until "Exit".
signed int64_t main(int64_t a1, char **a2, char **a3)
{
  initialize((int64_t)banner_string);
  while ( 1 )
  {
    switch ( (unsigned int)show_menu() )
    {
      case 1u:
        add_book();
        break;
      case 2u:
        edit_book();
        break;
      case 3u:
        remove_book();
        break;
      case 4u:
        list_book();
        break;
      case 5u:
        return 42LL;   // distinctive exit status
      default:
        continue;      // unrecognized choice: redisplay the menu
    }
  }
}
| 5,049 |
679 | /**************************************************************
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*************************************************************/
// MARKER(update_precomp.py): autogen include statement, do not remove
#include "precompiled_sw.hxx"
#include <ctype.h>
#include <float.h>
#include <hintids.hxx>
#include <hints.hxx> // fuer SwAttrSetChg
#include <editeng/lrspitem.hxx>
#include <editeng/shaditem.hxx>
#include <editeng/adjitem.hxx>
#include <editeng/colritem.hxx>
#include <sfx2/linkmgr.hxx>
#include <editeng/boxitem.hxx>
#include <fmtfsize.hxx>
#include <fmtornt.hxx>
#include <fmtpdsc.hxx>
#include <fldbas.hxx>
#include <fmtfld.hxx>
#include <frmatr.hxx>
#include <doc.hxx>
#include <docary.hxx> // fuer RedlineTbl()
#include <frame.hxx>
#include <swtable.hxx>
#include <ndtxt.hxx>
#include <tabcol.hxx>
#include <tabfrm.hxx>
#include <cellfrm.hxx>
#include <rowfrm.hxx>
#include <swserv.hxx>
#include <expfld.hxx>
#include <mdiexp.hxx>
#include <cellatr.hxx>
#include <txatbase.hxx>
#include <htmltbl.hxx>
#include <swtblfmt.hxx>
#include <ndindex.hxx>
#include <tblrwcl.hxx>
#include <shellres.hxx>
#include <viewsh.hxx>
#include <redline.hxx>
#include <list>
#include <switerator.hxx>
#ifndef DBG_UTIL
#define CHECK_TABLE(t)
#else
#ifdef DEBUG
#define CHECK_TABLE(t) (t).CheckConsistency();
#else
#define CHECK_TABLE(t)
#endif
#endif
using namespace com::sun::star;
TYPEINIT1( SwTable, SwClient );
TYPEINIT1( SwTableBox, SwClient );
TYPEINIT1( SwTableLine, SwClient );
//UUUU
TYPEINIT1( SwTableFmt, SwFrmFmt );
// SwTableFmt: frame-format specialisation for a whole table.  Both
// constructors simply forward to SwFrmFmt, fixing the attribute
// which-range to aTableSetRange.
SwTableFmt::SwTableFmt(SwAttrPool& rPool,const sal_Char* pFmtNm, SwFrmFmt *pDrvdFrm)
    : SwFrmFmt(rPool,pFmtNm,pDrvdFrm,RES_FRMFMT,aTableSetRange)
{
}

SwTableFmt::SwTableFmt(SwAttrPool& rPool,const String &rFmtNm, SwFrmFmt *pDrvdFrm)
    : SwFrmFmt(rPool,rFmtNm,pDrvdFrm,RES_FRMFMT,aTableSetRange)
{
}

SwTableFmt::~SwTableFmt()
{
}

//UUUU Do not support for table frames - may change later if support will be added
bool SwTableFmt::supportsFullDrawingLayerFillAttributeSet() const
{
    return false;
}
//UUUU
TYPEINIT1( SwTableBoxFmt, SwFrmFmt );
// SwTableBoxFmt: frame-format specialisation for a single table cell
// (box); forwards to SwFrmFmt with the box attribute range.
SwTableBoxFmt::SwTableBoxFmt(SwAttrPool& rPool,const sal_Char* pFmtNm, SwFrmFmt *pDrvdFrm)
    : SwFrmFmt(rPool,pFmtNm,pDrvdFrm,RES_FRMFMT,aTableBoxSetRange)
{
}

SwTableBoxFmt::SwTableBoxFmt(SwAttrPool& rPool,const String &rFmtNm, SwFrmFmt *pDrvdFrm)
    : SwFrmFmt(rPool,rFmtNm,pDrvdFrm,RES_FRMFMT,aTableBoxSetRange)
{
}

SwTableBoxFmt::~SwTableBoxFmt()
{
}

//UUUU Do not support for table frames - may change later if support will be added
bool SwTableBoxFmt::supportsFullDrawingLayerFillAttributeSet() const
{
    return false;
}
//UUUU
TYPEINIT1( SwTableLineFmt, SwFrmFmt );
// SwTableLineFmt: frame-format specialisation for a table row (line);
// forwards to SwFrmFmt with the line attribute range.
SwTableLineFmt::SwTableLineFmt(SwAttrPool& rPool,const sal_Char* pFmtNm, SwFrmFmt *pDrvdFrm)
    : SwFrmFmt(rPool,pFmtNm,pDrvdFrm,RES_FRMFMT,aTableLineSetRange)
{
}

SwTableLineFmt::SwTableLineFmt(SwAttrPool& rPool,const String &rFmtNm, SwFrmFmt *pDrvdFrm)
    : SwFrmFmt(rPool,rFmtNm,pDrvdFrm,RES_FRMFMT,aTableLineSetRange)
{
}

SwTableLineFmt::~SwTableLineFmt()
{
}

//UUUU Do not support for table frames - may change later if support will be added
bool SwTableLineFmt::supportsFullDrawingLayerFillAttributeSet() const
{
    return false;
}
SV_IMPL_PTRARR(SwTableLines,SwTableLine*);
SV_IMPL_PTRARR(SwTableBoxes,SwTableBox*);
SV_IMPL_PTRARR_SORT(SwTableSortBoxes,SwTableBoxPtr);
SV_IMPL_REF( SwServerObject )
#define COLFUZZY 20
void ChgTextToNum( SwTableBox& rBox, const String& rTxt, const Color* pCol,
sal_Bool bChgAlign,sal_uLong nNdPos );
//----------------------------------
// Lazily allocated private data of SwTableBox. A box only creates its
// SwTableBox_Impl when a non-default value has to be stored (a saved
// character color, a row span != 1, or the dummy flag) - see the inline
// accessors of SwTableBox below.
class SwTableBox_Impl
{
    Color *mpUserColor, *mpNumFmtColor; // saved char color: set by user / induced by number formatting
    long mnRowSpan;                     // vertical merge count; 1 = no row span
    bool mbDummyFlag;                   // marks boxes used only as placeholders

    // Replaces *ppCol by a copy of pNewCol (defined elsewhere).
    void SetNewCol( Color** ppCol, const Color* pNewCol );
public:
    SwTableBox_Impl() : mpUserColor(0), mpNumFmtColor(0), mnRowSpan(1),
        mbDummyFlag( false ) {}
    ~SwTableBox_Impl() { delete mpUserColor; delete mpNumFmtColor; }

    const Color* GetSaveUserColor() const { return mpUserColor; }
    const Color* GetSaveNumFmtColor() const { return mpNumFmtColor; }
    void SetSaveUserColor(const Color* p ) { SetNewCol( &mpUserColor, p ); }
    void SetSaveNumFmtColor( const Color* p ) { SetNewCol( &mpNumFmtColor, p ); }
    long getRowSpan() const { return mnRowSpan; }
    void setRowSpan( long nNewRowSpan ) { mnRowSpan = nNewRowSpan; }
    bool getDummyFlag() const { return mbDummyFlag; }
    void setDummyFlag( bool bDummy ) { mbDummyFlag = bDummy; }
};
// ----------- Inlines -----------------------------
// Returns the user-set character color saved for this box, or 0 when the
// box has no impl data (and therefore no saved color).
inline const Color* SwTableBox::GetSaveUserColor() const
{
    if( !pImpl )
        return 0;
    return pImpl->GetSaveUserColor();
}
// Returns the number-format-induced character color saved for this box,
// or 0 when the box has no impl data.
inline const Color* SwTableBox::GetSaveNumFmtColor() const
{
    if( !pImpl )
        return 0;
    return pImpl->GetSaveNumFmtColor();
}
// Stores the user-set character color. The impl data is allocated lazily:
// clearing (p == 0) on a box that never stored a color is a no-op.
inline void SwTableBox::SetSaveUserColor(const Color* p )
{
    if( !pImpl )
    {
        if( !p )
            return;
        pImpl = new SwTableBox_Impl;
    }
    pImpl->SetSaveUserColor( p );
}
// Stores the number-format color. Impl data is allocated lazily; clearing
// (p == 0) without existing impl data is a no-op.
inline void SwTableBox::SetSaveNumFmtColor( const Color* p )
{
    if( !pImpl )
    {
        if( !p )
            return;
        pImpl = new SwTableBox_Impl;
    }
    pImpl->SetSaveNumFmtColor( p );
}
// Returns the box's row span; a box without impl data has the default 1.
long SwTableBox::getRowSpan() const
{
    if( !pImpl )
        return 1;
    return pImpl->getRowSpan();
}
void SwTableBox::setRowSpan( long nNewRowSpan )
{
if( !pImpl )
{
if( nNewRowSpan == 1 )
return;
pImpl = new SwTableBox_Impl();
}
pImpl->setRowSpan( nNewRowSpan );
}
// Returns the dummy flag; a box without impl data defaults to false.
bool SwTableBox::getDummyFlag() const
{
    if( !pImpl )
        return false;
    return pImpl->getDummyFlag();
}
void SwTableBox::setDummyFlag( bool bDummy )
{
if( !pImpl )
{
if( !bDummy )
return;
pImpl = new SwTableBox_Impl();
}
pImpl->setDummyFlag( bDummy );
}
//JP 15.09.98: Bug 55741 - keep tabs (leading and trailing): within the
// leading and trailing runs of characters <= ' ' every tab is replaced by
// a blank; everything else is left untouched.
String& lcl_TabToBlankAtSttEnd( String& rTxt )
{
    // Leading run: stop at the first character greater than ' '.
    xub_StrLen nIdx = 0;
    while( nIdx < rTxt.Len() )
    {
        sal_Unicode cCur = rTxt.GetChar( nIdx );
        if( cCur > ' ' )
            break;
        if( '\x9' == cCur )
            rTxt.SetChar( nIdx, ' ' );
        ++nIdx;
    }
    // Trailing run: scan backwards until a character greater than ' '.
    nIdx = rTxt.Len();
    while( nIdx )
    {
        sal_Unicode cCur = rTxt.GetChar( --nIdx );
        if( cCur > ' ' )
            break;
        if( '\x9' == cCur )
            rTxt.SetChar( nIdx, ' ' );
    }
    return rTxt;
}
// Counterpart of lcl_TabToBlankAtSttEnd: deletes (instead of blanking)
// every tab inside the leading and trailing runs of characters <= ' '.
String& lcl_DelTabsAtSttEnd( String& rTxt )
{
    sal_Unicode c;
    xub_StrLen n;

    for( n = 0; n < rTxt.Len() && ' ' >= ( c = rTxt.GetChar( n )); ++n )
        if( '\x9' == c )
            rTxt.Erase( n--, 1 );   // one char removed -> re-check same index
    for( n = rTxt.Len(); n && ' ' >= ( c = rTxt.GetChar( --n )); )
        if( '\x9' == c )
            rTxt.Erase( n, 1 );     // scanning backwards, index stays valid
    return rTxt;
}
// Inserts nCnt new boxes into pLine at position nInsPos, using pBox as the
// template: the new cells get the text collection / character attributes of
// pBox's first content node, and pBox's row span is propagated to them.
void _InsTblBox( SwDoc* pDoc, SwTableNode* pTblNd,
                SwTableLine* pLine, SwTableBoxFmt* pBoxFrmFmt,
                SwTableBox* pBox,
                sal_uInt16 nInsPos, sal_uInt16 nCnt )
{
    ASSERT( pBox->GetSttNd(), "Box ohne Start-Node" );
    SwNodeIndex aIdx( *pBox->GetSttNd(), +1 );
    SwCntntNode* pCNd = aIdx.GetNode().GetCntntNode();
    if( !pCNd )
        pCNd = pDoc->GetNodes().GoNext( &aIdx );
    ASSERT( pCNd, "Box ohne ContentNode" );

    if( pCNd->IsTxtNode() )
    {
        // If a number-format color was saved for the template box, insert
        // with an attribute set in which the character color is restored to
        // the saved user color (or cleared when there is none).
        if( pBox->GetSaveNumFmtColor() && pCNd->GetpSwAttrSet() )
        {
            SwAttrSet aAttrSet( *pCNd->GetpSwAttrSet() );
            if( pBox->GetSaveUserColor() )
                aAttrSet.Put( SvxColorItem( *pBox->GetSaveUserColor(), RES_CHRATR_COLOR ));
            else
                aAttrSet.ClearItem( RES_CHRATR_COLOR );
            pDoc->GetNodes().InsBoxen( pTblNd, pLine, pBoxFrmFmt,
                                    ((SwTxtNode*)pCNd)->GetTxtColl(),
                                    &aAttrSet, nInsPos, nCnt );
        }
        else
            pDoc->GetNodes().InsBoxen( pTblNd, pLine, pBoxFrmFmt,
                                    ((SwTxtNode*)pCNd)->GetTxtColl(),
                                    pCNd->GetpSwAttrSet(),
                                    nInsPos, nCnt );
    }
    else
        // Non-text content node: fall back to the default text collection.
        pDoc->GetNodes().InsBoxen( pTblNd, pLine, pBoxFrmFmt,
                (SwTxtFmtColl*)pDoc->GetDfltTxtFmtColl(), 0,
                nInsPos, nCnt );

    // Propagate a non-default row span of the template box to all new boxes.
    long nRowSpan = pBox->getRowSpan();
    if( nRowSpan != 1 )
    {
        SwTableBoxes& rTblBoxes = pLine->GetTabBoxes();
        for( sal_uInt16 i = 0; i < nCnt; ++i )
        {
            pBox = rTblBoxes[ i + nInsPos ];
            pBox->setRowSpan( nRowSpan );
        }
    }
}
/*************************************************************************
|*
|* SwTable::SwTable()
|*
|*************************************************************************/
// Constructs an empty table registered at pFmt; lines/boxes are filled in
// later by the node layer.
SwTable::SwTable( SwTableFmt* pFmt )
    : SwClient( pFmt ),
    pHTMLLayout( 0 ),
    pTableNode( 0 ),
    nGrfsThatResize( 0 ),
    nRowsToRepeat( 1 ),
    bModifyLocked( sal_False ),
    bNewModel( sal_True )
{
    // take the default table-change mode from the application options
    eTblChgMode = (TblChgMode)GetTblChgDefaultMode();
}
// Copy constructor: takes over the table properties only. Note that the
// line/box structure of rTable is deliberately NOT duplicated here.
SwTable::SwTable( const SwTable& rTable )
    : SwClient( rTable.GetFrmFmt() ),
    pHTMLLayout( 0 ),
    pTableNode( 0 ),
    eTblChgMode( rTable.eTblChgMode ),
    nGrfsThatResize( 0 ),
    nRowsToRepeat( rTable.GetRowsToRepeat() ),
    bModifyLocked( sal_False ),
    bNewModel( rTable.bNewModel )
{
}
// Clears the start-node pointer of every box in the sorted array. The box
// objects themselves stay alive; they are owned by the line/box arrays.
void DelBoxNode( SwTableSortBoxes& rSortCntBoxes )
{
    const sal_uInt16 nBoxCnt = rSortCntBoxes.Count();
    for( sal_uInt16 nIdx = 0; nIdx < nBoxCnt; ++nIdx )
        rSortCntBoxes[ nIdx ]->pSttNd = 0;
}
SwTable::~SwTable()
{
    // If this table acts as a DDE server object, unregister it first.
    if( refObj.Is() )
    {
        SwDoc* pDoc = GetFrmFmt()->GetDoc();
        if( !pDoc->IsInDtor() )     // remove from the link manager's list
            pDoc->GetLinkManager().RemoveServer( &refObj );

        refObj->Closed();
    }

    // If the table is the last client of its frame format, the format can
    // be deleted as well.
    SwTableFmt* pFmt = (SwTableFmt*)GetFrmFmt();
    pFmt->Remove( this );       // detach,

    if( !pFmt->GetDepends() )
        pFmt->GetDoc()->DelTblFrmFmt( pFmt );   // and delete

    // Delete the pointers from the sort array of the boxes; the objects
    // themselves survive and are deleted by the DTORs of the line/box
    // arrays.
    // JP: unfortunately that is not enough - the pointers to the start
    // node of each section must be cleared too.
    DelBoxNode( aSortCntBoxes );
    aSortCntBoxes.Remove( (sal_uInt16)0, aSortCntBoxes.Count() );
    delete pHTMLLayout;
}
/*************************************************************************
|*
|* SwTable::Modify()
|*
|*************************************************************************/
// Appends pBoxFmt to rFmtArr unless it is already contained.
inline void FmtInArr( SvPtrarr& rFmtArr, SwFmt* pBoxFmt )
{
    if( USHRT_MAX == rFmtArr.GetPos( (VoidPtr)pBoxFmt ) )
        rFmtArr.Insert( (VoidPtr)pBoxFmt, rFmtArr.Count() );
}
void lcl_ModifyBoxes( SwTableBoxes &rBoxes, const long nOld,
const long nNew, SvPtrarr& rFmtArr );
// Rescales all box widths of rLines from the old table width nOld to the
// new width nNew. Touched formats are collected in rFmtArr; on the
// top-level call (bCheckSum) each collected format is rescaled once more
// directly so shared formats end up with a consistent width.
void lcl_ModifyLines( SwTableLines &rLines, const long nOld,
                         const long nNew, SvPtrarr& rFmtArr, const bool bCheckSum )
{
    for ( sal_uInt16 i = 0; i < rLines.Count(); ++i )
        ::lcl_ModifyBoxes( rLines[i]->GetTabBoxes(), nOld, nNew, rFmtArr );
    if( bCheckSum )
    {
        for( sal_uInt16 i = 0; i < rFmtArr.Count(); ++i )
        {
            SwFmt* pFmt = (SwFmt*)rFmtArr[i];
            sal_uInt64 nBox = pFmt->GetFrmSize().GetWidth();
            nBox *= nNew;       // 64-bit intermediate avoids overflow
            nBox /= nOld;
            SwFmtFrmSize aNewBox( ATT_VAR_SIZE, SwTwips(nBox), 0 );
            pFmt->LockModify();     // do not trigger recursive Modify
            pFmt->SetFmtAttr( aNewBox );
            pFmt->UnlockModify();
        }
    }
}
// Rescales the boxes of one row from table width nOld to nNew. The widths
// are distributed via a scaled running total so that rounding errors do
// not accumulate across the row.
void lcl_ModifyBoxes( SwTableBoxes &rBoxes, const long nOld,
                         const long nNew, SvPtrarr& rFmtArr )
{
    sal_uInt64 nSum = 0; // To avoid rounding errors we summarize all box widths
    sal_uInt64 nOriginalSum = 0; // Sum of original widths
    for ( sal_uInt16 i = 0; i < rBoxes.Count(); ++i )
    {
        SwTableBox &rBox = *rBoxes[i];
        if ( rBox.GetTabLines().Count() )
        {
            // For SubTables the rounding problem will not be solved :-(
            ::lcl_ModifyLines( rBox.GetTabLines(), nOld, nNew, rFmtArr, false );
        }
        // Adjust the box itself.
        SwFrmFmt *pFmt = rBox.GetFrmFmt();
        sal_uInt64 nBox = pFmt->GetFrmSize().GetWidth();
        nOriginalSum += nBox;
        nBox *= nNew;
        nBox /= nOld;
        // Scaled running total minus what was already distributed gives the
        // exact width this box should get.
        sal_uInt64 nWishedSum = nOriginalSum;
        nWishedSum *= nNew;
        nWishedSum /= nOld;
        nWishedSum -= nSum;     // NOTE(review): unsigned subtraction - would
                                // wrap on underflow; relies on nSum never
                                // exceeding the scaled sum
        if( nWishedSum > 0 )
        {
            if( nBox == nWishedSum )
                FmtInArr( rFmtArr, pFmt );
            else
            {
                nBox = nWishedSum;
                pFmt = rBox.ClaimFrmFmt();
                SwFmtFrmSize aNewBox( ATT_VAR_SIZE, static_cast< SwTwips >(nBox), 0 );
                pFmt->LockModify();
                pFmt->SetFmtAttr( aNewBox );
                pFmt->UnlockModify();
            }
        }
        else {
            ASSERT( false, "Rounding error" );
        }
        nSum += nBox;
    }
}
// Listens for attribute changes of the table format. Frame-size changes
// are intercepted here so the lines/boxes can be rescaled; everything else
// goes through the default client handling.
void SwTable::Modify( const SfxPoolItem* pOld, const SfxPoolItem *pNew )
{
    // catch frame-size changes to adjust the lines/boxes
    sal_uInt16 nWhich = pOld ? pOld->Which() : pNew ? pNew->Which() : 0 ;
    const SwFmtFrmSize* pNewSize = 0, *pOldSize = 0;

    if( RES_ATTRSET_CHG == nWhich )
    {
        // NOTE(review): pOld/pNew are assumed non-null for attribute-set
        // changes - confirm against the notification protocol.
        if( SFX_ITEM_SET == ((SwAttrSetChg*)pNew)->GetChgSet()->GetItemState(
            RES_FRM_SIZE, sal_False, (const SfxPoolItem**)&pNewSize ))
            pOldSize = &((SwAttrSetChg*)pOld)->GetChgSet()->GetFrmSize();
    }
    else if( RES_FRM_SIZE == nWhich )
    {
        pOldSize = (const SwFmtFrmSize*)pOld;
        pNewSize = (const SwFmtFrmSize*)pNew;
    }
    else
        CheckRegistration( pOld, pNew );

    if( pOldSize || pNewSize )
    {
        if ( !IsModifyLocked() )
        {
            ASSERT( pOldSize && pOldSize->Which() == RES_FRM_SIZE &&
                    pNewSize && pNewSize->Which() == RES_FRM_SIZE,
                    "Kein Old oder New fuer FmtFrmSize-Modify der SwTable." );
            AdjustWidths( pOldSize->GetWidth(), pNewSize->GetWidth() );
        }
    }
}
// Rescales every box width of the table from the old overall width nOld
// to the new width nNew (both in twips).
void SwTable::AdjustWidths( const long nOld, const long nNew )
{
    SvPtrarr aFmtArr( (sal_uInt8)aLines[0]->GetTabBoxes().Count(), 1 );
    ::lcl_ModifyLines( aLines, nOld, nNew, aFmtArr, true );
}
/*************************************************************************
|*
|* SwTable::GetTabCols()
|*
|*************************************************************************/
// Marks the first column entry that matches nPos (within COLFUZZY) as
// visible again.
void lcl_RefreshHidden( SwTabCols &rToFill, sal_uInt16 nPos )
{
    const sal_uInt16 nCnt = rToFill.Count();
    for ( sal_uInt16 n = 0; n < nCnt; ++n )
    {
        if ( Abs((long)(nPos - rToFill[n])) > COLFUZZY )
            continue;
        rToFill.SetHidden( n, sal_False );
        break;
    }
}
// Inserts the left edge of pBox into the sorted SwTabCols array rToFill,
// scaled from the table's wish width to its actual width. bHidden entries
// stem from rows other than the current one; with bRefreshHidden set, no
// new entries are inserted - only the hidden flags of existing entries are
// refreshed.
void lcl_SortedTabColInsert( SwTabCols &rToFill, const SwTableBox *pBox,
                   const SwFrmFmt *pTabFmt, const sal_Bool bHidden,
                   const FASTBOOL bRefreshHidden )
{
    const long nWish = pTabFmt->GetFrmSize().GetWidth();
    const long nAct = rToFill.GetRight() - rToFill.GetLeft();  // +1 why?

    // The value for the left edge of the box is calculated from the widths
    // of the preceding boxes, walking up through all enclosing lines.
    sal_uInt16 nPos = 0;
    sal_uInt16 nSum = 0;
    sal_uInt16 nLeftMin = 0;    // width of the box left of pBox (drag minimum)
    sal_uInt16 nRightMax = 0;   // width of pBox itself (drag maximum)
    const SwTableBox *pCur = pBox;
    const SwTableLine *pLine = pBox->GetUpper();
    while ( pLine )
    {   const SwTableBoxes &rBoxes = pLine->GetTabBoxes();
        for ( sal_uInt16 i = 0; i < rBoxes.Count(); ++i )
        {
            SwTwips nWidth = rBoxes[i]->GetFrmFmt()->GetFrmSize().GetWidth();
            nSum = (sal_uInt16)(nSum + nWidth);
            sal_uInt64 nTmp = nSum;     // scale wish width -> actual width
            nTmp *= nAct;
            nTmp /= nWish;
            if (rBoxes[i] != pCur)
            {
                if ( pLine == pBox->GetUpper() || 0 == nLeftMin )
                    nLeftMin = (sal_uInt16)(nTmp - nPos);
                nPos = (sal_uInt16)nTmp;
            }
            else
            {
                nSum = (sal_uInt16)(nSum - nWidth);
                if ( 0 == nRightMax )
                    nRightMax = (sal_uInt16)(nTmp - nPos);
                break;
            }
        }
        pCur = pLine->GetUpper();
        pLine = pCur ? pCur->GetUpper() : 0;
    }

    // Insert nPos into the sorted array unless an existing entry within
    // COLFUZZY already covers it.
    sal_Bool bInsert = !bRefreshHidden;
    for ( sal_uInt16 j = 0; bInsert && (j < rToFill.Count()); ++j )
    {
        long nCmp = rToFill[j];
        if ( (nPos >= ((nCmp >= COLFUZZY) ? nCmp - COLFUZZY : nCmp)) &&
             (nPos <= (nCmp + COLFUZZY)) )
        {
            bInsert = sal_False;        // already present
        }
        else if ( nPos < nCmp )
        {
            bInsert = sal_False;
            rToFill.Insert( nPos, bHidden, j );
        }
    }
    if ( bInsert )
        rToFill.Insert( nPos, bHidden, rToFill.Count() );
    else if ( bRefreshHidden )
        ::lcl_RefreshHidden( rToFill, nPos );

    if ( bHidden && !bRefreshHidden )
    {
        // calculate minimum/maximum values for the existing entries:
        nLeftMin = nPos - nLeftMin;
        nRightMax = nPos + nRightMax;

        // check if nPos is entry:
        bool bFoundPos = false;
        bool bFoundMax = false;
        for ( sal_uInt16 j = 0; !(bFoundPos && bFoundMax ) && j < rToFill.Count(); ++j )
        {
            SwTabColsEntry& rEntry = rToFill.GetEntry( j );
            long nCmp = rToFill[j];

            if ( (nPos >= ((nCmp >= COLFUZZY) ? nCmp - COLFUZZY : nCmp)) &&
                 (nPos <= (nCmp + COLFUZZY)) )
            {
                // check if nLeftMin is > old minimum for entry nPos:
                const long nOldMin = rEntry.nMin;
                if ( nLeftMin > nOldMin )
                    rEntry.nMin = nLeftMin;
                // check if nRightMin is < old maximum for entry nPos:
                const long nOldMax = rEntry.nMax;
                if ( nRightMax < nOldMax )
                    rEntry.nMax = nRightMax;

                bFoundPos = true;
            }
            else if ( (nRightMax >= ((nCmp >= COLFUZZY) ? nCmp - COLFUZZY : nCmp)) &&
                      (nRightMax <= (nCmp + COLFUZZY)) )
            {
                // check if nPos is > old minimum for entry nRightMax:
                const long nOldMin = rEntry.nMin;
                if ( nPos > nOldMin )
                    rEntry.nMin = nPos;

                bFoundMax = true;
            }
        }
    }
}
void lcl_ProcessBoxGet( const SwTableBox *pBox, SwTabCols &rToFill,
const SwFrmFmt *pTabFmt, FASTBOOL bRefreshHidden )
{
if ( pBox->GetTabLines().Count() )
{
const SwTableLines &rLines = pBox->GetTabLines();
for ( sal_uInt16 i = 0; i < rLines.Count(); ++i )
{ const SwTableBoxes &rBoxes = rLines[i]->GetTabBoxes();
for ( sal_uInt16 j = 0; j < rBoxes.Count(); ++j )
::lcl_ProcessBoxGet( rBoxes[j], rToFill, pTabFmt, bRefreshHidden);
}
}
else
::lcl_SortedTabColInsert( rToFill, pBox, pTabFmt, sal_False, bRefreshHidden );
}
void lcl_ProcessLineGet( const SwTableLine *pLine, SwTabCols &rToFill,
const SwFrmFmt *pTabFmt )
{
for ( sal_uInt16 i = 0; i < pLine->GetTabBoxes().Count(); ++i )
{
const SwTableBox *pBox = pLine->GetTabBoxes()[i];
if ( pBox->GetSttNd() )
::lcl_SortedTabColInsert( rToFill, pBox, pTabFmt, sal_True, sal_False );
else
for ( sal_uInt16 j = 0; j < pBox->GetTabLines().Count(); ++j )
::lcl_ProcessLineGet( pBox->GetTabLines()[j], rToFill, pTabFmt );
}
}
// MS: workaround for an optimizer problem in the DEC Alpha / Windows NT
// compiler (see the #pragma optimize below).
#if defined(ALPHA) && defined(WNT)
#pragma optimize("", off)
#endif
// Fills rToFill with the column edges relevant for pStart's row.
// bRefreshHidden: only refresh the hidden flags of an already filled
// array; bCurRowOnly: skip step 4 (the hidden entries of other rows).
void SwTable::GetTabCols( SwTabCols &rToFill, const SwTableBox *pStart,
              sal_Bool bRefreshHidden, sal_Bool bCurRowOnly ) const
{
    // MA 30. Nov. 95: Opt: if bRefreshHidden is set, only the hidden flags
    // of the array are updated.
    if ( bRefreshHidden )
    {
        // remove the corrections applied at the end of the last fill
        // (entries are stored relative to nLeftMin, the code below expects
        // them relative to the left table edge)
        sal_uInt16 i;
        for ( i = 0; i < rToFill.Count(); ++i )
        {
            SwTabColsEntry& rEntry = rToFill.GetEntry( i );
            rEntry.nPos -= rToFill.GetLeft();
            rEntry.nMin -= rToFill.GetLeft();
            rEntry.nMax -= rToFill.GetLeft();
        }

        // mark all entries hidden; the visible ones are re-flagged below
        for ( i = 0; i < rToFill.Count(); ++i )
            rToFill.SetHidden( i, sal_True );
    }
    else
    {
        rToFill.Remove( 0, rToFill.Count() );
    }

    // The following get inserted:
    // 1. All boxes below the line pStart belongs to, including their
    //    nested boxes if present.
    // 2. Starting from that line: the superordinate box and its
    //    neighbours - but not their nested boxes.
    // 3. Like 2. for every further ancestor line, until a line is reached
    //    whose parent is the table itself instead of a box.
    // Only boxes containing no further lines are inserted. The inserting
    // function makes sure no duplicates get added; to guarantee that, a
    // certain fuzziness (COLFUZZY) is used to cancel out rounding errors.
    // Only the left edges of the boxes are inserted; the very first one is
    // removed at the end because it is already covered by the border.
    // 4. Scan the table once more and insert _all_ boxes, now as hidden.
    const SwFrmFmt *pTabFmt = GetFrmFmt();

    //1.
    const SwTableBoxes &rBoxes = pStart->GetUpper()->GetTabBoxes();

    sal_uInt16 i;
    for ( i = 0; i < rBoxes.Count(); ++i )
        ::lcl_ProcessBoxGet( rBoxes[i], rToFill, pTabFmt, bRefreshHidden );

    //2. and 3.
    const SwTableLine *pLine = pStart->GetUpper()->GetUpper() ?
                                pStart->GetUpper()->GetUpper()->GetUpper() : 0;
    while ( pLine )
    {
        const SwTableBoxes &rBoxes2 = pLine->GetTabBoxes();
        for ( sal_uInt16 k = 0; k < rBoxes2.Count(); ++k )
            ::lcl_SortedTabColInsert( rToFill, rBoxes2[k],
                                      pTabFmt, sal_False, bRefreshHidden );
        pLine = pLine->GetUpper() ? pLine->GetUpper()->GetUpper() : 0;
    }

    if ( !bRefreshHidden )
    {
        //4.
        if ( !bCurRowOnly )
        {
            for ( i = 0; i < aLines.Count(); ++i )
                ::lcl_ProcessLineGet( aLines[i], rToFill, pTabFmt );
        }

        rToFill.Remove( 0, 1 );
    }

    // So far the coordinates are relative to the left table border, i.e.
    // to nLeft of the SwTabCols. The callers expect them relative to the
    // left margin (nLeftMin of the SwTabCols), so shift all values by
    // nLeft.
    for ( i = 0; i < rToFill.Count(); ++i )
    {
        SwTabColsEntry& rEntry = rToFill.GetEntry( i );
        rEntry.nPos += rToFill.GetLeft();
        rEntry.nMin += rToFill.GetLeft();
        rEntry.nMax += rToFill.GetLeft();
    }
}
#if defined(ALPHA) && defined(WNT)
#pragma optimize("", on)
#endif
/*************************************************************************
|*
|* SwTable::SetTabCols()
|*
|*************************************************************************/
//Structure for passing the parameters around SetTabCols and its helpers.
// Parameter bundle for SwTable::SetTabCols and its local helpers.
struct Parm
{
    const SwTabCols &rNew;      // desired column positions
    const SwTabCols &rOld;      // current column positions
    long nNewWish,              // table wish widths; assigned by
         nOldWish;              //  SwTable::SetTabCols before first use
    SvPtrarr aBoxArr;           // boxes collected for row-only adjustment
    SwShareBoxFmts aShareFmts;  // shares box formats while resizing

    // Fix: the wish widths used to stay indeterminate until the caller
    // assigned them; zero-initialize them defensively.
    Parm( const SwTabCols &rN, const SwTabCols &rO ) :
        rNew( rN ), rOld( rO ), nNewWish( 0 ), nOldWish( 0 ), aBoxArr( 10, 1 ) {}
};
// Returns sal_True when pBox is already contained in rArr; otherwise
// appends it and returns sal_False.
inline sal_Bool BoxInArr( SvPtrarr& rArr, SwTableBox* pBox )
{
    if( USHRT_MAX != rArr.GetPos( (VoidPtr)pBox ) )
        return sal_True;
    rArr.Insert( (VoidPtr)pBox, rArr.Count() );
    return sal_False;
}
void lcl_ProcessBoxSet( SwTableBox *pBox, Parm &rParm );
// Processes the boxes of a line from back to front; the adjustment must
// run right-to-left (see lcl_ProcessBoxSet).
void lcl_ProcessLine( SwTableLine *pLine, Parm &rParm )
{
    SwTableBoxes &rBoxes = pLine->GetTabBoxes();
    sal_uInt16 nBox = rBoxes.Count();
    while ( nBox )
        ::lcl_ProcessBoxSet( rBoxes[ --nBox ], rParm );
}
// Adjusts one leaf box to the new column positions in rParm: computes the
// box's current left/right edges, looks them up in the old TabCols,
// derives the deltas against the new TabCols and resizes the box (and,
// where necessary, its ancestors). Nested tables recurse line by line.
void lcl_ProcessBoxSet( SwTableBox *pBox, Parm &rParm )
{
    if ( pBox->GetTabLines().Count() )
    {   SwTableLines &rLines = pBox->GetTabLines();
        for ( int i = rLines.Count()-1; i >= 0; --i )
            lcl_ProcessLine( rLines[ static_cast< sal_uInt16 >(i) ], rParm );
    }
    else
    {
        // Calculate the current position (left and right edge) and look it
        // up in the old TabCols. Compare against the new TabCols and
        // adjust the box when they differ. If no neighbour exists at the
        // changed edge, all superordinate boxes are adjusted too.
        const long nOldAct = rParm.rOld.GetRight() -
                             rParm.rOld.GetLeft(); // +1 why?

        // The left edge of the box is the sum of the widths of the
        // preceding boxes plus the left margin.
        long nLeft = rParm.rOld.GetLeft();
        const  SwTableBox  *pCur  = pBox;
        const  SwTableLine *pLine = pBox->GetUpper();

        while ( pLine )
        {   const SwTableBoxes &rBoxes = pLine->GetTabBoxes();
            for ( sal_uInt16 i = 0; (i < rBoxes.Count()) && (rBoxes[i] != pCur); ++i)
            {
                sal_uInt64 nWidth = rBoxes[i]->GetFrmFmt()->
                                    GetFrmSize().GetWidth();
                nWidth *= nOldAct;      // scale wish width -> actual width
                nWidth /= rParm.nOldWish;
                nLeft += (sal_uInt16)nWidth;
            }
            pCur  = pLine->GetUpper();
            pLine = pCur ? pCur->GetUpper() : 0;
        }
        long nLeftDiff;
        long nRightDiff = 0;
        if ( nLeft != rParm.rOld.GetLeft() ) // there are boxes before this one
        {
            // right edge is left edge plus width
            sal_uInt64 nWidth = pBox->GetFrmFmt()->GetFrmSize().GetWidth();
            nWidth *= nOldAct;
            nWidth /= rParm.nOldWish;
            long nRight = nLeft + (long)nWidth;
            sal_uInt16 nLeftPos  = USHRT_MAX,
                       nRightPos = USHRT_MAX;
            for ( sal_uInt16 i = 0; i < rParm.rOld.Count(); ++i )
            {
                if ( nLeft >= (rParm.rOld[i] - COLFUZZY) &&
                     nLeft <= (rParm.rOld[i] + COLFUZZY) )
                    nLeftPos = i;
                else if ( nRight >= (rParm.rOld[i] - COLFUZZY) &&
                          nRight <= (rParm.rOld[i] + COLFUZZY) )
                    nRightPos = i;
            }
            nLeftDiff = nLeftPos != USHRT_MAX ?
                    (int)rParm.rOld[nLeftPos] - (int)rParm.rNew[nLeftPos] : 0;
            nRightDiff= nRightPos!= USHRT_MAX ?
                    (int)rParm.rNew[nRightPos] - (int)rParm.rOld[nRightPos] : 0;
        }
        else    // the first box
        {
            nLeftDiff = (long)rParm.rOld.GetLeft() - (long)rParm.rNew.GetLeft();
            if ( rParm.rOld.Count() )
            {
                // calculate the difference to the edge touched by the
                // first box
                sal_uInt64 nWidth = pBox->GetFrmFmt()->GetFrmSize().GetWidth();
                nWidth *= nOldAct;
                nWidth /= rParm.nOldWish;
                long nTmp = (long)nWidth;
                nTmp += rParm.rOld.GetLeft();
                sal_uInt16 nLeftPos = USHRT_MAX;
                for ( sal_uInt16 i = 0; i < rParm.rOld.Count() &&
                                        nLeftPos == USHRT_MAX; ++i )
                {
                    if ( nTmp >= (rParm.rOld[i] - COLFUZZY) &&
                         nTmp <= (rParm.rOld[i] + COLFUZZY) )
                        nLeftPos = i;
                }
                if ( nLeftPos != USHRT_MAX )
                    nRightDiff = (long)rParm.rNew[nLeftPos] -
                                 (long)rParm.rOld[nLeftPos];
            }
            // MA 11. Feb. 99: #61577# leaving nRightDiff at 0 is correct
            // here, because the edge was already corrected in SetTabCols().
        }

        if( pBox->getRowSpan() == 1 )
        {
            // do not move an edge shared with a row-spanned neighbour
            SwTableBoxes& rTblBoxes = pBox->GetUpper()->GetTabBoxes();
            sal_uInt16 nPos = rTblBoxes.C40_GETPOS( SwTableBox, pBox );
            if( nPos && rTblBoxes[ nPos - 1 ]->getRowSpan() != 1 )
                nLeftDiff = 0;
            if( nPos + 1 < rTblBoxes.Count() &&
                rTblBoxes[ nPos + 1 ]->getRowSpan() != 1 )
                nRightDiff = 0;
        }
        else
            nLeftDiff = nRightDiff = 0;

        if ( nLeftDiff || nRightDiff )
        {
            // The difference is the actual (visible) amount. Applying it
            // directly to the box attributes makes no sense when the table
            // is stretched, so convert it back into the table's wish-width
            // scale first.
            long nTmp = rParm.rNew.GetRight() - rParm.rNew.GetLeft(); // +1 why?
            nLeftDiff *= rParm.nNewWish;
            nLeftDiff /= nTmp;
            nRightDiff *= rParm.nNewWish;
            nRightDiff /= nTmp;
            long nDiff = nLeftDiff + nRightDiff;

            // adjust the box and all superordinate boxes by nDiff
            while ( pBox )
            {
                SwFmtFrmSize aFmtFrmSize( pBox->GetFrmFmt()->GetFrmSize() );
                aFmtFrmSize.SetWidth( aFmtFrmSize.GetWidth() + nDiff );
                if ( aFmtFrmSize.GetWidth() < 0 )
                    aFmtFrmSize.SetWidth( -aFmtFrmSize.GetWidth() );
                rParm.aShareFmts.SetSize( *pBox, aFmtFrmSize );

                // The outer cells of the last row are responsible to adjust a surrounding cell.
                // Last line check:
                if ( pBox->GetUpper()->GetUpper() &&
                     pBox->GetUpper() != pBox->GetUpper()->GetUpper()->GetTabLines()
                     [pBox->GetUpper()->GetUpper()->GetTabLines().Count()-1])
                {
                   pBox = 0;
                }
                else
                {
                    // Middle cell check:
                    if ( pBox != pBox->GetUpper()->GetTabBoxes()[0] )
                        nDiff = nRightDiff;

                    if ( pBox != pBox->GetUpper()->GetTabBoxes()
                                 [pBox->GetUpper()->GetTabBoxes().Count()-1] )
                        nDiff -= nRightDiff;

                    pBox = nDiff ? pBox->GetUpper()->GetUpper() : 0;
                }
            }
        }
    }
}
// Collects all leaf boxes below pBox into rBoxArr; bBefore selects whether
// they are prepended (boxes left of the start box) or appended.
void lcl_ProcessBoxPtr( SwTableBox *pBox, SvPtrarr &rBoxArr,
                           sal_Bool bBefore )
{
    const SwTableLines &rLines = pBox->GetTabLines();
    if ( !rLines.Count() )
    {
        rBoxArr.Insert( (VoidPtr)pBox, bBefore ? 0 : rBoxArr.Count() );
        return;
    }
    for ( sal_uInt16 nLine = 0; nLine < rLines.Count(); ++nLine )
    {
        const SwTableBoxes &rBoxes = rLines[nLine]->GetTabBoxes();
        for ( sal_uInt16 nBox = 0; nBox < rBoxes.Count(); ++nBox )
            ::lcl_ProcessBoxPtr( rBoxes[nBox], rBoxArr, bBefore );
    }
}
void lcl_AdjustBox( SwTableBox *pBox, const long nDiff, Parm &rParm );
// Applies nDiff to the last (rightmost) box of every line.
void lcl_AdjustLines( SwTableLines &rLines, const long nDiff, Parm &rParm )
{
    for ( sal_uInt16 nLine = 0; nLine < rLines.Count(); ++nLine )
    {
        SwTableBoxes &rBoxes = rLines[nLine]->GetTabBoxes();
        lcl_AdjustBox( rBoxes[ rBoxes.Count() - 1 ], nDiff, rParm );
    }
}
// Widens/narrows pBox by nDiff; nested tables are adjusted first so their
// rightmost boxes grow along with this box.
void lcl_AdjustBox( SwTableBox *pBox, const long nDiff, Parm &rParm )
{
    SwTableLines &rInnerLines = pBox->GetTabLines();
    if ( rInnerLines.Count() )
        ::lcl_AdjustLines( rInnerLines, nDiff, rParm );

    // adjust the size of the box itself via the shared-format helper
    SwFmtFrmSize aSz( pBox->GetFrmFmt()->GetFrmSize() );
    aSz.SetWidth( aSz.GetWidth() + nDiff );
    rParm.aShareFmts.SetSize( *pBox, aSz );
}
// Applies new column positions rNew (against the previous state rOld) to
// the table: first the outer borders / table size and alignment, then the
// individual boxes - either the whole table or only pStart's row
// (bCurRowOnly). The caller must pass a correct rOld (see FME note).
void SwTable::SetTabCols( const SwTabCols &rNew, const SwTabCols &rOld,
                          const SwTableBox *pStart, sal_Bool bCurRowOnly )
{
    CHECK_TABLE( *this )

    SetHTMLTableLayout( 0 );    // MIB 9.7.97: drop the cached HTML layout

    // FME: Made rOld const. The caller is responsible for passing correct
    // values of rOld. Therefore we do not have to call GetTabCols anymore:
    //GetTabCols( rOld, pStart );

    Parm aParm( rNew, rOld );

    ASSERT( rOld.Count() == rNew.Count(), "Columnanzahl veraendert.");

    // Handle the outer borders. The table size and a few boxes must be
    // adjusted; while setting the size, Modify must not run - it would
    // adjust all boxes, which is exactly what we do not want here.
    SwFrmFmt *pFmt = GetFrmFmt();
    aParm.nOldWish = aParm.nNewWish = pFmt->GetFrmSize().GetWidth();
    if ( (rOld.GetLeft() != rNew.GetLeft()) ||
         (rOld.GetRight()!= rNew.GetRight()) )
    {
        LockModify();
        {
            SvxLRSpaceItem aLR( pFmt->GetLRSpace() );
            SvxShadowItem aSh( pFmt->GetShadow() );

            SwTwips nShRight = aSh.CalcShadowSpace( SHADOW_RIGHT );
            SwTwips nShLeft = aSh.CalcShadowSpace( SHADOW_LEFT );

            aLR.SetLeft ( rNew.GetLeft() - nShLeft );
            aLR.SetRight( rNew.GetRightMax() - rNew.GetRight() - nShRight );
            pFmt->SetFmtAttr( aLR );

            // The table alignment must be adjusted accordingly: the table
            // stays exactly where the user has just dragged it.
            SwFmtHoriOrient aOri( pFmt->GetHoriOrient() );
            if(text::HoriOrientation::NONE != aOri.GetHoriOrient())
            {
                const sal_Bool bLeftDist = rNew.GetLeft() != nShLeft;
                const sal_Bool bRightDist = rNew.GetRight() + nShRight != rNew.GetRightMax();
                if(!bLeftDist && !bRightDist)
                    aOri.SetHoriOrient( text::HoriOrientation::FULL );
                else if(!bRightDist && rNew.GetLeft() > nShLeft )
                    aOri.SetHoriOrient( text::HoriOrientation::RIGHT );
                else if(!bLeftDist && rNew.GetRight() + nShRight < rNew.GetRightMax())
                    aOri.SetHoriOrient( text::HoriOrientation::LEFT );
                else
                    aOri.SetHoriOrient( text::HoriOrientation::NONE );
            }
            pFmt->SetFmtAttr( aOri );
        }
        const long nAct = rOld.GetRight() - rOld.GetLeft(); // +1 why?
        long nTabDiff = 0;

        if ( rOld.GetLeft() != rNew.GetLeft() )
        {
            nTabDiff = rOld.GetLeft() - rNew.GetLeft();
            nTabDiff *= aParm.nOldWish;     // convert to wish-width scale
            nTabDiff /= nAct;
        }
        if ( rOld.GetRight() != rNew.GetRight() )
        {
            long nDiff = rNew.GetRight() - rOld.GetRight();
            nDiff *= aParm.nOldWish;
            nDiff /= nAct;
            nTabDiff += nDiff;
            if( !IsNewModel() )
                ::lcl_AdjustLines( GetTabLines(), nDiff, aParm );
        }

        // Adjust the size of the table, bearing in mind that the table may
        // be stretched.
        if ( nTabDiff )
        {
            aParm.nNewWish += nTabDiff;
            if ( aParm.nNewWish < 0 )
                aParm.nNewWish = USHRT_MAX; // oops - rolled backwards
            SwFmtFrmSize aSz( pFmt->GetFrmSize() );
            if ( aSz.GetWidth() != aParm.nNewWish )
            {
                aSz.SetWidth( aParm.nNewWish );
                aSz.SetWidthPercent( 0 );
                pFmt->SetFmtAttr( aSz );
            }
        }
        UnlockModify();
    }

    if( IsNewModel() )
        NewSetTabCols( aParm, rNew, rOld, pStart, bCurRowOnly );
    else
    {
        if ( bCurRowOnly )
        {
            // To adjust only the current row we have to walk the boxes of
            // the current line, analogous to filling the TabCols (see
            // GetTabCols()). Unfortunately the boxes must again be changed
            // from back to front resp. from inner to outer; the best way
            // to achieve that is to note the boxes in a pointer array
            // first.
            const SwTableBoxes &rBoxes = pStart->GetUpper()->GetTabBoxes();
            for ( sal_uInt16 i = 0; i < rBoxes.Count(); ++i )
                ::lcl_ProcessBoxPtr( rBoxes[i], aParm.aBoxArr, sal_False );

            const SwTableLine *pLine = pStart->GetUpper()->GetUpper() ?
                                    pStart->GetUpper()->GetUpper()->GetUpper() : 0;
            const SwTableBox  *pExcl = pStart->GetUpper()->GetUpper();
            while ( pLine )
            {
                const SwTableBoxes &rBoxes2 = pLine->GetTabBoxes();
                sal_Bool bBefore = sal_True;
                for ( sal_uInt16 i = 0; i < rBoxes2.Count(); ++i )
                {
                    if ( rBoxes2[i] != pExcl )
                        ::lcl_ProcessBoxPtr( rBoxes2[i], aParm.aBoxArr, bBefore );
                    else
                        bBefore = sal_False;
                }
                pExcl = pLine->GetUpper();
                pLine = pLine->GetUpper() ? pLine->GetUpper()->GetUpper() : 0;
            }
            // Now that we have noted lots of boxes (hopefully all of them
            // and in the right order) they only need to be processed in
            // reverse order.
            for ( int j = aParm.aBoxArr.Count()-1; j >= 0; --j )
            {
                SwTableBox *pBox = (SwTableBox*)aParm.aBoxArr[ static_cast< sal_uInt16 >(j)];
                ::lcl_ProcessBoxSet( pBox, aParm );
            }
        }
        else
        {
            // Adjusting the whole table is 'simple': every box without
            // nested lines gets adjusted and adjusts all its superordinate
            // boxes along the way. Of course we must work backwards so as
            // not to fool ourselves!
            SwTableLines &rLines = GetTabLines();
            for ( int i = rLines.Count()-1; i >= 0; --i )
                ::lcl_ProcessLine( rLines[ static_cast< sal_uInt16 >(i) ], aParm );
        }
    }

#ifdef DBG_UTIL
    {
        // lives in tblrwcl.cxx
        extern void _CheckBoxWidth( const SwTableLine&, SwTwips );
        // verify that the table rows still have consistent widths
        SwTwips nSize = GetFrmFmt()->GetFrmSize().GetWidth();
        for( sal_uInt16 n = 0; n < aLines.Count(); ++n )
            _CheckBoxWidth( *aLines[ n ], nSize );
    }
#endif
}
typedef std::pair<sal_uInt16, sal_uInt16> ColChange;
typedef std::list< ColChange > ChangeList;
// Applies the edge changes collected in rOldNew (pairs of old/new edge
// positions in wish-width twips) to the boxes of pLine. A correction is
// carried from one box into the next via nRest when an edge had to move.
static void lcl_AdjustWidthsInLine( SwTableLine* pLine, ChangeList& rOldNew,
    Parm& rParm, sal_uInt16 nColFuzzy )
{
    ChangeList::iterator pCurr = rOldNew.begin();
    if( pCurr == rOldNew.end() )
        return;
    sal_uInt16 nCount = pLine->GetTabBoxes().Count();
    sal_uInt16 i = 0;
    SwTwips nBorder = 0;    // running sum of widths = current right edge
    SwTwips nRest = 0;      // correction handed over to the next box
    while( i < nCount )
    {
        SwTableBox* pBox = pLine->GetTabBoxes()[i++];
        SwTwips nWidth = pBox->GetFrmFmt()->GetFrmSize().GetWidth();
        SwTwips nNewWidth = nWidth - nRest;
        nRest = 0;
        nBorder += nWidth;
        // does a change entry match this box's right edge (within fuzzy)?
        if( pCurr != rOldNew.end() && nBorder + nColFuzzy >= pCurr->first )
        {
            nBorder -= nColFuzzy;
            while( pCurr != rOldNew.end() && nBorder > pCurr->first )
                ++pCurr;    // skip entries left of this edge
            if( pCurr != rOldNew.end() )
            {
                nBorder += nColFuzzy;
                if( nBorder + nColFuzzy >= pCurr->first )
                {
                    if( pCurr->second == pCurr->first )
                        nRest = 0;
                    else
                        nRest = pCurr->second - nBorder;
                    nNewWidth += nRest;
                    ++pCurr;
                }
            }
        }
        if( nNewWidth != nWidth )
        {
            if( nNewWidth < 0 )
            {
                // keep at least a 1-twip box and push the excess onward
                nRest += 1 - nNewWidth;
                nNewWidth = 1;
            }
            SwFmtFrmSize aFmtFrmSize( pBox->GetFrmFmt()->GetFrmSize() );
            aFmtFrmSize.SetWidth( nNewWidth );
            rParm.aShareFmts.SetSize( *pBox, aFmtFrmSize );
        }
    }
}
// Transfers the edge-change list rChanges from one row to an adjacent one
// (upwards when bTop is set), rescaling positions into the target line's
// box raster. rSpanPos tracks which changes sit inside row spans; changes
// without a counterpart in this line get interpolated between the
// neighbouring fixed points afterwards.
// NOTE(review): the interpolation below is intricate; comments describe
// the apparent intent - confirm against the row-span handling in
// NewSetTabCols.
static void lcl_CalcNewWidths( std::list<sal_uInt16> &rSpanPos, ChangeList& rChanges,
    SwTableLine* pLine, long nWish, long nWidth, bool bTop )
{
    if( !rChanges.size() )
    {
        rSpanPos.clear();
        return;
    }
    if( !rSpanPos.size() )
    {
        rChanges.clear();
        return;
    }
    std::list<sal_uInt16> aNewSpanPos;
    ChangeList aNewChanges;
    ChangeList::iterator pCurr = rChanges.begin();
    aNewChanges.push_back( *pCurr ); // null position
    std::list<sal_uInt16>::iterator pSpan = rSpanPos.begin();
    sal_uInt16 nCurr = 0;
    sal_uInt16 nOrgSum = 0;
    bool bRowSpan = false;
    sal_uInt16 nRowSpanCount = 0;
    sal_uInt16 nCount = pLine->GetTabBoxes().Count();
    for( sal_uInt16 nCurrBox = 0; nCurrBox < nCount; ++nCurrBox )
    {
        SwTableBox* pBox = pLine->GetTabBoxes()[nCurrBox];
        SwTwips nCurrWidth = pBox->GetFrmFmt()->GetFrmSize().GetWidth();
        const long nRowSpan = pBox->getRowSpan();
        const bool bCurrRowSpan = bTop ? nRowSpan < 0 :
            ( nRowSpan > 1 || nRowSpan < -1 );
        if( bRowSpan || bCurrRowSpan )
            aNewSpanPos.push_back( nRowSpanCount );
        bRowSpan = bCurrRowSpan;
        nOrgSum = (sal_uInt16)(nOrgSum + nCurrWidth);
        // round-trip through the actual width to snap the position onto
        // the same raster used when the change list was built
        sal_uInt64 nSum = nOrgSum;
        nSum *= nWidth;
        nSum /= nWish;
        nSum *= nWish;
        nSum /= nWidth;
        sal_uInt16 nPos = (sal_uInt16)nSum;
        while( pCurr != rChanges.end() && pCurr->first < nPos )
        {
#ifdef DBG_UTIL
            sal_uInt16 nTemp = pCurr->first;
            nTemp = pCurr->second;
#endif
            ++nCurr;
            ++pCurr;
        }
        bool bNew = true;
        if( pCurr != rChanges.end() && pCurr->first <= nPos &&
            pCurr->first != pCurr->second )
        {
            while( pSpan != rSpanPos.end() && *pSpan < nCurr )
                ++pSpan;
            if( pSpan != rSpanPos.end() && *pSpan == nCurr )
            {
                // change sits inside a row span: keep it as-is
                aNewChanges.push_back( *pCurr );
                ++nRowSpanCount;
                bNew = false;
            }
        }
        if( bNew )
        {
            // no matching change: insert a fixed point (old == new)
            ColChange aTmp( nPos, nPos );
            aNewChanges.push_back( aTmp );
            ++nRowSpanCount;
        }
    }

    // Interpolate the new positions of the fixed points that lie between
    // genuinely moved edges.
    pCurr = aNewChanges.begin();
    ChangeList::iterator pLast = pCurr;
    ChangeList::iterator pLeftMove = pCurr;
    while( pCurr != aNewChanges.end() )
    {
        if( pLeftMove == pCurr )
        {
            while( ++pLeftMove != aNewChanges.end() && pLeftMove->first <= pLeftMove->second )
                ;
        }
        if( pCurr->second == pCurr->first )
        {
            if( pLeftMove != aNewChanges.end() && pCurr->second > pLeftMove->second )
            {
                if( pLeftMove->first == pLast->first )
                    pCurr->second = pLeftMove->second;
                else
                {
                    // linear interpolation between pLast and pLeftMove
                    sal_uInt64 nTmp = pCurr->first - pLast->first;
                    nTmp *= pLeftMove->second - pLast->second;
                    nTmp /= pLeftMove->first - pLast->first;
                    nTmp += pLast->second;
                    pCurr->second = (sal_uInt16)nTmp;
                }
            }
            pLast = pCurr;
            ++pCurr;
        }
        else if( pCurr->second > pCurr->first )
        {
            pLast = pCurr;
            ++pCurr;
            ChangeList::iterator pNext = pCurr;
            while( pNext != pLeftMove && pNext->second == pNext->first &&
                pNext->second < pLast->second )
                ++pNext;
            while( pCurr != pNext )
            {
                if( pNext == aNewChanges.end() || pNext->first == pLast->first )
                    pCurr->second = pLast->second;
                else
                {
                    // linear interpolation between pLast and pNext
                    sal_uInt64 nTmp = pCurr->first - pLast->first;
                    nTmp *= pNext->second - pLast->second;
                    nTmp /= pNext->first - pLast->first;
                    nTmp += pLast->second;
                    pCurr->second = (sal_uInt16)nTmp;
                }
                ++pCurr;
            }
            pLast = pCurr;
        }
        else
        {
            pLast = pCurr;
            ++pCurr;
        }
    }

    // copy the results back to the caller's containers
    rChanges.clear();
    ChangeList::iterator pCopy = aNewChanges.begin();
    while( pCopy != aNewChanges.end() )
        rChanges.push_back( *pCopy++ );
    rSpanPos.clear();
    std::list<sal_uInt16>::iterator pSpCopy = aNewSpanPos.begin();
    while( pSpCopy != aNewSpanPos.end() )
        rSpanPos.push_back( *pSpCopy++ );
}
// Applies a changed column layout (rNew vs. rOld) to the table.
// rParm carries the old/new margins and "wish" widths used for scaling;
// pStart identifies the box whose row triggered the change; with
// bCurRowOnly only that row is adjusted (plus rows that are reached
// through row spans above/below it), otherwise every line is adjusted.
void SwTable::NewSetTabCols( Parm &rParm, const SwTabCols &rNew,
    const SwTabCols &rOld, const SwTableBox *pStart, sal_Bool bCurRowOnly )
{
#ifdef DBG_UTIL
    static int nCallCount = 0;  // debug aid: counts invocations
    ++nCallCount;
#endif
    // First step: evaluate which lines have been moved/which widths changed
    ChangeList aOldNew;
    const long nNewWidth = rParm.rNew.GetRight() - rParm.rNew.GetLeft();
    const long nOldWidth = rParm.rOld.GetRight() - rParm.rOld.GetLeft();
    if( nNewWidth < 1 || nOldWidth < 1 )
        return;  // degenerate table width, nothing sensible to compute
    for( sal_uInt16 i = 0; i <= rOld.Count(); ++i )
    {
        sal_uInt64 nNewPos;
        sal_uInt64 nOldPos;
        if( i == rOld.Count() )
        {
            // virtual entry behind the last separator: the right table edge
            nOldPos = rParm.rOld.GetRight() - rParm.rOld.GetLeft();
            nNewPos = rParm.rNew.GetRight() - rParm.rNew.GetLeft();
        }
        else
        {
            nOldPos = rOld[i] - rParm.rOld.GetLeft();
            nNewPos = rNew[i] - rParm.rNew.GetLeft();
        }
        // rescale both positions into "wish width" units so old and new
        // values are comparable independent of the absolute table width
        nNewPos *= rParm.nNewWish;
        nNewPos /= nNewWidth;
        nOldPos *= rParm.nOldWish;
        nOldPos /= nOldWidth;
        if( nOldPos != nNewPos && nNewPos > 0 && nOldPos > 0 )
        {
            // this separator really moved: remember the old -> new mapping
            ColChange aChg( (sal_uInt16)nOldPos, (sal_uInt16)nNewPos );
            aOldNew.push_back( aChg );
        }
    }
    // Finished first step
    int nCount = aOldNew.size();
    if( !nCount )
        return; // no change, nothing to do
    SwTableLines &rLines = GetTabLines();
    if( bCurRowOnly )
    {
        const SwTableLine* pCurrLine = pStart->GetUpper();
        sal_uInt16 nCurr = rLines.C40_GETPOS( SwTableLine, pCurrLine );
        if( nCurr >= USHRT_MAX )
            return;
        ColChange aChg( 0, 0 );
        aOldNew.push_front( aChg );  // sentinel entry for position 0
        std::list<sal_uInt16> aRowSpanPos;
        if( nCurr )
        {
            // Rows above the current one can be affected through row
            // spans: propagate the changes upwards line by line until no
            // open row-span positions remain (or the first line is hit).
            ChangeList aCopy;
            ChangeList::iterator pCop = aOldNew.begin();
            sal_uInt16 nPos = 0;
            while( pCop != aOldNew.end() )
            {
                aCopy.push_back( *pCop );
                ++pCop;
                aRowSpanPos.push_back( nPos++ );
            }
            lcl_CalcNewWidths( aRowSpanPos, aCopy, rLines[nCurr],
                rParm.nOldWish, nOldWidth, true );
            bool bGoOn = aRowSpanPos.size() > 0;
            sal_uInt16 j = nCurr;
            while( bGoOn )
            {
                lcl_CalcNewWidths( aRowSpanPos, aCopy, rLines[--j],
                    rParm.nOldWish, nOldWidth, true );
                lcl_AdjustWidthsInLine( rLines[j], aCopy, rParm, 0 );
                bGoOn = aRowSpanPos.size() > 0 && j > 0;
            };
            aRowSpanPos.clear();
        }
        if( nCurr+1 < rLines.Count() )
        {
            // Same propagation for rows below the current one.
            ChangeList aCopy;
            ChangeList::iterator pCop = aOldNew.begin();
            sal_uInt16 nPos = 0;
            while( pCop != aOldNew.end() )
            {
                aCopy.push_back( *pCop );
                ++pCop;
                aRowSpanPos.push_back( nPos++ );
            }
            lcl_CalcNewWidths( aRowSpanPos, aCopy, rLines[nCurr],
                rParm.nOldWish, nOldWidth, false );
            bool bGoOn = aRowSpanPos.size() > 0;
            sal_uInt16 j = nCurr;
            while( bGoOn )
            {
                lcl_CalcNewWidths( aRowSpanPos, aCopy, rLines[++j],
                    rParm.nOldWish, nOldWidth, false );
                lcl_AdjustWidthsInLine( rLines[j], aCopy, rParm, 0 );
                bGoOn = aRowSpanPos.size() > 0 && j+1 < rLines.Count();
            };
        }
        // finally adjust the originating row itself
        ::lcl_AdjustWidthsInLine( rLines[nCurr], aOldNew, rParm, 1 );
    }
    else for( sal_uInt16 i = 0; i < rLines.Count(); ++i )
        ::lcl_AdjustWidthsInLine( rLines[i], aOldNew, rParm, COLFUZZY );
    CHECK_TABLE( *this )
}
/*************************************************************************
|*
|*  const SwTableBox* SwTable::GetTblBox( const String& rName ) const
|*      Returns the pointer to the box with the given name.
|*
|*************************************************************************/
// Returns sal_True if rStr consists exclusively of decimal digits and
// can therefore serve as a numeric row name. An empty string counts as
// valid (there is no offending character).
sal_Bool IsValidRowName( const String& rStr )
{
    const xub_StrLen nLength = rStr.Len();
    for (xub_StrLen nIdx = 0; nIdx < nLength; ++nIdx)
    {
        const sal_Unicode cCur = rStr.GetChar(nIdx);
        if (cCur < '0' || cCur > '9')
            return sal_False;   // non-digit found: name is invalid
    }
    return sal_True;
}
// --> OD 2007-08-03 #i80314#
// add 3rd parameter and its handling
// Parses one component of a box name such as "A1" or "1.2.B3" out of
// rStr, removes the consumed characters from rStr and returns the
// parsed number. bFirstPart == sal_True parses the letter-encoded
// column part, sal_False parses a (dot-separated) numeric row part.
// With bPerformValidCheck, a row part containing non-digits yields 0.
sal_uInt16 SwTable::_GetBoxNum( String& rStr, sal_Bool bFirstPart,
                            const bool bPerformValidCheck )
{
    sal_uInt16 nRet = 0;
    xub_StrLen nPos = 0;
    if( bFirstPart )   // sal_True == column; sal_False == row
    {
        // the first part is addressed with letters!
        // 'A'..'Z' map to digit values 0..25, 'a'..'z' to 26..51;
        // multi-letter labels are decoded as bijective base-52 numbers
        // (the ++nRet before the multiply accounts for the missing
        // "zero digit" of bijective numbering).
        sal_Unicode cChar;
        sal_Bool bFirst = sal_True;
        while( 0 != ( cChar = rStr.GetChar( nPos )) &&
               ( (cChar >= 'A' && cChar <= 'Z') ||
                 (cChar >= 'a' && cChar <= 'z') ) )
        {
            if( (cChar -= 'A') >= 26 )
                cChar -= 'a' - '[';   // shift lowercase down to 26..51
            if( bFirst )
                bFirst = sal_False;
            else
                ++nRet;
            nRet = nRet * 52 + cChar;
            ++nPos;
        }
        rStr.Erase( 0, nPos );  // remove the consumed characters
    }
    else if( STRING_NOTFOUND == ( nPos = rStr.Search( aDotStr ) ))
    {
        // no further dot: the rest of the string is the row number
        nRet = 0;
        if ( !bPerformValidCheck || IsValidRowName( rStr ) )
        {
            nRet = static_cast<sal_uInt16>(rStr.ToInt32());
        }
        rStr.Erase();
    }
    else
    {
        // take the text up to the next dot as the row number
        nRet = 0;
        String aTxt( rStr.Copy( 0, nPos ) );
        if ( !bPerformValidCheck || IsValidRowName( aTxt ) )
        {
            nRet = static_cast<sal_uInt16>(aTxt.ToInt32());
        }
        rStr.Erase( 0, nPos+1 );
    }
    return nRet;
}
// <--
// --> OD 2007-08-03 #i80314#
// add 2nd parameter and its handling
// Looks up a box by its hierarchical name (e.g. "A1" or "1.2.B3" for
// nested boxes). The name is consumed component by component via
// _GetBoxNum, descending through lines and boxes. Returns 0 when a
// line/box index is out of range; with bPerformValidCheck, malformed
// numeric parts are treated as 0 (see #i80314#).
const SwTableBox* SwTable::GetTblBox( const String& rName,
                                      const bool bPerformValidCheck ) const
{
    const SwTableBox* pBox = 0;
    const SwTableLine* pLine;
    const SwTableLines* pLines;
    const SwTableBoxes* pBoxes;
    sal_uInt16 nLine, nBox;
    String aNm( rName );  // working copy; _GetBoxNum consumes it
    while( aNm.Len() )
    {
        nBox = SwTable::_GetBoxNum( aNm, 0 == pBox, bPerformValidCheck );
        // first box?
        if( !pBox )
            pLines = &GetTabLines();
        else
        {
            // descend into the nested lines of the previously found box
            pLines = &pBox->GetTabLines();
            if( nBox )
                --nBox;   // inner box numbers are 1-based here
        }
        nLine = SwTable::_GetBoxNum( aNm, sal_False, bPerformValidCheck );
        // determine the line (1-based)
        if( !nLine || nLine > pLines->Count() )
            return 0;
        pLine = (*pLines)[ nLine-1 ];
        // determine the box
        pBoxes = &pLine->GetTabBoxes();
        if( nBox >= pBoxes->Count() )
            return 0;
        pBox = (*pBoxes)[ nBox ];
    }
    // check whether the located box really is a content-carrying box
    if( pBox && !pBox->GetSttNd() )
    {
        ASSERT( sal_False, "Box ohne Inhalt, suche die naechste !!" );
        // "fall through" to the first content box below it
        while( pBox->GetTabLines().Count() )
            pBox = pBox->GetTabLines()[0]->GetTabBoxes()[0];
    }
    return pBox;
}
// Looks up the box whose content section starts at node index nSttIdx.
// Tries the layout first (walking up from the nearest content/table
// node's frame to its cell frame), because that avoids scanning the
// whole sorted box array; falls back to a linear search through
// aSortCntBoxes when no layout exists or the frame walk fails.
SwTableBox* SwTable::GetTblBox( sal_uLong nSttIdx )
{
    //MA: for optimization, do not always laboriously search the whole
    //    sorted array.
    //OS: #102675# converting text to table tries under certain conditions
    //    to ask for a table box of a table that is not yet having a format
    if(!GetFrmFmt())
        return 0;
    SwTableBox* pRet = 0;
    SwNodes& rNds = GetFrmFmt()->GetDoc()->GetNodes();
    sal_uLong nIndex = nSttIdx + 1;
    SwCntntNode* pCNd = 0;
    SwTableNode* pTblNd = 0;
    // find the first content node or nested table node inside the box
    while ( nIndex < rNds.Count() )
    {
        pTblNd = rNds[ nIndex ]->GetTableNode();
        if ( pTblNd )
            break;
        pCNd = rNds[ nIndex ]->GetCntntNode();
        if ( pCNd )
            break;
        ++nIndex;
    }
    if ( pCNd || pTblNd )
    {
        SwModify* pModify = pCNd;
        // --> FME 2007-3-26 #144862# Better handling of table in table:
        if ( pTblNd && pTblNd->GetTable().GetFrmFmt() )
            pModify = pTblNd->GetTable().GetFrmFmt();
        // <--
        // climb from the node's frame up to the surrounding cell frame
        SwFrm* pFrm = SwIterator<SwFrm,SwModify>::FirstElement( *pModify );
        while ( pFrm && !pFrm->IsCellFrm() )
            pFrm = pFrm->GetUpper();
        if ( pFrm )
            pRet = (SwTableBox*)((SwCellFrm*)pFrm)->GetTabBox();
    }
    // If the layout does not exist yet, or something else went wrong:
    // fall back to a linear scan of the sorted content boxes.
    if ( !pRet )
    {
        for( sal_uInt16 n = aSortCntBoxes.Count(); n; )
            if( aSortCntBoxes[ --n ]->GetSttIdx() == nSttIdx )
                return aSortCntBoxes[ n ];
    }
    return pRet;
}
// Returns sal_True when the table contains nested structure: a box
// whose line's upper is itself a box only arises from split/merge,
// which makes the table "complex".
sal_Bool SwTable::IsTblComplex() const
{
    const sal_uInt16 nBoxCnt = aSortCntBoxes.Count();
    for( sal_uInt16 nIdx = 0; nIdx < nBoxCnt; ++nIdx )
    {
        if( aSortCntBoxes[ nIdx ]->GetUpper()->GetUpper() )
            return sal_True;    // nested box found
    }
    return sal_False;
}
/*************************************************************************
|*
|* SwTableLine::SwTableLine()
|*
|*************************************************************************/
// Creates a table line as a client of pFmt, with an initial box array
// capacity of nBoxes; pUp is the owning box for nested lines (0 for
// top-level lines).
SwTableLine::SwTableLine( SwTableLineFmt *pFmt, sal_uInt16 nBoxes,
                            SwTableBox *pUp )
    : SwClient( pFmt ),
    aBoxes( (sal_uInt8)nBoxes, 1 ),
    pUpper( pUp )
{
}
SwTableLine::~SwTableLine()
{
    // If this line was the last client of its frame format, the format
    // has no remaining dependents and can be destroyed as well.
    SwModify* pFmt = GetFrmFmt();
    pFmt->Remove( this );           // sign off from the format
    if( !pFmt->GetDepends() )
        delete pFmt;                // nobody left: dispose of it
}
/*************************************************************************
|*
|* SwTableLine::ClaimFrmFmt(), ChgFrmFmt()
|*
|*************************************************************************/
// Ensures this line is the *exclusive* SwTableLine client of its
// SwTableLineFmt. If another line shares the format, a copy of the
// format is created, the SwRowFrms belonging to this line are moved to
// the copy, and this line is re-registered at it. Returns the (possibly
// new) format.
SwFrmFmt* SwTableLine::ClaimFrmFmt()
{
    // This method makes sure that this object is an exclusive SwTableLine client
    // of an SwTableLineFmt object
    // If other SwTableLine objects currently listen to the same SwTableLineFmt as
    // this one, something needs to be done
    SwTableLineFmt *pRet = (SwTableLineFmt*)GetFrmFmt();
    SwIterator<SwTableLine,SwFmt> aIter( *pRet );
    for( SwTableLine* pLast = aIter.First(); pLast; pLast = aIter.Next() )
    {
        if ( pLast != this )
        {
            // found another SwTableLine that is a client of the current Fmt
            // create a new Fmt as a copy and use it for this object
            SwTableLineFmt *pNewFmt = pRet->GetDoc()->MakeTableLineFmt();
            *pNewFmt = *pRet;
            // register SwRowFrms that know me as clients at the new Fmt
            SwIterator<SwRowFrm,SwFmt> aFrmIter( *pRet );
            for( SwRowFrm* pFrm = aFrmIter.First(); pFrm; pFrm = aFrmIter.Next() )
                if( pFrm->GetTabLine() == this )
                    pFrm->RegisterToFormat( *pNewFmt );
            // register myself
            pNewFmt->Add( this );
            pRet = pNewFmt;
            break;
        }
    }
    return pRet;
}
// Moves this line (and all SwRowFrms displaying it) from its current
// frame format to pNewFmt, invalidating the affected row frames so the
// layout reformats them; deletes the old format when it has no clients
// left afterwards.
void SwTableLine::ChgFrmFmt( SwTableLineFmt *pNewFmt )
{
    SwFrmFmt *pOld = GetFrmFmt();
    SwIterator<SwRowFrm,SwFmt> aIter( *pOld );
    // First re-register the frames.
    for( SwRowFrm* pRow = aIter.First(); pRow; pRow = aIter.Next() )
    {
        if( pRow->GetTabLine() == this )
        {
            pRow->RegisterToFormat( *pNewFmt );
            // force a complete re-layout/repaint of the row
            pRow->InvalidateSize();
            pRow->_InvalidatePrt();
            pRow->SetCompletePaint();
            pRow->ReinitializeFrmSizeAttrFlags();
            // --> FME 2004-10-27 #i35063#
            // consider 'split row allowed' attribute
            SwTabFrm* pTab = pRow->FindTabFrm();
            bool bInFollowFlowRow = false;
            const bool bInFirstNonHeadlineRow = pTab->IsFollow() &&
                                                pRow == pTab->GetFirstNonHeadlineRow();
            if ( bInFirstNonHeadlineRow ||
                 !pRow->GetNext() ||
                 0 != ( bInFollowFlowRow = pRow->IsInFollowFlowRow() ) ||
                 0 != pRow->IsInSplitTableRow() )
            {
                if ( bInFirstNonHeadlineRow || bInFollowFlowRow )
                    pTab = pTab->FindMaster();
                pTab->SetRemoveFollowFlowLinePending( sal_True );
                pTab->InvalidatePos();
            }
            // <--
        }
    }
    // Now re-register myself.
    pNewFmt->Add( this );
    // delete the old format if this line was its last client
    if ( !pOld->GetDepends() )
        delete pOld;
}
// Sums the layout height (width in vertical mode) of all row frames of
// this line within one master/follow table chain. bLayoutAvailable is
// set when a table frame with a positive extent was found. Returns the
// accumulated height in twips (0 when no layout exists).
SwTwips SwTableLine::GetTableLineHeight( bool& bLayoutAvailable ) const
{
    SwTwips nRet = 0;
    bLayoutAvailable = false;
    SwIterator<SwRowFrm,SwFmt> aIter( *GetFrmFmt() );
    // A row could appear several times in headers/footers so only one chain of master/follow tables
    // will be accepted...
    const SwTabFrm* pChain = NULL; // My chain
    for( SwRowFrm* pLast = aIter.First(); pLast; pLast = aIter.Next() )
    {
        if( pLast->GetTabLine() == this )
        {
            const SwTabFrm* pTab = pLast->FindTabFrm();
            bLayoutAvailable = ( pTab && pTab->IsVertical() ) ?
                               ( 0 < pTab->Frm().Height() ) :
                               ( 0 < pTab->Frm().Width() );
            // The first one defines the chain, if a chain is defined, only members of the chain
            // will be added.
            if( !pChain || pChain->IsAnFollow( pTab ) || pTab->IsAnFollow( pChain ) )
            {
                pChain = pTab; // defines my chain (even it is already)
                if( pTab->IsVertical() )
                    nRet += pLast->Frm().Width();
                else
                    nRet += pLast->Frm().Height();
                // Optimization, if there are no master/follows in my chain, nothing more to add
                if( !pTab->HasFollow() && !pTab->IsFollow() )
                    break;
                // This is not an optimization, this is necessary to avoid double additions of
                // repeating rows
                if( pTab->IsInHeadline(*pLast) )
                    break;
            }
        }
    }
    return nRet;
}
/*************************************************************************
|*
|* SwTableBox::SwTableBox()
|*
|*************************************************************************/
// Creates a structural (line-carrying) box with room for nLines nested
// lines and no content section (pSttNd == 0). CheckBoxFmt may clone
// pFmt when it carries a value/formula that must stay box-exclusive.
SwTableBox::SwTableBox( SwTableBoxFmt* pFmt, sal_uInt16 nLines, SwTableLine *pUp )
    : SwClient( 0 ),
    aLines( (sal_uInt8)nLines, 1 ),
    pSttNd( 0 ),
    pUpper( pUp ),
    pImpl( 0 )
{
    CheckBoxFmt( pFmt )->Add( this );
}
// Creates a content-carrying box whose section starts at the node
// referenced by rIdx, and registers it in the owning table's sorted
// content-box array.
SwTableBox::SwTableBox( SwTableBoxFmt* pFmt, const SwNodeIndex &rIdx,
                        SwTableLine *pUp )
    : SwClient( 0 ),
    aLines( 0, 0 ),
    pUpper( pUp ),
    pImpl( 0 )
{
    CheckBoxFmt( pFmt )->Add( this );
    pSttNd = rIdx.GetNode().GetStartNode();
    // register at the table
    const SwTableNode* pTblNd = pSttNd->FindTableNode();
    ASSERT( pTblNd, "in welcher Tabelle steht denn die Box?" );
    SwTableSortBoxes& rSrtArr = (SwTableSortBoxes&)pTblNd->GetTable().
                                GetTabSortBoxes();
    // Insert() needs an lvalue pointer; "&this" is not legal C++,
    // hence the local copy.
    SwTableBox* p = this;   // error: &this
    rSrtArr.Insert( p );    // insert
}
// Creates a content-carrying box directly from its start node and
// registers it in the owning table's sorted content-box array.
SwTableBox::SwTableBox( SwTableBoxFmt* pFmt, const SwStartNode& rSttNd, SwTableLine *pUp ) :
    SwClient( 0 ),
    aLines( 0, 0 ),
    pSttNd( &rSttNd ),
    pUpper( pUp ),
    pImpl( 0 )
{
    CheckBoxFmt( pFmt )->Add( this );
    // register at the table
    const SwTableNode* pTblNd = pSttNd->FindTableNode();
    ASSERT( pTblNd, "in welcher Tabelle steht denn die Box?" );
    SwTableSortBoxes& rSrtArr = (SwTableSortBoxes&)pTblNd->GetTable().
                                GetTabSortBoxes();
    // Insert() needs an lvalue pointer; "&this" is not legal C++,
    // hence the local copy.
    SwTableBox* p = this;   // error: &this
    rSrtArr.Insert( p );    // insert
}
// Removes a content box from the table's sorted box array (unless the
// whole document is being destroyed), signs off from the frame format
// and deletes it when this box was its last client.
SwTableBox::~SwTableBox()
{
    // content-carrying box?
    if( !GetFrmFmt()->GetDoc()->IsInDtor() && pSttNd )
    {
        // deregister at the table
        const SwTableNode* pTblNd = pSttNd->FindTableNode();
        ASSERT( pTblNd, "in welcher Tabelle steht denn die Box?" );
        SwTableSortBoxes& rSrtArr = (SwTableSortBoxes&)pTblNd->GetTable().
                                    GetTabSortBoxes();
        // Remove() needs an lvalue pointer; "&this" is not legal C++.
        SwTableBox *p = this;   // error: &this
        rSrtArr.Remove( p );    // remove
    }
    // If the box was the last client of its frame format, the format
    // can be deleted as well.
    SwModify* pMod = GetFrmFmt();
    pMod->Remove( this );           // sign off,
    if( !pMod->GetDepends() )
        delete pMod;                // and delete
    delete pImpl;
}
// If pFmt carries a value or formula attribute, the box must be the
// sole client of its format (those attributes are per-box). When the
// format already has another box client, a copy without value/formula
// is created and returned instead; otherwise pFmt itself is returned.
SwTableBoxFmt* SwTableBox::CheckBoxFmt( SwTableBoxFmt* pFmt )
{
    if( SFX_ITEM_SET == pFmt->GetItemState( RES_BOXATR_VALUE, sal_False ) ||
        SFX_ITEM_SET == pFmt->GetItemState( RES_BOXATR_FORMULA, sal_False ) )
    {
        SwTableBox* pOther = SwIterator<SwTableBox,SwFmt>::FirstElement( *pFmt );
        if( pOther )
        {
            SwTableBoxFmt* pNewFmt = pFmt->GetDoc()->MakeTableBoxFmt();
            // LockModify suppresses change notifications while copying
            pNewFmt->LockModify();
            *pNewFmt = *pFmt;
            // remove values and formulas
            pNewFmt->ResetFmtAttr( RES_BOXATR_FORMULA, RES_BOXATR_VALUE );
            pNewFmt->UnlockModify();
            pFmt = pNewFmt;
        }
    }
    return pFmt;
}
/*************************************************************************
|*
|* SwTableBox::ClaimFrmFmt(), ChgFrmFmt()
|*
|*************************************************************************/
// Ensures this box is the *exclusive* SwTableBox client of its
// SwTableBoxFmt. If the format is shared, it is copied (minus the
// per-box value/formula attributes), the SwCellFrms of this box are
// moved to the copy, and this box is re-registered at it.
SwFrmFmt* SwTableBox::ClaimFrmFmt()
{
    // This method makes sure that this object is an exclusive SwTableBox client
    // of an SwTableBoxFmt object
    // If other SwTableBox objects currently listen to the same SwTableBoxFmt as
    // this one, something needs to be done
    SwTableBoxFmt *pRet = (SwTableBoxFmt*)GetFrmFmt();
    SwIterator<SwTableBox,SwFmt> aIter( *pRet );
    for( SwTableBox* pLast = aIter.First(); pLast; pLast = aIter.Next() )
    {
        if ( pLast != this )
        {
            // Found another SwTableBox object
            // create a new Fmt as a copy and assign me to it
            // don't copy values and formulas
            SwTableBoxFmt* pNewFmt = pRet->GetDoc()->MakeTableBoxFmt();
            pNewFmt->LockModify();
            *pNewFmt = *pRet;
            pNewFmt->ResetFmtAttr( RES_BOXATR_FORMULA, RES_BOXATR_VALUE );
            pNewFmt->UnlockModify();
            // re-register SwCellFrm objects that know me
            SwIterator<SwCellFrm,SwFmt> aFrmIter( *pRet );
            for( SwCellFrm* pCell = aFrmIter.First(); pCell; pCell = aFrmIter.Next() )
                if( pCell->GetTabBox() == this )
                    pCell->RegisterToFormat( *pNewFmt );
            // re-register myself
            pNewFmt->Add( this );
            pRet = pNewFmt;
            break;
        }
    }
    return pRet;
}
// Moves this box (and all SwCellFrms displaying it) from its current
// frame format to pNewFmt, invalidating the affected cell frames;
// deletes the old format when it has no clients left afterwards.
void SwTableBox::ChgFrmFmt( SwTableBoxFmt* pNewFmt )
{
    SwFrmFmt *pOld = GetFrmFmt();
    SwIterator<SwCellFrm,SwFmt> aIter( *pOld );
    // First re-register the frames.
    for( SwCellFrm* pCell = aIter.First(); pCell; pCell = aIter.Next() )
    {
        if( pCell->GetTabBox() == this )
        {
            pCell->RegisterToFormat( *pNewFmt );
            // force a complete re-layout/repaint of the cell
            pCell->InvalidateSize();
            pCell->_InvalidatePrt();
            pCell->SetCompletePaint();
            pCell->SetDerivedVert( sal_False );
            pCell->CheckDirChange();
            // --> FME 2005-04-15 #i47489#
            // make sure that the row will be formatted, in order
            // to have the correct Get(Top|Bottom)MarginForLowers values
            // set at the row.
            const SwTabFrm* pTab = pCell->FindTabFrm();
            if ( pTab && pTab->IsCollapsingBorders() )
            {
                SwFrm* pRow = pCell->GetUpper();
                pRow->_InvalidateSize();
                pRow->_InvalidatePrt();
            }
            // <--
        }
    }
    // Now re-register myself.
    pNewFmt->Add( this );
    // delete the old format if this box was its last client
    if( !pOld->GetDepends() )
        delete pOld;
}
/*************************************************************************
|*
|*  String SwTableBox::GetName() const
|*      Returns the name of this box. The name is determined dynamically
|*      from the box's position within the lines/boxes of the table.
|*
|*************************************************************************/
// Builds the spreadsheet-style column label for the 0-based column
// index nCol by prepending characters to rNm. The alphabet has 52
// symbols: 'A'..'Z' for digit values 0..25 and 'a'..'z' for 26..51;
// multi-letter labels are formed bijectively (..., Z, a, ..., z, AA, ...),
// which is why nCol is decremented after each division.
void lcl_GetTblBoxColStr( sal_uInt16 nCol, String& rNm )
{
    const sal_uInt16 coDiff = 52;   // 'A'-'Z' 'a' - 'z'
    sal_uInt16 nCalc;
    do {
        nCalc = nCol % coDiff;      // least significant "digit"
        if( nCalc >= 26 )
            rNm.Insert( sal_Unicode('a' - 26 + nCalc ), 0 );
        else
            rNm.Insert( sal_Unicode('A' + nCalc ), 0 );
        if( 0 == (nCol = nCol - nCalc) )
            break;                  // all digits emitted
        nCol /= coDiff;
        --nCol;                     // bijective numbering has no zero digit
    } while( 1 );
}
// Builds the box name from its position: walking from this box up
// through its owning lines/boxes, each level prepends "<box>.<line>."
// components; the outermost box index is rendered as a column letter
// via lcl_GetTblBoxColStr (yielding names like "A1" or "1.2.B3").
String SwTableBox::GetName() const
{
    if( !pSttNd )       // no content box??
    {
        // should we search for the next first box??
        return aEmptyStr;
    }
    const SwTable& rTbl = pSttNd->FindTableNode()->GetTable();
    sal_uInt16 nPos;
    String sNm, sTmp;
    const SwTableBox* pBox = this;
    do {
        const SwTableBoxes* pBoxes = &pBox->GetUpper()->GetTabBoxes();
        const SwTableLine* pLine = pBox->GetUpper();
        // at the topmost level?
        const SwTableLines* pLines = pLine->GetUpper()
                ? &pLine->GetUpper()->GetTabLines() : &rTbl.GetTabLines();
        // prepend the (1-based) line number
        sTmp = String::CreateFromInt32( nPos = pLines->GetPos( pLine ) + 1 );
        if( sNm.Len() )
            sNm.Insert( aDotStr, 0 ).Insert( sTmp, 0 );
        else
            sNm = sTmp;
        // prepend the box number; the outermost one becomes a letter
        sTmp = String::CreateFromInt32(( nPos = pBoxes->GetPos( pBox )) + 1 );
        if( 0 != ( pBox = pLine->GetUpper()) )
            sNm.Insert( aDotStr, 0 ).Insert( sTmp, 0 );
        else
            ::lcl_GetTblBoxColStr( nPos, sNm );
    } while( pBox );
    return sNm;
}
// Returns sal_True when this box lies (possibly nested) inside the
// table's first line, i.e. its headline row. pTbl may be 0, in which
// case the owning table is determined from the box's start node.
sal_Bool SwTableBox::IsInHeadline( const SwTable* pTbl ) const
{
    // boxes without an upper line only occur temporarily during merge
    if( !GetUpper() )
        return sal_False;
    if( !pTbl )
        pTbl = &pSttNd->FindTableNode()->GetTable();
    // climb from the owning line up to the outermost (top-level) line
    const SwTableLine* pTopLine = GetUpper();
    while( pTopLine->GetUpper() )
        pTopLine = pTopLine->GetUpper()->GetUpper();
    // in the headline iff that line is the table's first line
    return pTbl->GetTabLines()[ 0 ] == pTopLine;
}
#ifdef DBG_UTIL
// Debug helper: node index of the box's content start node,
// or 0 for boxes that carry no content section.
sal_uLong SwTableBox::GetSttIdx() const
{
    if( !pSttNd )
        return 0;
    return pSttNd->GetIndex();
}
#endif
// request information from the client (SwModify/SfxPoolItem query protocol)
// Answers queries sent through the SwModify notification protocol.
// Returns sal_False to stop further propagation (query answered),
// sal_True to let the caller continue asking other clients.
sal_Bool SwTable::GetInfo( SfxPoolItem& rInfo ) const
{
    switch( rInfo.Which() )
    {
    case RES_AUTOFMT_DOCNODE:
    {
        // autoformat asks for a content node of this table (only if the
        // table lives in the queried nodes array)
        const SwTableNode* pTblNode = GetTableNode();
        if( pTblNode && &pTblNode->GetNodes() == ((SwAutoFmtGetDocNode&)rInfo).pNodes )
        {
            if ( aSortCntBoxes.Count() )
            {
                SwNodeIndex aIdx( *aSortCntBoxes[ 0 ]->GetSttNd() );
                ((SwAutoFmtGetDocNode&)rInfo).pCntntNode =
                    GetFrmFmt()->GetDoc()->GetNodes().GoNext( &aIdx );
            }
            return sal_False;
        }
        break;
    }
    case RES_FINDNEARESTNODE:
        // report this table's node as a candidate when it carries a
        // page-descriptor attribute and lives in the document nodes
        if( GetFrmFmt() && ((SwFmtPageDesc&)GetFrmFmt()->GetFmtAttr(
            RES_PAGEDESC )).GetPageDesc() &&
            aSortCntBoxes.Count() &&
            aSortCntBoxes[ 0 ]->GetSttNd()->GetNodes().IsDocNodes() )
            ((SwFindNearestNode&)rInfo).CheckNode( *
                aSortCntBoxes[ 0 ]->GetSttNd()->FindTableNode() );
        break;
    case RES_CONTENT_VISIBLE:
    {
        // visible iff the frame format has at least one frame
        ((SwPtrMsgPoolItem&)rInfo).pObject = SwIterator<SwFrm,SwFmt>::FirstElement( *GetFrmFmt() );
    }
    return sal_False;
    }
    return sal_True;
}
// Returns the table registered as a client of pFmt, or 0 when pFmt is
// null or has no table client.
SwTable * SwTable::FindTable( SwFrmFmt const*const pFmt )
{
    if( !pFmt )
        return 0;
    return SwIterator<SwTable,SwFmt>::FirstElement( *pFmt );
}
// Returns the table's node. Preferably derived from the first content
// box's start node; falls back to the explicitly remembered pTableNode
// when no content boxes are registered (yet).
SwTableNode* SwTable::GetTableNode() const
{
    if( GetTabSortBoxes().Count() )
        return (SwTableNode*)GetTabSortBoxes()[ 0 ]->GetSttNd()->FindTableNode();
    return pTableNode;
}
// Replaces the table's DDE server object reference; a still-active
// previous object is closed first.
void SwTable::SetRefObject( SwServerObject* pObj )
{
    if( refObj.Is() )
        refObj->Closed();
    refObj = pObj;
}
// Replaces the cached HTML layout object, taking ownership of p and
// deleting the previous one. Guard against self-assignment: without
// it, SetHTMLTableLayout(pHTMLLayout) would delete the object and then
// store the now-dangling pointer.
void SwTable::SetHTMLTableLayout( SwHTMLTableLayout *p )
{
    if( p != pHTMLLayout )
    {
        delete pHTMLLayout;
        pHTMLLayout = p;
    }
}
// Convenience overload: determines the box's single valid text node
// itself and forwards to the full version below.
void ChgTextToNum( SwTableBox& rBox, const String& rTxt, const Color* pCol,
                    sal_Bool bChgAlign )
{
    sal_uLong nNdPos = rBox.IsValidNumTxtNd( sal_True );
    ChgTextToNum( rBox,rTxt,pCol,bChgAlign,nNdPos);
}
// Turns the content of a table box into its number representation:
// replaces the text of the box's text node (at nNdPos) with rTxt,
// applies the number-format color pCol (remembering any user-set color
// so it can be restored later) and, with bChgAlign, switches the
// horizontal alignment to right and the vertical alignment to bottom.
// Redlines covering the old text are removed / re-created as needed.
void ChgTextToNum( SwTableBox& rBox, const String& rTxt, const Color* pCol,
                    sal_Bool bChgAlign,sal_uLong nNdPos )
{
    if( ULONG_MAX != nNdPos )
    {
        SwDoc* pDoc = rBox.GetFrmFmt()->GetDoc();
        SwTxtNode* pTNd = pDoc->GetNodes()[ nNdPos ]->GetTxtNode();
        const SfxPoolItem* pItem;
        // adjust the horizontal alignment (numbers are right-aligned)
        if( bChgAlign )
        {
            pItem = &pTNd->SwCntntNode::GetAttr( RES_PARATR_ADJUST );
            SvxAdjust eAdjust = ((SvxAdjustItem*)pItem)->GetAdjust();
            if( SVX_ADJUST_LEFT == eAdjust || SVX_ADJUST_BLOCK == eAdjust )
            {
                SvxAdjustItem aAdjust( *(SvxAdjustItem*)pItem );
                aAdjust.SetAdjust( SVX_ADJUST_RIGHT );
                pTNd->SetAttr( aAdjust );
            }
        }
        // adjust the color, or save the "user color" for later restore
        if( !pTNd->GetpSwAttrSet() || SFX_ITEM_SET != pTNd->GetpSwAttrSet()->
            GetItemState( RES_CHRATR_COLOR, sal_False, &pItem ))
            pItem = 0;
        const Color* pOldNumFmtColor = rBox.GetSaveNumFmtColor();
        const Color* pNewUserColor = pItem ? &((SvxColorItem*)pItem)->GetValue() : 0;
        if( ( pNewUserColor && pOldNumFmtColor &&
                *pNewUserColor == *pOldNumFmtColor ) ||
            ( !pNewUserColor && !pOldNumFmtColor ))
        {
            // the current color is the number-format color (or none):
            // do not treat it as a user color; apply the new values and
            // drop the old number-format color if necessary
            if( pCol )
                // set the color if one was supplied
                pTNd->SetAttr( SvxColorItem( *pCol, RES_CHRATR_COLOR ));
            else if( pItem )
            {
                pNewUserColor = rBox.GetSaveUserColor();
                if( pNewUserColor )
                    pTNd->SetAttr( SvxColorItem( *pNewUserColor, RES_CHRATR_COLOR ));
                else
                    pTNd->ResetAttr( RES_CHRATR_COLOR );
            }
        }
        else
        {
            // remember the user color; apply the number-format color if
            // given, but never reset the color
            rBox.SetSaveUserColor( pNewUserColor );
            if( pCol )
                // set the color if one was supplied
                pTNd->SetAttr( SvxColorItem( *pCol, RES_CHRATR_COLOR ));
        }
        rBox.SetSaveNumFmtColor( pCol );
        if( pTNd->GetTxt() != rTxt )
        {
            // replace the text
            // JP 15.09.98: Bug 55741 - keep tabs (leading and trailing!)
            const String& rOrig = pTNd->GetTxt();
            xub_StrLen n;
            // skip leading tab and 0x01 characters
            for( n = 0; n < rOrig.Len() && '\x9' == rOrig.GetChar( n ); ++n )
                ;
            for( ; n < rOrig.Len() && '\x01' == rOrig.GetChar( n ); ++n )
                ;
            SwIndex aIdx( pTNd, n );
            // skip trailing tab characters as well
            for( n = rOrig.Len(); n && '\x9' == rOrig.GetChar( --n ); )
                ;
            n -= aIdx.GetIndex() - 1;
            // JP 06.04.99: Bug 64321 - reset the DontExpand flags before
            //              replacing, so that they get spanned up again
            {
                SwIndex aResetIdx( aIdx, n );
                pTNd->DontExpandFmt( aResetIdx, sal_False, sal_False );
            }
            // remove redlines covering the old text before replacing it
            if( !pDoc->IsIgnoreRedline() && pDoc->GetRedlineTbl().Count() )
            {
                SwPaM aTemp(*pTNd, 0, *pTNd, rOrig.Len());
                pDoc->DeleteRedline(aTemp, true, USHRT_MAX);
            }
            pTNd->EraseText( aIdx, n,
                    IDocumentContentOperations::INS_EMPTYEXPAND );
            pTNd->InsertText( rTxt, aIdx,
                    IDocumentContentOperations::INS_EMPTYEXPAND );
            // record the replacement as an insert redline when tracking
            if( pDoc->IsRedlineOn() )
            {
                SwPaM aTemp(*pTNd, 0, *pTNd, rTxt.Len());
                pDoc->AppendRedline(new SwRedline(nsRedlineType_t::REDLINE_INSERT, aTemp), true);
            }
        }
        // adjust the vertical alignment (numbers sit at the bottom)
        if( bChgAlign &&
            ( SFX_ITEM_SET != rBox.GetFrmFmt()->GetItemState(
                RES_VERT_ORIENT, sal_True, &pItem ) ||
                text::VertOrientation::TOP == ((SwFmtVertOrient*)pItem)->GetVertOrient() ))
        {
            rBox.GetFrmFmt()->SetFmtAttr( SwFmtVertOrient( 0, text::VertOrientation::BOTTOM ));
        }
    }
}
// Inverse of ChgTextToNum: converts the box content back to its text
// representation for number format nFmt. For a non-plain-text format
// the node text is reformatted through the number formatter; the
// alignment changes made for numbers (right / bottom) are undone and
// the number-format color vs. saved user color is reconciled.
void ChgNumToText( SwTableBox& rBox, sal_uLong nFmt )
{
    sal_uLong nNdPos = rBox.IsValidNumTxtNd( sal_False );
    if( ULONG_MAX != nNdPos )
    {
        SwDoc* pDoc = rBox.GetFrmFmt()->GetDoc();
        SwTxtNode* pTNd = pDoc->GetNodes()[ nNdPos ]->GetTxtNode();
        sal_Bool bChgAlign = pDoc->IsInsTblAlignNum();
        const SfxPoolItem* pItem;
        Color* pCol = 0;
        if( NUMBERFORMAT_TEXT != nFmt )
        {
            // special text format: re-render the content through the
            // number formatter and replace the node text if it changed
            String sTmp, sTxt( pTNd->GetTxt() );
            pDoc->GetNumberFormatter()->GetOutputString( sTxt, nFmt, sTmp, &pCol );
            if( sTxt != sTmp )
            {
                // replace the text
                SwIndex aIdx( pTNd, sTxt.Len() );
                // JP 06.04.99: Bug 64321 - reset the DontExpand flags
                //              before replacing, so that they get
                //              spanned up again
                pTNd->DontExpandFmt( aIdx, sal_False, sal_False );
                aIdx = 0;
                pTNd->EraseText( aIdx, STRING_LEN,
                        IDocumentContentOperations::INS_EMPTYEXPAND );
                pTNd->InsertText( sTmp, aIdx,
                        IDocumentContentOperations::INS_EMPTYEXPAND );
            }
        }
        const SfxItemSet* pAttrSet = pTNd->GetpSwAttrSet();
        // undo the horizontal alignment applied for numbers
        if( bChgAlign && pAttrSet && SFX_ITEM_SET == pAttrSet->GetItemState(
            RES_PARATR_ADJUST, sal_False, &pItem ) &&
                SVX_ADJUST_RIGHT == ((SvxAdjustItem*)pItem)->GetAdjust() )
        {
            pTNd->SetAttr( SvxAdjustItem( SVX_ADJUST_LEFT, RES_PARATR_ADJUST ) );
        }
        // adjust the color, or save the "user color" for later restore
        if( !pAttrSet || SFX_ITEM_SET != pAttrSet->
            GetItemState( RES_CHRATR_COLOR, sal_False, &pItem ))
            pItem = 0;
        const Color* pOldNumFmtColor = rBox.GetSaveNumFmtColor();
        const Color* pNewUserColor = pItem ? &((SvxColorItem*)pItem)->GetValue() : 0;
        if( ( pNewUserColor && pOldNumFmtColor &&
                *pNewUserColor == *pOldNumFmtColor ) ||
            ( !pNewUserColor && !pOldNumFmtColor ))
        {
            // the current color is the number-format color (or none):
            // do not treat it as a user color; apply the new values and
            // drop the old number-format color if necessary
            if( pCol )
                // set the color if one was supplied
                pTNd->SetAttr( SvxColorItem( *pCol, RES_CHRATR_COLOR ));
            else if( pItem )
            {
                pNewUserColor = rBox.GetSaveUserColor();
                if( pNewUserColor )
                    pTNd->SetAttr( SvxColorItem( *pNewUserColor, RES_CHRATR_COLOR ));
                else
                    pTNd->ResetAttr( RES_CHRATR_COLOR );
            }
        }
        else
        {
            // remember the user color; apply the number-format color if
            // given, but never reset the color
            rBox.SetSaveUserColor( pNewUserColor );
            if( pCol )
                // set the color if one was supplied
                pTNd->SetAttr( SvxColorItem( *pCol, RES_CHRATR_COLOR ));
        }
        rBox.SetSaveNumFmtColor( pCol );
        // undo the vertical alignment applied for numbers
        if( bChgAlign &&
            SFX_ITEM_SET == rBox.GetFrmFmt()->GetItemState(
            RES_VERT_ORIENT, sal_False, &pItem ) &&
            text::VertOrientation::BOTTOM == ((SwFmtVertOrient*)pItem)->GetVertOrient() )
        {
            rBox.GetFrmFmt()->SetFmtAttr( SwFmtVertOrient( 0, text::VertOrientation::TOP ));
        }
    }
}
// used for detecting changes (mainly table box attributes)
// Reacts to attribute changes on a box format. When the number format,
// formula or value of the (single) box attached to this format
// changes, the box content is reformatted: either the text is replaced
// with the newly formatted number (ChgTextToNum) or converted back to
// plain text (ChgNumToText). Finally forwards to the base class.
void SwTableBoxFmt::Modify( const SfxPoolItem* pOld, const SfxPoolItem* pNew )
{
    if( !IsModifyLocked() && !IsInDocDTOR() )
    {
        const SwTblBoxNumFormat *pNewFmt = 0;
        const SwTblBoxFormula *pNewFml = 0;
        const SwTblBoxValue *pNewVal = 0;
        double aOldValue = 0;
        sal_uLong nOldFmt = NUMBERFORMAT_TEXT;
        // extract which box attribute(s) changed, plus their old values
        switch( pNew ? pNew->Which() : 0 )
        {
        case RES_ATTRSET_CHG:
        {
            const SfxItemSet& rSet = *((SwAttrSetChg*)pNew)->GetChgSet();
            if( SFX_ITEM_SET == rSet.GetItemState( RES_BOXATR_FORMAT,
                                    sal_False, (const SfxPoolItem**)&pNewFmt ) )
                nOldFmt = ((SwTblBoxNumFormat&)((SwAttrSetChg*)pOld)->
                                GetChgSet()->Get( RES_BOXATR_FORMAT )).GetValue();
            rSet.GetItemState( RES_BOXATR_FORMULA, sal_False,
                                    (const SfxPoolItem**)&pNewFml );
            if( SFX_ITEM_SET == rSet.GetItemState( RES_BOXATR_VALUE,
                                    sal_False, (const SfxPoolItem**)&pNewVal ) )
                aOldValue = ((SwTblBoxValue&)((SwAttrSetChg*)pOld)->
                                GetChgSet()->Get( RES_BOXATR_VALUE )).GetValue();
        }
        break;
        case RES_BOXATR_FORMAT:
            pNewFmt = (SwTblBoxNumFormat*)pNew;
            nOldFmt = ((SwTblBoxNumFormat*)pOld)->GetValue();
            break;
        case RES_BOXATR_FORMULA:
            pNewFml = (SwTblBoxFormula*)pNew;
            break;
        case RES_BOXATR_VALUE:
            pNewVal = (SwTblBoxValue*)pNew;
            aOldValue = ((SwTblBoxValue*)pOld)->GetValue();
            break;
        }
        // something changed and the set still carries some box attribute!
        if( pNewFmt || pNewFml || pNewVal )
        {
            GetDoc()->SetFieldsDirty(true, NULL, 0);
            if( SFX_ITEM_SET == GetItemState( RES_BOXATR_FORMAT, sal_False ) ||
                SFX_ITEM_SET == GetItemState( RES_BOXATR_VALUE, sal_False ) ||
                SFX_ITEM_SET == GetItemState( RES_BOXATR_FORMULA, sal_False ) )
            {
                // fetch the box (a value/formula format has exactly one)
                SwIterator<SwTableBox,SwFmt> aIter( *this );
                SwTableBox* pBox = aIter.First();
                if( pBox )
                {
                    ASSERT( !aIter.Next(), "keine Box oder mehrere am Format" );
                    sal_uLong nNewFmt;
                    if( pNewFmt )
                    {
                        nNewFmt = pNewFmt->GetValue();
                        // reformat
                        // is it newer, or was the current one removed?
                        if( SFX_ITEM_SET != GetItemState( RES_BOXATR_VALUE, sal_False ))
                            pNewFmt = 0;
                    }
                    else
                    {
                        // fetch the current format item
                        GetItemState( RES_BOXATR_FORMAT, sal_False,
                                            (const SfxPoolItem**)&pNewFmt );
                        nOldFmt = GetTblBoxNumFmt().GetValue();
                        nNewFmt = pNewFmt ? pNewFmt->GetValue() : nOldFmt;
                    }
                    // is it newer, or was the current one removed?
                    if( pNewVal )
                    {
                        if( NUMBERFORMAT_TEXT != nNewFmt )
                        {
                            if( SFX_ITEM_SET == GetItemState(
                                                RES_BOXATR_VALUE, sal_False ))
                                nOldFmt = NUMBERFORMAT_TEXT;
                            else
                                nNewFmt = NUMBERFORMAT_TEXT;
                        }
                        else if( NUMBERFORMAT_TEXT == nNewFmt )
                            nOldFmt = 0;
                    }
                    // Logic:
                    // value change: -> "simulate" a format change!
                    // format change:
                    //  text -> !text or format change:
                    //      - set alignment to RIGHT if LEFT or justified
                    //      - set vertical alignment to BOTTOM if TOP or
                    //        not set
                    //      - replace the text (color?? negative numbers
                    //        RED??)
                    //  !text -> text:
                    //      - set alignment to LEFT if RIGHT
                    //      - set vertical alignment to TOP if BOTTOM is
                    //        set
                    SvNumberFormatter* pNumFmtr = GetDoc()->GetNumberFormatter();
                    sal_Bool bNewIsTxtFmt = pNumFmtr->IsTextFormat( nNewFmt ) ||
                                        NUMBERFORMAT_TEXT == nNewFmt;
                    if( (!bNewIsTxtFmt && nOldFmt != nNewFmt) || pNewFml )
                    {
                        sal_Bool bChgTxt = sal_True;
                        double fVal = 0;
                        if( !pNewVal && SFX_ITEM_SET != GetItemState(
                            RES_BOXATR_VALUE, sal_False, (const SfxPoolItem**)&pNewVal ))
                        {
                            // no value was ever set: try to evaluate the
                            // current text content instead
                            sal_uLong nNdPos = pBox->IsValidNumTxtNd( sal_True );
                            if( ULONG_MAX != nNdPos )
                            {
                                sal_uInt32 nTmpFmtIdx = nNewFmt;
                                String aTxt( GetDoc()->GetNodes()[ nNdPos ]
                                                ->GetTxtNode()->GetRedlineTxt());
                                if( !aTxt.Len() )
                                    bChgTxt = sal_False;
                                else
                                {
                                    // JP 15.09.98: Bug 55741 - keep tabs
                                    lcl_TabToBlankAtSttEnd( aTxt );
                                    // JP 22.04.98: Bug 49659 -
                                    //  special handling for percent
                                    sal_Bool bIsNumFmt = sal_False;
                                    if( NUMBERFORMAT_PERCENT ==
                                        pNumFmtr->GetType( nNewFmt ))
                                    {
                                        sal_uInt32 nTmpFmt = 0;
                                        if( pNumFmtr->IsNumberFormat(
                                                    aTxt, nTmpFmt, fVal ))
                                        {
                                            // plain number given for a
                                            // percent format: append '%'
                                            if( NUMBERFORMAT_NUMBER ==
                                                pNumFmtr->GetType( nTmpFmt ))
                                                aTxt += '%';
                                            bIsNumFmt = pNumFmtr->IsNumberFormat(
                                                        aTxt, nTmpFmtIdx, fVal );
                                        }
                                    }
                                    else
                                        bIsNumFmt = pNumFmtr->IsNumberFormat(
                                                        aTxt, nTmpFmtIdx, fVal );
                                    if( bIsNumFmt )
                                    {
                                        // store the value directly in the
                                        // set - without Modify
                                        int bIsLockMod = IsModifyLocked();
                                        LockModify();
                                        SetFmtAttr( SwTblBoxValue( fVal ));
                                        if( !bIsLockMod )
                                            UnlockModify();
                                    }
                                }
                            }
                        }
                        else
                            fVal = pNewVal->GetValue();
                        // format the content with the new value and write
                        // it into the paragraph
                        Color* pCol = 0;
                        String sNewTxt;
                        if( DBL_MAX == fVal )
                            sNewTxt = ViewShell::GetShellRes()->aCalc_Error;
                        else
                        {
                            pNumFmtr->GetOutputString( fVal, nNewFmt, sNewTxt, &pCol );
                            if( !bChgTxt )
                                sNewTxt.Erase();
                        }
                        // over all boxes
                        ChgTextToNum( *pBox, sNewTxt, pCol,
                                        GetDoc()->IsInsTblAlignNum() );
                    }
                    else if( bNewIsTxtFmt && nOldFmt != nNewFmt )
                    {
                        // in any case the formulas/values have to be
                        // deleted now!
                        // LockModify();
                        // ResetAttr( RES_BOXATR_FORMULA, RES_BOXATR_VALUE );
                        // UnlockModify();
                        ChgNumToText( *pBox, nNewFmt );
                    }
                }
            }
        }
    }
    // and call the base class
    SwFrmFmt::Modify( pOld, pNew );
}
// Checks whether the box content parses as a number. On success,
// returns sal_True with the parsed value in rNum and the number-format
// index in rFmtIndex; rIsEmptyTxtNd reports whether the (single) text
// node is empty. Returns sal_False for boxes without a single plain
// text node or with non-numeric content.
sal_Bool SwTableBox::HasNumCntnt( double& rNum, sal_uInt32& rFmtIndex,
                            sal_Bool& rIsEmptyTxtNd ) const
{
    sal_Bool bRet = sal_False;
    sal_uLong nNdPos = IsValidNumTxtNd( sal_True );
    if( ULONG_MAX != nNdPos )
    {
        String aTxt( pSttNd->GetNodes()[ nNdPos ]->GetTxtNode()->
                            GetRedlineTxt() );
        // JP 15.09.98: Bug 55741 - keep tabs
        lcl_TabToBlankAtSttEnd( aTxt );
        rIsEmptyTxtNd = 0 == aTxt.Len();
        SvNumberFormatter* pNumFmtr = GetFrmFmt()->GetDoc()->GetNumberFormatter();
        const SfxPoolItem* pItem;
        if( SFX_ITEM_SET == GetFrmFmt()->GetItemState( RES_BOXATR_FORMAT,
                sal_False, &pItem ))
        {
            rFmtIndex = ((SwTblBoxNumFormat*)pItem)->GetValue();
            // JP 22.04.98: Bug 49659 - special handling for percent:
            // a plain number in a percent-formatted box gets '%' appended
            if( !rIsEmptyTxtNd &&
                NUMBERFORMAT_PERCENT == pNumFmtr->GetType( rFmtIndex ))
            {
                sal_uInt32 nTmpFmt = 0;
                if( pNumFmtr->IsNumberFormat( aTxt, nTmpFmt, rNum ) &&
                    NUMBERFORMAT_NUMBER == pNumFmtr->GetType( nTmpFmt ))
                    aTxt += '%';
            }
        }
        else
            rFmtIndex = 0;
        bRet = pNumFmtr->IsNumberFormat( aTxt, rFmtIndex, rNum );
    }
    else
        rIsEmptyTxtNd = sal_False;
    return bRet;
}
// For a formula box: checks whether the displayed text (or the
// number-format color) no longer matches what formatting the stored
// value with the stored number format would produce, i.e. whether the
// box needs to be re-rendered. Returns sal_True when a refresh is
// needed or the state cannot be compared.
sal_Bool SwTableBox::IsNumberChanged() const
{
    sal_Bool bRet = sal_True;
    if( SFX_ITEM_SET == GetFrmFmt()->GetItemState( RES_BOXATR_FORMULA, sal_False ))
    {
        const SwTblBoxNumFormat *pNumFmt;
        const SwTblBoxValue *pValue;
        if( SFX_ITEM_SET != GetFrmFmt()->GetItemState( RES_BOXATR_VALUE, sal_False,
            (const SfxPoolItem**)&pValue ))
            pValue = 0;
        if( SFX_ITEM_SET != GetFrmFmt()->GetItemState( RES_BOXATR_FORMAT, sal_False,
            (const SfxPoolItem**)&pNumFmt ))
            pNumFmt = 0;
        sal_uLong nNdPos;
        if( pNumFmt && pValue &&
            ULONG_MAX != ( nNdPos = IsValidNumTxtNd( sal_True ) ) )
        {
            // re-format the stored value and compare against the text
            // currently in the node (ignoring leading/trailing tabs)
            String sNewTxt, sOldTxt( pSttNd->GetNodes()[ nNdPos ]->
                                    GetTxtNode()->GetRedlineTxt() );
            lcl_DelTabsAtSttEnd( sOldTxt );
            Color* pCol = 0;
            GetFrmFmt()->GetDoc()->GetNumberFormatter()->GetOutputString(
                pValue->GetValue(), pNumFmt->GetValue(), sNewTxt, &pCol );
            // changed iff the text differs or the number-format color
            // no longer matches the saved one
            bRet = sNewTxt != sOldTxt ||
                   !( ( !pCol && !GetSaveNumFmtColor() ) ||
                      ( pCol && GetSaveNumFmtColor() &&
                        *pCol == *GetSaveNumFmtColor() ));
        }
    }
    return bRet;
}
// Returns the node index of the box's single text node if the box content
// qualifies as "numeric text" (exactly one text node, no nested table, no
// complex paragraphs), otherwise ULONG_MAX. With bCheckAttr set, the text
// node must also be free of flys/fields/etc. that span only part of it.
sal_uLong SwTableBox::IsValidNumTxtNd( sal_Bool bCheckAttr ) const
{
    sal_uLong nPos = ULONG_MAX;
    if( pSttNd )
    {
        SwNodeIndex aIdx( *pSttNd );
        sal_uLong nIndex = aIdx.GetIndex();
        const sal_uLong nIndexEnd = pSttNd->GetNodes()[ nIndex ]->EndOfSectionIndex();
        const SwTxtNode *pTextNode = 0;
        while( ++nIndex < nIndexEnd )
        {
            const SwNode* pNode = pSttNd->GetNodes()[nIndex];
            if( pNode->IsTableNode() )
            { /*return ULONG_MAX if the cell contains a table(in table)*/
                pTextNode = 0;
                break;
            }
            if( pNode->IsTxtNode() )
            {
                if( pTextNode )
                { /*return ULONG_MAX if the cell contains complex paragraphs*/
                    pTextNode = 0;
                    break;
                }
                else
                {
                    pTextNode = pNode->GetTxtNode();
                    nPos = nIndex;
                }
            }
        }
        if( pTextNode )
        {
            if( bCheckAttr )
            {
                const SwpHints* pHts = pTextNode->GetpSwpHints();
                const String& rTxt = pTextNode->GetTxt();
                // Now check whether the node really contains only plain text,
                // i.e. no flys/fields/... anchored inside it.
                if( pHts )
                {
                    xub_StrLen nNextSetField = 0;
                    for( sal_uInt16 n = 0; n < pHts->Count(); ++n )
                    {
                        const SwTxtAttr* pAttr = (*pHts)[ n ];
                        // Any attribute that does not span the whole text
                        // disqualifies the node...
                        if( RES_TXTATR_NOEND_BEGIN <= pAttr->Which() ||
                            *pAttr->GetStart() ||
                            *pAttr->GetAnyEnd() < rTxt.Len() )
                        {
                            if ( (*pAttr->GetStart() == nNextSetField)
                                 && (pAttr->Which() == RES_TXTATR_FIELD))
                            {
                                // #i104949# hideous hack for report builder:
                                // it inserts hidden variable-set fields at
                                // the beginning of para in cell, but they
                                // should not turn cell into text cell
                                const SwField* pField = pAttr->GetFmtFld().GetField();

                                if (pField &&
                                    (pField->GetTypeId() == TYP_SETFLD) &&
                                    (0 != (static_cast<SwSetExpField const*>
                                           (pField)->GetSubType() &
                                        nsSwExtendedSubType::SUB_INVISIBLE)))
                                {
                                    // ...except a leading run of invisible
                                    // set-expression fields, which is skipped.
                                    nNextSetField = *pAttr->GetStart() + 1;
                                    continue;
                                }
                            }
                            nPos = ULONG_MAX;
                            break;
                        }
                    }
                }
            }
        }
        else
            nPos = ULONG_MAX;
    }
    return nPos;
}
// Is this a formula box or a box with numeric content (AutoSum)?
// Returns the responsible attribute id (RES_BOXATR_FORMULA/RES_BOXATR_VALUE),
// USHRT_MAX for an empty single-text-node box, or 0 otherwise.
sal_uInt16 SwTableBox::IsFormulaOrValueBox() const
{
    sal_uInt16 nWhich = 0;
    const SwTxtNode* pTNd;
    SwFrmFmt* pFmt = GetFrmFmt();
    if( SFX_ITEM_SET == pFmt->GetItemState( RES_BOXATR_FORMULA, sal_False ))
        nWhich = RES_BOXATR_FORMULA;
    else if( SFX_ITEM_SET == pFmt->GetItemState( RES_BOXATR_VALUE, sal_False ) &&
            // a value only counts if its number format is not a text format
            !pFmt->GetDoc()->GetNumberFormatter()->IsTextFormat(
                pFmt->GetTblBoxNumFmt().GetValue() ))
        nWhich = RES_BOXATR_VALUE;
    else if( pSttNd && pSttNd->GetIndex() + 2 == pSttNd->EndOfSectionIndex()
            // exactly one (empty) text node between start and end node
            && 0 != ( pTNd = pSttNd->GetNodes()[ pSttNd->GetIndex() + 1 ]
                ->GetTxtNode() ) && !pTNd->GetTxt().Len() )
        nWhich = USHRT_MAX;

    return nWhich;
}
// Re-renders the displayed text of a value box from its stored
// value/format attributes, updating the text node only when the
// rendered string actually differs from the current content.
void SwTableBox::ActualiseValueBox()
{
    const SfxPoolItem *pFmtItem, *pValItem;
    SwFrmFmt* pFmt = GetFrmFmt();
    if( SFX_ITEM_SET == pFmt->GetItemState( RES_BOXATR_FORMAT, sal_True, &pFmtItem )
        && SFX_ITEM_SET == pFmt->GetItemState( RES_BOXATR_VALUE, sal_True, &pValItem ))
    {
        const sal_uLong nFmtId = ((SwTblBoxNumFormat*)pFmtItem)->GetValue();
        sal_uLong nNdPos = ULONG_MAX;
        SvNumberFormatter* pNumFmtr = pFmt->GetDoc()->GetNumberFormatter();

        // Only numeric formats on a valid single-text-node box are rendered.
        if( !pNumFmtr->IsTextFormat( nFmtId ) &&
            ULONG_MAX != (nNdPos = IsValidNumTxtNd( sal_True )) )
        {
            double fVal = ((SwTblBoxValue*)pValItem)->GetValue();
            Color* pCol = 0;
            String sNewTxt;
            pNumFmtr->GetOutputString( fVal, nFmtId, sNewTxt, &pCol );

            const String& rTxt = pSttNd->GetNodes()[ nNdPos ]->GetTxtNode()->GetTxt();
            if( rTxt != sNewTxt )
                ChgTextToNum( *this, sNewTxt, pCol, sal_False ,nNdPos);
        }
    }
}
// Replaces the owned colour pointed to by *ppCol with a heap copy of
// pNewCol (or a null pointer); a self-assignment is a no-op.
void SwTableBox_Impl::SetNewCol( Color** ppCol, const Color* pNewCol )
{
    // Nothing to do when the stored pointer already matches the new one.
    if( *ppCol == pNewCol )
        return;

    // Drop the previously owned colour, then store a fresh copy (or null).
    delete *ppCol;
    *ppCol = pNewCol ? new Color( *pNewCol ) : 0;
}
// Pimpl for SwTableCellInfo: walks the layout frames of one table and
// yields the first cell frame of each table box exactly once.
struct SwTableCellInfo::Impl
{
    const SwTable * m_pTable;      // the table being iterated
    const SwCellFrm * m_pCellFrm;  // current cell frame of the iteration
    const SwTabFrm * m_pTabFrm;    // table frame currently being traversed
    typedef ::std::set<const SwTableBox *> TableBoxes_t;
    TableBoxes_t m_HandledTableBoxes;  // boxes already yielded (skip repeats)

public:
    Impl()
        : m_pTable(NULL), m_pCellFrm(NULL), m_pTabFrm(NULL)
    {
    }

    ~Impl() {}

    // Binds the iterator to a table; traversal starts at the table's
    // master frame (follow frames are rewound to their master).
    void setTable(const SwTable * pTable) {
        m_pTable = pTable;
        SwFrmFmt * pFrmFmt = m_pTable->GetFrmFmt();
        m_pTabFrm = SwIterator<SwTabFrm,SwFmt>::FirstElement(*pFrmFmt);
        if (m_pTabFrm->IsFollow())
            m_pTabFrm = m_pTabFrm->FindMaster(true);
    }

    const SwTable * getTable() const { return m_pTable; }
    const SwCellFrm * getCellFrm() const { return m_pCellFrm; }

    const SwFrm * getNextFrmInTable(const SwFrm * pFrm);
    const SwCellFrm * getNextCellFrm(const SwFrm * pFrm);
    const SwCellFrm * getNextTableBoxsCellFrm(const SwFrm * pFrm);
    bool getNext();
};

// One step of a depth-first walk over the table's frame tree: descend
// first, then go to the next sibling, otherwise climb up; when the climb
// reaches the enclosing SwTabFrm, continue in that frame's follow.
const SwFrm * SwTableCellInfo::Impl::getNextFrmInTable(const SwFrm * pFrm)
{
    const SwFrm * pResult = NULL;

    // Do not descend into nested table frames other than our own.
    if (((! pFrm->IsTabFrm()) || pFrm == m_pTabFrm) && pFrm->GetLower())
        pResult = pFrm->GetLower();
    else if (pFrm->GetNext())
        pResult = pFrm->GetNext();
    else
    {
        while (pFrm->GetUpper() != NULL)
        {
            pFrm = pFrm->GetUpper();

            if (pFrm->IsTabFrm())
            {
                // End of this table frame: continue in its follow (may be NULL).
                m_pTabFrm = static_cast<const SwTabFrm *>(pFrm)->GetFollow();
                pResult = m_pTabFrm;
                break;
            }
            else if (pFrm->GetNext())
            {
                pResult = pFrm->GetNext();
                break;
            }
        }
    }

    return pResult;
}

// Advances the depth-first walk until the next cell frame is reached.
const SwCellFrm * SwTableCellInfo::Impl::getNextCellFrm(const SwFrm * pFrm)
{
    const SwCellFrm * pResult = NULL;

    while ((pFrm = getNextFrmInTable(pFrm)) != NULL)
    {
        if (pFrm->IsCellFrm())
        {
            pResult = static_cast<const SwCellFrm *>(pFrm);
            break;
        }
    }

    return pResult;
}

// Advances to the next cell frame whose table box has not been yielded
// yet, so each box is reported once even if it owns several frames
// (e.g. when a row is split across pages).
const SwCellFrm * SwTableCellInfo::Impl::getNextTableBoxsCellFrm(const SwFrm * pFrm)
{
    const SwCellFrm * pResult = NULL;

    while ((pFrm = getNextCellFrm(pFrm)) != NULL)
    {
        const SwCellFrm * pCellFrm = static_cast<const SwCellFrm *>(pFrm);
        const SwTableBox * pTabBox = pCellFrm->GetTabBox();
        TableBoxes_t::const_iterator aIt = m_HandledTableBoxes.find(pTabBox);

        if (aIt == m_HandledTableBoxes.end())
        {
            pResult = pCellFrm;
            m_HandledTableBoxes.insert(pTabBox);
            break;
        }
    }

    return pResult;
}
// Current cell frame of the iteration, or NULL before the first/after
// the last step.
const SwCellFrm * SwTableCellInfo::getCellFrm() const
{
    return m_pImpl->getCellFrm();
}

// Steps the iteration to the next unhandled table box; returns false
// when the table is exhausted.
bool SwTableCellInfo::Impl::getNext()
{
    if (m_pCellFrm == NULL)
    {
        // First step: start from the table frame itself.
        if (m_pTabFrm != NULL)
            m_pCellFrm = Impl::getNextTableBoxsCellFrm(m_pTabFrm);
    }
    else
        m_pCellFrm = Impl::getNextTableBoxsCellFrm(m_pCellFrm);

    return m_pCellFrm != NULL;
}

// Constructs an iterator over the cells of the given table.
SwTableCellInfo::SwTableCellInfo(const SwTable * pTable)
{
    m_pImpl.reset(new Impl());
    m_pImpl->setTable(pTable);
}

SwTableCellInfo::~SwTableCellInfo()
{
}

// Advances to the next cell; see Impl::getNext().
bool SwTableCellInfo::getNext()
{
    return m_pImpl->getNext();
}

// Layout rectangle of the current cell (empty when there is none).
SwRect SwTableCellInfo::getRect() const
{
    SwRect aRet;

    if (getCellFrm() != NULL)
        aRet = getCellFrm()->Frm();

    return aRet;
}

// Table box of the current cell, or NULL when there is none.
const SwTableBox * SwTableCellInfo::getTableBox() const
{
    const SwTableBox * pRet = NULL;

    if (getCellFrm() != NULL)
        pRet = getCellFrm()->GetTabBox();

    return pRet;
}

// Register the table as a client of the given format.
void SwTable::RegisterToFormat( SwFmt& rFmt )
{
    rFmt.Add( this );
}

// Register the table line as a client of the given format.
void SwTableLine::RegisterToFormat( SwFmt& rFmt )
{
    rFmt.Add( this );
}

// Register the table box as a client of the given format.
void SwTableBox::RegisterToFormat( SwFmt& rFmt )
{
    rFmt.Add( this );
}

// Detach the box from its frame format (if it is registered at all).
void SwTableBox::ForgetFrmFmt()
{
    if ( GetRegisteredIn() )
        GetRegisteredInNonConst()->Remove(this);
}
| 44,443 |
311 | package datadog.trace.bootstrap.instrumentation.ci.git;
import java.io.BufferedReader;
import java.io.FileInputStream;
import java.io.FileReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
* Represents a .git/config file. It uses a simple algorithm based on regex to parse line by line
* the .git/config file (INI file format).
*/
/**
 * Represents a .git/config file. It uses a simple algorithm based on regex to parse line by line
 * the .git/config file (INI file format).
 */
public class GitConfig {

  /** Matches a section header line, e.g. {@code [remote "origin"]}. */
  private final Pattern section = Pattern.compile("\\s*\\[([^]]*)\\]\\s*");

  /** Matches a key/value line, e.g. {@code url = https://...}. */
  private final Pattern keyValue = Pattern.compile("\\s*([^=]*)=(.*)");

  /** Parsed entries: section name -> (key -> value). */
  private final Map<String, Map<String, String>> entries = new HashMap<>();

  public GitConfig(final String path) {
    load(path);
  }

  /**
   * Parses the INI-style file at {@code path} into {@link #entries}. Parsing is best-effort: any
   * I/O failure returns silently, keeping whatever was parsed so far.
   */
  private void load(final String path) {
    if (path == null || path.isEmpty()) {
      return;
    }

    // Typically, a section of the .git/config file looks like:
    // [remote "origin"]
    //  url = https://some-host/user/repository.git
    //  fetch = +refs/heads/*:refs/remotes/origin/*
    //
    // Read explicitly as UTF-8: the previously used no-arg FileReader decodes with the
    // platform default charset (pre-JDK 18), which can corrupt non-ASCII config values.
    try (final BufferedReader br =
        new BufferedReader(
            new InputStreamReader(new FileInputStream(path), StandardCharsets.UTF_8))) {
      String line;
      // Local renamed from `section` to avoid shadowing the Pattern field of the same name.
      String currentSection = null;
      while ((line = br.readLine()) != null) {
        // Check if current line matches with the `section` regex:
        Matcher m = this.section.matcher(line);
        if (m.matches()) {
          // Section found: (E.g: remote "origin")
          currentSection = m.group(1).trim();
        } else if (currentSection != null) {
          // Locate the concrete `section` in the `entries` map
          // and update it with the found key/value.
          // E.g: Map({`remote "origin"`: {`url`:`https://some-host/user/repository.git`}}
          final Map<String, String> kv =
              this.entries.computeIfAbsent(currentSection, k -> new HashMap<>());

          // Check if current line is a key/value inside of a certain section.
          m = this.keyValue.matcher(line);
          if (m.matches()) {
            // Key/value found: (E.g: key=url, value=https://some-host/user/repository.git)
            final String key = m.group(1).trim();
            final String value = m.group(2).trim();
            kv.put(key, value);
          }
        }
      }
    } catch (final IOException ignored) {
      // As extract .git config information should be a best-effort approach, we don't want to
      // bother customers with
      // error messages at this point. If .git/config file cannot be parsed, we return the control
      // to the invoker.
    }
  }

  /**
   * Returns the value stored under {@code key} in {@code section}, or {@code null} when either
   * the section or the key is absent.
   */
  public String getString(final String section, final String key) {
    final Map<String, String> kv = entries.get(section);
    if (kv == null) {
      return null;
    }
    return kv.get(key);
  }
}
| 1,090 |
465 | # -*- coding: utf8 -*-
# Copyright (c) 2017-2021 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Internal error.
INTERNALERROR = 'InternalError'

# Parameter error.
INVALIDPARAMETER = 'InvalidParameter'

# Invalid parameter value.
INVALIDPARAMETERVALUE = 'InvalidParameterValue'

# Invalid AppId.
INVALIDPARAMETERVALUE_APPID = 'InvalidParameterValue.AppId'

# Invalid count.
INVALIDPARAMETERVALUE_COUNT = 'InvalidParameterValue.Count'

# Invalid verification data.
INVALIDPARAMETERVALUE_DATA = 'InvalidParameterValue.Data'

# The input string is empty.
INVALIDPARAMETERVALUE_EMPTYSTRING = 'InvalidParameterValue.EmptyString'

# Quantity limit exceeded.
INVALIDPARAMETERVALUE_LIMIT = 'InvalidParameterValue.Limit'

# Invalid order id.
INVALIDPARAMETERVALUE_ORDERID = 'InvalidParameterValue.OrderId'

# Quantity limit exceeded.
INVALIDPARAMETERVALUE_OVERLIMIT = 'InvalidParameterValue.OverLimit'

# Operation not permitted.
INVALIDPARAMETERVALUE_PERMISSIONDENIED = 'InvalidParameterValue.PermissionDenied'

# Invalid quantity.
INVALIDPARAMETERVALUE_QUANTITY = 'InvalidParameterValue.Quantity'

# Invalid TID code.
INVALIDPARAMETERVALUE_TID = 'InvalidParameterValue.Tid'
16,110 | #!/usr/bin/env python3.7
def f():
return (i * 2 async for i in arange(42))
def g():
return (
something_long * something_long
async for something_long in async_generator(with_an_argument)
)
async def func():
if test:
out_batched = [
i
async for i in aitertools._async_map(
self.async_inc, arange(8), batch_size=3
)
]
def awaited_generator_value(n):
return (await awaitable for awaitable in awaitable_list)
def make_arange(n):
return (i * 2 for i in range(n) if await wrap(i))
# output
#!/usr/bin/env python3.7
def f():
return (i * 2 async for i in arange(42))
def g():
return (
something_long * something_long
async for something_long in async_generator(with_an_argument)
)
async def func():
if test:
out_batched = [
i
async for i in aitertools._async_map(
self.async_inc, arange(8), batch_size=3
)
]
def awaited_generator_value(n):
return (await awaitable for awaitable in awaitable_list)
def make_arange(n):
return (i * 2 for i in range(n) if await wrap(i))
| 557 |
575 | <reponame>iridium-browser/iridium-browser<gh_stars>100-1000
// Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef CHROME_BROWSER_UI_ANDROID_AUTOFILL_SAVE_ADDRESS_PROFILE_PROMPT_VIEW_ANDROID_H_
#define CHROME_BROWSER_UI_ANDROID_AUTOFILL_SAVE_ADDRESS_PROFILE_PROMPT_VIEW_ANDROID_H_
#include <jni.h>
#include <memory>
#include "base/android/scoped_java_ref.h"
#include "base/macros.h"
#include "chrome/browser/autofill/android/save_address_profile_prompt_view.h"
namespace content {
class WebContents;
}
namespace autofill {
class SaveAddressProfilePromptController;
// JNI wrapper for Java SaveAddressProfilePrompt.
class SaveAddressProfilePromptViewAndroid
    : public SaveAddressProfilePromptView {
 public:
  explicit SaveAddressProfilePromptViewAndroid(
      content::WebContents* web_contents);
  // Not copyable: owns a global JNI reference.
  SaveAddressProfilePromptViewAndroid(
      const SaveAddressProfilePromptViewAndroid&) = delete;
  SaveAddressProfilePromptViewAndroid& operator=(
      const SaveAddressProfilePromptViewAndroid&) = delete;
  ~SaveAddressProfilePromptViewAndroid() override;

  // Shows the prompt for `controller`; returns whether it could be shown
  // (presumably false when the Java prompt cannot be created — confirm in
  // the .cc implementation).
  bool Show(SaveAddressProfilePromptController* controller) override;

 private:
  // The corresponding Java SaveAddressProfilePrompt owned by this class.
  base::android::ScopedJavaGlobalRef<jobject> java_object_;

  // NOTE(review): raw pointer; lifetime presumably outlives this view —
  // confirm against the owning controller.
  content::WebContents* web_contents_;
};
} // namespace autofill
#endif // CHROME_BROWSER_UI_ANDROID_AUTOFILL_SAVE_ADDRESS_PROFILE_PROMPT_VIEW_ANDROID_H_
| 517 |
4,339 | <reponame>geertjanw/ignite
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.ignite.internal.processors.security.scheduler;
import java.util.Collection;
import java.util.UUID;
import java.util.concurrent.Callable;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import org.apache.ignite.Ignition;
import org.apache.ignite.internal.processors.security.AbstractRemoteSecurityContextCheckTest;
import org.junit.Test;
/**
* Testing operation security context when the scheduler closure is executed on remote nodes.
* <p>
* The initiator node broadcasts a task to 'run' nodes that run a scheduler closure (runnable or callable). The closure
* is executed on 'run' nodes and broadcasts a task to 'endpoint' nodes. On every step, it is performed verification
* that operation security context is the initiator context.
*/
public class SchedulerRemoteSecurityContextCheckTest extends AbstractRemoteSecurityContextCheckTest {
    /** Counted down by each executed scheduler closure; awaited in {@link #afterCompute()}. */
    private static volatile CountDownLatch latch;

    /** {@inheritDoc} */
    @Override protected void beforeTestsStarted() throws Exception {
        // One server and one client node for each role: initiator, run, endpoint.
        startGridAllowAll(SRV_INITIATOR);
        startClientAllowAll(CLNT_INITIATOR);
        startGridAllowAll(SRV_RUN);
        startClientAllowAll(CLNT_RUN);
        startGridAllowAll(SRV_ENDPOINT);
        startClientAllowAll(CLNT_ENDPOINT);

        awaitPartitionMapExchange();
    }

    /** Runs the closure synchronously via {@code scheduler().runLocal(...)} and waits on the future. */
    @Test
    public void testRunLocal() {
        runAndCheck(() -> Ignition.localIgnite().scheduler()
            .runLocal(new TestClosure(endpointIds())).get());
    }

    /** Schedules the closure via {@code runLocal} with a 1 ms delay (no wait on the future). */
    @Test
    public void testRunLocalWithDelay() {
        runAndCheck(() -> Ignition.localIgnite().scheduler()
            .runLocal(new TestClosure(endpointIds()), 1, TimeUnit.MILLISECONDS));
    }

    /** Runs the closure as a callable via {@code callLocal} and waits on the future. */
    @Test
    public void testCallLocal() {
        runAndCheck(() -> Ignition.localIgnite().scheduler()
            .callLocal(new TestClosure(endpointIds())).get());
    }

    /** {@inheritDoc} */
    @Override protected void setupVerifier(Verifier verifier) {
        // Expect one START and one CHECK registration on every 'run' node...
        nodesToRun().forEach(n -> {
            VERIFIER.expect(n, OPERATION_START, 1);
            VERIFIER.expect(n, OPERATION_CHECK, 1);
        });

        // ...and two ENDPOINT registrations on every endpoint node.
        endpoints().forEach(n -> VERIFIER.expect(n, OPERATION_ENDPOINT, 2));
    }

    /** {@inheritDoc} */
    @Override protected void beforeCompute() {
        // Two countdowns are expected — presumably one per 'run' node (server + client).
        latch = new CountDownLatch(2);
    }

    /** {@inheritDoc} */
    @Override protected void afterCompute() {
        try {
            latch.await(10, TimeUnit.SECONDS);
        }
        catch (InterruptedException e) {
            throw new RuntimeException(e);
        }
    }

    /** Test closure: registers the security check and broadcasts to the endpoint nodes. */
    private static class TestClosure implements Runnable, Callable<Void> {
        /** Endpoint ids. */
        private final Collection<UUID> endpointIds;

        /** Creates a closure that will broadcast to the given endpoint node ids. */
        public TestClosure(Collection<UUID> endpointIds) {
            this.endpointIds = endpointIds;
        }

        /** {@inheritDoc} */
        @Override public void run() {
            VERIFIER.register(OPERATION_CHECK);

            compute(Ignition.localIgnite(), endpointIds).broadcast(() -> VERIFIER.register(OPERATION_ENDPOINT));

            latch.countDown();
        }

        /** {@inheritDoc} */
        @Override public Void call() throws Exception {
            run();

            return null;
        }
    }
}
| 1,545 |
513 | <gh_stars>100-1000
/*
* The MIT License
*
* Copyright (C) 2015-2016 <NAME> <<EMAIL>>
* Illustrations have been taken from the Linux kernel rbtree.c
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* 'Software'), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include "pmem.h"
#include "ptree-rb.h"
typedef enum PTreeRBColor_ {
P_TREE_RB_COLOR_RED = 0x01,
P_TREE_RB_COLOR_BLACK = 0x02
} PTreeRBColor;
typedef struct PTreeRBNode_ {
struct PTreeBaseNode_ base;
struct PTreeRBNode_ *parent;
PTreeRBColor color;
} PTreeRBNode;
static pboolean pp_tree_rb_is_black (PTreeRBNode *node);
static pboolean pp_tree_rb_is_red (PTreeRBNode *node);
static PTreeRBNode * pp_tree_rb_get_gparent (PTreeRBNode *node);
static PTreeRBNode * pp_tree_rb_get_uncle (PTreeRBNode *node);
static PTreeRBNode * pp_tree_rb_get_sibling (PTreeRBNode *node);
static void pp_tree_rb_rotate_left (PTreeRBNode *node, PTreeBaseNode **root);
static void pp_tree_rb_rotate_right (PTreeRBNode *node, PTreeBaseNode **root);
static void pp_tree_rb_balance_insert (PTreeRBNode *node, PTreeBaseNode **root);
static void pp_tree_rb_balance_remove (PTreeRBNode *node, PTreeBaseNode **root);
/* Reports whether a node is black; NULL leaves count as black. */
static pboolean
pp_tree_rb_is_black (PTreeRBNode *node)
{
	return (node == NULL || (node->color & P_TREE_RB_COLOR_BLACK) != 0) ? TRUE : FALSE;
}
/* Reports whether a node is red; the caller must pass a non-NULL node. */
static pboolean
pp_tree_rb_is_red (PTreeRBNode *node)
{
	if ((node->color & P_TREE_RB_COLOR_RED) != 0)
		return TRUE;

	return FALSE;
}
/* Returns the grandparent; both parent and grandparent must exist. */
static PTreeRBNode *
pp_tree_rb_get_gparent (PTreeRBNode *node)
{
	return node->parent->parent;
}

/* Returns the uncle (the parent's sibling); may be NULL. */
static PTreeRBNode *
pp_tree_rb_get_uncle (PTreeRBNode *node)
{
	PTreeRBNode *gparent = pp_tree_rb_get_gparent (node);

	if ((PTreeRBNode *) gparent->base.left == node->parent)
		return (PTreeRBNode *) gparent->base.right;
	else
		return (PTreeRBNode *) gparent->base.left;
}

/* Returns the sibling of a node; the parent must exist. */
static PTreeRBNode *
pp_tree_rb_get_sibling (PTreeRBNode *node)
{
	if (node->parent->base.left == (PTreeBaseNode *) node)
		return (PTreeRBNode *) node->parent->base.right;
	else
		return (PTreeRBNode *) node->parent->base.left;
}
/* Left rotation around 'node': its right child becomes the subtree root
 * and 'node' becomes that child's left child.  *root is updated when the
 * rotation happens at the tree root. */
static void
pp_tree_rb_rotate_left (PTreeRBNode *node, PTreeBaseNode **root)
{
	PTreeBaseNode *tmp_node;

	tmp_node = node->base.right;

	/* Re-attach the pivot to node's former parent (if any). */
	if (P_LIKELY (node->parent != NULL)) {
		if (node->parent->base.left == (PTreeBaseNode *) node)
			node->parent->base.left = tmp_node;
		else
			node->parent->base.right = tmp_node;
	}

	/* Move the pivot's left subtree under 'node'. */
	node->base.right = tmp_node->left;

	if (tmp_node->left != NULL)
		((PTreeRBNode *) tmp_node->left)->parent = node;

	tmp_node->left = (PTreeBaseNode *) node;
	((PTreeRBNode *) tmp_node)->parent = node->parent;
	node->parent = (PTreeRBNode *) tmp_node;

	if (P_UNLIKELY (((PTreeRBNode *) tmp_node)->parent == NULL))
		*root = tmp_node;
}

/* Right rotation around 'node' — mirror image of pp_tree_rb_rotate_left. */
static void
pp_tree_rb_rotate_right (PTreeRBNode *node, PTreeBaseNode **root)
{
	PTreeBaseNode *tmp_node;

	tmp_node = node->base.left;

	/* Re-attach the pivot to node's former parent (if any). */
	if (P_LIKELY (node->parent != NULL)) {
		if (node->parent->base.left == (PTreeBaseNode *) node)
			node->parent->base.left = tmp_node;
		else
			node->parent->base.right = tmp_node;
	}

	/* Move the pivot's right subtree under 'node'. */
	node->base.left = tmp_node->right;

	if (tmp_node->right != NULL)
		((PTreeRBNode *) tmp_node->right)->parent = node;

	tmp_node->right = (PTreeBaseNode *) node;
	((PTreeRBNode *) tmp_node)->parent = node->parent;
	node->parent = (PTreeRBNode *) tmp_node;

	if (P_UNLIKELY (((PTreeRBNode *) tmp_node)->parent == NULL))
		*root = tmp_node;
}
/* Restores red-black invariants after inserting the freshly-created red
 * node 'node'; *root may be updated when rotations move the tree root. */
static void
pp_tree_rb_balance_insert (PTreeRBNode *node, PTreeBaseNode **root)
{
	PTreeRBNode *uncle;
	PTreeRBNode *gparent;

	while (TRUE) {
		/* Case 1: We are at the root */
		if (P_UNLIKELY (node->parent == NULL)) {
			node->color = P_TREE_RB_COLOR_BLACK;
			break;
		}

		/* Case 2: We have a black parent */
		if (pp_tree_rb_is_black (node->parent) == TRUE)
			break;

		uncle = pp_tree_rb_get_uncle (node);
		gparent = pp_tree_rb_get_gparent (node);

		/* Case 3: Both parent and uncle are red, flip colors
		 *
		 *       G            g
		 *      / \          / \
		 *     p   u  -->   P   U
		 *    /            /
		 *   n            n
		 */
		if (uncle != NULL && pp_tree_rb_is_red (uncle) == TRUE) {
			node->parent->color = P_TREE_RB_COLOR_BLACK;
			uncle->color = P_TREE_RB_COLOR_BLACK;
			gparent->color = P_TREE_RB_COLOR_RED;

			/* Continue iteratively from gparent */
			node = gparent;
			continue;
		}

		if (node->parent == (PTreeRBNode *) gparent->base.left) {
			if (node == (PTreeRBNode *) node->parent->base.right) {
				/* Case 4a: Left rotate at parent
				 *
				 *      G             G
				 *     / \           / \
				 *    p   U  -->    n   U
				 *     \           /
				 *      n         p
				 */
				pp_tree_rb_rotate_left (node->parent, root);
				node = (PTreeRBNode *) node->base.left;
			}

			gparent->color = P_TREE_RB_COLOR_RED;
			node->parent->color = P_TREE_RB_COLOR_BLACK;

			/* Case 5a: Right rotate at gparent
			 *
			 *        G           P
			 *       / \         / \
			 *      p   U  -->  n   g
			 *     /                 \
			 *    n                   U
			 */
			pp_tree_rb_rotate_right (gparent, root);

			break;
		} else {
			if (node == (PTreeRBNode *) node->parent->base.left) {
				/* Case 4b: Right rotate at parent */
				pp_tree_rb_rotate_right (node->parent, root);
				node = (PTreeRBNode *) node->base.right;
			}

			gparent->color = P_TREE_RB_COLOR_RED;
			node->parent->color = P_TREE_RB_COLOR_BLACK;

			/* Case 5b: Left rotate at gparent*/
			pp_tree_rb_rotate_left (gparent, root);

			break;
		}
	}
}
/* Restores red-black invariants before unlinking the black leaf 'node'
 * (called only when the node to remove is black with no children);
 * *root may be updated when rotations move the tree root. */
static void
pp_tree_rb_balance_remove (PTreeRBNode *node, PTreeBaseNode **root)
{
	PTreeRBNode *sibling;

	while (TRUE) {
		/* Case 1: We are at the root */
		if (P_UNLIKELY (node->parent == NULL))
			break;

		sibling = pp_tree_rb_get_sibling (node);

		if (pp_tree_rb_is_red (sibling) == TRUE) {
			/*
			 * Case 2: Left (right) rotate at parent
			 *
			 *     P               S
			 *    / \             / \
			 *   N   s    -->    p   Sr
			 *      / \         / \
			 *     Sl  Sr      N   Sl
			 */
			node->parent->color = P_TREE_RB_COLOR_RED;
			sibling->color = P_TREE_RB_COLOR_BLACK;

			if ((PTreeBaseNode *) node == node->parent->base.left)
				pp_tree_rb_rotate_left (node->parent, root);
			else
				pp_tree_rb_rotate_right (node->parent, root);

			sibling = pp_tree_rb_get_sibling (node);
		}

		/*
		 * Case 3: Sibling (parent) color flip
		 *
		 *    (p)           (p)
		 *    / \           / \
		 *   N   S    -->  N   s
		 *      / \           / \
		 *     Sl  Sr        Sl  Sr
		 */
		if (pp_tree_rb_is_black ((PTreeRBNode *) sibling->base.left) == TRUE &&
		    pp_tree_rb_is_black ((PTreeRBNode *) sibling->base.right) == TRUE) {
			sibling->color = P_TREE_RB_COLOR_RED;

			if (pp_tree_rb_is_black (node->parent) == TRUE) {
				/* Black height shrank on both sides: recurse upwards. */
				node = node->parent;
				continue;
			} else {
				node->parent->color = P_TREE_RB_COLOR_BLACK;
				break;
			}
		}

		/*
		 * Case 4: Right (left) rotate at sibling
		 *
		 *   (p)           (p)
		 *   / \           / \
		 *  N   S    -->  N   Sl
		 *     / \             \
		 *    sl  Sr            s
		 *                       \
		 *                        Sr
		 */
		if ((PTreeBaseNode *) node == node->parent->base.left &&
		    pp_tree_rb_is_black ((PTreeRBNode *) sibling->base.right) == TRUE) {
			sibling->color = P_TREE_RB_COLOR_RED;
			((PTreeRBNode *) sibling->base.left)->color = P_TREE_RB_COLOR_BLACK;

			pp_tree_rb_rotate_right (sibling, root);

			sibling = pp_tree_rb_get_sibling (node);
		} else if ((PTreeBaseNode *) node == node->parent->base.right &&
			   pp_tree_rb_is_black ((PTreeRBNode *) sibling->base.left) == TRUE) {
			sibling->color = P_TREE_RB_COLOR_RED;
			((PTreeRBNode *) sibling->base.right)->color = P_TREE_RB_COLOR_BLACK;

			pp_tree_rb_rotate_left (sibling, root);

			sibling = pp_tree_rb_get_sibling (node);
		}

		/*
		 * Case 5: Left (right) rotate at parent and color flips
		 *
		 *    (p)             (s)
		 *    / \             / \
		 *   N   S     -->   P   Sr
		 *      / \         / \
		 *    (sl) sr      N  (sl)
		 */
		sibling->color = node->parent->color;
		node->parent->color = P_TREE_RB_COLOR_BLACK;

		if ((PTreeBaseNode *) node == node->parent->base.left) {
			((PTreeRBNode *) sibling->base.right)->color = P_TREE_RB_COLOR_BLACK;
			pp_tree_rb_rotate_left (node->parent, root);
		} else {
			((PTreeRBNode *) sibling->base.left)->color = P_TREE_RB_COLOR_BLACK;
			pp_tree_rb_rotate_right (node->parent, root);
		}

		break;
	}
}
/* Inserts a key/value pair, replacing an existing pair with an equal key.
 * Returns TRUE only when a NEW node was created; note that both the
 * replace path and an allocation failure return FALSE (caller cannot
 * distinguish them — long-standing upstream behavior, kept as is). */
pboolean
p_tree_rb_insert (PTreeBaseNode **root_node,
		  PCompareDataFunc compare_func,
		  ppointer data,
		  PDestroyFunc key_destroy_func,
		  PDestroyFunc value_destroy_func,
		  ppointer key,
		  ppointer value)
{
	PTreeBaseNode **cur_node;
	PTreeBaseNode *parent_node;
	pint cmp_result;

	cur_node = root_node;
	parent_node = *root_node;

	/* Find where to insert the node */
	while (*cur_node != NULL) {
		cmp_result = compare_func (key, (*cur_node)->key, data);

		if (cmp_result < 0) {
			parent_node = *cur_node;
			cur_node = &(*cur_node)->left;
		} else if (cmp_result > 0) {
			parent_node = *cur_node;
			cur_node = &(*cur_node)->right;
		} else
			break;
	}

	/* If we have existing one - replace a key-value pair */
	if (*cur_node != NULL) {
		if (key_destroy_func != NULL)
			key_destroy_func ((*cur_node)->key);

		if (value_destroy_func != NULL)
			value_destroy_func ((*cur_node)->value);

		(*cur_node)->key = key;
		(*cur_node)->value = value;

		return FALSE;
	}

	if (P_UNLIKELY ((*cur_node = p_malloc0 (sizeof (PTreeRBNode))) == NULL))
		return FALSE;

	(*cur_node)->key = key;
	(*cur_node)->value = value;

	/* New nodes start red and are linked to the parent found above. */
	((PTreeRBNode *) *cur_node)->color = P_TREE_RB_COLOR_RED;
	((PTreeRBNode *) *cur_node)->parent = (PTreeRBNode *) parent_node;

	/* Balance the tree */
	pp_tree_rb_balance_insert ((PTreeRBNode *) *cur_node, root_node);

	return TRUE;
}
/* Removes the node whose key compares equal to 'key', destroying the
 * stored key/value with the supplied destroy functions.  Returns FALSE
 * when the key is not present. */
pboolean
p_tree_rb_remove (PTreeBaseNode **root_node,
		  PCompareDataFunc compare_func,
		  ppointer data,
		  PDestroyFunc key_destroy_func,
		  PDestroyFunc value_destroy_func,
		  pconstpointer key)
{
	PTreeBaseNode *cur_node;
	PTreeBaseNode *prev_node;
	PTreeBaseNode *child_node;
	PTreeRBNode *child_parent;
	pint cmp_result;

	cur_node = *root_node;

	/* Standard BST lookup. */
	while (cur_node != NULL) {
		cmp_result = compare_func (key, cur_node->key, data);

		if (cmp_result < 0)
			cur_node = cur_node->left;
		else if (cmp_result > 0)
			cur_node = cur_node->right;
		else
			break;
	}

	if (P_UNLIKELY (cur_node == NULL))
		return FALSE;

	/* Two children: move the in-order predecessor's payload here and
	 * remove the predecessor node instead (it has at most one child). */
	if (cur_node->left != NULL && cur_node->right != NULL) {
		prev_node = cur_node->left;

		while (prev_node->right != NULL)
			prev_node = prev_node->right;

		cur_node->key = prev_node->key;
		cur_node->value = prev_node->value;

		/* Mark node for removal */
		cur_node = prev_node;
	}

	child_node = cur_node->left == NULL ? cur_node->right : cur_node->left;

	/* Removing a childless black node breaks black height: rebalance first. */
	if (child_node == NULL && pp_tree_rb_is_black ((PTreeRBNode *) cur_node) == TRUE)
		pp_tree_rb_balance_remove ((PTreeRBNode *) cur_node, root_node);

	/* Replace node with its child */
	if (cur_node == *root_node) {
		*root_node = child_node;
		child_parent = NULL;
	} else {
		child_parent = ((PTreeRBNode *) cur_node)->parent;

		if (child_parent->base.left == cur_node)
			child_parent->base.left = child_node;
		else
			child_parent->base.right = child_node;
	}

	if (child_node != NULL) {
		((PTreeRBNode *) child_node)->parent = child_parent;

		/* Check if we need to repaint the node */
		if (pp_tree_rb_is_black ((PTreeRBNode *) cur_node) == TRUE)
			((PTreeRBNode *) child_node)->color = P_TREE_RB_COLOR_BLACK;
	}

	/* Free unused node */
	if (key_destroy_func != NULL)
		key_destroy_func (cur_node->key);

	if (value_destroy_func != NULL)
		value_destroy_func (cur_node->value);

	p_free (cur_node);

	return TRUE;
}

/* Releases the memory of a single tree node; the stored key/value are
 * not touched (the caller destroys them separately). */
void
p_tree_rb_node_free (PTreeBaseNode *node)
{
	p_free (node);
}
| 5,705 |
371 | /*
* Copyright (c) 2013-2021 Cinchapi Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.cinchapi.concourse;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.stream.Collectors;
import com.cinchapi.concourse.util.PrettyLinkedHashMap;
/**
* Contains functions for preserving backwards compatibility.
*
* @author <NAME>
*/
/**
 * Contains functions for preserving backwards compatibility.
 *
 * @author <NAME>
 */
public final class BackwardsCompatability {

    /**
     * Transform a value returned from {@link Concourse#review(long) review}
     * methods to one that should be returned from {@link Concourse#audit(long)
     * audit} methods.
     *
     * <p>A single-element change list collapses to that element; longer lists
     * are rendered with {@link List#toString()}. Insertion order of
     * {@code review} is preserved in the returned pretty map.
     *
     * @param review the key to change-list mapping returned by a review method
     * @return the transformed value
     */
    public static <K> Map<K, String> auditFromReview(
            Map<K, List<String>> review) {
        return review.entrySet().stream()
                .collect(Collectors.toMap(Entry::getKey,
                        entry -> entry.getValue().size() == 1
                                // The list elements are already Strings, so the
                                // redundant .toString() call has been dropped.
                                ? entry.getValue().iterator().next()
                                : entry.getValue().toString(),
                        (a, b) -> a, PrettyLinkedHashMap::create));
    }

    private BackwardsCompatability() {/* no-init */}
}
| 628 |
2,189 | #pragma once
#include <steem/chain/steem_fwd.hpp>
#include <steem/chain/util/manabar.hpp>
#include <steem/plugins/rc/rc_config.hpp>
#include <steem/plugins/rc/rc_utility.hpp>
#include <steem/plugins/rc/resource_count.hpp>
#include <steem/chain/steem_object_types.hpp>
#include <steem/protocol/asset.hpp>
#include <fc/int_array.hpp>
namespace steem { namespace plugins { namespace rc {
using namespace steem::chain;
using steem::protocol::asset;
using steem::protocol::asset_symbol_type;
#ifndef STEEM_RC_SPACE_ID
#define STEEM_RC_SPACE_ID 16
#endif
#define STEEM_RC_DRC_FLOAT_LEVEL (20*STEEM_1_PERCENT)
#define STEEM_RC_MAX_DRC_RATE 1000
enum rc_object_types
{
rc_resource_param_object_type = ( STEEM_RC_SPACE_ID << 8 ),
rc_pool_object_type = ( STEEM_RC_SPACE_ID << 8 ) + 1,
rc_account_object_type = ( STEEM_RC_SPACE_ID << 8 ) + 2,
rc_delegation_pool_object_type = ( STEEM_RC_SPACE_ID << 8 ) + 3,
rc_delegation_from_account_object_type = ( STEEM_RC_SPACE_ID << 8 ) + 4,
rc_indel_edge_object_type = ( STEEM_RC_SPACE_ID << 8 ) + 5,
rc_outdel_drc_edge_object_type = ( STEEM_RC_SPACE_ID << 8 ) + 6
};
/**
 * Static parameter set for every resource type, one slot per resource.
 * Indexed only by id, so presumably a single global instance — confirm
 * against the plugin initialization code.
 */
class rc_resource_param_object : public object< rc_resource_param_object_type, rc_resource_param_object >
{
   public:
      template< typename Constructor, typename Allocator >
      rc_resource_param_object( Constructor&& c, allocator< Allocator > a )
      {
         c( *this );
      }
      rc_resource_param_object() {}

      id_type id;
      // Per-resource-type parameters (see resource_count.hpp for the set
      // of resource types).
      fc::int_array< rc_resource_params, STEEM_NUM_RESOURCE_TYPES >
         resource_param_array;
};
STEEM_OBJECT_ID_TYPE( rc_resource_param );

/**
 * Current size of each resource pool, one slot per resource type.
 * Indexed only by id, so presumably a single global instance — confirm
 * against the plugin initialization code.
 */
class rc_pool_object : public object< rc_pool_object_type, rc_pool_object >
{
   public:
      template< typename Constructor, typename Allocator >
      rc_pool_object( Constructor&& c, allocator< Allocator > a )
      {
         c( *this );
      }
      rc_pool_object() {}

      id_type id;
      // Current pool level for each resource type.
      fc::int_array< int64_t, STEEM_NUM_RESOURCE_TYPES >
         pool_array;
};
STEEM_OBJECT_ID_TYPE( rc_pool );
/**
 * Per-account RC state: the account's RC manabar plus bookkeeping for
 * RC delegations to/from pools.
 */
class rc_account_object : public object< rc_account_object_type, rc_account_object >
{
   public:
      template< typename Constructor, typename Allocator >
      rc_account_object( Constructor&& c, allocator< Allocator > a )
      {
         c( *this );
      }
      rc_account_object() {}

      id_type id;
      account_name_type account;   // account this RC state belongs to
      account_name_type creator;   // account that created `account`
      steem::chain::util::manabar rc_manabar;   // current RC mana + last regen time
      // Adjustment applied to the account's maximum RC — presumably offsets
      // the creation fee/delegation; confirm in get_maximum_rc().
      asset max_rc_creation_adjustment = asset( 0, VESTS_SYMBOL );
      asset vests_delegated_to_pools = asset( 0, VESTS_SYMBOL );   // total outbound VESTS delegated into pools
      // Fixed slots for inbound pool delegations (the pools that feed this
      // account), max STEEM_RC_MAX_SLOTS of them.
      fc::array< account_name_type, STEEM_RC_MAX_SLOTS >
         indel_slots;
      uint32_t out_delegations = 0;   // count of outbound delegations

      // This is used for bug-catching, to match that the vesting shares in a
      // pre-op are equal to what they were at the last post-op.
      int64_t last_max_rc = 0;
};
STEEM_OBJECT_ID_TYPE( rc_account );
/**
 * Represents a delegation pool.
 *
 * A pool has its own manabar; accounts delegate into it (rc_indel_edge)
 * and it delegates out to accounts (rc_outdel_drc_edge).
 */
class rc_delegation_pool_object : public object< rc_delegation_pool_object_type, rc_delegation_pool_object >
{
   public:
      template< typename Constructor, typename Allocator >
      rc_delegation_pool_object( Constructor&& c, allocator< Allocator > a )
      {
         c( *this );
      }
      rc_delegation_pool_object() {}

      id_type id;
      account_name_type account;        // account that owns/identifies the pool
      asset_symbol_type asset_symbol;   // asset backing the pool (VESTS or SMT — confirm)
      steem::chain::util::manabar rc_pool_manabar;   // pool's current mana + last regen time
      int64_t max_rc = 0;               // regeneration ceiling of the pool manabar
};
STEEM_OBJECT_ID_TYPE( rc_delegation_pool );

/**
 * Represents the total amount of an asset delegated by a user.
 *
 * Only used for SMT support.
 */
class rc_delegation_from_account_object : public object< rc_delegation_from_account_object_type, rc_delegation_from_account_object >
{
   public:
      template< typename Constructor, typename Allocator >
      rc_delegation_from_account_object( Constructor&& c, allocator< Allocator > a )
      {
         c( *this );
      }
      rc_delegation_from_account_object() {}

      id_type id;
      account_name_type account;   // delegating account
      asset amount;                // total delegated in amount.symbol

      // Convenience accessor used as a composite index key below.
      asset_symbol_type get_asset_symbol()const
      {  return amount.symbol;   }
};
STEEM_OBJECT_ID_TYPE( rc_delegation_from_account );
/**
 * Represents a delegation from a user to a pool ("in-delegation" edge of
 * the delegation graph).
 */
class rc_indel_edge_object : public object< rc_indel_edge_object_type, rc_indel_edge_object >
{
   public:
      template< typename Constructor, typename Allocator >
      rc_indel_edge_object( Constructor&& c, allocator< Allocator > a )
      {
         c( *this );
      }
      rc_indel_edge_object() {}

      // Convenience accessor used as a composite index key below.
      asset_symbol_type get_asset_symbol()const
      {  return amount.symbol;   }

      id_type id;
      account_name_type from_account;   // delegating user
      account_name_type to_pool;        // receiving pool
      asset amount;                     // delegated amount (symbol keys the edge)
};
STEEM_OBJECT_ID_TYPE( rc_indel_edge );

/**
 * Represents a delegation from a pool to a user based on delegated resource credits (DRC).
 *
 * In the case of a pool that is not under heavy load, DRC:RC has a 1:1 exchange rate.
 *
 * However, if the pool drops below STEEM_RC_DRC_FLOAT_LEVEL, DRC:RC exchange rate starts
 * to rise according to `f(x) = 1/(a+b*x)` where `x` is the pool level, and coefficients `a`,
 * `b` are set such that `f(STEEM_RC_DRC_FLOAT_LEVEL) = 1` and `f(0) = STEEM_RC_MAX_DRC_RATE`.
 *
 * This ensures the limited RC of oversubscribed pools under heavy load are
 * shared "fairly" among their users proportionally to DRC. This logic
 * provides a smooth transition between the "fiat regime" (i.e. DRC represent
 * a direct allocation of RC) and the "proportional regime" (i.e. DRC represent
 * the fraction of RC that the user is allowed).
 */
class rc_outdel_drc_edge_object : public object< rc_outdel_drc_edge_object_type, rc_outdel_drc_edge_object >
{
   public:
      template< typename Constructor, typename Allocator >
      rc_outdel_drc_edge_object( Constructor&& c, allocator< Allocator > a )
      {
         c( *this );
      }
      rc_outdel_drc_edge_object() {}

      id_type id;
      account_name_type from_pool;      // delegating pool
      account_name_type to_account;     // receiving user
      asset_symbol_type asset_symbol;   // asset the pool is denominated in
      steem::chain::util::manabar drc_manabar;   // user's DRC mana in this pool
      int64_t drc_max_mana = 0;         // regeneration ceiling of drc_manabar
};
STEEM_OBJECT_ID_TYPE( rc_outdel_drc_edge );
// Computes the account's effective maximum RC (implementation elsewhere).
int64_t get_maximum_rc( const steem::chain::account_object& account, const rc_account_object& rc_account );

// Index tag types shared by the containers below.
struct by_edge;
struct by_account_symbol;
struct by_pool;

// NOTE: key composition and ordering of these indices is load-bearing —
// iteration code elsewhere depends on it. Do not reorder keys casually.

typedef multi_index_container<
   rc_resource_param_object,
   indexed_by<
      ordered_unique< tag< by_id >, member< rc_resource_param_object, rc_resource_param_object::id_type, &rc_resource_param_object::id > >
   >,
   allocator< rc_resource_param_object >
> rc_resource_param_index;

typedef multi_index_container<
   rc_pool_object,
   indexed_by<
      ordered_unique< tag< by_id >, member< rc_pool_object, rc_pool_object::id_type, &rc_pool_object::id > >
   >,
   allocator< rc_pool_object >
> rc_pool_index;

// Accounts are addressable by id and uniquely by name.
typedef multi_index_container<
   rc_account_object,
   indexed_by<
      ordered_unique< tag< by_id >, member< rc_account_object, rc_account_object::id_type, &rc_account_object::id > >,
      ordered_unique< tag< by_name >, member< rc_account_object, account_name_type, &rc_account_object::account > >
   >,
   allocator< rc_account_object >
> rc_account_index;

// Pools are unique per (owner account, asset symbol).
typedef multi_index_container<
   rc_delegation_pool_object,
   indexed_by<
      ordered_unique< tag< by_id >, member< rc_delegation_pool_object, rc_delegation_pool_object::id_type, &rc_delegation_pool_object::id > >,
      ordered_unique< tag< by_account_symbol >,
         composite_key< rc_delegation_pool_object,
            member< rc_delegation_pool_object, account_name_type, &rc_delegation_pool_object::account >,
            member< rc_delegation_pool_object, asset_symbol_type, &rc_delegation_pool_object::asset_symbol >
         >
      >
   >,
   allocator< rc_delegation_pool_object >
> rc_delegation_pool_index;

// Per-account delegation totals, unique per (account, asset symbol).
typedef multi_index_container<
   rc_delegation_from_account_object,
   indexed_by<
      ordered_unique< tag< by_id >, member< rc_delegation_from_account_object, rc_delegation_from_account_object::id_type, &rc_delegation_from_account_object::id > >,
      ordered_unique< tag< by_account_symbol >,
         composite_key< rc_delegation_from_account_object,
            member< rc_delegation_from_account_object, account_name_type, &rc_delegation_from_account_object::account >,
            const_mem_fun< rc_delegation_from_account_object, asset_symbol_type, &rc_delegation_from_account_object::get_asset_symbol >
         >
      >
   >,
   allocator< rc_delegation_from_account_object >
> rc_delegation_from_account_index;

// User->pool edges, traversable from either endpoint:
//   by_edge : (from_account, symbol, to_pool)
//   by_pool : (to_pool, symbol, from_account)
typedef multi_index_container<
   rc_indel_edge_object,
   indexed_by<
      ordered_unique< tag< by_id >, member< rc_indel_edge_object, rc_indel_edge_object::id_type, &rc_indel_edge_object::id > >,
      ordered_unique< tag< by_edge >,
         composite_key< rc_indel_edge_object,
            member< rc_indel_edge_object, account_name_type, &rc_indel_edge_object::from_account >,
            const_mem_fun< rc_indel_edge_object, asset_symbol_type, &rc_indel_edge_object::get_asset_symbol >,
            member< rc_indel_edge_object, account_name_type, &rc_indel_edge_object::to_pool >
         >
      >,
      ordered_unique< tag< by_pool >,
         composite_key< rc_indel_edge_object,
            member< rc_indel_edge_object, account_name_type, &rc_indel_edge_object::to_pool >,
            const_mem_fun< rc_indel_edge_object, asset_symbol_type, &rc_indel_edge_object::get_asset_symbol >,
            member< rc_indel_edge_object, account_name_type, &rc_indel_edge_object::from_account >
         >
      >
   >,
   allocator< rc_indel_edge_object >
> rc_indel_edge_index;

// Pool->user edges:
//   by_edge : (from_pool, to_account, symbol)
//   by_pool : (from_pool, symbol, id) — enumerates a pool's outbound edges
typedef multi_index_container<
   rc_outdel_drc_edge_object,
   indexed_by<
      ordered_unique< tag< by_id >, member< rc_outdel_drc_edge_object, rc_outdel_drc_edge_id_type, &rc_outdel_drc_edge_object::id > >,
      ordered_unique< tag< by_edge >,
         composite_key< rc_outdel_drc_edge_object,
            member< rc_outdel_drc_edge_object, account_name_type, &rc_outdel_drc_edge_object::from_pool >,
            member< rc_outdel_drc_edge_object, account_name_type, &rc_outdel_drc_edge_object::to_account >,
            member< rc_outdel_drc_edge_object, asset_symbol_type, &rc_outdel_drc_edge_object::asset_symbol >
         >
      >,
      ordered_unique< tag< by_pool >,
         composite_key< rc_outdel_drc_edge_object,
            member< rc_outdel_drc_edge_object, account_name_type, &rc_outdel_drc_edge_object::from_pool >,
            member< rc_outdel_drc_edge_object, asset_symbol_type, &rc_outdel_drc_edge_object::asset_symbol >,
            member< rc_outdel_drc_edge_object, rc_outdel_drc_edge_id_type, &rc_outdel_drc_edge_object::id >
         >
      >
   >,
   allocator< rc_outdel_drc_edge_object >
> rc_outdel_drc_edge_index;
} } } // steem::plugins::rc
// fc reflection + chainbase index registration. The member lists below
// define the serialized field order and MUST stay in sync with the class
// definitions above.
// NOTE(review): rc_account_object reflects out_delegations before
// indel_slots, the reverse of their declaration order — serialization
// follows this macro order; confirm the ordering is intentional.
FC_REFLECT( steem::plugins::rc::rc_resource_param_object, (id)(resource_param_array) )
CHAINBASE_SET_INDEX_TYPE( steem::plugins::rc::rc_resource_param_object, steem::plugins::rc::rc_resource_param_index )

FC_REFLECT( steem::plugins::rc::rc_pool_object, (id)(pool_array) )
CHAINBASE_SET_INDEX_TYPE( steem::plugins::rc::rc_pool_object, steem::plugins::rc::rc_pool_index )

FC_REFLECT( steem::plugins::rc::rc_account_object,
   (id)
   (account)
   (creator)
   (rc_manabar)
   (max_rc_creation_adjustment)
   (vests_delegated_to_pools)
   (out_delegations)
   (indel_slots)
   (last_max_rc)
   )
CHAINBASE_SET_INDEX_TYPE( steem::plugins::rc::rc_account_object, steem::plugins::rc::rc_account_index )

FC_REFLECT( steem::plugins::rc::rc_delegation_pool_object,
   (id)
   (account)
   (asset_symbol)
   (rc_pool_manabar)
   (max_rc)
   )
CHAINBASE_SET_INDEX_TYPE( steem::plugins::rc::rc_delegation_pool_object, steem::plugins::rc::rc_delegation_pool_index )

FC_REFLECT( steem::plugins::rc::rc_delegation_from_account_object,
   (id)
   (account)
   (amount)
   )
CHAINBASE_SET_INDEX_TYPE( steem::plugins::rc::rc_delegation_from_account_object, steem::plugins::rc::rc_delegation_from_account_index )

FC_REFLECT( steem::plugins::rc::rc_indel_edge_object,
   (id)
   (from_account)
   (to_pool)
   (amount)
   )
CHAINBASE_SET_INDEX_TYPE( steem::plugins::rc::rc_indel_edge_object, steem::plugins::rc::rc_indel_edge_index )

FC_REFLECT( steem::plugins::rc::rc_outdel_drc_edge_object,
   (id)
   (from_pool)
   (to_account)
   (asset_symbol)
   (drc_manabar)
   (drc_max_mana)
   )
CHAINBASE_SET_INDEX_TYPE( steem::plugins::rc::rc_outdel_drc_edge_object, steem::plugins::rc::rc_outdel_drc_edge_index )
| 5,981 |
1,441 | #include <bits/stdc++.h>
using namespace std;
typedef pair<int, int> ii;
typedef vector<int> vi;
/**
 * Suffix Array built in O(n log n) with radix/counting sort, plus O(n)
 * LCP of adjacent sorted suffixes via the Permuted-LCP (Phi) technique.
 * On top of SA+LCP it offers: string matching in O(m log n), Longest
 * Repeated Substring, and Longest Common Substring of two concatenated
 * strings. T is expected to end with a sentinel (e.g. '$') smaller than
 * every other character.
 *
 * Fix vs. original: removed a redundant `PLCP.resize(n)` — the vector is
 * already constructed with size n.
 */
class SuffixArray {
private:
  vi RA;                                           // rank array

  // Stable counting sort of SA by the rank k positions ahead
  // (positions past the end count as rank 0). O(n).
  void countingSort(int k) {                       // O(n)
    int maxi = max(300, n);                        // up to 255 ASCII chars
    vi c(maxi, 0);                                 // clear frequency table
    for (int i = 0; i < n; ++i)                    // count the frequency
      ++c[i+k < n ? RA[i+k] : 0];                  // of each integer rank
    for (int i = 0, sum = 0; i < maxi; ++i) {      // prefix sums -> start positions
      int t = c[i]; c[i] = sum; sum += t;
    }
    vi tempSA(n);
    for (int i = 0; i < n; ++i)                    // sort SA
      tempSA[c[SA[i]+k < n ? RA[SA[i]+k] : 0]++] = SA[i];
    swap(SA, tempSA);                              // update SA
  }

  // Builds SA by doubling: after the pass for k, suffixes are sorted by
  // their first 2k characters. O(n log n).
  void constructSA() {                             // can go up to 400K chars
    SA.resize(n);
    iota(SA.begin(), SA.end(), 0);                 // the initial SA
    RA.resize(n);
    for (int i = 0; i < n; ++i) RA[i] = T[i];      // initial rankings
    for (int k = 1; k < n; k <<= 1) {              // repeat log_2 n times
      // this is actually radix sort
      countingSort(k);                             // sort by 2nd item
      countingSort(0);                             // stable-sort by 1st item
      vi tempRA(n);
      int r = 0;
      tempRA[SA[0]] = r;                           // re-ranking process
      for (int i = 1; i < n; ++i)                  // compare adj suffixes
        tempRA[SA[i]] =                            // same pair => same rank r; otherwise, increase r
        ((RA[SA[i]] == RA[SA[i-1]]) && (RA[SA[i]+k] == RA[SA[i-1]+k])) ?
        r : ++r;
      swap(RA, tempRA);                            // update RA
      if (RA[SA[n-1]] == n-1) break;               // nice optimization
    }
  }

  // Kasai-style LCP: Phi[i] = suffix preceding suffix i in SA order;
  // PLCP is computed in text order (L decreases by at most 1 per step),
  // then permuted into SA order. O(n) overall.
  void computeLCP() {
    vi Phi(n);
    vi PLCP(n);                                    // already sized n (redundant resize removed)
    Phi[SA[0]] = -1;                               // default value
    for (int i = 1; i < n; ++i)                    // compute Phi in O(n)
      Phi[SA[i]] = SA[i-1];                        // remember prev suffix
    for (int i = 0, L = 0; i < n; ++i) {           // compute PLCP in O(n)
      if (Phi[i] == -1) { PLCP[i] = 0; continue; } // special case
      while ((i+L < n) && (Phi[i]+L < n) && (T[i+L] == T[Phi[i]+L]))
        ++L;                                       // L incr max n times
      PLCP[i] = L;
      L = max(L-1, 0);                             // L dec max n times
    }
    LCP.resize(n);
    for (int i = 0; i < n; ++i)                    // compute LCP in O(n)
      LCP[i] = PLCP[SA[i]];                        // restore PLCP
  }

public:
  const char* T;                                   // the input string
  const int n;                                     // the length of T
  vi SA;                                           // Suffix Array
  vi LCP;                                          // of adj sorted suffixes

  SuffixArray(const char* initialT, const int _n) : T(initialT), n(_n) {
    constructSA();                                 // O(n log n)
    computeLCP();                                  // O(n)
  }

  // Binary search for the SA range [lb, ub] of suffixes starting with P;
  // returns {-1, -1} when P does not occur. O(m log n).
  ii stringMatching(const char *P) {               // in O(m log n)
    int m = (int)strlen(P);                        // usually, m < n
    int lo = 0, hi = n-1;                          // range = [0..n-1]
    while (lo < hi) {                              // find lower bound
      int mid = (lo+hi) / 2;                       // this is round down
      int res = strncmp(T+SA[mid], P, m);          // P in suffix SA[mid]?
      (res >= 0) ? hi = mid : lo = mid+1;          // notice the >= sign
    }
    if (strncmp(T+SA[lo], P, m) != 0) return {-1, -1}; // if not found
    ii ans; ans.first = lo;
    hi = n-1;                                      // range = [lo..n-1]
    while (lo < hi) {                              // now find upper bound
      int mid = (lo+hi) / 2;
      int res = strncmp(T+SA[mid], P, m);
      (res > 0) ? hi = mid : lo = mid+1;           // notice the > sign
    }
    if (strncmp(T+SA[hi], P, m) != 0) --hi;        // special case
    ans.second = hi;
    return ans;                                    // returns (lb, ub)
  }                                                // where P is found

  // Longest Repeated Substring: the maximum LCP of adjacent suffixes.
  ii LRS() {                                       // (LRS length, index)
    int idx = 0, maxLCP = -1;
    for (int i = 1; i < n; ++i)                    // O(n), start from i = 1
      if (LCP[i] > maxLCP)
        maxLCP = LCP[i], idx = i;
    return {maxLCP, idx};
  }

  // Longest Common Substring of the two strings concatenated in T, where
  // suffixes starting before split_idx belong to the first string.
  ii LCS(int split_idx) {                          // (LCS length, index)
    int idx = 0, maxLCP = -1;
    for (int i = 1; i < n; ++i) {                  // O(n), start from i = 1
      // if suffix SA[i] and suffix SA[i-1] came from the same string, skip
      if ((SA[i] < split_idx) == (SA[i-1] < split_idx)) continue;
      if (LCP[i] > maxLCP)
        maxLCP = LCP[i], idx = i;
    }
    return {maxLCP, idx};
  }
};
const int MAX_N = 450010;                          // can go up to 450K chars

char T[MAX_N];                                     // input text (later: T + P + '#')
char P[MAX_N];                                     // pattern / second string
char LRS_ans[MAX_N];                               // Longest Repeated Substring buffer
char LCS_ans[MAX_N];                               // Longest Common Substring buffer
// Demo driver: reads T from sa_lcp_in.txt, prints SA/LCP tables, then
// demonstrates string matching, LRS, and LCS of (T, P).
int main() {
  // Redirect stdin to the sample input; fail loudly if it is missing.
  if (freopen("sa_lcp_in.txt", "r", stdin) == NULL) {
    fprintf(stderr, "cannot open sa_lcp_in.txt\n");
    return 1;
  }
  // BUG FIX: pass the array itself (decays to char*), not &T — the latter
  // has type char(*)[MAX_N], which does not match %s (undefined behavior).
  // Also bound the read (449999 < MAX_N) and check the conversion count.
  if (scanf("%449999s", T) != 1) {
    fprintf(stderr, "failed to read T\n");
    return 1;
  }
  int n = (int)strlen(T);                          // count n
  T[n++] = '$';                                    // add terminating symbol
  SuffixArray S(T, n);                             // construct SA+LCP
  printf("T = '%s'\n", T);
  printf(" i SA[i] LCP[i] Suffix SA[i]\n");
  for (int i = 0; i < n; ++i)
    printf("%2d %2d %2d %s\n", i, S.SA[i], S.LCP[i], T+S.SA[i]);
  // String Matching demo, we will try to find P in T
  strcpy(P, "A");
  auto [lb, ub] = S.stringMatching(P);
  if ((lb != -1) && (ub != -1)) {
    printf("P = '%s' is found SA[%d..%d] of T = '%s'\n", P, lb, ub, T);
    printf("They are:\n");
    for (int i = lb; i <= ub; ++i)
      printf(" %s\n", T+S.SA[i]);
  }
  else
    printf("P = '%s' is not found in T = '%s'\n", P, T);
  // LRS demo, find the LRS of T
  auto [LRS_len, LRS_idx] = S.LRS();
  strncpy(LRS_ans, T+S.SA[LRS_idx], LRS_len);
  LRS_ans[LRS_len] = '\0';   // strncpy does not terminate when src is longer
  printf("The LRS is '%s' with length = %d\n", LRS_ans, LRS_len);
  // LCS demo, find the LCS of (T, P)
  strcpy(P, "CATA");
  int m = (int)strlen(P);
  strcat(T, P);                                    // append P to T
  strcat(T, "#");                                  // add '#' at the back
  n = (int)strlen(T);                              // update n
  // reconstruct SA of the combined strings
  SuffixArray S2(T, n);                            // reconstruct SA+LCP
  int split_idx = n-m-1;
  printf("T+P = '%s'\n", T);
  printf(" i SA[i] LCP[i] From Suffix SA[i]\n");
  for (int i = 0; i < n; ++i)
    printf("%2d %2d %2d %2d %s\n",
      i, S2.SA[i], S2.LCP[i], S2.SA[i] < split_idx ? 1 : 2, T+S2.SA[i]);
  auto [LCS_len, LCS_idx] = S2.LCS(split_idx);
  strncpy(LCS_ans, T+S2.SA[LCS_idx], LCS_len);
  LCS_ans[LCS_len] = '\0';   // ensure termination regardless of prior contents
  printf("The LCS is '%s' with length = %d\n", LCS_ans, LCS_len);
  return 0;
}
| 4,069 |
12,278 | /*!
@file
Defines `boost::hana::Functor`.
@copyright <NAME> 2013-2017
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE.md or copy at http://boost.org/LICENSE_1_0.txt)
*/
#ifndef BOOST_HANA_CONCEPT_FUNCTOR_HPP
#define BOOST_HANA_CONCEPT_FUNCTOR_HPP
#include <boost/hana/fwd/concept/functor.hpp>
#include <boost/hana/adjust_if.hpp>
#include <boost/hana/config.hpp>
#include <boost/hana/core/default.hpp>
#include <boost/hana/core/tag_of.hpp>
#include <boost/hana/detail/integral_constant.hpp>
#include <boost/hana/transform.hpp>
BOOST_HANA_NAMESPACE_BEGIN
    // A tag models Functor when it customizes at least one of `transform`
    // or `adjust_if` (each has a default implementation in terms of the
    // other, so providing either one is sufficient).
    template <typename F>
    struct Functor
        : hana::integral_constant<bool,
            !is_default<transform_impl<typename tag_of<F>::type>>::value ||
            !is_default<adjust_if_impl<typename tag_of<F>::type>>::value
        >
    { };
BOOST_HANA_NAMESPACE_END
#endif // !BOOST_HANA_CONCEPT_FUNCTOR_HPP
| 412 |
607 | #include "config/stm32plus.h"
#include "config/display/font.h"
#if defined STM32PLUS_F0
#define FONTCONST const
#else
#define FONTCONST
#endif
namespace stm32plus { namespace display {

  // Auto-generated bitmap data for the 7px-wide "ProggyClean" fixed font.
  // Each glyph is 13 bytes of bit-packed pixel data; the exact bit packing
  // is defined by the stm32plus font renderer — confirm against the
  // Font/FontChar drawing code before editing. Do not modify by hand.

  // byte definitions for FDEF_PROGGYCLEAN
  FONTCONST uint8_t FDEF_PROGGYCLEAN32_BYTES[]={ 0,0,0,0,0,0,0,0,0,0,0,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN33_BYTES[]={ 0,0,2,129,64,32,16,0,4,0,0,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN34_BYTES[]={ 0,10,133,2,0,0,0,0,0,0,0,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN35_BYTES[]={ 0,0,10,229,167,80,126,10,5,0,0,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN36_BYTES[]={ 0,0,130,167,80,112,80,40,15,2,0,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN37_BYTES[]={ 0,128,168,82,81,160,168,84,17,0,0,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN38_BYTES[]={ 0,0,67,34,97,74,69,34,46,0,0,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN39_BYTES[]={ 0,4,2,1,0,0,0,0,0,0,0,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN40_BYTES[]={ 0,8,2,65,32,16,8,4,4,2,2,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN41_BYTES[]={ 0,2,2,1,129,64,32,16,4,130,0,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN42_BYTES[]={ 0,0,0,128,80,113,84,8,0,0,0,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN43_BYTES[]={ 0,0,0,128,64,248,16,8,0,0,0,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN44_BYTES[]={ 0,0,0,0,0,0,0,4,2,65,0,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN45_BYTES[]={ 0,0,0,0,0,248,0,0,0,0,0,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN46_BYTES[]={ 0,0,0,0,0,0,0,4,2,0,0,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN47_BYTES[]={ 0,16,8,2,65,32,8,4,129,0,0,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN48_BYTES[]={ 0,0,71,36,82,169,68,34,14,0,0,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN49_BYTES[]={ 0,0,130,161,64,32,16,8,31,0,0,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN50_BYTES[]={ 0,0,71,4,130,32,8,2,31,0,0,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN51_BYTES[]={ 0,0,71,4,194,128,64,34,14,0,0,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN52_BYTES[]={ 0,0,8,134,34,137,252,32,16,0,0,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN53_BYTES[]={ 0,128,79,32,240,128,64,34,14,0,0,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN54_BYTES[]={ 0,0,134,32,240,136,68,34,14,0,0,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN55_BYTES[]={ 0,128,15,4,129,32,16,4,2,0,0,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN56_BYTES[]={ 0,0,71,36,226,136,68,34,14,0,0,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN57_BYTES[]={ 0,0,71,36,18,241,64,16,6,0,0,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN58_BYTES[]={ 0,0,0,128,64,0,0,8,4,0,0,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN59_BYTES[]={ 0,0,0,64,32,0,0,4,2,65,0,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN60_BYTES[]={ 0,0,0,0,99,12,24,48,0,0,0,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN61_BYTES[]={ 0,0,0,0,240,3,252,0,0,0,0,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN62_BYTES[]={ 0,0,0,96,192,128,49,6,0,0,0,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN63_BYTES[]={ 0,0,71,4,130,32,16,0,4,0,0,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN64_BYTES[]={ 0,0,71,148,173,86,115,2,30,0,0,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN65_BYTES[]={ 0,0,6,67,34,241,132,66,33,0,0,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN66_BYTES[]={ 0,128,71,36,242,9,133,66,31,0,0,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN67_BYTES[]={ 0,0,142,40,16,8,4,68,28,0,0,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN68_BYTES[]={ 0,128,71,36,20,10,133,34,15,0,0,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN69_BYTES[]={ 0,128,79,32,240,8,4,2,31,0,0,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN70_BYTES[]={ 0,128,79,32,240,8,4,2,1,0,0,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN71_BYTES[]={ 0,0,142,40,16,200,133,68,28,0,0,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN72_BYTES[]={ 0,128,80,40,244,11,133,66,33,0,0,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN73_BYTES[]={ 0,0,7,129,64,32,16,8,14,0,0,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN74_BYTES[]={ 0,0,7,2,129,64,32,16,7,0,0,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN75_BYTES[]={ 0,128,80,36,81,56,36,34,33,0,0,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN76_BYTES[]={ 0,128,64,32,16,8,4,2,31,0,0,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN77_BYTES[]={ 0,192,120,92,173,38,147,193,32,0,0,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN78_BYTES[]={ 0,128,209,168,84,74,165,98,49,0,0,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN79_BYTES[]={ 0,0,134,36,20,10,133,36,12,0,0,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN80_BYTES[]={ 0,128,71,36,18,121,4,2,1,0,0,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN81_BYTES[]={ 0,0,134,36,20,10,133,36,44,16,0,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN82_BYTES[]={ 0,128,71,36,18,121,36,34,33,0,0,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN83_BYTES[]={ 0,0,79,40,96,192,128,66,30,0,0,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN84_BYTES[]={ 0,192,31,129,64,32,16,8,4,0,0,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN85_BYTES[]={ 0,128,80,40,20,10,133,66,30,0,0,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN86_BYTES[]={ 0,64,48,40,18,81,40,8,4,0,0,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN87_BYTES[]={ 0,64,48,153,172,86,109,34,17,0,0,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN88_BYTES[]={ 0,128,80,72,194,96,72,66,33,0,0,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN89_BYTES[]={ 0,64,48,40,162,32,16,8,4,0,0,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN90_BYTES[]={ 0,128,31,8,130,32,8,2,63,0,0,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN91_BYTES[]={ 0,14,129,64,32,16,8,4,2,129,3,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN92_BYTES[]={ 0,129,128,64,64,32,32,16,16,8,0,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN93_BYTES[]={ 0,14,4,2,129,64,32,16,8,132,3,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN94_BYTES[]={ 0,4,130,66,17,137,0,0,0,0,0,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN95_BYTES[]={ 0,0,0,0,0,0,0,0,192,31,0,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN96_BYTES[]={ 0,2,2,0,0,0,0,0,0,0,0,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN97_BYTES[]={ 0,0,0,192,1,241,68,34,30,0,0,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN98_BYTES[]={ 0,129,64,224,17,137,68,34,15,0,0,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN99_BYTES[]={ 0,0,0,192,17,9,4,34,14,0,0,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN100_BYTES[]={ 0,16,8,196,19,137,68,34,30,0,0,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN101_BYTES[]={ 0,0,0,192,17,249,4,34,14,0,0,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN102_BYTES[]={ 0,28,129,224,33,16,8,4,2,0,0,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN103_BYTES[]={ 0,0,0,192,19,137,68,34,30,8,196,1,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN104_BYTES[]={ 0,129,64,224,17,137,68,34,17,0,0,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN105_BYTES[]={ 0,4,0,192,64,32,16,8,4,0,0,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN106_BYTES[]={ 0,8,0,128,129,64,32,16,8,196,1,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN107_BYTES[]={ 0,129,64,32,146,40,28,18,17,0,0,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN108_BYTES[]={ 0,6,2,129,64,32,16,8,4,0,0,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN109_BYTES[]={ 0,0,0,112,75,38,147,201,36,0,0,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN110_BYTES[]={ 0,0,0,224,17,137,68,34,17,0,0,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN111_BYTES[]={ 0,0,0,192,17,137,68,34,14,0,0,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN112_BYTES[]={ 0,0,0,224,17,137,68,34,143,64,32,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN113_BYTES[]={ 0,0,0,192,19,137,68,34,30,8,4,2,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN114_BYTES[]={ 0,0,0,160,49,9,4,2,1,0,0,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN115_BYTES[]={ 0,0,0,192,19,48,32,32,15,0,0,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN116_BYTES[]={ 0,0,129,192,35,16,8,4,28,0,0,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN117_BYTES[]={ 0,0,0,32,18,137,68,34,30,0,0,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN118_BYTES[]={ 0,0,0,32,18,81,40,8,4,0,0,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN119_BYTES[]={ 0,0,0,16,76,38,171,54,17,0,0,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN120_BYTES[]={ 0,0,0,32,162,32,16,20,17,0,0,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN121_BYTES[]={ 0,0,0,32,18,137,68,34,30,8,196,1,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN122_BYTES[]={ 0,0,0,224,3,65,16,4,31,0,0,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN123_BYTES[]={ 0,24,2,129,64,24,16,8,4,2,6,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN124_BYTES[]={ 0,4,2,129,64,32,16,8,4,2,1,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN125_BYTES[]={ 0,3,2,129,64,192,16,8,4,194,0,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN126_BYTES[]={ 0,0,0,0,112,230,0,0,0,0,0,0,0,};
  FONTCONST uint8_t FDEF_PROGGYCLEAN127_BYTES[]={ 0,0,0,0,0,0,0,0,0,0,0,0,0,};

  // character definitions for FDEF_PROGGYCLEAN
  // One entry per ASCII code 32..127: { character code, pixel width, glyph bytes }.
  extern const struct FontChar FDEF_PROGGYCLEAN_CHAR[]={
    { 32,7,FDEF_PROGGYCLEAN32_BYTES },
    { 33,7,FDEF_PROGGYCLEAN33_BYTES },
    { 34,7,FDEF_PROGGYCLEAN34_BYTES },
    { 35,7,FDEF_PROGGYCLEAN35_BYTES },
    { 36,7,FDEF_PROGGYCLEAN36_BYTES },
    { 37,7,FDEF_PROGGYCLEAN37_BYTES },
    { 38,7,FDEF_PROGGYCLEAN38_BYTES },
    { 39,7,FDEF_PROGGYCLEAN39_BYTES },
    { 40,7,FDEF_PROGGYCLEAN40_BYTES },
    { 41,7,FDEF_PROGGYCLEAN41_BYTES },
    { 42,7,FDEF_PROGGYCLEAN42_BYTES },
    { 43,7,FDEF_PROGGYCLEAN43_BYTES },
    { 44,7,FDEF_PROGGYCLEAN44_BYTES },
    { 45,7,FDEF_PROGGYCLEAN45_BYTES },
    { 46,7,FDEF_PROGGYCLEAN46_BYTES },
    { 47,7,FDEF_PROGGYCLEAN47_BYTES },
    { 48,7,FDEF_PROGGYCLEAN48_BYTES },
    { 49,7,FDEF_PROGGYCLEAN49_BYTES },
    { 50,7,FDEF_PROGGYCLEAN50_BYTES },
    { 51,7,FDEF_PROGGYCLEAN51_BYTES },
    { 52,7,FDEF_PROGGYCLEAN52_BYTES },
    { 53,7,FDEF_PROGGYCLEAN53_BYTES },
    { 54,7,FDEF_PROGGYCLEAN54_BYTES },
    { 55,7,FDEF_PROGGYCLEAN55_BYTES },
    { 56,7,FDEF_PROGGYCLEAN56_BYTES },
    { 57,7,FDEF_PROGGYCLEAN57_BYTES },
    { 58,7,FDEF_PROGGYCLEAN58_BYTES },
    { 59,7,FDEF_PROGGYCLEAN59_BYTES },
    { 60,7,FDEF_PROGGYCLEAN60_BYTES },
    { 61,7,FDEF_PROGGYCLEAN61_BYTES },
    { 62,7,FDEF_PROGGYCLEAN62_BYTES },
    { 63,7,FDEF_PROGGYCLEAN63_BYTES },
    { 64,7,FDEF_PROGGYCLEAN64_BYTES },
    { 65,7,FDEF_PROGGYCLEAN65_BYTES },
    { 66,7,FDEF_PROGGYCLEAN66_BYTES },
    { 67,7,FDEF_PROGGYCLEAN67_BYTES },
    { 68,7,FDEF_PROGGYCLEAN68_BYTES },
    { 69,7,FDEF_PROGGYCLEAN69_BYTES },
    { 70,7,FDEF_PROGGYCLEAN70_BYTES },
    { 71,7,FDEF_PROGGYCLEAN71_BYTES },
    { 72,7,FDEF_PROGGYCLEAN72_BYTES },
    { 73,7,FDEF_PROGGYCLEAN73_BYTES },
    { 74,7,FDEF_PROGGYCLEAN74_BYTES },
    { 75,7,FDEF_PROGGYCLEAN75_BYTES },
    { 76,7,FDEF_PROGGYCLEAN76_BYTES },
    { 77,7,FDEF_PROGGYCLEAN77_BYTES },
    { 78,7,FDEF_PROGGYCLEAN78_BYTES },
    { 79,7,FDEF_PROGGYCLEAN79_BYTES },
    { 80,7,FDEF_PROGGYCLEAN80_BYTES },
    { 81,7,FDEF_PROGGYCLEAN81_BYTES },
    { 82,7,FDEF_PROGGYCLEAN82_BYTES },
    { 83,7,FDEF_PROGGYCLEAN83_BYTES },
    { 84,7,FDEF_PROGGYCLEAN84_BYTES },
    { 85,7,FDEF_PROGGYCLEAN85_BYTES },
    { 86,7,FDEF_PROGGYCLEAN86_BYTES },
    { 87,7,FDEF_PROGGYCLEAN87_BYTES },
    { 88,7,FDEF_PROGGYCLEAN88_BYTES },
    { 89,7,FDEF_PROGGYCLEAN89_BYTES },
    { 90,7,FDEF_PROGGYCLEAN90_BYTES },
    { 91,7,FDEF_PROGGYCLEAN91_BYTES },
    { 92,7,FDEF_PROGGYCLEAN92_BYTES },
    { 93,7,FDEF_PROGGYCLEAN93_BYTES },
    { 94,7,FDEF_PROGGYCLEAN94_BYTES },
    { 95,7,FDEF_PROGGYCLEAN95_BYTES },
    { 96,7,FDEF_PROGGYCLEAN96_BYTES },
    { 97,7,FDEF_PROGGYCLEAN97_BYTES },
    { 98,7,FDEF_PROGGYCLEAN98_BYTES },
    { 99,7,FDEF_PROGGYCLEAN99_BYTES },
    { 100,7,FDEF_PROGGYCLEAN100_BYTES },
    { 101,7,FDEF_PROGGYCLEAN101_BYTES },
    { 102,7,FDEF_PROGGYCLEAN102_BYTES },
    { 103,7,FDEF_PROGGYCLEAN103_BYTES },
    { 104,7,FDEF_PROGGYCLEAN104_BYTES },
    { 105,7,FDEF_PROGGYCLEAN105_BYTES },
    { 106,7,FDEF_PROGGYCLEAN106_BYTES },
    { 107,7,FDEF_PROGGYCLEAN107_BYTES },
    { 108,7,FDEF_PROGGYCLEAN108_BYTES },
    { 109,7,FDEF_PROGGYCLEAN109_BYTES },
    { 110,7,FDEF_PROGGYCLEAN110_BYTES },
    { 111,7,FDEF_PROGGYCLEAN111_BYTES },
    { 112,7,FDEF_PROGGYCLEAN112_BYTES },
    { 113,7,FDEF_PROGGYCLEAN113_BYTES },
    { 114,7,FDEF_PROGGYCLEAN114_BYTES },
    { 115,7,FDEF_PROGGYCLEAN115_BYTES },
    { 116,7,FDEF_PROGGYCLEAN116_BYTES },
    { 117,7,FDEF_PROGGYCLEAN117_BYTES },
    { 118,7,FDEF_PROGGYCLEAN118_BYTES },
    { 119,7,FDEF_PROGGYCLEAN119_BYTES },
    { 120,7,FDEF_PROGGYCLEAN120_BYTES },
    { 121,7,FDEF_PROGGYCLEAN121_BYTES },
    { 122,7,FDEF_PROGGYCLEAN122_BYTES },
    { 123,7,FDEF_PROGGYCLEAN123_BYTES },
    { 124,7,FDEF_PROGGYCLEAN124_BYTES },
    { 125,7,FDEF_PROGGYCLEAN125_BYTES },
    { 126,7,FDEF_PROGGYCLEAN126_BYTES },
    { 127,7,FDEF_PROGGYCLEAN127_BYTES },
  };
} }
#undef FONTCONST
| 7,434 |
421 | // FormatOverload1.cpp : Defines the entry point for the console application.
//
//#include "stdafx.h"
// <Snippet8>
using namespace System;
// C++/CLI sample: composite formatting with String::Format.
void main()
{
   // Sample values to substitute into the composite format string.
   DateTime^ dat = gcnew DateTime(2012, 1, 17, 9, 30, 0);
   String^ city = "Chicago";
   int temp = -16;
   // {0}, {1}, {2} are replaced by dat, city and temp, in order; each
   // argument is formatted with its culture-sensitive ToString().
   String^ output = String::Format("At {0} in {1}, the temperature was {2} degrees.",
                                   dat, city, temp);
   Console::WriteLine(output);
}
// The example displays the following output:
// At 1/17/2012 9:30:00 AM in Chicago, the temperature was -16 degrees.
// </Snippet8>
| 248 |
696 | <reponame>hwang-pku/Strata
/*
* Copyright (C) 2017 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.strata.product.etd;
import org.joda.convert.FromString;
import com.opengamma.strata.collect.TypedString;
/**
* The contract code for an Exchange Traded Derivative (ETD).
* <p>
* This is the code supplied by the exchange for use in clearing and margining, such as in SPAN.
*/
public final class EtdContractCode
    extends TypedString<EtdContractCode> {

  /** Serialization version. */
  private static final long serialVersionUID = 1L;

  //-------------------------------------------------------------------------
  /**
   * Obtains an instance from the specified name.
   * <p>
   * The name may contain any character, but must not be empty.
   *
   * @param name  the name
   * @return a type instance with the specified name
   */
  @FromString
  public static EtdContractCode of(String name) {
    return new EtdContractCode(name);
  }

  /**
   * Creates an instance.
   * <p>
   * Private: callers go through {@link #of(String)}, which is also the
   * Joda-Convert entry point via {@code @FromString}.
   *
   * @param name  the name
   */
  private EtdContractCode(String name) {
    super(name);
  }

  // resolve after deserialization
  // Re-creates the instance via the factory so deserialized objects pass
  // through normal construction/validation.
  private Object readResolve() {
    return of(getName());
  }

}
| 394 |
2,633 | import os
from pathlib import Path
import pytest
from unit.applications.proto import TestApplicationProto
class TestStaticShare(TestApplicationProto):
    """Router "share" action tests, focusing on arrays of share paths.

    The fixture creates two asset directories and a listener that serves
    ``<temp_dir>/assets$uri``; individual tests then repoint the route at
    single paths or path arrays and verify fall-through behavior.
    """

    # No extra modules/languages needed to run this suite.
    prerequisites = {}

    @pytest.fixture(autouse=True)
    def setup_method_fixture(self, temp_dir):
        # Layout:
        #   <temp_dir>/assets/dir/file    -> "1"
        #   <temp_dir>/assets/dir2/file2  -> "2"
        os.makedirs(temp_dir + '/assets/dir')
        os.makedirs(temp_dir + '/assets/dir2')
        Path(temp_dir + '/assets/dir/file').write_text('1')
        Path(temp_dir + '/assets/dir2/file2').write_text('2')

        assert 'success' in self.conf(
            {
                "listeners": {"*:7080": {"pass": "routes"}},
                "routes": [{"action": {"share": temp_dir + "/assets$uri"}}],
                "applications": {},
            }
        )

    def action_update(self, conf):
        # Helper: replace the first route's action and require success.
        assert 'success' in self.conf(conf, 'routes/0/action')

    def test_share_array(self, temp_dir):
        """"share" as an array: paths are tried in order until one matches."""
        assert self.get(url='/dir/file')['body'] == '1'
        assert self.get(url='/dir2/file2')['body'] == '2'

        self.action_update({"share": [temp_dir + "/assets/dir$uri"]})

        assert self.get(url='/file')['body'] == '1'
        assert self.get(url='/file2')['status'] == 404

        self.action_update(
            {
                "share": [
                    temp_dir + "/assets/dir$uri",
                    temp_dir + "/assets/dir2$uri",
                ]
            }
        )

        assert self.get(url='/file')['body'] == '1'
        assert self.get(url='/file2')['body'] == '2'

        # A missing directory in the array is skipped, not an error.
        self.action_update(
            {
                "share": [
                    temp_dir + "/assets/dir2$uri",
                    temp_dir + "/assets/dir3$uri",
                ]
            }
        )

        assert self.get(url='/file')['status'] == 404
        assert self.get(url='/file2')['body'] == '2'

    def test_share_array_fallback(self):
        """When no array entry matches, the "fallback" action is taken."""
        self.action_update(
            {"share": ["/blah", "/blah2"], "fallback": {"return": 201}}
        )

        assert self.get()['status'] == 201

    def test_share_array_invalid(self):
        """An empty array or an object is rejected by configuration."""
        assert 'error' in self.conf({"share": []}, 'routes/0/action')
        assert 'error' in self.conf({"share": {}}, 'routes/0/action')
| 1,095 |
8,027 | <filename>src/com/facebook/buck/rules/keys/SingleBuildActionRuleKeyCache.java
/*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.facebook.buck.rules.keys;
import com.facebook.buck.core.build.action.BuildEngineAction;
import com.facebook.buck.core.rulekey.AddsToRuleKey;
import com.facebook.buck.core.rulekey.RuleKey;
import com.facebook.buck.core.rules.actions.Action;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import java.util.concurrent.ExecutionException;
import java.util.function.Function;
/**
 * A {@link RuleKey} cache used by a {@link RuleKeyFactory}. As items are add-only, this is intended
 * to be used in a single build.
 *
 * @param <V> The rule key type.
 */
public class SingleBuildActionRuleKeyCache<V> {

  // All three caches compare keys by identity (weakKeys() implies identity
  // equivalence) and drop entries once their keys are weakly reachable.
  private final Cache<BuildEngineAction, V> buildEngineActionCache =
      CacheBuilder.newBuilder().weakKeys().build();
  private final Cache<Action, V> actionCache = CacheBuilder.newBuilder().weakKeys().build();
  private final Cache<AddsToRuleKey, V> ruleKeyAppendableVCache =
      CacheBuilder.newBuilder().weakKeys().build();

  /** Returns the cached value for {@code key}, computing and storing it on a miss. */
  private <K> V computeIfAbsent(Cache<K, V> cache, K key, Function<K, V> compute) {
    try {
      return cache.get(key, () -> compute.apply(key));
    } catch (ExecutionException e) {
      throw new RuntimeException(e);
    }
  }

  public V get(BuildEngineAction action, Function<BuildEngineAction, V> create) {
    return computeIfAbsent(buildEngineActionCache, action, create);
  }

  public V get(AddsToRuleKey appendable, Function<AddsToRuleKey, V> create) {
    return computeIfAbsent(ruleKeyAppendableVCache, appendable, create);
  }

  public V get(Action action, Function<Action, V> create) {
    return computeIfAbsent(actionCache, action, create);
  }
}
| 711 |
303 | /* Copyright (C) 2005-2011 <NAME> */
package com.lightcrafts.ui.templates;
import com.lightcrafts.templates.TemplateKey;
import com.lightcrafts.templates.TemplateDatabase;
import com.lightcrafts.utils.xml.XmlNode;
import com.lightcrafts.utils.xml.XMLException;
import com.lightcrafts.utils.xml.XmlDocument;
import com.lightcrafts.ui.operation.OpControl;
import javax.swing.tree.TreeNode;
import java.util.Enumeration;
import java.util.List;
/**
 * A leaf node of the template tree that wraps one saved template: its
 * {@link TemplateKey} and the parsed "Controls" XML fragment of the template
 * document. Always a leaf (no children).
 */
class TemplateTreeNode implements TreeNode {

    TemplateKey key;

    // The "Controls" child of the template document's root element.
    XmlNode node;

    // NOTE(review): never assigned in this class — presumably populated by
    // callers elsewhere; confirm before relying on it being non-null.
    List<OpControl> opControls;

    private TemplateNamespaceTreeNode parent;

    private boolean isPreviewed;

    TemplateTreeNode(
        TemplateNamespaceTreeNode parent, TemplateKey key
    ) throws TemplateDatabase.TemplateException, XMLException {
        // Fixed: the original assigned "this.key = key" twice; once suffices.
        this.parent = parent;
        this.key = key;
        XmlDocument xml = TemplateDatabase.getTemplateDocument(key);
        XmlNode root = xml.getRoot();
        // Tag name copied from Document.ControlTag:
        node = root.getChild("Controls");
    }

    public TreeNode getChildAt(int childIndex) {
        return null;
    }

    public String toString() {
        // Used by the JTree cell renderer.
        return key.getName();
    }

    public TemplateKey getTemplateKey() {
        return key;
    }

    public int getChildCount() {
        return 0;
    }

    public TreeNode getParent() {
        return parent;
    }

    public int getIndex(TreeNode node) {
        return 0;
    }

    public boolean getAllowsChildren() {
        return false;
    }

    public boolean isLeaf() {
        return true;
    }

    public Enumeration children() {
        return EmptyEnumeration;
    }

    void setPreviewed(boolean previewed) {
        isPreviewed = previewed;
    }

    boolean isPreviewed() {
        return isPreviewed;
    }

    // Shared empty enumeration returned by children(); a template node never
    // has child tree nodes.
    private static Enumeration EmptyEnumeration = new Enumeration() {
        public boolean hasMoreElements() {
            return false;
        }
        public Object nextElement() {
            return null;
        }
    };
}
| 824 |
441 | //
// Copyright (c) 2008-2020 the Urho3D project.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//
#include "../Precompiled.h"
#include "../Core/Thread.h"
#include "../IO/Log.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <pthread.h>
#endif
#include <SDL/SDL_hints.h>
#if defined(__ANDROID_API__) && __ANDROID_API__ < 26
#include <linux/prctl.h>
#include <sys/prctl.h>
#endif
#include "../DebugNew.h"
namespace Urho3D
{
#ifdef URHO3D_THREADING
#ifdef _WIN32
#if !defined(UWP)
// SetThreadDescription() only exists on newer Windows versions, so resolve it
// dynamically from kernel32 instead of linking against it directly.
typedef HRESULT (WINAPI *pfnSetThreadDescription)(HANDLE, PCWSTR);
static auto pSetThreadDescription = (pfnSetThreadDescription) GetProcAddress(GetModuleHandleW(L"kernel32.dll"), "SetThreadDescription");
#endif

// Payload for the MSVC "name this thread" debugger exception (0x406D1388).
// The layout is dictated by the debugger protocol and must be 8-byte packed.
#pragma pack(push,8)
typedef struct tagTHREADNAME_INFO
{
    DWORD dwType; /* must be 0x1000 */
    LPCSTR szName; /* pointer to name (in user addr space) */
    DWORD dwThreadID; /* thread ID (-1=caller thread) */
    DWORD dwFlags; /* reserved for future use, must be zero */
} THREADNAME_INFO;
#pragma pack(pop)
// (Removed a redundant duplicate typedef of pfnSetThreadDescription that was
// already declared above.)
// Windows thread entry point: applies the thread's name (via
// SetThreadDescription when available, otherwise via the debugger exception
// trick), then runs the user-supplied ThreadFunction().
DWORD WINAPI Thread::ThreadFunctionStatic(void* data)
{
    Thread* thread = static_cast<Thread*>(data);

    // Borrowed from SDL_systhread.c
#if !defined(UWP)
    if (pSetThreadDescription)
        pSetThreadDescription(GetCurrentThread(), MultiByteToWide(thread->name_).c_str());
    else
#endif
    if (IsDebuggerPresent())
    {
        // Presumably some version of Visual Studio will understand SetThreadDescription(),
        // but we still need to deal with older OSes and debuggers. Set it with the arcane
        // exception magic, too.
        THREADNAME_INFO inf;

        /* C# and friends will try to catch this Exception, let's avoid it. */
        if (!SDL_GetHintBoolean(SDL_HINT_WINDOWS_DISABLE_THREAD_NAMING, SDL_TRUE))
        {
            // This magic tells the debugger to name a thread if it's listening.
            SDL_zero(inf);
            inf.dwType = 0x1000;
            inf.szName = thread->name_.c_str();
            inf.dwThreadID = (DWORD) -1;
            inf.dwFlags = 0;

            // The debugger catches this, renames the thread, continues on.
            RaiseException(0x406D1388, 0, sizeof(inf) / sizeof(ULONG), (const ULONG_PTR*) &inf);
        }
    }
    thread->ThreadFunction();
    return 0;
}
#else
// POSIX thread entry point: applies the thread's name through the
// platform-specific API (prctl on old Android, pthread_setname_np elsewhere),
// runs ThreadFunction(), then exits the thread.
void* Thread::ThreadFunctionStatic(void* data)
{
    auto* thread = static_cast<Thread*>(data);

#if defined(__ANDROID_API__)
#if __ANDROID_API__ < 26
    prctl(PR_SET_NAME, thread->name_.c_str(), 0, 0, 0);
#else
    pthread_setname_np(pthread_self(), thread->name_.c_str(), thread->name_.Length());
#endif
#elif defined(__linux__)
    pthread_setname_np(pthread_self(), thread->name_.c_str());
#elif defined(__MACOSX__) || defined(__IPHONEOS__)
    // On Apple platforms the name can only be set from within the thread itself.
    pthread_setname_np(thread->name_.c_str());
#endif

    thread->ThreadFunction();
    pthread_exit((void*)nullptr);
    return nullptr;
}
#endif
#endif // URHO3D_THREADING
ThreadID Thread::mainThreadID;
// Construct a named but not-yet-started thread; Run() actually creates it.
Thread::Thread(const ea::string& name) :
    handle_(nullptr),
    shouldRun_(false),
    name_(name)
{
}
// Ensure the worker is signalled, joined and released before destruction.
Thread::~Thread()
{
    Stop();
}
// Start the thread. Returns true if an OS thread was created; false when the
// thread is already running or threading is compiled out (URHO3D_THREADING
// undefined).
bool Thread::Run()
{
#ifdef URHO3D_THREADING
    // Check if already running
    if (handle_)
        return false;

    shouldRun_ = true;
#ifdef _WIN32
    handle_ = CreateThread(nullptr, 0, ThreadFunctionStatic, this, 0, nullptr);
#else
    // Create a joinable pthread so Stop() can wait for it.
    pthread_attr_t type;
    pthread_attr_init(&type);
    pthread_attr_setdetachstate(&type, PTHREAD_CREATE_JOINABLE);
    pthread_create((pthread_t*)&handle_, &type, ThreadFunctionStatic, this);
#endif
    return handle_ != nullptr;
#else
    return false;
#endif // URHO3D_THREADING
}
// Signal the worker to stop (clears shouldRun_), block until it has finished,
// then release the handle. Safe to call when the thread is not running.
void Thread::Stop()
{
#ifdef URHO3D_THREADING
    // Check if already stopped
    if (!handle_)
        return;

    shouldRun_ = false;
#ifdef _WIN32
    WaitForSingleObject((HANDLE)handle_, INFINITE);
    CloseHandle((HANDLE)handle_);
#else
    auto thread = (pthread_t)handle_;
    if (thread)
        pthread_join(thread, nullptr);
#endif
    handle_ = nullptr;
#endif // URHO3D_THREADING
}
// Set the OS scheduling priority of a running thread. No-op when the thread
// is not running; on POSIX this is only implemented for desktop Linux.
void Thread::SetPriority(int priority)
{
#ifdef URHO3D_THREADING
#ifdef _WIN32
    if (handle_)
        SetThreadPriority((HANDLE)handle_, priority);
#elif defined(__linux__) && !defined(__ANDROID__) && !defined(__EMSCRIPTEN__)
    auto thread = (pthread_t)handle_;
    if (thread)
        pthread_setschedprio(thread, priority);
#endif
#endif // URHO3D_THREADING
}
// Record the calling thread as the "main" thread for IsMainThread() checks.
void Thread::SetMainThread()
{
    mainThreadID = GetCurrentThreadID();
}
// Return the OS identifier of the calling thread, or a default-constructed
// ThreadID when threading is compiled out.
ThreadID Thread::GetCurrentThreadID()
{
#ifdef URHO3D_THREADING
#ifdef _WIN32
    return GetCurrentThreadId();
#else
    return pthread_self();
#endif
#else
    return ThreadID();
#endif // URHO3D_THREADING
}
// True when called from the thread registered via SetMainThread().
// Always true in single-threaded builds.
bool Thread::IsMainThread()
{
#ifdef URHO3D_THREADING
    return GetCurrentThreadID() == mainThreadID;
#else
    return true;
#endif // URHO3D_THREADING
}
// Rename the thread. Allowed only before Run(): the OS-level name is applied
// inside the thread entry function, so renaming a live thread has no effect.
void Thread::SetName(const ea::string& name)
{
    if (handle_ == nullptr)
    {
        name_ = name;
        return;
    }
    URHO3D_LOGERROR("Thread name must be set before thread is started.");
}
}
| 2,337 |
441 | package org.basex.api.dom;
import org.basex.query.value.node.*;
import org.basex.util.*;
import org.w3c.dom.*;
/**
 * DOM - Element implementation.
 *
 * Thin read-only adapter exposing a BaseX {@link ANode} element through the
 * W3C DOM {@link Element} interface. All mutating operations throw via
 * {@code readOnly()}, and namespace-aware lookups are unsupported
 * ({@code notImplemented()}).
 *
 * @author BaseX Team 2005-21, BSD License
 * @author <NAME>
 */
public final class BXElem extends BXNode implements Element {
  /**
   * Constructor.
   * @param node node reference
   */
  BXElem(final ANode node) {
    super(node);
  }

  @Override
  public String getNodeName() {
    // Full (possibly prefixed) element name.
    return Token.string(nd.name());
  }

  @Override
  public String getLocalName() {
    // Name without any namespace prefix.
    return Token.string(Token.local(nd.name()));
  }

  @Override
  public BXNNode getAttributes() {
    return new BXNNode(finish(nd.attributeIter()));
  }

  @Override
  public String getAttribute(final String name) {
    // DOM contract: return the empty string when the attribute is absent.
    final ANode n = attribute(name);
    return n != null ? Token.string(n.string()) : "";
  }

  @Override
  public String getNamespaceURI() {
    // DOM contract: null (not empty string) when the element has no namespace.
    final byte[] uri = nd.qname().uri();
    return uri.length == 0 ? null : Token.string(uri);
  }

  @Override
  public String getAttributeNS(final String uri, final String name) {
    throw notImplemented();
  }

  @Override
  public BXAttr getAttributeNode(final String name) {
    return (BXAttr) get(attribute(name));
  }

  @Override
  public BXAttr getAttributeNodeNS(final String uri, final String name) {
    throw notImplemented();
  }

  @Override
  public BXNList getElementsByTagName(final String name) {
    return getElements(name);
  }

  @Override
  public BXNList getElementsByTagNameNS(final String uri, final String name) {
    throw notImplemented();
  }

  @Override
  public TypeInfo getSchemaTypeInfo() {
    throw notImplemented();
  }

  @Override
  public String getTagName() {
    return getNodeName();
  }

  @Override
  public boolean hasAttribute(final String name) {
    return attribute(name) != null;
  }

  @Override
  public boolean hasAttributeNS(final String uri, final String name) {
    throw notImplemented();
  }

  // All mutators below are unsupported: the underlying node is read-only.

  @Override
  public void removeAttribute(final String name) {
    throw readOnly();
  }

  @Override
  public void removeAttributeNS(final String uri, final String name) {
    throw readOnly();
  }

  @Override
  public BXAttr removeAttributeNode(final Attr oldAttr) {
    throw readOnly();
  }

  @Override
  public void setAttribute(final String name, final String value) {
    throw readOnly();
  }

  @Override
  public void setAttributeNS(final String uri, final String name, final String value) {
    throw readOnly();
  }

  @Override
  public BXAttr setAttributeNode(final Attr node) {
    throw readOnly();
  }

  @Override
  public BXAttr setAttributeNodeNS(final Attr node) {
    throw readOnly();
  }

  @Override
  public void setIdAttribute(final String name, final boolean id) {
    throw readOnly();
  }

  @Override
  public void setIdAttributeNS(final String uri, final String name, final boolean id) {
    throw readOnly();
  }

  @Override
  public void setIdAttributeNode(final Attr node, final boolean id) {
    throw readOnly();
  }

  /**
   * Returns the specified attribute.
   * @param name attribute name
   * @return node or {@code null}
   */
  private ANode attribute(final String name) {
    // Linear scan over the attribute axis; attribute counts are small.
    final byte[] nm = Token.token(name);
    for(final ANode n : nd.attributeIter()) {
      if(Token.eq(nm, n.name())) return n.finish();
    }
    return null;
  }
}
| 1,121 |
403 | <reponame>vishalbelsare/OpenMatch<gh_stars>100-1000
from os.path import join
import sys
sys.path += ['../']
import argparse
import glob
import json
import logging
import os
import random
import numpy as np
import torch
torch.multiprocessing.set_sharing_strategy('file_system')
from torch.utils.data import DataLoader
from tqdm import tqdm
import torch.distributed as dist
from torch import nn
from torch.serialization import default_restore_location
import torch.nn.functional as F
from model.models import MSMarcoConfigDict, ALL_MODELS
from utils.lamb import Lamb
import random
import transformers
from transformers import (
AdamW,
BertConfig,
BertForSequenceClassification,
BertTokenizer,
get_linear_schedule_with_warmup,
)
from transformers import glue_processors as processors
import copy
from torch import nn
import pickle
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
import pandas as pd
logger = logging.getLogger(__name__)
from utils.util import (
StreamingDataset,
EmbeddingCache,
get_checkpoint_no,
get_latest_ann_data,
set_seed,
is_first_worker,
)
from data.DPR_data import GetTrainingDataProcessingFn, GetTripletTrainingDataProcessingFn,GetQuadrapuletTrainingDataProcessingFn
from utils.dpr_utils import (
load_states_from_checkpoint,
get_model_obj,
CheckpointState,
get_optimizer,
all_gather_list
)
def train(args, model, tokenizer, query_cache, passage_cache):
    """Run the (optionally distributed / fp16) bi-encoder training loop.

    When ``args.num_epoch == 0``, periodically polls ``args.ann_dir`` for newly
    generated ANN training data and switches to it; otherwise iterates a fixed
    ``train-data`` file for ``args.num_epoch`` epochs. Metrics go to
    TensorBoard on the first worker only; checkpoints are written every
    ``args.save_steps`` optimizer steps. Returns the final global step count.
    """
    logger.info("Training/evaluation parameters %s", args)
    tb_writer = None
    if is_first_worker():
        tb_writer = SummaryWriter(log_dir=args.log_dir)

    args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) #nll loss for query
    # NOTE(review): real_batch_size is computed but currently unused.
    real_batch_size = args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1)

    optimizer = get_optimizer(args, model, weight_decay=args.weight_decay,)
    scheduler = get_linear_schedule_with_warmup(
        optimizer, num_warmup_steps=args.warmup_steps, num_training_steps= args.max_steps
    )
    global_step = 0
    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)

    # Resume from a checkpoint unless starting from the plain pretrained model.
    if args.model_name_or_path != "bert-base-uncased":
        if args.fp16 and args.resume_fp16_checkpoint :
            from collections import OrderedDict
            checkpoint = torch.load(args.model_name_or_path, map_location=lambda s, l: default_restore_location(s, 'cpu'))
            new_state_dict = OrderedDict()
            # Strip the "module." prefix added by DistributedDataParallel.
            for k, v in checkpoint['model'].items():
                name = k[7:]
                new_state_dict[name] = v
            model.load_state_dict(new_state_dict)
            # model.load_state_dict(checkpoint['model'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            scheduler.load_state_dict(checkpoint['scheduler'])
            amp.load_state_dict(checkpoint['amp'])
            global_step = checkpoint['offset']
        else:
            saved_state = load_states_from_checkpoint(args.model_name_or_path)
            global_step, model, optimizer, scheduler = _load_saved_state(model, optimizer, scheduler, saved_state, load_optimizer_scheduler=args.load_optimizer_scheduler)
        logger.info(" Continuing training from checkpoint, will skip to saved global_step")
        logger.info(" Continuing training from global step %d", global_step)

    # multi-gpu training (should be after apex fp16 initialization)
    if args.n_gpu > 1:
        model = torch.nn.DataParallel(model)

    # Distributed training (should be after apex fp16 initialization)
    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True,
        )

    # Train!
    logger.info("***** Running training *****")
    logger.info(" Max steps = %d", args.max_steps)
    logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
    logger.info(
        " Total train batch size (w. parallel, distributed & accumulation) = %d",
        args.train_batch_size
        * args.gradient_accumulation_steps
        * (torch.distributed.get_world_size() if args.local_rank != -1 else 1),
    )
    logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)

    tr_loss = {}
    model.zero_grad()
    model.train()
    set_seed(args)  # Added here for reproducibility

    last_ann_no = -1
    train_dataloader = None
    train_dataloader_iter = None
    dev_ndcg = 0
    step = 0
    iter_count = 0

    if not args.dual_training:
        args.dual_loss_weight = 0.0

    # Baseline dev evaluation when resuming from a checkpoint.
    if args.model_name_or_path != "bert-base-uncased":
        nq_dev_nll_loss, nq_correct_ratio = evaluate_dev(args, model, passage_cache)
        # dev_nll_loss_trivia, correct_ratio_trivia = evaluate_dev(args, model, passage_cache, "-trivia")
        if is_first_worker():
            tb_writer.add_scalar("dev_nll_loss/dev_nll_loss", nq_dev_nll_loss, global_step)
            tb_writer.add_scalar("dev_nll_loss/correct_ratio", nq_correct_ratio, global_step)
            # tb_writer.add_scalar("dev_nll_loss/dev_nll_loss_trivia", dev_nll_loss_trivia, global_step)
            # tb_writer.add_scalar("dev_nll_loss/correct_ratio_trivia", correct_ratio_trivia, global_step)

    while global_step < args.max_steps:
        # Only consider refreshing the data source on logging boundaries.
        if step % args.gradient_accumulation_steps == 0 and global_step % args.logging_steps == 0:
            if args.num_epoch == 0:
                # check if new ann training data is available
                ann_no, ann_path, ndcg_json = get_latest_ann_data(args.ann_dir)
                if ann_path is not None and ann_no != last_ann_no:
                    logger.info("Training on new add data at %s", ann_path)
                    with open(ann_path, 'r') as f:
                        ann_training_data = f.readlines()
                    logger.info("Training data line count: %d", len(ann_training_data))
                    # Keep only lines with more than one negative (column 2).
                    ann_training_data = [l for l in ann_training_data if len(l.split('\t')[2].split(',')) > 1]
                    logger.info("Filtered training data line count: %d", len(ann_training_data))
                    if args.dual_training:
                        # Dual training additionally requires column 3 (negative queries).
                        ann_training_data = [l for l in ann_training_data if len(l.split('\t')[3].split(',')) > 1]
                        logger.info("Filtered training data line count in dual training: %d", len(ann_training_data))
                    ann_checkpoint_path = ndcg_json['checkpoint']
                    ann_checkpoint_no = get_checkpoint_no(ann_checkpoint_path)

                    # Truncate so every worker gets the same number of lines.
                    aligned_size = (len(ann_training_data) // args.world_size) * args.world_size
                    ann_training_data = ann_training_data[:aligned_size]
                    logger.info("Total ann queries: %d", len(ann_training_data))
                    if args.triplet:
                        if args.dual_training:
                            train_dataset = StreamingDataset(ann_training_data, GetQuadrapuletTrainingDataProcessingFn(args, query_cache, passage_cache))
                        else:
                            train_dataset = StreamingDataset(ann_training_data, GetTripletTrainingDataProcessingFn(args, query_cache, passage_cache))
                        train_dataloader = DataLoader(train_dataset, batch_size=args.train_batch_size)
                    else:
                        # Non-triplet data interleaves query/passage rows, hence 2x batch.
                        train_dataset = StreamingDataset(ann_training_data, GetTrainingDataProcessingFn(args, query_cache, passage_cache))
                        train_dataloader = DataLoader(train_dataset, batch_size=args.train_batch_size*2)
                    train_dataloader_iter = iter(train_dataloader)

                    # re-warmup
                    if not args.single_warmup:
                        scheduler = get_linear_schedule_with_warmup(
                            optimizer, num_warmup_steps=args.warmup_steps, num_training_steps= len(ann_training_data)
                        )

                    if args.local_rank != -1:
                        dist.barrier()

                    if is_first_worker():
                        # add ndcg at checkpoint step used instead of current step
                        tb_writer.add_scalar("retrieval_accuracy/top20_nq", ndcg_json['top20'], ann_checkpoint_no)
                        tb_writer.add_scalar("retrieval_accuracy/top100_nq", ndcg_json['top100'], ann_checkpoint_no)
                        if 'dev_top20' in ndcg_json:
                            tb_writer.add_scalar("retrieval_accuracy/top20_nq_dev", ndcg_json['dev_top20'], ann_checkpoint_no)
                            tb_writer.add_scalar("retrieval_accuracy/top100_nq_dev", ndcg_json['dev_top100'], ann_checkpoint_no)
                        if 'top20_trivia' in ndcg_json:
                            tb_writer.add_scalar("retrieval_accuracy/top20_trivia", ndcg_json['top20_trivia'], ann_checkpoint_no)
                            tb_writer.add_scalar("retrieval_accuracy/top100_trivia", ndcg_json['top100_trivia'], ann_checkpoint_no)
                        if last_ann_no != -1:
                            tb_writer.add_scalar("epoch", last_ann_no, global_step-1)
                        tb_writer.add_scalar("epoch", ann_no, global_step)
                    last_ann_no = ann_no
            elif step == 0:
                # Epoch mode: load the fixed training file once at the start.
                train_data_path = os.path.join(args.data_dir, "train-data")
                with open(train_data_path, 'r') as f:
                    training_data = f.readlines()
                if args.triplet:
                    train_dataset = StreamingDataset(training_data, GetTripletTrainingDataProcessingFn(args, query_cache, passage_cache))
                    train_dataloader = DataLoader(train_dataset, batch_size=args.train_batch_size)
                else:
                    train_dataset = StreamingDataset(training_data, GetTrainingDataProcessingFn(args, query_cache, passage_cache))
                    train_dataloader = DataLoader(train_dataset, batch_size=args.train_batch_size*2)
                all_batch = [b for b in train_dataloader]
                logger.info("Total batch count: %d", len(all_batch))
                train_dataloader_iter = iter(train_dataloader)

        try:
            batch = next(train_dataloader_iter)
        except StopIteration:
            # End of the current dataset pass: run dev eval, then restart it.
            logger.info("Finished iterating current dataset, begin reiterate")
            if args.num_epoch != 0:
                iter_count += 1
                if is_first_worker():
                    tb_writer.add_scalar("epoch", iter_count-1, global_step-1)
                    tb_writer.add_scalar("epoch", iter_count, global_step)
            nq_dev_nll_loss, nq_correct_ratio = evaluate_dev(args, model, passage_cache)
            # dev_nll_loss_trivia, correct_ratio_trivia = evaluate_dev(args, model, passage_cache, "-trivia")
            if is_first_worker():
                tb_writer.add_scalar("dev_nll_loss/dev_nll_loss", nq_dev_nll_loss, global_step)
                tb_writer.add_scalar("dev_nll_loss/correct_ratio", nq_correct_ratio, global_step)
                # tb_writer.add_scalar("dev_nll_loss/dev_nll_loss_trivia", dev_nll_loss_trivia, global_step)
                # tb_writer.add_scalar("dev_nll_loss/correct_ratio_trivia", correct_ratio_trivia, global_step)
            train_dataloader_iter = iter(train_dataloader)
            batch = next(train_dataloader_iter)
            dist.barrier()

        if args.num_epoch != 0 and iter_count > args.num_epoch:
            break

        step += 1
        if args.triplet:
            loss, loss_dict = triplet_fwd_pass(args, model, batch)
        else:
            loss, correct_cnt = do_biencoder_fwd_pass(args, model, batch)

        if args.fp16:
            with amp.scale_loss(loss, optimizer) as scaled_loss:
                scaled_loss.backward()
        else:
            if step % args.gradient_accumulation_steps == 0:
                loss.backward()
            else:
                # Skip gradient all-reduce on accumulation sub-steps.
                with model.no_sync():
                    loss.backward()
        # Accumulate per-component losses for logging.
        if len(tr_loss) >0:
            for k in loss_dict:
                tr_loss[k] = tr_loss[k] + loss_dict[k].item()
        else:
            for k in loss_dict:
                tr_loss[k] = loss_dict[k].item()

        if step % args.gradient_accumulation_steps == 0:
            if args.fp16:
                torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
            else:
                torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)

            optimizer.step()
            scheduler.step()  # Update learning rate schedule
            model.zero_grad()
            global_step += 1

            if args.logging_steps > 0 and global_step % args.logging_steps == 0:
                logs = {"loss_total":0.0}
                for k in tr_loss:
                    tr_loss[k] = tr_loss[k] / args.logging_steps
                    logs[k] = tr_loss[k]
                    logs["loss_total"] = logs["loss_total"] + logs[k]
                # loss_scalar = tr_loss / args.logging_steps
                learning_rate_scalar = scheduler.get_lr()[0]
                # logs["loss"] = loss_scalar
                logs["learning_rate"] = learning_rate_scalar
                tr_loss = {}

                if is_first_worker():
                    for key, value in logs.items():
                        tb_writer.add_scalar(key, value, global_step)
                    logger.info(json.dumps({**logs, **{"step": global_step}}))

            if is_first_worker() and args.save_steps > 0 and global_step % args.save_steps == 0:
                if args.fp16:
                    # fp16 resume needs the amp state, so save a raw dict here.
                    cp = os.path.join(args.output_dir, 'checkpoint-' + str(global_step))
                    # Save checkpoint
                    checkpoint = {
                        'model': model.state_dict(),
                        'optimizer': optimizer.state_dict(),
                        'scheduler' : scheduler.state_dict(),
                        'amp': amp.state_dict(),
                        'epoch':0,
                        'offset': global_step,
                    }
                    torch.save(checkpoint, cp)
                    logger.info('Saved checkpoint at %s', cp)
                else:
                    _save_checkpoint(args, model, optimizer, scheduler, global_step)

    if args.local_rank == -1 or torch.distributed.get_rank() == 0:
        tb_writer.close()

    return global_step
def evaluate_dev(args, model, passage_cache, source=""):
    """Compute NLL loss and top-1 accuracy on the dev split.

    ``source`` is a suffix selecting an alternate dev set (e.g. "-trivia").
    Returns (mean_loss, correct_ratio). Runs the model in eval mode and
    restores train mode before returning.
    """
    dev_query_collection_path = os.path.join(args.data_dir, "dev-query{}".format(source))
    dev_query_cache = EmbeddingCache(dev_query_collection_path)

    logger.info('NLL validation ...')
    model.eval()
    log_result_step = 100
    batches = 0
    total_loss = 0.0
    total_correct_predictions = 0
    with dev_query_cache:
        dev_data_path = os.path.join(args.data_dir, "dev-data{}".format(source))
        with open(dev_data_path, 'r') as f:
            dev_data = f.readlines()
        dev_dataset = StreamingDataset(dev_data, GetTrainingDataProcessingFn(args, dev_query_cache, passage_cache, shuffle=False))
        dev_dataloader = DataLoader(dev_dataset, batch_size=args.train_batch_size*2)
        # TODO: trivia loading bug
        for i, batch in enumerate(dev_dataloader):
            loss, correct_cnt = do_biencoder_fwd_pass(args, model, batch)
            # Deliberate: freeing the graph this way avoids CUDA OOM during eval.
            loss.backward()  # get CUDA oom without this
            model.zero_grad()
            total_loss += loss.item()
            total_correct_predictions += correct_cnt
            batches += 1
            if (i + 1) % log_result_step == 0:
                logger.info('Eval step: %d , loss=%f ', i, loss.item())
        logger.info(f"totoal {i+1} loop for dev")
    total_loss = total_loss / batches
    # NOTE(review): assumes every worker processed the same number of batches.
    total_samples = batches * args.train_batch_size * torch.distributed.get_world_size()
    correct_ratio = float(total_correct_predictions / total_samples)
    logger.info('NLL Validation: loss = %f. correct prediction ratio %d/%d ~ %f', total_loss,
                total_correct_predictions,
                total_samples,
                correct_ratio
                )
    model.train()
    return total_loss, correct_ratio
def triplet_fwd_pass(args, model, batch):
    """Forward pass for triplet (query, positive, negative) batches.

    Builds the model input dict from the batch tensors, optionally adding the
    negative-query fields for dual training, and scales loss values for
    multi-GPU averaging and gradient accumulation.
    Returns (loss, loss_dict).
    """
    if args.grouping_ann_data > 0:
        # NOTE(review): unimplemented branch — `inputs` is never bound here, so
        # reaching model(**inputs) below would raise NameError. Confirm intent.
        pass  # TODO: if need more type of data
    else:
        batch = tuple(t.to(args.device) for t in batch)
        inputs = {"query_ids": batch[0].long(), "attention_mask_q": batch[1].long(),
                  "input_ids_a": batch[3].long(), "attention_mask_a": batch[4].long(),
                  "input_ids_b": batch[6].long(), "attention_mask_b": batch[7].long()}
        if args.dual_training:
            inputs["neg_query_ids"] = batch[9].long()
            inputs["attention_mask_neg_query"] = batch[10].long()
            inputs["dual_loss_weight"] = args.dual_loss_weight

    loss, loss_dict = model(**inputs)
    if args.n_gpu > 1:
        # Average the per-device losses produced by DataParallel.
        loss = loss.mean()
        for k in loss_dict:
            loss_dict[k] = loss_dict[k].mean()
    if args.gradient_accumulation_steps > 1:
        loss = loss / args.gradient_accumulation_steps
        for k in loss_dict:
            loss_dict[k] = loss_dict[k] / args.gradient_accumulation_steps
    return loss, loss_dict
def do_biencoder_fwd_pass(args, model, batch) -> (
        torch.Tensor, int):
    """Bi-encoder forward pass with cross-worker in-batch negatives.

    Encodes queries and contexts locally, all-gathers the embeddings from
    every worker, and computes an NLL loss over the global score matrix.
    Returns (loss, number_of_correct_top1_predictions).
    """
    batch = tuple(t.to(args.device) for t in batch)
    # Rows alternate query/context; every second row ([::2]) is a query.
    inputs = {"query_ids": batch[0][::2].long(), "attention_mask_q": batch[1][::2].long(),
              "input_ids_a": batch[3].long(), "attention_mask_a": batch[4].long()}

    local_q_vector, local_ctx_vectors = model(**inputs)

    # Detached CPU copies for the gather; local tensors keep their grad graph.
    q_vector_to_send = torch.empty_like(local_q_vector).cpu().copy_(local_q_vector).detach_()
    ctx_vector_to_send = torch.empty_like(local_ctx_vectors).cpu().copy_(local_ctx_vectors).detach_()

    global_question_ctx_vectors = all_gather_list(
        [q_vector_to_send, ctx_vector_to_send],
        max_size=150000)

    global_q_vector = []
    global_ctxs_vector = []

    for i, item in enumerate(global_question_ctx_vectors):
        q_vector, ctx_vectors = item

        # Use the local (grad-tracking) tensors for this rank's own slice.
        if i != args.local_rank:
            global_q_vector.append(q_vector.to(local_q_vector.device))
            global_ctxs_vector.append(ctx_vectors.to(local_q_vector.device))
        else:
            global_q_vector.append(local_q_vector)
            global_ctxs_vector.append(local_ctx_vectors)

    global_q_vector = torch.cat(global_q_vector, dim=0)
    global_ctxs_vector = torch.cat(global_ctxs_vector, dim=0)

    # Score every query against every context across all workers.
    scores = torch.matmul(global_q_vector, torch.transpose(global_ctxs_vector, 0, 1))
    if len(global_q_vector.size()) > 1:
        q_num = global_q_vector.size(0)
        scores = scores.view(q_num, -1)
    softmax_scores = F.log_softmax(scores, dim=1)
    # Contexts come in (positive, negative) pairs, so positives sit at even indices.
    positive_idx_per_question = [i*2 for i in range(q_num)]
    loss = F.nll_loss(softmax_scores, torch.tensor(positive_idx_per_question).to(softmax_scores.device),
                      reduction='mean')
    max_score, max_idxs = torch.max(softmax_scores, 1)
    correct_predictions_count = (max_idxs == torch.tensor(positive_idx_per_question).to(max_idxs.device)).sum()
    is_correct = correct_predictions_count.sum().item()

    if args.n_gpu > 1:
        loss = loss.mean()
    if args.gradient_accumulation_steps > 1:
        loss = loss / args.gradient_accumulation_steps
    return loss, is_correct
def _save_checkpoint(args, model, optimizer, scheduler, step: int) -> str:
    """Write a CheckpointState for the given training step.

    The file is named ``checkpoint-<step>`` under ``args.output_dir``; the
    epoch field is always stored as 0 and no extra meta params are saved.
    Returns the checkpoint path.
    """
    unwrapped_model = get_model_obj(model)
    path = os.path.join(args.output_dir, 'checkpoint-' + str(step))
    state = CheckpointState(
        unwrapped_model.state_dict(),
        optimizer.state_dict(),
        scheduler.state_dict(),
        step,
        0,
        {},
    )
    torch.save(state._asdict(), path)
    logger.info('Saved checkpoint at %s', path)
    return path
def optimizer_to(optim, device):
    """Move every tensor held in an optimizer's state onto ``device`` in place.

    Optimizer state values are either tensors or dicts of per-parameter
    tensors (e.g. momentum buffers); both shapes are handled. Uses the public
    ``Tensor.grad`` accessor instead of the private ``_grad`` attribute.
    """
    def _move_tensor(t):
        # Move the tensor's storage (and its gradient, if any) in place.
        t.data = t.data.to(device)
        if t.grad is not None:
            t.grad.data = t.grad.data.to(device)

    for param in optim.state.values():
        # Not sure there are any global tensors in the state dict
        if isinstance(param, torch.Tensor):
            _move_tensor(param)
        elif isinstance(param, dict):
            for subparam in param.values():
                if isinstance(subparam, torch.Tensor):
                    _move_tensor(subparam)
def _load_saved_state(model, optimizer, scheduler, saved_state: CheckpointState, load_optimizer_scheduler=False):
    """Restore model (and optionally optimizer/scheduler) state from a checkpoint.

    Returns (step, model, optimizer, scheduler), where step is the saved offset.
    """
    epoch = saved_state.epoch  # kept for parity with the checkpoint format; unused
    step = saved_state.offset
    logger.info('Loading checkpoint @ step=%s', step)

    target_model = get_model_obj(model)
    logger.info('Loading saved model state ...')
    # set strict=False here if a model variant adds an extra projection layer
    target_model.load_state_dict(saved_state.model_dict)

    if load_optimizer_scheduler:
        optimizer.load_state_dict(saved_state.optimizer_dict)
        scheduler.load_state_dict(saved_state.scheduler_dict)
        logger.info('Loading the optimizer and scheduler to resume training')

    return step, model, optimizer, scheduler
def get_arguments():
    """Build and parse the command line for ANN dense-retrieval training.

    Groups: required paths and model selection, tokenization limits,
    optimization hyper-parameters, fp16/distributed options, and
    ANN/dual-training specific knobs. Returns the parsed namespace.
    """
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the cached passage and query files",
    )
    parser.add_argument(
        "--ann_dir",
        default=None,
        type=str,
        required=True,
        help="The ann training data dir. Should contain the output of ann data generation job",
    )
    parser.add_argument(
        "--model_type",
        default=None,
        type=str,
        required=True,
        help="Model type selected in the list: " + ", ".join(MSMarcoConfigDict.keys()),
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS),
    )
    parser.add_argument(
        "--task_name",
        default=None,
        type=str,
        required=True,
        help="The name of the task to train selected in the list: " + ", ".join(processors.keys()),
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument(
        "--num_epoch",
        default=0,
        type=int,
        help="Number of epoch to train, if specified will use training data instead of ann",
    )

    # Other parameters
    parser.add_argument(
        "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name",
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name",
    )
    parser.add_argument(
        "--cache_dir",
        default="",
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help="The maximum total input sequence length after tokenization. Sequences longer "
        "than this will be truncated, sequences shorter will be padded.",
    )
    parser.add_argument(
        "--max_query_length",
        default=64,
        type=int,
        help="The maximum total input sequence length after tokenization. Sequences longer "
        "than this will be truncated, sequences shorter will be padded.",
    )
    parser.add_argument("--triplet", default = False, action="store_true", help="Whether to run training.")
    parser.add_argument(
        "--log_dir",
        default=None,
        type=str,
        help="Tensorboard log dir",
    )
    parser.add_argument(
        "--optimizer",
        default="adamW",
        type=str,
        help="Optimizer - lamb or adamW",
    )
    parser.add_argument(
        "--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=2.0, type=float, help="Max gradient norm.")
    parser.add_argument(
        "--max_steps",
        default=300000,
        type=int,
        help="If > 0: set total number of training steps to perform",
    )
    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
    parser.add_argument("--logging_steps", type=int, default=500, help="Log every X updates steps.")
    parser.add_argument("--save_steps", type=int, default=500, help="Save checkpoint every X updates steps.")
    parser.add_argument("--no_cuda", action="store_true", help="Avoid using CUDA when available")
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
        "See details at https://nvidia.github.io/apex/amp.html",
    )
    parser.add_argument(
        "--resume_fp16_checkpoint",
        action="store_true",
        help="whether to load the amp_dict",
    )

    # ----------------- ANN HyperParam ------------------
    parser.add_argument(
        "--load_optimizer_scheduler",
        default = False,
        action="store_true",
        help="load scheduler from checkpoint or not",
    )
    parser.add_argument(
        "--single_warmup",
        default = True,
        action="store_true",
        help="use single or re-warmup",
    )
    # ----------------- End of Doc Ranking HyperParam ------------------
    parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
    parser.add_argument("--server_ip", type=str, default="", help="For distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="For distant debugging.")
    parser.add_argument(
        "--grouping_ann_data",
        type=int,
        default=-1,
        help="group multiple <q,d> pair data into one line, I prefer set to 32",
    )
    parser.add_argument(
        "--dual_training",
        action="store_true",
        help="enable dual training, change the data loading, forward function and loss function",
    )
    parser.add_argument(
        "--dual_loss_weight",
        type=float,
        default=0.1,
        help="dual learning loss item weight",
    )
    parser.add_argument(
        "--representation_l2_normalization",
        action="store_true",
        help="enable l2_normalization on the representative embeddings for ANN retrieval, previously named as --l2_normalization",
    )
    parser.add_argument(
        "--temperature",
        type=float,
        default=1.0,
        help="temperature in SimCLR",
    )

    args = parser.parse_args()

    return args
def set_env(args):
    """Configure remote debugging, CUDA/distributed device placement, logging and RNG seeding.

    Mutates ``args`` in place: sets ``args.n_gpu`` and ``args.device``.
    """
    # Setup distant debugging if needed
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()
    # Setup CUDA, GPU & distributed training
    if args.local_rank == -1 or args.no_cuda:
        # Single-process mode: use all visible GPUs (or CPU when CUDA is unavailable/disabled).
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
        # One process per GPU; the device must be selected before init_process_group.
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        torch.distributed.init_process_group(backend="nccl")
        args.n_gpu = 1
    args.device = device
    # Setup logging (INFO only on the main process to avoid duplicated output from workers)
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        args.local_rank,
        device,
        args.n_gpu,
        bool(args.local_rank != -1),
        args.fp16,
    )
    # Set seed
    set_seed(args)
def load_model(args):
    """Build the tokenizer and model for the binary relevance-classification task.

    Returns ``(tokenizer, model)``; the model is already moved to ``args.device``.
    Also mutates ``args``: lower-cases ``task_name``/``model_type``, sets
    ``output_mode`` and — in the distributed case — ``world_size`` and ``rank``.
    """
    # Prepare GLUE task
    args.task_name = args.task_name.lower()
    args.output_mode = "classification"
    label_list = ["0", "1"]
    num_labels = len(label_list)
    # store args
    if args.local_rank != -1:
        args.world_size = torch.distributed.get_world_size()
        args.rank = dist.get_rank()
    # Load pretrained model and tokenizer
    if args.local_rank not in [-1, 0]:
        torch.distributed.barrier()  # Make sure only the first process in distributed training will download model & vocab
    args.model_type = args.model_type.lower()
    configObj = MSMarcoConfigDict[args.model_type]
    tokenizer = configObj.tokenizer_class.from_pretrained(
        "bert-base-uncased",
        do_lower_case=True,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )
    if is_first_worker():
        # Create output directory if needed
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
    model = configObj.model_class(args)
    # Propagate the l2-normalization flag so the representation embeddings used
    # for ANN retrieval match the training configuration.
    model.is_representation_l2_normalization = args.representation_l2_normalization
    if args.local_rank == 0:
        torch.distributed.barrier()  # Make sure only the first process in distributed training will download model & vocab
    model.to(args.device)
    return tokenizer, model
def main():
    """Entry point: parse CLI arguments, configure the environment, then train."""
    args = get_arguments()
    set_env(args)
    tokenizer, model = load_model(args)

    # Memory-mapped caches for the pre-tokenized training queries and passages.
    query_cache = EmbeddingCache(os.path.join(args.data_dir, "train-query"))
    passage_cache = EmbeddingCache(os.path.join(args.data_dir, "passages"))

    with query_cache, passage_cache:
        global_step = train(args, model, tokenizer, query_cache, passage_cache)

    logger.info(" global_step = %s", global_step)

    # Wait for all distributed workers before exiting.
    if args.local_rank != -1:
        dist.barrier()


if __name__ == "__main__":
    main()
335 | {
"word": "Undersigned",
"definitions": [
"The signatory or co-signatories to the document in question."
],
"parts-of-speech": "Noun"
} | 67 |
2,151 | <gh_stars>1000+
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef UI_OZONE_PUBLIC_OZONE_PLATFORM_H_
#define UI_OZONE_PUBLIC_OZONE_PLATFORM_H_
#include <memory>
#include "base/macros.h"
#include "base/message_loop/message_loop.h"
#include "services/service_manager/public/cpp/bind_source_info.h"
#include "services/service_manager/public/cpp/binder_registry.h"
#include "ui/events/system_input_injector.h"
#include "ui/ozone/ozone_export.h"
namespace display {
class NativeDisplayDelegate;
}
namespace gfx {
class Rect;
}
namespace IPC {
class MessageFilter;
}
namespace service_manager {
class Connector;
}
namespace ui {
class CursorFactoryOzone;
class InputController;
class GpuPlatformSupportHost;
class OverlayManagerOzone;
class PlatformWindow;
class PlatformWindowDelegate;
class SurfaceFactoryOzone;
class SystemInputInjector;
// Base class for Ozone platform implementations.
//
// Ozone platforms must override this class and implement the virtual
// GetFooFactoryOzone() methods to provide implementations of the
// various ozone interfaces.
//
// The OzonePlatform subclass can own any state needed by the
// implementation that is shared between the various ozone interfaces,
// such as a connection to the windowing system.
//
// A platform is free to use different implementations of each
// interface depending on the context. You can, for example, create
// different objects depending on the underlying hardware, command
// line flags, or whatever is appropriate for the platform.
class OZONE_EXPORT OzonePlatform {
 public:
  OzonePlatform();
  virtual ~OzonePlatform();

  // Additional initialization params for the platform. Platforms must not
  // retain a reference to this structure.
  struct InitParams {
    // Ozone may retain this pointer for later use. An Ozone platform embedder
    // may set this value if operating in the idiomatic mojo fashion with a
    // service manager. Mojo transport does not require a service manager but in
    // that case ozone will not be able to connect to the DRM and cursor
    // services. Instead the host must invoke |OnGpuServiceLaunched| as
    // described in ui/ozone/public/gpu_platform_support_host.h to inform the
    // ozone host that a process containing these services is running.
    service_manager::Connector* connector = nullptr;

    // Setting this to true indicates that the platform implementation should
    // operate as a single process for platforms (i.e. drm) that are usually
    // split between a host and viz specific portion.
    bool single_process = false;

    // Setting this to true indicates that the platform implementation should
    // use mojo. Setting this to true requires calling |AddInterfaces|
    // afterwards in the Viz process and providing a connector as part.
    bool using_mojo = false;
  };

  // Ensures the OzonePlatform instance without doing any initialization.
  // No-op in case the instance is already created.
  // This is useful in order to call virtual methods that depend on the ozone
  // platform selected at runtime, e.g. ::GetMessageLoopTypeForGpu.
  static OzonePlatform* EnsureInstance();

  // Initializes the subsystems/resources necessary for the UI process (e.g.
  // events) with additional properties to customize the ozone platform
  // implementation. Ozone will not retain InitParams after returning from
  // InitializeForUI.
  static void InitializeForUI(const InitParams& args);

  // Initializes the subsystems for rendering but with additional properties
  // provided by |args| as with InitializeForUI.
  static void InitializeForGPU(const InitParams& args);

  // Deletes the instance. Does nothing if OzonePlatform has not yet been
  // initialized.
  static void Shutdown();

  static OzonePlatform* GetInstance();

  // Registers a callback to be run when the OzonePlatform is initialized. Note
  // that if an instance already exists, then the callback is called
  // immediately. If an instance does not exist, and is created later, then the
  // callback is called once the instance is created and initialized, on the
  // thread it is initialized on. If the caller requires the callback to run on
  // a specific thread, then it needs to ensure that by itself.
  static void RegisterStartupCallback(
      base::OnceCallback<void(OzonePlatform*)> callback);

  // Factory getters to override in subclasses. The returned objects will be
  // injected into the appropriate layer at startup. Subclasses should not
  // inject these objects themselves. Ownership is retained by OzonePlatform.
  virtual ui::SurfaceFactoryOzone* GetSurfaceFactoryOzone() = 0;
  virtual ui::OverlayManagerOzone* GetOverlayManager() = 0;
  virtual ui::CursorFactoryOzone* GetCursorFactoryOzone() = 0;
  virtual ui::InputController* GetInputController() = 0;
  // Non-pure: a default implementation exists for platforms that do not
  // provide a GPU-process message filter.
  virtual IPC::MessageFilter* GetGpuMessageFilter();
  virtual ui::GpuPlatformSupportHost* GetGpuPlatformSupportHost() = 0;
  virtual std::unique_ptr<SystemInputInjector> CreateSystemInputInjector() = 0;
  virtual std::unique_ptr<PlatformWindow> CreatePlatformWindow(
      PlatformWindowDelegate* delegate,
      const gfx::Rect& bounds) = 0;
  virtual std::unique_ptr<display::NativeDisplayDelegate>
  CreateNativeDisplayDelegate() = 0;

  // Returns the message loop type required for OzonePlatform instance that
  // will be initialized for the GPU process.
  virtual base::MessageLoop::Type GetMessageLoopTypeForGpu();

  // Ozone platform implementations may also choose to expose mojo interfaces to
  // internal functionality. Embedders wishing to take advantage of ozone mojo
  // implementations must invoke AddInterfaces with a valid
  // service_manager::BinderRegistry* pointer to export all Mojo interfaces
  // defined within Ozone.
  //
  // Requests arriving before they can be immediately handled will be queued and
  // executed later.
  //
  // A default do-nothing implementation is provided to permit platform
  // implementations to opt out of implementing any Mojo interfaces.
  virtual void AddInterfaces(service_manager::BinderRegistryWithArgs<
                             const service_manager::BindSourceInfo&>* registry);

  // The GPU-specific portion of Ozone would typically run in a sandboxed
  // process for additional security. Some startup might need to wait until
  // after the sandbox has been configured. The embedder should use this method
  // to specify that the sandbox is configured and that GPU-side setup should
  // complete. A default do-nothing implementation is provided to permit
  // platform implementations to ignore sandboxing and any associated launch
  // ordering issues.
  virtual void AfterSandboxEntry();

 private:
  virtual void InitializeUI(const InitParams& params) = 0;
  virtual void InitializeGPU(const InitParams& params) = 0;

  // Singleton instance, managed by the static Initialize*/Shutdown methods.
  static OzonePlatform* instance_;

  DISALLOW_COPY_AND_ASSIGN(OzonePlatform);
};
} // namespace ui
#endif // UI_OZONE_PUBLIC_OZONE_PLATFORM_H_
| 1,896 |
461 | <filename>smsBomb/plugins/aliyun.py
# coding=utf-8
import base64
import datetime
import hmac
import json
import urllib.parse
from smsBomb import SmsPlugin
def quote(s, safe='~'):
    """Percent-encode *s*; characters in *safe* are left unencoded (default: only '~')."""
    return urllib.parse.quote(s, safe=safe)
def stringify(**kwargs):
    """Join keyword arguments into a sorted 'k=v&k=v' query string (needed by the Aliyun signature)."""
    return '&'.join('{}={}'.format(key, value) for key, value in sorted(kwargs.items()))
def canonicalize(**kwargs):
    """Percent-encode each sorted 'k=v' pair, join with '&', then percent-encode the
    whole string once more, as the Aliyun signature algorithm requires ('~' stays literal)."""
    encoded_pairs = ('{}={}'.format(urllib.parse.quote(key, safe='~'),
                                    urllib.parse.quote(value, safe='~'))
                     for key, value in sorted(kwargs.items()))
    return urllib.parse.quote('&'.join(encoded_pairs), safe='~')
class AliyunPlugin(SmsPlugin):
    """Aliyun (Alibaba Cloud, 阿里大鱼) SMS plugin."""
    API_URLS = {
        'send': 'https://dysmsapi.aliyuncs.com'
    }
    @property
    def curtime(self):
        """Current UTC time in ISO-8601 format with 'T' separator (the API's Timestamp format)."""
        return datetime.datetime.utcnow().isoformat("T")
    def checksum(self, plain_text, secret_key=None):
        """HMAC-SHA1 sign ``plain_text``, then base64- and percent-encode the digest.

        Falls back to the configured app_secret when ``secret_key`` is not given.
        Note the trailing '&' appended to the key before signing.
        """
        plain_text = plain_text.encode('utf-8')
        secret_key = secret_key if secret_key else self.auth['app_secret']
        key = (secret_key + '&').encode('utf-8')
        digest = hmac.new(key, plain_text, 'sha1').digest()
        return quote(base64.b64encode(digest))
    def _create_params(self, mobile,
                       sign_name,
                       template_code,
                       template_params):
        """Assemble the (unsigned) request parameters for the SendSms action.

        :param mobile: phone number
        :param sign_name: SMS signature name
        :param template_code: message template code
        :param template_params: template parameters (dict; JSON-encoded here)
        :return: dict of request parameters
        """
        return {
            'AccessKeyId': self.auth['app_key'],
            'Action': 'SendSms',
            'Format': 'JSON',
            'PhoneNumbers': str(mobile),
            'RegionId': 'cn-hangzhou',
            'SignName': sign_name,
            'SignatureMethod': 'HMAC-SHA1',
            'SignatureNonce': self.nonce,
            'SignatureVersion': '1.0',
            'TemplateCode': template_code,
            'TemplateParam': json.dumps(template_params),
            'Timestamp': self.curtime,
            'Version': '2017-05-25',
        }
    def send(self, mobile, **kwargs):
        """Send an SMS to ``mobile``; returns True iff the API answers Code == 'OK'.

        Expects ``sign_name`` and ``template_code`` in kwargs; ``template_params``
        is optional.
        """
        params = self._create_params(
            mobile,
            kwargs['sign_name'],
            kwargs['template_code'],
            kwargs.get('template_params'))
        # The signature is computed over the canonicalized POST request string.
        plain_text = 'POST&%2F&' + canonicalize(**params)
        sign = self.checksum(plain_text)
        body = 'Signature={}&{}'.format(sign, stringify(**params))
        self.logger.debug('拼接完成请求体: %s', body)
        headers = {'Content-Type': 'application/x-www-form-urlencoded'}
        resp = self._req.post(self.api, headers=headers,
                              data=body.encode('utf-8')).json()
        self.logger.info(resp)
        return resp['Code'] == 'OK'
| 1,436 |
679 | /**************************************************************
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*************************************************************/
#ifndef _SVX_TABWIN_HXX
#define _SVX_TABWIN_HXX
#include <svtools/svtreebx.hxx>
#include <vcl/floatwin.hxx>
#include <sfx2/basedlgs.hxx>
#include <sfx2/childwin.hxx>
#include <sfx2/ctrlitem.hxx>
#include <com/sun/star/form/XForm.hpp>
#include <comphelper/propmultiplex.hxx>
#include <svtools/transfer.hxx>
#include "svx/dbtoolsclient.hxx"
//==================================================================
class FmFieldWin;
// Tree list box embedded in the field window (FmFieldWin); supports drag
// (StartDrag) as well as drop handling (AcceptDrop / ExecuteDrop).
class SAL_DLLPRIVATE FmFieldWinListBox
    :public SvTreeListBox
{
    FmFieldWin* pTabWin;  // the associated FmFieldWin (see ctor)

protected:
//  virtual void Command( const CommandEvent& rEvt );

public:
    FmFieldWinListBox( FmFieldWin* pParent );
    virtual ~FmFieldWinListBox();

    sal_Int8 AcceptDrop( const AcceptDropEvent& rEvt );
    sal_Int8 ExecuteDrop( const ExecuteDropEvent& rEvt );

protected:
    // DragSourceHelper
    virtual void StartDrag( sal_Int8 nAction, const Point& rPosPixel );

    // SvLBox
    virtual sal_Bool DoubleClickHdl();

    using SvLBox::ExecuteDrop;
};
//========================================================================
class FmFormShell;
// Data holder associated with the field window; currently has no members
// (placeholder for persistent window state).
class SAL_DLLPRIVATE FmFieldWinData
{
public:
    FmFieldWinData();
    ~FmFieldWinData();
};
//========================================================================
// Floating "field" window listing the database fields of the current form.
// Registers as a property-change listener to keep its content in sync, and
// exposes the current data source name, object name/type and connection via
// getters.
class SAL_DLLPRIVATE FmFieldWin :public SfxFloatingWindow
                                ,public SfxControllerItem
                                ,public ::comphelper::OPropertyChangeListener
                                ,public ::svxform::OStaticDataAccessTools
{
    ::osl::Mutex m_aMutex;
    FmFieldWinListBox* pListBox;    // list box showing the field names
    FmFieldWinData* pData;
    ::svxform::SharedConnection
            m_aConnection;          // connection to the current data source
    ::rtl::OUString m_aDatabaseName,    // current data source name
                    m_aObjectName;      // name of the object the form is based on
    sal_Int32 m_nObjectType;            // type of m_aObjectName (see GetObjectType)
    ::comphelper::OPropertyChangeMultiplexer* m_pChangeListener;  // form property-change multiplexer

public:
    FmFieldWin(SfxBindings *pBindings,
               SfxChildWindow *pMgr, Window* pParent);
    virtual ~FmFieldWin();

    virtual void Resize();
    virtual sal_Bool Close();
    virtual void GetFocus();
    virtual long PreNotify( NotifyEvent& _rNEvt );
    virtual void StateChanged(sal_uInt16 nSID, SfxItemState eState,
                              const SfxPoolItem* pState);

    FmFieldWinData* GetData() const {return pData;}

    // Rebuild the field list from the given shell / form.
    void UpdateContent(FmFormShell*);
    void UpdateContent(const ::com::sun::star::uno::Reference< ::com::sun::star::form::XForm > &);
    void FillInfo( SfxChildWinInfo& rInfo ) const;

    const ::rtl::OUString& GetDatabaseName() const { return m_aDatabaseName; }
    ::svxform::SharedConnection GetConnection() const { return m_aConnection; }
    const ::rtl::OUString& GetObjectName() const { return m_aObjectName; }
    sal_Int32 GetObjectType() const { return m_nObjectType; }

    sal_Bool createSelectionControls( );

protected:
    // FmXChangeListener
    virtual void _propertyChanged(const ::com::sun::star::beans::PropertyChangeEvent& evt) throw( ::com::sun::star::uno::RuntimeException );

protected:
    inline SfxBindings& GetBindings() { return SfxControllerItem::GetBindings(); }
    inline const SfxBindings& GetBindings() const { return SfxControllerItem::GetBindings(); }

    using SfxFloatingWindow::StateChanged;
};
//========================================================================
// SFX child-window registration/wrapper for the field window (FmFieldWin).
class SAL_DLLPRIVATE FmFieldWinMgr : public SfxChildWindow
{
public:
    FmFieldWinMgr(Window *pParent, sal_uInt16 nId,
                  SfxBindings *pBindings, SfxChildWinInfo *pInfo);
    SFX_DECL_CHILDWINDOW(FmFieldWinMgr);
};
#endif
| 1,418 |
1,010 | <reponame>steffakasid/RPi-Jukebox-RFID
#!/usr/bin/env python3
"""
Provides bt_switch (see below) as function and callable script
If called as script, the configuration of led_pin reflecting audio sink status is read from ../../settings/gpio_settings.ini'
See function get_led_pin_configuration for details. If no configuration file is found led_pin is None
Usage:
$ bt-sink-switch cmd [debug]
cmd = toggle|speakers|headphones : select audio target
debug : enable debug logging
"""
import sys
import re
import subprocess
import logging
import os
import configparser
# Module-level logger for this script
logger = logging.getLogger('bt-sink-switch.py')
logger.setLevel(logging.DEBUG)
# Console handler; default level INFO, raised to DEBUG when the optional
# 'debug' CLI argument is given (see the __main__ block below)
logconsole = logging.StreamHandler()
logconsole.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s: %(message)s', datefmt='%d.%m.%Y %H:%M:%S'))
logconsole.setLevel(logging.INFO)
logger.addHandler(logconsole)
def bt_usage(sname):
    """Print command-line usage when this module is executed as a script."""
    usage_lines = [
        "Usage",
        " ./" + sname + " toggle | speakers | headphones [debug]",
    ]
    for line in usage_lines:
        print(line)
def bt_check_mpc_err() -> None:
    """Inspect mpd's status output for a failed audio output and try to resume playback."""
    logger.debug("bt_check_mpc_err()")
    status = subprocess.run("mpc status", shell=True, check=False,
                            stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    logger.debug(status.stdout)
    # Recover when the stream reports: 'ERROR: Failed to open audio output'
    if re.search(b"ERROR:.*output", status.stdout) is not None:
        play = subprocess.run("mpc play", shell=True, check=False,
                              stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        logger.debug(play)
def bt_switch(cmd, led_pin=None):
    """
    Set/Toggle the audio output between regular speakers and bluetooth headphones.

    To be precise: toggle between mpc output 1 and mpc output 2. So, set up your
    /etc/mpd.conf correctly: the first audio_output section should be the speakers,
    the second audio_output section should be the headphones. If no bluetooth device
    is connected, this always defaults to mpc output 1 (speakers).

    To set up bluetooth headphones, follow the wiki. Short guide (without audio setup):
        sudo bluetoothctl
        power on
        agent on
        scan on                     -> shows list of bluetooth devices in reach
        pair C4:FB:20:63:A7:F2      -> pairing happens
        trust C4:FB:20:63:A7:F2     -> trust your device
        connect C4:FB:20:63:A7:F2   -> connect
        scan off
        exit
    Next time the headphones are switched on, they should connect automatically.

    Requires
        sudo apt install bluetooth

    Attention
        The user that runs this script (precisely: that runs bluetoothctl) needs proper
        access rights, otherwise bluetoothctl always returns "no default controller
        found". The superuser and members of the group "bluetooth" have these; the
        policy is in /etc/dbus-1/system.d/bluetooth.conf. E.g. to call this from the
        web interface you will most likely need to add www-data to the group bluetooth:
            sudo usermod -G bluetooth -a www-data
        Don't forget to reboot for group changes to take effect.

    LED support
        If a GPIO pin number (BCM) is provided, an LED reflects the output sink status:
        off = speakers, on = headphones. The LED blinks if a bluetooth sink is
        requested while no bluetooth device is connected, before defaulting to
        output 1.

    A note for developers: this script is not persistent; it is called (from various
    sources: GPIO button handler, RFID card number, web interface) when the output
    sink is changed/toggled, and exits. The LED state however must be persistent,
    so GPIO state is manipulated through shell commands rather than GPIOZero (which
    resets the pin state when the script ends).

    Parameters
    ----------
    :param cmd: string, one of "toggle" | "speakers" | "headphones"
    :param led_pin: integer BCM GPIO pin number of the status LED, or None to disable
        LED support (in which case no GPIO pin is blocked)
    """
    # Check for valid command
    if cmd not in ("toggle", "speakers", "headphones"):
        logger.error("Invalid command. Doing nothing.")
        return

    # Rudimentary check that the LED pin request is a plausible BCM GPIO number.
    if led_pin is not None and not (2 <= led_pin <= 27):
        # Bug fix: log the offending value *before* clearing it. Previously
        # led_pin was set to None first, so the message always read
        # "Ignoring led_pin = None" instead of showing the invalid value.
        logger.error("Invalid led_pin. Ignoring led_pin = " + str(led_pin))
        led_pin = None

    if led_pin is not None:
        # Set up the GPIO LED pin if not already configured. If it already exists,
        # sanity check the direction of the pin before use.
        try:
            with open("/sys/class/gpio/gpio" + str(led_pin) + "/direction") as f:
                if f.readline(3) != "out":
                    logger.error("LED pin already in use with direction 'in'. Ignoring led_pin = " + str(led_pin))
                    led_pin = None
        except FileNotFoundError:
            # GPIO direction file does not exist -> pin is not configured. Set it up (sleep is necessary!)
            proc = subprocess.run("echo " + str(led_pin) + " > /sys/class/gpio/export; \
                           sleep 0.1; \
                           echo out > /sys/class/gpio/gpio" + str(led_pin) + "/direction", shell=True, check=False,
                                  stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
            logger.debug(proc.stdout)

    # Figure out if output 1 (speakers) is enabled
    isSpeakerOn_console = subprocess.run("mpc outputs", shell=True, check=False, stdout=subprocess.PIPE,
                                         stderr=subprocess.STDOUT)
    logger.debug(isSpeakerOn_console.stdout)
    isSpeakerOn = re.search(rb"^Output 1.*enabled", isSpeakerOn_console.stdout)

    # Figure out if a bluetooth device is connected (any device will do). Assume here
    # that only speakers/headsets will be connected:
    #   -> no need for the user to adapt a MAC address
    #   -> actually supports multiple speakers/headsets paired to the phoniebox
    # Alternative: check a specific device only with "bluetoothctl info MACADDRESS".
    isBtConnected_console = subprocess.run("bluetoothctl info", shell=True, check=False, stdout=subprocess.PIPE,
                                           stderr=subprocess.STDOUT)
    logger.debug(isBtConnected_console.stdout)
    # Raw bytes literal: "\s" is a regex escape, not a Python string escape
    # (the previous plain bytes literal relied on an invalid escape sequence).
    isBtConnected = re.search(rb"Connected:\s+yes", isBtConnected_console.stdout)

    if (cmd == "toggle" and isSpeakerOn) or (cmd == "headphones"):
        # Only switch to BT headphones if they are actually connected
        if isBtConnected:
            print("Switched audio sink to \"Output 2\"")
            # With "mpc enable only 2", output 1 gets disabled before output 2 gets
            # enabled, causing a stream output fail. This order avoids the issue.
            proc = subprocess.run("mpc enable 2; sleep 0.1; mpc disable 1", shell=True, check=False,
                                  stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
            logger.debug(proc.stdout)
            # Yet, in some cases, a stream error still occurs: check and recover
            bt_check_mpc_err()
            if led_pin is not None:
                proc = subprocess.run("echo 1 > /sys/class/gpio/gpio" + str(led_pin) + "/value", shell=True,
                                      check=False, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
                logger.debug(b'LED on: ' + proc.stdout)
            return
        else:
            print("No bluetooth device connected. Defaulting to \"Output 1\".")
            # Blink the LED three times to signal "no bluetooth sink available".
            # (Consistently use "is not None" for the pin check; valid pins 2..27
            # are all truthy, so behavior is unchanged.)
            if led_pin is not None:
                sleeptime = 0.25
                for i in range(0, 3):
                    subprocess.run("echo 1 > /sys/class/gpio/gpio" + str(led_pin) + "/value; sleep " + str(
                        sleeptime) + "; echo 0 > /sys/class/gpio/gpio" + str(led_pin) + "/value; sleep " + str(
                        sleeptime), shell=True, check=False, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)

    # Default: Switch to Speakers
    print("Switched audio sink to \"Output 1\"")
    # "mpc enable only 1" always enables 1 first, avoiding any intermediate state
    # with no valid output stream.
    proc = subprocess.run("mpc enable only 1", shell=True, check=False, stdout=subprocess.PIPE,
                          stderr=subprocess.STDOUT)
    logger.debug(proc.stdout)
    # Yet, in some cases, a stream error still occurs: check and recover
    bt_check_mpc_err()
    if led_pin is not None:
        proc = subprocess.run("echo 0 > /sys/class/gpio/gpio" + str(led_pin) + "/value", shell=True, check=False,
                              stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        logger.debug(b'LED off: ' + proc.stdout)
def get_led_pin_config(cfg_file):
    """Read the BCM pin number of the sink-status LED from cfg_file (configparser format).

    cfg_file is relative to this script's location or an absolute path.
    The file must contain the entry (capitalization of the section name matters!):

        [BluetoothToggleLed]
        enabled = True
        led_pin = 6

    where led_pin is the BCM number of the GPIO pin (i.e. 'led_pin = 6' means GPIO6)
    and enabled can be used to temporarily disable the LED.

    :return: the pin number as int, or None when disabled, missing or invalid.
    """
    # Make sure to locate cfg_file relative to this script's location independent of working directory
    if not os.path.isabs(cfg_file):
        cfg_file = os.path.dirname(os.path.realpath(__file__)) + '/' + cfg_file
    logger.debug(f"Reading config file: '{cfg_file}'")
    cfg = configparser.ConfigParser()
    cfg_file_success = cfg.read(cfg_file)
    if not cfg_file_success:
        logger.debug(f"Could not read '{cfg_file}'. Continue with default values (i.e. led off).")
    section_name = 'BluetoothToggleLed'
    led_pin = None
    if section_name in cfg:
        if cfg[section_name].getboolean('enabled', fallback=False):
            led_pin = cfg[section_name].getint('led_pin', fallback=None)
            if led_pin is None:
                # Bug fix: 'if not led_pin' also treated a configured value of 0 as
                # "could not read"; report 0 as out-of-range below instead.
                logger.warning("Could not find 'led_pin' or could not read integer value")
            elif not 2 <= led_pin <= 27:
                # Range aligned with bt_switch(), which accepts only 2..27;
                # previously pin 1 passed here only to be rejected there.
                logger.warning(f"Ignoring out of range pin number: {led_pin}.")
                led_pin = None
    else:
        logger.debug(f"No section {section_name} found. Defaulting to led_pin = None")
    logger.debug(f"Using LED pin = {led_pin}")
    return led_pin
if __name__ == "__main__":
    # Optional second CLI argument (any value) enables debug logging.
    if len(sys.argv) == 3:
        logconsole.setLevel(logging.DEBUG)
    if 2 <= len(sys.argv) <= 3:
        # LED pin (BCM) for sink-status indication comes from the shared GPIO settings file.
        cfg_led_pin = get_led_pin_config('../../settings/gpio_settings.ini')
        bt_switch(sys.argv[1], cfg_led_pin)
    else:
        bt_usage(sys.argv[0])
| 4,364 |
2,151 | // Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "content/browser/dom_storage/session_storage_context_mojo.h"
#include <inttypes.h>
#include <cctype> // for std::isalnum
#include <cstring>
#include <utility>
#include "base/barrier_closure.h"
#include "base/bind.h"
#include "base/debug/stack_trace.h"
#include "base/memory/ptr_util.h"
#include "base/metrics/histogram_functions.h"
#include "base/metrics/histogram_macros.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/stringprintf.h"
#include "base/sys_info.h"
#include "base/trace_event/memory_dump_manager.h"
#include "build/build_config.h"
#include "components/services/leveldb/public/cpp/util.h"
#include "components/services/leveldb/public/interfaces/leveldb.mojom.h"
#include "content/browser/dom_storage/session_storage_leveldb_wrapper.h"
#include "content/browser/dom_storage/session_storage_namespace_impl_mojo.h"
#include "content/browser/leveldb_wrapper_impl.h"
#include "content/common/dom_storage/dom_storage_types.h"
#include "content/public/browser/session_storage_usage_info.h"
#include "content/public/common/content_features.h"
#include "services/file/public/mojom/constants.mojom.h"
#include "services/service_manager/public/cpp/connector.h"
#include "third_party/leveldatabase/env_chromium.h"
#include "third_party/leveldatabase/leveldb_chrome.h"
#include "url/gurl.h"
namespace content {
namespace {
// After this many consecutive commit errors we'll throw away the entire
// database.
const int kSessionStorageCommitErrorThreshold = 8;
// Limits on the cache size and number of areas in memory, over which the areas
// are purged.
#if defined(OS_ANDROID)
const unsigned kMaxSessionStorageAreaCount = 10;
const size_t kMaxSessionStorageCacheSize = 2 * 1024 * 1024;
#else
const unsigned kMaxSessionStorageAreaCount = 50;
const size_t kMaxSessionStorageCacheSize = 20 * 1024 * 1024;
#endif
// Why the in-memory session storage cache was purged; selects the suffixed
// histogram recorded by RecordSessionStorageCachePurgedHistogram().
enum class SessionStorageCachePurgeReason {
  kNotNeeded,
  kSizeLimitExceeded,
  kAreaCountLimitExceeded,
  kInactiveOnLowEndDevice,
  kAggressivePurgeTriggered
};
// Records the purged cache size (in KiB) to the aggregate histogram and to a
// reason-specific sub-histogram. |reason| must not be kNotNeeded.
void RecordSessionStorageCachePurgedHistogram(
    SessionStorageCachePurgeReason reason,
    size_t purged_size_kib) {
  UMA_HISTOGRAM_COUNTS_100000("SessionStorageContext.CachePurgedInKB",
                              purged_size_kib);
  switch (reason) {
    case SessionStorageCachePurgeReason::kSizeLimitExceeded:
      UMA_HISTOGRAM_COUNTS_100000(
          "SessionStorageContext.CachePurgedInKB.SizeLimitExceeded",
          purged_size_kib);
      break;
    case SessionStorageCachePurgeReason::kAreaCountLimitExceeded:
      UMA_HISTOGRAM_COUNTS_100000(
          "SessionStorageContext.CachePurgedInKB.AreaCountLimitExceeded",
          purged_size_kib);
      break;
    case SessionStorageCachePurgeReason::kInactiveOnLowEndDevice:
      UMA_HISTOGRAM_COUNTS_100000(
          "SessionStorageContext.CachePurgedInKB.InactiveOnLowEndDevice",
          purged_size_kib);
      break;
    case SessionStorageCachePurgeReason::kAggressivePurgeTriggered:
      UMA_HISTOGRAM_COUNTS_100000(
          "SessionStorageContext.CachePurgedInKB.AggressivePurgeTriggered",
          purged_size_kib);
      break;
    case SessionStorageCachePurgeReason::kNotNeeded:
      NOTREACHED();
      break;
  }
}
} // namespace
// Clones |connector| (when non-null) so this context owns its own connection
// to the service manager, and registers itself as a memory dump provider on
// |memory_dump_task_runner|. Requires the kMojoSessionStorage feature.
SessionStorageContextMojo::SessionStorageContextMojo(
    scoped_refptr<base::SequencedTaskRunner> memory_dump_task_runner,
    service_manager::Connector* connector,
    base::Optional<base::FilePath> local_partition_directory,
    std::string leveldb_name)
    : connector_(connector ? connector->Clone() : nullptr),
      partition_directory_path_(std::move(local_partition_directory)),
      leveldb_name_(std::move(leveldb_name)),
      memory_dump_id_(base::StringPrintf("SessionStorage/0x%" PRIXPTR,
                                         reinterpret_cast<uintptr_t>(this))),
      is_low_end_device_(base::SysInfo::IsLowEndDevice()),
      weak_ptr_factory_(this) {
  DCHECK(base::FeatureList::IsEnabled(features::kMojoSessionStorage));
  base::trace_event::MemoryDumpManager::GetInstance()
      ->RegisterDumpProviderWithSequencedTaskRunner(
          this, "SessionStorage", std::move(memory_dump_task_runner),
          base::trace_event::MemoryDumpProvider::Options());
}
SessionStorageContextMojo::~SessionStorageContextMojo() {
  // The context must already have been shut down (connection state moved to
  // CONNECTION_SHUTDOWN) before destruction.
  DCHECK_EQ(connection_state_, CONNECTION_SHUTDOWN);
  base::trace_event::MemoryDumpManager::GetInstance()->UnregisterDumpProvider(
      this);
}
// Binds |request| to the namespace identified by |namespace_id| on behalf of
// |process_id|. Defers until the database connection is established, lazily
// populates the namespace from the persisted metadata, and purges unused
// wrappers when the cache limits are exceeded.
void SessionStorageContextMojo::OpenSessionStorage(
    int process_id,
    const std::string& namespace_id,
    mojom::SessionStorageNamespaceRequest request) {
  if (connection_state_ != CONNECTION_FINISHED) {
    // Not connected yet: queue this call and retry once the connection is up.
    RunWhenConnected(
        base::BindOnce(&SessionStorageContextMojo::OpenSessionStorage,
                       weak_ptr_factory_.GetWeakPtr(), process_id, namespace_id,
                       std::move(request)));
    return;
  }
  auto found = namespaces_.find(namespace_id);
  DCHECK(found != namespaces_.end()) << namespace_id;
  if (!found->second->IsPopulated() &&
      !found->second->waiting_on_clone_population()) {
    found->second->PopulateFromMetadata(
        database_.get(), metadata_.GetOrCreateNamespaceEntry(namespace_id),
        data_maps_);
  }
  PurgeUnusedWrappersIfNeeded();
  found->second->Bind(std::move(request), process_id);

  size_t total_cache_size, unused_wrapper_count;
  GetStatistics(&total_cache_size, &unused_wrapper_count);
  // Track the total sessionStorage cache size.
  UMA_HISTOGRAM_COUNTS_100000("SessionStorageContext.CacheSizeInKB",
                              total_cache_size / 1024);
}
// Creates an empty (not yet populated) in-memory namespace entry; no-op when
// the namespace already exists.
void SessionStorageContextMojo::CreateSessionNamespace(
    const std::string& namespace_id) {
  if (namespaces_.find(namespace_id) != namespaces_.end())
    return;
  namespaces_.emplace(std::make_pair(
      namespace_id, CreateSessionStorageNamespaceImplMojo(namespace_id)));
}
// Registers |clone_namespace_id| as a clone of |namespace_id_to_clone|. The
// new namespace is only marked as waiting for clone population here; the
// actual data copy happens later. No-op when the clone id already exists.
void SessionStorageContextMojo::CloneSessionNamespace(
    const std::string& namespace_id_to_clone,
    const std::string& clone_namespace_id) {
  if (namespaces_.find(clone_namespace_id) != namespaces_.end())
    return;
  std::unique_ptr<SessionStorageNamespaceImplMojo> namespace_impl =
      CreateSessionStorageNamespaceImplMojo(clone_namespace_id);
  namespace_impl->SetWaitingForClonePopulation();
  namespaces_.emplace(
      std::make_pair(clone_namespace_id, std::move(namespace_impl)));
}
// Drops the in-memory namespace. When |should_persist| is false the backing
// data is also deleted from the database; when true (and no scavenge has run
// yet) the namespace is protected from the next scavenging pass.
void SessionStorageContextMojo::DeleteSessionNamespace(
    const std::string& namespace_id,
    bool should_persist) {
  // The object hierarchy uses iterators bound to the metadata object, so make
  // sure to delete the object hierarchy first.
  namespaces_.erase(namespace_id);

  if (!has_scavenged_ && should_persist)
    protected_namespaces_from_scavenge_.insert(namespace_id);

  if (!should_persist) {
    RunWhenConnected(
        base::BindOnce(&SessionStorageContextMojo::DoDatabaseDelete,
                       weak_ptr_factory_.GetWeakPtr(), namespace_id));
  }
}
void SessionStorageContextMojo::Flush() {
if (connection_state_ != CONNECTION_FINISHED) {
RunWhenConnected(base::BindOnce(&SessionStorageContextMojo::Flush,
weak_ptr_factory_.GetWeakPtr()));
return;
}
for (const auto& it : data_maps_)
it.second->level_db_wrapper()->ScheduleImmediateCommit();
}
// Runs |callback| with one SessionStorageUsageInfo per namespace/origin pair
// recorded in the metadata. Defers until the database connection finishes.
void SessionStorageContextMojo::GetStorageUsage(
    GetStorageUsageCallback callback) {
  if (connection_state_ != CONNECTION_FINISHED) {
    RunWhenConnected(base::BindOnce(&SessionStorageContextMojo::GetStorageUsage,
                                    weak_ptr_factory_.GetWeakPtr(),
                                    std::move(callback)));
    return;
  }
  const SessionStorageMetadata::NamespaceOriginMap& all_namespaces =
      metadata_.namespace_origin_map();
  std::vector<SessionStorageUsageInfo> result;
  result.reserve(all_namespaces.size());
  for (const auto& pair : all_namespaces) {
    for (const auto& origin_map_pair : pair.second) {
      SessionStorageUsageInfo info = {origin_map_pair.first.GetURL(),
                                      pair.first};
      result.push_back(std::move(info));
    }
  }
  std::move(callback).Run(std::move(result));
}
// Deletes all data for |origin| within |namespace_id|. Uses the loaded
// namespace object when present; otherwise deletes directly through the
// metadata and writes the batched delete to the database.
void SessionStorageContextMojo::DeleteStorage(const url::Origin& origin,
                                              const std::string& namespace_id) {
  if (connection_state_ != CONNECTION_FINISHED) {
    RunWhenConnected(base::BindOnce(&SessionStorageContextMojo::DeleteStorage,
                                    weak_ptr_factory_.GetWeakPtr(), origin,
                                    namespace_id));
    return;
  }
  auto found = namespaces_.find(namespace_id);
  if (found != namespaces_.end()) {
    found->second->RemoveOriginData(origin);
  } else {
    // If we don't have the namespace loaded, then we can delete it all
    // using the metadata.
    std::vector<leveldb::mojom::BatchedOperationPtr> delete_operations;
    metadata_.DeleteArea(namespace_id, origin, &delete_operations);
    database_->Write(std::move(delete_operations),
                     base::BindOnce(&SessionStorageContextMojo::OnCommitResult,
                                    base::Unretained(this)));
  }
}
// Flushes all uncommitted data and then deletes |this| (via
// OnShutdownComplete). Safe to call before the connection ever finished.
void SessionStorageContextMojo::ShutdownAndDelete() {
  DCHECK_NE(connection_state_, CONNECTION_SHUTDOWN);
  // Nothing to do if no connection to the database was ever finished.
  if (connection_state_ != CONNECTION_FINISHED) {
    connection_state_ = CONNECTION_SHUTDOWN;
    OnShutdownComplete(leveldb::mojom::DatabaseError::OK);
    return;
  }
  connection_state_ = CONNECTION_SHUTDOWN;
  // Flush any uncommitted data.
  for (const auto& it : data_maps_) {
    auto* wrapper = it.second->level_db_wrapper();
    LOCAL_HISTOGRAM_BOOLEAN(
        "SessionStorageContext.ShutdownAndDelete.MaybeDroppedChanges",
        wrapper->has_pending_load_tasks());
    wrapper->ScheduleImmediateCommit();
    // TODO(dmurph): Monitor the above histogram, and if dropping changes is
    // common then handle that here.
    wrapper->CancelAllPendingRequests();
  }
  OnShutdownComplete(leveldb::mojom::DatabaseError::OK);
}
// Aggressively reclaims memory: erases every namespace that has no mojo
// binding, purges unbound wrappers from the remaining namespaces, and records
// how many KiB were reclaimed.
void SessionStorageContextMojo::PurgeMemory() {
  size_t total_cache_size, unused_wrapper_count;
  GetStatistics(&total_cache_size, &unused_wrapper_count);
  // Purge all wrappers that don't have bindings.
  for (auto it = namespaces_.begin(); it != namespaces_.end();) {
    if (!it->second->IsBound()) {
      it = namespaces_.erase(it);
      continue;
    }
    it->second->PurgeUnboundWrappers();
    // BUG FIX: the iterator was never advanced for bound namespaces, so this
    // loop would spin forever on the first bound entry.
    ++it;
  }
  // Track the size of cache purged.
  size_t final_total_cache_size;
  GetStatistics(&final_total_cache_size, &unused_wrapper_count);
  size_t purged_size_kib = (total_cache_size - final_total_cache_size) / 1024;
  RecordSessionStorageCachePurgedHistogram(
      SessionStorageCachePurgeReason::kAggressivePurgeTriggered,
      purged_size_kib);
}
// Opportunistically purges unbound namespaces when the cache is oversized,
// there are too many areas, or we are on a low-end device. Records the purge
// reason and the number of KiB reclaimed.
void SessionStorageContextMojo::PurgeUnusedWrappersIfNeeded() {
  size_t total_cache_size, unused_wrapper_count;
  GetStatistics(&total_cache_size, &unused_wrapper_count);
  // Nothing to purge.
  if (!unused_wrapper_count)
    return;
  SessionStorageCachePurgeReason purge_reason =
      SessionStorageCachePurgeReason::kNotNeeded;
  if (total_cache_size > kMaxSessionStorageCacheSize)
    purge_reason = SessionStorageCachePurgeReason::kSizeLimitExceeded;
  else if (data_maps_.size() > kMaxSessionStorageAreaCount)
    purge_reason = SessionStorageCachePurgeReason::kAreaCountLimitExceeded;
  else if (is_low_end_device_)
    purge_reason = SessionStorageCachePurgeReason::kInactiveOnLowEndDevice;
  if (purge_reason == SessionStorageCachePurgeReason::kNotNeeded)
    return;
  // Purge all wrappers that don't have bindings.
  for (auto it = namespaces_.begin(); it != namespaces_.end();) {
    if (!it->second->IsBound()) {
      it = namespaces_.erase(it);
    } else {
      // BUG FIX: without this advance the loop never terminated once it hit a
      // bound namespace.
      ++it;
    }
  }
  size_t final_total_cache_size;
  GetStatistics(&final_total_cache_size, &unused_wrapper_count);
  size_t purged_size_kib = (total_cache_size - final_total_cache_size) / 1024;
  // BUG FIX: record the reason computed above; previously this always logged
  // kAggressivePurgeTriggered (which belongs to PurgeMemory()), leaving the
  // purge_reason computation dead.
  RecordSessionStorageCachePurgedHistogram(purge_reason, purged_size_kib);
}
// One-shot cleanup: deletes from the database every namespace that is neither
// loaded in memory nor protected (see DeleteSessionNamespace). Runs at most
// once per context lifetime; defers until connected. |done| may be null.
void SessionStorageContextMojo::ScavengeUnusedNamespaces(
    base::OnceClosure done) {
  if (has_scavenged_)
    return;
  if (connection_state_ != CONNECTION_FINISHED) {
    RunWhenConnected(
        base::BindOnce(&SessionStorageContextMojo::ScavengeUnusedNamespaces,
                       weak_ptr_factory_.GetWeakPtr(), std::move(done)));
    return;
  }
  has_scavenged_ = true;
  std::vector<std::string> namespaces_to_delete;
  for (const auto& metadata_namespace : metadata_.namespace_origin_map()) {
    const std::string& namespace_id = metadata_namespace.first;
    if (namespaces_.find(namespace_id) != namespaces_.end() ||
        protected_namespaces_from_scavenge_.find(namespace_id) !=
            protected_namespaces_from_scavenge_.end()) {
      continue;
    }
    namespaces_to_delete.push_back(namespace_id);
  }
  std::vector<leveldb::mojom::BatchedOperationPtr> delete_operations;
  for (const auto& namespace_id : namespaces_to_delete) {
    metadata_.DeleteNamespace(namespace_id, &delete_operations);
  }
  if (!delete_operations.empty()) {
    database_->Write(std::move(delete_operations),
                     base::BindOnce(&SessionStorageContextMojo::OnCommitResult,
                                    base::Unretained(this)));
  }
  // Protection is only needed until the single scavenge pass has run.
  protected_namespaces_from_scavenge_.clear();
  if (done)
    std::move(done).Run();
}
// trace_event memory-infra hook. Emits a leveldb ownership edge plus either a
// summary dump (background detail) or a per-area dump keyed by a sanitized
// origin URL. Returns true to indicate the dump succeeded.
bool SessionStorageContextMojo::OnMemoryDump(
    const base::trace_event::MemoryDumpArgs& args,
    base::trace_event::ProcessMemoryDump* pmd) {
  if (connection_state_ != CONNECTION_FINISHED)
    return true;
  std::string context_name =
      base::StringPrintf("site_storage/sessionstorage/0x%" PRIXPTR,
                         reinterpret_cast<uintptr_t>(this));
  // Account for leveldb memory usage, which actually lives in the file service.
  auto* global_dump = pmd->CreateSharedGlobalAllocatorDump(memory_dump_id_);
  // The size of the leveldb dump will be added by the leveldb service.
  auto* leveldb_mad = pmd->CreateAllocatorDump(context_name + "/leveldb");
  // Specifies that the current context is responsible for keeping memory alive.
  int kImportance = 2;
  pmd->AddOwnershipEdge(leveldb_mad->guid(), global_dump->guid(), kImportance);
  if (args.level_of_detail ==
      base::trace_event::MemoryDumpLevelOfDetail::BACKGROUND) {
    // Background dumps get only aggregate numbers, not per-origin detail.
    size_t total_cache_size, unused_wrapper_count;
    GetStatistics(&total_cache_size, &unused_wrapper_count);
    auto* mad = pmd->CreateAllocatorDump(context_name + "/cache_size");
    mad->AddScalar(base::trace_event::MemoryAllocatorDump::kNameSize,
                   base::trace_event::MemoryAllocatorDump::kUnitsBytes,
                   total_cache_size);
    mad->AddScalar("total_areas",
                   base::trace_event::MemoryAllocatorDump::kUnitsObjects,
                   data_maps_.size());
    return true;
  }
  for (const auto& it : data_maps_) {
    // Limit the url length to 50 and strip special characters.
    const auto& origin = it.second->map_data()->origin();
    std::string url = origin.Serialize().substr(0, 50);
    for (size_t index = 0; index < url.size(); ++index) {
      if (!std::isalnum(url[index]))
        url[index] = '_';
    }
    std::string wrapper_dump_name = base::StringPrintf(
        "%s/%s/0x%" PRIXPTR, context_name.c_str(), url.c_str(),
        reinterpret_cast<uintptr_t>(it.second->level_db_wrapper()));
    it.second->level_db_wrapper()->OnMemoryDump(wrapper_dump_name, pmd);
  }
  return true;
}
// Bookkeeping callbacks from SessionStorageDataMap: track each live data map
// by its database key prefix so it can be found/purged later.
void SessionStorageContextMojo::OnDataMapCreation(
    const std::vector<uint8_t>& map_prefix,
    SessionStorageDataMap* map) {
  DCHECK(data_maps_.find(map_prefix) == data_maps_.end());
  data_maps_.emplace(std::piecewise_construct,
                     std::forward_as_tuple(map_prefix),
                     std::forward_as_tuple(map));
}
// Counterpart of OnDataMapCreation; drops the tracking entry.
void SessionStorageContextMojo::OnDataMapDestruction(
    const std::vector<uint8_t>& map_prefix) {
  data_maps_.erase(map_prefix);
}
// Called after each database Write(). Tracks consecutive commit failures and,
// past a threshold, deletes and recreates the database once; after a second
// round of failures errors are ignored.
void SessionStorageContextMojo::OnCommitResult(
    leveldb::mojom::DatabaseError error) {
  DCHECK_EQ(connection_state_, CONNECTION_FINISHED);
  UMA_HISTOGRAM_ENUMERATION("SessionStorageContext.CommitResult",
                            leveldb::GetLevelDBStatusUMAValue(error),
                            leveldb_env::LEVELDB_STATUS_MAX);
  if (error == leveldb::mojom::DatabaseError::OK) {
    commit_error_count_ = 0;
    return;
  }
  commit_error_count_++;
  if (commit_error_count_ > kSessionStorageCommitErrorThreshold) {
    if (tried_to_recover_from_commit_errors_) {
      // We already tried to recover from a high commit error rate before, but
      // are still having problems: there isn't really anything left to try, so
      // just ignore errors.
      return;
    }
    tried_to_recover_from_commit_errors_ = true;
    // Deleting LevelDBWrappers in here could cause more commits (and commit
    // errors), but those commits won't reach OnCommitResult because the wrapper
    // will have been deleted before the commit finishes.
    DeleteAndRecreateDatabase(
        "SessionStorageContext.OpenResultAfterCommitErrors");
  }
}
// Test hook: injects a database and runs the normal post-open flow without
// touching the file service.
void SessionStorageContextMojo::SetDatabaseForTesting(
    leveldb::mojom::LevelDBDatabaseAssociatedPtr database) {
  DCHECK_EQ(connection_state_, NO_CONNECTION);
  connection_state_ = CONNECTION_IN_PROGRESS;
  database_ = std::move(database);
  OnDatabaseOpened(true, leveldb::mojom::DatabaseError::OK);
}
// Test hook: immediately flushes one origin's area in |namespace_id|, if both
// are currently loaded.
void SessionStorageContextMojo::FlushAreaForTesting(
    const std::string& namespace_id,
    const url::Origin& origin) {
  if (connection_state_ != CONNECTION_FINISHED)
    return;
  const auto& it = namespaces_.find(namespace_id);
  if (it == namespaces_.end())
    return;
  it->second->FlushOriginForTesting(origin);
}
// Allocates a new map entry in the metadata for |origin| under
// |namespace_entry|, persists the change, and returns the entry.
scoped_refptr<SessionStorageMetadata::MapData>
SessionStorageContextMojo::RegisterNewAreaMap(
    SessionStorageMetadata::NamespaceEntry namespace_entry,
    const url::Origin& origin) {
  std::vector<leveldb::mojom::BatchedOperationPtr> save_operations;
  scoped_refptr<SessionStorageMetadata::MapData> map_entry =
      metadata_.RegisterNewMap(namespace_entry, origin, &save_operations);
  database_->Write(std::move(save_operations),
                   base::BindOnce(&SessionStorageContextMojo::OnCommitResult,
                                  base::Unretained(this)));
  return map_entry;
}
// Records a shallow clone in the metadata, persists it, and populates the
// in-memory clone namespace (creating it if it was not pre-registered by
// CloneSessionNamespace).
void SessionStorageContextMojo::RegisterShallowClonedNamespace(
    SessionStorageMetadata::NamespaceEntry source_namespace_entry,
    const std::string& new_namespace_id,
    const SessionStorageNamespaceImplMojo::OriginAreas& clone_from_areas) {
  std::vector<leveldb::mojom::BatchedOperationPtr> save_operations;
  SessionStorageMetadata::NamespaceEntry namespace_entry =
      metadata_.GetOrCreateNamespaceEntry(new_namespace_id);
  metadata_.RegisterShallowClonedNamespace(source_namespace_entry,
                                           namespace_entry, &save_operations);
  database_->Write(std::move(save_operations),
                   base::BindOnce(&SessionStorageContextMojo::OnCommitResult,
                                  base::Unretained(this)));
  auto it = namespaces_.find(new_namespace_id);
  if (it != namespaces_.end()) {
    it->second->PopulateAsClone(database_.get(), namespace_entry,
                                clone_from_areas);
    return;
  }
  auto namespace_impl = CreateSessionStorageNamespaceImplMojo(new_namespace_id);
  namespace_impl->PopulateAsClone(database_.get(), namespace_entry,
                                  clone_from_areas);
  namespaces_.emplace(
      std::make_pair(new_namespace_id, std::move(namespace_impl)));
}
// Factory for namespace objects, wiring in the clone-registration and
// map-registration callbacks bound to this context.
std::unique_ptr<SessionStorageNamespaceImplMojo>
SessionStorageContextMojo::CreateSessionStorageNamespaceImplMojo(
    std::string namespace_id) {
  SessionStorageNamespaceImplMojo::RegisterShallowClonedNamespace
      add_namespace_callback = base::BindRepeating(
          &SessionStorageContextMojo::RegisterShallowClonedNamespace,
          base::Unretained(this));
  SessionStorageLevelDBWrapper::RegisterNewAreaMap map_id_callback =
      base::BindRepeating(&SessionStorageContextMojo::RegisterNewAreaMap,
                          base::Unretained(this));
  return std::make_unique<SessionStorageNamespaceImplMojo>(
      std::move(namespace_id), this, std::move(add_namespace_callback),
      std::move(map_id_callback));
}
// Deletes |namespace_id| from the database via the metadata. Must only run
// once the connection is finished (see DeleteSessionNamespace).
void SessionStorageContextMojo::DoDatabaseDelete(
    const std::string& namespace_id) {
  DCHECK_EQ(connection_state_, CONNECTION_FINISHED);
  std::vector<leveldb::mojom::BatchedOperationPtr> delete_operations;
  metadata_.DeleteNamespace(namespace_id, &delete_operations);
  database_->Write(std::move(delete_operations),
                   base::BindOnce(&SessionStorageContextMojo::OnCommitResult,
                                  base::Unretained(this)));
}
// Runs |callback| now if connected, otherwise queues it (kicking off the
// connection attempt first if none is in progress).
void SessionStorageContextMojo::RunWhenConnected(base::OnceClosure callback) {
  DCHECK_NE(connection_state_, CONNECTION_SHUTDOWN);
  // If we don't have a filesystem_connection_, we'll need to establish one.
  if (connection_state_ == NO_CONNECTION) {
    connection_state_ = CONNECTION_IN_PROGRESS;
    InitiateConnection();
  }
  if (connection_state_ == CONNECTION_IN_PROGRESS) {
    // Queue this OpenSessionStorage call for when we have a level db pointer.
    on_database_opened_callbacks_.push_back(std::move(callback));
    return;
  }
  std::move(callback).Run();
}
// Starts opening the backing store: a disk-backed database in the partition
// subdirectory when one was configured (and |in_memory_only| is false),
// otherwise an in-memory database.
void SessionStorageContextMojo::InitiateConnection(bool in_memory_only) {
  DCHECK_EQ(connection_state_, CONNECTION_IN_PROGRESS);
  // Unit tests might not always have a Connector, use in-memory only if that
  // happens.
  if (!connector_) {
    OnDatabaseOpened(false, leveldb::mojom::DatabaseError::OK);
    return;
  }
  if (partition_directory_path_ && !in_memory_only) {
    // We were given a subdirectory to write to. Get it and use a disk backed
    // database.
    connector_->BindInterface(file::mojom::kServiceName, &file_system_);
    file_system_->GetSubDirectory(
        partition_directory_path_.value().AsUTF8Unsafe(),
        MakeRequest(&partition_directory_),
        base::BindOnce(&SessionStorageContextMojo::OnDirectoryOpened,
                       weak_ptr_factory_.GetWeakPtr()));
  } else {
    // We were not given a subdirectory. Use a memory backed database.
    connector_->BindInterface(file::mojom::kServiceName, &leveldb_service_);
    leveldb_service_->OpenInMemory(
        memory_dump_id_, "SessionStorageDatabase", MakeRequest(&database_),
        base::BindOnce(&SessionStorageContextMojo::OnDatabaseOpened,
                       weak_ptr_factory_.GetWeakPtr(), true));
  }
}
// Continuation of the disk-backed path of InitiateConnection: once the
// partition subdirectory is available, opens the LevelDB database in it.
// On directory failure, proceeds without a database.
void SessionStorageContextMojo::OnDirectoryOpened(base::File::Error err) {
  if (err != base::File::FILE_OK) {
    // We failed to open the directory; continue with startup so that we create
    // the |level_db_wrappers_|.
    UMA_HISTOGRAM_ENUMERATION("SessionStorageContext.DirectoryOpenError", -err,
                              -base::File::FILE_ERROR_MAX);
    LogDatabaseOpenResult(OpenResult::kDirectoryOpenFailed);
    OnDatabaseOpened(false, leveldb::mojom::DatabaseError::OK);
    return;
  }
  // Now that we have a directory, connect to the LevelDB service and get our
  // database.
  connector_->BindInterface(file::mojom::kServiceName, &leveldb_service_);
  // We might still need to use the directory, so create a clone.
  filesystem::mojom::DirectoryPtr partition_directory_clone;
  partition_directory_->Clone(MakeRequest(&partition_directory_clone));
  leveldb_env::Options options;
  options.create_if_missing = true;
  options.max_open_files = 0;  // use minimum
  // Default write_buffer_size is 4 MB but that might leave a 3.999 MB
  // allocation in RAM from a log file recovery, so use a small buffer instead.
  options.write_buffer_size = 64 * 1024;
  options.block_cache = leveldb_chrome::GetSharedWebBlockCache();
  leveldb_service_->OpenWithOptions(
      std::move(options), std::move(partition_directory_clone), leveldb_name_,
      memory_dump_id_, MakeRequest(&database_),
      base::BindOnce(&SessionStorageContextMojo::OnDatabaseOpened,
                     weak_ptr_factory_.GetWeakPtr(), false));
}
// Handles the database-open result. On failure, records UMA (split by
// in-memory vs disk) and attempts delete-and-recreate; on success, reads the
// schema version before finishing the connection.
void SessionStorageContextMojo::OnDatabaseOpened(
    bool in_memory,
    leveldb::mojom::DatabaseError status) {
  if (status != leveldb::mojom::DatabaseError::OK) {
    UMA_HISTOGRAM_ENUMERATION("SessionStorageContext.DatabaseOpenError",
                              leveldb::GetLevelDBStatusUMAValue(status),
                              leveldb_env::LEVELDB_STATUS_MAX);
    if (in_memory) {
      UMA_HISTOGRAM_ENUMERATION(
          "SessionStorageContext.DatabaseOpenError.Memory",
          leveldb::GetLevelDBStatusUMAValue(status),
          leveldb_env::LEVELDB_STATUS_MAX);
    } else {
      UMA_HISTOGRAM_ENUMERATION("SessionStorageContext.DatabaseOpenError.Disk",
                                leveldb::GetLevelDBStatusUMAValue(status),
                                leveldb_env::LEVELDB_STATUS_MAX);
    }
    LogDatabaseOpenResult(OpenResult::kDatabaseOpenFailed);
    // If we failed to open the database, try to delete and recreate the
    // database, or ultimately fallback to an in-memory database.
    DeleteAndRecreateDatabase(
        "SessionStorageContext.OpenResultAfterOpenFailed");
    return;
  }
  // Verify DB schema version.
  if (database_) {
    database_->Get(
        std::vector<uint8_t>(
            SessionStorageMetadata::kDatabaseVersionBytes,
            std::end(SessionStorageMetadata::kDatabaseVersionBytes)),
        base::BindOnce(&SessionStorageContextMojo::OnGotDatabaseVersion,
                       weak_ptr_factory_.GetWeakPtr()));
    return;
  }
  OnConnectionFinished();
}
// Parses the stored schema version (NOT_FOUND means new/v0 database), then
// kicks off the two metadata reads (namespaces + next map id) behind a
// barrier that finishes the connection. Bad/unreadable versions trigger
// delete-and-recreate.
void SessionStorageContextMojo::OnGotDatabaseVersion(
    leveldb::mojom::DatabaseError status,
    const std::vector<uint8_t>& value) {
  std::vector<leveldb::mojom::BatchedOperationPtr> migration_operations;
  if (status == leveldb::mojom::DatabaseError::NOT_FOUND) {
    // New database, or schema v0. We must treat this as a schema v0 database.
    metadata_.ParseDatabaseVersion(base::nullopt, &migration_operations);
  } else if (status == leveldb::mojom::DatabaseError::OK) {
    if (!metadata_.ParseDatabaseVersion(value, &migration_operations)) {
      LogDatabaseOpenResult(OpenResult::kInvalidVersion);
      DeleteAndRecreateDatabase(
          "SessionStorageContext.OpenResultAfterInvalidVersion");
      return;
    }
    database_initialized_ = true;
  } else {
    // Other read error. Possibly database corruption.
    UMA_HISTOGRAM_ENUMERATION("SessionStorageContext.ReadVersionError",
                              leveldb::GetLevelDBStatusUMAValue(status),
                              leveldb_env::LEVELDB_STATUS_MAX);
    LogDatabaseOpenResult(OpenResult::kVersionReadError);
    DeleteAndRecreateDatabase(
        "SessionStorageContext.OpenResultAfterReadVersionError");
    return;
  }
  connection_state_ = FETCHING_METADATA;
  // Both reads below must complete before OnConnectionFinished runs.
  base::RepeatingClosure barrier = base::BarrierClosure(
      2, base::BindOnce(&SessionStorageContextMojo::OnConnectionFinished,
                        weak_ptr_factory_.GetWeakPtr()));
  std::vector<uint8_t> namespace_prefix(
      SessionStorageMetadata::kNamespacePrefixBytes,
      std::end(SessionStorageMetadata::kNamespacePrefixBytes));
  std::vector<uint8_t> next_map_id_key(
      SessionStorageMetadata::kNextMapIdKeyBytes,
      std::end(SessionStorageMetadata::kNextMapIdKeyBytes));
  database_->GetPrefixed(
      namespace_prefix,
      base::BindOnce(&SessionStorageContextMojo::OnGotNamespaces,
                     weak_ptr_factory_.GetWeakPtr(), barrier,
                     std::move(migration_operations)));
  database_->Get(next_map_id_key,
                 base::BindOnce(&SessionStorageContextMojo::OnGotNextMapId,
                                weak_ptr_factory_.GetWeakPtr(), barrier));
}
// Parses the namespace metadata rows; on read or parse failure triggers
// delete-and-recreate. Writes any accumulated schema-migration operations,
// then signals the barrier.
void SessionStorageContextMojo::OnGotNamespaces(
    base::OnceClosure done,
    std::vector<leveldb::mojom::BatchedOperationPtr> migration_operations,
    leveldb::mojom::DatabaseError status,
    std::vector<leveldb::mojom::KeyValuePtr> values) {
  DCHECK(connection_state_ == FETCHING_METADATA);
  bool parsing_failure =
      status == leveldb::mojom::DatabaseError::OK &&
      !metadata_.ParseNamespaces(std::move(values), &migration_operations);
  if (status != leveldb::mojom::DatabaseError::OK || parsing_failure) {
    UMA_HISTOGRAM_ENUMERATION("SessionStorageContext.ReadNamespacesError",
                              leveldb::GetLevelDBStatusUMAValue(status),
                              leveldb_env::LEVELDB_STATUS_MAX);
    LogDatabaseOpenResult(OpenResult::kNamespacesReadError);
    DeleteAndRecreateDatabase(
        "SessionStorageContext.OpenResultAfterReadNamespacesError");
    return;
  }
  // Write all of our migration operations if we have any.
  if (!migration_operations.empty()) {
    database_->Write(std::move(migration_operations),
                     base::BindOnce(&SessionStorageContextMojo::OnCommitResult,
                                    base::Unretained(this)));
  }
  std::move(done).Run();
}
// Parses the persisted next-map-id counter (NOT_FOUND is fine for a fresh
// database); other errors trigger delete-and-recreate. Signals the barrier on
// the success paths.
void SessionStorageContextMojo::OnGotNextMapId(
    base::OnceClosure done,
    leveldb::mojom::DatabaseError status,
    const std::vector<uint8_t>& map_id) {
  DCHECK(connection_state_ == FETCHING_METADATA);
  if (status == leveldb::mojom::DatabaseError::NOT_FOUND) {
    std::move(done).Run();
    return;
  }
  if (status == leveldb::mojom::DatabaseError::OK) {
    metadata_.ParseNextMapId(map_id);
    std::move(done).Run();
    return;
  }
  // Other read error. Possibly database corruption.
  UMA_HISTOGRAM_ENUMERATION("SessionStorageContext.ReadNextMapIdError",
                            leveldb::GetLevelDBStatusUMAValue(status),
                            leveldb_env::LEVELDB_STATUS_MAX);
  LogDatabaseOpenResult(OpenResult::kNamespacesReadError);
  DeleteAndRecreateDatabase(
      "SessionStorageContext.OpenResultAfterReadNextMapIdError");
}
// Final step of connection setup (with or without a working database).
// Releases file-service handles when running databaseless, then drains the
// queued RunWhenConnected callbacks.
void SessionStorageContextMojo::OnConnectionFinished() {
  DCHECK(!database_ || connection_state_ == FETCHING_METADATA);
  if (!database_) {
    partition_directory_.reset();
    file_system_.reset();
    leveldb_service_.reset();
  }
  // If connection was opened successfully, reset tried_to_recreate_during_open_
  // to enable recreating the database on future errors.
  if (database_)
    tried_to_recreate_during_open_ = false;
  open_result_histogram_ = nullptr;
  // |database_| should be known to either be valid or invalid by now. Run our
  // delayed bindings.
  connection_state_ = CONNECTION_FINISHED;
  std::vector<base::OnceClosure> callbacks;
  std::swap(callbacks, on_database_opened_callbacks_);
  for (size_t i = 0; i < callbacks.size(); ++i)
    std::move(callbacks[i]).Run();
}
// Recovery path: tears down all in-memory state, destroys the on-disk
// database, and reopens it. A second failure falls back to an in-memory
// database; a third gives up and runs without any database.
// |histogram_name| labels the UMA histogram for the eventual reopen result.
void SessionStorageContextMojo::DeleteAndRecreateDatabase(
    const char* histogram_name) {
  // We're about to set database_ to null, so delete the LevelDBWrappers
  // that might still be using the old database.
  for (const auto& it : data_maps_)
    it.second->level_db_wrapper()->CancelAllPendingRequests();
  for (const auto& namespace_pair : namespaces_) {
    namespace_pair.second->Reset();
  }
  DCHECK(data_maps_.empty());
  // Reset state to be in process of connecting. This will cause requests for
  // LevelDBWrappers to be queued until the connection is complete.
  connection_state_ = CONNECTION_IN_PROGRESS;
  commit_error_count_ = 0;
  database_ = nullptr;
  open_result_histogram_ = histogram_name;
  bool recreate_in_memory = false;
  // If tried to recreate database on disk already, try again but this time
  // in memory.
  if (tried_to_recreate_during_open_ && !!partition_directory_path_) {
    recreate_in_memory = true;
  } else if (tried_to_recreate_during_open_) {
    // Give up completely, run without any database.
    OnConnectionFinished();
    return;
  }
  tried_to_recreate_during_open_ = true;
  // Unit tests might not have a bound file_service_, in which case there is
  // nothing to retry.
  if (!file_system_.is_bound()) {
    OnConnectionFinished();
    return;
  }
  protected_namespaces_from_scavenge_.clear();
  // Destroy database, and try again.
  if (partition_directory_.is_bound()) {
    leveldb_service_->Destroy(
        std::move(partition_directory_), leveldb_name_,
        base::BindOnce(&SessionStorageContextMojo::OnDBDestroyed,
                       weak_ptr_factory_.GetWeakPtr(), recreate_in_memory));
  } else {
    // No directory, so nothing to destroy. Retrying to recreate will probably
    // fail, but try anyway.
    InitiateConnection(recreate_in_memory);
  }
}
// Continuation of DeleteAndRecreateDatabase after the destroy completes.
void SessionStorageContextMojo::OnDBDestroyed(
    bool recreate_in_memory,
    leveldb::mojom::DatabaseError status) {
  UMA_HISTOGRAM_ENUMERATION("SessionStorageContext.DestroyDBResult",
                            leveldb::GetLevelDBStatusUMAValue(status),
                            leveldb_env::LEVELDB_STATUS_MAX);
  // We're essentially ignoring the status here. Even if destroying failed we
  // still want to go ahead and try to recreate.
  InitiateConnection(recreate_in_memory);
}
// Terminal step of ShutdownAndDelete(); the context owns its own lifetime.
void SessionStorageContextMojo::OnShutdownComplete(
    leveldb::mojom::DatabaseError error) {
  delete this;
}
// Sums per-wrapper memory use and counts wrappers with zero bindings.
// Both out-parameters must be non-null; they are always written.
void SessionStorageContextMojo::GetStatistics(size_t* total_cache_size,
                                              size_t* unused_wrapper_count) {
  *total_cache_size = 0;
  *unused_wrapper_count = 0;
  for (const auto& it : data_maps_) {
    *total_cache_size += it.second->level_db_wrapper()->memory_used();
    if (it.second->binding_count() == 0)
      (*unused_wrapper_count)++;
  }
}
// Records the open result to UMA; failures are also logged, and when a
// recreate attempt is in flight the result goes to the histogram named by
// |open_result_histogram_|.
void SessionStorageContextMojo::LogDatabaseOpenResult(OpenResult result) {
  if (result != OpenResult::kSuccess) {
    // BUG FIX: corrected "openning" typo in the logged message.
    LOG(ERROR) << "Got error when opening: " << static_cast<int>(result);
    UMA_HISTOGRAM_ENUMERATION("SessionStorageContext.OpenError", result);
  }
  if (open_result_histogram_) {
    base::UmaHistogramEnumeration(open_result_histogram_, result);
  }
}
} // namespace content
| 13,081 |
513 | # Copyright (c) 2020 Yubico AB
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from . import Version, TRANSPORT, Connection, CommandError, ApplicationNotAvailableError
from time import time
from enum import Enum, IntEnum, unique
from typing import Tuple
import abc
import struct
class SmartCardConnection(Connection, metaclass=abc.ABCMeta):
    """Abstract low-level smart card (CCID) connection used by SmartCardProtocol."""

    @property
    @abc.abstractmethod
    def transport(self) -> TRANSPORT:
        """Get the transport type of the connection (USB or NFC)"""

    @abc.abstractmethod
    def send_and_receive(self, apdu: bytes) -> Tuple[bytes, int]:
        """Sends a command APDU and returns the response"""
class ApduError(CommandError):
    """Thrown when an APDU response has the wrong SW code"""

    def __init__(self, data: bytes, sw: int):
        self.sw = sw
        self.data = data

    def __str__(self):
        return "APDU error: SW=0x{:04x}".format(self.sw)
@unique
class ApduFormat(str, Enum):
    """APDU encoding format"""

    # Single-byte length fields, data capped at 255 bytes per APDU.
    SHORT = "short"
    # Two-byte length fields, allowing larger payloads in one APDU.
    EXTENDED = "extended"
@unique
class SW(IntEnum):
    # ISO 7816-4 status words (SW1SW2) returned in APDU responses.
    NO_INPUT_DATA = 0x6285
    VERIFY_FAIL_NO_RETRY = 0x63C0
    WRONG_LENGTH = 0x6700
    SECURITY_CONDITION_NOT_SATISFIED = 0x6982
    AUTH_METHOD_BLOCKED = 0x6983
    DATA_INVALID = 0x6984
    CONDITIONS_NOT_SATISFIED = 0x6985
    COMMAND_NOT_ALLOWED = 0x6986
    INCORRECT_PARAMETERS = 0x6A80
    FUNCTION_NOT_SUPPORTED = 0x6A81
    FILE_NOT_FOUND = 0x6A82
    NO_SPACE = 0x6A84
    REFERENCE_DATA_NOT_FOUND = 0x6A88
    WRONG_PARAMETERS_P1P2 = 0x6B00
    INVALID_INSTRUCTION = 0x6D00
    COMMAND_ABORTED = 0x6F00
    OK = 0x9000
# ISO 7816-4 SELECT command constants.
INS_SELECT = 0xA4
P1_SELECT = 0x04
P2_SELECT = 0x00
# Default instruction used to fetch remaining response data (chaining).
INS_SEND_REMAINING = 0xC0
# SW1 value signaling that more response bytes are available.
SW1_HAS_MORE_DATA = 0x61
# Maximum data length per short-format APDU.
SHORT_APDU_MAX_CHUNK = 0xFF
def _encode_short_apdu(cla, ins, p1, p2, data):
return struct.pack(">BBBBB", cla, ins, p1, p2, len(data)) + data
def _encode_extended_apdu(cla, ins, p1, p2, data):
return struct.pack(">BBBBBH", cla, ins, p1, p2, 0, len(data)) + data
class SmartCardProtocol:
    """APDU exchange over a SmartCardConnection.

    Handles SHORT and EXTENDED encodings, command chaining for oversized
    payloads, response chaining via SW1=0x61, and a touch workaround for
    YubiKey firmware 4.2.0-4.2.6 over USB.
    """

    def __init__(
        self,
        smartcard_connection: SmartCardConnection,
        ins_send_remaining: int = INS_SEND_REMAINING,
    ):
        self.apdu_format = ApduFormat.SHORT
        self.connection = smartcard_connection
        self._ins_send_remaining = ins_send_remaining
        self._touch_workaround = False
        # Timestamp of the last "long" (>54 byte) response; 0.0 when inactive.
        self._last_long_resp = 0.0

    def close(self) -> None:
        """Closes the underlying connection."""
        self.connection.close()

    def enable_touch_workaround(self, version: Version) -> None:
        """Enables the workaround for YubiKey 4.2.0-4.2.6 over USB only."""
        self._touch_workaround = self.connection.transport == TRANSPORT.USB and (
            (4, 2, 0) <= version <= (4, 2, 6)
        )

    def select(self, aid: bytes) -> bytes:
        """SELECTs the application identified by |aid|, returning its response.

        Raises ApplicationNotAvailableError when the applet is missing or
        selection is unsupported; other ApduErrors propagate unchanged.
        """
        try:
            return self.send_apdu(0, INS_SELECT, P1_SELECT, P2_SELECT, aid)
        except ApduError as e:
            if e.sw in (
                SW.FILE_NOT_FOUND,
                SW.INVALID_INSTRUCTION,
                SW.WRONG_PARAMETERS_P1P2,
            ):
                raise ApplicationNotAvailableError()
            raise

    def send_apdu(
        self, cla: int, ins: int, p1: int, p2: int, data: bytes = b""
    ) -> bytes:
        """Sends one logical command, transparently chaining both directions.

        Returns the full concatenated response body, raising ApduError on any
        status word other than SW.OK.
        """
        if (
            self._touch_workaround
            and self._last_long_resp > 0
            and time() - self._last_long_resp < 2
        ):
            self.connection.send_and_receive(
                _encode_short_apdu(0, 0, 0, 0, b"")
            )  # Dummy APDU, returns error
            self._last_long_resp = 0
        if self.apdu_format is ApduFormat.SHORT:
            # Command chaining: send 255-byte chunks flagged with CLA|0x10.
            while len(data) > SHORT_APDU_MAX_CHUNK:
                chunk, data = data[:SHORT_APDU_MAX_CHUNK], data[SHORT_APDU_MAX_CHUNK:]
                response, sw = self.connection.send_and_receive(
                    _encode_short_apdu(0x10 | cla, ins, p1, p2, chunk)
                )
                if sw != SW.OK:
                    raise ApduError(response, sw)
            response, sw = self.connection.send_and_receive(
                _encode_short_apdu(cla, ins, p1, p2, data)
            )
            get_data = _encode_short_apdu(0, self._ins_send_remaining, 0, 0, b"")
        elif self.apdu_format is ApduFormat.EXTENDED:
            response, sw = self.connection.send_and_receive(
                _encode_extended_apdu(cla, ins, p1, p2, data)
            )
            get_data = _encode_extended_apdu(0, self._ins_send_remaining, 0, 0, b"")
        else:
            raise TypeError("Invalid ApduFormat set")
        # Read chained response
        buf = b""
        while sw >> 8 == SW1_HAS_MORE_DATA:
            buf += response
            response, sw = self.connection.send_and_receive(get_data)
        if sw != SW.OK:
            raise ApduError(response, sw)
        buf += response
        if self._touch_workaround and len(buf) > 54:
            # Workaround state: remember when we last saw a long response.
            self._last_long_resp = time()
        else:
            self._last_long_resp = 0
        return buf
| 2,772 |
2,381 | package io.jenkins.plugins.casc;
/**
 * Specialization of {@link ConfiguratorException} carrying only a message;
 * presumably raised when conflicting configuration is detected (TODO confirm
 * against callers).
 */
public class ConfiguratorConflictException extends ConfiguratorException {
    public ConfiguratorConflictException(String message) {
        super(message);
    }
}
| 63 |
5,535 | // Greenplum Database
// Copyright (C) 2016 VMware, Inc. or its affiliates.
#ifndef GPOPT_CColConstraintsHashMapper_H
#define GPOPT_CColConstraintsHashMapper_H
#include "gpos/memory/CMemoryPool.h"
#include "gpopt/base/CConstraint.h"
#include "gpopt/base/IColConstraintsMapper.h"
namespace gpopt
{
// IColConstraintsMapper implementation backed by a column-ref-to-constraint
// hash map built from |pdrgPcnstr| at construction time.
class CColConstraintsHashMapper : public IColConstraintsMapper
{
public:
	CColConstraintsHashMapper(CMemoryPool *mp, CConstraintArray *pdrgPcnstr);
	// Returns the constraints referencing |colref|.
	CConstraintArray *PdrgPcnstrLookup(CColRef *colref) override;
	~CColConstraintsHashMapper() override;
private:
	// Owned lookup table from column ref to its constraint array.
	ColRefToConstraintArrayMap *m_phmColConstr;
};
} // namespace gpopt
#endif //GPOPT_CColConstraintsHashMapper_H
| 266 |
5,865 | <gh_stars>1000+
/*
* Copyright 2021 ThoughtWorks, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.thoughtworks.go.server.transaction;
import com.thoughtworks.go.server.cache.GoCache;
import org.springframework.transaction.TransactionStatus;
import static org.hamcrest.Matchers.nullValue;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.MatcherAssert.assertThat;
/**
 * Test helper for asserting how {@link GoCache} behaves around a transaction:
 * a value put before the transaction must be readable at transaction start and
 * must still be present after the transaction completes. The value observed
 * inside the transaction, after the action under test runs, is returned for
 * further assertions.
 */
class TransactionCacheAssertionUtil {
    // Made final: both collaborators are assigned once in the constructor.
    private final GoCache goCache;
    private final TransactionTemplate transactionTemplate;

    public TransactionCacheAssertionUtil(GoCache goCache, TransactionTemplate transactionTemplate) {
        this.goCache = goCache;
        this.transactionTemplate = transactionTemplate;
    }

    /**
     * Puts "loser" -> "boozer" in the cache, runs {@code inTxn} inside a
     * transaction, and returns the cached value observed after the action ran
     * (still inside the transaction). Asserts the value is visible at
     * transaction start and restored once the transaction finishes.
     */
    public String doInTxnWithCachePut(final DoInTxn inTxn) {
        goCache.put("loser", "boozer");
        final String[] cachedValueBeforeAndAfter = new String[2];
        transactionTemplate.execute(new org.springframework.transaction.support.TransactionCallbackWithoutResult() {
            @Override protected void doInTransactionWithoutResult(TransactionStatus status) {
                cachedValueBeforeAndAfter[0] = (String) goCache.get("loser");
                inTxn.invoke();
                cachedValueBeforeAndAfter[1] = (String) goCache.get("loser");
            }
        });
        assertThat(goCache.get("loser"), is("boozer"));
        assertThat(cachedValueBeforeAndAfter[0], is("boozer"));
        return cachedValueBeforeAndAfter[1];
    }

    /** Asserts that {@code inTxn} leaves the cache entry invisible within the transaction. */
    public void assertCacheBehaviourInTxn(final DoInTxn inTxn) {
        assertThat(doInTxnWithCachePut(inTxn), is(nullValue()));
    }

    // Removed redundant modifiers: a member interface is implicitly static and
    // its methods implicitly public.
    @FunctionalInterface
    public interface DoInTxn {
        void invoke();
    }
}
| 753 |
14,668 | #!/usr/bin/env vpython3
# Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import unittest
import six
if six.PY3:
import unittest.mock as mock
from blinkpy.web_tests.stale_expectation_removal import constants
from blinkpy.web_tests.stale_expectation_removal import data_types
from blinkpy.web_tests.stale_expectation_removal import queries
from blinkpy.web_tests.stale_expectation_removal import unittest_utils as wt_uu
from unexpected_passes_common import constants as common_constants
from unexpected_passes_common import data_types as common_data_types
class ConvertJsonResultToResultObjectUnittest(unittest.TestCase):
    """Tests for WebTestBigQueryQuerier._ConvertJsonResultToResultObject."""

    def setUp(self):
        # Swap in the web-test Result implementation so the duration/timeout
        # fields parsed below exist on the produced result objects.
        common_data_types.SetResultImplementation(data_types.WebTestResult)

    def tearDown(self):
        # Restore the default implementation so other test modules are unaffected.
        common_data_types.SetResultImplementation(common_data_types.BaseResult)

    def testDurationIsSet(self):
        """Tests that the duration is set appropriately on the result."""
        json_result = {
            'id': 'build-1234',
            'test_id': 'ninja://:blink_web_tests/test',
            'status': 'PASS',
            'typ_tags': ['debug'],
            'step_name': 'step_name',
            'duration': '10',
            'timeout': '3',
        }
        querier = wt_uu.CreateGenericWebTestQuerier()
        result = querier._ConvertJsonResultToResultObject(json_result)
        # duration (10) exceeds the configured timeout (3), so the result is
        # expected to be flagged slow -- exact threshold lives in
        # data_types.WebTestResult (TODO: confirm semantics there).
        self.assertTrue(result.is_slow_result)
        self.assertEqual(result._duration, 10)
class GetRelevantExpectationFilesForQueryResultUnittest(unittest.TestCase):
    """Tests for WebTestBigQueryQuerier._GetRelevantExpectationFilesForQueryResult."""

    def testNoFiles(self):
        """Tests that no reported expectation files are handled properly."""
        query_result = {}
        querier = wt_uu.CreateGenericWebTestQuerier()
        self.assertEqual(
            querier._GetRelevantExpectationFilesForQueryResult(query_result),
            [])

    def testAbsolutePath(self):
        """Tests that absolute paths are ignored."""
        # Both POSIX- and Windows-style absolute paths must be dropped.
        query_result = {
            'expectation_files': ['/posix/path', '/c:/windows/path']
        }
        querier = wt_uu.CreateGenericWebTestQuerier()
        self.assertEqual(
            querier._GetRelevantExpectationFilesForQueryResult(query_result),
            [])

    def testRelativePath(self):
        """Tests that relative paths are properly reconstructed."""
        query_result = {
            'expectation_files':
            ['TestExpectations', 'flag-specific/someflag']
        }
        querier = wt_uu.CreateGenericWebTestQuerier()
        # Relative paths are expected to be re-rooted under the web test root.
        expected_files = [
            os.path.join(constants.WEB_TEST_ROOT_DIR, 'TestExpectations'),
            os.path.join(constants.WEB_TEST_ROOT_DIR, 'flag-specific',
                         'someflag'),
        ]
        self.assertEqual(
            querier._GetRelevantExpectationFilesForQueryResult(query_result),
            expected_files)
@unittest.skipIf(six.PY2, 'Script and unittest are Python 3-only')
class GetQueryGeneratorForBuilderUnittest(unittest.TestCase):
    """Tests for WebTestBigQueryQuerier._GetQueryGeneratorForBuilder."""

    def setUp(self):
        # Stub out the real BigQuery invocation; each test controls its output.
        self._query_patcher = mock.patch(
            'blinkpy.web_tests.stale_expectation_removal.queries.'
            'WebTestBigQueryQuerier._RunBigQueryCommandsForJsonOutput')
        self._query_mock = self._query_patcher.start()
        self.addCleanup(self._query_patcher.stop)

    def testNoLargeQueryMode(self):
        """Tests that the expected clause is returned in normal mode."""
        querier = wt_uu.CreateGenericWebTestQuerier()
        query_generator = querier._GetQueryGeneratorForBuilder('', '')
        # Normal mode: a single empty filter clause and no BigQuery traffic.
        self.assertEqual(len(query_generator.GetClauses()), 1)
        self.assertEqual(query_generator.GetClauses()[0], '')
        self.assertIsInstance(query_generator,
                              queries.WebTestFixedQueryGenerator)
        self._query_mock.assert_not_called()

    def testLargeQueryModeNoTests(self):
        """Tests that a special value is returned if no tests are found."""
        querier = wt_uu.CreateGenericWebTestQuerier(large_query_mode=True)
        self._query_mock.return_value = []
        query_generator = querier._GetQueryGeneratorForBuilder('', '')
        # None signals the caller to skip this builder entirely.
        self.assertIsNone(query_generator)
        self._query_mock.assert_called_once()

    def testLargeQueryModeFoundTests(self):
        """Tests that a clause containing found tests is returned."""
        querier = wt_uu.CreateGenericWebTestQuerier(large_query_mode=True)
        self._query_mock.return_value = [
            {
                'test_id': 'foo_test',
            },
            {
                'test_id': 'bar_test',
            },
        ]
        query_generator = querier._GetQueryGeneratorForBuilder('', '')
        self.assertEqual(query_generator.GetClauses(),
                         ['AND test_id IN UNNEST(["foo_test", "bar_test"])'])
        self.assertIsInstance(query_generator,
                              queries.WebTestSplitQueryGenerator)
@unittest.skipIf(six.PY2, 'Script and unittest are Python 3-only')
class GeneratedQueryUnittest(unittest.TestCase):
    """Golden-string tests for the CI/try BigQuery templates in queries.py.

    The expected strings below must match the templates byte-for-byte after
    substituting 'tfc' for the test filter clause placeholder.
    """
    maxDiff = None

    def testCi(self):
        """Tests that the generated CI query is as expected."""
        expected_query = """\
WITH
builds AS (
SELECT
DISTINCT exported.id build_inv_id,
partition_time
FROM
`chrome-luci-data.chromium.blink_web_tests_ci_test_results` tr
WHERE
exported.realm = "chromium:ci"
AND STRUCT("builder", @builder_name) IN UNNEST(variant)
ORDER BY partition_time DESC
LIMIT @num_builds
),
results AS (
SELECT
exported.id,
test_id,
status,
duration,
(
SELECT value
FROM tr.tags
WHERE key = "step_name") as step_name,
(
SELECT value
FROM tr.tags
WHERE key = "web_tests_base_timeout") as timeout,
ARRAY(
SELECT value
FROM tr.tags
WHERE key = "typ_tag") as typ_tags,
ARRAY(
SELECT value
FROM tr.tags
WHERE key = "raw_typ_expectation") as typ_expectations,
ARRAY(
SELECT value
FROM tr.tags
WHERE key = "web_tests_used_expectations_file") as expectation_files
FROM
`chrome-luci-data.chromium.blink_web_tests_ci_test_results` tr,
builds b
WHERE
exported.id = build_inv_id
AND status != "SKIP"
tfc
)
SELECT *
FROM results
WHERE
"Failure" IN UNNEST(typ_expectations)
OR "Crash" IN UNNEST(typ_expectations)
OR "Timeout" IN UNNEST(typ_expectations)
"""
        self.assertEqual(
            queries.CI_BQ_QUERY_TEMPLATE.format(test_filter_clause='tfc'),
            expected_query)

    def testTry(self):
        """Tests that the generated try query is as expected."""
        # The try query additionally restricts builds to submitted CQ attempts
        # from the last 30 days.
        expected_query = """\
WITH
submitted_builds AS (
SELECT
CONCAT("build-", CAST(unnested_builds.id AS STRING)) as id
FROM
`commit-queue.chromium.attempts`,
UNNEST(builds) as unnested_builds,
UNNEST(gerrit_changes) as unnested_changes
WHERE
unnested_builds.host = "cr-buildbucket.appspot.com"
AND unnested_changes.submit_status = "SUCCESS"
AND start_time > TIMESTAMP_SUB(CURRENT_TIMESTAMP(),
INTERVAL 30 DAY)
),
builds AS (
SELECT
DISTINCT exported.id build_inv_id,
partition_time
FROM
`chrome-luci-data.chromium.blink_web_tests_try_test_results` tr,
submitted_builds sb
WHERE
exported.realm = "chromium:try"
AND STRUCT("builder", @builder_name) IN UNNEST(variant)
AND exported.id = sb.id
ORDER BY partition_time DESC
LIMIT @num_builds
),
results AS (
SELECT
exported.id,
test_id,
status,
duration,
(
SELECT value
FROM tr.tags
WHERE key = "step_name") as step_name,
(
SELECT value
FROM tr.tags
WHERE key = "web_tests_base_timeout") as timeout,
ARRAY(
SELECT value
FROM tr.tags
WHERE key = "typ_tag") as typ_tags,
ARRAY(
SELECT value
FROM tr.tags
WHERE key = "raw_typ_expectation") as typ_expectations,
ARRAY(
SELECT value
FROM tr.tags
WHERE key = "web_tests_used_expectations_file") as expectation_files
FROM
`chrome-luci-data.chromium.blink_web_tests_try_test_results` tr,
builds b
WHERE
exported.id = build_inv_id
AND status != "SKIP"
tfc
)
SELECT *
FROM results
WHERE
"Failure" IN UNNEST(typ_expectations)
OR "Crash" IN UNNEST(typ_expectations)
OR "Timeout" IN UNNEST(typ_expectations)
"""
        self.assertEqual(
            queries.TRY_BQ_QUERY_TEMPLATE.format(test_filter_clause='tfc'),
            expected_query)
@unittest.skipIf(six.PY2, 'Script and unittest are Python 3-only')
class QueryGeneratorImplUnittest(unittest.TestCase):
    """Tests that QueryGeneratorImpl picks the right template per builder type."""

    def testCi(self):
        """Tests that CI builders use the correct query."""
        q = queries.QueryGeneratorImpl(['tfc'],
                                       common_constants.BuilderTypes.CI)
        # One clause in -> one query out.
        self.assertEqual(len(q), 1)
        expected_query = queries.CI_BQ_QUERY_TEMPLATE.format(
            test_filter_clause='tfc')
        self.assertEqual(q[0], expected_query)

    def testTry(self):
        """Tests that try builders use the correct query."""
        q = queries.QueryGeneratorImpl(['tfc'],
                                       common_constants.BuilderTypes.TRY)
        self.assertEqual(len(q), 1)
        expected_query = queries.TRY_BQ_QUERY_TEMPLATE.format(
            test_filter_clause='tfc')
        self.assertEqual(q[0], expected_query)

    def testUnknownBuilderType(self):
        """Tests that an exception is raised for unknown builder types."""
        with self.assertRaises(RuntimeError):
            queries.QueryGeneratorImpl(['tfc'], 'unknown')
class StripPrefixFromTestIdUnittest(unittest.TestCase):
    """Tests for WebTestBigQueryQuerier._StripPrefixFromTestId."""

    def testUnknownPrefix(self):
        """Tests that an error is raised if an unknown prefix is found."""
        querier = wt_uu.CreateGenericWebTestQuerier()
        with self.assertRaises(RuntimeError):
            querier._StripPrefixFromTestId('foobar')

    def testKnownPrefixes(self):
        """Tests that all known prefixes are properly stripped."""
        querier = wt_uu.CreateGenericWebTestQuerier()
        # One synthetic test id per known prefix; stripping must leave the
        # bare test name behind.
        test_ids = [prefix + 'a' for prefix in queries.KNOWN_TEST_ID_PREFIXES]
        for t in test_ids:
            stripped = querier._StripPrefixFromTestId(t)
            self.assertEqual(stripped, 'a')
if __name__ == '__main__':
    # Allow running this file directly; verbosity=2 prints one line per test.
    unittest.main(verbosity=2)
| 4,741 |
841 | package org.jboss.resteasy.test.asynch;
import java.util.HashMap;
import java.util.Map;
import jakarta.servlet.http.HttpServletResponse;
import jakarta.ws.rs.client.Client;
import jakarta.ws.rs.client.ClientBuilder;
import jakarta.ws.rs.core.HttpHeaders;
import jakarta.ws.rs.core.Response;
import org.jboss.arquillian.container.test.api.Deployment;
import org.jboss.arquillian.container.test.api.RunAsClient;
import org.jboss.arquillian.junit.Arquillian;
import org.jboss.resteasy.test.asynch.resource.AsynchCounterResource;
import org.jboss.resteasy.utils.PortProviderUtil;
import org.jboss.resteasy.utils.TestUtil;
import org.jboss.shrinkwrap.api.Archive;
import org.jboss.shrinkwrap.api.spec.WebArchive;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.runner.RunWith;
/**
* @tpSubChapter Asynchronous RESTEasy
* @tpChapter Integration tests
* @tpTestCaseDetails Tests use of SecureRandom to generate location job ids, RESTEASY-1483
* @tpSince RESTEasy 3.1.0.Final
*/
@RunWith(Arquillian.class)
@RunAsClient
public class AsynchCounterTest {

    private static Client client;

    @BeforeClass
    public static void setup() {
        client = ClientBuilder.newClient();
    }

    @AfterClass
    public static void close() {
        client.close();
    }

    @Deployment
    public static Archive<?> deploy() {
        WebArchive war = TestUtil.prepareArchive(AsynchCounterTest.class.getSimpleName());
        Map<String, String> contextParam = new HashMap<>();
        // Enable the asynchronous job service and force the SecureRandom used
        // for job ids to be refreshed after two uses (exercises RESTEASY-1483).
        contextParam.put("resteasy.async.job.service.enabled", "true");
        contextParam.put("resteasy.secure.random.max.use", "2");
        return TestUtil.finishContainerPrepare(war, contextParam, AsynchCounterResource.class);
    }

    private String generateURL(String path) {
        return PortProviderUtil.generateURL(path, AsynchCounterTest.class.getSimpleName());
    }

    /** Extracts the numeric job id from a job URL of the form {@code ...-<id>}. */
    private static int extractJobId(String jobUrl) {
        return Integer.parseInt(jobUrl.substring(jobUrl.lastIndexOf('-') + 1));
    }

    /**
     * @tpTestDetails Test that job ids are no longer consecutive
     * @tpInfo RESTEASY-1483
     * @tpSince RESTEasy 3.1.0.Final
     */
    @Test
    public void testAsynchCounter() throws Exception {
        Response response = client.target(generateURL("?asynch=true")).request().get();
        Assert.assertEquals(HttpServletResponse.SC_ACCEPTED, response.getStatus());
        int job1 = extractJobId(response.getHeaderString(HttpHeaders.LOCATION));
        response.close();

        response = client.target(generateURL("?asynch=true")).request().get();
        Assert.assertEquals(HttpServletResponse.SC_ACCEPTED, response.getStatus());
        int job2 = extractJobId(response.getHeaderString(HttpHeaders.LOCATION));
        response.close();

        // With a seeded/refreshed SecureRandom, consecutive requests must not
        // produce sequential job ids.
        Assert.assertTrue("job ids should not be consecutive (RESTEASY-1483)", job2 != job1 + 1);
    }
}
| 1,106 |
1,031 | package at.wirecube.additiveanimations.additive_animator.animation_set.view;
import android.view.View;
import androidx.annotation.NonNull;
import androidx.annotation.Nullable;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import at.wirecube.additiveanimations.additive_animator.animation_set.AnimationAction;
import at.wirecube.additiveanimations.additive_animator.animation_set.AnimationState;
/**
 * An {@link AnimationState} for {@link View} targets: bundles a list of
 * {@link ViewAnimation}s together with optional actions to run when the
 * state's animations start and end.
 *
 * <p>The many constructor overloads only differ in how the animations and
 * optional start/end actions are supplied; they all delegate to the
 * canonical (list, startAction, endAction) constructor.
 */
public class ViewAnimationState extends AnimationState<View> {

    @NonNull
    private final List<AnimationAction.Animation<View>> mAnimations = new ArrayList<>();

    @Nullable
    private final AnimationStartAction<View> mStartAction;

    @Nullable
    private final AnimationEndAction<View> mEndAction;

    public ViewAnimationState(@NonNull List<ViewAnimation> animations) {
        this(animations, null, null);
    }

    public ViewAnimationState(@NonNull ViewAnimation... animations) {
        this(Arrays.asList(animations), null, null);
    }

    public ViewAnimationState(@NonNull List<ViewAnimation> animations, @Nullable AnimationStartAction<View> startAction) {
        this(animations, startAction, null);
    }

    public ViewAnimationState(@NonNull ViewAnimation animation, @Nullable AnimationStartAction<View> startAction) {
        this(Arrays.asList(animation), startAction, null);
    }

    public ViewAnimationState(@Nullable AnimationStartAction<View> startAction, @NonNull ViewAnimation... animations) {
        this(Arrays.asList(animations), startAction, null);
    }

    public ViewAnimationState(@NonNull ViewAnimation animation, @Nullable AnimationEndAction<View> endAction) {
        this(Arrays.asList(animation), null, endAction);
    }

    public ViewAnimationState(@NonNull List<ViewAnimation> animations, @Nullable AnimationEndAction<View> endAction) {
        this(animations, null, endAction);
    }

    public ViewAnimationState(@Nullable AnimationEndAction<View> endAction, @NonNull ViewAnimation... animations) {
        this(Arrays.asList(animations), null, endAction);
    }

    public ViewAnimationState(@NonNull ViewAnimation animation, @Nullable AnimationStartAction<View> startAction, @Nullable AnimationEndAction<View> endAction) {
        this(Arrays.asList(animation), startAction, endAction);
    }

    public ViewAnimationState(@Nullable AnimationStartAction<View> startAction, @Nullable AnimationEndAction<View> endAction, @NonNull ViewAnimation... animations) {
        this(Arrays.asList(animations), startAction, endAction);
    }

    /** Canonical constructor; copies {@code animations} into the internal list. */
    public ViewAnimationState(@NonNull List<ViewAnimation> animations, @Nullable AnimationStartAction<View> startAction, @Nullable AnimationEndAction<View> endAction) {
        mAnimations.addAll(animations);
        mStartAction = startAction;
        mEndAction = endAction;
    }

    // NOTE: returns the internal (mutable) list; callers are expected not to
    // modify it.
    @Override
    public List<Animation<View>> getAnimations() {
        return mAnimations;
    }

    @Nullable
    @Override
    public AnimationStartAction<View> getAnimationStartAction() {
        return mStartAction;
    }

    @Nullable
    @Override
    public AnimationEndAction<View> getAnimationEndAction() {
        return mEndAction;
    }
}
| 1,022 |
513 | {
"family": "Sedgwick Ave Display",
"variants": ["regular"],
"subsets": ["latin", "latin-ext", "vietnamese"],
"version": "v8",
"lastModified": "2021-03-19",
"files": {
"regular": "http://fonts.gstatic.com/s/sedgwickavedisplay/v8/xfuu0XPgU3jZPUoUo3ScvmPi-NapQ8OxM2czd-YnOzUD.ttf"
},
"category": "handwriting",
"kind": "webfonts#webfont"
}
| 168 |
4,761 | <gh_stars>1000+
#!/usr/bin/env python3
import sys
import os
import json
import re
def change_file_ext(filename, extension):
    """Return `filename` with its extension replaced by `extension` (no dot)."""
    base, _old_ext = os.path.splitext(filename)
    return '{}.{}'.format(base, extension)
def strip_adoc(heading):
    """Strip inline AsciiDoc markup (backticks, paired _ or * emphasis) from a heading."""
    plain = heading.replace('`', '')
    return re.sub(r'\b(_|\*)(.+?)\1\b', r'\2', plain)
# Anchors already handed out, per file, so duplicates can be disambiguated.
file_headings = dict()

def heading_to_anchor(filepath, heading, anchor):
    """Return a unique anchor for `heading` within `filepath`.

    When `anchor` is None one is derived from the heading the same way
    AsciiDoc does; duplicate anchors within the same file get a numeric
    suffix starting at "-2".
    """
    if anchor is None:
        # The replace(' -- ', '') is needed because AsciiDoc transforms ' -- ' to ' — '
        # (narrow-space, em-dash, narrow-space) which then collapses down to ''
        # when calculating the anchor.
        slug = heading.lower().replace(' -- ', '').replace(' ', '-').replace('.', '-')
        anchor = re.sub(r'\-+', '-', re.sub(r'[^-\w]', '', slug))
    taken = file_headings.setdefault(filepath, set())
    candidate = anchor
    suffix = 1  # the first duplicate gets suffixed with "-2", not "-1"
    while candidate in taken:
        suffix += 1
        candidate = '{}-{}'.format(anchor, suffix)
    taken.add(candidate)
    return candidate
def read_file_with_includes(filepath):
    """Read `filepath` and recursively inline AsciiDoc ``include::path[]`` lines.

    Include paths are resolved relative to the including file's directory.
    Returns the fully expanded text as a single string.
    """
    # Accumulate pieces in a list and join once: avoids O(n^2) string
    # concatenation on large documents.  `with` ensures the handle is closed
    # deterministically even on errors.
    parts = []
    with open(filepath) as adoc_fh:
        parent_dir = os.path.dirname(filepath)
        for line in adoc_fh.readlines():
            m = re.match(r'^include::(.*)\[\]\s*$', line)
            if m:
                # Recurse so nested includes are expanded too.
                parts.append(read_file_with_includes(os.path.join(parent_dir, m.group(1))))
            else:
                parts.append(line)
    return ''.join(parts)
# Heading levels included in the generated TOC.
min_level = 2  # this has to be 2
max_level = 4  # this can be 2, 3 or 4

if __name__ == "__main__":
    # Usage: script <index.json> <adoc_dir> <output.json>
    index_json = sys.argv[1]
    adoc_dir = sys.argv[2]
    output_json = sys.argv[3]

    with open(index_json) as json_fh:
        data = json.load(json_fh)

    output_data = []
    for tab in data['tabs']:
        nav = []
        for subitem in tab['subitems']:
            if 'subpath' in subitem:
                # One nav entry per page; its 'sections' list is filled from
                # the page's headings below.
                nav.append({
                    'path': os.path.join('/', tab['path'], change_file_ext(subitem['subpath'], 'html')),
                    'title': subitem['title'],
                    'sections': [],
                })
                level = min_level
                top_level_file = os.path.join(adoc_dir, tab['path'], subitem['subpath'])
                adoc_content = read_file_with_includes(top_level_file)
                # header_id holds an explicit [[anchor]] seen on the previous
                # line, to be consumed by the next heading.
                header_id = None
                for line in adoc_content.split('\n'):
                    m = re.match(r'^\[\[(.*)\]\]\s*$', line)
                    if m:
                        header_id = m.group(1)
                    else:
                        m = re.match(r'^(=+)\s+(.+?)\s*$', line)
                        if m:
                            # Need to compute anchors for *every* header (updates file_headings)
                            newlevel = len(m.group(1))
                            heading = strip_adoc(m.group(2))
                            anchor = heading_to_anchor(top_level_file, heading, header_id)
                            if min_level <= newlevel <= max_level:
                                # Treat levels 3 and 4 identically
                                if newlevel > 3:
                                    newlevel = 3
                                entry = {'heading': heading, 'anchor': anchor}
                                # Entering a deeper level: open a 'subsections'
                                # list on the most recent section.
                                if newlevel > level:
                                    nav[-1]['sections'][-1]['subsections'] = []
                                level = newlevel
                                if level == 2:
                                    nav[-1]['sections'].append(entry)
                                elif level == 3:
                                    nav[-1]['sections'][-1]['subsections'].append(entry)
                            header_id = None
        output_data.append({'title': tab['title'], 'path': '/{}/'.format(tab['path']), 'toc': nav})

    with open(output_json, 'w') as out_fh:
        json.dump(output_data, out_fh, indent=4)
| 2,303 |
6,073 | <reponame>nwind/exceljs
{
"differentFirst": true,
"differentOddEven": true,
"firstHeader": "Hello Exceljs",
"firstFooter": "Hello World",
"oddHeader": "&C&KCCCCCC&\"Aril\"52 exceljs oddHeader",
"oddFooter": "&C&KCCCCCC&\"Aril\"52 exceljs oddFooter",
"evenHeader": "&C&KCCCCCC&\"Aril\"52 exceljs evenHeader",
"evenFooter": "&C&KCCCCCC&\"Aril\"52 exceljs evenFooter"
} | 161 |
435 | {
"alias": "video/2570/the-young-coder-lets-learn-python",
"category": "PyCon US 2014",
"copyright_text": "https://www.youtube.com/t/terms",
"description": "",
"duration": null,
"id": 2570,
"language": "eng",
"quality_notes": "",
"recorded": "2014-04-12",
"slug": "the-young-coder-lets-learn-python",
"speakers": [
"<NAME>",
"<NAME>"
],
"summary": "Since PyCon 2013, interest in the Young Coders class has intensified.\nSeveral Python conferences have run their own, and classes outside of\nconferences - from one-time workshops to after school clubs - have\nsprung up as well. With more people than ever interested in teaching\nPython to kids, we're here to address how to organize a class. It takes\nsome effort to set up, but the payoff is enormous.\n",
"tags": [],
"thumbnail_url": "https://i1.ytimg.com/vi/HgpsmOpZfD4/hqdefault.jpg",
"title": "The Young Coder: Let's Learn Python",
"videos": [
{
"length": 0,
"type": "youtube",
"url": "https://www.youtube.com/watch?v=HgpsmOpZfD4"
}
]
}
| 393 |
8,629 | #include <Backups/BackupEntryFromSmallFile.h>
#include <Disks/IDisk.h>
#include <Disks/IO/createReadBufferFromFileBase.h>
#include <IO/ReadHelpers.h>
namespace DB
{

namespace
{
    /// Reads the entire contents of a local file into a string.
    String readFile(const String & file_path)
    {
        auto buf = createReadBufferFromFileBase(file_path, /* settings= */ {});
        String s;
        readStringUntilEOF(s, *buf);
        return s;
    }

    /// Reads the entire contents of a file stored on the given disk.
    String readFile(const DiskPtr & disk, const String & file_path)
    {
        auto buf = disk->readFile(file_path);
        String s;
        readStringUntilEOF(s, *buf);
        return s;
    }
}

/// The file is assumed to be small: its whole contents are slurped into
/// memory up front and served by the BackupEntryFromMemory base class.
BackupEntryFromSmallFile::BackupEntryFromSmallFile(const String & file_path_, const std::optional<UInt128> & checksum_)
    : BackupEntryFromMemory(readFile(file_path_), checksum_), file_path(file_path_)
{
}

/// Same as above, but reads via the IDisk abstraction instead of the local FS.
BackupEntryFromSmallFile::BackupEntryFromSmallFile(
    const DiskPtr & disk_, const String & file_path_, const std::optional<UInt128> & checksum_)
    : BackupEntryFromMemory(readFile(disk_, file_path_), checksum_), disk(disk_), file_path(file_path_)
{
}

}
481 | import os
import cv2
import numpy as np
import pickle
from tqdm import trange, tqdm
data_root = '/pathe/to/LV-MHP-v1/'
img_root = data_root + 'images/'
ann_root = data_root + 'annotations/'
global_seg_root = data_root + 'global_seg/'
def obtain_ann_dict(img_root, ann_root):
    """Map each image filename under `img_root` to the list of annotation
    filenames under `ann_root` whose first four characters (the image id)
    match '<image-id>.jpg'."""
    assert(os.path.isdir(img_root)), 'Path does not exist: {}'.format(img_root)
    assert(os.path.isdir(ann_root)), 'Path does not exist: {}'.format(ann_root)
    ann_dict = {image_name: [] for image_name in os.listdir(img_root)}
    for ann_name in os.listdir(ann_root):
        ann_dict[ann_name[0:4] + '.jpg'].append(ann_name)
    return ann_dict
ann_dict = obtain_ann_dict(img_root, ann_root)
def get_train_dat():
    """Dataset records for the training split."""
    return get_data('train_list.txt')
def get_val_dat():
    """Dataset records for the validation split."""
    return get_data('val_list.txt')
def get_test_dat():
    """Dataset records for the test split."""
    return get_data('test_list.txt')
def get_data(src):
    # Builds one record per image listed in the split file `src` (relative to
    # data_root).  Each record carries the image path and size, the path of
    # the global (all-person) segmentation mask, and one tight bounding box
    # per per-person annotation mask.
    flist=[line.strip() for line in open(data_root+src).readlines()]

    list_dat=[]
    for add in tqdm(flist, desc='Loading %s ..' %src):
        dat={}
        # Image is decoded only to obtain its dimensions (height, width).
        im_sz=cv2.imread(img_root + add).shape

        dat['filepath'] = img_root + add
        # Global segmentation mask shares the image name with a .png extension.
        dat['global_mask_add'] = global_seg_root + add.replace('.jpg', '.png')

        dat['width'] = im_sz[1]
        dat['height'] = im_sz[0]

        dat['bboxes'] = []
        for ann_add in sorted(ann_dict[add]):
            ann=cv2.imread(ann_root+ann_add, cv2.IMREAD_GRAYSCALE)

            # Tight box around all nonzero (person) pixels of this instance
            # mask.  Assumes every mask has at least one nonzero pixel --
            # xs.min() would raise otherwise.
            ys, xs=np.where(ann>0)
            x1, x2, y1, y2 = xs.min(), xs.max(), ys.min(), ys.max()

            dat['bboxes'].append(
                {'class': 'person',
                 'ann_path': ann_root+ann_add,
                 'x1': x1,
                 'y1': y1,
                 'x2': x2,
                 'y2': y2 } )

        list_dat.append(dat)

    return list_dat
def show_data(list_dat, num=4):
    # Visual sanity check: draws `num` randomly chosen records with their
    # person boxes, then shows each instance mask in its own figure.
    # NOTE: uses Python 2 print-statement syntax; this script targets Python 2.
    from pylab import plt
    for dat in np.random.choice(list_dat, num):
        print dat
        im=cv2.imread(dat['filepath'])[:,:,::-1]  # BGR -> RGB for matplotlib
        plt.figure(1)
        plt.imshow(im)
        for bbox in dat['bboxes']:
            plt.gca().add_patch(plt.Rectangle((bbox['x1'], bbox['y1']),
                          bbox['x2'] - bbox['x1'],
                          bbox['y2'] - bbox['y1'], fill=False,
                          edgecolor='red', linewidth=1) )
        # One extra figure per person instance mask (figure ids 11, 12, ...).
        for idx, bbox in enumerate(dat['bboxes']):
            ann=cv2.imread(bbox['ann_path'], cv2.IMREAD_GRAYSCALE)
            plt.figure(11+idx)
            plt.imshow(ann)
        plt.show()
if __name__ == '__main__':
    # Load and visually inspect a few random samples from each split.
    dat_list = get_val_dat()
    show_data(dat_list)
    dat_list = get_train_dat()
    show_data(dat_list)
    dat_list = get_test_dat()
    show_data(dat_list)
| 1,420 |
607 | /*
* This file is a part of the open source stm32plus library.
* Copyright (c) 2011,2012,2013,2014 <NAME> <www.andybrown.me.uk>
* Please see website for licensing terms.
*/
#pragma once
/*
* Application class
*/
// MFC application object for the HID reader sample.  InitInstance performs
// application start-up; implementation lives in the corresponding .cpp file.
class CHidReaderApp : public CWinApp
{
public:
    virtual BOOL InitInstance();

    DECLARE_MESSAGE_MAP()
};
| 112 |
484 | <filename>broker/src/test/java/io/camunda/zeebe/broker/system/configuration/CompressionCfgTest.java
/*
* Copyright Camunda Services GmbH and/or licensed to Camunda Services GmbH under
* one or more contributor license agreements. See the NOTICE file distributed
* with this work for additional information regarding copyright ownership.
* Licensed under the Zeebe Community License 1.1. You may not use this file
* except in compliance with the Zeebe Community License 1.1.
*/
package io.camunda.zeebe.broker.system.configuration;
import static org.assertj.core.api.Assertions.assertThat;
import io.atomix.cluster.messaging.MessagingConfig.CompressionAlgorithm;
import java.util.Map;
import org.junit.Test;
/**
 * Verifies that the cluster message-compression setting is read from the
 * broker configuration, and that it defaults to NONE when unset.
 */
public final class CompressionCfgTest {

  /** Reads the named test configuration and returns its cluster section. */
  private static ClusterCfg readClusterCfg(final String configName) {
    final BrokerCfg cfg = TestConfigReader.readConfig(configName, Map.of());
    return cfg.getCluster();
  }

  @Test
  public void shouldConfigureCompressionAlgorithm() {
    // when -- a config that explicitly sets messageCompression to SNAPPY
    final ClusterCfg config = readClusterCfg("compression-cfg");

    // then
    assertThat(config.getMessageCompression()).isEqualTo(CompressionAlgorithm.SNAPPY);
  }

  @Test
  public void shouldSetDefaultCompression() {
    // when -- a config that does not set messageCompression at all
    final ClusterCfg config = readClusterCfg("cluster-cfg");

    // then
    assertThat(config.getMessageCompression()).isEqualTo(CompressionAlgorithm.NONE);
  }
}
| 423 |
348 | <filename>docs/data/leg-t2/042/04203271.json
{"nom":"Saint-Paul-en-Jarez","circ":"3ème circonscription","dpt":"Loire","inscrits":3003,"abs":1719,"votants":1284,"blancs":97,"nuls":35,"exp":1152,"res":[{"nuance":"REM","nom":"Mme <NAME>","voix":607},{"nuance":"UDI","nom":"M. <NAME>","voix":545}]} | 123 |
511 | <reponame>SenthilKumarGS/TizenRT
/****************************************************************************
*
* Copyright 2018 Samsung Electronics All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific
* language governing permissions and limitations under the License.
*
****************************************************************************/
//***************************************************************************
// Included Files
//***************************************************************************
#include <tinyara/compiler.h>
//***************************************************************************
// Pre-processor Definitions
//***************************************************************************
//***************************************************************************
// Private Types
//***************************************************************************
#ifdef __ARM_EABI__
// The 32-bit ARM C++ ABI specifies that the guard is a 32-bit
// variable and the least significant bit contains 0 prior to
// initialization, and 1 after.
typedef int __guard;
#else
// The "standard" C++ ABI specifies that the guard is a 64-bit
// variable and the first byte contains 0 prior to initialization, and
// 1 after.  mode(__DI__) forces the int to double-integer (64-bit) width;
// __extension__ suppresses the pedantic warning for the GNU attribute.
__extension__ typedef int __guard __attribute__((mode(__DI__)));
#endif
| 371 |
1,909 | package org.knowm.xchange.itbit.service.marketdata;
import static org.assertj.core.api.Assertions.assertThat;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.util.ISO8601DateFormat;
import java.io.IOException;
import java.io.InputStream;
import java.math.BigDecimal;
import org.junit.Test;
import org.knowm.xchange.itbit.dto.trade.ItBitTradeHistory;
import org.knowm.xchange.itbit.dto.trade.ItBitUserTrade;
/** Test Transaction[] JSON parsing */
public class JsonTest {

  @Test
  public void testTradeHistory() throws IOException {
    // Fixture captured from the itBit trade-history endpoint.
    InputStream is =
        JsonTest.class.getResourceAsStream(
            "/org/knowm/xchange/itbit/service/marketdata/ItBitTradeHistory.json");

    ObjectMapper mapper = new ObjectMapper();
    // NOTE(review): the stream is not explicitly closed; Jackson's default
    // AUTO_CLOSE_SOURCE presumably closes it after readValue -- confirm.
    ItBitTradeHistory tradeHistory = mapper.readValue(is, ItBitTradeHistory.class);

    // Paging metadata.
    assertThat(tradeHistory.getCurrentPageNumber()).isEqualTo(1);
    assertThat(tradeHistory.getTotalNumberOfRecords()).isEqualTo(2);
    assertThat(tradeHistory.getLatestExecutionId()).isEqualTo("332");
    assertThat(tradeHistory.getRecordsPerPage()).isEqualTo(50);
    assertThat(tradeHistory.getTradingHistory()).hasSize(2);

    // Spot-check the first trade entry.
    ItBitUserTrade userTrade = tradeHistory.getTradingHistory().get(0);
    assertThat(userTrade.getDirection()).isEqualTo(ItBitUserTrade.Direction.buy);
    assertThat(userTrade.getCurrency1()).isEqualTo("XBT");
    assertThat(userTrade.getCurrency1Amount()).isEqualByComparingTo(new BigDecimal("0.0001"));
    assertThat(userTrade.getRebatesApplied()).isEqualTo(new BigDecimal("-0.000125265"));
    assertThat(userTrade.getTimestamp())
        .withDateFormat(new ISO8601DateFormat())
        .isEqualTo("2015-05-11T14:48:01.987Z");
  }
}
| 632 |
435 | /*
* Copyright (C) 2014 <NAME>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.h6ah4i.android.example.openslmediaplayer.app.contents;
import java.util.ArrayList;
import android.content.Context;
import android.os.Bundle;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import android.widget.AdapterView;
import android.widget.ArrayAdapter;
import android.widget.CompoundButton;
import android.widget.SeekBar;
import android.widget.Spinner;
import com.h6ah4i.android.example.openslmediaplayer.R;
import com.h6ah4i.android.example.openslmediaplayer.app.framework.AppEvent;
import com.h6ah4i.android.example.openslmediaplayer.app.framework.AppEventBus;
import com.h6ah4i.android.example.openslmediaplayer.app.model.EnvironmentalReverbStateStore;
import com.h6ah4i.android.example.openslmediaplayer.app.model.EventDefs;
import com.h6ah4i.android.example.openslmediaplayer.app.model.EventDefs.EnvironmentalReverbNotifyEvents;
import com.h6ah4i.android.example.openslmediaplayer.app.model.EventDefs.EnvironmentalReverbReqEvents;
import com.h6ah4i.android.example.openslmediaplayer.app.utils.ActionBarTileBuilder;
public class EnvironmentalReverbFragment
extends AudioEffectSettingsBaseFragment
implements AdapterView.OnItemSelectedListener,
SeekBar.OnSeekBarChangeListener {
// constants
static final int SEEKBAR_MAX = 10000;
// internal classes
// Static receiver that forwards event-bus events to the fragment; being
// static it avoids an implicit reference to the fragment (the holder is
// managed by the AppEventBus.Receiver base class).
private static class AppEventReceiver extends AppEventBus.Receiver<EnvironmentalReverbFragment> {
    // Only environmental-reverb notification events are delivered.
    private static final int[] FILTER = new int[] {
            EventDefs.Category.NOTIFY_ENVIRONMENTAL_REVERB,
    };

    public AppEventReceiver(EnvironmentalReverbFragment holder) {
        super(holder, FILTER);
    }

    @Override
    protected void onReceiveAppEvent(EnvironmentalReverbFragment holder, AppEvent event) {
        holder.onReceiveAppEvent(event);
    }
}
// fields
// Registered in onStart(), unregistered and dropped in onStop().
private AppEventReceiver mAppEventReceiver;

// Sliders for the individual environmental reverb parameters; references are
// obtained in obtainViewReferences() and cleared in onDestroyView().
private SeekBar mSeekBarDecayHFRatio;
private SeekBar mSeekBarDecayTime;
private SeekBar mSeekBarDensity;
private SeekBar mSeekBarDiffusion;
private SeekBar mSeekBarReflectionsDelay;
private SeekBar mSeekBarReflectionsLevel;
private SeekBar mSeekBarReverbDelay;
private SeekBar mSeekBarReverbLevel;
private SeekBar mSeekBarRoomHFLevel;
private SeekBar mSeekBarRoomLevel;
private Spinner mSpinnerPreset;

// All parameter seek bars; the list index doubles as the parameter index
// (see onProgressChanged()).
private ArrayList<SeekBar> mParamSeekBars;
// Parameter index currently being dragged by the user, or -1 when none.
private int mActiveTrackingSeekBarIndex = -1;
// Action bar title for this effect's settings screen.
@Override
protected CharSequence onGetActionBarTitleText() {
    return ActionBarTileBuilder.makeTitleString(getActivity(),
            R.string.title_environmental_reverb);
}
// Reflects the current enabled state stored in the app-level model.
@Override
protected boolean onGetActionBarSwitchCheckedState() {
    return getAppController().getEnvironmentalReverbStateStore().isEnabled();
}
// The action bar switch toggles the effect on/off (arg1: 1 = enabled, 0 = disabled).
@Override
protected void onActionBarSwitchCheckedChanged(
        CompoundButton buttonView, boolean isChecked) {
    postAppEvent(EnvironmentalReverbReqEvents.SET_ENABLED, (isChecked ? 1 : 0), 0);
}
@Override
public View onCreateView(LayoutInflater inflater, ViewGroup container, Bundle savedInstanceState) {
    // Layout inflation only; view wiring happens in onViewCreated().
    View rootView = inflater.inflate(R.layout.fragment_environment_reverb, container, false);
    return rootView;
}
@Override
public void onViewCreated(View view, Bundle savedInstanceState) {
    super.onViewCreated(view, savedInstanceState);

    // Grab widget references first, then attach listeners/adapters.
    obtainViewReferences();
    setupViews();
}
@Override
public void onStart() {
    super.onStart();
    // Subscribe for reverb notifications while the fragment is visible.
    mAppEventReceiver = new AppEventReceiver(this);
    eventBus().register(mAppEventReceiver);
}
@Override
public void onStop() {
    super.onStop();
    // Mirror of onStart(): unregister and drop the receiver.
    eventBus().unregister(mAppEventReceiver);
    mAppEventReceiver = null;
}
@Override
public void onDestroyView() {
    super.onDestroyView();

    // Drop every view reference so the destroyed view hierarchy can be
    // garbage collected.
    mSeekBarDecayHFRatio = null;
    mSeekBarDecayTime = null;
    mSeekBarDensity = null;
    mSeekBarDiffusion = null;
    mSeekBarReflectionsDelay = null;
    mSeekBarReflectionsLevel = null;
    mSeekBarReverbDelay = null;
    mSeekBarReverbLevel = null;
    mSeekBarRoomHFLevel = null;
    mSeekBarRoomLevel = null;
    mSpinnerPreset = null;
    mParamSeekBars.clear();
    mParamSeekBars = null;
}
@Override
public void onItemSelected(AdapterView<?> parent, View view, int position, long id) {
    switch (parent.getId()) {
        case R.id.spinner_environment_reverb_preset:
            // Spinner position is offset by one relative to the preset index
            // (position 0 presumably a "no preset" entry -- confirm against
            // the adapter set up in setupViews()).
            postAppEvent(
                    EnvironmentalReverbReqEvents.SET_PRESET,
                    (position - 1), 0);
            break;
    }
}
    @Override
    public void onNothingSelected(AdapterView<?> parent) {
        // Intentionally empty: nothing to do when the spinner selection clears.
    }
@Override
public void onProgressChanged(SeekBar seekBar, int progress, boolean fromUser) {
if (fromUser) {
int paramIndex = mParamSeekBars.indexOf(seekBar);
postAppEvent(
EnvironmentalReverbReqEvents.SET_PARAMETER,
paramIndex, seekBarProgressToFloat(progress));
}
}
    @Override
    public void onStartTrackingTouch(SeekBar seekBar) {
        // Remember which parameter is being dragged so externally-pushed
        // updates for it can be suppressed while the user is still touching.
        mActiveTrackingSeekBarIndex = mParamSeekBars.indexOf(seekBar);
    }
    @Override
    public void onStopTrackingTouch(SeekBar seekBar) {
        // Drag finished; resume applying externally-pushed parameter updates.
        mActiveTrackingSeekBarIndex = -1;
    }
/* package */void onReceiveAppEvent(AppEvent event) {
switch (event.category) {
case EventDefs.Category.NOTIFY_ENVIRONMENTAL_REVERB:
onReceiveEnvironmentalReverbNotifyEvents(event);
break;
}
}
    // Applies state-store notifications to the UI. Seekbar updates are skipped
    // for the parameter the user is actively dragging so that remote updates
    // do not fight the touch interaction.
    private void onReceiveEnvironmentalReverbNotifyEvents(AppEvent event) {
        switch (event.event) {
        case EnvironmentalReverbNotifyEvents.PRESET_UPDATED:
            // preset updated
            if (mSpinnerPreset != null) {
                updatePresetSpinner();
            }
            break;
        case EnvironmentalReverbNotifyEvents.PARAMETER_UPDATED: {
            // parameter updated; arg1 carries the parameter index
            // (or PARAM_INDEX_ALL for a bulk refresh)
            final int paramIndex = event.arg1;
            final boolean nowTracking = (mActiveTrackingSeekBarIndex == paramIndex);
            if (mParamSeekBars != null && !nowTracking) {
                final EnvironmentalReverbStateStore state = getEnvironmentalReverbState();
                if (paramIndex == EnvironmentalReverbReqEvents.PARAM_INDEX_ALL) {
                    // refresh every parameter seekbar
                    for (int i = 0; i < mParamSeekBars.size(); i++) {
                        final float value = state.getNormalizedParameter(i);
                        mParamSeekBars.get(i).setProgress(floatToSeekbarProgress(value));
                    }
                } else {
                    final float value = state.getNormalizedParameter(paramIndex);
                    mParamSeekBars.get(paramIndex).setProgress(
                            floatToSeekbarProgress(value));
                }
            }
        }
            break;
        }
    }
    // Looks up each control by id; the find*ByIdAndSetListener helpers come
    // from the base class and presumably register this fragment as the
    // control's listener — confirm there.
    private void obtainViewReferences() {
        mSeekBarDecayHFRatio =
                findSeekBarByIdAndSetListener(R.id.seekbar_environment_reverb_decay_hf_ratio);
        mSeekBarDecayTime =
                findSeekBarByIdAndSetListener(R.id.seekbar_environment_reverb_decay_time);
        mSeekBarDensity =
                findSeekBarByIdAndSetListener(R.id.seekbar_environment_reverb_density);
        mSeekBarDiffusion =
                findSeekBarByIdAndSetListener(R.id.seekbar_environment_reverb_diffusion);
        mSeekBarReflectionsDelay =
                findSeekBarByIdAndSetListener(R.id.seekbar_environment_reverb_reflections_delay);
        mSeekBarReflectionsLevel =
                findSeekBarByIdAndSetListener(R.id.seekbar_environment_reverb_reflections_level);
        mSeekBarReverbDelay =
                findSeekBarByIdAndSetListener(R.id.seekbar_environment_reverb_reverb_delay);
        mSeekBarReverbLevel =
                findSeekBarByIdAndSetListener(R.id.seekbar_environment_reverb_reverb_level);
        mSeekBarRoomHFLevel =
                findSeekBarByIdAndSetListener(R.id.seekbar_environment_reverb_room_hf_level);
        mSeekBarRoomLevel =
                findSeekBarByIdAndSetListener(R.id.seekbar_environment_reverb_room_level);
        mSpinnerPreset = findSpinnerByIdAndSetListener(R.id.spinner_environment_reverb_preset);
    }
    // Configures the controls obtained in obtainViewReferences().
    private void setupViews() {
        final Context context = getActivity();
        // The order of this list defines the parameter index used when posting
        // SET_PARAMETER events and when applying PARAMETER_UPDATED notifications.
        mParamSeekBars = new ArrayList<SeekBar>();
        mParamSeekBars.add(mSeekBarDecayHFRatio);
        mParamSeekBars.add(mSeekBarDecayTime);
        mParamSeekBars.add(mSeekBarDensity);
        mParamSeekBars.add(mSeekBarDiffusion);
        mParamSeekBars.add(mSeekBarReflectionsDelay);
        mParamSeekBars.add(mSeekBarReflectionsLevel);
        mParamSeekBars.add(mSeekBarReverbDelay);
        mParamSeekBars.add(mSeekBarReverbLevel);
        mParamSeekBars.add(mSeekBarRoomHFLevel);
        mParamSeekBars.add(mSeekBarRoomLevel);
        for (SeekBar seekbar : mParamSeekBars) {
            seekbar.setMax(SEEKBAR_MAX);
        }
        // disable unsupported parameter seekbars
        // (these parameters are not supported in current Android)
        mSeekBarReflectionsLevel.setEnabled(false);
        mSeekBarReflectionsDelay.setEnabled(false);
        mSeekBarReverbDelay.setEnabled(false);
        {
            // Populate the preset spinner with the preset name resources.
            final ArrayAdapter<CharSequence> adapter =
                    ArrayAdapter.createFromResource(
                            context, R.array.aux_env_reverb_preset_names,
                            android.R.layout.simple_spinner_item);
            adapter.setDropDownViewResource(
                    android.R.layout.simple_spinner_dropdown_item);
            mSpinnerPreset.setAdapter(adapter);
        }
        updatePresetSpinner();
    }
private void updatePresetSpinner() {
mSpinnerPreset.setSelection(getEnvironmentalReverbState().getPreset() + 1);
}
    // Convenience accessor for the shared reverb state store.
    private EnvironmentalReverbStateStore getEnvironmentalReverbState() {
        return getAppController().getEnvironmentalReverbStateStore();
    }
    // Posts a reverb request event with an int payload on the app event bus.
    private void postAppEvent(int event, int arg1, int arg2) {
        eventBus().post(new AppEvent(
                EventDefs.Category.ENVIRONMENTAL_REVERB, event, arg1, arg2));
    }
    // Posts a reverb request event with a float payload (parameter values).
    private void postAppEvent(int event, int arg1, float arg2) {
        eventBus().post(new AppEvent(
                EventDefs.Category.ENVIRONMENTAL_REVERB, event, arg1, arg2));
    }
    // Converts a seekbar progress (0..SEEKBAR_MAX) to a normalized float in [0, 1].
    private static float seekBarProgressToFloat(int progress) {
        return progress * (1.0f / SEEKBAR_MAX);
    }
    // Converts a normalized [0, 1] value back to a seekbar progress;
    // note this truncates toward zero rather than rounding.
    private static int floatToSeekbarProgress(float value) {
        return (int) (value * SEEKBAR_MAX);
    }
}
| 4,979 |
799 | <reponame>diCagri/content
import pytest
from ServiceNow_CMDB import Client, records_list_command, get_record_command, create_record_command, \
update_record_command, add_relation_command, delete_relation_command
from test_data.result_constants import EXPECTED_RECORDS_LIST_NO_RECORDS, \
EXPECTED_RECORDS_LIST_WITH_RECORDS, EXPECTED_GET_RECORD, EXPECTED_CREATE_RECORD, EXPECTED_UPDATE_RECORD, \
EXPECTED_ADD_RELATION, EXPECTED_DELETE_RELATION
from test_data.response_constants import RECORDS_LIST_EMPTY_RESPONSE, RECORDS_LIST_RESPONSE_WITH_RECORDS, \
GET_RECORD_RESPONSE, CREATE_RECORD_RESPONSE, UPDATE_RECORD_RESPONSE, \
ADD_RELATION_RESPONSE, DELETE_RELATION_RESPONSE
from ServiceNowApiModule import ServiceNowClient
@pytest.mark.parametrize('response, expected_result', [
    (RECORDS_LIST_RESPONSE_WITH_RECORDS, EXPECTED_RECORDS_LIST_WITH_RECORDS),
    (RECORDS_LIST_EMPTY_RESPONSE, EXPECTED_RECORDS_LIST_NO_RECORDS)
])
def test_records_list_command(response, expected_result, mocker):
    """
    Given:
        - The records list command.
    When:
        - Mocking the response from the http request once to a response containing records, and once to a response with
          no records.
    Then:
        - Validate that in the first case when the response contains records, the context has both `Class` and `Records`
          keys. In the second case, when no records are in the response, validate the context has only the `Class` key.
    """
    # Use the keyword form for consistency with the other tests in this module.
    client = Client(credentials={})
    mocker.patch.object(ServiceNowClient, 'http_request', return_value=response)
    result = records_list_command(client, args={'class': 'test_class'})
    # result[1] is the command's context output.
    assert expected_result == result[1]
def test_get_record_command(mocker):
    """
    Given:
        - The get record by id command.
    When:
        - Mocking the response from the http request to a response containing several attributes, inbound and outbound
          relations of the record.
    Then:
        - Validate that the output context of the command contains all attributes and relations that were returned.
    """
    client = Client(credentials={})
    mocker.patch.object(ServiceNowClient, 'http_request', return_value=GET_RECORD_RESPONSE)
    result = get_record_command(client, args={'class': 'test_class', 'sys_id': 'record_id'})
    # result[1] is the command's context output.
    assert EXPECTED_GET_RECORD == result[1]
def test_create_record_command(mocker):
    """
    Given:
        - The create record command.
    When:
        - Mocking the response from the http request to a response containing the attributes of the new record with no
          inbound or outbound relations.
    Then:
        - Validate that the context output contains the `Class`, `SysId` and `Attributes` keys according to the mocked
          response, and that the inbound and outbound relations lists are empty.
    """
    client = Client(credentials={})
    mocker.patch.object(ServiceNowClient, 'http_request', return_value=CREATE_RECORD_RESPONSE)
    result = create_record_command(client, args={'class': 'test_class', 'attributes': 'name=Test Create Record'})
    # result[1] is the command's context output.
    assert EXPECTED_CREATE_RECORD == result[1]
def test_update_record_command(mocker):
    """
    Given:
        - The update record command.
    When:
        - Mocking the response from the http request to a response containing the attributes of the updated record.
    Then:
        - Validate that the context output was changed according to the new attributes.
    """
    client = Client(credentials={})
    mocker.patch.object(ServiceNowClient, 'http_request', return_value=UPDATE_RECORD_RESPONSE)
    result = update_record_command(client, args={'class': 'test_class', 'sys_id': 'record_id',
                                                'attributes': 'name=Test Create Record'})
    # result[1] is the command's context output.
    assert EXPECTED_UPDATE_RECORD == result[1]
def test_add_relation_command(mocker):
    """
    Given:
        - The add relation command.
    When:
        - Mocking the response from the http request to a response containing the attributes and the relations of the
          record.
    Then:
        - Validate that the `InboundRelations` key in the context output contains the added relation.
    """
    client = Client(credentials={})
    mocker.patch.object(ServiceNowClient, 'http_request', return_value=ADD_RELATION_RESPONSE)
    # inbound_relations is passed as the raw string form the integration expects.
    result = add_relation_command(client, args={
        'class': 'test_class',
        'sys_id': 'record_id',
        'inbound_relations': "[{'type': 'relation_type', 'target':'target', 'sys_class_name':'class_name'}]"
    })
    # result[1] is the command's context output.
    assert EXPECTED_ADD_RELATION == result[1]
def test_delete_relation_command(mocker):
    """
    Given:
        - The delete relation command.
    When:
        - Mocking the response from the http request to a response containing the attributes and the relations of the
          record.
    Then:
        - Validate that the `InboundRelations` key in the context output is empty.
    """
    client = Client(credentials={})
    mocker.patch.object(ServiceNowClient, 'http_request', return_value=DELETE_RELATION_RESPONSE)
    result = delete_relation_command(client, args={'class': 'test_class', 'sys_id': 'record_id',
                                                   'relation_sys_id': 'rel_id'})
    # result[1] is the command's context output.
    assert EXPECTED_DELETE_RELATION == result[1]
| 1,951 |
1,515 | <filename>qigsaw-android/splitcore/src/main/java/com/iqiyi/android/qigsaw/core/DefaultObtainUserConfirmationDialog.java<gh_stars>1000+
/*
* MIT License
*
* Copyright (c) 2019-present, iQIYI, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package com.iqiyi.android.qigsaw.core;
import android.graphics.Color;
import android.graphics.drawable.ColorDrawable;
import android.os.Bundle;
import androidx.annotation.Nullable;
import androidx.annotation.RestrictTo;
import android.view.View;
import android.view.ViewGroup;
import android.widget.TextView;
import java.text.DecimalFormat;
import static androidx.annotation.RestrictTo.Scope.LIBRARY_GROUP;
@RestrictTo(LIBRARY_GROUP)
public class DefaultObtainUserConfirmationDialog extends ObtainUserConfirmationDialog {
private boolean fromUserClick;
@Override
protected void onCreate(@Nullable Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
if (checkInternParametersIllegal()) {
finish();
return;
}
setContentView(R.layout.activity_obtain_user_confirmation);
getWindow().setBackgroundDrawable(new ColorDrawable(Color.TRANSPARENT));
getWindow().setLayout(ViewGroup.LayoutParams.MATCH_PARENT, ViewGroup.LayoutParams.WRAP_CONTENT);
setFinishOnTouchOutside(false);
TextView descText = findViewById(R.id.user_conformation_tv);
DecimalFormat df = new DecimalFormat("#.00");
double convert = getRealTotalBytesNeedToDownload() / (1024f * 1024f);
descText.setText(String.format(getString(R.string.prompt_desc), df.format(convert)));
findViewById(R.id.user_confirm).setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
if (!fromUserClick) {
fromUserClick = true;
onUserConfirm();
}
}
});
findViewById(R.id.user_cancel).setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
if (!fromUserClick) {
fromUserClick = true;
onUserCancel();
}
}
});
}
} | 1,215 |
1,837 | package com.dianping.zebra.log;
import org.apache.log4j.LogManager;
import org.apache.log4j.xml.DOMConfigurator;
/**
 * {@link Logger} implementation backed by Apache log4j 1.x.
 *
 * <p>The log4j repository is configured once, from the bundled
 * {@code zebra_log4j.xml}, when this class is first loaded.
 */
public class Log4jLogger implements Logger {

	private final org.apache.log4j.Logger logger;

	static {
		init();
	}

	// Configures the shared log4j repository from the bundled XML config.
	private static synchronized void init() {
		new DOMConfigurator().doConfigure(Log4jLogger.class.getResource("zebra_log4j.xml"),
		      LogManager.getLoggerRepository());
	}

	public Log4jLogger(String loggerName) {
		this.logger = org.apache.log4j.Logger.getLogger(loggerName);
	}

	@Override
	public void debug(String message) {
		logger.debug(message);
	}

	@Override
	public void debug(String msg, Throwable e) {
		// Bug fix: this overload previously had an empty body and silently
		// dropped both the message and the throwable; delegate like the others.
		logger.debug(msg, e);
	}

	@Override
	public void info(String message) {
		logger.info(message);
	}

	@Override
	public void warn(String message) {
		logger.warn(message);
	}

	@Override
	public void warn(String message, Throwable t) {
		logger.warn(message, t);
	}

	@Override
	public void error(String message) {
		logger.error(message);
	}

	@Override
	public void error(String message, Throwable t) {
		logger.error(message, t);
	}

	@Override
	public boolean isDebugEnabled() {
		return logger.isDebugEnabled();
	}
}
| 413 |
5,535 | /*-------------------------------------------------------------------------
*
* cdbdisp_dtx.h
* routines for dispatching DTX commands to the qExec processes.
*
* Portions Copyright (c) 2005-2008, Greenplum inc
* Portions Copyright (c) 2012-Present VMware, Inc. or its affiliates.
*
*
* IDENTIFICATION
* src/include/cdb/cdbdisp_dtx.h
*
*-------------------------------------------------------------------------
*/
#ifndef CDBDISP_DTX_H
#define CDBDISP_DTX_H

#include "cdb/cdbtm.h"

struct pg_result;               /* #include "libpq-fe.h" */
struct CdbPgResults;

/*
 * CdbDispatchDtxProtocolCommand:
 * Sends a non-cancelable command to all segment dbs, primary
 *
 * Returns a malloc'ed array containing the PGresult objects thus
 * produced; the caller must PQclear() them and free() the array.
 * A NULL entry follows the last used entry in the array.
 *
 * Any error message - whether or not it is associated with an
 * PGresult object - is returned in *qeError.
 *
 * resultCount is an out parameter; dtxSegments lists the segments to
 * dispatch to, and serializedDtxContextInfo/-Len carry the serialized
 * DTX context produced by qdSerializeDtxContextInfo() below.
 */
struct pg_result **
CdbDispatchDtxProtocolCommand(DtxProtocolCommand dtxProtocolCommand,
							  char *dtxProtocolCommandLoggingStr,
							  char *gid,
							  ErrorData **qeError,
							  int *resultCount,
							  List *dtxSegments,
							  char *serializedDtxContextInfo,
							  int serializedDtxContextInfoLen);

/*
 * used to take the current Transaction Snapshot and serialized a version of it
 * into the static variable serializedDtxContextInfo
 *
 * *size receives the length of the returned buffer (per the parameter
 * name - confirm at call sites).
 */
char *
qdSerializeDtxContextInfo(int * size, bool wantSnapshot, bool inCursor, int txnOptions, char *debugCaller);

#endif   /* CDBDISP_DTX_H */
| 540 |
#ifndef DataFormatsL1TCorrelator_TkPrimaryVertex_h
#define DataFormatsL1TCorrelator_TkPrimaryVertex_h

// Package:     L1TCorrelator
// Class  :     TkPrimaryVertex
// First version of a class for L1-zvertex

#include <vector>
#include "DataFormats/Common/interface/Ref.h"
#include "DataFormats/Common/interface/RefVector.h"

namespace l1t {

  // Lightweight L1 track-based primary-vertex candidate: a z position plus an
  // associated sum (the sum's exact meaning is producer-defined — confirm upstream).
  class TkPrimaryVertex {
  public:
    // Default-constructed vertices carry sentinel values of -999.
    TkPrimaryVertex() : zvertex_(-999), sum_(-999) {}
    ~TkPrimaryVertex() {}

    TkPrimaryVertex(float z, float s) : zvertex_(z), sum_(s) {}

    // Vertex z position (units as produced upstream — confirm).
    float zvertex() const { return zvertex_; }
    // Accumulated sum associated with this vertex.
    float sum() const { return sum_; }

  private:
    float zvertex_;
    float sum_;
  };

  typedef std::vector<TkPrimaryVertex> TkPrimaryVertexCollection;
  typedef edm::Ref<TkPrimaryVertexCollection> TkPrimaryVertexRef;
  typedef edm::RefVector<TkPrimaryVertexCollection> TkPrimaryVertexRefVector;
  typedef std::vector<TkPrimaryVertexRef> TkPrimaryVertexVectorRef;
}  // namespace l1t
#endif
| 371 |
1,705 | {
"active": true,
"synopsis": "AWS IoT for LoRaWAN enables customers to setup a private LoRaWAN network by connecting their LoRaWAN devices and gateways to the AWS cloud without managing a LoRaWAN Network Server.",
"generate-client-constructors": true
} | 73 |
488 | <gh_stars>100-1000
// NOTE(review): fixture-style snippet. `cnt;` inside the lambda is an
// expression statement with no effect; the commented-out lines appear to be
// the variants a diagnostic/tooling test toggles between — confirm intent.
// auto x = 42;
int cnt = 0;
// auto f = [&]{cnt++;};
auto f = [&]{cnt;};
| 47 |
3,586 | <filename>metadata-io/src/main/java/com/linkedin/metadata/search/utils/GraphUtil.java
package com.linkedin.metadata.search.utils;
import com.linkedin.data.DataMap;
import com.linkedin.data.template.RecordTemplate;
import com.linkedin.metadata.dao.utils.RecordUtils;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.StringJoiner;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import org.apache.commons.lang3.ClassUtils;
import org.neo4j.driver.types.Node;
import org.neo4j.driver.types.Path;
import org.neo4j.driver.types.Relationship;
public class GraphUtil {
public static final String URN_FIELD = "urn";
public static final String SOURCE_FIELD = "source";
public static final String DESTINATION_FIELD = "destination";
private GraphUtil() {
// Util class
}
/**
* Converts ENTITY to node (field:value map).
*
* @param entity ENTITY defined in models
* @return unmodifiable field value map
*/
@Nonnull
public static <ENTITY extends RecordTemplate> Map<String, Object> entityToNode(@Nonnull ENTITY entity) {
final Map<String, Object> fields = new HashMap<>();
// put all field values
entity.data().forEach((k, v) -> fields.put(k, toValueObject(v)));
return fields;
}
/**
* Converts RELATIONSHIP to cypher matching criteria, excluding source and destination, e.g. {key: "value"}.
*
* @param relationship RELATIONSHIP defined in models
* @return Criteria String, or "" if no additional fields in relationship
*/
@Nonnull
public static <RELATIONSHIP extends RecordTemplate> String relationshipToCriteria(
@Nonnull RELATIONSHIP relationship) {
final StringJoiner joiner = new StringJoiner(",", "{", "}");
// put all field values except source and destination
relationship.data().forEach((k, v) -> {
if (!SOURCE_FIELD.equals(k) && !DESTINATION_FIELD.equals(k)) {
joiner.add(toCriterionString(k, v));
}
});
return joiner.length() <= 2 ? "" : joiner.toString();
}
// Returns self if primitive type, otherwise, return toString()
@Nonnull
private static Object toValueObject(@Nonnull Object obj) {
if (ClassUtils.isPrimitiveOrWrapper(obj.getClass())) {
return obj;
}
return obj.toString();
}
// Returns "key:value" String, if value is not primitive, then use toString() and double quote it
@Nonnull
private static String toCriterionString(@Nonnull String key, @Nonnull Object value) {
if (ClassUtils.isPrimitiveOrWrapper(value.getClass())) {
return key + ":" + value;
}
return key + ":\"" + value.toString() + "\"";
}
/**
* Converts node (field:value map) to ENTITY RecordTemplate.
*
* @param node Neo4j Node of entityClass type
* @return RecordTemplate
*/
@Nonnull
public static RecordTemplate nodeToEntity(@Nonnull Node node) {
final String className = node.labels().iterator().next();
return RecordUtils.toRecordTemplate(className, new DataMap(node.asMap()));
}
/**
* Converts path segment (field:value map) list of {@link RecordTemplate}s of nodes and edges.
*
* @param segment the segment of a path containing nodes and edges
*/
@Nonnull
public static List<RecordTemplate> pathSegmentToRecordList(@Nonnull Path.Segment segment) {
final Node startNode = segment.start();
final Node endNode = segment.end();
final Relationship edge = segment.relationship();
return Arrays.asList(
nodeToEntity(startNode),
edgeToRelationship(startNode, endNode, edge),
nodeToEntity(endNode)
);
}
/**
* Converts edge (source-relationship->destination) to RELATIONSHIP.
*
* @param relationshipClass Class of RELATIONSHIP
* @param source Neo4j source Node
* @param destination Neo4j destination Node
* @param relationship Neo4j relationship
* @return ENTITY
*/
@Nonnull
public static <RELATIONSHIP extends RecordTemplate> RELATIONSHIP edgeToRelationship(
@Nonnull Class<RELATIONSHIP> relationshipClass, @Nonnull Node source, @Nonnull Node destination,
@Nonnull Relationship relationship) {
final DataMap dataMap = relationshipDataMap(source, destination, relationship);
return RecordUtils.toRecordTemplate(relationshipClass, dataMap);
}
/**
* Converts edge (source-relationship->destination) to RELATIONSHIP RecordTemplate.
*
* @param source Neo4j source Node
* @param destination Neo4j destination Node
* @param relationship Neo4j relationship
* @return ENTITY RecordTemplate
*/
@Nonnull
public static RecordTemplate edgeToRelationship(@Nonnull Node source, @Nonnull Node destination,
@Nonnull Relationship relationship) {
final String className = relationship.type();
final DataMap dataMap = relationshipDataMap(source, destination, relationship);
return RecordUtils.toRecordTemplate(className, dataMap);
}
@Nonnull
private static DataMap relationshipDataMap(@Nonnull Node source, @Nonnull Node destination,
@Nonnull Relationship relationship) {
final DataMap dataMap = new DataMap(relationship.asMap());
dataMap.put(SOURCE_FIELD, source.get(URN_FIELD).asString());
dataMap.put(DESTINATION_FIELD, destination.get(URN_FIELD).asString());
return dataMap;
}
// Gets the Node/Edge type from an Entity/Relationship, using the backtick-quoted FQCN
@Nonnull
public static String getType(@Nullable RecordTemplate record) {
return record == null ? "" : getType(record.getClass());
}
// Gets the Node/Edge type from an Entity/Relationship class, return empty string if null
@Nonnull
public static String getTypeOrEmptyString(@Nullable Class<? extends RecordTemplate> recordClass) {
return recordClass == null ? "" : ":" + getType(recordClass);
}
// Gets the Node/Edge type from an Entity/Relationship class, using the backtick-quoted FQCN
@Nonnull
public static String getType(@Nonnull Class<? extends RecordTemplate> recordClass) {
return new StringBuilder("`").append(recordClass.getCanonicalName()).append("`").toString();
}
} | 1,955 |
892 | <reponame>westonsteimel/advisory-database-github
{
"schema_version": "1.2.0",
"id": "GHSA-hwgx-6pfh-c8pv",
"modified": "2022-05-01T07:43:38Z",
"published": "2022-05-01T07:43:38Z",
"aliases": [
"CVE-2006-6993"
],
"details": "Multiple SQL injection vulnerabilities in pages/addcomment2.php in Neuron Blog 1.1 allow remote attackers to inject arbitrary SQL commands via the (1) commentname, (2) commentmail, (3) commentwebsite, and (4) comment parameters. NOTE: the provenance of this information is unknown; the details are obtained solely from third party information.",
"severity": [
],
"affected": [
],
"references": [
{
"type": "ADVISORY",
"url": "https://nvd.nist.gov/vuln/detail/CVE-2006-6993"
},
{
"type": "WEB",
"url": "http://secunia.com/advisories/19703"
},
{
"type": "WEB",
"url": "http://www.vupen.com/english/advisories/2006/1406"
}
],
"database_specific": {
"cwe_ids": [
],
"severity": "HIGH",
"github_reviewed": false
}
} | 440 |
466 | //---------------------------------------------------------------------------
// Greenplum Database
// Copyright (C) 2013 Pivotal, Inc.
//
// @filename:
// CLogicalExternalGet.cpp
//
// @doc:
// Implementation of external get
//---------------------------------------------------------------------------
#include "gpopt/operators/CLogicalExternalGet.h"
#include "gpos/base.h"
#include "gpopt/base/CColRefSet.h"
#include "gpopt/base/CColRefSetIter.h"
#include "gpopt/base/CColRefTable.h"
#include "gpopt/base/COptCtxt.h"
#include "gpopt/base/CUtils.h"
#include "gpopt/metadata/CName.h"
#include "gpopt/metadata/CTableDescriptor.h"
using namespace gpopt;
//---------------------------------------------------------------------------
//	@function:
//		CLogicalExternalGet::CLogicalExternalGet
//
//	@doc:
//		Ctor - for pattern; constructs an operator with no table descriptor
//		or alias, used only for pattern matching in xforms
//
//---------------------------------------------------------------------------
CLogicalExternalGet::CLogicalExternalGet(CMemoryPool *mp) : CLogicalGet(mp)
{
}
//---------------------------------------------------------------------------
//	@function:
//		CLogicalExternalGet::CLogicalExternalGet
//
//	@doc:
//		Ctor taking the table alias and descriptor; output columns are
//		derived by the base class
//
//---------------------------------------------------------------------------
CLogicalExternalGet::CLogicalExternalGet(CMemoryPool *mp,
										 const CName *pnameAlias,
										 CTableDescriptor *ptabdesc)
	: CLogicalGet(mp, pnameAlias, ptabdesc)
{
}
//---------------------------------------------------------------------------
//	@function:
//		CLogicalExternalGet::CLogicalExternalGet
//
//	@doc:
//		Ctor taking an explicit output column array in addition to the
//		alias and table descriptor
//
//---------------------------------------------------------------------------
CLogicalExternalGet::CLogicalExternalGet(CMemoryPool *mp,
										 const CName *pnameAlias,
										 CTableDescriptor *ptabdesc,
										 CColRefArray *pdrgpcrOutput)
	: CLogicalGet(mp, pnameAlias, ptabdesc, pdrgpcrOutput)
{
}
//---------------------------------------------------------------------------
//	@function:
//		CLogicalExternalGet::Matches
//
//	@doc:
//		Match function on operator level
//
//---------------------------------------------------------------------------
BOOL
CLogicalExternalGet::Matches(COperator *pop) const
{
	if (pop->Eopid() != Eopid())
	{
		return false;
	}
	CLogicalExternalGet *popGet = CLogicalExternalGet::PopConvert(pop);

	// operators match when they share the same table descriptor (pointer
	// identity) and produce equal output column arrays
	return Ptabdesc() == popGet->Ptabdesc() &&
		   PdrgpcrOutput()->Equals(popGet->PdrgpcrOutput());
}
//---------------------------------------------------------------------------
//	@function:
//		CLogicalExternalGet::PopCopyWithRemappedColumns
//
//	@doc:
//		Return a copy of the operator with remapped columns
//
//---------------------------------------------------------------------------
COperator *
CLogicalExternalGet::PopCopyWithRemappedColumns(
	CMemoryPool *mp, UlongToColRefMap *colref_mapping, BOOL must_exist)
{
	CColRefArray *pdrgpcrOutput = NULL;
	if (must_exist)
	{
		// strict remap: use the remap-and-create variant of the helper
		pdrgpcrOutput =
			CUtils::PdrgpcrRemapAndCreate(mp, PdrgpcrOutput(), colref_mapping);
	}
	else
	{
		// lenient remap: must_exist is false on this branch
		pdrgpcrOutput = CUtils::PdrgpcrRemap(mp, PdrgpcrOutput(),
											 colref_mapping, must_exist);
	}
	CName *pnameAlias = GPOS_NEW(mp) CName(mp, Name());

	// the table descriptor is shared with the copy, hence the AddRef
	CTableDescriptor *ptabdesc = Ptabdesc();
	ptabdesc->AddRef();

	return GPOS_NEW(mp)
		CLogicalExternalGet(mp, pnameAlias, ptabdesc, pdrgpcrOutput);
}
//---------------------------------------------------------------------------
//	@function:
//		CLogicalExternalGet::PxfsCandidates
//
//	@doc:
//		Get candidate xforms
//
//---------------------------------------------------------------------------
CXformSet *
CLogicalExternalGet::PxfsCandidates(CMemoryPool *mp) const
{
	CXformSet *xform_set = GPOS_NEW(mp) CXformSet(mp);
	// sole implementation alternative: logical external get -> external scan
	(void) xform_set->ExchangeSet(CXform::ExfExternalGet2ExternalScan);
	return xform_set;
}
// EOF
| 1,172 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.