Dataset columns: focal_method (string, lengths 13 to 60.9k) and test_case (string, lengths 25 to 109k). Each record below pairs a focal method with its test case.
public static Duration parse(final String text) {
    try {
        final String[] parts = text.split("\\s");
        if (parts.length != 2) {
            throw new IllegalArgumentException("Expected 2 tokens, got: " + parts.length);
        }
        final long size = parseNumeric(parts[0]);
        return buildDuration(size, parts[1]);
    } catch (final Exception e) {
        throw new IllegalArgumentException("Invalid duration: '" + text + "'. " + e.getMessage(), e);
    }
}

@Test
public void shouldIncludeValidTimeUnitsInExceptionMessage() {
    // When:
    final Exception e = assertThrows(
        IllegalArgumentException.class,
        () -> parse("10 Bananas")
    );

    // Then:
    assertThat(e.getMessage(), containsString("Supported time units are: "
        + "NANOSECONDS, MICROSECONDS, MILLISECONDS, SECONDS, MINUTES, HOURS, DAYS"));
}
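The helpers parseNumeric and buildDuration are not shown in the source. A minimal sketch of what they plausibly do, consistent with the "Supported time units" message the test asserts (all names and details here are assumptions, not the project's actual code):

import java.time.Duration;
import java.util.Arrays;
import java.util.concurrent.TimeUnit;

final class DurationParseHelpers {
    // Parses the size token; the real helper may accept more formats.
    static long parseNumeric(String token) {
        return Long.parseLong(token);
    }

    // Maps the unit token onto java.util.concurrent.TimeUnit; an unknown unit yields
    // the "Supported time units are: ..." message the test checks for.
    static Duration buildDuration(long size, String unitToken) {
        try {
            TimeUnit unit = TimeUnit.valueOf(unitToken.toUpperCase());
            return Duration.ofNanos(unit.toNanos(size));
        } catch (IllegalArgumentException e) {
            throw new IllegalArgumentException("Unknown time unit: '" + unitToken
                + "'. Supported time units are: "
                + Arrays.toString(TimeUnit.values()).replaceAll("[\\[\\]]", ""));
        }
    }
}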
public static Class<?> forName(String name) throws ClassNotFoundException {
    return forName(name, getClassLoader());
}

@Test
void testForName1() throws Exception {
    assertThat(ClassUtils.forName(ClassUtilsTest.class.getName()) == ClassUtilsTest.class, is(true));
}
@Override
public N getSchedulerNode(NodeId nodeId) {
    return nodeTracker.getNode(nodeId);
}

@Test(timeout = 30000L)
public void testNodeRemovedWithAllocationTags() throws Exception {
    // Currently only can be tested against capacity scheduler.
    if (getSchedulerType().equals(SchedulerType.CAPACITY)) {
        final String testTag1 = "some-tag";
        YarnConfiguration conf = getConf();
        conf.set(YarnConfiguration.RM_PLACEMENT_CONSTRAINTS_HANDLER, "scheduler");
        MockRM rm1 = new MockRM(conf);
        rm1.start();
        MockNM nm1 = new MockNM("127.0.0.1:1234", 10240, rm1.getResourceTrackerService());
        nm1.registerNode();
        MockRMAppSubmissionData data =
            MockRMAppSubmissionData.Builder.createWithMemory(200, rm1)
                .withAppName("name")
                .withUser("user")
                .withAcls(new HashMap<>())
                .withUnmanagedAM(false)
                .withQueue("default")
                .withMaxAppAttempts(-1)
                .withCredentials(null)
                .withAppType("Test")
                .withWaitForAppAcceptedState(false)
                .withKeepContainers(true)
                .build();
        RMApp app1 = MockRMAppSubmitter.submit(rm1, data);
        MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);

        // allocate 1 container with tag1
        SchedulingRequest sr = SchedulingRequest
            .newInstance(1L, Priority.newInstance(1),
                ExecutionTypeRequest.newInstance(ExecutionType.GUARANTEED),
                Sets.newHashSet(testTag1),
                ResourceSizing.newInstance(1, Resource.newInstance(1024, 1)), null);
        AllocateRequest ar = AllocateRequest.newBuilder()
            .schedulingRequests(Lists.newArrayList(sr)).build();
        am1.allocate(ar);
        nm1.nodeHeartbeat(true);

        List<Container> allocated = new ArrayList<>();
        while (allocated.size() < 1) {
            AllocateResponse rsp = am1.allocate(new ArrayList<>(), new ArrayList<>());
            allocated.addAll(rsp.getAllocatedContainers());
            nm1.nodeHeartbeat(true);
            Thread.sleep(1000);
        }

        Assert.assertEquals(1, allocated.size());
        Set<Container> containers = allocated.stream()
            .filter(container -> container.getAllocationRequestId() == 1L)
            .collect(Collectors.toSet());
        Assert.assertNotNull(containers);
        Assert.assertEquals(1, containers.size());
        ContainerId cid = containers.iterator().next().getId();

        // mock container start
        rm1.getRMContext().getScheduler()
            .getSchedulerNode(nm1.getNodeId()).containerStarted(cid);

        // verifies the allocation is made with correct number of tags
        Map<String, Long> nodeTags = rm1.getRMContext()
            .getAllocationTagsManager()
            .getAllocationTagsWithCount(nm1.getNodeId());
        Assert.assertNotNull(nodeTags.get(testTag1));
        Assert.assertEquals(1, nodeTags.get(testTag1).intValue());

        // remove the node
        RMNode node1 = MockNodes.newNodeInfo(
            0, Resources.createResource(nm1.getMemory()), 1, "127.0.0.1", 1234);
        rm1.getRMContext().getScheduler().handle(new NodeRemovedSchedulerEvent(node1));

        // Once the node is removed, the tag should be removed immediately
        nodeTags = rm1.getRMContext().getAllocationTagsManager()
            .getAllocationTagsWithCount(nm1.getNodeId());
        Assert.assertNull(nodeTags);
    }
}
public OpenAPI filter(OpenAPI openAPI, OpenAPISpecFilter filter, Map<String, List<String>> params,
                      Map<String, String> cookies, Map<String, List<String>> headers) {
    OpenAPI filteredOpenAPI = filterOpenAPI(filter, openAPI, params, cookies, headers);
    if (filteredOpenAPI == null) {
        return filteredOpenAPI;
    }

    OpenAPI clone = new OpenAPI();
    clone.info(filteredOpenAPI.getInfo());
    clone.openapi(filteredOpenAPI.getOpenapi());
    clone.jsonSchemaDialect(filteredOpenAPI.getJsonSchemaDialect());
    clone.setSpecVersion(filteredOpenAPI.getSpecVersion());
    clone.setExtensions(filteredOpenAPI.getExtensions());
    clone.setExternalDocs(filteredOpenAPI.getExternalDocs());
    clone.setSecurity(filteredOpenAPI.getSecurity());
    clone.setServers(filteredOpenAPI.getServers());
    clone.tags(filteredOpenAPI.getTags() == null ? null : new ArrayList<>(openAPI.getTags()));

    final Set<String> allowedTags = new HashSet<>();
    final Set<String> filteredTags = new HashSet<>();
    Paths clonedPaths = new Paths();
    if (filteredOpenAPI.getPaths() != null) {
        for (String resourcePath : filteredOpenAPI.getPaths().keySet()) {
            PathItem pathItem = filteredOpenAPI.getPaths().get(resourcePath);
            PathItem filteredPathItem = filterPathItem(filter, pathItem, resourcePath, params, cookies, headers);
            PathItem clonedPathItem = cloneFilteredPathItem(filter, filteredPathItem, resourcePath,
                params, cookies, headers, allowedTags, filteredTags);
            if (clonedPathItem != null) {
                if (!clonedPathItem.readOperations().isEmpty()) {
                    clonedPaths.addPathItem(resourcePath, clonedPathItem);
                }
            }
        }
        clone.paths(clonedPaths);
    }

    filteredTags.removeAll(allowedTags);
    final List<Tag> tags = clone.getTags();
    if (tags != null && !filteredTags.isEmpty()) {
        tags.removeIf(tag -> filteredTags.contains(tag.getName()));
        if (clone.getTags().isEmpty()) {
            clone.setTags(null);
        }
    }

    if (filteredOpenAPI.getWebhooks() != null) {
        for (String resourcePath : filteredOpenAPI.getWebhooks().keySet()) {
            PathItem pathItem = filteredOpenAPI.getWebhooks().get(resourcePath);
            PathItem filteredPathItem = filterPathItem(filter, pathItem, resourcePath, params, cookies, headers);
            PathItem clonedPathItem = cloneFilteredPathItem(filter, filteredPathItem, resourcePath,
                params, cookies, headers, allowedTags, filteredTags);
            if (clonedPathItem != null) {
                if (!clonedPathItem.readOperations().isEmpty()) {
                    clone.addWebhooks(resourcePath, clonedPathItem);
                }
            }
        }
    }

    if (filteredOpenAPI.getComponents() != null) {
        clone.components(new Components());
        clone.getComponents().setSchemas(filterComponentsSchema(filter,
            filteredOpenAPI.getComponents().getSchemas(), params, cookies, headers));
        clone.getComponents().setSecuritySchemes(filteredOpenAPI.getComponents().getSecuritySchemes());
        clone.getComponents().setCallbacks(filteredOpenAPI.getComponents().getCallbacks());
        clone.getComponents().setExamples(filteredOpenAPI.getComponents().getExamples());
        clone.getComponents().setExtensions(filteredOpenAPI.getComponents().getExtensions());
        clone.getComponents().setHeaders(filteredOpenAPI.getComponents().getHeaders());
        clone.getComponents().setLinks(filteredOpenAPI.getComponents().getLinks());
        clone.getComponents().setParameters(filteredOpenAPI.getComponents().getParameters());
        clone.getComponents().setRequestBodies(filteredOpenAPI.getComponents().getRequestBodies());
        clone.getComponents().setResponses(filteredOpenAPI.getComponents().getResponses());
        clone.getComponents().setPathItems(filteredOpenAPI.getComponents().getPathItems());
    }

    if (filter.isRemovingUnreferencedDefinitions()) {
        clone = removeBrokenReferenceDefinitions(clone);
    }
    return clone;
}

@Test
public void shouldRemoveBrokenNestedRefsKeepComposedSchemas() throws IOException {
    final OpenAPI openAPI = getOpenAPI31(RESOURCE_PATH_COMPOSED_SCHEMA);
    final RemoveUnreferencedDefinitionsFilter remover = new RemoveUnreferencedDefinitionsFilter();
    final OpenAPI filtered = new SpecFilter().filter(openAPI, remover, null, null, null);
    assertEquals(filtered.getComponents().getSchemas().size(), 4,
        "Expected to have parent and abstract child with both implementation schemas");
    assertTrue(filtered.getComponents().getSchemas().containsKey("SomeChild1ImplObject"),
        "Schemas should contain child 1 implementation");
    assertTrue(filtered.getComponents().getSchemas().containsKey("SomeChild2ImplObject"),
        "Schemas should contain child 2 implementation");
    assertTrue(filtered.getComponents().getSchemas().containsKey("SomeChildObject"),
        "Schemas should contain child abstract parent");
}
@Override
public HashSlotCursor12byteKey cursor() {
    return new CursorIntKey2();
}

@Test(expected = AssertionError.class)
public void testCursor_advance_whenDisposed() {
    HashSlotCursor12byteKey cursor = hsa.cursor();
    hsa.dispose();
    cursor.advance();
}
public static SmartFilterTestExecutionResultDTO execSmartFilterTest(SmartFilterTestExecutionDTO execData) {
    Predicate<TopicMessageDTO> predicate;
    try {
        predicate = MessageFilters.celScriptFilter(execData.getFilterCode());
    } catch (Exception e) {
        log.info("Smart filter '{}' compilation error", execData.getFilterCode(), e);
        return new SmartFilterTestExecutionResultDTO()
            .error("Compilation error : " + e.getMessage());
    }
    try {
        var result = predicate.test(
            new TopicMessageDTO()
                .key(execData.getKey())
                .content(execData.getValue())
                .headers(execData.getHeaders())
                .offset(execData.getOffset())
                .partition(execData.getPartition())
                .timestamp(
                    Optional.ofNullable(execData.getTimestampMs())
                        .map(ts -> OffsetDateTime.ofInstant(Instant.ofEpochMilli(ts), ZoneOffset.UTC))
                        .orElse(null))
        );
        return new SmartFilterTestExecutionResultDTO()
            .result(result);
    } catch (Exception e) {
        log.info("Smart filter {} execution error", execData, e);
        return new SmartFilterTestExecutionResultDTO()
            .error("Execution error : " + e.getMessage());
    }
}

@Test
void execSmartFilterTestReturnsExecutionResult() {
    var params = new SmartFilterTestExecutionDTO()
        .filterCode("has(record.key) && has(record.value) && record.headers.size() != 0 "
            + "&& has(record.timestampMs) && has(record.offset)")
        .key("1234")
        .value("{ \"some\" : \"value\" } ")
        .headers(Map.of("h1", "hv1"))
        .offset(12345L)
        .timestampMs(System.currentTimeMillis())
        .partition(1);

    var actual = execSmartFilterTest(params);
    assertThat(actual.getError()).isNull();
    assertThat(actual.getResult()).isTrue();

    params.setFilterCode("false");
    actual = execSmartFilterTest(params);
    assertThat(actual.getError()).isNull();
    assertThat(actual.getResult()).isFalse();
}
public long getAndSet(long newValue) {
    return getAndSetVal(newValue);
}

@Test
public void testGetAndSet() {
    PaddedAtomicLong counter = new PaddedAtomicLong(1);
    long result = counter.getAndSet(2);
    assertEquals(1, result);
    assertEquals(2, counter.get());
}
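The name PaddedAtomicLong suggests cache-line padding; a minimal sketch of that idea (an assumption about intent, not the project's actual layout):

import java.util.concurrent.atomic.AtomicLong;

// The trailing fields exist only to occupy space so the hot counter does not
// share a CPU cache line with unrelated data (false sharing).
class PaddedAtomicLongSketch extends AtomicLong {
    volatile long p1, p2, p3, p4, p5, p6, p7 = 7L;

    // Reading the pads keeps aggressive JITs from eliminating them.
    long sumPaddingToPreventOptimisation() {
        return p1 + p2 + p3 + p4 + p5 + p6 + p7;
    }
}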
@Override
public KubevirtIpPool ipPool() {
    return ipPool;
}

@Test
public void testIpAllocationAndRelease() throws Exception {
    KubevirtIpPool ipPool1 = network1.ipPool();
    IpAddress ip = ipPool1.allocateIp();
    assertEquals(100, ipPool1.availableIps().size());
    assertEquals(1, ipPool1.allocatedIps().size());
    assertEquals(IpAddress.valueOf("10.10.10.100"), ip);

    ipPool1.releaseIp(ip);
    assertEquals(101, ipPool1.availableIps().size());
    assertEquals(0, ipPool1.allocatedIps().size());
    assertTrue(ipPool1.availableIps().contains(ip));
}
static void writeResponse(Configuration conf, Writer out, String format, String propertyName)
        throws IOException, IllegalArgumentException, BadFormatException {
    if (FORMAT_JSON.equals(format)) {
        Configuration.dumpConfiguration(conf, propertyName, out);
    } else if (FORMAT_XML.equals(format)) {
        conf.writeXml(propertyName, out, conf);
    } else {
        throw new BadFormatException("Bad format: " + format);
    }
}

@Test
@SuppressWarnings("unchecked")
public void testWriteJson() throws Exception {
    StringWriter sw = new StringWriter();
    ConfServlet.writeResponse(getTestConf(), sw, "json");
    String json = sw.toString();
    boolean foundSetting = false;
    Object parsed = JSON.parse(json);
    Object[] properties = ((Map<String, Object[]>) parsed).get("properties");
    for (Object o : properties) {
        Map<String, Object> propertyInfo = (Map<String, Object>) o;
        String key = (String) propertyInfo.get("key");
        String val = (String) propertyInfo.get("value");
        String resource = (String) propertyInfo.get("resource");
        System.err.println("k: " + key + " v: " + val + " r: " + resource);
        if (TEST_KEY.equals(key) && TEST_VAL.equals(val)
                && "programmatically".equals(resource)) {
            foundSetting = true;
        }
    }
    assertTrue(foundSetting);
}
protected void replaceJettyXmlIfItBelongsToADifferentVersion(File jettyConfig) throws IOException {
    if (Files.readString(jettyConfig.toPath(), UTF_8).contains(JETTY_CONFIG_VERSION)) return;
    replaceFileWithPackagedOne(jettyConfig);
}

@Test
public void shouldReplaceJettyXmlIfItDoesNotContainCorrespondingJettyVersionNumber(@TempDir Path temporaryFolder) throws IOException {
    Path jettyXml = Files.createFile(temporaryFolder.resolve("jetty.xml"));
    when(systemEnvironment.getJettyConfigFile()).thenReturn(jettyXml.toFile());

    String originalContent = "jetty-v6.2.3\nsome other local changes";
    Files.writeString(jettyXml, originalContent, UTF_8);
    jettyServer.replaceJettyXmlIfItBelongsToADifferentVersion(systemEnvironment.getJettyConfigFile());

    try (InputStream configStream = Objects.requireNonNull(getClass().getResourceAsStream("config/jetty.xml"))) {
        assertThat(Files.readString(jettyXml, UTF_8)).isEqualTo(new String(configStream.readAllBytes(), UTF_8));
    }
}
protected String decideSource(MappedMessage cef, RawMessage raw) {
    // Try getting the host name from the CEF extension "deviceAddress"/"dvc"
    final Map<String, Object> fields = cef.mappedExtensions();
    if (fields != null && !fields.isEmpty()) {
        final String deviceAddress = (String) fields.getOrDefault(
            CEFMapping.dvc.getFullName(), fields.get(CEFMapping.dvc.getKeyName()));
        if (!isNullOrEmpty(deviceAddress)) {
            return deviceAddress;
        }
    }

    // Try getting the hostname from the CEF message metadata (e.g. syslog)
    if (!isNullOrEmpty(cef.host())) {
        return cef.host();
    }

    // Use raw message source information if we were not able to parse a source from the CEF extensions.
    final ResolvableInetSocketAddress address = raw.getRemoteAddress();
    final InetSocketAddress remoteAddress;
    if (address == null) {
        remoteAddress = null;
    } else {
        remoteAddress = address.getInetSocketAddress();
    }
    return remoteAddress == null ? "unknown" : remoteAddress.getAddress().toString();
}

@Test
public void decideSourceWithFullDeviceAddressReturnsExtensionValue() throws Exception {
    final MappedMessage cefMessage = mock(MappedMessage.class);
    when(cefMessage.mappedExtensions()).thenReturn(Collections.singletonMap("deviceAddress", "128.66.23.42"));

    final RawMessage rawMessage = new RawMessage(new byte[0], new InetSocketAddress("example.com", 12345));

    assertEquals("128.66.23.42", codec.decideSource(cefMessage, rawMessage));
}
@Override
public boolean isProjectVisibilitySynchronizationActivated() {
    return findManagedProjectService()
        .map(ManagedProjectService::isProjectVisibilitySynchronizationActivated)
        .orElse(false);
}

@Test
public void isProjectVisibilitySynchronizationActivated_whenManagedInstanceServices_shouldDelegatesToRightService() {
    DelegatingManagedServices managedInstanceService =
        new DelegatingManagedServices(Set.of(new NeverManagedInstanceService(), new AlwaysManagedInstanceService()));
    assertThat(managedInstanceService.isProjectVisibilitySynchronizationActivated()).isTrue();
}
public List<B2UploadPartResponse> list(final String fileid) throws BackgroundException {
    if (log.isInfoEnabled()) {
        log.info(String.format("List completed parts of file %s", fileid));
    }
    // This operation lists the parts that have been uploaded for a specific multipart upload.
    try {
        // Completed parts
        final List<B2UploadPartResponse> completed = new ArrayList<B2UploadPartResponse>();
        Integer startPartNumber = null;
        do {
            final B2ListPartsResponse response = session.getClient().listParts(fileid, startPartNumber, null);
            completed.addAll(response.getFiles());
            startPartNumber = response.getNextPartNumber();
        } while (startPartNumber != null);
        return completed;
    } catch (B2ApiException e) {
        throw new B2ExceptionMappingService(this.fileid).map(e);
    } catch (IOException e) {
        throw new DefaultIOExceptionMappingService().map(e);
    }
}

@Test
public void testList() throws Exception {
    final Path bucket = new Path("test-cyberduck", EnumSet.of(Path.Type.directory, Path.Type.volume));
    final Path file = new Path(bucket, UUID.randomUUID().toString(), EnumSet.of(Path.Type.file));
    final B2StartLargeFileResponse startResponse = session.getClient().startLargeFileUpload(
        new B2VersionIdProvider(session).getVersionId(bucket),
        file.getName(), null, Collections.emptyMap());
    assertTrue(new B2LargeUploadPartService(session, new B2VersionIdProvider(session))
        .list(startResponse.getFileId()).isEmpty());
    session.getClient().cancelLargeFileUpload(startResponse.getFileId());
}
@Override
public MergedResult merge(final List<QueryResult> queryResults, final SQLStatementContext sqlStatementContext,
                          final ShardingSphereDatabase database, final ConnectionContext connectionContext) throws SQLException {
    SQLStatement dalStatement = sqlStatementContext.getSqlStatement();
    if (dalStatement instanceof MySQLShowDatabasesStatement) {
        return new LocalDataMergedResult(Collections.singleton(new LocalDataQueryResultRow(databaseName)));
    }
    ShardingSphereSchema schema = getSchema(sqlStatementContext, database);
    if (dalStatement instanceof MySQLShowTablesStatement) {
        return new LogicTablesMergedResult(shardingRule, sqlStatementContext, schema, queryResults);
    }
    if (dalStatement instanceof MySQLShowTableStatusStatement) {
        return new ShowTableStatusMergedResult(shardingRule, sqlStatementContext, schema, queryResults);
    }
    if (dalStatement instanceof MySQLShowIndexStatement) {
        return new ShowIndexMergedResult(shardingRule, sqlStatementContext, schema, queryResults);
    }
    if (dalStatement instanceof MySQLShowCreateTableStatement) {
        return new ShowCreateTableMergedResult(shardingRule, sqlStatementContext, schema, queryResults);
    }
    return new TransparentMergedResult(queryResults.get(0));
}

@Test
void assertMergeForShowTableStatusStatement() throws SQLException {
    DALStatement dalStatement = new MySQLShowTableStatusStatement();
    SQLStatementContext sqlStatementContext = mockSQLStatementContext(dalStatement);
    ShardingDALResultMerger resultMerger = new ShardingDALResultMerger(DefaultDatabase.LOGIC_NAME, mock(ShardingRule.class));
    ShardingSphereDatabase database = mock(ShardingSphereDatabase.class, RETURNS_DEEP_STUBS);
    when(database.getName()).thenReturn(DefaultDatabase.LOGIC_NAME);
    assertThat(resultMerger.merge(queryResults, sqlStatementContext, database, mock(ConnectionContext.class)),
        instanceOf(ShowTableStatusMergedResult.class));
}
@Override
public void close() {
    if (!isVersionCommitted) {
        job.setVersion(initialJobVersion);
    }
}

@Test
void testJobVersionerOnRollbackVersionIsRestored() {
    // GIVEN
    Job job = aScheduledJob().withVersion(5).build();

    // WHEN
    JobVersioner jobVersioner = new JobVersioner(job);
    // THEN
    assertThat(job).hasVersion(6);

    // WHEN
    jobVersioner.close();
    // THEN
    assertThat(job).hasVersion(5);
}
public static void main(String[] args) throws Exception {
    DateFormat dateFormat = new SimpleDateFormat(
        FixedDateFormat.FixedFormat.ISO8601_OFFSET_DATE_TIME_HHMM.getPattern());
    Thread.setDefaultUncaughtExceptionHandler((thread, exception) -> {
        System.out.println(String.format("%s [%s] error Uncaught exception in thread %s: %s",
            dateFormat.format(new Date()), thread.getContextClassLoader(),
            thread.getName(), exception.getMessage()));
        exception.printStackTrace(System.out);
    });

    BrokerStarter starter = new BrokerStarter();
    Runtime.getRuntime().addShutdownHook(
        new Thread(() -> {
            try {
                starter.shutdown();
            } catch (Throwable t) {
                log.error("Error while shutting down Pulsar service", t);
            } finally {
                LogManager.shutdown();
            }
        }, "pulsar-service-shutdown")
    );

    PulsarByteBufAllocator.registerOOMListener(oomException -> {
        if (starter.brokerConfig != null && starter.brokerConfig.isSkipBrokerShutdownOnOOM()) {
            log.error("-- Received OOM exception: {}", oomException.getMessage(), oomException);
        } else {
            log.error("-- Shutting down - Received OOM exception: {}", oomException.getMessage(), oomException);
            if (starter.pulsarService != null) {
                starter.pulsarService.shutdownNow();
            }
        }
    });

    try {
        int start = starter.start(args);
        if (start != 0) {
            System.exit(start);
        }
    } catch (Throwable t) {
        log.error("Failed to start pulsar service.", t);
        ShutdownUtil.triggerImmediateForcefulShutdown();
    } finally {
        starter.join();
    }
}

@Test
public void testMainGenerateDocs() throws Exception {
    PrintStream oldStream = System.out;
    try {
        ByteArrayOutputStream baoStream = new ByteArrayOutputStream();
        System.setOut(new PrintStream(baoStream));

        Class argumentsClass = Class.forName("org.apache.pulsar.PulsarBrokerStarter$StarterArguments");

        PulsarBrokerStarter.main(new String[]{"-g"});

        String message = baoStream.toString();

        Field[] fields = argumentsClass.getDeclaredFields();
        for (Field field : fields) {
            boolean fieldHasAnno = field.isAnnotationPresent(Option.class);
            if (fieldHasAnno) {
                Option fieldAnno = field.getAnnotation(Option.class);
                String[] names = fieldAnno.names();
                String nameStr = Arrays.asList(names).toString();
                nameStr = nameStr.substring(1, nameStr.length() - 1);
                assertTrue(message.indexOf(nameStr) > 0);
            }
        }
    } finally {
        System.setOut(oldStream);
    }
}
public void installIntents(Optional<IntentData> toUninstall, Optional<IntentData> toInstall) {
    // If there are no Intents to be uninstalled or installed, ignore the request.
    if (!toUninstall.isPresent() && !toInstall.isPresent()) {
        return;
    }

    // Classify installable Intents to different installers.
    ArrayListMultimap<IntentInstaller, Intent> uninstallInstallers;
    ArrayListMultimap<IntentInstaller, Intent> installInstallers;
    Set<IntentInstaller> allInstallers = Sets.newHashSet();

    if (toUninstall.isPresent()) {
        uninstallInstallers = getInstallers(toUninstall.get());
        allInstallers.addAll(uninstallInstallers.keySet());
    } else {
        uninstallInstallers = ArrayListMultimap.create();
    }

    if (toInstall.isPresent()) {
        installInstallers = getInstallers(toInstall.get());
        allInstallers.addAll(installInstallers.keySet());
    } else {
        installInstallers = ArrayListMultimap.create();
    }

    // Generates an installation context for the high level Intent.
    IntentInstallationContext installationContext =
        new IntentInstallationContext(toUninstall.orElse(null), toInstall.orElse(null));

    // Generates different operation context for different installable Intents.
    Map<IntentInstaller, IntentOperationContext> contexts = Maps.newHashMap();
    allInstallers.forEach(installer -> {
        List<Intent> intentsToUninstall = uninstallInstallers.get(installer);
        List<Intent> intentsToInstall = installInstallers.get(installer);

        // Connect context to high level installation context
        IntentOperationContext context =
            new IntentOperationContext(intentsToUninstall, intentsToInstall, installationContext);
        installationContext.addPendingContext(context);
        contexts.put(installer, context);
    });

    // Apply contexts to installers
    contexts.forEach((installer, context) -> {
        installer.apply(context);
    });
}

@Test
public void testInstallNothing() {
    installCoordinator.installIntents(Optional.empty(), Optional.empty());
    assertNull(intentStore.newData);
}
static void runHook() {
    for (String filename : FILES) {
        new File(filename).delete();
    }
}

@Test
public void testSimulateTriggerDeleteFileOnExitHook() {
    // simulate app exit
    DeleteFileOnExitHook.runHook();

    File[] files = new File(HOOK_TEST_TMP).listFiles(new FilenameFilter() {
        @Override
        public boolean accept(File dir, String name) {
            return name.startsWith(DiskFileUpload.prefix);
        }
    });

    assertEquals(0, files.length);
}
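How runHook gets invoked is not shown; presumably the class registers it once as a JVM shutdown hook, and the test simulates exit by calling it directly. A sketch of that registration (an assumption, not the library's confirmed code):

// Registered once at class-initialization time so tracked temp files are
// deleted when the JVM exits; the test above calls runHook() directly instead.
static {
    Runtime.getRuntime().addShutdownHook(
        new Thread(DeleteFileOnExitHook::runHook, "delete-file-on-exit-hook"));
}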
@Override
public void lock() {
    try {
        lock(-1, null, false);
    } catch (InterruptedException e) {
        throw new IllegalStateException();
    }
}

@Test
public void testIsHeldByCurrentThread() {
    RLock lock = redisson.getLock("lock");
    Assertions.assertFalse(lock.isHeldByCurrentThread());
    lock.lock();
    Assertions.assertTrue(lock.isHeldByCurrentThread());
    lock.unlock();
    Assertions.assertFalse(lock.isHeldByCurrentThread());
}
@Override
public boolean addClass(final Class<?> stepClass) {
    if (stepClasses.contains(stepClass)) {
        return true;
    }

    checkNoComponentAnnotations(stepClass);

    if (hasCucumberContextConfiguration(stepClass)) {
        checkOnlyOneClassHasCucumberContextConfiguration(stepClass);
        withCucumberContextConfiguration = stepClass;
    }

    stepClasses.add(stepClass);
    return true;
}

@Test
void shouldBeStoppableWhenFacedWithInvalidConfiguration() {
    final ObjectFactory factory = new SpringFactory();
    factory.addClass(WithEmptySpringAnnotations.class);

    IllegalStateException exception = assertThrows(IllegalStateException.class, factory::start);
    assertThat(exception.getMessage(),
        containsString("DelegatingSmartContextLoader was unable to detect defaults"));
    assertDoesNotThrow(factory::stop);
}
@Override
public int getPartitionId(Object object) {
    return client.getClientPartitionService().getPartitionId(object);
}

@Test
public void testPartitionId() {
    int partitionId = context.getPartitionId("myKey");
    assertTrue(partitionId >= 0);
}
public static FEEL_1_1Parser parse(FEELEventListenersManager eventsManager, String source,
                                   Map<String, Type> inputVariableTypes, Map<String, Object> inputVariables,
                                   Collection<FEELFunction> additionalFunctions, List<FEELProfile> profiles,
                                   FEELTypeRegistry typeRegistry) {
    CharStream input = CharStreams.fromString(source);
    FEEL_1_1Lexer lexer = new FEEL_1_1Lexer(input);
    CommonTokenStream tokens = new CommonTokenStream(lexer);
    FEEL_1_1Parser parser = new FEEL_1_1Parser(tokens);

    ParserHelper parserHelper = new ParserHelper(eventsManager);
    additionalFunctions.forEach(f -> parserHelper.getSymbolTable().getBuiltInScope().define(f.getSymbol()));
    parser.setHelper(parserHelper);
    parser.setErrorHandler(new FEELErrorHandler());
    parser.removeErrorListeners(); // removes the error listener that prints to the console
    parser.addErrorListener(new FEELParserErrorListener(eventsManager));

    // pre-loads the parser with symbols
    defineVariables(inputVariableTypes, inputVariables, parser);
    if (typeRegistry != null) {
        parserHelper.setTypeRegistry(typeRegistry);
    }
    return parser;
}

@Test
void sub1() {
    String inputExpression = "(y - 5) ** 3";
    BaseNode infix = parse(inputExpression, mapOf(entry("y", BuiltInType.NUMBER)));

    assertThat(infix).isInstanceOf(InfixOpNode.class);
    assertThat(infix.getResultType()).isEqualTo(BuiltInType.NUMBER);
    assertThat(infix.getText()).isEqualTo(inputExpression);

    InfixOpNode sub = (InfixOpNode) infix;
    assertThat(sub.getLeft()).isInstanceOf(InfixOpNode.class);
    assertThat(sub.getLeft().getText()).isEqualTo("y - 5");
    assertThat(sub.getOperator()).isEqualTo(InfixOperator.POW);
    assertThat(sub.getRight()).isInstanceOf(NumberNode.class);
    assertThat(sub.getRight().getText()).isEqualTo("3");

    InfixOpNode mult = (InfixOpNode) sub.getLeft();
    assertThat(mult.getLeft()).isInstanceOf(NameRefNode.class);
    assertThat(mult.getLeft().getText()).isEqualTo("y");
    assertThat(mult.getOperator()).isEqualTo(InfixOperator.SUB);
    assertThat(mult.getRight()).isInstanceOf(NumberNode.class);
    assertThat(mult.getRight().getText()).isEqualTo("5");
}
@Override
public boolean isGroupManaged(DbSession dbSession, String groupUuid) {
    return findManagedInstanceService()
        .map(managedInstanceService -> managedInstanceService.isGroupManaged(dbSession, groupUuid))
        .orElse(false);
}

@Test
public void isGroupManaged_delegatesToRightService_andPropagateAnswer() {
    DelegatingManagedServices managedInstanceService =
        new DelegatingManagedServices(Set.of(new NeverManagedInstanceService(), new AlwaysManagedInstanceService()));
    assertThat(managedInstanceService.isGroupManaged(dbSession, "whatever")).isTrue();
}
@Experimental
public static Configuration forReporter(Configuration configuration, String reporterName) {
    return new DelegatingConfiguration(
        configuration, ConfigConstants.METRICS_REPORTER_PREFIX + reporterName + ".");
}

@Test
void testForReporterRead() {
    Configuration configuration = new Configuration();
    configuration.set(FULL_OPTION, "value");

    assertThat(MetricOptions.forReporter(configuration, "my_reporter").get(SUB_OPTION))
        .isEqualTo("value");
}
public static <T> Iterator<T> prepend(T prepend, @Nonnull Iterator<? extends T> iterator) {
    checkNotNull(iterator, "iterator cannot be null.");
    return new PrependIterator<>(prepend, iterator);
}

@Test
public void prependNullToEmptyIterator() {
    var actual = IterableUtil.prepend(null, Collections.emptyIterator());
    assertIteratorsEquals(Collections.emptyList(), actual);
}
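PrependIterator itself is not shown. Judging by the test, prepending null to an empty iterator yields an empty iterator, so a null prefix is apparently skipped; a minimal sketch under that assumption (all names here are assumptions):

import java.util.Iterator;

// Yields the prefix element once (skipping it when null, as the test implies),
// then delegates to the wrapped iterator.
final class PrependIteratorSketch<T> implements Iterator<T> {
    private final Iterator<? extends T> delegate;
    private T prefix;

    PrependIteratorSketch(T prefix, Iterator<? extends T> delegate) {
        this.prefix = prefix;
        this.delegate = delegate;
    }

    @Override
    public boolean hasNext() {
        return prefix != null || delegate.hasNext();
    }

    @Override
    public T next() {
        if (prefix != null) {
            T value = prefix;
            prefix = null; // consumed; also allows GC
            return value;
        }
        return delegate.next();
    }
}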
public int add(Object o) {
    HollowTypeMapper typeMapper = getTypeMapper(o.getClass(), null, null);
    return typeMapper.write(o);
}

@Test
public void testNullElements() {
    HollowObjectMapper mapper = new HollowObjectMapper(writeStateEngine);

    // Lists cannot contain null elements
    try {
        mapper.add(new TypeWithList("a", null, "c"));
        Assert.fail("NullPointerException not thrown from List containing null elements");
    } catch (NullPointerException e) {
        String m = e.getMessage();
        Assert.assertNotNull(m);
        Assert.assertTrue(m.contains("Lists"));
    }

    // Sets cannot contain null elements
    try {
        mapper.add(new TypeWithSet("a", null, "c"));
        Assert.fail("NullPointerException not thrown from Set containing null elements");
    } catch (NullPointerException e) {
        String m = e.getMessage();
        Assert.assertNotNull(m);
        Assert.assertTrue(m.contains("Sets"));
    }

    // Maps cannot contain null keys
    try {
        mapper.add(new TypeWithMap("a", "a", null, "b", "c", "c"));
        Assert.fail("NullPointerException not thrown from Map containing null keys");
    } catch (NullPointerException e) {
        String m = e.getMessage();
        Assert.assertNotNull(m);
        Assert.assertTrue(m.contains("Maps"));
        Assert.assertTrue(m.contains("key"));
    }

    // Maps cannot contain null values
    try {
        mapper.add(new TypeWithMap("a", "a", "b", null, "c", "c"));
        Assert.fail("NullPointerException not thrown from Map containing null values");
    } catch (NullPointerException e) {
        String m = e.getMessage();
        Assert.assertNotNull(m);
        Assert.assertTrue(m.contains("Maps"));
        Assert.assertTrue(m.contains("value"));
    }
}
@Deprecated
public static <T> Task<T> callable(final String name, final Callable<? extends T> callable) {
    return Task.callable(name, () -> callable.call());
}

@Test
public void testDelayTaskFailed() {
    IllegalArgumentException exception = new IllegalArgumentException("Oops!");
    final Task<Integer> task = Task.callable(() -> {
        throw exception;
    });
    final Task<Integer> taskWithDelay = task.withDelay(200, TimeUnit.MILLISECONDS);
    IllegalArgumentException actualException =
        runAndWaitException(taskWithDelay, IllegalArgumentException.class);
    assertEquals(exception, actualException);

    // Both tasks should be completed.
    assertTrue(task.isDone());
    assertTrue(taskWithDelay.isDone());

    // Both tasks should have failed.
    assertTrue(task.isFailed());
    assertTrue(taskWithDelay.isFailed());
}
public TriRpcStatus appendDescription(String description) {
    if (this.description == null) {
        return withDescription(description);
    } else {
        String newDescription = this.description + "\n" + description;
        return withDescription(newDescription);
    }
}

@Test
void appendDescription() {
    TriRpcStatus origin = TriRpcStatus.NOT_FOUND;
    TriRpcStatus withDesc = origin.appendDescription("desc0");
    TriRpcStatus withDesc2 = withDesc.appendDescription("desc1");
    Assertions.assertNull(origin.description);
    Assertions.assertTrue(withDesc2.description.contains("desc1"));
    Assertions.assertTrue(withDesc2.description.contains("desc0"));
}
@Override
public byte[] serializeValue(CoordinatorRecord record) {
    // Tombstone is represented with a null value.
    if (record.value() == null) {
        return null;
    } else {
        return MessageUtil.toVersionPrefixedBytes(
            record.value().version(),
            record.value().message()
        );
    }
}

@Test
public void testSerializeValue() {
    GroupCoordinatorRecordSerde serializer = new GroupCoordinatorRecordSerde();

    CoordinatorRecord record = new CoordinatorRecord(
        new ApiMessageAndVersion(
            new ConsumerGroupMetadataKey().setGroupId("group"),
            (short) 3
        ),
        new ApiMessageAndVersion(
            new ConsumerGroupMetadataValue().setEpoch(10),
            (short) 0
        )
    );

    assertArrayEquals(
        MessageUtil.toVersionPrefixedBytes(record.value().version(), record.value().message()),
        serializer.serializeValue(record)
    );
}
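MessageUtil.toVersionPrefixedBytes is used both by the serializer and by the test's expected value; conceptually it frames the payload with a two-byte version header. A minimal sketch of that framing (the real Kafka utility serializes the ApiMessage itself and may differ in details):

import java.nio.ByteBuffer;

// Frames a payload as [ 2-byte big-endian version | message bytes ]; a
// tombstone (null value) is encoded as null instead, as in the focal method.
static byte[] versionPrefixedBytes(short version, byte[] messageBytes) {
    ByteBuffer buffer = ByteBuffer.allocate(Short.BYTES + messageBytes.length);
    buffer.putShort(version);
    buffer.put(messageBytes);
    return buffer.array();
}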
public static Getter newMethodGetter(Object object, Getter parent, Method method, String modifier) throws Exception {
    return newGetter(object, parent, modifier, method.getReturnType(), method::invoke,
        (t, et) -> new MethodGetter(parent, method, modifier, t, et));
}

@Test
public void newMethodGetter_whenExtractingFromSimpleField_thenReturnFieldContentIsItIs() throws Exception {
    OuterObject object = new OuterObject("name");

    Getter getter = GetterFactory.newMethodGetter(object, null, outerNameMethod, null);

    String result = (String) getter.getValue(object);
    assertEquals("name", result);
}
public static <T> RBFNetwork<T> fit(T[] x, double[] y, RBF<T>[] rbf) {
    return fit(x, y, rbf, false);
}

@Test
public void test2DPlanes() {
    System.out.println("2dplanes");
    MathEx.setSeed(19650218); // to get repeatable results.
    RegressionValidations<RBFNetwork<double[]>> result = CrossValidation.regression(10, Planes.x, Planes.y,
        (xi, yi) -> RBFNetwork.fit(xi, yi, RBF.fit(xi, 20, 5.0)));
    System.out.println(result);
    assertEquals(1.7087, result.avg.rmse, 1E-4);
}
public synchronized BeamFnStateClient forApiServiceDescriptor(ApiServiceDescriptor apiServiceDescriptor)
        throws IOException {
    // We specifically are synchronized so that we only create one GrpcStateClient at a time,
    // preventing a race where multiple GrpcStateClient objects might be constructed at the same
    // time for the same ApiServiceDescriptor.
    BeamFnStateClient rval;
    synchronized (cache) {
        rval = cache.get(apiServiceDescriptor);
    }
    if (rval == null) {
        // We can't be synchronized on cache while constructing the GrpcStateClient since if the
        // connection fails, onError may be invoked from the gRPC thread which will invoke
        // closeAndCleanUp that clears the cache.
        rval = new GrpcStateClient(apiServiceDescriptor);
        synchronized (cache) {
            cache.put(apiServiceDescriptor, rval);
        }
    }
    return rval;
}

@Test
// The checker erroneously flags that the CompletableFuture is not being resolved since it is the
// result of Executor#submit.
@SuppressWarnings("FutureReturnValueIgnored")
public void testServerErrorCausesPendingAndFutureCallsToFail() throws Exception {
    BeamFnStateClient client = clientCache.forApiServiceDescriptor(apiServiceDescriptor);

    Future<CompletableFuture<StateResponse>> stateResponse =
        executor.submit(() -> client.handle(StateRequest.newBuilder().setInstructionId(SUCCESS)));
    Future<Void> serverResponse =
        executor.submit(
            () -> {
                // Wait for the client to connect.
                StreamObserver<StateResponse> outboundServerObserver = outboundServerObservers.take();
                // Send an error from the server.
                outboundServerObserver.onError(
                    new StatusRuntimeException(Status.INTERNAL.withDescription(SERVER_ERROR)));
                return null;
            });

    CompletableFuture<StateResponse> inflight = stateResponse.get();
    serverResponse.get();

    try {
        inflight.get();
        fail("Expected unsuccessful response due to server error");
    } catch (ExecutionException e) {
        assertThat(e.toString(), containsString(SERVER_ERROR));
    }
}
@NotNull
public SocialUserDO authSocialUser(Integer socialType, Integer userType, String code, String state) {
    // Prefer reading from the DB first, because a code can normally be used only once.
    // During social login, when no User is bound yet, a bind-login follows, which needs the code a second time.
    SocialUserDO socialUser = socialUserMapper.selectByTypeAndCodeAnState(socialType, code, state);
    if (socialUser != null) {
        return socialUser;
    }

    // Fetch the third-party user info.
    AuthUser authUser = socialClientService.getAuthUser(socialType, userType, code, state);
    Assert.notNull(authUser, "三方用户不能为空");

    // Persist to the DB.
    socialUser = socialUserMapper.selectByTypeAndOpenid(socialType, authUser.getUuid());
    if (socialUser == null) {
        socialUser = new SocialUserDO();
    }
    socialUser.setType(socialType).setCode(code).setState(state) // code + state must be stored so the record can be looked up later
        .setOpenid(authUser.getUuid()).setToken(authUser.getToken().getAccessToken())
        .setRawTokenInfo((toJsonString(authUser.getToken())))
        .setNickname(authUser.getNickname()).setAvatar(authUser.getAvatar())
        .setRawUserInfo(toJsonString(authUser.getRawUserInfo()));
    if (socialUser.getId() == null) {
        socialUserMapper.insert(socialUser);
    } else {
        socialUserMapper.updateById(socialUser);
    }
    return socialUser;
}

@Test
public void testAuthSocialUser_exists() {
    // Prepare parameters
    Integer socialType = SocialTypeEnum.GITEE.getType();
    Integer userType = randomEle(SocialTypeEnum.values()).getType();
    String code = "tudou";
    String state = "yuanma";
    // Mock data
    SocialUserDO socialUser = randomPojo(SocialUserDO.class).setType(socialType).setCode(code).setState(state);
    socialUserMapper.insert(socialUser);

    // Invoke
    SocialUserDO result = socialUserService.authSocialUser(socialType, userType, code, state);
    // Assert
    assertPojoEquals(socialUser, result);
}
@Override
public InterpreterResult interpret(String script, InterpreterContext context) {
    LOGGER.debug("Run MongoDB script: {}", script);

    if (StringUtils.isEmpty(script)) {
        return new InterpreterResult(Code.SUCCESS);
    }

    String paragraphId = context.getParagraphId();

    // Write the script to a temporary file; the script is enriched with extensions.
    final File scriptFile = new File(getScriptFileName(paragraphId));
    try {
        FileUtils.write(scriptFile, shellExtension + script);
    } catch (IOException e) {
        LOGGER.error("Can not write script in temp file", e);
        return new InterpreterResult(Code.ERROR, e.getMessage());
    }

    InterpreterResult result = new InterpreterResult(InterpreterResult.Code.SUCCESS);

    final DefaultExecutor executor = new DefaultExecutor();
    final ByteArrayOutputStream errorStream = new ByteArrayOutputStream();
    executor.setStreamHandler(new PumpStreamHandler(context.out, errorStream));
    executor.setWatchdog(new ExecuteWatchdog(commandTimeout));

    final CommandLine cmdLine = CommandLine.parse(getProperty("mongo.shell.path"));
    cmdLine.addArgument("--quiet", false);
    cmdLine.addArgument(dbAddress, false);
    cmdLine.addArgument(scriptFile.getAbsolutePath(), false);

    try {
        executor.execute(cmdLine);
        runningProcesses.put(paragraphId, executor);
    } catch (ExecuteException e) {
        LOGGER.error("Can not run script in paragraph {}", paragraphId, e);

        final int exitValue = e.getExitValue();
        Code code = Code.ERROR;
        String msg = errorStream.toString();

        if (exitValue == SIGTERM_CODE) {
            code = Code.INCOMPLETE;
            msg = msg + "Paragraph received a SIGTERM.\n";
            LOGGER.info("The paragraph {} stopped executing: {}", paragraphId, msg);
        }

        msg += "ExitValue: " + exitValue;
        result = new InterpreterResult(code, msg);
    } catch (IOException e) {
        LOGGER.error("Can not run script in paragraph {}", paragraphId, e);
        result = new InterpreterResult(Code.ERROR, e.getMessage());
    } finally {
        FileUtils.deleteQuietly(scriptFile);
        stopProcess(paragraphId);
    }

    return result;
}

@Test
void testSuccess() {
    final String userScript = "print('hello');";

    final InterpreterResult res = interpreter.interpret(userScript, context);
    assertSame(Code.SUCCESS, res.code(), "Check SUCCESS: " + res.message());

    try {
        out.flush();
    } catch (IOException ex) {
        System.out.println(ex.getMessage());
    }
    final String resultScript = new String(getBufferBytes());

    final String expectedScript = SHELL_EXTENSION
        .replace("TABLE_LIMIT_PLACEHOLDER", interpreter.getProperty("mongo.shell.command.table.limit"))
        .replace("TARGET_DB_PLACEHOLDER", interpreter.getProperty("mongo.server.database"))
        .replace("USER_NAME_PLACEHOLDER", interpreter.getProperty("mongo.server.username"))
        .replace("PASSWORD_PLACEHOLDER", interpreter.getProperty("mongo.server.password"))
        .replace("AUTH_DB_PLACEHOLDER", interpreter.getProperty("mongo.server.authenticationDatabase"))
        + userScript;

    // The script that is executed must contain the functions provided by this interpreter
    assertEquals(expectedScript, resultScript, "Check SCRIPT");
}
protected String getConfig(File configFile) {
    return ThrowableFunction.execute(
        configFile, file -> canRead(configFile) ? readFileToString(configFile, getEncoding()) : null);
}

@Test
void testPublishAndGetConfig() {
    assertTrue(configuration.publishConfig(KEY, CONTENT));
    assertTrue(configuration.publishConfig(KEY, CONTENT));
    assertTrue(configuration.publishConfig(KEY, CONTENT));
    assertEquals(CONTENT, configuration.getConfig(KEY, DEFAULT_GROUP));
}
public ProviderBuilder networker(String networker) {
    this.networker = networker;
    return getThis();
}

@Test
void networker() {
    ProviderBuilder builder = ProviderBuilder.newBuilder();
    builder.networker("networker");
    Assertions.assertEquals("networker", builder.build().getNetworker());
}
@Override
void toHtml() throws IOException {
    writeHtmlHeader();
    htmlCoreReport.toHtml();
    writeHtmlFooter();
}

@Test
public void testWithNoDatabase() throws IOException {
    final HtmlReport htmlReport = new HtmlReport(collector, null, javaInformationsList, Period.TOUT, writer);
    htmlReport.toHtml(null, null);
    assertNotEmptyAndClear(writer);
}
@Override
public Serializable read(final MySQLBinlogColumnDef columnDef, final MySQLPacketPayload payload) {
    return payload.readStringFixByBytes(readLengthFromMeta(columnDef.getColumnMeta(), payload));
}

@Test
void assertReadWithMeta4() {
    columnDef.setColumnMeta(4);
    when(payload.readInt4()).thenReturn(Integer.MAX_VALUE);
    when(payload.readStringFixByBytes(Integer.MAX_VALUE)).thenReturn(new byte[255]);
    assertThat(new MySQLBlobBinlogProtocolValue().read(columnDef, payload), is(new byte[255]));
}
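readLengthFromMeta is not shown; the test implies that a column meta of 4 makes the reader consume a four-byte length prefix before the blob bytes. A sketch consistent with that (the other meta branches are assumptions):

// The column meta encodes how many bytes hold the blob's length prefix;
// the test stubs readInt4() for meta 4.
private int readLengthFromMeta(final int columnMeta, final MySQLPacketPayload payload) {
    switch (columnMeta) {
        case 1:
            return payload.readInt1();
        case 2:
            return payload.readInt2();
        case 3:
            return payload.readInt3();
        case 4:
            return payload.readInt4();
        default:
            throw new UnsupportedOperationException("Unsupported BLOB meta: " + columnMeta);
    }
}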
public Command create(
        final ConfiguredStatement<? extends Statement> statement,
        final KsqlExecutionContext context) {
    return create(statement, context.getServiceContext(), context);
}

@Test
public void shouldCreateCommandForPlannedQueryInDedicatedRuntime() {
    // Given:
    givenPlannedQuery();
    PersistentQueryMetadataImpl queryMetadata = mock(PersistentQueryMetadataImpl.class);
    when(executionContext.execute(any(), any(ConfiguredKsqlPlan.class))).thenReturn(result);
    when(result.getQuery()).thenReturn(Optional.ofNullable(queryMetadata));
    when(config.getBoolean(KsqlConfig.KSQL_SHARED_RUNTIME_ENABLED)).thenReturn(true);

    // When:
    final Command command = commandFactory.create(configuredStatement, executionContext);

    // Then:
    assertThat(command, is(Command.of(
        ConfiguredKsqlPlan.of(
            A_PLAN,
            SessionConfig.of(config, ImmutableMap.of(KsqlConfig.KSQL_SHARED_RUNTIME_ENABLED, false))))));
}
@Override
public Event clone() {
    final ConvertedMap map = ConvertedMap.newFromMap(Cloner.<Map<String, Object>>deep(data));
    map.putInterned(METADATA, Cloner.<Map<String, Object>>deep(metadata));
    return new Event(map);
}

@Test
public void testClone() throws Exception {
    Map<String, Object> data = new HashMap<>();
    List<Object> l = new ArrayList<>();
    data.put("array", l);

    Map<String, Object> m = new HashMap<>();
    m.put("foo", "bar");
    l.add(m);

    data.put("foo", 1.0);
    data.put("bar", "bar");
    data.put("baz", 1);

    Event e = new Event(data);
    Event f = e.clone();
    assertJsonEquals("{\"bar\":\"bar\",\"@timestamp\":\"" + e.getTimestamp().toString()
        + "\",\"array\":[{\"foo\":\"bar\"}],\"foo\":1.0,\"@version\":\"1\",\"baz\":1}", f.toJson());
    assertJsonEquals(f.toJson(), e.toJson());
}
public static BufferedImage alphaOffset(final Image rawImg, final int offset) {
    BufferedImage image = toARGB(rawImg);
    final int numComponents = image.getColorModel().getNumComponents();

    final float[] scales = new float[numComponents];
    final float[] offsets = new float[numComponents];

    Arrays.fill(scales, 1f);
    Arrays.fill(offsets, 0f);
    offsets[numComponents - 1] = offset;
    return offset(image, scales, offsets);
}

@Test
public void alphaOffset() {
    // alphaOffset(BufferedImage image, int offset)
    assertTrue(bufferedImagesEqual(oneByOne(BLACK_TRANSPARENT),
        ImageUtil.alphaOffset(oneByOne(BLACK_TRANSPARENT), -255)));
    assertTrue(bufferedImagesEqual(oneByOne(new Color(0, 0, 0, 50)),
        ImageUtil.alphaOffset(oneByOne(BLACK_TRANSPARENT), 50)));
    assertTrue(bufferedImagesEqual(oneByOne(BLACK_HALF_TRANSPARENT),
        ImageUtil.alphaOffset(oneByOne(BLACK_TRANSPARENT), 128)));
    assertTrue(bufferedImagesEqual(oneByOne(BLACK_TRANSPARENT),
        ImageUtil.alphaOffset(oneByOne(BLACK_HALF_TRANSPARENT), -255)));
    assertTrue(bufferedImagesEqual(oneByOne(BLACK),
        ImageUtil.alphaOffset(oneByOne(BLACK_TRANSPARENT), 255)));
    assertTrue(bufferedImagesEqual(oneByOne(new Color(0, 0, 0, 200)),
        ImageUtil.alphaOffset(oneByOne(BLACK), -55)));
    assertTrue(bufferedImagesEqual(oneByOne(BLACK),
        ImageUtil.alphaOffset(oneByOne(BLACK), 255)));

    // alphaOffset(BufferedImage image, float offset)
    assertTrue(bufferedImagesEqual(oneByOne(BLACK_TRANSPARENT),
        ImageUtil.alphaOffset(oneByOne(BLACK_TRANSPARENT), 0f)));
    assertTrue(bufferedImagesEqual(oneByOne(BLACK_TRANSPARENT),
        ImageUtil.alphaOffset(oneByOne(BLACK_TRANSPARENT), 1f)));
    assertTrue(bufferedImagesEqual(oneByOne(BLACK_TRANSPARENT),
        ImageUtil.alphaOffset(oneByOne(BLACK_TRANSPARENT), 2f)));
    assertTrue(bufferedImagesEqual(oneByOne(BLACK_TRANSPARENT),
        ImageUtil.alphaOffset(oneByOne(BLACK_HALF_TRANSPARENT), 0f)));
    assertTrue(bufferedImagesEqual(oneByOne(BLACK_HALF_TRANSPARENT),
        ImageUtil.alphaOffset(oneByOne(BLACK_HALF_TRANSPARENT), 1f)));
    assertTrue(bufferedImagesEqual(oneByOne(BLACK),
        ImageUtil.alphaOffset(oneByOne(BLACK_HALF_TRANSPARENT), 2f)));
    assertTrue(bufferedImagesEqual(oneByOne(BLACK_TRANSPARENT),
        ImageUtil.alphaOffset(oneByOne(BLACK), 0f)));
    assertTrue(bufferedImagesEqual(oneByOne(BLACK_HALF_TRANSPARENT),
        ImageUtil.alphaOffset(oneByOne(BLACK), 0.503f))); // opacityOffset does Math.floor
    assertTrue(bufferedImagesEqual(oneByOne(BLACK),
        ImageUtil.alphaOffset(oneByOne(BLACK), 1f)));
    assertTrue(bufferedImagesEqual(oneByOne(BLACK),
        ImageUtil.alphaOffset(oneByOne(BLACK), 2f)));
}
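The offset(...) helper the focal method delegates to is not shown; it presumably wraps java.awt.image.RescaleOp, which computes value * scale + offset per color component. A minimal sketch under that assumption:

import java.awt.image.BufferedImage;
import java.awt.image.RescaleOp;

// With all scales at 1 and only the last (alpha) offset non-zero, this shifts
// the alpha channel by the requested amount and clamps to the valid range.
static BufferedImage offset(BufferedImage image, float[] scales, float[] offsets) {
    return new RescaleOp(scales, offsets, null).filter(image, null);
}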
public boolean isEmpty() {
    return branches.isEmpty();
}

@Test
public void isEmpty() {
    assertThat(underTest.isEmpty()).isFalse();
    assertThat(new ProjectBranches(Collections.emptyList()).isEmpty()).isTrue();
    assertThat(new ProjectBranches(Collections.emptyList()).defaultBranchName()).isEqualTo("main");
}
public static RowCoder of(Schema schema) {
    return new RowCoder(schema);
}

@Test(expected = NonDeterministicException.class)
public void testVerifyDeterministic() throws NonDeterministicException {
    Schema schema =
        Schema.builder()
            .addField("f1", FieldType.DOUBLE)
            .addField("f2", FieldType.FLOAT)
            .addField("f3", FieldType.INT32)
            .build();
    RowCoder coder = RowCoder.of(schema);
    coder.verifyDeterministic();
}
private List<Object> getTargetInvokersByRequest(String targetName, List<Object> invokers, Object invocation) {
    Map<String, Object> attachments = parseAttachments(invocation);
    List<String> requestTags = routerConfig.getRequestTags();
    if (CollectionUtils.isEmpty(requestTags)) {
        return invokers;
    }

    // Set of tags used to match instances
    Map<String, String> tags = new HashMap<>();

    // Set of tags used to filter instances; a null value means every instance carrying the
    // tag is filtered out, regardless of the tag's value
    Map<String, String> mismatchTags = new HashMap<>();
    for (Map.Entry<String, Object> entry : attachments.entrySet()) {
        String key = entry.getKey();
        if (!requestTags.contains(key)) {
            continue;
        }
        String replaceDashKey = key;
        if (replaceDashKey.contains(RouterConstant.DASH)) {
            // Dubbo will replace the "-" in the key with "."
            replaceDashKey = replaceDashKey.replace(RouterConstant.DASH, RouterConstant.POINT);
        }
        mismatchTags.put(RuleUtils.getMetaKey(replaceDashKey), null);
        String value = Optional.ofNullable(entry.getValue()).map(String::valueOf).orElse(null);
        if (StringUtils.isExist(value)) {
            tags.put(RuleUtils.getMetaKey(replaceDashKey), value);
        }
    }

    if (StringUtils.isExist(tags.get(RouterConstant.META_VERSION_KEY))) {
        mismatchTags.put(RouterConstant.META_VERSION_KEY, tags.get(RouterConstant.META_VERSION_KEY));
    } else {
        // All instances contain version, so null values cannot be stored
        mismatchTags.remove(RouterConstant.META_VERSION_KEY);
    }

    boolean isReturnAllInstancesWhenMismatch = false;
    if (CollectionUtils.isEmpty(mismatchTags)) {
        mismatchTags = allMismatchTags;
        isReturnAllInstancesWhenMismatch = true;
    }
    List<Object> result = RuleStrategyHandler.INSTANCE.getMatchInvokersByRequest(targetName, invokers, tags);
    if (CollectionUtils.isEmpty(result)) {
        result = RuleStrategyHandler.INSTANCE.getMismatchInvokers(targetName, invokers,
            Collections.singletonList(mismatchTags), isReturnAllInstancesWhenMismatch);
    }
    return result;
}

@Test
public void testGetTargetInvokersByRequest() {
    config.setRequestTags(Arrays.asList("foo", "bar", "version"));
    config.setUseRequestRouter(true);

    List<Object> invokers = new ArrayList<>();
    ApacheInvoker<Object> invoker1 = new ApacheInvoker<>("1.0.0",
        Collections.singletonMap(RouterConstant.PARAMETERS_KEY_PREFIX + "bar", "bar1"));
    invokers.add(invoker1);
    ApacheInvoker<Object> invoker2 = new ApacheInvoker<>("1.0.1",
        Collections.singletonMap(RouterConstant.PARAMETERS_KEY_PREFIX + "foo", "bar2"));
    invokers.add(invoker2);
    ApacheInvoker<Object> invoker3 = new ApacheInvoker<>("1.0.1");
    invokers.add(invoker3);

    Invocation invocation = new ApacheInvocation();
    Map<String, String> queryMap = new HashMap<>();
    queryMap.put("side", "consumer");
    queryMap.put("group", "fooGroup");
    queryMap.put("version", "0.0.1");
    queryMap.put("interface", "io.sermant.foo.FooTest");
    DubboCache.INSTANCE.putApplication("io.sermant.foo.FooTest", "foo");

    // Matching instances with foo: bar2
    invocation.setAttachment("foo", "bar2");
    invocation.setAttachment("foo1", "bar2");
    List<Object> targetInvokers = (List<Object>) flowRouteHandler.handle(
        DubboCache.INSTANCE.getApplication("io.sermant.foo.FooTest"),
        invokers, invocation, queryMap, "io.sermant.foo.FooTest");
    Assert.assertEquals(1, targetInvokers.size());
    Assert.assertEquals(invoker2, targetInvokers.get(0));

    // Matching instances with version 1.0.0
    invocation.getObjectAttachments().clear();
    invocation.setAttachment("version", "1.0.0");
    targetInvokers = (List<Object>) flowRouteHandler.handle(
        DubboCache.INSTANCE.getApplication("io.sermant.foo.FooTest"),
        invokers, invocation, queryMap, "io.sermant.foo.FooTest");
    Assert.assertEquals(1, targetInvokers.size());
    Assert.assertEquals(invoker1, targetInvokers.get(0));
}
public void parse(InputStream stream, ContentHandler handler, Metadata metadata, ParseContext context)
        throws IOException, SAXException, TikaException {
    // set the encoding?
    try {
        SyndFeedInput input = new SyndFeedInput();
        input.setAllowDoctypes(false);
        SyndFeed feed = input.build(new InputSource(CloseShieldInputStream.wrap(stream)));

        String title = stripTags(feed.getTitleEx());
        String description = stripTags(feed.getDescriptionEx());

        metadata.set(TikaCoreProperties.TITLE, title);
        metadata.set(TikaCoreProperties.DESCRIPTION, description);
        // store the other fields in the metadata

        XHTMLContentHandler xhtml = new XHTMLContentHandler(handler, metadata);
        xhtml.startDocument();

        xhtml.element("h1", title);
        xhtml.element("p", description);

        xhtml.startElement("ul");
        for (Object e : feed.getEntries()) {
            SyndEntry entry = (SyndEntry) e;
            String link = entry.getLink();
            if (link != null) {
                xhtml.startElement("li");
                xhtml.startElement("a", "href", link);
                xhtml.characters(stripTags(entry.getTitleEx()));
                xhtml.endElement("a");
                SyndContent content = entry.getDescription();
                if (content != null) {
                    xhtml.newline();
                    xhtml.characters(stripTags(content));
                }
                xhtml.endElement("li");
            }
        }
        xhtml.endElement("ul");

        xhtml.endDocument();
    } catch (FeedException e) {
        throw new TikaException("RSS parse error", e);
    }
}

@Test
public void testRSSParser() throws Exception {
    // These RSS files should have basically the same contents,
    // represented in the various RSS format versions
    for (String rssFile : new String[]{"/test-documents/rsstest_091.rss", "/test-documents/rsstest_20.rss"}) {
        try (InputStream input = FeedParserTest.class.getResourceAsStream(rssFile)) {
            Metadata metadata = new Metadata();
            ContentHandler handler = new BodyContentHandler();
            ParseContext context = new ParseContext();

            new FeedParser().parse(input, handler, metadata, context);

            String content = handler.toString();
            assertNotNull(content);

            assertEquals("Sample RSS File for Junit test", metadata.get(TikaCoreProperties.DESCRIPTION));
            assertEquals("TestChannel", metadata.get(TikaCoreProperties.TITLE));

            // TODO find a way of testing the paragraphs and anchors
        }
    }
}
public static String underScoreToCamelCase(String key) {
    if (key.isEmpty())
        return key;

    StringBuilder sb = new StringBuilder(key.length());
    for (int i = 0; i < key.length(); i++) {
        char c = key.charAt(i);
        if (c == '_') {
            i++;
            if (i < key.length())
                sb.append(Character.toUpperCase(key.charAt(i)));
            else
                sb.append(c);
        } else
            sb.append(c);
    }
    return sb.toString();
}

@Test
public void testUnderscoreToCamelCase() {
    assertEquals("testCase", Helper.underScoreToCamelCase("test_case"));
    assertEquals("testCaseTBD", Helper.underScoreToCamelCase("test_case_t_b_d"));
    assertEquals("TestCase_", Helper.underScoreToCamelCase("_test_case_"));
}
@Override
public void isEqualTo(@Nullable Object expected) {
    super.isEqualTo(expected);
}

@Test
public void isEqualTo_WithoutToleranceParameter_Fail_Shorter() {
    expectFailureWhenTestingThat(array(2.2f, 3.3f)).isEqualTo(array(2.2f));
}
@Override
public PageResult<ApiAccessLogDO> getApiAccessLogPage(ApiAccessLogPageReqVO pageReqVO) {
    return apiAccessLogMapper.selectPage(pageReqVO);
}

@Test
public void testGetApiAccessLogPage() {
    ApiAccessLogDO apiAccessLogDO = randomPojo(ApiAccessLogDO.class, o -> {
        o.setUserId(2233L);
        o.setUserType(UserTypeEnum.ADMIN.getValue());
        o.setApplicationName("yudao-test");
        o.setRequestUrl("foo");
        o.setBeginTime(buildTime(2021, 3, 13));
        o.setDuration(1000);
        o.setResultCode(GlobalErrorCodeConstants.SUCCESS.getCode());
    });
    apiAccessLogMapper.insert(apiAccessLogDO);
    // Test userId mismatch
    apiAccessLogMapper.insert(cloneIgnoreId(apiAccessLogDO, o -> o.setUserId(3344L)));
    // Test userType mismatch
    apiAccessLogMapper.insert(cloneIgnoreId(apiAccessLogDO, o -> o.setUserType(UserTypeEnum.MEMBER.getValue())));
    // Test applicationName mismatch
    apiAccessLogMapper.insert(cloneIgnoreId(apiAccessLogDO, o -> o.setApplicationName("test")));
    // Test requestUrl mismatch
    apiAccessLogMapper.insert(cloneIgnoreId(apiAccessLogDO, o -> o.setRequestUrl("bar")));
    // Test beginTime mismatch: construct an earlier time, 2021-02-06 00:00:00
    apiAccessLogMapper.insert(cloneIgnoreId(apiAccessLogDO, o -> o.setBeginTime(buildTime(2021, 2, 6))));
    // Test duration mismatch
    apiAccessLogMapper.insert(cloneIgnoreId(apiAccessLogDO, o -> o.setDuration(100)));
    // Test resultCode mismatch
    apiAccessLogMapper.insert(cloneIgnoreId(apiAccessLogDO, o -> o.setResultCode(2)));

    // Prepare parameters
    ApiAccessLogPageReqVO reqVO = new ApiAccessLogPageReqVO();
    reqVO.setUserId(2233L);
    reqVO.setUserType(UserTypeEnum.ADMIN.getValue());
    reqVO.setApplicationName("yudao-test");
    reqVO.setRequestUrl("foo");
    reqVO.setBeginTime(buildBetweenTime(2021, 3, 13, 2021, 3, 13));
    reqVO.setDuration(1000);
    reqVO.setResultCode(GlobalErrorCodeConstants.SUCCESS.getCode());

    // Invoke
    PageResult<ApiAccessLogDO> pageResult = apiAccessLogService.getApiAccessLogPage(reqVO);
    // Assert: only the one matching record is found
    assertEquals(1, pageResult.getTotal());
    assertEquals(1, pageResult.getList().size());
    assertPojoEquals(apiAccessLogDO, pageResult.getList().get(0));
}
public boolean deleteReplica(Replica replica) {
    try (CloseableLock ignored = CloseableLock.lock(this.rwLock.writeLock())) {
        if (replicas.contains(replica)) {
            replicas.remove(replica);
            GlobalStateMgr.getCurrentState().getTabletInvertedIndex().deleteReplica(id, replica.getBackendId());
            return true;
        }
    }
    return false;
}

@Test
public void deleteReplicaTest() {
    // delete replica1
    Assert.assertTrue(tablet.deleteReplicaByBackendId(replica1.getBackendId()));
    Assert.assertNull(tablet.getReplicaById(replica1.getId()));

    // err: re-delete replica1
    Assert.assertFalse(tablet.deleteReplicaByBackendId(replica1.getBackendId()));
    Assert.assertFalse(tablet.deleteReplica(replica1));
    Assert.assertNull(tablet.getReplicaById(replica1.getId()));

    // delete replica2
    Assert.assertTrue(tablet.deleteReplica(replica2));
    Assert.assertEquals(1, tablet.getImmutableReplicas().size());

    // clear replicas
    tablet.clearReplica();
    Assert.assertEquals(0, tablet.getImmutableReplicas().size());
}
public SCMView getSCMView(String pluginId) {
    return pluginRequestHelper.submitRequest(pluginId, REQUEST_SCM_VIEW, new DefaultPluginInteractionCallback<>() {
        @Override
        public SCMView onSuccess(String responseBody, Map<String, String> responseHeaders, String resolvedExtensionVersion) {
            return messageHandlerMap.get(resolvedExtensionVersion).responseMessageForSCMView(responseBody);
        }
    });
}

@Test
public void shouldTalkToPluginToGetSCMView() throws Exception {
    SCMView deserializedResponse = new SCMView() {
        @Override
        public String displayValue() {
            return null;
        }

        @Override
        public String template() {
            return null;
        }
    };
    when(jsonMessageHandler.responseMessageForSCMView(responseBody)).thenReturn(deserializedResponse);

    SCMView response = scmExtension.getSCMView(PLUGIN_ID);

    assertRequest(requestArgumentCaptor.getValue(), SCM_EXTENSION, "1.0", SCMExtension.REQUEST_SCM_VIEW, null);
    verify(jsonMessageHandler).responseMessageForSCMView(responseBody);
    assertSame(response, deserializedResponse);
}
@Override
public void registry(ServiceInstance serviceInstance) {
    DiscoveryManager.INSTANCE.registry(serviceInstance);
}

@Test
public void registry() {
    final ServiceInstance serviceInstance = CommonUtils.buildInstance(serviceName, 8989);
    DiscoveryManager.INSTANCE.registry(serviceInstance);
    Mockito.verify(zkService34, Mockito.times(1)).registry(serviceInstance);
}
@Override public List<String> splitAndEvaluate() { return Strings.isNullOrEmpty(inlineExpression) ? Collections.emptyList() : flatten(evaluate(GroovyUtils.split(handlePlaceHolder(inlineExpression)))); }
@Test void assertEvaluateForRange() { List<String> expected = TypedSPILoader.getService(InlineExpressionParser.class, "GROOVY", PropertiesBuilder.build( new PropertiesBuilder.Property(InlineExpressionParser.INLINE_EXPRESSION_KEY, "t_order_${0..2},t_order_item_${0..1}"))).splitAndEvaluate(); assertThat(expected.size(), is(5)); assertThat(expected, hasItems("t_order_0", "t_order_1", "t_order_2", "t_order_item_0", "t_order_item_1")); }
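A minimal, self-contained sketch of the range-expansion idea the test exercises (not the Groovy-backed parser itself): it unrolls one ${a..b} range per comma-separated token with a plain regex. The class name and the single-range-per-token limitation are assumptions of this sketch.

import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class RangeExpansionSketch {
    private static final Pattern RANGE = Pattern.compile("\\$\\{(\\d+)\\.\\.(\\d+)}");

    // Expands a comma-separated expression such as "t_order_${0..2}" into concrete names.
    static List<String> expand(String expression) {
        List<String> result = new ArrayList<>();
        for (String token : expression.split(",")) {
            Matcher m = RANGE.matcher(token);
            if (m.find()) {
                int from = Integer.parseInt(m.group(1));
                int to = Integer.parseInt(m.group(2));
                for (int i = from; i <= to; i++) {
                    result.add(token.substring(0, m.start()) + i + token.substring(m.end()));
                }
            } else {
                result.add(token);
            }
        }
        return result;
    }

    public static void main(String[] args) {
        // Prints [t_order_0, t_order_1, t_order_2, t_order_item_0, t_order_item_1]
        System.out.println(expand("t_order_${0..2},t_order_item_${0..1}"));
    }
}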
static boolean isProcessFile(final String pathName) { return pathName.endsWith("bpmn") || pathName.endsWith("bpmn2") || pathName.endsWith("bpmn-cm"); }
@Test public void testIsProcessFile() { assertThat(KieModuleMetaDataImpl.isProcessFile("abc.bpmn")).isTrue(); assertThat(KieModuleMetaDataImpl.isProcessFile("abc.bpmn2")).isTrue(); assertThat(KieModuleMetaDataImpl.isProcessFile("abc.bpmn-cm")).isTrue(); assertThat(KieModuleMetaDataImpl.isProcessFile("abc.bpmn2-cm")).isFalse(); }
public static DirectoryLock lockForDirectory(File dir, ILogger logger) { File lockFile = new File(dir, FILE_NAME); FileChannel channel = openChannel(lockFile); FileLock lock = acquireLock(lockFile, channel); if (logger.isFineEnabled()) { logger.fine("Acquired lock on " + lockFile.getAbsolutePath()); } return new DirectoryLock(dir, channel, lock, logger); }
@Test public void test_lockForDirectory_whenAlreadyLocked() { directoryLock = lockForDirectory(directory, logger); assertThatThrownBy(() -> lockForDirectory(directory, logger)).isInstanceOf(HazelcastException.class); }
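The exclusive-lock behaviour asserted above can be sketched with only JDK types: FileChannel.tryLock returns null when another process already holds the lock on the file, which is the condition the production code surfaces as a HazelcastException. The temp-file path below is an arbitrary assumption.

import java.io.IOException;
import java.nio.channels.FileChannel;
import java.nio.channels.FileLock;
import java.nio.file.Path;
import static java.nio.file.StandardOpenOption.CREATE;
import static java.nio.file.StandardOpenOption.WRITE;

public class DirectoryLockSketch {
    public static void main(String[] args) throws IOException {
        Path lockFile = Path.of(System.getProperty("java.io.tmpdir"), "lock");
        try (FileChannel channel = FileChannel.open(lockFile, CREATE, WRITE)) {
            // tryLock() returns null if another process already holds the lock.
            FileLock lock = channel.tryLock();
            System.out.println(lock != null ? "acquired" : "already locked");
        }
    }
}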
public void tick() { // The main loop does two primary things: 1) drive the group membership protocol, responding to rebalance events // as they occur, and 2) handle external requests targeted at the leader. All the "real" work of the herder is // performed in this thread, which keeps synchronization straightforward at the cost of some operations possibly // blocking up this thread (especially those in callbacks due to rebalance events). try { // if we failed to read to end of log before, we need to make sure the issue was resolved before joining group // Joining and immediately leaving for failure to read configs is exceedingly impolite if (!canReadConfigs) { if (readConfigToEnd(workerSyncTimeoutMs)) { canReadConfigs = true; } else { return; // Safe to return and tick immediately because readConfigToEnd will do the backoff for us } } log.debug("Ensuring group membership is still active"); String stageDescription = "ensuring membership in the cluster"; member.ensureActive(() -> new TickThreadStage(stageDescription)); completeTickThreadStage(); // Ensure we're in a good state in our group. If not restart and everything should be setup to rejoin if (!handleRebalanceCompleted()) return; } catch (WakeupException e) { // May be due to a request from another thread, or might be stopping. If the latter, we need to check the // flag immediately. If the former, we need to re-run the ensureActive call since we can't handle requests // unless we're in the group. log.trace("Woken up while ensure group membership is still active"); return; } if (fencedFromConfigTopic) { if (isLeader()) { // We were accidentally fenced out, possibly by a zombie leader try { log.debug("Reclaiming write privileges for config topic after being fenced out"); try (TickThreadStage stage = new TickThreadStage("reclaiming write privileges for the config topic")) { configBackingStore.claimWritePrivileges(); } fencedFromConfigTopic = false; log.debug("Successfully reclaimed write privileges for config topic after being fenced out"); } catch (Exception e) { log.warn("Unable to claim write privileges for config topic. Will backoff and possibly retry if still the leader", e); backoff(CONFIG_TOPIC_WRITE_PRIVILEGES_BACKOFF_MS); return; } } else { log.trace("Relinquished write privileges for config topic after being fenced out, since worker is no longer the leader of the cluster"); // We were meant to be fenced out because we fell out of the group and a new leader was elected fencedFromConfigTopic = false; } } long now = time.milliseconds(); if (checkForKeyRotation(now)) { log.debug("Distributing new session key"); keyExpiration = Long.MAX_VALUE; try { SessionKey newSessionKey = new SessionKey(keyGenerator.generateKey(), now); writeToConfigTopicAsLeader( "writing a new session key to the config topic", () -> configBackingStore.putSessionKey(newSessionKey) ); } catch (Exception e) { log.info("Failed to write new session key to config topic; forcing a read to the end of the config topic before possibly retrying", e); canReadConfigs = false; return; } } // Process any external requests // TODO: Some of these can be performed concurrently or even optimized away entirely. // For example, if three different connectors are slated to be restarted, it's fine to // restart all three at the same time instead. // Another example: if multiple configurations are submitted for the same connector, // the only one that actually has to be written to the config topic is the most recent one. Long scheduledTick = null; while (true) { final DistributedHerderRequest next = peekWithoutException(); if (next == null) { break; } else if (now >= next.at) { currentRequest = requests.pollFirst(); } else { scheduledTick = next.at; break; } runRequest(next.action(), next.callback()); } // Process all pending connector restart requests processRestartRequests(); if (scheduledRebalance < Long.MAX_VALUE) { scheduledTick = scheduledTick != null ? Math.min(scheduledTick, scheduledRebalance) : scheduledRebalance; rebalanceResolved = false; log.debug("Scheduled rebalance at: {} (now: {} scheduledTick: {}) ", scheduledRebalance, now, scheduledTick); } if (isLeader() && internalRequestValidationEnabled() && keyExpiration < Long.MAX_VALUE) { scheduledTick = scheduledTick != null ? Math.min(scheduledTick, keyExpiration) : keyExpiration; log.debug("Scheduled next key rotation at: {} (now: {} scheduledTick: {}) ", keyExpiration, now, scheduledTick); } // Process any configuration updates AtomicReference<Set<String>> connectorConfigUpdatesCopy = new AtomicReference<>(); AtomicReference<Set<String>> connectorTargetStateChangesCopy = new AtomicReference<>(); AtomicReference<Set<ConnectorTaskId>> taskConfigUpdatesCopy = new AtomicReference<>(); boolean shouldReturn; if (member.currentProtocolVersion() == CONNECT_PROTOCOL_V0) { shouldReturn = updateConfigsWithEager(connectorConfigUpdatesCopy, connectorTargetStateChangesCopy); // With eager protocol we should return immediately if needsReconfigRebalance has // been set to retain the old workflow if (shouldReturn) { return; } if (connectorConfigUpdatesCopy.get() != null) { processConnectorConfigUpdates(connectorConfigUpdatesCopy.get()); } if (connectorTargetStateChangesCopy.get() != null) { processTargetStateChanges(connectorTargetStateChangesCopy.get()); } } else { shouldReturn = updateConfigsWithIncrementalCooperative(connectorConfigUpdatesCopy, connectorTargetStateChangesCopy, taskConfigUpdatesCopy); if (connectorConfigUpdatesCopy.get() != null) { processConnectorConfigUpdates(connectorConfigUpdatesCopy.get()); } if (connectorTargetStateChangesCopy.get() != null) { processTargetStateChanges(connectorTargetStateChangesCopy.get()); } if (taskConfigUpdatesCopy.get() != null) { processTaskConfigUpdatesWithIncrementalCooperative(taskConfigUpdatesCopy.get()); } if (shouldReturn) { return; } } // Let the group take any actions it needs to try { long nextRequestTimeoutMs = scheduledTick != null ? Math.max(scheduledTick - time.milliseconds(), 0L) : Long.MAX_VALUE; log.trace("Polling for group activity; will wait for {}ms or until poll is interrupted by " + "either config backing store updates or a new external request", nextRequestTimeoutMs); String pollDurationDescription = scheduledTick != null ? "for up to " + nextRequestTimeoutMs + "ms or " : ""; String stageDescription = "polling the group coordinator " + pollDurationDescription + "until interrupted"; member.poll(nextRequestTimeoutMs, () -> new TickThreadStage(stageDescription)); completeTickThreadStage(); // Ensure we're in a good state in our group. If not restart and everything should be setup to rejoin handleRebalanceCompleted(); } catch (WakeupException e) { // FIXME should not be WakeupException log.trace("Woken up while polling for group activity"); // Ignore. Just indicates we need to check the exit flag, for requested actions, etc. } }
@Test public void testIncrementalCooperativeRebalanceForExistingMember() { connectProtocolVersion = CONNECT_PROTOCOL_V1; // Join group. First rebalance contains revocations because a new member joined. when(member.memberId()).thenReturn("member"); when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V1); expectRebalance(singletonList(CONN1), singletonList(TASK1), ConnectProtocol.Assignment.NO_ERROR, 1, Collections.emptyList(), Collections.emptyList(), 0); doNothing().when(member).requestRejoin(); expectMemberPoll(); herder.configState = SNAPSHOT; time.sleep(1000L); assertStatistics(0, 0, 0, Double.POSITIVE_INFINITY); herder.tick(); // In the second rebalance the new member gets its assignment and this member has no // assignments or revocations expectRebalance(1, Collections.emptyList(), Collections.emptyList()); time.sleep(2000L); assertStatistics(3, 1, 100, 2000); herder.tick(); time.sleep(3000L); assertStatistics(3, 2, 100, 3000); verifyNoMoreInteractions(member, statusBackingStore, configBackingStore, worker); }
@Override @Transactional(value="defaultTransactionManager") public OAuth2AccessTokenEntity refreshAccessToken(String refreshTokenValue, TokenRequest authRequest) throws AuthenticationException { if (Strings.isNullOrEmpty(refreshTokenValue)) { // throw an invalid token exception if there's no refresh token value at all throw new InvalidTokenException("Invalid refresh token: " + refreshTokenValue); } OAuth2RefreshTokenEntity refreshToken = clearExpiredRefreshToken(tokenRepository.getRefreshTokenByValue(refreshTokenValue)); if (refreshToken == null) { // throw an invalid token exception if we couldn't find the token throw new InvalidTokenException("Invalid refresh token: " + refreshTokenValue); } ClientDetailsEntity client = refreshToken.getClient(); AuthenticationHolderEntity authHolder = refreshToken.getAuthenticationHolder(); // make sure that the client requesting the token is the one who owns the refresh token ClientDetailsEntity requestingClient = clientDetailsService.loadClientByClientId(authRequest.getClientId()); if (!client.getClientId().equals(requestingClient.getClientId())) { tokenRepository.removeRefreshToken(refreshToken); throw new InvalidClientException("Client does not own the presented refresh token"); } // make sure this client allows access token refreshing if (!client.isAllowRefresh()) { throw new InvalidClientException("Client does not allow refreshing access token!"); } // clear out any access tokens if (client.isClearAccessTokensOnRefresh()) { tokenRepository.clearAccessTokensForRefreshToken(refreshToken); } if (refreshToken.isExpired()) { tokenRepository.removeRefreshToken(refreshToken); throw new InvalidTokenException("Expired refresh token: " + refreshTokenValue); } OAuth2AccessTokenEntity token = new OAuth2AccessTokenEntity(); // get the stored scopes from the authentication holder's authorization request; these are the scopes associated with the refresh token Set<String> refreshScopesRequested = new HashSet<>(refreshToken.getAuthenticationHolder().getAuthentication().getOAuth2Request().getScope()); Set<SystemScope> refreshScopes = scopeService.fromStrings(refreshScopesRequested); // remove any of the special system scopes refreshScopes = scopeService.removeReservedScopes(refreshScopes); Set<String> scopeRequested = authRequest.getScope() == null ? new HashSet<String>() : new HashSet<>(authRequest.getScope()); Set<SystemScope> scope = scopeService.fromStrings(scopeRequested); // remove any of the special system scopes scope = scopeService.removeReservedScopes(scope); if (scope != null && !scope.isEmpty()) { // ensure a proper subset of scopes if (refreshScopes != null && refreshScopes.containsAll(scope)) { // set the scope of the new access token if requested token.setScope(scopeService.toStrings(scope)); } else { String errorMsg = "Up-scoping is not allowed."; logger.error(errorMsg); throw new InvalidScopeException(errorMsg); } } else { // otherwise inherit the scope of the refresh token (if it's there -- this can return a null scope set) token.setScope(scopeService.toStrings(refreshScopes)); } token.setClient(client); if (client.getAccessTokenValiditySeconds() != null) { Date expiration = new Date(System.currentTimeMillis() + (client.getAccessTokenValiditySeconds() * 1000L)); token.setExpiration(expiration); } if (client.isReuseRefreshToken()) { // if the client re-uses refresh tokens, do that token.setRefreshToken(refreshToken); } else { // otherwise, make a new refresh token OAuth2RefreshTokenEntity newRefresh = createRefreshToken(client, authHolder); token.setRefreshToken(newRefresh); // clean up the old refresh token tokenRepository.removeRefreshToken(refreshToken); } token.setAuthenticationHolder(authHolder); tokenEnhancer.enhance(token, authHolder.getAuthentication()); tokenRepository.saveAccessToken(token); return token; }
@Test public void refreshAccessToken_requestingLessScope() { Set<String> lessScope = newHashSet("openid", "profile"); tokenRequest.setScope(lessScope); OAuth2AccessTokenEntity token = service.refreshAccessToken(refreshTokenValue, tokenRequest); verify(scopeService, atLeastOnce()).removeReservedScopes(anySet()); assertThat(token.getScope(), equalTo(lessScope)); }
public Predicate<InMemoryFilterable> parse(final List<String> filterExpressions, final List<EntityAttribute> attributes) { if (filterExpressions == null || filterExpressions.isEmpty()) { return Predicates.alwaysTrue(); } final Map<String, List<Filter>> groupedByField = filterExpressions.stream() .map(expr -> singleFilterParser.parseSingleExpression(expr, attributes)) .collect(groupingBy(Filter::field)); return groupedByField.values().stream() .map(grouped -> grouped.stream() .map(Filter::toPredicate) .collect(Collectors.toList())) .map(groupedPredicates -> groupedPredicates.stream().reduce(Predicate::or).orElse(Predicates.alwaysTrue())) .reduce(Predicate::and).orElse(Predicates.alwaysTrue()); }
@Test void returnsAlwaysTruePredicateOnNullFilterList() { assertThat(toTest.parse(null, List.of())) .isEqualTo(Predicates.alwaysTrue()); }
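The combination rule in the parser above (predicates OR-ed within a field, AND-ed across fields) can be shown standalone with java.util.function.Predicate. The Filter record and the map-based rows below are hypothetical stand-ins for the real Filter and InMemoryFilterable types.

import java.util.List;
import java.util.Map;
import java.util.function.Predicate;
import java.util.stream.Collectors;

public class FilterCombinerSketch {
    record Filter(String field, Predicate<Map<String, String>> predicate) { }

    // OR within a field, AND across fields -- the same shape as the parser above.
    static Predicate<Map<String, String>> combine(List<Filter> filters) {
        Map<String, List<Filter>> byField = filters.stream().collect(Collectors.groupingBy(Filter::field));
        return byField.values().stream()
                .map(group -> group.stream().map(Filter::predicate).reduce(Predicate::or).orElse(x -> true))
                .reduce(Predicate::and).orElse(x -> true);
    }

    public static void main(String[] args) {
        Predicate<Map<String, String>> p = combine(List.of(
                new Filter("status", m -> "open".equals(m.get("status"))),
                new Filter("status", m -> "pending".equals(m.get("status"))),
                new Filter("owner", m -> "alice".equals(m.get("owner")))));
        System.out.println(p.test(Map.of("status", "pending", "owner", "alice"))); // true
        System.out.println(p.test(Map.of("status", "closed", "owner", "alice"))); // false
    }
}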
static BlockStmt getComplexPartialScoreVariableDeclaration(final String variableName, final ComplexPartialScore complexPartialScore) { final MethodDeclaration methodDeclaration = COMPLEX_PARTIAL_SCORE_TEMPLATE.getMethodsByName(GETKIEPMMLCOMPLEXPARTIALSCORE).get(0).clone(); final BlockStmt complexPartialScoreBody = methodDeclaration.getBody().orElseThrow(() -> new KiePMMLException(String.format(MISSING_BODY_TEMPLATE, methodDeclaration))); final VariableDeclarator variableDeclarator = getVariableDeclarator(complexPartialScoreBody, COMPLEX_PARTIAL_SCORE) .orElseThrow(() -> new KiePMMLException(String.format(MISSING_VARIABLE_IN_BODY, COMPLEX_PARTIAL_SCORE, complexPartialScoreBody))); variableDeclarator.setName(variableName); final BlockStmt toReturn = new BlockStmt(); String nestedVariableName = String.format(VARIABLE_NAME_TEMPLATE, variableName, 0); BlockStmt toAdd = getKiePMMLExpressionBlockStmt(nestedVariableName, complexPartialScore.getExpression()); toAdd.getStatements().forEach(toReturn::addStatement); final ObjectCreationExpr objectCreationExpr = variableDeclarator.getInitializer() .orElseThrow(() -> new KiePMMLException(String.format(MISSING_VARIABLE_INITIALIZER_TEMPLATE, COMPLEX_PARTIAL_SCORE, toReturn))) .asObjectCreationExpr(); objectCreationExpr.getArguments().set(0, new StringLiteralExpr(variableName)); objectCreationExpr.getArguments().set(2, new NameExpr(nestedVariableName)); complexPartialScoreBody.getStatements().forEach(toReturn::addStatement); return toReturn; }
@Test void getComplexPartialScoreVariableDeclaration() throws IOException { final String variableName = "variableName"; Constant constant = new Constant(); constant.setValue(value1); ComplexPartialScore complexPartialScore = new ComplexPartialScore(); complexPartialScore.setExpression(constant); BlockStmt retrieved = KiePMMLComplexPartialScoreFactory.getComplexPartialScoreVariableDeclaration(variableName, complexPartialScore); String text = getFileContent(TEST_01_SOURCE); Statement expected = JavaParserUtils.parseBlock(String.format(text, constant.getValue(), variableName)); assertThat(retrieved).isEqualTo(expected); List<Class<?>> imports = Arrays.asList(KiePMMLConstant.class, KiePMMLComplexPartialScore.class, Collections.class); commonValidateCompilationWithImports(retrieved, imports); }
@Override public String getTableAlias() { SQLSelectQueryBlock selectQueryBlock = getSelect(); SQLTableSource tableSource = selectQueryBlock.getFrom(); return tableSource.getAlias(); }
@Test public void testGetTableAlias() { //test for no alias String sql = "SELECT * FROM t WITH (UPDLOCK) WHERE id = ?"; SQLStatement ast = getSQLStatement(sql); SqlServerSelectForUpdateRecognizer recognizer = new SqlServerSelectForUpdateRecognizer(sql, ast); Assertions.assertNull(recognizer.getTableAlias()); //test for alias sql = "SELECT * FROM t t1 WITH (UPDLOCK) WHERE id = ?"; ast = getSQLStatement(sql); recognizer = new SqlServerSelectForUpdateRecognizer(sql, ast); Assertions.assertEquals("t1", recognizer.getTableAlias()); }
public static Date parseHttpDate(CharSequence txt) { return parseHttpDate(txt, 0, txt.length()); }
@Test public void testParseWithFunkyTimezone() { assertEquals(DATE, parseHttpDate("Sun Nov 06 08:49:37 1994 -0000")); }
public Namespace findBranch(String appId, String parentClusterName, String namespaceName) { return namespaceService.findChildNamespace(appId, parentClusterName, namespaceName); }
@Test @Sql(scripts = "/sql/namespace-branch-test.sql", executionPhase = Sql.ExecutionPhase.BEFORE_TEST_METHOD) @Sql(scripts = "/sql/clean.sql", executionPhase = Sql.ExecutionPhase.AFTER_TEST_METHOD) public void testFindBranch() { Namespace branch = namespaceBranchService.findBranch(testApp, testCluster, testNamespace); Assert.assertNotNull(branch); Assert.assertEquals(testBranchName, branch.getClusterName()); }
@Override public DataType getDataType() { return DataType.PREDICATE; }
@Test public void requireThatDataTypeIsPredicate() { assertEquals(DataType.PREDICATE, new PredicateFieldValue().getDataType()); }
@Override public void track(String eventName, JSONObject properties) { }
@Test public void track() { mSensorsAPI.setTrackEventCallBack(new SensorsDataTrackEventCallBack() { @Override public boolean onTrackEvent(String eventName, JSONObject eventProperties) { Assert.fail(); return false; } }); mSensorsAPI.track("TestTrackEvent"); }
@Override public void close() throws IOException { if (mClosed.getAndSet(true)) { LOG.warn("OBSOutputStream is already closed"); return; } mLocalOutputStream.close(); try { BufferedInputStream in = new BufferedInputStream( new FileInputStream(mFile)); ObjectMetadata objMeta = new ObjectMetadata(); objMeta.setContentLength(mFile.length()); if (mHash != null) { byte[] hashBytes = mHash.digest(); objMeta.setContentMd5(new String(Base64.encodeBase64(hashBytes))); } mContentHash = mObsClient.putObject(mBucketName, mKey, in, objMeta).getEtag(); } catch (ObsException e) { LOG.error("Failed to upload {}. Temporary file @ {}", mKey, mFile.getPath()); throw new IOException(e); } finally { // Delete the temporary file on the local machine if the OBS client completed the // upload or if the upload failed. if (!mFile.delete()) { LOG.error("Failed to delete temporary file @ {}", mFile.getPath()); } } }
@Test @PrepareForTest(OBSOutputStream.class) public void testCloseSuccess() throws Exception { PowerMockito.whenNew(File.class).withArguments(Mockito.anyString()).thenReturn(mFile); FileOutputStream outputStream = PowerMockito.mock(FileOutputStream.class); PowerMockito.whenNew(FileOutputStream.class).withArguments(mFile).thenReturn(outputStream); FileInputStream inputStream = PowerMockito.mock(FileInputStream.class); PowerMockito.whenNew(FileInputStream.class).withArguments(mFile).thenReturn(inputStream); OBSOutputStream stream = new OBSOutputStream("testBucketName", "testKey", mObsClient, sConf.getList(PropertyKey.TMP_DIRS)); stream.close(); Mockito.verify(mFile).delete(); }
public void addReplaceFileId(String partitionPath, String fileId) { if (!partitionToReplaceFileIds.containsKey(partitionPath)) { partitionToReplaceFileIds.put(partitionPath, new ArrayList<>()); } partitionToReplaceFileIds.get(partitionPath).add(fileId); }
@Test public void verifyFieldNamesInReplaceCommitMetadata() throws IOException { List<HoodieWriteStat> fakeHoodieWriteStats = HoodieTestUtils.generateFakeHoodieWriteStat(10); HoodieReplaceCommitMetadata commitMetadata = new HoodieReplaceCommitMetadata(); fakeHoodieWriteStats.forEach(stat -> { commitMetadata.addWriteStat(stat.getPartitionPath(), stat); commitMetadata.addReplaceFileId(stat.getPartitionPath(), stat.getFileId()); }); verifyMetadataFieldNames(commitMetadata, EXPECTED_FIELD_NAMES); }
public static Class forName(String className) { return forName(className, true); }
@Test public void forName3() throws Exception { Class clazz = ClassUtils.forName("java.lang.String", Thread.currentThread().getContextClassLoader()); Assert.assertEquals(clazz, String.class); boolean error = false; try { ClassUtils.forName("asdasdasdsad", Thread.currentThread().getContextClassLoader()); } catch (Exception e) { error = true; } Assert.assertTrue(error); }
public <T> void writeTo(T object, OutputStream entityStream) throws IOException { ObjectWriter writer = objectWriterByClass.get(object.getClass()); if (writer == null) { mapper.writeValue(entityStream, object); } else { writer.writeValue(entityStream, object); } }
@Test public void testInstanceInfoJacksonEncodeXStreamDecode() throws Exception { // Encode ByteArrayOutputStream captureStream = new ByteArrayOutputStream(); codec.writeTo(INSTANCE_INFO_1_A1, captureStream); byte[] encoded = captureStream.toByteArray(); // Decode InputStream source = new ByteArrayInputStream(encoded); InstanceInfo decoded = (InstanceInfo) new EntityBodyConverter().read(source, InstanceInfo.class, MediaType.APPLICATION_JSON_TYPE); assertTrue(EurekaEntityComparators.equal(decoded, INSTANCE_INFO_1_A1)); }
public DoubleArrayAsIterable usingTolerance(double tolerance) { return new DoubleArrayAsIterable(tolerance(tolerance), iterableSubject()); }
@Test public void usingTolerance_contains_success() { assertThat(array(1.1, TOLERABLE_2POINT2, 3.3)).usingTolerance(DEFAULT_TOLERANCE).contains(2.2); }
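usingTolerance ultimately reduces to an absolute-difference comparison; a minimal sketch of that check, with arbitrary sample values and tolerance:

public class ToleranceSketch {
    // Approximate containment: some value satisfies |actual - expected| <= tolerance.
    static boolean containsWithin(double[] values, double expected, double tolerance) {
        for (double v : values) {
            if (Math.abs(v - expected) <= tolerance) {
                return true;
            }
        }
        return false;
    }

    public static void main(String[] args) {
        double[] values = {1.1, 2.2000000000000003, 3.3};
        System.out.println(containsWithin(values, 2.2, 1e-9)); // true
        System.out.println(containsWithin(values, 2.5, 1e-9)); // false
    }
}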
public static Pod createPod( String name, String namespace, Labels labels, OwnerReference ownerReference, PodTemplate template, Map<String, String> defaultPodLabels, Map<String, String> podAnnotations, Affinity affinity, List<Container> initContainers, List<Container> containers, List<Volume> volumes, List<LocalObjectReference> defaultImagePullSecrets, PodSecurityContext podSecurityContext ) { return new PodBuilder() .withNewMetadata() .withName(name) .withLabels(labels.withAdditionalLabels(Util.mergeLabelsOrAnnotations(defaultPodLabels, TemplateUtils.labels(template))).toMap()) .withNamespace(namespace) .withAnnotations(Util.mergeLabelsOrAnnotations(podAnnotations, TemplateUtils.annotations(template))) .withOwnerReferences(ownerReference) .endMetadata() .withNewSpec() .withRestartPolicy("Never") .withServiceAccountName(name) .withEnableServiceLinks(template != null ? template.getEnableServiceLinks() : null) .withAffinity(affinity) .withInitContainers(initContainers) .withContainers(containers) .withVolumes(volumes) .withTolerations(template != null && template.getTolerations() != null ? template.getTolerations() : null) .withTerminationGracePeriodSeconds(template != null ? (long) template.getTerminationGracePeriodSeconds() : 30L) .withImagePullSecrets(imagePullSecrets(template, defaultImagePullSecrets)) .withSecurityContext(podSecurityContext) .withPriorityClassName(template != null ? template.getPriorityClassName() : null) .withSchedulerName(template != null && template.getSchedulerName() != null ? template.getSchedulerName() : "default-scheduler") .withHostAliases(template != null ? template.getHostAliases() : null) .withTopologySpreadConstraints(template != null ? template.getTopologySpreadConstraints() : null) .endSpec() .build(); }
@Test public void testCreatePodWithTemplate() { Pod pod = WorkloadUtils.createPod( NAME, NAMESPACE, LABELS, OWNER_REFERENCE, new PodTemplateBuilder() .withNewMetadata() .withLabels(Map.of("label-3", "value-3", "label-4", "value-4")) .withAnnotations(Map.of("anno-1", "value-1", "anno-2", "value-2")) .endMetadata() .withEnableServiceLinks(false) .withAffinity(new Affinity()) // => should be ignored .withImagePullSecrets(List.of(new LocalObjectReference("some-other-pull-secret"))) .withPriorityClassName("my-priority-class") .withHostAliases(DEFAULT_HOST_ALIAS) .withTolerations(DEFAULT_TOLERATION) .withTerminationGracePeriodSeconds(15) .withSecurityContext(new PodSecurityContextBuilder().withRunAsUser(0L).build()) // => should be ignored .withTopologySpreadConstraints(DEFAULT_TOPOLOGY_SPREAD_CONSTRAINT) .withSchedulerName("my-scheduler") .build(), Map.of("default-label", "default-value"), Map.of("extra", "annotations"), DEFAULT_AFFINITY, List.of(new ContainerBuilder().withName("init-container").build()), List.of(new ContainerBuilder().withName("container").build()), VolumeUtils.createPodSetVolumes(NAME + "-0", DEFAULT_STORAGE, false), List.of(new LocalObjectReference("some-pull-secret")), DEFAULT_POD_SECURITY_CONTEXT ); assertThat(pod.getMetadata().getName(), is(NAME)); assertThat(pod.getMetadata().getNamespace(), is(NAMESPACE)); assertThat(pod.getMetadata().getLabels(), is(LABELS.withAdditionalLabels(Map.of("default-label", "default-value", "label-3", "value-3", "label-4", "value-4")).toMap())); assertThat(pod.getMetadata().getAnnotations(), is(Map.of("extra", "annotations", "anno-1", "value-1", "anno-2", "value-2"))); assertThat(pod.getSpec().getRestartPolicy(), is("Never")); assertThat(pod.getSpec().getServiceAccountName(), is(NAME)); assertThat(pod.getSpec().getEnableServiceLinks(), is(false)); assertThat(pod.getSpec().getAffinity(), is(DEFAULT_AFFINITY)); assertThat(pod.getSpec().getInitContainers().size(), is(1)); assertThat(pod.getSpec().getInitContainers().get(0).getName(), is("init-container")); assertThat(pod.getSpec().getContainers().size(), is(1)); assertThat(pod.getSpec().getContainers().get(0).getName(), is("container")); assertThat(pod.getSpec().getVolumes(), is(VolumeUtils.createPodSetVolumes(NAME + "-0", DEFAULT_STORAGE, false))); assertThat(pod.getSpec().getTolerations(), is(List.of(DEFAULT_TOLERATION))); assertThat(pod.getSpec().getTerminationGracePeriodSeconds(), is(15L)); assertThat(pod.getSpec().getImagePullSecrets(), is(List.of(new LocalObjectReference("some-other-pull-secret")))); assertThat(pod.getSpec().getSecurityContext(), is(DEFAULT_POD_SECURITY_CONTEXT)); assertThat(pod.getSpec().getPriorityClassName(), is("my-priority-class")); assertThat(pod.getSpec().getSchedulerName(), is("my-scheduler")); assertThat(pod.getSpec().getHostAliases(), is(List.of(DEFAULT_HOST_ALIAS))); assertThat(pod.getSpec().getTopologySpreadConstraints(), is(List.of(DEFAULT_TOPOLOGY_SPREAD_CONSTRAINT))); }
@Override @SuppressWarnings("deprecation") public HttpRoute determineRoute(HttpHost target, HttpContext context) { if ( ! target.getSchemeName().equals("http") && ! target.getSchemeName().equals("https")) throw new IllegalArgumentException("Scheme must be 'http' or 'https' when using HttpToHttpsRoutePlanner, was '" + target.getSchemeName() + "'"); if (HttpClientContext.adapt(context).getRequestConfig().getProxy() != null) throw new IllegalArgumentException("Proxies are not supported with HttpToHttpsRoutePlanner"); int port = DefaultSchemePortResolver.INSTANCE.resolve(target); return new HttpRoute(new HttpHost("https", target.getAddress(), target.getHostName(), port)); }
@Test void verifySchemeIsRewritten() { assertEquals(new HttpRoute(new HttpHost("https", "host", 1)), planner.determineRoute(new HttpHost("http", "host", 1), new HttpClientContext())); }
public long getActualStartTimeMs() { return actualStartTime; }
@Test void initCreate_withStartTime_storesCustomStartTime() throws InterruptedException { long initTime = Instant.now().toEpochMilli(); Thread.sleep(500); AsyncInitializationWrapper init = new AsyncInitializationWrapper(initTime); assertEquals(initTime, init.getActualStartTimeMs()); }
public String toLocaleString() { return toLocaleString(localeUTC.getLanguage(), ZoneId.systemDefault().toString()); }
@Test void testToLocaleString() { TbDate d = new TbDate(1693962245000L); // Depends on time zone, so we just check it works; Assertions.assertNotNull(d.toLocaleString()); Assertions.assertNotNull(d.toLocaleString("en-US")); Assertions.assertEquals("9/5/23, 9:04:05 PM", d.toLocaleString("en-US", "America/New_York")); Assertions.assertEquals("23. 9. 5. 오후 9:04:05", d.toLocaleString("ko-KR", "America/New_York")); Assertions.assertEquals("06.09.23, 04:04:05", d.toLocaleString( "uk-UA", "Europe/Kiev")); String expected_ver = Runtime.version().feature() == 11 ? "5\u200F/9\u200F/2023 9:04:05 م" : "5\u200F/9\u200F/2023, 9:04:05 م"; Assertions.assertEquals(expected_ver, d.toLocaleString( "ar-EG", "America/New_York")); Assertions.assertEquals("Tuesday, September 5, 2023 at 9:04:05 PM Eastern Daylight Time", d.toLocaleString("en-US", JacksonUtil.newObjectNode() .put("timeZone", "America/New_York") .put("dateStyle", "full") .put("timeStyle", "full") .toString())); Assertions.assertEquals("2023년 9월 5일 화요일 오후 9시 4분 5초 미 동부 하계 표준시", d.toLocaleString("ko-KR", JacksonUtil.newObjectNode() .put("timeZone", "America/New_York") .put("dateStyle", "full") .put("timeStyle", "full") .toString())); Assertions.assertEquals("середа, 6 вересня 2023 р. о 04:04:05 за східноєвропейським літнім часом", d.toLocaleString("uk-UA", JacksonUtil.newObjectNode() .put("timeZone", "Europe/Kiev") .put("dateStyle", "full") .put("timeStyle", "full") .toString())); expected_ver = Runtime.version().feature() == 11 ? "الثلاثاء، 5 سبتمبر 2023 9:04:05 م التوقيت الصيفي الشرقي لأمريكا الشمالية" : "الثلاثاء، 5 سبتمبر 2023 في 9:04:05 م التوقيت الصيفي الشرقي لأمريكا الشمالية"; Assertions.assertEquals(expected_ver, d.toLocaleString("ar-EG", JacksonUtil.newObjectNode() .put("timeZone", "America/New_York") .put("dateStyle", "full") .put("timeStyle", "full") .toString())); Assertions.assertEquals("9/5/2023, 9:04:05 PM", d.toLocaleString("en-US", JacksonUtil.newObjectNode() .put("timeZone", "America/New_York") .put("pattern", "M/d/yyyy, h:mm:ss a") .toString())); Assertions.assertEquals("9/5/2023, 9:04:05 오후", d.toLocaleString("ko-KR", JacksonUtil.newObjectNode() .put("timeZone", "America/New_York") .put("pattern", "M/d/yyyy, h:mm:ss a") .toString())); Assertions.assertEquals("9/6/2023, 4:04:05 дп", d.toLocaleString("uk-UA", JacksonUtil.newObjectNode() .put("timeZone", "Europe/Kiev") .put("pattern", "M/d/yyyy, h:mm:ss a") .toString())); Assertions.assertEquals("9/5/2023, 9:04:05 م", d.toLocaleString("ar-EG", JacksonUtil.newObjectNode() .put("timeZone", "America/New_York") .put("pattern", "M/d/yyyy, h:mm:ss a") .toString())); }
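A JDK-only sketch of the locale-plus-zone formatting that toLocaleString performs, using java.time; the zone, locale, and format style below are arbitrary choices, not the method's actual defaults.

import java.time.Instant;
import java.time.ZoneId;
import java.time.format.DateTimeFormatter;
import java.time.format.FormatStyle;
import java.util.Locale;

public class LocaleFormatSketch {
    public static void main(String[] args) {
        Instant instant = Instant.ofEpochMilli(1693962245000L);
        // The locale picks the wording; the zone picks the wall-clock time.
        DateTimeFormatter formatter = DateTimeFormatter
                .ofLocalizedDateTime(FormatStyle.MEDIUM)
                .withLocale(Locale.US)
                .withZone(ZoneId.of("America/New_York"));
        System.out.println(formatter.format(instant)); // e.g. Sep 5, 2023, 9:04:05 PM
    }
}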
public static Range parse(String value, DateFormat dateFormat) { final int index = value.indexOf(CUSTOM_PERIOD_SEPARATOR); if (index == -1) { try { return Period.valueOfIgnoreCase(value).getRange(); } catch (final IllegalArgumentException e) { return Period.JOUR.getRange(); } } // note: we could also try alternative dateFormats, // for example the same pattern without the slashes, or with just day and month Date startDate; try { startDate = dateFormat.parse(value.substring(0, index)); } catch (final ParseException e) { startDate = new Date(); } Date endDate; if (index < value.length() - 1) { try { endDate = dateFormat.parse(value.substring(index + 1)); } catch (final ParseException e) { endDate = new Date(); } } else { endDate = new Date(); } return createCustomRange(startDate, endDate); }
@Test public void testParse() { I18N.bindLocale(Locale.FRENCH); try { final DateFormat dateFormat = I18N.createDateFormat(); assertEquals("parse1", periodRange.getPeriod(), Range.parse(periodRange.getValue(), dateFormat).getPeriod()); assertTrue("parse2", isSameDay(customRange.getStartDate(), Range.parse(customRange.getValue(), dateFormat).getStartDate())); assertTrue("parse3", isSameDay(customRange.getEndDate(), Range.parse(customRange.getValue(), dateFormat).getEndDate())); // test the result when the format is invalid assertNotNull("parse4", Range .parse("xxxxxx" + Range.CUSTOM_PERIOD_SEPARATOR + "01/01/2010", dateFormat)); assertNotNull("parse5", Range .parse("01/01/2010" + Range.CUSTOM_PERIOD_SEPARATOR + "xxxxxx", dateFormat)); assertNotNull("parse6", Range.parse("01/01/2010" + Range.CUSTOM_PERIOD_SEPARATOR, dateFormat)); assertNotNull("parse6b", Range.parse("01/01/2011", dateFormat)); // test the min and max bounds final Calendar calendar = Calendar.getInstance(); final int currentYear = calendar.get(Calendar.YEAR); Range range = Range.parse("01/01/2000" + Range.CUSTOM_PERIOD_SEPARATOR + "01/01/2030", dateFormat); calendar.setTime(range.getStartDate()); assertTrue("parse7", calendar.get(Calendar.YEAR) >= currentYear - 2); calendar.setTime(range.getEndDate()); assertTrue("parse7", calendar.get(Calendar.YEAR) <= currentYear); range = Range.parse("01/01/2030" + Range.CUSTOM_PERIOD_SEPARATOR + "01/01/2030", dateFormat); calendar.setTime(range.getStartDate()); assertTrue("parse8", calendar.get(Calendar.YEAR) <= currentYear); } finally { I18N.unbindLocale(); } }
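The lenient fallback the test depends on, where an unparseable bound degrades to the current date instead of failing the whole range, can be sketched with plain JDK types; the dd/MM/yyyy pattern is an assumption matching the French locale used above.

import java.text.DateFormat;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Date;

public class LenientRangeParseSketch {
    // Mirrors the fallback behaviour above: a bad bound degrades to "now".
    static Date parseOrNow(DateFormat format, String text) {
        try {
            return format.parse(text);
        } catch (ParseException e) {
            return new Date();
        }
    }

    public static void main(String[] args) {
        DateFormat format = new SimpleDateFormat("dd/MM/yyyy");
        System.out.println(parseOrNow(format, "01/01/2010")); // parsed date
        System.out.println(parseOrNow(format, "xxxxxx"));     // falls back to now
    }
}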
public List<T> findCycle() { resetState(); for (T vertex : graph.getVertices()) { if (colors.get(vertex) == WHITE) { if (visitDepthFirst(vertex, new ArrayList<>(List.of(vertex)))) { if (cycle == null) throw new IllegalStateException("Null cycle - this should never happen"); if (cycle.isEmpty()) throw new IllegalStateException("Empty cycle - this should never happen"); log.log(FINE, () -> "Cycle detected: " + cycle); return cycle; } } } return new ArrayList<>(); }
@Test void findCycle_is_idempotent_with_cycle() { var graph = new Graph<Vertices>(); graph.edge(A, A); var cycleFinder = new CycleFinder<>(graph); assertTrue(cycleFinder.findCycle().containsAll(List.of(A, A))); assertTrue(cycleFinder.findCycle().containsAll(List.of(A, A))); }
Record deserialize(Object data) { return (Record) fieldDeserializer.value(data); }
@Test public void testDeserializeEverySupportedType() { Assume.assumeFalse( "No test yet for Hive3 (Date/Timestamp creation)", HiveVersion.min(HiveVersion.HIVE_3)); Deserializer deserializer = new Deserializer.Builder() .schema(HiveIcebergTestUtils.FULL_SCHEMA) .writerInspector((StructObjectInspector) IcebergObjectInspector.create(HiveIcebergTestUtils.FULL_SCHEMA)) .sourceInspector(HiveIcebergTestUtils.FULL_SCHEMA_OBJECT_INSPECTOR) .build(); Record expected = HiveIcebergTestUtils.getTestRecord(); Record actual = deserializer.deserialize(HiveIcebergTestUtils.valuesForTestRecord(expected)); HiveIcebergTestUtils.assertEquals(expected, actual); }
@ApiOperation(value = "Create Or update Tenant (saveTenant)", notes = "Create or update the Tenant. When creating tenant, platform generates Tenant Id as " + UUID_WIKI_LINK + "Default Rule Chain and Device profile are also generated for the new tenants automatically. " + "The newly created Tenant Id will be present in the response. " + "Specify existing Tenant Id id to update the Tenant. " + "Referencing non-existing Tenant Id will cause 'Not Found' error." + "Remove 'id', 'tenantId' from the request body example (below) to create new Tenant entity." + SYSTEM_AUTHORITY_PARAGRAPH) @PreAuthorize("hasAuthority('SYS_ADMIN')") @RequestMapping(value = "/tenant", method = RequestMethod.POST) @ResponseBody public Tenant saveTenant(@Parameter(description = "A JSON value representing the tenant.") @RequestBody Tenant tenant) throws Exception { checkEntity(tenant.getId(), tenant, Resource.TENANT); return tbTenantService.save(tenant); }
@Test public void testIsolatedQueueDeletion() throws Exception { loginSysAdmin(); TenantProfile tenantProfile = new TenantProfile(); tenantProfile.setName("Test profile"); TenantProfileData tenantProfileData = new TenantProfileData(); tenantProfileData.setConfiguration(new DefaultTenantProfileConfiguration()); tenantProfile.setProfileData(tenantProfileData); tenantProfile.setIsolatedTbRuleEngine(true); addQueueConfig(tenantProfile, MAIN_QUEUE_NAME); tenantProfile = doPost("/api/tenantProfile", tenantProfile, TenantProfile.class); createDifferentTenant(); loginSysAdmin(); savedDifferentTenant.setTenantProfileId(tenantProfile.getId()); savedDifferentTenant = saveTenant(savedDifferentTenant); TenantId tenantId = differentTenantId; await().atMost(30, TimeUnit.SECONDS) .until(() -> { TopicPartitionInfo tpi = partitionService.resolve(ServiceType.TB_RULE_ENGINE, MAIN_QUEUE_NAME, tenantId, tenantId); return !tpi.getTenantId().get().isSysTenantId(); }); TopicPartitionInfo tpi = new TopicPartitionInfo(MAIN_QUEUE_TOPIC, tenantId, 0, false); String isolatedTopic = tpi.getFullTopicName(); TbMsg expectedMsg = publishTbMsg(tenantId, tpi); awaitTbMsg(tbMsg -> tbMsg.getId().equals(expectedMsg.getId()), 10000); // to wait for consumer start loginSysAdmin(); tenantProfile.setIsolatedTbRuleEngine(false); tenantProfile.getProfileData().setQueueConfiguration(Collections.emptyList()); tenantProfile = doPost("/api/tenantProfile", tenantProfile, TenantProfile.class); await().atMost(30, TimeUnit.SECONDS) .until(() -> partitionService.resolve(ServiceType.TB_RULE_ENGINE, MAIN_QUEUE_NAME, tenantId, tenantId) .getTenantId().get().isSysTenantId()); List<UUID> submittedMsgs = new ArrayList<>(); long timeLeft = TimeUnit.SECONDS.toMillis(7); // based on topic-deletion-delay int msgs = 100; for (int i = 1; i <= msgs; i++) { TbMsg tbMsg = publishTbMsg(tenantId, tpi); submittedMsgs.add(tbMsg.getId()); Thread.sleep(timeLeft / msgs); } await().atMost(30, TimeUnit.SECONDS).untilAsserted(() -> { verify(queueAdmin, times(1)).deleteTopic(eq(isolatedTopic)); }); await().atMost(10, TimeUnit.SECONDS).untilAsserted(() -> { for (UUID msgId : submittedMsgs) { verify(actorContext).tell(argThat(msg -> { return msg instanceof QueueToRuleEngineMsg && ((QueueToRuleEngineMsg) msg).getMsg().getId().equals(msgId); })); } }); }
boolean needsMigration() { File mappingFile = UserIdMapper.getConfigFile(usersDirectory); if (mappingFile.exists() && mappingFile.isFile()) { LOGGER.finest("User mapping file already exists. No migration needed."); return false; } File[] userDirectories = listUserDirectories(); return userDirectories != null && userDirectories.length > 0; }
@Test public void needsMigrationNoUserConfigFiles() throws IOException { UserIdMigrator migrator = createUserIdMigrator(); assertThat(migrator.needsMigration(), is(false)); }
@Override public CompletableFuture<CreateTopicsResponseData> createTopics( ControllerRequestContext context, CreateTopicsRequestData request, Set<String> describable ) { if (request.topics().isEmpty()) { return CompletableFuture.completedFuture(new CreateTopicsResponseData()); } return appendWriteEvent("createTopics", context.deadlineNs(), () -> replicationControl.createTopics(context, request, describable)); }
@Test public void testConfigResourceExistenceChecker() throws Throwable { try ( LocalLogManagerTestEnv logEnv = new LocalLogManagerTestEnv.Builder(3). build(); QuorumControllerTestEnv controlEnv = new QuorumControllerTestEnv.Builder(logEnv). setControllerBuilderInitializer(controllerBuilder -> controllerBuilder.setConfigSchema(SCHEMA) ). build() ) { QuorumController active = controlEnv.activeController(); registerBrokersAndUnfence(active, 5); active.createTopics(ANONYMOUS_CONTEXT, new CreateTopicsRequestData(). setTopics(new CreatableTopicCollection(Collections.singleton( new CreatableTopic().setName("foo"). setReplicationFactor((short) 3). setNumPartitions(1)).iterator())), Collections.singleton("foo")).get(); ConfigResourceExistenceChecker checker = active.new ConfigResourceExistenceChecker(); // A ConfigResource with type=BROKER and name=(empty string) represents // the default broker resource. It is used to set cluster configs. checker.accept(new ConfigResource(BROKER, "")); // Broker 3 exists, so we can set a configuration for it. checker.accept(new ConfigResource(BROKER, "3")); // Broker 10 does not exist, so this should throw an exception. assertThrows(BrokerIdNotRegisteredException.class, () -> checker.accept(new ConfigResource(BROKER, "10"))); // Topic foo exists, so we can set a configuration for it. checker.accept(new ConfigResource(TOPIC, "foo")); // Topic bar does not exist, so this should throw an exception. assertThrows(UnknownTopicOrPartitionException.class, () -> checker.accept(new ConfigResource(TOPIC, "bar"))); testToImages(logEnv.allRecords()); } }
@Override public T deserialize(final String topic, final byte[] data) { final List<?> values = inner.deserialize(topic, data); if (values == null) { return null; } SerdeUtils.throwOnColumnCountMismatch(numColumns, values.size(), false, topic); return factory.apply(values); }
@Test public void shouldConvertListToRowWhenDeserializing() { // Given: givenInnerDeserializerReturns(ImmutableList.of("world", -10)); // When: final TestListWrapper result = deserializer.deserialize("topicName", SERIALIZED); // Then: assertThat(result.getList(), is(ImmutableList.of("world", -10))); }
public static boolean isObjectArray(final Object obj) { return obj instanceof Object[]; }
@SuppressWarnings("ConstantValue") @Test void isObjectArray() { Assertions.assertTrue(DataTypeUtil.isObjectArray(new Object[]{})); Assertions.assertTrue(DataTypeUtil.isObjectArray(new Object[]{new Object()})); Assertions.assertTrue(DataTypeUtil.isObjectArray(new Object[]{new Object(), new Object()})); Assertions.assertTrue(DataTypeUtil.isObjectArray(new Object[]{null})); Assertions.assertTrue(DataTypeUtil.isObjectArray(new Object[]{null, new Object()})); Assertions.assertTrue(DataTypeUtil.isObjectArray(new String[]{"test"})); Assertions.assertFalse(DataTypeUtil.isObjectArray(null)); Assertions.assertFalse(DataTypeUtil.isObjectArray(new int[]{})); Assertions.assertFalse(DataTypeUtil.isObjectArray(new int[]{1})); Assertions.assertFalse(DataTypeUtil.isObjectArray(new int[]{1, 2})); Assertions.assertFalse(DataTypeUtil.isObjectArray(new Object())); Assertions.assertFalse(DataTypeUtil.isObjectArray(new long[]{1})); Assertions.assertFalse(DataTypeUtil.isObjectArray(new double[]{1})); Assertions.assertFalse(DataTypeUtil.isObjectArray(new byte[]{1})); Assertions.assertFalse(DataTypeUtil.isObjectArray(new char[]{'c'})); Assertions.assertFalse(DataTypeUtil.isObjectArray(new float[]{1})); Assertions.assertFalse(DataTypeUtil.isObjectArray(new short[]{1})); }
public File saveSecret(String filename) throws IOException { return secretConfig().save(filename); }
@Test public void testSaveSecret() throws IOException { ZCert cert = new ZCert("uYax]JF%mz@r%ERApd<h]pkJ/Wn//lG!%mQ>Ob3U", "!LeSNcjV%qv!apmqePOP:}MBWPCHfdY4IkqO=AW0"); cert.setMeta("version", "1"); StringWriter writer = new StringWriter(); cert.saveSecret(writer); String datePattern = "[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}[+-]?[0-9]{4}"; String expected = "# \\*\\* Generated on " + datePattern + " by ZCert \\*\\*\n" + "# ZeroMQ CURVE \\*\\*Secret\\*\\* Certificate\n" + "# DO NOT PROVIDE THIS FILE TO OTHER USERS nor change its permissions.\n\n" + "metadata\n" + " version = \"1\"\n" + "curve\n" + " secret-key = \"!LeSNcjV%qv!apmqePOP:}MBWPCHfdY4IkqO=AW0\"\n" + " public-key = \"uYax]JF%mz@r%ERApd<h]pkJ/Wn//lG!%mQ>Ob3U\"\n"; String result = writer.toString(); assertThat(Pattern.compile(expected).matcher(result).matches(), is(true)); }
public static StatementExecutorResponse execute( final ConfiguredStatement<TerminateQuery> statement, final SessionProperties sessionProperties, final KsqlExecutionContext executionContext, final ServiceContext serviceContext ) { final TerminateQuery terminateQuery = statement.getStatement(); // do default behaviour for TERMINATE ALL if (!terminateQuery.getQueryId().isPresent()) { return StatementExecutorResponse.notHandled(); } final QueryId queryId = terminateQuery.getQueryId().get(); final RemoteHostExecutor remoteHostExecutor = RemoteHostExecutor.create( statement, sessionProperties, executionContext, serviceContext.getKsqlClient() ); if (executionContext.getPersistentQuery(queryId).isPresent() || statement.getUnMaskedStatementText().equals( TerminateCluster.TERMINATE_CLUSTER_STATEMENT_TEXT)) { // do default behaviour for terminating persistent queries return StatementExecutorResponse.notHandled(); } else { // Check are we running this push query locally, if yes then terminate, otherwise // propagate terminate query to other nodes if (executionContext.getQuery(queryId).isPresent()) { executionContext.getQuery(queryId).get().close(); } else { final boolean wasTerminatedRemotely = remoteHostExecutor.fetchAllRemoteResults().getLeft() .values() .stream() .map(TerminateQueryEntity.class::cast) .map(TerminateQueryEntity::getWasTerminated) .anyMatch(b -> b.equals(true)); if (!wasTerminatedRemotely) { throw new KsqlException(String.format( "Failed to terminate query with query ID: '%s'", queryId)); } } return StatementExecutorResponse.handled(Optional.of( new TerminateQueryEntity(statement.getMaskedStatementText(), queryId.toString(), true) )); } }
@Test public void shouldDefaultToDistributorForTerminateCluster() { // Given: final ConfiguredStatement<?> terminatePersistent = engine.configure("TERMINATE CLUSTER;"); final KsqlEngine engine = mock(KsqlEngine.class); // When: final Optional<KsqlEntity> ksqlEntity = CustomExecutors.TERMINATE_QUERY.execute( terminatePersistent, mock(SessionProperties.class), engine, this.engine.getServiceContext() ).getEntity(); // Then: assertThat(ksqlEntity, is(Optional.empty())); }
@Override public SourceReader<Long, NumberSequenceSplit> createReader(SourceReaderContext readerContext) { return new IteratorSourceReader<>(readerContext); }
@Test void testReaderCheckpoints() throws Exception { final long from = 177; final long mid = 333; final long to = 563; final long elementsPerCycle = (to - from) / 3; final TestingReaderOutput<Long> out = new TestingReaderOutput<>(); SourceReader<Long, NumberSequenceSource.NumberSequenceSplit> reader = createReader(); reader.addSplits( Arrays.asList( new NumberSequenceSource.NumberSequenceSplit("split-1", from, mid), new NumberSequenceSource.NumberSequenceSplit("split-2", mid + 1, to))); long remainingInCycle = elementsPerCycle; while (reader.pollNext(out) != InputStatus.END_OF_INPUT) { if (--remainingInCycle <= 0) { remainingInCycle = elementsPerCycle; // checkpoint List<NumberSequenceSource.NumberSequenceSplit> splits = reader.snapshotState(1L); // re-create and restore reader = createReader(); if (splits.isEmpty()) { reader.notifyNoMoreSplits(); } else { reader.addSplits(splits); } } } final List<Long> result = out.getEmittedRecords(); validateSequence(result, from, to); }
@Override public void init(TbContext ctx, TbNodeConfiguration configuration) throws TbNodeException { this.config = TbNodeUtils.convert(configuration, TbJsonPathNodeConfiguration.class); this.jsonPathValue = config.getJsonPath(); if (!TbJsonPathNodeConfiguration.DEFAULT_JSON_PATH.equals(this.jsonPathValue)) { this.configurationJsonPath = Configuration.builder() .jsonProvider(new JacksonJsonNodeJsonProvider()) .build(); this.jsonPath = JsonPath.compile(config.getJsonPath()); } }
@Test void givenJsonMsg_whenOnMsg_thenVerifyJavaPrimitiveOutput() throws Exception { config.setJsonPath("$.attributes.length()"); nodeConfiguration = new TbNodeConfiguration(JacksonUtil.valueToTree(config)); node.init(ctx, nodeConfiguration); String data = "{\"attributes\":[{\"attribute_1\":10},{\"attribute_2\":20},{\"attribute_3\":30},{\"attribute_4\":40}]}"; VerifyOutputMsg(data, 1, 4); }
public static TopicPartitionMetadata decode(final String encryptedString) { long timestamp = RecordQueue.UNKNOWN; ProcessorMetadata metadata = new ProcessorMetadata(); if (encryptedString.isEmpty()) { return new TopicPartitionMetadata(timestamp, metadata); } try { final ByteBuffer buffer = ByteBuffer.wrap(Base64.getDecoder().decode(encryptedString)); final byte version = buffer.get(); switch (version) { case (byte) 1: timestamp = buffer.getLong(); break; case LATEST_MAGIC_BYTE: timestamp = buffer.getLong(); if (buffer.remaining() > 0) { final byte[] metaBytes = new byte[buffer.remaining()]; buffer.get(metaBytes); metadata = ProcessorMetadata.deserialize(metaBytes); } break; default: LOG.warn( "Unsupported offset metadata version found. Supported version <= {}. Found version {}.", LATEST_MAGIC_BYTE, version); } } catch (final Exception exception) { LOG.warn("Unsupported offset metadata found"); } return new TopicPartitionMetadata(timestamp, metadata); }
@Test public void shouldDecodeEmptyStringVersionTwo() { final TopicPartitionMetadata expected = new TopicPartitionMetadata(RecordQueue.UNKNOWN, new ProcessorMetadata()); final TopicPartitionMetadata topicMeta = TopicPartitionMetadata.decode(""); assertThat(topicMeta, is(expected)); }
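The framing decode expects, a version byte followed by a big-endian long timestamp, Base64-encoded, round-trips with JDK classes alone. The magic value 2 and the -1 "unknown" sentinel below are assumptions of this sketch, standing in for LATEST_MAGIC_BYTE and RecordQueue.UNKNOWN.

import java.nio.ByteBuffer;
import java.util.Base64;

public class OffsetMetadataCodecSketch {
    static final byte MAGIC = 2; // hypothetical latest version byte

    // Version byte + timestamp, Base64-encoded -- the same framing idea as above.
    static String encode(long timestamp) {
        ByteBuffer buffer = ByteBuffer.allocate(Byte.BYTES + Long.BYTES);
        buffer.put(MAGIC).putLong(timestamp);
        return Base64.getEncoder().encodeToString(buffer.array());
    }

    static long decode(String encoded) {
        if (encoded.isEmpty()) {
            return -1L; // unknown, mirroring the empty-string fast path
        }
        ByteBuffer buffer = ByteBuffer.wrap(Base64.getDecoder().decode(encoded));
        byte version = buffer.get();
        return version == MAGIC ? buffer.getLong() : -1L;
    }

    public static void main(String[] args) {
        System.out.println(decode(encode(1234567890L))); // 1234567890
        System.out.println(decode(""));                  // -1 (unknown)
    }
}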
@Override public void pushWriteLockedEdge(Inode inode, String childName) { Edge edge = new Edge(inode.getId(), childName); Preconditions.checkState(!endsInInode(), "Cannot push edge write lock to edge %s; lock list %s ends in an inode", edge, this); Preconditions.checkState(endsInWriteLock(), "Cannot push write lock; lock list %s ends in a read lock", this); if (endsInMultipleWriteLocks()) { // If the lock before the edge lock is already WRITE, we can just acquire more WRITE locks. lockInode(inode, LockMode.WRITE); lockEdge(inode, childName, LockMode.WRITE); } else { Edge lastEdge = lastEdge(); RWLockResource lastEdgeReadLock = mInodeLockManager.lockEdge(lastEdge, LockMode.READ, mUseTryLock); RWLockResource inodeLock = mInodeLockManager.lockInode(inode, LockMode.READ, mUseTryLock); RWLockResource nextEdgeLock = mInodeLockManager.lockEdge(edge, LockMode.WRITE, mUseTryLock); removeLastLock(); // Remove edge write lock addEdgeLock(lastEdge, LockMode.READ, lastEdgeReadLock); addInodeLock(inode, LockMode.READ, inodeLock); addEdgeLock(edge, LockMode.WRITE, nextEdgeLock); } }
@Test public void pushWriteLockedEdge() { mLockList.lockRootEdge(LockMode.WRITE); assertEquals(LockMode.WRITE, mLockList.getLockMode()); assertTrue(mLockList.getLockedInodes().isEmpty()); mLockList.pushWriteLockedEdge(mRootDir, mDirA.getName()); assertEquals(LockMode.WRITE, mLockList.getLockMode()); assertEquals(Arrays.asList(mRootDir), mLockList.getLockedInodes()); mLockList.pushWriteLockedEdge(mDirA, mDirB.getName()); assertEquals(LockMode.WRITE, mLockList.getLockMode()); assertEquals(Arrays.asList(mRootDir, mDirA), mLockList.getLockedInodes()); checkOnlyNodesReadLocked(mRootDir, mDirA); checkOnlyNodesWriteLocked(); checkOnlyIncomingEdgesReadLocked(mRootDir, mDirA); checkOnlyIncomingEdgesWriteLocked(mDirB); }
public static String parseToString(Map<String, String> attributes) { if (attributes == null || attributes.size() == 0) { return ""; } List<String> kvs = new ArrayList<>(); for (Map.Entry<String, String> entry : attributes.entrySet()) { String value = entry.getValue(); if (Strings.isNullOrEmpty(value)) { kvs.add(entry.getKey()); } else { kvs.add(entry.getKey() + ATTR_KEY_VALUE_EQUAL_SIGN + entry.getValue()); } } return String.join(ATTR_ARRAY_SEPARATOR_COMMA, kvs); }
@Test public void testParseToString() { Assert.assertEquals("", AttributeParser.parseToString(null)); Assert.assertEquals("", AttributeParser.parseToString(newHashMap())); HashMap<String, String> map = new HashMap<>(); int addSize = 10; for (int i = 0; i < addSize; i++) { map.put("+add.key" + i, "value" + i); } int deleteSize = 10; for (int i = 0; i < deleteSize; i++) { map.put("-delete.key" + i, ""); } Assert.assertEquals(addSize + deleteSize, AttributeParser.parseToString(map).split(",").length); }
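The join-with-optional-value layout can be reproduced standalone; the '=' and ',' separators below are assumed to match ATTR_KEY_VALUE_EQUAL_SIGN and ATTR_ARRAY_SEPARATOR_COMMA.

import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class AttributeStringSketch {
    // Joins attributes into "k=v" pairs, leaving bare keys for empty values,
    // following the same layout as the method above.
    static String toAttributeString(Map<String, String> attributes) {
        if (attributes == null || attributes.isEmpty()) {
            return "";
        }
        List<String> parts = new ArrayList<>();
        attributes.forEach((k, v) -> parts.add(v == null || v.isEmpty() ? k : k + "=" + v));
        return String.join(",", parts);
    }

    public static void main(String[] args) {
        Map<String, String> attrs = new LinkedHashMap<>();
        attrs.put("+add.key0", "value0");
        attrs.put("-delete.key0", "");
        System.out.println(toAttributeString(attrs)); // +add.key0=value0,-delete.key0
    }
}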
String getBuildVersion() { return getManifestAttribute("Implementation-Version", "<version_unknown>"); }
@Test public void testRequestedManifestIsLocatedAndLoaded() throws Exception { DiscoveryBuildInfo buildInfo = new DiscoveryBuildInfo(ObjectMapper.class); assertThat(buildInfo.getBuildVersion().contains("version_unknown"), is(false)); }
public static List<CredentialRetriever> getFromCredentialRetrievers( CommonCliOptions commonCliOptions, DefaultCredentialRetrievers defaultCredentialRetrievers) throws FileNotFoundException { // these are all mutually exclusive as enforced by the CLI commonCliOptions .getUsernamePassword() .ifPresent( credential -> defaultCredentialRetrievers.setKnownCredential( credential, "--username/--password")); commonCliOptions .getFromUsernamePassword() .ifPresent( credential -> defaultCredentialRetrievers.setKnownCredential( credential, "--from-username/--from-password")); commonCliOptions .getCredentialHelper() .ifPresent(defaultCredentialRetrievers::setCredentialHelper); commonCliOptions .getFromCredentialHelper() .ifPresent(defaultCredentialRetrievers::setCredentialHelper); return defaultCredentialRetrievers.asList(); }
@Test @Parameters(method = "paramsFromNone") public void testGetFromCredentialRetriever_none(String[] args) throws FileNotFoundException { CommonCliOptions commonCliOptions = CommandLine.populateCommand(new CommonCliOptions(), ArrayUtils.addAll(DEFAULT_ARGS, args)); Credentials.getFromCredentialRetrievers(commonCliOptions, defaultCredentialRetrievers); verify(defaultCredentialRetrievers).asList(); verifyNoMoreInteractions(defaultCredentialRetrievers); }
@Override public void finishSink(String dbName, String tableName, List<TSinkCommitInfo> commitInfos, String branch) { if (commitInfos.isEmpty()) { LOG.warn("No commit info on {}.{} after hive sink", dbName, tableName); return; } HiveTable table = (HiveTable) getTable(dbName, tableName); String stagingDir = commitInfos.get(0).getStaging_dir(); boolean isOverwrite = commitInfos.get(0).isIs_overwrite(); List<PartitionUpdate> partitionUpdates = commitInfos.stream() .map(TSinkCommitInfo::getHive_file_info) .map(fileInfo -> PartitionUpdate.get(fileInfo, stagingDir, table.getTableLocation())) .collect(Collectors.collectingAndThen(Collectors.toList(), PartitionUpdate::merge)); List<String> partitionColNames = table.getPartitionColumnNames(); for (PartitionUpdate partitionUpdate : partitionUpdates) { PartitionUpdate.UpdateMode mode; if (table.isUnPartitioned()) { mode = isOverwrite ? UpdateMode.OVERWRITE : UpdateMode.APPEND; partitionUpdate.setUpdateMode(mode); break; } else { List<String> partitionValues = toPartitionValues(partitionUpdate.getName()); Preconditions.checkState(partitionColNames.size() == partitionValues.size(), "Partition columns names size doesn't equal partition values size. %s vs %s", partitionColNames.size(), partitionValues.size()); if (hmsOps.partitionExists(table, partitionValues)) { mode = isOverwrite ? UpdateMode.OVERWRITE : UpdateMode.APPEND; } else { mode = PartitionUpdate.UpdateMode.NEW; } partitionUpdate.setUpdateMode(mode); } } HiveCommitter committer = new HiveCommitter( hmsOps, fileOps, updateExecutor, refreshOthersFeExecutor, table, new Path(stagingDir)); try (Timer ignored = Tracers.watchScope(EXTERNAL, "HIVE.SINK.commit")) { committer.commit(partitionUpdates); } }
@Test public void testAppendTable() throws Exception { String stagingDir = "hdfs://127.0.0.1:10000/tmp/starrocks/queryid"; THiveFileInfo fileInfo = new THiveFileInfo(); fileInfo.setFile_name("myfile.parquet"); fileInfo.setPartition_path("hdfs://127.0.0.1:10000/tmp/starrocks/queryid/"); fileInfo.setRecord_count(10); fileInfo.setFile_size_in_bytes(100); TSinkCommitInfo tSinkCommitInfo = new TSinkCommitInfo(); tSinkCommitInfo.setStaging_dir(stagingDir); tSinkCommitInfo.setIs_overwrite(false); tSinkCommitInfo.setHive_file_info(fileInfo); new MockUp<RemoteFileOperations>() { @Mock public void asyncRenameFiles( List<CompletableFuture<?>> renameFileFutures, AtomicBoolean cancelled, Path writePath, Path targetPath, List<String> fileNames) { } }; AnalyzeTestUtil.init(); hiveMetadata.finishSink("hive_db", "unpartitioned_table", Lists.newArrayList(tSinkCommitInfo), null); }
@Override public ProtocolConfig build() { ProtocolConfig protocolConfig = new ProtocolConfig(); super.build(protocolConfig); protocolConfig.setAccepts(accepts); protocolConfig.setAccesslog(accesslog); protocolConfig.setBuffer(buffer); protocolConfig.setCharset(charset); protocolConfig.setClient(client); protocolConfig.setCodec(codec); protocolConfig.setContextpath(contextpath); protocolConfig.setCorethreads(corethreads); protocolConfig.setDefault(isDefault); protocolConfig.setDispatcher(dispatcher); protocolConfig.setExchanger(exchanger); protocolConfig.setExtension(extension); protocolConfig.setHeartbeat(heartbeat); protocolConfig.setHost(host); protocolConfig.setIothreads(iothreads); protocolConfig.setKeepAlive(keepAlive); protocolConfig.setName(name); protocolConfig.setNetworker(networker); protocolConfig.setOptimizer(optimizer); protocolConfig.setParameters(parameters); protocolConfig.setPayload(payload); protocolConfig.setPort(port); protocolConfig.setPrompt(prompt); protocolConfig.setQueues(queues); protocolConfig.setRegister(register); protocolConfig.setSerialization(serialization); protocolConfig.setServer(server); protocolConfig.setStatus(status); protocolConfig.setTelnet(telnet); protocolConfig.setThreadpool(threadpool); protocolConfig.setThreads(threads); protocolConfig.setTransporter(transporter); protocolConfig.setSslEnabled(sslEnabled); protocolConfig.setExtProtocol(extProtocol); return protocolConfig; }
@Test
void build() {
    ProtocolBuilder builder = new ProtocolBuilder();
    builder.name("name")
            .host("host")
            .port(8080)
            .contextpath("contextpath")
            .threadpool("mockthreadpool")
            .corethreads(1)
            .threads(2)
            .iothreads(3)
            .queues(4)
            .accepts(5)
            .codec("mockcodec")
            .serialization("serialization")
            .charset("utf-8")
            .payload(6)
            .buffer(1024)
            .heartbeat(1000)
            .accesslog("accesslog")
            .transporter("mocktransporter")
            .exchanger("mockexchanger")
            .dispatcher("mockdispatcher")
            .networker("networker")
            .server("server")
            .client("client")
            .telnet("mocktelnethandler")
            .prompt("prompt")
            .status("mockstatuschecker")
            .register(true)
            .keepAlive(false)
            .optimizer("optimizer")
            .extension("extension")
            .isDefault(true)
            .appendParameter("default.num", "one")
            .id("id");

    ProtocolConfig config = builder.build();
    ProtocolConfig config2 = builder.build();

    Assertions.assertEquals(8080, config.getPort());
    Assertions.assertEquals(1, config.getCorethreads());
    Assertions.assertEquals(2, config.getThreads());
    Assertions.assertEquals(3, config.getIothreads());
    Assertions.assertEquals(4, config.getQueues());
    Assertions.assertEquals(5, config.getAccepts());
    Assertions.assertEquals(6, config.getPayload());
    Assertions.assertEquals(1024, config.getBuffer());
    Assertions.assertEquals(1000, config.getHeartbeat());
    Assertions.assertEquals("name", config.getName());
    Assertions.assertEquals("host", config.getHost());
    Assertions.assertEquals("contextpath", config.getContextpath());
    Assertions.assertEquals("mockthreadpool", config.getThreadpool());
    Assertions.assertEquals("mockcodec", config.getCodec());
    Assertions.assertEquals("serialization", config.getSerialization());
    Assertions.assertEquals("utf-8", config.getCharset());
    Assertions.assertEquals("accesslog", config.getAccesslog());
    Assertions.assertEquals("mocktransporter", config.getTransporter());
    Assertions.assertEquals("mockexchanger", config.getExchanger());
    Assertions.assertEquals("mockdispatcher", config.getDispatcher());
    Assertions.assertEquals("networker", config.getNetworker());
    Assertions.assertEquals("server", config.getServer());
    Assertions.assertEquals("client", config.getClient());
    Assertions.assertEquals("mocktelnethandler", config.getTelnet());
    Assertions.assertEquals("prompt", config.getPrompt());
    Assertions.assertEquals("mockstatuschecker", config.getStatus());
    Assertions.assertEquals("optimizer", config.getOptimizer());
    Assertions.assertEquals("extension", config.getExtension());
    Assertions.assertTrue(config.isRegister());
    Assertions.assertFalse(config.getKeepAlive());
    Assertions.assertTrue(config.isDefault());
    Assertions.assertTrue(config.getParameters().containsKey("default.num"));
    Assertions.assertEquals("one", config.getParameters().get("default.num"));
    Assertions.assertEquals("id", config.getId());
    Assertions.assertNotSame(config, config2);
}
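The final assertNotSame pins down that build() materializes a fresh ProtocolConfig on every call, copying each field. One consequence worth noting (a sketch reusing the test's builder): later builder mutations do not leak into configs built earlier.

// Sketch: configs built earlier are unaffected by subsequent builder changes.
ProtocolConfig first = builder.build();
builder.port(9090);
ProtocolConfig second = builder.build();
Assertions.assertEquals(8080, first.getPort());  // still the original port
Assertions.assertEquals(9090, second.getPort());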
@Override
public void write(InputT element, Context context) throws IOException, InterruptedException {
    while (bufferedRequestEntries.size() >= maxBufferedRequests) {
        flush();
    }
    addEntryToBuffer(elementConverter.apply(element, context), false);
    nonBlockingFlush();
}
@Test
public void testThatUnwrittenRecordsInBufferArePersistedWhenSnapshotIsTaken()
        throws IOException, InterruptedException {
    AsyncSinkWriterImpl sink = new AsyncSinkWriterImplBuilder().context(sinkInitContext).build();
    for (int i = 0; i < 23; i++) {
        sink.write(String.valueOf(i));
    }
    assertThat(res.size()).isEqualTo(20);
    assertThatBufferStatesAreEqual(sink.wrapRequests(20, 21, 22), getWriterState(sink));
}
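The test writes 23 records and expects 20 flushed with 3 left buffered, which presumably reflects a batch size of 10 configured by the builder's defaults (the builder body is not shown here). The write() loop above guarantees the buffer never exceeds maxBufferedRequests before a new entry is admitted; a standalone sketch of that invariant, with illustrative types:

// Standalone sketch of the admit-after-flush invariant in write().
Deque<String> bufferedRequestEntries = new ArrayDeque<>();
int maxBufferedRequests = 10;
// On each write(element):
while (bufferedRequestEntries.size() >= maxBufferedRequests) {
    // flush() drains buffered entries to the destination; modeled here as a simple drain.
    bufferedRequestEntries.clear();
}
bufferedRequestEntries.add("element"); // buffer size stays bounded by maxBufferedRequests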
@Override
public boolean addTopicConfig(final String topicName, final Map<String, ?> overrides) {
    final ConfigResource resource = new ConfigResource(ConfigResource.Type.TOPIC, topicName);
    final Map<String, String> stringConfigs = toStringConfigs(overrides);
    try {
        final Map<String, String> existingConfig = topicConfig(topicName, false);
        final boolean changed = stringConfigs.entrySet().stream()
            .anyMatch(e -> !Objects.equals(existingConfig.get(e.getKey()), e.getValue()));
        if (!changed) {
            return false;
        }
        final Set<AlterConfigOp> entries = stringConfigs.entrySet().stream()
            .map(e -> new ConfigEntry(e.getKey(), e.getValue()))
            .map(ce -> new AlterConfigOp(ce, AlterConfigOp.OpType.SET))
            .collect(Collectors.toSet());
        final Map<ConfigResource, Collection<AlterConfigOp>> request =
            Collections.singletonMap(resource, entries);
        ExecutorUtil.executeWithRetries(
            () -> adminClient.get().incrementalAlterConfigs(request).all().get(),
            ExecutorUtil.RetryBehaviour.ON_RETRYABLE);
        return true;
    } catch (final UnsupportedVersionException e) {
        return addTopicConfigLegacy(topicName, stringConfigs);
    } catch (final Exception e) {
        throw new KafkaResponseGetFailedException(
            "Failed to set config for Kafka Topic " + topicName, e);
    }
}
@Test
public void shouldSetNonStringTopicConfig() {
    // Given:
    givenTopicConfigs(
        "peter",
        overriddenConfigEntry(TopicConfig.RETENTION_MS_CONFIG, "12345"),
        defaultConfigEntry(TopicConfig.COMPRESSION_TYPE_CONFIG, "snappy")
    );

    final Map<String, ?> configOverrides = ImmutableMap.of(
        TopicConfig.RETENTION_MS_CONFIG, 54321L
    );

    // When:
    final boolean changed = kafkaTopicClient.addTopicConfig("peter", configOverrides);

    // Then:
    assertThat("should return true", changed);
    verify(adminClient).incrementalAlterConfigs(ImmutableMap.of(
        topicResource("peter"),
        ImmutableSet.of(
            setConfig(TopicConfig.RETENTION_MS_CONFIG, "54321")
        )
    ));
}
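The verification above mirrors what addTopicConfig sends to the Kafka admin API. Outside the client wrapper, the same update against the standard AdminClient looks like this (a sketch; the topic name and value are taken from the test):

// Sketch: the raw incrementalAlterConfigs call that the wrapper issues with retries.
ConfigResource resource = new ConfigResource(ConfigResource.Type.TOPIC, "peter");
AlterConfigOp setRetention = new AlterConfigOp(
        new ConfigEntry(TopicConfig.RETENTION_MS_CONFIG, "54321"),
        AlterConfigOp.OpType.SET);
adminClient.incrementalAlterConfigs(
        Collections.singletonMap(resource, Collections.singleton(setRetention)))
        .all().get(); // blocks until the broker applies the change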
public static Db use() { return use(DSFactory.get()); }
@Test
@Disabled
public void queryFetchTest() throws SQLException {
    // https://gitee.com/dromara/hutool/issues/I4JXWN
    Db.use().query(conn -> {
        PreparedStatement ps = conn.prepareStatement("select * from table",
                ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY);
        ps.setFetchSize(Integer.MIN_VALUE);
        ps.setFetchDirection(ResultSet.FETCH_FORWARD);
        return ps;
    }, EntityListHandler.create());
}
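The Integer.MIN_VALUE fetch size is MySQL Connector/J's signal to stream rows one at a time instead of buffering the whole result set in memory. The same setup in plain JDBC, without the Hutool wrapper (a sketch; dataSource and the table name big_table are hypothetical):

try (Connection conn = dataSource.getConnection();
     PreparedStatement ps = conn.prepareStatement("select * from big_table",
             ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY)) {
    ps.setFetchSize(Integer.MIN_VALUE); // MySQL Connector/J: row-by-row streaming
    ps.setFetchDirection(ResultSet.FETCH_FORWARD);
    try (ResultSet rs = ps.executeQuery()) {
        while (rs.next()) {
            // process one row at a time without materializing the full result set
        }
    }
}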
@Override
public void execute(Runnable command) {
    taskExecutor.execute(new RunnableWrapper<>(command, contextGetter, contextSetter));
}
@Test
public void testExecute() {
    TEST_THREAD_LOCAL.set(TEST);
    ThreadPoolTaskExecutor executor = new ThreadPoolTaskExecutor();
    executor.initialize();
    AtomicReference<Boolean> result = new AtomicReference<>(false);
    CountDownLatch latch = new CountDownLatch(1);
    TaskExecutorWrapper<String> taskExecutorWrapper = new TaskExecutorWrapper<>(
            executor, TEST_THREAD_LOCAL::get, TEST_THREAD_LOCAL::set);
    taskExecutorWrapper.execute(() -> {
        result.set(TEST.equals(TEST_THREAD_LOCAL.get()));
        latch.countDown();
    });
    try {
        latch.await();
        assertThat(result.get()).isTrue();
    } catch (InterruptedException e) {
        fail(e.getMessage());
    }
}
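The test passes because the wrapper propagates the submitting thread's ThreadLocal value into the pool thread. RunnableWrapper's body is not shown above; a hedged sketch of the capture/restore pattern it presumably implements, with illustrative names:

// Hedged sketch: capture on the caller's thread, restore on the pool thread.
final String captured = contextGetter.get();   // read TEST_THREAD_LOCAL on the submitting thread
Runnable wrapped = () -> {
    contextSetter.accept(captured);            // install it on the pool thread
    try {
        command.run();
    } finally {
        contextSetter.accept(null);            // avoid leaking context across pooled tasks
    }
};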
@Override
public KsMaterializedQueryResult<WindowedRow> get(
    final GenericKey key,
    final int partition,
    final Range<Instant> windowStartBounds,
    final Range<Instant> windowEndBounds,
    final Optional<Position> position
) {
    try {
        final Instant lower = calculateLowerBound(windowStartBounds, windowEndBounds);
        final Instant upper = calculateUpperBound(windowStartBounds, windowEndBounds);
        final WindowKeyQuery<GenericKey, ValueAndTimestamp<GenericRow>> query =
            WindowKeyQuery.withKeyAndWindowStartRange(key, lower, upper);
        StateQueryRequest<WindowStoreIterator<ValueAndTimestamp<GenericRow>>> request =
            inStore(stateStore.getStateStoreName()).withQuery(query);
        if (position.isPresent()) {
            request = request.withPositionBound(PositionBound.at(position.get()));
        }
        final KafkaStreams streams = stateStore.getKafkaStreams();
        final StateQueryResult<WindowStoreIterator<ValueAndTimestamp<GenericRow>>> result =
            streams.query(request);
        final QueryResult<WindowStoreIterator<ValueAndTimestamp<GenericRow>>> queryResult =
            result.getPartitionResults().get(partition);
        if (queryResult.isFailure()) {
            throw failedQueryException(queryResult);
        }
        if (queryResult.getResult() == null) {
            return KsMaterializedQueryResult.rowIteratorWithPosition(
                Collections.emptyIterator(), queryResult.getPosition());
        }
        try (WindowStoreIterator<ValueAndTimestamp<GenericRow>> it = queryResult.getResult()) {
            final Builder<WindowedRow> builder = ImmutableList.builder();
            while (it.hasNext()) {
                final KeyValue<Long, ValueAndTimestamp<GenericRow>> next = it.next();
                final Instant windowStart = Instant.ofEpochMilli(next.key);
                if (!windowStartBounds.contains(windowStart)) {
                    continue;
                }
                final Instant windowEnd = windowStart.plus(windowSize);
                if (!windowEndBounds.contains(windowEnd)) {
                    continue;
                }
                final TimeWindow window =
                    new TimeWindow(windowStart.toEpochMilli(), windowEnd.toEpochMilli());
                final WindowedRow row = WindowedRow.of(
                    stateStore.schema(),
                    new Windowed<>(key, window),
                    next.value.value(),
                    next.value.timestamp()
                );
                builder.add(row);
            }
            return KsMaterializedQueryResult.rowIteratorWithPosition(
                builder.build().iterator(), queryResult.getPosition());
        }
    } catch (final NotUpToBoundException | MaterializationException e) {
        throw e;
    } catch (final Exception e) {
        throw new MaterializationException("Failed to get value from materialized table", e);
    }
}
@Test
@SuppressWarnings("unchecked")
public void shouldReturnValuesForOpenEndBounds_fetchAll() {
    // Given:
    final Range<Instant> end = Range.open(
        NOW,
        NOW.plusSeconds(10)
    );
    final Range<Instant> startEquiv = Range.open(
        end.lowerEndpoint().minus(WINDOW_SIZE),
        end.upperEndpoint().minus(WINDOW_SIZE)
    );
    final StateQueryResult<KeyValueIterator<Windowed<GenericKey>, ValueAndTimestamp<GenericRow>>>
        partitionResult = new StateQueryResult<>();
    final QueryResult<KeyValueIterator<Windowed<GenericKey>, ValueAndTimestamp<GenericRow>>>
        queryResult = QueryResult.forResult(keyValueIterator);
    queryResult.setPosition(POSITION);
    partitionResult.addResult(PARTITION, queryResult);
    when(kafkaStreams.query(any(StateQueryRequest.class))).thenReturn(partitionResult);
    when(keyValueIterator.hasNext())
        .thenReturn(true, true, true, false);
    when(keyValueIterator.next())
        .thenReturn(new KeyValue<>(new Windowed<>(A_KEY, new TimeWindow(
            startEquiv.lowerEndpoint().toEpochMilli(),
            startEquiv.lowerEndpoint().toEpochMilli() + WINDOW_SIZE.toMillis())), VALUE_1))
        .thenReturn(new KeyValue<>(new Windowed<>(A_KEY2, new TimeWindow(
            startEquiv.lowerEndpoint().plusMillis(1).toEpochMilli(),
            startEquiv.lowerEndpoint().toEpochMilli() + WINDOW_SIZE.toMillis() + 1)), VALUE_2))
        .thenReturn(new KeyValue<>(new Windowed<>(A_KEY3, new TimeWindow(
            startEquiv.upperEndpoint().toEpochMilli(),
            startEquiv.upperEndpoint().toEpochMilli() + WINDOW_SIZE.toMillis())), VALUE_3))
        .thenThrow(new AssertionError());

    // When:
    final KsMaterializedQueryResult<WindowedRow> result =
        table.get(PARTITION, Range.all(), end);

    // Then:
    final Iterator<WindowedRow> rowIterator = result.getRowIterator();
    assertThat(rowIterator.hasNext(), is(true));
    assertThat(rowIterator.next(), is(WindowedRow.of(
        SCHEMA,
        windowedKey(A_KEY2, startEquiv.lowerEndpoint().plusMillis(1)),
        VALUE_2.value(),
        VALUE_2.timestamp())));
    assertThat(rowIterator.hasNext(), is(false));
    assertThat(result.getPosition(), not(Optional.empty()));
    assertThat(result.getPosition().get(), is(POSITION));
}
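Only the middle row survives because Range.open excludes both endpoints: the first mocked window ends exactly at end.lowerEndpoint() and the third exactly at end.upperEndpoint(). A sketch of the per-row end-bound check the lookup applies (Guava Range; values taken from the test):

// Sketch: window-end containment against the open end-bounds, as in the filtering loop.
Instant windowStart = startEquiv.lowerEndpoint().plusMillis(1); // the surviving row
Instant windowEnd = windowStart.plus(WINDOW_SIZE);              // == end.lowerEndpoint() + 1ms
boolean kept = end.contains(windowEnd);                         // true: strictly inside (lower, upper)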
int parseAndConvert(String[] args) throws Exception {
    Options opts = createOptions();
    int retVal = 0;
    try {
        if (args.length == 0) {
            LOG.info("Missing command line arguments");
            printHelp(opts);
            return 0;
        }
        CommandLine cliParser = new GnuParser().parse(opts, args);
        if (cliParser.hasOption(CliOption.HELP.shortSwitch)) {
            printHelp(opts);
            return 0;
        }
        FSConfigToCSConfigConverter converter = prepareAndGetConverter(cliParser);
        converter.convert(converterParams);
        String outputDir = converterParams.getOutputDirectory();
        boolean skipVerification = cliParser.hasOption(CliOption.SKIP_VERIFICATION.shortSwitch);
        if (outputDir != null && !skipVerification) {
            validator.validateConvertedConfig(converterParams.getOutputDirectory());
        }
    } catch (ParseException e) {
        String msg = "Options parsing failed: " + e.getMessage();
        logAndStdErr(e, msg);
        printHelp(opts);
        retVal = -1;
    } catch (PreconditionException e) {
        String msg = "Cannot start FS config conversion due to the following" +
            " precondition error: " + e.getMessage();
        handleException(e, msg);
        retVal = -1;
    } catch (UnsupportedPropertyException e) {
        String msg = "Unsupported property/setting encountered during FS config " +
            "conversion: " + e.getMessage();
        handleException(e, msg);
        retVal = -1;
    } catch (ConversionException | IllegalArgumentException e) {
        String msg = "Fatal error during FS config conversion: " + e.getMessage();
        handleException(e, msg);
        retVal = -1;
    } catch (VerificationException e) {
        Throwable cause = e.getCause();
        String msg = "Verification failed: " + e.getCause().getMessage();
        conversionOptions.handleVerificationFailure(cause, msg);
        retVal = -1;
    }
    conversionOptions.handleParsingFinished();
    return retVal;
}
@Test
public void testMissingYarnSiteXmlArgument() throws Exception {
    setupFSConfigConversionFiles(true);
    FSConfigToCSConfigArgumentHandler argumentHandler = createArgumentHandler();
    String[] args = new String[] {"-o", FSConfigConverterTestCommons.OUTPUT_DIR};

    int retVal = argumentHandler.parseAndConvert(args);

    assertEquals("Return value", -1, retVal);
    assertTrue("Error content missing", fsTestCommons.getErrContent()
        .toString().contains("Missing yarn-site.xml parameter"));
}
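For contrast, a successful run would pass both the input and output switches. The "-y" flag for yarn-site.xml is an assumption here, inferred from the error message; the authoritative short switches live in the CliOption enum referenced by parseAndConvert:

// Hedged sketch: the happy path presumably looks like this ("-y" is assumed, not confirmed).
String[] args = new String[] {"-y", yarnSiteXmlPath, "-o", FSConfigConverterTestCommons.OUTPUT_DIR};
int retVal = argumentHandler.parseAndConvert(args);
assertEquals("Return value", 0, retVal);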
@Override
public InputStream read(final Path file, final TransferStatus status, final ConnectionCallback callback)
        throws BackgroundException {
    try {
        if(0L == status.getLength()) {
            return new NullInputStream(0L);
        }
        final Storage.Objects.Get request = session.getClient().objects().get(
            containerService.getContainer(file).getName(), containerService.getKey(file));
        if(containerService.getContainer(file).attributes().getCustom()
                .containsKey(GoogleStorageAttributesFinderFeature.KEY_REQUESTER_PAYS)) {
            request.setUserProject(session.getHost().getCredentials().getUsername());
        }
        final VersioningConfiguration versioning = null != session.getFeature(Versioning.class)
            ? session.getFeature(Versioning.class).getConfiguration(containerService.getContainer(file))
            : VersioningConfiguration.empty();
        if(versioning.isEnabled()) {
            if(StringUtils.isNotBlank(file.attributes().getVersionId())) {
                request.setGeneration(Long.parseLong(file.attributes().getVersionId()));
            }
        }
        if(status.isAppend()) {
            final HttpRange range = HttpRange.withStatus(status);
            final String header;
            if(TransferStatus.UNKNOWN_LENGTH == range.getEnd()) {
                header = String.format("bytes=%d-", range.getStart());
            }
            else {
                header = String.format("bytes=%d-%d", range.getStart(), range.getEnd());
            }
            if(log.isDebugEnabled()) {
                log.debug(String.format("Add range header %s for file %s", header, file));
            }
            final HttpHeaders headers = request.getRequestHeaders();
            headers.setRange(header);
            // Disable compression
            headers.setAcceptEncoding("identity");
        }
        return request.executeMediaAsInputStream();
    }
    catch(IOException e) {
        throw new GoogleStorageExceptionMappingService().map("Download {0} failed", e, file);
    }
}
@Test(expected = NotfoundException.class)
public void testReadNotFound() throws Exception {
    final TransferStatus status = new TransferStatus();
    final Path container = new Path("cyberduck-test-eu", EnumSet.of(Path.Type.directory, Path.Type.volume));
    new GoogleStorageReadFeature(session).read(
        new Path(container, "nosuchname", EnumSet.of(Path.Type.file)),
        status.withLength(2L), new DisabledConnectionCallback());
}
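When status.isAppend() is true, read() turns the transfer offsets into an HTTP Range header. The branch logic in isolation (a sketch with illustrative byte values; TransferStatus.UNKNOWN_LENGTH comes from the method above):

// Sketch: open-ended resume vs. bounded range, mirroring the header construction in read().
long start = 512L;
long end = TransferStatus.UNKNOWN_LENGTH; // no known end -> open-ended range
String header = (TransferStatus.UNKNOWN_LENGTH == end)
        ? String.format("bytes=%d-", start)         // "bytes=512-"
        : String.format("bytes=%d-%d", start, end); // e.g. "bytes=512-1535"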
public byte[] getDir_uuid() { return dir_uuid; }
@Test
public void getDir_uuid() {
    assertNotNull(chmItsfHeader.getDir_uuid());
}
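Note that the getter returns the internal array itself, so callers share (and can mutate) the header's state. If isolation were required, the usual idiom is a defensive clone; this is a suggested variant, not the source's implementation:

// Hedged sketch: defensive-copy variant of the getter (not what the source does).
public byte[] getDir_uuid() {
    return dir_uuid == null ? null : dir_uuid.clone();
}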
private Schema getSchema() {
    try {
        final String schemaString = getProperties().getProperty(SCHEMA);
        if (schemaString == null) {
            throw new ParquetEncodingException("Can not store relation in Parquet as the schema is unknown");
        }
        return Utils.getSchemaFromString(schemaString);
    } catch (ParserException e) {
        throw new ParquetEncodingException("can not get schema from context", e);
    }
}
@Test
public void testComplexSchema() throws ExecException, Exception {
    String out = "target/out";
    PigServer pigServer = new PigServer(ExecType.LOCAL);
    Data data = Storage.resetData(pigServer);
    Collection<Tuple> list = new ArrayList<Tuple>();
    for (int i = 0; i < 1000; i++) {
        list.add(tuple("a" + i, bag(tuple("o", "b"))));
    }
    for (int i = 10; i < 2000; i++) {
        list.add(tuple("a" + i, bag(tuple("o", "b"), tuple("o", "b"), tuple("o", "b"), tuple("o", "b"))));
    }
    for (int i = 20; i < 3000; i++) {
        list.add(tuple("a" + i, bag(tuple("o", "b"), tuple("o", null), tuple(null, "b"), tuple(null, null))));
    }
    for (int i = 30; i < 4000; i++) {
        list.add(tuple("a" + i, null));
    }
    Collections.shuffle((List<?>) list);
    data.set("in", "a:chararray, b:{t:(c:chararray, d:chararray)}", list);
    pigServer.setBatchOn();
    pigServer.registerQuery("A = LOAD 'in' USING mock.Storage();");
    pigServer.deleteFile(out);
    pigServer.registerQuery("Store A into '" + out + "' using " + ParquetStorer.class.getName() + "();");
    if (pigServer.executeBatch().get(0).getStatus() != JOB_STATUS.COMPLETED) {
        throw new RuntimeException("Job failed", pigServer.executeBatch().get(0).getException());
    }
    {
        pigServer.registerQuery("B = LOAD '" + out + "' USING " + ParquetLoader.class.getName() + "();");
        pigServer.registerQuery("Store B into 'out' using mock.Storage();");
        if (pigServer.executeBatch().get(0).getStatus() != JOB_STATUS.COMPLETED) {
            throw new RuntimeException("Job failed", pigServer.executeBatch().get(0).getException());
        }
        List<Tuple> result = data.get("out");
        assertEquals(list, result);
        final Schema schema = data.getSchema("out");
        assertEquals(
            "{a:chararray, b:{t:(c:chararray, d:chararray)}}".replaceAll(" ", ""),
            schema.toString().replaceAll(" ", ""));
    }
    {
        pigServer.registerQuery(
            "C = LOAD '" + out + "' USING " + ParquetLoader.class.getName() + "('a:chararray');");
        pigServer.registerQuery("Store C into 'out2' using mock.Storage();");
        if (pigServer.executeBatch().get(0).getStatus() != JOB_STATUS.COMPLETED) {
            throw new RuntimeException("Job failed", pigServer.executeBatch().get(0).getException());
        }
        final Function<Tuple, Object> grabFirstColumn = new Function<Tuple, Object>() {
            @Override
            public Object apply(Tuple input) {
                try {
                    return input.get(0);
                } catch (ExecException e) {
                    throw new RuntimeException(e);
                }
            }
        };
        List<Tuple> result2 = data.get("out2");
        // Functional programming!!
        Object[] result2int = Collections2.transform(result2, grabFirstColumn).toArray();
        Object[] input2int = Collections2.transform(list, grabFirstColumn).toArray();
        assertArrayEquals(input2int, result2int);
    }
}
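getSchema() above delegates the actual parsing to Pig's Utils.getSchemaFromString. The schema string the test round-trips can be parsed the same way in isolation (a minimal sketch using Pig's public utility):

// Sketch: parsing the Pig schema string from the test, exactly as getSchema() does.
Schema schema = Utils.getSchemaFromString("a:chararray, b:{t:(c:chararray, d:chararray)}");
// schema.toString() then yields the bag-of-tuples structure asserted above (modulo spacing).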