Dataset columns: focal_method (string, lengths 13 to 60.9k) and test_case (string, lengths 25 to 109k). The rows below alternate: each focal method is followed immediately by its test case.
@Override public long availablePermits() { return get(availablePermitsAsync()); }
@Test public void testAvailablePermits() throws InterruptedException { RRateLimiter rt = redisson.getRateLimiter("rt2"); rt.trySetRate(RateType.OVERALL, 10, 5, RateIntervalUnit.SECONDS); assertThat(rt.availablePermits()).isEqualTo(10); rt.acquire(1); Thread.sleep(6000); assertThat(rt.availablePermits()).isEqualTo(10); }
<K, V> List<ConsumerRecord<K, V>> fetchRecords(FetchConfig fetchConfig, Deserializers<K, V> deserializers, int maxRecords) { // Error when fetching the next record before deserialization. if (corruptLastRecord) throw new KafkaException("Received exception when fetching the next record from " + partition + ". If needed, please seek past the record to " + "continue consumption.", cachedRecordException); if (isConsumed) return Collections.emptyList(); List<ConsumerRecord<K, V>> records = new ArrayList<>(); try { for (int i = 0; i < maxRecords; i++) { // Only move to next record if there was no exception in the last fetch. Otherwise, we should // use the last record to do deserialization again. if (cachedRecordException == null) { corruptLastRecord = true; lastRecord = nextFetchedRecord(fetchConfig); corruptLastRecord = false; } if (lastRecord == null) break; Optional<Integer> leaderEpoch = maybeLeaderEpoch(currentBatch.partitionLeaderEpoch()); TimestampType timestampType = currentBatch.timestampType(); ConsumerRecord<K, V> record = parseRecord(deserializers, partition, leaderEpoch, timestampType, lastRecord); records.add(record); recordsRead++; bytesRead += lastRecord.sizeInBytes(); nextFetchOffset = lastRecord.offset() + 1; // In some cases, the deserialization may have thrown an exception and the retry may succeed, // we allow user to move forward in this case. cachedRecordException = null; } } catch (SerializationException se) { cachedRecordException = se; if (records.isEmpty()) throw se; } catch (KafkaException e) { cachedRecordException = e; if (records.isEmpty()) throw new KafkaException("Received exception when fetching the next record from " + partition + ". If needed, please seek past the record to " + "continue consumption.", e); } return records; }
@Test public void testCommittedTransactionRecordsIncluded() { int numRecords = 10; Records rawRecords = newTranscactionalRecords(ControlRecordType.COMMIT, numRecords); FetchResponseData.PartitionData partitionData = new FetchResponseData.PartitionData() .setRecords(rawRecords); CompletedFetch completedFetch = newCompletedFetch(0, partitionData); try (final Deserializers<String, String> deserializers = newStringDeserializers()) { FetchConfig fetchConfig = newFetchConfig(IsolationLevel.READ_COMMITTED, true); List<ConsumerRecord<String, String>> records = completedFetch.fetchRecords(fetchConfig, deserializers, 10); assertEquals(10, records.size()); } }
@Override public URL getApiRoute(String apiRouteBase) throws MalformedURLException { return new URL(apiRouteBase); }
@Test public void testGetApiRoute() throws MalformedURLException { Assert.assertEquals( new URL("http://someApiBase/"), testAuthenticationMethodRetriever.getApiRoute("http://someApiBase/")); }
@Operation(summary = "queryDataSource", description = "QUERY_DATA_SOURCE_NOTES") @Parameters({ @Parameter(name = "id", description = "DATA_SOURCE_ID", required = true, schema = @Schema(implementation = int.class, example = "100")) }) @GetMapping(value = "/{id}") @ResponseStatus(HttpStatus.OK) @ApiException(QUERY_DATASOURCE_ERROR) public Result<Object> queryDataSource(@Parameter(hidden = true) @RequestAttribute(value = Constants.SESSION_USER) User loginUser, @PathVariable("id") int id) { BaseDataSourceParamDTO dataSource = dataSourceService.queryDataSource(id, loginUser); return Result.success(dataSource); }
@Disabled("unknown you datasources id") @Test public void testQueryDataSource() throws Exception { MvcResult mvcResult = mockMvc.perform(get("/datasources/2") .header("sessionId", sessionId)) .andExpect(status().isOk()) .andExpect(content().contentType(MediaType.APPLICATION_JSON)) .andReturn(); Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class); Assertions.assertEquals(Status.SUCCESS.getCode(), result.getCode().intValue()); logger.info(mvcResult.getResponse().getContentAsString()); }
public static Optional<TableMetaData> load(final DataSource dataSource, final String tableNamePattern, final DatabaseType databaseType) throws SQLException { DialectDatabaseMetaData dialectDatabaseMetaData = new DatabaseTypeRegistry(databaseType).getDialectDatabaseMetaData(); try (MetaDataLoaderConnection connection = new MetaDataLoaderConnection(databaseType, dataSource.getConnection())) { String formattedTableNamePattern = dialectDatabaseMetaData.formatTableNamePattern(tableNamePattern); return isTableExist(connection, formattedTableNamePattern) ? Optional.of(new TableMetaData(tableNamePattern, ColumnMetaDataLoader.load( connection, formattedTableNamePattern, databaseType), IndexMetaDataLoader.load(connection, formattedTableNamePattern), Collections.emptyList())) : Optional.empty(); } }
@Test void assertLoadWithExistedTable() throws SQLException { Map<String, SchemaMetaData> actual = MetaDataLoader.load(Collections.singleton(new MetaDataLoaderMaterial(Collections.singleton(TEST_TABLE), dataSource, databaseType, "sharding_db"))); TableMetaData tableMetaData = actual.get("sharding_db").getTables().iterator().next(); Collection<ColumnMetaData> columns = tableMetaData.getColumns(); assertThat(columns.size(), is(2)); Iterator<ColumnMetaData> columnsIterator = columns.iterator(); assertColumnMetaData(columnsIterator.next(), "pk_col", Types.INTEGER, true, true); assertColumnMetaData(columnsIterator.next(), "col", Types.VARCHAR, false, false); Collection<IndexMetaData> indexes = tableMetaData.getIndexes(); assertThat(indexes.size(), is(1)); Iterator<IndexMetaData> indexesIterator = indexes.iterator(); assertThat(indexesIterator.next().getName(), is("my_index")); }
public MonetaryFormat decimalMark(char decimalMark) { checkArgument(!Character.isDigit(decimalMark), () -> "decimalMark can't be digit: " + decimalMark); checkArgument(decimalMark > 0, () -> "decimalMark must be positive: " + decimalMark); if (decimalMark == this.decimalMark) return this; else return new MonetaryFormat(negativeSign, positiveSign, zeroDigit, decimalMark, minDecimals, decimalGroups, shift, roundingMode, codes, codeSeparator, codePrefixed); }
@Test public void testDecimalMark() { assertEquals("1.00", NO_CODE.format(Coin.COIN).toString()); assertEquals("1,00", NO_CODE.decimalMark(',').format(Coin.COIN).toString()); }
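The early `return this` in decimalMark() is the immutable "wither" idiom: configuration methods return a new instance rather than mutating, and short-circuit when the value is unchanged. A minimal sketch of the idiom; the class and field here are illustrative, not bitcoinj API:

```java
// Minimal sketch of the immutable "wither" idiom used by decimalMark();
// the class name and single field are illustrative only.
final class Format {
    private final char decimalMark;

    Format(char decimalMark) { this.decimalMark = decimalMark; }

    Format decimalMark(char mark) {
        // Avoid allocating when nothing changes, mirroring the focal method.
        return mark == this.decimalMark ? this : new Format(mark);
    }
}
```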
public static IpPrefix valueOf(int address, int prefixLength) { return new IpPrefix(IpAddress.valueOf(address), prefixLength); }
@Test(expected = IllegalArgumentException.class) public void testInvalidValueOfStringNegativePrefixLengthIPv4() { IpPrefix ipPrefix; ipPrefix = IpPrefix.valueOf("1.2.3.4/-1"); }
public void convert(FSConfigToCSConfigConverterParams params) throws Exception { validateParams(params); this.clusterResource = getClusterResource(params); this.convertPlacementRules = params.isConvertPlacementRules(); this.outputDirectory = params.getOutputDirectory(); this.rulesToFile = params.isPlacementRulesToFile(); this.usePercentages = params.isUsePercentages(); this.preemptionMode = params.getPreemptionMode(); prepareOutputFiles(params.isConsole()); loadConversionRules(params.getConversionRulesConfig()); Configuration inputYarnSiteConfig = getInputYarnSiteConfig(params); handleFairSchedulerConfig(params, inputYarnSiteConfig); convert(inputYarnSiteConfig); }
@Test public void testInvalidYarnSiteXml() throws Exception { FSConfigToCSConfigConverterParams params = createParamsBuilder(YARN_SITE_XML_INVALID) .withClusterResource(CLUSTER_RESOURCE_STRING) .build(); expectedException.expect(RuntimeException.class); converter.convert(params); }
@Override public List<WidgetType> findWidgetTypesByWidgetsBundleId(UUID tenantId, UUID widgetsBundleId) { return DaoUtil.convertDataList(widgetTypeRepository.findWidgetTypesByWidgetsBundleId(widgetsBundleId)); }
@Test public void testFindByWidgetsBundleId() { List<WidgetType> widgetTypes = widgetTypeDao.findWidgetTypesByWidgetsBundleId(TenantId.SYS_TENANT_ID.getId(), widgetsBundle.getUuidId()); assertEquals(WIDGET_TYPE_COUNT, widgetTypes.size()); }
public static boolean isEqualCollection(final Collection a, final Collection b) { if (a.size() != b.size()) { return false; } else { Map mapa = getCardinalityMap(a); Map mapb = getCardinalityMap(b); if (mapa.size() != mapb.size()) { return false; } else { Iterator it = mapa.keySet().iterator(); while (it.hasNext()) { Object obj = it.next(); if (getFreq(obj, mapa) != getFreq(obj, mapb)) { return false; } } return true; } } }
@Test void testIsEqualCollection() { List<String> list1 = Arrays.asList("2", "2", "3"); List<String> list2 = Arrays.asList("3", "2", "2"); List<String> list3 = Arrays.asList("3", "2", "3"); List<String> list4 = Arrays.asList("3", "2"); assertTrue(CollectionUtils.isEqualCollection(list1, list2)); assertFalse(CollectionUtils.isEqualCollection(list1, list3)); assertFalse(CollectionUtils.isEqualCollection(list1, list4)); List<String> list5 = Arrays.asList("3", "2", "1"); assertFalse(CollectionUtils.isEqualCollection(list1, list5)); List<String> list6 = Arrays.asList("2", "2", "1"); assertFalse(CollectionUtils.isEqualCollection(list1, list6)); }
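The comparison above is bag (multiset) equality via cardinality maps: two collections are equal iff their sizes match and every element occurs the same number of times in each. A self-contained sketch of the same idea with generics and `Map.merge`; the names are illustrative, not the original utility's API:

```java
import java.util.*;

final class BagEquality {
    static <T> Map<T, Integer> cardinalityMap(Collection<T> c) {
        Map<T, Integer> counts = new HashMap<>();
        for (T item : c) {
            counts.merge(item, 1, Integer::sum); // count each element's frequency
        }
        return counts;
    }

    static <T> boolean isEqualCollection(Collection<T> a, Collection<T> b) {
        // Two bags are equal iff sizes match and every frequency matches.
        return a.size() == b.size() && cardinalityMap(a).equals(cardinalityMap(b));
    }

    public static void main(String[] args) {
        System.out.println(isEqualCollection(
                Arrays.asList("2", "2", "3"), Arrays.asList("3", "2", "2"))); // true
        System.out.println(isEqualCollection(
                Arrays.asList("2", "2", "3"), Arrays.asList("3", "2", "3"))); // false
    }
}
```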
public PrepareResult prepare(HostValidator hostValidator, DeployLogger logger, PrepareParams params, Optional<ApplicationVersions> activeApplicationVersions, Instant now, File serverDbSessionDir, ApplicationPackage applicationPackage, SessionZooKeeperClient sessionZooKeeperClient) { ApplicationId applicationId = params.getApplicationId(); Preparation preparation = new Preparation(hostValidator, logger, params, activeApplicationVersions, TenantRepository.getTenantPath(applicationId.tenant()), serverDbSessionDir, applicationPackage, sessionZooKeeperClient, onnxModelCost, endpointCertificateSecretStores); preparation.preprocess(); try { AllocatedHosts allocatedHosts = preparation.buildModels(now); preparation.makeResult(allocatedHosts); if ( ! params.isDryRun()) { FileReference fileReference = preparation.triggerDistributionOfApplicationPackage(); preparation.writeStateZK(fileReference); preparation.writeEndpointCertificateMetadataZK(); preparation.writeContainerEndpointsZK(); } log.log(Level.FINE, () -> "time used " + params.getTimeoutBudget().timesUsed() + " : " + applicationId); return preparation.result(); } catch (IllegalArgumentException e) { if (e instanceof InvalidApplicationException) throw e; throw new InvalidApplicationException("Invalid application package", e); } }
@Test(expected = LoadBalancerServiceException.class) public void require_that_conflict_is_returned_when_creating_load_balancer_fails() throws IOException { var configserverConfig = new ConfigserverConfig.Builder().hostedVespa(true).build(); MockProvisioner provisioner = new MockProvisioner().transientFailureOnPrepare(); preparer = createPreparer(HostProvisionerProvider.withProvisioner(provisioner, configserverConfig)); var params = new PrepareParams.Builder().applicationId(applicationId("test")).build(); prepare(new File("src/test/resources/deploy/hosted-app"), params); }
@Override public Object merge(T mergingValue, T existingValue) { if (existingValue == null) { return null; } return existingValue.getRawValue(); }
@Test public void merge_bothValuesNull() { MapMergeTypes existing = mergingValueWithGivenValue(null); MapMergeTypes merging = mergingValueWithGivenValue(null); assertNull(mergePolicy.merge(merging, existing)); }
public boolean setLocations(DefaultIssue issue, @Nullable Object locations) { if (!locationsEqualsIgnoreHashes(locations, issue.getLocations())) { issue.setLocations(locations); issue.setChanged(true); issue.setLocationsChanged(true); return true; } return false; }
@Test void change_locations_if_secondary_text_rage_changed() { DbCommons.TextRange range = DbCommons.TextRange.newBuilder().setStartLine(1).build(); DbIssues.Locations locations = DbIssues.Locations.newBuilder() .addFlow(DbIssues.Flow.newBuilder() .addLocation(DbIssues.Location.newBuilder().setTextRange(range)) .build()) .build(); issue.setLocations(locations); DbIssues.Locations.Builder builder = locations.toBuilder(); builder.getFlowBuilder(0).getLocationBuilder(0).setTextRange(range.toBuilder().setEndLine(2)); boolean updated = underTest.setLocations(issue, builder.build()); assertThat(updated).isTrue(); }
public static Combine.BinaryCombineDoubleFn ofDoubles() { return new Max.MaxDoubleFn(); }
@Test public void testMaxDoubleFnInfinity() { testCombineFn( Max.ofDoubles(), Lists.newArrayList(Double.NEGATIVE_INFINITY, 2.0, 3.0, Double.POSITIVE_INFINITY), Double.POSITIVE_INFINITY); }
public static String implode(String[] array, String sepString) { if (array==null) return null; StringBuilder ret = new StringBuilder(); if (sepString==null) sepString=""; for (int i = 0 ; i<array.length ; i++) { ret.append(array[i]); if (!(i==array.length-1)) ret.append(sepString); } return ret.toString(); }
@Test public void testImplode() { assertNull(StringUtilities.implode(null, null)); assertEquals(StringUtilities.implode(new String[0], null), ""); assertEquals(StringUtilities.implode(new String[] {"foo"}, null), "foo"); assertEquals(StringUtilities.implode(new String[] {"foo"}, "asdfsdfsadfsadfasdfs"), "foo"); assertEquals(StringUtilities.implode(new String[] {"foo", "bar"}, null), "foobar"); assertEquals(StringUtilities.implode(new String[] {"foo", "bar"}, "\n"), "foo\nbar"); assertEquals(StringUtilities.implode(new String[] {"foo"}, "\n"), "foo"); assertEquals(StringUtilities.implode(new String[] {"foo", "bar", null}, "\n"), "foo\nbar\nnull"); assertEquals(StringUtilities.implode(new String[] {"foo", "bar"}, "\n"), "foo\nbar"); assertEquals(StringUtilities.implode(new String[] {"foo", "bar", "baz"}, null), "foobarbaz"); }
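For non-null inputs, implode behaves like the JDK's `String.join`, including rendering a null element as the literal "null" (matching the "foo\nbar\nnull" assertion above); the differences are implode's tolerance of a null array (returns null) and a null separator (treated as ""). A quick check:

```java
public class ImplodeVsJoin {
    public static void main(String[] args) {
        String[] parts = {"foo", "bar", null};
        // String.join also renders a null element as the literal "null".
        System.out.println(String.join("\n", parts)); // foo, bar, null on separate lines
        // Unlike implode, String.join throws NullPointerException for a
        // null delimiter or a null array.
    }
}
```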
@Override public RetrievableStateHandle<T> getAndLock(String pathInZooKeeper) throws Exception { return get(pathInZooKeeper, true); }
@Test void testGetNonExistingPath() { final TestingLongStateHandleHelper stateHandleProvider = new TestingLongStateHandleHelper(); ZooKeeperStateHandleStore<TestingLongStateHandleHelper.LongStateHandle> store = new ZooKeeperStateHandleStore<>(getZooKeeperClient(), stateHandleProvider); assertThatThrownBy(() -> store.getAndLock("/testGetNonExistingPath")) .isInstanceOf(Exception.class); }
public static String[] getDistinctStrings( String[] strings ) { if ( strings == null ) { return null; } if ( strings.length == 0 ) { return new String[] {}; } String[] sorted = sortStrings( strings ); List<String> result = new ArrayList<>(); String previous = ""; for ( int i = 0; i < sorted.length; i++ ) { if ( !sorted[i].equalsIgnoreCase( previous ) ) { result.add( sorted[i] ); } previous = sorted[i]; } return result.toArray( new String[result.size()] ); }
@Test public void testGetDistinctStrings() { assertNull( Const.getDistinctStrings( null ) ); assertTrue( Const.getDistinctStrings( new String[] {} ).length == 0 ); Assert.assertArrayEquals( new String[] { "bar", "foo" }, Const.getDistinctStrings( new String[] { "foo", "bar", "foo", "bar" } ) ); }
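Because the scan compares neighbors with equalsIgnoreCase over a sorted copy, duplicates are removed case-insensitively and the result comes back sorted. A TreeSet with CASE_INSENSITIVE_ORDER gives roughly the same effect, as a sketch; which casing survives a case-insensitive tie may differ from sortStrings()'s ordering:

```java
import java.util.Arrays;
import java.util.TreeSet;

public class DistinctDemo {
    public static void main(String[] args) {
        // Case-insensitive de-duplication plus case-insensitive ordering.
        TreeSet<String> distinct = new TreeSet<>(String.CASE_INSENSITIVE_ORDER);
        distinct.addAll(Arrays.asList("foo", "bar", "FOO", "bar"));
        System.out.println(distinct); // [bar, foo]
    }
}
```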
public Release findLatestActiveRelease(Namespace namespace) { return findLatestActiveRelease(namespace.getAppId(), namespace.getClusterName(), namespace.getNamespaceName()); }
@Test public void testLoadConfigWithConfigNotFound() throws Exception { String someAppId = "1"; String someClusterName = "someClusterName"; String someNamespaceName = "someNamespaceName"; when(releaseRepository.findFirstByAppIdAndClusterNameAndNamespaceNameAndIsAbandonedFalseOrderByIdDesc(someAppId, someClusterName, someNamespaceName)) .thenReturn(null); Release result = releaseService.findLatestActiveRelease(someAppId, someClusterName, someNamespaceName); assertNull(result); verify(releaseRepository, times(1)).findFirstByAppIdAndClusterNameAndNamespaceNameAndIsAbandonedFalseOrderByIdDesc( someAppId, someClusterName, someNamespaceName); }
public static String getExactlyExpression(final String value) { return Strings.isNullOrEmpty(value) ? value : CharMatcher.anyOf(" ").removeFrom(value); }
@Test void assertGetExactlyExpressionUsingAndReturningEmptyString() { assertThat(SQLUtils.getExactlyExpression(""), is("")); }
public static String u2(int v) { char[] result = new char[4]; for (int i = 0; i < 4; i++) { result[3 - i] = Character.forDigit(v & 0x0f, 16); v >>= 4; } return new String(result); }
@Test public void testU2() { Assert.assertEquals("0000", Hex.u2(0)); Assert.assertEquals("04d2", Hex.u2(1234)); Assert.assertEquals("02d2", Hex.u2(1234567890)); }
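u2 keeps only the low 16 bits of its argument, which explains the third assertion: 1234567890 is 0x499602D2, so the low 16 bits are 0x02D2. The same digits fall out of the JDK formatter:

```java
public class U2Demo {
    public static void main(String[] args) {
        // 1234567890 == 0x499602D2; masking keeps the low 16 bits.
        System.out.println(String.format("%04x", 1234567890 & 0xFFFF)); // 02d2
        System.out.println(String.format("%04x", 1234 & 0xFFFF));       // 04d2
    }
}
```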
public static ResourceBundle getBundledResource(String basename) { return ResourceBundle.getBundle(basename, new UTF8Control()); }
@Test public void getBundleByClassAndName() { title("getBundleByClassAndName"); res = LionUtils.getBundledResource(LionUtilsTest.class, "SomeResource"); assertNotNull("missing resource bundle", res); String v1 = res.getString("key1"); String v2 = res.getString("key2"); print("v1 is %s, v2 is %s", v1, v2); assertEquals("v1 value wrong", "value one", v1); assertEquals("v2 value wrong", "value two", v2); res = LionUtils.getBundledResource(LionUtils.class, "SomeOtherResource"); assertNotNull("missing OTHER resource bundle", res); v1 = res.getString("key1"); v2 = res.getString("key2"); print("v1 is %s, v2 is %s", v1, v2); assertEquals("v1 value wrong", "Hay", v1); assertEquals("v2 value wrong", "Bee", v2); }
public int toInt() { ByteBuffer bb = ByteBuffer.wrap(super.toOctets()); return bb.getInt(); }
@Test public void testToInt() { Ip4Address ipAddress; ipAddress = Ip4Address.valueOf("1.2.3.4"); assertThat(ipAddress.toInt(), is(0x01020304)); ipAddress = Ip4Address.valueOf("0.0.0.0"); assertThat(ipAddress.toInt(), is(0)); ipAddress = Ip4Address.valueOf("255.255.255.255"); assertThat(ipAddress.toInt(), is(-1)); }
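ByteBuffer reads big-endian by default, so the four octets land in the int most-significant first; 255.255.255.255 sets all 32 bits, which is -1 in two's complement. A self-contained check of both assertions:

```java
import java.nio.ByteBuffer;

public class ToIntDemo {
    public static void main(String[] args) {
        // Big-endian read: 1.2.3.4 -> 0x01020304
        int v = ByteBuffer.wrap(new byte[] {1, 2, 3, 4}).getInt();
        System.out.println(Integer.toHexString(v)); // 1020304
        // All octets 255 -> every bit set -> -1 in two's complement
        byte ff = (byte) 0xFF;
        System.out.println(ByteBuffer.wrap(new byte[] {ff, ff, ff, ff}).getInt()); // -1
    }
}
```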
@Override public <R> List<R> queryMany(String sql, Object[] args, RowMapper<R> mapper) { return queryMany(jdbcTemplate, sql, args, mapper); }
@Test void testQueryMany3() { String sql = "SELECT data_id FROM config_info WHERE id >= ? AND id <= ?"; Object[] args = new Object[] {1, 2}; String dataId1 = "test1"; String dataId2 = "test2"; List<String> resultList = new ArrayList<>(); resultList.add(dataId1); resultList.add(dataId2); Class clazz = dataId1.getClass(); when(jdbcTemplate.queryForList(sql, args, clazz)).thenReturn(resultList); assertEquals(operate.queryMany(sql, args, clazz), resultList); }
@Override public long getMin() { if (values.length == 0) { return 0; } return values[0]; }
@Test public void calculatesAMinOfZeroForAnEmptySnapshot() { final Snapshot emptySnapshot = new WeightedSnapshot( weightedArray(new long[]{}, new double[]{})); assertThat(emptySnapshot.getMin()) .isZero(); }
public static Builder builder() { return new Builder(); }
@Test void fail_when_search_query_length_is_less_than_3_characters() { assertThatThrownBy(() -> { PermissionQuery.builder() .setSearchQuery("so") .build(); }) .isInstanceOf(IllegalArgumentException.class) .hasMessage("Search query should contains at least 3 characters"); }
@Override public ByteBuf writeBytes(byte[] src, int srcIndex, int length) { ensureWritable(length); setBytes(writerIndex, src, srcIndex, length); writerIndex += length; return this; }
@Test public void testWriteBytesAfterRelease2() { final ByteBuf buffer = copiedBuffer(new byte[8]); try { assertThrows(IllegalReferenceCountException.class, new Executable() { @Override public void execute() { releasedBuffer().writeBytes(buffer, 1); } }); } finally { buffer.release(); } }
@VisibleForTesting static boolean hasEnoughCurvature(final int[] xs, final int[] ys, final int middlePointIndex) { // Calculate the radianValue formed between middlePointIndex, and one point in either // direction final int startPointIndex = middlePointIndex - CURVATURE_NEIGHBORHOOD; final int startX = xs[startPointIndex]; final int startY = ys[startPointIndex]; final int endPointIndex = middlePointIndex + CURVATURE_NEIGHBORHOOD; final int endX = xs[endPointIndex]; final int endY = ys[endPointIndex]; final int middleX = xs[middlePointIndex]; final int middleY = ys[middlePointIndex]; final int firstSectionXDiff = startX - middleX; final int firstSectionYDiff = startY - middleY; final double firstSectionLength = Math.sqrt(firstSectionXDiff * firstSectionXDiff + firstSectionYDiff * firstSectionYDiff); final int secondSectionXDiff = endX - middleX; final int secondSectionYDiff = endY - middleY; final double secondSectionLength = Math.sqrt( secondSectionXDiff * secondSectionXDiff + secondSectionYDiff * secondSectionYDiff); final double dotProduct = firstSectionXDiff * secondSectionXDiff + firstSectionYDiff * secondSectionYDiff; final double radianValue = Math.acos(dotProduct / firstSectionLength / secondSectionLength); return radianValue <= CURVATURE_THRESHOLD; }
@Test public void testHasEnoughCurvature15Degrees() { final int[] Xs = new int[3]; final int[] Ys = new int[3]; // https://www.triangle-calculator.com/?what=&q=A%3D165%2C+b%3D100%2C+c%3D100&submit=Solve // A[100; 0] B[0; 0] C[196.593; 25.882] Xs[0] = 0; Ys[0] = 0; Xs[1] = 100; Ys[1] = 0; Xs[2] = 196; Ys[2] = 26; Assert.assertTrue(GestureTypingDetector.hasEnoughCurvature(Xs, Ys, 1)); Xs[0] = 0; Ys[0] = 0; Xs[1] = 100; Ys[1] = 0; Xs[2] = 196; Ys[2] = -26; Assert.assertTrue(GestureTypingDetector.hasEnoughCurvature(Xs, Ys, 1)); }
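The focal method computes the angle at the middle point from the dot product: cos θ = (v1 · v2) / (|v1| |v2|). For the test's first triple the angle works out to roughly 165 degrees, i.e. a 15-degree bend from a straight line, matching the test name. A worked computation:

```java
public class CurvatureDemo {
    public static void main(String[] args) {
        // Points (0,0), (100,0), (196,26); vectors from the middle point:
        // v1 = start - middle = (-100, 0), v2 = end - middle = (96, 26)
        double dot = (-100) * 96 + 0 * 26;               // -9600
        double len1 = Math.hypot(-100, 0);               // 100.0
        double len2 = Math.hypot(96, 26);                // ~99.46
        double radians = Math.acos(dot / (len1 * len2)); // ~2.876 rad
        System.out.println(Math.toDegrees(radians));     // ~164.8 degrees
    }
}
```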
static Optional<File> getOptionalFileFromResource(URL retrieved) { try { File toReturn = getFileFromResource(retrieved); logger.debug(TO_RETURN_TEMPLATE, toReturn); return Optional.of(toReturn); } catch (Exception e) { throw new KieEfestoCommonException("Failed to read file " + retrieved, e); } }
@Test void getOptionalFileFromResource() { URL resourceUrl = getResourceUrl(); Optional<File> retrieved = MemoryFileUtils.getOptionalFileFromResource(resourceUrl); assertThat(retrieved).isNotNull(); assertThat(retrieved.isPresent()).isTrue(); assertThat(retrieved).get().isInstanceOf(MemoryFile.class); }
public String getId(String name) { // Use the id directly if it is unique and the length is less than max if (name.length() <= maxHashLength && usedIds.add(name)) { return name; } // Pick the last bytes of hashcode and use hex format final String hexString = Integer.toHexString(name.hashCode()); final String origId = hexString.length() <= maxHashLength ? hexString : hexString.substring(Math.max(0, hexString.length() - maxHashLength)); String id = origId; int suffixNum = 2; while (!usedIds.add(id)) { // A duplicate! Retry. id = origId + "-" + suffixNum++; } LOG.info("Name {} is mapped to id {}", name, id); return id; }
@Test public void testSameShortNames() { final HashIdGenerator idGenerator = new HashIdGenerator(); String id = idGenerator.getId("abcd"); Assert.assertEquals("abcd", id); String id2 = idGenerator.getId("abcd"); Assert.assertNotEquals("abcd", id2); }
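The second assertion holds because the first call consumes the literal name "abcd"; the retry then falls back to the truncated hex of hashCode(), with a numeric suffix appended on further collisions. A stripped-down sketch of that scheme, with maxHashLength passed in and logging omitted; the names are illustrative:

```java
import java.util.HashSet;
import java.util.Set;

final class ShortIdDemo {
    private final Set<String> used = new HashSet<>();

    String getId(String name, int maxLen) {
        if (name.length() <= maxLen && used.add(name)) {
            return name; // short unique names pass through unchanged
        }
        String hex = Integer.toHexString(name.hashCode());
        String base = hex.length() <= maxLen ? hex : hex.substring(hex.length() - maxLen);
        String id = base;
        int n = 2;
        while (!used.add(id)) {
            id = base + "-" + n++; // disambiguate collisions with a suffix
        }
        return id;
    }

    public static void main(String[] args) {
        ShortIdDemo demo = new ShortIdDemo();
        System.out.println(demo.getId("abcd", 6)); // abcd
        System.out.println(demo.getId("abcd", 6)); // hex of "abcd".hashCode()
    }
}
```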
@Override public void appendOrOverwriteRegion(int subpartition, T newRegion) throws IOException { // This method will only be called when we want to eliminate a region. We can't let the // region be reloaded into the cache, otherwise it will lead to an infinite loop. long oldRegionOffset = findRegion(subpartition, newRegion.getFirstBufferIndex(), false); if (oldRegionOffset != -1) { // if region is already exists in file, overwrite it. writeRegionToOffset(oldRegionOffset, newRegion); } else { // otherwise, append region to region group. appendRegion(subpartition, newRegion); } }
@Test void testWriteMoreThanOneRegionGroup() throws Exception { List<TestingFileDataIndexRegion> regions = createTestRegions(0, 0L, 2, 2); int regionGroupSize = regions.stream().mapToInt(TestingFileDataIndexRegion::getSize).sum() + 1; try (FileDataIndexSpilledRegionManager<TestingFileDataIndexRegion> spilledRegionManager = createSpilledRegionManager(regionGroupSize, (ignore1, ignore2) -> {})) { spilledRegionManager.appendOrOverwriteRegion(0, regions.get(0)); spilledRegionManager.appendOrOverwriteRegion(0, regions.get(1)); // region group has no enough space, will start new region group. TestingFileDataIndexRegion regionInNewRegionGroup = createSingleTestRegion(4, 4L, 2); spilledRegionManager.appendOrOverwriteRegion(0, regionInNewRegionGroup); FileChannel indexFileChannel = FileChannel.open(indexFilePath, StandardOpenOption.READ); TestingFileDataIndexRegion readRegion = readRegionFromFile( indexFileChannel, // offset is region group size instead of two regions size to prove that // new region group is started. regionGroupSize); assertRegionEquals(readRegion, regionInNewRegionGroup); } }
@Override public int read() throws IOException { if (mPosition == mLength) { // at end of file return -1; } updateStreamIfNeeded(); int res = mUfsInStream.get().read(); if (res == -1) { return -1; } mPosition++; Metrics.BYTES_READ_FROM_UFS.inc(1); return res; }
@Test public void readOverflowOffLen() throws IOException, AlluxioException { // TODO(lu) enable for client cache in the future Assume.assumeFalse(mConf.getBoolean(PropertyKey.USER_CLIENT_CACHE_ENABLED)); AlluxioURI ufsPath = getUfsPath(); createFile(ufsPath, CHUNK_SIZE); try (FileInStream inStream = getStream(ufsPath)) { assertThrows(IllegalArgumentException.class, () -> inStream.read(new byte[CHUNK_SIZE], 0, CHUNK_SIZE * 2)); } }
public HttpResult getBinary(String url) throws IOException, NotModifiedException { return getBinary(url, null, null); }
@Test void lastModifiedReturns304() { this.mockServerClient.when(HttpRequest.request().withMethod("GET").withHeader(HttpHeaders.IF_MODIFIED_SINCE, "123456")) .respond(HttpResponse.response().withStatusCode(HttpStatus.SC_NOT_MODIFIED)); Assertions.assertThrows(NotModifiedException.class, () -> getter.getBinary(this.feedUrl, "123456", null)); }
@Override public void prepareContainer(ContainerRuntimeContext ctx) throws ContainerExecutionException { @SuppressWarnings("unchecked") List<String> localDirs = ctx.getExecutionAttribute(CONTAINER_LOCAL_DIRS); @SuppressWarnings("unchecked") Map<org.apache.hadoop.fs.Path, List<String>> resources = ctx.getExecutionAttribute(LOCALIZED_RESOURCES); @SuppressWarnings("unchecked") List<String> commands = ctx.getExecutionAttribute(CONTAINER_RUN_CMDS); Map<String, String> env = ctx.getContainer().getLaunchContext().getEnvironment(); String username = ctx.getExecutionAttribute(USER); if(!isSandboxContainerWhitelisted(username, commands)) { String tmpDirBase = configuration.get("hadoop.tmp.dir"); if (tmpDirBase == null) { throw new ContainerExecutionException("hadoop.tmp.dir not set!"); } try { String containerID = ctx.getExecutionAttribute(CONTAINER_ID_STR); initializePolicyDir(); List<String> groupPolicyFiles = getGroupPolicyFiles(configuration, ctx.getExecutionAttribute(USER)); Path policyFilePath = Files.createFile( Paths.get(policyFileDir.toString(), containerID + "-" + NMContainerPolicyUtils.POLICY_FILE), POLICY_ATTR); try(OutputStream policyOutputStream = Files.newOutputStream(policyFilePath)) { containerPolicies.put(containerID, policyFilePath); NMContainerPolicyUtils.generatePolicyFile(policyOutputStream, localDirs, groupPolicyFiles, resources, configuration); NMContainerPolicyUtils.appendSecurityFlags( commands, env, policyFilePath, sandboxMode); } } catch (IOException e) { throw new ContainerExecutionException(e); } } }
@Test public void testGroupPolicies() throws IOException, ContainerExecutionException { // Generate new policy files each containing one grant File openSocketPolicyFile = File.createTempFile("openSocket", "policy", baseTestDirectory); File classLoaderPolicyFile = File.createTempFile("createClassLoader", "policy", baseTestDirectory); Permission socketPerm = new SocketPermission("localhost:0", "listen"); Permission runtimePerm = new RuntimePermission("createClassLoader"); StringBuilder socketPermString = new StringBuilder(); Formatter openSocketPolicyFormatter = new Formatter(socketPermString); openSocketPolicyFormatter.format(SOCKET_PERMISSION_FORMAT, socketPerm.getClass().getName(), socketPerm.getName(), socketPerm.getActions()); FileWriter socketPermWriter = new FileWriter(openSocketPolicyFile); socketPermWriter.write(socketPermString.toString()); socketPermWriter.close(); StringBuilder classLoaderPermString = new StringBuilder(); Formatter classLoaderPolicyFormatter = new Formatter(classLoaderPermString); classLoaderPolicyFormatter.format(RUNTIME_PERMISSION_FORMAT, runtimePerm.getClass().getName(), runtimePerm.getName()); FileWriter classLoaderPermWriter = new FileWriter(classLoaderPolicyFile); classLoaderPermWriter.write(classLoaderPermString.toString()); classLoaderPermWriter.close(); conf.set(YarnConfiguration.YARN_CONTAINER_SANDBOX_POLICY_GROUP_PREFIX + WHITELIST_GROUP, openSocketPolicyFile.toString()); conf.set(YarnConfiguration.YARN_CONTAINER_SANDBOX_POLICY_GROUP_PREFIX + NORMAL_GROUP, classLoaderPolicyFile.toString()); String[] inputCommand = {"$JAVA_HOME/bin/java jar MyJob.jar"}; List<String> commands = Arrays.asList(inputCommand); runtimeContextBuilder.setExecutionAttribute(USER, WHITELIST_USER); runtimeContextBuilder.setExecutionAttribute(CONTAINER_RUN_CMDS, commands); runtime.prepareContainer(runtimeContextBuilder.build()); //pull generated policy from cmd Matcher policyMatches = Pattern.compile(POLICY_APPEND_FLAG + "=?([^ ]+)") .matcher(commands.get(0)); policyMatches.find(); String generatedPolicy = policyMatches.group(1); //Test that generated policy file has included both policies Assert.assertTrue( Files.readAllLines(Paths.get(generatedPolicy)).contains( classLoaderPermString.toString().split("\n")[1])); Assert.assertTrue( Files.readAllLines(Paths.get(generatedPolicy)).contains( socketPermString.toString().split("\n")[1])); }
public CompactedPinotSegmentRecordReader(File indexDir, RoaringBitmap validDocIds) { this(indexDir, validDocIds, null); }
@Test public void testCompactedPinotSegmentRecordReader() throws Exception { RoaringBitmap validDocIds = new RoaringBitmap(); for (int i = 0; i < NUM_ROWS; i += 2) { validDocIds.add(i); } List<GenericRow> outputRows = new ArrayList<>(); List<GenericRow> rewoundOutputRows = new ArrayList<>(); CompactedPinotSegmentRecordReader compactedReader = new CompactedPinotSegmentRecordReader(_segmentIndexDir, validDocIds); while (compactedReader.hasNext()) { outputRows.add(compactedReader.next()); } compactedReader.rewind(); while (compactedReader.hasNext()) { rewoundOutputRows.add(compactedReader.next()); } compactedReader.close(); Assert.assertEquals(outputRows.size(), NUM_ROWS / 2, "Number of _rows returned by CompactedPinotSegmentRecordReader is incorrect"); for (int i = 0; i < outputRows.size(); i++) { GenericRow outputRow = outputRows.get(i); GenericRow row = _rows.get(i * 2); Assert.assertEquals(outputRow.getValue(D_SV_1), row.getValue(D_SV_1)); Assert.assertTrue(PinotSegmentUtil.compareMultiValueColumn(outputRow.getValue(D_MV_1), row.getValue(D_MV_1))); Assert.assertEquals(outputRow.getValue(M1), row.getValue(M1)); Assert.assertEquals(outputRow.getValue(M2), row.getValue(M2)); Assert.assertEquals(outputRow.getValue(TIME), row.getValue(TIME)); } Assert.assertEquals(rewoundOutputRows.size(), NUM_ROWS / 2, "Number of _rows returned by CompactedPinotSegmentRecordReader is incorrect"); for (int i = 0; i < rewoundOutputRows.size(); i++) { GenericRow outputRow = rewoundOutputRows.get(i); GenericRow row = _rows.get(i * 2); Assert.assertEquals(outputRow.getValue(D_SV_1), row.getValue(D_SV_1)); Assert.assertTrue(PinotSegmentUtil.compareMultiValueColumn(outputRow.getValue(D_MV_1), row.getValue(D_MV_1))); Assert.assertEquals(outputRow.getValue(M1), row.getValue(M1)); Assert.assertEquals(outputRow.getValue(M2), row.getValue(M2)); Assert.assertEquals(outputRow.getValue(TIME), row.getValue(TIME)); } }
protected BaseNode parse(String input) { return input.isEmpty() || input.isBlank() ? getNullNode() : parseNotEmptyInput(input); }
@Test void parse_NotEmptyString() { String input = ""; assertThat(rangeFunction.parse(input)) .withFailMessage(String.format("Check `%s`", input)) .isInstanceOf(NullNode.class); input = "null"; assertThat(rangeFunction.parse("null")) .withFailMessage(String.format("Check `%s`", input)) .isInstanceOf(NullNode.class); input = "1"; assertThat(rangeFunction.parse("1")) .withFailMessage(String.format("Check `%s`", input)) .isInstanceOf(NumberNode.class); input = "\"a\""; assertThat(rangeFunction.parse(input)).withFailMessage(String.format("Check `%s`", input)).isInstanceOf(StringNode.class); input = "false"; assertThat(rangeFunction.parse(input)).withFailMessage(String.format("Check `%s`", input)).isInstanceOf(BooleanNode.class); input = "@\"2019-01-01\""; assertThat(rangeFunction.parse(input)).withFailMessage(String.format("Check `%s`", input)).isInstanceOf(AtLiteralNode.class); input = "duration(\"P2DT20H14M\")"; assertThat(rangeFunction.parse(input)).withFailMessage(String.format("Check `%s`", input)).isInstanceOf(FunctionInvocationNode.class); }
@Override public boolean verify(String hostname, SSLSession session) { if (LOCALHOST_HOSTNAME[0].equalsIgnoreCase(hostname) || LOCALHOST_HOSTNAME[1].equals(hostname)) { return true; } if (isIP(hostname)) { return true; } return hv.verify(hostname, session); }
@Test void testVerify() { assertTrue(selfHostnameVerifier.verify("localhost", sslSession)); assertTrue(selfHostnameVerifier.verify("127.0.0.1", sslSession)); assertTrue(selfHostnameVerifier.verify("10.10.10.10", sslSession)); // hit cache assertTrue(selfHostnameVerifier.verify("10.10.10.10", sslSession)); assertFalse(selfHostnameVerifier.verify("", sslSession)); assertFalse(selfHostnameVerifier.verify(null, sslSession)); verify(hostnameVerifier, times(2)).verify(any(), eq(sslSession)); }
@Deprecated @Restricted(DoNotUse.class) public static String resolve(ConfigurationContext context, String toInterpolate) { return context.getSecretSourceResolver().resolve(toInterpolate); }
@Test public void resolve_mixedSingleEntryWithDefault() { environment.set("FOO", "www.foo.io"); assertThat(resolve("${protocol:-https}://${FOO:-www.bar.io}"), equalTo("https://www.foo.io")); }
public void maybeFlush() { // We check dirtyTopicId first to avoid having to take the lock unnecessarily in the frequently called log append path if (dirtyTopicIdOpt.isPresent()) { // We synchronize on the actual write to disk synchronized (lock) { dirtyTopicIdOpt.ifPresent(topicId -> { try { try (FileOutputStream fileOutputStream = new FileOutputStream(tempPath().toFile()); BufferedWriter writer = new BufferedWriter(new OutputStreamWriter(fileOutputStream, StandardCharsets.UTF_8))) { writer.write(new PartitionMetadata(CURRENT_VERSION, topicId).encode()); writer.flush(); fileOutputStream.getFD().sync(); } Utils.atomicMoveWithFallback(tempPath(), path()); } catch (IOException e) { String msg = "Error while writing partition metadata file " + file.getAbsolutePath(); logDirFailureChannel.maybeAddOfflineLogDir(logDir(), msg, e); throw new KafkaStorageException(msg, e); } dirtyTopicIdOpt = Optional.empty(); }); } } }
@Test public void testMaybeFlushWithNoTopicIdPresent() { PartitionMetadataFile partitionMetadataFile = new PartitionMetadataFile(file, null); partitionMetadataFile.maybeFlush(); assertEquals(0, file.length()); }
public void doGet( HttpServletRequest request, HttpServletResponse response ) throws ServletException, IOException { if ( isJettyMode() && !request.getContextPath().startsWith( CONTEXT_PATH ) ) { return; } if ( log.isDebug() ) { logDebug( BaseMessages.getString( PKG, "ExecuteTransServlet.Log.ExecuteTransRequested" ) ); } // Options taken from PAN // String[] knownOptions = new String[] { REP, USER, PASS, TRANS, LEVEL }; String repOption = request.getParameter( REP ); String userOption = request.getParameter( USER ); String passOption = Encr.decryptPasswordOptionallyEncrypted( request.getParameter( PASS ) ); String transOption = request.getParameter( TRANS ); String levelOption = request.getParameter( LEVEL ); response.setStatus( HttpServletResponse.SC_OK ); String encoding = System.getProperty( "KETTLE_DEFAULT_SERVLET_ENCODING", null ); if ( encoding != null && !Utils.isEmpty( encoding.trim() ) ) { response.setCharacterEncoding( encoding ); response.setContentType( "text/html; charset=" + encoding ); } PrintWriter out = response.getWriter(); if ( transOption == null ) { response.setStatus( HttpServletResponse.SC_BAD_REQUEST ); out.println( new WebResult( WebResult.STRING_ERROR, BaseMessages.getString( PKG, "ExecuteTransServlet.Error.MissingMandatoryParameter", TRANS ) ) ); return; } try { final Repository repository = openRepository( repOption, userOption, passOption ); final TransMeta transMeta = loadTransformation( repository, transOption ); // Set the servlet parameters as variables in the transformation // String[] parameters = transMeta.listParameters(); Enumeration<?> parameterNames = request.getParameterNames(); while ( parameterNames.hasMoreElements() ) { String parameter = (String) parameterNames.nextElement(); String[] values = request.getParameterValues( parameter ); // Ignore the known options. set the rest as variables // if ( Const.indexOfString( parameter, knownOptions ) < 0 ) { // If it's a trans parameter, set it, otherwise simply set the variable // if ( Const.indexOfString( parameter, parameters ) < 0 ) { transMeta.setVariable( parameter, values[0] ); } else { transMeta.setParameterValue( parameter, values[0] ); } } } TransExecutionConfiguration transExecutionConfiguration = new TransExecutionConfiguration(); LogLevel logLevel = LogLevel.getLogLevelForCode( levelOption ); transExecutionConfiguration.setLogLevel( logLevel ); TransConfiguration transConfiguration = new TransConfiguration( transMeta, transExecutionConfiguration ); String carteObjectId = UUID.randomUUID().toString(); SimpleLoggingObject servletLoggingObject = new SimpleLoggingObject( CONTEXT_PATH, LoggingObjectType.CARTE, null ); servletLoggingObject.setContainerObjectId( carteObjectId ); servletLoggingObject.setLogLevel( logLevel ); // Create the transformation and store in the list... // final Trans trans = new Trans( transMeta, servletLoggingObject ); trans.setRepository( repository ); trans.setSocketRepository( getSocketRepository() ); getTransformationMap().addTransformation( transMeta.getName(), carteObjectId, trans, transConfiguration ); trans.setContainerObjectId( carteObjectId ); if ( repository != null ) { // The repository connection is open: make sure we disconnect from the repository once we // are done with this transformation. // trans.addTransListener( new TransAdapter() { @Override public void transFinished( Trans trans ) { repository.disconnect(); } } ); } // Pass the servlet print writer to the transformation... // trans.setServletPrintWriter( out ); trans.setServletReponse( response ); trans.setServletRequest( request ); try { // Execute the transformation... // executeTrans( trans ); String logging = KettleLogStore.getAppender().getBuffer( trans.getLogChannelId(), false ).toString(); if ( trans.isFinishedOrStopped() && trans.getErrors() > 0 ) { response.setStatus( HttpServletResponse.SC_INTERNAL_SERVER_ERROR ); out.println( new WebResult( WebResult.STRING_ERROR, BaseMessages.getString( PKG, "ExecuteTransServlet.Error.ErrorExecutingTrans", logging ) ) ); } out.flush(); } catch ( Exception executionException ) { String logging = KettleLogStore.getAppender().getBuffer( trans.getLogChannelId(), false ).toString(); throw new KettleException( BaseMessages.getString( PKG, "ExecuteTransServlet.Error.ErrorExecutingTrans", logging ), executionException ); } } catch ( Exception ex ) { // When we get to this point KettleAuthenticationException has already been wrapped in an Execution Exception // and that in a KettleException Throwable kettleExceptionCause = ex.getCause(); if ( kettleExceptionCause != null && kettleExceptionCause instanceof ExecutionException ) { Throwable executionExceptionCause = kettleExceptionCause.getCause(); if ( executionExceptionCause != null && executionExceptionCause instanceof KettleAuthenticationException ) { response.setStatus( HttpServletResponse.SC_UNAUTHORIZED ); out.println( new WebResult( WebResult.STRING_ERROR, BaseMessages.getString( PKG, "ExecuteTransServlet.Error.Authentication", getContextPath() ) ) ); } } else if ( ex.getMessage().contains( UNABLE_TO_FIND_TRANS ) ) { response.setStatus( HttpServletResponse.SC_NOT_FOUND ); out.println( new WebResult( WebResult.STRING_ERROR, BaseMessages.getString( PKG, "ExecuteTransServlet.Error.UnableToFindTransformation", transOption ) ) ); } else { response.setStatus( HttpServletResponse.SC_INTERNAL_SERVER_ERROR ); out.println( new WebResult( WebResult.STRING_ERROR, BaseMessages.getString( PKG, "ExecuteTransServlet.Error.UnexpectedError", Const.CR + Const.getStackTracker( ex ) ) ) ); } } }
@Test public void doGetMissingMandatoryParamTransTest() throws Exception { HttpServletRequest mockHttpServletRequest = mock( HttpServletRequest.class ); HttpServletResponse mockHttpServletResponse = mock( HttpServletResponse.class ); KettleLogStore.init(); StringWriter out = new StringWriter(); PrintWriter printWriter = new PrintWriter( out ); when( mockHttpServletRequest.getParameter( "rep" ) ).thenReturn( "Repo" ); when( mockHttpServletRequest.getParameter( "trans" ) ).thenReturn( null ); when( mockHttpServletResponse.getWriter() ).thenReturn( printWriter ); executeTransServlet.doGet( mockHttpServletRequest, mockHttpServletResponse ); verify( mockHttpServletResponse ).setStatus( HttpServletResponse.SC_OK ); verify( mockHttpServletResponse ).setStatus( HttpServletResponse.SC_BAD_REQUEST ); }
public static MemberVersion of(int major, int minor, int patch) { if (major == 0 && minor == 0 && patch == 0) { return MemberVersion.UNKNOWN; } else { return new MemberVersion(major, minor, patch); } }
@Test public void testVersionOf_whenVersionStringIsRC() { MemberVersion expected = MemberVersion.of(3, 8, 1); assertEquals(expected, MemberVersion.of(VERSION_3_8_1_RC1_STRING)); }
File putIfAbsent(String userId, boolean saveToDisk) throws IOException { String idKey = getIdStrategy().keyFor(userId); String directoryName = idToDirectoryNameMap.get(idKey); File directory = null; if (directoryName == null) { synchronized (this) { directoryName = idToDirectoryNameMap.get(idKey); if (directoryName == null) { directory = createDirectoryForNewUser(userId); directoryName = directory.getName(); idToDirectoryNameMap.put(idKey, directoryName); if (saveToDisk) { save(); } } } } return directory == null ? new File(usersDirectory, directoryName) : directory; }
@Test public void testDirectoryFormatAllSuppressedCharacters() throws IOException { UserIdMapper mapper = createUserIdMapper(IdStrategy.CASE_INSENSITIVE); String user1 = "!@#$%^"; File directory1 = mapper.putIfAbsent(user1, true); assertThat(directory1.getName(), startsWith("_")); }
public int doWork() { final long nowNs = nanoClock.nanoTime(); cachedNanoClock.update(nowNs); dutyCycleTracker.measureAndUpdate(nowNs); int workCount = commandQueue.drain(CommandProxy.RUN_TASK, Configuration.COMMAND_DRAIN_LIMIT); final int bytesReceived = dataTransportPoller.pollTransports(); totalBytesReceived.getAndAddOrdered(bytesReceived); final PublicationImage[] publicationImages = this.publicationImages; for (int lastIndex = publicationImages.length - 1, i = lastIndex; i >= 0; i--) { final PublicationImage image = publicationImages[i]; if (image.isConnected(nowNs)) { image.checkEosForDrainTransition(nowNs); workCount += image.sendPendingStatusMessage(nowNs); workCount += image.processPendingLoss(); workCount += image.initiateAnyRttMeasurements(nowNs); } else { this.publicationImages = 1 == this.publicationImages.length ? EMPTY_IMAGES : ArrayUtil.remove(this.publicationImages, i); image.removeFromDispatcher(); image.receiverRelease(); } } checkPendingSetupMessages(nowNs); if (reResolutionCheckIntervalNs > 0 && (reResolutionDeadlineNs - nowNs) < 0) { reResolutionDeadlineNs = nowNs + reResolutionCheckIntervalNs; dataTransportPoller.checkForReResolutions(nowNs, conductorProxy); } return workCount + bytesReceived; }
@Test @InterruptAfter(10) void shouldCreateRcvTermAndSendSmOnSetup() throws IOException { receiverProxy.registerReceiveChannelEndpoint(receiveChannelEndpoint); receiverProxy.addSubscription(receiveChannelEndpoint, STREAM_ID); receiver.doWork(); receiver.doWork(); fillSetupFrame(setupHeader); receiveChannelEndpoint.onSetupMessage( setupHeader, setupBuffer, SetupFlyweight.HEADER_LENGTH, senderAddress, 0); final PublicationImage image = new PublicationImage( CORRELATION_ID, ctx, receiveChannelEndpoint, 0, senderAddress, SESSION_ID, STREAM_ID, INITIAL_TERM_ID, ACTIVE_TERM_ID, INITIAL_TERM_OFFSET, (short)0, rawLog, mockFeedbackDelayGenerator, POSITIONS, mockHighestReceivedPosition, mockRebuildPosition, SOURCE_IDENTITY, congestionControl); final int messagesRead = drainConductorQueue( (e) -> { // pass in new term buffer from conductor, which should trigger SM receiverProxy.newPublicationImage(receiveChannelEndpoint, image); }); assertThat(messagesRead, is(1)); nanoClock.advance(STATUS_MESSAGE_TIMEOUT * 2); receiver.doWork(); final ByteBuffer rcvBuffer = ByteBuffer.allocateDirect(256); InetSocketAddress rcvAddress; do { rcvAddress = (InetSocketAddress)senderChannel.receive(rcvBuffer); } while (null == rcvAddress); statusHeader.wrap(new UnsafeBuffer(rcvBuffer)); assertNotNull(rcvAddress); assertThat(rcvAddress.getPort(), is(UDP_CHANNEL.remoteData().getPort())); assertThat(statusHeader.headerType(), is(HeaderFlyweight.HDR_TYPE_SM)); assertThat(statusHeader.streamId(), is(STREAM_ID)); assertThat(statusHeader.sessionId(), is(SESSION_ID)); assertThat(statusHeader.consumptionTermId(), is(ACTIVE_TERM_ID)); assertThat(statusHeader.frameLength(), is(StatusMessageFlyweight.HEADER_LENGTH)); }
public static Path getJobAttemptPath(JobContext context, Path out) { return getJobAttemptPath(getAppAttemptId(context), out); }
@Test public void testJobAbort() throws Exception { Path jobAttemptPath = jobCommitter.getJobAttemptPath(job); FileSystem fs = jobAttemptPath.getFileSystem(conf); Set<String> uploads = runTasks(job, 4, 3); assertPathExists(fs, "No job attempt path", jobAttemptPath); jobCommitter.abortJob(job, JobStatus.State.KILLED); assertEquals("Should have committed no uploads: " + jobCommitter, 0, results.getCommits().size()); assertEquals("Should have deleted no uploads: " + jobCommitter, 0, results.getDeletes().size()); assertEquals("Should have aborted all uploads: " + jobCommitter, uploads, getAbortedIds(results.getAborts())); assertPathDoesNotExist(fs, "jobAttemptPath not deleted", jobAttemptPath); }
@Override public void setConfig(RedisClusterNode node, String param, String value) { RedisClient entry = getEntry(node); RFuture<Void> f = executorService.writeAsync(entry, StringCodec.INSTANCE, RedisCommands.CONFIG_SET, param, value); syncFuture(f); }
@Test public void testSetConfig() { RedisClusterNode master = getFirstMaster(); connection.setConfig(master, "timeout", "10"); }
@Override @SuppressWarnings("unchecked") public <K, V> List<Map<K, V>> toMaps(DataTable dataTable, Type keyType, Type valueType) { requireNonNull(dataTable, "dataTable may not be null"); requireNonNull(keyType, "keyType may not be null"); requireNonNull(valueType, "valueType may not be null"); if (dataTable.isEmpty()) { return emptyList(); } DataTableType keyConverter = registry.lookupCellTypeByType(keyType); DataTableType valueConverter = registry.lookupCellTypeByType(valueType); List<String> problems = new ArrayList<>(); if (keyConverter == null) { problems.add(problemNoTableCellTransformer(keyType)); } if (valueConverter == null) { problems.add(problemNoTableCellTransformer(valueType)); } if (!problems.isEmpty()) { throw mapsNoConverterDefined(keyType, valueType, problems); } DataTable header = dataTable.rows(0, 1); List<Map<K, V>> result = new ArrayList<>(); List<K> keys = unpack((List<List<K>>) keyConverter.transform(header.cells())); DataTable rows = dataTable.rows(1); if (rows.isEmpty()) { return emptyList(); } List<List<V>> transform = (List<List<V>>) valueConverter.transform(rows.cells()); for (List<V> values : transform) { result.add(createMap(keyType, keys, valueType, values)); } return unmodifiableList(result); }
@Test void convert_to_maps_of_optional() { DataTable table = parse("", "| header1 | header2 |", "| 311 | 12299 |"); Map<Optional<String>, Optional<BigInteger>> expectedMap = new HashMap<Optional<String>, Optional<BigInteger>>() { { put(Optional.of("header1"), Optional.of(new BigInteger("311"))); put(Optional.of("header2"), Optional.of(new BigInteger("12299"))); } }; List<Map<Optional<String>, Optional<BigInteger>>> expected = singletonList(expectedMap); assertEquals(expected, converter.toMaps(table, OPTIONAL_STRING, OPTIONAL_BIG_INTEGER)); }
@Override public synchronized void execute() { boolean debugMode = conf.isLoadBalancerDebugModeEnabled() || log.isDebugEnabled(); if (debugMode) { log.info("Load balancer enabled: {}, Shedding enabled: {}.", conf.isLoadBalancerEnabled(), conf.isLoadBalancerSheddingEnabled()); } if (!isLoadBalancerSheddingEnabled()) { if (debugMode) { log.info("The load balancer or load balancer shedding already disabled. Skipping."); } return; } // Remove bundles who have been unloaded for longer than the grace period from the recently unloaded map. final long timeout = System.currentTimeMillis() - TimeUnit.MINUTES.toMillis(conf.getLoadBalancerSheddingGracePeriodMinutes()); recentlyUnloadedBundles.keySet().removeIf(e -> recentlyUnloadedBundles.get(e) < timeout); long asyncOpTimeoutMs = conf.getNamespaceBundleUnloadingTimeoutMs(); synchronized (namespaceUnloadStrategy) { try { Boolean isChannelOwner = channel.isChannelOwnerAsync().get(asyncOpTimeoutMs, TimeUnit.MILLISECONDS); if (!isChannelOwner) { if (debugMode) { log.info("Current broker is not channel owner. Skipping."); } return; } List<String> availableBrokers = context.brokerRegistry().getAvailableBrokersAsync() .get(asyncOpTimeoutMs, TimeUnit.MILLISECONDS); if (debugMode) { log.info("Available brokers: {}", availableBrokers); } if (availableBrokers.size() <= 1) { log.info("Only 1 broker available: no load shedding will be performed. Skipping."); return; } final Set<UnloadDecision> decisions = namespaceUnloadStrategy .findBundlesForUnloading(context, recentlyUnloadedBundles, recentlyUnloadedBrokers); if (debugMode) { log.info("[{}] Unload decision result: {}", namespaceUnloadStrategy.getClass().getSimpleName(), decisions); } if (decisions.isEmpty()) { if (debugMode) { log.info("[{}] Unload decision unloads is empty. Skipping.", namespaceUnloadStrategy.getClass().getSimpleName()); } return; } List<CompletableFuture<Void>> futures = new ArrayList<>(); unloadBrokers.clear(); decisions.forEach(decision -> { if (decision.getLabel() == Success) { Unload unload = decision.getUnload(); log.info("[{}] Unloading bundle: {}", namespaceUnloadStrategy.getClass().getSimpleName(), unload); futures.add(unloadManager.waitAsync(channel.publishUnloadEventAsync(unload), unload.serviceUnit(), decision, asyncOpTimeoutMs, TimeUnit.MILLISECONDS) .thenAccept(__ -> { unloadBrokers.add(unload.sourceBroker()); recentlyUnloadedBundles.put(unload.serviceUnit(), System.currentTimeMillis()); recentlyUnloadedBrokers.put(unload.sourceBroker(), System.currentTimeMillis()); })); } }); FutureUtil.waitForAll(futures) .whenComplete((__, ex) -> counter.updateUnloadBrokerCount(unloadBrokers.size())) .get(asyncOpTimeoutMs, TimeUnit.MILLISECONDS); } catch (Exception ex) { log.error("[{}] Namespace unload has exception.", namespaceUnloadStrategy.getClass().getSimpleName(), ex); } finally { if (counter.updatedAt() > counterLastUpdatedAt) { unloadMetrics.set(counter.toMetrics(pulsar.getAdvertisedAddress())); counterLastUpdatedAt = counter.updatedAt(); } } } }
@Test(timeOut = 30 * 1000) public void testNotChannelOwner() { AtomicReference<List<Metrics>> reference = new AtomicReference<>(); UnloadCounter counter = new UnloadCounter(); LoadManagerContext context = setupContext(); context.brokerConfiguration().setLoadBalancerEnabled(false); ServiceUnitStateChannel channel = mock(ServiceUnitStateChannel.class); NamespaceUnloadStrategy unloadStrategy = mock(NamespaceUnloadStrategy.class); UnloadManager unloadManager = mock(UnloadManager.class); PulsarService pulsar = mock(PulsarService.class); UnloadScheduler scheduler = new UnloadScheduler(pulsar, loadManagerExecutor, unloadManager, context, channel, unloadStrategy, counter, reference); doReturn(CompletableFuture.completedFuture(false)).when(channel).isChannelOwnerAsync(); scheduler.execute(); verify(context.brokerRegistry(), times(0)).getAvailableBrokersAsync(); }
@Override public boolean equals(Object o) { if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } CachedQueryEntry<?, ?> that = (CachedQueryEntry<?, ?>) o; return keyData.equals(that.keyData); }
@Test @SuppressWarnings("ConstantConditions") public void testEquals_givenOtherIsNull_thenReturnFalse() { CachedQueryEntry entry1 = createEntry("key"); CachedQueryEntry entry2 = null; assertFalse(entry1.equals(entry2)); }
public static synchronized String quantityToStackSize(long quantity) { if (quantity < 0) { // Long.MIN_VALUE = -1 * Long.MIN_VALUE so we need to correct for it. return "-" + quantityToStackSize(quantity == Long.MIN_VALUE ? Long.MAX_VALUE : -quantity); } else if (quantity < 10_000) { return NUMBER_FORMATTER.format(quantity); } String suffix = SUFFIXES[0]; long divideBy = 1; // determine correct suffix by iterating backward through the list // of suffixes until the suffix results in a value >= 1 for (int i = (SUFFIXES.length - 1); i >= 0; i--) { divideBy = (long) Math.pow(10, i * 3); if ((double) quantity / divideBy >= 1) { suffix = SUFFIXES[i]; break; } } // get locale formatted string String formattedString = NUMBER_FORMATTER.format((double) quantity / divideBy); // strip down any digits past the 4 first formattedString = (formattedString.length() > 4 ? formattedString.substring(0, 4) : formattedString); // make sure the last character is not a "." return (formattedString.endsWith(".") ? formattedString.substring(0, 3) : formattedString) + suffix; }
@Test public void quantityToStackSize() { assertEquals("0", QuantityFormatter.quantityToStackSize(0)); assertEquals("999", QuantityFormatter.quantityToStackSize(999)); assertEquals("1,000", QuantityFormatter.quantityToStackSize(1000)); assertEquals("9,450", QuantityFormatter.quantityToStackSize(9450)); assertEquals("14.5K", QuantityFormatter.quantityToStackSize(14_500)); assertEquals("99.9K", QuantityFormatter.quantityToStackSize(99_920)); assertEquals("100K", QuantityFormatter.quantityToStackSize(100_000)); assertEquals("10M", QuantityFormatter.quantityToStackSize(10_000_000)); assertEquals("2.14B", QuantityFormatter.quantityToStackSize(Integer.MAX_VALUE)); assertEquals("100B", QuantityFormatter.quantityToStackSize(100_000_000_000L)); assertEquals("0", QuantityFormatter.quantityToStackSize(-0)); assertEquals("-400", QuantityFormatter.quantityToStackSize(-400)); assertEquals("-400K", QuantityFormatter.quantityToStackSize(-400_000)); assertEquals("-40M", QuantityFormatter.quantityToStackSize(-40_000_000)); assertEquals("-2.14B", QuantityFormatter.quantityToStackSize(Integer.MIN_VALUE)); assertEquals("-400B", QuantityFormatter.quantityToStackSize(-400_000_000_000L)); }
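The suffix scan walks down from the largest power of 1000 until the quotient reaches 1, then the formatted quotient is truncated to at most four characters. Tracing 14_500 with SUFFIXES assumed to be "", "K", "M", "B" for this sketch:

```java
public class StackSizeDemo {
    private static final String[] SUFFIXES = {"", "K", "M", "B"}; // assumed values

    public static void main(String[] args) {
        long quantity = 14_500L;
        // i=3: /1e9 -> 0.0000145 (<1, skip); i=2: /1e6 -> 0.0145 (<1, skip);
        // i=1: /1e3 -> 14.5 (>=1) -> suffix "K", printed as "14.5K"
        for (int i = SUFFIXES.length - 1; i >= 0; i--) {
            long divideBy = (long) Math.pow(10, i * 3);
            if ((double) quantity / divideBy >= 1) {
                System.out.println((double) quantity / divideBy + SUFFIXES[i]); // 14.5K
                break;
            }
        }
    }
}
```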
public static Builder builder() { return new Builder(); }
@Test public void testEqualsAndHashCode() { WebSocketUpstream upstream1 = WebSocketUpstream.builder().protocol("protocol").upstreamUrl("url") .status(true).warmup(50).timestamp(1650549243L).build(); WebSocketUpstream upstream2 = WebSocketUpstream.builder().protocol("protocol").upstreamUrl("url") .status(true).warmup(50).timestamp(1650549243L).build(); assertThat(ImmutableSet.of(upstream1, upstream2), hasSize(1)); }
@Override synchronized public void close() { if (stream != null) { IOUtils.cleanupWithLogger(LOG, stream); stream = null; } }
@Test(timeout=120000) public void testRefillReservoir() throws Exception { OsSecureRandom random = getOsSecureRandom(); for (int i = 0; i < 8196; i++) { random.nextLong(); } random.close(); }
public static long getLastModified(URL resourceURL) { final String protocol = resourceURL.getProtocol(); switch (protocol) { case "jar": try { final JarURLConnection jarConnection = (JarURLConnection) resourceURL.openConnection(); final JarEntry entry = jarConnection.getJarEntry(); return entry.getTime(); } catch (IOException ignored) { } return 0; case "file": URLConnection connection = null; try { connection = resourceURL.openConnection(); return connection.getLastModified(); } catch (IOException ignored) { } finally { if (connection != null) { try { connection.getInputStream().close(); } catch (IOException ignored) { } } } return 0; default: throw new IllegalArgumentException("Unsupported protocol " + protocol + " for resource " + resourceURL); } }
@Test void getLastModifiedReturnsZeroIfAnErrorOccurs() throws Exception { final URL url = new URL("file:/some/path/that/doesnt/exist"); final long lastModified = ResourceURL.getLastModified(url); assertThat(lastModified) .isZero(); }
public static String buildURIFromPattern(String pattern, List<Parameter> parameters) { if (parameters != null) { // Browse parameters and choose between template or query one. for (Parameter parameter : parameters) { String wadlTemplate = "{" + parameter.getName() + "}"; String swaggerTemplate = "/:" + parameter.getName(); if (pattern.contains(wadlTemplate)) { // It's a template parameter. pattern = pattern.replace(wadlTemplate, encodePath(parameter.getValue())); } else if (pattern.contains(swaggerTemplate)) { // It's a template parameter. pattern = pattern.replace(":" + parameter.getName(), encodePath(parameter.getValue())); } else { // It's a query parameter, ensure we have started delimiting them. if (!pattern.contains("?")) { pattern += "?"; } if (pattern.contains("=")) { pattern += "&"; } pattern += parameter.getName() + "=" + encodeValue(parameter.getValue()); } } } return pattern; }
@Test void testBuildURIFromPatternWithMapWithParamsArray() { // Prepare a bunch of parameters. Multimap<String, String> parameters = ArrayListMultimap.create(); parameters.put("year", "2018"); parameters.put("month", "05"); parameters.put("status", "published"); parameters.put("status", "proofred"); parameters.put("page", "0"); // Test with old wadl like template format. String pattern = "http://localhost:8080/blog/{year}/{month}"; String uri = URIBuilder.buildURIFromPattern(pattern, parameters); assertTrue("http://localhost:8080/blog/2018/05?page=0&status=published&status=proofred".equals(uri) || "http://localhost:8080/blog/2018/05?status=published&status=proofred&page=0".equals(uri)); // Test with new swagger like template format. pattern = "http://localhost:8080/blog/:year/:month"; uri = URIBuilder.buildURIFromPattern(pattern, parameters); assertTrue("http://localhost:8080/blog/2018/05?page=0&status=published&status=proofred".equals(uri) || "http://localhost:8080/blog/2018/05?status=published&status=proofred&page=0".equals(uri)); }
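// A hypothetical standalone sketch of the two placeholder styles handled above:
// WADL-style "{name}" and Swagger-style "/:name". Names here are illustrative only.
public class TemplateSketch {
    static String fill(String pattern, String name, String value) {
        String wadlTemplate = "{" + name + "}";
        String swaggerTemplate = "/:" + name;
        if (pattern.contains(wadlTemplate)) {
            return pattern.replace(wadlTemplate, value);
        }
        if (pattern.contains(swaggerTemplate)) {
            // only "/:name" is matched, so the scheme and port colons are left alone
            return pattern.replace(":" + name, value);
        }
        return pattern;
    }

    public static void main(String[] args) {
        System.out.println(fill("http://localhost:8080/blog/{year}/05", "year", "2018"));
        System.out.println(fill("http://localhost:8080/blog/:year/05", "year", "2018"));
        // both print http://localhost:8080/blog/2018/05
    }
}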
public XmlStreamInfo information() throws IOException { if (information.problem != null) { return information; } if (XMLStreamConstants.START_DOCUMENT != reader.getEventType()) { information.problem = new IllegalStateException("Expected START_DOCUMENT"); return information; } boolean skipComments = false; try { while (reader.hasNext()) { int ev = reader.next(); switch (ev) { case XMLStreamConstants.COMMENT: if (!skipComments) { // search for modelines String comment = reader.getText(); if (comment != null) { comment.lines().map(String::trim).forEach(l -> { if (l.startsWith("camel-k:")) { information.modelines.add(l); } }); } } break; case XMLStreamConstants.START_ELEMENT: if (information.rootElementName != null) { // only root element is checked. No need to parse more return information; } skipComments = true; information.rootElementName = reader.getLocalName(); information.rootElementNamespace = reader.getNamespaceURI(); for (int ns = 0; ns < reader.getNamespaceCount(); ns++) { String prefix = reader.getNamespacePrefix(ns); information.namespaceMapping.put(prefix == null ? "" : prefix, reader.getNamespaceURI(ns)); } for (int at = 0; at < reader.getAttributeCount(); at++) { QName qn = reader.getAttributeName(at); String prefix = qn.getPrefix() == null ? "" : qn.getPrefix().trim(); String nsURI = qn.getNamespaceURI() == null ? "" : qn.getNamespaceURI().trim(); String value = reader.getAttributeValue(at); String localPart = qn.getLocalPart(); if (nsURI.isEmpty() || prefix.isEmpty()) { // according to the XML spec, this attribute is not namespaced and not in the default namespace // https://www.w3.org/TR/xml-names/#defaulting // > The namespace name for an unprefixed attribute name always has no value. information.attributes.put(localPart, value); } else { information.attributes.put("{" + nsURI + "}" + localPart, value); information.attributes.put(prefix + ":" + localPart, value); } } break; case XMLStreamConstants.END_ELEMENT: case XMLStreamConstants.END_DOCUMENT: if (information.rootElementName == null) { information.problem = new IllegalArgumentException("XML Stream is empty"); return information; } break; default: break; } } } catch (XMLStreamException e) { information.problem = e; return information; } return information; }
@Test public void simplestDocument() throws IOException { String xml = "<root />"; XmlStreamDetector detector = new XmlStreamDetector(new ByteArrayInputStream(xml.getBytes(StandardCharsets.UTF_8))); XmlStreamInfo info = detector.information(); assertTrue(info.isValid()); assertEquals("root", info.getRootElementName()); assertNull(info.getRootElementNamespace()); }
@Override public Consumer<Packet> get() { return responseHandler; }
@Test public void get_whenZeroResponseThreads() { supplier = newSupplier(0); assertInstanceOf(InboundResponseHandler.class, supplier.get()); }
@Override @SuppressWarnings("unchecked") public O process(I i) throws Exception { LOG.debug("processing item [{}]...", i); O result = (O) producerTemplate.requestBody(endpointUri, i); LOG.debug("processed item"); return result; }
@Test public void shouldReturnDoubledMessage() throws Exception { // When String messageRead = camelItemProcessor.process(message); // Then assertEquals(message + message, messageRead); }
@Override public ByteBuf writeMediumLE(int value) { ensureWritable0(3); _setMediumLE(writerIndex, value); writerIndex += 3; return this; }
@Test public void testWriteMediumLEAfterRelease() { assertThrows(IllegalReferenceCountException.class, new Executable() { @Override public void execute() { releasedBuffer().writeMediumLE(1); } }); }
public static DumpedPrivateKey fromBase58(@Nullable Network network, String base58) throws AddressFormatException, AddressFormatException.WrongNetwork { byte[] versionAndDataBytes = Base58.decodeChecked(base58); int version = versionAndDataBytes[0] & 0xFF; byte[] bytes = Arrays.copyOfRange(versionAndDataBytes, 1, versionAndDataBytes.length); if (network == null) { for (NetworkParameters p : Networks.get()) if (version == p.getDumpedPrivateKeyHeader()) return new DumpedPrivateKey(p.network(), bytes); throw new AddressFormatException.InvalidPrefix("No network found for version " + version); } else { NetworkParameters params = NetworkParameters.of(network); if (version == params.getDumpedPrivateKeyHeader()) return new DumpedPrivateKey(network, bytes); throw new AddressFormatException.WrongNetwork(version); } }
@Test(expected = AddressFormatException.InvalidDataLength.class) public void fromBase58_tooShort() { String base58 = Base58.encodeChecked(NetworkParameters.of(MAINNET).getDumpedPrivateKeyHeader(), new byte[31]); DumpedPrivateKey.fromBase58((Network) null, base58); }
public static StrictFieldProjectionFilter fromSemicolonDelimitedString(String columnsToKeepGlobs) { return new StrictFieldProjectionFilter(parseSemicolonDelimitedString(columnsToKeepGlobs)); }
@Test public void testFromSemicolonDelimitedString() { List<String> globs = StrictFieldProjectionFilter.parseSemicolonDelimitedString(";x.y.z;*.a.b.c*;;foo;;;;bar;"); assertEquals(Arrays.asList("x.y.z", "*.a.b.c*", "foo", "bar"), globs); try { StrictFieldProjectionFilter.parseSemicolonDelimitedString(";;"); fail("this should throw"); } catch (ThriftProjectionException e) { assertEquals("Semicolon delimited string ';;' contains 0 glob strings", e.getMessage()); } }
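// A hypothetical sketch of the parsing contract exercised above: split on ';',
// drop empty segments, and reject inputs that contain no globs at all.
import java.util.ArrayList;
import java.util.List;

public class GlobParseSketch {
    static List<String> parse(String input) {
        List<String> globs = new ArrayList<>();
        for (String part : input.split(";")) {
            if (!part.isEmpty()) {
                globs.add(part);
            }
        }
        if (globs.isEmpty()) {
            throw new IllegalArgumentException("Semicolon delimited string '" + input + "' contains 0 glob strings");
        }
        return globs;
    }

    public static void main(String[] args) {
        System.out.println(parse(";x.y.z;*.a.b.c*;;foo;;;;bar;")); // [x.y.z, *.a.b.c*, foo, bar]
        parse(";;"); // throws IllegalArgumentException
    }
}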
@Transactional public String login(OauthLoginRequest request) { OauthInfoApiResponse oauthInfoApiResponse = oauthClient.requestOauthInfo(request); User user = userRepository.findByEmail(oauthInfoApiResponse.kakao_account().email()) .orElseGet(() -> signUp(oauthInfoApiResponse)); return jwtTokenProvider.createToken(user); }
@DisplayName("Login success: if the user already exists, return a token immediately instead of adding a new user to the database.") @Test void login() { // given userRepository.save(USER1); Mockito.when(oauthClient.requestOauthInfo(any(OauthLoginRequest.class))) .thenReturn(UserFixture.OAUTH_INFO_RESPONSE_USER1); // when String token = authService.login(oauthLoginRequest); // then assertThat(token).isNotBlank(); }
@Override public CompletableFuture<Void> close(boolean closeWithoutWaitingClientDisconnect) { return close(true, closeWithoutWaitingClientDisconnect); }
@Test public void testRemoveProducerOnNonPersistentTopic() throws Exception { final String topicName = "non-persistent://prop/ns-abc/topic_" + UUID.randomUUID(); Producer<byte[]> producer = pulsarClient.newProducer() .topic(topicName) .create(); NonPersistentTopic topic = (NonPersistentTopic) pulsar.getBrokerService().getTopicReference(topicName).get(); Field field = AbstractTopic.class.getDeclaredField("userCreatedProducerCount"); field.setAccessible(true); int userCreatedProducerCount = (int) field.get(topic); assertEquals(userCreatedProducerCount, 1); producer.close(); userCreatedProducerCount = (int) field.get(topic); assertEquals(userCreatedProducerCount, 0); }
@Override public void removeInstancePort(String portId) { checkArgument(!Strings.isNullOrEmpty(portId), ERR_NULL_INSTANCE_PORT_ID); synchronized (this) { if (isInstancePortInUse(portId)) { final String error = String.format(MSG_INSTANCE_PORT, portId, ERR_IN_USE); throw new IllegalStateException(error); } InstancePort instancePort = instancePortStore.removeInstancePort(portId); if (instancePort != null) { log.info(String.format(MSG_INSTANCE_PORT, instancePort.portId(), MSG_REMOVED)); } } }
@Test(expected = IllegalArgumentException.class) public void testRemoveInstancePortWithNull() { target.removeInstancePort(null); }
@Override public boolean isEnabled(CeWorker ceWorker) { return ceWorker.getOrdinal() < ceConfiguration.getWorkerCount(); }
@Test public void isEnabled_returns_true_if_ordinal_is_invalid() { int ordinal = -1 - random.nextInt(3); when(ceWorker.getOrdinal()).thenReturn(ordinal); assertThat(underTest.isEnabled(ceWorker)) .as("For invalid ordinal " + ordinal + " and workerCount " + randomWorkerCount) .isTrue(); }
@VisibleForTesting int log2Floor(long n) { checkArgument(n >= 0); return n == 0 ? -1 : LongMath.log2(n, RoundingMode.FLOOR); }
@Test public void testLog2Floor_Positive() { OrderedCode orderedCode = new OrderedCode(); assertEquals(0, orderedCode.log2Floor(1)); assertEquals(1, orderedCode.log2Floor(2)); assertEquals(1, orderedCode.log2Floor(3)); assertEquals(2, orderedCode.log2Floor(4)); assertEquals(5, orderedCode.log2Floor(63)); assertEquals(6, orderedCode.log2Floor(64)); assertEquals(62, orderedCode.log2Floor(Long.MAX_VALUE)); }
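// For positive longs, floor(log2(n)) equals 63 minus the number of leading zero bits —
// a branch-free alternative to the Guava call above (a sketch, not the library code).
public class Log2Sketch {
    static int log2Floor(long n) {
        if (n <= 0) {
            throw new IllegalArgumentException("n must be positive");
        }
        return 63 - Long.numberOfLeadingZeros(n);
    }

    public static void main(String[] args) {
        System.out.println(log2Floor(1));              // 0
        System.out.println(log2Floor(63));             // 5
        System.out.println(log2Floor(64));             // 6
        System.out.println(log2Floor(Long.MAX_VALUE)); // 62
    }
}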
static String removeWhiteSpaceFromJson(String json) { // reparse the JSON to ensure that all whitespace formatting is uniform String flattened = FLAT_GSON.toJson(JsonParser.parseString(json)); return flattened; }
@Test public void removeWhiteSpaceFromJson_removesNewLinesAndLeadingSpaceAndMiddleSpace() { String input = "{\n \"a\": 123,\n \"b\": 456\n}"; String output = "{\"a\":123,\"b\":456}"; assertThat( removeWhiteSpaceFromJson(input), is(output) ); }
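// Why reparse rather than strip whitespace with a regex: whitespace inside string values
// must survive. A minimal demonstration, assuming Gson 2.8.6+ on the classpath:
import com.google.gson.Gson;
import com.google.gson.JsonParser;

public class MinifySketch {
    public static void main(String[] args) {
        String json = "{\n  \"msg\": \"hello world\"\n}";
        // serializing the parse tree drops formatting whitespace but keeps the value intact
        String minified = new Gson().toJson(JsonParser.parseString(json));
        System.out.println(minified); // {"msg":"hello world"}
    }
}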
public static void disablePullConsumption(DefaultLitePullConsumerWrapper wrapper, Set<String> topics) { Set<String> subscribedTopic = wrapper.getSubscribedTopics(); if (subscribedTopic.stream().anyMatch(topics::contains)) { suspendPullConsumer(wrapper); return; } resumePullConsumer(wrapper); }
@Test public void testDisablePullConsumptionWithNoSubTractTopics() { subscribedTopics = new HashSet<>(); subscribedTopics.add("test-topic-2"); subscribedTopics.add("test-topic-3"); pullConsumerWrapper.setSubscribedTopics(subscribedTopics); pullConsumerWrapper.setProhibition(true); pullConsumerWrapper.setSubscriptionType(SubscriptionType.SUBSCRIBE); RocketMqPullConsumerController.disablePullConsumption(pullConsumerWrapper, prohibitionTopics); Assert.assertFalse(pullConsumerWrapper.isProhibition()); // after consumption resumes, deliver the consumption-prohibition config again MQClientInstance clientFactory = pullConsumerWrapper.getClientFactory(); Mockito.reset(clientFactory); RocketMqPullConsumerController.disablePullConsumption(pullConsumerWrapper, prohibitionTopics); Mockito.verify(clientFactory, Mockito.times(0)) .registerConsumer(Mockito.any(), Mockito.any()); }
@Override public long getCreationTime() { return creationTime; }
@Test public void testGetCreationTime() { assertTrue(queryCacheEventData.getCreationTime() > 0); }
@Override public Alarm save(Alarm alarm, User user) throws ThingsboardException { ActionType actionType = alarm.getId() == null ? ActionType.ADDED : ActionType.UPDATED; TenantId tenantId = alarm.getTenantId(); try { AlarmApiCallResult result; if (alarm.getId() == null) { result = alarmSubscriptionService.createAlarm(AlarmCreateOrUpdateActiveRequest.fromAlarm(alarm, user.getId())); } else { result = alarmSubscriptionService.updateAlarm(AlarmUpdateRequest.fromAlarm(alarm, user.getId())); } if (!result.isSuccessful()) { throw new ThingsboardException(ThingsboardErrorCode.ITEM_NOT_FOUND); } AlarmInfo resultAlarm = result.getAlarm(); if (alarm.isAcknowledged() && !resultAlarm.isAcknowledged()) { resultAlarm = ack(resultAlarm, alarm.getAckTs(), user); } if (alarm.isCleared() && !resultAlarm.isCleared()) { resultAlarm = clear(resultAlarm, alarm.getClearTs(), user); } UserId newAssignee = alarm.getAssigneeId(); UserId curAssignee = resultAlarm.getAssigneeId(); if (newAssignee != null && !newAssignee.equals(curAssignee)) { resultAlarm = assign(resultAlarm, newAssignee, alarm.getAssignTs(), user); } else if (newAssignee == null && curAssignee != null) { resultAlarm = unassign(alarm, alarm.getAssignTs(), user); } if (result.isModified()) { logEntityActionService.logEntityAction(tenantId, alarm.getOriginator(), resultAlarm, resultAlarm.getCustomerId(), actionType, user); } return new Alarm(resultAlarm); } catch (Exception e) { logEntityActionService.logEntityAction(tenantId, emptyId(EntityType.ALARM), alarm, actionType, user, e); throw e; } }
@Test public void testSave() throws ThingsboardException { var alarm = new AlarmInfo(); when(alarmSubscriptionService.createAlarm(any())).thenReturn(AlarmApiCallResult.builder() .successful(true) .modified(true) .alarm(alarm) .build()); service.save(alarm, new User()); verify(logEntityActionService, times(1)).logEntityAction(any(), any(), any(), any(), eq(ActionType.ADDED), any()); verify(alarmSubscriptionService, times(1)).createAlarm(any()); }
@Tolerate public void setChatId(@NonNull Long chatId) { this.chatId = chatId.toString(); }
@Test public void chatIdCantBeEmpty() { SendInvoice sendInvoice = createSendInvoiceObject(); sendInvoice.setChatId(""); Throwable thrown = assertThrows(TelegramApiValidationException.class, sendInvoice::validate); assertEquals("ChatId parameter can't be empty", thrown.getMessage()); }
@VisibleForTesting SmsTemplateDO validateSmsTemplate(String templateCode) { // Fetch the SMS template; for efficiency, read it from the cache SmsTemplateDO template = smsTemplateService.getSmsTemplateByCodeFromCache(templateCode); // the SMS template does not exist if (template == null) { throw exception(SMS_SEND_TEMPLATE_NOT_EXISTS); } return template; }
@Test public void testCheckSmsTemplateValid_notExists() { // prepare parameters String templateCode = randomString(); // mock methods // invoke and assert the exception assertServiceException(() -> smsSendService.validateSmsTemplate(templateCode), SMS_SEND_TEMPLATE_NOT_EXISTS); }
public static SchemaAndValue parseString(String value) { if (value == null) { return NULL_SCHEMA_AND_VALUE; } if (value.isEmpty()) { return new SchemaAndValue(Schema.STRING_SCHEMA, value); } ValueParser parser = new ValueParser(new Parser(value)); return parser.parse(false); }
@Test public void shouldParseStringListWithNullFirstAsString() { String str = "[null, 1]"; SchemaAndValue result = Values.parseString(str); assertEquals(Type.STRING, result.schema().type()); assertEquals(str, result.value()); }
public static SetAclPOptions setAclDefaults(AlluxioConfiguration conf) { return SetAclPOptions.newBuilder() .setCommonOptions(commonDefaults(conf)) .setRecursive(false) .build(); }
@Test public void setAclOptionsDefaults() { SetAclPOptions options = FileSystemOptionsUtils.setAclDefaults(mConf); assertNotNull(options); assertFalse(options.getRecursive()); }
public static VerificationMode once() { return times(1); }
@Test public void should_verify_form_data() throws Exception { final HttpServer server = httpServer(port(), hit); server.post(eq(form("name"), "dreamhead")).response("foobar"); running(server, () -> { Request request = Request.post(root()).bodyForm(new BasicNameValuePair("name", "dreamhead")); String content = helper.executeAsString(request); assertThat(content, is("foobar")); }); hit.verify(eq(form("name"), "dreamhead"), once()); }
public boolean equalPathsTo(NodeRelativePath nodeRelativePath2) { return Arrays.equals(beginPath, nodeRelativePath2.beginPath) && Arrays.equals(endPath, nodeRelativePath2.endPath); }
@Test public void equalPaths(){ final NodeModel parent = root(); final NodeModel node1 = new NodeModel("node1", map); parent.insert(node1); final NodeModel node2 = new NodeModel("node2", map); parent.insert(node2); final NodeRelativePath nodeRelativePath1 = new NodeRelativePath(node1, node2); final NodeRelativePath nodeRelativePath2 = new NodeRelativePath(node1, node2); assertTrue(nodeRelativePath1.equalPathsTo(nodeRelativePath2)); }
@Override public boolean contains(double lat1, double lon1) { return normDist(lat1, lon1) <= normedDist; }
@Test public void testContains() { Circle c = new Circle(10, 10, 120000); assertTrue(c.contains(new BBox(9, 11, 10, 10.1))); assertFalse(c.contains(new BBox(9, 11, 8, 9))); assertFalse(c.contains(new BBox(9, 12, 10, 10.1))); }
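// A self-contained sketch (not the library implementation) of point-in-circle on a
// sphere via the haversine distance, the usual basis for such containment checks.
public class CircleSketch {
    static final double EARTH_RADIUS_M = 6_371_000;

    static double haversineMeters(double lat1, double lon1, double lat2, double lon2) {
        double dLat = Math.toRadians(lat2 - lat1);
        double dLon = Math.toRadians(lon2 - lon1);
        double a = Math.sin(dLat / 2) * Math.sin(dLat / 2)
                + Math.cos(Math.toRadians(lat1)) * Math.cos(Math.toRadians(lat2))
                * Math.sin(dLon / 2) * Math.sin(dLon / 2);
        return 2 * EARTH_RADIUS_M * Math.asin(Math.sqrt(a));
    }

    static boolean contains(double centerLat, double centerLon, double radiusMeters, double lat, double lon) {
        return haversineMeters(centerLat, centerLon, lat, lon) <= radiusMeters;
    }

    public static void main(String[] args) {
        System.out.println(contains(10, 10, 120_000, 10, 10.1)); // true, ~11 km away
        System.out.println(contains(10, 10, 120_000, 8, 9));     // false, ~246 km away
    }
}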
public <T extends BaseRequest<T, R>, R extends BaseResponse> R execute(BaseRequest<T, R> request) { return api.send(request); }
@Test public void sendLocation() { float lat = 21.999998f, lng = 105.2f, horizontalAccuracy = 1.9f; int livePeriod = 60, heading = 120, proximityAlertRadius = 50000; Location location = bot.execute(new SendLocation(chatId, lat, lng) .horizontalAccuracy(horizontalAccuracy) .livePeriod(livePeriod) .heading(heading) .proximityAlertRadius(proximityAlertRadius) ).message().location(); assertEquals(lat, location.latitude(), 0.00001f); assertEquals(lng, location.longitude(), 0.00001f); assertEquals(horizontalAccuracy, location.horizontalAccuracy(), 0.11f); assertEquals(livePeriod, location.livePeriod().intValue()); assertEquals(heading, location.heading().intValue()); assertEquals(proximityAlertRadius, location.proximityAlertRadius().intValue()); }
@NotNull @Override public INode enrich(@NotNull INode node) { if (node instanceof SHA2 sha2) { return enrich(sha2); } return node; }
@Test void oid1() { DetectionLocation testDetectionLocation = new DetectionLocation("testfile", 1, 1, List.of("test"), () -> "Jca"); final SHA2 sha256 = new SHA2(256, testDetectionLocation); this.logBefore(sha256); final SHA2Enricher sha2Enricher = new SHA2Enricher(); final INode enriched = sha2Enricher.enrich(sha256); this.logAfter(enriched); assertThat(enriched.hasChildOfType(Oid.class)).isPresent(); assertThat(enriched.hasChildOfType(Oid.class).get().asString()) .isEqualTo("2.16.840.1.101.3.4.2.1"); }
@GetMapping("/authorize") @Operation(summary = "Get authorization information", description = "For the authorization code mode or the implicit mode; invoked by the sso.vue single sign-on page to fetch authorization info") @Parameter(name = "clientId", required = true, description = "client id", example = "tudou") public CommonResult<OAuth2OpenAuthorizeInfoRespVO> authorize(@RequestParam("clientId") String clientId) { // 0. Verify that the user is already logged in; enforced via Spring Security // 1. Fetch the client information OAuth2ClientDO client = oauth2ClientService.validOAuthClientFromCache(clientId); // 2. Fetch the scopes the user has already approved List<OAuth2ApproveDO> approves = oauth2ApproveService.getApproveList(getLoginUserId(), getUserType(), clientId); // assemble and return the response return success(OAuth2OpenConvert.INSTANCE.convert(client, approves)); }
@Test public void testAuthorize() { // prepare parameters String clientId = randomString(); // mock method (client) OAuth2ClientDO client = randomPojo(OAuth2ClientDO.class).setClientId("demo_client_id").setScopes(ListUtil.toList("read", "write", "all")); when(oauth2ClientService.validOAuthClientFromCache(eq(clientId))).thenReturn(client); // mock method (approve) List<OAuth2ApproveDO> approves = asList( randomPojo(OAuth2ApproveDO.class).setScope("read").setApproved(true), randomPojo(OAuth2ApproveDO.class).setScope("write").setApproved(false)); when(oauth2ApproveService.getApproveList(isNull(), eq(UserTypeEnum.ADMIN.getValue()), eq(clientId))).thenReturn(approves); // invoke CommonResult<OAuth2OpenAuthorizeInfoRespVO> result = oauth2OpenController.authorize(clientId); // assert assertEquals(0, result.getCode()); assertPojoEquals(client, result.getData().getClient()); assertEquals(new KeyValue<>("read", true), result.getData().getScopes().get(0)); assertEquals(new KeyValue<>("write", false), result.getData().getScopes().get(1)); assertEquals(new KeyValue<>("all", false), result.getData().getScopes().get(2)); }
@Override public HttpClientResponse execute(URI uri, String httpMethod, RequestHttpEntity requestHttpEntity) throws Exception { while (interceptors.hasNext()) { HttpClientRequestInterceptor nextInterceptor = interceptors.next(); if (nextInterceptor.isIntercept(uri, httpMethod, requestHttpEntity)) { return nextInterceptor.intercept(); } } return httpClientRequest.execute(uri, httpMethod, requestHttpEntity); }
@Test void testExecuteIntercepted() throws Exception { when(interceptor.isIntercept(any(), any(), any())).thenReturn(true); HttpClientResponse response = clientRequest.execute(URI.create("http://example.com"), "GET", new RequestHttpEntity(Header.EMPTY, Query.EMPTY)); assertEquals(interceptorResponse, response); }
@Nullable public static ValueReference of(Object value) { if (value instanceof Boolean) { return of((Boolean) value); } else if (value instanceof Double) { return of((Double) value); } else if (value instanceof Float) { return of((Float) value); } else if (value instanceof Integer) { return of((Integer) value); } else if (value instanceof Long) { return of((Long) value); } else if (value instanceof String) { return of((String) value); } else if (value instanceof Enum) { return of((Enum) value); } else if (value instanceof EncryptedValue encryptedValue) { return of(encryptedValue); } else { return null; } }
@Test public void deserializeString() throws IOException { assertThat(objectMapper.readValue("{\"@type\":\"string\",\"@value\":\"\"}", ValueReference.class)).isEqualTo(ValueReference.of("")); assertThat(objectMapper.readValue("{\"@type\":\"string\",\"@value\":\"Test\"}", ValueReference.class)).isEqualTo(ValueReference.of("Test")); }
public static EvictionConfig newEvictionConfig(Integer maxSize, MaxSizePolicy maxSizePolicy, EvictionPolicy evictionPolicy, boolean isNearCache, boolean isIMap, String comparatorClassName, EvictionPolicyComparator<?, ?, ?> comparator) { int finalSize = maxSize(maxSize, isIMap); MaxSizePolicy finalMaxSizePolicy = maxSizePolicy(maxSizePolicy, isIMap); EvictionPolicy finalEvictionPolicy = evictionPolicy(evictionPolicy, isIMap); try { doEvictionConfigChecks(finalMaxSizePolicy, finalEvictionPolicy, comparatorClassName, comparator, isIMap, isNearCache); } catch (IllegalArgumentException e) { throw new InvalidConfigurationException(e.getMessage()); } EvictionConfig evictionConfig = new EvictionConfig() .setSize(finalSize) .setMaxSizePolicy(finalMaxSizePolicy) .setEvictionPolicy(finalEvictionPolicy); if (comparatorClassName != null) { evictionConfig.setComparatorClassName(comparatorClassName); } if (comparator != null) { evictionConfig.setComparator(comparator); } return evictionConfig; }
@Test public void should_use_default_cache_max_size_for_null_size() { EvictionConfig evictionConfig = newEvictionConfig(null, false); assertThat(evictionConfig.getSize()).isEqualTo(EvictionConfig.DEFAULT_MAX_ENTRY_COUNT); }
@Override public TbPair<Boolean, JsonNode> upgrade(int fromVersion, JsonNode oldConfiguration) throws TbNodeException { return fromVersion == 0 ? upgradeRuleNodesWithOldPropertyToUseFetchTo( oldConfiguration, "fetchToMetadata", TbMsgSource.METADATA.name(), TbMsgSource.DATA.name()) : new TbPair<>(false, oldConfiguration); }
@Test void givenOldConfig_whenUpgrade_thenShouldReturnTrueResultWithNewConfig() throws Exception { String oldConfig = "{\"fetchToMetadata\":true}"; JsonNode configJson = JacksonUtil.toJsonNode(oldConfig); TbPair<Boolean, JsonNode> upgrade = node.upgrade(0, configJson); assertTrue(upgrade.getFirst()); assertEquals(config, JacksonUtil.treeToValue(upgrade.getSecond(), config.getClass())); }
@Override protected CompletableFuture<JobSubmitResponseBody> handleRequest( @Nonnull HandlerRequest<JobSubmitRequestBody> request, @Nonnull DispatcherGateway gateway) throws RestHandlerException { final Collection<File> uploadedFiles = request.getUploadedFiles(); final Map<String, Path> nameToFile = uploadedFiles.stream() .collect(Collectors.toMap(File::getName, Path::fromLocalFile)); if (uploadedFiles.size() != nameToFile.size()) { throw new RestHandlerException( String.format( "The number of uploaded files was %s than the expected count. Expected: %s Actual %s", uploadedFiles.size() < nameToFile.size() ? "lower" : "higher", nameToFile.size(), uploadedFiles.size()), HttpResponseStatus.BAD_REQUEST); } final JobSubmitRequestBody requestBody = request.getRequestBody(); if (requestBody.jobGraphFileName == null) { throw new RestHandlerException( String.format( "The %s field must not be omitted or be null.", JobSubmitRequestBody.FIELD_NAME_JOB_GRAPH), HttpResponseStatus.BAD_REQUEST); } CompletableFuture<JobGraph> jobGraphFuture = loadJobGraph(requestBody, nameToFile); Collection<Path> jarFiles = getJarFilesToUpload(requestBody.jarFileNames, nameToFile); Collection<Tuple2<String, Path>> artifacts = getArtifactFilesToUpload(requestBody.artifactFileNames, nameToFile); CompletableFuture<JobGraph> finalizedJobGraphFuture = uploadJobGraphFiles(gateway, jobGraphFuture, jarFiles, artifacts, configuration); CompletableFuture<Acknowledge> jobSubmissionFuture = finalizedJobGraphFuture.thenCompose( jobGraph -> gateway.submitJob(jobGraph, timeout)); return jobSubmissionFuture.thenCombine( jobGraphFuture, (ack, jobGraph) -> new JobSubmitResponseBody("/jobs/" + jobGraph.getJobID())); }
@TestTemplate void testFileHandling() throws Exception { final String dcEntryName = "entry"; CompletableFuture<JobGraph> submittedJobGraphFuture = new CompletableFuture<>(); DispatcherGateway dispatcherGateway = TestingDispatcherGateway.newBuilder() .setBlobServerPort(blobServer.getPort()) .setSubmitFunction( submittedJobGraph -> { submittedJobGraphFuture.complete(submittedJobGraph); return CompletableFuture.completedFuture(Acknowledge.get()); }) .build(); JobSubmitHandler handler = new JobSubmitHandler( () -> CompletableFuture.completedFuture(dispatcherGateway), RpcUtils.INF_TIMEOUT, Collections.emptyMap(), Executors.directExecutor(), configuration); final Path jobGraphFile = TempDirUtils.newFile(temporaryFolder).toPath(); final Path jarFile = TempDirUtils.newFile(temporaryFolder).toPath(); final Path artifactFile = TempDirUtils.newFile(temporaryFolder).toPath(); final JobGraph jobGraph = JobGraphTestUtils.emptyJobGraph(); // the entry that should be updated jobGraph.addUserArtifact( dcEntryName, new DistributedCache.DistributedCacheEntry("random", false)); try (ObjectOutputStream objectOut = new ObjectOutputStream(Files.newOutputStream(jobGraphFile))) { objectOut.writeObject(jobGraph); } JobSubmitRequestBody request = new JobSubmitRequestBody( jobGraphFile.getFileName().toString(), Collections.singletonList(jarFile.getFileName().toString()), Collections.singleton( new JobSubmitRequestBody.DistributedCacheFile( dcEntryName, artifactFile.getFileName().toString()))); handler.handleRequest( HandlerRequest.create( request, EmptyMessageParameters.getInstance(), Arrays.asList( jobGraphFile.toFile(), jarFile.toFile(), artifactFile.toFile())), dispatcherGateway) .get(); assertThat(submittedJobGraphFuture).as("No JobGraph was submitted.").isCompleted(); final JobGraph submittedJobGraph = submittedJobGraphFuture.get(); assertThat(submittedJobGraph.getUserJarBlobKeys()).hasSize(1); assertThat(submittedJobGraph.getUserArtifacts()).hasSize(1); assertThat(submittedJobGraph.getUserArtifacts().get(dcEntryName).blobKey).isNotNull(); }
@Nonnull public static <K, V> BatchSource<Entry<K, V>> map(@Nonnull String mapName) { return batchFromProcessor("mapSource(" + mapName + ')', readMapP(mapName)); }
@Test public void mapWithFilterAndProjection_byRef() { // Given List<Integer> input = sequence(itemCount); putToBatchSrcMap(input); // When BatchSource<Integer> source = Sources.map(srcMap, truePredicate(), Projections.singleAttribute("value")); // Then p.readFrom(source).writeTo(sink); execute(); assertEquals(toBag(input), sinkToBag()); }
public static RawPrivateTransaction decode(final String hexTransaction) { final byte[] transaction = Numeric.hexStringToByteArray(hexTransaction); final TransactionType transactionType = getPrivateTransactionType(transaction); if (transactionType == TransactionType.EIP1559) { return decodePrivateTransaction1559(transaction); } return decodeLegacyPrivateTransaction(transaction); }
@Test public void testDecodingSignedPrivacyGroup() throws Exception { final BigInteger nonce = BigInteger.ZERO; final BigInteger gasPrice = BigInteger.ONE; final BigInteger gasLimit = BigInteger.TEN; final String to = "0x0add5355"; final RawPrivateTransaction rawTransaction = RawPrivateTransaction.createTransaction( nonce, gasPrice, gasLimit, to, "", MOCK_ENCLAVE_KEY, MOCK_ENCLAVE_KEY, RESTRICTED); final String privateKey = "8f2a55949038a9610f50fb23b5883af3b4ecb3c3bb792cbcefbd1542c692be63"; final Credentials credentials = Credentials.create(privateKey); final byte[] encodedMessage = PrivateTransactionEncoder.signMessage(rawTransaction, credentials); final String hexMessage = Numeric.toHexString(encodedMessage); final RawPrivateTransaction result = PrivateTransactionDecoder.decode(hexMessage); assertNotNull(result); assertEquals(nonce, result.getNonce()); assertEquals(gasPrice, result.getGasPrice()); assertEquals(gasLimit, result.getGasLimit()); assertEquals(to, result.getTo()); assertEquals("", result.getData()); assertEquals(MOCK_ENCLAVE_KEY, result.getPrivateFrom()); assertEquals(MOCK_ENCLAVE_KEY, result.getPrivacyGroupId().get()); assertEquals(RESTRICTED, result.getRestriction()); assertTrue(result instanceof SignedRawPrivateTransaction); final SignedRawPrivateTransaction signedResult = (SignedRawPrivateTransaction) result; assertNotNull(signedResult.getSignatureData()); Sign.SignatureData signatureData = signedResult.getSignatureData(); final byte[] encodedTransaction = PrivateTransactionEncoder.encode(rawTransaction); final BigInteger key = Sign.signedMessageToKey(encodedTransaction, signatureData); assertEquals(key, credentials.getEcKeyPair().getPublicKey()); assertEquals(credentials.getAddress(), signedResult.getFrom()); signedResult.verify(credentials.getAddress()); assertNull(signedResult.getChainId()); }
@Override public FindCoordinatorRequest.Builder buildRequest(Set<CoordinatorKey> keys) { unrepresentableKeys = keys.stream().filter(k -> k == null || !isRepresentableKey(k.idValue)).collect(Collectors.toSet()); Set<CoordinatorKey> representableKeys = keys.stream().filter(k -> k != null && isRepresentableKey(k.idValue)).collect(Collectors.toSet()); if (batch) { ensureSameType(representableKeys); FindCoordinatorRequestData data = new FindCoordinatorRequestData() .setKeyType(type.id()) .setCoordinatorKeys(representableKeys.stream().map(k -> k.idValue).collect(Collectors.toList())); return new FindCoordinatorRequest.Builder(data); } else { CoordinatorKey key = requireSingletonAndType(representableKeys); return new FindCoordinatorRequest.Builder( new FindCoordinatorRequestData() .setKey(key.idValue) .setKeyType(key.type.id()) ); } }
@Test public void testBuildLookupRequestNonRepresentable() { CoordinatorStrategy strategy = new CoordinatorStrategy(CoordinatorType.GROUP, new LogContext()); FindCoordinatorRequest.Builder request = strategy.buildRequest(new HashSet<>(Arrays.asList( CoordinatorKey.byGroupId("foo"), null))); assertEquals("", request.data().key()); assertEquals(1, request.data().coordinatorKeys().size()); }
@Override public void unlock(final Path file, final String token) throws BackgroundException { try { for(LockFileResultEntry result : new DbxUserFilesRequests(session.getClient(file)).unlockFileBatch(Collections.singletonList( new UnlockFileArg(containerService.getKey(file)))).getEntries()) { if(result.isFailure()) { throw failure(result); } if(log.isDebugEnabled()) { log.debug(String.format("Unlocked file %s with result %s", file, result.getSuccessValue())); } } } catch(DbxException e) { throw new DropboxExceptionMappingService().map("Failure to write attributes of {0}", e, file); } }
@Test public void testLockNotfound() throws Exception { final DropboxTouchFeature touch = new DropboxTouchFeature(session); final Path file = touch.touch(new Path(new Path(new DefaultHomeFinderService(session).find(), "Projects", EnumSet.of(Path.Type.directory, Path.Type.shared)).withAttributes(new PathAttributes().withFileId("7581509952")), new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus()); final DropboxLockFeature f = new DropboxLockFeature(session); f.unlock(file, "l"); new DropboxDeleteFeature(session).delete(Collections.singletonList(file), new DisabledLoginCallback(), new Delete.DisabledCallback()); }
public static Map<String, Object> entries() { return CONTEXT_HOLDER.entries(); }
@Test public void testEntries() { RootContext.bind(DEFAULT_XID); Map<String, Object> entries = RootContext.entries(); assertThat(entries.get(RootContext.KEY_XID)).isEqualTo(DEFAULT_XID); RootContext.unbind(); }
@Override public Map<String, Object> create(final T entity, final Class<?> entityClass) { final Map<String, Object> respEntityContext = this.responseEntityConverter.convertValue(entity, entityClass); if (respEntityContext != null) { return Map.of("response_entity", respEntityContext); } else { return Map.of("response_entity", Map.of()); } }
@Test void createsProperContext() { final Map<String, Object> expected = Map.of("response_entity", Map.of( "tiny_id", "42", "tiny_title", "Carramba!" )); DefaultSuccessContextCreator<TinyEntity> toTest = new DefaultSuccessContextCreator<>(new ResponseEntityConverter(new ObjectMapper())); assertThat(toTest.create(new TinyEntity("42", "Carramba!"), TinyEntity.class)).isEqualTo(expected); }
@Override @SuppressWarnings("unchecked") // compatibility is explicitly checked public <T> Set<T> convertTo(Class<T> type) { if (type.isAssignableFrom(getClass())) { return (Set<T>) Collections.singleton(this); } if (type.isAssignableFrom(Dependency.class)) { return (Set<T>) Dependency.tryCreateFromAccess(this); } return Collections.emptySet(); }
@Test public void convertTo() { TestJavaAccess access = javaAccessFrom(importClassWithContext(String.class), "toString") .to(Object.class, "toString") .inLineNumber(11); assertThatConversionOf(access) .satisfiesStandardConventions() .isPossibleToSingleElement(Dependency.class, dependency -> { assertThatDependency(dependency) .matches(String.class, Object.class) .hasDescription(access.getDescription()); }); }
static void maybeReportHybridDiscoveryIssue(PluginDiscoveryMode discoveryMode, PluginScanResult serviceLoadingScanResult, PluginScanResult mergedResult) { SortedSet<PluginDesc<?>> missingPlugins = new TreeSet<>(); mergedResult.forEach(missingPlugins::add); serviceLoadingScanResult.forEach(missingPlugins::remove); if (missingPlugins.isEmpty()) { if (discoveryMode == PluginDiscoveryMode.HYBRID_WARN || discoveryMode == PluginDiscoveryMode.HYBRID_FAIL) { log.warn("All plugins have ServiceLoader manifests, consider reconfiguring {}={}", WorkerConfig.PLUGIN_DISCOVERY_CONFIG, PluginDiscoveryMode.SERVICE_LOAD); } } else { String message = String.format( "One or more plugins are missing ServiceLoader manifests and may not be usable with %s=%s: %s%n" + "Read the documentation at %s for instructions on migrating your plugins " + "to take advantage of the performance improvements of %s mode.", WorkerConfig.PLUGIN_DISCOVERY_CONFIG, PluginDiscoveryMode.SERVICE_LOAD, missingPlugins.stream() .map(pluginDesc -> pluginDesc.location() + "\t" + pluginDesc.className() + "\t" + pluginDesc.type() + "\t" + pluginDesc.version()) .collect(Collectors.joining("\n", "[\n", "\n]")), "https://kafka.apache.org/documentation.html#connect_plugindiscovery", PluginDiscoveryMode.SERVICE_LOAD ); if (discoveryMode == PluginDiscoveryMode.HYBRID_WARN) { log.warn("{} To silence this warning, set {}={} in the worker config.", message, WorkerConfig.PLUGIN_DISCOVERY_CONFIG, PluginDiscoveryMode.ONLY_SCAN); } else if (discoveryMode == PluginDiscoveryMode.HYBRID_FAIL) { throw new ConnectException(String.format("%s To silence this error, set %s=%s in the worker config.", message, WorkerConfig.PLUGIN_DISCOVERY_CONFIG, PluginDiscoveryMode.HYBRID_WARN)); } } }
@Test public void testHybridFailNoPlugins() { try (LogCaptureAppender logCaptureAppender = LogCaptureAppender.createAndRegister(Plugins.class)) { Plugins.maybeReportHybridDiscoveryIssue(PluginDiscoveryMode.HYBRID_FAIL, empty, empty); assertTrue(logCaptureAppender.getEvents().stream().anyMatch(e -> e.getLevel().equals("WARN") && e.getMessage().contains(WorkerConfig.PLUGIN_DISCOVERY_CONFIG) )); } }
@PostMapping @Secured(resource = AuthConstants.CONSOLE_RESOURCE_NAME_PREFIX + "namespaces", action = ActionTypes.WRITE) public Boolean createNamespace(@RequestParam("customNamespaceId") String namespaceId, @RequestParam("namespaceName") String namespaceName, @RequestParam(value = "namespaceDesc", required = false) String namespaceDesc) { if (StringUtils.isBlank(namespaceId)) { namespaceId = UUID.randomUUID().toString(); } else { namespaceId = namespaceId.trim(); if (!namespaceIdCheckPattern.matcher(namespaceId).matches()) { return false; } if (namespaceId.length() > NAMESPACE_ID_MAX_LENGTH) { return false; } // check unique if (namespacePersistService.tenantInfoCountByTenantId(namespaceId) > 0) { return false; } } // contains illegal chars if (!namespaceNameCheckPattern.matcher(namespaceName).matches()) { return false; } try { return namespaceOperationService.createNamespace(namespaceId, namespaceName, namespaceDesc); } catch (NacosException e) { return false; } }
@Test void testCreateNamespaceWithCustomId() throws Exception { namespaceController.createNamespace("test-Id", "testName", "testDesc"); verify(namespaceOperationService).createNamespace("test-Id", "testName", "testDesc"); }
public static <T> CheckedSupplier<T> recover(CheckedSupplier<T> supplier, CheckedFunction<Throwable, T> exceptionHandler) { return () -> { try { return supplier.get(); } catch (Throwable throwable) { return exceptionHandler.apply(throwable); } }; }
@Test public void shouldRecoverFromException() throws Throwable { CheckedSupplier<String> callable = () -> { throw new IOException("BAM!"); }; CheckedSupplier<String> callableWithRecovery = CheckedFunctionUtils.recover(callable, (ex) -> "Bla"); String result = callableWithRecovery.get(); assertThat(result).isEqualTo("Bla"); }
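// Usage sketch for the combinator above; the handler can recover selectively and rethrow
// everything else. Assumes the CheckedSupplier/CheckedFunction types shown here are on the classpath.
public class RecoverSketch {
    public static void main(String[] args) throws Throwable {
        CheckedSupplier<String> flaky = () -> { throw new java.io.IOException("BAM!"); };
        CheckedSupplier<String> guarded = CheckedFunctionUtils.recover(flaky, ex -> {
            if (ex instanceof java.io.IOException) {
                return "fallback"; // recover only from I/O failures
            }
            throw ex; // anything else still propagates
        });
        System.out.println(guarded.get()); // fallback
    }
}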
public long cardinality() { // The initial guess of 1 may seem awful, but this converges quickly, and starting small returns better results for small cardinalities. // This generally takes <= 40 iterations, even for cardinalities as large as 10^33. double guess = 1; double changeInGuess = Double.POSITIVE_INFINITY; int iterations = 0; while (Math.abs(changeInGuess) > 0.1 && iterations < MAX_ESTIMATION_ITERATIONS) { changeInGuess = -logLikelihoodFirstDerivative(guess) / logLikelihoodSecondDerivative(guess); guess += changeInGuess; iterations += 1; } return Math.max(0, Math.round(guess)); }
@Test public void testSimulatedCardinalityEstimates() { // Instead of creating sketches by adding items, we simulate them for fast testing of huge cardinalities. // For reference, 10^33 is one decillion. // The goal here is to test general functionality and numerical stability. int[] magnitudes = {6, 9, 12, 15, 18, 21, 24, 27, 30, 33}; double[] epsilons = {4, SfmSketch.NON_PRIVATE_EPSILON}; for (int mag : magnitudes) { int n = (int) Math.pow(10, mag); for (double eps : epsilons) { SfmSketch sketch = createSketchWithTargetCardinality(4096, 24, eps, n); assertEquals(sketch.cardinality(), n, n * 0.1); } } }
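// The estimator above is plain Newton–Raphson on the log-likelihood: each step moves the
// guess by -f'(g)/f''(g). The same loop shape on a toy objective f(x) = -(x - 5)^2:
public class NewtonSketch {
    public static void main(String[] args) {
        double guess = 1;
        double changeInGuess = Double.POSITIVE_INFINITY;
        int iterations = 0;
        while (Math.abs(changeInGuess) > 0.1 && iterations < 1000) {
            double firstDerivative = -2 * (guess - 5); // f'(x)
            double secondDerivative = -2;              // f''(x)
            changeInGuess = -firstDerivative / secondDerivative;
            guess += changeInGuess;
            iterations++;
        }
        System.out.println(guess); // 5.0 — converges in one step, since the objective is quadratic
    }
}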
public static long getNumSector(String requestSize, String sectorSize) { Double memSize = Double.parseDouble(requestSize); Double sectorBytes = Double.parseDouble(sectorSize); Double nSectors = memSize / sectorBytes; Double memSizeKB = memSize / 1024; Double memSizeGB = memSize / (1024 * 1024 * 1024); Double memSize100GB = memSizeGB / 100; // allocation bitmap file: one bit per sector Double allocBitmapSize = nSectors / 8; // extend overflow file: 4MB, plus 4MB per 100GB Double extOverflowFileSize = memSize100GB * 1024 * 1024 * 4; // journal file: 8MB, plus 8MB per 100GB Double journalFileSize = memSize100GB * 1024 * 1024 * 8; // catalog file: 10bytes per KB Double catalogFileSize = memSizeKB * 10; // hot files: 5bytes per KB Double hotFileSize = memSizeKB * 5; // quota users file and quota groups file Double quotaUsersFileSize = (memSizeGB * 256 + 1) * 64; Double quotaGroupsFileSize = (memSizeGB * 32 + 1) * 64; Double metadataSize = allocBitmapSize + extOverflowFileSize + journalFileSize + catalogFileSize + hotFileSize + quotaUsersFileSize + quotaGroupsFileSize; Double allocSize = memSize + metadataSize; Double numSectors = allocSize / sectorBytes; System.out.println(numSectors.longValue() + 1); // round up return numSectors.longValue() + 1; }
@Test public void getSectorTestGB() { String testRequestSize = "1073741824"; // 1GB String testSectorSize = "512"; long result = HFSUtils.getNumSector(testRequestSize, testSectorSize); assertEquals(2128667L, result); // 1GB/512B = 2,097,152 data sectors; metadata overhead brings the total to 2,128,667 }
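// A back-of-the-envelope check of the value asserted above for 1 GB with 512-byte
// sectors, mirroring the formula in getNumSector (constants copied from it):
public class SectorMath {
    public static void main(String[] args) {
        double memSize = 1_073_741_824d;               // 1 GB
        double sector = 512d;
        double dataSectors = memSize / sector;         // 2,097,152
        double gb = memSize / (1024 * 1024 * 1024);
        double per100Gb = gb / 100;
        double metadata = dataSectors / 8              // allocation bitmap: one bit per sector
                + per100Gb * 1024 * 1024 * 4           // extent overflow file
                + per100Gb * 1024 * 1024 * 8           // journal file
                + (memSize / 1024) * 10                // catalog file: 10 bytes per KB
                + (memSize / 1024) * 5                 // hot files: 5 bytes per KB
                + (gb * 256 + 1) * 64                  // quota users file
                + (gb * 32 + 1) * 64;                  // quota groups file
        long total = (long) ((memSize + metadata) / sector) + 1;
        System.out.println(total); // 2128667
    }
}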
protected String createPermissionString(ActiveMQDestination dest, String verb) { if (dest.isComposite()) { throw new IllegalArgumentException("Use createPermissionStrings for composite destinations."); } StringBuilder sb = new StringBuilder(); if (permissionStringPrefix != null) { sb.append(permissionStringPrefix); if (!permissionStringPrefix.endsWith(":")) { sb.append(":"); } } if (dest.isTemporary()) { sb.append("temp-"); } if (dest.isTopic()) { sb.append("topic:"); } else { sb.append("queue:"); } sb.append(dest.getPhysicalName()); sb.append(':'); sb.append(verb); return sb.toString(); }
@Test(expected = IllegalArgumentException.class) public void testCreatePermissionStringWithCompositeDestination() { ActiveMQTopic topicA = new ActiveMQTopic("A"); ActiveMQTopic topicB = new ActiveMQTopic("B"); ActiveMQDestination composite = new AnyDestination(new ActiveMQDestination[]{topicA, topicB}); resolver.createPermissionString(composite, "read"); }
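// A hypothetical standalone rendering of the permission-string layout built above:
// [prefix:][temp-](topic|queue):<physicalName>:<verb>
public class PermissionStringSketch {
    static String permission(String prefix, boolean temporary, boolean topic, String physicalName, String verb) {
        StringBuilder sb = new StringBuilder();
        if (prefix != null) {
            sb.append(prefix);
            if (!prefix.endsWith(":")) {
                sb.append(':');
            }
        }
        if (temporary) {
            sb.append("temp-");
        }
        sb.append(topic ? "topic:" : "queue:");
        sb.append(physicalName).append(':').append(verb);
        return sb.toString();
    }

    public static void main(String[] args) {
        System.out.println(permission("activemq", false, true, "FOO.BAR", "read")); // activemq:topic:FOO.BAR:read
        System.out.println(permission(null, true, false, "TMP.Q", "write"));        // temp-queue:TMP.Q:write
    }
}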
@Override public void destroy() { super.destroy(); // remove child listener Set<URL> urls = zkListeners.keySet(); for (URL url : urls) { ConcurrentMap<NotifyListener, ChildListener> map = zkListeners.get(url); if (CollectionUtils.isEmptyMap(map)) { continue; } Collection<ChildListener> childListeners = map.values(); if (CollectionUtils.isEmpty(childListeners)) { continue; } if (ANY_VALUE.equals(url.getServiceInterface())) { String root = toRootPath(); childListeners.stream().forEach(childListener -> zkClient.removeChildListener(root, childListener)); } else { for (String path : toCategoriesPath(url)) { childListeners.stream().forEach(childListener -> zkClient.removeChildListener(path, childListener)); } } } zkListeners.clear(); // Just release zkClient reference, but can not close zk client here for zk client is shared somewhere else. // See org.apache.dubbo.remoting.zookeeper.AbstractZookeeperTransporter#destroy() zkClient = null; }
@Test void testDestroy() { zookeeperRegistry.destroy(); assertThat(zookeeperRegistry.isAvailable(), is(false)); }
public Schema toKsqlSchema(final Schema schema) { try { final Schema rowSchema = toKsqlFieldSchema(schema); if (rowSchema.type() != Schema.Type.STRUCT) { throw new KsqlException("KSQL stream/table schema must be structured"); } if (rowSchema.fields().isEmpty()) { throw new KsqlException("Schema does not include any columns with " + "types that ksqlDB supports." + System.lineSeparator() + "schema: " + FORMATTER.format(schema)); } return rowSchema; } catch (final UnsupportedTypeException e) { throw new KsqlException("Unsupported type at root of schema: " + e.getMessage(), e); } }
@Test public void shouldTranslateTimeTypes() { final Schema connectSchema = SchemaBuilder .struct() .field("timefield", Time.SCHEMA) .field("datefield", Date.SCHEMA) .field("timestampfield", Timestamp.SCHEMA) .build(); final Schema ksqlSchema = translator.toKsqlSchema(connectSchema); assertThat(ksqlSchema.field(nameTranslator.apply("timefield")).schema(), equalTo(OPTIONAL_TIME_SCHEMA)); assertThat(ksqlSchema.field(nameTranslator.apply("datefield")).schema(), equalTo(OPTIONAL_DATE_SCHEMA)); assertThat(ksqlSchema.field(nameTranslator.apply("timestampfield")).schema(), equalTo(OPTIONAL_TIMESTAMP_SCHEMA)); }
@VisibleForTesting public static String parseErrorMessage( String errorMessage, Date now ) { StringBuilder parsed = new StringBuilder(); try { String[] splitString = errorMessage.split( "\\n" ); parsed.append( splitString[1] ).append( "\n" ); for ( int i = 2; i < splitString.length; i++ ) { String dateStr = splitString[i].substring( 0, splitString[i].indexOf( " -" ) ); if ( isDateAfterOrSame( formatDate( now ), dateStr ) ) { parsed.append( splitString[i] ).append( "\n" ); } } } catch ( Exception e ) { return errorMessage; } return parsed.toString(); }
@Test public void parseErrorMessageUsingSameDateTest() { String result = TransPreviewProgressDialog.parseErrorMessage( ERROR_MSG, parseDate( SAME_DATE_STR ) ); assertEquals( FAILED_TO_INIT_MSG + EXPECTED_ERROR_MSG, result ); }
public abstract String getErrorCodeName();
@Test public void testErrorCode() { assertEquals( new ClusterConnectionException(new SocketTimeoutException(), QUERY_STAGE).getErrorCodeName(), "CLUSTER_CONNECTION(SocketTimeoutException)"); assertEquals( new PrestoQueryException(new SQLException(), false, QUERY_STAGE, Optional.of(REMOTE_TASK_ERROR), EMPTY_STATS).getErrorCodeName(), "PRESTO(REMOTE_TASK_ERROR)"); assertEquals( new PrestoQueryException(new SQLException(), false, QUERY_STAGE, Optional.empty(), EMPTY_STATS).getErrorCodeName(), "PRESTO(UNKNOWN)"); }
@Override public OpenstackVtapNetwork createVtapNetwork(Mode mode, Integer networkId, IpAddress serverIp) { checkNotNull(mode, VTAP_DESC_NULL, "mode"); checkNotNull(serverIp, VTAP_DESC_NULL, "serverIp"); DefaultOpenstackVtapNetwork vtapNetwork = DefaultOpenstackVtapNetwork.builder() .mode(mode) .networkId(networkId) .serverIp(serverIp) .build(); return store.createVtapNetwork(VTAP_NETWORK_KEY, vtapNetwork); }
@Test(expected = NullPointerException.class) public void testCreateNullVtapNetwork() { target.createVtapNetwork(null, null, null); }