focal_method | test_case |
---|---|
public Schema addToSchema(Schema schema) {
validate(schema);
schema.addProp(LOGICAL_TYPE_PROP, name);
schema.setLogicalType(this);
return schema;
} | @Test
void decimalFixedPrecisionLimit() {
// 4 bytes can hold up to 9 digits of precision
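// (an n-byte fixed stores a two's-complement unscaled value, so max precision is floor(log10(2^(8n-1) - 1)))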
final Schema schema = Schema.createFixed("aDecimal", null, null, 4);
assertThrows("Should reject precision", IllegalArgumentException.class, "fixed(4) cannot store 10 digits (max 9)",
() -> {
LogicalTypes.decimal(10).addToSchema(schema);
return null;
});
assertNull(LogicalTypes.fromSchemaIgnoreInvalid(schema), "Invalid logical type should not be set on schema");
// 129 bytes can hold up to 310 digits of precision
final Schema schema129 = Schema.createFixed("aDecimal", null, null, 129);
assertThrows("Should reject precision", IllegalArgumentException.class,
"fixed(129) cannot store 311 digits (max 310)", () -> {
LogicalTypes.decimal(311).addToSchema(schema129);
return null;
});
assertNull(LogicalTypes.fromSchemaIgnoreInvalid(schema129), "Invalid logical type should not be set on schema");
} |
@Override
public LookupResult<BrokerKey> handleResponse(Set<BrokerKey> keys, AbstractResponse abstractResponse) {
validateLookupKeys(keys);
MetadataResponse response = (MetadataResponse) abstractResponse;
MetadataResponseData.MetadataResponseBrokerCollection brokers = response.data().brokers();
if (brokers.isEmpty()) {
log.debug("Metadata response contained no brokers. Will backoff and retry");
return LookupResult.empty();
} else {
log.debug("Discovered all brokers {} to send requests to", brokers);
}
Map<BrokerKey, Integer> brokerKeys = brokers.stream().collect(Collectors.toMap(
broker -> new BrokerKey(OptionalInt.of(broker.nodeId())),
MetadataResponseData.MetadataResponseBroker::nodeId
));
return new LookupResult<>(
Collections.singletonList(ANY_BROKER),
Collections.emptyMap(),
brokerKeys
);
} | @Test
public void testHandleResponseWithNoBrokers() {
AllBrokersStrategy strategy = new AllBrokersStrategy(logContext);
MetadataResponseData response = new MetadataResponseData();
AdminApiLookupStrategy.LookupResult<AllBrokersStrategy.BrokerKey> lookupResult = strategy.handleResponse(
AllBrokersStrategy.LOOKUP_KEYS,
new MetadataResponse(response, ApiKeys.METADATA.latestVersion())
);
assertEquals(Collections.emptyMap(), lookupResult.failedKeys);
assertEquals(Collections.emptyMap(), lookupResult.mappedKeys);
} |
@Override
public ServletStream stream() {
return stream;
} | @Test
public void flushBuffer_bufferIsFlushed() throws IOException {
underTest.stream().flushBuffer();
verify(response).flushBuffer();
} |
public static void checkAndThrowAttributeValue(String value)
throws IOException {
if (value == null) {
return;
} else if (value.trim().length() > MAX_LABEL_LENGTH) {
throw new IOException("Attribute value added exceeds " + MAX_LABEL_LENGTH
+ " character(s)");
}
value = value.trim();
if(value.isEmpty()) {
return;
}
boolean match = ATTRIBUTE_VALUE_PATTERN.matcher(value).matches();
if (!match) {
throw new IOException("attribute value should only contains "
+ "{0-9, a-z, A-Z, -, _} and should not started with {-,_}"
+ ", now it is= " + value);
}
} | @Test
void testAttributeValueAddition() {
String[] values =
new String[]{"1_8", "1.8", "ABZ", "ABZ", "az", "a-z", "a_z",
"123456789"};
for (String val : values) {
try {
NodeLabelUtil.checkAndThrowAttributeValue(val);
} catch (Exception e) {
fail("Valid values for NodeAttributeValue :" + val);
}
}
String[] invalidVals = new String[]{"_18", "1,8", "1/5", ".15", "1\\5"};
for (String val : invalidVals) {
try {
NodeLabelUtil.checkAndThrowAttributeValue(val);
fail("Valid values for NodeAttributeValue :" + val);
} catch (Exception e) {
// IGNORE
}
}
} |
@SuppressWarnings("deprecation")
public static String encodeHex(byte[] data) {
int l = data.length;
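// every input byte becomes two hex characters, so the output array is twice as long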
byte[] out = new byte[l << 1];
for (int i = 0; i < l; i++) {
byte b = data[i];
int j = i << 1;
out[j] = DIGITS_LOWER[((0xF0 & b) >>> 4)];
out[j + 1] = DIGITS_LOWER[(0xF & b)];
}
return new String(out, 0, 0, out.length);
} | @Test
public void testEncodeHex() {
assertEquals("", Hex.encodeHex(new byte[0]));
assertEquals("00", Hex.encodeHex(new byte[1]));
assertEquals("00000000", Hex.encodeHex(new byte[4]));
assertEquals("10a3d1ff", Hex.encodeHex(new byte[]{0x10, (byte) 0xa3, (byte) 0xd1, (byte) 0xff}));
} |
public static String stripInstanceAndHost(String metricsName) {
String[] pieces = metricsName.split("\\.");
int len = pieces.length;
if (len <= 1) {
return metricsName;
}
// Master metrics doesn't have hostname included.
if (!pieces[0].equals(MetricsSystem.InstanceType.MASTER.toString())
&& !pieces[0].equals(InstanceType.CLUSTER.toString())
&& pieces.length > 2) {
pieces[len - 1] = null;
}
pieces[0] = null;
return Joiner.on(".").skipNulls().join(pieces);
} | @Test
public void stripInstanceAndHostTest() {
assertEquals("name", MetricsSystem.stripInstanceAndHost("Master.name"));
assertEquals("name", MetricsSystem.stripInstanceAndHost("Worker.name.10_0_0_136"));
assertEquals("name.UFS:ufs", MetricsSystem.stripInstanceAndHost("Client.name.UFS:ufs.0_0_0_0"));
assertEquals("name.UFS:ufs.UFS_TYPE:local",
MetricsSystem.stripInstanceAndHost("Worker.name.UFS:ufs.UFS_TYPE:local.0_0_0_0"));
} |
@Override public void callExtensionPoint( LogChannelInterface logChannelInterface, Object o ) throws KettleException {
AbstractMeta abstractMeta = (AbstractMeta) o;
final EmbeddedMetaStore embeddedMetaStore = abstractMeta.getEmbeddedMetaStore();
RunConfigurationManager embeddedRunConfigurationManager =
EmbeddedRunConfigurationManager.build( embeddedMetaStore );
List<RunConfiguration> runConfigurationList = embeddedRunConfigurationManager.load();
List<String> runConfigurationNames = runConfigurationList.stream().map( RunConfiguration::getName ).collect( Collectors.toList() );
runConfigurationNames.addAll( runConfigurationManager.getNames() );
runConfigurationList.addAll( createSlaveServerRunConfigurations( runConfigurationNames, abstractMeta ) );
for ( RunConfiguration runConfiguration : runConfigurationList ) {
if ( !runConfiguration.getName().equals( DefaultRunConfigurationProvider.DEFAULT_CONFIG_NAME ) ) {
runConfigurationManager.save( runConfiguration );
}
}
} | @Test
public void testCallExtensionPoint() throws Exception {
runConfigurationImportExtensionPoint.callExtensionPoint( log, abstractMeta );
verify( abstractMeta ).getEmbeddedMetaStore();
} |
@Override
public Response submitApplication(ApplicationSubmissionContextInfo newApp, HttpServletRequest hsr)
throws AuthorizationException, IOException, InterruptedException {
long startTime = clock.getTime();
// We verify the parameters to ensure that newApp is not empty and
// that the format of applicationId is correct.
if (newApp == null || newApp.getApplicationId() == null) {
routerMetrics.incrAppsFailedSubmitted();
String errMsg = "Missing ApplicationSubmissionContextInfo or "
+ "applicationSubmissionContext information.";
RouterAuditLogger.logFailure(getUser().getShortUserName(), SUBMIT_NEW_APP, UNKNOWN,
TARGET_WEB_SERVICE, errMsg);
return Response.status(Status.BAD_REQUEST).entity(errMsg).build();
}
try {
String applicationId = newApp.getApplicationId();
RouterServerUtil.validateApplicationId(applicationId);
} catch (IllegalArgumentException e) {
routerMetrics.incrAppsFailedSubmitted();
RouterAuditLogger.logFailure(getUser().getShortUserName(), SUBMIT_NEW_APP, UNKNOWN,
TARGET_WEB_SERVICE, e.getMessage());
return Response.status(Status.BAD_REQUEST).entity(e.getLocalizedMessage()).build();
}
List<SubClusterId> blackList = new ArrayList<>();
try {
int activeSubClustersCount = federationFacade.getActiveSubClustersCount();
int actualRetryNums = Math.min(activeSubClustersCount, numSubmitRetries);
Response response = ((FederationActionRetry<Response>) (retryCount) ->
invokeSubmitApplication(newApp, blackList, hsr, retryCount)).
runWithRetries(actualRetryNums, submitIntervalTime);
if (response != null) {
long stopTime = clock.getTime();
routerMetrics.succeededAppsSubmitted(stopTime - startTime);
return response;
}
} catch (Exception e) {
routerMetrics.incrAppsFailedSubmitted();
RouterAuditLogger.logFailure(getUser().getShortUserName(), SUBMIT_NEW_APP, UNKNOWN,
TARGET_WEB_SERVICE, e.getMessage());
return Response.status(Status.SERVICE_UNAVAILABLE).entity(e.getLocalizedMessage()).build();
}
routerMetrics.incrAppsFailedSubmitted();
String errMsg = String.format("Application %s with appId %s failed to be submitted.",
newApp.getApplicationName(), newApp.getApplicationId());
LOG.error(errMsg);
RouterAuditLogger.logFailure(getUser().getShortUserName(), SUBMIT_NEW_APP, UNKNOWN,
TARGET_WEB_SERVICE, errMsg);
return Response.status(Status.SERVICE_UNAVAILABLE).entity(errMsg).build();
} | @Test
public void testSubmitApplication()
throws YarnException, IOException, InterruptedException {
ApplicationId appId =
ApplicationId.newInstance(Time.now(), 1);
ApplicationSubmissionContextInfo context =
new ApplicationSubmissionContextInfo();
context.setApplicationId(appId.toString());
Response response = interceptor.submitApplication(context, null);
Assert.assertNotNull(response);
Assert.assertEquals(ACCEPTED, response.getStatus());
SubClusterId ci = (SubClusterId) response.getEntity();
SubClusterId scIdResult = stateStoreUtil.queryApplicationHomeSC(appId);
Assert.assertNotNull(scIdResult);
Assert.assertTrue(subClusters.contains(scIdResult));
Assert.assertEquals(ci, scIdResult);
} |
@Override
public String convert(ILoggingEvent event) {
List<KeyValuePair> kvpList = event.getKeyValuePairs();
if (kvpList == null || kvpList.isEmpty()) {
return CoreConstants.EMPTY_STRING;
}
StringBuilder sb = new StringBuilder();
for (int i = 0; i < kvpList.size(); i++) {
KeyValuePair kvp = kvpList.get(i);
if (i != 0)
sb.append(' ');
sb.append(String.valueOf(kvp.key));
sb.append('=');
Character quoteChar = valueQuoteSpec.asChar();
if (quoteChar != null)
sb.append(quoteChar);
sb.append(String.valueOf(kvp.value));
if (quoteChar != null)
sb.append(quoteChar);
}
return sb.toString();
} | @Test
public void testWithOneKVP() {
event.addKeyValuePair(new KeyValuePair("k", "v"));
String result = converter.convert(event);
assertEquals("k=\"v\"", result);
} |
@Override
protected void rename(
List<LocalResourceId> srcResourceIds,
List<LocalResourceId> destResourceIds,
MoveOptions... moveOptions)
throws IOException {
if (moveOptions.length > 0) {
throw new UnsupportedOperationException("Support for move options is not yet implemented.");
}
checkArgument(
srcResourceIds.size() == destResourceIds.size(),
"Number of source files %s must equal number of destination files %s",
srcResourceIds.size(),
destResourceIds.size());
int numFiles = srcResourceIds.size();
for (int i = 0; i < numFiles; i++) {
LocalResourceId src = srcResourceIds.get(i);
LocalResourceId dst = destResourceIds.get(i);
LOG.debug("Renaming {} to {}", src, dst);
File parent = dst.getCurrentDirectory().getPath().toFile();
if (!parent.exists()) {
checkArgument(
parent.mkdirs() || parent.exists(),
"Unable to make output directory %s in order to move into file %s",
parent,
dst.getPath());
}
// Rename the source file, replacing the existing destination.
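// ATOMIC_MOVE asks the filesystem to perform the rename as a single atomic operation where supported.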
Files.move(
src.getPath(),
dst.getPath(),
StandardCopyOption.REPLACE_EXISTING,
StandardCopyOption.ATOMIC_MOVE);
}
} | @Test
public void testMoveFilesWithException() throws Exception {
Path srcPath1 = temporaryFolder.newFile().toPath();
Path srcPath2 = temporaryFolder.newFile().toPath();
Path destPath1 = temporaryFolder.getRoot().toPath().resolve("nonexistentdir").resolve("dest1");
Path destPath2 = srcPath2.resolveSibling("dest2");
createFileWithContent(srcPath1, "content1");
createFileWithContent(srcPath2, "content2");
try (MockedStatic<java.nio.file.Files> mockFiles =
Mockito.mockStatic(java.nio.file.Files.class)) {
mockFiles
.when(() -> java.nio.file.Files.move(any(), any(), any(), any()))
.thenThrow(new RuntimeException("exception while moving files"));
thrown.expect(RuntimeException.class);
localFileSystem.rename(
toLocalResourceIds(ImmutableList.of(srcPath1, srcPath2), false /* isDirectory */),
toLocalResourceIds(ImmutableList.of(destPath1, destPath2), false /* isDirectory */));
}
} |
public String convertToPrintFriendlyString(String phiString) {
if (null == phiString) {
return NULL_REPLACEMENT_VALUE;
} else if (phiString.isEmpty()) {
return EMPTY_REPLACEMENT_VALUE;
}
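// cap the converted length at logPhiMaxBytes when a positive limit is configured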
int conversionLength = (logPhiMaxBytes > 0) ? Integer.min(phiString.length(), logPhiMaxBytes) : phiString.length();
StringBuilder builder = new StringBuilder(conversionLength + STRING_BUFFER_PAD_SIZE);
for (int i = 0; i < conversionLength; ++i) {
appendCharacterAsPrintFriendlyString(builder, phiString.charAt(i));
}
return builder.toString();
} | @Test
public void testConvertToPrintFriendlyStringWithPhiMaxBytes() {
Hl7Util local = new Hl7Util(3, LOG_PHI_TRUE);
String result = local.convertToPrintFriendlyString(TEST_MESSAGE);
assertEquals("MSH", result);
} |
public static ParamType getSchemaFromType(final Type type) {
return getSchemaFromType(type, JAVA_TO_ARG_TYPE);
} | @Test
public void shouldGetDecimalSchemaForBigDecimalClass() {
assertThat(
UdfUtil.getSchemaFromType(BigDecimal.class),
is(ParamTypes.DECIMAL)
);
} |
@Override
public void renameSnapshot(Path path, String snapshotOldName,
String snapshotNewName) throws IOException {
super.renameSnapshot(fullPath(path), snapshotOldName, snapshotNewName);
} | @Test(timeout = 30000)
public void testRenameSnapshot() throws Exception {
Path snapRootPath = new Path("/snapPath");
Path chRootedSnapRootPath = new Path("/a/b/snapPath");
Configuration conf = new Configuration();
conf.setClass("fs.mockfs.impl", MockFileSystem.class, FileSystem.class);
URI chrootUri = URI.create("mockfs://foo/a/b");
ChRootedFileSystem chrootFs = new ChRootedFileSystem(chrootUri, conf);
FileSystem mockFs = ((FilterFileSystem) chrootFs.getRawFileSystem())
.getRawFileSystem();
chrootFs.renameSnapshot(snapRootPath, "snapOldName", "snapNewName");
verify(mockFs).renameSnapshot(chRootedSnapRootPath, "snapOldName",
"snapNewName");
} |
@Nullable
public synchronized Beacon track(@NonNull Beacon beacon) {
Beacon trackedBeacon = null;
if (beacon.isMultiFrameBeacon() || beacon.getServiceUuid() != -1) {
trackedBeacon = trackGattBeacon(beacon);
}
else {
trackedBeacon = beacon;
}
return trackedBeacon;
} | @Test
public void gattBeaconExtraDataIsNotReturned() {
Beacon extraDataBeacon = getGattBeaconExtraData();
ExtraDataBeaconTracker tracker = new ExtraDataBeaconTracker();
Beacon trackedBeacon = tracker.track(extraDataBeacon);
assertNull("trackedBeacon should be null", trackedBeacon);
} |
public SqlType getExpressionSqlType(final Expression expression) {
return getExpressionSqlType(expression, Collections.emptyMap());
} | @Test
public void shouldProcessDateLiteral() {
assertThat(expressionTypeManager.getExpressionSqlType(new DateLiteral(new Date(86400000))), is(SqlTypes.DATE));
} |
@Override
public Object[] toArray() {
Object[] array = new Object[size];
for (int i = 0; i < size; i++) {
array[i] = i;
}
return array;
} | @Test
public void toArray() throws Exception {
RangeSet rs = new RangeSet(4);
Object[] array = rs.toArray();
assertEquals(4, array.length);
assertEquals(0, array[0]);
assertEquals(1, array[1]);
assertEquals(2, array[2]);
assertEquals(3, array[3]);
} |
@Override
public boolean equals(Object obj)
{
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
ComplexResourceKey<?, ?> other = (ComplexResourceKey<?, ?>) obj;
// Key cannot be null
return key.equals(other.key)
&& (params == null ? other.params == null : (params.equals(other.params)));
} | @Test
public void testEquals() throws CloneNotSupportedException
{
DataMap keyMap = new DataMap();
keyMap.put("keyField1", "keyValue1");
EmptyRecord key1 = new EmptyRecord(keyMap);
DataMap paramMap = new DataMap();
paramMap.put("paramField1", "paramValue1");
EmptyRecord param1 = new EmptyRecord(paramMap);
ComplexResourceKey<EmptyRecord, EmptyRecord> complexKey1 =
new ComplexResourceKey<>(key1, param1);
EmptyRecord key2 = key1.copy();
EmptyRecord param2 = param1.copy();
ComplexResourceKey<EmptyRecord, EmptyRecord> complexKey2 =
new ComplexResourceKey<>(key2, param2);
Assert.assertTrue(complexKey1.equals(complexKey2));
// Different key part
complexKey2.key.data().put("keyField1", "keyValue2");
Assert.assertFalse(complexKey1.equals(complexKey2));
complexKey2.key.data().put("keyField1", "keyValue1");
// Different param part
complexKey2.params.data().put("paramField1", "paramValue2");
Assert.assertFalse(complexKey1.equals(complexKey2));
complexKey2.params.data().put("paramField1", "paramValue1");
// One param null, other not
complexKey1 = new ComplexResourceKey<>(key1, null);
complexKey2 = new ComplexResourceKey<>(key2, param2);
Assert.assertFalse(complexKey1.equals(complexKey2));
Assert.assertFalse(complexKey2.equals(complexKey1));
// Both param null
complexKey2 = new ComplexResourceKey<>(key2, null);
Assert.assertTrue(complexKey1.equals(complexKey2));
} |
@Override
public synchronized DefaultConnectClient get(
final Optional<String> ksqlAuthHeader,
final List<Entry<String, String>> incomingRequestHeaders,
final Optional<KsqlPrincipal> userPrincipal
) {
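// lazily build the default auth header once and reuse it across calls (the method is synchronized)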
if (defaultConnectAuthHeader == null) {
defaultConnectAuthHeader = buildDefaultAuthHeader();
}
final Map<String, Object> configWithPrefixOverrides =
ksqlConfig.valuesWithPrefixOverride(KsqlConfig.KSQL_CONNECT_PREFIX);
return new DefaultConnectClient(
ksqlConfig.getString(KsqlConfig.CONNECT_URL_PROPERTY),
buildAuthHeader(ksqlAuthHeader, incomingRequestHeaders),
requestHeadersExtension
.map(extension -> extension.getHeaders(userPrincipal))
.orElse(Collections.emptyMap()),
Optional.ofNullable(newSslContext(configWithPrefixOverrides)),
shouldVerifySslHostname(configWithPrefixOverrides),
ksqlConfig.getLong(KsqlConfig.CONNECT_REQUEST_TIMEOUT_MS)
);
} | @Test
public void shouldNotFailOnMissingCredentials() {
// Given:
givenCustomBasicAuthHeader();
// no credentials file present
// When:
final DefaultConnectClient connectClient =
connectClientFactory.get(Optional.empty(), Collections.emptyList(), Optional.empty());
// Then:
assertThat(connectClient.getRequestHeaders(), is(EMPTY_HEADERS));
} |
public static GitVersion parse(String versionFromConsoleOutput) {
String[] lines = versionFromConsoleOutput.split("\n");
String firstLine = lines[0];
Matcher m = GIT_VERSION_PATTERN.matcher(firstLine);
if (m.find()) {
try {
return new GitVersion(parseVersion(m));
} catch (Exception e) {
throw bomb("cannot parse git version : " + versionFromConsoleOutput);
}
}
throw bomb("cannot parse git version : " + versionFromConsoleOutput);
} | @Test
public void shouldThrowExceptionWhenGitVersionCannotBeParsed() {
String invalidGitVersion = "git version ga5ed0asdasd.ga5ed0";
assertThatExceptionOfType(RuntimeException.class)
.isThrownBy(() -> GitVersion.parse(invalidGitVersion))
.withMessage("cannot parse git version : " + invalidGitVersion);
} |
@CanIgnoreReturnValue
@Override
public V put(K key, V value) {
if (key == null) {
throw new NullPointerException("key == null");
}
if (value == null && !allowNullValues) {
throw new NullPointerException("value == null");
}
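// find(key, true) returns the existing node for this key or creates and links a new one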
Node<K, V> created = find(key, true);
V result = created.value;
created.value = value;
return result;
} | @Test
public void testEqualsAndHashCode() throws Exception {
LinkedTreeMap<String, Integer> map1 = new LinkedTreeMap<>();
map1.put("A", 1);
map1.put("B", 2);
map1.put("C", 3);
map1.put("D", 4);
LinkedTreeMap<String, Integer> map2 = new LinkedTreeMap<>();
map2.put("C", 3);
map2.put("B", 2);
map2.put("D", 4);
map2.put("A", 1);
MoreAsserts.assertEqualsAndHashCode(map1, map2);
} |
public <ConfigType extends ConfigInstance> ConfigType toInstance(Class<ConfigType> clazz, String configId) {
return ConfigInstanceUtil.getNewInstance(clazz, configId, this);
} | @Test
public void non_existent_struct_in_array_of_struct_in_payload_is_ignored() {
Slime slime = new Slime();
Cursor nestedArrEntry = slime.setObject().setArray("nestedarr").addObject();
addStructFields(nestedArrEntry.setObject("inner"), "existing", "MALE", null);
addStructFields(nestedArrEntry.setObject("non_existent"), "non-existent", "MALE", null);
StructtypesConfig config = new ConfigPayload(slime).toInstance(StructtypesConfig.class, "");
assertThat(config.nestedarr(0).inner().name(), is("existing"));
} |
public static PositionBound at(final Position position) {
return new PositionBound(position);
} | @Test
public void shouldEqualPosition() {
final PositionBound bound1 = PositionBound.at(Position.emptyPosition());
final PositionBound bound2 = PositionBound.at(Position.emptyPosition());
assertEquals(bound1, bound2);
} |
public Encoding encode(String text) { return encode(text, Language.UNKNOWN); } | @Test
void truncates_to_max_length() throws IOException {
int maxLength = 3;
var builder = new HuggingFaceTokenizer.Builder()
.addDefaultModel(decompressModelFile(tmp, "bert-base-uncased"))
.setMaxLength(maxLength)
.setTruncation(true);
String input = "what was the impact of the manhattan project";
try (var tokenizerWithoutSpecialTokens = builder.addSpecialTokens(false).build();
var tokenizerWithSpecialTokens = builder.addSpecialTokens(true).build()) {
var encodingWithoutSpecialTokens = tokenizerWithoutSpecialTokens.encode(input);
assertMaxLengthRespected(maxLength, encodingWithoutSpecialTokens);
assertNotEquals(101, encodingWithoutSpecialTokens.ids().get(0));
var encodingWithSpecialTokens = tokenizerWithSpecialTokens.encode(input);
assertMaxLengthRespected(maxLength, encodingWithSpecialTokens);
assertEquals(101, encodingWithSpecialTokens.ids().get(0));
}
} |
@Override
public Byte getByte(K name) {
return null;
} | @Test
public void testGetByte() {
assertNull(HEADERS.getByte("name1"));
} |
public Stream<Hit> stream() {
if (nPostingLists == 0) {
return Stream.empty();
}
return StreamSupport.stream(new PredicateSpliterator(), false);
} | @Test
void requireThatMinFeatureIsUsedToPruneResults() {
PredicateSearch search = createPredicateSearch(
new byte[]{3, 1},
postingList(
SubqueryBitmap.ALL_SUBQUERIES,
entry(0, 0x000100ff),
entry(1, 0x000100ff)));
assertEquals(List.of(new Hit(1)).toString(), search.stream().toList().toString());
} |
public String generateError(String error) {
Map<String, String> values = ImmutableMap.of(
PARAMETER_TOTAL_WIDTH, valueOf(MARGIN + computeWidth(error) + MARGIN),
PARAMETER_LABEL, error);
StringSubstitutor strSubstitutor = new StringSubstitutor(values);
return strSubstitutor.replace(errorTemplate);
} | @Test
public void generate_error() {
initSvgGenerator();
String result = underTest.generateError("Error");
assertThat(result).contains("<text", ">Error</text>");
} |
public static CacheStats empty() {
return EMPTY_STATS;
} | @Test
public void empty() {
var stats = CacheStats.of(0, 0, 0, 0, 0, 0, 0);
checkStats(stats, 0, 0, 1.0, 0, 0.0, 0, 0, 0.0, 0, 0, 0.0, 0, 0);
assertThat(stats).isEqualTo(CacheStats.empty());
assertThat(stats.equals(null)).isFalse();
assertThat(stats).isNotEqualTo(new Object());
assertThat(stats).isEqualTo(CacheStats.empty());
assertThat(stats.hashCode()).isEqualTo(CacheStats.empty().hashCode());
assertThat(stats.toString()).isEqualTo(CacheStats.empty().toString());
} |
public boolean eval(ContentFile<?> file) {
// TODO: detect the case where a column is missing from the file using file's max field id.
return new MetricsEvalVisitor().eval(file);
} | @Test
public void testMissingStats() {
DataFile missingStats = new TestDataFile("file.parquet", Row.of(), 50);
Expression[] exprs =
new Expression[] {
lessThan("no_stats", 5),
lessThanOrEqual("no_stats", 30),
equal("no_stats", 70),
greaterThan("no_stats", 78),
greaterThanOrEqual("no_stats", 90),
notEqual("no_stats", 101),
isNull("no_stats"),
notNull("no_stats"),
isNaN("all_nans"),
notNaN("all_nans")
};
for (Expression expr : exprs) {
boolean shouldRead = new StrictMetricsEvaluator(SCHEMA, expr).eval(missingStats);
assertThat(shouldRead)
.as("Should never match when stats are missing for expr: " + expr)
.isFalse();
}
} |
@Override
public ExecuteContext before(ExecuteContext context) {
if (!(context.getObject() instanceof Builder)) {
return context;
}
init();
Builder builder = (Builder) context.getObject();
Optional<Object> filters = ReflectUtils.getFieldValue(builder, "filters");
if (filters.isPresent()) {
List<ExchangeFilterFunction> list = (List<ExchangeFilterFunction>) filters.get();
for (ExchangeFilterFunction filterFunction : list) {
if (filterFunction instanceof RouterExchangeFilterFunction) {
// If it has already been injected and retried, it will not be injected again
return context;
}
}
// When a filter is present, inject into the first one
list.add(0, new RouterExchangeFilterFunction());
return context;
}
builder.filter(new RouterExchangeFilterFunction());
return context;
} | @Test
public void testBefore() {
// When no filter exists
interceptor.before(context);
List<ExchangeFilterFunction> list = (List<ExchangeFilterFunction>) ReflectUtils
.getFieldValue(builder, "filters").orElse(null);
Assert.assertNotNull(list);
Assert.assertTrue(list.get(0) instanceof RouterExchangeFilterFunction);
// When the RouterExchangeFilterFunction already exists
interceptor.before(context);
list = (List<ExchangeFilterFunction>) ReflectUtils.getFieldValue(builder, "filters").orElse(null);
Assert.assertNotNull(list);
Assert.assertTrue(list.get(0) instanceof RouterExchangeFilterFunction);
Assert.assertEquals(1, list.size());
// Clear the data
ReflectUtils.setFieldValue(builder, "filters", null);
// Test the case where you already have a filter
ExchangeFilterFunction function = (clientRequest, exchangeFunction) -> exchangeFunction.exchange(clientRequest);
builder.filter(function);
interceptor.before(context);
list = (List<ExchangeFilterFunction>) ReflectUtils.getFieldValue(builder, "filters").orElse(null);
Assert.assertNotNull(list);
Assert.assertEquals(2, list.size());
Assert.assertTrue(list.get(0) instanceof RouterExchangeFilterFunction);
Assert.assertEquals(function, list.get(1));
} |
private static String getProperty(String name, Configuration configuration) {
return Optional.of(configuration.getStringArray(relaxPropertyName(name)))
.filter(values -> values.length > 0)
.map(Arrays::stream)
.map(stream -> stream.collect(Collectors.joining(",")))
.orElse(null);
} | @Test
public void assertPropertiesFromFSSpec() {
Map<String, String> configs = new HashMap<>();
configs.put("config.property.1", "val1");
configs.put("config.property.2", "val2");
configs.put("config.property.3", "val3");
PinotFSSpec pinotFSSpec = new PinotFSSpec();
pinotFSSpec.setConfigs(configs);
PinotConfiguration pinotConfiguration = new PinotConfiguration(pinotFSSpec);
Assert.assertEquals(pinotConfiguration.getProperty("config.property.1"), "val1");
Assert.assertEquals(pinotConfiguration.getProperty("config.property.2"), "val2");
Assert.assertEquals(pinotConfiguration.getProperty("config.property.3"), "val3");
// Asserts no error occurs when no configuration is provided in the spec.
new PinotConfiguration(new PinotFSSpec());
} |
@Override
public boolean setProperties(Namespace namespace, Map<String, String> properties)
throws NoSuchNamespaceException {
if (!namespaceExists(namespace)) {
throw new NoSuchNamespaceException("Namespace does not exist: %s", namespace);
}
Preconditions.checkNotNull(properties, "Invalid properties to set: null");
if (properties.isEmpty()) {
return false;
}
Preconditions.checkArgument(
!properties.containsKey(NAMESPACE_EXISTS_PROPERTY),
"Cannot set reserved property: %s",
NAMESPACE_EXISTS_PROPERTY);
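// compare against current properties to split the request into inserts (new keys) and updates (existing keys)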
Map<String, String> startingProperties = fetchProperties(namespace);
Map<String, String> inserts = Maps.newHashMap();
Map<String, String> updates = Maps.newHashMap();
for (String key : properties.keySet()) {
String value = properties.get(key);
if (startingProperties.containsKey(key)) {
updates.put(key, value);
} else {
inserts.put(key, value);
}
}
boolean hadInserts = false;
if (!inserts.isEmpty()) {
hadInserts = insertProperties(namespace, inserts);
}
boolean hadUpdates = false;
if (!updates.isEmpty()) {
hadUpdates = updateProperties(namespace, updates);
}
return hadInserts || hadUpdates;
} | @Test
public void testSetProperties() {
Namespace testNamespace = Namespace.of("testDb", "ns1", "ns2");
Map<String, String> testMetadata =
ImmutableMap.of("key_1", "value_1", "key_2", "value_2", "key_3", "value_3");
catalog.createNamespace(testNamespace, testMetadata);
// Add more properties to set to test insert and update
Map<String, String> propertiesToSet =
ImmutableMap.of(
"key_5",
"value_5",
"key_3",
"new_value_3",
"key_1",
"new_value_1",
"key_4",
"value_4",
"key_2",
"new_value_2");
assertThat(catalog.namespaceExists(testNamespace)).isTrue();
assertThat(catalog.setProperties(testNamespace, propertiesToSet)).isTrue();
Map<String, String> allProperties = catalog.loadNamespaceMetadata(testNamespace);
assertThat(allProperties).hasSize(6);
Map<String, String> namespaceProperties = catalog.loadNamespaceMetadata(testNamespace);
assertThat(propertiesToSet.keySet())
.as("All new keys should be in the namespace properties")
.isEqualTo(Sets.intersection(propertiesToSet.keySet(), namespaceProperties.keySet()));
// values should match
for (Map.Entry<String, String> keyValue : propertiesToSet.entrySet()) {
assertThat(namespaceProperties)
.as("Value for key " + keyValue.getKey() + " should match")
.containsEntry(keyValue.getKey(), keyValue.getValue());
}
} |
@Override
public int hashCode() {
return value ? Boolean.TRUE.hashCode() : Boolean.FALSE.hashCode();
} | @Test
void requireThatHashCodeIsImplemented() {
assertEquals(new BooleanPredicate(true).hashCode(), new BooleanPredicate(true).hashCode());
assertEquals(new BooleanPredicate(false).hashCode(), new BooleanPredicate(false).hashCode());
} |
String upload(File report) {
LOG.debug("Upload report");
long startTime = System.currentTimeMillis();
Part filePart = new Part(MediaTypes.ZIP, report);
PostRequest post = new PostRequest("api/ce/submit")
.setMediaType(MediaTypes.PROTOBUF)
.setParam("projectKey", moduleHierarchy.root().key())
.setParam("projectName", moduleHierarchy.root().getOriginalName())
.setPart("report", filePart);
ciConfiguration.getDevOpsPlatformInfo().ifPresent(devOpsPlatformInfo -> {
post.setParam(CHARACTERISTIC, buildCharacteristicParam(DEVOPS_PLATFORM_URL, devOpsPlatformInfo.getUrl()));
post.setParam(CHARACTERISTIC, buildCharacteristicParam(DEVOPS_PLATFORM_PROJECT_IDENTIFIER, devOpsPlatformInfo.getProjectIdentifier()));
});
String branchName = branchConfiguration.branchName();
if (branchName != null) {
if (branchConfiguration.branchType() != PULL_REQUEST) {
post.setParam(CHARACTERISTIC, buildCharacteristicParam(CeTaskCharacteristics.BRANCH, branchName));
post.setParam(CHARACTERISTIC, buildCharacteristicParam(BRANCH_TYPE, branchConfiguration.branchType().name()));
} else {
post.setParam(CHARACTERISTIC, buildCharacteristicParam(CeTaskCharacteristics.PULL_REQUEST, branchConfiguration.pullRequestKey()));
}
}
WsResponse response;
try {
post.setWriteTimeOutInMs(properties.reportPublishTimeout() * 1000);
response = wsClient.call(post);
} catch (Exception e) {
throw new IllegalStateException("Failed to upload report: " + e.getMessage(), e);
}
try {
response.failIfNotSuccessful();
} catch (HttpException e) {
throw MessageException.of(String.format("Server failed to process report. Please check server logs: %s", DefaultScannerWsClient.createErrorMessage(e)));
}
try (InputStream protobuf = response.contentStream()) {
return Ce.SubmitResponse.parser().parseFrom(protobuf).getTaskId();
} catch (Exception e) {
throw new RuntimeException(e);
} finally {
long stopTime = System.currentTimeMillis();
LOG.info("Analysis report uploaded in " + (stopTime - startTime) + "ms");
}
} | @Test
public void send_pull_request_characteristic() throws Exception {
String branchName = "feature";
String pullRequestId = "pr-123";
when(branchConfiguration.branchName()).thenReturn(branchName);
when(branchConfiguration.branchType()).thenReturn(PULL_REQUEST);
when(branchConfiguration.pullRequestKey()).thenReturn(pullRequestId);
WsResponse response = mock(WsResponse.class);
PipedOutputStream out = new PipedOutputStream();
PipedInputStream in = new PipedInputStream(out);
Ce.SubmitResponse.newBuilder().build().writeTo(out);
out.close();
when(response.failIfNotSuccessful()).thenReturn(response);
when(response.contentStream()).thenReturn(in);
when(wsClient.call(any(WsRequest.class))).thenReturn(response);
underTest.upload(reportTempFolder.newFile());
ArgumentCaptor<WsRequest> capture = ArgumentCaptor.forClass(WsRequest.class);
verify(wsClient).call(capture.capture());
WsRequest wsRequest = capture.getValue();
assertThat(wsRequest.getParameters().getKeys()).hasSize(2);
assertThat(wsRequest.getParameters().getValues("projectKey")).containsExactly("org.sonarsource.sonarqube:sonarqube");
assertThat(wsRequest.getParameters().getValues("characteristic"))
.containsExactlyInAnyOrder("pullRequest=" + pullRequestId);
} |
@Override
public ResultSet getIndexInfo(final String catalog, final String schema, final String table, final boolean unique, final boolean approximate) {
return null;
} | @Test
void assertGetIndexInfo() {
assertNull(metaData.getIndexInfo("", "", "", false, false));
} |
@Override
public String serviceToUrl(String protocol, String serviceId, String tag, String requestKey) {
if(StringUtils.isBlank(serviceId)) {
logger.debug("The serviceId cannot be blank");
return null;
}
URL url = loadBalance.select(discovery(protocol, serviceId, tag), serviceId, tag, requestKey);
if (url != null) {
logger.debug("Final url after load balance = {}.", url);
// construct a url in string
return protocol + "://" + url.getHost() + ":" + url.getPort();
} else {
logger.debug("The service: {} cannot be found from service discovery.", serviceId);
return null;
}
} | @Test
public void testServiceToSingleUrlWithEnv() {
String s = cluster.serviceToUrl("https", "com.networknt.chainwriter-1.0.0", "0000", null);
Assert.assertTrue("https://localhost:8444".equals(s));
} |
@Override
public <VO, VR> KStream<K, VR> leftJoin(final KStream<K, VO> otherStream,
final ValueJoiner<? super V, ? super VO, ? extends VR> joiner,
final JoinWindows windows) {
return leftJoin(otherStream, toValueJoinerWithKey(joiner), windows);
} | @SuppressWarnings("deprecation")
@Test
public void shouldNotAllowNullValueJoinerOnLeftJoinWithStreamJoined() {
final NullPointerException exception = assertThrows(
NullPointerException.class,
() -> testStream.leftJoin(
testStream,
(ValueJoiner<? super String, ? super String, ?>) null,
JoinWindows.of(ofMillis(10)),
StreamJoined.as("name")));
assertThat(exception.getMessage(), equalTo("joiner can't be null"));
} |
@Udf
public int field(
@UdfParameter final String str,
@UdfParameter final String... args
) {
if (str == null || args == null) {
return 0;
}
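// returns the 1-based position of the first match, or 0 if absent (mirrors SQL's FIELD function)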
for (int i = 0; i < args.length; i++) {
if (str.equals(args[i])) {
return i + 1;
}
}
return 0;
} | @Test
public void shouldFindArgumentWhenOneIsNull() {
// When:
final int pos = field.field("world", null, "world");
// Then:
assertThat(pos, equalTo(2));
} |
Future<RecordMetadata> send(final ProducerRecord<byte[], byte[]> record,
final Callback callback) {
maybeBeginTransaction();
try {
return producer.send(record, callback);
} catch (final KafkaException uncaughtException) {
if (isRecoverable(uncaughtException)) {
// producer.send() call may throw a KafkaException which wraps a FencedException,
// in this case we should throw its wrapped inner cause so that it can be
// captured and re-wrapped as TaskMigratedException
throw new TaskMigratedException(
formatException("Producer got fenced trying to send a record"),
uncaughtException.getCause()
);
} else {
throw new StreamsException(
formatException(String.format("Error encountered trying to send record to topic %s", record.topic())),
uncaughtException
);
}
}
} | @Test
public void shouldFailOnMaybeBeginTransactionIfTransactionsNotInitializedForExactlyOnceAlpha() {
final StreamsProducer streamsProducer =
new StreamsProducer(
eosAlphaConfig,
"threadId",
eosAlphaMockClientSupplier,
new TaskId(0, 0),
null,
logContext,
mockTime
);
final IllegalStateException thrown = assertThrows(
IllegalStateException.class,
() -> streamsProducer.send(record, null)
);
assertThat(thrown.getMessage(), is("MockProducer hasn't been initialized for transactions."));
} |
public void submit() {
//Transmit information to our transfer object to communicate between layers
saveToWorker();
//call the service layer to register our worker
service.registerWorker(worker);
//check for any errors
if (worker.getNotification().hasErrors()) {
indicateErrors();
LOGGER.info("Not registered, see errors");
} else {
LOGGER.info("Registration Succeeded");
}
} | @Test
void submitSuccessfully() {
// Ensure the worker is null initially
registerWorkerForm = new RegisterWorkerForm("John Doe", "Engineer", LocalDate.of(1990, 1, 1));
assertNull(registerWorkerForm.worker);
// Submit the form
registerWorkerForm.submit();
// Verify that the worker is not null after submission
assertNotNull(registerWorkerForm.worker);
// Verify that the worker's properties are set correctly
assertEquals("John Doe", registerWorkerForm.worker.getName());
assertEquals("Engineer", registerWorkerForm.worker.getOccupation());
assertEquals(LocalDate.of(1990, 1, 1), registerWorkerForm.worker.getDateOfBirth());
} |
@VisibleForTesting
static Optional<String> getChildValue(@Nullable Xpp3Dom dom, String... childNodePath) {
if (dom == null) {
return Optional.empty();
}
Xpp3Dom node = dom;
for (String child : childNodePath) {
node = node.getChild(child);
if (node == null) {
return Optional.empty();
}
}
return Optional.ofNullable(node.getValue());
} | @Test
public void testGetChildValue_childPathMatched() {
Xpp3Dom root = newXpp3Dom("root", "value");
Xpp3Dom foo = addXpp3DomChild(root, "foo", "foo");
addXpp3DomChild(foo, "bar", "bar");
assertThat(MavenProjectProperties.getChildValue(root, "foo")).isEqualTo(Optional.of("foo"));
assertThat(MavenProjectProperties.getChildValue(root, "foo", "bar"))
.isEqualTo(Optional.of("bar"));
assertThat(MavenProjectProperties.getChildValue(foo, "bar")).isEqualTo(Optional.of("bar"));
} |
@Override
public double distanceBtw(Point p1, Point p2) {
numCalls++;
confirmRequiredDataIsPresent(p1);
confirmRequiredDataIsPresent(p2);
Duration timeDelta = Duration.between(p1.time(), p2.time()); //can be positive or negative
timeDelta = timeDelta.abs();
Double horizontalDistanceInNm = p1.distanceInNmTo(p2);
Double horizontalDistanceInFeet = horizontalDistanceInNm * Spherical.feetPerNM();
Double altitudeDifferenceInFeet = Math.abs(p1.altitude().inFeet() - p2.altitude().inFeet());
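// hypot(x, y) computes sqrt(x^2 + y^2), combining the horizontal and vertical legs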
Double distInFeet = hypot(horizontalDistanceInFeet, altitudeDifferenceInFeet);
return (distanceCoef * distInFeet) + (timeCoef * timeDelta.toMillis());
} | @Test
public void testPointsRequireAltitude() {
PointDistanceMetric metric = new PointDistanceMetric(1.0, 1.0);
Point point = new PointBuilder()
.latLong(0.0, 0.0)
.time(Instant.EPOCH)
.build();
assertThrows(
IllegalArgumentException.class,
() -> metric.distanceBtw(point, point),
"Points should require altitude"
);
} |
public void start(ProxyService service) {
extensions.values().forEach(extension -> extension.start(service));
} | @Test
public void testStart() {
ProxyService service = mock(ProxyService.class);
extensions.start(service);
verify(extension1, times(1)).start(same(service));
verify(extension2, times(1)).start(same(service));
} |
@Override
public String format(final Schema schema) {
final String converted = SchemaWalker.visit(schema, new Converter()) + typePostFix(schema);
return options.contains(Option.AS_COLUMN_LIST)
? stripTopLevelStruct(converted)
: converted;
} | @Test
public void shouldFormatOptionalInt() {
assertThat(DEFAULT.format(Schema.OPTIONAL_INT32_SCHEMA), is("INT"));
assertThat(STRICT.format(Schema.OPTIONAL_INT32_SCHEMA), is("INT"));
} |
@Override
public <X> Object wrap(X value, WrapperOptions options) {
if (value == null) {
return null;
}
Blob blob = null;
Clob clob = null;
if (Blob.class.isAssignableFrom(value.getClass())) {
blob = options.getLobCreator().wrap((Blob) value);
} else if (Clob.class.isAssignableFrom(value.getClass())) {
clob = options.getLobCreator().wrap((Clob) value);
} else if (byte[].class.isAssignableFrom(value.getClass())) {
blob = options.getLobCreator().createBlob((byte[]) value);
} else if (InputStream.class.isAssignableFrom(value.getClass())) {
InputStream inputStream = (InputStream) value;
try {
blob = options.getLobCreator().createBlob(inputStream, inputStream.available());
} catch (IOException e) {
throw unknownWrap(value.getClass());
}
}
String stringValue;
try {
if (blob != null) {
stringValue = new String(DataHelper.extractBytes(blob.getBinaryStream()));
} else if (clob != null) {
stringValue = DataHelper.extractString(clob);
} else {
stringValue = value.toString();
}
} catch (SQLException e) {
throw new HibernateException("Unable to extract binary stream from Blob", e);
}
return fromString(stringValue);
} | @Test
public void testNullPropertyType() {
JsonTypeDescriptor descriptor = new JsonTypeDescriptor();
try {
descriptor.wrap("a", null);
fail("Should fail because the propertyType is null!");
} catch (HibernateException expected) {
}
} |
@Override
public void readLine(String line) {
if (line.startsWith("#") || line.isEmpty()) {
return;
}
// In some cases, ARIN may have multiple results with different NetType values. When that happens,
// we want to use the data from the entry with the data closest to the customer actually using the IP.
if (line.startsWith("NetType:")) {
prevNetworkType = currNetworkType;
currNetworkType = NetworkType.getEnum(lineValue(line));
if (null != currNetworkType && currNetworkType.isMoreSpecificThan(prevNetworkType)) {
this.organization = null;
this.countryCode = null;
}
}
if((line.startsWith("Organization:") || line.startsWith("Customer:")) && this.organization == null) {
this.organization = lineValue(line);
}
if(line.startsWith("Country:") && this.countryCode == null) {
this.countryCode = lineValue(line);
}
if(line.startsWith("ResourceLink") && !line.contains("http")) {
this.isRedirect = true;
registryRedirect = findRegistryFromWhoisServer(lineValue(line));
}
} | @Test
public void testRunRedirect() throws Exception {
ARINResponseParser parser = new ARINResponseParser();
for (String line : REDIRECT_TO_RIPENCC.split("\n")) {
parser.readLine(line);
}
assertTrue(parser.isRedirect());
assertEquals(InternetRegistry.RIPENCC, parser.getRegistryRedirect());
} |
public static String fix(final String raw) {
if ( raw == null || "".equals( raw.trim() )) {
return raw;
}
MacroProcessor macroProcessor = new MacroProcessor();
macroProcessor.setMacros( macros );
return macroProcessor.parse( raw );
} | @Test
public void testLeaveAssertLogicalAlone() {
final String original = "drools.insertLogical(foo)";
assertEqualsIgnoreWhitespace( original,
KnowledgeHelperFixerTest.fixer.fix( original ) );
} |
public static Expression generateFilterExpression(SearchArgument sarg) {
return translate(sarg.getExpression(), sarg.getLeaves());
} | @Test
public void testFloatType() {
SearchArgument.Builder builder = SearchArgumentFactory.newBuilder();
SearchArgument arg = builder.startAnd().equals("float", PredicateLeaf.Type.FLOAT, 1200D).end().build();
UnboundPredicate expected = Expressions.equal("float", 1200D);
UnboundPredicate actual = (UnboundPredicate) HiveIcebergFilterFactory.generateFilterExpression(arg);
assertPredicatesMatch(expected, actual);
} |
@Override
protected int entrySize() {
return ENTRY_SIZE;
} | @Test
public void testEntrySize() {
assertEquals(12, idx.entrySize());
} |
public List<Long> allWindows() {
return getWindowList(_oldestWindowIndex, _currentWindowIndex);
} | @Test
public void testAllWindows() {
MetricSampleAggregator<String, IntegerEntity> aggregator =
new MetricSampleAggregator<>(NUM_WINDOWS, WINDOW_MS, MIN_SAMPLES_PER_WINDOW,
0, _metricDef);
assertTrue(aggregator.allWindows().isEmpty());
CruiseControlUnitTestUtils.populateSampleAggregator(NUM_WINDOWS + 1, MIN_SAMPLES_PER_WINDOW,
aggregator, ENTITY1, 0, WINDOW_MS,
_metricDef);
List<Long> allStWindows = aggregator.allWindows();
assertEquals(NUM_WINDOWS + 1, allStWindows.size());
for (int i = 0; i < NUM_WINDOWS + 1; i++) {
assertEquals((i + 1) * WINDOW_MS, allStWindows.get(i).longValue());
}
} |
public boolean isUsingAloneCoupon() {
return this.policy.isCanUseAlone();
} | @Test
void 독자_사용_쿠폰이면_true를_반환한다() {
// given
Coupon coupon = 쿠픈_생성_독자_사용_할인율_10_퍼센트();
// when
boolean result = coupon.isUsingAloneCoupon();
// then
assertThat(result).isTrue();
} |
@Override
public int hashCode() {
return keyData.hashCode();
} | @Test
public void testHashCode() {
CachedQueryEntry entry = createEntry("key");
assertEquals(entry.hashCode(), entry.hashCode());
} |
public boolean sync() throws IOException {
if (!preSyncCheck()) {
return false;
}
if (!getAllDiffs()) {
return false;
}
List<Path> sourcePaths = context.getSourcePaths();
final Path sourceDir = sourcePaths.get(0);
final Path targetDir = context.getTargetPath();
final FileSystem tfs = targetDir.getFileSystem(conf);
Path tmpDir = null;
try {
tmpDir = createTargetTmpDir(tfs, targetDir);
DiffInfo[] renameAndDeleteDiffs =
getRenameAndDeleteDiffsForSync(targetDir);
if (renameAndDeleteDiffs.length > 0) {
// do the real sync work: deletion and rename
syncDiff(renameAndDeleteDiffs, tfs, tmpDir);
}
return true;
} catch (Exception e) {
DistCp.LOG.warn("Failed to use snapshot diff for distcp", e);
return false;
} finally {
deleteTargetTmpDir(tfs, tmpDir);
// TODO: since we have tmp directory, we can support "undo" with failures
// set the source path using the snapshot path
context.setSourcePaths(Arrays.asList(getSnapshotPath(sourceDir,
context.getToSnapshot())));
}
} | @Test
public void testFallback() throws Exception {
// the source/target dir are not snapshottable dir
Assert.assertFalse(sync());
// make sure the source path has been updated to the snapshot path
final Path spath = new Path(source,
HdfsConstants.DOT_SNAPSHOT_DIR + Path.SEPARATOR + "s2");
Assert.assertEquals(spath, context.getSourcePaths().get(0));
// reset source path in options
context.setSourcePaths(Collections.singletonList(source));
// the source/target does not have the given snapshots
dfs.allowSnapshot(source);
dfs.allowSnapshot(target);
Assert.assertFalse(sync());
Assert.assertEquals(spath, context.getSourcePaths().get(0));
// reset source path in options
context.setSourcePaths(Collections.singletonList(source));
dfs.createSnapshot(source, "s1");
dfs.createSnapshot(source, "s2");
dfs.createSnapshot(target, "s1");
Assert.assertTrue(sync());
// reset source paths in options
context.setSourcePaths(Collections.singletonList(source));
// changes have been made in target
final Path subTarget = new Path(target, "sub");
dfs.mkdirs(subTarget);
Assert.assertFalse(sync());
// make sure the source path has been updated to the snapshot path
Assert.assertEquals(spath, context.getSourcePaths().get(0));
// reset source paths in options
context.setSourcePaths(Collections.singletonList(source));
dfs.delete(subTarget, true);
Assert.assertTrue(sync());
} |
@Override
public GetApplicationAttemptReportResponse getApplicationAttemptReport(
GetApplicationAttemptReportRequest request)
throws YarnException, IOException {
if (request == null || request.getApplicationAttemptId() == null
|| request.getApplicationAttemptId().getApplicationId() == null) {
routerMetrics.incrAppAttemptReportFailedRetrieved();
String msg = "Missing getApplicationAttemptReport request or applicationId " +
"or applicationAttemptId information.";
RouterAuditLogger.logFailure(user.getShortUserName(), GET_APPLICATION_ATTEMPT_REPORT, UNKNOWN,
TARGET_CLIENT_RM_SERVICE, msg);
RouterServerUtil.logAndThrowException(msg, null);
}
long startTime = clock.getTime();
SubClusterId subClusterId = null;
ApplicationId applicationId = request.getApplicationAttemptId().getApplicationId();
try {
subClusterId = getApplicationHomeSubCluster(applicationId);
} catch (YarnException e) {
routerMetrics.incrAppAttemptReportFailedRetrieved();
String msgFormat = "ApplicationAttempt %s belongs to " +
"Application %s does not exist in FederationStateStore.";
ApplicationAttemptId applicationAttemptId = request.getApplicationAttemptId();
RouterAuditLogger.logFailure(user.getShortUserName(), GET_APPLICATION_ATTEMPT_REPORT, UNKNOWN,
TARGET_CLIENT_RM_SERVICE, msgFormat, applicationAttemptId, applicationId);
RouterServerUtil.logAndThrowException(e, msgFormat, applicationAttemptId, applicationId);
}
ApplicationClientProtocol clientRMProxy =
getClientRMProxyForSubCluster(subClusterId);
GetApplicationAttemptReportResponse response = null;
try {
response = clientRMProxy.getApplicationAttemptReport(request);
} catch (Exception e) {
routerMetrics.incrAppAttemptReportFailedRetrieved();
String msg = String.format(
"Unable to get the applicationAttempt report for %s to SubCluster %s.",
request.getApplicationAttemptId(), subClusterId.getId());
RouterAuditLogger.logFailure(user.getShortUserName(), GET_APPLICATION_ATTEMPT_REPORT, UNKNOWN,
TARGET_CLIENT_RM_SERVICE, msg);
RouterServerUtil.logAndThrowException(msg, e);
}
if (response == null) {
LOG.error("No response when attempting to retrieve the report of "
+ "the applicationAttempt {} to SubCluster {}.",
request.getApplicationAttemptId(), subClusterId.getId());
}
long stopTime = clock.getTime();
routerMetrics.succeededAppAttemptReportRetrieved(stopTime - startTime);
RouterAuditLogger.logSuccess(user.getShortUserName(), GET_APPLICATION_ATTEMPT_REPORT,
TARGET_CLIENT_RM_SERVICE);
return response;
} | @Test
public void testGetApplicationAttemptReport()
throws YarnException, IOException, InterruptedException {
LOG.info("Test FederationClientInterceptor: Get ApplicationAttempt Report.");
ApplicationId appId =
ApplicationId.newInstance(System.currentTimeMillis(), 1);
SubmitApplicationRequest request = mockSubmitApplicationRequest(appId);
// Submit the application we want the applicationAttempt report later
SubmitApplicationResponse response = interceptor.submitApplication(request);
Assert.assertNotNull(response);
Assert.assertNotNull(stateStoreUtil.queryApplicationHomeSC(appId));
// Call GetApplicationAttempts Get ApplicationAttemptId
GetApplicationAttemptsRequest attemptsRequest =
GetApplicationAttemptsRequest.newInstance(appId);
GetApplicationAttemptsResponse attemptsResponse =
interceptor.getApplicationAttempts(attemptsRequest);
// Wait for app to start
while(attemptsResponse.getApplicationAttemptList().size() == 0) {
attemptsResponse =
interceptor.getApplicationAttempts(attemptsRequest);
}
Assert.assertNotNull(attemptsResponse);
GetApplicationAttemptReportRequest requestGet =
GetApplicationAttemptReportRequest.newInstance(
attemptsResponse.getApplicationAttemptList().get(0).getApplicationAttemptId());
GetApplicationAttemptReportResponse responseGet =
interceptor.getApplicationAttemptReport(requestGet);
Assert.assertNotNull(responseGet);
} |
public ModelMBeanInfo getMBeanInfo(Object defaultManagedBean, Object customManagedBean, String objectName) throws JMException {
if ((defaultManagedBean == null && customManagedBean == null) || objectName == null)
return null;
// skip proxy classes
if (defaultManagedBean != null && Proxy.isProxyClass(defaultManagedBean.getClass())) {
LOGGER.trace("Skip creating ModelMBeanInfo due proxy class {}", defaultManagedBean.getClass());
return null;
}
// maps and lists to contain information about attributes and operations
Map<String, ManagedAttributeInfo> attributes = new LinkedHashMap<>();
Set<ManagedOperationInfo> operations = new LinkedHashSet<>();
Set<ModelMBeanAttributeInfo> mBeanAttributes = new LinkedHashSet<>();
Set<ModelMBeanOperationInfo> mBeanOperations = new LinkedHashSet<>();
Set<ModelMBeanNotificationInfo> mBeanNotifications = new LinkedHashSet<>();
// extract details from default managed bean
if (defaultManagedBean != null) {
extractAttributesAndOperations(defaultManagedBean.getClass(), attributes, operations);
extractMbeanAttributes(defaultManagedBean, attributes, mBeanAttributes, mBeanOperations);
extractMbeanOperations(defaultManagedBean, operations, mBeanOperations);
extractMbeanNotifications(defaultManagedBean, mBeanNotifications);
}
// extract details from custom managed bean
if (customManagedBean != null) {
extractAttributesAndOperations(customManagedBean.getClass(), attributes, operations);
extractMbeanAttributes(customManagedBean, attributes, mBeanAttributes, mBeanOperations);
extractMbeanOperations(customManagedBean, operations, mBeanOperations);
extractMbeanNotifications(customManagedBean, mBeanNotifications);
}
// create the ModelMBeanInfo
String name = getName(customManagedBean != null ? customManagedBean : defaultManagedBean, objectName);
String description = getDescription(customManagedBean != null ? customManagedBean : defaultManagedBean, objectName);
ModelMBeanAttributeInfo[] arrayAttributes = mBeanAttributes.toArray(new ModelMBeanAttributeInfo[mBeanAttributes.size()]);
ModelMBeanOperationInfo[] arrayOperations = mBeanOperations.toArray(new ModelMBeanOperationInfo[mBeanOperations.size()]);
ModelMBeanNotificationInfo[] arrayNotifications = mBeanNotifications.toArray(new ModelMBeanNotificationInfo[mBeanNotifications.size()]);
ModelMBeanInfo info = new ModelMBeanInfoSupport(name, description, arrayAttributes, null, arrayOperations, arrayNotifications);
LOGGER.trace("Created ModelMBeanInfo {}", info);
return info;
} | @Test(expected = IllegalArgumentException.class)
public void testAttributeSetterNameNotCapital() throws JMException {
mbeanInfoAssembler.getMBeanInfo(new BadAttributeSetterNameNotCapital(), null, "someName");
} |
@Produces
@DefaultBean
@Singleton
JobRunrDashboardWebServer dashboardWebServer(StorageProvider storageProvider, JsonMapper jobRunrJsonMapper, JobRunrDashboardWebServerConfiguration dashboardWebServerConfiguration) {
if (jobRunrBuildTimeConfiguration.dashboard().enabled()) {
return new JobRunrDashboardWebServer(storageProvider, jobRunrJsonMapper, dashboardWebServerConfiguration);
}
return null;
} | @Test
void dashboardWebServerIsSetupWhenConfigured() {
when(dashboardBuildTimeConfiguration.enabled()).thenReturn(true);
assertThat(jobRunrProducer.dashboardWebServer(storageProvider, jsonMapper, usingStandardDashboardConfiguration())).isNotNull();
} |
public static boolean isCompositeType(LogicalType logicalType) {
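// DISTINCT types are named wrappers; recurse into the source type they alias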
if (logicalType instanceof DistinctType) {
return isCompositeType(((DistinctType) logicalType).getSourceType());
}
LogicalTypeRoot typeRoot = logicalType.getTypeRoot();
return typeRoot == STRUCTURED_TYPE || typeRoot == ROW;
} | @Test
void testIsCompositeTypeSimpleType() {
DataType dataType = DataTypes.TIMESTAMP();
assertThat(LogicalTypeChecks.isCompositeType(dataType.getLogicalType())).isFalse();
} |
@Override
public void delete(final Map<Path, TransferStatus> files, final PasswordCallback prompt, final Callback callback) throws BackgroundException {
for(Path file : files.keySet()) {
callback.delete(file);
try {
if(file.attributes().isDuplicate()) {
// Already trashed
log.warn(String.format("Delete file %s already in trash", file));
new NodesApi(session.getClient()).removeDeletedNodes(new DeleteDeletedNodesRequest().deletedNodeIds(Collections.singletonList(
Long.parseLong(nodeid.getVersionId(file)))), StringUtils.EMPTY);
}
else if(file.attributes().getVerdict() == PathAttributes.Verdict.malicious) {
// Delete malicious file
log.warn(String.format("Delete file %s marked as malicious", file));
new NodesApi(session.getClient()).removeMaliciousFile(
Long.parseLong(nodeid.getVersionId(file)), StringUtils.EMPTY);
}
else {
new NodesApi(session.getClient()).removeNode(
Long.parseLong(nodeid.getVersionId(file)), StringUtils.EMPTY);
}
nodeid.cache(file, null);
}
catch(ApiException e) {
throw new SDSExceptionMappingService(nodeid).map("Cannot delete {0}", e, file);
}
}
} | @Test
public void testDeleteFolderRoomWithContent() throws Exception {
final SDSNodeIdProvider nodeid = new SDSNodeIdProvider(session);
final Path room = new SDSDirectoryFeature(session, nodeid).mkdir(new Path(
new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus());
final Path folder = new SDSDirectoryFeature(session, nodeid).mkdir(new Path(room,
new AlphanumericRandomStringService().random().toLowerCase(), EnumSet.of(Path.Type.directory)), new TransferStatus());
assertTrue(new DefaultFindFeature(session).find(folder));
final Path file = new Path(folder, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
new SDSTouchFeature(session, nodeid).touch(file, new TransferStatus());
assertTrue(new DefaultFindFeature(session).find(file));
new SDSDeleteFeature(session, nodeid).delete(Collections.singletonList(folder), new DisabledLoginCallback(), new Delete.DisabledCallback());
assertFalse(new DefaultFindFeature(session).find(folder));
new SDSDeleteFeature(session, nodeid).delete(Collections.singletonList(room), new DisabledLoginCallback(), new Delete.DisabledCallback());
assertFalse(new DefaultFindFeature(session).find(room));
} |
@Override
public void execute( RunConfiguration runConfiguration, ExecutionConfiguration executionConfiguration,
AbstractMeta meta, VariableSpace variableSpace, Repository repository ) throws KettleException {
DefaultRunConfiguration defaultRunConfiguration = (DefaultRunConfiguration) runConfiguration;
if ( executionConfiguration instanceof TransExecutionConfiguration ) {
configureTransExecution( (TransExecutionConfiguration) executionConfiguration, defaultRunConfiguration,
variableSpace, meta, repository );
}
if ( executionConfiguration instanceof JobExecutionConfiguration ) {
configureJobExecution( (JobExecutionConfiguration) executionConfiguration, defaultRunConfiguration, variableSpace,
meta, repository );
}
variableSpace.setVariable( "engine", null );
variableSpace.setVariable( "engine.remote", null );
variableSpace.setVariable( "engine.scheme", null );
variableSpace.setVariable( "engine.url", null );
} | @Test
public void testExecuteClusteredTrans() throws Exception {
DefaultRunConfiguration defaultRunConfiguration = new DefaultRunConfiguration();
defaultRunConfiguration.setName( "Default Configuration" );
defaultRunConfiguration.setLocal( false );
defaultRunConfiguration.setRemote( false );
defaultRunConfiguration.setClustered( true );
TransExecutionConfiguration transExecutionConfiguration = new TransExecutionConfiguration();
defaultRunConfigurationExecutor
.execute( defaultRunConfiguration, transExecutionConfiguration, abstractMeta, variableSpace, null );
assertTrue( transExecutionConfiguration.isExecutingClustered() );
assertFalse( transExecutionConfiguration.isExecutingRemotely() );
assertFalse( transExecutionConfiguration.isExecutingLocally() );
} |
@Override
public void commit(Collection<CommitRequest<FileSinkCommittable>> requests)
throws IOException, InterruptedException {
for (CommitRequest<FileSinkCommittable> request : requests) {
FileSinkCommittable committable = request.getCommittable();
if (committable.hasPendingFile()) {
// We should always use commitAfterRecovery which contains additional checks.
bucketWriter.recoverPendingFile(committable.getPendingFile()).commitAfterRecovery();
}
if (committable.hasInProgressFileToCleanup()) {
bucketWriter.cleanupInProgressFileRecoverable(
committable.getInProgressFileToCleanup());
}
if (committable.hasCompactedFileToCleanup()) {
Path committedFileToCleanup = committable.getCompactedFileToCleanup();
try {
committedFileToCleanup.getFileSystem().delete(committedFileToCleanup, false);
} catch (Exception e) {
                    // Best-effort cleanup of compacted files; skip on failure.
                    if (LOG.isDebugEnabled()) {
                        LOG.debug(
                                "Failed to clean up a compacted file; the file will remain but should not be visible: {}",
committedFileToCleanup,
e);
}
}
}
}
} | @Test
void testCleanupInProgressFiles() throws Exception {
StubBucketWriter stubBucketWriter = new StubBucketWriter();
FileCommitter fileCommitter = new FileCommitter(stubBucketWriter);
MockCommitRequest<FileSinkCommittable> fileSinkCommittable =
new MockCommitRequest<>(
new FileSinkCommittable(
"0", new FileSinkTestUtils.TestInProgressFileRecoverable()));
fileCommitter.commit(Collections.singletonList(fileSinkCommittable));
assertThat(stubBucketWriter.getRecoveredPendingFiles()).isEmpty();
assertThat(stubBucketWriter.getNumCleanUp()).isEqualTo(1);
assertThat(fileSinkCommittable.getNumberOfRetries()).isEqualTo(0);
} |
@Override
protected ObjectPermissions getPermissions() {
return mPermissions.get();
} | @Test
public void getPermissionsWithMapping() throws Exception {
Map<PropertyKey, Object> conf = new HashMap<>();
conf.put(PropertyKey.UNDERFS_S3_OWNER_ID_TO_USERNAME_MAPPING, "111=altname");
try (Closeable c = new ConfigurationRule(conf, CONF).toResource()) {
S3AUnderFileSystem s3UnderFileSystem =
new S3AUnderFileSystem(
new AlluxioURI("s3a://" + BUCKET_NAME), mClient, mAsyncClient, BUCKET_NAME,
mExecutor, mManager, UnderFileSystemConfiguration.defaults(CONF), false, false);
Mockito.when(mClient.getS3AccountOwner()).thenReturn(new Owner("111", "test"));
Mockito.when(mClient.getBucketAcl(Mockito.anyString())).thenReturn(new AccessControlList());
ObjectUnderFileSystem.ObjectPermissions permissions = s3UnderFileSystem.getPermissions();
Assert.assertEquals("altname", permissions.getOwner());
Assert.assertEquals("altname", permissions.getGroup());
Assert.assertEquals(0, permissions.getMode());
}
} |
public AbstractRequestBuilder<K, V, R> setReqParam(String key, Object value)
{
ArgumentUtil.notNull(value, "value");
return setParam(key, value);
} | @Test(expectedExceptions = NullPointerException.class)
public void testSetReqParamWithNullValue()
{
final AbstractRequestBuilder<?, ?, ?> builder = new DummyAbstractRequestBuilder();
builder.setReqParam("a", null);
} |
@Override
public void close() {
close(Duration.ofMillis(Long.MAX_VALUE));
} | @Test
@SuppressWarnings("deprecation")
public void testExplicitlyOnlyEnableClientTelemetryReporter() {
Properties props = new Properties();
props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999");
props.setProperty(ProducerConfig.AUTO_INCLUDE_JMX_REPORTER_CONFIG, "false");
KafkaProducer<String, String> producer = new KafkaProducer<>(props, new StringSerializer(), new StringSerializer());
assertEquals(1, producer.metrics.reporters().size());
assertInstanceOf(ClientTelemetryReporter.class, producer.metrics.reporters().get(0));
producer.close();
} |
public static long getUsableMemory() {
return getMaxMemory() - getTotalMemory() + getFreeMemory();
} | @Test
public void getUsableMemoryTest(){
assertTrue(RuntimeUtil.getUsableMemory() > 0);
} |
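A worked example of the formula with hypothetical JVM numbers: usable memory is the headroom the heap can still grow into (max - total) plus what is already allocated but free.

// Hypothetical values, in MiB
long max = 4096, total = 1024, free = 256;
long usable = max - total + free; // 4096 - 1024 + 256 = 3328 MiB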
public Optional<Session> login(@Nullable String currentSessionId, String host,
ActorAwareAuthenticationToken authToken) throws AuthenticationServiceUnavailableException {
final String previousSessionId = StringUtils.defaultIfBlank(currentSessionId, null);
final Subject subject = new Subject.Builder().sessionId(previousSessionId).host(host).buildSubject();
ThreadContext.bind(subject);
try {
final Session session = subject.getSession();
subject.login(authToken);
return createSession(subject, session, host);
} catch (AuthenticationServiceUnavailableException e) {
log.info("Session creation failed due to authentication service being unavailable. Actor: \"{}\"",
authToken.getActor().urn());
final Map<String, Object> auditEventContext = ImmutableMap.of(
"remote_address", host,
"message", "Authentication service unavailable: " + e.getMessage()
);
auditEventSender.failure(authToken.getActor(), SESSION_CREATE, auditEventContext);
throw e;
} catch (AuthenticationException e) {
log.info("Invalid credentials in session create request. Actor: \"{}\"", authToken.getActor().urn());
final Map<String, Object> auditEventContext = ImmutableMap.of(
"remote_address", host
);
auditEventSender.failure(authToken.getActor(), SESSION_CREATE, auditEventContext);
return Optional.empty();
}
} | @Test
public void serviceUnavailableStateIsCleared() {
setUpUserMock();
assertFalse(SecurityUtils.getSubject().isAuthenticated());
final AtomicBoolean doThrow = new AtomicBoolean(true);
final SimpleAccountRealm switchableRealm = new SimpleAccountRealm() {
@Override
protected AuthenticationInfo doGetAuthenticationInfo(
AuthenticationToken token) throws AuthenticationException {
if (doThrow.get()) {
throw new AuthenticationServiceUnavailableException("not available");
} else {
return super.doGetAuthenticationInfo(token);
}
}
};
securityManager.setRealms(ImmutableList.of(switchableRealm, new SimpleAccountRealm()));
// realm will throw an exception on auth attempt
assertThatThrownBy(() -> sessionCreator.login(null, "host", validToken)).isInstanceOf(
AuthenticationServiceUnavailableException.class);
assertThat(SecurityUtils.getSubject().isAuthenticated()).isFalse();
// switch realm to not throw an exception but simply reject the credentials
doThrow.set(false);
sessionCreator.login(null, "host", validToken);
assertThat(SecurityUtils.getSubject().isAuthenticated()).isFalse();
} |
@Override
public ShardingSphereUser swapToObject(final YamlUserConfiguration yamlConfig) {
if (null == yamlConfig) {
return null;
}
Grantee grantee = convertYamlUserToGrantee(yamlConfig.getUser());
return new ShardingSphereUser(grantee.getUsername(), yamlConfig.getPassword(), grantee.getHostname(), yamlConfig.getAuthenticationMethodName(), yamlConfig.isAdmin());
} | @Test
void assertSwapToNullObject() {
assertNull(new YamlUserSwapper().swapToObject(null));
} |
public AnalysisPropertyDto setValue(String value) {
requireNonNull(value, "value cannot be null");
this.value = value;
return this;
} | @Test
void null_value_should_throw_NPE() {
underTest = new AnalysisPropertyDto();
assertThatThrownBy(() -> underTest.setValue(null))
.isInstanceOf(NullPointerException.class)
.hasMessage("value cannot be null");
} |
@Override
public Catalog createCatalog(Context context) {
final FactoryUtil.CatalogFactoryHelper helper =
FactoryUtil.createCatalogFactoryHelper(this, context);
helper.validate();
return new HiveCatalog(
context.getName(),
helper.getOptions().get(DEFAULT_DATABASE),
helper.getOptions().get(HIVE_CONF_DIR),
helper.getOptions().get(HADOOP_CONF_DIR),
helper.getOptions().get(HIVE_VERSION));
} | @Test
public void testCreateHiveCatalogWithIllegalHadoopConfDir() throws IOException {
final String catalogName = "mycatalog";
final String hadoopConfDir = tempFolder.newFolder().getAbsolutePath();
final Map<String, String> options = new HashMap<>();
options.put(CommonCatalogOptions.CATALOG_TYPE.key(), HiveCatalogFactoryOptions.IDENTIFIER);
options.put(HiveCatalogFactoryOptions.HIVE_CONF_DIR.key(), CONF_DIR.getPath());
options.put(HiveCatalogFactoryOptions.HADOOP_CONF_DIR.key(), hadoopConfDir);
assertThatThrownBy(
() ->
FactoryUtil.createCatalog(
catalogName,
options,
null,
Thread.currentThread().getContextClassLoader()))
.isInstanceOf(ValidationException.class);
} |
@Override
public Enumeration<URL> getResources(String name) throws IOException {
final List<URL> urls = new ArrayList<>();
for (ClassLoader classLoader : classLoaders) {
final Enumeration<URL> resources = classLoader.getResources(name);
if (resources.hasMoreElements()) {
urls.addAll(Collections.list(resources));
}
}
if (urls.isEmpty() && LOG.isTraceEnabled()) {
LOG.trace("Resource " + name + " not found.");
}
return Collections.enumeration(urls);
} | @Test
public void getResourcesReturnsEmptyEnumerationIfResourceDoesNotExist() throws Exception {
final ChainingClassLoader chainingClassLoader = new ChainingClassLoader(getClass().getClassLoader());
final Enumeration<URL> resources = chainingClassLoader.getResources("ThisClassHopeFullyDoesNotExist" + Instant.now().toEpochMilli());
assertThat(resources.hasMoreElements()).isFalse();
} |
public IssueSyncProgress getIssueSyncProgress(DbSession dbSession) {
int completedCount = dbClient.projectDao().countIndexedProjects(dbSession);
int total = dbClient.projectDao().countProjects(dbSession);
boolean hasFailures = dbClient.ceActivityDao().hasAnyFailedOrCancelledIssueSyncTask(dbSession);
boolean isCompleted = !dbClient.ceQueueDao().hasAnyIssueSyncTaskPendingOrInProgress(dbSession);
return new IssueSyncProgress(isCompleted, completedCount, total, hasFailures);
} | @Test
public void return_is_completed_true_if_pending_task_exist_but_all_branches_have_been_synced() {
insertCeQueue("TASK_1", Status.PENDING);
// only project
IntStream.range(0, 10).forEach(value -> insertProjectWithBranches(false, 0));
// project + additional branch
IntStream.range(0, 10).forEach(value -> insertProjectWithBranches(false, 1));
IssueSyncProgress result = underTest.getIssueSyncProgress(db.getSession());
assertThat(result.isCompleted()).isTrue();
} |
public boolean existsNotCreatedStep() {
return totalStepCount > getCreatedStepCount();
} | @Test
public void testExistsNotCreatedStep() throws Exception {
WorkflowRuntimeOverview overview =
loadObject(
"fixtures/instances/sample-workflow-runtime-overview.json",
WorkflowRuntimeOverview.class);
assertFalse(overview.existsNotCreatedStep());
overview.setTotalStepCount(2);
assertTrue(overview.existsNotCreatedStep());
overview
.getStepOverview()
.put(StepInstance.Status.NOT_CREATED, WorkflowStepStatusSummary.of(1));
assertTrue(overview.existsNotCreatedStep());
overview.getStepOverview().put(StepInstance.Status.CREATED, WorkflowStepStatusSummary.of(0));
assertTrue(overview.existsNotCreatedStep());
} |
@Override
public HttpHeaders set(HttpHeaders headers) {
if (headers instanceof DefaultHttpHeaders) {
this.headers.set(((DefaultHttpHeaders) headers).headers);
return this;
} else {
return super.set(headers);
}
} | @Test
public void testSetNullHeaderValueValidate() {
final HttpHeaders headers = new DefaultHttpHeaders(true);
assertThrows(NullPointerException.class, new Executable() {
@Override
public void execute() {
headers.set(of("test"), (CharSequence) null);
}
});
} |
public synchronized void flush() {
try {
output().flush();
} catch (IOException e) {
handleIOException(e);
}
} | @Test
public void flush_streamIsFlushed() throws IOException {
underTest.flush();
verify(outputStream, Mockito.only()).flush();
} |
public Calendar ceil(long t) {
Calendar cal = new GregorianCalendar(Locale.US);
cal.setTimeInMillis(t);
return ceil(cal);
} | @Test public void hashedMinute() throws Exception {
long t = new GregorianCalendar(2013, Calendar.MARCH, 21, 16, 21).getTimeInMillis();
compare(new GregorianCalendar(2013, Calendar.MARCH, 21, 17, 56), new CronTab("H 17 * * *", Hash.from("stuff")).ceil(t));
compare(new GregorianCalendar(2013, Calendar.MARCH, 21, 16, 56), new CronTab("H * * * *", Hash.from("stuff")).ceil(t));
compare(new GregorianCalendar(2013, Calendar.MARCH, 21, 16, 56), new CronTab("@hourly", Hash.from("stuff")).ceil(t));
compare(new GregorianCalendar(2013, Calendar.MARCH, 21, 17, 20), new CronTab("@hourly", Hash.from("junk")).ceil(t));
compare(new GregorianCalendar(2013, Calendar.MARCH, 22, 13, 56), new CronTab("H H(12-13) * * *", Hash.from("stuff")).ceil(t));
} |
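A short sketch of the hashed-schedule behavior exercised above: the H token maps a seed deterministically to a slot, so reruns of the same job are stable while different seeds spread across the hour (times taken from the test).

long t = new GregorianCalendar(2013, Calendar.MARCH, 21, 16, 21).getTimeInMillis();
new CronTab("@hourly", Hash.from("stuff")).ceil(t); // seed "stuff" -> 16:56
new CronTab("@hourly", Hash.from("junk")).ceil(t);  // seed "junk"  -> 17:20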
@Implementation
public static synchronized Dialog getErrorDialog(
int errorCode, Activity activity, int requestCode) {
return googlePlayServicesUtilImpl.getErrorDialog(errorCode, activity, requestCode);
} | @Test
public void getErrorDialog() {
assertNotNull(
GooglePlayServicesUtil.getErrorDialog(ConnectionResult.SERVICE_MISSING, new Activity(), 0));
assertNull(GooglePlayServicesUtil.getErrorDialog(ConnectionResult.SUCCESS, new Activity(), 0));
assertNotNull(
GooglePlayServicesUtil.getErrorDialog(
ConnectionResult.SERVICE_MISSING, new Activity(), 0, null));
assertNull(
GooglePlayServicesUtil.getErrorDialog(ConnectionResult.SUCCESS, new Activity(), 0, null));
} |
@Override
public double getStdDev() {
// two-pass algorithm for variance, avoids numeric overflow
if (values.length <= 1) {
return 0;
}
final double mean = getMean();
double sum = 0;
for (long value : values) {
final double diff = value - mean;
sum += diff * diff;
}
final double variance = sum / (values.length - 1);
return Math.sqrt(variance);
} | @Test
public void calculatesTheStdDev() throws Exception {
assertThat(snapshot.getStdDev())
.isEqualTo(1.5811, offset(0.0001));
} |
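The expected 1.5811 follows directly from the two-pass algorithm, assuming the snapshot holds the values {1, 2, 3, 4, 5} (an assumption; the fixture is not shown here):

long[] values = {1, 2, 3, 4, 5};
double mean = 3.0;              // (1 + 2 + 3 + 4 + 5) / 5
double sum = 4 + 1 + 0 + 1 + 4; // squared differences from the mean
double stdDev = Math.sqrt(sum / (values.length - 1)); // sqrt(2.5) ≈ 1.5811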
@Override
public int hashCode() {
return Long.hashCode(bps);
} | @Test
public void testHashCode() {
Long expected = (one * 1000);
assertEquals(small.hashCode(), expected.hashCode());
} |
public boolean assign(DefaultIssue issue, @Nullable UserDto user, IssueChangeContext context) {
String assigneeUuid = user != null ? user.getUuid() : null;
if (!Objects.equals(assigneeUuid, issue.assignee())) {
String newAssigneeName = user == null ? null : user.getName();
issue.setFieldChange(context, ASSIGNEE, UNUSED, newAssigneeName);
issue.setAssigneeUuid(user != null ? user.getUuid() : null);
issue.setUpdateDate(context.date());
issue.setChanged(true);
issue.setSendNotifications(true);
return true;
}
return false;
} | @Test
void unassign() {
issue.setAssigneeUuid("user_uuid");
boolean updated = underTest.assign(issue, null, context);
assertThat(updated).isTrue();
assertThat(issue.assignee()).isNull();
assertThat(issue.mustSendNotifications()).isTrue();
FieldDiffs.Diff diff = issue.currentChange().get(ASSIGNEE);
assertThat(diff.oldValue()).isEqualTo(UNUSED);
assertThat(diff.newValue()).isNull();
} |
@Override
public void doAlarm(List<AlarmMessage> alarmMessages) throws Exception {
Map<String, WechatSettings> settingsMap = alarmRulesWatcher.getWechatSettings();
if (settingsMap == null || settingsMap.isEmpty()) {
return;
}
Map<String, List<AlarmMessage>> groupedMessages = groupMessagesByHook(alarmMessages);
for (Map.Entry<String, List<AlarmMessage>> entry : groupedMessages.entrySet()) {
var hookName = entry.getKey();
var messages = entry.getValue();
var setting = settingsMap.get(hookName);
if (setting == null || CollectionUtils.isEmpty(setting.getWebhooks()) || CollectionUtils.isEmpty(
messages)) {
continue;
}
for (final var url : setting.getWebhooks()) {
for (final var alarmMessage : messages) {
final var requestBody = String.format(
setting.getTextTemplate(), alarmMessage.getAlarmMessage()
);
try {
post(URI.create(url), requestBody, Map.of());
} catch (Exception e) {
log.error("Failed to send alarm message to Wechat webhook: {}", url, e);
}
}
}
}
} | @Test
public void testWechatWebhook() throws Exception {
List<String> remoteEndpoints = new ArrayList<>();
remoteEndpoints.add("http://127.0.0.1:" + SERVER.httpPort() + "/wechathook/receiveAlarm");
Rules rules = new Rules();
String template = "{\"msgtype\":\"text\",\"text\":{\"content\":\"Skywalking alarm: %s\"}}";
WechatSettings setting1 = new WechatSettings("setting1", AlarmHooksType.wechat, true);
setting1.setWebhooks(remoteEndpoints);
setting1.setTextTemplate(template);
WechatSettings setting2 = new WechatSettings("setting2", AlarmHooksType.wechat, false);
setting2.setWebhooks(remoteEndpoints);
setting2.setTextTemplate(template);
rules.getWechatSettingsMap().put(setting1.getFormattedName(), setting1);
rules.getWechatSettingsMap().put(setting2.getFormattedName(), setting2);
AlarmRulesWatcher alarmRulesWatcher = new AlarmRulesWatcher(rules, null);
WechatHookCallback wechatHookCallback = new WechatHookCallback(alarmRulesWatcher);
List<AlarmMessage> alarmMessages = new ArrayList<>(2);
AlarmMessage alarmMessage = new AlarmMessage();
alarmMessage.setScopeId(DefaultScopeDefine.SERVICE);
alarmMessage.setRuleName("service_resp_time_rule");
alarmMessage.setAlarmMessage("alarmMessage with [DefaultScopeDefine.All]");
alarmMessage.getHooks().add(setting1.getFormattedName());
alarmMessages.add(alarmMessage);
AlarmMessage anotherAlarmMessage = new AlarmMessage();
anotherAlarmMessage.setRuleName("service_resp_time_rule_2");
anotherAlarmMessage.setScopeId(DefaultScopeDefine.ENDPOINT);
anotherAlarmMessage.setAlarmMessage("anotherAlarmMessage with [DefaultScopeDefine.Endpoint]");
anotherAlarmMessage.getHooks().add(setting2.getFormattedName());
alarmMessages.add(anotherAlarmMessage);
wechatHookCallback.doAlarm(alarmMessages);
Assertions.assertTrue(IS_SUCCESS.get());
} |
@Override
public AttributedList<Path> read(final Path directory, final List<String> replies) throws FTPInvalidListException {
final AttributedList<Path> children = new AttributedList<>();
if(replies.isEmpty()) {
return children;
}
// At least one entry successfully parsed
boolean success = false;
for(String line : replies) {
final Map<String, Map<String, String>> file = this.parseFacts(line);
if(null == file) {
log.error(String.format("Error parsing line %s", line));
continue;
}
for(Map.Entry<String, Map<String, String>> f : file.entrySet()) {
final String name = f.getKey();
// size -- Size in octets
// modify -- Last modification time
// create -- Creation time
// type -- Entry type
// unique -- Unique id of file/directory
// perm -- File permissions, whether read, write, execute is allowed for the login id.
// lang -- Language of the file name per IANA [11] registry.
// media-type -- MIME media-type of file contents per IANA registry.
// charset -- Character set per IANA registry (if not UTF-8)
final Map<String, String> facts = f.getValue();
if(!facts.containsKey("type")) {
log.error(String.format("No type fact in line %s", line));
continue;
}
final Path parsed;
if("dir".equals(facts.get("type").toLowerCase(Locale.ROOT))) {
parsed = new Path(directory, PathNormalizer.name(f.getKey()), EnumSet.of(Path.Type.directory));
}
else if("file".equals(facts.get("type").toLowerCase(Locale.ROOT))) {
parsed = new Path(directory, PathNormalizer.name(f.getKey()), EnumSet.of(Path.Type.file));
}
else if(facts.get("type").toLowerCase(Locale.ROOT).matches("os\\.unix=slink:.*")) {
parsed = new Path(directory, PathNormalizer.name(f.getKey()), EnumSet.of(Path.Type.file, Path.Type.symboliclink));
// Parse symbolic link target in Type=OS.unix=slink:/foobar;Perm=;Unique=keVO1+4G4; foobar
final String[] type = facts.get("type").split(":");
if(type.length == 2) {
final String target = type[1];
if(target.startsWith(String.valueOf(Path.DELIMITER))) {
parsed.setSymlinkTarget(new Path(PathNormalizer.normalize(target), EnumSet.of(Path.Type.file)));
}
else {
parsed.setSymlinkTarget(new Path(PathNormalizer.normalize(String.format("%s/%s", directory.getAbsolute(), target)), EnumSet.of(Path.Type.file)));
}
}
else {
log.warn(String.format("Missing symbolic link target for type %s in line %s", facts.get("type"), line));
continue;
}
}
else {
log.warn(String.format("Ignored type %s in line %s", facts.get("type"), line));
continue;
}
if(!success) {
if(parsed.isDirectory() && directory.getName().equals(name)) {
log.warn(String.format("Possibly bogus response line %s", line));
}
else {
success = true;
}
}
if(name.equals(".") || name.equals("..")) {
if(log.isDebugEnabled()) {
log.debug(String.format("Skip %s", name));
}
continue;
}
if(facts.containsKey("size")) {
parsed.attributes().setSize(Long.parseLong(facts.get("size")));
}
if(facts.containsKey("unix.uid")) {
parsed.attributes().setOwner(facts.get("unix.uid"));
}
if(facts.containsKey("unix.owner")) {
parsed.attributes().setOwner(facts.get("unix.owner"));
}
if(facts.containsKey("unix.gid")) {
parsed.attributes().setGroup(facts.get("unix.gid"));
}
if(facts.containsKey("unix.group")) {
parsed.attributes().setGroup(facts.get("unix.group"));
}
if(facts.containsKey("unix.mode")) {
parsed.attributes().setPermission(new Permission(facts.get("unix.mode")));
}
else if(facts.containsKey("perm")) {
if(PreferencesFactory.get().getBoolean("ftp.parser.mlsd.perm.enable")) {
Permission.Action user = Permission.Action.none;
final String flags = facts.get("perm");
if(StringUtils.contains(flags, 'r') || StringUtils.contains(flags, 'l')) {
// RETR command may be applied to that object
// Listing commands, LIST, NLST, and MLSD may be applied
user = user.or(Permission.Action.read);
}
if(StringUtils.contains(flags, 'w') || StringUtils.contains(flags, 'm') || StringUtils.contains(flags, 'c')) {
user = user.or(Permission.Action.write);
}
if(StringUtils.contains(flags, 'e')) {
// CWD command naming the object should succeed
user = user.or(Permission.Action.execute);
if(parsed.isDirectory()) {
user = user.or(Permission.Action.read);
}
}
final Permission permission = new Permission(user, Permission.Action.none, Permission.Action.none);
parsed.attributes().setPermission(permission);
}
}
if(facts.containsKey("modify")) {
// Time values are always represented in UTC
parsed.attributes().setModificationDate(this.parseTimestamp(facts.get("modify")));
}
if(facts.containsKey("create")) {
// Time values are always represented in UTC
parsed.attributes().setCreationDate(this.parseTimestamp(facts.get("create")));
}
children.add(parsed);
}
}
if(!success) {
throw new FTPInvalidListException(children);
}
return children;
} | @Test
public void testSkipParentDir() throws Exception {
Path path = new Path(
"/www", EnumSet.of(Path.Type.directory));
String[] replies = new String[]{
"Type=pdir;Unique=aaaaacUYqaaa;Perm=cpmel; /",
"Type=pdir;Unique=aaaaacUYqaaa;Perm=cpmel; ..",
"Type=file;Unique=aaab8bUYqaaa;Perm=rf;Size=34589; ftpd.c"
};
final AttributedList<Path> children = new FTPMlsdListResponseReader().read(path, Arrays.asList(replies));
assertEquals(1, children.size());
assertEquals("ftpd.c", children.get(0).getName());
} |
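For reference, a sketch of how one MLSD reply line from the test decomposes into facts and a name (layout per RFC 3659; the name follows the single space):

// "Type=file;Unique=aaab8bUYqaaa;Perm=rf;Size=34589; ftpd.c"
// facts: type=file, unique=aaab8bUYqaaa, perm=rf, size=34589
// name:  "ftpd.c" -> parsed as a regular file of 34589 bytes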
@Override
public void onDataReceived(@NonNull final BluetoothDevice device, @NonNull final Data data) {
super.onDataReceived(device, data);
if (data.size() < 3) {
onInvalidDataReceived(device, data);
return;
}
final int opCode = data.getIntValue(Data.FORMAT_UINT8, 0);
if (opCode != OP_CODE_NUMBER_OF_STORED_RECORDS_RESPONSE && opCode != OP_CODE_RESPONSE_CODE) {
onInvalidDataReceived(device, data);
return;
}
final int operator = data.getIntValue(Data.FORMAT_UINT8, 1);
if (operator != OPERATOR_NULL) {
onInvalidDataReceived(device, data);
return;
}
switch (opCode) {
case OP_CODE_NUMBER_OF_STORED_RECORDS_RESPONSE -> {
// Field size is defined per service
int numberOfRecords;
switch (data.size() - 2) {
case 1 -> numberOfRecords = data.getIntValue(Data.FORMAT_UINT8, 2);
case 2 -> numberOfRecords = data.getIntValue(Data.FORMAT_UINT16_LE, 2);
case 4 -> numberOfRecords = data.getIntValue(Data.FORMAT_UINT32_LE, 2);
default -> {
// Other field sizes are not supported
onInvalidDataReceived(device, data);
return;
}
}
onNumberOfRecordsReceived(device, numberOfRecords);
}
case OP_CODE_RESPONSE_CODE -> {
if (data.size() != 4) {
onInvalidDataReceived(device, data);
return;
}
final int requestCode = data.getIntValue(Data.FORMAT_UINT8, 2);
final int responseCode = data.getIntValue(Data.FORMAT_UINT8, 3);
if (responseCode == RACP_RESPONSE_SUCCESS) {
onRecordAccessOperationCompleted(device, requestCode);
} else if (responseCode == RACP_ERROR_NO_RECORDS_FOUND) {
onRecordAccessOperationCompletedWithNoRecordsFound(device, requestCode);
} else {
onRecordAccessOperationError(device, requestCode, responseCode);
}
}
}
} | @Test
public void onRecordAccessOperationError_invalidOperand() {
final Data data = new Data(new byte[] { 6, 0, 1, 5 });
callback.onDataReceived(null, data);
assertEquals(5, error);
} |
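A sketch of the Record Access Control Point frame consumed above, decoding the test's payload byte by byte (names taken from the callback code):

// new Data(new byte[] { 6, 0, 1, 5 })
// byte 0: 6 -> OP_CODE_RESPONSE_CODE
// byte 1: 0 -> OPERATOR_NULL
// byte 2: 1 -> the request op code this response refers to
// byte 3: 5 -> error code (invalid operand) -> onRecordAccessOperationError(device, 1, 5)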
@Override
public ByteOrder getByteOrder() {
return ByteOrder.nativeOrder();
} | @Override
@Test
public void testGetByteOrder() {
assertEquals(ByteOrder.nativeOrder(), in.getByteOrder());
} |
@Override
public void pre(SpanAdapter span, Exchange exchange, Endpoint endpoint) {
super.pre(span, exchange, endpoint);
span.setTag(TagConstants.DB_SYSTEM, CASSANDRA_DB_TYPE);
URI uri = URI.create(endpoint.getEndpointUri());
if (uri.getPath() != null && !uri.getPath().isEmpty()) {
// Strip leading '/' from path
span.setTag(TagConstants.DB_NAME, uri.getPath().substring(1));
}
String cql = exchange.getIn().getHeader(CAMEL_CQL_QUERY, String.class);
if (cql != null) {
span.setTag(TagConstants.DB_STATEMENT, cql);
} else {
Map<String, String> queryParameters = toQueryParameters(endpoint.getEndpointUri());
if (queryParameters.containsKey("cql")) {
span.setTag(TagConstants.DB_STATEMENT, queryParameters.get("cql"));
}
}
} | @Test
public void testPreCqlFromHeader() {
String cql = "select * from users";
Endpoint endpoint = Mockito.mock(Endpoint.class);
Exchange exchange = Mockito.mock(Exchange.class);
Message message = Mockito.mock(Message.class);
Mockito.when(endpoint.getEndpointUri()).thenReturn("cql://host1,host2?consistencyLevel=quorum");
Mockito.when(exchange.getIn()).thenReturn(message);
Mockito.when(message.getHeader(CqlSpanDecorator.CAMEL_CQL_QUERY, String.class)).thenReturn(cql);
SpanDecorator decorator = new CqlSpanDecorator();
MockSpanAdapter span = new MockSpanAdapter();
decorator.pre(span, exchange, endpoint);
assertEquals(CqlSpanDecorator.CASSANDRA_DB_TYPE, span.tags().get(TagConstants.DB_SYSTEM));
assertEquals(cql, span.tags().get(TagConstants.DB_STATEMENT));
assertNull(span.tags().get(TagConstants.DB_NAME));
} |
public char toChar(String name) {
return toChar(name, '\u0000');
} | @Test
public void testToChar_String() {
System.out.println("toChar");
char expResult;
char result;
Properties props = new Properties();
props.put("value1", "f");
props.put("value2", "w");
props.put("empty", "");
props.put("str", "abc");
props.put("boolean", "true");
props.put("float", "24.98");
props.put("int", "12");
props.put("char", "a");
PropertyParser instance = new PropertyParser(props);
expResult = 'f';
result = instance.toChar("value1");
assertEquals(expResult, result);
expResult = 'w';
result = instance.toChar("value2");
assertEquals(expResult, result);
expResult = '\u0000';
result = instance.toChar("empty");
assertEquals(expResult, result);
expResult = '\u0000';
result = instance.toChar("str");
assertEquals(expResult, result);
expResult = '\u0000';
result = instance.toChar("boolean");
assertEquals(expResult, result);
expResult = '\u0000';
result = instance.toChar("float");
assertEquals(expResult, result);
expResult = '\u0000';
result = instance.toChar("int");
assertEquals(expResult, result);
expResult = 'a';
result = instance.toChar("char");
assertEquals(expResult, result);
expResult = '\u0000';
result = instance.toChar("nonexistent");
assertEquals(expResult, result);
} |
@Override
public TypeDefinition build(
ProcessingEnvironment processingEnv, PrimitiveType type, Map<String, TypeDefinition> typeCache) {
TypeDefinition typeDefinition = new TypeDefinition(type.toString());
return typeDefinition;
} | @Test
void testBuild() {
buildAndAssertTypeDefinition(processingEnv, zField, builder);
buildAndAssertTypeDefinition(processingEnv, bField, builder);
buildAndAssertTypeDefinition(processingEnv, cField, builder);
buildAndAssertTypeDefinition(processingEnv, sField, builder);
buildAndAssertTypeDefinition(processingEnv, iField, builder);
buildAndAssertTypeDefinition(processingEnv, lField, builder);
buildAndAssertTypeDefinition(processingEnv, fField, builder);
buildAndAssertTypeDefinition(processingEnv, dField, builder);
} |
public static <T> RedistributeArbitrarily<T> arbitrarily() {
return new RedistributeArbitrarily<>(null, false);
} | @Test
@Category({ValidatesRunner.class, UsesTestStream.class})
public void testRedistributeWithTimestampsStreaming() {
TestStream<Long> stream =
TestStream.create(VarLongCoder.of())
.advanceWatermarkTo(new Instant(0L).plus(Duration.standardDays(48L)))
.addElements(
TimestampedValue.of(0L, new Instant(0L)),
TimestampedValue.of(1L, new Instant(0L).plus(Duration.standardDays(48L))),
TimestampedValue.of(
2L, BoundedWindow.TIMESTAMP_MAX_VALUE.minus(Duration.standardDays(48L))))
.advanceWatermarkToInfinity();
PCollection<KV<String, Long>> input =
pipeline
.apply(stream)
.apply(WithKeys.of(""))
.apply(Window.into(FixedWindows.of(Duration.standardMinutes(10L))));
PCollection<KV<String, Long>> reshuffled = input.apply(Redistribute.arbitrarily());
PAssert.that(reshuffled.apply(Values.create())).containsInAnyOrder(0L, 1L, 2L);
pipeline.run();
} |
static long appendRecords(
Logger log,
ControllerResult<?> result,
int maxRecordsPerBatch,
Function<List<ApiMessageAndVersion>, Long> appender
) {
try {
List<ApiMessageAndVersion> records = result.records();
if (result.isAtomic()) {
// If the result must be written out atomically, check that it is not too large.
// In general, we create atomic batches when it is important to commit "all, or
// nothing". They are limited in size and must only be used when the batch size
// is bounded.
if (records.size() > maxRecordsPerBatch) {
throw new IllegalStateException("Attempted to atomically commit " +
records.size() + " records, but maxRecordsPerBatch is " +
maxRecordsPerBatch);
}
long offset = appender.apply(records);
if (log.isTraceEnabled()) {
log.trace("Atomically appended {} record(s) ending with offset {}.",
records.size(), offset);
}
return offset;
} else {
// If the result is non-atomic, then split it into as many batches as needed.
// The appender callback will create an in-memory snapshot for each batch,
// since we might need to revert to any of them. We will only return the final
// offset of the last batch, however.
int startIndex = 0, numBatches = 0;
while (true) {
numBatches++;
int endIndex = startIndex + maxRecordsPerBatch;
if (endIndex > records.size()) {
long offset = appender.apply(records.subList(startIndex, records.size()));
if (log.isTraceEnabled()) {
log.trace("Appended {} record(s) in {} batch(es), ending with offset {}.",
records.size(), numBatches, offset);
}
return offset;
} else {
appender.apply(records.subList(startIndex, endIndex));
}
startIndex += maxRecordsPerBatch;
}
}
} catch (ApiException e) {
// If the Raft client throws a subclass of ApiException, we need to convert it into a
// RuntimeException so that it will be handled as the unexpected exception that it is.
// ApiExceptions are reserved for expected errors such as incorrect uses of controller
// APIs, permission errors, NotControllerException, etc. etc.
throw new RuntimeException(e);
}
} | @Test
public void testAppendRecords() {
TestAppender appender = new TestAppender();
assertEquals(5, QuorumController.appendRecords(log,
ControllerResult.of(Arrays.asList(rec(0), rec(1), rec(2), rec(3), rec(4)), null),
2,
appender));
} |
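Tracing the non-atomic path for the test above, a sketch of how five records split under maxRecordsPerBatch = 2 (assuming the appender returns the running end offset, as TestAppender presumably does):

// records = [r0, r1, r2, r3, r4], maxRecordsPerBatch = 2
// batch 1: [r0, r1] -> appender returns 2
// batch 2: [r2, r3] -> appender returns 4
// batch 3: [r4]     -> appender returns 5, which appendRecords returns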
@Override
@Transactional(rollbackFor = Exception.class)
public void updateFileConfigMaster(Long id) {
        // Validate that the config exists
        validateFileConfigExists(id);
        // Demote every other config to non-master
        fileConfigMapper.updateBatch(new FileConfigDO().setMaster(false));
        // Promote the target config to master
        fileConfigMapper.updateById(new FileConfigDO().setId(id).setMaster(true));
        // Clear the cache
clearCache(null, true);
} | @Test
public void testUpdateFileConfigMaster_success() {
    // Mock data
    FileConfigDO dbFileConfig = randomFileConfigDO().setMaster(false);
    fileConfigMapper.insert(dbFileConfig);// @Sql: insert an existing record first
    FileConfigDO masterFileConfig = randomFileConfigDO().setMaster(true);
    fileConfigMapper.insert(masterFileConfig);// @Sql: insert an existing record first
    // Invoke
    fileConfigService.updateFileConfigMaster(dbFileConfig.getId());
    // Assert the data
    assertTrue(fileConfigMapper.selectById(dbFileConfig.getId()).getMaster());
    assertFalse(fileConfigMapper.selectById(masterFileConfig.getId()).getMaster());
    // Verify the cache
assertNull(fileConfigService.getClientCache().getIfPresent(0L));
} |
public LogicalSchema resolve(final ExecutionStep<?> step, final LogicalSchema schema) {
return Optional.ofNullable(HANDLERS.get(step.getClass()))
.map(h -> h.handle(this, schema, step))
.orElseThrow(() -> new IllegalStateException("Unhandled step class: " + step.getClass()));
} | @Test
public void shouldResolveSchemaForTableSelectWithColumnNames() {
// Given:
final TableSelect<?> step = new TableSelect<>(
PROPERTIES,
tableSource,
ImmutableList.of(ColumnName.of("NEW_KEY")),
ImmutableList.of(
add("JUICE", "ORANGE", "APPLE"),
ref("PLANTAIN", "BANANA"),
ref("CITRUS", "ORANGE")),
internalFormats
);
// When:
final LogicalSchema result = resolver.resolve(step, SCHEMA);
// Then:
assertThat(result, is(
LogicalSchema.builder()
.keyColumn(ColumnName.of("NEW_KEY"), SqlTypes.INTEGER)
.valueColumn(ColumnName.of("JUICE"), SqlTypes.BIGINT)
.valueColumn(ColumnName.of("PLANTAIN"), SqlTypes.STRING)
.valueColumn(ColumnName.of("CITRUS"), SqlTypes.INTEGER)
.build())
);
} |
@Override
void handle(Connection connection, DatabaseCharsetChecker.State state) throws SQLException {
    // PostgreSQL has no concept of case-sensitive collation. Only the charset ("encoding" in PostgreSQL terminology)
    // needs to be verified.
expectUtf8AsDefault(connection);
if (state == DatabaseCharsetChecker.State.UPGRADE || state == DatabaseCharsetChecker.State.STARTUP) {
// no need to check columns on fresh installs... as they are not supposed to exist!
expectUtf8Columns(connection);
}
} | @Test
public void upgrade_fails_if_non_utf8_column() throws Exception {
// default charset is ok but two columns are not
answerDefaultCharset("utf8");
answerColumns(asList(
new String[] {TABLE_ISSUES, COLUMN_KEE, "utf8"},
new String[] {TABLE_PROJECTS, COLUMN_KEE, "latin"},
new String[] {TABLE_PROJECTS, COLUMN_NAME, "latin"}));
assertThatThrownBy(() -> underTest.handle(connection, DatabaseCharsetChecker.State.UPGRADE))
.isInstanceOf(MessageException.class)
.hasMessage("Database columns [projects.kee, projects.name] must have UTF8 charset.");
} |
public Optional<TableDistribution> mergeDistribution(
MergingStrategy mergingStrategy,
Optional<TableDistribution> sourceTableDistribution,
            Optional<TableDistribution> derivedTableDistribution) {
        if (derivedTableDistribution.isPresent()
                && sourceTableDistribution.isPresent()
                && mergingStrategy != MergingStrategy.EXCLUDING) {
            throw new ValidationException(
                    "The base table already has a distribution defined. You might want to specify "
                            + "EXCLUDING DISTRIBUTION.");
        }
        if (derivedTableDistribution.isPresent()) {
            return derivedTableDistribution;
}
return sourceTableDistribution;
} | @Test
void mergeDistributionFromBaseTable() {
Optional<TableDistribution> sourceDistribution =
Optional.of(TableDistribution.ofHash(Collections.singletonList("a"), 3));
Optional<TableDistribution> mergePartitions =
util.mergeDistribution(
getDefaultMergingStrategies().get(FeatureOption.DISTRIBUTION),
sourceDistribution,
Optional.empty());
assertThat(mergePartitions).isEqualTo(sourceDistribution);
} |
@Override public DubboServerRequest request() {
return request;
} | @Test void request() {
assertThat(response.request()).isSameAs(request);
} |
public static Builder builder(Type type) {
return new Builder(type);
} | @Test
public void set_key_throws_NPE_if_key_arg_is_Null() {
assertThatThrownBy(() -> builder(FILE).setKey(null))
.isInstanceOf(NullPointerException.class);
} |
public static StatementExecutorResponse execute(
final ConfiguredStatement<ListQueries> statement,
final SessionProperties sessionProperties,
final KsqlExecutionContext executionContext,
final ServiceContext serviceContext
) {
final RemoteHostExecutor remoteHostExecutor = RemoteHostExecutor.create(
statement,
sessionProperties,
executionContext,
serviceContext.getKsqlClient()
);
return statement.getStatement().getShowExtended()
? executeExtended(statement, sessionProperties, executionContext, remoteHostExecutor)
: executeSimple(statement, executionContext, remoteHostExecutor);
} | @Test
public void shouldNotMergeDifferentRunningQueries() {
// Given
when(sessionProperties.getInternalRequest()).thenReturn(false);
final ConfiguredStatement<?> showQueries = engine.configure("SHOW QUERIES;");
final PersistentQueryMetadata localMetadata = givenPersistentQuery("id", RUNNING_QUERY_STATE);
final PersistentQueryMetadata remoteMetadata = givenPersistentQuery("different Id", RUNNING_QUERY_STATE);
when(mockKsqlEngine.getAllLiveQueries()).thenReturn(ImmutableList.of(localMetadata));
when(mockKsqlEngine.getPersistentQueries()).thenReturn(ImmutableList.of(localMetadata));
final List<RunningQuery> remoteRunningQueries = Collections.singletonList(persistentQueryMetadataToRunningQuery(
remoteMetadata,
new QueryStatusCount(Collections.singletonMap(KsqlQueryStatus.RUNNING, 1))));
when(remoteQueries.getQueries()).thenReturn(remoteRunningQueries);
when(ksqlEntityList.get(anyInt())).thenReturn(remoteQueries);
when(response.getResponse()).thenReturn(ksqlEntityList);
queryStatusCount.updateStatusCount(RUNNING_QUERY_STATE, 1);
// When
final Queries queries = (Queries) CustomExecutors.LIST_QUERIES.execute(
showQueries,
sessionProperties,
mockKsqlEngine,
serviceContext
).getEntity().orElseThrow(IllegalStateException::new);
// Then
assertThat(queries.getQueries(),
containsInAnyOrder(
persistentQueryMetadataToRunningQuery(localMetadata, queryStatusCount),
persistentQueryMetadataToRunningQuery(remoteMetadata, queryStatusCount)));
} |
@SuppressWarnings("rawtypes")
@Deprecated
public synchronized Topology addProcessor(final String name,
final org.apache.kafka.streams.processor.ProcessorSupplier supplier,
final String... parentNames) {
return addProcessor(
name,
new ProcessorSupplier<Object, Object, Object, Object>() {
@Override
public Set<StoreBuilder<?>> stores() {
return supplier.stores();
}
@Override
public org.apache.kafka.streams.processor.api.Processor<Object, Object, Object, Object> get() {
return ProcessorAdapter.adaptRaw(supplier.get());
}
},
parentNames
);
} | @Test
public void shouldNotAllowNullProcessorSupplierWhenAddingProcessor() {
assertThrows(NullPointerException.class, () -> topology.addProcessor("name",
(ProcessorSupplier<Object, Object, Object, Object>) null));
} |
public static void activateParams( VariableSpace childVariableSpace, NamedParams childNamedParams, VariableSpace parent, String[] listParameters,
String[] mappingVariables, String[] inputFields ) {
activateParams( childVariableSpace, childNamedParams, parent, listParameters, mappingVariables, inputFields, true );
} | @Test
public void activateParamsTestWithNoParameterChild() throws Exception {
String newParam = "newParamParent";
String parentValue = "parentValue";
TransMeta parentMeta = new TransMeta();
TransMeta childVariableSpace = new TransMeta();
String[] parameters = childVariableSpace.listParameters();
StepWithMappingMeta.activateParams( childVariableSpace, childVariableSpace, parentMeta,
parameters, new String[] { newParam }, new String[] { parentValue }, true );
Assert.assertEquals( parentValue, childVariableSpace.getParameterValue( newParam ) );
} |
protected String generateQueryString(MultiValuedTreeMap<String, String> parameters, boolean encode, String encodeCharset)
throws ServletException {
if (parameters == null || parameters.isEmpty()) {
return null;
}
if (queryString != null) {
return queryString;
}
StringBuilder queryStringBuilder = new StringBuilder();
try {
for (String key : parameters.keySet()) {
for (String val : parameters.get(key)) {
queryStringBuilder.append("&");
if (encode) {
queryStringBuilder.append(URLEncoder.encode(key, encodeCharset));
} else {
queryStringBuilder.append(key);
}
queryStringBuilder.append("=");
if (val != null) {
if (encode) {
queryStringBuilder.append(URLEncoder.encode(val, encodeCharset));
} else {
queryStringBuilder.append(val);
}
}
}
}
} catch (UnsupportedEncodingException e) {
throw new ServletException("Invalid charset passed for query string encoding", e);
}
queryString = queryStringBuilder.toString();
queryString = queryString.substring(1); // remove the first & - faster to do it here than adding logic in the Lambda
return queryString;
} | @Test
void queryString_generateQueryString_nullParameterIsEmpty() {
AwsProxyHttpServletRequest request = new AwsProxyHttpServletRequest(queryStringNullValue, mockContext, null, config);
String parsedString = null;
try {
parsedString = request.generateQueryString(request.getAwsProxyRequest().getMultiValueQueryStringParameters(), true, config.getUriEncoding());
} catch (ServletException e) {
e.printStackTrace();
fail("Could not generate query string");
}
assertTrue(parsedString.endsWith("three="));
} |
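A quick trace of the builder with hypothetical parameters: every pair is emitted as "&key=value" (nothing after "=" for a null value), and the leading "&" is stripped once at the end.

// { "one" -> ["1"], "three" -> [null] }
// loop output:  "&one=1" + "&three="
// substring(1): "one=1&three="  (hence the test's assertion endsWith("three="))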
@Override
public void filter(ContainerRequestContext requestContext) {
if (isInternalRequest(requestContext)) {
log.trace("Skipping authentication for internal request");
return;
}
try {
log.debug("Authenticating request");
BasicAuthCredentials credentials = new BasicAuthCredentials(requestContext.getHeaderString(AUTHORIZATION));
LoginContext loginContext = new LoginContext(
CONNECT_LOGIN_MODULE,
null,
new BasicAuthCallBackHandler(credentials),
configuration);
loginContext.login();
setSecurityContextForRequest(requestContext, credentials);
} catch (LoginException | ConfigException e) {
// Log at debug here in order to avoid polluting log files whenever someone mistypes their credentials
log.debug("Request failed authentication", e);
requestContext.abortWith(
Response.status(Response.Status.UNAUTHORIZED)
.entity("User cannot access the resource.")
.build());
}
} | @Test
public void testBadPassword() throws IOException {
File credentialFile = setupPropertyLoginFile(true);
JaasBasicAuthFilter jaasBasicAuthFilter = setupJaasFilter("KafkaConnect", credentialFile.getPath());
ContainerRequestContext requestContext = setMock("Basic", "user", "password1");
jaasBasicAuthFilter.filter(requestContext);
verify(requestContext).abortWith(any(Response.class));
verify(requestContext, atLeastOnce()).getMethod();
verify(requestContext).getHeaderString(JaasBasicAuthFilter.AUTHORIZATION);
} |
public Nutrients calculateNutrients(BigDecimal grams) {
if (grams == null || grams.doubleValue() <= 0) {
return Nutrients.createEmptyNutrients();
}
return new Nutrients(calculateCalories(grams),
calculateCarbohydrates(grams),
calculateProteins(grams),
calculateFats(grams));
} | @Test
void calculateNutrients_nullValue() {
Nutrients result = product.calculateNutrients(null);
assertAll("Should return empty nutrients",
() -> assertEquals(new BigDecimal("0"), result.getCalories().getTotalCalories()),
() -> assertEquals(new BigDecimal("0"), result.getCarbohydrates().getTotalCarbohydrates()),
() -> assertEquals(new BigDecimal("0"), result.getProteins().getTotalProteins()),
() -> assertEquals(new BigDecimal("0"), result.getFats().getTotalFats()));
} |
public static void main(String[] args) {
if (args.length < 1 || args[0].equals("-h") || args[0].equals("--help")) {
System.out.println(usage);
return;
}
// Copy args, because CommandFormat mutates the list.
List<String> argsList = new ArrayList<String>(Arrays.asList(args));
CommandFormat cf = new CommandFormat(0, Integer.MAX_VALUE, "-glob", "-jar");
try {
cf.parse(argsList);
} catch (UnknownOptionException e) {
terminate(1, "unrecognized option");
return;
}
String classPath = System.getProperty("java.class.path");
if (cf.getOpt("-glob")) {
// The classpath returned from the property has been globbed already.
System.out.println(classPath);
} else if (cf.getOpt("-jar")) {
if (argsList.isEmpty() || argsList.get(0) == null ||
argsList.get(0).isEmpty()) {
terminate(1, "-jar option requires path of jar file to write");
return;
}
// Write the classpath into the manifest of a temporary jar file.
Path workingDir = new Path(System.getProperty("user.dir"));
final String tmpJarPath;
try {
tmpJarPath = FileUtil.createJarWithClassPath(classPath, workingDir,
System.getenv())[0];
} catch (IOException e) {
terminate(1, "I/O error creating jar: " + e.getMessage());
return;
}
// Rename the temporary file to its final location.
String jarPath = argsList.get(0);
try {
FileUtil.replaceFile(new File(tmpJarPath), new File(jarPath));
} catch (IOException e) {
terminate(1, "I/O error renaming jar temporary file to path: " +
e.getMessage());
return;
}
}
} | @Test
public void testHelpShort() {
Classpath.main(new String[] { "-h" });
String strOut = new String(stdout.toByteArray(), UTF8);
assertTrue(strOut.contains("Prints the classpath"));
assertTrue(stderr.toByteArray().length == 0);
} |
public static String roundStr(double v, int scale) {
return round(v, scale).toPlainString();
} | @Test
public void roundStrTest() {
final String roundStr = NumberUtil.roundStr(2.647, 2);
assertEquals(roundStr, "2.65");
final String roundStr1 = NumberUtil.roundStr(0, 10);
assertEquals(roundStr1, "0.0000000000");
} |