index (int64) | repo_id (string) | file_path (string) | content (string) | __index_level_0__ (int64)
---|---|---|---|---|
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/lib
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/lib/join/InnerJoinFn.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.lib.join;
import java.util.List;
import org.apache.crunch.Emitter;
import org.apache.crunch.Pair;
import org.apache.crunch.types.PType;
import com.google.common.collect.Lists;
/**
* Used to perform the last step of an inner join.
*
* @param <K> Type of the keys.
* @param <U> Type of the first {@link org.apache.crunch.PTable}'s values
* @param <V> Type of the second {@link org.apache.crunch.PTable}'s values
*/
public class InnerJoinFn<K, U, V> extends JoinFn<K, U, V> {
private transient K lastKey;
private transient List<U> leftValues;
public InnerJoinFn(PType<K> keyType, PType<U> leftValueType) {
super(keyType, leftValueType);
}
@Override
public void initialize() {
super.initialize();
lastKey = null;
this.leftValues = Lists.newArrayList();
}
@Override
public void join(K key, int id, Iterable<Pair<U, V>> pairs, Emitter<Pair<K, Pair<U, V>>> emitter) {
if (!key.equals(lastKey)) {
lastKey = keyType.getDetachedValue(key);
leftValues.clear();
}
if (id == 0) { // from left
for (Pair<U, V> pair : pairs) {
if (pair.first() != null) {
leftValues.add(leftValueType.getDetachedValue(pair.first()));
}
}
} else { // from right
for (Pair<U, V> pair : pairs) {
for (U u : leftValues) {
emitter.emit(Pair.of(lastKey, Pair.of(u, pair.second())));
}
}
}
}
@Override
public String getJoinType() {
return "innerJoin";
}
}
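// A hedged trace harness (not part of the original file) showing the grouping
// contract: for a given key the left side arrives with id 0 and is buffered, then
// the right side arrives with id 1 and each right value is paired with every
// buffered left value. It assumes the Crunch runtime has already called
// fn.initialize() with a live Configuration; the collecting Emitter is illustrative.
class InnerJoinFnTrace {
  static <K, U, V> List<Pair<K, Pair<U, V>>> run(InnerJoinFn<K, U, V> fn, K key,
      Iterable<Pair<U, V>> leftSide, Iterable<Pair<U, V>> rightSide) {
    final List<Pair<K, Pair<U, V>>> out = Lists.newArrayList();
    Emitter<Pair<K, Pair<U, V>>> collector = new Emitter<Pair<K, Pair<U, V>>>() {
      @Override
      public void emit(Pair<K, Pair<U, V>> emitted) { out.add(emitted); }
      @Override
      public void flush() { }
    };
    fn.join(key, 0, leftSide, collector);  // id 0: buffer detached left values
    fn.join(key, 1, rightSide, collector); // id 1: emit one pair per (left, right)
    return out;
  }
}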
| 2,900 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/lib
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/lib/join/ShardedJoinStrategy.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable
* law or agreed to in writing, software distributed under the License is distributed on an "AS IS"
* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License
* for the specific language governing permissions and limitations under the License.
*/
package org.apache.crunch.lib.join;
import java.io.Serializable;
import java.util.Random;
import org.apache.crunch.DoFn;
import org.apache.crunch.Emitter;
import org.apache.crunch.MapFn;
import org.apache.crunch.PTable;
import org.apache.crunch.Pair;
import org.apache.crunch.types.PTableType;
import org.apache.crunch.types.PTypeFamily;
/**
* JoinStrategy that splits the key space up into shards.
* <p>
* This strategy is useful when there are multiple values per key on at least one side of the join,
* and a large proportion of the values are mapped to a small number of keys.
* <p>
* Using this strategy will increase the number of keys being joined, but can increase performance
* by spreading processing of a single key over multiple reduce groups.
* <p>
* A custom {@link ShardingStrategy} can be provided so that only certain keys are sharded, or
* keys can be sharded in accordance with how many values are mapped to them.
*/
public class ShardedJoinStrategy<K, U, V> implements JoinStrategy<K, U, V> {
/**
* Determines over how many shards a key will be split in a sharded join.
* <p>
* It is essential that implementations of this class are deterministic.
*/
public static interface ShardingStrategy<K> extends Serializable {
/**
* Retrieve the number of shards over which the given key should be split.
* @param key key for which shards are to be determined
* @return number of shards for the given key, must be greater than 0
*/
int getNumShards(K key);
}
private JoinStrategy<Pair<K, Integer>, U, V> wrappedJoinStrategy;
private ShardingStrategy<K> shardingStrategy;
/**
* Instantiate with a constant number of shards to use for all keys.
*
* @param numShards number of shards to use
*/
public ShardedJoinStrategy(int numShards) {
this(new ConstantShardingStrategy<K>(numShards));
}
/**
* Instantiate with a constant number of shards to use for all keys, and a specified
* number of reducers.
*
* @param numShards number of shards to use
* @param numReducers the number of reducers to run the join with
*/
public ShardedJoinStrategy(int numShards, int numReducers) {
this(new ConstantShardingStrategy<K>(numShards), numReducers);
}
/**
* Instantiate with a custom sharding strategy.
*
* @param shardingStrategy strategy to be used for sharding
*/
public ShardedJoinStrategy(ShardingStrategy<K> shardingStrategy) {
this.wrappedJoinStrategy = new DefaultJoinStrategy<Pair<K, Integer>, U, V>();
this.shardingStrategy = shardingStrategy;
}
/**
* Instantiate with a custom sharding strategy and a specified number of reducers.
*
* @param shardingStrategy strategy to be used for sharding
* @param numReducers the number of reducers to run the join with
*/
public ShardedJoinStrategy(ShardingStrategy<K> shardingStrategy, int numReducers) {
if (numReducers < 1) {
throw new IllegalArgumentException("Num reducers must be > 0, got " + numReducers);
}
this.wrappedJoinStrategy = new DefaultJoinStrategy<Pair<K, Integer>, U, V>(numReducers);
this.shardingStrategy = shardingStrategy;
}
@Override
public PTable<K, Pair<U, V>> join(PTable<K, U> left, PTable<K, V> right, JoinType joinType) {
if (joinType == JoinType.FULL_OUTER_JOIN || joinType == JoinType.LEFT_OUTER_JOIN) {
throw new UnsupportedOperationException("Join type " + joinType + " not supported by ShardedJoinStrategy");
}
PTypeFamily ptf = left.getTypeFamily();
PTableType<Pair<K, Integer>, U> shardedLeftType = ptf.tableOf(ptf.pairs(left.getKeyType(), ptf.ints()), left.getValueType());
PTableType<Pair<K, Integer>, V> shardedRightType = ptf.tableOf(ptf.pairs(right.getKeyType(), ptf.ints()), right.getValueType());
PTableType<K, Pair<U,V>> outputType = ptf.tableOf(left.getKeyType(), ptf.pairs(left.getValueType(), right.getValueType()));
PTable<Pair<K,Integer>,U> shardedLeft = left.parallelDo("Pre-shard left", new PreShardLeftSideFn<K, U>(shardingStrategy), shardedLeftType);
PTable<Pair<K,Integer>,V> shardedRight = right.parallelDo("Pre-shard right", new PreShardRightSideFn<K, V>(shardingStrategy), shardedRightType);
PTable<Pair<K, Integer>, Pair<U, V>> shardedJoined = wrappedJoinStrategy.join(shardedLeft, shardedRight, joinType);
return shardedJoined.parallelDo("Unshard", new UnshardFn<K, U, V>(), outputType);
}
private static class PreShardLeftSideFn<K, U> extends DoFn<Pair<K, U>, Pair<Pair<K, Integer>, U>> {
private ShardingStrategy<K> shardingStrategy;
public PreShardLeftSideFn(ShardingStrategy<K> shardingStrategy) {
this.shardingStrategy = shardingStrategy;
}
@Override
public void process(Pair<K, U> input, Emitter<Pair<Pair<K, Integer>, U>> emitter) {
K key = input.first();
int numShards = shardingStrategy.getNumShards(key);
if (numShards < 1) {
throw new IllegalArgumentException("Num shards must be > 0, got " + numShards + " for " + key);
}
for (int i = 0; i < numShards; i++) {
emitter.emit(Pair.of(Pair.of(key, i), input.second()));
}
}
}
private static class PreShardRightSideFn<K, V> extends MapFn<Pair<K, V>, Pair<Pair<K, Integer>, V>> {
private ShardingStrategy<K> shardingStrategy;
private transient Random random;
public PreShardRightSideFn(ShardingStrategy<K> shardingStrategy) {
this.shardingStrategy = shardingStrategy;
}
@Override
public void initialize() {
random = new Random(getTaskAttemptID().getTaskID().getId());
}
@Override
public Pair<Pair<K, Integer>, V> map(Pair<K, V> input) {
K key = input.first();
V value = input.second();
int numShards = shardingStrategy.getNumShards(key);
if (numShards < 1) {
throw new IllegalArgumentException("Num shards must be > 0, got " + numShards + " for " + key);
}
return Pair.of(Pair.of(key, random.nextInt(numShards)), value);
}
}
private static class UnshardFn<K, U, V> extends MapFn<Pair<Pair<K, Integer>, Pair<U, V>>, Pair<K, Pair<U, V>>> {
@Override
public Pair<K, Pair<U, V>> map(Pair<Pair<K, Integer>, Pair<U, V>> input) {
return Pair.of(input.first().first(), input.second());
}
}
/**
* Sharding strategy that returns the same number of shards for all keys.
*/
private static class ConstantShardingStrategy<K> implements ShardingStrategy<K> {
private int numShards;
public ConstantShardingStrategy(int numShards) {
this.numShards = numShards;
}
@Override
public int getNumShards(K key) {
return numShards;
}
}
}
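// A hedged sketch (not part of the original file) of a custom ShardingStrategy:
// split only a known hot key across several shards and leave all other keys
// unsharded. The key value and shard count here are purely illustrative; the
// contract only requires the result to be deterministic and greater than 0.
class HotKeyShardingStrategy implements ShardedJoinStrategy.ShardingStrategy<String> {
  private static final int HOT_KEY_SHARDS = 16;
  @Override
  public int getNumShards(String key) {
    return "hotKey".equals(key) ? HOT_KEY_SHARDS : 1;
  }
}
// Usage would then look like:
//   new ShardedJoinStrategy<String, U, V>(new HotKeyShardingStrategy())
//       .join(left, right, JoinType.INNER_JOIN);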
| 2,901 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/lib
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/lib/join/package-info.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Inner and outer joins on collections.
*/
package org.apache.crunch.lib.join;
| 2,902 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/lib
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/lib/join/JoinFn.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.lib.join;
import org.apache.crunch.DoFn;
import org.apache.crunch.Emitter;
import org.apache.crunch.Pair;
import org.apache.crunch.types.PType;
/**
* Represents a {@link org.apache.crunch.DoFn} for performing joins.
*
* @param <K> Type of the keys.
* @param <U> Type of the first {@link org.apache.crunch.PTable}'s values
* @param <V> Type of the second {@link org.apache.crunch.PTable}'s values
*/
public abstract class JoinFn<K, U, V> extends
DoFn<Pair<Pair<K, Integer>, Iterable<Pair<U, V>>>, Pair<K, Pair<U, V>>> {
protected PType<K> keyType;
protected PType<U> leftValueType;
/**
* Instantiate with the PType of the value of the left side of the join (used for creating deep
* copies of values).
*
* @param keyType The PType of the value used as the key of the join
* @param leftValueType The PType of the value type of the left side of the join
*/
public JoinFn(PType<K> keyType, PType<U> leftValueType) {
this.keyType = keyType;
this.leftValueType = leftValueType;
}
@Override
public void initialize() {
this.keyType.initialize(getConfiguration());
this.leftValueType.initialize(getConfiguration());
}
/** @return The name of this join type (e.g. innerJoin, leftOuterJoin). */
public abstract String getJoinType();
/**
* Performs the actual joining.
*
* @param key The key for this grouping of values.
* @param id The side that this group of values is from (0 -> left, 1 -> right).
* @param pairs The group of values associated with this key and id pair.
* @param emitter The emitter to send the output to.
*/
public abstract void join(K key, int id, Iterable<Pair<U, V>> pairs,
Emitter<Pair<K, Pair<U, V>>> emitter);
/**
* Split up the input record to make coding a bit more manageable.
*
* @param input The input record.
* @param emitter The emitter to send the output to.
*/
@Override
public void process(Pair<Pair<K, Integer>, Iterable<Pair<U, V>>> input,
Emitter<Pair<K, Pair<U, V>>> emitter) {
join(input.first().first(), input.first().second(), input.second(), emitter);
}
}
| 2,903 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/lib
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/lib/join/LeftOuterJoinFn.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.lib.join;
import java.util.List;
import org.apache.crunch.Emitter;
import org.apache.crunch.Pair;
import org.apache.crunch.types.PType;
import com.google.common.collect.Lists;
/**
* Used to perform the last step of a left outer join.
*
* @param <K> Type of the keys.
* @param <U> Type of the first {@link org.apache.crunch.PTable}'s values
* @param <V> Type of the second {@link org.apache.crunch.PTable}'s values
*/
public class LeftOuterJoinFn<K, U, V> extends JoinFn<K, U, V> {
private transient int lastId;
private transient K lastKey;
private transient List<U> leftValues;
public LeftOuterJoinFn(PType<K> keyType, PType<U> leftValueType) {
super(keyType, leftValueType);
}
/** {@inheritDoc} */
@Override
public void initialize() {
super.initialize();
lastId = 1;
lastKey = null;
this.leftValues = Lists.newArrayList();
}
/** {@inheritDoc} */
@Override
public void join(K key, int id, Iterable<Pair<U, V>> pairs, Emitter<Pair<K, Pair<U, V>>> emitter) {
if (!key.equals(lastKey)) {
// Make sure that left side always gets emitted.
if (0 == lastId) {
for (U u : leftValues) {
emitter.emit(Pair.of(lastKey, Pair.of(u, (V) null)));
}
}
lastKey = keyType.getDetachedValue(key);
leftValues.clear();
}
if (id == 0) {
for (Pair<U, V> pair : pairs) {
if (pair.first() != null)
leftValues.add(leftValueType.getDetachedValue(pair.first()));
}
} else {
for (Pair<U, V> pair : pairs) {
for (U u : leftValues) {
emitter.emit(Pair.of(lastKey, Pair.of(u, pair.second())));
}
}
}
lastId = id;
}
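// Flush-order note: a key's unmatched left values are emitted either at the top of
// join() when the next key arrives (if the last id seen was 0), or by cleanup()
// below for the final key group of the task.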
/** {@inheritDoc} */
@Override
public void cleanup(Emitter<Pair<K, Pair<U, V>>> emitter) {
if (0 == lastId) {
for (U u : leftValues) {
emitter.emit(Pair.of(lastKey, Pair.of(u, (V) null)));
}
}
}
/** {@inheritDoc} */
@Override
public String getJoinType() {
return "leftOuterJoin";
}
}
| 2,904 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/lib
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/lib/sort/SortFns.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.lib.sort;
import java.util.List;
import java.util.UUID;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericRecord;
import org.apache.crunch.MapFn;
import org.apache.crunch.Tuple;
import org.apache.crunch.lib.Sort.ColumnOrder;
import org.apache.crunch.lib.Sort.Order;
import org.apache.crunch.types.PType;
import org.apache.crunch.types.PTypeFamily;
import org.apache.crunch.types.TupleFactory;
import org.apache.crunch.types.avro.AvroType;
import org.apache.crunch.types.avro.AvroTypeFamily;
import org.apache.crunch.types.avro.Avros;
import com.google.common.collect.Lists;
/**
* A set of {@code DoFn}s that are used by Crunch's {@code Sort} library.
*/
public class SortFns {
/**
* Extracts a single indexed key from a {@code Tuple} instance.
*/
public static class SingleKeyFn<V extends Tuple, K> extends MapFn<V, K> {
private final int index;
public SingleKeyFn(int index) {
this.index = index;
}
@Override
public K map(V input) {
return (K) input.get(index);
}
}
/**
* Extracts a composite key from a {@code Tuple} instance.
*/
public static class TupleKeyFn<V extends Tuple, K extends Tuple> extends MapFn<V, K> {
private final int[] indices;
private final TupleFactory tupleFactory;
public TupleKeyFn(int[] indices, TupleFactory tupleFactory) {
this.indices = indices;
this.tupleFactory = tupleFactory;
}
@Override
public K map(V input) {
Object[] values = new Object[indices.length];
for (int i = 0; i < indices.length; i++) {
values[i] = input.get(indices[i]);
}
return (K) tupleFactory.makeTuple(values);
}
}
/**
* Pulls a composite set of keys from an Avro {@code GenericRecord} instance.
*/
public static class AvroGenericFn<V extends Tuple> extends MapFn<V, GenericRecord> {
private final int[] indices;
private final String schemaJson;
private transient Schema schema;
public AvroGenericFn(int[] indices, Schema schema) {
this.indices = indices;
this.schemaJson = schema.toString();
}
@Override
public void initialize() {
this.schema = (new Schema.Parser()).parse(schemaJson);
}
@Override
public GenericRecord map(V input) {
GenericRecord rec = new GenericData.Record(schema);
for (int i = 0; i < indices.length; i++) {
rec.put(i, input.get(indices[i]));
}
return rec;
}
}
/**
* Constructs an Avro schema for the given {@code PType<S>} that respects the given column
* orderings.
*/
public static <S> Schema createOrderedTupleSchema(PType<S> ptype, ColumnOrder[] orders) {
// Guarantee each tuple schema has a globally unique name
String tupleName = "tuple" + UUID.randomUUID().toString().replace('-', 'x');
Schema schema = Schema.createRecord(tupleName, "", "crunch", false);
List<Schema.Field> fields = Lists.newArrayList();
AvroType<S> parentAvroType = (AvroType<S>) ptype;
Schema parentAvroSchema = parentAvroType.getSchema();
for (int index = 0; index < orders.length; index++) {
ColumnOrder columnOrder = orders[index];
AvroType<?> atype = (AvroType<?>) ptype.getSubTypes().get(index);
Schema fieldSchema = atype.getSchema();
String fieldName = parentAvroSchema.getFields().get(index).name();
// Note: avro sorting of strings is inverted relative to how sorting works for WritableComparable
// Text instances: making this consistent
Schema.Field.Order order = columnOrder.order() == Order.DESCENDING ? Schema.Field.Order.DESCENDING :
Schema.Field.Order.ASCENDING;
fields.add(new Schema.Field(fieldName, fieldSchema, "", null, order));
}
schema.setFields(fields);
return schema;
}
/**
* Utility class for encapsulating key extraction logic and serialization information about
* key extraction.
*/
public static class KeyExtraction<V extends Tuple> {
private PType<V> ptype;
private final ColumnOrder[] columnOrder;
private final int[] cols;
private MapFn<V, Object> byFn;
private PType<Object> keyPType;
public KeyExtraction(PType<V> ptype, ColumnOrder[] columnOrder) {
this.ptype = ptype;
this.columnOrder = columnOrder;
this.cols = new int[columnOrder.length];
for (int i = 0; i < columnOrder.length; i++) {
cols[i] = columnOrder[i].column() - 1;
}
init();
}
private void init() {
List<PType> pt = ptype.getSubTypes();
PTypeFamily ptf = ptype.getFamily();
if (cols.length == 1) {
byFn = new SingleKeyFn(cols[0]);
keyPType = pt.get(cols[0]);
} else {
TupleFactory tf;
switch (cols.length) {
case 2:
tf = TupleFactory.PAIR;
keyPType = ptf.pairs(pt.get(cols[0]), pt.get(cols[1]));
break;
case 3:
tf = TupleFactory.TUPLE3;
keyPType = ptf.triples(pt.get(cols[0]), pt.get(cols[1]), pt.get(cols[2]));
break;
case 4:
tf = TupleFactory.TUPLE4;
keyPType = ptf.quads(pt.get(cols[0]), pt.get(cols[1]), pt.get(cols[2]), pt.get(cols[3]));
break;
default:
PType[] pts = new PType[cols.length];
for (int i = 0; i < pts.length; i++) {
pts[i] = pt.get(cols[i]);
}
tf = TupleFactory.TUPLEN;
keyPType = (PType<Object>) (PType<?>) ptf.tuples(pts);
}
if (ptf == AvroTypeFamily.getInstance()) {
Schema s = createOrderedTupleSchema(keyPType, columnOrder);
keyPType = (PType<Object>) (PType<?>) Avros.generics(s);
byFn = new AvroGenericFn(cols, s);
} else {
byFn = new TupleKeyFn(cols, tf);
}
}
}
public MapFn<V, Object> getByFn() {
return byFn;
}
public PType<Object> getKeyType() {
return keyPType;
}
}
}
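// A speculative usage sketch (not part of the original file): extracting a sort key
// from a Pair<String, Integer> ordered descending by its second column. ColumnOrder
// columns are 1-based, hence by(2, ...); with a single sort column, KeyExtraction
// falls back to SingleKeyFn and the column's own PType.
class KeyExtractionSketch {
  static void demo() {
    PType<org.apache.crunch.Pair<String, Integer>> ptype =
        Avros.pairs(Avros.strings(), Avros.ints());
    ColumnOrder[] orders = { ColumnOrder.by(2, Order.DESCENDING) };
    SortFns.KeyExtraction<org.apache.crunch.Pair<String, Integer>> ke =
        new SortFns.KeyExtraction<org.apache.crunch.Pair<String, Integer>>(ptype, orders);
    MapFn<org.apache.crunch.Pair<String, Integer>, Object> byFn = ke.getByFn();
    PType<Object> keyType = ke.getKeyType(); // PType used to serialize the extracted key
  }
}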
| 2,905 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/lib
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/lib/sort/ReverseAvroComparator.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.lib.sort;
import org.apache.avro.Schema;
import org.apache.avro.io.BinaryData;
import org.apache.avro.mapred.AvroKey;
import org.apache.avro.reflect.ReflectData;
import org.apache.crunch.types.avro.AvroMode;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.io.RawComparator;
public class ReverseAvroComparator<T> extends Configured implements RawComparator<AvroKey<T>> {
private Schema schema;
private AvroMode mode;
@Override
public void setConf(Configuration conf) {
super.setConf(conf);
if (conf != null) {
schema = (new Schema.Parser()).parse(conf.get("crunch.schema"));
mode = AvroMode.fromShuffleConfiguration(conf);
}
}
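// Descending order comes from swapping the argument order in both compare()
// overloads below: the deserialized path compares o2 against o1, and the raw
// byte path passes the second buffer before the first.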
@Override
public int compare(AvroKey<T> o1, AvroKey<T> o2) {
return mode.getData().compare(o2.datum(), o1.datum(), schema);
}
@Override
public int compare(byte[] arg0, int arg1, int arg2, byte[] arg3, int arg4, int arg5) {
return BinaryData.compare(arg3, arg4, arg5, arg0, arg1, arg2, schema);
}
}
| 2,906 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/lib
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/lib/sort/ReverseWritableComparator.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.lib.sort;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.io.RawComparator;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableComparator;
import org.apache.hadoop.mapred.JobConf;
public class ReverseWritableComparator<T> extends Configured implements RawComparator<T> {
private RawComparator<T> comparator;
@SuppressWarnings("unchecked")
@Override
public void setConf(Configuration conf) {
super.setConf(conf);
if (conf != null) {
JobConf jobConf = new JobConf(conf);
comparator = WritableComparator.get(jobConf.getMapOutputKeyClass().asSubclass(WritableComparable.class));
}
}
@Override
public int compare(byte[] arg0, int arg1, int arg2, byte[] arg3, int arg4, int arg5) {
return safeNegate(comparator.compare(arg0, arg1, arg2, arg3, arg4, arg5));
}
@Override
public int compare(T o1, T o2) {
return safeNegate(comparator.compare(o1, o2));
}
/**
* @return an {@code int} definitely of the opposite sign as its argument. This is {@code -i}
* unless {@code i == Integer.MIN_VALUE} in which case it's {@code Integer.MAX_VALUE}
*/
private static int safeNegate(int i) {
return i == Integer.MIN_VALUE ? Integer.MAX_VALUE : -i;
}
}
| 2,907 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/lib
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/lib/sort/TotalOrderPartitioner.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.lib.sort;
import java.io.IOException;
import java.lang.reflect.Array;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Iterator;
import org.apache.avro.Schema;
import org.apache.avro.mapred.AvroKey;
import org.apache.crunch.io.CompositePathIterable;
import org.apache.crunch.io.avro.AvroFileReaderFactory;
import org.apache.crunch.io.seq.SeqFileReaderFactory;
import org.apache.crunch.types.writable.WritableDeepCopier;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.RawComparator;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Partitioner;
/**
* A partition-aware {@code Partitioner} instance that can work with either Avro or Writable-formatted
* keys.
*/
public class TotalOrderPartitioner<K, V> extends Partitioner<K, V> implements Configurable {
public static final String DEFAULT_PATH = "_partition.lst";
public static final String PARTITIONER_PATH =
"crunch.totalorderpartitioner.path";
private Configuration conf;
private Node<K> partitions;
@Override
public Configuration getConf() {
return conf;
}
@Override
public void setConf(Configuration conf) {
try {
this.conf = conf;
String parts = getPartitionFile(conf);
final Path partFile = new Path(parts);
final FileSystem fs = (DEFAULT_PATH.equals(parts))
? FileSystem.getLocal(conf) // assume in DistributedCache
: partFile.getFileSystem(conf);
Job job = new Job(conf);
Class<K> keyClass = (Class<K>)job.getMapOutputKeyClass();
RawComparator<K> comparator =
(RawComparator<K>) job.getSortComparator();
K[] splitPoints = readPartitions(fs, partFile, keyClass, conf, comparator);
int numReduceTasks = job.getNumReduceTasks();
if (splitPoints.length != numReduceTasks - 1) {
throw new IOException("Wrong number of partitions in keyset");
}
partitions = new BinarySearchNode(splitPoints, comparator);
} catch (IOException e) {
throw new IllegalArgumentException("Can't read partitions file", e);
}
}
@Override
public int getPartition(K key, V value, int modulo) {
return partitions.findPartition(key);
}
public static void setPartitionFile(Configuration conf, Path p) {
conf.set(PARTITIONER_PATH, p.toString());
}
public static String getPartitionFile(Configuration conf) {
return conf.get(PARTITIONER_PATH, DEFAULT_PATH);
}
@SuppressWarnings("unchecked") // map output key class
private K[] readPartitions(FileSystem fs, Path p, Class<K> keyClass,
Configuration conf, final RawComparator<K> comparator) throws IOException {
ArrayList<K> parts = new ArrayList<K>();
String schema = conf.get("crunch.schema");
if (schema != null) {
Schema s = (new Schema.Parser()).parse(schema);
AvroFileReaderFactory<K> a = new AvroFileReaderFactory<K>(s);
Iterator<K> iter = CompositePathIterable.create(fs, p, a).iterator();
while (iter.hasNext()) {
parts.add((K) new AvroKey<K>(iter.next()));
}
} else {
WritableDeepCopier wdc = new WritableDeepCopier(keyClass);
SeqFileReaderFactory<K> s = new SeqFileReaderFactory<K>(keyClass);
Iterator<K> iter = CompositePathIterable.create(fs, p, s).iterator();
while (iter.hasNext()) {
parts.add((K) wdc.deepCopy((Writable) iter.next()));
}
}
Collections.sort(parts, comparator);
return parts.toArray((K[])Array.newInstance(keyClass, parts.size()));
}
/**
* Interface to the partitioner to locate a key in the partition keyset.
*/
public interface Node<T> {
/**
* Locate partition in keyset K, such that [Ki..Ki+1) defines a partition,
* with implicit K0 = -inf, Kn = +inf, and |K| = #partitions - 1.
*/
int findPartition(T key);
}
public static class BinarySearchNode<K> implements Node<K> {
private final K[] splitPoints;
private final RawComparator<K> comparator;
public BinarySearchNode(K[] splitPoints, RawComparator<K> comparator) {
this.splitPoints = splitPoints;
this.comparator = comparator;
}
public int findPartition(K key) {
final int pos = Arrays.binarySearch(splitPoints, key, comparator) + 1;
return (pos < 0) ? -pos : pos;
}
}
}
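// A worked illustration (assumed, not in the original file) of BinarySearchNode's
// arithmetic: with split points {10, 20} and natural ordering, keys below 10 land
// in partition 0, keys in [10, 20) in partition 1, and keys >= 20 in partition 2.
class BinarySearchNodeSketch {
  static int demo(Integer key) {
    Integer[] splits = { 10, 20 };
    RawComparator<Integer> cmp = new RawComparator<Integer>() {
      @Override
      public int compare(Integer a, Integer b) { return a.compareTo(b); }
      @Override
      public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) {
        throw new UnsupportedOperationException("raw comparison not needed here");
      }
    };
    // Arrays.binarySearch returns (-(insertion point) - 1) on a miss; adding 1 and
    // negating maps misses onto the partition index of the enclosing range.
    return new TotalOrderPartitioner.BinarySearchNode<Integer>(splits, cmp).findPartition(key);
  }
}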
| 2,908 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/lib
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/lib/sort/TupleWritableComparator.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.lib.sort;
import java.util.List;
import com.google.common.collect.Lists;
import org.apache.crunch.lib.Sort.ColumnOrder;
import org.apache.crunch.lib.Sort.Order;
import org.apache.crunch.types.writable.TupleWritable;
import org.apache.crunch.types.writable.WritableType;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableComparator;
import com.google.common.base.Joiner;
public class TupleWritableComparator extends WritableComparator implements Configurable {
private static final String CRUNCH_ORDERING_PROPERTY = "crunch.ordering";
private Configuration conf;
private ColumnOrder[] columnOrders;
public TupleWritableComparator() {
super(TupleWritable.class, true);
}
public static void configureOrdering(Configuration conf, WritableType[] types, ColumnOrder[] columnOrders) {
List<String> ordering = Lists.newArrayList();
for (int i = 0; i < types.length; i++) {
ordering.add(columnOrders[i].order().name());
}
conf.set(CRUNCH_ORDERING_PROPERTY, Joiner.on(",").join(ordering));
}
@Override
public int compare(WritableComparable a, WritableComparable b) {
TupleWritable ta = (TupleWritable) a;
TupleWritable tb = (TupleWritable) b;
for (int index = 0; index < columnOrders.length; index++) {
int order = 1;
if (columnOrders[index].order() == Order.ASCENDING) {
order = 1;
} else if (columnOrders[index].order() == Order.DESCENDING) {
order = -1;
} else { // ignore
continue;
}
if (!ta.has(index) && !tb.has(index)) {
continue;
} else if (ta.has(index) && !tb.has(index)) {
return order;
} else if (!ta.has(index) && tb.has(index)) {
return -order;
} else {
Writable v1 = ta.get(index);
Writable v2 = tb.get(index);
if (v1 != v2 && (v1 != null && !v1.equals(v2))) {
if (v1 instanceof WritableComparable && v2 instanceof WritableComparable) {
int cmp = ((WritableComparable) v1).compareTo((WritableComparable) v2);
if (cmp != 0) {
return order * cmp;
}
} else {
int cmp = v1.hashCode() - v2.hashCode();
if (cmp != 0) {
return order * cmp;
}
}
}
}
}
return 0; // ordering using specified cols found no differences
}
@Override
public Configuration getConf() {
return conf;
}
@Override
public void setConf(Configuration conf) {
this.conf = conf;
if (conf != null) {
String ordering = conf.get(CRUNCH_ORDERING_PROPERTY);
String[] columnOrderNames = ordering.split(",");
columnOrders = new ColumnOrder[columnOrderNames.length];
for (int i = 0; i < columnOrders.length; i++) {
Order order = Order.valueOf(columnOrderNames[i]);
columnOrders[i] = ColumnOrder.by(i + 1, order);
}
}
}
}
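// A hedged usage sketch (not part of the original file): configuring a two-column
// ordering (first ascending, second descending) before the shuffle. Note that
// configureOrdering only uses the types array for its length.
class TupleOrderingSketch {
  static void demo(Configuration conf, WritableType[] types) {
    ColumnOrder[] orders = {
        ColumnOrder.by(1, Order.ASCENDING),
        ColumnOrder.by(2, Order.DESCENDING)
    };
    TupleWritableComparator.configureOrdering(conf, types, orders);
  }
}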
| 2,909 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/fn/IdentityFn.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.fn;
import org.apache.crunch.MapFn;
public class IdentityFn<T> extends MapFn<T, T> {
private static final IdentityFn<Object> INSTANCE = new IdentityFn<Object>();
@SuppressWarnings("unchecked")
public static <T> IdentityFn<T> getInstance() {
return (IdentityFn<T>) INSTANCE;
}
// Non-instantiable
private IdentityFn() {
}
@Override
public T map(T input) {
return input;
}
}
| 2,910 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/fn/FilterFns.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.fn;
import com.google.common.collect.ImmutableList;
import org.apache.crunch.FilterFn;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.TaskInputOutputContext;
import java.util.List;
/**
* A collection of pre-defined {@link FilterFn} implementations.
*/
public final class FilterFns {
// Note: We delegate to the deprecated implementation classes in FilterFn. When their
// time is up, we just move them here.
private FilterFns() {
// utility class, not for instantiation
}
/**
* Accept an entry if all of the given filters accept it, using short-circuit evaluation.
* @param fn1 The first function to delegate to
* @param fn2 The second function to delegate to
* @return The composed filter function
*/
public static <S> FilterFn<S> and(FilterFn<S> fn1, FilterFn<S> fn2) {
return new AndFn<S>(fn1, fn2);
}
/**
* Accept an entry if all of the given filters accept it, using short-circuit evaluation.
* @param fns The functions to delegate to (in the given order)
* @return The composed filter function
*/
public static <S> FilterFn<S> and(FilterFn<S>... fns) {
return new AndFn<S>(fns);
}
/**
* Accept an entry if at least one of the given filters accept it, using short-circuit evaluation.
* @param fn1 The first function to delegate to
* @param fn2 The second function to delegate to
* @return The composed filter function
*/
public static <S> FilterFn<S> or(FilterFn<S> fn1, FilterFn<S> fn2) {
return new OrFn<S>(fn1, fn2);
}
/**
* Accept an entry if at least one of the given filters accept it, using short-circuit evaluation.
* @param fns The functions to delegate to (in the given order)
* @return The composed filter function
*/
public static <S> FilterFn<S> or(FilterFn<S>... fns) {
return new OrFn<S>(fns);
}
/**
* Accept an entry if the given filter <em>does not</em> accept it.
* @param fn The function to delegate to
* @return The composed filter function
*/
public static <S> FilterFn<S> not(FilterFn<S> fn) {
return new NotFn<S>(fn);
}
/**
* Accept everything.
* @return A filter function that accepts everything.
*/
public static <S> FilterFn<S> ACCEPT_ALL() {
return new AcceptAllFn<S>();
}
/**
* Reject everything.
* @return A filter function that rejects everything.
*/
public static <S> FilterFn<S> REJECT_ALL() {
return not(new AcceptAllFn<S>());
}
private static class AndFn<S> extends FilterFn<S> {
private final List<FilterFn<S>> fns;
public AndFn(FilterFn<S>... fns) {
this.fns = ImmutableList.<FilterFn<S>> copyOf(fns);
}
@Override
public void configure(Configuration conf) {
for (FilterFn<S> fn : fns) {
fn.configure(conf);
}
}
@Override
public void setContext(TaskInputOutputContext<?, ?, ?, ?> context) {
for (FilterFn<S> fn : fns) {
fn.setContext(context);
}
}
@Override
public void initialize() {
for (FilterFn<S> fn : fns) {
fn.initialize();
}
}
@Override
public void cleanup() {
for (FilterFn<S> fn : fns) {
fn.cleanup();
}
}
@Override
public boolean accept(S input) {
for (FilterFn<S> fn : fns) {
if (!fn.accept(input)) {
return false;
}
}
return true;
}
@Override
public float scaleFactor() {
float scaleFactor = 1.0f;
for (FilterFn<S> fn : fns) {
scaleFactor *= fn.scaleFactor();
}
return scaleFactor;
}
}
private static class OrFn<S> extends FilterFn<S> {
private final List<FilterFn<S>> fns;
public OrFn(FilterFn<S>... fns) {
this.fns = ImmutableList.<FilterFn<S>> copyOf(fns);
}
@Override
public void configure(Configuration conf) {
for (FilterFn<S> fn : fns) {
fn.configure(conf);
}
}
@Override
public void setContext(TaskInputOutputContext<?, ?, ?, ?> context) {
for (FilterFn<S> fn : fns) {
fn.setContext(context);
}
}
@Override
public void initialize() {
for (FilterFn<S> fn : fns) {
fn.initialize();
}
}
@Override
public void cleanup() {
for (FilterFn<S> fn : fns) {
fn.cleanup();
}
}
@Override
public boolean accept(S input) {
for (FilterFn<S> fn : fns) {
if (fn.accept(input)) {
return true;
}
}
return false;
}
@Override
public float scaleFactor() {
float scaleFactor = 0.0f;
for (FilterFn<S> fn : fns) {
scaleFactor += fn.scaleFactor();
}
return Math.min(1.0f, scaleFactor);
}
}
private static class NotFn<S> extends FilterFn<S> {
private final FilterFn<S> base;
public NotFn(FilterFn<S> base) {
this.base = base;
}
@Override
public void configure(Configuration conf) {
base.configure(conf);
}
@Override
public void setContext(TaskInputOutputContext<?, ?, ?, ?> context) {
base.setContext(context);
}
@Override
public void initialize() {
base.initialize();
}
@Override
public void cleanup() {
base.cleanup();
}
@Override
public boolean accept(S input) {
return !base.accept(input);
}
@Override
public float scaleFactor() {
return 1.0f - base.scaleFactor();
}
}
private static class AcceptAllFn<S> extends FilterFn<S> {
@Override
public boolean accept(S input) {
return true;
}
@Override
public float scaleFactor() {
return 1.0f;
}
}
}
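// A brief composition sketch (assumed, not from the original file): accept strings
// that are non-null and non-empty. Because and() short-circuits, the second filter
// never sees a null input.
class FilterFnsSketch {
  static FilterFn<String> nonEmpty() {
    FilterFn<String> notNull = new FilterFn<String>() {
      @Override
      public boolean accept(String input) { return input != null; }
    };
    FilterFn<String> hasChars = new FilterFn<String>() {
      @Override
      public boolean accept(String input) { return !input.isEmpty(); }
    };
    return FilterFns.and(notNull, hasChars);
  }
}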
| 2,911 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/fn/Aggregators.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.fn;
import java.math.BigDecimal;
import java.math.BigInteger;
import java.util.LinkedList;
import java.util.List;
import java.util.Set;
import java.util.SortedSet;
import java.util.TreeSet;
import com.google.common.collect.SortedMultiset;
import com.google.common.collect.TreeMultiset;
import org.apache.crunch.Aggregator;
import org.apache.crunch.CombineFn;
import org.apache.crunch.Emitter;
import org.apache.crunch.PGroupedTable;
import org.apache.crunch.Pair;
import org.apache.crunch.Tuple;
import org.apache.crunch.Tuple3;
import org.apache.crunch.Tuple4;
import org.apache.crunch.TupleN;
import org.apache.crunch.types.PType;
import org.apache.crunch.util.Tuples;
import org.apache.hadoop.conf.Configuration;
import com.google.common.base.Joiner;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
/**
* A collection of pre-defined {@link org.apache.crunch.Aggregator}s.
*
* <p>The factory methods of this class return {@link org.apache.crunch.Aggregator}
* instances that you can use to combine the values of a {@link PGroupedTable}.
* In most cases, they turn a multimap (multiple entries per key) into a map (one
* entry per key).</p>
*
* <p><strong>Note</strong>: When using composed aggregators, like those built by the
* {@link #pairAggregator(Aggregator, Aggregator) pairAggregator()}
* factory method, you typically don't want to pass in the same child aggregator instance
* more than once, even if all child aggregators have the same type. In most cases, this is what you want:</p>
*
* <pre>
* PTable<K, Long> result = groupedTable.combineValues(
* pairAggregator(SUM_LONGS(), SUM_LONGS())
* );
* </pre>
*/
public final class Aggregators {
private Aggregators() {
// utility class, not for instantiation
}
/**
* Sum up all {@code long} values.
* @return The newly constructed instance
*/
public static Aggregator<Long> SUM_LONGS() {
return new SumLongs();
}
/**
* Sum up all {@code int} values.
* @return The newly constructed instance
*/
public static Aggregator<Integer> SUM_INTS() {
return new SumInts();
}
/**
* Sum up all {@code float} values.
* @return The newly constructed instance
*/
public static Aggregator<Float> SUM_FLOATS() {
return new SumFloats();
}
/**
* Sum up all {@code double} values.
* @return The newly constructed instance
*/
public static Aggregator<Double> SUM_DOUBLES() {
return new SumDoubles();
}
/**
* Sum up all {@link BigInteger} values.
* @return The newly constructed instance
*/
public static Aggregator<BigInteger> SUM_BIGINTS() {
return new SumBigInts();
}
/**
* Sum up all {@link BigDecimal} values.
* @return The newly constructed instance
*/
public static Aggregator<BigDecimal> SUM_BIGDECIMALS() {
return new SumBigDecimals();
}
/**
* Return the maximum of all given {@link Comparable} values.
* @return The newly constructed instance
*/
public static <C extends Comparable<C>> Aggregator<C> MAX_COMPARABLES() {
return new MaxComparables<C>();
}
/**
* Return the maximum of all given {@code long} values.
* @return The newly constructed instance
*/
public static Aggregator<Long> MAX_LONGS() {
return new MaxComparables<Long>();
}
/**
* Return the {@code n} largest {@code long} values (or fewer if there are fewer
* values than {@code n}).
* @param n The number of values to return
* @return The newly constructed instance
*/
public static Aggregator<Long> MAX_LONGS(int n) {
return new MaxNAggregator<Long>(n);
}
/**
* Return the maximum of all given {@code int} values.
* @return The newly constructed instance
*/
public static Aggregator<Integer> MAX_INTS() {
return new MaxComparables<Integer>();
}
/**
* Return the {@code n} largest {@code int} values (or fewer if there are fewer
* values than {@code n}).
* @param n The number of values to return
* @return The newly constructed instance
*/
public static Aggregator<Integer> MAX_INTS(int n) {
return new MaxNAggregator<Integer>(n);
}
/**
* Return the maximum of all given {@code float} values.
* @return The newly constructed instance
*/
public static Aggregator<Float> MAX_FLOATS() {
return new MaxComparables<Float>();
}
/**
* Return the {@code n} largest {@code float} values (or fewer if there are fewer
* values than {@code n}).
* @param n The number of values to return
* @return The newly constructed instance
*/
public static Aggregator<Float> MAX_FLOATS(int n) {
return new MaxNAggregator<Float>(n);
}
/**
* Return the maximum of all given {@code double} values.
* @return The newly constructed instance
*/
public static Aggregator<Double> MAX_DOUBLES() {
return new MaxComparables<Double>();
}
/**
* Return the {@code n} largest {@code double} values (or fewer if there are fewer
* values than {@code n}).
* @param n The number of values to return
* @return The newly constructed instance
*/
public static Aggregator<Double> MAX_DOUBLES(int n) {
return new MaxNAggregator<Double>(n);
}
/**
* Return the maximum of all given {@link BigInteger} values.
* @return The newly constructed instance
*/
public static Aggregator<BigInteger> MAX_BIGINTS() {
return new MaxComparables<BigInteger>();
}
/**
* Return the {@code n} largest {@link BigInteger} values (or fewer if there are fewer
* values than {@code n}).
* @param n The number of values to return
* @return The newly constructed instance
*/
public static Aggregator<BigInteger> MAX_BIGINTS(int n) {
return new MaxNAggregator<BigInteger>(n);
}
/**
* Return the maximum of all given {@link BigDecimal} values.
* @return The newly constructed instance
*/
public static Aggregator<BigDecimal> MAX_BIGDECIMALS() {
return new MaxComparables<BigDecimal>();
}
/**
* Return the {@code n} largest {@link BigDecimal} values (or fewer if there are fewer
* values than {@code n}).
* @param n The number of values to return
* @return The newly constructed instance
*/
public static Aggregator<BigDecimal> MAX_BIGDECIMALS(int n) {
return new MaxNAggregator<BigDecimal>(n);
}
/**
* Return the {@code n} largest values (or fewer if there are fewer
* values than {@code n}).
* @param n The number of values to return
* @param cls The type of the values to aggregate (must implement {@link Comparable}!)
* @return The newly constructed instance
*/
public static <V extends Comparable<V>> Aggregator<V> MAX_N(int n, Class<V> cls) {
return new MaxNAggregator<V>(n);
}
/**
* Return the {@code n} largest unique values (or fewer if there are fewer
* values than {@code n}).
* @param n The number of values to return
* @param cls The type of the values to aggregate (must implement {@link Comparable}!)
* @return The newly constructed instance
*/
public static <V extends Comparable<V>> Aggregator<V> MAX_UNIQUE_N(int n, Class<V> cls) {
return new MaxUniqueNAggregator<V>(n);
}
/**
* Return the minimum of all given {@link Comparable} values.
* @return The newly constructed instance
*/
public static <C extends Comparable<C>> Aggregator<C> MIN_COMPARABLES() {
return new MinComparables<C>();
}
/**
* Return the minimum of all given {@code long} values.
* @return The newly constructed instance
*/
public static Aggregator<Long> MIN_LONGS() {
return new MinComparables<Long>();
}
/**
* Return the {@code n} smallest {@code long} values (or fewer if there are fewer
* values than {@code n}).
* @param n The number of values to return
* @return The newly constructed instance
*/
public static Aggregator<Long> MIN_LONGS(int n) {
return new MinNAggregator<Long>(n);
}
/**
* Return the minimum of all given {@code int} values.
* @return The newly constructed instance
*/
public static Aggregator<Integer> MIN_INTS() {
return new MinComparables<Integer>();
}
/**
* Return the {@code n} smallest {@code int} values (or fewer if there are fewer
* values than {@code n}).
* @param n The number of values to return
* @return The newly constructed instance
*/
public static Aggregator<Integer> MIN_INTS(int n) {
return new MinNAggregator<Integer>(n);
}
/**
* Return the minimum of all given {@code float} values.
* @return The newly constructed instance
*/
public static Aggregator<Float> MIN_FLOATS() {
return new MinComparables<Float>();
}
/**
* Return the {@code n} smallest {@code float} values (or fewer if there are fewer
* values than {@code n}).
* @param n The number of values to return
* @return The newly constructed instance
*/
public static Aggregator<Float> MIN_FLOATS(int n) {
return new MinNAggregator<Float>(n);
}
/**
* Return the minimum of all given {@code double} values.
* @return The newly constructed instance
*/
public static Aggregator<Double> MIN_DOUBLES() {
return new MinComparables<Double>();
}
/**
* Return the {@code n} smallest {@code double} values (or fewer if there are fewer
* values than {@code n}).
* @param n The number of values to return
* @return The newly constructed instance
*/
public static Aggregator<Double> MIN_DOUBLES(int n) {
return new MinNAggregator<Double>(n);
}
/**
* Return the minimum of all given {@link BigInteger} values.
* @return The newly constructed instance
*/
public static Aggregator<BigInteger> MIN_BIGINTS() {
return new MinComparables<BigInteger>();
}
/**
* Return the {@code n} smallest {@link BigInteger} values (or fewer if there are fewer
* values than {@code n}).
* @param n The number of values to return
* @return The newly constructed instance
*/
public static Aggregator<BigInteger> MIN_BIGINTS(int n) {
return new MinNAggregator<BigInteger>(n);
}
/**
* Return the minimum of all given {@link BigDecimal} values.
* @return The newly constructed instance
*/
public static Aggregator<BigDecimal> MIN_BIGDECIMALS() {
return new MinComparables<BigDecimal>();
}
/**
* Return the {@code n} smallest {@link BigDecimal} values (or fewer if there are fewer
* values than {@code n}).
* @param n The number of values to return
* @return The newly constructed instance
*/
public static Aggregator<BigDecimal> MIN_BIGDECIMALS(int n) {
return new MinNAggregator<BigDecimal>(n);
}
/**
* Return the {@code n} smallest values (or fewer if there are fewer
* values than {@code n}).
* @param n The number of values to return
* @param cls The type of the values to aggregate (must implement {@link Comparable}!)
* @return The newly constructed instance
*/
public static <V extends Comparable<V>> Aggregator<V> MIN_N(int n, Class<V> cls) {
return new MinNAggregator<V>(n);
}
/**
* Returns the {@code n} smallest unique values (or fewer if there are fewer unique values than {@code n}).
* @param n The number of values to return
* @param cls The type of the values to aggregate (must implement {@link Comparable}!)
* @return The newly constructed instance
*/
public static <V extends Comparable<V>> Aggregator<V> MIN_UNIQUE_N(int n, Class<V> cls) {
return new MinUniqueNAggregator<V>(n);
}
/**
* Return the first {@code n} values (or fewer if there are fewer values than {@code n}).
*
* @param n The number of values to return
* @return The newly constructed instance
*/
public static <V> Aggregator<V> FIRST_N(int n) {
return new FirstNAggregator<V>(n);
}
/**
* Return the last {@code n} values (or fewer if there are fewer values than {@code n}).
*
* @param n The number of values to return
* @return The newly constructed instance
*/
public static <V> Aggregator<V> LAST_N(int n) {
return new LastNAggregator<V>(n);
}
/**
* Concatenate strings, with a separator between strings. There
* is no length limit for the concatenated string.
*
* <p><em>Note: String concatenation is not commutative, which means the
* result of the aggregation is not deterministic!</em></p>
*
* @param separator
*          the separator to insert between the strings
* @param skipNull
*          whether to skip null values; if set to false and a null value
*          is encountered, a NullPointerException is thrown
* @return The newly constructed instance
*/
public static Aggregator<String> STRING_CONCAT(String separator, boolean skipNull) {
return new StringConcatAggregator(separator, skipNull);
}
/**
* Concatenate strings, with a separator between strings. You can specify
* the maximum length of the output string and of the input strings, if
* they are > 0. If a value is <= 0, there is no limit.
*
* <p>Any string that is too long (or that would make the output too
* long) is silently discarded.</p>
*
* <p><em>Note: String concatenation is not commutative, which means the
* result of the aggregation is not deterministic!</em></p>
*
* @param separator
*          the separator to insert between the strings
* @param skipNull
*          whether to skip null values; if set to false and a null value
*          is encountered, a NullPointerException is thrown
* @param maxOutputLength
*          the maximum length of the output string. If it is set <= 0,
*          there is no limit. The output string will have fewer than
*          maxOutputLength characters.
* @param maxInputLength
*          the maximum length of the input strings. If it is set <= 0,
*          there is no limit. Only input strings with fewer than
*          maxInputLength characters are concatenated.
* @return The newly constructed instance
*/
public static Aggregator<String> STRING_CONCAT(String separator, boolean skipNull,
long maxOutputLength, long maxInputLength) {
return new StringConcatAggregator(separator, skipNull, maxOutputLength, maxInputLength);
}
/**
* Collect the unique elements of the input, as defined by the {@code equals} method for
* the input objects. No guarantees are made about the order in which the final elements
* will be returned.
*
* @return The newly constructed instance
*/
public static <V> Aggregator<V> UNIQUE_ELEMENTS() {
return new SetAggregator<V>();
}
/**
* Collect a sample of unique elements from the input, where 'unique' is defined by
* the {@code equals} method for the input objects. No guarantees are made about which
* elements will be returned, simply that there will not be any more than the given sample
* size for any key.
*
* @param maximumSampleSize The maximum number of unique elements to return per key
* @return The newly constructed instance
*/
public static <V> Aggregator<V> SAMPLE_UNIQUE_ELEMENTS(int maximumSampleSize) {
return new SetAggregator<V>(maximumSampleSize);
}
/**
* Apply separate aggregators to each component of a {@link Pair}.
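   *
   * <p>For example, to compute the sum and the maximum of long values in one
   * pass (a sketch; assumes the {@code SUM_LONGS()} and {@code MAX_LONGS()}
   * factories from this class):</p>
   * <pre>{@code
   *   Aggregator<Pair<Long, Long>> sumAndMax =
   *       Aggregators.pairAggregator(Aggregators.SUM_LONGS(), Aggregators.MAX_LONGS());
   * }</pre>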
*/
public static <V1, V2> Aggregator<Pair<V1, V2>> pairAggregator(
Aggregator<V1> a1, Aggregator<V2> a2) {
return new PairAggregator<V1, V2>(a1, a2);
}
/**
* Apply separate aggregators to each component of a {@link Tuple3}.
*/
public static <V1, V2, V3> Aggregator<Tuple3<V1, V2, V3>> tripAggregator(
Aggregator<V1> a1, Aggregator<V2> a2, Aggregator<V3> a3) {
return new TripAggregator<V1, V2, V3>(a1, a2, a3);
}
/**
* Apply separate aggregators to each component of a {@link Tuple4}.
*/
public static <V1, V2, V3, V4> Aggregator<Tuple4<V1, V2, V3, V4>> quadAggregator(
Aggregator<V1> a1, Aggregator<V2> a2, Aggregator<V3> a3, Aggregator<V4> a4) {
return new QuadAggregator<V1, V2, V3, V4>(a1, a2, a3, a4);
}
/**
* Apply separate aggregators to each component of a {@link Tuple}.
*/
public static Aggregator<TupleN> tupleAggregator(Aggregator<?>... aggregators) {
return new TupleNAggregator(aggregators);
}
/**
* Wrap a {@link CombineFn} adapter around the given aggregator.
*
* @param aggregator The instance to wrap
* @return A {@link CombineFn} delegating to {@code aggregator}
*
* @deprecated use the safer {@link #toCombineFn(Aggregator, PType)} instead.
*/
@Deprecated
public static final <K, V> CombineFn<K, V> toCombineFn(Aggregator<V> aggregator) {
return toCombineFn(aggregator, null);
}
/**
* Wrap a {@link CombineFn} adapter around the given aggregator.
*
* @param aggregator The instance to wrap
* @param ptype The PType of the aggregated value (for detaching complex objects)
* @return A {@link CombineFn} delegating to {@code aggregator}
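   *
   * <p>Sketch (hedged; assumes the {@code Writables.longs()} PType factory):</p>
   * <pre>{@code
   *   CombineFn<String, Long> fn =
   *       Aggregators.toCombineFn(Aggregators.SUM_LONGS(), Writables.longs());
   * }</pre>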
*/
public static final <K, V> CombineFn<K, V> toCombineFn(Aggregator<V> aggregator, PType<V> ptype) {
return new AggregatorCombineFn<K, V>(aggregator, ptype);
}
/**
* Base class for aggregators that do not require any initialization.
*/
public static abstract class SimpleAggregator<T> implements Aggregator<T> {
@Override
public void initialize(Configuration conf) {
// No-op
}
}
/**
* A {@code CombineFn} that delegates all of the actual work to an
* {@code Aggregator} instance.
*/
private static class AggregatorCombineFn<K, V> extends CombineFn<K, V> {
private final Aggregator<V> aggregator;
private final PType<V> ptype;
public AggregatorCombineFn(Aggregator<V> aggregator, PType<V> ptype) {
this.aggregator = aggregator;
this.ptype = ptype;
}
@Override
public void initialize() {
aggregator.initialize(getConfiguration());
if (ptype != null) {
ptype.initialize(getConfiguration());
}
}
@Override
public void process(Pair<K, Iterable<V>> input, Emitter<Pair<K, V>> emitter) {
aggregator.reset();
for (V v : input.second()) {
aggregator.update(ptype == null ? v : ptype.getDetachedValue(v));
}
for (V v : aggregator.results()) {
emitter.emit(Pair.of(input.first(), v));
}
}
}
private static class SumLongs extends SimpleAggregator<Long> {
private long sum = 0;
@Override
public void reset() {
sum = 0;
}
@Override
public void update(Long next) {
sum += next;
}
@Override
public Iterable<Long> results() {
return ImmutableList.of(sum);
}
}
private static class SumInts extends SimpleAggregator<Integer> {
private int sum = 0;
@Override
public void reset() {
sum = 0;
}
@Override
public void update(Integer next) {
sum += next;
}
@Override
public Iterable<Integer> results() {
return ImmutableList.of(sum);
}
}
private static class SumFloats extends SimpleAggregator<Float> {
private float sum = 0;
@Override
public void reset() {
sum = 0f;
}
@Override
public void update(Float next) {
sum += next;
}
@Override
public Iterable<Float> results() {
return ImmutableList.of(sum);
}
}
private static class SumDoubles extends SimpleAggregator<Double> {
private double sum = 0;
@Override
public void reset() {
      sum = 0.0;
}
@Override
public void update(Double next) {
sum += next;
}
@Override
public Iterable<Double> results() {
return ImmutableList.of(sum);
}
}
private static class SumBigInts extends SimpleAggregator<BigInteger> {
private BigInteger sum = BigInteger.ZERO;
@Override
public void reset() {
sum = BigInteger.ZERO;
}
@Override
public void update(BigInteger next) {
sum = sum.add(next);
}
@Override
public Iterable<BigInteger> results() {
return ImmutableList.of(sum);
}
}
private static class SumBigDecimals extends SimpleAggregator<BigDecimal> {
private BigDecimal sum = BigDecimal.ZERO;
@Override
public void reset() {
sum = BigDecimal.ZERO;
}
@Override
public void update(BigDecimal next) {
sum = sum.add(next);
}
@Override
public Iterable<BigDecimal> results() {
return ImmutableList.of(sum);
}
}
private static class MaxComparables<C extends Comparable<C>> extends SimpleAggregator<C> {
private C max = null;
@Override
public void reset() {
max = null;
}
@Override
public void update(C next) {
if (max == null || max.compareTo(next) < 0) {
max = next;
}
}
@Override
public Iterable<C> results() {
return ImmutableList.of(max);
}
}
private static class MinComparables<C extends Comparable<C>> extends SimpleAggregator<C> {
private C min = null;
@Override
public void reset() {
min = null;
}
@Override
public void update(C next) {
if (min == null || min.compareTo(next) > 0) {
min = next;
}
}
@Override
public Iterable<C> results() {
return ImmutableList.of(min);
}
}
private static class MaxNAggregator<V extends Comparable<V>> extends SimpleAggregator<V> {
private final int arity;
private transient SortedMultiset<V> elements;
public MaxNAggregator(int arity) {
this.arity = arity;
}
@Override
public void reset() {
if (elements == null) {
elements = TreeMultiset.create();
} else {
elements.clear();
}
}
@Override
public void update(V value) {
if (elements.size() < arity) {
elements.add(value);
} else if (value.compareTo(elements.firstEntry().getElement()) > 0) {
elements.remove(elements.firstEntry().getElement());
elements.add(value);
}
}
@Override
public Iterable<V> results() {
return ImmutableList.copyOf(elements);
}
}
private static class MaxUniqueNAggregator<V extends Comparable<V>> extends SimpleAggregator<V> {
private final int arity;
private transient SortedSet<V> elements;
public MaxUniqueNAggregator(int arity) {
this.arity = arity;
}
@Override
public void reset() {
if (elements == null) {
elements = new TreeSet<V>();
} else {
elements.clear();
}
}
@Override
public void update(V value) {
if (elements.size() < arity) {
elements.add(value);
} else if (!elements.contains(value) && value.compareTo(elements.first()) > 0) {
elements.remove(elements.first());
elements.add(value);
}
}
@Override
public Iterable<V> results() {
return ImmutableList.copyOf(elements);
}
}
private static class MinNAggregator<V extends Comparable<V>> extends SimpleAggregator<V> {
private final int arity;
private transient SortedMultiset<V> elements;
public MinNAggregator(int arity) {
this.arity = arity;
}
@Override
public void reset() {
if (elements == null) {
elements = TreeMultiset.create();
} else {
elements.clear();
}
}
@Override
public void update(V value) {
if (elements.size() < arity) {
elements.add(value);
} else if (value.compareTo(elements.lastEntry().getElement()) < 0) {
elements.remove(elements.lastEntry().getElement());
elements.add(value);
}
}
@Override
public Iterable<V> results() {
return ImmutableList.copyOf(elements);
}
}
private static class MinUniqueNAggregator<V extends Comparable<V>> extends SimpleAggregator<V> {
private final int arity;
private transient SortedSet<V> elements;
public MinUniqueNAggregator(int arity) {
this.arity = arity;
}
@Override
public void reset() {
if (elements == null) {
elements = new TreeSet<V>();
} else {
elements.clear();
}
}
@Override
public void update(V value) {
if (elements.size() < arity) {
elements.add(value);
} else if (!elements.contains(value) && value.compareTo(elements.last()) < 0) {
elements.remove(elements.last());
elements.add(value);
}
}
@Override
public Iterable<V> results() {
return ImmutableList.copyOf(elements);
}
}
private static class FirstNAggregator<V> extends SimpleAggregator<V> {
private final int arity;
private final List<V> elements;
public FirstNAggregator(int arity) {
this.arity = arity;
this.elements = Lists.newArrayList();
}
@Override
public void reset() {
elements.clear();
}
@Override
public void update(V value) {
if (elements.size() < arity) {
elements.add(value);
}
}
@Override
public Iterable<V> results() {
return ImmutableList.copyOf(elements);
}
}
private static class LastNAggregator<V> extends SimpleAggregator<V> {
private final int arity;
private final LinkedList<V> elements;
public LastNAggregator(int arity) {
this.arity = arity;
this.elements = Lists.newLinkedList();
}
@Override
public void reset() {
elements.clear();
}
@Override
public void update(V value) {
elements.add(value);
if (elements.size() == arity + 1) {
elements.removeFirst();
}
}
@Override
public Iterable<V> results() {
return ImmutableList.copyOf(elements);
}
}
private static class StringConcatAggregator extends SimpleAggregator<String> {
private final String separator;
private final boolean skipNulls;
private final long maxOutputLength;
private final long maxInputLength;
private long currentLength;
private final LinkedList<String> list = new LinkedList<String>();
private transient Joiner joiner;
public StringConcatAggregator(final String separator, final boolean skipNulls) {
this.separator = separator;
this.skipNulls = skipNulls;
this.maxInputLength = 0;
this.maxOutputLength = 0;
}
public StringConcatAggregator(final String separator, final boolean skipNull, final long maxOutputLength, final long maxInputLength) {
this.separator = separator;
this.skipNulls = skipNull;
this.maxOutputLength = maxOutputLength;
this.maxInputLength = maxInputLength;
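      // Start negative so that the first string, which is not preceded by a
      // separator, is not charged for one when its length is added in update().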
this.currentLength = -separator.length();
}
@Override
public void reset() {
if (joiner == null) {
joiner = skipNulls ? Joiner.on(separator).skipNulls() : Joiner.on(separator);
}
currentLength = -separator.length();
list.clear();
}
@Override
public void update(final String next) {
long length = (next == null) ? 0 : next.length() + separator.length();
if ((maxOutputLength > 0 && currentLength + length > maxOutputLength) ||
(maxInputLength > 0 && next != null && next.length() > maxInputLength)) {
return;
}
if (maxOutputLength > 0) {
currentLength += length;
}
list.add(next);
}
@Override
public Iterable<String> results() {
return ImmutableList.of(joiner.join(list));
}
}
private static abstract class TupleAggregator<T> implements Aggregator<T> {
private final List<Aggregator<Object>> aggregators;
@SuppressWarnings("unchecked")
public TupleAggregator(Aggregator<?>... aggregators) {
this.aggregators = Lists.newArrayList();
for (Aggregator<?> a : aggregators) {
this.aggregators.add((Aggregator<Object>) a);
}
}
@Override
public void initialize(Configuration configuration) {
for (Aggregator<?> a : aggregators) {
a.initialize(configuration);
}
}
@Override
public void reset() {
for (Aggregator<?> a : aggregators) {
a.reset();
}
}
protected void updateTuple(Tuple t) {
for (int i = 0; i < aggregators.size(); i++) {
aggregators.get(i).update(t.get(i));
}
}
protected Iterable<Object> results(int index) {
return aggregators.get(index).results();
}
}
private static class PairAggregator<V1, V2> extends TupleAggregator<Pair<V1, V2>> {
public PairAggregator(Aggregator<V1> a1, Aggregator<V2> a2) {
super(a1, a2);
}
@Override
public void update(Pair<V1, V2> value) {
updateTuple(value);
}
@SuppressWarnings("unchecked")
@Override
public Iterable<Pair<V1, V2>> results() {
return new Tuples.PairIterable<V1, V2>((Iterable<V1>) results(0), (Iterable<V2>) results(1));
}
}
private static class TripAggregator<A, B, C> extends TupleAggregator<Tuple3<A, B, C>> {
public TripAggregator(Aggregator<A> a1, Aggregator<B> a2, Aggregator<C> a3) {
super(a1, a2, a3);
}
@Override
public void update(Tuple3<A, B, C> value) {
updateTuple(value);
}
@SuppressWarnings("unchecked")
@Override
public Iterable<Tuple3<A, B, C>> results() {
return new Tuples.TripIterable<A, B, C>((Iterable<A>) results(0), (Iterable<B>) results(1),
(Iterable<C>) results(2));
}
}
private static class QuadAggregator<A, B, C, D> extends TupleAggregator<Tuple4<A, B, C, D>> {
public QuadAggregator(Aggregator<A> a1, Aggregator<B> a2, Aggregator<C> a3, Aggregator<D> a4) {
super(a1, a2, a3, a4);
}
@Override
public void update(Tuple4<A, B, C, D> value) {
updateTuple(value);
}
@SuppressWarnings("unchecked")
@Override
public Iterable<Tuple4<A, B, C, D>> results() {
return new Tuples.QuadIterable<A, B, C, D>((Iterable<A>) results(0), (Iterable<B>) results(1),
(Iterable<C>) results(2), (Iterable<D>) results(3));
}
}
private static class TupleNAggregator extends TupleAggregator<TupleN> {
private final int size;
public TupleNAggregator(Aggregator<?>... aggregators) {
super(aggregators);
size = aggregators.length;
}
@Override
public void update(TupleN value) {
updateTuple(value);
}
@Override
public Iterable<TupleN> results() {
Iterable<?>[] iterables = new Iterable[size];
for (int i = 0; i < size; i++) {
iterables[i] = results(i);
}
return new Tuples.TupleNIterable(iterables);
}
}
private static class SetAggregator<V> extends SimpleAggregator<V> {
private final Set<V> elements;
private final int sizeLimit;
public SetAggregator() {
this(-1);
}
public SetAggregator(int sizeLimit) {
this.elements = Sets.newHashSet();
this.sizeLimit = sizeLimit;
}
@Override
public void reset() {
elements.clear();
}
@Override
public void update(V value) {
if (sizeLimit == -1 || elements.size() < sizeLimit) {
elements.add(value);
}
}
@Override
public Iterable<V> results() {
return ImmutableList.copyOf(elements);
}
}
}
| 2,912 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/fn/CompositeMapFn.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.fn;
import org.apache.crunch.Emitter;
import org.apache.crunch.MapFn;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.TaskInputOutputContext;
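/**
 * A {@code MapFn} that applies two {@code MapFn}s in sequence: each input is
 * passed through {@code first}, and the result is passed through {@code second}.
 */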
public class CompositeMapFn<R, S, T> extends MapFn<R, T> {
private final MapFn<R, S> first;
private final MapFn<S, T> second;
public CompositeMapFn(MapFn<R, S> first, MapFn<S, T> second) {
this.first = first;
this.second = second;
}
@Override
public void setConfiguration(Configuration conf) {
this.first.setConfiguration(conf);
this.second.setConfiguration(conf);
}
@Override
public void setContext(TaskInputOutputContext<?, ?, ?, ?> context) {
first.setContext(context);
second.setContext(context);
}
@Override
public void initialize() {
first.initialize();
second.initialize();
}
public MapFn<R, S> getFirst() {
return first;
}
public MapFn<S, T> getSecond() {
return second;
}
@Override
public T map(R input) {
return second.map(first.map(input));
}
@Override
public void cleanup(Emitter<T> emitter) {
first.cleanup(null);
second.cleanup(null);
}
@Override
public void configure(Configuration conf) {
first.configure(conf);
second.configure(conf);
}
@Override
public float scaleFactor() {
return first.scaleFactor() * second.scaleFactor();
}
}
| 2,913 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/fn/PairMapFn.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.fn;
import org.apache.crunch.Emitter;
import org.apache.crunch.MapFn;
import org.apache.crunch.Pair;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.TaskInputOutputContext;
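/**
 * A {@code MapFn} over {@code Pair}s that applies one {@code MapFn} to the
 * first element of each pair and another {@code MapFn} to the second element.
 */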
public class PairMapFn<K, V, S, T> extends MapFn<Pair<K, V>, Pair<S, T>> {
private MapFn<K, S> keys;
private MapFn<V, T> values;
public PairMapFn(MapFn<K, S> keys, MapFn<V, T> values) {
this.keys = keys;
this.values = values;
}
@Override
public void configure(Configuration conf) {
keys.configure(conf);
values.configure(conf);
}
@Override
public void setConfiguration(Configuration conf) {
keys.setConfiguration(conf);
values.setConfiguration(conf);
}
@Override
public void setContext(TaskInputOutputContext<?, ?, ?, ?> context) {
keys.setContext(context);
values.setContext(context);
}
@Override
public void initialize() {
keys.initialize();
values.initialize();
}
@Override
public float scaleFactor() {
return (keys.scaleFactor() + values.scaleFactor()) / 2.0f;
}
@Override
public Pair<S, T> map(Pair<K, V> input) {
return Pair.of(keys.map(input.first()), values.map(input.second()));
}
@Override
public void cleanup(Emitter<Pair<S, T>> emitter) {
keys.cleanup(null);
values.cleanup(null);
}
}
| 2,914 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/fn/SwapFn.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.fn;
import org.apache.crunch.MapFn;
import org.apache.crunch.Pair;
import org.apache.crunch.types.PTableType;
import org.apache.crunch.types.PType;
/**
* Swap the elements of a {@code Pair} type.
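 *
 * <p>A usage sketch (hedged; {@code counts} is an assumed {@code PTable<String, Long>}):</p>
 * <pre>{@code
 *   PTable<Long, String> inverted = counts.parallelDo(
 *       new SwapFn<String, Long>(), SwapFn.tableType(counts.getPTableType()));
 * }</pre>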
*/
public class SwapFn<V1, V2> extends MapFn<Pair<V1, V2>, Pair<V2, V1>> {
public static <V1, V2> PType<Pair<V2, V1>> ptype(PType<Pair<V1, V2>> pt) {
return pt.getFamily().pairs(pt.getSubTypes().get(1), pt.getSubTypes().get(0));
}
public static <K, V> PTableType<V, K> tableType(PTableType<K, V> ptt) {
return ptt.getFamily().tableOf(ptt.getValueType(), ptt.getKeyType());
}
@Override
public Pair<V2, V1> map(Pair<V1, V2> input) {
if (input == null) {
return null;
}
return Pair.of(input.second(), input.first());
}
}
| 2,915 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/fn/ExtractKeyFn.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.fn;
import org.apache.crunch.MapFn;
import org.apache.crunch.Pair;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.TaskInputOutputContext;
/**
* Wrapper function for converting a key-from-value extractor {@code MapFn<V, K>} into a
* key-value pair extractor that is used to convert from a {@code PCollection<V>} to a
* {@code PTable<K, V>}.
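 *
 * <p>A sketch (hedged; {@code User} and {@code getId()} are illustrative):</p>
 * <pre>{@code
 *   ExtractKeyFn<String, User> byId = new ExtractKeyFn<String, User>(
 *       new MapFn<User, String>() {
 *         @Override
 *         public String map(User user) { return user.getId(); }
 *       });
 * }</pre>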
*/
public class ExtractKeyFn<K, V> extends MapFn<V, Pair<K, V>> {
private final MapFn<V, K> mapFn;
public ExtractKeyFn(MapFn<V, K> mapFn) {
this.mapFn = mapFn;
}
@Override
public void setConfiguration(Configuration conf) {
mapFn.setConfiguration(conf);
}
@Override
public void setContext(TaskInputOutputContext<?, ?, ?, ?> context) {
mapFn.setContext(context);
}
@Override
public void configure(Configuration conf) {
mapFn.configure(conf);
}
@Override
public void initialize() {
mapFn.initialize();
}
@Override
public float scaleFactor() {
return 1.0f + mapFn.scaleFactor();
}
@Override
public Pair<K, V> map(V input) {
return Pair.of(mapFn.map(input), input);
}
}
| 2,916 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/fn/package-info.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Commonly used functions for manipulating collections.
*/
package org.apache.crunch.fn;
| 2,917 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/materialize/MaterializableMap.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.materialize;
import java.io.Serializable;
import java.util.AbstractMap;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import org.apache.crunch.Pair;
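/**
 * A {@code Map} view over an iterable of key/value {@code Pair}s that defers
 * materialization of the backing data until the map is first queried.
 */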
public class MaterializableMap<K, V> extends AbstractMap<K, V> implements Serializable {
private transient Iterable<Pair<K, V>> iterable;
private Map<K, V> delegate;
public MaterializableMap(Iterable<Pair<K, V>> iterable) {
this.iterable = iterable;
}
private Map<K, V> delegate() {
if (delegate == null) {
delegate = new HashMap<K, V>();
for (Pair<K, V> x : iterable) {
delegate.put(x.first(), x.second());
}
}
return delegate;
}
@Override
public Set<Map.Entry<K, V>> entrySet() {
return delegate().entrySet();
}
@Override
public V get(Object key) {
return delegate().get(key);
}
@Override
public boolean containsKey(Object key) {
return delegate().containsKey(key);
}
@Override
public int hashCode() {
return delegate().hashCode();
}
@Override
public boolean equals(Object other) {
return delegate().equals(other);
}
}
| 2,918 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/materialize/MaterializableIterable.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.materialize;
import java.io.IOException;
import java.util.Iterator;
import com.google.common.collect.Iterators;
import org.apache.crunch.CrunchRuntimeException;
import org.apache.crunch.Pipeline;
import org.apache.crunch.PipelineResult;
import org.apache.crunch.SourceTarget;
import org.apache.crunch.io.PathTarget;
import org.apache.crunch.io.ReadableSource;
import org.apache.crunch.io.impl.FileSourceImpl;
import org.apache.hadoop.fs.Path;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* A reference to the materialized output of a {@code PCollection} created
* by a subclass of {@code DistributedPipeline}.
*/
public class MaterializableIterable<E> implements Iterable<E> {
private static final Logger LOG = LoggerFactory.getLogger(MaterializableIterable.class);
private final Pipeline pipeline;
private final ReadableSource<E> source;
private Iterable<E> materialized;
private PipelineResult result;
public MaterializableIterable(Pipeline pipeline, ReadableSource<E> source) {
this.pipeline = pipeline;
this.source = source;
this.materialized = null;
}
/**
* Returns the backing {@code ReadableSource} for this instance.
*/
public ReadableSource<E> getSource() {
return source;
}
/**
* Indicates whether this instance is backed by a {@code SourceTarget}.
*/
public boolean isSourceTarget() {
return (source instanceof SourceTarget);
}
/**
* Returns the {@code Path} that contains this data, or null if no such path exists.
*/
public Path getPath() {
if (source instanceof FileSourceImpl) {
return ((FileSourceImpl) source).getPath();
}
if (source instanceof PathTarget) {
return ((PathTarget) source).getPath();
}
return null;
}
/**
   * Returns the {@code PipelineResult} that was generated by the pipeline
   * execution that created this data. The result will be non-null only if an
   * actual pipeline execution was performed to generate this data, and only
   * if this method is called after the data from this Iterable has been
   * retrieved.
*/
public PipelineResult getPipelineResult() {
return result;
}
@Override
public Iterator<E> iterator() {
if (materialized == null) {
this.result = pipeline.run();
if (result.succeeded() || !pipeline.getConfiguration().getBoolean("crunch.empty.materialize.on.failure", false)) {
materialize();
} else {
LOG.error("Pipeline run failed, returning empty iterator");
return Iterators.emptyIterator();
}
}
return materialized.iterator();
}
public void materialize() {
try {
materialized = source.read(pipeline.getConfiguration());
} catch (IOException e) {
LOG.error("Could not materialize: {}", source, e);
throw new CrunchRuntimeException(e);
}
}
}
| 2,919 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/materialize
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/materialize/pobject/CollectionPObject.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.materialize.pobject;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Iterator;
import org.apache.crunch.PCollection;
/**
* A concrete implementation of {@link org.apache.crunch.materialize.pobject.PObjectImpl} whose
* value is a Java {@link java.util.Collection} containing the elements of the underlying {@link
* PCollection} for this {@link org.apache.crunch.PObject}.
*
* @param <S> The value type for elements contained in the {@code Collection} value encapsulated
* by this {@code PObject}.
*/
public class CollectionPObject<S> extends PObjectImpl<S, Collection<S>> {
/**
* Constructs a new instance of this {@code PObject} implementation.
*
* @param collect The backing {@code PCollection} for this {@code PObject}.
*/
public CollectionPObject(PCollection<S> collect) {
super(collect);
}
/** {@inheritDoc} */
@Override
public Collection<S> process(Iterable<S> input) {
Collection<S> target = new ArrayList<S>();
Iterator<S> itr = input.iterator();
while (itr.hasNext()) {
target.add(itr.next());
}
return target;
}
}
| 2,920 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/materialize
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/materialize/pobject/FirstElementPObject.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.materialize.pobject;
import java.util.Iterator;
import org.apache.crunch.PCollection;
/**
* A concrete implementation of {@link PObjectImpl} that uses the first element in the backing
* {@link PCollection} as the {@link org.apache.crunch.PObject} value.
*
* @param <T> The value type of this {@code PObject}.
*/
public class FirstElementPObject<T> extends PObjectImpl<T, T> {
private T defaultValue;
/**
* Constructs a new instance of this {@code PObject} implementation.
*
* @param collect The backing {@code PCollection} for this {@code PObject}.
*/
public FirstElementPObject(PCollection<T> collect) {
this(collect, null);
}
/**
* Constructs a new instance of this {@code PObject} implementation.
*
* @param collect The backing {@code PCollection} for this {@code PObject}.
* @param defaultValue The value to return if the backing PCollection is empty.
*/
public FirstElementPObject(PCollection<T> collect, T defaultValue) {
super(collect);
this.defaultValue = defaultValue;
}
/** {@inheritDoc} */
@Override
public T process(Iterable<T> input) {
Iterator<T> itr = input.iterator();
if (itr.hasNext()) {
return itr.next();
}
return defaultValue;
}
}
| 2,921 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/materialize
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/materialize/pobject/MapPObject.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.materialize.pobject;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import org.apache.crunch.PCollection;
import org.apache.crunch.Pair;
/**
* A concrete implementation of {@link PObjectImpl} whose
* value is a Java {@link Map}. The underlying {@link PCollection} for this
* {@link org.apache.crunch.PObject} must contain {@link Pair}s of values. The
* first element of the pair will be used as the map key, while the second element will be used
 * as the map value. Note that the contents of the underlying {@code PCollection} may not be
 * fully reflected in the returned {@code Map}, since a single key may be mapped to several values in
* the underlying {@code PCollection}, and only one of those values will appear in the {@code
* Map} encapsulated by this {@code PObject}.
*
* @param <K> The type of keys for the Map.
* @param <V> The type of values for the Map.
*/
public class MapPObject<K, V> extends PObjectImpl<Pair<K, V>, Map<K, V>> {
/**
* Constructs a new instance of this {@code PObject} implementation.
*
* @param collect The backing {@code PCollection} for this {@code PObject}.
*/
public MapPObject(PCollection<Pair<K, V>> collect) {
super(collect);
}
/** {@inheritDoc} */
@Override
public Map<K, V> process(Iterable<Pair<K, V>> input) {
Map<K, V> target = new HashMap<K, V>();
Iterator<Pair<K, V>> itr = input.iterator();
while (itr.hasNext()) {
Pair<K, V> pair = itr.next();
target.put(pair.first(), pair.second());
}
return target;
}
}
| 2,922 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/materialize
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/materialize/pobject/PObjectImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.materialize.pobject;
import org.apache.crunch.PCollection;
import org.apache.crunch.PObject;
/**
* An abstract implementation of {@link PObject} that is backed by a {@link PCollection}.
* Clients creating a concrete implementation should override the method
* {@link PObjectImpl#process(Iterable)}, which transforms the backing PCollection into the
 * singleton value encapsulated by the PObject. Once this {@code PObject}'s value has been
* calculated, the value is cached to prevent subsequent materializations of the backing
* {@code PCollection}.
*
* @param <S> The type contained in the underlying PCollection.
* @param <T> The type encapsulated by this PObject.
*/
public abstract class PObjectImpl<S, T> implements PObject<T> {
// The name of the collection, used as the name for this instance as well.
private String name;
  // A reference to the materialized contents of a PCollection.
private Iterable<S> iterable;
// A variable to hold a cached copy of the value of this {@code PObject},
// to prevent unnecessary materializations of the backing {@code PCollection}.
private T cachedValue;
// A flag indicating if a value for this {@code PObject} has been cached.
private boolean isCached;
/**
* Constructs a new instance of this {@code PObject} implementation.
*
* @param collect The backing {@code PCollection} for this {@code PObject}.
*/
public PObjectImpl(PCollection<S> collect) {
this.name = collect.toString();
this.iterable = collect.materialize();
this.cachedValue = null;
this.isCached = false;
}
/** {@inheritDoc} */
@Override
public String toString() {
return name;
}
/** {@inheritDoc} */
@Override
public final T getValue() {
if (!isCached) {
cachedValue = process(iterable);
isCached = true;
}
return cachedValue;
}
/**
* Transforms the provided Iterable, obtained from the backing {@link PCollection},
* into the value encapsulated by this {@code PObject}.
*
* @param input An Iterable whose elements correspond to those of the backing {@code
* PCollection}.
* @return The value of this {@code PObject}.
*/
protected abstract T process(Iterable<S> input);
}
| 2,923 |
0 |
Create_ds/crunch/crunch-scrunch/src/main/java/org/apache/crunch
|
Create_ds/crunch/crunch-scrunch/src/main/java/org/apache/crunch/scrunch/ScalaSafeReflectDatumReader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.scrunch;
import java.io.IOException;
import java.lang.reflect.Array;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Map;
import com.google.common.collect.Lists;
import org.apache.avro.Schema;
import org.apache.avro.io.ResolvingDecoder;
import org.apache.avro.reflect.ReflectData;
import org.apache.avro.reflect.ReflectDatumReader;
import org.apache.hadoop.util.ReflectionUtils;
import scala.collection.JavaConversions;
public class ScalaSafeReflectDatumReader<T> extends ReflectDatumReader<T> {
public ScalaSafeReflectDatumReader(Schema schema) {
super(schema, schema, ScalaSafeReflectData.getInstance());
}
@Override
protected Object readArray(Object old, Schema expected,
ResolvingDecoder in) throws IOException {
return scalaIterableCheck(super.readArray(old, expected, in), expected);
}
@Override
protected Object readMap(Object old, Schema expected,
ResolvingDecoder in) throws IOException {
return scalaMapCheck(super.readMap(old, expected, in), expected);
}
public static Object scalaMapCheck(Object map, Schema schema) {
Class mapClass = ScalaSafeReflectData.getClassProp(schema,
ScalaSafeReflectData.CLASS_PROP);
if (mapClass != null && mapClass.isAssignableFrom(scala.collection.Map.class)) {
return JavaConversions.mapAsScalaMap((Map) map);
}
return map;
}
public static Object scalaIterableCheck(Object array, Schema schema) {
Class collectionClass = ScalaSafeReflectData.getClassProp(schema,
ScalaSafeReflectData.CLASS_PROP);
if (collectionClass != null) {
if (scala.collection.Iterable.class.isAssignableFrom(collectionClass)) {
scala.collection.Iterable it = toIter(array);
if (scala.collection.immutable.List.class.isAssignableFrom(collectionClass)) {
return it.toList();
}
if (scala.collection.mutable.Buffer.class.isAssignableFrom(collectionClass)) {
return it.toBuffer();
}
if (scala.collection.immutable.Set.class.isAssignableFrom(collectionClass)) {
return it.toSet();
}
return it;
}
}
return array;
}
private static scala.collection.Iterable toIter(Object array) {
return JavaConversions.collectionAsScalaIterable((Collection) array);
}
@Override
@SuppressWarnings("unchecked")
protected Object newArray(Object old, int size, Schema schema) {
Class collectionClass = ScalaSafeReflectData.getClassProp(schema,
ScalaSafeReflectData.CLASS_PROP);
Class elementClass = ScalaSafeReflectData.getClassProp(schema,
ScalaSafeReflectData.ELEMENT_PROP);
if (collectionClass == null && elementClass == null)
return super.newArray(old, size, schema); // use specific/generic
ScalaSafeReflectData data = ScalaSafeReflectData.getInstance();
if (collectionClass != null && !collectionClass.isArray()) {
if (old instanceof Collection) {
((Collection)old).clear();
return old;
}
if (scala.collection.Iterable.class.isAssignableFrom(collectionClass) ||
collectionClass.isAssignableFrom(ArrayList.class)) {
return Lists.newArrayList();
}
return data.newInstance(collectionClass, schema);
}
if (elementClass == null) {
elementClass = data.getClass(schema.getElementType());
}
return Array.newInstance(elementClass, size);
}
}
| 2,924 |
0 |
Create_ds/crunch/crunch-scrunch/src/main/java/org/apache/crunch
|
Create_ds/crunch/crunch-scrunch/src/main/java/org/apache/crunch/scrunch/ScalaSafeReflectDatumWriter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.scrunch;
import java.util.Iterator;
import java.util.Map;
import org.apache.avro.Schema;
import org.apache.avro.reflect.ReflectDatumWriter;
import scala.collection.JavaConversions;
/**
 * A {@code ReflectDatumWriter} that understands Scala collections and maps in
 * addition to their Java counterparts.
 */
public class ScalaSafeReflectDatumWriter<T> extends ReflectDatumWriter<T> {
public ScalaSafeReflectDatumWriter(Schema schema) {
super(schema, ScalaSafeReflectData.getInstance());
}
@Override
protected long getArraySize(Object array) {
if (array instanceof scala.collection.Iterable) {
return ((scala.collection.Iterable) array).size();
}
return super.getArraySize(array);
}
@Override
protected Iterator<Object> getArrayElements(Object array) {
if (array instanceof scala.collection.Iterable) {
return JavaConversions.asJavaIterable((scala.collection.Iterable) array).iterator();
}
return (Iterator<Object>) super.getArrayElements(array);
}
@Override
protected int getMapSize(Object map) {
if (map instanceof scala.collection.Map) {
return ((scala.collection.Map) map).size();
}
return super.getMapSize(map);
}
/** Called by the default implementation of {@link #writeMap} to enumerate
* map elements. The default implementation is for {@link Map}.*/
@SuppressWarnings("unchecked")
protected Iterable<Map.Entry<Object,Object>> getMapEntries(Object map) {
if (map instanceof scala.collection.Map) {
return JavaConversions.mapAsJavaMap((scala.collection.Map) map).entrySet();
}
return super.getMapEntries(map);
}
}
| 2,925 |
0 |
Create_ds/crunch/crunch-scrunch/src/main/java/org/apache/crunch
|
Create_ds/crunch/crunch-scrunch/src/main/java/org/apache/crunch/scrunch/ScalaSafeReflectData.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.scrunch;
import java.lang.reflect.Field;
import java.lang.reflect.GenericArrayType;
import java.lang.reflect.Modifier;
import java.lang.reflect.ParameterizedType;
import java.lang.reflect.Type;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Collection;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.avro.AvroRuntimeException;
import org.apache.avro.AvroTypeException;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericFixed;
import org.apache.avro.generic.IndexedRecord;
import org.apache.avro.reflect.ReflectData;
import org.apache.avro.reflect.Stringable;
import org.apache.avro.reflect.Union;
import org.apache.avro.specific.FixedSize;
import org.codehaus.jackson.JsonNode;
import org.codehaus.jackson.node.NullNode;
/**
* Scala-oriented support class for serialization via reflection.
*/
public class ScalaSafeReflectData extends ReflectData.AllowNull {
private static final ScalaSafeReflectData INSTANCE = new ScalaSafeReflectData();
public static ScalaSafeReflectData getInstance() { return INSTANCE; }
static final String CLASS_PROP = "java-class";
static final String ELEMENT_PROP = "java-element-class";
static Class getClassProp(Schema schema, String prop) {
String name = schema.getProp(prop);
if (name == null) return null;
try {
return Class.forName(name);
} catch (ClassNotFoundException e) {
throw new AvroRuntimeException(e);
}
}
  /**
   * This method is the whole reason for this class to exist: it works around a
   * problem where calling getSimpleName on a class that is defined inside the
   * Scala REPL can cause an internal language error.
   *
   * @param clazz the class whose simple name should be computed
   * @return the cleaned simple name of the class
   */
private String getSimpleName(Class clazz) {
try {
return clean(clazz.getSimpleName());
} catch (InternalError ie) {
// This can happen in Scala when we're using the Console. Crazy, right?
String fullName = clazz.getName();
String[] pieces = fullName.split("\\.");
return clean(pieces[pieces.length - 1]);
}
}
@Override
@SuppressWarnings(value="unchecked")
protected Schema createSchema(Type type, Map<String,Schema> names) {
if (type instanceof GenericArrayType) { // generic array
Type component = ((GenericArrayType)type).getGenericComponentType();
if (component == Byte.TYPE) // byte array
return Schema.create(Schema.Type.BYTES);
Schema result = Schema.createArray(createSchema(component, names));
setElement(result, component);
return result;
} else if (type instanceof ParameterizedType) {
ParameterizedType ptype = (ParameterizedType)type;
Class raw = (Class)ptype.getRawType();
Type[] params = ptype.getActualTypeArguments();
if (java.util.Map.class.isAssignableFrom(raw) ||
scala.collection.Map.class.isAssignableFrom(raw)) {
Type key = params[0];
Type value = params[1];
if (!(key == String.class))
throw new AvroTypeException("Map key class not String: "+key);
Schema schema = Schema.createMap(createSchema(value, names));
schema.addProp(CLASS_PROP, raw.getName());
return schema;
} else if (Collection.class.isAssignableFrom(raw) ||
scala.collection.Iterable.class.isAssignableFrom(raw)) { // Collection
if (params.length != 1)
throw new AvroTypeException("No array type specified.");
Schema schema = Schema.createArray(createSchema(params[0], names));
schema.addProp(CLASS_PROP, raw.getName());
return schema;
} else {
throw new AvroTypeException("Could not convert type: " + type);
}
} else if ((type == Short.class) || (type == Short.TYPE)) {
Schema result = Schema.create(Schema.Type.INT);
result.addProp(CLASS_PROP, Short.class.getName());
return result;
} else if (type instanceof Class) { // Class
Class<?> c = (Class<?>)type;
if (c.isPrimitive() || Number.class.isAssignableFrom(c)
|| c == Void.class || c == Boolean.class) // primitive
return super.createSchema(type, names);
if (c.isArray()) { // array
Class component = c.getComponentType();
if (component == Byte.TYPE) { // byte array
Schema result = Schema.create(Schema.Type.BYTES);
result.addProp(CLASS_PROP, c.getName()); // For scala-specific byte arrays
return result;
}
Schema result = Schema.createArray(createSchema(component, names));
result.addProp(CLASS_PROP, c.getName());
result.addProp(ELEMENT_PROP, component.getName());
setElement(result, component);
return result;
}
if (CharSequence.class.isAssignableFrom(c)) // String
return Schema.create(Schema.Type.STRING);
if (ByteBuffer.class.isAssignableFrom(c)) {
return Schema.create(Schema.Type.BYTES);
}
String fullName = c.getName();
Schema schema = names.get(fullName);
if (schema == null) {
String name = getSimpleName(c);
String space = c.getPackage() == null ? "" : c.getPackage().getName();
if (c.getEnclosingClass() != null) // nested class
space = c.getEnclosingClass().getName() + "$";
Union union = c.getAnnotation(Union.class);
if (union != null) { // union annotated
return getAnnotatedUnion(union, names);
} else if (c.isAnnotationPresent(Stringable.class)){ // Stringable
Schema result = Schema.create(Schema.Type.STRING);
result.addProp(CLASS_PROP, c.getName());
return result;
} else if (c.isEnum()) { // Enum
List<String> symbols = new ArrayList<String>();
Enum[] constants = (Enum[])c.getEnumConstants();
for (int i = 0; i < constants.length; i++)
symbols.add(constants[i].name());
schema = Schema.createEnum(name, null /* doc */, space, symbols);
} else if (GenericFixed.class.isAssignableFrom(c)) { // fixed
int size = c.getAnnotation(FixedSize.class).value();
schema = Schema.createFixed(name, null /* doc */, space, size);
} else if (IndexedRecord.class.isAssignableFrom(c)) { // specific
return super.createSchema(type, names);
} else { // record
List<Schema.Field> fields = new ArrayList<Schema.Field>();
boolean error = Throwable.class.isAssignableFrom(c);
schema = Schema.createRecord(name, null /* doc */, space, error);
names.put(c.getName(), schema);
for (Field field : getFields(c))
if ((field.getModifiers()&(Modifier.TRANSIENT|Modifier.STATIC))==0){
Schema fieldSchema = createFieldSchema(field, names);
JsonNode defaultValue = null;
if (fieldSchema.getType() == Schema.Type.UNION) {
Schema defaultType = fieldSchema.getTypes().get(0);
if (defaultType.getType() == Schema.Type.NULL) {
defaultValue = NullNode.getInstance();
}
}
fields.add(new Schema.Field(clean(field.getName()),
fieldSchema, null /* doc */, defaultValue));
}
if (error) // add Throwable message
fields.add(new Schema.Field("detailMessage", THROWABLE_MESSAGE,
null, null));
schema.setFields(fields);
}
names.put(fullName, schema);
}
return schema;
}
return super.createSchema(type, names);
}
private static final Schema THROWABLE_MESSAGE =
makeNullable(Schema.create(Schema.Type.STRING));
@Override
public Object getField(Object record, String name, int position) {
if (record instanceof IndexedRecord)
return super.getField(record, name, position);
try {
return getField(record.getClass(), name).get(record);
} catch (IllegalAccessException e) {
throw new AvroRuntimeException(e);
}
}
private static final Map<Class,Map<String,Field>> FIELD_CACHE =
new ConcurrentHashMap<Class,Map<String,Field>>();
private static Field getField(Class c, String name) {
Map<String,Field> fields = FIELD_CACHE.get(c);
if (fields == null) {
fields = new ConcurrentHashMap<String,Field>();
FIELD_CACHE.put(c, fields);
}
Field f = fields.get(name);
if (f == null) {
f = findField(c, name);
fields.put(name, f);
}
return f;
}
private static Field findField(Class original, String name) {
Class c = original;
do {
try {
Field f = c.getDeclaredField(dirty(name));
f.setAccessible(true);
return f;
} catch (NoSuchFieldException e) {}
c = c.getSuperclass();
} while (c != null);
throw new AvroRuntimeException("No field named "+name+" in: "+original);
}
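  // Avro names may not contain '$', but Scala-generated identifiers often do,
  // so '$' is mapped to and from the safe placeholder "___".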
private static String clean(String dirty) {
return dirty.replace("$", "___");
}
private static String dirty(String clean) {
return clean.replace("___", "$");
}
  // Return the fields of this class and its superclasses to serialize.
// Not cached, since this is only used to create schemas, which are cached.
private Collection<Field> getFields(Class recordClass) {
Map<String,Field> fields = new LinkedHashMap<String,Field>();
Class c = recordClass;
do {
if (c.getPackage() != null
&& c.getPackage().getName().startsWith("java."))
break; // skip java built-in classes
for (Field field : c.getDeclaredFields())
if ((field.getModifiers() & (Modifier.TRANSIENT|Modifier.STATIC)) == 0)
if (fields.put(field.getName(), field) != null)
throw new AvroTypeException(c+" contains two fields named: "+field);
c = c.getSuperclass();
} while (c != null);
return fields.values();
}
@SuppressWarnings(value="unchecked")
private void setElement(Schema schema, Type element) {
if (!(element instanceof Class)) return;
Class<?> c = (Class<?>)element;
Union union = c.getAnnotation(Union.class);
if (union != null) // element is annotated union
schema.addProp(ELEMENT_PROP, c.getName());
}
// construct a schema from a union annotation
private Schema getAnnotatedUnion(Union union, Map<String,Schema> names) {
List<Schema> branches = new ArrayList<Schema>();
for (Class branch : union.value())
branches.add(createSchema(branch, names));
return Schema.createUnion(branches);
}
@Override
protected boolean isArray(Object datum) {
if (datum == null) return false;
return (datum instanceof Collection) || datum.getClass().isArray() ||
(datum instanceof scala.collection.Iterable);
}
@Override
protected boolean isMap(Object datum) {
return (datum instanceof java.util.Map) || (datum instanceof scala.collection.Map);
}
@Override
protected String getSchemaName(Object datum) {
if (datum != null) {
if(byte[].class.isAssignableFrom(datum.getClass())) {
return Schema.Type.BYTES.getName();
}
}
return super.getSchemaName(datum);
}
}
| 2,926 |
0 |
Create_ds/crunch/crunch-scrunch/src/main/java/org/apache/crunch
|
Create_ds/crunch/crunch-scrunch/src/main/java/org/apache/crunch/scrunch/ScalaReflectDataFactory.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.scrunch;
import org.apache.avro.Schema;
import org.apache.avro.reflect.ReflectData;
import org.apache.avro.reflect.ReflectDatumReader;
import org.apache.avro.reflect.ReflectDatumWriter;
import org.apache.crunch.types.avro.ReaderWriterFactory;
import org.apache.crunch.types.avro.ReflectDataFactory;
/**
* An implementation of the {@code ReflectDataFactory} class to work with Scala classes.
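 *
 * <p>A minimal usage sketch (hypothetical; {@code MyScalaClass} stands in for a user type
 * and is not part of this module):
 * <pre>{@code
 * ReaderWriterFactory factory = new ScalaReflectDataFactory();
 * Schema schema = factory.getData().getSchema(MyScalaClass.class);
 * ReflectDatumReader<MyScalaClass> reader = factory.getReader(schema);
 * ReflectDatumWriter<MyScalaClass> writer = factory.getWriter(schema);
 * }</pre>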
*/
public class ScalaReflectDataFactory implements ReaderWriterFactory {
@Override
public ReflectData getData() { return ScalaSafeReflectData.getInstance(); }
@Override
public <T> ReflectDatumReader<T> getReader(Schema schema) {
return new ScalaSafeReflectDatumReader<T>(schema);
}
@Override
public <T> ReflectDatumWriter<T> getWriter(Schema schema) {
return new ScalaSafeReflectDatumWriter<T>(schema);
}
}
| 2,927 |
0 |
Create_ds/mantis-api/src/test/java/io/mantisrx
|
Create_ds/mantis-api/src/test/java/io/mantisrx/api/UtilTest.java
|
package io.mantisrx.api;
import org.junit.Test;
import static org.junit.Assert.assertArrayEquals;
public class UtilTest {
@Test
public void testGetTagList() {
String[] tags = Util.getTaglist("/jobconnectbyname/rx-sps-tracker?clientId=testClientId", "testTargetId", "us-east-1");
assertArrayEquals(new String[]{
"clientId", "testClientId",
"SessionId", "testTargetId",
"urlPath", "/jobconnectbyname/rx-sps-tracker",
"region", "us-east-1"}, tags);
tags = Util.getTaglist("/jobconnectbyname/rx-sps-tracker?clientId=testClientId&MantisApiTag=tag1:value1", "testTargetId", "us-east-1");
assertArrayEquals(new String[]{
"tag1", "value1",
"clientId", "testClientId",
"SessionId", "testTargetId",
"urlPath", "/jobconnectbyname/rx-sps-tracker",
"region", "us-east-1"}, tags);
tags = Util.getTaglist("/jobconnectbyname/rx-sps-tracker?clientId=testClientId&MantisApiTag=tag1:value1&MantisApiTag=clientId:testClientId2", "testTargetId", "us-east-1");
assertArrayEquals(new String[]{
"tag1", "value1",
"clientId", "testClientId2",
"SessionId", "testTargetId",
"urlPath", "/jobconnectbyname/rx-sps-tracker",
"region", "us-east-1"}, tags);
}
}
| 2,928 |
0 |
Create_ds/mantis-api/src/test/java/io/mantisrx/api
|
Create_ds/mantis-api/src/test/java/io/mantisrx/api/tunnel/CrossRegionHandlerTest.java
|
package io.mantisrx.api.tunnel;
import com.google.common.collect.ImmutableList;
import io.mantisrx.api.push.ConnectionBroker;
import junit.framework.TestCase;
import org.junit.Test;
import rx.Scheduler;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.spy;
public class CrossRegionHandlerTest extends TestCase {
@Test
public void testParseUriRegion() {
CrossRegionHandler regionHandler = spy(new CrossRegionHandler(ImmutableList.of(), mock(MantisCrossRegionalClient.class), mock(ConnectionBroker.class), mock(Scheduler.class)));
doReturn(ImmutableList.of("us-east-1", "eu-west-1")).when(regionHandler).getTunnelRegions();
assertEquals(ImmutableList.of("us-east-1"), regionHandler.parseRegionsInUri("/region/us-east-1/foobar"));
assertEquals(ImmutableList.of("us-east-2"), regionHandler.parseRegionsInUri("/region/us-east-2/foobar"));
assertEquals(ImmutableList.of("us-east-1", "eu-west-1"), regionHandler.parseRegionsInUri("/region/all/foobar"));
assertEquals(ImmutableList.of("us-east-1", "eu-west-1"), regionHandler.parseRegionsInUri("/region/ALL/foobar"));
doReturn(ImmutableList.of("us-east-1", "eu-west-1", "us-west-2")).when(regionHandler).getTunnelRegions();
assertEquals(ImmutableList.of("us-east-1", "eu-west-1", "us-west-2"), regionHandler.parseRegionsInUri("/region/ALL/foobar"));
assertEquals(ImmutableList.of("us-east-1", "us-east-2"), regionHandler.parseRegionsInUri("/region/us-east-1,us-east-2/foobar"));
assertEquals(ImmutableList.of("us-east-1", "us-west-2"), regionHandler.parseRegionsInUri("/region/us-east-1,us-west-2/foobar"));
}
}
| 2,929 |
0 |
Create_ds/mantis-api/src/main/java/io/mantisrx
|
Create_ds/mantis-api/src/main/java/io/mantisrx/api/Bootstrap.java
|
/*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.api;
import com.google.inject.Injector;
import com.netflix.config.ConfigurationManager;
import com.netflix.governator.InjectorBuilder;
import com.netflix.zuul.netty.server.BaseServerStartup;
import com.netflix.zuul.netty.server.Server;
/**
* Bootstrap
*
* Author: Arthur Gonigberg
* Date: November 20, 2017
*/
public class Bootstrap {
public static void main(String[] args) {
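        // Accept "-p <configName>[.properties]"; Archaius loads "<configName>.properties"
        // itself, so a trailing ".properties" extension is stripped here.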
String propertiesFile = null;
if (args.length >= 2 && "-p".equals(args[0])) {
propertiesFile = args[1];
if (propertiesFile.endsWith(".properties")) {
propertiesFile = propertiesFile.substring(0, propertiesFile.length() - 11);
}
}
new Bootstrap().start(propertiesFile);
}
public void start(String configName) {
System.out.println("Mantis API: starting up.");
long startTime = System.currentTimeMillis();
int exitCode = 0;
Server server = null;
try {
ConfigurationManager.loadCascadedPropertiesFromResources(configName);
Injector injector = InjectorBuilder.fromModule(new MantisAPIModule()).createInjector();
BaseServerStartup serverStartup = injector.getInstance(BaseServerStartup.class);
server = serverStartup.server();
long startupDuration = System.currentTimeMillis() - startTime;
System.out.println("Mantis API: finished startup. Duration = " + startupDuration + " ms");
server.start();
server.awaitTermination();
}
catch (Throwable t) {
t.printStackTrace();
System.err.println("###############");
System.err.println("Mantis API: initialization failed. Forcing shutdown now.");
System.err.println("###############");
exitCode = 1;
}
finally {
// server shutdown
if (server != null) server.stop();
System.exit(exitCode);
}
}
}
| 2,930 |
0 |
Create_ds/mantis-api/src/main/java/io/mantisrx
|
Create_ds/mantis-api/src/main/java/io/mantisrx/api/Util.java
|
/**
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.api;
import com.google.common.base.Strings;
import io.netty.handler.codec.http.QueryStringDecoder;
import lombok.experimental.UtilityClass;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.tuple.ImmutablePair;
import org.slf4j.Logger;
import rx.Observable;
import rx.functions.Func1;
import java.util.*;
import java.util.concurrent.TimeUnit;
import static io.mantisrx.api.Constants.*;
@UtilityClass
@Slf4j
public class Util {
private static final int defaultNumRetries = 2;
public static boolean startsWithAnyOf(final String target, List<String> prefixes) {
for (String prefix : prefixes) {
if (target.startsWith(prefix)) {
return true;
}
}
return false;
}
//
// Regions
//
public static String getLocalRegion() {
return System.getenv("EC2_REGION");
}
//
// Query Params
//
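    // Extracts metric tags from a request URI. Tags passed as "MantisApiTag=name:value"
    // query parameters take precedence over a bare "clientId" parameter; SessionId and
    // urlPath are always appended, and region is appended when non-empty. For example
    // (see UtilTest):
    //   getTaglist("/jobconnectbyname/foo?clientId=abc", "sid", "us-east-1")
    //   -> [clientId, abc, SessionId, sid, urlPath, /jobconnectbyname/foo, region, us-east-1]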
public static String[] getTaglist(String uri, String id) {
return getTaglist(uri, id, null);
}
public static String[] getTaglist(String uri, String id, String region) {
QueryStringDecoder queryStringDecoder = new QueryStringDecoder(uri);
Map<String, List<String>> queryParameters = queryStringDecoder.parameters();
boolean isClientIdSet = false;
final List<String> tags = new LinkedList<>();
if (queryParameters != null) {
List<String> tagVals = queryParameters.get(TagsParamName);
if (tagVals != null) {
for (String s : tagVals) {
StringTokenizer tokenizer = new StringTokenizer(s, TagNameValDelimiter);
if (tokenizer.countTokens() == 2) {
String s1 = tokenizer.nextToken();
String s2 = tokenizer.nextToken();
if (s1 != null && !s1.isEmpty() && s2 != null && !s2.isEmpty()) {
tags.add(s1);
tags.add(s2);
if (ClientIdTagName.equals(s1)) {
isClientIdSet = true;
}
}
}
}
}
tagVals = queryParameters.get(ClientIdTagName);
if (!isClientIdSet && tagVals != null && !tagVals.isEmpty()) {
tags.add(ClientIdTagName);
tags.add(tagVals.get(0));
}
}
tags.add("SessionId");
tags.add(id);
tags.add("urlPath");
tags.add(queryStringDecoder.path());
if (!Strings.isNullOrEmpty(region)) {
tags.add("region");
tags.add(region);
}
return tags.toArray(new String[]{});
}
//
// Retries
//
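    // Builds a function for use with Observable.retryWhen: each failure is logged and the
    // subscription is retried after an exponential backoff of 2^attempt seconds until the
    // retry budget is exhausted, at which point the error is propagated. Illustrative
    // usage (the source observable and logger are assumed):
    //
    //   source.retryWhen(Util.getRetryFunc(log, "master-connection", 3)).subscribe();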
public static Func1<Observable<? extends Throwable>, Observable<?>> getRetryFunc(final Logger logger, String name) {
return getRetryFunc(logger, name, defaultNumRetries);
}
public static Func1<Observable<? extends Throwable>, Observable<?>> getRetryFunc(final Logger logger, String name, final int retries) {
final int limit = retries == Integer.MAX_VALUE ? retries : retries + 1;
return attempts -> attempts
.zipWith(Observable.range(1, limit), (t1, integer) -> {
logger.warn("Caught exception connecting for {}.", name, t1);
return new ImmutablePair<Throwable, Integer>(t1, integer);
})
.flatMap(pair -> {
Throwable t = pair.left;
int retryIter = pair.right;
long delay = Math.round(Math.pow(2, retryIter));
if (retryIter > retries) {
logger.error("Exceeded maximum retries ({}) for {} with exception: {}", retries, name, t.getMessage(), t);
return Observable.error(new Exception("Timeout after " + retries + " retries"));
}
logger.info("Retrying connection to {} after sleeping for {} seconds.", name, delay, t);
return Observable.timer(delay, TimeUnit.SECONDS);
});
}
}
| 2,931 |
0 |
Create_ds/mantis-api/src/main/java/io/mantisrx
|
Create_ds/mantis-api/src/main/java/io/mantisrx/api/MantisAPIModule.java
|
/*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.api;
import com.google.inject.Scopes;
import com.google.inject.util.Modules;
import com.netflix.appinfo.EurekaInstanceConfig;
import com.netflix.appinfo.providers.MyDataCenterInstanceConfigProvider;
import com.netflix.discovery.guice.EurekaModule;
import com.netflix.zuul.*;
import com.netflix.zuul.filters.FilterRegistry;
import com.netflix.zuul.filters.MutableFilterRegistry;
import com.netflix.zuul.groovy.GroovyCompiler;
import com.netflix.zuul.groovy.GroovyFileFilter;
import io.mantisrx.api.services.AppStreamDiscoveryService;
import io.mantisrx.api.services.AppStreamStore;
import io.mantisrx.api.services.ConfigurationBasedAppStreamStore;
import io.mantisrx.server.core.Configurations;
import io.mantisrx.server.core.CoreConfiguration;
import io.mantisrx.server.master.client.HighAvailabilityServices;
import io.mantisrx.server.master.client.HighAvailabilityServicesUtil;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.DeserializationFeature;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper;
import com.google.inject.AbstractModule;
import com.google.inject.Provides;
import com.google.inject.Singleton;
import com.google.inject.name.Named;
import com.netflix.config.ConfigurationManager;
import com.netflix.netty.common.accesslog.AccessLogPublisher;
import com.netflix.netty.common.status.ServerStatusManager;
import com.netflix.spectator.api.DefaultRegistry;
import com.netflix.spectator.api.Registry;
import com.netflix.spectator.api.patterns.ThreadPoolMonitor;
import com.netflix.zuul.context.SessionContextDecorator;
import com.netflix.zuul.context.ZuulSessionContextDecorator;
import com.netflix.zuul.init.ZuulFiltersModule;
import com.netflix.zuul.netty.server.BaseServerStartup;
import com.netflix.zuul.netty.server.ClientRequestReceiver;
import com.netflix.zuul.origins.BasicNettyOriginManager;
import com.netflix.zuul.origins.OriginManager;
import io.mantisrx.api.services.artifacts.ArtifactManager;
import io.mantisrx.api.services.artifacts.InMemoryArtifactManager;
import com.netflix.zuul.stats.BasicRequestMetricsPublisher;
import com.netflix.zuul.stats.RequestMetricsPublisher;
import io.mantisrx.api.tunnel.MantisCrossRegionalClient;
import io.mantisrx.api.tunnel.NoOpCrossRegionalClient;
import io.mantisrx.client.MantisClient;
import io.mantisrx.server.worker.client.WorkerMetricsClient;
import io.mantisrx.shaded.org.apache.curator.framework.listen.Listenable;
import io.mantisrx.shaded.org.apache.curator.framework.listen.StandardListenerManager;
import org.apache.commons.configuration.AbstractConfiguration;
import rx.Scheduler;
import rx.schedulers.Schedulers;
import java.io.FilenameFilter;
import java.util.ArrayList;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.*;
public class MantisAPIModule extends AbstractModule {
@Override
protected void configure() {
bind(AbstractConfiguration.class).toInstance(ConfigurationManager.getConfigInstance());
bind(BaseServerStartup.class).to(MantisServerStartup.class);
// use provided basic netty origin manager
bind(OriginManager.class).to(BasicNettyOriginManager.class);
// zuul filter loading
bind(DynamicCodeCompiler.class).to(GroovyCompiler.class);
bind(FilenameFilter.class).to(GroovyFileFilter.class);
install(Modules.override(new EurekaModule()).with(new AbstractModule() {
@Override
protected void configure() {
bind(EurekaInstanceConfig.class).toProvider(MyDataCenterInstanceConfigProvider.class).in(Scopes.SINGLETON);
}
}));
install(new ZuulFiltersModule());
bind(FilterLoader.class).to(DynamicFilterLoader.class);
bind(FilterRegistry.class).to(MutableFilterRegistry.class);
bind(FilterFileManager.class).asEagerSingleton();
// general server bindings
bind(ServerStatusManager.class); // health/discovery status
bind(SessionContextDecorator.class).to(ZuulSessionContextDecorator.class); // decorate new sessions when requests come in
bind(Registry.class).to(DefaultRegistry.class); // atlas metrics registry
bind(RequestCompleteHandler.class).to(BasicRequestCompleteHandler.class); // metrics post-request completion
bind(RequestMetricsPublisher.class).to(BasicRequestMetricsPublisher.class); // timings publisher
// access logger, including request ID generator
bind(AccessLogPublisher.class).toInstance(new AccessLogPublisher("ACCESS",
(channel, httpRequest) -> ClientRequestReceiver.getRequestFromChannel(channel).getContext().getUUID()));
bind(ArtifactManager.class).to(InMemoryArtifactManager.class);
bind(MantisCrossRegionalClient.class).to(NoOpCrossRegionalClient.class);
bind(ObjectMapper.class).toInstance(new ObjectMapper()
.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false));
}
@Provides
@Singleton
HighAvailabilityServices provideHighAvailabilityServices(AbstractConfiguration configuration) {
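        // Copy the "mantis"-prefixed Archaius keys into plain Properties for the core config factory.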
Properties props = new Properties();
configuration.getKeys("mantis").forEachRemaining(key -> {
props.put(key, configuration.getString(key));
});
return HighAvailabilityServicesUtil.createHAServices(
Configurations.frmProperties(props, CoreConfiguration.class));
}
@Provides @Singleton MantisClient provideMantisClient(AbstractConfiguration configuration) {
Properties props = new Properties();
configuration.getKeys("mantis").forEachRemaining(key -> {
props.put(key, configuration.getString(key));
});
return new MantisClient(props);
}
@Provides
@Singleton
@Named("io-scheduler")
Scheduler provideIoScheduler(Registry registry) {
ThreadPoolExecutor executor = new ThreadPoolExecutor(16, 128, 60,
TimeUnit.SECONDS, new LinkedBlockingQueue<>());
ThreadPoolMonitor.attach(registry, executor, "io-thread-pool");
return Schedulers.from(executor);
}
@Provides
@Singleton
ConfigurationBasedAppStreamStore.ConfigSource provideConfigSource(AbstractConfiguration configuration) {
return new ConfigurationBasedAppStreamStore.ConfigSource() {
@Override
public Listenable<ConfigurationBasedAppStreamStore.ConfigurationChangeListener> getListenable() {
return StandardListenerManager.standard();
}
@Override
public String get() {
return String.join(",", configuration.getStringArray("mreAppJobClusterMap"));
}
};
}
@Provides
@Singleton
AppStreamDiscoveryService provideAppStreamDiscoveryService(MantisClient mantisClient,
@Named("io-scheduler") Scheduler ioScheduler,
ConfigurationBasedAppStreamStore.ConfigSource configSource) {
AppStreamStore appStreamStore = new ConfigurationBasedAppStreamStore(configSource);
return new AppStreamDiscoveryService(mantisClient, ioScheduler, appStreamStore);
}
@Provides @Singleton
WorkerMetricsClient provideWorkerMetricsClient(AbstractConfiguration configuration) {
Properties props = new Properties();
configuration.getKeys("mantis").forEachRemaining(key -> {
props.put(key, configuration.getString(key));
});
return new WorkerMetricsClient(props);
}
@Provides
@Singleton
@Named("push-prefixes")
List<String> providePushPrefixes() {
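        // URI prefixes routed to the push (SSE/WebSocket) pipeline rather than the Zuul proxy filters.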
List<String> pushPrefixes = new ArrayList<>(20);
pushPrefixes.add("/jobconnectbyid");
pushPrefixes.add("/api/v1/jobconnectbyid");
pushPrefixes.add("/jobconnectbyname");
pushPrefixes.add("/api/v1/jobconnectbyname");
pushPrefixes.add("/jobsubmitandconnect");
pushPrefixes.add("/api/v1/jobsubmitandconnect");
pushPrefixes.add("/jobClusters/discoveryInfoStream");
pushPrefixes.add("/jobstatus");
pushPrefixes.add("/api/v1/jobstatus");
pushPrefixes.add("/api/v1/jobs/schedulingInfo/");
pushPrefixes.add("/api/v1/metrics");
return pushPrefixes;
}
}
| 2,932 |
0 |
Create_ds/mantis-api/src/main/java/io/mantisrx
|
Create_ds/mantis-api/src/main/java/io/mantisrx/api/MantisServerStartup.java
|
/*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.api;
import com.google.inject.name.Named;
import com.netflix.appinfo.ApplicationInfoManager;
import com.netflix.config.DynamicIntProperty;
import com.netflix.discovery.EurekaClient;
import com.netflix.netty.common.accesslog.AccessLogPublisher;
import com.netflix.netty.common.channel.config.ChannelConfig;
import com.netflix.netty.common.channel.config.CommonChannelConfigKeys;
import com.netflix.netty.common.metrics.EventLoopGroupMetrics;
import com.netflix.netty.common.proxyprotocol.StripUntrustedProxyHeadersHandler;
import com.netflix.netty.common.status.ServerStatusManager;
import com.netflix.spectator.api.Registry;
import com.netflix.zuul.FilterLoader;
import com.netflix.zuul.FilterUsageNotifier;
import com.netflix.zuul.RequestCompleteHandler;
import com.netflix.zuul.context.SessionContextDecorator;
import com.netflix.zuul.netty.server.BaseServerStartup;
import com.netflix.zuul.netty.server.DirectMemoryMonitor;
import com.netflix.zuul.netty.server.NamedSocketAddress;
import io.mantisrx.api.initializers.MantisApiServerChannelInitializer;
import io.mantisrx.api.push.ConnectionBroker;
import io.mantisrx.api.tunnel.MantisCrossRegionalClient;
import io.mantisrx.server.master.client.HighAvailabilityServices;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.group.ChannelGroup;
import org.apache.commons.configuration.AbstractConfiguration;
import rx.Scheduler;
import javax.inject.Inject;
import javax.inject.Singleton;
import java.net.InetSocketAddress;
import java.util.*;
@Singleton
public class MantisServerStartup extends BaseServerStartup {
private final HighAvailabilityServices highAvailabilityServices;
private final MantisCrossRegionalClient mantisCrossRegionalClient;
private final ConnectionBroker connectionBroker;
private final Scheduler scheduler;
private final List<String> pushPrefixes;
@Inject
public MantisServerStartup(ServerStatusManager serverStatusManager, FilterLoader filterLoader,
SessionContextDecorator sessionCtxDecorator, FilterUsageNotifier usageNotifier,
RequestCompleteHandler reqCompleteHandler, Registry registry,
DirectMemoryMonitor directMemoryMonitor, EventLoopGroupMetrics eventLoopGroupMetrics,
EurekaClient discoveryClient, ApplicationInfoManager applicationInfoManager,
AccessLogPublisher accessLogPublisher,
AbstractConfiguration configurationManager,
HighAvailabilityServices highAvailabilityServices,
MantisCrossRegionalClient mantisCrossRegionalClient,
ConnectionBroker connectionBroker,
@Named("io-scheduler") Scheduler scheduler,
@Named("push-prefixes") List<String> pushPrefixes
) {
super(serverStatusManager, filterLoader, sessionCtxDecorator, usageNotifier, reqCompleteHandler, registry,
directMemoryMonitor, eventLoopGroupMetrics, discoveryClient, applicationInfoManager,
accessLogPublisher);
this.highAvailabilityServices = highAvailabilityServices;
this.mantisCrossRegionalClient = mantisCrossRegionalClient;
this.connectionBroker = connectionBroker;
this.scheduler = scheduler;
this.pushPrefixes = pushPrefixes;
// Mantis Master Listener
highAvailabilityServices
.getMasterMonitor()
.getMasterObservable()
.filter(x -> x != null)
.forEach(masterDescription -> {
LOG.info("Received new Mantis Master: " + masterDescription);
configurationManager.setProperty("api.ribbon.listOfServers",
masterDescription.getHostIP() + ":" + masterDescription.getApiPort());
});
}
@Override
protected Map<NamedSocketAddress, ChannelInitializer<?>> chooseAddrsAndChannels(ChannelGroup clientChannels) {
Map<NamedSocketAddress, ChannelInitializer<?>> addrsToChannels = new HashMap<>();
String mainPortName = "main";
int port = new DynamicIntProperty("zuul.server.port.main", 7001).get();
NamedSocketAddress sockAddr = new NamedSocketAddress(mainPortName, new InetSocketAddress(port));
ChannelConfig channelConfig = defaultChannelConfig(mainPortName);
ChannelConfig channelDependencies = defaultChannelDependencies(mainPortName);
        /* These settings may need to be tweaked depending on whether you're running behind
         * an ELB HTTP listener, a TCP listener, or directly on the internet.
         */
channelConfig.set(CommonChannelConfigKeys.allowProxyHeadersWhen,
StripUntrustedProxyHeadersHandler.AllowWhen.ALWAYS);
channelConfig.set(CommonChannelConfigKeys.preferProxyProtocolForClientIp, false);
channelConfig.set(CommonChannelConfigKeys.isSSlFromIntermediary, false);
channelConfig.set(CommonChannelConfigKeys.withProxyProtocol, false);
addrsToChannels.put(
sockAddr,
new MantisApiServerChannelInitializer(
String.valueOf(port), channelConfig, channelDependencies, clientChannels, pushPrefixes,
highAvailabilityServices, mantisCrossRegionalClient, connectionBroker,
scheduler, false));
logAddrConfigured(sockAddr);
return Collections.unmodifiableMap(addrsToChannels);
}
}
| 2,933 |
0 |
Create_ds/mantis-api/src/main/java/io/mantisrx
|
Create_ds/mantis-api/src/main/java/io/mantisrx/api/MantisConfigurationBasedServerList.java
|
/**
* Copyright 2023 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.api;
import com.google.common.base.Strings;
import com.google.common.collect.Lists;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.loadbalancer.ConfigurationBasedServerList;
import com.netflix.loadbalancer.Server;
import com.netflix.niws.loadbalancer.DiscoveryEnabledServer;
import java.util.List;
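/**
 * Derives the Ribbon server list from a comma-separated "host:port" configuration value,
 * registering each entry as a Eureka {@link DiscoveryEnabledServer} under the
 * "mantismasterv2" application name.
 */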
public class MantisConfigurationBasedServerList extends ConfigurationBasedServerList {
@Override
protected List<Server> derive(String value) {
List<Server> list = Lists.newArrayList();
if (!Strings.isNullOrEmpty(value)) {
for (String s : value.split(",")) {
Server server = new Server(s.trim());
InstanceInfo instanceInfo =
InstanceInfo.Builder.newBuilder()
.setAppName("mantismasterv2")
.setIPAddr(server.getHost())
.setPort(server.getPort())
.build();
list.add(new DiscoveryEnabledServer(instanceInfo, false, true));
}
}
return list;
}
}
| 2,934 |
0 |
Create_ds/mantis-api/src/main/java/io/mantisrx
|
Create_ds/mantis-api/src/main/java/io/mantisrx/api/Constants.java
|
package io.mantisrx.api;
import lombok.experimental.UtilityClass;
@UtilityClass
public class Constants {
public static final String numMessagesCounterName = "numSinkMessages";
public static final String numDroppedMessagesCounterName = "numDroppedSinkMessages";
public static final String numBytesCounterName = "numSinkBytes";
public static final String numDroppedBytesCounterName = "numDroppedSinkBytes";
public static final String drainTriggeredCounterName = "drainTriggered";
public static final String numIncomingMessagesCounterName = "numIncomingMessages";
public static final String SSE_DATA_SUFFIX = "\r\n\r\n";
public static final String SSE_DATA_PREFIX = "data: ";
public static final long TunnelPingIntervalSecs = 12;
public static final String TunnelPingMessage = "MantisApiTunnelPing";
public static final String TunnelPingParamName = "MantisApiTunnelPingEnabled";
public static final String OriginRegionTagName = "originRegion";
public static final String ClientIdTagName = "clientId";
public static final String TagsParamName = "MantisApiTag";
public static final String TagNameValDelimiter = ":";
public static final String metaErrorMsgHeader = "mantis.meta.error.message";
public static final String metaOriginName = "mantis.meta.origin";
public static final String numRemoteBytesCounterName = "numRemoteSinkBytes";
public static final String numRemoteMessagesCounterName = "numRemoteMessages";
public static final String numSseErrorsCounterName = "numSseErrors";
public static final String DUMMY_TIMER_DATA = "DUMMY_TIMER_DATA";
public static final String MANTISAPI_CACHED_HEADER = "x-nflx-mantisapi-cached";
}
| 2,935 |
0 |
Create_ds/mantis-api/src/main/java/io/mantisrx/api
|
Create_ds/mantis-api/src/main/java/io/mantisrx/api/filters/MasterCacheLoader.java
|
package io.mantisrx.api.filters;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheStats;
import com.google.inject.Inject;
import com.netflix.config.DynamicBooleanProperty;
import com.netflix.config.DynamicIntProperty;
import com.netflix.spectator.api.BasicTag;
import com.netflix.spectator.api.Registry;
import com.netflix.spectator.api.Spectator;
import com.netflix.spectator.api.patterns.PolledMeter;
import com.netflix.zuul.filters.http.HttpOutboundSyncFilter;
import com.netflix.zuul.message.http.HttpResponseMessage;
import io.mantisrx.api.Constants;
import lombok.extern.slf4j.Slf4j;
import java.util.concurrent.TimeUnit;
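/**
 * Outbound filter that stores GET responses proxied to the "api" VIP in
 * {@link #masterCache}, keyed by path and query string, so the paired inbound
 * {@code MasterCacheHitChecker} can short-circuit identical requests.
 */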
@Slf4j
public class MasterCacheLoader extends HttpOutboundSyncFilter {
@Override
public boolean needsBodyBuffered(HttpResponseMessage message) {
return true;
}
private static DynamicBooleanProperty cacheEnabled = new DynamicBooleanProperty("mantisapi.cache.enabled", false);
private static DynamicIntProperty cacheSize = new DynamicIntProperty("mantisapi.cache.size", 1000);
private static DynamicIntProperty cacheDurationSeconds = new DynamicIntProperty("mantisapi.cache.seconds", 1);
public static final Cache<String, String> masterCache = CacheBuilder.newBuilder()
.maximumSize(cacheSize.get())
.expireAfterWrite(cacheDurationSeconds.get(), TimeUnit.SECONDS)
.build();
@Inject
public MasterCacheLoader(Registry registry) {
CacheStats stats = masterCache.stats();
PolledMeter.using(registry)
.withName("mantis.api.cache.size")
.withTag(new BasicTag("id", "api"))
.monitorMonotonicCounter(masterCache, Cache::size);
PolledMeter.using(registry)
.withName("mantis.api.cache.hitCount")
.withTag(new BasicTag("id", "api"))
.monitorMonotonicCounter(stats, CacheStats::hitCount);
PolledMeter.using(registry)
.withName("mantis.api.cache.missCount")
.withTag(new BasicTag("id", "api"))
.monitorMonotonicCounter(stats, CacheStats::missCount);
}
@Override
public HttpResponseMessage apply(HttpResponseMessage input) {
String key = input.getInboundRequest().getPathAndQuery();
String responseBody = input.getBodyAsText();
if (null != responseBody && cacheEnabled.get()) {
masterCache.put(key, responseBody);
}
return input;
}
@Override
public int filterOrder() {
return 999;
}
@Override
public boolean shouldFilter(HttpResponseMessage msg) {
return msg.getOutboundRequest().getContext().getRouteVIP() != null
&& msg.getOutboundRequest().getContext().getRouteVIP().equalsIgnoreCase("api")
&& msg.getInboundRequest().getMethod().equalsIgnoreCase("get")
&& msg.getHeaders().getAll(Constants.MANTISAPI_CACHED_HEADER).size() == 0; // Set by the MasterCacheHitChecker, ensures we aren't re-caching.
}
}
| 2,936 |
0 |
Create_ds/mantis-api/src/main/java/io/mantisrx/api
|
Create_ds/mantis-api/src/main/java/io/mantisrx/api/filters/Artifacts.java
|
package io.mantisrx.api.filters;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.base.Strings;
import com.netflix.zuul.filters.http.HttpSyncEndpoint;
import com.netflix.zuul.message.http.HttpHeaderNames;
import com.netflix.zuul.message.http.HttpRequestMessage;
import com.netflix.zuul.message.http.HttpResponseMessage;
import com.netflix.zuul.message.http.HttpResponseMessageImpl;
import io.mantisrx.api.proto.Artifact;
import io.mantisrx.api.services.artifacts.ArtifactManager;
import io.netty.handler.codec.http.HttpHeaderValues;
import io.netty.handler.codec.http.HttpResponseStatus;
import io.vavr.control.Try;
import lombok.extern.slf4j.Slf4j;
import javax.inject.Inject;
import java.util.List;
import java.util.Optional;
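/**
 * Endpoint backing /api/v1/artifacts: GET without a file name lists artifact names as
 * JSON, GET with a file name downloads the artifact, POST stores the request body
 * (under a fixed name in this in-memory setup), and any other method yields 405.
 */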
@Slf4j
public class Artifacts extends HttpSyncEndpoint {
private final ArtifactManager artifactManager;
private final ObjectMapper objectMapper;
public static final String PATH_SPEC = "/api/v1/artifacts";
@Override
public boolean needsBodyBuffered(HttpRequestMessage input) {
return input.getMethod().toLowerCase().equals("post");
}
@Inject
public Artifacts(ArtifactManager artifactManager, ObjectMapper objectMapper) {
this.artifactManager = artifactManager;
this.objectMapper = objectMapper;
artifactManager.putArtifact(new Artifact("mantis.json", 0, new byte[0]));
artifactManager.putArtifact(new Artifact("mantis.zip", 0, new byte[0]));
}
@Override
public HttpResponseMessage apply(HttpRequestMessage request) {
if (request.getMethod().toLowerCase().equals("get")) {
String fileName = request.getPath().replaceFirst("^" + PATH_SPEC + "/?", "");
if (Strings.isNullOrEmpty(fileName)) {
List<String> files = artifactManager
.getArtifacts();
Try<String> serialized = Try.of(() -> objectMapper.writeValueAsString(files));
return serialized.map(body -> {
HttpResponseMessage response = new HttpResponseMessageImpl(request.getContext(), request, 200);
response.getHeaders().set(HttpHeaderNames.CONTENT_TYPE.toString(), HttpHeaderValues.APPLICATION_JSON.toString());
response.setBodyAsText(body);
return response;
}).getOrElseGet(t -> {
HttpResponseMessage response = new HttpResponseMessageImpl(request.getContext(), request, 500);
response.getHeaders().set(HttpHeaderNames.CONTENT_TYPE.toString(), HttpHeaderValues.TEXT_PLAIN.toString());
response.setBodyAsText(t.getMessage());
return response;
});
} else {
Optional<Artifact> artifact = artifactManager.getArtifact(fileName);
return artifact.map(art -> {
HttpResponseMessage response = new HttpResponseMessageImpl(request.getContext(), request,
HttpResponseStatus.OK.code());
response.setBody(art.getContent());
response.getHeaders().set(HttpHeaderNames.CONTENT_TYPE,
fileName.endsWith("json")
? HttpHeaderValues.APPLICATION_JSON.toString()
: HttpHeaderValues.APPLICATION_OCTET_STREAM.toString());
response.getHeaders().set("Content-Disposition",
String.format("attachment; filename=\"%s\"", fileName));
return response;
}).orElseGet(() -> {
HttpResponseMessage response = new HttpResponseMessageImpl(request.getContext(), request,
HttpResponseStatus.NOT_FOUND.code());
response.setBody(new byte[]{});
return response;
});
}
} else if (request.getMethod().toLowerCase().equals("post")) {
byte[] body = request.getBody();
artifactManager.putArtifact(new Artifact("testing.json", body.length, body));
HttpResponseMessage response = new HttpResponseMessageImpl(request.getContext(), request,
HttpResponseStatus.OK.code());
return response;
}
HttpResponseMessage response = new HttpResponseMessageImpl(request.getContext(), request, HttpResponseStatus.METHOD_NOT_ALLOWED.code());
response.setBodyAsText(HttpResponseStatus.METHOD_NOT_ALLOWED.reasonPhrase());
response.getHeaders().set(HttpHeaderNames.CONTENT_TYPE.toString(), HttpHeaderValues.TEXT_PLAIN.toString());
return response;
}
}
| 2,937 |
0 |
Create_ds/mantis-api/src/main/java/io/mantisrx/api
|
Create_ds/mantis-api/src/main/java/io/mantisrx/api/filters/Routes.java
|
/**
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.api.filters;
import com.netflix.zuul.context.SessionContext;
import com.netflix.zuul.filters.http.HttpInboundSyncFilter;
import com.netflix.zuul.message.http.HttpRequestMessage;
import com.netflix.zuul.netty.filter.ZuulEndPointRunner;
import lombok.extern.slf4j.Slf4j;
@Slf4j
public class Routes extends HttpInboundSyncFilter {
@Override
public int filterOrder() {
return 0;
}
@Override
public boolean shouldFilter(HttpRequestMessage httpRequestMessage) {
return true;
}
@Override
public HttpRequestMessage apply(HttpRequestMessage request) {
SessionContext context = request.getContext();
String path = request.getPath();
String host = request.getOriginalHost();
if (request.getMethod().toLowerCase().equals("options")) {
context.setEndpoint(Options.class.getCanonicalName());
} else if (path.equalsIgnoreCase("/healthcheck")) {
context.setEndpoint(Healthcheck.class.getCanonicalName());
} else if (path.equalsIgnoreCase("/favicon.ico")) {
context.setEndpoint(Favicon.class.getCanonicalName());
} else if (path.startsWith(Artifacts.PATH_SPEC)) {
context.setEndpoint(Artifacts.class.getCanonicalName());
} else if (path.equalsIgnoreCase("/api/v1/mantis/publish/streamDiscovery")) {
context.setEndpoint(AppStreamDiscovery.class.getCanonicalName());
} else if (path.startsWith("/jobClusters/discoveryInfo")) {
String jobCluster = request.getPath().replaceFirst(JobDiscoveryInfoCacheHitChecker.PATH_SPEC + "/", "");
String newUrl = "/api/v1/jobClusters/" + jobCluster + "/latestJobDiscoveryInfo";
request.setPath(newUrl);
context.setEndpoint(ZuulEndPointRunner.PROXY_ENDPOINT_FILTER_NAME);
context.setRouteVIP("api");
} else if (path.equalsIgnoreCase("/api/v1/mql/parse")) {
context.setEndpoint(MQLParser.class.getCanonicalName());
} else if (path.equals(MREAppStreamToJobClusterMapping.PATH_SPEC)) {
context.setEndpoint(MREAppStreamToJobClusterMapping.class.getCanonicalName());
} else {
context.setEndpoint(ZuulEndPointRunner.PROXY_ENDPOINT_FILTER_NAME);
context.setRouteVIP("api");
}
return request;
}
}
| 2,938 |
0 |
Create_ds/mantis-api/src/main/java/io/mantisrx/api
|
Create_ds/mantis-api/src/main/java/io/mantisrx/api/filters/MetricsReporting.java
|
package io.mantisrx.api.filters;
import com.netflix.spectator.api.Counter;
import com.netflix.spectator.api.Timer;
import com.netflix.zuul.filters.http.HttpOutboundSyncFilter;
import com.netflix.zuul.message.http.HttpResponseMessage;
import com.netflix.zuul.netty.SpectatorUtils;
import io.vavr.Tuple;
import io.vavr.Tuple2;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;
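/**
 * Outbound filter that records a latency timer and a request counter per
 * (path, status-family) pair, memoizing the Spectator meters so each distinct
 * key is registered only once.
 */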
public class MetricsReporting extends HttpOutboundSyncFilter {
private static final ConcurrentHashMap<Tuple2<String, String>, Timer> timerCache = new ConcurrentHashMap<>(500);
private static final ConcurrentHashMap<Tuple2<String, String>, Counter> counterCache = new ConcurrentHashMap<>(500);
@Override
public HttpResponseMessage apply(HttpResponseMessage input) {
String path = input.getInboundRequest().getPath();
String status = statusCodeToStringRepresentation(input.getStatus());
        // Record latency. Zuul no longer records total request time.
timerCache.computeIfAbsent(Tuple.of(path, status),
tuple -> SpectatorUtils.newTimer("latency", path,"status", status))
.record(input.getContext().getOriginReportedDuration(), TimeUnit.NANOSECONDS);
// Record Request
counterCache.computeIfAbsent(Tuple.of(path, status),
tuple -> SpectatorUtils.newCounter("requests", path, "status", status))
.increment();
return input;
}
private String statusCodeToStringRepresentation(Integer statusCode) {
return (statusCode / 100) + "xx";
}
@Override
public int filterOrder() {
return -100;
}
@Override
public boolean shouldFilter(HttpResponseMessage msg) {
return true;
}
}
| 2,939 |
0 |
Create_ds/mantis-api/src/main/java/io/mantisrx/api
|
Create_ds/mantis-api/src/main/java/io/mantisrx/api/filters/Healthcheck.java
|
/**
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.api.filters;
import com.netflix.zuul.filters.http.HttpSyncEndpoint;
import com.netflix.zuul.message.http.HttpRequestMessage;
import com.netflix.zuul.message.http.HttpResponseMessage;
import com.netflix.zuul.message.http.HttpResponseMessageImpl;
import com.netflix.zuul.stats.status.StatusCategoryUtils;
import com.netflix.zuul.stats.status.ZuulStatusCategory;
public class Healthcheck extends HttpSyncEndpoint {
@Override
public HttpResponseMessage apply(HttpRequestMessage request) {
HttpResponseMessage resp = new HttpResponseMessageImpl(request.getContext(), request, 200);
resp.setBodyAsText("mantisapi healthy");
StatusCategoryUtils.setStatusCategory(request.getContext(), ZuulStatusCategory.SUCCESS);
return resp;
}
}
| 2,940 |
0 |
Create_ds/mantis-api/src/main/java/io/mantisrx/api
|
Create_ds/mantis-api/src/main/java/io/mantisrx/api/filters/Favicon.java
|
/**
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.api.filters;
import com.netflix.zuul.filters.http.HttpSyncEndpoint;
import com.netflix.zuul.message.http.HttpRequestMessage;
import com.netflix.zuul.message.http.HttpResponseMessage;
import com.netflix.zuul.message.http.HttpResponseMessageImpl;
import com.netflix.zuul.stats.status.StatusCategoryUtils;
import com.netflix.zuul.stats.status.ZuulStatusCategory;
/**
* Returns an empty 200 response to prevent 404s on Favicon.
*/
public class Favicon extends HttpSyncEndpoint {
@Override
public HttpResponseMessage apply(HttpRequestMessage request) {
HttpResponseMessage resp = new HttpResponseMessageImpl(request.getContext(), request, 200);
resp.setBody(new byte[0]);
StatusCategoryUtils.setStatusCategory(request.getContext(), ZuulStatusCategory.SUCCESS);
return resp;
}
}
| 2,941 |
0 |
Create_ds/mantis-api/src/main/java/io/mantisrx/api
|
Create_ds/mantis-api/src/main/java/io/mantisrx/api/filters/MREAppStreamToJobClusterMapping.java
|
/**
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.api.filters;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper;
import io.mantisrx.discovery.proto.AppJobClustersMap;
import com.netflix.zuul.filters.http.HttpSyncEndpoint;
import com.netflix.zuul.message.http.HttpHeaderNames;
import com.netflix.zuul.message.http.HttpRequestMessage;
import com.netflix.zuul.message.http.HttpResponseMessage;
import com.netflix.zuul.message.http.HttpResponseMessageImpl;
import io.mantisrx.api.services.AppStreamDiscoveryService;
import io.mantisrx.shaded.com.google.common.base.Preconditions;
import io.netty.handler.codec.http.HttpHeaderValues;
import io.vavr.control.Try;
import java.util.List;
import javax.inject.Inject;
public class MREAppStreamToJobClusterMapping extends HttpSyncEndpoint {
private final AppStreamDiscoveryService appStreamDiscoveryService;
private final ObjectMapper objectMapper;
private static final String APPNAME_QUERY_PARAM = "app";
public static final String PATH_SPEC = "/api/v1/mantis/publish/streamJobClusterMap";
@Inject
public MREAppStreamToJobClusterMapping(AppStreamDiscoveryService appStreamDiscoveryService,
ObjectMapper objectMapper) {
Preconditions.checkArgument(appStreamDiscoveryService != null, "appStreamDiscoveryService cannot be null");
this.appStreamDiscoveryService = appStreamDiscoveryService;
Preconditions.checkArgument(objectMapper != null, "objectMapper cannot be null");
this.objectMapper = objectMapper;
}
@Override
public HttpResponseMessage apply(HttpRequestMessage request) {
List<String> apps = request.getQueryParams().get(APPNAME_QUERY_PARAM);
Try<AppJobClustersMap> payloadTry = Try.ofCallable(() -> appStreamDiscoveryService.getAppJobClustersMap(apps));
Try<String> serialized = payloadTry.flatMap(payload -> Try.of(() -> objectMapper.writeValueAsString(payload)));
return serialized.map(body -> {
HttpResponseMessage resp = new HttpResponseMessageImpl(request.getContext(), request, 200);
resp.setBodyAsText(body);
resp.getHeaders().set(HttpHeaderNames.CONTENT_TYPE.toString(), HttpHeaderValues.APPLICATION_JSON.toString());
return resp;
}).getOrElseGet(t -> {
            HttpResponseMessage resp = new HttpResponseMessageImpl(request.getContext(), request, 500); // error path should not report 200
resp.setBodyAsText(t.getMessage());
resp.getHeaders().set(HttpHeaderNames.CONTENT_TYPE.toString(), HttpHeaderValues.TEXT_PLAIN.toString());
return resp;
});
}
}
| 2,942 |
0 |
Create_ds/mantis-api/src/main/java/io/mantisrx/api
|
Create_ds/mantis-api/src/main/java/io/mantisrx/api/filters/AppStreamDiscovery.java
|
/**
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.api.filters;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper;
import com.google.inject.Inject;
import com.netflix.zuul.filters.http.HttpSyncEndpoint;
import com.netflix.zuul.message.http.HttpHeaderNames;
import com.netflix.zuul.message.http.HttpRequestMessage;
import com.netflix.zuul.message.http.HttpResponseMessage;
import com.netflix.zuul.message.http.HttpResponseMessageImpl;
import io.mantisrx.api.proto.AppDiscoveryMap;
import io.mantisrx.api.services.AppStreamDiscoveryService;
import com.netflix.zuul.stats.status.StatusCategoryUtils;
import com.netflix.zuul.stats.status.ZuulStatusCategory;
import io.netty.handler.codec.http.HttpHeaderValues;
import io.vavr.control.Either;
import io.vavr.control.Try;
import lombok.extern.slf4j.Slf4j;
import java.util.List;
import java.util.function.Function;
@Slf4j
public class AppStreamDiscovery extends HttpSyncEndpoint {
private final AppStreamDiscoveryService appStreamDiscoveryService;
private final ObjectMapper objectMapper;
private static final String APPNAME_QUERY_PARAM = "app";
@Inject
public AppStreamDiscovery(AppStreamDiscoveryService appStreamDiscoveryService,
ObjectMapper objectMapper) {
this.appStreamDiscoveryService = appStreamDiscoveryService;
this.objectMapper = objectMapper;
}
@Override
public HttpResponseMessage apply(HttpRequestMessage request) {
List<String> apps = request.getQueryParams().get(APPNAME_QUERY_PARAM);
Either<String, AppDiscoveryMap> result = appStreamDiscoveryService.getAppDiscoveryMap(apps);
return result.bimap(errorMessage -> {
HttpResponseMessage resp = new HttpResponseMessageImpl(request.getContext(), request, 500);
resp.setBodyAsText(errorMessage);
StatusCategoryUtils.setStatusCategory(request.getContext(), ZuulStatusCategory.FAILURE_LOCAL);
return resp;
}, appDiscoveryMap -> {
Try<String> serialized = Try.of(() -> objectMapper.writeValueAsString(appDiscoveryMap));
if (serialized.isSuccess()) {
StatusCategoryUtils.setStatusCategory(request.getContext(), ZuulStatusCategory.SUCCESS);
HttpResponseMessage resp = new HttpResponseMessageImpl(request.getContext(), request, 200);
resp.getHeaders().set(HttpHeaderNames.CONTENT_TYPE.toString(), HttpHeaderValues.APPLICATION_JSON.toString());
resp.setBodyAsText(serialized.get());
return resp;
} else {
StatusCategoryUtils.setStatusCategory(request.getContext(), ZuulStatusCategory.FAILURE_LOCAL);
HttpResponseMessage resp = new HttpResponseMessageImpl(request.getContext(), request, 500);
resp.getHeaders().set(HttpHeaderNames.CONTENT_TYPE.toString(), HttpHeaderValues.TEXT_PLAIN.toString());
resp.setBodyAsText(serialized.getOrElseGet(Throwable::getMessage));
return resp;
}
}).getOrElseGet(Function.identity());
}
}
| 2,943 |
0 |
Create_ds/mantis-api/src/main/java/io/mantisrx/api
|
Create_ds/mantis-api/src/main/java/io/mantisrx/api/filters/MasterCacheHitChecker.java
|
package io.mantisrx.api.filters;
import com.google.inject.Inject;
import com.google.inject.name.Named;
import com.netflix.config.DynamicBooleanProperty;
import com.netflix.spectator.api.Counter;
import com.netflix.zuul.filters.http.HttpInboundSyncFilter;
import com.netflix.zuul.message.http.HttpHeaderNames;
import com.netflix.zuul.message.http.HttpRequestMessage;
import com.netflix.zuul.message.http.HttpResponseMessage;
import com.netflix.zuul.message.http.HttpResponseMessageImpl;
import com.netflix.zuul.netty.SpectatorUtils;
import io.mantisrx.api.Constants;
import io.mantisrx.api.Util;
import io.netty.handler.codec.http.HttpHeaderValues;
import lombok.extern.slf4j.Slf4j;
import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
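/**
 * Inbound filter that, when caching is enabled, answers eligible GET requests directly
 * from {@code MasterCacheLoader.masterCache}, tagging served responses with the
 * {@code x-nflx-mantisapi-cached} header and recording per-key hit/miss counters.
 */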
@Slf4j
public class MasterCacheHitChecker extends HttpInboundSyncFilter {
private static DynamicBooleanProperty cacheEnabled = new DynamicBooleanProperty("mantisapi.cache.enabled", false);
private static final ConcurrentHashMap<String, Counter> cacheHitCounters = new ConcurrentHashMap<>(500);
private static final ConcurrentHashMap<String, Counter> cacheMissCounters = new ConcurrentHashMap<>(500);
private static final String CACHE_HIT_COUNTER_NAME = "mantis.api.cache.count";
private final List<String> pushPrefixes;
@Inject
public MasterCacheHitChecker(@Named("push-prefixes") List<String> pushPrefixes) {
super();
this.pushPrefixes = pushPrefixes;
}
@Override
public HttpRequestMessage apply(HttpRequestMessage request) {
if(cacheEnabled.get()) {
String key = request.getPathAndQuery();
String bodyText = MasterCacheLoader.masterCache.getIfPresent(key);
if (bodyText != null) { // Cache Hit
HttpResponseMessage response = new HttpResponseMessageImpl(request.getContext(), request, 200);
response.setBodyAsText(bodyText);
response.getHeaders().set(HttpHeaderNames.CONTENT_TYPE.toString(), HttpHeaderValues.APPLICATION_JSON.toString());
response.getHeaders().set(Constants.MANTISAPI_CACHED_HEADER, "true");
request.getContext().setStaticResponse(response);
cacheHitCounters.computeIfAbsent(key,
k -> SpectatorUtils.newCounter(CACHE_HIT_COUNTER_NAME, "api", "endpoint", k, "class", "hit"))
.increment();
} else { // Cache Miss
cacheMissCounters.computeIfAbsent(key,
k -> SpectatorUtils.newCounter(CACHE_HIT_COUNTER_NAME, "api", "endpoint", k, "class", "miss"))
.increment();
}
}
return request;
}
@Override
public int filterOrder() {
return 0;
}
@Override
public boolean shouldFilter(HttpRequestMessage msg) {
String key = msg.getPathAndQuery();
return msg.getMethod().equalsIgnoreCase("get")
&& key.startsWith("/api")
&& !Util.startsWithAnyOf(key, pushPrefixes);
}
}
| 2,944 |
0 |
Create_ds/mantis-api/src/main/java/io/mantisrx/api
|
Create_ds/mantis-api/src/main/java/io/mantisrx/api/filters/Options.java
|
package io.mantisrx.api.filters;
import com.netflix.zuul.filters.http.HttpSyncEndpoint;
import com.netflix.zuul.message.http.HttpRequestMessage;
import com.netflix.zuul.message.http.HttpResponseMessage;
import com.netflix.zuul.message.http.HttpResponseMessageImpl;
import com.netflix.zuul.stats.status.StatusCategoryUtils;
import com.netflix.zuul.stats.status.ZuulStatusCategory;
import io.netty.handler.codec.http.HttpHeaderNames;
import io.netty.handler.codec.http.HttpResponseStatus;
public class Options extends HttpSyncEndpoint {
@Override
public HttpResponseMessage apply(HttpRequestMessage request) {
HttpResponseMessage resp = new HttpResponseMessageImpl(request.getContext(), request, HttpResponseStatus.OK.code());
resp.setBodyAsText("");
StatusCategoryUtils.setStatusCategory(request.getContext(), ZuulStatusCategory.SUCCESS);
return resp;
}
}
| 2,945 |
0 |
Create_ds/mantis-api/src/main/java/io/mantisrx/api
|
Create_ds/mantis-api/src/main/java/io/mantisrx/api/filters/OutboundHeaders.java
|
package io.mantisrx.api.filters;
import com.netflix.zuul.filters.http.HttpOutboundSyncFilter;
import com.netflix.zuul.message.HeaderName;
import com.netflix.zuul.message.http.HttpResponseMessage;
import io.netty.handler.codec.http.HttpHeaderNames;
import io.netty.util.AsciiString;
public class OutboundHeaders extends HttpOutboundSyncFilter {
@Override
public boolean shouldFilter(HttpResponseMessage msg) {
return true;
}
@Override
public HttpResponseMessage apply(HttpResponseMessage resp) {
upsert(resp, HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN, "*");
addHeaderIfMissing(resp, HttpHeaderNames.ACCESS_CONTROL_ALLOW_HEADERS,
"Origin, X-Requested-With, Accept, Content-Type, Cache-Control");
addHeaderIfMissing(resp, HttpHeaderNames.ACCESS_CONTROL_ALLOW_METHODS,
"GET, OPTIONS, PUT, POST, DELETE, CONNECT");
addHeaderIfMissing(resp, HttpHeaderNames.ACCESS_CONTROL_ALLOW_CREDENTIALS, "true");
return resp;
}
private void upsert(HttpResponseMessage resp, AsciiString name, String value) {
resp.getHeaders().remove(new HeaderName(name.toString()));
resp.getHeaders().add(new HeaderName(name.toString()), value);
}
private void addHeaderIfMissing(HttpResponseMessage resp, AsciiString name, String value) {
if (resp.getHeaders().getAll(name.toString()).size() == 0) {
resp.getHeaders().add(name.toString(), value);
}
}
@Override
public int filterOrder() {
return 0;
}
}
| 2,946 |
0 |
Create_ds/mantis-api/src/main/java/io/mantisrx/api
|
Create_ds/mantis-api/src/main/java/io/mantisrx/api/filters/JobDiscoveryInfoCacheHitChecker.java
|
/**
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.api.filters;
import com.google.common.base.Strings;
import com.netflix.config.DynamicBooleanProperty;
import com.netflix.zuul.filters.http.HttpInboundSyncFilter;
import com.netflix.zuul.message.http.HttpHeaderNames;
import com.netflix.zuul.message.http.HttpRequestMessage;
import com.netflix.zuul.message.http.HttpResponseMessage;
import com.netflix.zuul.message.http.HttpResponseMessageImpl;
import io.mantisrx.api.Constants;
import io.mantisrx.api.services.JobDiscoveryService;
import io.netty.handler.codec.http.HttpHeaderValues;
import lombok.extern.slf4j.Slf4j;
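/**
 * Inbound filter that serves job discovery info straight from
 * {@code JobDiscoveryService.jobDiscoveryInfoCache} when an entry exists, marking the
 * response with the cached header so the outbound loader does not re-cache it.
 */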
@Slf4j
public class JobDiscoveryInfoCacheHitChecker extends HttpInboundSyncFilter {
public static final String PATH_SPEC = "/jobClusters/discoveryInfo";
private static DynamicBooleanProperty cacheEnabled = new DynamicBooleanProperty("mantisapi.cache.enabled", false);
@Override
public int filterOrder() {
return -1;
}
@Override
public boolean shouldFilter(HttpRequestMessage httpRequestMessage) {
String jobCluster = httpRequestMessage.getPath().replaceFirst(PATH_SPEC + "/", "");
return httpRequestMessage.getPath().startsWith(PATH_SPEC)
&& JobDiscoveryService.jobDiscoveryInfoCache.getIfPresent(jobCluster) != null;
}
@Override
public HttpRequestMessage apply(HttpRequestMessage request) {
String jobCluster = request.getPath().replaceFirst(PATH_SPEC + "/", "");
HttpResponseMessage resp = new HttpResponseMessageImpl(request.getContext(), request, 200);
        String bodyText = JobDiscoveryService.jobDiscoveryInfoCache.getIfPresent(jobCluster);
if (cacheEnabled.get() && !Strings.isNullOrEmpty(bodyText)) {
log.info("Serving cached job discovery info for {}.", jobCluster);
resp.setBodyAsText(bodyText);
resp.getHeaders().set(HttpHeaderNames.CONTENT_TYPE.toString(), HttpHeaderValues.APPLICATION_JSON.toString());
resp.getHeaders().set(Constants.MANTISAPI_CACHED_HEADER, "true");
request.getContext().setStaticResponse(resp);
}
return request;
}
}
| 2,947 |
0 |
Create_ds/mantis-api/src/main/java/io/mantisrx/api
|
Create_ds/mantis-api/src/main/java/io/mantisrx/api/filters/JobDiscoveryCacheLoader.java
|
/**
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.api.filters;
import com.netflix.config.DynamicBooleanProperty;
import com.netflix.zuul.filters.http.HttpOutboundSyncFilter;
import com.netflix.zuul.message.http.HttpResponseMessage;
import io.mantisrx.api.Constants;
import io.mantisrx.api.services.JobDiscoveryService;
import lombok.extern.slf4j.Slf4j;
@Slf4j
public class JobDiscoveryCacheLoader extends HttpOutboundSyncFilter {
private static DynamicBooleanProperty cacheEnabled = new DynamicBooleanProperty("mantisapi.cache.enabled", false);
@Override
public boolean needsBodyBuffered(HttpResponseMessage message) {
return true;
}
@Override
public int filterOrder() {
return 999; // Don't really care.
}
@Override
public boolean shouldFilter(HttpResponseMessage response) {
return response.getOutboundRequest().getPath().matches("^/api/v1/jobClusters/.*/latestJobDiscoveryInfo$")
&& response.getHeaders().getAll(Constants.MANTISAPI_CACHED_HEADER).isEmpty()
&& cacheEnabled.get();
}
@Override
public HttpResponseMessage apply(HttpResponseMessage response) {
String jobCluster = response.getOutboundRequest().getPath()
.replaceFirst("^/api/v1/jobClusters/", "")
.replaceFirst("/latestJobDiscoveryInfo$", "");
String responseBody = response.getBodyAsText();
if (null != responseBody) {
log.info("Caching latest job discovery info for {}.", jobCluster);
JobDiscoveryService.jobDiscoveryInfoCache.put(jobCluster, response.getBodyAsText());
}
return response;
}
}
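/*
 * Hedged sketch (illustrative, not mantis-api API): how the filter above derives the job cluster
 * name from the request path with two replaceFirst calls. "MyCluster" is a hypothetical cluster
 * name used only for the demonstration.
 */
class PathExtractionSketch {
    static String jobClusterOf(String path) {
        return path.replaceFirst("^/api/v1/jobClusters/", "")    // strip the fixed prefix
                   .replaceFirst("/latestJobDiscoveryInfo$", ""); // strip the fixed suffix
    }

    public static void main(String[] args) {
        // Prints "MyCluster" for the route this filter matches.
        System.out.println(jobClusterOf("/api/v1/jobClusters/MyCluster/latestJobDiscoveryInfo"));
    }
}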
| 2,948 |
0 |
Create_ds/mantis-api/src/main/java/io/mantisrx/api
|
Create_ds/mantis-api/src/main/java/io/mantisrx/api/filters/MQLParser.java
|
/*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.api.filters;
import io.mantisrx.shaded.com.fasterxml.jackson.core.JsonProcessingException;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper;
import com.netflix.zuul.filters.http.HttpSyncEndpoint;
import com.netflix.zuul.message.http.HttpRequestMessage;
import com.netflix.zuul.message.http.HttpResponseMessage;
import com.netflix.zuul.message.http.HttpResponseMessageImpl;
import io.mantisrx.mql.shaded.clojure.java.api.Clojure;
import io.mantisrx.mql.shaded.clojure.lang.IFn;
import lombok.Value;
import lombok.extern.slf4j.Slf4j;
import java.nio.charset.Charset;
@Slf4j
public class MQLParser extends HttpSyncEndpoint {
private static IFn require = Clojure.var("io.mantisrx.mql.shaded.clojure.core", "require");
static {
require.invoke(Clojure.read("io.mantisrx.mql.core"));
require.invoke(Clojure.read("io.mantisrx.mql.jvm.interfaces.server"));
require.invoke(Clojure.read("io.mantisrx.mql.jvm.interfaces.core"));
}
private static IFn parses = Clojure.var("io.mantisrx.mql.jvm.interfaces.core", "parses?");
private static IFn getParseError = Clojure.var("io.mantisrx.mql.jvm.interfaces.core", "get-parse-error");
private static final ObjectMapper objectMapper = new ObjectMapper();
public @Value class MQLParseResult {
private boolean success;
private String criterion;
private String message;
}
@Override
public HttpResponseMessage apply(HttpRequestMessage input) {
String query = input.getQueryParams().getFirst("criterion");
boolean parses = parses(query);
String parseError = getParseError(query);
MQLParseResult result = new MQLParseResult(parses, query, parses ? "" : parseError);
try {
HttpResponseMessage response = new HttpResponseMessageImpl(input.getContext(), input, 200);
response.setBody(objectMapper.writeValueAsBytes(result));
return response;
} catch (JsonProcessingException ex) {
HttpResponseMessage response = new HttpResponseMessageImpl(input.getContext(), input, 500);
response.setBody(getErrorResponse(ex.getMessage()).getBytes(Charset.defaultCharset()));
return response;
}
}
/**
* A predicate indicating whether the MQL parser considers the given query valid.
* @param query A String representing the MQL query.
* @return A boolean indicating whether or not the query successfully parses.
*/
public static Boolean parses(String query) {
return (Boolean) parses.invoke(query);
}
/**
* A convenience function allowing a caller to determine what went wrong if a call to #parses(String query) returns
* false.
* @param query A String representing the MQL query.
* @return A String representing the parse error for an MQL query, null if no parse error occurred.
*/
public static String getParseError(String query) {
return (String) getParseError.invoke(query);
}
private String getErrorResponse(String exceptionMessage) {
StringBuilder sb = new StringBuilder(50);
sb.append("{\"success\": false, \"messages\": \"");
sb.append(exceptionMessage);
sb.append("\"}");
return sb.toString();
}
}
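/*
 * Hedged sketch (illustrative): the Clojure-from-Java interop pattern used above — require the
 * namespace once, resolve a var to an IFn, then invoke it. Shown here with the unshaded
 * clojure.java.api entry point and a core library function as stand-ins for the shaded MQL
 * namespaces; assumes Clojure is on the classpath.
 */
class ClojureInteropSketch {
    public static void main(String[] args) {
        clojure.java.api.Clojure.var("clojure.core", "require")
                .invoke(clojure.java.api.Clojure.read("clojure.string"));  // load the namespace first
        clojure.lang.IFn upperCase = clojure.java.api.Clojure.var("clojure.string", "upper-case");
        System.out.println(upperCase.invoke("mql"));                       // -> "MQL"
    }
}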
| 2,949 |
0 |
Create_ds/mantis-api/src/main/java/io/mantisrx/api
|
Create_ds/mantis-api/src/main/java/io/mantisrx/api/proto/Artifact.java
|
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.api.proto;
import java.util.Objects;
public class Artifact {
private long sizeInBytes;
private String fileName;
private byte[] content;
public Artifact(String fileName, long sizeInBytes, byte[] content) {
Objects.requireNonNull(fileName, "File name cannot be null");
Objects.requireNonNull(content, "Content cannot be null");
this.fileName = fileName;
this.sizeInBytes = sizeInBytes;
this.content = content;
}
public long getSizeInBytes() {
return this.sizeInBytes;
}
public byte[] getContent() {
return content;
}
public String getFileName() {
return fileName;
}
}
| 2,950 |
0 |
Create_ds/mantis-api/src/main/java/io/mantisrx/api
|
Create_ds/mantis-api/src/main/java/io/mantisrx/api/proto/AppDiscoveryMap.java
|
/**
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.api.proto;
import io.mantisrx.server.core.JobSchedulingInfo;
import java.util.HashMap;
import java.util.Map;
public class AppDiscoveryMap {
public final String version;
public final Long timestamp;
public final Map<String, Map<String, JobSchedulingInfo>> mappings = new HashMap<>();
public AppDiscoveryMap(String version, Long timestamp) {
this.version = version;
this.timestamp = timestamp;
}
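/** Registers schedulingInfo under app -> stream, creating the per-app map on first use. */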
public void addMapping(String app, String stream, JobSchedulingInfo schedulingInfo) {
mappings.computeIfAbsent(app, k -> new HashMap<>()).put(stream, schedulingInfo);
}
}
| 2,951 |
0 |
Create_ds/mantis-api/src/main/java/io/mantisrx/api
|
Create_ds/mantis-api/src/main/java/io/mantisrx/api/tunnel/MantisCrossRegionalClient.java
|
/*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.api.tunnel;
import io.netty.buffer.ByteBuf;
import mantis.io.reactivex.netty.protocol.http.client.HttpClient;
import mantis.io.reactivex.netty.protocol.http.sse.ServerSentEvent;
public interface MantisCrossRegionalClient {
HttpClient<ByteBuf, ServerSentEvent> getSecureSseClient(String region);
HttpClient<String, ByteBuf> getSecureRestClient(String region);
}
| 2,952 |
0 |
Create_ds/mantis-api/src/main/java/io/mantisrx/api
|
Create_ds/mantis-api/src/main/java/io/mantisrx/api/tunnel/RegionData.java
|
/*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.api.tunnel;
import lombok.Value;
public @Value class RegionData {
private final String region;
private final boolean success;
private final String data;
private final int responseCode;
}
| 2,953 |
0 |
Create_ds/mantis-api/src/main/java/io/mantisrx/api
|
Create_ds/mantis-api/src/main/java/io/mantisrx/api/tunnel/NoOpCrossRegionalClient.java
|
package io.mantisrx.api.tunnel;
import io.netty.buffer.ByteBuf;
import mantis.io.reactivex.netty.protocol.http.client.HttpClient;
import mantis.io.reactivex.netty.protocol.http.sse.ServerSentEvent;
public class NoOpCrossRegionalClient implements MantisCrossRegionalClient {
@Override
public HttpClient<ByteBuf, ServerSentEvent> getSecureSseClient(String region) {
throw new UnsupportedOperationException();
}
@Override
public HttpClient<String, ByteBuf> getSecureRestClient(String region) {
throw new UnsupportedOperationException();
}
}
| 2,954 |
0 |
Create_ds/mantis-api/src/main/java/io/mantisrx/api
|
Create_ds/mantis-api/src/main/java/io/mantisrx/api/tunnel/CrossRegionHandler.java
|
/*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.api.tunnel;
import com.google.common.annotations.VisibleForTesting;
import com.netflix.config.DynamicIntProperty;
import com.netflix.config.DynamicStringProperty;
import com.netflix.spectator.api.Counter;
import com.netflix.zuul.netty.SpectatorUtils;
import io.mantisrx.api.Constants;
import io.mantisrx.api.Util;
import io.mantisrx.api.push.ConnectionBroker;
import io.mantisrx.api.push.PushConnectionDetails;
import io.mantisrx.shaded.com.google.common.util.concurrent.ThreadFactoryBuilder;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.SimpleChannelInboundHandler;
import io.netty.handler.codec.http.DefaultFullHttpResponse;
import io.netty.handler.codec.http.DefaultHttpHeaders;
import io.netty.handler.codec.http.DefaultHttpResponse;
import io.netty.handler.codec.http.FullHttpRequest;
import io.netty.handler.codec.http.FullHttpResponse;
import io.netty.handler.codec.http.HttpHeaderNames;
import io.netty.handler.codec.http.HttpHeaderValues;
import io.netty.handler.codec.http.HttpHeaders;
import io.netty.handler.codec.http.HttpMethod;
import io.netty.handler.codec.http.HttpResponse;
import io.netty.handler.codec.http.HttpResponseStatus;
import io.netty.handler.codec.http.HttpUtil;
import io.netty.handler.codec.http.HttpVersion;
import io.netty.handler.codec.http.QueryStringDecoder;
import io.netty.handler.codec.http.QueryStringEncoder;
import lombok.extern.slf4j.Slf4j;
import mantis.io.reactivex.netty.channel.StringTransformer;
import mantis.io.reactivex.netty.protocol.http.client.HttpClient;
import mantis.io.reactivex.netty.protocol.http.client.HttpClientRequest;
import mantis.io.reactivex.netty.protocol.http.client.HttpClientResponse;
import org.json.JSONArray;
import org.json.JSONException;
import org.json.JSONObject;
import rx.Observable;
import rx.Scheduler;
import rx.Subscription;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import java.util.stream.Collectors;
import static io.mantisrx.api.Constants.OriginRegionTagName;
import static io.mantisrx.api.Constants.TagNameValDelimiter;
import static io.mantisrx.api.Constants.TagsParamName;
import static io.mantisrx.api.Constants.TunnelPingMessage;
import static io.mantisrx.api.Constants.TunnelPingParamName;
import static io.mantisrx.api.Util.getLocalRegion;
@Slf4j
public class CrossRegionHandler extends SimpleChannelInboundHandler<FullHttpRequest> {
private final List<String> pushPrefixes;
private final MantisCrossRegionalClient mantisCrossRegionalClient;
private final ConnectionBroker connectionBroker;
private final Scheduler scheduler;
private Subscription subscription = null;
private String uriForLogging = null;
private ScheduledExecutorService scheduledExecutorService = new ScheduledThreadPoolExecutor(1,
new ThreadFactoryBuilder().setNameFormat("cross-region-handler-drainer-%d").build());
private ScheduledFuture drainFuture;
private final DynamicIntProperty queueCapacity = new DynamicIntProperty("io.mantisrx.api.push.queueCapacity", 1000);
private final DynamicIntProperty writeIntervalMillis = new DynamicIntProperty("io.mantisrx.api.push.writeIntervalMillis", 50);
private final DynamicStringProperty tunnelRegionsProperty = new DynamicStringProperty("io.mantisrx.api.tunnel.regions", Util.getLocalRegion());
public CrossRegionHandler(
List<String> pushPrefixes,
MantisCrossRegionalClient mantisCrossRegionalClient,
ConnectionBroker connectionBroker,
Scheduler scheduler) {
super(true);
this.pushPrefixes = pushPrefixes;
this.mantisCrossRegionalClient = mantisCrossRegionalClient;
this.connectionBroker = connectionBroker;
this.scheduler = scheduler;
}
@Override
protected void channelRead0(ChannelHandlerContext ctx, FullHttpRequest request) throws Exception {
uriForLogging = request.uri();
if (HttpUtil.is100ContinueExpected(request)) {
send100Continue(ctx);
}
if (isCrossRegionStreamingPath(request.uri())) {
handleRemoteSse(ctx, request);
} else { // REST
if (request.method() == HttpMethod.HEAD) {
handleHead(ctx, request);
} else if (request.method() == HttpMethod.GET) {
handleRestGet(ctx, request);
} else if(request.method() == HttpMethod.POST) {
handleRestPost(ctx, request);
} else {
ctx.fireChannelRead(request.retain());
}
}
}
//
// REST Implementations
//
private void handleHead(ChannelHandlerContext ctx, FullHttpRequest request) {
HttpHeaders headers = new DefaultHttpHeaders();
headers.add(HttpHeaderNames.CONTENT_TYPE, HttpHeaderValues.APPLICATION_JSON);
headers.add(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN, "*");
headers.add(HttpHeaderNames.ACCESS_CONTROL_ALLOW_HEADERS,
"Origin, X-Requested-With, Accept, Content-Type, Cache-Control");
headers.add(HttpHeaderNames.ACCESS_CONTROL_ALLOW_METHODS,
"GET, OPTIONS, PUT, POST, DELETE, CONNECT");
HttpResponse response = new DefaultFullHttpResponse(HttpVersion.HTTP_1_1,
HttpResponseStatus.OK,
Unpooled.copiedBuffer("", Charset.defaultCharset()),
headers,
new DefaultHttpHeaders());
ctx.writeAndFlush(response)
.addListener(__ -> ctx.close());
}
@VisibleForTesting
List<String> getTunnelRegions() {
return parseRegionCsv(tunnelRegionsProperty.get());
}
private static List<String> parseRegionCsv(String regionCsv) {
return Arrays.stream(regionCsv.split(","))
.map(String::trim)
.map(String::toLowerCase)
.collect(Collectors.toList());
}
@VisibleForTesting
List<String> parseRegionsInUri(String uri) {
final String regionString = getRegion(uri);
if (isAllRegion(regionString)) {
return getTunnelRegions();
} else if (regionString.contains(",")) {
return parseRegionCsv(regionString);
} else {
return Collections.singletonList(regionString);
}
}
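// Illustrative resolution (assuming tunnel regions of us-east-1,eu-west-1):
//   /region/all/api/...                 -> [us-east-1, eu-west-1]   (fan out to every tunnel region)
//   /region/us-east-1,eu-west-1/api/... -> [us-east-1, eu-west-1]   (explicit CSV)
//   /region/us-east-1/api/...           -> [us-east-1]              (single region)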
private void handleRestGet(ChannelHandlerContext ctx, FullHttpRequest request) {
List<String> regions = parseRegionsInUri(request.uri());
String uri = getTail(request.uri());
log.info("Relaying GET URI {} to {} (original uri {}).", uri, regions, request.uri());
Observable.from(regions)
.flatMap(region -> {
final AtomicReference<Throwable> ref = new AtomicReference<>();
HttpClientRequest<String> rq = HttpClientRequest.create(HttpMethod.GET, uri);
return Observable
.create((Observable.OnSubscribe<HttpClient<String, ByteBuf>>) subscriber ->
subscriber.onNext(mantisCrossRegionalClient.getSecureRestClient(region)))
.flatMap(client -> {
ref.set(null);
return client.submit(rq)
.flatMap(resp -> {
final int code = resp.getStatus().code();
if (code >= 500) {
throw new RuntimeException(resp.getStatus().toString());
}
return responseToRegionData(region, resp);
})
.onErrorReturn(t -> {
log.warn("Error getting response from remote master: " + t.getMessage());
ref.set(t);
return new RegionData(region, false, t.getMessage(), 0);
});
})
.map(data -> {
final Throwable t = ref.get();
if (t != null)
throw new RuntimeException(t);
return data;
})
.retryWhen(Util.getRetryFunc(log, uri + " in " + region))
.take(1)
.onErrorReturn(t -> new RegionData(region, false, t.getMessage(), 0));
})
.reduce(new ArrayList<RegionData>(3), (regionDatas, regionData) -> {
regionDatas.add(regionData);
return regionDatas;
})
.observeOn(scheduler)
.subscribeOn(scheduler)
.take(1)
.subscribe(result -> writeDataAndCloseChannel(ctx, result));
}
private void handleRestPost(ChannelHandlerContext ctx, FullHttpRequest request) {
String uri = getTail(request.uri());
List<String> regions = parseRegionsInUri(request.uri());
log.info("Relaying POST URI {} to {} (original uri {}).", uri, regions, request.uri());
final AtomicReference<Throwable> ref = new AtomicReference<>();
String content = request.content().toString(Charset.defaultCharset());
Observable.from(regions)
.flatMap(region -> {
HttpClientRequest<String> rq = HttpClientRequest.create(HttpMethod.POST, uri);
rq.withRawContent(content, StringTransformer.DEFAULT_INSTANCE);
return Observable
.create((Observable.OnSubscribe<HttpClient<String, ByteBuf>>) subscriber ->
subscriber.onNext(mantisCrossRegionalClient.getSecureRestClient(region)))
.flatMap(client -> client.submit(rq)
.flatMap(resp -> {
final int code = resp.getStatus().code();
if (code >= 500) {
throw new RuntimeException(resp.getStatus().toString() + " in " + region);
}
return responseToRegionData(region, resp);
})
.onErrorReturn(t -> {
log.warn("Error getting response from remote master: " + t.getMessage());
ref.set(t);
return new RegionData(region, false, t.getMessage(), 0);
}))
.map(data -> {
final Throwable t = ref.get();
if (t != null)
throw new RuntimeException(t);
return data;
})
.retryWhen(Util.getRetryFunc(log, uri + " in " + region))
.take(1)
.onErrorReturn(t -> new RegionData(region, false, t.getMessage(), 0));
})
.reduce(new ArrayList<RegionData>(), (regionDatas, regionData) -> {
regionDatas.add(regionData);
return regionDatas;
})
.observeOn(scheduler)
.subscribeOn(scheduler)
.take(1)
.subscribe(result -> writeDataAndCloseChannel(ctx, result));
}
private void handleRemoteSse(ChannelHandlerContext ctx, FullHttpRequest request) {
HttpResponse response = new DefaultHttpResponse(HttpVersion.HTTP_1_1,
HttpResponseStatus.OK);
HttpHeaders headers = response.headers();
headers.add(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN, "*");
headers.add(HttpHeaderNames.ACCESS_CONTROL_ALLOW_HEADERS, "Origin, X-Requested-With, Accept, Content-Type, Cache-Control");
headers.set(HttpHeaderNames.CONTENT_TYPE, "text/event-stream");
headers.set(HttpHeaderNames.CACHE_CONTROL, "no-cache, no-store, max-age=0, must-revalidate");
headers.set(HttpHeaderNames.PRAGMA, HttpHeaderValues.NO_CACHE);
headers.set(HttpHeaderNames.TRANSFER_ENCODING, HttpHeaderValues.CHUNKED);
response.headers().set(HttpHeaderNames.CONNECTION, HttpHeaderValues.KEEP_ALIVE);
ctx.writeAndFlush(response);
final boolean sendThroughTunnelPings = hasTunnelPingParam(request.uri());
final String uri = uriWithTunnelParamsAdded(getTail(request.uri()));
List<String> regions = parseRegionsInUri(request.uri());
log.info("Initiating remote SSE connection to {} in {} (original URI: {}).", uri, regions, request.uri());
PushConnectionDetails pcd = PushConnectionDetails.from(uri, regions);
String[] tags = Util.getTaglist(request.uri(), pcd.target, getRegion(request.uri()));
Counter numDroppedBytesCounter = SpectatorUtils.newCounter(Constants.numDroppedBytesCounterName, pcd.target, tags);
Counter numDroppedMessagesCounter = SpectatorUtils.newCounter(Constants.numDroppedMessagesCounterName, pcd.target, tags);
Counter numMessagesCounter = SpectatorUtils.newCounter(Constants.numMessagesCounterName, pcd.target, tags);
Counter numBytesCounter = SpectatorUtils.newCounter(Constants.numBytesCounterName, pcd.target, tags);
Counter drainTriggeredCounter = SpectatorUtils.newCounter(Constants.drainTriggeredCounterName, pcd.target, tags);
Counter numIncomingMessagesCounter = SpectatorUtils.newCounter(Constants.numIncomingMessagesCounterName, pcd.target, tags);
BlockingQueue<String> queue = new LinkedBlockingQueue<String>(queueCapacity.get());
drainFuture = scheduledExecutorService.scheduleAtFixedRate(() -> {
try {
if (queue.size() > 0 && ctx.channel().isWritable()) {
drainTriggeredCounter.increment();
final List<String> items = new ArrayList<>(queue.size());
synchronized (queue) {
queue.drainTo(items);
}
for (String data : items) {
ctx.write(Unpooled.copiedBuffer(data, StandardCharsets.UTF_8));
numMessagesCounter.increment();
numBytesCounter.increment(data.length());
}
ctx.flush();
}
} catch (Exception ex) {
log.error("Error writing to channel", ex);
}
}, writeIntervalMillis.get(), writeIntervalMillis.get(), TimeUnit.MILLISECONDS);
subscription = connectionBroker.connect(pcd)
.filter(event -> !event.equalsIgnoreCase(TunnelPingMessage) || sendThroughTunnelPings)
.doOnNext(event -> {
numIncomingMessagesCounter.increment();
if (!Constants.DUMMY_TIMER_DATA.equals(event)) {
String data = Constants.SSE_DATA_PREFIX + event + Constants.SSE_DATA_SUFFIX;
boolean offer = false;
synchronized (queue) {
offer = queue.offer(data);
}
if (!offer) {
numDroppedBytesCounter.increment(data.length());
numDroppedMessagesCounter.increment();
}
}
})
.subscribe();
}
@Override
public void channelUnregistered(ChannelHandlerContext ctx) throws Exception {
log.info("Channel {} is unregistered. URI: {}", ctx.channel(), uriForLogging);
unsubscribeIfSubscribed();
super.channelUnregistered(ctx);
}
@Override
public void channelInactive(ChannelHandlerContext ctx) throws Exception {
log.info("Channel {} is inactive. URI: {}", ctx.channel(), uriForLogging);
unsubscribeIfSubscribed();
super.channelInactive(ctx);
}
@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
log.warn("Exception caught by channel {}. URI: {}", ctx.channel(), uriForLogging, cause);
unsubscribeIfSubscribed();
// This is the tail of handlers. We should close the channel between the server and the client,
// essentially causing the client to disconnect and terminate.
ctx.close();
}
/** Unsubscribe if it's subscribed. */
private void unsubscribeIfSubscribed() {
if (subscription != null && !subscription.isUnsubscribed()) {
log.info("SSE unsubscribing subscription with URI: {}", uriForLogging);
subscription.unsubscribe();
}
if (drainFuture != null) {
drainFuture.cancel(false);
}
if (scheduledExecutorService != null) {
scheduledExecutorService.shutdown();
}
}
private boolean hasTunnelPingParam(String uri) {
return uri != null && uri.contains(TunnelPingParamName);
}
private Observable<RegionData> responseToRegionData(String region, HttpClientResponse<ByteBuf> resp) {
final int code = resp.getStatus().code();
return resp.getContent()
.collect(Unpooled::buffer,
ByteBuf::writeBytes)
.map(byteBuf -> new RegionData(region, true,
byteBuf.toString(StandardCharsets.UTF_8), code)
)
.onErrorReturn(t -> new RegionData(region, false, t.getMessage(), code));
}
private void writeDataAndCloseChannel(ChannelHandlerContext ctx, ArrayList<RegionData> result) {
try {
String serialized = responseToString(result);
ByteBuf body = Unpooled.copiedBuffer(serialized, StandardCharsets.UTF_8);
HttpResponse response = new DefaultFullHttpResponse(HttpVersion.HTTP_1_1,
HttpResponseStatus.OK,
body);
HttpHeaders headers = response.headers();
headers.add(HttpHeaderNames.CONTENT_TYPE, HttpHeaderValues.APPLICATION_JSON + "; charset=utf-8");
headers.add(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN, "*");
headers.add(HttpHeaderNames.ACCESS_CONTROL_ALLOW_HEADERS,
"Origin, X-Requested-With, Accept, Content-Type, Cache-Control");
headers.add(HttpHeaderNames.ACCESS_CONTROL_ALLOW_METHODS,
"GET, OPTIONS, PUT, POST, DELETE, CONNECT");
headers.add(HttpHeaderNames.CONTENT_LENGTH, body.readableBytes()); // byte length, not char count; they differ for multi-byte UTF-8
ctx.writeAndFlush(response)
.addListener(__ -> ctx.close());
} catch (Exception ex) {
log.error("Error serializing cross regional response: {}", ex.getMessage(), ex);
}
}
private String uriWithTunnelParamsAdded(String uri) {
QueryStringDecoder queryStringDecoder = new QueryStringDecoder(uri);
QueryStringEncoder queryStringEncoder = new QueryStringEncoder(queryStringDecoder.path());
queryStringDecoder.parameters().forEach((key, value) -> value.forEach(val -> queryStringEncoder.addParam(key, val)));
queryStringEncoder.addParam(TunnelPingParamName, "true");
queryStringEncoder.addParam(TagsParamName, OriginRegionTagName + TagNameValDelimiter + getLocalRegion());
return queryStringEncoder.toString();
}
private static void send100Continue(ChannelHandlerContext ctx) {
FullHttpResponse response = new DefaultFullHttpResponse(HttpVersion.HTTP_1_1,
HttpResponseStatus.CONTINUE);
ctx.writeAndFlush(response);
}
private boolean isCrossRegionStreamingPath(String uri) {
return Util.startsWithAnyOf(getTail(uri), this.pushPrefixes);
}
private static String getTail(String uri) {
return uri.replaceFirst("^/region/.*?/", "/");
}
/**
* Fetches a region from a URI if it contains one, returns garbage if not.
*
* @param uri The uri from which to fetch the region.
* @return The region embedded in the URI, always lower case.
*/
private static String getRegion(String uri) {
return uri.replaceFirst("^/region/", "")
.replaceFirst("/.*$", "")
.trim()
.toLowerCase();
}
/**
* Checks for a specific `all` string to connect to all regions
* specified by {@link CrossRegionHandler#getTunnelRegions()}
*/
private static boolean isAllRegion(String region) {
return region != null && region.trim().equalsIgnoreCase("all");
}
private static String responseToString(List<RegionData> dataList) {
StringBuilder sb = new StringBuilder("[");
boolean first = true;
for (RegionData data : dataList) {
if (first)
first = false;
else {
sb.append(",");
}
if (data.isSuccess()) {
String outputData = getForceWrappedJson(data.getData(), data.getRegion(), data.getResponseCode(), null);
sb.append(outputData);
} else {
sb.append(getForceWrappedJson("", data.getRegion(), data.getResponseCode(), data.getData()));
}
}
sb.append("]");
return sb.toString();
}
private final static String regKey = "mantis.meta.origin";
private final static String errKey = "mantis.meta.errorString";
private final static String codeKey = "mantis.meta.origin.response.code";
public static String getWrappedJson(String data, String region, String err) {
return getWrappedJsonIntl(data, region, err, 0, false);
}
public static String getForceWrappedJson(String data, String region, int code, String err) {
return getWrappedJsonIntl(data, region, err, code, true);
}
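// Wrapping strategy, documented for clarity: try the payload as a JSON object and merge the meta
// fields in directly; if that fails, try it as a JSON array and nest it under "response"; if it is
// neither, treat it as plain text and nest it under "response". When forceJson is false,
// non-object payloads are returned untouched.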
private static String getWrappedJsonIntl(String data, String region, String err, int code, boolean forceJson) {
try {
JSONObject o = new JSONObject(data);
o.put(regKey, region);
if (err != null && !err.isEmpty())
o.put(errKey, err);
if (code > 0)
o.put(codeKey, "" + code);
return o.toString();
} catch (JSONException e) {
try {
JSONArray a = new JSONArray(data);
if (!forceJson)
return data;
JSONObject o = new JSONObject();
o.put(regKey, region);
if (err != null && !err.isEmpty())
o.put(errKey, err);
if (code > 0)
o.put(codeKey, "" + code);
o.accumulate("response", a);
return o.toString();
} catch (JSONException ae) {
if (!forceJson)
return data;
JSONObject o = new JSONObject();
o.put(regKey, region);
if (err != null && !err.isEmpty())
o.put(errKey, err);
if (code > 0)
o.put(codeKey, "" + code);
o.put("response", data);
return o.toString();
}
}
}
}
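/*
 * Hedged sketch (illustrative, not mantis-api API): the scatter-gather shape used by
 * handleRestGet/handleRestPost above, reduced to its RxJava 1 skeleton. fetchFromRegion is a
 * hypothetical stand-in for the per-region HTTP call.
 */
class ScatterGatherSketch {
    static rx.Observable<java.util.ArrayList<String>> gather(
            java.util.List<String> regions,
            rx.functions.Func1<String, rx.Observable<String>> fetchFromRegion) {
        return rx.Observable.from(regions)
                .flatMap(region -> fetchFromRegion.call(region)              // fan out: one async call per region
                        .onErrorReturn(t -> region + ": " + t.getMessage())) // a failed region degrades the result, never aborts the gather
                .reduce(new java.util.ArrayList<String>(), (acc, item) -> {  // fan in: accumulate every region's answer
                    acc.add(item);
                    return acc;
                });
    }
}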
| 2,955 |
0 |
Create_ds/mantis-api/src/main/java/io/mantisrx/api
|
Create_ds/mantis-api/src/main/java/io/mantisrx/api/push/MantisWebSocketFrameHandler.java
|
package io.mantisrx.api.push;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.ArrayList;
import java.util.List;
import com.netflix.config.DynamicIntProperty;
import com.netflix.spectator.api.Counter;
import com.netflix.zuul.netty.SpectatorUtils;
import io.mantisrx.api.Constants;
import io.mantisrx.api.Util;
import io.mantisrx.shaded.com.google.common.util.concurrent.ThreadFactoryBuilder;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.SimpleChannelInboundHandler;
import io.netty.handler.codec.http.websocketx.TextWebSocketFrame;
import io.netty.handler.codec.http.websocketx.WebSocketServerProtocolHandler;
import io.netty.util.ReferenceCountUtil;
import lombok.extern.slf4j.Slf4j;
import rx.Subscription;
@Slf4j
public class MantisWebSocketFrameHandler extends SimpleChannelInboundHandler<TextWebSocketFrame> {
private final ConnectionBroker connectionBroker;
private final DynamicIntProperty queueCapacity = new DynamicIntProperty("io.mantisrx.api.push.queueCapacity", 1000);
private final DynamicIntProperty writeIntervalMillis = new DynamicIntProperty("io.mantisrx.api.push.writeIntervalMillis", 50);
private Subscription subscription;
private String uri;
private ScheduledExecutorService scheduledExecutorService = new ScheduledThreadPoolExecutor(1,
new ThreadFactoryBuilder().setNameFormat("websocket-handler-drainer-%d").build());
private ScheduledFuture drainFuture;
public MantisWebSocketFrameHandler(ConnectionBroker broker) {
super(true);
this.connectionBroker = broker;
}
@Override
public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception {
if (evt.getClass() == WebSocketServerProtocolHandler.HandshakeComplete.class) {
WebSocketServerProtocolHandler.HandshakeComplete complete = (WebSocketServerProtocolHandler.HandshakeComplete) evt;
uri = complete.requestUri();
final PushConnectionDetails pcd = PushConnectionDetails.from(uri);
log.info("Request to URI '{}' is a WebSSocket upgrade, removing the SSE handler", uri);
if (ctx.pipeline().get(MantisSSEHandler.class) != null) {
ctx.pipeline().remove(MantisSSEHandler.class);
}
final String[] tags = Util.getTaglist(uri, pcd.target);
Counter numDroppedBytesCounter = SpectatorUtils.newCounter(Constants.numDroppedBytesCounterName, pcd.target, tags);
Counter numDroppedMessagesCounter = SpectatorUtils.newCounter(Constants.numDroppedMessagesCounterName, pcd.target, tags);
Counter numMessagesCounter = SpectatorUtils.newCounter(Constants.numMessagesCounterName, pcd.target, tags);
Counter numBytesCounter = SpectatorUtils.newCounter(Constants.numBytesCounterName, pcd.target, tags);
Counter drainTriggeredCounter = SpectatorUtils.newCounter(Constants.drainTriggeredCounterName, pcd.target, tags);
Counter numIncomingMessagesCounter = SpectatorUtils.newCounter(Constants.numIncomingMessagesCounterName, pcd.target, tags);
BlockingQueue<String> queue = new LinkedBlockingQueue<>(queueCapacity.get());
drainFuture = scheduledExecutorService.scheduleAtFixedRate(() -> {
try {
if (queue.size() > 0 && ctx.channel().isWritable()) {
drainTriggeredCounter.increment();
final List<String> items = new ArrayList<>(queue.size());
synchronized (queue) {
queue.drainTo(items);
}
for (String data : items) {
ctx.write(new TextWebSocketFrame(data));
numMessagesCounter.increment();
numBytesCounter.increment(data.length());
}
ctx.flush();
}
} catch (Exception ex) {
log.error("Error writing to channel", ex);
}
}, writeIntervalMillis.get(), writeIntervalMillis.get(), TimeUnit.MILLISECONDS);
this.subscription = this.connectionBroker.connect(pcd)
.doOnNext(event -> {
numIncomingMessagesCounter.increment();
if (!Constants.DUMMY_TIMER_DATA.equals(event)) {
boolean offer = false;
synchronized (queue) {
offer = queue.offer(event);
}
if (!offer) {
numDroppedBytesCounter.increment(event.length());
numDroppedMessagesCounter.increment();
}
}
})
.subscribe();
} else {
ReferenceCountUtil.retain(evt);
super.userEventTriggered(ctx, evt);
}
}
@Override
public void channelUnregistered(ChannelHandlerContext ctx) throws Exception {
log.info("Channel {} is unregistered. URI: {}", ctx.channel(), uri);
unsubscribeIfSubscribed();
super.channelUnregistered(ctx);
}
@Override
public void channelInactive(ChannelHandlerContext ctx) throws Exception {
log.info("Channel {} is inactive. URI: {}", ctx.channel(), uri);
unsubscribeIfSubscribed();
super.channelInactive(ctx);
}
@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
log.warn("Exception caught by channel {}. URI: {}", ctx.channel(), uri, cause);
unsubscribeIfSubscribed();
// This is the tail of handlers. We should close the channel between the server and the client,
// essentially causing the client to disconnect and terminate.
ctx.close();
}
@Override
protected void channelRead0(ChannelHandlerContext ctx, TextWebSocketFrame msg) {
// No op.
}
/** Unsubscribe if it's subscribed. */
private void unsubscribeIfSubscribed() {
if (subscription != null && !subscription.isUnsubscribed()) {
log.info("WebSocket unsubscribing subscription with URI: {}", uri);
subscription.unsubscribe();
}
if (drainFuture != null) {
drainFuture.cancel(false);
}
if (scheduledExecutorService != null) {
scheduledExecutorService.shutdown();
}
}
}
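/*
 * Hedged sketch (illustrative, not mantis-api API): the bounded-queue drain pattern shared by
 * this handler, MantisSSEHandler, and CrossRegionHandler — producers offer() into a bounded queue
 * so slow consumers drop rather than buffer without limit, and a timer drains in batches so the
 * channel is flushed once per tick instead of once per message. Names below are assumptions.
 */
class DrainQueueSketch {
    private final java.util.concurrent.BlockingQueue<String> queue =
            new java.util.concurrent.LinkedBlockingQueue<>(1000);   // capacity mirrors queueCapacity above

    boolean produce(String event) {
        return queue.offer(event);                                  // non-blocking; false means the message was dropped
    }

    void startDraining(java.util.function.Consumer<java.util.List<String>> writeAndFlush) {
        java.util.concurrent.Executors.newSingleThreadScheduledExecutor().scheduleAtFixedRate(() -> {
            java.util.List<String> batch = new java.util.ArrayList<>();
            queue.drainTo(batch);                                   // take everything that accumulated this tick
            if (!batch.isEmpty()) {
                writeAndFlush.accept(batch);                        // one flush per batch
            }
        }, 50, 50, java.util.concurrent.TimeUnit.MILLISECONDS);
    }
}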
| 2,956 |
0 |
Create_ds/mantis-api/src/main/java/io/mantisrx/api
|
Create_ds/mantis-api/src/main/java/io/mantisrx/api/push/ConnectionBroker.java
|
package io.mantisrx.api.push;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.google.inject.name.Named;
import com.netflix.spectator.api.Counter;
import com.netflix.zuul.netty.SpectatorUtils;
import io.mantisrx.api.Constants;
import io.mantisrx.api.Util;
import io.mantisrx.api.services.JobDiscoveryService;
import io.mantisrx.api.tunnel.MantisCrossRegionalClient;
import io.mantisrx.client.MantisClient;
import io.mantisrx.client.SinkConnectionFunc;
import io.mantisrx.client.SseSinkConnectionFunction;
import io.mantisrx.common.MantisServerSentEvent;
import io.mantisrx.runtime.parameter.SinkParameters;
import io.mantisrx.server.worker.client.MetricsClient;
import io.mantisrx.server.worker.client.SseWorkerConnectionFunction;
import io.mantisrx.server.worker.client.WorkerConnectionsStatus;
import io.mantisrx.server.worker.client.WorkerMetricsClient;
import io.vavr.control.Try;
import lombok.extern.slf4j.Slf4j;
import mantis.io.reactivex.netty.protocol.http.client.HttpClientRequest;
import mantis.io.reactivex.netty.protocol.http.client.HttpClientResponse;
import mantis.io.reactivex.netty.protocol.http.sse.ServerSentEvent;
import rx.Observable;
import rx.Observer;
import rx.Scheduler;
import rx.functions.Action1;
import rx.schedulers.Schedulers;
import java.util.List;
import java.util.Map;
import java.util.WeakHashMap;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import static io.mantisrx.api.Constants.TunnelPingMessage;
import static io.mantisrx.api.Util.getLocalRegion;
@Slf4j
@Singleton
public class ConnectionBroker {
private final MantisClient mantisClient;
private final MantisCrossRegionalClient mantisCrossRegionalClient;
private final WorkerMetricsClient workerMetricsClient;
private final JobDiscoveryService jobDiscoveryService;
private final Scheduler scheduler;
private final ObjectMapper objectMapper;
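// WeakHashMap keys: a cached entry becomes collectable once nothing references its
// PushConnectionDetails key, so idle replayed connections do not accumulate indefinitely.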
private final Map<PushConnectionDetails, Observable<String>> connectionCache = new WeakHashMap<>();
@Inject
public ConnectionBroker(MantisClient mantisClient,
MantisCrossRegionalClient mantisCrossRegionalClient,
WorkerMetricsClient workerMetricsClient,
@Named("io-scheduler") Scheduler scheduler,
ObjectMapper objectMapper) {
this.mantisClient = mantisClient;
this.mantisCrossRegionalClient = mantisCrossRegionalClient;
this.workerMetricsClient = workerMetricsClient;
this.jobDiscoveryService = JobDiscoveryService.getInstance(mantisClient, scheduler);
this.scheduler = scheduler;
this.objectMapper = objectMapper;
}
public Observable<String> connect(PushConnectionDetails details) {
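// Note: the first three cases return a shared stream directly and never populate connectionCache;
// only the replayed cases below (job status, scheduling info, discovery) are memoized in the cache.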
if (!connectionCache.containsKey(details)) {
switch (details.type) {
case CONNECT_BY_NAME:
return getConnectByNameFor(details)
.subscribeOn(scheduler)
.doOnUnsubscribe(() -> {
log.info("Purging {} from cache.", details);
connectionCache.remove(details);
})
.doOnCompleted(() -> {
log.info("Purging {} from cache.", details);
connectionCache.remove(details);
})
.share();
case CONNECT_BY_ID:
return getConnectByIdFor(details)
.subscribeOn(scheduler)
.doOnUnsubscribe(() -> {
log.info("Purging {} from cache.", details);
connectionCache.remove(details);
})
.doOnCompleted(() -> {
log.info("Purging {} from cache.", details);
connectionCache.remove(details);
})
.share();
case METRICS:
return getWorkerMetrics(details)
.subscribeOn(scheduler)
.doOnUnsubscribe(() -> {
log.info("Purging {} from cache.", details);
connectionCache.remove(details);
})
.doOnCompleted(() -> {
log.info("Purging {} from cache.", details);
connectionCache.remove(details);
});
case JOB_STATUS:
connectionCache.put(details,
mantisClient
.getJobStatusObservable(details.target)
.subscribeOn(scheduler)
.doOnCompleted(() -> {
log.info("Purging {} from cache.", details);
connectionCache.remove(details);
})
.doOnUnsubscribe(() -> {
log.info("Purging {} from cache.", details);
connectionCache.remove(details);
})
.replay(25)
.autoConnect());
break;
case JOB_SCHEDULING_INFO:
connectionCache.put(details,
mantisClient.getSchedulingChanges(details.target)
.subscribeOn(scheduler)
.map(changes -> Try.of(() -> objectMapper.writeValueAsString(changes)).getOrElse("Error"))
.doOnCompleted(() -> {
log.info("Purging {} from cache.", details);
connectionCache.remove(details);
})
.doOnUnsubscribe(() -> {
log.info("Purging {} from cache.", details);
connectionCache.remove(details);
})
.replay(1)
.autoConnect());
break;
case JOB_CLUSTER_DISCOVERY:
connectionCache.put(details,
jobDiscoveryService.jobDiscoveryInfoStream(jobDiscoveryService.key(JobDiscoveryService.LookupType.JOB_CLUSTER, details.target))
.subscribeOn(scheduler)
.map(jdi -> Try.of(() -> objectMapper.writeValueAsString(jdi)).getOrElse("Error"))
.doOnCompleted(() -> {
log.info("Purging {} from cache.", details);
connectionCache.remove(details);
})
.doOnUnsubscribe(() -> {
log.info("Purging {} from cache.", details);
connectionCache.remove(details);
})
.replay(1)
.autoConnect());
break;
}
log.info("Caching connection for: {}", details);
}
return connectionCache.get(details);
}
//
// Helpers
//
private Observable<String> getConnectByNameFor(PushConnectionDetails details) {
return details.regions.isEmpty()
? getResults(false, this.mantisClient, details.target, details.getSinkparameters())
.flatMap(m -> m)
.map(MantisServerSentEvent::getEventAsString)
: getRemoteDataObservable(details.getUri(), details.target, details.getRegions().asJava());
}
private Observable<String> getConnectByIdFor(PushConnectionDetails details) {
return details.getRegions().isEmpty()
? getResults(true, this.mantisClient, details.target, details.getSinkparameters())
.flatMap(m -> m)
.map(MantisServerSentEvent::getEventAsString)
: getRemoteDataObservable(details.getUri(), details.target, details.getRegions().asJava());
}
private static SinkConnectionFunc<MantisServerSentEvent> getSseConnFunc(final String target, SinkParameters sinkParameters) {
return new SseSinkConnectionFunction(true,
t -> log.warn("Reconnecting to sink of job " + target + " after error: " + t.getMessage()),
sinkParameters);
}
private static Observable<Observable<MantisServerSentEvent>> getResults(boolean isJobId, MantisClient mantisClient,
final String target, SinkParameters sinkParameters) {
final AtomicBoolean hasError = new AtomicBoolean();
return isJobId ?
mantisClient.getSinkClientByJobId(target, getSseConnFunc(target, sinkParameters), null).getResults() :
mantisClient.getSinkClientByJobName(target, getSseConnFunc(target, sinkParameters), null)
.switchMap(serverSentEventSinkClient -> {
if (serverSentEventSinkClient.hasError()) {
hasError.set(true);
return Observable.error(new Exception(serverSentEventSinkClient.getError()));
}
return serverSentEventSinkClient.getResults();
})
.takeWhile(o -> !hasError.get());
}
//
// Tunnel
//
private Observable<String> getRemoteDataObservable(String uri, String target, List<String> regions) {
return Observable.from(regions)
.flatMap(region -> {
final String originReplacement = "\\{\"" + Constants.metaOriginName + "\": \"" + region + "\", ";
if (region.equalsIgnoreCase(getLocalRegion())) {
return this.connect(PushConnectionDetails.from(uri))
.map(datum -> datum.replaceFirst("^\\{", originReplacement));
} else {
log.info("Connecting to remote region {} at {}.", region, uri);
return mantisCrossRegionalClient.getSecureSseClient(region)
.submit(HttpClientRequest.createGet(uri))
.retryWhen(Util.getRetryFunc(log, uri + " in " + region))
.doOnError(throwable -> log.warn(
"Error getting response from remote SSE server for uri {} in region {}: {}",
uri, region, throwable.getMessage(), throwable)
).flatMap(remoteResponse -> {
if (!remoteResponse.getStatus().reasonPhrase().equals("OK")) {
log.warn("Unexpected response from remote sink for uri {} region {}: {}", uri, region, remoteResponse.getStatus().reasonPhrase());
String err = remoteResponse.getHeaders().get(Constants.metaErrorMsgHeader);
if (err == null || err.isEmpty())
err = remoteResponse.getStatus().reasonPhrase();
return Observable.<MantisServerSentEvent>error(new Exception(err))
.map(datum -> datum.getEventAsString());
}
return clientResponseToObservable(remoteResponse, target, region, uri)
.map(datum -> datum.replaceFirst("^\\{", originReplacement))
.doOnError(t -> log.error(t.getMessage()));
})
.subscribeOn(scheduler)
.observeOn(scheduler)
.doOnError(t -> log.warn("Error streaming in remote data ({}). Will retry: {}", region, t.getMessage(), t))
.doOnCompleted(() -> log.info(String.format("remote sink connection complete for uri %s, region=%s", uri, region)));
}
})
.observeOn(scheduler)
.subscribeOn(scheduler)
.doOnError(t -> log.error("Error in flatMapped cross-regional observable for {}", uri, t));
}
private Observable<String> clientResponseToObservable(HttpClientResponse<ServerSentEvent> response, String target, String
region, String uri) {
Counter numRemoteBytes = SpectatorUtils.newCounter(Constants.numRemoteBytesCounterName, target, "region", region);
Counter numRemoteMessages = SpectatorUtils.newCounter(Constants.numRemoteMessagesCounterName, target, "region", region);
Counter numSseErrors = SpectatorUtils.newCounter(Constants.numSseErrorsCounterName, target, "region", region);
return response.getContent()
.doOnError(t -> log.warn(t.getMessage()))
.timeout(3 * Constants.TunnelPingIntervalSecs, TimeUnit.SECONDS)
.doOnError(t -> log.warn("Timeout getting data from remote {} connection for {}", region, uri))
.filter(sse -> (sse.hasEventType() && sse.getEventTypeAsString().startsWith("error:"))
|| !TunnelPingMessage.equals(sse.contentAsString())) // drop tunnel pings, but let error-typed events through to fail the stream below
.map(t1 -> {
String data = "";
if (t1.hasEventType() && t1.getEventTypeAsString().startsWith("error:")) {
log.error("SSE has error, type=" + t1.getEventTypeAsString() + ", content=" + t1.contentAsString());
numSseErrors.increment();
throw new RuntimeException("Got error SSE event: " + t1.contentAsString());
}
try {
data = t1.contentAsString();
if (data != null) {
numRemoteBytes.increment(data.length());
numRemoteMessages.increment();
}
} catch (Exception e) {
log.error("Could not extract data from SSE " + e.getMessage(), e);
}
return data;
});
}
private Observable<String> getWorkerMetrics(PushConnectionDetails details) {
final String jobId = details.target;
SinkParameters metricNamesFilter = details.getSinkparameters();
final MetricsClient<MantisServerSentEvent> metricsClient = workerMetricsClient.getMetricsClientByJobId(jobId,
new SseWorkerConnectionFunction(true, new Action1<Throwable>() {
@Override
public void call(Throwable throwable) {
log.error("Metric connection error: " + throwable.getMessage());
try {
Thread.sleep(500);
} catch (InterruptedException ie) {
log.error("Interrupted waiting for retrying connection");
}
}
}, metricNamesFilter),
new Observer<WorkerConnectionsStatus>() {
@Override
public void onCompleted() {
log.info("got onCompleted in WorkerConnStatus obs");
}
@Override
public void onError(Throwable e) {
log.info("got onError in WorkerConnStatus obs");
}
@Override
public void onNext(WorkerConnectionsStatus workerConnectionsStatus) {
log.info("got WorkerConnStatus {}", workerConnectionsStatus);
}
});
return metricsClient
.getResults()
.flatMap(metrics -> metrics
.map(MantisServerSentEvent::getEventAsString));
}
}
| 2,957 |
0 |
Create_ds/mantis-api/src/main/java/io/mantisrx/api
|
Create_ds/mantis-api/src/main/java/io/mantisrx/api/push/PushConnectionDetails.java
|
package io.mantisrx.api.push;
import io.mantisrx.runtime.parameter.SinkParameter;
import io.mantisrx.runtime.parameter.SinkParameters;
import io.netty.handler.codec.http.QueryStringDecoder;
import io.vavr.collection.List;
import io.vavr.control.Try;
import lombok.Value;
import java.util.stream.Collectors;
public @Value class PushConnectionDetails {
public enum TARGET_TYPE {
CONNECT_BY_NAME,
CONNECT_BY_ID,
JOB_STATUS,
JOB_SCHEDULING_INFO,
JOB_CLUSTER_DISCOVERY,
METRICS
}
private final String uri;
public final String target;
public final TARGET_TYPE type;
public final List<String> regions;
/**
* Determines the connection type for a given push connection.
*
* @param uri Request URI as returned by Netty's requestUri() methods. Expects leading slash.
* @return The CONNECTION_TYPE requested by the URI.
*/
public static TARGET_TYPE determineTargetType(final String uri) {
if (uri.startsWith("/jobconnectbyname") || uri.startsWith("/api/v1/jobconnectbyname")) {
return TARGET_TYPE.CONNECT_BY_NAME;
} else if (uri.startsWith("/jobconnectbyid") || uri.startsWith("/api/v1/jobconnectbyid")) {
return TARGET_TYPE.CONNECT_BY_ID;
} else if (uri.startsWith("/jobstatus/") || uri.startsWith("/api/v1/jobstatus/")) {
return TARGET_TYPE.JOB_STATUS;
} else if (uri.startsWith("/api/v1/jobs/schedulingInfo/")) {
return TARGET_TYPE.JOB_SCHEDULING_INFO;
} else if (uri.startsWith("/jobClusters/discoveryInfoStream/")) {
return TARGET_TYPE.JOB_CLUSTER_DISCOVERY;
} else if (uri.startsWith("/api/v1/metrics/")) {
return TARGET_TYPE.METRICS;
} else {
throw new IllegalArgumentException("Unable to determine push connection type from URI: " + uri);
}
}
/**
* Determines the target for a push connection request. Typically a job name or id.
*
* @param uri Request URI as returned by Netty's requestUri() methods. Expects leading slash.
* @return The target requested by the URI.
*/
public static String determineTarget(final String uri) {
String sanitized = uri.replaceFirst("^/(api/v1/)?(jobconnectbyid|jobconnectbyname|jobstatus|jobs/schedulingInfo|jobClusters/discoveryInfoStream|metrics)/", "");
QueryStringDecoder queryStringDecoder = new QueryStringDecoder(sanitized);
return queryStringDecoder.path();
}
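// Illustrative: determineTarget("/api/v1/jobconnectbyname/MyJob?clientId=abc") -> "MyJob"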
//
// Computed Properties
//
public SinkParameters getSinkparameters() {
SinkParameters.Builder builder = new SinkParameters.Builder();
QueryStringDecoder queryStringDecoder = new QueryStringDecoder(uri);
builder.parameters(queryStringDecoder
.parameters()
.entrySet()
.stream()
.flatMap(entry -> entry.getValue()
.stream()
.map(val -> Try.of(() -> new SinkParameter(entry.getKey(), val)))
.filter(Try::isSuccess)
.map(Try::get))
.collect(Collectors.toList())
.toArray(new SinkParameter[]{}));
return builder.build();
}
//
// Static Factories
//
public static PushConnectionDetails from(String uri) {
return from(uri, List.empty());
}
public static PushConnectionDetails from(String uri, List<String> regions) {
return new PushConnectionDetails(uri, determineTarget(uri), determineTargetType(uri), regions);
}
public static PushConnectionDetails from(String uri, java.util.List<String> regions) {
return new PushConnectionDetails(uri, determineTarget(uri), determineTargetType(uri), List.ofAll(regions));
}
}
| 2,958 |
0 |
Create_ds/mantis-api/src/main/java/io/mantisrx/api
|
Create_ds/mantis-api/src/main/java/io/mantisrx/api/push/MantisSSEHandler.java
|
package io.mantisrx.api.push;
import com.netflix.config.DynamicIntProperty;
import com.netflix.spectator.api.Counter;
import com.netflix.zuul.netty.SpectatorUtils;
import io.mantisrx.api.Constants;
import io.mantisrx.api.Util;
import io.mantisrx.server.core.master.MasterDescription;
import io.mantisrx.server.master.client.HighAvailabilityServices;
import io.mantisrx.shaded.com.google.common.util.concurrent.ThreadFactoryBuilder;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.SimpleChannelInboundHandler;
import io.netty.handler.codec.http.*;
import lombok.extern.slf4j.Slf4j;
import mantis.io.reactivex.netty.RxNetty;
import mantis.io.reactivex.netty.channel.StringTransformer;
import mantis.io.reactivex.netty.pipeline.PipelineConfigurator;
import mantis.io.reactivex.netty.pipeline.PipelineConfigurators;
import mantis.io.reactivex.netty.protocol.http.client.HttpClient;
import mantis.io.reactivex.netty.protocol.http.client.HttpClientRequest;
import mantis.io.reactivex.netty.protocol.http.client.HttpClientResponse;
import mantis.io.reactivex.netty.protocol.http.client.HttpResponseHeaders;
import rx.Observable;
import rx.Subscription;
import java.nio.charset.StandardCharsets;
import java.util.*;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
/**
* Http handler for the WebSocket/SSE paths.
*/
@Slf4j
public class MantisSSEHandler extends SimpleChannelInboundHandler<FullHttpRequest> {
private final DynamicIntProperty queueCapacity = new DynamicIntProperty("io.mantisrx.api.push.queueCapacity", 1000);
private final DynamicIntProperty writeIntervalMillis = new DynamicIntProperty("io.mantisrx.api.push.writeIntervalMillis", 50);
private final ConnectionBroker connectionBroker;
private final HighAvailabilityServices highAvailabilityServices;
private final List<String> pushPrefixes;
private Subscription subscription;
private ScheduledExecutorService scheduledExecutorService = new ScheduledThreadPoolExecutor(1,
new ThreadFactoryBuilder().setNameFormat("sse-handler-drainer-%d").build());
private ScheduledFuture drainFuture;
private String uri;
public MantisSSEHandler(ConnectionBroker connectionBroker, HighAvailabilityServices highAvailabilityServices,
List<String> pushPrefixes) {
super(true);
this.connectionBroker = connectionBroker;
this.highAvailabilityServices = highAvailabilityServices;
this.pushPrefixes = pushPrefixes;
}
@Override
protected void channelRead0(ChannelHandlerContext ctx, FullHttpRequest request) throws Exception {
if (Util.startsWithAnyOf(request.uri(), pushPrefixes)
&& !isWebsocketUpgrade(request)) {
if (HttpUtil.is100ContinueExpected(request)) {
send100Continue(ctx);
}
HttpResponse response = new DefaultHttpResponse(HttpVersion.HTTP_1_1,
HttpResponseStatus.OK);
HttpHeaders headers = response.headers();
headers.add(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN, "*");
headers.add(HttpHeaderNames.ACCESS_CONTROL_ALLOW_HEADERS, "Origin, X-Requested-With, Accept, Content-Type, Cache-Control");
headers.set(HttpHeaderNames.CONTENT_TYPE, "text/event-stream");
headers.set(HttpHeaderNames.CACHE_CONTROL, "no-cache, no-store, max-age=0, must-revalidate");
headers.set(HttpHeaderNames.PRAGMA, HttpHeaderValues.NO_CACHE);
headers.set(HttpHeaderNames.TRANSFER_ENCODING, HttpHeaderValues.CHUNKED);
response.headers().set(HttpHeaderNames.CONNECTION, HttpHeaderValues.KEEP_ALIVE);
ctx.writeAndFlush(response);
uri = request.uri();
final PushConnectionDetails pcd =
isSubmitAndConnect(request)
? new PushConnectionDetails(uri, jobSubmit(request), PushConnectionDetails.TARGET_TYPE.CONNECT_BY_ID, io.vavr.collection.List.empty())
: PushConnectionDetails.from(uri);
log.info("SSE Connecting for: {}", pcd);
boolean tunnelPingsEnabled = isTunnelPingsEnabled(uri);
final String[] tags = Util.getTaglist(uri, pcd.target);
Counter numDroppedBytesCounter = SpectatorUtils.newCounter(Constants.numDroppedBytesCounterName, pcd.target, tags);
Counter numDroppedMessagesCounter = SpectatorUtils.newCounter(Constants.numDroppedMessagesCounterName, pcd.target, tags);
Counter numMessagesCounter = SpectatorUtils.newCounter(Constants.numMessagesCounterName, pcd.target, tags);
Counter numBytesCounter = SpectatorUtils.newCounter(Constants.numBytesCounterName, pcd.target, tags);
Counter drainTriggeredCounter = SpectatorUtils.newCounter(Constants.drainTriggeredCounterName, pcd.target, tags);
Counter numIncomingMessagesCounter = SpectatorUtils.newCounter(Constants.numIncomingMessagesCounterName, pcd.target, tags);
BlockingQueue<String> queue = new LinkedBlockingQueue<>(queueCapacity.get());
drainFuture = scheduledExecutorService.scheduleAtFixedRate(() -> {
try {
if (queue.size() > 0 && ctx.channel().isWritable()) {
drainTriggeredCounter.increment();
final List<String> items = new ArrayList<>(queue.size());
synchronized (queue) {
queue.drainTo(items);
}
for (String data : items) {
ctx.write(Unpooled.copiedBuffer(data, StandardCharsets.UTF_8));
numMessagesCounter.increment();
numBytesCounter.increment(data.length());
}
ctx.flush();
}
} catch (Exception ex) {
log.error("Error writing to channel", ex);
}
}, writeIntervalMillis.get(), writeIntervalMillis.get(), TimeUnit.MILLISECONDS);
this.subscription = this.connectionBroker.connect(pcd)
.doOnNext(event -> numIncomingMessagesCounter.increment())
.mergeWith(tunnelPingsEnabled
? Observable.interval(Constants.TunnelPingIntervalSecs, Constants.TunnelPingIntervalSecs,
TimeUnit.SECONDS)
.map(l -> Constants.TunnelPingMessage)
: Observable.empty())
.doOnNext(event -> {
if (!Constants.DUMMY_TIMER_DATA.equals(event)) {
String data = Constants.SSE_DATA_PREFIX + event + Constants.SSE_DATA_SUFFIX;
boolean offer = false;
synchronized (queue) {
offer = queue.offer(data);
}
if (!offer) {
numDroppedBytesCounter.increment(data.length());
numDroppedMessagesCounter.increment();
}
}
})
.subscribe();
} else {
ctx.fireChannelRead(request.retain());
}
}
private static void send100Continue(ChannelHandlerContext ctx) {
FullHttpResponse response = new DefaultFullHttpResponse(HttpVersion.HTTP_1_1,
HttpResponseStatus.CONTINUE);
ctx.writeAndFlush(response);
}
private boolean isTunnelPingsEnabled(String uri) {
QueryStringDecoder queryStringDecoder = new QueryStringDecoder(uri);
return queryStringDecoder.parameters()
.getOrDefault(Constants.TunnelPingParamName, Arrays.asList("false"))
.get(0)
.equalsIgnoreCase("true");
}
private boolean isWebsocketUpgrade(HttpRequest request) {
HttpHeaders headers = request.headers();
// Header "Connection" contains "upgrade" (case insensitive) and
// Header "Upgrade" equals "websocket" (case insensitive)
String connection = headers.get(HttpHeaderNames.CONNECTION);
String upgrade = headers.get(HttpHeaderNames.UPGRADE);
return connection != null && connection.toLowerCase().contains("upgrade") &&
upgrade != null && upgrade.toLowerCase().equals("websocket");
}
private boolean isSubmitAndConnect(HttpRequest request) {
return request.method().equals(HttpMethod.POST) && request.uri().contains("jobsubmitandconnect");
}
@Override
public void channelUnregistered(ChannelHandlerContext ctx) throws Exception {
log.info("Channel {} is unregistered. URI: {}", ctx.channel(), uri);
unsubscribeIfSubscribed();
super.channelUnregistered(ctx);
}
@Override
public void channelInactive(ChannelHandlerContext ctx) throws Exception {
log.info("Channel {} is inactive. URI: {}", ctx.channel(), uri);
unsubscribeIfSubscribed();
super.channelInactive(ctx);
}
@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
log.warn("Exception caught by channel {}. URI: {}", ctx.channel(), uri, cause);
unsubscribeIfSubscribed();
ctx.close();
}
/** Unsubscribes the SSE subscription if active, then cancels the drain task and shuts down its executor. */
private void unsubscribeIfSubscribed() {
if (subscription != null && !subscription.isUnsubscribed()) {
log.info("SSE unsubscribing subscription with URI: {}", uri);
subscription.unsubscribe();
}
if (drainFuture != null) {
drainFuture.cancel(false);
}
if (scheduledExecutorService != null) {
scheduledExecutorService.shutdown();
}
}
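/**
 * Submits the request body to the master's {@code /api/submit} endpoint and
 * blocks for the first response chunk, which is returned as a String.
 */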
public String jobSubmit(FullHttpRequest request) {
final String API_JOB_SUBMIT_PATH = "/api/submit";
String content = request.content().toString(StandardCharsets.UTF_8);
return callPostOnMaster(highAvailabilityServices.getMasterMonitor().getMasterObservable(), API_JOB_SUBMIT_PATH, content)
.retryWhen(Util.getRetryFunc(log, API_JOB_SUBMIT_PATH))
.flatMap(masterResponse -> masterResponse.getByteBuf()
.take(1)
.map(byteBuf -> {
final String s = byteBuf.toString(StandardCharsets.UTF_8);
log.info("response: " + s);
return s;
}))
.take(1)
.toBlocking()
.first();
}
public static class MasterResponse {
private final HttpResponseStatus status;
private final Observable<ByteBuf> byteBuf;
private final HttpResponseHeaders responseHeaders;
public MasterResponse(HttpResponseStatus status, Observable<ByteBuf> byteBuf, HttpResponseHeaders responseHeaders) {
this.status = status;
this.byteBuf = byteBuf;
this.responseHeaders = responseHeaders;
}
public HttpResponseStatus getStatus() {
return status;
}
public Observable<ByteBuf> getByteBuf() {
return byteBuf;
}
public HttpResponseHeaders getResponseHeaders() { return responseHeaders; }
}
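/**
 * POSTs {@code content} as JSON to {@code uri} on the currently known master
 * (resolved from {@code masterObservable}) and emits at most one response.
 */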
public static Observable<MasterResponse> callPostOnMaster(Observable<MasterDescription> masterObservable, String uri, String content) {
PipelineConfigurator<HttpClientResponse<ByteBuf>, HttpClientRequest<String>> pipelineConfigurator
= PipelineConfigurators.httpClientConfigurator();
return masterObservable
.filter(Objects::nonNull)
.flatMap(masterDesc -> {
HttpClient<String, ByteBuf> client =
RxNetty.<String, ByteBuf>newHttpClientBuilder(masterDesc.getHostname(), masterDesc.getApiPort())
.pipelineConfigurator(pipelineConfigurator)
.build();
HttpClientRequest<String> request = HttpClientRequest.create(HttpMethod.POST, uri);
request = request.withHeader(HttpHeaderNames.CONTENT_TYPE.toString(), HttpHeaderValues.APPLICATION_JSON.toString());
request.withRawContent(content, StringTransformer.DEFAULT_INSTANCE);
return client.submit(request)
.map(response -> new MasterResponse(response.getStatus(), response.getContent(), response.getHeaders()));
})
.take(1);
}
}
| 2,959 |
0 |
Create_ds/mantis-api/src/main/java/io/mantisrx/api
|
Create_ds/mantis-api/src/main/java/io/mantisrx/api/services/AppStreamStore.java
|
package io.mantisrx.api.services;
import com.google.common.collect.ImmutableList;
import io.mantisrx.discovery.proto.AppJobClustersMap;
import java.io.IOException;
import java.util.Collection;
/**
* Interface to get streams associated with a given app or set of apps
*/
public interface AppStreamStore {
default AppJobClustersMap getJobClusterMappings(String app) throws IOException {
return getJobClusterMappings(ImmutableList.of(app));
}
AppJobClustersMap getJobClusterMappings(Collection<String> apps) throws IOException;
}
| 2,960 |
0 |
Create_ds/mantis-api/src/main/java/io/mantisrx/api
|
Create_ds/mantis-api/src/main/java/io/mantisrx/api/services/AppStreamDiscoveryService.java
|
/**
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.api.services;
import com.google.common.base.Preconditions;
import com.netflix.spectator.api.Counter;
import com.netflix.zuul.netty.SpectatorUtils;
import io.mantisrx.api.proto.AppDiscoveryMap;
import io.mantisrx.client.MantisClient;
import io.mantisrx.discovery.proto.AppJobClustersMap;
import io.mantisrx.server.core.JobSchedulingInfo;
import io.vavr.control.Either;
import io.vavr.control.Option;
import java.io.IOException;
import java.util.List;
import java.util.concurrent.TimeUnit;
import lombok.extern.slf4j.Slf4j;
import rx.Observable;
import rx.Scheduler;
@Slf4j
public class AppStreamDiscoveryService {
private final MantisClient mantisClient;
private final Scheduler scheduler;
private final AppStreamStore appStreamStore;
public AppStreamDiscoveryService(
MantisClient mantisClient,
Scheduler scheduler,
AppStreamStore appStreamStore) {
Preconditions.checkArgument(mantisClient != null);
Preconditions.checkArgument(appStreamStore != null);
Preconditions.checkArgument(scheduler != null);
this.mantisClient = mantisClient;
this.scheduler = scheduler;
this.appStreamStore = appStreamStore;
Counter appJobClusterMappingNullCount = SpectatorUtils.newCounter(
"appJobClusterMappingNull", "mantisapi");
Counter appJobClusterMappingRequestCount = SpectatorUtils.newCounter(
"appJobClusterMappingRequest", "mantisapi", "app", "unknown");
Counter appJobClusterMappingFailCount = SpectatorUtils.newCounter(
"appJobClusterMappingFail", "mantisapi");
}
public Either<String, AppDiscoveryMap> getAppDiscoveryMap(List<String> appNames) {
try {
AppJobClustersMap appJobClusters = getAppJobClustersMap(appNames);
//
// Lookup discovery info per stream and build mapping
//
AppDiscoveryMap adm = new AppDiscoveryMap(appJobClusters.getVersion(), appJobClusters.getTimestamp());
for (String app : appJobClusters.getMappings().keySet()) {
for (String stream : appJobClusters.getMappings().get(app).keySet()) {
String jobCluster = appJobClusters.getMappings().get(app).get(stream);
Option<JobSchedulingInfo> jobSchedulingInfo = getJobDiscoveryInfo(jobCluster);
jobSchedulingInfo.map(jsi -> {
adm.addMapping(app, stream, jsi);
return jsi;
});
}
}
return Either.right(adm);
} catch (Exception ex) {
log.error("Failed to build app discovery map", ex);
return Either.left(ex.getMessage());
}
}
public AppJobClustersMap getAppJobClustersMap(List<String> appNames) throws IOException {
return appStreamStore.getJobClusterMappings(appNames);
}
private Option<JobSchedulingInfo> getJobDiscoveryInfo(String jobCluster) {
JobDiscoveryService jdim = JobDiscoveryService.getInstance(mantisClient, scheduler);
return jdim
.jobDiscoveryInfoStream(jdim.key(JobDiscoveryService.LookupType.JOB_CLUSTER, jobCluster))
.map(Option::of)
.take(1)
.timeout(2, TimeUnit.SECONDS, Observable.just(Option.none()))
.doOnError((t) -> {
log.warn("Failed to look up job discovery info for cluster: " + jobCluster + ".", t);
})
.subscribeOn(scheduler)
.observeOn(scheduler)
.toSingle()
.toBlocking()
.value();
}
}
| 2,961 |
0 |
Create_ds/mantis-api/src/main/java/io/mantisrx/api
|
Create_ds/mantis-api/src/main/java/io/mantisrx/api/services/JobDiscoveryService.java
|
/**
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.api.services;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.netflix.spectator.api.Counter;
import com.netflix.spectator.api.Registry;
import com.netflix.spectator.impl.AtomicDouble;
import com.netflix.zuul.netty.SpectatorUtils;
import io.mantisrx.api.Util;
import io.mantisrx.client.MantisClient;
import io.mantisrx.server.core.JobSchedulingInfo;
import lombok.extern.slf4j.Slf4j;
import rx.Observable;
import rx.Scheduler;
import rx.Subscription;
import rx.functions.Action1;
import rx.subjects.BehaviorSubject;
import rx.subjects.Subject;
import java.util.Objects;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
@Slf4j
public class JobDiscoveryService {
public enum LookupType {
JOB_CLUSTER,
JOB_ID
}
public class JobDiscoveryLookupKey {
private final LookupType lookupType;
private final String id;
public JobDiscoveryLookupKey(final LookupType lookupType, final String id) {
this.lookupType = lookupType;
this.id = id;
}
public LookupType getLookupType() {
return lookupType;
}
public String getId() {
return id;
}
@Override
public boolean equals(final Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
final JobDiscoveryLookupKey that = (JobDiscoveryLookupKey) o;
return lookupType == that.lookupType &&
Objects.equals(id, that.id);
}
@Override
public int hashCode() {
return Objects.hash(lookupType, id);
}
@Override
public String toString() {
final StringBuilder sb = new StringBuilder("JobDiscoveryLookupKey{");
sb.append("lookupType=").append(lookupType);
sb.append(", id='").append(id).append('\'');
sb.append('}');
return sb.toString();
}
}
/**
* The purpose of this class is to dedup multiple schedulingChanges streams for the same JobId.
* The first subscriber causes a BehaviorSubject to be set up with data obtained from mantisClient.getSchedulingChanges;
* subsequent subscribers simply connect to the same Subject.
* When the number of subscribers falls to zero, the Observable is unsubscribed and a cleanup callback is invoked.
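* <p>Illustrative lifecycle (a sketch only; {@code handle} is a hypothetical consumer):
* <pre>{@code
* Observable<JobSchedulingInfo> changes = holder.getSchedulingChanges();
* Subscription s1 = changes.subscribe(info -> handle(info)); // first subscriber triggers init()
* Subscription s2 = changes.subscribe(info -> handle(info)); // shares the same upstream subject
* s1.unsubscribe();
* s2.unsubscribe(); // count reaches zero: close() runs and the cleanup callback fires
* }</pre>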
*/
public class JobSchedulingInfoSubjectHolder implements AutoCloseable {
private Subscription subscription;
private final AtomicInteger subscriberCount = new AtomicInteger();
private final String jobId;
private final MantisClient mantisClient;
private AtomicBoolean inited = new AtomicBoolean(false);
private CountDownLatch initComplete = new CountDownLatch(1);
private final Action1 doOnZeroConnections;
private final Subject<JobSchedulingInfo, JobSchedulingInfo> schedulingInfoBehaviorSubjectingSubject = BehaviorSubject.create();
private final Registry registry;
private final Scheduler scheduler;
private final Counter cleanupCounter;
private final AtomicLong subscriberCountGauge;
public JobSchedulingInfoSubjectHolder(MantisClient mantisClient, String jobId, Action1 onZeroConnections, Registry registry, Scheduler scheduler) {
this(mantisClient, jobId, onZeroConnections, 5, registry, scheduler);
}
/**
* Ctor only no subscriptions happen as part of the ctor
*
* @param mantisClient - Used to get the schedulingInfo Observable
* @param jobId - JobId of job to get schedulingInfo
* @param onZeroConnections - Call back when there are no more subscriptions for this observable
* @param retryCount - Number of retries in case of an error connecting to schedulingInfo
*/
JobSchedulingInfoSubjectHolder(MantisClient mantisClient,
String jobId,
Action1 onZeroConnections,
int retryCount,
Registry registry,
Scheduler scheduler) {
Preconditions.checkNotNull(mantisClient, "Mantis Client cannot be null");
Preconditions.checkNotNull(jobId, "JobId cannot be null");
Preconditions.checkArgument(!jobId.isEmpty(), "JobId cannot be empty");
Preconditions.checkNotNull(onZeroConnections, "on Zero Connections callback cannot be null");
Preconditions.checkArgument(retryCount >= 0, "Retry count cannot be less than 0");
this.jobId = jobId;
this.mantisClient = mantisClient;
this.doOnZeroConnections = onZeroConnections;
this.registry = registry;
this.scheduler = scheduler;
cleanupCounter = SpectatorUtils.newCounter("mantisapi.schedulingChanges.cleanupCount", "", "jobId", jobId);
subscriberCountGauge = SpectatorUtils.newGauge("mantisapi.schedulingChanges.subscriberCount", "",
new AtomicLong(0l), "jobId", jobId);
}
/**
* If invoked for the first time, subscribes to the schedulingInfo Observable via mantisClient and onNexts
* the results to the schedulingInfo subject.
* If two or more threads invoke this concurrently, only one performs the initialization while the others wait.
*/
private void init() {
if (!inited.getAndSet(true)) {
subscription = mantisClient.getSchedulingChanges(jobId)
.retryWhen(Util.getRetryFunc(log, "job scheduling information for " + jobId))
.doOnError((t) -> {
schedulingInfoBehaviorSubjectingSubject.toSerialized().onError(t);
doOnZeroConnections.call(jobId);
})
.doOnCompleted(() -> {
schedulingInfoBehaviorSubjectingSubject.toSerialized().onCompleted();
doOnZeroConnections.call(jobId);
})
.subscribeOn(scheduler)
.subscribe((schedInfo) -> schedulingInfoBehaviorSubjectingSubject.onNext(schedInfo));
initComplete.countDown();
} else {
try {
initComplete.await();
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
log.error("Interrupted while waiting for init to complete", e);
}
}
}
/**
* For testing
*
* @return current subscription count
*/
int getSubscriptionCount() {
return subscriberCount.get();
}
/**
* If a subject holding schedulingInfo for the job exists, return it as an Observable;
* if not, invoke mantisClient to get an Observable of scheduling changes, write them to a Subject,
* and return it as an Observable.
* Also keeps track of the subscription count; when it falls to 0, unsubscribes from the schedulingInfo Observable.
*
* @return Observable of scheduling changes
*/
public Observable<JobSchedulingInfo> getSchedulingChanges() {
init();
return schedulingInfoBehaviorSubjectingSubject
.doOnSubscribe(() -> {
if (log.isDebugEnabled()) { log.debug("Subscribed"); }
subscriberCount.incrementAndGet();
subscriberCountGauge.set(subscriberCount.get());
if (log.isDebugEnabled()) { log.debug("Subscriber count " + subscriberCount.get()); }
})
.doOnUnsubscribe(() -> {
if (log.isDebugEnabled()) {log.debug("UnSubscribed"); }
int subscriberCnt = subscriberCount.decrementAndGet();
subscriberCountGauge.set(subscriberCount.get());
if (log.isDebugEnabled()) { log.debug("Subscriber count " + subscriberCnt); }
if (0 == subscriberCount.get()) {
if (log.isDebugEnabled()) { log.debug("Shutting down"); }
close();
}
})
.doOnError((t) -> close())
;
}
/**
* Invoked if the schedulingInfo Observable completes or errors, or if the subscription count falls to 0.
* Unsubscribes from the schedulingInfo Observable and invokes the doOnZeroConnections callback.
*/
@Override
public void close() {
if (log.isDebugEnabled()) { log.debug("In Close Unsubscribing...." + subscription.isUnsubscribed()); }
if (inited.get() && subscription != null && !subscription.isUnsubscribed()) {
if (log.isDebugEnabled()) { log.debug("Unsubscribing...."); }
subscription.unsubscribe();
inited.set(false);
initComplete = new CountDownLatch(1);
}
cleanupCounter.increment();
this.doOnZeroConnections.call(this.jobId);
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
JobSchedulingInfoSubjectHolder that = (JobSchedulingInfoSubjectHolder) o;
return Objects.equals(jobId, that.jobId);
}
@Override
public int hashCode() {
return Objects.hash(jobId);
}
}
/**
* The purpose of this class is to dedup multiple job discovery info streams for the same JobId.
* The first subscriber causes a BehaviorSubject to be set up with data obtained from mantisClient.jobDiscoveryInfoStream;
* subsequent subscribers connect to the same Subject.
* When the number of subscribers falls to zero, the Observable is unsubscribed and a cleanup callback is invoked.
*/
public class JobDiscoveryInfoSubjectHolder implements AutoCloseable {
private Subscription subscription;
private final AtomicInteger subscriberCount = new AtomicInteger();
private final JobDiscoveryLookupKey lookupKey;
private final MantisClient mantisClient;
private AtomicBoolean inited = new AtomicBoolean(false);
private CountDownLatch initComplete = new CountDownLatch(1);
private final Action1 doOnZeroConnections;
private final Subject<JobSchedulingInfo, JobSchedulingInfo> discoveryInfoBehaviorSubject = BehaviorSubject.create();
private final Scheduler scheduler;
private final Counter cleanupCounter;
private final AtomicLong subscriberCountGauge;
public JobDiscoveryInfoSubjectHolder(MantisClient mantisClient, JobDiscoveryLookupKey lookupKey, Action1 onZeroConnections, Scheduler scheduler) {
this(mantisClient, lookupKey, onZeroConnections, 5, scheduler);
}
/**
* Ctor only no subscriptions happen as part of the ctor
*
* @param mantisClient - Used to get the schedulingInfo Observable
* @param lookupKey - JobId or JobCluster to get schedulingInfo
* @param onZeroConnections - Call back when there are no more subscriptions for this observable
* @param retryCount - Number of retries in case of an error connecting to schedulingInfo
*/
JobDiscoveryInfoSubjectHolder(MantisClient mantisClient,
JobDiscoveryLookupKey lookupKey,
Action1 onZeroConnections,
int retryCount,
Scheduler scheduler) {
Preconditions.checkNotNull(mantisClient, "Mantis Client cannot be null");
Preconditions.checkNotNull(lookupKey, "lookup key cannot be null");
Preconditions.checkArgument(lookupKey.getId() != null && !lookupKey.getId().isEmpty(), "lookup key cannot be empty or null");
Preconditions.checkNotNull(onZeroConnections, "on Zero Connections callback cannot be null");
Preconditions.checkArgument(retryCount >= 0, "Retry count cannot be less than 0");
this.lookupKey = lookupKey;
this.mantisClient = mantisClient;
this.doOnZeroConnections = onZeroConnections;
this.scheduler = scheduler;
cleanupCounter = SpectatorUtils.newCounter("mantisapi.discoveryinfo.cleanupCount", "", "lookupKey", lookupKey.getId());
subscriberCountGauge = SpectatorUtils.newGauge("mantisapi.discoveryinfo.subscriberCount", "",
new AtomicLong(0l),
"lookupKey", lookupKey.getId());
}
/**
* If invoked for the first time, subscribes to the scheduling/discovery info Observable via mantisClient and onNexts
* the results to the discovery info subject.
* If two or more threads invoke this concurrently, only one performs the initialization while the others wait.
*/
private void init() {
if (!inited.getAndSet(true)) {
Observable<JobSchedulingInfo> jobSchedulingInfoObs;
switch (lookupKey.getLookupType()) {
case JOB_ID:
jobSchedulingInfoObs = mantisClient.getSchedulingChanges(lookupKey.getId());
break;
case JOB_CLUSTER:
jobSchedulingInfoObs = mantisClient.jobClusterDiscoveryInfoStream(lookupKey.getId());
break;
default:
throw new IllegalArgumentException("lookup key type is not supported " + lookupKey.getLookupType());
}
subscription = jobSchedulingInfoObs
.retryWhen(Util.getRetryFunc(log, "job scheduling info for (" + lookupKey.getLookupType() + ") " + lookupKey.id))
.doOnError((t) -> {
log.info("cleanup jobDiscoveryInfo onError for {}", lookupKey);
discoveryInfoBehaviorSubject.toSerialized().onError(t);
doOnZeroConnections.call(lookupKey);
})
.doOnCompleted(() -> {
log.info("cleanup jobDiscoveryInfo onCompleted for {}", lookupKey);
discoveryInfoBehaviorSubject.toSerialized().onCompleted();
doOnZeroConnections.call(lookupKey);
})
.subscribeOn(scheduler)
.subscribe((schedInfo) -> discoveryInfoBehaviorSubject.onNext(schedInfo));
initComplete.countDown();
} else {
try {
initComplete.await();
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
log.error("Interrupted while waiting for init to complete", e);
}
}
}
/**
* For testing
*
* @return current subscription count
*/
int getSubscriptionCount() {
return subscriberCount.get();
}
/**
* If a subject holding schedulingInfo for the job exists, return it as an Observable;
* if not, invoke mantisClient to get an Observable of scheduling changes, write them to a Subject,
* and return it as an Observable.
* Also keeps track of the subscription count; when it falls to 0, unsubscribes from the schedulingInfo Observable.
*
* @return Observable of scheduling changes
*/
public Observable<JobSchedulingInfo> jobDiscoveryInfoStream() {
init();
return discoveryInfoBehaviorSubject
.doOnSubscribe(() -> {
if (log.isDebugEnabled()) { log.debug("Subscribed"); }
subscriberCount.incrementAndGet();
subscriberCountGauge.set(subscriberCount.get());
if (log.isDebugEnabled()) { log.debug("Subscriber count " + subscriberCount.get()); }
})
.doOnUnsubscribe(() -> {
if (log.isDebugEnabled()) {log.debug("UnSubscribed"); }
int subscriberCnt = subscriberCount.decrementAndGet();
subscriberCountGauge.set(subscriberCount.get());
if (log.isDebugEnabled()) { log.debug("Subscriber count " + subscriberCnt); }
if (0 == subscriberCount.get()) {
if (log.isDebugEnabled()) { log.debug("Shutting down"); }
close();
}
})
.doOnError((t) -> close())
;
}
/**
* Invoked if the schedulingInfo Observable completes or errors, or if the subscription count falls to 0.
* Unsubscribes from the schedulingInfo Observable and invokes the doOnZeroConnections callback.
*/
@Override
public void close() {
if (log.isDebugEnabled()) { log.debug("In Close un-subscribing...." + subscription.isUnsubscribed()); }
if (inited.get() && subscription != null && !subscription.isUnsubscribed()) {
if (log.isDebugEnabled()) { log.debug("Unsubscribing...."); }
subscription.unsubscribe();
inited.set(false);
initComplete = new CountDownLatch(1);
}
cleanupCounter.increment();
log.info("jobDiscoveryInfo close for {}", lookupKey);
this.doOnZeroConnections.call(this.lookupKey);
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
JobDiscoveryInfoSubjectHolder that = (JobDiscoveryInfoSubjectHolder) o;
return Objects.equals(lookupKey, that.lookupKey);
}
@Override
public int hashCode() {
return Objects.hash(lookupKey);
}
}
private final MantisClient mantisClient;
private final Scheduler scheduler;
private final AtomicDouble subjectMapSizeGauge;
private int retryCount = 5;
private static JobDiscoveryService INSTANCE = null;
public static synchronized JobDiscoveryService getInstance(MantisClient mantisClient, Scheduler scheduler) {
if (INSTANCE == null) {
INSTANCE = new JobDiscoveryService(mantisClient, scheduler);
}
return INSTANCE;
}
private JobDiscoveryService(final MantisClient mClient, Scheduler scheduler) {
Preconditions.checkNotNull(mClient, "mantisClient cannot be null");
this.mantisClient = mClient;
this.subjectMapSizeGauge = SpectatorUtils.newGauge("mantisapi.discoveryInfo.subjectMapSize", "mantisapi.discoveryInfo.subjectMapSize", new AtomicDouble(0.0));
this.scheduler = scheduler;
}
/**
* For testing purposes
*
* @param cnt No of retries
*/
@VisibleForTesting
void setRetryCount(int cnt) {
this.retryCount = cnt;
}
private final ConcurrentMap<JobDiscoveryLookupKey, JobDiscoveryInfoSubjectHolder> subjectMap = new ConcurrentHashMap<>();
/**
* Invoked by the subjectHolders when the subscription count goes to 0 (or if there is an error)
*/
private final Action1<JobDiscoveryLookupKey> removeSubjectAction = key -> {
if (log.isDebugEnabled()) { log.info("Removing subject for key {}", key.toString()); }
removeSchedulingInfoSubject(key);
};
/**
* Atomically inserts a JobDiscoveryInfoSubjectHolder if absent and returns an Observable of JobSchedulingInfo to the caller
*
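* <p>A minimal caller sketch (mirrors how AppStreamDiscoveryService uses this class):
* <pre>{@code
* JobDiscoveryService jds = JobDiscoveryService.getInstance(mantisClient, scheduler);
* jds.jobDiscoveryInfoStream(jds.key(LookupType.JOB_CLUSTER, "MyJobCluster"))
*    .take(1)
*    .subscribe(info -> log.info("discovery info: {}", info));
* }</pre>
*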
* @param lookupKey - Job cluster name or JobID
*
* @return a shared Observable of JobSchedulingInfo for the given lookup key
*/
public Observable<JobSchedulingInfo> jobDiscoveryInfoStream(JobDiscoveryLookupKey lookupKey) {
Preconditions.checkNotNull(lookupKey, "lookup key cannot be null for fetching job discovery info");
Preconditions.checkArgument(lookupKey.getId() != null && !lookupKey.getId().isEmpty(), "Lookup ID cannot be null or empty: " + lookupKey);
subjectMapSizeGauge.set(subjectMap.size());
return subjectMap.computeIfAbsent(lookupKey, (jc) -> new JobDiscoveryInfoSubjectHolder(mantisClient, jc, removeSubjectAction, this.retryCount, scheduler)).jobDiscoveryInfoStream();
}
/**
* Intended to be called via a callback when subscriber count falls to 0
*
* @param lookupKey JobId whose entry needs to be removed
*/
private void removeSchedulingInfoSubject(JobDiscoveryLookupKey lookupKey) {
subjectMap.remove(lookupKey);
subjectMapSizeGauge.set(subjectMap.size());
}
/**
* For testing purposes
*
* @return No. of entries in the subject
*/
int getSubjectMapSize() {
return subjectMap.size();
}
/**
* For testing purposes
*/
void clearMap() {
subjectMap.clear();
}
public JobDiscoveryLookupKey key(LookupType lookupType, String jobCluster) {
return new JobDiscoveryLookupKey(lookupType, jobCluster);
}
public static final Cache<String, String> jobDiscoveryInfoCache = CacheBuilder.newBuilder()
.expireAfterWrite(250, TimeUnit.MILLISECONDS)
.maximumSize(500)
.build();
}
| 2,962 |
0 |
Create_ds/mantis-api/src/main/java/io/mantisrx/api
|
Create_ds/mantis-api/src/main/java/io/mantisrx/api/services/ConfigurationBasedAppStreamStore.java
|
package io.mantisrx.api.services;
import com.netflix.spectator.api.Counter;
import com.netflix.zuul.netty.SpectatorUtils;
import io.mantisrx.common.JsonSerializer;
import io.mantisrx.discovery.proto.AppJobClustersMap;
import io.mantisrx.shaded.org.apache.curator.framework.listen.Listenable;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Supplier;
import javax.annotation.Nullable;
import lombok.extern.slf4j.Slf4j;
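/**
 * An {@link AppStreamStore} backed by a dynamic configuration source: each config
 * change is re-parsed as JSON into an {@link AppJobClustersMap} and atomically
 * swapped into place for subsequent lookups.
 */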
@SuppressWarnings("unused")
@Slf4j
public class ConfigurationBasedAppStreamStore implements AppStreamStore {
private final JsonSerializer jsonSerializer;
private final AtomicReference<AppJobClustersMap> appJobClusterMappings = new AtomicReference<>();
private final Counter appJobClusterMappingNullCount;
private final Counter appJobClusterMappingFailCount;
private final Counter appJobClusterMappingRequestCount;
public ConfigurationBasedAppStreamStore(ConfigSource configSource) {
this.appJobClusterMappingNullCount = SpectatorUtils.newCounter(
"appJobClusterMappingNull", "mantisapi");
this.appJobClusterMappingRequestCount = SpectatorUtils.newCounter(
"appJobClusterMappingRequest", "mantisapi", "app", "unknown");
this.appJobClusterMappingFailCount = SpectatorUtils.newCounter(
"appJobClusterMappingFail", "mantisapi");
this.jsonSerializer = new JsonSerializer();
// Counters are initialized before the first parse so that an initial failure
// can be recorded without hitting a null counter.
configSource.getListenable()
.addListener((newConfig) -> updateAppJobClustersMapping(newConfig));
updateAppJobClustersMapping(configSource.get());
}
@Override
public AppJobClustersMap getJobClusterMappings(Collection<String> apps) throws IOException {
return getAppJobClustersMap(apps, this.appJobClusterMappings.get());
}
private AppJobClustersMap getAppJobClustersMap(Collection<String> appNames,
@Nullable AppJobClustersMap appJobClustersMap) throws IOException {
if (appJobClustersMap != null) {
final AppJobClustersMap appJobClusters;
if (appNames.size() > 0) {
appJobClusters = appJobClustersMap.getFilteredAppJobClustersMap(new ArrayList<>(appNames));
} else {
appJobClusterMappingRequestCount.increment();
appJobClusters = appJobClustersMap;
}
return appJobClusters;
} else {
appJobClusterMappingNullCount.increment();
throw new IOException("AppJobClustersMap is null");
}
}
private void updateAppJobClustersMapping(String appJobClusterStr) {
try {
AppJobClustersMap appJobClustersMap =
jsonSerializer.fromJSON(appJobClusterStr, AppJobClustersMap.class);
log.info("appJobClustersMap updated to {}", appJobClustersMap);
appJobClusterMappings.set(appJobClustersMap);
} catch (Exception e) {
log.error("failed to update appJobClustersMap on Property update {}", appJobClusterStr, e);
appJobClusterMappingFailCount.increment();
}
}
public interface ConfigSource extends Supplier<String> {
Listenable<ConfigurationChangeListener> getListenable();
}
public interface ConfigurationChangeListener {
void onConfigChange(String config);
}
}
| 2,963 |
0 |
Create_ds/mantis-api/src/main/java/io/mantisrx/api/services
|
Create_ds/mantis-api/src/main/java/io/mantisrx/api/services/artifacts/InMemoryArtifactManager.java
|
package io.mantisrx.api.services.artifacts;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.stream.Collectors;
import io.mantisrx.api.proto.Artifact;
public class InMemoryArtifactManager implements ArtifactManager {
private Map<String, Artifact> artifacts = new HashMap<>();
@Override
public List<String> getArtifacts() {
return artifacts
.values()
.stream()
.map(Artifact::getFileName)
.collect(Collectors.toList());
}
@Override
public Optional<Artifact> getArtifact(String name) {
return artifacts
.values()
.stream()
.filter(artifact -> artifact.getFileName().equals(name))
.findFirst();
}
@Override
public void deleteArtifact(String name) {
this.artifacts.remove(name);
}
@Override
public void putArtifact(Artifact artifact) {
this.artifacts.put(artifact.getFileName(), artifact);
}
}
| 2,964 |
0 |
Create_ds/mantis-api/src/main/java/io/mantisrx/api/services
|
Create_ds/mantis-api/src/main/java/io/mantisrx/api/services/artifacts/ArtifactManager.java
|
package io.mantisrx.api.services.artifacts;
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import io.mantisrx.api.proto.Artifact;
import java.util.List;
import java.util.Optional;
public interface ArtifactManager {
List<String> getArtifacts();
Optional<Artifact> getArtifact(String name);
void deleteArtifact(String name);
void putArtifact(Artifact artifact);
}
| 2,965 |
0 |
Create_ds/mantis-api/src/main/java/io/mantisrx/api
|
Create_ds/mantis-api/src/main/java/io/mantisrx/api/initializers/MantisApiServerChannelInitializer.java
|
/*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.api.initializers;
import com.netflix.netty.common.HttpLifecycleChannelHandler;
import com.netflix.netty.common.channel.config.ChannelConfig;
import com.netflix.netty.common.channel.config.CommonChannelConfigKeys;
import com.netflix.zuul.netty.server.BaseZuulChannelInitializer;
import com.netflix.zuul.netty.ssl.SslContextFactory;
import io.mantisrx.api.Util;
import io.mantisrx.api.push.ConnectionBroker;
import io.mantisrx.api.push.MantisSSEHandler;
import io.mantisrx.api.push.MantisWebSocketFrameHandler;
import io.mantisrx.api.tunnel.CrossRegionHandler;
import io.mantisrx.api.tunnel.MantisCrossRegionalClient;
import io.mantisrx.server.master.client.HighAvailabilityServices;
import io.netty.channel.Channel;
import io.netty.channel.ChannelHandler.Sharable;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;
import io.netty.channel.ChannelPipeline;
import io.netty.channel.group.ChannelGroup;
import io.netty.handler.codec.http.HttpObjectAggregator;
import io.netty.handler.codec.http.websocketx.WebSocketServerProtocolHandler;
import io.netty.handler.ssl.SslContext;
import io.netty.handler.ssl.SslHandler;
import io.netty.handler.stream.ChunkedWriteHandler;
import java.util.List;
import javax.net.ssl.SSLException;
import rx.Scheduler;
public class MantisApiServerChannelInitializer extends BaseZuulChannelInitializer
{
private final SslContextFactory sslContextFactory;
private final SslContext sslContext;
private final boolean isSSlFromIntermediary;
private final ConnectionBroker connectionBroker;
private final HighAvailabilityServices highAvailabilityServices;
private final MantisCrossRegionalClient mantisCrossRegionalClient;
private final Scheduler scheduler;
private final List<String> pushPrefixes;
private final boolean sslEnabled;
public MantisApiServerChannelInitializer(
String metricId,
ChannelConfig channelConfig,
ChannelConfig channelDependencies,
ChannelGroup channels,
List<String> pushPrefixes,
HighAvailabilityServices highAvailabilityServices,
MantisCrossRegionalClient mantisCrossRegionalClient,
ConnectionBroker connectionBroker,
Scheduler scheduler,
boolean sslEnabled) {
super(metricId, channelConfig, channelDependencies, channels);
this.pushPrefixes = pushPrefixes;
this.connectionBroker = connectionBroker;
this.highAvailabilityServices = highAvailabilityServices;
this.mantisCrossRegionalClient = mantisCrossRegionalClient;
this.scheduler = scheduler;
this.sslEnabled = sslEnabled;
this.isSSlFromIntermediary = channelConfig.get(CommonChannelConfigKeys.isSSlFromIntermediary);
this.sslContextFactory = channelConfig.get(CommonChannelConfigKeys.sslContextFactory);
if (sslEnabled) {
try {
sslContext = sslContextFactory.createBuilderForServer().build();
} catch (SSLException e) {
throw new RuntimeException("Error configuring SslContext!", e);
}
// Enable TLS Session Tickets support.
sslContextFactory.enableSessionTickets(sslContext);
// Setup metrics tracking the OpenSSL stats.
sslContextFactory.configureOpenSslStatsMetrics(sslContext, metricId);
} else {
sslContext = null;
}
}
@Override
protected void initChannel(Channel ch) throws Exception
{
// Configure our pipeline of ChannelHandlerS.
ChannelPipeline pipeline = ch.pipeline();
storeChannel(ch);
addTimeoutHandlers(pipeline);
addPassportHandler(pipeline);
addTcpRelatedHandlers(pipeline);
if (sslEnabled) {
SslHandler sslHandler = sslContext.newHandler(ch.alloc());
sslHandler.engine().setEnabledProtocols(sslContextFactory.getProtocols());
pipeline.addLast("ssl", sslHandler);
addSslInfoHandlers(pipeline, isSSlFromIntermediary);
addSslClientCertChecks(pipeline);
}
addHttp1Handlers(pipeline);
addHttpRelatedHandlers(pipeline);
pipeline.addLast("mantishandler", new MantisChannelHandler(pushPrefixes));
}
/**
* Adds a series of handlers for providing SSE/Websocket connections
* to Mantis Jobs.
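* <p>The resulting pipeline tail, in order: ChunkedWriteHandler,
* HttpObjectAggregator (64 KiB), MantisSSEHandler,
* WebSocketServerProtocolHandler, MantisWebSocketFrameHandler.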
*
* @param pipeline The netty pipeline to which push handlers should be added.
* @param url The url with which to initiate the websocket handler.
*/
protected void addPushHandlers(final ChannelPipeline pipeline, String url) {
pipeline.addLast(new ChunkedWriteHandler());
pipeline.addLast(new HttpObjectAggregator(64 * 1024));
pipeline.addLast(new MantisSSEHandler(connectionBroker, highAvailabilityServices, pushPrefixes));
pipeline.addLast(new WebSocketServerProtocolHandler(url, true));
pipeline.addLast(new MantisWebSocketFrameHandler(connectionBroker));
}
/**
* Adds a series of handlers for providing SSE/Websocket connections
* to Mantis Jobs.
*
* @param pipeline The netty pipeline to which regional handlers should be added.
*/
protected void addRegionalHandlers(final ChannelPipeline pipeline) {
pipeline.addLast(new ChunkedWriteHandler());
pipeline.addLast(new HttpObjectAggregator(10 * 1024 * 1024));
pipeline.addLast(new CrossRegionHandler(pushPrefixes, mantisCrossRegionalClient, connectionBroker, scheduler));
}
/**
* The MantisChannelHandler's job is to initialize the tail end of the pipeline differently
* depending on the URI of the request. This is largely to circumvent issues with endpoint responses
* when the push handlers precede the Zuul handlers.
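* <p>Routing, as implemented in userEventTriggered: URIs matching a configured
* push prefix get the SSE/WebSocket handlers, URIs starting with "/region/"
* get the cross-region handlers, and everything else falls through to Zuul.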
*/
@Sharable
public class MantisChannelHandler extends ChannelInboundHandlerAdapter {
private final List<String> pushPrefixes;
public MantisChannelHandler(List<String> pushPrefixes) {
this.pushPrefixes = pushPrefixes;
}
@Override
public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception {
if (evt instanceof HttpLifecycleChannelHandler.StartEvent) {
HttpLifecycleChannelHandler.StartEvent startEvent = (HttpLifecycleChannelHandler.StartEvent) evt;
String uri = startEvent.getRequest().uri();
ChannelPipeline pipeline = ctx.pipeline();
removeEverythingAfterThis(pipeline);
if (Util.startsWithAnyOf(uri, this.pushPrefixes)) {
addPushHandlers(pipeline, uri);
} else if(uri.startsWith("/region/")) {
addRegionalHandlers(pipeline);
} else {
addZuulHandlers(pipeline);
}
}
ctx.fireUserEventTriggered(evt);
}
}
private void removeEverythingAfterThis(ChannelPipeline pipeline) {
while (pipeline.last().getClass() != MantisChannelHandler.class) {
pipeline.removeLast();
}
}
}
| 2,966 |
0 |
Create_ds/dgs-federation-example/reviews-dgs/src/test/java/com/example/demo
|
Create_ds/dgs-federation-example/reviews-dgs/src/test/java/com/example/demo/datafetchers/ReviewsDatafetcherTest.java
|
package com.example.demo.datafetchers;
import com.example.demo.generated.client.EntitiesProjectionRoot;
import com.example.demo.generated.client.ShowRepresentation;
import com.example.demo.generated.types.Review;
import com.jayway.jsonpath.TypeRef;
import com.netflix.graphql.dgs.DgsQueryExecutor;
import com.netflix.graphql.dgs.autoconfig.DgsAutoConfiguration;
import com.netflix.graphql.dgs.client.codegen.EntitiesGraphQLQuery;
import com.netflix.graphql.dgs.client.codegen.GraphQLQueryRequest;
import org.junit.jupiter.api.Test;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import static org.assertj.core.api.Assertions.assertThat;
@SpringBootTest(classes = {DgsAutoConfiguration.class, ReviewsDatafetcher.class})
class ReviewsDatafetcherTest {
@Autowired
DgsQueryExecutor dgsQueryExecutor;
@Test
void shows() {
Map<String,Object> representation = new HashMap<>();
representation.put("__typename", "Show");
representation.put("id", "1");
List<Map<String, Object>> representationsList = new ArrayList<>();
representationsList.add(representation);
Map<String, Object> variables = new HashMap<>();
variables.put("representations", representationsList);
List<Review> reviewsList = dgsQueryExecutor.executeAndExtractJsonPathAsObject(
"query ($representations:[_Any!]!) {" +
"_entities(representations:$representations) {" +
"... on Show {" +
" reviews {" +
" starRating" +
"}}}}",
"data['_entities'][0].reviews", variables, new TypeRef<>() {
});
assertThat(reviewsList).isNotNull();
assertThat(reviewsList.size()).isEqualTo(3);
}
@Test
void showsWithEntitiesQueryBuilder() {
EntitiesGraphQLQuery entitiesQuery = new EntitiesGraphQLQuery.Builder().addRepresentationAsVariable(ShowRepresentation.newBuilder().id("1").build()).build();
GraphQLQueryRequest request = new GraphQLQueryRequest(entitiesQuery, new EntitiesProjectionRoot().onShow().reviews().starRating());
List<Review> reviewsList = dgsQueryExecutor.executeAndExtractJsonPathAsObject(
request.serialize(),
"data['_entities'][0].reviews", entitiesQuery.getVariables(), new TypeRef<>() {
});
assertThat(reviewsList).isNotNull();
assertThat(reviewsList.size()).isEqualTo(3);
}
}
| 2,967 |
0 |
Create_ds/dgs-federation-example/reviews-dgs/src/main/java/com/example
|
Create_ds/dgs-federation-example/reviews-dgs/src/main/java/com/example/demo/ReviewsDgs.java
|
package com.example.demo;
import graphql.execution.instrumentation.tracing.TracingInstrumentation;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import graphql.execution.instrumentation.Instrumentation;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.context.annotation.Bean;
@SpringBootApplication
public class ReviewsDgs {
public static void main(String[] args) {
SpringApplication.run(ReviewsDgs.class, args);
}
/**
* If you want to leverage Apollo Tracing, as supported by java-graphql, you can just create a bean of type {@link TracingInstrumentation}.
* In this example we added a conditional property on the bean to enable/disable the Apollo Tracing.
* Enabled by default, you can turn it off by setting `graphql.tracing.enabled=false` in your application properties.
*
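* <pre>
* # application.properties (illustrative)
* graphql.tracing.enabled=false
* </pre>
*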
* @see <a href="https://github.com/apollographql/apollo-tracing">Apollo Tracing</a>
*/
@Bean
@ConditionalOnProperty( prefix = "graphql.tracing", name = "enabled", matchIfMissing = true)
public Instrumentation tracingInstrumentation(){
return new TracingInstrumentation();
}
}
| 2,968 |
0 |
Create_ds/dgs-federation-example/reviews-dgs/src/main/java/com/example/demo
|
Create_ds/dgs-federation-example/reviews-dgs/src/main/java/com/example/demo/datafetchers/ReviewsDatafetcher.java
|
package com.example.demo.datafetchers;
import com.example.demo.generated.types.Review;
import com.example.demo.generated.types.Show;
import com.netflix.graphql.dgs.DgsComponent;
import com.netflix.graphql.dgs.DgsData;
import com.netflix.graphql.dgs.DgsDataFetchingEnvironment;
import com.netflix.graphql.dgs.DgsEntityFetcher;
import java.util.*;
import java.util.stream.Collectors;
@DgsComponent
public class ReviewsDatafetcher {
Map<String, List<Review>> reviews = new HashMap<>();
public ReviewsDatafetcher() {
List<Review> review1 = new ArrayList<>();
review1.add(new Review(5));
review1.add(new Review(4));
review1.add(new Review(5));
reviews.put("1", review1);
List<Review> review2 = new ArrayList<>();
review2.add(new Review(3));
review2.add(new Review(5));
reviews.put("2", review2);
}
@DgsEntityFetcher(name = "Show")
public Show show(Map<String, Object> values) {
return new Show((String) values.get("id"), null);
}
@DgsData(parentType = "Show", field = "reviews")
public List<Review> reviewsFetcher(DgsDataFetchingEnvironment dataFetchingEnvironment) {
Show show = dataFetchingEnvironment.getSource();
return reviews.get(show.getId());
}
}
| 2,969 |
0 |
Create_ds/dgs-federation-example/shows-dgs/src/test/java/com/example/demo
|
Create_ds/dgs-federation-example/shows-dgs/src/test/java/com/example/demo/datafetchers/ShowsDataFetcherTests.java
|
/*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.example.demo.datafetchers;
import com.example.demo.generated.client.ShowsGraphQLQuery;
import com.example.demo.generated.client.ShowsProjectionRoot;
import com.example.demo.generated.types.Show;
import com.example.demo.services.ShowsService;
import com.netflix.graphql.dgs.DgsQueryExecutor;
import com.netflix.graphql.dgs.autoconfig.DgsAutoConfiguration;
import com.netflix.graphql.dgs.client.codegen.GraphQLQueryRequest;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.mockito.Mockito;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.boot.test.mock.mockito.MockBean;
import java.util.List;
import static org.assertj.core.api.Assertions.assertThat;
@SpringBootTest(classes = {DgsAutoConfiguration.class, ShowsDataFetcher.class})
public class ShowsDataFetcherTests {
@Autowired
DgsQueryExecutor dgsQueryExecutor;
@MockBean
ShowsService showsService;
@BeforeEach
public void before() {
Mockito.when(showsService.shows()).thenAnswer(invocation -> List.of(new Show("1","mock title", 2020)));
}
@Test
public void showsWithQueryApi() {
GraphQLQueryRequest graphQLQueryRequest = new GraphQLQueryRequest(
new ShowsGraphQLQuery.Builder().build(),
new ShowsProjectionRoot().title()
);
List<String> titles = dgsQueryExecutor.executeAndExtractJsonPath(graphQLQueryRequest.serialize(), "data.shows[*].title");
assertThat(titles).containsExactly("mock title");
}
}
| 2,970 |
0 |
Create_ds/aws-cryptographic-material-providers-library-java/AwsCryptographyPrimitives/runtimes/java/src/main/java
|
Create_ds/aws-cryptographic-material-providers-library-java/AwsCryptographyPrimitives/runtimes/java/src/main/java/HMAC/__default.java
|
package HMAC;
import software.amazon.cryptography.primitives.internaldafny.types.Error;
import Wrappers_Compile.Result;
import dafny.DafnySequence;
public class __default {
public static Result<DafnySequence<? extends Byte>, Error> Digest(
software.amazon.cryptography.primitives.internaldafny.types.HMacInput input
)
{
Result<HMac, Error> maybeHMac = HMac.Build(input._digestAlgorithm);
if (maybeHMac.is_Failure()) {
return Result.create_Failure(maybeHMac.dtor_error());
}
final HMac hmac = maybeHMac.Extract(HMac._typeDescriptor(), Error._typeDescriptor());
hmac.Init(input._key);
hmac.BlockUpdate(input._message);
final DafnySequence<? extends Byte> output = hmac.GetResult();
return Result.create_Success(output);
}
}
| 2,971 |
0 |
Create_ds/aws-cryptographic-material-providers-library-java/AwsCryptographyPrimitives/runtimes/java/src/main/java
|
Create_ds/aws-cryptographic-material-providers-library-java/AwsCryptographyPrimitives/runtimes/java/src/main/java/HMAC/HMac.java
|
package HMAC;
import software.amazon.cryptography.primitives.internaldafny.types.DigestAlgorithm;
import software.amazon.cryptography.primitives.internaldafny.types.Error;
import Wrappers_Compile.Result;
import dafny.Array;
import dafny.DafnySequence;
import org.bouncycastle.util.Bytes;
import software.amazon.cryptography.primitives.ToDafny;
import software.amazon.cryptography.primitives.model.AwsCryptographicPrimitivesError;
import javax.crypto.Mac;
import javax.crypto.ShortBufferException;
import javax.crypto.spec.SecretKeySpec;
import java.lang.IllegalStateException;
import java.security.InvalidKeyException;
import java.security.NoSuchAlgorithmException;
import java.util.Collections;
public class HMac extends _ExternBase_HMac {
private String algorithm;
private Mac hmac;
public static Result<HMAC.HMac, Error> Build(DigestAlgorithm digest)
{
try {
final HMac output = new HMac(digest);
return Result.create_Success(output);
} catch ( NoSuchAlgorithmException ex) {
final Error err = ToDafny.Error(
AwsCryptographicPrimitivesError
.builder()
.message("Requested digest Algorithm is not supported.")
.cause(ex)
.build());
return Result.create_Failure(err);
}
}
public HMac(DigestAlgorithm digest) throws NoSuchAlgorithmException
{
if (digest.is_SHA__256()) {
algorithm = "HmacSHA256";
} else if (digest.is_SHA__384()) {
algorithm = "HmacSHA384";
} else if (digest.is_SHA__512()) {
algorithm = "HmacSHA512";
} else {
throw new NoSuchAlgorithmException();
}
hmac = Mac.getInstance(algorithm);
}
public void Init(DafnySequence<? extends Byte> key) {
final byte[] keyBytes = (byte[]) Array.unwrap(key.toArray());
try {
final SecretKeySpec secretKey = new SecretKeySpec(keyBytes, algorithm);
hmac.init(secretKey);
} catch (InvalidKeyException e) {
// Dafny preconditions should ensure it is impossible to enter here.
// In case this is ever not true, translate to a RuntimeException
// which will be bubbled up.
throw new IllegalStateException("Encountered InvalidKeyException: " + e.getMessage());
}
}
public void BlockUpdate(DafnySequence<? extends Byte> input) {
final byte[] inputBytes = (byte[]) Array.unwrap(input.toArray());
hmac.update(inputBytes);
}
public DafnySequence<? extends Byte> GetResult() {
final byte[] digest = hmac.doFinal();
return DafnySequence.fromBytes(digest);
}
}
| 2,972 |
0 |
Create_ds/aws-cryptographic-material-providers-library-java/AwsCryptographyPrimitives/runtimes/java/src/main/java
|
Create_ds/aws-cryptographic-material-providers-library-java/AwsCryptographyPrimitives/runtimes/java/src/main/java/AESEncryption/__default.java
|
package AESEncryption;
public class __default extends AESEncryption._ExternBase___default {
}
| 2,973 |
0 |
Create_ds/aws-cryptographic-material-providers-library-java/AwsCryptographyPrimitives/runtimes/java/src/main/java
|
Create_ds/aws-cryptographic-material-providers-library-java/AwsCryptographyPrimitives/runtimes/java/src/main/java/AESEncryption/AES_GCM.java
|
package AESEncryption;
import java.security.GeneralSecurityException;
import java.security.spec.AlgorithmParameterSpec;
import javax.crypto.Cipher;
import javax.crypto.SecretKey;
import javax.crypto.spec.GCMParameterSpec;
import javax.crypto.spec.SecretKeySpec;
import software.amazon.cryptography.primitives.internaldafny.types.AESEncryptOutput;
import software.amazon.cryptography.primitives.internaldafny.types.AES__GCM;
import software.amazon.cryptography.primitives.internaldafny.types.Error;
import Random_Compile.ExternRandom;
import Wrappers_Compile.Result;
import dafny.Array;
import dafny.DafnySequence;
import software.amazon.cryptography.primitives.ToDafny;
import software.amazon.cryptography.primitives.model.OpaqueError;
public class AES_GCM {
public static Result<AESEncryptOutput, Error> AESEncryptExtern(
AES__GCM encAlg,
DafnySequence<? extends Byte> iv,
DafnySequence<? extends Byte> key,
DafnySequence<? extends Byte> msg,
DafnySequence<? extends Byte> aad
) {
byte[] keyBytes = (byte[]) Array.unwrap(key.toArray());
byte[] nonceBytes = (byte[]) Array.unwrap(iv.toArray());
byte[] plaintextBytes = (byte[]) Array.unwrap(msg.toArray());
final AlgorithmParameterSpec spec =
new GCMParameterSpec(encAlg._tagLength * 8, nonceBytes, 0, nonceBytes.length);
try {
Cipher cipher_ = Cipher.getInstance("AES/GCM/NoPadding");
SecretKey secretKey = new SecretKeySpec(keyBytes, "AES");
cipher_.init(Cipher.ENCRYPT_MODE, secretKey, spec, ExternRandom.getSecureRandom());
if (aad != null) {
byte[] aadBytes = (byte[]) Array.unwrap(aad.toArray());
cipher_.updateAAD(aadBytes);
}
byte[] cipherOutput = cipher_.doFinal(plaintextBytes);
AESEncryptOutput aesEncryptOutput = __default.EncryptionOutputFromByteSeq(
DafnySequence.fromBytes(cipherOutput),
encAlg);
return Result.create_Success(aesEncryptOutput);
} catch ( GeneralSecurityException e) {
return Result.create_Failure(ToDafny.Error(
OpaqueError.builder().obj(e).build())
);
}
}
public static Result<DafnySequence<? extends Byte>, Error> AESDecryptExtern(
AES__GCM encAlg,
DafnySequence<? extends Byte> key,
DafnySequence<? extends Byte> cipherTxt,
DafnySequence<? extends Byte> authTag,
DafnySequence<? extends Byte> iv,
DafnySequence<? extends Byte> aad
) {
byte[] keyBytes = (byte[]) Array.unwrap(key.toArray());
byte[] nonceBytes = (byte[]) Array.unwrap(iv.toArray());
byte[] ciphertextBytes = (byte[]) Array.unwrap(cipherTxt.toArray());
byte[] tagBytes = (byte[]) Array.unwrap(authTag.toArray());
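// JCA's "AES/GCM/NoPadding" expects the authentication tag to be appended to the
// ciphertext, so the two Dafny byte sequences are stitched back together here.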
byte[] ciphertextAndTag = new byte[ciphertextBytes.length + tagBytes.length];
System.arraycopy(ciphertextBytes, 0, ciphertextAndTag, 0, ciphertextBytes.length);
System.arraycopy(tagBytes, 0, ciphertextAndTag, ciphertextBytes.length, tagBytes.length);
final AlgorithmParameterSpec spec =
new GCMParameterSpec(encAlg._tagLength * 8, nonceBytes, 0, nonceBytes.length);
try {
Cipher cipher_ = Cipher.getInstance("AES/GCM/NoPadding");
SecretKey secretKey = new SecretKeySpec(keyBytes, "AES");
cipher_.init(Cipher.DECRYPT_MODE, secretKey, spec, ExternRandom.getSecureRandom());
if (aad != null) {
byte[] aadBytes = (byte[]) Array.unwrap(aad.toArray());
cipher_.updateAAD(aadBytes);
}
byte[] cipherOutput = cipher_.doFinal(ciphertextAndTag);
return Result.create_Success(DafnySequence.fromBytes(cipherOutput));
} catch ( GeneralSecurityException e) {
return Result.create_Failure(ToDafny.Error(
OpaqueError.builder().obj(e).build())
);
}
}
}
| 2,974 |
0 |
Create_ds/aws-cryptographic-material-providers-library-java/AwsCryptographyPrimitives/runtimes/java/src/main/java
|
Create_ds/aws-cryptographic-material-providers-library-java/AwsCryptographyPrimitives/runtimes/java/src/main/java/Signature/__default.java
|
package Signature;
public class __default extends Signature._ExternBase___default {
}
| 2,975 |
0 |
Create_ds/aws-cryptographic-material-providers-library-java/AwsCryptographyPrimitives/runtimes/java/src/main/java
|
Create_ds/aws-cryptographic-material-providers-library-java/AwsCryptographyPrimitives/runtimes/java/src/main/java/Signature/SignatureAlgorithm.java
|
package Signature;
import java.security.AlgorithmParameters;
import java.security.NoSuchAlgorithmException;
import java.security.spec.ECGenParameterSpec;
import java.security.spec.ECParameterSpec;
import java.security.spec.InvalidParameterSpecException;
import software.amazon.cryptography.primitives.internaldafny.types.DigestAlgorithm;
import software.amazon.cryptography.primitives.internaldafny.types.ECDSASignatureAlgorithm;
import software.amazon.cryptography.primitives.internaldafny.types.Error;
import Wrappers_Compile.Result;
import software.amazon.cryptography.primitives.ToDafny;
import software.amazon.cryptography.primitives.model.AwsCryptographicPrimitivesError;
import static Signature.ECDSA.SEC_P256;
import static Signature.ECDSA.SEC_P384;
import static Signature.ECDSA.SEC_PRIME_FIELD_PREFIX;
public enum SignatureAlgorithm {
P256(SEC_PRIME_FIELD_PREFIX+SEC_P256, DigestAlgorithm.create_SHA__256(), "NONEwithECDSA", (short) 71),
P384(SEC_PRIME_FIELD_PREFIX+SEC_P384, DigestAlgorithm.create_SHA__384(), "NONEwithECDSA", (short) 103);
public final String curve;
public final DigestAlgorithm messageDigestAlgorithm;
public final String rawSignatureAlgorithm;
public final short expectedSignatureLength;
SignatureAlgorithm(
final String curve,
final DigestAlgorithm messageDigestAlgorithm,
final String rawSignatureAlgorithm,
final short expectedSignatureLength
) {
this.curve = curve;
this.messageDigestAlgorithm = messageDigestAlgorithm;
this.rawSignatureAlgorithm = rawSignatureAlgorithm;
this.expectedSignatureLength = expectedSignatureLength;
}
static Result<SignatureAlgorithm, Error> signatureAlgorithm(ECDSASignatureAlgorithm dtor_signatureAlgorithm) {
final SignatureAlgorithm signatureAlgorithm;
//= aws-encryption-sdk-specification/framework/transitive-requirements.md#ecdsa
//# If specified to use ECDSA, the AWS Encryption SDK MUST use ECDSA with the following specifics:
//# - The elliptic curve is specified by the algorithm suite.
//# The specific curves are defined in
//# [Digital Signature Standard (DSS) (FIPS PUB 186-4)](https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-4.pdf).
if (dtor_signatureAlgorithm.is_ECDSA__P256()) {
signatureAlgorithm = P256;
} else if (dtor_signatureAlgorithm.is_ECDSA__P384()) {
signatureAlgorithm = P384;
} else {
return Result.create_Failure(ToDafny.Error(
AwsCryptographicPrimitivesError.builder().message(
String.format("Requested Curve is not supported. Requested %s.",
dtor_signatureAlgorithm))
.build()));
}
return Result.create_Success(signatureAlgorithm);
}
static ECParameterSpec ecParameterSpec(
SignatureAlgorithm algorithm
) throws NoSuchAlgorithmException, InvalidParameterSpecException {
final ECGenParameterSpec genParameterSpec =
new ECGenParameterSpec(algorithm.curve);
final AlgorithmParameters parameters =
AlgorithmParameters.getInstance(ECDSA.ELLIPTIC_CURVE_ALGORITHM);
parameters.init(genParameterSpec);
return parameters.getParameterSpec(ECParameterSpec.class);
}
}
| 2,976 |
0 |
Create_ds/aws-cryptographic-material-providers-library-java/AwsCryptographyPrimitives/runtimes/java/src/main/java
|
Create_ds/aws-cryptographic-material-providers-library-java/AwsCryptographyPrimitives/runtimes/java/src/main/java/Signature/ECDSA.java
|
package Signature;
import java.math.BigInteger;
import java.security.GeneralSecurityException;
import java.security.InvalidKeyException;
import java.security.KeyPair;
import java.security.KeyPairGenerator;
import java.security.NoSuchAlgorithmException;
import java.security.SecureRandom;
import java.security.Signature;
import java.security.SignatureException;
import java.security.interfaces.ECPrivateKey;
import java.security.interfaces.ECPublicKey;
import java.security.spec.ECGenParameterSpec;
import software.amazon.cryptography.primitives.internaldafny.types.ECDSASignatureAlgorithm;
import software.amazon.cryptography.primitives.internaldafny.types.Error;
import Digest_Compile.ExternDigest;
import Random_Compile.ExternRandom;
import Wrappers_Compile.Result;
import dafny.Array;
import dafny.DafnySequence;
import software.amazon.cryptography.primitives.ToDafny;
import software.amazon.cryptography.primitives.model.AwsCryptographicPrimitivesError;
import software.amazon.cryptography.primitives.model.OpaqueError;
import static Signature.SignatureAlgorithm.signatureAlgorithm;
public class ECDSA {
static final String ELLIPTIC_CURVE_ALGORITHM = "EC";
/* Standards for Efficient Cryptography over a prime field */
static final String SEC_PRIME_FIELD_PREFIX = "secp";
static final String SEC_P256 = "256r1";
static final String SEC_P384 = "384r1";
/* Constants used by SEC-1 v2 point compression and decompression algorithms */
static final BigInteger TWO = BigInteger.valueOf(2);
static final BigInteger THREE = BigInteger.valueOf(3);
static final BigInteger FOUR = BigInteger.valueOf(4);
public static Result<SignatureKeyPair, Error> ExternKeyGen(
ECDSASignatureAlgorithm dtor_signatureAlgorithm
) {
final Result<SignatureAlgorithm, Error> maybeSignatureAlgorithm =
signatureAlgorithm(dtor_signatureAlgorithm);
if (maybeSignatureAlgorithm.is_Failure()) {
return Result.create_Failure(maybeSignatureAlgorithm.dtor_error());
}
final ECGenParameterSpec genParameterSpec =
new ECGenParameterSpec(maybeSignatureAlgorithm.dtor_value().curve);
final SecureRandom secureRandom = ExternRandom.getSecureRandom();
final KeyPairGenerator keyGen;
try {
keyGen = KeyPairGenerator.getInstance(ELLIPTIC_CURVE_ALGORITHM);
keyGen.initialize(genParameterSpec, secureRandom);
} catch (GeneralSecurityException e) {
return Result.create_Failure(ToDafny.Error(
OpaqueError.builder().obj(e).message(e.getMessage()).cause(e).build()));
}
final KeyPair keyPair = keyGen.generateKeyPair();
// The verification key is the public key;
// this is not recorded in the spec,
// but is implied by the lack of "MUST be kept secret".
final byte[] verificationKey = PublicKeyUtils.encodeAndCompressPublicKey(
keyPair.getPublic(), dtor_signatureAlgorithm);
// the signing key is the private key, as is implied by:
//= aws-encryption-sdk-specification/framework/structures.md#signing-key
//# The value of this key MUST be kept secret.
final byte[] signingKey = PrivateKeyUtils.encodePrivateKey(
(ECPrivateKey)keyPair.getPrivate());
return Result.create_Success(SignatureKeyPair.create(
DafnySequence.fromBytes(verificationKey),
DafnySequence.fromBytes(signingKey)
));
}
public static Result<DafnySequence<? extends Byte>, Error> Sign(
ECDSASignatureAlgorithm dtor_signatureAlgorithm,
DafnySequence<? extends Byte> dtor_signingKey,
DafnySequence<? extends Byte> dtor_message
) {
final Result<SignatureAlgorithm, Error> maybeSignatureAlgorithm =
signatureAlgorithm(dtor_signatureAlgorithm);
if (maybeSignatureAlgorithm.is_Failure()) {
return Result.create_Failure(maybeSignatureAlgorithm.dtor_error());
}
SignatureAlgorithm algorithm = maybeSignatureAlgorithm.dtor_value();
final Signature signatureCipher;
try {
signatureCipher = Signature.getInstance(algorithm.rawSignatureAlgorithm);
} catch (NoSuchAlgorithmException ex) {
return Result.create_Failure(ToDafny.Error(
AwsCryptographicPrimitivesError.builder()
.message(String.format(
"Requested Signature Algorithm is not supported. Requested %s.",
algorithm.rawSignatureAlgorithm))
.cause(ex)
.build()));
}
Result<ECPrivateKey, Error> maybePrivateKey =
PrivateKeyUtils.decodePrivateKey(algorithm, dtor_signingKey);
if (maybePrivateKey.is_Failure()) {
return Result.create_Failure(maybePrivateKey.dtor_error());
}
final ECPrivateKey privateKey = maybePrivateKey.dtor_value();
final Result<byte[], Error> maybeDigest = ExternDigest.__default.internalDigest(
algorithm.messageDigestAlgorithm, dtor_message);
if (maybeDigest.is_Failure()) {
return Result.create_Failure(maybeDigest.dtor_error());
}
final byte[] digest = maybeDigest.dtor_value();
try {
signatureCipher.initSign(privateKey, ExternRandom.getSecureRandom());
} catch (InvalidKeyException ex) {
return Result.create_Failure(ToDafny.Error(
AwsCryptographicPrimitivesError.builder()
.message(String.format(
"Signature Cipher does not support the provided key. " +
"Signature: %s, " +
"Key: %s",
signatureCipher, privateKey))
.cause(ex)
.build()));
}
final byte[] signatureBytes;
try {
signatureBytes = SignUtils.generateEcdsaFixedLengthSignature(
digest, signatureCipher, privateKey,
algorithm.expectedSignatureLength);
} catch (SignatureException e) {
return Result.create_Failure(ToDafny.Error(
OpaqueError.builder().obj(e).message(e.getMessage()).cause(e).build()));
}
return Result.create_Success(DafnySequence.fromBytes(signatureBytes));
}
public static Result<Boolean, Error> Verify(
ECDSASignatureAlgorithm dtor_signatureAlgorithm,
DafnySequence<? extends Byte> dtor_verificationKey,
DafnySequence<? extends Byte> dtor_message,
DafnySequence<? extends Byte> dtor_signature
) {
final Result<SignatureAlgorithm, Error> maybeSignatureAlgorithm =
signatureAlgorithm(dtor_signatureAlgorithm);
if (maybeSignatureAlgorithm.is_Failure()) {
return Result.create_Failure(maybeSignatureAlgorithm.dtor_error());
}
final SignatureAlgorithm algorithm = maybeSignatureAlgorithm.dtor_value();
Result<ECPublicKey, Error> maybePublicKey =
PublicKeyUtils.decodePublicKey(algorithm, dtor_verificationKey);
if (maybePublicKey.is_Failure()) {
return Result.create_Failure(maybePublicKey.dtor_error());
}
final ECPublicKey publicKey = maybePublicKey.dtor_value();
final Signature signatureCipher;
try {
signatureCipher = Signature.getInstance(algorithm.rawSignatureAlgorithm);
} catch (NoSuchAlgorithmException ex) {
return Result.create_Failure(ToDafny.Error(
AwsCryptographicPrimitivesError.builder()
.message(String.format(
"Requested Signature Algorithm is not supported. Requested %s.",
algorithm.rawSignatureAlgorithm))
.cause(ex)
.build()));
}
try {
signatureCipher.initVerify(publicKey);
} catch (InvalidKeyException ex) {
return Result.create_Failure(ToDafny.Error(
AwsCryptographicPrimitivesError.builder()
.message(String.format(
"Signature does not support the provided key. " +
"Signature: %s, " +
"Key: %s",
signatureCipher, publicKey))
.cause(ex)
.build()));
}
final Result<byte[], Error> maybeDigest = ExternDigest.__default.internalDigest(
algorithm.messageDigestAlgorithm, dtor_message);
if (maybeDigest.is_Failure()) {
return Result.create_Failure(maybeDigest.dtor_error());
}
final byte[] digest = maybeDigest.dtor_value();
try {
signatureCipher.update(digest);
} catch (SignatureException ex) {
// For `update`, SignatureException can only be thrown if the
// signature cipher was not initialized.
// This should be impossible;
// if it happens, things are very wonky,
// and we should immediately throw.
throw new RuntimeException(ex);
}
final boolean success;
try {
// In the .NET Extern,
// the signature bytes are converted via DER deserialization.
// In the ESDK, on decryption,
// the Signature's bytes are just handed to the cipher.
// Checking the general Java default provider,
// sun.security.util.ECUtil.decodeSignature,
// explicitly states:
// "Convert the DER encoding of R and S into a concatenation of R and S",
// which indicates that this is correct.
final byte[] signatureAsBytes = (byte[]) Array.unwrap(dtor_signature.toArray());
success = signatureCipher.verify(signatureAsBytes);
} catch (SignatureException ex) {
return Result.create_Failure(ToDafny.Error(
AwsCryptographicPrimitivesError.builder()
.message(String.format(
"Signature Cipher does not support the provided key. " +
"Signature: %s, " +
"Key: %s",
signatureCipher, publicKey))
.cause(ex)
.build()));
}
return Result.create_Success(success);
}
}
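/*
 * Illustrative sketch (not part of the library): a key-gen / sign / verify
 * round trip through the Externs above. The Dafny datatype accessors
 * ECDSASignatureAlgorithm.create_ECDSA__P256(), dtor_signingKey() and
 * dtor_verificationKey() are assumptions, inferred from the
 * create_SHA__256() / dtor_* naming patterns used elsewhere in this package.
 */
class EcdsaRoundTripSketch {
    public static void main(String[] args) {
        final ECDSASignatureAlgorithm alg =
            ECDSASignatureAlgorithm.create_ECDSA__P256(); // assumed constructor
        final Result<SignatureKeyPair, Error> keys = ECDSA.ExternKeyGen(alg);
        if (keys.is_Failure()) {
            throw new RuntimeException(keys.dtor_error().toString());
        }
        final DafnySequence<? extends Byte> message = DafnySequence.fromBytes(
            "hello".getBytes(java.nio.charset.StandardCharsets.UTF_8));
        final Result<DafnySequence<? extends Byte>, Error> signature = ECDSA.Sign(
            alg, keys.dtor_value().dtor_signingKey(), message); // assumed accessor
        final Result<Boolean, Error> verified = ECDSA.Verify(
            alg, keys.dtor_value().dtor_verificationKey(), // assumed accessor
            message, signature.dtor_value());
        System.out.println("verified: " + verified.dtor_value());
    }
}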
| 2,977 |
0 |
Create_ds/aws-cryptographic-material-providers-library-java/AwsCryptographyPrimitives/runtimes/java/src/main/java
|
Create_ds/aws-cryptographic-material-providers-library-java/AwsCryptographyPrimitives/runtimes/java/src/main/java/Signature/SignUtils.java
|
package Signature;
import org.bouncycastle.asn1.ASN1Encodable;
import org.bouncycastle.asn1.ASN1Integer;
import org.bouncycastle.asn1.ASN1Sequence;
import org.bouncycastle.asn1.DERSequence;
import java.io.IOException;
import java.security.Signature;
import java.security.SignatureException;
import java.security.interfaces.ECPrivateKey;
/** Helper methods for calculating a digital signature. */
class SignUtils {
// This is not in our spec:
// The ESDK wants a deterministic message size,
// including the signature in the footer.
// This "feature" facilitates uploading to S3,
// and use cases where "disk space" must be pre-allocated before
// receiving a data stream.
// Original Author: Bryan Donlan
static byte[] generateEcdsaFixedLengthSignature(
final byte[] digest,
final Signature signatureCipher,
final ECPrivateKey ecKey,
final short expectedLength
) throws SignatureException {
byte[] signatureBytes;
// Unfortunately, we need deterministic lengths while
// some signature lengths are non-deterministic.
// So, we retry until we get the right length :-(
do {
signatureCipher.update(digest);
signatureBytes = signatureCipher.sign();
if (signatureBytes.length != expectedLength) {
// Most of the time, a signature of the wrong length can be fixed
// by negating s in the signature relative to the group order.
ASN1Sequence seq = ASN1Sequence.getInstance(signatureBytes);
ASN1Integer r = (ASN1Integer) seq.getObjectAt(0);
ASN1Integer s = (ASN1Integer) seq.getObjectAt(1);
s = new ASN1Integer(ecKey.getParams().getOrder().subtract(s.getPositiveValue()));
seq = new DERSequence(new ASN1Encodable[] {r, s});
try {
signatureBytes = seq.getEncoded();
} catch (IOException ex) {
throw new SignatureException(ex);
}
}
} while (signatureBytes.length != expectedLength);
return signatureBytes;
}
}
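/*
 * Illustrative sketch (not part of the library): why DER-encoded ECDSA
 * signatures vary in length. An ASN1Integer whose leading content bit is
 * set needs an extra 0x00 sign byte, so r and s each encode to 32 or 33
 * content bytes on P-256; negating s modulo the group order (as above)
 * usually brings it back under the threshold.
 */
class DerLengthSketch {
    public static void main(String[] args) throws IOException {
        final java.math.BigInteger belowThreshold =
            java.math.BigInteger.ONE.shiftLeft(255).subtract(java.math.BigInteger.ONE); // 0x7FFF...FF
        final java.math.BigInteger aboveThreshold =
            java.math.BigInteger.ONE.shiftLeft(255); // 0x8000...00, high bit set
        System.out.println(new ASN1Integer(belowThreshold).getEncoded().length); // 34
        System.out.println(new ASN1Integer(aboveThreshold).getEncoded().length); // 35
    }
}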
| 2,978 |
0 |
Create_ds/aws-cryptographic-material-providers-library-java/AwsCryptographyPrimitives/runtimes/java/src/main/java
|
Create_ds/aws-cryptographic-material-providers-library-java/AwsCryptographyPrimitives/runtimes/java/src/main/java/Signature/PrivateKeyUtils.java
|
package Signature;
import java.math.BigInteger;
import java.security.KeyFactory;
import java.security.NoSuchAlgorithmException;
import java.security.interfaces.ECPrivateKey;
import java.security.spec.ECParameterSpec;
import java.security.spec.ECPrivateKeySpec;
import java.security.spec.InvalidKeySpecException;
import java.security.spec.InvalidParameterSpecException;
import software.amazon.cryptography.primitives.internaldafny.types.Error;
import Wrappers_Compile.Result;
import dafny.Array;
import dafny.DafnySequence;
import software.amazon.cryptography.primitives.ToDafny;
import software.amazon.cryptography.primitives.model.OpaqueError;
/** Helper methods for encoding and decoding Elliptic Curve private keys. */
class PrivateKeyUtils {
// Based on our ESDK-Net implementation,
// ../../../../../net/Extern/Signature.cs#L46
// we convert the BigInteger to a byte array with the sign.
// Bouncy Castle .NET's source code and documentation are not clear
// on whether that is big-endian two's complement or some other representation.
// Here, we are using Java's BigInteger to get
// the big-endian two's complement with the sign.
// However, the private key is ephemeral;
// it will always be generated and used in the same runtime.
// As such, we do not have to ensure that our different runtimes
// encode the private key identically.
static byte[] encodePrivateKey(final ECPrivateKey privateKey) {
return privateKey.getS().toByteArray();
}
static Result<ECPrivateKey, Error> decodePrivateKey(
SignatureAlgorithm algorithm,
DafnySequence<? extends Byte> dtor_signingKey
) {
final ECPrivateKey privateKey;
try {
final ECParameterSpec ecParameterSpec = SignatureAlgorithm.ecParameterSpec(algorithm);
final byte[] keyAsBytes = (byte[]) Array.unwrap(dtor_signingKey.toArray());
final ECPrivateKeySpec privateKeySpec = new ECPrivateKeySpec(
new BigInteger(keyAsBytes), ecParameterSpec);
// The following should result in
// sun.security.ec.ECKeyFactory.implGeneratePrivate
// or something equivalent.
// "generatePrivate" is a misnomer;
// it's really a deterministic factory method.
privateKey = (ECPrivateKey) KeyFactory.getInstance(ECDSA.ELLIPTIC_CURVE_ALGORITHM)
.generatePrivate(privateKeySpec);
} catch (NoSuchAlgorithmException | InvalidParameterSpecException | InvalidKeySpecException e) {
// The private key will always be generated in this runtime (Java);
// these exceptions SHOULD BE impossible.
return Result.create_Failure(ToDafny.Error(
OpaqueError.builder().obj(e).message(e.getMessage()).cause(e).build()));
}
return Result.create_Success(privateKey);
}
}
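/*
 * Illustrative sketch (not part of the library): the encode/decode pair
 * above is just BigInteger's signed big-endian two's-complement round trip.
 */
class ScalarRoundTripSketch {
    public static void main(String[] args) {
        final BigInteger s = new BigInteger(256, new java.security.SecureRandom());
        final byte[] encoded = s.toByteArray();             // what encodePrivateKey stores
        final BigInteger decoded = new BigInteger(encoded); // what decodePrivateKey reads
        System.out.println(s.equals(decoded));              // always true
    }
}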
| 2,979 |
0 |
Create_ds/aws-cryptographic-material-providers-library-java/AwsCryptographyPrimitives/runtimes/java/src/main/java
|
Create_ds/aws-cryptographic-material-providers-library-java/AwsCryptographyPrimitives/runtimes/java/src/main/java/Signature/PublicKeyUtils.java
|
package Signature;
import java.math.BigInteger;
import java.security.KeyFactory;
import java.security.NoSuchAlgorithmException;
import java.security.PublicKey;
import java.security.interfaces.ECPublicKey;
import java.security.spec.ECFieldFp;
import java.security.spec.ECParameterSpec;
import java.security.spec.ECPoint;
import java.security.spec.ECPublicKeySpec;
import java.security.spec.InvalidKeySpecException;
import java.security.spec.InvalidParameterSpecException;
import java.util.Arrays;
import java.util.Objects;
import software.amazon.cryptography.primitives.internaldafny.types.ECDSASignatureAlgorithm;
import software.amazon.cryptography.primitives.internaldafny.types.Error;
import Wrappers_Compile.Result;
import dafny.Array;
import dafny.DafnySequence;
import software.amazon.cryptography.primitives.ToDafny;
import software.amazon.cryptography.primitives.model.AwsCryptographicPrimitivesError;
import software.amazon.cryptography.primitives.model.OpaqueError;
import static Signature.ECDSA.FOUR;
import static Signature.ECDSA.THREE;
import static Signature.ECDSA.TWO;
import static java.math.BigInteger.ONE;
import static java.math.BigInteger.ZERO;
/** Helper methods for encoding and decoding Elliptic Curve public keys. */
class PublicKeyUtils {
/**
* @param key The Elliptic Curve public key to
* encode and compress as described in SEC-1 v2 section 2.3.3
* @param dtor_signatureAlgorithm The Elliptic Curve algorithm
* @return byte[] The encoded and compressed public key
* @see <a href="http://www.secg.org/sec1-v2.pdf">http://www.secg.org/sec1-v2.pdf</a>
*/
static byte[] encodeAndCompressPublicKey(PublicKey key, ECDSASignatureAlgorithm dtor_signatureAlgorithm) {
Objects.requireNonNull(key, "key is required");
if (!(key instanceof ECPublicKey)) {
throw new IllegalArgumentException("key must be an instance of ECPublicKey");
}
final BigInteger x = ((ECPublicKey) key).getW().getAffineX();
final BigInteger y = ((ECPublicKey) key).getW().getAffineY();
final BigInteger compressedY = y.mod(TWO).equals(ZERO) ? TWO : THREE;
// The Dafny source FieldSize method's count includes the compressed-Y byte.
final int xFieldSize = _ExternBase___default.FieldSize(dtor_signatureAlgorithm).intValueExact() - 1;
final byte[] xBytes = encodeAndCompressPublicKeyX(x, xFieldSize);
final byte[] compressedKey = new byte[xBytes.length + 1];
System.arraycopy(xBytes, 0, compressedKey, 1, xBytes.length);
compressedKey[0] = compressedY.byteValue();
return compressedKey;
}
/**
* Removes the leading zero sign byte from the byte array representation of a BigInteger (if
* present) and left pads with zeroes to produce a byte array of the given length.
*
* @param bigInteger The BigInteger to convert to a byte array
* @param length The length of the byte array, must be at least as long as the BigInteger byte
* array without the sign byte
* @return The byte array
*/
static byte[] encodeAndCompressPublicKeyX(final BigInteger bigInteger, final int length) {
byte[] rawBytes = bigInteger.toByteArray();
// If rawBytes is already the correct length, return it.
if (rawBytes.length == length) {
return rawBytes;
}
// If we're exactly one byte too large, but we have a leading zero byte, remove it and return.
if (rawBytes.length == length + 1 && rawBytes[0] == 0) {
return Arrays.copyOfRange(rawBytes, 1, rawBytes.length);
}
if (rawBytes.length > length) {
throw new IllegalArgumentException(
"Length must be at least as long as the BigInteger byte array "
+ "without the sign byte");
}
final byte[] paddedResult = new byte[length];
System.arraycopy(rawBytes, 0, paddedResult, length - rawBytes.length, rawBytes.length);
return paddedResult;
}
static Result<ECPublicKey, Error> decodePublicKey(
SignatureAlgorithm algorithm,
DafnySequence<? extends Byte> dtor_verificationKey
) {
final ECPublicKey publicKey;
try {
final ECParameterSpec ecParameterSpec = SignatureAlgorithm.ecParameterSpec(algorithm);
final byte[] keyAsBytes = (byte[]) Array.unwrap(dtor_verificationKey.toArray());
final ECPublicKeySpec publicKeySpec = new ECPublicKeySpec(
byteArrayToECPoint(keyAsBytes, ecParameterSpec), ecParameterSpec);
// The following should result in
// sun.security.ec.ECKeyFactory.implGeneratePublic
// or something equivalent.
// "generatePublic" is a misnomer;
// it's really a deterministic factory method.
publicKey = (ECPublicKey) KeyFactory.getInstance(ECDSA.ELLIPTIC_CURVE_ALGORITHM)
.generatePublic(publicKeySpec);
} catch (ECDecodingException ex) {
return Result.create_Failure(ToDafny.Error(
AwsCryptographicPrimitivesError.builder()
.message(String.format(
"Could not decode Elliptic Curve point due to: %s.",
ex.getMessage()))
.cause(ex)
.build()));
} catch (NoSuchAlgorithmException | InvalidKeySpecException | InvalidParameterSpecException e) {
return Result.create_Failure(ToDafny.Error(
OpaqueError.builder().obj(e).message(e.getMessage()).cause(e).build()));
}
return Result.create_Success(publicKey);
}
/**
* Decodes a compressed elliptic curve point as described in SEC-1 v2 section 2.3.4.<p>
* Original Author: Wesley Rosenblum
*
* @param keyAsBytes The encoded and compressed Elliptic Curve public key.
* @param ecParameterSpec Elliptic Curve parameter spec describing the curve.
* @return The Elliptic Curve point.
* @see <a href="http://www.secg.org/sec1-v2.pdf">http://www.secg.org/sec1-v2.pdf</a>
*/
static ECPoint byteArrayToECPoint(
final byte[] keyAsBytes,
final ECParameterSpec ecParameterSpec
) throws ECDecodingException {
final BigInteger x = new BigInteger(1, Arrays.copyOfRange(keyAsBytes, 1, keyAsBytes.length));
final byte compressedY = keyAsBytes[0];
final BigInteger yOrder;
if (compressedY == TWO.byteValue()) {
yOrder = ZERO;
} else if (compressedY == THREE.byteValue()) {
yOrder = ONE;
} else {
throw new ECDecodingException("Compressed y value was invalid");
}
final BigInteger p = ((ECFieldFp) ecParameterSpec.getCurve().getField()).getP();
final BigInteger a = ecParameterSpec.getCurve().getA();
final BigInteger b = ecParameterSpec.getCurve().getB();
// alpha must equal y^2 mod p; this is validated below
final BigInteger alpha = x.modPow(THREE, p).add(a.multiply(x).mod(p)).add(b).mod(p);
final BigInteger beta;
if (p.mod(FOUR).equals(THREE)) {
beta = alpha.modPow(p.add(ONE).divide(FOUR), p);
} else {
throw new ECDecodingException("Curve not supported at this time");
}
//noinspection SuspiciousNameCombination
final BigInteger y = beta.mod(TWO).equals(yOrder) ? beta : p.subtract(beta);
// Validate that y^2 = alpha (mod p) to prevent invalid-point attacks
if (!alpha.equals(y.modPow(TWO, p))) {
throw new ECDecodingException("Y was invalid");
}
return new ECPoint(x, y);
}
static class ECDecodingException extends RuntimeException {
ECDecodingException(String message) {
super(message);
}
}
}
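/*
 * Illustrative sketch (not part of the library): the three cases handled by
 * encodeAndCompressPublicKeyX above, shown with small values and length 4.
 * Assumes the sketch lives in this (package-private) Signature package.
 */
class XFieldPaddingSketch {
    public static void main(String[] args) {
        // Exact length: 0x01020304 encodes to 4 raw bytes and is returned unchanged.
        System.out.println(Arrays.toString(PublicKeyUtils.encodeAndCompressPublicKeyX(
            new BigInteger("01020304", 16), 4))); // [1, 2, 3, 4]
        // Sign byte stripped: 0xFF000000 encodes to 5 bytes with a leading 0x00.
        System.out.println(Arrays.toString(PublicKeyUtils.encodeAndCompressPublicKeyX(
            new BigInteger("FF000000", 16), 4))); // [-1, 0, 0, 0]
        // Left padding: 0x0304 encodes to 2 bytes and is padded to length 4.
        System.out.println(Arrays.toString(PublicKeyUtils.encodeAndCompressPublicKeyX(
            new BigInteger("0304", 16), 4))); // [0, 0, 3, 4]
    }
}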
| 2,980 |
0 |
Create_ds/aws-cryptographic-material-providers-library-java/AwsCryptographyPrimitives/runtimes/java/src/main/java/Dafny/Aws/Cryptography
|
Create_ds/aws-cryptographic-material-providers-library-java/AwsCryptographyPrimitives/runtimes/java/src/main/java/Dafny/Aws/Cryptography/Primitives/__default.java
|
package software.amazon.cryptography.primitives.internaldafny;
public class __default extends software.amazon.cryptography.primitives.internaldafny._ExternBase___default {
}
| 2,981 |
0 |
Create_ds/aws-cryptographic-material-providers-library-java/AwsCryptographyPrimitives/runtimes/java/src/main/java/Dafny/Aws/Cryptography/Primitives
|
Create_ds/aws-cryptographic-material-providers-library-java/AwsCryptographyPrimitives/runtimes/java/src/main/java/Dafny/Aws/Cryptography/Primitives/Types/__default.java
|
package software.amazon.cryptography.primitives.internaldafny.types;
public class __default extends software.amazon.cryptography.primitives.internaldafny.types._ExternBase___default{
}
| 2,982 |
0 |
Create_ds/aws-cryptographic-material-providers-library-java/AwsCryptographyPrimitives/runtimes/java/src/main/java
|
Create_ds/aws-cryptographic-material-providers-library-java/AwsCryptographyPrimitives/runtimes/java/src/main/java/Digest_Compile/ExternDigest.java
|
package Digest_Compile;
import software.amazon.cryptography.primitives.internaldafny.types.DigestAlgorithm;
import software.amazon.cryptography.primitives.internaldafny.types.Error;
import Wrappers_Compile.Result;
import dafny.Array;
import dafny.DafnySequence;
import software.amazon.cryptography.primitives.ToDafny;
import software.amazon.cryptography.primitives.model.AwsCryptographicPrimitivesError;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
public class ExternDigest {
public static class __default {
public static Result<DafnySequence<? extends Byte>, Error> Digest(
DigestAlgorithm digestAlgorithm,
DafnySequence<? extends Byte> dtor_message
) {
final Result<byte[], Error> maybeDigest = internalDigest(digestAlgorithm, dtor_message);
if (maybeDigest.is_Failure()) {
return Result.create_Failure(maybeDigest.dtor_error());
}
return Result.create_Success(DafnySequence.fromBytes(
maybeDigest.dtor_value()));
}
public static Result<byte[], Error> internalDigest(
DigestAlgorithm digestAlgorithm,
DafnySequence<? extends Byte> dtor_message
) {
try {
final MessageDigest hash = getHash(digestAlgorithm);
final byte[] messageBytes = (byte[]) Array.unwrap(dtor_message.toArray());
hash.update(messageBytes);
final byte[] digest = hash.digest();
return Result.create_Success(digest);
} catch (NoSuchAlgorithmException ex) {
final Error err = ToDafny.Error(
AwsCryptographicPrimitivesError
.builder()
.message("Requested digest Algorithm is not supported.")
.cause(ex)
.build());
return Result.create_Failure(err);
}
}
private static MessageDigest getHash(DigestAlgorithm digestAlgorithm) throws NoSuchAlgorithmException {
if (digestAlgorithm.is_SHA__256()) {
return MessageDigest.getInstance("SHA-256");
} else if (digestAlgorithm.is_SHA__384()) {
return MessageDigest.getInstance("SHA-384");
} else if (digestAlgorithm.is_SHA__512()) {
return MessageDigest.getInstance("SHA-512");
} else {
throw new NoSuchAlgorithmException();
}
}
}
}
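/*
 * Illustrative sketch (not part of the library): hashing a message through
 * the Extern and checking it against a direct MessageDigest call.
 */
class DigestSketch {
    public static void main(String[] args) throws NoSuchAlgorithmException {
        final byte[] message = "abc".getBytes(java.nio.charset.StandardCharsets.UTF_8);
        final Result<byte[], Error> viaExtern = ExternDigest.__default.internalDigest(
            DigestAlgorithm.create_SHA__256(), DafnySequence.fromBytes(message));
        final byte[] direct = MessageDigest.getInstance("SHA-256").digest(message);
        System.out.println(java.util.Arrays.equals(viaExtern.dtor_value(), direct)); // true
    }
}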
| 2,983 |
0 |
Create_ds/aws-cryptographic-material-providers-library-java/AwsCryptographyPrimitives/runtimes/java/src/main/java
|
Create_ds/aws-cryptographic-material-providers-library-java/AwsCryptographyPrimitives/runtimes/java/src/main/java/RSAEncryption/__default.java
|
package RSAEncryption;
public class __default extends RSAEncryption._ExternBase___default {
}
| 2,984 |
0 |
Create_ds/aws-cryptographic-material-providers-library-java/AwsCryptographyPrimitives/runtimes/java/src/main/java
|
Create_ds/aws-cryptographic-material-providers-library-java/AwsCryptographyPrimitives/runtimes/java/src/main/java/RSAEncryption/RSA.java
|
package RSAEncryption;
import software.amazon.cryptography.primitives.internaldafny.types.Error;
import software.amazon.cryptography.primitives.internaldafny.types.RSAPaddingMode;
import Random_Compile.ExternRandom;
import Wrappers_Compile.Result;
import dafny.Array;
import dafny.DafnySequence;
import dafny.Tuple2;
import dafny.TypeDescriptor;
import org.bouncycastle.asn1.pkcs.PrivateKeyInfo;
import org.bouncycastle.asn1.x509.SubjectPublicKeyInfo;
import org.bouncycastle.crypto.AsymmetricBlockCipher;
import org.bouncycastle.crypto.AsymmetricCipherKeyPair;
import org.bouncycastle.crypto.KeyGenerationParameters;
import org.bouncycastle.crypto.digests.SHA1Digest;
import org.bouncycastle.crypto.digests.SHA256Digest;
import org.bouncycastle.crypto.digests.SHA384Digest;
import org.bouncycastle.crypto.digests.SHA512Digest;
import org.bouncycastle.crypto.encodings.OAEPEncoding;
import org.bouncycastle.crypto.encodings.PKCS1Encoding;
import org.bouncycastle.crypto.engines.RSABlindedEngine;
import org.bouncycastle.crypto.generators.RSAKeyPairGenerator;
import org.bouncycastle.crypto.params.AsymmetricKeyParameter;
import org.bouncycastle.crypto.params.RSAKeyGenerationParameters;
import org.bouncycastle.crypto.params.RSAKeyParameters;
import org.bouncycastle.crypto.util.PrivateKeyFactory;
import org.bouncycastle.crypto.util.PrivateKeyInfoFactory;
import org.bouncycastle.crypto.util.PublicKeyFactory;
import org.bouncycastle.crypto.util.SubjectPublicKeyInfoFactory;
import org.bouncycastle.util.io.pem.PemWriter;
import org.bouncycastle.util.io.pem.PemReader;
import org.bouncycastle.util.io.pem.PemObject;
import software.amazon.cryptography.primitives.ToDafny;
import software.amazon.cryptography.primitives.model.OpaqueError;
import java.security.SecureRandom;
import java.security.spec.X509EncodedKeySpec;
import java.nio.charset.StandardCharsets;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.StringWriter;
import java.nio.ByteBuffer;
import java.math.BigInteger;
import static software.amazon.smithy.dafny.conversion.ToDafny.Simple.ByteSequence;
public class RSA {
private static final int RSA_KEY_LEN_MAX = 4096;
private static final int RSA_PUBLIC_EXPONENT = 65537;
private static final int RSA_CERTAINTY = 256;
private static final String RSA_ERROR_MSG = String.format(
"AWS Crypto will not generate an RSA Key with length greater than %s", RSA_KEY_LEN_MAX);
public static Tuple2<DafnySequence<? extends Byte>, DafnySequence<? extends Byte>> GenerateKeyPairExtern(int lengthBits) {
try {
if (lengthBits > RSA_KEY_LEN_MAX) {
throw new RuntimeException(RSA_ERROR_MSG);
}
RSAKeyPairGenerator keygen = new RSAKeyPairGenerator();
final SecureRandom secureRandom = ExternRandom.getSecureRandom();
KeyGenerationParameters keyGenerationParameters = new RSAKeyGenerationParameters(
BigInteger.valueOf(RSA_PUBLIC_EXPONENT), secureRandom, lengthBits, RSA_CERTAINTY
);
keygen.init(keyGenerationParameters);
AsymmetricCipherKeyPair keyPair = keygen.generateKeyPair();
return Tuple2.create(
GetPemBytes(keyPair.getPublic()),
GetPemBytes(keyPair.getPrivate())
);
} catch (Exception e) {
throw new RuntimeException("Unable to create RSA Key Pair", e);
}
}
public static Result<Integer, Error> GetRSAKeyModulusLengthExtern(DafnySequence<? extends Byte> dtor_publicKey) {
try {
byte[] pemBytes = (byte[]) Array.unwrap(dtor_publicKey.toArray());
RSAKeyParameters keyParams = ParsePublicRsaPemBytes(pemBytes);
return Result.create_Success(keyParams.getModulus().bitLength());
} catch (Exception e) {
return Result.create_Failure(ToDafny.Error(
OpaqueError.builder().obj(e).message(e.getMessage()).cause(e).build())
);
}
}
// GetPemBytes represents a helper method that takes an AsymmetricKeyParameter and returns the corresponding
// private key or public key, PEM encoded, as UTF-8 bytes.
// Public keys are DER-encoded X.509 SubjectPublicKeyInfo, as specified in RFC 5280.
// Private keys are DER-encoded PKCS8 PrivateKeyInfo, as specified in RFC 5958.
private static DafnySequence<? extends Byte> GetPemBytes(AsymmetricKeyParameter keyParameter) throws IOException {
if (keyParameter.isPrivate()) {
PrivateKeyInfo privateKey = PrivateKeyInfoFactory.createPrivateKeyInfo(keyParameter);
StringWriter stringWriter = new StringWriter();
PemWriter pemWriter = new PemWriter(stringWriter);
pemWriter.writeObject(new PemObject("PRIVATE KEY", privateKey.getEncoded()));
pemWriter.close();
ByteBuffer outBuffer = StandardCharsets.UTF_8.encode(stringWriter.toString());
return ByteSequence(outBuffer, 0, outBuffer.limit());
} else {
SubjectPublicKeyInfo publicKey = SubjectPublicKeyInfoFactory.createSubjectPublicKeyInfo(keyParameter);
StringWriter stringWriter = new StringWriter();
PemWriter pemWriter = new PemWriter(stringWriter);
pemWriter.writeObject(new PemObject("PUBLIC KEY", publicKey.getEncoded()));
pemWriter.close();
ByteBuffer outBuffer = StandardCharsets.UTF_8.encode(stringWriter.toString());
return ByteSequence(outBuffer, 0, outBuffer.limit());
}
}
// Parses UTF8-encoded, PEM-encoded RSA Public keys.
private static RSAKeyParameters ParsePublicRsaPemBytes(byte[] pem) throws IOException {
// try-with-resources ensures the PemReader is closed even on failure.
try (PemReader pemReader = new PemReader(new InputStreamReader(new ByteArrayInputStream(pem)))) {
PemObject pemObject = pemReader.readPemObject();
byte[] content = pemObject.getContent();
return (RSAKeyParameters) PublicKeyFactory.createKey(content);
}
}
// Parses UTF8-encoded, PEM-encoded RSA Private keys.
private static RSAKeyParameters ParsePrivateRsaPemBytes(byte[] pem) throws IOException {
// try-with-resources ensures the PemReader is closed even on failure.
try (PemReader pemReader = new PemReader(new InputStreamReader(new ByteArrayInputStream(pem)))) {
PemObject pemObject = pemReader.readPemObject();
byte[] content = pemObject.getContent();
return (RSAKeyParameters) PrivateKeyFactory.createKey(content);
}
}
// GetEngineForPadding is a helper method that takes an RSAPaddingMode and returns an
// AsymmetricBlockCipher wrapping the RSABlindedEngine with the appropriate digest, or
// throws a RuntimeException if no valid padding exists.
private static AsymmetricBlockCipher GetEngineForPadding(RSAPaddingMode paddingMode) {
if (paddingMode.is_OAEP__SHA1()) {
return new OAEPEncoding(new RSABlindedEngine(), new SHA1Digest());
} else if (paddingMode.is_OAEP__SHA256()) {
return new OAEPEncoding(new RSABlindedEngine(), new SHA256Digest());
} else if (paddingMode.is_OAEP__SHA384()) {
return new OAEPEncoding(new RSABlindedEngine(), new SHA384Digest());
} else if (paddingMode.is_OAEP__SHA512()) {
return new OAEPEncoding(new RSABlindedEngine(), new SHA512Digest());
} else if (paddingMode.is_PKCS1()) {
return new PKCS1Encoding(new RSABlindedEngine());
} else {
throw new RuntimeException(String.format("Invalid RSA Padding Scheme %s", paddingMode));
}
}
public static Result<DafnySequence<? extends Byte>, Error> DecryptExtern(
RSAPaddingMode dtor_padding,
DafnySequence<? extends Byte> dtor_privateKey,
DafnySequence<? extends Byte> dtor_cipherText
) {
try {
byte[] privateKey = (byte[]) Array.unwrap(dtor_privateKey.toArray());
RSAKeyParameters keyParameter = ParsePrivateRsaPemBytes(privateKey);
byte[] ciphertext = (byte[]) Array.unwrap(dtor_cipherText.toArray());
AsymmetricBlockCipher engine = GetEngineForPadding(dtor_padding);
engine.init(false, keyParameter);
return Result.create_Success(
DafnySequence.fromBytes(
engine.processBlock(
ciphertext,
0,
ciphertext.length
)
)
);
} catch (Exception e) {
return Result.create_Failure(ToDafny.Error(
OpaqueError.builder().obj(e).message(e.getMessage()).cause(e).build())
);
}
}
public static Result<DafnySequence<? extends Byte>, Error> EncryptExtern(
RSAPaddingMode dtor_padding,
DafnySequence<? extends Byte> dtor_publicKey,
DafnySequence<? extends Byte> dtor_plaintext
) {
try {
byte[] publicKey = (byte[]) Array.unwrap(dtor_publicKey.toArray());
RSAKeyParameters publicKeyParam = ParsePublicRsaPemBytes(publicKey);
byte[] plaintext = (byte[]) Array.unwrap(dtor_plaintext.toArray());
AsymmetricBlockCipher engine = GetEngineForPadding(dtor_padding);
engine.init(true, publicKeyParam);
return Result.create_Success(
DafnySequence.fromBytes(
engine.processBlock(
plaintext,
0,
plaintext.length)
)
);
} catch (Exception e) {
return Result.create_Failure(ToDafny.Error(
OpaqueError.builder().obj(e).message(e.getMessage()).cause(e).build())
);
}
}
}
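/*
 * Illustrative sketch (not part of the library): an RSA-OAEP round trip
 * through the Externs above. RSAPaddingMode.create_OAEP__SHA256() and the
 * Tuple2 accessors dtor__0()/dtor__1() are assumptions, inferred from the
 * Dafny-generated naming patterns used elsewhere in this package.
 */
class RsaRoundTripSketch {
    public static void main(String[] args) {
        final Tuple2<DafnySequence<? extends Byte>, DafnySequence<? extends Byte>> pair =
            RSA.GenerateKeyPairExtern(2048); // (public PEM, private PEM)
        final DafnySequence<? extends Byte> plaintext = DafnySequence.fromBytes(
            "data key".getBytes(StandardCharsets.UTF_8));
        final RSAPaddingMode padding = RSAPaddingMode.create_OAEP__SHA256(); // assumed constructor
        final Result<DafnySequence<? extends Byte>, Error> ciphertext =
            RSA.EncryptExtern(padding, pair.dtor__0(), plaintext); // assumed accessor
        final Result<DafnySequence<? extends Byte>, Error> decrypted =
            RSA.DecryptExtern(padding, pair.dtor__1(), ciphertext.dtor_value()); // assumed accessor
        System.out.println(plaintext.equals(decrypted.dtor_value())); // true
    }
}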
| 2,985 |
0 |
Create_ds/aws-cryptographic-material-providers-library-java/AwsCryptographyPrimitives/runtimes/java/src/main/java
|
Create_ds/aws-cryptographic-material-providers-library-java/AwsCryptographyPrimitives/runtimes/java/src/main/java/Random_Compile/ExternRandom.java
|
// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package Random_Compile;
import software.amazon.cryptography.primitives.internaldafny.types.Error;
import Wrappers_Compile.Result;
import dafny.DafnySequence;
import software.amazon.cryptography.primitives.ToDafny;
import software.amazon.cryptography.primitives.model.OpaqueError;
import java.security.SecureRandom;
public class ExternRandom {
public static class __default {
public static Result<DafnySequence<? extends Byte>, Error> GenerateBytes(final int len) {
try {
// We should revisit whether there are limits on the number of
// bytes we can request from different crypto providers.
final byte[] result = new byte[len];
final SecureRandom secureRandom = getSecureRandom();
secureRandom.nextBytes(result);
return Result.create_Success(DafnySequence.fromBytes(result));
} catch (Exception e) {
return Result.create_Failure(ToDafny.Error(
OpaqueError.builder().obj(e).cause(e).message(e.getMessage()).build()));
}
}
}
// SecureRandom objects can both be expensive to initialize and incur synchronization costs.
// This allows us to minimize both initializations and keep SecureRandom usage thread local
// to avoid lock contention.
private static final ThreadLocal<SecureRandom> LOCAL_RANDOM =
ThreadLocal.withInitial(() -> {
//= compliance/data-format/message-header.txt#2.5.1.6
//# While
//# implementations cannot guarantee complete uniqueness, implementations
//# MUST use a good source of randomness when generating messages IDs in
//# order to make the chance of duplicate IDs negligible.
final SecureRandom rnd = new SecureRandom();
rnd.nextBoolean(); // Force seeding
return rnd;
});
public static SecureRandom getSecureRandom() {
return LOCAL_RANDOM.get();
}
}
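/*
 * Illustrative sketch (not part of the library): drawing 32 random bytes
 * through the Extern; each thread reuses its own seeded SecureRandom.
 */
class RandomSketch {
    public static void main(String[] args) {
        final Result<DafnySequence<? extends Byte>, Error> bytes =
            ExternRandom.__default.GenerateBytes(32);
        System.out.println(bytes.dtor_value().length()); // 32
    }
}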
| 2,986 |
0 |
Create_ds/aws-cryptographic-material-providers-library-java/AwsCryptographyPrimitives/runtimes/java/src/main/java
|
Create_ds/aws-cryptographic-material-providers-library-java/AwsCryptographyPrimitives/runtimes/java/src/main/java/AesKdfCtr/__default.java
|
package AesKdfCtr;
import java.security.GeneralSecurityException;
import javax.crypto.Cipher;
import javax.crypto.SecretKey;
import javax.crypto.spec.SecretKeySpec;
import javax.crypto.spec.IvParameterSpec;
import Wrappers_Compile.Result;
import dafny.Array;
import dafny.DafnySequence;
import software.amazon.cryptography.primitives.ToDafny;
import software.amazon.cryptography.primitives.model.OpaqueError;
public class __default {
public static Result<DafnySequence<? extends Byte>, software.amazon.cryptography.primitives.internaldafny.types.Error>
AesKdfCtrStream(DafnySequence<? extends Byte> iv, DafnySequence<? extends Byte> key, int length) {
byte[] keyBytes = (byte[]) Array.unwrap(key.toArray());
byte[] nonceBytes = (byte[]) Array.unwrap(iv.toArray());
byte[] plaintext = new byte[length];
try {
Cipher cipher = Cipher.getInstance("AES/CTR/NoPadding");
SecretKey secretKey = new SecretKeySpec(keyBytes, "AES");
IvParameterSpec ivSpec = new IvParameterSpec(nonceBytes);
cipher.init(Cipher.ENCRYPT_MODE, secretKey, ivSpec);
byte[] ciphertext = cipher.doFinal(plaintext);
return Result.create_Success(DafnySequence.fromBytes(ciphertext));
} catch (GeneralSecurityException e) {
return Result.create_Failure(ToDafny.Error(
OpaqueError.builder().obj(e).build())
);
}
}
}
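/*
 * Illustrative sketch (not part of the library): AesKdfCtrStream encrypts a
 * block of zero bytes, so its output is exactly the AES-CTR keystream for
 * the given key and IV; XOR-ing that keystream with any equal-length
 * plaintext matches encrypting the plaintext directly.
 */
class KeystreamSketch {
    public static void main(String[] args) throws GeneralSecurityException {
        final byte[] key = new byte[32];
        final byte[] iv = new byte[16];
        final byte[] msg = "sixteen byte msg".getBytes(java.nio.charset.StandardCharsets.UTF_8);
        final byte[] keystream = (byte[]) Array.unwrap(
            __default.AesKdfCtrStream(
                DafnySequence.fromBytes(iv), DafnySequence.fromBytes(key), msg.length)
            .dtor_value().toArray());
        final Cipher cipher = Cipher.getInstance("AES/CTR/NoPadding");
        cipher.init(Cipher.ENCRYPT_MODE, new SecretKeySpec(key, "AES"), new IvParameterSpec(iv));
        final byte[] ciphertext = cipher.doFinal(msg);
        for (int i = 0; i < msg.length; i++) {
            if (ciphertext[i] != (byte) (msg[i] ^ keystream[i])) {
                throw new IllegalStateException("keystream mismatch at byte " + i);
            }
        }
        System.out.println("keystream XOR matches direct CTR encryption");
    }
}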
| 2,987 |
0 |
Create_ds/aws-cryptographic-material-providers-library-java/AwsCryptographyPrimitives/runtimes/java/src/main/smithy-generated/software/amazon/cryptography
|
Create_ds/aws-cryptographic-material-providers-library-java/AwsCryptographyPrimitives/runtimes/java/src/main/smithy-generated/software/amazon/cryptography/primitives/ToNative.java
|
// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
// Do not modify this file. This file is machine generated, and any changes to it will be overwritten.
package software.amazon.cryptography.primitives;
import dafny.DafnySequence;
import java.lang.Boolean;
import java.lang.Byte;
import java.lang.IllegalArgumentException;
import java.lang.RuntimeException;
import java.nio.ByteBuffer;
import software.amazon.cryptography.primitives.internaldafny.types.AES__CTR;
import software.amazon.cryptography.primitives.internaldafny.types.AES__GCM;
import software.amazon.cryptography.primitives.internaldafny.types.Error;
import software.amazon.cryptography.primitives.internaldafny.types.Error_AwsCryptographicPrimitivesError;
import software.amazon.cryptography.primitives.internaldafny.types.Error_CollectionOfErrors;
import software.amazon.cryptography.primitives.internaldafny.types.Error_Opaque;
import software.amazon.cryptography.primitives.internaldafny.types.IAwsCryptographicPrimitivesClient;
import software.amazon.cryptography.primitives.model.AESDecryptInput;
import software.amazon.cryptography.primitives.model.AESEncryptInput;
import software.amazon.cryptography.primitives.model.AESEncryptOutput;
import software.amazon.cryptography.primitives.model.AES_CTR;
import software.amazon.cryptography.primitives.model.AES_GCM;
import software.amazon.cryptography.primitives.model.AesKdfCtrInput;
import software.amazon.cryptography.primitives.model.AwsCryptographicPrimitivesError;
import software.amazon.cryptography.primitives.model.CollectionOfErrors;
import software.amazon.cryptography.primitives.model.CryptoConfig;
import software.amazon.cryptography.primitives.model.DigestAlgorithm;
import software.amazon.cryptography.primitives.model.DigestInput;
import software.amazon.cryptography.primitives.model.ECDSASignInput;
import software.amazon.cryptography.primitives.model.ECDSASignatureAlgorithm;
import software.amazon.cryptography.primitives.model.ECDSAVerifyInput;
import software.amazon.cryptography.primitives.model.GenerateECDSASignatureKeyInput;
import software.amazon.cryptography.primitives.model.GenerateECDSASignatureKeyOutput;
import software.amazon.cryptography.primitives.model.GenerateRSAKeyPairInput;
import software.amazon.cryptography.primitives.model.GenerateRSAKeyPairOutput;
import software.amazon.cryptography.primitives.model.GenerateRandomBytesInput;
import software.amazon.cryptography.primitives.model.GetRSAKeyModulusLengthInput;
import software.amazon.cryptography.primitives.model.GetRSAKeyModulusLengthOutput;
import software.amazon.cryptography.primitives.model.HMacInput;
import software.amazon.cryptography.primitives.model.HkdfExpandInput;
import software.amazon.cryptography.primitives.model.HkdfExtractInput;
import software.amazon.cryptography.primitives.model.HkdfInput;
import software.amazon.cryptography.primitives.model.KdfCtrInput;
import software.amazon.cryptography.primitives.model.OpaqueError;
import software.amazon.cryptography.primitives.model.RSADecryptInput;
import software.amazon.cryptography.primitives.model.RSAEncryptInput;
import software.amazon.cryptography.primitives.model.RSAPaddingMode;
import software.amazon.cryptography.primitives.model.RSAPrivateKey;
import software.amazon.cryptography.primitives.model.RSAPublicKey;
public class ToNative {
public static OpaqueError Error(Error_Opaque dafnyValue) {
OpaqueError.Builder nativeBuilder = OpaqueError.builder();
nativeBuilder.obj(dafnyValue.dtor_obj());
return nativeBuilder.build();
}
public static CollectionOfErrors Error(Error_CollectionOfErrors dafnyValue) {
CollectionOfErrors.Builder nativeBuilder = CollectionOfErrors.builder();
nativeBuilder.list(
software.amazon.smithy.dafny.conversion.ToNative.Aggregate.GenericToList(
dafnyValue.dtor_list(),
ToNative::Error));
nativeBuilder.message(software.amazon.smithy.dafny.conversion.ToNative.Simple.String(dafnyValue.dtor_message()));
return nativeBuilder.build();
}
public static AwsCryptographicPrimitivesError Error(
Error_AwsCryptographicPrimitivesError dafnyValue) {
AwsCryptographicPrimitivesError.Builder nativeBuilder = AwsCryptographicPrimitivesError.builder();
nativeBuilder.message(software.amazon.smithy.dafny.conversion.ToNative.Simple.String(dafnyValue.dtor_message()));
return nativeBuilder.build();
}
public static RuntimeException Error(Error dafnyValue) {
if (dafnyValue.is_AwsCryptographicPrimitivesError()) {
return ToNative.Error((Error_AwsCryptographicPrimitivesError) dafnyValue);
}
if (dafnyValue.is_Opaque()) {
return ToNative.Error((Error_Opaque) dafnyValue);
}
if (dafnyValue.is_CollectionOfErrors()) {
return ToNative.Error((Error_CollectionOfErrors) dafnyValue);
}
OpaqueError.Builder nativeBuilder = OpaqueError.builder();
nativeBuilder.obj(dafnyValue);
return nativeBuilder.build();
}
public static AES_CTR AES_CTR(AES__CTR dafnyValue) {
AES_CTR.Builder nativeBuilder = AES_CTR.builder();
nativeBuilder.keyLength((dafnyValue.dtor_keyLength()));
nativeBuilder.nonceLength((dafnyValue.dtor_nonceLength()));
return nativeBuilder.build();
}
public static AES_GCM AES_GCM(AES__GCM dafnyValue) {
AES_GCM.Builder nativeBuilder = AES_GCM.builder();
nativeBuilder.keyLength((dafnyValue.dtor_keyLength()));
nativeBuilder.tagLength((dafnyValue.dtor_tagLength()));
nativeBuilder.ivLength((dafnyValue.dtor_ivLength()));
return nativeBuilder.build();
}
public static AESDecryptInput AESDecryptInput(
software.amazon.cryptography.primitives.internaldafny.types.AESDecryptInput dafnyValue) {
AESDecryptInput.Builder nativeBuilder = AESDecryptInput.builder();
nativeBuilder.encAlg(ToNative.AES_GCM(dafnyValue.dtor_encAlg()));
nativeBuilder.key(software.amazon.smithy.dafny.conversion.ToNative.Simple.ByteBuffer(dafnyValue.dtor_key()));
nativeBuilder.cipherTxt(software.amazon.smithy.dafny.conversion.ToNative.Simple.ByteBuffer(dafnyValue.dtor_cipherTxt()));
nativeBuilder.authTag(software.amazon.smithy.dafny.conversion.ToNative.Simple.ByteBuffer(dafnyValue.dtor_authTag()));
nativeBuilder.iv(software.amazon.smithy.dafny.conversion.ToNative.Simple.ByteBuffer(dafnyValue.dtor_iv()));
nativeBuilder.aad(software.amazon.smithy.dafny.conversion.ToNative.Simple.ByteBuffer(dafnyValue.dtor_aad()));
return nativeBuilder.build();
}
public static ByteBuffer AESDecryptOutput(DafnySequence<? extends Byte> dafnyValue) {
return software.amazon.smithy.dafny.conversion.ToNative.Simple.ByteBuffer(dafnyValue);
}
public static AESEncryptInput AESEncryptInput(
software.amazon.cryptography.primitives.internaldafny.types.AESEncryptInput dafnyValue) {
AESEncryptInput.Builder nativeBuilder = AESEncryptInput.builder();
nativeBuilder.encAlg(ToNative.AES_GCM(dafnyValue.dtor_encAlg()));
nativeBuilder.iv(software.amazon.smithy.dafny.conversion.ToNative.Simple.ByteBuffer(dafnyValue.dtor_iv()));
nativeBuilder.key(software.amazon.smithy.dafny.conversion.ToNative.Simple.ByteBuffer(dafnyValue.dtor_key()));
nativeBuilder.msg(software.amazon.smithy.dafny.conversion.ToNative.Simple.ByteBuffer(dafnyValue.dtor_msg()));
nativeBuilder.aad(software.amazon.smithy.dafny.conversion.ToNative.Simple.ByteBuffer(dafnyValue.dtor_aad()));
return nativeBuilder.build();
}
public static AESEncryptOutput AESEncryptOutput(
software.amazon.cryptography.primitives.internaldafny.types.AESEncryptOutput dafnyValue) {
AESEncryptOutput.Builder nativeBuilder = AESEncryptOutput.builder();
nativeBuilder.cipherText(software.amazon.smithy.dafny.conversion.ToNative.Simple.ByteBuffer(dafnyValue.dtor_cipherText()));
nativeBuilder.authTag(software.amazon.smithy.dafny.conversion.ToNative.Simple.ByteBuffer(dafnyValue.dtor_authTag()));
return nativeBuilder.build();
}
public static AesKdfCtrInput AesKdfCtrInput(
software.amazon.cryptography.primitives.internaldafny.types.AesKdfCtrInput dafnyValue) {
AesKdfCtrInput.Builder nativeBuilder = AesKdfCtrInput.builder();
nativeBuilder.ikm(software.amazon.smithy.dafny.conversion.ToNative.Simple.ByteBuffer(dafnyValue.dtor_ikm()));
nativeBuilder.expectedLength((dafnyValue.dtor_expectedLength()));
if (dafnyValue.dtor_nonce().is_Some()) {
nativeBuilder.nonce(software.amazon.smithy.dafny.conversion.ToNative.Simple.ByteBuffer(dafnyValue.dtor_nonce().dtor_value()));
}
return nativeBuilder.build();
}
public static ByteBuffer AesKdfCtrOutput(DafnySequence<? extends Byte> dafnyValue) {
return software.amazon.smithy.dafny.conversion.ToNative.Simple.ByteBuffer(dafnyValue);
}
public static CryptoConfig CryptoConfig(
software.amazon.cryptography.primitives.internaldafny.types.CryptoConfig dafnyValue) {
CryptoConfig.Builder nativeBuilder = CryptoConfig.builder();
return nativeBuilder.build();
}
public static DigestInput DigestInput(
software.amazon.cryptography.primitives.internaldafny.types.DigestInput dafnyValue) {
DigestInput.Builder nativeBuilder = DigestInput.builder();
nativeBuilder.digestAlgorithm(ToNative.DigestAlgorithm(dafnyValue.dtor_digestAlgorithm()));
nativeBuilder.message(software.amazon.smithy.dafny.conversion.ToNative.Simple.ByteBuffer(dafnyValue.dtor_message()));
return nativeBuilder.build();
}
public static ByteBuffer DigestOutput(DafnySequence<? extends Byte> dafnyValue) {
return software.amazon.smithy.dafny.conversion.ToNative.Simple.ByteBuffer(dafnyValue);
}
public static ECDSASignInput ECDSASignInput(
software.amazon.cryptography.primitives.internaldafny.types.ECDSASignInput dafnyValue) {
ECDSASignInput.Builder nativeBuilder = ECDSASignInput.builder();
nativeBuilder.signatureAlgorithm(ToNative.ECDSASignatureAlgorithm(dafnyValue.dtor_signatureAlgorithm()));
nativeBuilder.signingKey(software.amazon.smithy.dafny.conversion.ToNative.Simple.ByteBuffer(dafnyValue.dtor_signingKey()));
nativeBuilder.message(software.amazon.smithy.dafny.conversion.ToNative.Simple.ByteBuffer(dafnyValue.dtor_message()));
return nativeBuilder.build();
}
public static ByteBuffer ECDSASignOutput(DafnySequence<? extends Byte> dafnyValue) {
return software.amazon.smithy.dafny.conversion.ToNative.Simple.ByteBuffer(dafnyValue);
}
public static ECDSAVerifyInput ECDSAVerifyInput(
software.amazon.cryptography.primitives.internaldafny.types.ECDSAVerifyInput dafnyValue) {
ECDSAVerifyInput.Builder nativeBuilder = ECDSAVerifyInput.builder();
nativeBuilder.signatureAlgorithm(ToNative.ECDSASignatureAlgorithm(dafnyValue.dtor_signatureAlgorithm()));
nativeBuilder.verificationKey(software.amazon.smithy.dafny.conversion.ToNative.Simple.ByteBuffer(dafnyValue.dtor_verificationKey()));
nativeBuilder.message(software.amazon.smithy.dafny.conversion.ToNative.Simple.ByteBuffer(dafnyValue.dtor_message()));
nativeBuilder.signature(software.amazon.smithy.dafny.conversion.ToNative.Simple.ByteBuffer(dafnyValue.dtor_signature()));
return nativeBuilder.build();
}
public static Boolean ECDSAVerifyOutput(Boolean dafnyValue) {
return (dafnyValue);
}
public static GenerateECDSASignatureKeyInput GenerateECDSASignatureKeyInput(
software.amazon.cryptography.primitives.internaldafny.types.GenerateECDSASignatureKeyInput dafnyValue) {
GenerateECDSASignatureKeyInput.Builder nativeBuilder = GenerateECDSASignatureKeyInput.builder();
nativeBuilder.signatureAlgorithm(ToNative.ECDSASignatureAlgorithm(dafnyValue.dtor_signatureAlgorithm()));
return nativeBuilder.build();
}
public static GenerateECDSASignatureKeyOutput GenerateECDSASignatureKeyOutput(
software.amazon.cryptography.primitives.internaldafny.types.GenerateECDSASignatureKeyOutput dafnyValue) {
GenerateECDSASignatureKeyOutput.Builder nativeBuilder = GenerateECDSASignatureKeyOutput.builder();
nativeBuilder.signatureAlgorithm(ToNative.ECDSASignatureAlgorithm(dafnyValue.dtor_signatureAlgorithm()));
nativeBuilder.verificationKey(software.amazon.smithy.dafny.conversion.ToNative.Simple.ByteBuffer(dafnyValue.dtor_verificationKey()));
nativeBuilder.signingKey(software.amazon.smithy.dafny.conversion.ToNative.Simple.ByteBuffer(dafnyValue.dtor_signingKey()));
return nativeBuilder.build();
}
public static GenerateRandomBytesInput GenerateRandomBytesInput(
software.amazon.cryptography.primitives.internaldafny.types.GenerateRandomBytesInput dafnyValue) {
GenerateRandomBytesInput.Builder nativeBuilder = GenerateRandomBytesInput.builder();
nativeBuilder.length((dafnyValue.dtor_length()));
return nativeBuilder.build();
}
public static ByteBuffer GenerateRandomBytesOutput(DafnySequence<? extends Byte> dafnyValue) {
return software.amazon.smithy.dafny.conversion.ToNative.Simple.ByteBuffer(dafnyValue);
}
public static GenerateRSAKeyPairInput GenerateRSAKeyPairInput(
software.amazon.cryptography.primitives.internaldafny.types.GenerateRSAKeyPairInput dafnyValue) {
GenerateRSAKeyPairInput.Builder nativeBuilder = GenerateRSAKeyPairInput.builder();
nativeBuilder.lengthBits((dafnyValue.dtor_lengthBits()));
return nativeBuilder.build();
}
public static GenerateRSAKeyPairOutput GenerateRSAKeyPairOutput(
software.amazon.cryptography.primitives.internaldafny.types.GenerateRSAKeyPairOutput dafnyValue) {
GenerateRSAKeyPairOutput.Builder nativeBuilder = GenerateRSAKeyPairOutput.builder();
nativeBuilder.publicKey(ToNative.RSAPublicKey(dafnyValue.dtor_publicKey()));
nativeBuilder.privateKey(ToNative.RSAPrivateKey(dafnyValue.dtor_privateKey()));
return nativeBuilder.build();
}
public static GetRSAKeyModulusLengthInput GetRSAKeyModulusLengthInput(
software.amazon.cryptography.primitives.internaldafny.types.GetRSAKeyModulusLengthInput dafnyValue) {
GetRSAKeyModulusLengthInput.Builder nativeBuilder = GetRSAKeyModulusLengthInput.builder();
nativeBuilder.publicKey(software.amazon.smithy.dafny.conversion.ToNative.Simple.ByteBuffer(dafnyValue.dtor_publicKey()));
return nativeBuilder.build();
}
public static GetRSAKeyModulusLengthOutput GetRSAKeyModulusLengthOutput(
software.amazon.cryptography.primitives.internaldafny.types.GetRSAKeyModulusLengthOutput dafnyValue) {
GetRSAKeyModulusLengthOutput.Builder nativeBuilder = GetRSAKeyModulusLengthOutput.builder();
nativeBuilder.length((dafnyValue.dtor_length()));
return nativeBuilder.build();
}
public static HkdfExpandInput HkdfExpandInput(
software.amazon.cryptography.primitives.internaldafny.types.HkdfExpandInput dafnyValue) {
HkdfExpandInput.Builder nativeBuilder = HkdfExpandInput.builder();
nativeBuilder.digestAlgorithm(ToNative.DigestAlgorithm(dafnyValue.dtor_digestAlgorithm()));
nativeBuilder.prk(software.amazon.smithy.dafny.conversion.ToNative.Simple.ByteBuffer(dafnyValue.dtor_prk()));
nativeBuilder.info(software.amazon.smithy.dafny.conversion.ToNative.Simple.ByteBuffer(dafnyValue.dtor_info()));
nativeBuilder.expectedLength((dafnyValue.dtor_expectedLength()));
return nativeBuilder.build();
}
public static ByteBuffer HkdfExpandOutput(DafnySequence<? extends Byte> dafnyValue) {
return software.amazon.smithy.dafny.conversion.ToNative.Simple.ByteBuffer(dafnyValue);
}
public static HkdfExtractInput HkdfExtractInput(
software.amazon.cryptography.primitives.internaldafny.types.HkdfExtractInput dafnyValue) {
HkdfExtractInput.Builder nativeBuilder = HkdfExtractInput.builder();
nativeBuilder.digestAlgorithm(ToNative.DigestAlgorithm(dafnyValue.dtor_digestAlgorithm()));
if (dafnyValue.dtor_salt().is_Some()) {
nativeBuilder.salt(software.amazon.smithy.dafny.conversion.ToNative.Simple.ByteBuffer(dafnyValue.dtor_salt().dtor_value()));
}
nativeBuilder.ikm(software.amazon.smithy.dafny.conversion.ToNative.Simple.ByteBuffer(dafnyValue.dtor_ikm()));
return nativeBuilder.build();
}
public static ByteBuffer HkdfExtractOutput(DafnySequence<? extends Byte> dafnyValue) {
return software.amazon.smithy.dafny.conversion.ToNative.Simple.ByteBuffer(dafnyValue);
}
public static HkdfInput HkdfInput(
software.amazon.cryptography.primitives.internaldafny.types.HkdfInput dafnyValue) {
HkdfInput.Builder nativeBuilder = HkdfInput.builder();
nativeBuilder.digestAlgorithm(ToNative.DigestAlgorithm(dafnyValue.dtor_digestAlgorithm()));
if (dafnyValue.dtor_salt().is_Some()) {
nativeBuilder.salt(software.amazon.smithy.dafny.conversion.ToNative.Simple.ByteBuffer(dafnyValue.dtor_salt().dtor_value()));
}
nativeBuilder.ikm(software.amazon.smithy.dafny.conversion.ToNative.Simple.ByteBuffer(dafnyValue.dtor_ikm()));
nativeBuilder.info(software.amazon.smithy.dafny.conversion.ToNative.Simple.ByteBuffer(dafnyValue.dtor_info()));
nativeBuilder.expectedLength((dafnyValue.dtor_expectedLength()));
return nativeBuilder.build();
}
public static ByteBuffer HkdfOutput(DafnySequence<? extends Byte> dafnyValue) {
return software.amazon.smithy.dafny.conversion.ToNative.Simple.ByteBuffer(dafnyValue);
}
public static HMacInput HMacInput(
software.amazon.cryptography.primitives.internaldafny.types.HMacInput dafnyValue) {
HMacInput.Builder nativeBuilder = HMacInput.builder();
nativeBuilder.digestAlgorithm(ToNative.DigestAlgorithm(dafnyValue.dtor_digestAlgorithm()));
nativeBuilder.key(software.amazon.smithy.dafny.conversion.ToNative.Simple.ByteBuffer(dafnyValue.dtor_key()));
nativeBuilder.message(software.amazon.smithy.dafny.conversion.ToNative.Simple.ByteBuffer(dafnyValue.dtor_message()));
return nativeBuilder.build();
}
public static ByteBuffer HMacOutput(DafnySequence<? extends Byte> dafnyValue) {
return software.amazon.smithy.dafny.conversion.ToNative.Simple.ByteBuffer(dafnyValue);
}
public static KdfCtrInput KdfCtrInput(
software.amazon.cryptography.primitives.internaldafny.types.KdfCtrInput dafnyValue) {
KdfCtrInput.Builder nativeBuilder = KdfCtrInput.builder();
nativeBuilder.digestAlgorithm(ToNative.DigestAlgorithm(dafnyValue.dtor_digestAlgorithm()));
nativeBuilder.ikm(software.amazon.smithy.dafny.conversion.ToNative.Simple.ByteBuffer(dafnyValue.dtor_ikm()));
nativeBuilder.expectedLength((dafnyValue.dtor_expectedLength()));
if (dafnyValue.dtor_purpose().is_Some()) {
nativeBuilder.purpose(software.amazon.smithy.dafny.conversion.ToNative.Simple.ByteBuffer(dafnyValue.dtor_purpose().dtor_value()));
}
if (dafnyValue.dtor_nonce().is_Some()) {
nativeBuilder.nonce(software.amazon.smithy.dafny.conversion.ToNative.Simple.ByteBuffer(dafnyValue.dtor_nonce().dtor_value()));
}
return nativeBuilder.build();
}
public static ByteBuffer KdfCtrOutput(DafnySequence<? extends Byte> dafnyValue) {
return software.amazon.smithy.dafny.conversion.ToNative.Simple.ByteBuffer(dafnyValue);
}
public static RSADecryptInput RSADecryptInput(
software.amazon.cryptography.primitives.internaldafny.types.RSADecryptInput dafnyValue) {
RSADecryptInput.Builder nativeBuilder = RSADecryptInput.builder();
nativeBuilder.padding(ToNative.RSAPaddingMode(dafnyValue.dtor_padding()));
nativeBuilder.privateKey(software.amazon.smithy.dafny.conversion.ToNative.Simple.ByteBuffer(dafnyValue.dtor_privateKey()));
nativeBuilder.cipherText(software.amazon.smithy.dafny.conversion.ToNative.Simple.ByteBuffer(dafnyValue.dtor_cipherText()));
return nativeBuilder.build();
}
public static ByteBuffer RSADecryptOutput(DafnySequence<? extends Byte> dafnyValue) {
return software.amazon.smithy.dafny.conversion.ToNative.Simple.ByteBuffer(dafnyValue);
}
public static RSAEncryptInput RSAEncryptInput(
software.amazon.cryptography.primitives.internaldafny.types.RSAEncryptInput dafnyValue) {
RSAEncryptInput.Builder nativeBuilder = RSAEncryptInput.builder();
nativeBuilder.padding(ToNative.RSAPaddingMode(dafnyValue.dtor_padding()));
nativeBuilder.publicKey(software.amazon.smithy.dafny.conversion.ToNative.Simple.ByteBuffer(dafnyValue.dtor_publicKey()));
nativeBuilder.plaintext(software.amazon.smithy.dafny.conversion.ToNative.Simple.ByteBuffer(dafnyValue.dtor_plaintext()));
return nativeBuilder.build();
}
public static ByteBuffer RSAEncryptOutput(DafnySequence<? extends Byte> dafnyValue) {
return software.amazon.smithy.dafny.conversion.ToNative.Simple.ByteBuffer(dafnyValue);
}
public static RSAPrivateKey RSAPrivateKey(
software.amazon.cryptography.primitives.internaldafny.types.RSAPrivateKey dafnyValue) {
RSAPrivateKey.Builder nativeBuilder = RSAPrivateKey.builder();
nativeBuilder.lengthBits((dafnyValue.dtor_lengthBits()));
nativeBuilder.pem(software.amazon.smithy.dafny.conversion.ToNative.Simple.ByteBuffer(dafnyValue.dtor_pem()));
return nativeBuilder.build();
}
public static RSAPublicKey RSAPublicKey(
software.amazon.cryptography.primitives.internaldafny.types.RSAPublicKey dafnyValue) {
RSAPublicKey.Builder nativeBuilder = RSAPublicKey.builder();
nativeBuilder.lengthBits((dafnyValue.dtor_lengthBits()));
nativeBuilder.pem(software.amazon.smithy.dafny.conversion.ToNative.Simple.ByteBuffer(dafnyValue.dtor_pem()));
return nativeBuilder.build();
}
public static DigestAlgorithm DigestAlgorithm(
software.amazon.cryptography.primitives.internaldafny.types.DigestAlgorithm dafnyValue) {
if (dafnyValue.is_SHA__512()) {
return DigestAlgorithm.SHA_512;
}
if (dafnyValue.is_SHA__384()) {
return DigestAlgorithm.SHA_384;
}
if (dafnyValue.is_SHA__256()) {
return DigestAlgorithm.SHA_256;
}
throw new IllegalArgumentException("No entry of software.amazon.cryptography.primitives.model.DigestAlgorithm matches the input : " + dafnyValue);
}
public static ECDSASignatureAlgorithm ECDSASignatureAlgorithm(
software.amazon.cryptography.primitives.internaldafny.types.ECDSASignatureAlgorithm dafnyValue) {
if (dafnyValue.is_ECDSA__P384()) {
return ECDSASignatureAlgorithm.ECDSA_P384;
}
if (dafnyValue.is_ECDSA__P256()) {
return ECDSASignatureAlgorithm.ECDSA_P256;
}
throw new IllegalArgumentException("No entry of software.amazon.cryptography.primitives.model.ECDSASignatureAlgorithm matches the input : " + dafnyValue);
}
public static RSAPaddingMode RSAPaddingMode(
software.amazon.cryptography.primitives.internaldafny.types.RSAPaddingMode dafnyValue) {
if (dafnyValue.is_PKCS1()) {
return RSAPaddingMode.PKCS1;
}
if (dafnyValue.is_OAEP__SHA1()) {
return RSAPaddingMode.OAEP_SHA1;
}
if (dafnyValue.is_OAEP__SHA256()) {
return RSAPaddingMode.OAEP_SHA256;
}
if (dafnyValue.is_OAEP__SHA384()) {
return RSAPaddingMode.OAEP_SHA384;
}
if (dafnyValue.is_OAEP__SHA512()) {
return RSAPaddingMode.OAEP_SHA512;
}
throw new IllegalArgumentException("No entry of software.amazon.cryptography.primitives.model.RSAPaddingMode matches the input : " + dafnyValue);
}
public static AtomicPrimitives AwsCryptographicPrimitives(
IAwsCryptographicPrimitivesClient dafnyValue) {
return new AtomicPrimitives(dafnyValue);
}
}
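// Illustrative sketch (not part of the generated file): round-trips an enum value
// through the ToDafny/ToNative converter pair in this package. Assumes only the
// companion ToDafny class (shown later in this dump) is on the classpath; fully
// qualified names are used because ToNative.java's import list is not visible here.
class ToNativeRoundTripExample {
  public static void main(String[] args) {
    software.amazon.cryptography.primitives.model.DigestAlgorithm nativeAlg =
        software.amazon.cryptography.primitives.model.DigestAlgorithm.SHA_256;
    // Native -> Dafny: the generated switch maps SHA_256 to create_SHA__256().
    software.amazon.cryptography.primitives.internaldafny.types.DigestAlgorithm dafnyAlg =
        ToDafny.DigestAlgorithm(nativeAlg);
    // Dafny -> Native: the is_SHA__256() branch above recovers the same constant.
    software.amazon.cryptography.primitives.model.DigestAlgorithm roundTripped =
        ToNative.DigestAlgorithm(dafnyAlg);
    System.out.println("round trip preserved value: " + (roundTripped == nativeAlg));
  }
}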
| 2,988 |
0 |
Create_ds/aws-cryptographic-material-providers-library-java/AwsCryptographyPrimitives/runtimes/java/src/main/smithy-generated/software/amazon/cryptography
|
Create_ds/aws-cryptographic-material-providers-library-java/AwsCryptographyPrimitives/runtimes/java/src/main/smithy-generated/software/amazon/cryptography/primitives/AtomicPrimitives.java
|
// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
// Do not modify this file. This file is machine generated, and any changes to it will be overwritten.
package software.amazon.cryptography.primitives;
import Wrappers_Compile.Result;
import dafny.DafnySequence;
import java.lang.Boolean;
import java.lang.Byte;
import java.lang.IllegalArgumentException;
import java.nio.ByteBuffer;
import java.util.Objects;
import software.amazon.cryptography.primitives.internaldafny.AtomicPrimitivesClient;
import software.amazon.cryptography.primitives.internaldafny.__default;
import software.amazon.cryptography.primitives.internaldafny.types.Error;
import software.amazon.cryptography.primitives.internaldafny.types.IAwsCryptographicPrimitivesClient;
import software.amazon.cryptography.primitives.model.AESDecryptInput;
import software.amazon.cryptography.primitives.model.AESEncryptInput;
import software.amazon.cryptography.primitives.model.AESEncryptOutput;
import software.amazon.cryptography.primitives.model.AesKdfCtrInput;
import software.amazon.cryptography.primitives.model.CryptoConfig;
import software.amazon.cryptography.primitives.model.DigestInput;
import software.amazon.cryptography.primitives.model.ECDSASignInput;
import software.amazon.cryptography.primitives.model.ECDSAVerifyInput;
import software.amazon.cryptography.primitives.model.GenerateECDSASignatureKeyInput;
import software.amazon.cryptography.primitives.model.GenerateECDSASignatureKeyOutput;
import software.amazon.cryptography.primitives.model.GenerateRSAKeyPairInput;
import software.amazon.cryptography.primitives.model.GenerateRSAKeyPairOutput;
import software.amazon.cryptography.primitives.model.GenerateRandomBytesInput;
import software.amazon.cryptography.primitives.model.GetRSAKeyModulusLengthInput;
import software.amazon.cryptography.primitives.model.GetRSAKeyModulusLengthOutput;
import software.amazon.cryptography.primitives.model.HMacInput;
import software.amazon.cryptography.primitives.model.HkdfExpandInput;
import software.amazon.cryptography.primitives.model.HkdfExtractInput;
import software.amazon.cryptography.primitives.model.HkdfInput;
import software.amazon.cryptography.primitives.model.KdfCtrInput;
import software.amazon.cryptography.primitives.model.RSADecryptInput;
import software.amazon.cryptography.primitives.model.RSAEncryptInput;
public class AtomicPrimitives {
private final IAwsCryptographicPrimitivesClient _impl;
protected AtomicPrimitives(BuilderImpl builder) {
CryptoConfig input = builder.CryptoConfig();
software.amazon.cryptography.primitives.internaldafny.types.CryptoConfig dafnyValue = ToDafny.CryptoConfig(input);
Result<AtomicPrimitivesClient, Error> result = __default.AtomicPrimitives(dafnyValue);
if (result.is_Failure()) {
throw ToNative.Error(result.dtor_error());
}
this._impl = result.dtor_value();
}
AtomicPrimitives(IAwsCryptographicPrimitivesClient impl) {
this._impl = impl;
}
public static Builder builder() {
return new BuilderImpl();
}
public ByteBuffer AESDecrypt(AESDecryptInput input) {
software.amazon.cryptography.primitives.internaldafny.types.AESDecryptInput dafnyValue = ToDafny.AESDecryptInput(input);
Result<DafnySequence<? extends Byte>, Error> result = this._impl.AESDecrypt(dafnyValue);
if (result.is_Failure()) {
throw ToNative.Error(result.dtor_error());
}
return software.amazon.smithy.dafny.conversion.ToNative.Simple.ByteBuffer(result.dtor_value());
}
public AESEncryptOutput AESEncrypt(AESEncryptInput input) {
software.amazon.cryptography.primitives.internaldafny.types.AESEncryptInput dafnyValue = ToDafny.AESEncryptInput(input);
Result<software.amazon.cryptography.primitives.internaldafny.types.AESEncryptOutput, Error> result = this._impl.AESEncrypt(dafnyValue);
if (result.is_Failure()) {
throw ToNative.Error(result.dtor_error());
}
return ToNative.AESEncryptOutput(result.dtor_value());
}
public ByteBuffer AesKdfCounterMode(AesKdfCtrInput input) {
software.amazon.cryptography.primitives.internaldafny.types.AesKdfCtrInput dafnyValue = ToDafny.AesKdfCtrInput(input);
Result<DafnySequence<? extends Byte>, Error> result = this._impl.AesKdfCounterMode(dafnyValue);
if (result.is_Failure()) {
throw ToNative.Error(result.dtor_error());
}
return software.amazon.smithy.dafny.conversion.ToNative.Simple.ByteBuffer(result.dtor_value());
}
public ByteBuffer Digest(DigestInput input) {
software.amazon.cryptography.primitives.internaldafny.types.DigestInput dafnyValue = ToDafny.DigestInput(input);
Result<DafnySequence<? extends Byte>, Error> result = this._impl.Digest(dafnyValue);
if (result.is_Failure()) {
throw ToNative.Error(result.dtor_error());
}
return software.amazon.smithy.dafny.conversion.ToNative.Simple.ByteBuffer(result.dtor_value());
}
public ByteBuffer ECDSASign(ECDSASignInput input) {
software.amazon.cryptography.primitives.internaldafny.types.ECDSASignInput dafnyValue = ToDafny.ECDSASignInput(input);
Result<DafnySequence<? extends Byte>, Error> result = this._impl.ECDSASign(dafnyValue);
if (result.is_Failure()) {
throw ToNative.Error(result.dtor_error());
}
return software.amazon.smithy.dafny.conversion.ToNative.Simple.ByteBuffer(result.dtor_value());
}
public Boolean ECDSAVerify(ECDSAVerifyInput input) {
software.amazon.cryptography.primitives.internaldafny.types.ECDSAVerifyInput dafnyValue = ToDafny.ECDSAVerifyInput(input);
Result<Boolean, Error> result = this._impl.ECDSAVerify(dafnyValue);
if (result.is_Failure()) {
throw ToNative.Error(result.dtor_error());
}
return (result.dtor_value());
}
public GenerateECDSASignatureKeyOutput GenerateECDSASignatureKey(
GenerateECDSASignatureKeyInput input) {
software.amazon.cryptography.primitives.internaldafny.types.GenerateECDSASignatureKeyInput dafnyValue = ToDafny.GenerateECDSASignatureKeyInput(input);
Result<software.amazon.cryptography.primitives.internaldafny.types.GenerateECDSASignatureKeyOutput, Error> result = this._impl.GenerateECDSASignatureKey(dafnyValue);
if (result.is_Failure()) {
throw ToNative.Error(result.dtor_error());
}
return ToNative.GenerateECDSASignatureKeyOutput(result.dtor_value());
}
public ByteBuffer GenerateRandomBytes(GenerateRandomBytesInput input) {
software.amazon.cryptography.primitives.internaldafny.types.GenerateRandomBytesInput dafnyValue = ToDafny.GenerateRandomBytesInput(input);
Result<DafnySequence<? extends Byte>, Error> result = this._impl.GenerateRandomBytes(dafnyValue);
if (result.is_Failure()) {
throw ToNative.Error(result.dtor_error());
}
return software.amazon.smithy.dafny.conversion.ToNative.Simple.ByteBuffer(result.dtor_value());
}
public GenerateRSAKeyPairOutput GenerateRSAKeyPair(GenerateRSAKeyPairInput input) {
software.amazon.cryptography.primitives.internaldafny.types.GenerateRSAKeyPairInput dafnyValue = ToDafny.GenerateRSAKeyPairInput(input);
Result<software.amazon.cryptography.primitives.internaldafny.types.GenerateRSAKeyPairOutput, Error> result = this._impl.GenerateRSAKeyPair(dafnyValue);
if (result.is_Failure()) {
throw ToNative.Error(result.dtor_error());
}
return ToNative.GenerateRSAKeyPairOutput(result.dtor_value());
}
public GetRSAKeyModulusLengthOutput GetRSAKeyModulusLength(GetRSAKeyModulusLengthInput input) {
software.amazon.cryptography.primitives.internaldafny.types.GetRSAKeyModulusLengthInput dafnyValue = ToDafny.GetRSAKeyModulusLengthInput(input);
Result<software.amazon.cryptography.primitives.internaldafny.types.GetRSAKeyModulusLengthOutput, Error> result = this._impl.GetRSAKeyModulusLength(dafnyValue);
if (result.is_Failure()) {
throw ToNative.Error(result.dtor_error());
}
return ToNative.GetRSAKeyModulusLengthOutput(result.dtor_value());
}
public ByteBuffer Hkdf(HkdfInput input) {
software.amazon.cryptography.primitives.internaldafny.types.HkdfInput dafnyValue = ToDafny.HkdfInput(input);
Result<DafnySequence<? extends Byte>, Error> result = this._impl.Hkdf(dafnyValue);
if (result.is_Failure()) {
throw ToNative.Error(result.dtor_error());
}
return software.amazon.smithy.dafny.conversion.ToNative.Simple.ByteBuffer(result.dtor_value());
}
public ByteBuffer HkdfExpand(HkdfExpandInput input) {
software.amazon.cryptography.primitives.internaldafny.types.HkdfExpandInput dafnyValue = ToDafny.HkdfExpandInput(input);
Result<DafnySequence<? extends Byte>, Error> result = this._impl.HkdfExpand(dafnyValue);
if (result.is_Failure()) {
throw ToNative.Error(result.dtor_error());
}
return software.amazon.smithy.dafny.conversion.ToNative.Simple.ByteBuffer(result.dtor_value());
}
public ByteBuffer HkdfExtract(HkdfExtractInput input) {
software.amazon.cryptography.primitives.internaldafny.types.HkdfExtractInput dafnyValue = ToDafny.HkdfExtractInput(input);
Result<DafnySequence<? extends Byte>, Error> result = this._impl.HkdfExtract(dafnyValue);
if (result.is_Failure()) {
throw ToNative.Error(result.dtor_error());
}
return software.amazon.smithy.dafny.conversion.ToNative.Simple.ByteBuffer(result.dtor_value());
}
public ByteBuffer HMac(HMacInput input) {
software.amazon.cryptography.primitives.internaldafny.types.HMacInput dafnyValue = ToDafny.HMacInput(input);
Result<DafnySequence<? extends Byte>, Error> result = this._impl.HMac(dafnyValue);
if (result.is_Failure()) {
throw ToNative.Error(result.dtor_error());
}
return software.amazon.smithy.dafny.conversion.ToNative.Simple.ByteBuffer(result.dtor_value());
}
public ByteBuffer KdfCounterMode(KdfCtrInput input) {
software.amazon.cryptography.primitives.internaldafny.types.KdfCtrInput dafnyValue = ToDafny.KdfCtrInput(input);
Result<DafnySequence<? extends Byte>, Error> result = this._impl.KdfCounterMode(dafnyValue);
if (result.is_Failure()) {
throw ToNative.Error(result.dtor_error());
}
return software.amazon.smithy.dafny.conversion.ToNative.Simple.ByteBuffer(result.dtor_value());
}
public ByteBuffer RSADecrypt(RSADecryptInput input) {
software.amazon.cryptography.primitives.internaldafny.types.RSADecryptInput dafnyValue = ToDafny.RSADecryptInput(input);
Result<DafnySequence<? extends Byte>, Error> result = this._impl.RSADecrypt(dafnyValue);
if (result.is_Failure()) {
throw ToNative.Error(result.dtor_error());
}
return software.amazon.smithy.dafny.conversion.ToNative.Simple.ByteBuffer(result.dtor_value());
}
public ByteBuffer RSAEncrypt(RSAEncryptInput input) {
software.amazon.cryptography.primitives.internaldafny.types.RSAEncryptInput dafnyValue = ToDafny.RSAEncryptInput(input);
Result<DafnySequence<? extends Byte>, Error> result = this._impl.RSAEncrypt(dafnyValue);
if (result.is_Failure()) {
throw ToNative.Error(result.dtor_error());
}
return software.amazon.smithy.dafny.conversion.ToNative.Simple.ByteBuffer(result.dtor_value());
}
protected IAwsCryptographicPrimitivesClient impl() {
return this._impl;
}
public interface Builder {
Builder CryptoConfig(CryptoConfig CryptoConfig);
CryptoConfig CryptoConfig();
AtomicPrimitives build();
}
static class BuilderImpl implements Builder {
protected CryptoConfig CryptoConfig;
protected BuilderImpl() {
}
public Builder CryptoConfig(CryptoConfig CryptoConfig) {
this.CryptoConfig = CryptoConfig;
return this;
}
public CryptoConfig CryptoConfig() {
return this.CryptoConfig;
}
public AtomicPrimitives build() {
if (Objects.isNull(this.CryptoConfig())) {
throw new IllegalArgumentException("Missing value for required field `CryptoConfig`");
}
return new AtomicPrimitives(this);
}
}
}
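// Illustrative sketch (not part of the generated file): builds the client and
// exercises two operations using only the builders shown above. DigestInput's
// builder methods are assumed to follow the same generated pattern as the other
// model classes in this dump; input values are placeholders, not recommendations.
class AtomicPrimitivesExample {
  public static void main(String[] args) {
    AtomicPrimitives client = AtomicPrimitives.builder()
        .CryptoConfig(CryptoConfig.builder().build())
        .build();
    // GenerateRandomBytes unwraps the Dafny Result and returns raw bytes.
    ByteBuffer random = client.GenerateRandomBytes(
        GenerateRandomBytesInput.builder().length(32).build());
    // Digest the random bytes with SHA-256 (DigestAlgorithm is not imported in
    // this file, so it is fully qualified here).
    ByteBuffer digest = client.Digest(
        DigestInput.builder()
            .digestAlgorithm(software.amazon.cryptography.primitives.model.DigestAlgorithm.SHA_256)
            .message(random)
            .build());
    System.out.println("digest length: " + digest.remaining());
  }
}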
| 2,989 |
0 |
Create_ds/aws-cryptographic-material-providers-library-java/AwsCryptographyPrimitives/runtimes/java/src/main/smithy-generated/software/amazon/cryptography
|
Create_ds/aws-cryptographic-material-providers-library-java/AwsCryptographyPrimitives/runtimes/java/src/main/smithy-generated/software/amazon/cryptography/primitives/ToDafny.java
|
// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
// Do not modify this file. This file is machine generated, and any changes to it will be overwritten.
package software.amazon.cryptography.primitives;
import Wrappers_Compile.Option;
import dafny.DafnySequence;
import java.lang.Boolean;
import java.lang.Byte;
import java.lang.Character;
import java.lang.Integer;
import java.lang.RuntimeException;
import java.nio.ByteBuffer;
import java.util.Objects;
import software.amazon.cryptography.primitives.internaldafny.types.AESDecryptInput;
import software.amazon.cryptography.primitives.internaldafny.types.AESEncryptInput;
import software.amazon.cryptography.primitives.internaldafny.types.AESEncryptOutput;
import software.amazon.cryptography.primitives.internaldafny.types.AES__CTR;
import software.amazon.cryptography.primitives.internaldafny.types.AES__GCM;
import software.amazon.cryptography.primitives.internaldafny.types.AesKdfCtrInput;
import software.amazon.cryptography.primitives.internaldafny.types.CryptoConfig;
import software.amazon.cryptography.primitives.internaldafny.types.DigestAlgorithm;
import software.amazon.cryptography.primitives.internaldafny.types.DigestInput;
import software.amazon.cryptography.primitives.internaldafny.types.ECDSASignInput;
import software.amazon.cryptography.primitives.internaldafny.types.ECDSASignatureAlgorithm;
import software.amazon.cryptography.primitives.internaldafny.types.ECDSAVerifyInput;
import software.amazon.cryptography.primitives.internaldafny.types.Error;
import software.amazon.cryptography.primitives.internaldafny.types.Error_AwsCryptographicPrimitivesError;
import software.amazon.cryptography.primitives.internaldafny.types.GenerateECDSASignatureKeyInput;
import software.amazon.cryptography.primitives.internaldafny.types.GenerateECDSASignatureKeyOutput;
import software.amazon.cryptography.primitives.internaldafny.types.GenerateRSAKeyPairInput;
import software.amazon.cryptography.primitives.internaldafny.types.GenerateRSAKeyPairOutput;
import software.amazon.cryptography.primitives.internaldafny.types.GenerateRandomBytesInput;
import software.amazon.cryptography.primitives.internaldafny.types.GetRSAKeyModulusLengthInput;
import software.amazon.cryptography.primitives.internaldafny.types.GetRSAKeyModulusLengthOutput;
import software.amazon.cryptography.primitives.internaldafny.types.HMacInput;
import software.amazon.cryptography.primitives.internaldafny.types.HkdfExpandInput;
import software.amazon.cryptography.primitives.internaldafny.types.HkdfExtractInput;
import software.amazon.cryptography.primitives.internaldafny.types.HkdfInput;
import software.amazon.cryptography.primitives.internaldafny.types.IAwsCryptographicPrimitivesClient;
import software.amazon.cryptography.primitives.internaldafny.types.KdfCtrInput;
import software.amazon.cryptography.primitives.internaldafny.types.RSADecryptInput;
import software.amazon.cryptography.primitives.internaldafny.types.RSAEncryptInput;
import software.amazon.cryptography.primitives.internaldafny.types.RSAPaddingMode;
import software.amazon.cryptography.primitives.internaldafny.types.RSAPrivateKey;
import software.amazon.cryptography.primitives.internaldafny.types.RSAPublicKey;
import software.amazon.cryptography.primitives.model.AES_CTR;
import software.amazon.cryptography.primitives.model.AES_GCM;
import software.amazon.cryptography.primitives.model.AwsCryptographicPrimitivesError;
import software.amazon.cryptography.primitives.model.CollectionOfErrors;
import software.amazon.cryptography.primitives.model.OpaqueError;
public class ToDafny {
public static Error Error(RuntimeException nativeValue) {
if (nativeValue instanceof AwsCryptographicPrimitivesError) {
return ToDafny.Error((AwsCryptographicPrimitivesError) nativeValue);
}
if (nativeValue instanceof OpaqueError) {
return ToDafny.Error((OpaqueError) nativeValue);
}
if (nativeValue instanceof CollectionOfErrors) {
return ToDafny.Error((CollectionOfErrors) nativeValue);
}
return Error.create_Opaque(nativeValue);
}
public static Error Error(OpaqueError nativeValue) {
return Error.create_Opaque(nativeValue.obj());
}
public static Error Error(CollectionOfErrors nativeValue) {
DafnySequence<? extends Error> list = software.amazon.smithy.dafny.conversion.ToDafny.Aggregate.GenericToSequence(
nativeValue.list(),
ToDafny::Error,
Error._typeDescriptor());
DafnySequence<? extends Character> message = software.amazon.smithy.dafny.conversion.ToDafny.Simple.CharacterSequence(nativeValue.getMessage());
return Error.create_CollectionOfErrors(list, message);
}
public static AES__CTR AES_CTR(AES_CTR nativeValue) {
Integer keyLength;
keyLength = (nativeValue.keyLength());
Integer nonceLength;
nonceLength = (nativeValue.nonceLength());
return new AES__CTR(keyLength, nonceLength);
}
public static AES__GCM AES_GCM(AES_GCM nativeValue) {
Integer keyLength;
keyLength = (nativeValue.keyLength());
Integer tagLength;
tagLength = (nativeValue.tagLength());
Integer ivLength;
ivLength = (nativeValue.ivLength());
return new AES__GCM(keyLength, tagLength, ivLength);
}
public static AESDecryptInput AESDecryptInput(
software.amazon.cryptography.primitives.model.AESDecryptInput nativeValue) {
AES__GCM encAlg;
encAlg = ToDafny.AES_GCM(nativeValue.encAlg());
DafnySequence<? extends Byte> key;
key = software.amazon.smithy.dafny.conversion.ToDafny.Simple.ByteSequence(nativeValue.key());
DafnySequence<? extends Byte> cipherTxt;
cipherTxt = software.amazon.smithy.dafny.conversion.ToDafny.Simple.ByteSequence(nativeValue.cipherTxt());
DafnySequence<? extends Byte> authTag;
authTag = software.amazon.smithy.dafny.conversion.ToDafny.Simple.ByteSequence(nativeValue.authTag());
DafnySequence<? extends Byte> iv;
iv = software.amazon.smithy.dafny.conversion.ToDafny.Simple.ByteSequence(nativeValue.iv());
DafnySequence<? extends Byte> aad;
aad = software.amazon.smithy.dafny.conversion.ToDafny.Simple.ByteSequence(nativeValue.aad());
return new AESDecryptInput(encAlg, key, cipherTxt, authTag, iv, aad);
}
public static DafnySequence<? extends Byte> AESDecryptOutput(ByteBuffer nativeValue) {
DafnySequence<? extends Byte> plaintext;
plaintext = software.amazon.smithy.dafny.conversion.ToDafny.Simple.ByteSequence(nativeValue);
return plaintext;
}
public static AESEncryptInput AESEncryptInput(
software.amazon.cryptography.primitives.model.AESEncryptInput nativeValue) {
AES__GCM encAlg;
encAlg = ToDafny.AES_GCM(nativeValue.encAlg());
DafnySequence<? extends Byte> iv;
iv = software.amazon.smithy.dafny.conversion.ToDafny.Simple.ByteSequence(nativeValue.iv());
DafnySequence<? extends Byte> key;
key = software.amazon.smithy.dafny.conversion.ToDafny.Simple.ByteSequence(nativeValue.key());
DafnySequence<? extends Byte> msg;
msg = software.amazon.smithy.dafny.conversion.ToDafny.Simple.ByteSequence(nativeValue.msg());
DafnySequence<? extends Byte> aad;
aad = software.amazon.smithy.dafny.conversion.ToDafny.Simple.ByteSequence(nativeValue.aad());
return new AESEncryptInput(encAlg, iv, key, msg, aad);
}
public static AESEncryptOutput AESEncryptOutput(
software.amazon.cryptography.primitives.model.AESEncryptOutput nativeValue) {
DafnySequence<? extends Byte> cipherText;
cipherText = software.amazon.smithy.dafny.conversion.ToDafny.Simple.ByteSequence(nativeValue.cipherText());
DafnySequence<? extends Byte> authTag;
authTag = software.amazon.smithy.dafny.conversion.ToDafny.Simple.ByteSequence(nativeValue.authTag());
return new AESEncryptOutput(cipherText, authTag);
}
public static AesKdfCtrInput AesKdfCtrInput(
software.amazon.cryptography.primitives.model.AesKdfCtrInput nativeValue) {
DafnySequence<? extends Byte> ikm;
ikm = software.amazon.smithy.dafny.conversion.ToDafny.Simple.ByteSequence(nativeValue.ikm());
Integer expectedLength;
expectedLength = (nativeValue.expectedLength());
Option<DafnySequence<? extends Byte>> nonce;
nonce = Objects.nonNull(nativeValue.nonce()) ?
Option.create_Some(software.amazon.smithy.dafny.conversion.ToDafny.Simple.ByteSequence(nativeValue.nonce()))
: Option.create_None();
return new AesKdfCtrInput(ikm, expectedLength, nonce);
}
public static DafnySequence<? extends Byte> AesKdfCtrOutput(ByteBuffer nativeValue) {
DafnySequence<? extends Byte> okm;
okm = software.amazon.smithy.dafny.conversion.ToDafny.Simple.ByteSequence(nativeValue);
return okm;
}
public static CryptoConfig CryptoConfig(
software.amazon.cryptography.primitives.model.CryptoConfig nativeValue) {
return new CryptoConfig();
}
public static DigestInput DigestInput(
software.amazon.cryptography.primitives.model.DigestInput nativeValue) {
DigestAlgorithm digestAlgorithm;
digestAlgorithm = ToDafny.DigestAlgorithm(nativeValue.digestAlgorithm());
DafnySequence<? extends Byte> message;
message = software.amazon.smithy.dafny.conversion.ToDafny.Simple.ByteSequence(nativeValue.message());
return new DigestInput(digestAlgorithm, message);
}
public static DafnySequence<? extends Byte> DigestOutput(ByteBuffer nativeValue) {
DafnySequence<? extends Byte> digest;
digest = software.amazon.smithy.dafny.conversion.ToDafny.Simple.ByteSequence(nativeValue);
return digest;
}
public static ECDSASignInput ECDSASignInput(
software.amazon.cryptography.primitives.model.ECDSASignInput nativeValue) {
ECDSASignatureAlgorithm signatureAlgorithm;
signatureAlgorithm = ToDafny.ECDSASignatureAlgorithm(nativeValue.signatureAlgorithm());
DafnySequence<? extends Byte> signingKey;
signingKey = software.amazon.smithy.dafny.conversion.ToDafny.Simple.ByteSequence(nativeValue.signingKey());
DafnySequence<? extends Byte> message;
message = software.amazon.smithy.dafny.conversion.ToDafny.Simple.ByteSequence(nativeValue.message());
return new ECDSASignInput(signatureAlgorithm, signingKey, message);
}
public static DafnySequence<? extends Byte> ECDSASignOutput(ByteBuffer nativeValue) {
DafnySequence<? extends Byte> signature;
signature = software.amazon.smithy.dafny.conversion.ToDafny.Simple.ByteSequence(nativeValue);
return signature;
}
public static ECDSAVerifyInput ECDSAVerifyInput(
software.amazon.cryptography.primitives.model.ECDSAVerifyInput nativeValue) {
ECDSASignatureAlgorithm signatureAlgorithm;
signatureAlgorithm = ToDafny.ECDSASignatureAlgorithm(nativeValue.signatureAlgorithm());
DafnySequence<? extends Byte> verificationKey;
verificationKey = software.amazon.smithy.dafny.conversion.ToDafny.Simple.ByteSequence(nativeValue.verificationKey());
DafnySequence<? extends Byte> message;
message = software.amazon.smithy.dafny.conversion.ToDafny.Simple.ByteSequence(nativeValue.message());
DafnySequence<? extends Byte> signature;
signature = software.amazon.smithy.dafny.conversion.ToDafny.Simple.ByteSequence(nativeValue.signature());
return new ECDSAVerifyInput(signatureAlgorithm, verificationKey, message, signature);
}
public static Boolean ECDSAVerifyOutput(Boolean nativeValue) {
Boolean success;
success = (nativeValue);
return success;
}
public static GenerateECDSASignatureKeyInput GenerateECDSASignatureKeyInput(
software.amazon.cryptography.primitives.model.GenerateECDSASignatureKeyInput nativeValue) {
ECDSASignatureAlgorithm signatureAlgorithm;
signatureAlgorithm = ToDafny.ECDSASignatureAlgorithm(nativeValue.signatureAlgorithm());
return new GenerateECDSASignatureKeyInput(signatureAlgorithm);
}
public static GenerateECDSASignatureKeyOutput GenerateECDSASignatureKeyOutput(
software.amazon.cryptography.primitives.model.GenerateECDSASignatureKeyOutput nativeValue) {
ECDSASignatureAlgorithm signatureAlgorithm;
signatureAlgorithm = ToDafny.ECDSASignatureAlgorithm(nativeValue.signatureAlgorithm());
DafnySequence<? extends Byte> verificationKey;
verificationKey = software.amazon.smithy.dafny.conversion.ToDafny.Simple.ByteSequence(nativeValue.verificationKey());
DafnySequence<? extends Byte> signingKey;
signingKey = software.amazon.smithy.dafny.conversion.ToDafny.Simple.ByteSequence(nativeValue.signingKey());
return new GenerateECDSASignatureKeyOutput(signatureAlgorithm, verificationKey, signingKey);
}
public static GenerateRandomBytesInput GenerateRandomBytesInput(
software.amazon.cryptography.primitives.model.GenerateRandomBytesInput nativeValue) {
Integer length;
length = (nativeValue.length());
return new GenerateRandomBytesInput(length);
}
public static DafnySequence<? extends Byte> GenerateRandomBytesOutput(ByteBuffer nativeValue) {
DafnySequence<? extends Byte> data;
data = software.amazon.smithy.dafny.conversion.ToDafny.Simple.ByteSequence(nativeValue);
return data;
}
public static GenerateRSAKeyPairInput GenerateRSAKeyPairInput(
software.amazon.cryptography.primitives.model.GenerateRSAKeyPairInput nativeValue) {
Integer lengthBits;
lengthBits = (nativeValue.lengthBits());
return new GenerateRSAKeyPairInput(lengthBits);
}
public static GenerateRSAKeyPairOutput GenerateRSAKeyPairOutput(
software.amazon.cryptography.primitives.model.GenerateRSAKeyPairOutput nativeValue) {
RSAPublicKey publicKey;
publicKey = ToDafny.RSAPublicKey(nativeValue.publicKey());
RSAPrivateKey privateKey;
privateKey = ToDafny.RSAPrivateKey(nativeValue.privateKey());
return new GenerateRSAKeyPairOutput(publicKey, privateKey);
}
public static GetRSAKeyModulusLengthInput GetRSAKeyModulusLengthInput(
software.amazon.cryptography.primitives.model.GetRSAKeyModulusLengthInput nativeValue) {
DafnySequence<? extends Byte> publicKey;
publicKey = software.amazon.smithy.dafny.conversion.ToDafny.Simple.ByteSequence(nativeValue.publicKey());
return new GetRSAKeyModulusLengthInput(publicKey);
}
public static GetRSAKeyModulusLengthOutput GetRSAKeyModulusLengthOutput(
software.amazon.cryptography.primitives.model.GetRSAKeyModulusLengthOutput nativeValue) {
Integer length;
length = (nativeValue.length());
return new GetRSAKeyModulusLengthOutput(length);
}
public static HkdfExpandInput HkdfExpandInput(
software.amazon.cryptography.primitives.model.HkdfExpandInput nativeValue) {
DigestAlgorithm digestAlgorithm;
digestAlgorithm = ToDafny.DigestAlgorithm(nativeValue.digestAlgorithm());
DafnySequence<? extends Byte> prk;
prk = software.amazon.smithy.dafny.conversion.ToDafny.Simple.ByteSequence(nativeValue.prk());
DafnySequence<? extends Byte> info;
info = software.amazon.smithy.dafny.conversion.ToDafny.Simple.ByteSequence(nativeValue.info());
Integer expectedLength;
expectedLength = (nativeValue.expectedLength());
return new HkdfExpandInput(digestAlgorithm, prk, info, expectedLength);
}
public static DafnySequence<? extends Byte> HkdfExpandOutput(ByteBuffer nativeValue) {
DafnySequence<? extends Byte> okm;
okm = software.amazon.smithy.dafny.conversion.ToDafny.Simple.ByteSequence(nativeValue);
return okm;
}
public static HkdfExtractInput HkdfExtractInput(
software.amazon.cryptography.primitives.model.HkdfExtractInput nativeValue) {
DigestAlgorithm digestAlgorithm;
digestAlgorithm = ToDafny.DigestAlgorithm(nativeValue.digestAlgorithm());
Option<DafnySequence<? extends Byte>> salt;
salt = Objects.nonNull(nativeValue.salt()) ?
Option.create_Some(software.amazon.smithy.dafny.conversion.ToDafny.Simple.ByteSequence(nativeValue.salt()))
: Option.create_None();
DafnySequence<? extends Byte> ikm;
ikm = software.amazon.smithy.dafny.conversion.ToDafny.Simple.ByteSequence(nativeValue.ikm());
return new HkdfExtractInput(digestAlgorithm, salt, ikm);
}
public static DafnySequence<? extends Byte> HkdfExtractOutput(ByteBuffer nativeValue) {
DafnySequence<? extends Byte> prk;
prk = software.amazon.smithy.dafny.conversion.ToDafny.Simple.ByteSequence(nativeValue);
return prk;
}
public static HkdfInput HkdfInput(
software.amazon.cryptography.primitives.model.HkdfInput nativeValue) {
DigestAlgorithm digestAlgorithm;
digestAlgorithm = ToDafny.DigestAlgorithm(nativeValue.digestAlgorithm());
Option<DafnySequence<? extends Byte>> salt;
salt = Objects.nonNull(nativeValue.salt()) ?
Option.create_Some(software.amazon.smithy.dafny.conversion.ToDafny.Simple.ByteSequence(nativeValue.salt()))
: Option.create_None();
DafnySequence<? extends Byte> ikm;
ikm = software.amazon.smithy.dafny.conversion.ToDafny.Simple.ByteSequence(nativeValue.ikm());
DafnySequence<? extends Byte> info;
info = software.amazon.smithy.dafny.conversion.ToDafny.Simple.ByteSequence(nativeValue.info());
Integer expectedLength;
expectedLength = (nativeValue.expectedLength());
return new HkdfInput(digestAlgorithm, salt, ikm, info, expectedLength);
}
public static DafnySequence<? extends Byte> HkdfOutput(ByteBuffer nativeValue) {
DafnySequence<? extends Byte> okm;
okm = software.amazon.smithy.dafny.conversion.ToDafny.Simple.ByteSequence(nativeValue);
return okm;
}
public static HMacInput HMacInput(
software.amazon.cryptography.primitives.model.HMacInput nativeValue) {
DigestAlgorithm digestAlgorithm;
digestAlgorithm = ToDafny.DigestAlgorithm(nativeValue.digestAlgorithm());
DafnySequence<? extends Byte> key;
key = software.amazon.smithy.dafny.conversion.ToDafny.Simple.ByteSequence(nativeValue.key());
DafnySequence<? extends Byte> message;
message = software.amazon.smithy.dafny.conversion.ToDafny.Simple.ByteSequence(nativeValue.message());
return new HMacInput(digestAlgorithm, key, message);
}
public static DafnySequence<? extends Byte> HMacOutput(ByteBuffer nativeValue) {
DafnySequence<? extends Byte> digest;
digest = software.amazon.smithy.dafny.conversion.ToDafny.Simple.ByteSequence(nativeValue);
return digest;
}
public static KdfCtrInput KdfCtrInput(
software.amazon.cryptography.primitives.model.KdfCtrInput nativeValue) {
DigestAlgorithm digestAlgorithm;
digestAlgorithm = ToDafny.DigestAlgorithm(nativeValue.digestAlgorithm());
DafnySequence<? extends Byte> ikm;
ikm = software.amazon.smithy.dafny.conversion.ToDafny.Simple.ByteSequence(nativeValue.ikm());
Integer expectedLength;
expectedLength = (nativeValue.expectedLength());
Option<DafnySequence<? extends Byte>> purpose;
purpose = Objects.nonNull(nativeValue.purpose()) ?
Option.create_Some(software.amazon.smithy.dafny.conversion.ToDafny.Simple.ByteSequence(nativeValue.purpose()))
: Option.create_None();
Option<DafnySequence<? extends Byte>> nonce;
nonce = Objects.nonNull(nativeValue.nonce()) ?
Option.create_Some(software.amazon.smithy.dafny.conversion.ToDafny.Simple.ByteSequence(nativeValue.nonce()))
: Option.create_None();
return new KdfCtrInput(digestAlgorithm, ikm, expectedLength, purpose, nonce);
}
public static DafnySequence<? extends Byte> KdfCtrOutput(ByteBuffer nativeValue) {
DafnySequence<? extends Byte> okm;
okm = software.amazon.smithy.dafny.conversion.ToDafny.Simple.ByteSequence(nativeValue);
return okm;
}
public static RSADecryptInput RSADecryptInput(
software.amazon.cryptography.primitives.model.RSADecryptInput nativeValue) {
RSAPaddingMode padding;
padding = ToDafny.RSAPaddingMode(nativeValue.padding());
DafnySequence<? extends Byte> privateKey;
privateKey = software.amazon.smithy.dafny.conversion.ToDafny.Simple.ByteSequence(nativeValue.privateKey());
DafnySequence<? extends Byte> cipherText;
cipherText = software.amazon.smithy.dafny.conversion.ToDafny.Simple.ByteSequence(nativeValue.cipherText());
return new RSADecryptInput(padding, privateKey, cipherText);
}
public static DafnySequence<? extends Byte> RSADecryptOutput(ByteBuffer nativeValue) {
DafnySequence<? extends Byte> plaintext;
plaintext = software.amazon.smithy.dafny.conversion.ToDafny.Simple.ByteSequence(nativeValue);
return plaintext;
}
public static RSAEncryptInput RSAEncryptInput(
software.amazon.cryptography.primitives.model.RSAEncryptInput nativeValue) {
RSAPaddingMode padding;
padding = ToDafny.RSAPaddingMode(nativeValue.padding());
DafnySequence<? extends Byte> publicKey;
publicKey = software.amazon.smithy.dafny.conversion.ToDafny.Simple.ByteSequence(nativeValue.publicKey());
DafnySequence<? extends Byte> plaintext;
plaintext = software.amazon.smithy.dafny.conversion.ToDafny.Simple.ByteSequence(nativeValue.plaintext());
return new RSAEncryptInput(padding, publicKey, plaintext);
}
public static DafnySequence<? extends Byte> RSAEncryptOutput(ByteBuffer nativeValue) {
DafnySequence<? extends Byte> cipherText;
cipherText = software.amazon.smithy.dafny.conversion.ToDafny.Simple.ByteSequence(nativeValue);
return cipherText;
}
public static RSAPrivateKey RSAPrivateKey(
software.amazon.cryptography.primitives.model.RSAPrivateKey nativeValue) {
Integer lengthBits;
lengthBits = (nativeValue.lengthBits());
DafnySequence<? extends Byte> pem;
pem = software.amazon.smithy.dafny.conversion.ToDafny.Simple.ByteSequence(nativeValue.pem());
return new RSAPrivateKey(lengthBits, pem);
}
public static RSAPublicKey RSAPublicKey(
software.amazon.cryptography.primitives.model.RSAPublicKey nativeValue) {
Integer lengthBits;
lengthBits = (nativeValue.lengthBits());
DafnySequence<? extends Byte> pem;
pem = software.amazon.smithy.dafny.conversion.ToDafny.Simple.ByteSequence(nativeValue.pem());
return new RSAPublicKey(lengthBits, pem);
}
public static Error Error(AwsCryptographicPrimitivesError nativeValue) {
DafnySequence<? extends Character> message;
message = software.amazon.smithy.dafny.conversion.ToDafny.Simple.CharacterSequence(nativeValue.message());
return new Error_AwsCryptographicPrimitivesError(message);
}
public static DigestAlgorithm DigestAlgorithm(
software.amazon.cryptography.primitives.model.DigestAlgorithm nativeValue) {
switch (nativeValue) {
case SHA_512: {
return DigestAlgorithm.create_SHA__512();
}
case SHA_384: {
return DigestAlgorithm.create_SHA__384();
}
case SHA_256: {
return DigestAlgorithm.create_SHA__256();
}
default: {
throw new RuntimeException("Cannot convert " + nativeValue + " to software.amazon.cryptography.primitives.internaldafny.types.DigestAlgorithm.");
}
}
}
public static ECDSASignatureAlgorithm ECDSASignatureAlgorithm(
software.amazon.cryptography.primitives.model.ECDSASignatureAlgorithm nativeValue) {
switch (nativeValue) {
case ECDSA_P384: {
return ECDSASignatureAlgorithm.create_ECDSA__P384();
}
case ECDSA_P256: {
return ECDSASignatureAlgorithm.create_ECDSA__P256();
}
default: {
throw new RuntimeException("Cannot convert " + nativeValue + " to software.amazon.cryptography.primitives.internaldafny.types.ECDSASignatureAlgorithm.");
}
}
}
public static RSAPaddingMode RSAPaddingMode(
software.amazon.cryptography.primitives.model.RSAPaddingMode nativeValue) {
switch (nativeValue) {
case PKCS1: {
return RSAPaddingMode.create_PKCS1();
}
case OAEP_SHA1: {
return RSAPaddingMode.create_OAEP__SHA1();
}
case OAEP_SHA256: {
return RSAPaddingMode.create_OAEP__SHA256();
}
case OAEP_SHA384: {
return RSAPaddingMode.create_OAEP__SHA384();
}
case OAEP_SHA512: {
return RSAPaddingMode.create_OAEP__SHA512();
}
default: {
throw new RuntimeException("Cannot convert " + nativeValue + " to software.amazon.cryptography.primitives.internaldafny.types.RSAPaddingMode.");
}
}
}
public static IAwsCryptographicPrimitivesClient AwsCryptographicPrimitives(
AtomicPrimitives nativeValue) {
return nativeValue.impl();
}
}
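// Illustrative sketch (not part of the generated file): shows how ToDafny maps an
// optional native field onto Wrappers_Compile.Option — null becomes create_None(),
// non-null becomes create_Some(...). The dtor_salt()/is_Some() destructor names on
// the Dafny side are assumptions, following the dtor_ pattern used throughout this dump.
class OptionalFieldExample {
  public static void main(String[] args) {
    // salt is the only optional field of HkdfInput and is simply never set here.
    software.amazon.cryptography.primitives.model.HkdfInput noSalt =
        software.amazon.cryptography.primitives.model.HkdfInput.builder()
            .digestAlgorithm(software.amazon.cryptography.primitives.model.DigestAlgorithm.SHA_256)
            .ikm(ByteBuffer.wrap(new byte[32]))
            .info(ByteBuffer.wrap(new byte[0]))
            .expectedLength(32)
            .build();
    // HkdfInput here resolves to the imported internaldafny type.
    HkdfInput dafny = ToDafny.HkdfInput(noSalt);
    System.out.println("salt present? " + dafny.dtor_salt().is_Some()); // expected: false
  }
}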
| 2,990 |
0 |
Create_ds/aws-cryptographic-material-providers-library-java/AwsCryptographyPrimitives/runtimes/java/src/main/smithy-generated/software/amazon/cryptography/primitives
|
Create_ds/aws-cryptographic-material-providers-library-java/AwsCryptographyPrimitives/runtimes/java/src/main/smithy-generated/software/amazon/cryptography/primitives/model/GenerateRSAKeyPairOutput.java
|
// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
// Do not modify this file. This file is machine generated, and any changes to it will be overwritten.
package software.amazon.cryptography.primitives.model;
import java.util.Objects;
public class GenerateRSAKeyPairOutput {
private final RSAPublicKey publicKey;
private final RSAPrivateKey privateKey;
protected GenerateRSAKeyPairOutput(BuilderImpl builder) {
this.publicKey = builder.publicKey();
this.privateKey = builder.privateKey();
}
public RSAPublicKey publicKey() {
return this.publicKey;
}
public RSAPrivateKey privateKey() {
return this.privateKey;
}
public Builder toBuilder() {
return new BuilderImpl(this);
}
public static Builder builder() {
return new BuilderImpl();
}
public interface Builder {
Builder publicKey(RSAPublicKey publicKey);
RSAPublicKey publicKey();
Builder privateKey(RSAPrivateKey privateKey);
RSAPrivateKey privateKey();
GenerateRSAKeyPairOutput build();
}
static class BuilderImpl implements Builder {
protected RSAPublicKey publicKey;
protected RSAPrivateKey privateKey;
protected BuilderImpl() {
}
protected BuilderImpl(GenerateRSAKeyPairOutput model) {
this.publicKey = model.publicKey();
this.privateKey = model.privateKey();
}
public Builder publicKey(RSAPublicKey publicKey) {
this.publicKey = publicKey;
return this;
}
public RSAPublicKey publicKey() {
return this.publicKey;
}
public Builder privateKey(RSAPrivateKey privateKey) {
this.privateKey = privateKey;
return this;
}
public RSAPrivateKey privateKey() {
return this.privateKey;
}
public GenerateRSAKeyPairOutput build() {
if (Objects.isNull(this.publicKey())) {
throw new IllegalArgumentException("Missing value for required field `publicKey`");
}
if (Objects.isNull(this.privateKey())) {
throw new IllegalArgumentException("Missing value for required field `privateKey`");
}
return new GenerateRSAKeyPairOutput(this);
}
}
}
| 2,991 |
0 |
Create_ds/aws-cryptographic-material-providers-library-java/AwsCryptographyPrimitives/runtimes/java/src/main/smithy-generated/software/amazon/cryptography/primitives
|
Create_ds/aws-cryptographic-material-providers-library-java/AwsCryptographyPrimitives/runtimes/java/src/main/smithy-generated/software/amazon/cryptography/primitives/model/GenerateRandomBytesInput.java
|
// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
// Do not modify this file. This file is machine generated, and any changes to it will be overwritten.
package software.amazon.cryptography.primitives.model;
public class GenerateRandomBytesInput {
private final int length;
protected GenerateRandomBytesInput(BuilderImpl builder) {
this.length = builder.length();
}
public int length() {
return this.length;
}
public Builder toBuilder() {
return new BuilderImpl(this);
}
public static Builder builder() {
return new BuilderImpl();
}
public interface Builder {
Builder length(int length);
int length();
GenerateRandomBytesInput build();
}
static class BuilderImpl implements Builder {
protected int length;
private boolean _lengthSet = false;
protected BuilderImpl() {
}
protected BuilderImpl(GenerateRandomBytesInput model) {
this.length = model.length();
this._lengthSet = true;
}
public Builder length(int length) {
this.length = length;
this._lengthSet = true;
return this;
}
public int length() {
return this.length;
}
public GenerateRandomBytesInput build() {
if (!this._lengthSet) {
throw new IllegalArgumentException("Missing value for required field `length`");
}
if (this._lengthSet && this.length() < 0) {
throw new IllegalArgumentException("`length` must be greater than or equal to 0");
}
return new GenerateRandomBytesInput(this);
}
}
}
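// Illustrative sketch (not part of the generated file): the builder tracks whether
// the primitive `length` field was explicitly set via the _lengthSet flag, so
// build() can reject both a missing and a negative value. Uses only APIs shown above.
class GenerateRandomBytesInputExample {
  public static void main(String[] args) {
    GenerateRandomBytesInput ok =
        GenerateRandomBytesInput.builder().length(32).build();
    System.out.println("length: " + ok.length());
    try {
      GenerateRandomBytesInput.builder().build(); // length never set
    } catch (IllegalArgumentException expected) {
      // Prints: Missing value for required field `length`
      System.out.println(expected.getMessage());
    }
  }
}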
| 2,992 |
0 |
Create_ds/aws-cryptographic-material-providers-library-java/AwsCryptographyPrimitives/runtimes/java/src/main/smithy-generated/software/amazon/cryptography/primitives
|
Create_ds/aws-cryptographic-material-providers-library-java/AwsCryptographyPrimitives/runtimes/java/src/main/smithy-generated/software/amazon/cryptography/primitives/model/HkdfInput.java
|
// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
// Do not modify this file. This file is machine generated, and any changes to it will be overwritten.
package software.amazon.cryptography.primitives.model;
import java.nio.ByteBuffer;
import java.util.Objects;
public class HkdfInput {
private final DigestAlgorithm digestAlgorithm;
private final ByteBuffer salt;
private final ByteBuffer ikm;
private final ByteBuffer info;
private final int expectedLength;
protected HkdfInput(BuilderImpl builder) {
this.digestAlgorithm = builder.digestAlgorithm();
this.salt = builder.salt();
this.ikm = builder.ikm();
this.info = builder.info();
this.expectedLength = builder.expectedLength();
}
public DigestAlgorithm digestAlgorithm() {
return this.digestAlgorithm;
}
public ByteBuffer salt() {
return this.salt;
}
public ByteBuffer ikm() {
return this.ikm;
}
public ByteBuffer info() {
return this.info;
}
public int expectedLength() {
return this.expectedLength;
}
public Builder toBuilder() {
return new BuilderImpl(this);
}
public static Builder builder() {
return new BuilderImpl();
}
public interface Builder {
Builder digestAlgorithm(DigestAlgorithm digestAlgorithm);
DigestAlgorithm digestAlgorithm();
Builder salt(ByteBuffer salt);
ByteBuffer salt();
Builder ikm(ByteBuffer ikm);
ByteBuffer ikm();
Builder info(ByteBuffer info);
ByteBuffer info();
Builder expectedLength(int expectedLength);
int expectedLength();
HkdfInput build();
}
static class BuilderImpl implements Builder {
protected DigestAlgorithm digestAlgorithm;
protected ByteBuffer salt;
protected ByteBuffer ikm;
protected ByteBuffer info;
protected int expectedLength;
private boolean _expectedLengthSet = false;
protected BuilderImpl() {
}
protected BuilderImpl(HkdfInput model) {
this.digestAlgorithm = model.digestAlgorithm();
this.salt = model.salt();
this.ikm = model.ikm();
this.info = model.info();
this.expectedLength = model.expectedLength();
this._expectedLengthSet = true;
}
public Builder digestAlgorithm(DigestAlgorithm digestAlgorithm) {
this.digestAlgorithm = digestAlgorithm;
return this;
}
public DigestAlgorithm digestAlgorithm() {
return this.digestAlgorithm;
}
public Builder salt(ByteBuffer salt) {
this.salt = salt;
return this;
}
public ByteBuffer salt() {
return this.salt;
}
public Builder ikm(ByteBuffer ikm) {
this.ikm = ikm;
return this;
}
public ByteBuffer ikm() {
return this.ikm;
}
public Builder info(ByteBuffer info) {
this.info = info;
return this;
}
public ByteBuffer info() {
return this.info;
}
public Builder expectedLength(int expectedLength) {
this.expectedLength = expectedLength;
this._expectedLengthSet = true;
return this;
}
public int expectedLength() {
return this.expectedLength;
}
public HkdfInput build() {
if (Objects.isNull(this.digestAlgorithm())) {
throw new IllegalArgumentException("Missing value for required field `digestAlgorithm`");
}
if (Objects.isNull(this.ikm())) {
throw new IllegalArgumentException("Missing value for required field `ikm`");
}
if (Objects.isNull(this.info())) {
throw new IllegalArgumentException("Missing value for required field `info`");
}
if (!this._expectedLengthSet) {
throw new IllegalArgumentException("Missing value for required field `expectedLength`");
}
if (this._expectedLengthSet && this.expectedLength() < 0) {
throw new IllegalArgumentException("`expectedLength` must be greater than or equal to 0");
}
return new HkdfInput(this);
}
}
}
| 2,993 |
0 |
Create_ds/aws-cryptographic-material-providers-library-java/AwsCryptographyPrimitives/runtimes/java/src/main/smithy-generated/software/amazon/cryptography/primitives
|
Create_ds/aws-cryptographic-material-providers-library-java/AwsCryptographyPrimitives/runtimes/java/src/main/smithy-generated/software/amazon/cryptography/primitives/model/KdfCtrInput.java
|
// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
// Do not modify this file. This file is machine generated, and any changes to it will be overwritten.
package software.amazon.cryptography.primitives.model;
import java.nio.ByteBuffer;
import java.util.Objects;
public class KdfCtrInput {
private final DigestAlgorithm digestAlgorithm;
private final ByteBuffer ikm;
private final int expectedLength;
private final ByteBuffer purpose;
private final ByteBuffer nonce;
protected KdfCtrInput(BuilderImpl builder) {
this.digestAlgorithm = builder.digestAlgorithm();
this.ikm = builder.ikm();
this.expectedLength = builder.expectedLength();
this.purpose = builder.purpose();
this.nonce = builder.nonce();
}
public DigestAlgorithm digestAlgorithm() {
return this.digestAlgorithm;
}
public ByteBuffer ikm() {
return this.ikm;
}
public int expectedLength() {
return this.expectedLength;
}
public ByteBuffer purpose() {
return this.purpose;
}
public ByteBuffer nonce() {
return this.nonce;
}
public Builder toBuilder() {
return new BuilderImpl(this);
}
public static Builder builder() {
return new BuilderImpl();
}
public interface Builder {
Builder digestAlgorithm(DigestAlgorithm digestAlgorithm);
DigestAlgorithm digestAlgorithm();
Builder ikm(ByteBuffer ikm);
ByteBuffer ikm();
Builder expectedLength(int expectedLength);
int expectedLength();
Builder purpose(ByteBuffer purpose);
ByteBuffer purpose();
Builder nonce(ByteBuffer nonce);
ByteBuffer nonce();
KdfCtrInput build();
}
static class BuilderImpl implements Builder {
protected DigestAlgorithm digestAlgorithm;
protected ByteBuffer ikm;
protected int expectedLength;
private boolean _expectedLengthSet = false;
protected ByteBuffer purpose;
protected ByteBuffer nonce;
protected BuilderImpl() {
}
protected BuilderImpl(KdfCtrInput model) {
this.digestAlgorithm = model.digestAlgorithm();
this.ikm = model.ikm();
this.expectedLength = model.expectedLength();
this._expectedLengthSet = true;
this.purpose = model.purpose();
this.nonce = model.nonce();
}
public Builder digestAlgorithm(DigestAlgorithm digestAlgorithm) {
this.digestAlgorithm = digestAlgorithm;
return this;
}
public DigestAlgorithm digestAlgorithm() {
return this.digestAlgorithm;
}
public Builder ikm(ByteBuffer ikm) {
this.ikm = ikm;
return this;
}
public ByteBuffer ikm() {
return this.ikm;
}
public Builder expectedLength(int expectedLength) {
this.expectedLength = expectedLength;
this._expectedLengthSet = true;
return this;
}
public int expectedLength() {
return this.expectedLength;
}
public Builder purpose(ByteBuffer purpose) {
this.purpose = purpose;
return this;
}
public ByteBuffer purpose() {
return this.purpose;
}
public Builder nonce(ByteBuffer nonce) {
this.nonce = nonce;
return this;
}
public ByteBuffer nonce() {
return this.nonce;
}
public KdfCtrInput build() {
if (Objects.isNull(this.digestAlgorithm())) {
throw new IllegalArgumentException("Missing value for required field `digestAlgorithm`");
}
if (Objects.isNull(this.ikm())) {
throw new IllegalArgumentException("Missing value for required field `ikm`");
}
if (!this._expectedLengthSet) {
throw new IllegalArgumentException("Missing value for required field `expectedLength`");
}
if (this._expectedLengthSet && this.expectedLength() < 0) {
throw new IllegalArgumentException("`expectedLength` must be greater than or equal to 0");
}
return new KdfCtrInput(this);
}
}
}
| 2,994 |
0 |
Create_ds/aws-cryptographic-material-providers-library-java/AwsCryptographyPrimitives/runtimes/java/src/main/smithy-generated/software/amazon/cryptography/primitives
|
Create_ds/aws-cryptographic-material-providers-library-java/AwsCryptographyPrimitives/runtimes/java/src/main/smithy-generated/software/amazon/cryptography/primitives/model/GetRSAKeyModulusLengthInput.java
|
// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
// Do not modify this file. This file is machine generated, and any changes to it will be overwritten.
package software.amazon.cryptography.primitives.model;
import java.nio.ByteBuffer;
import java.util.Objects;
public class GetRSAKeyModulusLengthInput {
private final ByteBuffer publicKey;
protected GetRSAKeyModulusLengthInput(BuilderImpl builder) {
this.publicKey = builder.publicKey();
}
public ByteBuffer publicKey() {
return this.publicKey;
}
public Builder toBuilder() {
return new BuilderImpl(this);
}
public static Builder builder() {
return new BuilderImpl();
}
public interface Builder {
Builder publicKey(ByteBuffer publicKey);
ByteBuffer publicKey();
GetRSAKeyModulusLengthInput build();
}
static class BuilderImpl implements Builder {
protected ByteBuffer publicKey;
protected BuilderImpl() {
}
protected BuilderImpl(GetRSAKeyModulusLengthInput model) {
this.publicKey = model.publicKey();
}
public Builder publicKey(ByteBuffer publicKey) {
this.publicKey = publicKey;
return this;
}
public ByteBuffer publicKey() {
return this.publicKey;
}
public GetRSAKeyModulusLengthInput build() {
if (Objects.isNull(this.publicKey())) {
throw new IllegalArgumentException("Missing value for required field `publicKey`");
}
return new GetRSAKeyModulusLengthInput(this);
}
}
}
| 2,995 |
0 |
Create_ds/aws-cryptographic-material-providers-library-java/AwsCryptographyPrimitives/runtimes/java/src/main/smithy-generated/software/amazon/cryptography/primitives
|
Create_ds/aws-cryptographic-material-providers-library-java/AwsCryptographyPrimitives/runtimes/java/src/main/smithy-generated/software/amazon/cryptography/primitives/model/CryptoConfig.java
|
// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
// Do not modify this file. This file is machine generated, and any changes to it will be overwritten.
package software.amazon.cryptography.primitives.model;
public class CryptoConfig {
protected CryptoConfig(BuilderImpl builder) {
}
public Builder toBuilder() {
return new BuilderImpl(this);
}
public static Builder builder() {
return new BuilderImpl();
}
public interface Builder {
CryptoConfig build();
}
static class BuilderImpl implements Builder {
protected BuilderImpl() {
}
protected BuilderImpl(CryptoConfig model) {
}
public CryptoConfig build() {
return new CryptoConfig(this);
}
}
}
| 2,996 |
0 |
Create_ds/aws-cryptographic-material-providers-library-java/AwsCryptographyPrimitives/runtimes/java/src/main/smithy-generated/software/amazon/cryptography/primitives
|
Create_ds/aws-cryptographic-material-providers-library-java/AwsCryptographyPrimitives/runtimes/java/src/main/smithy-generated/software/amazon/cryptography/primitives/model/HkdfExpandInput.java
|
// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
// Do not modify this file. This file is machine generated, and any changes to it will be overwritten.
package software.amazon.cryptography.primitives.model;
import java.nio.ByteBuffer;
import java.util.Objects;
public class HkdfExpandInput {
private final DigestAlgorithm digestAlgorithm;
private final ByteBuffer prk;
private final ByteBuffer info;
private final int expectedLength;
protected HkdfExpandInput(BuilderImpl builder) {
this.digestAlgorithm = builder.digestAlgorithm();
this.prk = builder.prk();
this.info = builder.info();
this.expectedLength = builder.expectedLength();
}
public DigestAlgorithm digestAlgorithm() {
return this.digestAlgorithm;
}
public ByteBuffer prk() {
return this.prk;
}
public ByteBuffer info() {
return this.info;
}
public int expectedLength() {
return this.expectedLength;
}
public Builder toBuilder() {
return new BuilderImpl(this);
}
public static Builder builder() {
return new BuilderImpl();
}
public interface Builder {
Builder digestAlgorithm(DigestAlgorithm digestAlgorithm);
DigestAlgorithm digestAlgorithm();
Builder prk(ByteBuffer prk);
ByteBuffer prk();
Builder info(ByteBuffer info);
ByteBuffer info();
Builder expectedLength(int expectedLength);
int expectedLength();
HkdfExpandInput build();
}
static class BuilderImpl implements Builder {
protected DigestAlgorithm digestAlgorithm;
protected ByteBuffer prk;
protected ByteBuffer info;
protected int expectedLength;
private boolean _expectedLengthSet = false;
protected BuilderImpl() {
}
protected BuilderImpl(HkdfExpandInput model) {
this.digestAlgorithm = model.digestAlgorithm();
this.prk = model.prk();
this.info = model.info();
this.expectedLength = model.expectedLength();
this._expectedLengthSet = true;
}
public Builder digestAlgorithm(DigestAlgorithm digestAlgorithm) {
this.digestAlgorithm = digestAlgorithm;
return this;
}
public DigestAlgorithm digestAlgorithm() {
return this.digestAlgorithm;
}
public Builder prk(ByteBuffer prk) {
this.prk = prk;
return this;
}
public ByteBuffer prk() {
return this.prk;
}
public Builder info(ByteBuffer info) {
this.info = info;
return this;
}
public ByteBuffer info() {
return this.info;
}
public Builder expectedLength(int expectedLength) {
this.expectedLength = expectedLength;
this._expectedLengthSet = true;
return this;
}
public int expectedLength() {
return this.expectedLength;
}
public HkdfExpandInput build() {
if (Objects.isNull(this.digestAlgorithm())) {
throw new IllegalArgumentException("Missing value for required field `digestAlgorithm`");
}
if (Objects.isNull(this.prk())) {
throw new IllegalArgumentException("Missing value for required field `prk`");
}
if (Objects.isNull(this.info())) {
throw new IllegalArgumentException("Missing value for required field `info`");
}
if (!this._expectedLengthSet) {
throw new IllegalArgumentException("Missing value for required field `expectedLength`");
}
if (this._expectedLengthSet && this.expectedLength() < 0) {
throw new IllegalArgumentException("`expectedLength` must be greater than or equal to 0");
}
return new HkdfExpandInput(this);
}
}
}
| 2,997 |
0 |
Create_ds/aws-cryptographic-material-providers-library-java/AwsCryptographyPrimitives/runtimes/java/src/main/smithy-generated/software/amazon/cryptography/primitives
|
Create_ds/aws-cryptographic-material-providers-library-java/AwsCryptographyPrimitives/runtimes/java/src/main/smithy-generated/software/amazon/cryptography/primitives/model/ECDSAVerifyInput.java
|
// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
// Do not modify this file. This file is machine generated, and any changes to it will be overwritten.
package software.amazon.cryptography.primitives.model;
import java.nio.ByteBuffer;
import java.util.Objects;
public class ECDSAVerifyInput {
private final ECDSASignatureAlgorithm signatureAlgorithm;
private final ByteBuffer verificationKey;
private final ByteBuffer message;
private final ByteBuffer signature;
protected ECDSAVerifyInput(BuilderImpl builder) {
this.signatureAlgorithm = builder.signatureAlgorithm();
this.verificationKey = builder.verificationKey();
this.message = builder.message();
this.signature = builder.signature();
}
public ECDSASignatureAlgorithm signatureAlgorithm() {
return this.signatureAlgorithm;
}
public ByteBuffer verificationKey() {
return this.verificationKey;
}
public ByteBuffer message() {
return this.message;
}
public ByteBuffer signature() {
return this.signature;
}
public Builder toBuilder() {
return new BuilderImpl(this);
}
public static Builder builder() {
return new BuilderImpl();
}
public interface Builder {
Builder signatureAlgorithm(ECDSASignatureAlgorithm signatureAlgorithm);
ECDSASignatureAlgorithm signatureAlgorithm();
Builder verificationKey(ByteBuffer verificationKey);
ByteBuffer verificationKey();
Builder message(ByteBuffer message);
ByteBuffer message();
Builder signature(ByteBuffer signature);
ByteBuffer signature();
ECDSAVerifyInput build();
}
static class BuilderImpl implements Builder {
protected ECDSASignatureAlgorithm signatureAlgorithm;
protected ByteBuffer verificationKey;
protected ByteBuffer message;
protected ByteBuffer signature;
protected BuilderImpl() {
}
protected BuilderImpl(ECDSAVerifyInput model) {
this.signatureAlgorithm = model.signatureAlgorithm();
this.verificationKey = model.verificationKey();
this.message = model.message();
this.signature = model.signature();
}
public Builder signatureAlgorithm(ECDSASignatureAlgorithm signatureAlgorithm) {
this.signatureAlgorithm = signatureAlgorithm;
return this;
}
public ECDSASignatureAlgorithm signatureAlgorithm() {
return this.signatureAlgorithm;
}
public Builder verificationKey(ByteBuffer verificationKey) {
this.verificationKey = verificationKey;
return this;
}
public ByteBuffer verificationKey() {
return this.verificationKey;
}
public Builder message(ByteBuffer message) {
this.message = message;
return this;
}
public ByteBuffer message() {
return this.message;
}
public Builder signature(ByteBuffer signature) {
this.signature = signature;
return this;
}
public ByteBuffer signature() {
return this.signature;
}
public ECDSAVerifyInput build() {
if (Objects.isNull(this.signatureAlgorithm())) {
throw new IllegalArgumentException("Missing value for required field `signatureAlgorithm`");
}
if (Objects.isNull(this.verificationKey())) {
throw new IllegalArgumentException("Missing value for required field `verificationKey`");
}
if (Objects.isNull(this.message())) {
throw new IllegalArgumentException("Missing value for required field `message`");
}
if (Objects.isNull(this.signature())) {
throw new IllegalArgumentException("Missing value for required field `signature`");
}
return new ECDSAVerifyInput(this);
}
}
}
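// Illustrative sketch (not part of the generated file): a sign/verify round trip
// through the AtomicPrimitives client using the model classes in this dump. The
// builder and accessor methods on the key-generation and sign inputs/outputs are
// assumptions, mirroring the generated pattern shown throughout this package.
class EcdsaSignVerifyExample {
  public static void main(String[] args) {
    software.amazon.cryptography.primitives.AtomicPrimitives client =
        software.amazon.cryptography.primitives.AtomicPrimitives.builder()
            .CryptoConfig(CryptoConfig.builder().build())
            .build();
    // Generate a P-256 key pair; the output carries both halves of the key.
    GenerateECDSASignatureKeyOutput keys = client.GenerateECDSASignatureKey(
        GenerateECDSASignatureKeyInput.builder()
            .signatureAlgorithm(ECDSASignatureAlgorithm.ECDSA_P256)
            .build());
    ByteBuffer message = ByteBuffer.wrap("hello".getBytes());
    ByteBuffer signature = client.ECDSASign(
        ECDSASignInput.builder()
            .signatureAlgorithm(ECDSASignatureAlgorithm.ECDSA_P256)
            .signingKey(keys.signingKey())
            .message(message.duplicate())
            .build());
    Boolean valid = client.ECDSAVerify(
        ECDSAVerifyInput.builder()
            .signatureAlgorithm(ECDSASignatureAlgorithm.ECDSA_P256)
            .verificationKey(keys.verificationKey())
            .message(message.duplicate())
            .signature(signature)
            .build());
    System.out.println("signature valid: " + valid);
  }
}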
| 2,998 |
0 |
Create_ds/aws-cryptographic-material-providers-library-java/AwsCryptographyPrimitives/runtimes/java/src/main/smithy-generated/software/amazon/cryptography/primitives
|
Create_ds/aws-cryptographic-material-providers-library-java/AwsCryptographyPrimitives/runtimes/java/src/main/smithy-generated/software/amazon/cryptography/primitives/model/HMacOutput.java
|
// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
// Do not modify this file. This file is machine generated, and any changes to it will be overwritten.
package software.amazon.cryptography.primitives.model;
import java.nio.ByteBuffer;
import java.util.Objects;
public class HMacOutput {
private final ByteBuffer digest;
protected HMacOutput(BuilderImpl builder) {
this.digest = builder.digest();
}
public ByteBuffer digest() {
return this.digest;
}
public Builder toBuilder() {
return new BuilderImpl(this);
}
public static Builder builder() {
return new BuilderImpl();
}
public interface Builder {
Builder digest(ByteBuffer digest);
ByteBuffer digest();
HMacOutput build();
}
static class BuilderImpl implements Builder {
protected ByteBuffer digest;
protected BuilderImpl() {
}
protected BuilderImpl(HMacOutput model) {
this.digest = model.digest();
}
public Builder digest(ByteBuffer digest) {
this.digest = digest;
return this;
}
public ByteBuffer digest() {
return this.digest;
}
public HMacOutput build() {
if (Objects.isNull(this.digest())) {
throw new IllegalArgumentException("Missing value for required field `digest`");
}
return new HMacOutput(this);
}
}
}
| 2,999 |