本文整理了Java中org.apache.flink.api.common.operators.Keys类的一些代码示例,展示了Keys类的具体用法。这些代码示例主要来源于Github/Stackoverflow/Maven等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。Keys类的具体详情如下:
包路径:org.apache.flink.api.common.operators.Keys
类名称:Keys
暂无
代码示例来源:origin: apache/flink
/**
 * Converts the second input's type information into a {@code TaggedValue}, tagging it with
 * the logical key positions of the second key set when one is present.
 *
 * @return the second input represented as a {@code TaggedValue}
 */
public TaggedValue getInput2AsTaggedValue() {
	// When no keys are defined for input 2, pass null so no fields are marked as grouped keys.
	final int[] keyPositions = (keys2 == null) ? null : keys2.computeLogicalKeyPositions();
	return convertTypeInfoToTaggedValue(Input.INPUT_2, in2Type, "", null, keyPositions);
}
代码示例来源:origin: apache/flink
// NOTE(review): scraped snippet — the generic method signature, the for-loop header, and
// several type parameters were stripped by HTML extraction, and "logicalKeyPositiOns="
// should read "logicalKeyPositions =". Presumably KeySelectorUtil.getSelectorForKeys —
// verify against the Flink source before reusing.
public static
// Key selectors require a composite input type (Tuple / POJO / case class) so that
// individual fields can be addressed.
if (!(typeInfo instanceof CompositeType)) {
throw new InvalidTypesException(
"This key operation requires a composite type such as Tuples, POJOs, or Case Classes.");
}
CompositeType
int[] logicalKeyPositiOns= keys.computeLogicalKeyPositions();
int numKeyFields = logicalKeyPositions.length;
TypeInformation>[] typeInfos = keys.getKeyFieldTypes();
// use ascending order here, the code paths for that are usually a slight bit faster
boolean[] orders = new boolean[numKeyFields];
for (int i = 0; i
}
TypeComparator
// Wrap the comparator in a selector that emits the key fields as a tuple.
return new ComparableKeySelector<>(comparator, numKeyFields, new TupleTypeInfo<>(typeInfos));
}
代码示例来源:origin: apache/flink
// NOTE(review): scraped snippet — the generic signature and several type parameters were
// stripped by HTML extraction ("public static" / "Keys" / "CompositeType" / "TypeComparator"
// lines are truncated), and "logicalKeyPositiOns=" should read "logicalKeyPositions =".
// Presumably KeySelectorUtil.getSelectorForOneKey — verify against the Flink source.
public static
Keys
// Single-key selectors only work on composite types whose fields can be addressed.
if (!(typeInfo instanceof CompositeType)) {
throw new InvalidTypesException(
"This key operation requires a composite type such as Tuples, POJOs, case classes, etc");
}
// A custom partitioner, if given, must be valid for the key set.
if (partitioner != null) {
keys.validateCustomPartitioner(partitioner, null);
}
CompositeType
int[] logicalKeyPositiOns= keys.computeLogicalKeyPositions();
// This variant supports exactly one flat key field.
if (logicalKeyPositions.length != 1) {
throw new IllegalArgumentException("There must be exactly 1 key specified");
}
TypeComparator
logicalKeyPositions, new boolean[] { true }, 0, executionConfig);
return new OneKeySelector<>(comparator);
}
代码示例来源:origin: apache/flink
// NOTE(review): scraped snippet — the method signature and both for-loop headers/bodies were
// truncated by HTML extraction, and "logicalKeyPositiOns=" should read
// "logicalKeyPositions =". Presumably PartitionOperator.computeOrdering(Keys, Order[]) —
// verify against the Flink source.
private static
Ordering ordering = new Ordering();
final int[] logicalKeyPositiOns= pKeys.computeLogicalKeyPositions();
// With no explicit orders, every logical key position is ordered ascending.
if (orders == null) {
for (int key : logicalKeyPositions) {
ordering.appendOrdering(key, null, Order.ASCENDING);
}
} else {
// Otherwise each original key field is expanded into its flat fields, and the
// corresponding order is applied to every flat position it covers.
final TypeInformation>[] originalKeyFieldTypes = pKeys.getOriginalKeyFieldTypes();
int index = 0;
for (int i = 0; i
for (int j = index; j
}
index += typeTotalFields;
}
}
return ordering;
}
代码示例来源:origin: apache/flink
// NOTE(review): scraped snippet — the constructor's parameter list was scrambled by HTML
// extraction (types/names fused, a stray "{private" appended) and the body references
// parameters (input, pMethod, pKeys) that the garbled header no longer declares.
// "partitiOnLocationName=" / "customPartitiOner=" should read "partitionLocationName =" /
// "customPartitioner =". Verify against the Flink PartitionOperator source.
PartitionOperator(DataSet customPartitioner, partitionerTypeInfo, DataDistribution distribution, String partitionLocationName) {private
TypeInformation
super(input, input.getType());
// Validate the combination of partition method, keys, partitioner and distribution.
Preconditions.checkNotNull(pMethod);
Preconditions.checkArgument(pKeys != null || pMethod == PartitionMethod.REBALANCE, "Partitioning requires keys");
Preconditions.checkArgument(pMethod != PartitionMethod.CUSTOM || customPartitioner != null, "Custom partioning requires a partitioner.");
Preconditions.checkArgument(distribution == null || pMethod == PartitionMethod.RANGE, "Customized data distribution is only neccessary for range partition.");
// A data distribution must cover at least all flat key fields, with matching types.
if (distribution != null) {
Preconditions.checkArgument(pKeys.getNumberOfKeyFields() <= distribution.getNumberOfFields(), "The distribution must provide at least as many fields as flat key fields are specified.");
Preconditions.checkArgument(Arrays.equals(pKeys.getKeyFieldTypes(), Arrays.copyOfRange(distribution.getKeyTypes(), 0, pKeys.getNumberOfKeyFields())),
"The types of the flat key fields must be equal to the types of the fields of the distribution.");
}
// A custom partitioner must be compatible with the key type.
if (customPartitioner != null) {
pKeys.validateCustomPartitioner(customPartitioner, partitionerTypeInfo);
}
this.pMethod = pMethod;
this.pKeys = pKeys;
this.partitiOnLocationName= partitionLocationName;
this.customPartitiOner= customPartitioner;
this.distribution = distribution;
}
代码示例来源:origin: apache/flink
if (!keys1.areCompatible(keys2)) {
throw new InvalidProgramException("The types of the key fields do not match.");
int[] positiOns= keys1.computeLogicalKeyPositions();
((SolutionSetPlaceHolder>) input1).checkJoinKeyFields(positions);
} else {
int[] positiOns= keys2.computeLogicalKeyPositions();
((SolutionSetPlaceHolder>) input2).checkJoinKeyFields(positions);
} else {
代码示例来源:origin: apache/flink
/**
 * Sets a custom partitioner for the CoGroup operation. The partitioner will be called on the join keys to determine
 * the partition a key should be assigned to. The partitioner is evaluated on both inputs in the
 * same way.
 *
 * <p>NOTE: A custom partitioner can only be used with single-field CoGroup keys, not with composite CoGroup keys.
 *
 * @param partitioner The custom partitioner to be used.
 * @return This CoGroup operator, to allow for function chaining.
 */
// Applies a custom partitioner to both CoGroup inputs after validating that each key set
// supports custom partitioning.
// NOTE(review): the return type's generic parameters and the method name/parameter list
// were truncated by HTML extraction (presumably withPartitioner(Partitioner<?> partitioner)),
// and "customPartitiOner=" should read "customPartitioner =".
public CoGroupOperator
if (partitioner != null) {
// Both key sets must be valid for the same partitioner (null = no key-type check here).
keys1.validateCustomPartitioner(partitioner, null);
keys2.validateCustomPartitioner(partitioner, null);
}
// clean() presumably closure-cleans the user object before storing it — confirm in DataSet.
this.customPartitiOner= getInput1().clean(partitioner);
return this;
}
代码示例来源:origin: apache/flink
/**
* Check if two sets of keys are compatible to each other (matching types, key counts)
*/
public boolean areCompatible(Keys> other) throws IncompatibleKeysException {
TypeInformation>[] thisKeyFieldTypes = this.getKeyFieldTypes();
TypeInformation>[] otherKeyFieldTypes = other.getKeyFieldTypes();
if (thisKeyFieldTypes.length != otherKeyFieldTypes.length) {
throw new IncompatibleKeysException(IncompatibleKeysException.SIZE_MISMATCH_MESSAGE);
} else {
for (int i = 0; i
throw new IncompatibleKeysException(thisKeyFieldTypes[i], otherKeyFieldTypes[i] );
}
}
}
return true;
}
代码示例来源:origin: apache/flink
/**
 * Returns {@code true} when this key set defines no key fields at all.
 */
public boolean isEmpty() {
	final int numKeyFields = getNumberOfKeyFields();
	return numKeyFields == 0;
}
代码示例来源:origin: apache/flink
/**
 * Sets the order of keys for range partitioning.
 * NOTE: Only valid for {@link PartitionMethod#RANGE}.
 *
 * @param orders array of orders for each specified partition key
 * @return The PartitionOperator with properly set orders for the given keys
 */
@PublicEvolving
// NOTE(review): the method name and parameter list after "PartitionOperator" were truncated
// by HTML extraction; presumably withOrders(Order... orders) — verify against Flink source.
public PartitionOperator
Preconditions.checkState(pMethod == PartitionMethod.RANGE, "Orders cannot be applied for %s partition " +
"method", pMethod);
// There must be exactly one order per original key field.
Preconditions.checkArgument(pKeys.getOriginalKeyFieldTypes().length == orders.length, "The number of key " +
"fields and orders should be the same.");
this.orders = orders;
return this;
}
代码示例来源:origin: apache/flink
// Verifies that two selector-function key sets with the same key type (String) are
// mutually compatible.
// NOTE(review): scraped snippet — the generic type parameters on the local declarations
// ("TypeInformation", "Keys") were stripped by HTML extraction.
@Test
public void testAreCompatible1() throws Keys.IncompatibleKeysException {
TypeInformation
TypeInformation
new TupleTypeInfo<>(BasicTypeInfo.INT_TYPE_INFO, BasicTypeInfo.STRING_TYPE_INFO);
Keys
new KeySelector1(),
t1,
BasicTypeInfo.STRING_TYPE_INFO
);
Keys
new KeySelector2(),
t2,
BasicTypeInfo.STRING_TYPE_INFO
);
// Compatibility must hold in both directions (symmetric).
Assert.assertTrue(k1.areCompatible(k2));
Assert.assertTrue(k2.areCompatible(k1));
}
代码示例来源:origin: apache/flink
/**
 * Creates a grouping of the given data set on the given keys.
 *
 * <p>NOTE(review): HTML extraction truncated the original parameter list after
 * {@code DataSet}; it is reconstructed here from the body, which requires exactly a data
 * set and a key set — confirm the generic parameters against the Flink source.
 *
 * @param set the data set to group; must not be null
 * @param keys the grouping keys; must not be null or empty
 * @throws NullPointerException if {@code set} or {@code keys} is null
 * @throws InvalidProgramException if the key set is empty
 */
public Grouping(DataSet<T> set, Keys<T> keys) {
	if (set == null || keys == null) {
		throw new NullPointerException();
	}
	if (keys.isEmpty()) {
		throw new InvalidProgramException("The grouping keys must not be empty.");
	}
	this.inputDataSet = set;
	this.keys = keys;
}
代码示例来源:origin: apache/flink
String name = getName() != null ? getName() : "CoGroup at " + defaultName;
try {
keys1.areCompatible(keys2);
} catch (IncompatibleKeysException e) {
throw new InvalidProgramException("The types of the key fields do not match.", e);
keys1.areCompatible(keys2);
} catch (IncompatibleKeysException e) {
throw new InvalidProgramException("The types of the key fields do not match.", e);
int[] logicalKeyPositions1 = keys1.computeLogicalKeyPositions();
int[] logicalKeyPositions2 = keys2.computeLogicalKeyPositions();
代码示例来源:origin: com.alibaba.blink/flink-java
// NOTE(review): scraped snippet (com.alibaba.blink copy) — the constructor's parameter list
// was scrambled by HTML extraction (types/names fused, a stray "{private" appended) and the
// body references parameters (input, pMethod, pKeys) the garbled header no longer declares.
// "partitiOnLocationName=" / "customPartitiOner=" should read "partitionLocationName =" /
// "customPartitioner =". Verify against the Flink PartitionOperator source.
PartitionOperator(DataSet customPartitioner, partitionerTypeInfo, DataDistribution distribution, String partitionLocationName) {private
TypeInformation
super(input, input.getType());
// Validate the combination of partition method, keys, partitioner and distribution.
Preconditions.checkNotNull(pMethod);
Preconditions.checkArgument(pKeys != null || pMethod == PartitionMethod.REBALANCE, "Partitioning requires keys");
Preconditions.checkArgument(pMethod != PartitionMethod.CUSTOM || customPartitioner != null, "Custom partioning requires a partitioner.");
Preconditions.checkArgument(distribution == null || pMethod == PartitionMethod.RANGE, "Customized data distribution is only neccessary for range partition.");
// A data distribution must cover at least all flat key fields, with matching types.
if (distribution != null) {
Preconditions.checkArgument(pKeys.getNumberOfKeyFields() <= distribution.getNumberOfFields(), "The distribution must provide at least as many fields as flat key fields are specified.");
Preconditions.checkArgument(Arrays.equals(pKeys.getKeyFieldTypes(), Arrays.copyOfRange(distribution.getKeyTypes(), 0, pKeys.getNumberOfKeyFields())),
"The types of the flat key fields must be equal to the types of the fields of the distribution.");
}
// A custom partitioner must be compatible with the key type.
if (customPartitioner != null) {
pKeys.validateCustomPartitioner(customPartitioner, partitionerTypeInfo);
}
this.pMethod = pMethod;
this.pKeys = pKeys;
this.partitiOnLocationName= partitionLocationName;
this.customPartitiOner= customPartitioner;
this.distribution = distribution;
}
代码示例来源:origin: com.alibaba.blink/flink-java
// NOTE(review): scraped snippet (com.alibaba.blink copy) — the method signature and both
// for-loop headers/bodies were truncated by HTML extraction, and "logicalKeyPositiOns="
// should read "logicalKeyPositions =". Presumably
// PartitionOperator.computeOrdering(Keys, Order[]) — verify against the Flink source.
private static
Ordering ordering = new Ordering();
final int[] logicalKeyPositiOns= pKeys.computeLogicalKeyPositions();
// With no explicit orders, every logical key position is ordered ascending.
if (orders == null) {
for (int key : logicalKeyPositions) {
ordering.appendOrdering(key, null, Order.ASCENDING);
}
} else {
// Otherwise each original key field is expanded into its flat fields, and the
// corresponding order is applied to every flat position it covers.
final TypeInformation>[] originalKeyFieldTypes = pKeys.getOriginalKeyFieldTypes();
int index = 0;
for (int i = 0; i
for (int j = index; j
}
index += typeTotalFields;
}
}
return ordering;
}
代码示例来源:origin: apache/flink
/**
 * Sets a custom partitioner for this join. The partitioner will be called on the join keys to determine
 * the partition a key should be assigned to. The partitioner is evaluated on both join inputs in the
 * same way.
 *
 * <p>NOTE: A custom partitioner can only be used with single-field join keys, not with composite join keys.
 *
 * @param partitioner The custom partitioner to be used.
 * @return This join operator, to allow for function chaining.
 */
// Applies a custom partitioner to both join inputs after validating that each key set
// supports custom partitioning.
// NOTE(review): the return type's generic parameters and the method name/parameter list
// were truncated by HTML extraction (presumably withPartitioner(Partitioner<?> partitioner)),
// and "customPartitiOner=" should read "customPartitioner =".
public JoinOperator
if (partitioner != null) {
// Both key sets must be valid for the same partitioner (null = no key-type check here).
keys1.validateCustomPartitioner(partitioner, null);
keys2.validateCustomPartitioner(partitioner, null);
}
// clean() presumably closure-cleans the user object before storing it — confirm in DataSet.
this.customPartitiOner= getInput1().clean(partitioner);
return this;
}
代码示例来源:origin: org.apache.flink/flink-core
/**
* Check if two sets of keys are compatible to each other (matching types, key counts)
*/
public boolean areCompatible(Keys> other) throws IncompatibleKeysException {
TypeInformation>[] thisKeyFieldTypes = this.getKeyFieldTypes();
TypeInformation>[] otherKeyFieldTypes = other.getKeyFieldTypes();
if (thisKeyFieldTypes.length != otherKeyFieldTypes.length) {
throw new IncompatibleKeysException(IncompatibleKeysException.SIZE_MISMATCH_MESSAGE);
} else {
for (int i = 0; i
throw new IncompatibleKeysException(thisKeyFieldTypes[i], otherKeyFieldTypes[i] );
}
}
}
return true;
}
代码示例来源:origin: apache/flink
// NOTE(review): scraped snippet — the constructor signature and the validation for-loop
// were truncated by HTML extraction; "groupSortKeyPositiOns=" /
// "groupSortSelectorFunctiOnKey=" should read "groupSortKeyPositions =" /
// "groupSortSelectorFunctionKey =". Presumably the SortedGrouping constructor taking a
// KeySelector and an Order — verify against the Flink source.
public
super(set, keys);
// Sorting on a KeySelector is only supported when the grouping also used a KeySelector.
if (!(this.keys instanceof Keys.SelectorFunctionKeys)) {
throw new InvalidProgramException("Sorting on KeySelector keys only works with KeySelector grouping.");
}
TypeInformation> sortKeyType = keySelector.getKeyType();
// The extracted sort key type must itself be sortable.
if (!sortKeyType.isSortKeyType()) {
throw new InvalidProgramException("Key type " + sortKeyType + " is not sortable.");
}
this.groupSortKeyPositiOns= keySelector.computeLogicalKeyPositions();
for (int i = 0; i
}
this.groupSortSelectorFunctiOnKey= keySelector;
// Apply the same order to every logical sort key position.
this.groupSortOrders = new Order[groupSortKeyPositions.length];
Arrays.fill(this.groupSortOrders, order);
}
代码示例来源:origin: apache/flink
// Verifies that a selector-function key set reports the selector's key type (String) as
// its single original key field type.
// NOTE(review): scraped snippet — generic type parameters on the local declarations
// ("TypeInformation", "Keys") were stripped by HTML extraction.
@Test
public void testOriginalTypes1() throws Keys.IncompatibleKeysException {
TypeInformation
BasicTypeInfo.INT_TYPE_INFO,
BasicTypeInfo.STRING_TYPE_INFO
);
Keys
new KeySelector2(),
t2,
BasicTypeInfo.STRING_TYPE_INFO
);
Assert.assertArrayEquals(
new TypeInformation>[] { BasicTypeInfo.STRING_TYPE_INFO },
k.getOriginalKeyFieldTypes()
);
}
代码示例来源:origin: apache/flink
// Verifies that a selector-function key set is compatible with an expression key set whose
// (String) key type matches.
// NOTE(review): scraped snippet — generic type parameters and the ExpressionKeys
// construction were stripped by HTML extraction.
@Test
public void testAreCompatible3() throws Keys.IncompatibleKeysException {
TypeInformation
TypeInformation
Keys.ExpressionKeys
Keys
new KeySelector1(),
t2,
BasicTypeInfo.STRING_TYPE_INFO
);
Assert.assertTrue(sk2.areCompatible(ek1));
}