diff --git a/flink-annotations/src/main/java/org/apache/flink/annotation/docs/Documentation.java b/flink-annotations/src/main/java/org/apache/flink/annotation/docs/Documentation.java
index 5e772c8cabf20..a59844ac4f821 100644
--- a/flink-annotations/src/main/java/org/apache/flink/annotation/docs/Documentation.java
+++ b/flink-annotations/src/main/java/org/apache/flink/annotation/docs/Documentation.java
@@ -136,7 +136,7 @@ public String toString() {
 
     /**
     * Annotation used on config option fields or options class to mark them as a suffix-option; i.e., a config option
-     * where the key is only a suffix, with the prefix being danymically provided at runtime.
+     * where the key is only a suffix, with the prefix being dynamically provided at runtime.
     */
    @Target({ElementType.FIELD, ElementType.TYPE})
    @Retention(RetentionPolicy.RUNTIME)
diff --git a/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchApiCallBridge.java b/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchApiCallBridge.java
index 7bbbe7b9e6b43..5a1ecf13b2b82 100644
--- a/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchApiCallBridge.java
+++ b/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchApiCallBridge.java
@@ -56,7 +56,7 @@ public interface ElasticsearchApiCallBridge<C extends AutoCloseable> extends Ser
     * Creates a {@link BulkProcessor.Builder} for creating the bulk processor.
     *
     * @param client the Elasticsearch client.
-     * @param listener the bulk processor listender.
+     * @param listener the bulk processor listener.
     * @return the bulk processor builder.
     */
    BulkProcessor.Builder createBulkProcessorBuilder(C client, BulkProcessor.Listener listener);
diff --git a/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/table/ElasticsearchOptions.java b/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/table/ElasticsearchOptions.java
index b07190c56dce4..9cd781a5f1bcc 100644
--- a/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/table/ElasticsearchOptions.java
+++ b/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/table/ElasticsearchOptions.java
@@ -48,7 +48,7 @@ public enum BackOffType {
            .stringType()
            .asList()
            .noDefaultValue()
-            .withDescription("Elasticseatch hosts to connect to.");
+            .withDescription("Elasticsearch hosts to connect to.");
 
    public static final ConfigOption<String> INDEX_OPTION =
        ConfigOptions.key("index")
            .stringType()
diff --git a/flink-connectors/flink-connector-elasticsearch6/src/main/java/org/apache/flink/streaming/connectors/elasticsearch6/ElasticsearchSink.java b/flink-connectors/flink-connector-elasticsearch6/src/main/java/org/apache/flink/streaming/connectors/elasticsearch6/ElasticsearchSink.java
index b8476138624ce..c9465a22dd78b 100644
--- a/flink-connectors/flink-connector-elasticsearch6/src/main/java/org/apache/flink/streaming/connectors/elasticsearch6/ElasticsearchSink.java
+++ b/flink-connectors/flink-connector-elasticsearch6/src/main/java/org/apache/flink/streaming/connectors/elasticsearch6/ElasticsearchSink.java
@@ -101,7 +101,7 @@ public Builder(List<HttpHost> httpHosts, ElasticsearchSinkFunction<T> elasticsea
        /**
         * Sets the maximum number of actions to buffer for each bulk request.
         *
-         * @param numMaxActions the maxinum number of actions to buffer per bulk request.
+         * @param numMaxActions the maximum number of actions to buffer per bulk request.
         */
        public void setBulkFlushMaxActions(int numMaxActions) {
            this.bulkRequestsConfig.put(CONFIG_KEY_BULK_FLUSH_MAX_ACTIONS, String.valueOf(numMaxActions));
diff --git a/flink-connectors/flink-connector-elasticsearch7/src/main/java/org/apache/flink/streaming/connectors/elasticsearch7/ElasticsearchSink.java b/flink-connectors/flink-connector-elasticsearch7/src/main/java/org/apache/flink/streaming/connectors/elasticsearch7/ElasticsearchSink.java
index 5b874e29491eb..5aae895481ace 100644
--- a/flink-connectors/flink-connector-elasticsearch7/src/main/java/org/apache/flink/streaming/connectors/elasticsearch7/ElasticsearchSink.java
+++ b/flink-connectors/flink-connector-elasticsearch7/src/main/java/org/apache/flink/streaming/connectors/elasticsearch7/ElasticsearchSink.java
@@ -101,7 +101,7 @@ public Builder(List<HttpHost> httpHosts, ElasticsearchSinkFunction<T> elasticsea
        /**
         * Sets the maximum number of actions to buffer for each bulk request.
         *
-         * @param numMaxActions the maxinum number of actions to buffer per bulk request.
+         * @param numMaxActions the maximum number of actions to buffer per bulk request.
         */
        public void setBulkFlushMaxActions(int numMaxActions) {
            Preconditions.checkArgument(
diff --git a/flink-connectors/flink-sql-connector-hbase/src/main/resources/hbase-default.xml b/flink-connectors/flink-sql-connector-hbase/src/main/resources/hbase-default.xml
index a3bee0c18b173..cd2c15e3b72d5 100644
--- a/flink-connectors/flink-sql-connector-hbase/src/main/resources/hbase-default.xml
+++ b/flink-connectors/flink-sql-connector-hbase/src/main/resources/hbase-default.xml
@@ -257,7 +257,7 @@ possible configurations would overwhelm and obscure the important.
      updates are blocked and flushes are forced. Defaults to 40% of heap (0.4).
      Updates are blocked and flushes are forced until size of all memstores
      in a region server hits hbase.regionserver.global.memstore.size.lower.limit.
-      The default value in this configuration has been intentionally left emtpy in order to
+      The default value in this configuration has been intentionally left empty in order to
      honor the old hbase.regionserver.global.memstore.upperLimit property if present.
    </description>
  </property>
@@ -267,7 +267,7 @@ possible configurations would overwhelm and obscure the important.
      Defaults to 95% of hbase.regionserver.global.memstore.size (0.95).
      A 100% value for this value causes the minimum possible flushing to occur when updates are
      blocked due to memstore limiting.
-      The default value in this configuration has been intentionally left emtpy in order to
+      The default value in this configuration has been intentionally left empty in order to
      honor the old hbase.regionserver.global.memstore.lowerLimit property if present.
    </description>
  </property>
diff --git a/flink-core/src/main/java/org/apache/flink/api/common/typeinfo/Types.java b/flink-core/src/main/java/org/apache/flink/api/common/typeinfo/Types.java
index 34e0bef62e282..31c758706544a 100644
--- a/flink-core/src/main/java/org/apache/flink/api/common/typeinfo/Types.java
+++ b/flink-core/src/main/java/org/apache/flink/api/common/typeinfo/Types.java
@@ -170,7 +170,7 @@ public class Types {
    public static final TypeInformation<LocalDateTime> LOCAL_DATE_TIME = LocalTimeTypeInfo.LOCAL_DATE_TIME;
 
    /**
-     * Returns type infomation for {@link java.time.Instant}. Supports a null value.
+     * Returns type information for {@link java.time.Instant}. Supports a null value.
     */
    public static final TypeInformation<Instant> INSTANT = BasicTypeInfo.INSTANT_TYPE_INFO;
diff --git a/flink-core/src/main/java/org/apache/flink/api/common/typeutils/TypeSerializerConfigSnapshot.java b/flink-core/src/main/java/org/apache/flink/api/common/typeutils/TypeSerializerConfigSnapshot.java
index 5d315ba3bda83..eafa88b933167 100644
--- a/flink-core/src/main/java/org/apache/flink/api/common/typeutils/TypeSerializerConfigSnapshot.java
+++ b/flink-core/src/main/java/org/apache/flink/api/common/typeutils/TypeSerializerConfigSnapshot.java
@@ -62,7 +62,7 @@ public final void setPriorSerializer(TypeSerializer<T> serializer) {
     * Set the user code class loader.
     * Only relevant if this configuration instance was deserialized from binary form.
     *
-     * <p>This method is not part of the public user-facing API, and cannot be overriden.
+     * <p>This method is not part of the public user-facing API, and cannot be overridden.
     *
     * @param userCodeClassLoader user code class loader.
     */
diff --git a/flink-core/src/main/java/org/apache/flink/api/common/typeutils/TypeSerializerSnapshot.java b/flink-core/src/main/java/org/apache/flink/api/common/typeutils/TypeSerializerSnapshot.java
index 0dff5b29aa5ae..a48f610facb8f 100644
--- a/flink-core/src/main/java/org/apache/flink/api/common/typeutils/TypeSerializerSnapshot.java
+++ b/flink-core/src/main/java/org/apache/flink/api/common/typeutils/TypeSerializerSnapshot.java
@@ -39,7 +39,7 @@
 *
 * <ul>
 *     <li>Compatibility checks for new serializers: when new serializers are available,
 *     they need to be checked whether or not they are compatible to read the data written by the previous serializer.
- *     This is performed by providing the new serializer to the correspondibng serializer configuration
+ *     This is performed by providing the new serializer to the corresponding serializer configuration
 *     snapshots in checkpoints.</li>
 *
 *     <li>Factory for a read serializer when schema conversion is required: in the case that new
diff --git a/flink-core/src/main/java/org/apache/flink/api/java/typeutils/MissingTypeInfo.java b/flink-core/src/main/java/org/apache/flink/api/java/typeutils/MissingTypeInfo.java
index 1dd7f01f8be0d..e702079e348a4 100644
--- a/flink-core/src/main/java/org/apache/flink/api/java/typeutils/MissingTypeInfo.java
+++ b/flink-core/src/main/java/org/apache/flink/api/java/typeutils/MissingTypeInfo.java
@@ -36,7 +36,7 @@ public class MissingTypeInfo extends TypeInformation<InvalidTypesException> {
 
    public MissingTypeInfo(String functionName) {
-        this(functionName, new InvalidTypesException("An unknown error occured."));
+        this(functionName, new InvalidTypesException("An unknown error occurred."));
    }
 
    public MissingTypeInfo(String functionName, InvalidTypesException typeException) {
diff --git a/flink-core/src/main/java/org/apache/flink/api/java/typeutils/runtime/PojoComparator.java b/flink-core/src/main/java/org/apache/flink/api/java/typeutils/runtime/PojoComparator.java
index 227848916c210..529fd0d1a61d2 100644
--- a/flink-core/src/main/java/org/apache/flink/api/java/typeutils/runtime/PojoComparator.java
+++ b/flink-core/src/main/java/org/apache/flink/api/java/typeutils/runtime/PojoComparator.java
@@ -195,7 +195,7 @@ public int hash(T value) {
            try {
                code += this.comparators[i].hash(accessField(keyFields[i], value));
            }catch(NullPointerException npe) {
-                throw new RuntimeException("A NullPointerException occured while accessing a key field in a POJO. " +
+                throw new RuntimeException("A NullPointerException occurred while accessing a key field in a POJO. " +
                    "Most likely, the value grouped/joined on is null. Field name: "+keyFields[i].getName(), npe);
            }
        }
diff --git a/flink-examples/flink-examples-batch/src/main/java/org/apache/flink/examples/java/distcp/DistCp.java b/flink-examples/flink-examples-batch/src/main/java/org/apache/flink/examples/java/distcp/DistCp.java
index 3e6624119931c..7a677f5b24ea3 100644
--- a/flink-examples/flink-examples-batch/src/main/java/org/apache/flink/examples/java/distcp/DistCp.java
+++ b/flink-examples/flink-examples-batch/src/main/java/org/apache/flink/examples/java/distcp/DistCp.java
@@ -50,7 +50,7 @@
 * It's a simple reimplementation of Hadoop distcp
 * (see http://hadoop.apache.org/docs/r1.2.1/distcp.html)
 * with a dynamic input format
- * Note that this tool does not deal with retriability. Additionally, empty directories are not copied over.
+ * Note that this tool does not handle retries of failed copies. Additionally, empty directories are not copied over.
 *
 * <p>When running locally, local file systems paths can be used.
 * However, in a distributed environment HDFS paths must be provided both as input and output.
diff --git a/flink-formats/flink-json/src/main/java/org/apache/flink/formats/json/TimestampFormat.java b/flink-formats/flink-json/src/main/java/org/apache/flink/formats/json/TimestampFormat.java
index e9db8de2a7df3..f06bfada53d34 100644
--- a/flink-formats/flink-json/src/main/java/org/apache/flink/formats/json/TimestampFormat.java
+++ b/flink-formats/flink-json/src/main/java/org/apache/flink/formats/json/TimestampFormat.java
@@ -29,7 +29,7 @@ public enum TimestampFormat {
     * TIMESTAMP_WITH_LOCAL_TIMEZONE in "yyyy-MM-dd HH:mm:ss.s{precision}'Z'" and output in the same format.*/
    SQL,
 
-    /** Options to specify TIMESTAMP/TIMESTAMP_WITH_LOCAL_ZONE format. It will pase TIMESTAMP in "yyyy-MM-ddTHH:mm:ss.s{precision}" format,
+    /** Options to specify TIMESTAMP/TIMESTAMP_WITH_LOCAL_ZONE format. It will parse TIMESTAMP in "yyyy-MM-ddTHH:mm:ss.s{precision}" format,
     * TIMESTAMP_WITH_LOCAL_TIMEZONE in "yyyy-MM-ddTHH:mm:ss.s{precision}'Z'" and output in the same format.*/
    ISO_8601
 }
diff --git a/flink-java/src/main/java/org/apache/flink/api/java/operators/DataSink.java b/flink-java/src/main/java/org/apache/flink/api/java/operators/DataSink.java
index cd25aa452c5cd..8615ae7c21de8 100644
--- a/flink-java/src/main/java/org/apache/flink/api/java/operators/DataSink.java
+++ b/flink-java/src/main/java/org/apache/flink/api/java/operators/DataSink.java
@@ -142,7 +142,7 @@ public DataSink<T> sortLocalOutput(int field, Order order) {
            this.sortOrders = new Order[flatKeys.length];
            Arrays.fill(this.sortOrders, order);
        } else {
-            // append sorting info to exising info
+            // append sorting info to existing info
            int oldLength = this.sortKeyPositions.length;
            int newLength = oldLength + flatKeys.length;
            this.sortKeyPositions = Arrays.copyOf(this.sortKeyPositions, newLength);
diff --git a/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/library/metric/directed/EdgeMetrics.java b/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/library/metric/directed/EdgeMetrics.java
index 796667f042f90..810433cb7c7b8 100644
--- a/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/library/metric/directed/EdgeMetrics.java
+++ b/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/library/metric/directed/EdgeMetrics.java
@@ -74,7 +74,7 @@ public class EdgeMetrics<K extends Comparable<K> & CopyableValue<K>, VV, EV>
 * Implementation notes:
 *
 * <p>Use aggregator to replace SumEdgeStats when aggregators are rewritten to use
- * a hash-combineable hashable-reduce.
+ * a hash-combinable hashable-reduce.
 *
 * <p>Use distinct to replace ReduceEdgeStats when the combiner can be disabled
 * with a sorted-reduce forced.
diff --git a/flink-ml-parent/flink-ml-lib/src/main/java/org/apache/flink/ml/common/linalg/DenseMatrix.java b/flink-ml-parent/flink-ml-lib/src/main/java/org/apache/flink/ml/common/linalg/DenseMatrix.java
index 2b25aa1b4a727..430c868216917 100644
--- a/flink-ml-parent/flink-ml-lib/src/main/java/org/apache/flink/ml/common/linalg/DenseMatrix.java
+++ b/flink-ml-parent/flink-ml-lib/src/main/java/org/apache/flink/ml/common/linalg/DenseMatrix.java
@@ -55,7 +55,7 @@ public class DenseMatrix implements Serializable {
     * Construct an m-by-n matrix of zeros.
     *
     * @param m Number of rows.
-     * @param n Number of colums.
+     * @param n Number of columns.
     */
    public DenseMatrix(int m, int n) {
        this(m, n, new double[m * n], false);
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/rpc/FencedRpcEndpoint.java b/flink-runtime/src/main/java/org/apache/flink/runtime/rpc/FencedRpcEndpoint.java
index 6dc9773de4954..0b5f6194e0d23 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/rpc/FencedRpcEndpoint.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/rpc/FencedRpcEndpoint.java
@@ -126,7 +126,7 @@ protected void runAsyncWithoutFencing(Runnable runnable) {
     * Run the given callable in the main thread of the RpcEndpoint without checking the fencing
     * token. This allows to run operations outside of the fencing token scope.
     *
-     * @param callable to run in the main thread of the rpc endpoint without checkint the fencing token.
+     * @param callable to run in the main thread of the rpc endpoint without checking the fencing token.
     * @param timeout for the operation.
     * @return Future containing the callable result.
     */
diff --git a/flink-runtime/src/test/java/org/apache/flink/runtime/dispatcher/DispatcherTest.java b/flink-runtime/src/test/java/org/apache/flink/runtime/dispatcher/DispatcherTest.java
index c04e7657a36a5..854cfcdbfca3d 100755
--- a/flink-runtime/src/test/java/org/apache/flink/runtime/dispatcher/DispatcherTest.java
+++ b/flink-runtime/src/test/java/org/apache/flink/runtime/dispatcher/DispatcherTest.java
@@ -436,7 +436,7 @@ public void testWaitingForJobMasterLeadership() throws Exception {
    }
 
    /**
-     * Tests that the {@link Dispatcher} fails fatally if the recoverd jobs cannot be started.
+     * Tests that the {@link Dispatcher} fails fatally if the recovered jobs cannot be started.
     * See FLINK-9097.
     */
    @Test
diff --git a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/functions/sink/WriteFormatAsCsv.java b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/functions/sink/WriteFormatAsCsv.java
index fc95455cce5a7..02f285a1370e0 100644
--- a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/functions/sink/WriteFormatAsCsv.java
+++ b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/functions/sink/WriteFormatAsCsv.java
@@ -47,7 +47,7 @@ protected void write(String path, ArrayList<IN> tupleList) {
                outStream.println(strTuple.substring(1, strTuple.length() - 1));
            }
        } catch (IOException e) {
-            throw new RuntimeException("Exception occured while writing file " + path, e);
+            throw new RuntimeException("Exception occurred while writing file " + path, e);
        }
    }
diff --git a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/functions/sink/WriteFormatAsText.java b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/functions/sink/WriteFormatAsText.java
index 57c79a80f1fe7..9424b1fa57370 100644
--- a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/functions/sink/WriteFormatAsText.java
+++ b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/functions/sink/WriteFormatAsText.java
@@ -46,7 +46,7 @@ public void write(String path, ArrayList<IN> tupleList) {
                outStream.println(tupleToWrite);
            }
        } catch (IOException e) {
-            throw new RuntimeException("Exception occured while writing file " + path, e);
+            throw new RuntimeException("Exception occurred while writing file " + path, e);
        }
    }
 }
diff --git a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/operators/TimerSerializer.java b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/operators/TimerSerializer.java
index 1abf8f2a4f952..0caf21628d22d 100644
--- a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/operators/TimerSerializer.java
+++ b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/operators/TimerSerializer.java
@@ -217,7 +217,7 @@ public TypeSerializer<N> getNamespaceSerializer() {
    }
 
    /**
-     * Snaphot of a {@link TimerSerializer}.
+     * Snapshot of a {@link TimerSerializer}.
     *
     * @param <K> type of key.
     * @param <N> type of namespace.
diff --git a/flink-table/flink-table-common/src/main/java/org/apache/flink/table/types/logical/DistinctType.java b/flink-table/flink-table-common/src/main/java/org/apache/flink/table/types/logical/DistinctType.java
index 54c4b938c0727..bb613d5b6ba17 100644
--- a/flink-table/flink-table-common/src/main/java/org/apache/flink/table/types/logical/DistinctType.java
+++ b/flink-table/flink-table-common/src/main/java/org/apache/flink/table/types/logical/DistinctType.java
@@ -38,7 +38,7 @@
 *
 * <p>Distinct types are implicitly final and do not support super types.
 *
- * <p>Most other properties are forwarded from the source type. Thus, ordering and comparision among
+ * <p>Most other properties are forwarded from the source type. Thus, ordering and comparison among
 * the same distinct types are supported.
 *
 * <p>The serialized string representation is the fully qualified name of this type which means that
diff --git a/flink-table/flink-table-common/src/test/java/org/apache/flink/table/catalog/CatalogTest.java b/flink-table/flink-table-common/src/test/java/org/apache/flink/table/catalog/CatalogTest.java
index ca7bda851af69..9df41aedf7a9f 100644
--- a/flink-table/flink-table-common/src/test/java/org/apache/flink/table/catalog/CatalogTest.java
+++ b/flink-table/flink-table-common/src/test/java/org/apache/flink/table/catalog/CatalogTest.java
@@ -1360,7 +1360,7 @@ public Optional<String> getDetailedDescription() {
    }
 
    // ------ equality check utils ------
-    // Can be overriden by sub test class
+    // Can be overridden by sub test class
 
    protected void checkEquals(CatalogFunction f1, CatalogFunction f2) {
        assertEquals(f1.getClassName(), f2.getClassName());
diff --git a/flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/planner/functions/utils/AggSqlFunction.scala b/flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/planner/functions/utils/AggSqlFunction.scala
index c4b627a04f5b6..c30cfad481bb1 100644
--- a/flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/planner/functions/utils/AggSqlFunction.scala
+++ b/flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/planner/functions/utils/AggSqlFunction.scala
@@ -66,7 +66,7 @@ class AggSqlFunction(
        fromDataTypeToLogicalType(externalResultType), typeFactory)),
    createOperandTypeInference(displayName, aggregateFunction, typeFactory, externalAccType),
    createOperandTypeChecker(displayName, aggregateFunction, externalAccType),
-    // Do not need to provide a calcite aggregateFunction here. Flink aggregateion function
+    // Do not need to provide a calcite aggregateFunction here. Flink aggregation function
    // will be generated when translating the calcite relnode to flink runtime execution plan
    null,
    false,
diff --git a/flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/planner/plan/nodes/physical/batch/BatchExecGroupAggregateBase.scala b/flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/planner/plan/nodes/physical/batch/BatchExecGroupAggregateBase.scala
index 502a932211272..9bed7733a3af7 100644
--- a/flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/planner/plan/nodes/physical/batch/BatchExecGroupAggregateBase.scala
+++ b/flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/planner/plan/nodes/physical/batch/BatchExecGroupAggregateBase.scala
@@ -61,7 +61,7 @@ abstract class BatchExecGroupAggregateBase(
  with BatchPhysicalRel {
 
  if (grouping.isEmpty && auxGrouping.nonEmpty) {
-    throw new TableException("auxGrouping should be empty if grouping is emtpy.")
+    throw new TableException("auxGrouping should be empty if grouping is empty.")
  }
 
  override def deriveRowType(): RelDataType = outputRowType
diff --git a/flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/planner/plan/nodes/physical/batch/BatchExecWindowAggregateBase.scala b/flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/planner/plan/nodes/physical/batch/BatchExecWindowAggregateBase.scala
index 919f0b4a5dc81..63ba6af7952f3 100644
--- a/flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/planner/plan/nodes/physical/batch/BatchExecWindowAggregateBase.scala
+++ b/flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/planner/plan/nodes/physical/batch/BatchExecWindowAggregateBase.scala
@@ -47,7 +47,7 @@ abstract class BatchExecWindowAggregateBase(
  with BatchPhysicalRel {
 
  if (grouping.isEmpty && auxGrouping.nonEmpty) {
-    throw new TableException("auxGrouping should be empty if grouping is emtpy.")
+    throw new TableException("auxGrouping should be empty if grouping is empty.")
  }
 
  def getGrouping: Array[Int] = grouping
diff --git a/flink-table/flink-table-runtime-blink/src/main/java/org/apache/flink/table/runtime/operators/window/internal/MergingWindowSet.java b/flink-table/flink-table-runtime-blink/src/main/java/org/apache/flink/table/runtime/operators/window/internal/MergingWindowSet.java
index 915f2e858e75f..6e7b01ac96ee6 100644
--- a/flink-table/flink-table-runtime-blink/src/main/java/org/apache/flink/table/runtime/operators/window/internal/MergingWindowSet.java
+++ b/flink-table/flink-table-runtime-blink/src/main/java/org/apache/flink/table/runtime/operators/window/internal/MergingWindowSet.java
@@ -171,7 +171,7 @@ public W addWindow(W newWindow, MergeFunction<W> mergeFunction) throws Exception
            resultWindow = mergeResult;
        }
 
-        // if our new window is the same as a pre-exising window, nothing to do
+        // if our new window is the same as a pre-existing window, nothing to do
        if (mergedWindows.isEmpty()) {
            continue;
        }
@@ -199,7 +199,7 @@ public W addWindow(W newWindow, MergeFunction<W> mergeFunction) throws Exception
 
        // don't merge the new window itself, it never had any state associated with it
        // i.e. if we are only merging one pre-existing window into itself
-        // without extending the pre-exising window
+        // without extending the pre-existing window
        if (!(mergedWindows.contains(mergeResult) && mergedWindows.size() == 1)) {
            mergeFunction.merge(mergeResult,
                mergedWindows,
diff --git a/flink-test-utils-parent/flink-test-utils/src/main/java/org/apache/flink/test/util/SecureTestEnvironment.java b/flink-test-utils-parent/flink-test-utils/src/main/java/org/apache/flink/test/util/SecureTestEnvironment.java
index 31f47561b354b..637e93afa54b0 100644
--- a/flink-test-utils-parent/flink-test-utils/src/main/java/org/apache/flink/test/util/SecureTestEnvironment.java
+++ b/flink-test-utils-parent/flink-test-utils/src/main/java/org/apache/flink/test/util/SecureTestEnvironment.java
@@ -150,7 +150,7 @@ public static void prepare(TemporaryFolder tempFolder) {
 
            populateJavaPropertyVariables();
        } catch (Exception e) {
-            throw new RuntimeException("Exception occured while preparing secure environment.", e);
+            throw new RuntimeException("Exception occurred while preparing secure environment.", e);
        }
    }
diff --git a/flink-yarn/src/main/java/org/apache/flink/yarn/YarnClusterDescriptor.java b/flink-yarn/src/main/java/org/apache/flink/yarn/YarnClusterDescriptor.java
index 6d00fbac5dc12..5d6a2bc582831 100644
--- a/flink-yarn/src/main/java/org/apache/flink/yarn/YarnClusterDescriptor.java
+++ b/flink-yarn/src/main/java/org/apache/flink/yarn/YarnClusterDescriptor.java
@@ -1147,7 +1147,7 @@ private void failSessionDuringDeployment(YarnClient yarnClient, YarnClientApplic
            yarnClient.killApplication(yarnApplication.getNewApplicationResponse().getApplicationId());
        } catch (Exception e) {
            // we only log a debug message here because the "killApplication" call is a best-effort
-            // call (we don't know if the application has been deployed when the error occured).
+            // call (we don't know if the application has been deployed when the error occurred).
            LOG.debug("Error while killing YARN application", e);
        }
    }