diff --git a/docs/content/docs/deployment/elastic_scaling.md b/docs/content/docs/deployment/elastic_scaling.md index aa4bbf82f7068..24680ce477569 100644 --- a/docs/content/docs/deployment/elastic_scaling.md +++ b/docs/content/docs/deployment/elastic_scaling.md @@ -98,7 +98,7 @@ If you want the JobManager to stop after a certain time without enough TaskManag With Reactive Mode enabled, the [`jobmanager.adaptive-scheduler.resource-stabilization-timeout`]({{< ref "docs/deployment/config">}}#jobmanager-adaptive-scheduler-resource-stabilization-timeout) configuration key will default to `0`: Flink will start running the job, as soon as there are sufficient resources available. In scenarios where TaskManagers are not connecting at the same time, but slowly one after another, this behavior leads to a job restart whenever a TaskManager connects. Increase this configuration value if you want to wait for the resources to stabilize before scheduling the job. -Additionally, one can configure [`jobmanager.adaptive-scheduler.min-parallelism-increase`]({{< ref "docs/deployment/config">}}#jobmanager-adaptive-scheduler-min-parallelism-increase): This configuration option specifics the minumum amount of additional, aggregate parallelism increase before triggering a scale-up. For example if you have a job with a source (parallelism=2) and a sink (parallelism=2), the aggregate parallelism is 4. By default, the configuration key is set to 1, so any increase in the aggregate parallelism will trigger a restart. +Additionally, one can configure [`jobmanager.adaptive-scheduler.min-parallelism-increase`]({{< ref "docs/deployment/config">}}#jobmanager-adaptive-scheduler-min-parallelism-increase): This configuration option specifies the minimum amount of additional, aggregate parallelism increase before triggering a scale-up. For example, if you have a job with a source (parallelism=2) and a sink (parallelism=2), the aggregate parallelism is 4. By default, the configuration key is set to 1, so any increase in the aggregate parallelism will trigger a restart.
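Complementing the paragraph above, here is a minimal, hypothetical sketch of how these two scheduler options could be set programmatically when experimenting with the adaptive scheduler in a local environment. The class name, the chosen values (`10 s`, `4`) and the tiny pipeline are illustrative assumptions; in an actual Reactive Mode deployment these keys are normally supplied through the cluster configuration (e.g. `flink-conf.yaml`) rather than in job code.

```java
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class AdaptiveSchedulerConfigSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Wait up to 10 s for TaskManagers to register before (re)scheduling the job
        // (illustrative value; with Reactive Mode the default is 0).
        conf.setString("jobmanager.adaptive-scheduler.resource-stabilization-timeout", "10 s");
        // Only restart for a scale-up once the aggregate parallelism can grow by at least 4
        // (illustrative value; the default of 1 restarts on any increase).
        conf.setInteger("jobmanager.adaptive-scheduler.min-parallelism-increase", 4);

        // Local environment that picks up the configuration above.
        StreamExecutionEnvironment env =
                StreamExecutionEnvironment.getExecutionEnvironment(conf);
        env.fromElements(1, 2, 3).print();
        env.execute("adaptive-scheduler-config-sketch");
    }
}
```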
#### Recommendations diff --git a/flink-clients/src/test/java/org/apache/flink/client/program/DefaultPackagedProgramRetrieverTest.java b/flink-clients/src/test/java/org/apache/flink/client/program/DefaultPackagedProgramRetrieverTest.java index d35c3f6696b4d..c7c5bc2e9be02 100644 --- a/flink-clients/src/test/java/org/apache/flink/client/program/DefaultPackagedProgramRetrieverTest.java +++ b/flink-clients/src/test/java/org/apache/flink/client/program/DefaultPackagedProgramRetrieverTest.java @@ -542,7 +542,7 @@ private static List extractRelativizedURLsForJarsFromDirectory(File dire throws MalformedURLException { Preconditions.checkArgument( directory.listFiles() != null, - "The passed File does not seem to be a directory or is not acessible: " + "The passed File does not seem to be a directory or is not accessible: " + directory.getAbsolutePath()); final List relativizedURLs = new ArrayList<>(); diff --git a/flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/source/reader/SourceReaderOptions.java b/flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/source/reader/SourceReaderOptions.java index e0d436a6581d0..fa20439f4101d 100644 --- a/flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/source/reader/SourceReaderOptions.java +++ b/flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/source/reader/SourceReaderOptions.java @@ -22,7 +22,7 @@ import org.apache.flink.configuration.ConfigOptions; import org.apache.flink.configuration.Configuration; -/** The options tht can be set for the {@link SourceReaderBase}. */ +/** The options that can be set for the {@link SourceReaderBase}. */ public class SourceReaderOptions { public static final ConfigOption SOURCE_READER_CLOSE_TIMEOUT = diff --git a/flink-connectors/flink-connector-cassandra/src/test/java/org/apache/flink/batch/connectors/cassandra/CustomCassandraAnnotatedPojo.java b/flink-connectors/flink-connector-cassandra/src/test/java/org/apache/flink/batch/connectors/cassandra/CustomCassandraAnnotatedPojo.java index a62cda3331452..ad212653f8bf9 100644 --- a/flink-connectors/flink-connector-cassandra/src/test/java/org/apache/flink/batch/connectors/cassandra/CustomCassandraAnnotatedPojo.java +++ b/flink-connectors/flink-connector-cassandra/src/test/java/org/apache/flink/batch/connectors/cassandra/CustomCassandraAnnotatedPojo.java @@ -35,7 +35,7 @@ public class CustomCassandraAnnotatedPojo { @Column(name = "batch_id") private Integer batchId; - /** Necessary for the driver's mapper instanciation. */ + /** Necessary for the driver's mapper instantiation. 
*/ public CustomCassandraAnnotatedPojo() {} public CustomCassandraAnnotatedPojo(String id, Integer counter, Integer batchId) { diff --git a/flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/src/enumerate/BlockSplittingRecursiveEnumerator.java b/flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/src/enumerate/BlockSplittingRecursiveEnumerator.java index 9c11de7a71b55..029d9cfbda721 100644 --- a/flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/src/enumerate/BlockSplittingRecursiveEnumerator.java +++ b/flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/src/enumerate/BlockSplittingRecursiveEnumerator.java @@ -138,7 +138,7 @@ private static BlockLocation[] getBlockLocationsForFile(FileStatus file, FileSys // the // file (too expensive) but make some sanity checks to catch early the common cases where // incorrect - // bloc info is returned by the implementation. + // block info is returned by the implementation. long totalLen = 0L; for (BlockLocation block : blocks) { diff --git a/flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/src/reader/SimpleStreamFormat.java b/flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/src/reader/SimpleStreamFormat.java index 935d142533537..d5c931aa40fb6 100644 --- a/flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/src/reader/SimpleStreamFormat.java +++ b/flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/src/reader/SimpleStreamFormat.java @@ -34,7 +34,7 @@ *

This format makes no difference between creating readers from scratch (new file) or from a * checkpoint. Because of that, if the reader actively checkpoints its position (via the {@link * Reader#getCheckpointedPosition()} method) then the checkpointed offset must be a byte offset in - * the file from which the stream can be resumed as if it were te beginning of the file. + * the file from which the stream can be resumed as if it were the beginning of the file. * *

For all other details, please check the docs of {@link StreamFormat}. * diff --git a/flink-connectors/flink-connector-files/src/test/java/org/apache/flink/connector/file/sink/writer/FileWriterBucketStateSerializerMigrationTest.java b/flink-connectors/flink-connector-files/src/test/java/org/apache/flink/connector/file/sink/writer/FileWriterBucketStateSerializerMigrationTest.java index ce9a9c37aaaea..2b8f1ece7b3f5 100644 --- a/flink-connectors/flink-connector-files/src/test/java/org/apache/flink/connector/file/sink/writer/FileWriterBucketStateSerializerMigrationTest.java +++ b/flink-connectors/flink-connector-files/src/test/java/org/apache/flink/connector/file/sink/writer/FileWriterBucketStateSerializerMigrationTest.java @@ -229,7 +229,7 @@ private void testDeserializationFull(final boolean withInProgress, final String .map(file -> file.getFileName().toString()) .collect(Collectors.toSet()); - // after restoring all pending files are comitted. + // after restoring all pending files are committed. // there is no "inporgress" in file name for the committed files. for (int i = 0; i < noOfPendingCheckpoints; i++) { final String part = "part-0-" + i; diff --git a/flink-connectors/flink-connector-hbase-2.2/src/main/java/org/apache/flink/connector/hbase2/source/HBaseRowDataAsyncLookupFunction.java b/flink-connectors/flink-connector-hbase-2.2/src/main/java/org/apache/flink/connector/hbase2/source/HBaseRowDataAsyncLookupFunction.java index 5ce904d869159..a41c7a595ba53 100644 --- a/flink-connectors/flink-connector-hbase-2.2/src/main/java/org/apache/flink/connector/hbase2/source/HBaseRowDataAsyncLookupFunction.java +++ b/flink-connectors/flink-connector-hbase-2.2/src/main/java/org/apache/flink/connector/hbase2/source/HBaseRowDataAsyncLookupFunction.java @@ -59,7 +59,7 @@ import java.util.concurrent.TimeUnit; /** - * The HBaseRowDataAsyncLookupFunction is an implemenation to lookup HBase data by rowkey in async + * The HBaseRowDataAsyncLookupFunction is an implementation to lookup HBase data by rowkey in async * fashion. It looks up the result as {@link RowData}. */ @Internal diff --git a/flink-connectors/flink-connector-hive/src/main/java/org/apache/flink/table/functions/hive/HiveFunctionWrapper.java b/flink-connectors/flink-connector-hive/src/main/java/org/apache/flink/table/functions/hive/HiveFunctionWrapper.java index e6c1326f1cfc7..79e5865bc4436 100644 --- a/flink-connectors/flink-connector-hive/src/main/java/org/apache/flink/table/functions/hive/HiveFunctionWrapper.java +++ b/flink-connectors/flink-connector-hive/src/main/java/org/apache/flink/table/functions/hive/HiveFunctionWrapper.java @@ -25,7 +25,7 @@ import java.io.Serializable; /** - * A wrapper of Hive functions that instantiate function instances and ser/de functino instance + * A wrapper of Hive functions that instantiate function instances and ser/de function instance * cross process boundary. * * @param The type of UDF. 
diff --git a/flink-connectors/flink-connector-hive/src/main/java/org/apache/flink/table/planner/delegation/hive/HiveParserCalcitePlanner.java b/flink-connectors/flink-connector-hive/src/main/java/org/apache/flink/table/planner/delegation/hive/HiveParserCalcitePlanner.java index d5b3a8ac52b2a..ecc4e34e78427 100644 --- a/flink-connectors/flink-connector-hive/src/main/java/org/apache/flink/table/planner/delegation/hive/HiveParserCalcitePlanner.java +++ b/flink-connectors/flink-connector-hive/src/main/java/org/apache/flink/table/planner/delegation/hive/HiveParserCalcitePlanner.java @@ -819,7 +819,7 @@ private RelNode genTableLogicalPlan(String tableAlias, HiveParserQB qb) // 3. Get Table Logical Schema (Row Type) // NOTE: Table logical schema = Non Partition Cols + Partition Cols + Virtual Cols - // 3.1 Add Column info for non partion cols (Object Inspector fields) + // 3.1 Add Column info for non partition cols (Object Inspector fields) StructObjectInspector rowObjectInspector = (StructObjectInspector) table.getDeserializer().getObjectInspector(); List fields = rowObjectInspector.getAllStructFieldRefs(); diff --git a/flink-connectors/flink-connector-hive/src/main/java/org/apache/flink/table/planner/delegation/hive/HiveParserTypeCheckProcFactory.java b/flink-connectors/flink-connector-hive/src/main/java/org/apache/flink/table/planner/delegation/hive/HiveParserTypeCheckProcFactory.java index d23bd2e5b575e..6a3cb9bd66757 100644 --- a/flink-connectors/flink-connector-hive/src/main/java/org/apache/flink/table/planner/delegation/hive/HiveParserTypeCheckProcFactory.java +++ b/flink-connectors/flink-connector-hive/src/main/java/org/apache/flink/table/planner/delegation/hive/HiveParserTypeCheckProcFactory.java @@ -940,7 +940,7 @@ protected void validateUDF( if (fi.getGenericUDTF() != null) { throw new SemanticException(ErrorMsg.UDTF_INVALID_LOCATION.getMsg()); } - // UDAF in filter condition, group-by caluse, param of funtion, etc. + // UDAF in filter condition, group-by clause, param of function, etc. if (fi.getGenericUDAFResolver() != null) { if (isFunction) { throw new SemanticException( diff --git a/flink-connectors/flink-connector-hive/src/main/java/org/apache/flink/table/planner/delegation/hive/copy/HiveASTParseDriver.java b/flink-connectors/flink-connector-hive/src/main/java/org/apache/flink/table/planner/delegation/hive/copy/HiveASTParseDriver.java index e671ae4618331..191958702082d 100644 --- a/flink-connectors/flink-connector-hive/src/main/java/org/apache/flink/table/planner/delegation/hive/copy/HiveASTParseDriver.java +++ b/flink-connectors/flink-connector-hive/src/main/java/org/apache/flink/table/planner/delegation/hive/copy/HiveASTParseDriver.java @@ -48,11 +48,11 @@ public class HiveASTParseDriver { // for the lexical analysis part of antlr. By converting the token stream into // upper case at the time when lexical rules are checked, this class ensures that the // lexical rules need to just match the token with upper case letters as opposed to - // combination of upper case and lower case characteres. This is purely used for matching + // combination of upper case and lower case characters. This is purely used for matching // lexical // rules. The actual token text is stored in the same way as the user input without // actually converting it into an upper case. The token values are generated by the consume() - // function of the super class ANTLRStringStream. The LA() function is the lookahead funtion + // function of the super class ANTLRStringStream. 
The LA() function is the lookahead function // and is purely used for matching lexical rules. This also means that the grammar will only // accept capitalized tokens in case it is run from other tools like antlrworks which // do not have the ANTLRNoCaseStringStream implementation. diff --git a/flink-connectors/flink-connector-hive/src/main/java/org/apache/flink/table/planner/delegation/hive/copy/HiveParserBaseSemanticAnalyzer.java b/flink-connectors/flink-connector-hive/src/main/java/org/apache/flink/table/planner/delegation/hive/copy/HiveParserBaseSemanticAnalyzer.java index c7eb61f0b45ff..c277781fdde59 100644 --- a/flink-connectors/flink-connector-hive/src/main/java/org/apache/flink/table/planner/delegation/hive/copy/HiveParserBaseSemanticAnalyzer.java +++ b/flink-connectors/flink-connector-hive/src/main/java/org/apache/flink/table/planner/delegation/hive/copy/HiveParserBaseSemanticAnalyzer.java @@ -2366,7 +2366,7 @@ public void analyzeRowFormat(HiveParserASTNode child) throws SemanticException { nullFormat = unescapeSQLString(rowChild.getChild(0).getText()); break; default: - throw new AssertionError("Unkown Token: " + rowChild); + throw new AssertionError("Unknown Token: " + rowChild); } } } diff --git a/flink-connectors/flink-connector-hive/src/main/java/org/apache/flink/table/planner/delegation/hive/copy/HiveParserQBParseInfo.java b/flink-connectors/flink-connector-hive/src/main/java/org/apache/flink/table/planner/delegation/hive/copy/HiveParserQBParseInfo.java index 17fc7287554ef..0c37710202b08 100644 --- a/flink-connectors/flink-connector-hive/src/main/java/org/apache/flink/table/planner/delegation/hive/copy/HiveParserQBParseInfo.java +++ b/flink-connectors/flink-connector-hive/src/main/java/org/apache/flink/table/planner/delegation/hive/copy/HiveParserQBParseInfo.java @@ -79,7 +79,7 @@ public class HiveParserQBParseInfo { */ private final HashMap destToSortby; - /** Maping from table/subquery aliases to all the associated lateral view nodes. */ + /** Mapping from table/subquery aliases to all the associated lateral view nodes. */ private final HashMap> aliasToLateralViews; private final HashMap destToLateralView; diff --git a/flink-connectors/flink-connector-hive/src/main/java/org/apache/flink/table/planner/delegation/hive/copy/HiveParserQBSubQuery.java b/flink-connectors/flink-connector-hive/src/main/java/org/apache/flink/table/planner/delegation/hive/copy/HiveParserQBSubQuery.java index 47d7fc455c275..61db3c7b81113 100644 --- a/flink-connectors/flink-connector-hive/src/main/java/org/apache/flink/table/planner/delegation/hive/copy/HiveParserQBSubQuery.java +++ b/flink-connectors/flink-connector-hive/src/main/java/org/apache/flink/table/planner/delegation/hive/copy/HiveParserQBSubQuery.java @@ -552,7 +552,7 @@ public boolean subqueryRestrictionsCheck( */ // Following is special cases for different type of subqueries which have aggregate and no // implicit group by - // and are correlatd + // and are correlated // * EXISTS/NOT EXISTS - NOT allowed, throw an error for now. We plan to allow this later // * SCALAR - only allow if it has non equi join predicate. 
This should return true since // later in subquery remove diff --git a/flink-connectors/flink-connector-hive/src/main/java/org/apache/flink/table/planner/delegation/hive/copy/HiveParserWindowingSpec.java b/flink-connectors/flink-connector-hive/src/main/java/org/apache/flink/table/planner/delegation/hive/copy/HiveParserWindowingSpec.java index 911717053ac73..1db23f4e8e687 100644 --- a/flink-connectors/flink-connector-hive/src/main/java/org/apache/flink/table/planner/delegation/hive/copy/HiveParserWindowingSpec.java +++ b/flink-connectors/flink-connector-hive/src/main/java/org/apache/flink/table/planner/delegation/hive/copy/HiveParserWindowingSpec.java @@ -71,7 +71,7 @@ public void validateAndMakeEffective() throws SemanticException { wFn.setWindowSpec(wdwSpec); } - // 2. A Window Spec with no Parition Spec, is Partitioned on a Constant(number 0) + // 2. A Window Spec with no Partition Spec, is Partitioned on a Constant(number 0) applyConstantPartition(wdwSpec); // 3. For missing Wdw Frames or for Frames with only a Start Boundary, completely diff --git a/flink-connectors/flink-connector-jdbc/src/main/java/org/apache/flink/connector/jdbc/dialect/JdbcDialect.java b/flink-connectors/flink-connector-jdbc/src/main/java/org/apache/flink/connector/jdbc/dialect/JdbcDialect.java index 7bd937f064358..47149cc4219a6 100644 --- a/flink-connectors/flink-connector-jdbc/src/main/java/org/apache/flink/connector/jdbc/dialect/JdbcDialect.java +++ b/flink-connectors/flink-connector-jdbc/src/main/java/org/apache/flink/connector/jdbc/dialect/JdbcDialect.java @@ -93,7 +93,7 @@ default String quoteIdentifier(String identifier) { /** * Get dialect upsert statement, the database has its own upsert syntax, such as Mysql using - * DUPLICATE KEY UPDATE, and PostgresSQL using ON CONFLICT... DO UPDATE SET.. + * DUPLICATE KEY UPDATE, and PostgreSQL using ON CONFLICT... DO UPDATE SET.. * * @return None if dialect does not support upsert statement, the writer will degrade to the use * of select + update/insert, this performance is poor. diff --git a/flink-connectors/flink-connector-kafka/src/main/java/org/apache/flink/connector/kafka/source/enumerator/subscriber/TopicListSubscriber.java b/flink-connectors/flink-connector-kafka/src/main/java/org/apache/flink/connector/kafka/source/enumerator/subscriber/TopicListSubscriber.java index caa1ea013ae7d..b2ad844ab9d19 100644 --- a/flink-connectors/flink-connector-kafka/src/main/java/org/apache/flink/connector/kafka/source/enumerator/subscriber/TopicListSubscriber.java +++ b/flink-connectors/flink-connector-kafka/src/main/java/org/apache/flink/connector/kafka/source/enumerator/subscriber/TopicListSubscriber.java @@ -33,7 +33,7 @@ import static org.apache.flink.connector.kafka.source.enumerator.subscriber.KafkaSubscriberUtils.getTopicMetadata; /** - * A subscriber to a fixed list of topics. The subscribed topics must hav existed in the Kafka + * A subscriber to a fixed list of topics. The subscribed topics must have existed in the Kafka * cluster, otherwise an exception will be thrown. 
*/ class TopicListSubscriber implements KafkaSubscriber { diff --git a/flink-connectors/flink-connector-kafka/src/main/java/org/apache/flink/connector/kafka/source/reader/deserializer/KafkaRecordDeserializationSchema.java b/flink-connectors/flink-connector-kafka/src/main/java/org/apache/flink/connector/kafka/source/reader/deserializer/KafkaRecordDeserializationSchema.java index 24410797044c2..0eccbc94d56ca 100644 --- a/flink-connectors/flink-connector-kafka/src/main/java/org/apache/flink/connector/kafka/source/reader/deserializer/KafkaRecordDeserializationSchema.java +++ b/flink-connectors/flink-connector-kafka/src/main/java/org/apache/flink/connector/kafka/source/reader/deserializer/KafkaRecordDeserializationSchema.java @@ -67,7 +67,7 @@ default void open(DeserializationSchema.InitializationContext context) throws Ex * ConsumerRecord ConsumerRecords}. * *

Note that the {@link KafkaDeserializationSchema#isEndOfStream(Object)} method will no - * longer be used to determin the end of the stream. + * longer be used to determine the end of the stream. * * @param kafkaDeserializationSchema the legacy {@link KafkaDeserializationSchema} to use. * @param the return type of the deserialized record. diff --git a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumerBaseTest.java b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumerBaseTest.java index add4d2fa248fc..5e1000928b393 100644 --- a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumerBaseTest.java +++ b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumerBaseTest.java @@ -261,7 +261,7 @@ public void testConfigureDisableOffsetCommitWithoutCheckpointing() throws Except } /** - * Tests that subscribed partitions didn't change when there's no change on the intial topics. + * Tests that subscribed partitions didn't change when there's no change on the initial topics. * (filterRestoredPartitionsWithDiscovered is active) */ @Test @@ -300,7 +300,7 @@ public void testSetFilterRestoredParitionsWithAddedTopic() throws Exception { } /** - * Tests that subscribed partitions are the same when there's no change on the intial topics. + * Tests that subscribed partitions are the same when there's no change on the initial topics. * (filterRestoredPartitionsWithDiscovered is disabled) */ @Test diff --git a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaConsumerTestBase.java b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaConsumerTestBase.java index 1898b16816bfa..d857a0a41d779 100644 --- a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaConsumerTestBase.java +++ b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaConsumerTestBase.java @@ -168,7 +168,7 @@ public void setClientAndEnsureNoJobIsLingering() throws Exception { // ------------------------------------------------------------------------ /** - * Test that ensures the KafkaConsumer is properly failing if the topic doesnt exist and a wrong + * Test that ensures the KafkaConsumer is properly failing if the topic doesn't exist and a wrong * broker was specified. * * @throws Exception diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/config/ConsumerConfigConstants.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/config/ConsumerConfigConstants.java index 128c173cefed9..ab5af396191d3 100644 --- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/config/ConsumerConfigConstants.java +++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/config/ConsumerConfigConstants.java @@ -72,7 +72,7 @@ public enum RecordPublisherType { POLLING } - /** The EFO registration type reprsents how we are going to de-/register efo consumer. */ + /** The EFO registration type represents how we are going to de-/register efo consumer. 
*/ public enum EFORegistrationType { /** diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/KinesisDataFetcher.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/KinesisDataFetcher.java index acdb2aacd3f38..37b12c20b691d 100644 --- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/KinesisDataFetcher.java +++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/KinesisDataFetcher.java @@ -247,7 +247,7 @@ public class KinesisDataFetcher { * The most recent watermark, calculated from the per shard watermarks. The initial value will * never be emitted and also apply after recovery. The fist watermark that will be emitted is * derived from actually consumed records. In case of recovery and replay, the watermark will - * rewind, consistent wth the shard consumer sequence. + * rewind, consistent with the shard consumer sequence. */ private long lastWatermark = Long.MIN_VALUE; diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/util/KinesisConfigUtil.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/util/KinesisConfigUtil.java index 421f3e2b3f82f..c8e54c02d7673 100644 --- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/util/KinesisConfigUtil.java +++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/util/KinesisConfigUtil.java @@ -433,7 +433,7 @@ public static Properties replaceDeprecatedProducerKeys(Properties configProps) { } /** - * A set of configuration paremeters associated with the describeStreams API may be used if: 1) + * A set of configuration parameters associated with the describeStreams API may be used if: 1) * an legacy client wants to consume from Kinesis 2) a current client wants to consumer from * DynamoDB streams * diff --git a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/enumerator/cursor/StartCursor.java b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/enumerator/cursor/StartCursor.java index 1ad485e72b7cb..9b7c7a431eb54 100644 --- a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/enumerator/cursor/StartCursor.java +++ b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/enumerator/cursor/StartCursor.java @@ -67,7 +67,7 @@ static StartCursor fromMessageId(MessageId messageId) { /** * @param messageId Find the available message id and start consuming from it. - * @param inclusive {@code ture} would include the given message id. + * @param inclusive {@code true} would include the given message id. 
*/ static StartCursor fromMessageId(MessageId messageId, boolean inclusive) { return new MessageIdStartCursor(messageId, inclusive); diff --git a/flink-connectors/flink-connector-pulsar/src/test/resources/containers/txnStandalone.conf b/flink-connectors/flink-connector-pulsar/src/test/resources/containers/txnStandalone.conf index 2c541ad257d5e..f1a403658c01d 100644 --- a/flink-connectors/flink-connector-pulsar/src/test/resources/containers/txnStandalone.conf +++ b/flink-connectors/flink-connector-pulsar/src/test/resources/containers/txnStandalone.conf @@ -465,7 +465,7 @@ tokenAudience= # Authentication plugin to use when connecting to bookies bookkeeperClientAuthenticationPlugin= -# BookKeeper auth plugin implementatation specifics parameters name and values +# BookKeeper auth plugin implementation specifics parameters name and values bookkeeperClientAuthenticationParametersName= bookkeeperClientAuthenticationParameters= @@ -904,7 +904,7 @@ journalSyncData=false # For each ledger dir, maximum disk space which can be used. # Default is 0.95f. i.e. 95% of disk can be used at most after which nothing will -# be written to that partition. If all ledger dir partions are full, then bookie +# be written to that partition. If all ledger dir partitions are full, then bookie # will turn to readonly mode if 'readOnlyModeEnabled=true' is set, else it will # shutdown. # Valid values should be in between 0 and 1 (exclusive). diff --git a/flink-connectors/flink-connector-rabbitmq/src/main/java/org/apache/flink/streaming/connectors/rabbitmq/RMQDeserializationSchema.java b/flink-connectors/flink-connector-rabbitmq/src/main/java/org/apache/flink/streaming/connectors/rabbitmq/RMQDeserializationSchema.java index 717ae66094ed6..f2e22ec1e4419 100644 --- a/flink-connectors/flink-connector-rabbitmq/src/main/java/org/apache/flink/streaming/connectors/rabbitmq/RMQDeserializationSchema.java +++ b/flink-connectors/flink-connector-rabbitmq/src/main/java/org/apache/flink/streaming/connectors/rabbitmq/RMQDeserializationSchema.java @@ -93,7 +93,7 @@ interface RMQCollector extends Collector { *

If not set explicitly, the {@link AMQP.BasicProperties#getCorrelationId()} and {@link * Envelope#getDeliveryTag()} will be used. * - *

NOTE:Can be called once for a single invokation of a {@link + *

NOTE:Can be called once for a single invocation of a {@link * RMQDeserializationSchema#deserialize(Envelope, AMQP.BasicProperties, byte[], * RMQCollector)} method. * diff --git a/flink-core/src/main/java/org/apache/flink/api/common/typeutils/TypeSerializerConfigSnapshot.java b/flink-core/src/main/java/org/apache/flink/api/common/typeutils/TypeSerializerConfigSnapshot.java index 4cffc0a5354f8..2ab732af12ea7 100644 --- a/flink-core/src/main/java/org/apache/flink/api/common/typeutils/TypeSerializerConfigSnapshot.java +++ b/flink-core/src/main/java/org/apache/flink/api/common/typeutils/TypeSerializerConfigSnapshot.java @@ -191,7 +191,7 @@ public TypeSerializerSchemaCompatibility resolveSchemaCompatibility( * "redirect" the compatibility check to the corresponding {code TypeSerializerSnapshot} class. * *

Please note that if it is possible to directly override {@link - TypeSerializerConfigSnapshot#resolveSchemaCompatibility} and preform the redirection logic + TypeSerializerConfigSnapshot#resolveSchemaCompatibility} and perform the redirection logic there, then that is the preferred way. This interface is useful for cases where there is not * enough information, and the new serializer should assist with the redirection. */ diff --git a/flink-core/src/main/java/org/apache/flink/configuration/CheckpointingOptions.java b/flink-core/src/main/java/org/apache/flink/configuration/CheckpointingOptions.java index 47286e8383157..e60c5a20542f4 100644 --- a/flink-core/src/main/java/org/apache/flink/configuration/CheckpointingOptions.java +++ b/flink-core/src/main/java/org/apache/flink/configuration/CheckpointingOptions.java @@ -147,7 +147,7 @@ public class CheckpointingOptions { .defaultValue(1) .withDescription("The maximum number of completed checkpoints to retain."); - /** @deprecated Checkpoints are aways asynchronous. */ + /** @deprecated Checkpoints are always asynchronous. */ @Deprecated public static final ConfigOption ASYNC_SNAPSHOTS = ConfigOptions.key("state.backend.async") diff --git a/flink-core/src/main/java/org/apache/flink/configuration/ConfigOption.java b/flink-core/src/main/java/org/apache/flink/configuration/ConfigOption.java index a4e41ccbe6243..6afdcee8469cf 100644 --- a/flink-core/src/main/java/org/apache/flink/configuration/ConfigOption.java +++ b/flink-core/src/main/java/org/apache/flink/configuration/ConfigOption.java @@ -161,7 +161,7 @@ public ConfigOption withDeprecatedKeys(String... deprecatedKeys) { /** * Creates a new config option, using this option's key and default value, and adding the given - * description. The given description is used when generation the configuration documention. + * description. The given description is used when generating the configuration documentation. * * @param description The description for this option. * @return A new config option, with given description. @@ -172,7 +172,7 @@ public ConfigOption withDescription(final String description) { /** * Creates a new config option, using this option's key and default value, and adding the given - * description. The given description is used when generation the configuration documention. + * description. The given description is used when generating the configuration documentation. * * @param description The description for this option. * @return A new config option, with given description. diff --git a/flink-core/src/main/java/org/apache/flink/core/fs/RecoverableFsDataOutputStream.java b/flink-core/src/main/java/org/apache/flink/core/fs/RecoverableFsDataOutputStream.java index 78f6288057a27..d2b35324314ed 100644 --- a/flink-core/src/main/java/org/apache/flink/core/fs/RecoverableFsDataOutputStream.java +++ b/flink-core/src/main/java/org/apache/flink/core/fs/RecoverableFsDataOutputStream.java @@ -51,7 +51,7 @@ public abstract class RecoverableFsDataOutputStream extends FSDataOutputStream { * as a "close in order to dispose" or "close on failure". * *

In order to persist all previously written data, one needs to call the {@link - * #closeForCommit()} method and call {@link Committer#commit()} on the retured committer + * #closeForCommit()} method and call {@link Committer#commit()} on the returned committer * object. * * @throws IOException Thrown if an error occurred during closing. diff --git a/flink-core/src/main/java/org/apache/flink/util/InstantiationUtil.java b/flink-core/src/main/java/org/apache/flink/util/InstantiationUtil.java index 878abcc136038..c36306070b427 100644 --- a/flink-core/src/main/java/org/apache/flink/util/InstantiationUtil.java +++ b/flink-core/src/main/java/org/apache/flink/util/InstantiationUtil.java @@ -213,7 +213,7 @@ protected Class resolveProxyClass(String[] interfaces) /** * An {@link ObjectInputStream} that ignores serialVersionUID mismatches when deserializing - * objects of anonymous classes or our Scala serializer classes and also replaces occurences of + * objects of anonymous classes or our Scala serializer classes and also replaces occurrences of * GenericData.Array (from Avro) by a dummy class so that the KryoSerializer can still be * deserialized without Avro being on the classpath. * diff --git a/flink-core/src/test/java/org/apache/flink/api/common/io/DelimitedInputFormatTest.java b/flink-core/src/test/java/org/apache/flink/api/common/io/DelimitedInputFormatTest.java index 1b2bd8641dce9..ce17dddf60681 100644 --- a/flink-core/src/test/java/org/apache/flink/api/common/io/DelimitedInputFormatTest.java +++ b/flink-core/src/test/java/org/apache/flink/api/common/io/DelimitedInputFormatTest.java @@ -349,7 +349,7 @@ public void testReadWithBufferSizeIsMultiple() throws IOException { assertTrue(format.reachedEnd()); format.close(); - // this one must have read one too many, because the next split will skipp the trailing + // this one must have read one too many, because the next split will skip the trailing // remainder // which happens to be one full record assertEquals(3, count); diff --git a/flink-core/src/test/java/org/apache/flink/types/RecordTest.java b/flink-core/src/test/java/org/apache/flink/types/RecordTest.java index 95cd2f6526160..5a07fb9fee0c5 100644 --- a/flink-core/src/test/java/org/apache/flink/types/RecordTest.java +++ b/flink-core/src/test/java/org/apache/flink/types/RecordTest.java @@ -821,7 +821,7 @@ public void testUnionFields() { final Value[][] values = new Value[][] { {new IntValue(56), null, new IntValue(-7628761)}, - {null, new StringValue("Hellow Test!"), null}, + {null, new StringValue("Hello Test!"), null}, {null, null, null, null, null, null, null, null}, { null, null, null, null, null, null, null, null, null, null, null, null, diff --git a/flink-dist/src/main/flink-bin/bin/config.sh b/flink-dist/src/main/flink-bin/bin/config.sh index a16db37b8b332..42119b7aa7ace 100755 --- a/flink-dist/src/main/flink-bin/bin/config.sh +++ b/flink-dist/src/main/flink-bin/bin/config.sh @@ -375,7 +375,7 @@ if [ -n "${HBASE_CONF_DIR}" ]; then INTERNAL_HADOOP_CLASSPATHS="${INTERNAL_HADOOP_CLASSPATHS}:${HBASE_CONF_DIR}" fi -# Auxilliary function which extracts the name of host from a line which +# Auxiliary function which extracts the name of host from a line which # also potentially includes topology information and the taskManager type extractHostName() { # handle comments: extract first part of string (before first # character) @@ -502,7 +502,7 @@ extractExecutionResults() { execution_results=$(echo "${output}" | grep ${EXECUTION_PREFIX}) num_lines=$(echo "${execution_results}" | wc -l) - # 
explicit check for empty result, becuase if execution_results is empty, then wc returns 1 + # explicit check for empty result, because if execution_results is empty, then wc returns 1 if [[ -z ${execution_results} ]]; then echo "[ERROR] The execution result is empty." 1>&2 exit 1 diff --git a/flink-dstl/flink-dstl-dfs/src/main/java/org/apache/flink/changelog/fs/FsStateChangelogOptions.java b/flink-dstl/flink-dstl-dfs/src/main/java/org/apache/flink/changelog/fs/FsStateChangelogOptions.java index 38a7a5b53547c..12b439c19f945 100644 --- a/flink-dstl/flink-dstl-dfs/src/main/java/org/apache/flink/changelog/fs/FsStateChangelogOptions.java +++ b/flink-dstl/flink-dstl-dfs/src/main/java/org/apache/flink/changelog/fs/FsStateChangelogOptions.java @@ -121,7 +121,7 @@ public class FsStateChangelogOptions { .intType() .defaultValue(3) .withDescription( - "Maximum number of attempts (including the initial one) to peform a particular upload. " + "Maximum number of attempts (including the initial one) to perform a particular upload. " + "Only takes effect if " + RETRY_POLICY.key() + " is fixed."); diff --git a/flink-end-to-end-tests/flink-netty-shuffle-memory-control-test/src/main/java/org/apache/flink/streaming/tests/NettyShuffleMemoryControlTestProgram.java b/flink-end-to-end-tests/flink-netty-shuffle-memory-control-test/src/main/java/org/apache/flink/streaming/tests/NettyShuffleMemoryControlTestProgram.java index 1d87aec7a9129..a08c03b087917 100644 --- a/flink-end-to-end-tests/flink-netty-shuffle-memory-control-test/src/main/java/org/apache/flink/streaming/tests/NettyShuffleMemoryControlTestProgram.java +++ b/flink-end-to-end-tests/flink-netty-shuffle-memory-control-test/src/main/java/org/apache/flink/streaming/tests/NettyShuffleMemoryControlTestProgram.java @@ -82,7 +82,7 @@ public static void main(String[] args) throws Exception { mapParallelism); checkArgument( reduceParallelism > 0, - "The number of reduce tasks should be positve, but it is {}", + "The number of reduce tasks should be positive, but it is {}", reduceParallelism); StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(); diff --git a/flink-end-to-end-tests/flink-python-test/python/python_job.py b/flink-end-to-end-tests/flink-python-test/python/python_job.py index 2852bc583dbef..421f9d2ac3237 100644 --- a/flink-end-to-end-tests/flink-python-test/python/python_job.py +++ b/flink-end-to-end-tests/flink-python-test/python/python_job.py @@ -35,7 +35,7 @@ def word_count(): t_env = TableEnvironment.create(EnvironmentSettings.in_batch_mode()) - # used to test pipeline.jars and pipleline.classpaths + # used to test pipeline.jars and pipeline.classpaths config_key = sys.argv[1] config_value = sys.argv[2] t_env.get_config().get_configuration().set_string(config_key, config_value) diff --git a/flink-end-to-end-tests/test-scripts/common_yarn_docker.sh b/flink-end-to-end-tests/test-scripts/common_yarn_docker.sh index f64097644d1c5..66ab3738a9b77 100755 --- a/flink-end-to-end-tests/test-scripts/common_yarn_docker.sh +++ b/flink-end-to-end-tests/test-scripts/common_yarn_docker.sh @@ -197,7 +197,7 @@ function wait_for_single_yarn_application { echo "Application ID: $application_id" - # wait for the application to finish succesfully + # wait for the application to finish successfully start_time=$(date +%s) application_state="UNDEFINED" while [[ $application_state != "FINISHED" ]]; do diff --git a/flink-end-to-end-tests/test-scripts/kafka-common.sh b/flink-end-to-end-tests/test-scripts/kafka-common.sh index 
9631fc5f5d285..592d476b6c5c4 100644 --- a/flink-end-to-end-tests/test-scripts/kafka-common.sh +++ b/flink-end-to-end-tests/test-scripts/kafka-common.sh @@ -95,7 +95,7 @@ function start_kafka_cluster { start_time=$(date +%s) # - # Wait for the broker info to appear in ZK. We assume propery registration once an entry + # Wait for the broker info to appear in ZK. We assume proper registration once an entry # similar to this is in ZK: {"listener_security_protocol_map":{"PLAINTEXT":"PLAINTEXT"},"endpoints":["PLAINTEXT://my-host:9092"],"jmx_port":-1,"host":"honorary-pig","timestamp":"1583157804932","port":9092,"version":4} # while ! [[ $($KAFKA_DIR/bin/zookeeper-shell.sh localhost:2181 get /brokers/ids/0 2>&1) =~ .*listener_security_protocol_map.* ]]; do diff --git a/flink-examples/flink-examples-batch/src/main/java/org/apache/flink/examples/java/misc/CollectionExecutionExample.java b/flink-examples/flink-examples-batch/src/main/java/org/apache/flink/examples/java/misc/CollectionExecutionExample.java index c601f3f7c12ff..b47fc2b993909 100644 --- a/flink-examples/flink-examples-batch/src/main/java/org/apache/flink/examples/java/misc/CollectionExecutionExample.java +++ b/flink-examples/flink-examples-batch/src/main/java/org/apache/flink/examples/java/misc/CollectionExecutionExample.java @@ -80,7 +80,7 @@ public static void main(String[] args) throws Exception { EMail[] emailsArray = { new EMail(1, "Re: Meeting", "How about 1pm?"), - new EMail(1, "Re: Meeting", "Sorry, I'm not availble"), + new EMail(1, "Re: Meeting", "Sorry, I'm not available"), new EMail(3, "Re: Re: Project proposal", "Give me a few more days to think about it.") }; diff --git a/flink-examples/flink-examples-streaming/src/main/java/org/apache/flink/streaming/examples/join/WindowJoin.java b/flink-examples/flink-examples-streaming/src/main/java/org/apache/flink/streaming/examples/join/WindowJoin.java index 9e0b641128f51..538ab98349d86 100644 --- a/flink-examples/flink-examples-streaming/src/main/java/org/apache/flink/streaming/examples/join/WindowJoin.java +++ b/flink-examples/flink-examples-streaming/src/main/java/org/apache/flink/streaming/examples/join/WindowJoin.java @@ -39,9 +39,9 @@ * Example illustrating a windowed stream join between two data streams. * *

The example works on two input streams with pairs (name, grade) and (name, salary) - * respectively. It joins the steams based on "name" within a configurable window. + * respectively. It joins the streams based on "name" within a configurable window. * - *

The example uses a built-in sample data generator that generates the steams of pairs at a + *

The example uses a built-in sample data generator that generates the streams of pairs at a * configurable rate. */ @SuppressWarnings("serial") diff --git a/flink-examples/flink-examples-streaming/src/main/scala/org/apache/flink/streaming/scala/examples/join/WindowJoin.scala b/flink-examples/flink-examples-streaming/src/main/scala/org/apache/flink/streaming/scala/examples/join/WindowJoin.scala index 0e6fc3785d128..3793a281829cd 100644 --- a/flink-examples/flink-examples-streaming/src/main/scala/org/apache/flink/streaming/scala/examples/join/WindowJoin.scala +++ b/flink-examples/flink-examples-streaming/src/main/scala/org/apache/flink/streaming/scala/examples/join/WindowJoin.scala @@ -28,10 +28,10 @@ import org.apache.flink.streaming.api.windowing.time.Time * Example illustrating a windowed stream join between two data streams. * * The example works on two input streams with pairs (name, grade) and (name, salary) - * respectively. It joins the steams based on "name" within a configurable window. + * respectively. It joins the streams based on "name" within a configurable window. * * The example uses a built-in sample data generator that generates - * the steams of pairs at a configurable rate. + * the streams of pairs at a configurable rate. */ object WindowJoin { @@ -40,9 +40,9 @@ object WindowJoin { // ************************************************************************* case class Grade(name: String, grade: Int) - + case class Salary(name: String, salary: Int) - + case class Person(name: String, grade: Int, salary: Int) // ************************************************************************* diff --git a/flink-formats/flink-avro/src/main/java/org/apache/flink/formats/avro/RowDataToAvroConverters.java b/flink-formats/flink-avro/src/main/java/org/apache/flink/formats/avro/RowDataToAvroConverters.java index d36e174d2c346..f48eaa6c3d232 100644 --- a/flink-formats/flink-avro/src/main/java/org/apache/flink/formats/avro/RowDataToAvroConverters.java +++ b/flink-formats/flink-avro/src/main/java/org/apache/flink/formats/avro/RowDataToAvroConverters.java @@ -67,7 +67,7 @@ public interface RowDataToAvroConverter extends Serializable { // -------------------------------------------------------------------------------- /** - * Creates a runtime converter accroding to the given logical type that converts objects of + * Creates a runtime converter according to the given logical type that converts objects of * Flink Table & SQL internal data structures to corresponding Avro data structures. */ public static RowDataToAvroConverter createConverter(LogicalType type) { diff --git a/flink-formats/flink-avro/src/main/java/org/apache/flink/formats/avro/typeutils/AvroFactory.java b/flink-formats/flink-avro/src/main/java/org/apache/flink/formats/avro/typeutils/AvroFactory.java index 45513292bceed..18ba9c88d3441 100644 --- a/flink-formats/flink-avro/src/main/java/org/apache/flink/formats/avro/typeutils/AvroFactory.java +++ b/flink-formats/flink-avro/src/main/java/org/apache/flink/formats/avro/typeutils/AvroFactory.java @@ -68,7 +68,7 @@ public final class AvroFactory { * Creates Avro Writer and Reader for a specific type. * *

Given an input type, and possible the current schema, and a previously known schema (also - * known as writer schema) create will deduce the best way to initalize a reader and writer + * known as writer schema) create will deduce the best way to initialize a reader and writer * according to the following rules: * *

    diff --git a/flink-formats/flink-orc-nohive/src/main/java/org/apache/flink/orc/nohive/vector/AbstractOrcNoHiveVector.java b/flink-formats/flink-orc-nohive/src/main/java/org/apache/flink/orc/nohive/vector/AbstractOrcNoHiveVector.java index 90d67cf456ea9..53888e434eb6a 100644 --- a/flink-formats/flink-orc-nohive/src/main/java/org/apache/flink/orc/nohive/vector/AbstractOrcNoHiveVector.java +++ b/flink-formats/flink-orc-nohive/src/main/java/org/apache/flink/orc/nohive/vector/AbstractOrcNoHiveVector.java @@ -67,7 +67,7 @@ public static org.apache.flink.table.data.vector.ColumnVector createFlinkVector( return new OrcNoHiveTimestampVector((TimestampColumnVector) vector); } else { throw new UnsupportedOperationException( - "Unsupport vector: " + vector.getClass().getName()); + "Unsupported vector: " + vector.getClass().getName()); } } diff --git a/flink-formats/flink-orc/src/main/java/org/apache/flink/orc/vector/AbstractOrcColumnVector.java b/flink-formats/flink-orc/src/main/java/org/apache/flink/orc/vector/AbstractOrcColumnVector.java index fc7d00b056cd8..9699b7ee57612 100644 --- a/flink-formats/flink-orc/src/main/java/org/apache/flink/orc/vector/AbstractOrcColumnVector.java +++ b/flink-formats/flink-orc/src/main/java/org/apache/flink/orc/vector/AbstractOrcColumnVector.java @@ -82,7 +82,7 @@ public static org.apache.flink.table.data.vector.ColumnVector createFlinkVector( return new OrcMapColumnVector((MapColumnVector) vector, (MapType) logicalType); } else { throw new UnsupportedOperationException( - "Unsupport vector: " + vector.getClass().getName()); + "Unsupported vector: " + vector.getClass().getName()); } } diff --git a/flink-java/src/main/java/org/apache/flink/api/java/DataSet.java b/flink-java/src/main/java/org/apache/flink/api/java/DataSet.java index 4ad46fb862a1f..1c4ddaa7fc7ed 100644 --- a/flink-java/src/main/java/org/apache/flink/api/java/DataSet.java +++ b/flink-java/src/main/java/org/apache/flink/api/java/DataSet.java @@ -1535,7 +1535,7 @@ public SortPartitionOperator sortPartition(KeySelector keyExtractor *
      *
    • A directory is created and multiple files are written underneath. (Default behavior) *
      - * This sink creates a directory called "path1", and files "1", "2" ... are writen + * This sink creates a directory called "path1", and files "1", "2" ... are written * underneath depending on parallelism *
      {@code .
      diff --git a/flink-java/src/main/java/org/apache/flink/api/java/operators/PartitionOperator.java b/flink-java/src/main/java/org/apache/flink/api/java/operators/PartitionOperator.java
      index 234e6f1dd6a37..258ed21114b50 100644
      --- a/flink-java/src/main/java/org/apache/flink/api/java/operators/PartitionOperator.java
      +++ b/flink-java/src/main/java/org/apache/flink/api/java/operators/PartitionOperator.java
      @@ -122,10 +122,10 @@ private 

      PartitionOperator( "Partitioning requires keys"); Preconditions.checkArgument( pMethod != PartitionMethod.CUSTOM || customPartitioner != null, - "Custom partioning requires a partitioner."); + "Custom partitioning requires a partitioner."); Preconditions.checkArgument( distribution == null || pMethod == PartitionMethod.RANGE, - "Customized data distribution is only neccessary for range partition."); + "Customized data distribution is only necessary for range partition."); if (distribution != null) { Preconditions.checkArgument(