[hotfix] Fix various typos
brandonJY committed Aug 17, 2020
1 parent bafb89b commit 8044b47
Showing 29 changed files with 31 additions and 31 deletions.
@@ -136,7 +136,7 @@ public String toString() {

/**
* Annotation used on config option fields or options class to mark them as a suffix-option; i.e., a config option
- * where the key is only a suffix, with the prefix being danymically provided at runtime.
+ * where the key is only a suffix, with the prefix being dynamically provided at runtime.
*/
@Target({ElementType.FIELD, ElementType.TYPE})
@Retention(RetentionPolicy.RUNTIME)
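For context, a minimal sketch of what a suffix-option looks like in practice; the option name and runtime prefix below are hypothetical:

```java
import org.apache.flink.configuration.ConfigOption;
import org.apache.flink.configuration.ConfigOptions;

public class SuffixOptionSketch {
    // Hypothetical suffix-option: only the suffix "retries" is declared here.
    // A prefix such as "connector.kafka" is prepended at runtime, so the
    // effective key becomes "connector.kafka.retries".
    public static final ConfigOption<Integer> RETRIES =
            ConfigOptions.key("retries")
                    .intType()
                    .defaultValue(3);
}
```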
@@ -56,7 +56,7 @@ public interface ElasticsearchApiCallBridge<C extends AutoCloseable> extends Ser
* Creates a {@link BulkProcessor.Builder} for creating the bulk processor.
*
* @param client the Elasticsearch client.
- * @param listener the bulk processor listender.
+ * @param listener the bulk processor listener.
* @return the bulk processor builder.
*/
BulkProcessor.Builder createBulkProcessorBuilder(C client, BulkProcessor.Listener listener);
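A hedged sketch of how a concrete bridge might implement this method, assuming the Elasticsearch 6.x high-level REST client (the exact client signature varies across Elasticsearch versions):

```java
import org.elasticsearch.action.bulk.BulkProcessor;
import org.elasticsearch.client.RestHighLevelClient;

class Elasticsearch6BridgeSketch {
    // Builds the bulk processor around the client's asynchronous bulk call;
    // the listener receives the before/after hooks for each bulk request.
    BulkProcessor.Builder createBulkProcessorBuilder(
            RestHighLevelClient client, BulkProcessor.Listener listener) {
        return BulkProcessor.builder(client::bulkAsync, listener);
    }
}
```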
@@ -48,7 +48,7 @@ public enum BackOffType {
.stringType()
.asList()
.noDefaultValue()
.withDescription("Elasticseatch hosts to connect to.");
.withDescription("Elasticsearch hosts to connect to.");
public static final ConfigOption<String> INDEX_OPTION =
ConfigOptions.key("index")
.stringType()
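A hedged usage sketch of the hosts option declared above; list-typed options use ';' as the element delimiter in Flink's string representation, and the constant name here is illustrative:

```java
import java.util.List;
import org.apache.flink.configuration.ConfigOption;
import org.apache.flink.configuration.ConfigOptions;
import org.apache.flink.configuration.Configuration;

class HostsOptionSketch {
    // Re-declaration of the option from the hunk above, for illustration.
    static final ConfigOption<List<String>> HOSTS =
            ConfigOptions.key("hosts")
                    .stringType()
                    .asList()
                    .noDefaultValue()
                    .withDescription("Elasticsearch hosts to connect to.");

    static List<String> readHosts() {
        Configuration conf = new Configuration();
        conf.setString("hosts", "http://127.0.0.1:9200;http://127.0.0.2:9200");
        return conf.get(HOSTS); // ["http://127.0.0.1:9200", "http://127.0.0.2:9200"]
    }
}
```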
@@ -101,7 +101,7 @@ public Builder(List<HttpHost> httpHosts, ElasticsearchSinkFunction<T> elasticsea
/**
* Sets the maximum number of actions to buffer for each bulk request.
*
- * @param numMaxActions the maxinum number of actions to buffer per bulk request.
+ * @param numMaxActions the maximum number of actions to buffer per bulk request.
*/
public void setBulkFlushMaxActions(int numMaxActions) {
this.bulkRequestsConfig.put(CONFIG_KEY_BULK_FLUSH_MAX_ACTIONS, String.valueOf(numMaxActions));
@@ -101,7 +101,7 @@ public Builder(List<HttpHost> httpHosts, ElasticsearchSinkFunction<T> elasticsea
/**
* Sets the maximum number of actions to buffer for each bulk request.
*
- * @param numMaxActions the maxinum number of actions to buffer per bulk request.
+ * @param numMaxActions the maximum number of actions to buffer per bulk request.
*/
public void setBulkFlushMaxActions(int numMaxActions) {
Preconditions.checkArgument(
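Both variants are driven the same way from user code; a hedged usage sketch (the host and the inline sink function are placeholders, and the package name assumes the elasticsearch6 connector module):

```java
import java.util.Collections;
import org.apache.flink.streaming.connectors.elasticsearch6.ElasticsearchSink;
import org.apache.http.HttpHost;

class BulkFlushSketch {
    static ElasticsearchSink<String> buildSink() {
        ElasticsearchSink.Builder<String> builder = new ElasticsearchSink.Builder<>(
                Collections.singletonList(new HttpHost("127.0.0.1", 9200, "http")),
                (element, ctx, indexer) -> {
                    // turn 'element' into an index request and hand it to
                    // 'indexer'; omitted in this sketch
                });
        builder.setBulkFlushMaxActions(1000); // flush after at most 1000 buffered actions
        return builder.build();
    }
}
```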
@@ -257,7 +257,7 @@ possible configurations would overwhelm and obscure the important.
updates are blocked and flushes are forced. Defaults to 40% of heap (0.4).
Updates are blocked and flushes are forced until size of all memstores
in a region server hits hbase.regionserver.global.memstore.size.lower.limit.
- The default value in this configuration has been intentionally left emtpy in order to
+ The default value in this configuration has been intentionally left empty in order to
honor the old hbase.regionserver.global.memstore.upperLimit property if present.</description>
</property>
<property>
@@ -267,7 +267,7 @@ possible configurations would overwhelm and obscure the important.
Defaults to 95% of hbase.regionserver.global.memstore.size (0.95).
A 100% value for this value causes the minimum possible flushing to occur when updates are
blocked due to memstore limiting.
- The default value in this configuration has been intentionally left emtpy in order to
+ The default value in this configuration has been intentionally left empty in order to
honor the old hbase.regionserver.global.memstore.lowerLimit property if present.</description>
</property>
<property>
@@ -170,7 +170,7 @@ public class Types {
public static final TypeInformation<LocalDateTime> LOCAL_DATE_TIME = LocalTimeTypeInfo.LOCAL_DATE_TIME;

/**
- * Returns type infomation for {@link java.time.Instant}. Supports a null value.
+ * Returns type information for {@link java.time.Instant}. Supports a null value.
*/
public static final TypeInformation<Instant> INSTANT = BasicTypeInfo.INSTANT_TYPE_INFO;
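A hedged usage sketch: supplying the constant as an explicit type hint where a lambda's result type cannot be inferred (the stream and mapping are placeholders):

```java
import java.time.Instant;
import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.streaming.api.datastream.DataStream;

class InstantTypeSketch {
    static DataStream<Instant> toInstants(DataStream<Long> epochMillis) {
        // The explicit hint avoids type-erasure problems with the lambda.
        return epochMillis.map(Instant::ofEpochMilli).returns(Types.INSTANT);
    }
}
```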

@@ -62,7 +62,7 @@ public final void setPriorSerializer(TypeSerializer<T> serializer) {
* Set the user code class loader.
* Only relevant if this configuration instance was deserialized from binary form.
*
- * <p>This method is not part of the public user-facing API, and cannot be overriden.
+ * <p>This method is not part of the public user-facing API, and cannot be overridden.
*
* @param userCodeClassLoader user code class loader.
*/
@@ -39,7 +39,7 @@
*
* <li><strong>Compatibility checks for new serializers:</strong> when new serializers are available,
* they need to be checked whether or not they are compatible to read the data written by the previous serializer.
- This is performed by providing the new serializer to the correspondibng serializer configuration
+ This is performed by providing the new serializer to the corresponding serializer configuration
* snapshots in checkpoints.</li>
*
* <li><strong>Factory for a read serializer when schema conversion is required:</strong> in the case that new
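A hedged sketch of the compatibility check and migration path described above, using the TypeSerializerSnapshot method names from Flink's 1.x interface:

```java
import org.apache.flink.api.common.typeutils.TypeSerializer;
import org.apache.flink.api.common.typeutils.TypeSerializerSchemaCompatibility;
import org.apache.flink.api.common.typeutils.TypeSerializerSnapshot;

class CompatibilityCheckSketch {
    static <T> TypeSerializer<T> resolveReadSerializer(
            TypeSerializerSnapshot<T> restoredSnapshot, TypeSerializer<T> newSerializer) {
        TypeSerializerSchemaCompatibility<T> compat =
                restoredSnapshot.resolveSchemaCompatibility(newSerializer);
        if (compat.isCompatibleAsIs()) {
            return newSerializer; // read and write directly with the new serializer
        } else if (compat.isCompatibleAfterMigration()) {
            // read with the serializer restored from the snapshot,
            // then rewrite the data with the new serializer
            return restoredSnapshot.restoreSerializer();
        }
        throw new IllegalStateException("the serializers are incompatible");
    }
}
```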
@@ -36,7 +36,7 @@ public class MissingTypeInfo extends TypeInformation<InvalidTypesException> {


public MissingTypeInfo(String functionName) {
- this(functionName, new InvalidTypesException("An unknown error occured."));
+ this(functionName, new InvalidTypesException("An unknown error occurred."));
}

public MissingTypeInfo(String functionName, InvalidTypesException typeException) {
@@ -195,7 +195,7 @@ public int hash(T value) {
try {
code += this.comparators[i].hash(accessField(keyFields[i], value));
}catch(NullPointerException npe) {
throw new RuntimeException("A NullPointerException occured while accessing a key field in a POJO. " +
throw new RuntimeException("A NullPointerException occurred while accessing a key field in a POJO. " +
"Most likely, the value grouped/joined on is null. Field name: "+keyFields[i].getName(), npe);
}
}
@@ -50,7 +50,7 @@
* It's a simple reimplementation of Hadoop distcp
* (see <a href="http://hadoop.apache.org/docs/r1.2.1/distcp.html">http://hadoop.apache.org/docs/r1.2.1/distcp.html</a>)
* with a dynamic input format
- * Note that this tool does not deal with retriability. Additionally, empty directories are not copied over.
+ * Note that this tool does not deal with retrievability. Additionally, empty directories are not copied over.
*
* <p>When running locally, local file systems paths can be used.
* However, in a distributed environment HDFS paths must be provided both as input and output.
@@ -29,7 +29,7 @@ public enum TimestampFormat {
* TIMESTAMP_WITH_LOCAL_TIMEZONE in "yyyy-MM-dd HH:mm:ss.s{precision}'Z'" and output in the same format.*/
SQL,

- /** Options to specify TIMESTAMP/TIMESTAMP_WITH_LOCAL_ZONE format. It will pase TIMESTAMP in "yyyy-MM-ddTHH:mm:ss.s{precision}" format,
+ /** Options to specify TIMESTAMP/TIMESTAMP_WITH_LOCAL_ZONE format. It will parse TIMESTAMP in "yyyy-MM-ddTHH:mm:ss.s{precision}" format,
* TIMESTAMP_WITH_LOCAL_TIMEZONE in "yyyy-MM-ddTHH:mm:ss.s{precision}'Z'" and output in the same format.*/
ISO_8601
}
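A hedged illustration of the two layouts the enum distinguishes, using plain java.time rather than the Flink parser itself:

```java
import java.time.LocalDateTime;
import java.time.format.DateTimeFormatter;

class TimestampFormatSketch {
    public static void main(String[] args) {
        LocalDateTime ts = LocalDateTime.of(2020, 8, 17, 12, 34, 56);
        // SQL: space between date and time -> "2020-08-17 12:34:56"
        String sql = ts.format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"));
        // ISO_8601: 'T' between date and time -> "2020-08-17T12:34:56"
        String iso = ts.format(DateTimeFormatter.ISO_LOCAL_DATE_TIME);
        System.out.println(sql + " | " + iso);
    }
}
```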
@@ -142,7 +142,7 @@ public DataSink<T> sortLocalOutput(int field, Order order) {
this.sortOrders = new Order[flatKeys.length];
Arrays.fill(this.sortOrders, order);
} else {
- // append sorting info to exising info
+ // append sorting info to existing info
int oldLength = this.sortKeyPositions.length;
int newLength = oldLength + flatKeys.length;
this.sortKeyPositions = Arrays.copyOf(this.sortKeyPositions, newLength);
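A hedged usage sketch of the appending behavior (the dataset and output path are placeholders):

```java
import org.apache.flink.api.common.operators.Order;
import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.tuple.Tuple2;

class SortedOutputSketch {
    static void write(DataSet<Tuple2<String, Integer>> data, String path) {
        data.writeAsCsv(path)
                .sortLocalOutput(0, Order.ASCENDING)   // establishes the sort info
                .sortLocalOutput(1, Order.DESCENDING); // appended to the existing info
    }
}
```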
@@ -74,7 +74,7 @@ public class EdgeMetrics<K extends Comparable<K>, VV, EV>
* Implementation notes:
*
* <p>Use aggregator to replace SumEdgeStats when aggregators are rewritten to use
- * a hash-combineable hashable-reduce.
+ * a hash-combinable hashable-reduce.
*
* <p>Use distinct to replace ReduceEdgeStats when the combiner can be disabled
* with a sorted-reduce forced.
@@ -55,7 +55,7 @@ public class DenseMatrix implements Serializable {
* Construct an m-by-n matrix of zeros.
*
* @param m Number of rows.
- * @param n Number of colums.
+ * @param n Number of columns.
*/
public DenseMatrix(int m, int n) {
this(m, n, new double[m * n], false);
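A hedged usage sketch (the import path is assumed from flink-ml):

```java
import org.apache.flink.ml.common.linalg.DenseMatrix; // package assumed

class DenseMatrixSketch {
    public static void main(String[] args) {
        // A 3-by-2 matrix of zeros; per the delegating constructor above,
        // this allocates a double[3 * 2] backing array.
        DenseMatrix zeros = new DenseMatrix(3, 2);
    }
}
```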
@@ -126,7 +126,7 @@ protected void runAsyncWithoutFencing(Runnable runnable) {
* Run the given callable in the main thread of the RpcEndpoint without checking the fencing
* token. This allows to run operations outside of the fencing token scope.
*
- * @param callable to run in the main thread of the rpc endpoint without checkint the fencing token.
+ * @param callable to run in the main thread of the rpc endpoint without checking the fencing token.
* @param timeout for the operation.
* @return Future containing the callable result.
*/
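A hedged usage fragment from inside a FencedRpcEndpoint subclass; 'pendingRequests' is a placeholder field, and the Time-based signature is assumed from the javadoc above:

```java
// Query internal state from the endpoint's main thread without
// validating the fencing token:
CompletableFuture<Integer> pending =
        callAsyncWithoutFencing(() -> pendingRequests.size(), Time.seconds(10));
```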
@@ -436,7 +436,7 @@ public void testWaitingForJobMasterLeadership() throws Exception {
}

/**
- * Tests that the {@link Dispatcher} fails fatally if the recoverd jobs cannot be started.
+ * Tests that the {@link Dispatcher} fails fatally if the recovered jobs cannot be started.
* See FLINK-9097.
*/
@Test
@@ -47,7 +47,7 @@ protected void write(String path, ArrayList<IN> tupleList) {
outStream.println(strTuple.substring(1, strTuple.length() - 1));
}
} catch (IOException e) {
throw new RuntimeException("Exception occured while writing file " + path, e);
throw new RuntimeException("Exception occurred while writing file " + path, e);
}
}

@@ -46,7 +46,7 @@ public void write(String path, ArrayList<IN> tupleList) {
outStream.println(tupleToWrite);
}
} catch (IOException e) {
throw new RuntimeException("Exception occured while writing file " + path, e);
throw new RuntimeException("Exception occurred while writing file " + path, e);
}
}
}
@@ -217,7 +217,7 @@ public TypeSerializer<N> getNamespaceSerializer() {
}

/**
- * Snaphot of a {@link TimerSerializer}.
+ * Snapshot of a {@link TimerSerializer}.
*
* @param <K> type of key.
* @param <N> type of namespace.
@@ -38,7 +38,7 @@
*
* <p>Distinct types are implicitly final and do not support super types.
*
- * <p>Most other properties are forwarded from the source type. Thus, ordering and comparision among
+ * <p>Most other properties are forwarded from the source type. Thus, ordering and comparison among
* the same distinct types are supported.
*
* <p>The serialized string representation is the fully qualified name of this type which means that
@@ -1360,7 +1360,7 @@ public Optional<String> getDetailedDescription() {
}

// ------ equality check utils ------
- // Can be overriden by sub test class
+ // Can be overridden by sub test class

protected void checkEquals(CatalogFunction f1, CatalogFunction f2) {
assertEquals(f1.getClassName(), f2.getClassName());
@@ -66,7 +66,7 @@ class AggSqlFunction(
fromDataTypeToLogicalType(externalResultType), typeFactory)),
createOperandTypeInference(displayName, aggregateFunction, typeFactory, externalAccType),
createOperandTypeChecker(displayName, aggregateFunction, externalAccType),
- // Do not need to provide a calcite aggregateFunction here. Flink aggregateion function
+ // Do not need to provide a calcite aggregateFunction here. Flink aggregation function
// will be generated when translating the calcite relnode to flink runtime execution plan
null,
false,
@@ -61,7 +61,7 @@ abstract class BatchExecGroupAggregateBase(
with BatchPhysicalRel {

if (grouping.isEmpty && auxGrouping.nonEmpty) {
throw new TableException("auxGrouping should be empty if grouping is emtpy.")
throw new TableException("auxGrouping should be empty if grouping is empty.")
}

override def deriveRowType(): RelDataType = outputRowType
@@ -47,7 +47,7 @@ abstract class BatchExecWindowAggregateBase(
with BatchPhysicalRel {

if (grouping.isEmpty && auxGrouping.nonEmpty) {
- throw new TableException("auxGrouping should be empty if grouping is emtpy.")
+ throw new TableException("auxGrouping should be empty if grouping is empty.")
}

def getGrouping: Array[Int] = grouping
@@ -171,7 +171,7 @@ public W addWindow(W newWindow, MergeFunction<W> mergeFunction) throws Exception
resultWindow = mergeResult;
}

- // if our new window is the same as a pre-exising window, nothing to do
+ // if our new window is the same as a pre-existing window, nothing to do
if (mergedWindows.isEmpty()) {
continue;
}
@@ -199,7 +199,7 @@ public W addWindow(W newWindow, MergeFunction<W> mergeFunction) throws Exception

// don't merge the new window itself, it never had any state associated with it
// i.e. if we are only merging one pre-existing window into itself
- // without extending the pre-exising window
+ // without extending the pre-existing window
if (!(mergedWindows.contains(mergeResult) && mergedWindows.size() == 1)) {
mergeFunction.merge(mergeResult,
mergedWindows,
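For intuition, a hedged sketch of the merge geometry itself using TimeWindow; the set bookkeeping above generalizes this pairwise step:

```java
import org.apache.flink.streaming.api.windowing.windows.TimeWindow;

class WindowMergeSketch {
    public static void main(String[] args) {
        // Two overlapping session-style windows collapse into one window
        // covering both; a disjoint window would be left untouched.
        TimeWindow a = new TimeWindow(0, 10);
        TimeWindow b = new TimeWindow(5, 15);
        if (a.intersects(b)) {
            TimeWindow merged = a.cover(b); // [0, 15)
            System.out.println(merged);
        }
    }
}
```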
@@ -150,7 +150,7 @@ public static void prepare(TemporaryFolder tempFolder) {
populateJavaPropertyVariables();

} catch (Exception e) {
throw new RuntimeException("Exception occured while preparing secure environment.", e);
throw new RuntimeException("Exception occurred while preparing secure environment.", e);
}

}
@@ -1147,7 +1147,7 @@ private void failSessionDuringDeployment(YarnClient yarnClient, YarnClientApplic
yarnClient.killApplication(yarnApplication.getNewApplicationResponse().getApplicationId());
} catch (Exception e) {
// we only log a debug message here because the "killApplication" call is a best-effort
- // call (we don't know if the application has been deployed when the error occured).
+ // call (we don't know if the application has been deployed when the error occurred).
LOG.debug("Error while killing YARN application", e);
}
}