[FLINK-22782][docs] Remove legacy planner from Chinese docs
This closes apache#16018.
SteNicholas authored and twalthr committed May 31, 2021
1 parent e920950 commit 884ff61
Showing 21 changed files with 238 additions and 502 deletions.
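Every file in this commit receives the same mechanical edit: the explicit blink-planner selection is dropped from the `EnvironmentSettings` builder, since the blink planner is Flink's default and the legacy planner is being removed. Below is a minimal before/after sketch of the pattern in PyFlink — an illustration, not part of the diff, assuming Flink ≥ 1.11 where the blink planner is already the default:

```python
from pyflink.table import EnvironmentSettings, TableEnvironment

# Before: the docs selected the blink planner explicitly.
# env_settings = EnvironmentSettings.new_instance().in_streaming_mode().use_blink_planner().build()

# After: only the execution mode is configured; the planner choice is implicit.
env_settings = EnvironmentSettings.new_instance().in_streaming_mode().build()
table_env = TableEnvironment.create(env_settings)
```

The Java and Scala hunks below follow the same shape, dropping `useBlinkPlanner()` from the `EnvironmentSettings.newInstance()` chains.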
4 changes: 2 additions & 2 deletions docs/content.zh/docs/connectors/table/hive/hive_dialect.md
@@ -69,7 +69,7 @@ Flink SQL> set table.sql-dialect=default; -- to use default dialect
{{< tab "Java" >}}
```java

-EnvironmentSettings settings = EnvironmentSettings.newInstance().useBlinkPlanner()...build();
+EnvironmentSettings settings = EnvironmentSettings.newInstance().build();
TableEnvironment tableEnv = TableEnvironment.create(settings);
// to use hive dialect
tableEnv.getConfig().setSqlDialect(SqlDialect.HIVE);
@@ -82,7 +82,7 @@ tableEnv.getConfig().setSqlDialect(SqlDialect.DEFAULT);
```python
from pyflink.table import *

-settings = EnvironmentSettings.new_instance().in_batch_mode().use_blink_planner().build()
+settings = EnvironmentSettings.new_instance().in_batch_mode().build()
t_env = TableEnvironment.create(settings)

# to use hive dialect
6 changes: 3 additions & 3 deletions docs/content.zh/docs/connectors/table/hive/overview.md
@@ -312,7 +312,7 @@ export HADOOP_CLASSPATH=`hadoop classpath`

```java

-EnvironmentSettings settings = EnvironmentSettings.newInstance().useBlinkPlanner().build();
+EnvironmentSettings settings = EnvironmentSettings.newInstance().build();
TableEnvironment tableEnv = TableEnvironment.create(settings);

String name = "myhive";
@@ -330,7 +330,7 @@ tableEnv.useCatalog("myhive");

```scala

-val settings = EnvironmentSettings.newInstance().useBlinkPlanner().build()
+val settings = EnvironmentSettings.newInstance().build()
val tableEnv = TableEnvironment.create(settings)

val name = "myhive"
@@ -349,7 +349,7 @@ tableEnv.useCatalog("myhive")
from pyflink.table import *
from pyflink.table.catalog import HiveCatalog

-settings = EnvironmentSettings.new_instance().in_batch_mode().use_blink_planner().build()
+settings = EnvironmentSettings.new_instance().in_batch_mode().build()
t_env = TableEnvironment.create(settings)

catalog_name = "myhive"
2 changes: 1 addition & 1 deletion docs/content.zh/docs/connectors/table/jdbc.md
@@ -394,7 +394,7 @@ tableEnv.useCatalog("mypg")
```python
from pyflink.table.catalog import JdbcCatalog

-environment_settings = EnvironmentSettings.new_instance().in_streaming_mode().use_blink_planner().build()
+environment_settings = EnvironmentSettings.new_instance().in_streaming_mode().build()
t_env = TableEnvironment.create(environment_settings)

name = "mypg"
2 changes: 1 addition & 1 deletion docs/content.zh/docs/dev/python/dependency_management.md
@@ -294,7 +294,7 @@ import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.TableEnvironment;

TableEnvironment tEnv = TableEnvironment.create(
-EnvironmentSettings.newInstance().useBlinkPlanner().inBatchMode().build());
+EnvironmentSettings.newInstance().inBatchMode().build());
tEnv.getConfig().getConfiguration().set(CoreOptions.DEFAULT_PARALLELISM, 1);

// register the Python UDF
2 changes: 1 addition & 1 deletion docs/content.zh/docs/dev/python/python_config.md
@@ -47,7 +47,7 @@ The config options could be set as follows in a Table API program:
```python
from pyflink.table import TableEnvironment, EnvironmentSettings

-env_settings = EnvironmentSettings.new_instance().in_streaming_mode().use_blink_planner().build()
+env_settings = EnvironmentSettings.new_instance().in_streaming_mode().build()
t_env = TableEnvironment.create(env_settings)

config = t_env.get_config().get_configuration()
20 changes: 10 additions & 10 deletions docs/content.zh/docs/dev/python/table/intro_to_table_api.md
@@ -40,7 +40,7 @@ Basic structure of a Python Table API program
from pyflink.table import EnvironmentSettings, TableEnvironment

# 1. Create a TableEnvironment
-env_settings = EnvironmentSettings.new_instance().in_streaming_mode().use_blink_planner().build()
+env_settings = EnvironmentSettings.new_instance().in_streaming_mode().build()
table_env = TableEnvironment.create(env_settings)

# 2. Create a source table
@@ -92,11 +92,11 @@ table_env.execute_sql("INSERT INTO print SELECT * FROM datagen").wait()
from pyflink.table import EnvironmentSettings, TableEnvironment

# create a blink streaming TableEnvironment
-env_settings = EnvironmentSettings.new_instance().in_streaming_mode().use_blink_planner().build()
+env_settings = EnvironmentSettings.new_instance().in_streaming_mode().build()
table_env = TableEnvironment.create(env_settings)

# or create a blink batch TableEnvironment
-env_settings = EnvironmentSettings.new_instance().in_batch_mode().use_blink_planner().build()
+env_settings = EnvironmentSettings.new_instance().in_batch_mode().build()
table_env = TableEnvironment.create(env_settings)
```

@@ -133,7 +133,7 @@ table_env = TableEnvironment.create(env_settings)
from pyflink.table import EnvironmentSettings, TableEnvironment

# create a blink batch TableEnvironment
-env_settings = EnvironmentSettings.new_instance().in_batch_mode().use_blink_planner().build()
+env_settings = EnvironmentSettings.new_instance().in_batch_mode().build()
table_env = TableEnvironment.create(env_settings)

table = table_env.from_elements([(1, 'Hi'), (2, 'Hello')])
@@ -197,7 +197,7 @@ print('Now the type of the "id" column is %s.' % type)
from pyflink.table import EnvironmentSettings, TableEnvironment

# create a blink streaming TableEnvironment
-env_settings = EnvironmentSettings.new_instance().in_streaming_mode().use_blink_planner().build()
+env_settings = EnvironmentSettings.new_instance().in_streaming_mode().build()
table_env = TableEnvironment.create(env_settings)

table_env.execute_sql("""
@@ -277,7 +277,7 @@ new_table.to_pandas()
from pyflink.table import EnvironmentSettings, TableEnvironment

# execute the query with a batch table environment
-env_settings = EnvironmentSettings.new_instance().in_batch_mode().use_blink_planner().build()
+env_settings = EnvironmentSettings.new_instance().in_batch_mode().build()
table_env = TableEnvironment.create(env_settings)

orders = table_env.from_elements([('Jack', 'FRANCE', 10), ('Rose', 'ENGLAND', 30), ('Jack', 'FRANCE', 20)],
@@ -312,7 +312,7 @@ from pyflink.table.udf import udf
import pandas as pd

# execute the query with a batch table environment
-env_settings = EnvironmentSettings.new_instance().in_batch_mode().use_blink_planner().build()
+env_settings = EnvironmentSettings.new_instance().in_batch_mode().build()
table_env = TableEnvironment.create(env_settings)

orders = table_env.from_elements([('Jack', 'FRANCE', 10), ('Rose', 'ENGLAND', 30), ('Jack', 'FRANCE', 20)],
@@ -348,7 +348,7 @@ Flink's SQL support is based on [Apache Calcite](https://calcite.apache.org), which implements
from pyflink.table import EnvironmentSettings, TableEnvironment

# execute the query with a stream table environment
-env_settings = EnvironmentSettings.new_instance().in_streaming_mode().use_blink_planner().build()
+env_settings = EnvironmentSettings.new_instance().in_streaming_mode().build()
table_env = TableEnvironment.create(env_settings)


@@ -634,7 +634,7 @@ The Table API provides a mechanism to view a `Table`'s logical query plan and the optimized
# use a streaming-mode TableEnvironment
from pyflink.table import EnvironmentSettings, TableEnvironment

-env_settings = EnvironmentSettings.new_instance().in_streaming_mode().use_blink_planner().build()
+env_settings = EnvironmentSettings.new_instance().in_streaming_mode().build()
table_env = TableEnvironment.create(env_settings)

table1 = table_env.from_elements([(1, 'Hi'), (2, 'Hello')], ['id', 'data'])
@@ -687,7 +687,7 @@ Stage 136 : Data Source
# use a streaming-mode TableEnvironment
from pyflink.table import EnvironmentSettings, TableEnvironment

-env_settings = EnvironmentSettings.new_instance().in_streaming_mode().use_blink_planner().build()
+env_settings = EnvironmentSettings.new_instance().in_streaming_mode().build()
table_env = TableEnvironment.create(environment_settings=env_settings)

table1 = table_env.from_elements([(1, 'Hi'), (2, 'Hello')], ['id', 'data'])
@@ -37,7 +37,7 @@ from pyflink.table import EnvironmentSettings, TableEnvironment
from pyflink.table.expressions import col
from pyflink.table.types import DataTypes

-env_settings = EnvironmentSettings.new_instance().in_batch_mode().use_blink_planner().build()
+env_settings = EnvironmentSettings.new_instance().in_batch_mode().build()
table_env = TableEnvironment.create(env_settings)

table = table_env.from_elements([(1, 'Hi'), (2, 'Hello')], ['id', 'data'])
@@ -101,7 +101,7 @@ from pyflink.common import Row
from pyflink.table.udf import udtf
from pyflink.table import DataTypes, EnvironmentSettings, TableEnvironment

-env_settings = EnvironmentSettings.new_instance().in_streaming_mode().use_blink_planner().build()
+env_settings = EnvironmentSettings.new_instance().in_streaming_mode().build()
table_env = TableEnvironment.create(env_settings)

table = table_env.from_elements([(1, 'Hi,Flink'), (2, 'Hello')], ['id', 'data'])
@@ -181,7 +181,7 @@ agg = udaf(function,

# aggregate with a python general aggregate function

-env_settings = EnvironmentSettings.new_instance().in_streaming_mode().use_blink_planner().build()
+env_settings = EnvironmentSettings.new_instance().in_streaming_mode().build()
table_env = TableEnvironment.create(env_settings)
t = table_env.from_elements([(1, 2), (2, 1), (1, 3)], ['a', 'b'])

@@ -196,7 +196,7 @@ result.to_pandas()
# 1 2 1 1

# aggregate with a python vectorized aggregate function
-env_settings = EnvironmentSettings.new_instance().in_batch_mode().use_blink_planner().build()
+env_settings = EnvironmentSettings.new_instance().in_batch_mode().build()
table_env = TableEnvironment.create(env_settings)

t = table_env.from_elements([(1, 2), (2, 1), (1, 3)], ['a', 'b'])
@@ -256,7 +256,7 @@ class Top2(TableAggregateFunction):
[DataTypes.FIELD("a", DataTypes.BIGINT())])


-env_settings = EnvironmentSettings.new_instance().use_blink_planner().in_streaming_mode().build()
+env_settings = EnvironmentSettings.new_instance().in_streaming_mode().build()
table_env = TableEnvironment.create(env_settings)
# the result type and accumulator type can also be specified in the udtaf decorator:
# top2 = udtaf(Top2(), result_type=DataTypes.ROW([DataTypes.FIELD("a", DataTypes.BIGINT())]), accumulator_type=DataTypes.ARRAY(DataTypes.BIGINT()))
@@ -83,7 +83,7 @@ t_env.sql_query("SELECT a FROM source_table") \
from pyflink.table import TableEnvironment, EnvironmentSettings

def log_processing():
-env_settings = EnvironmentSettings.new_instance().use_blink_planner().is_streaming_mode().build()
+env_settings = EnvironmentSettings.new_instance().is_streaming_mode().build()
t_env = TableEnvironment.create(env_settings)
# specify connector and format jars
t_env.get_config().get_configuration().set_string("pipeline.jars", "file:https:///my/jar/path/connector.jar;file:https:///my/jar/path/json.jar")
12 changes: 6 additions & 6 deletions docs/content.zh/docs/dev/python/table/udfs/python_udfs.md
@@ -50,7 +50,7 @@ class HashCode(ScalarFunction):
def eval(self, s):
return hash(s) * self.factor

-settings = EnvironmentSettings.new_instance().in_batch_mode().use_blink_planner().build()
+settings = EnvironmentSettings.new_instance().in_batch_mode().build()
table_env = TableEnvironment.create(settings)

hash_code = udf(HashCode(), result_type=DataTypes.BIGINT())
@@ -80,7 +80,7 @@ public class HashCode extends ScalarFunction {
'''
from pyflink.table.expressions import call

-settings = EnvironmentSettings.new_instance().in_batch_mode().use_blink_planner().build()
+settings = EnvironmentSettings.new_instance().in_batch_mode().build()
table_env = TableEnvironment.create(settings)

# register the Java function
@@ -148,7 +148,7 @@ class Split(TableFunction):
for s in string.split(" "):
yield s, len(s)

-env_settings = EnvironmentSettings.new_instance().use_blink_planner().is_streaming_mode().build()
+env_settings = EnvironmentSettings.new_instance().is_streaming_mode().build()
table_env = TableEnvironment.create(env_settings)
my_table = ... # type: Table, table schema: [a: String]

@@ -186,7 +186,7 @@ public class Split extends TableFunction<Tuple2<String, Integer>> {
'''
from pyflink.table.expressions import call

-env_settings = EnvironmentSettings.new_instance().use_blink_planner().is_streaming_mode().build()
+env_settings = EnvironmentSettings.new_instance().is_streaming_mode().build()
table_env = TableEnvironment.create(env_settings)
my_table = ... # type: Table, table schema: [a: String]

@@ -301,7 +301,7 @@ class WeightedAvg(AggregateFunction):
DataTypes.FIELD("f1", DataTypes.BIGINT())])


-env_settings = EnvironmentSettings.new_instance().use_blink_planner().is_streaming_mode().build()
+env_settings = EnvironmentSettings.new_instance().is_streaming_mode().build()
table_env = TableEnvironment.create(env_settings)
# the result type and accumulator type can also be specified in the udaf decorator:
# weighted_avg = udaf(WeightedAvg(), result_type=DataTypes.BIGINT(), accumulator_type=...)
@@ -476,7 +476,7 @@ class Top2(TableAggregateFunction):
[DataTypes.FIELD("a", DataTypes.BIGINT())])


-env_settings = EnvironmentSettings.new_instance().use_blink_planner().in_streaming_mode().build()
+env_settings = EnvironmentSettings.new_instance().in_streaming_mode().build()
table_env = TableEnvironment.create(env_settings)
# the result type and accumulator type can also be specified in the udtaf decorator:
# top2 = udtaf(Top2(), result_type=DataTypes.ROW([DataTypes.FIELD("a", DataTypes.BIGINT())]), accumulator_type=DataTypes.ARRAY(DataTypes.BIGINT()))
@@ -53,7 +53,7 @@ under the License.
def add(i, j):
return i + j

-settings = EnvironmentSettings.new_instance().in_batch_mode().use_blink_planner().build()
+settings = EnvironmentSettings.new_instance().in_batch_mode().build()
table_env = TableEnvironment.create(settings)

# use the vectorized Python scalar function in Python Table API
@@ -87,7 +87,7 @@ and `Over Window Aggregation` use it:
def mean_udaf(v):
return v.mean()

-settings = EnvironmentSettings.new_instance().in_batch_mode().use_blink_planner().build()
+settings = EnvironmentSettings.new_instance().in_batch_mode().build()
table_env = TableEnvironment.create(settings)

my_table = ... # type: Table, table schema: [a: String, b: BigInt, c: BigInt]
4 changes: 2 additions & 2 deletions docs/content.zh/docs/dev/python/table_api_tutorial.md
@@ -67,7 +67,7 @@ $ python -m pip install apache-flink
The first step in writing a Flink Python Table API program is to create a `TableEnvironment`; it is the entry point for Python Table API jobs.

```python
-settings = EnvironmentSettings.new_instance().in_batch_mode().use_blink_planner().build()
+settings = EnvironmentSettings.new_instance().in_batch_mode().build()
t_env = TableEnvironment.create(settings)
```

@@ -147,7 +147,7 @@ from pyflink.table import DataTypes, TableEnvironment, EnvironmentSettings
from pyflink.table.descriptors import Schema, OldCsv, FileSystem
from pyflink.table.expressions import lit

-settings = EnvironmentSettings.new_instance().in_batch_mode().use_blink_planner().build()
+settings = EnvironmentSettings.new_instance().in_batch_mode().build()
t_env = TableEnvironment.create(settings)

# write all the data to one file
2 changes: 1 addition & 1 deletion docs/content.zh/docs/dev/table/catalogs.md
@@ -240,7 +240,7 @@ from pyflink.table import *
from pyflink.table.catalog import HiveCatalog, CatalogDatabase, ObjectPath, CatalogBaseTable
from pyflink.table.descriptors import Kafka

-settings = EnvironmentSettings.new_instance().in_batch_mode().use_blink_planner().build()
+settings = EnvironmentSettings.new_instance().in_batch_mode().build()
t_env = TableEnvironment.create(settings)

# Create a HiveCatalog