This repository has been archived by the owner on Nov 22, 2022. It is now read-only.

Disallow missing, star and unused imports (#518)
- Resolves #516
- Resolves #517
zero323 committed Sep 6, 2020
1 parent 56aab84 commit 897330a
Showing 77 changed files with 395 additions and 349 deletions.
18 changes: 18 additions & 0 deletions mypy.ini
@@ -1 +1,19 @@
[mypy]

[mypy-pyspark.cloudpickle.*]
ignore_errors = True

[mypy-py4j.*]
ignore_missing_imports = True

[mypy-numpy.*]
ignore_missing_imports = True

[mypy-scipy.*]
ignore_missing_imports = True

[mypy-pandas.*]
ignore_missing_imports = True

[mypy-pyarrow]
ignore_missing_imports = True
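
The new `mypy.ini` works by exception: the bare `[mypy]` section leaves mypy's defaults in place, so an import that cannot be resolved is an error, and each `[mypy-...]` section whitelists one known-untyped dependency. `ignore_missing_imports` silences only unresolved-import errors for the named modules, while `ignore_errors` (used for the vendored `pyspark.cloudpickle`, whose hand-written stub is deleted below) suppresses all errors in matching modules. A minimal sketch of the effect — illustration only, with the error text paraphrased from recent mypy versions:

# check_imports.py -- illustration only; not part of the commit.

# Resolvable, because the stubs in this repository cover pyspark:
from pyspark import SparkContext

# py4j ships no type information. Without the [mypy-py4j.*] override,
# mypy reports roughly:
#   error: Cannot find implementation or library stub for module named "py4j"
# With ignore_missing_imports = True, the module is typed as Any instead.
from py4j.java_gateway import JavaGateway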
50 changes: 29 additions & 21 deletions third_party/3/pyspark/__init__.pyi
@@ -16,36 +16,49 @@
# specific language governing permissions and limitations
# under the License.

# Stubs for pyspark (Python 3)
#

from typing import Callable, Optional, TypeVar

from pyspark.status import *
from pyspark.accumulators import (
from pyspark.accumulators import (  # noqa: F401
    Accumulator as Accumulator,
    AccumulatorParam as AccumulatorParam,
)
from pyspark.broadcast import Broadcast as Broadcast
from pyspark.conf import SparkConf as SparkConf
from pyspark.context import SparkContext as SparkContext
from pyspark.files import SparkFiles as SparkFiles
from pyspark.profiler import BasicProfiler as BasicProfiler, Profiler as Profiler
from pyspark.rdd import RDD as RDD, RDDBarrier as RDDBarrier
from pyspark.serializers import (
from pyspark.broadcast import Broadcast as Broadcast  # noqa: F401
from pyspark.conf import SparkConf as SparkConf  # noqa: F401
from pyspark.context import SparkContext as SparkContext  # noqa: F401
from pyspark.files import SparkFiles as SparkFiles  # noqa: F401
from pyspark.status import (
    StatusTracker as StatusTracker,
    SparkJobInfo as SparkJobInfo,
    SparkStageInfo as SparkStageInfo,
)  # noqa: F401
from pyspark.profiler import (  # noqa: F401
    BasicProfiler as BasicProfiler,
    Profiler as Profiler,
)
from pyspark.rdd import RDD as RDD, RDDBarrier as RDDBarrier  # noqa: F401
from pyspark.serializers import (  # noqa: F401
    MarshalSerializer as MarshalSerializer,
    PickleSerializer as PickleSerializer,
)
from pyspark.storagelevel import StorageLevel as StorageLevel
from pyspark.taskcontext import (
from pyspark.status import (  # noqa: F401
    SparkJobInfo as SparkJobInfo,
    SparkStageInfo as SparkStageInfo,
    StatusTracker as StatusTracker,
)
from pyspark.storagelevel import StorageLevel as StorageLevel  # noqa: F401
from pyspark.taskcontext import (  # noqa: F401
    BarrierTaskContext as BarrierTaskContext,
    BarrierTaskInfo as BarrierTaskInfo,
    TaskContext as TaskContext,
)
from pyspark.util import InheritableThread as InheritableThread
from pyspark.util import InheritableThread as InheritableThread  # noqa: F401

# Compatiblity imports
from pyspark.sql import SQLContext as SQLContext, HiveContext as HiveContext, Row as Row
from pyspark.sql import (  # noqa: F401
    SQLContext as SQLContext,
    HiveContext as HiveContext,
    Row as Row,
)

T = TypeVar("T")
F = TypeVar("F", bound=Callable)
@@ -58,8 +71,3 @@ def copy_func(
    doc: Optional[str] = ...,
) -> F: ...
def keyword_only(func: F) -> F: ...

# Names in __all__ with no definition:
#   SparkJobInfo
#   SparkStageInfo
#   StatusTracker
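
The seemingly redundant `X as X` aliases above are deliberate: PEP 484 treats a name imported into a stub as private unless it is re-exported, and the `import ... as ...` form with an unchanged name is the conventional way to mark an explicit re-export (mypy enforces this for stubs, and for regular modules under `--no-implicit-reexport`). flake8 still counts such imports as unused, hence the per-import `# noqa: F401` suppressions. A minimal sketch with hypothetical module names:

# __init__.pyi -- a minimal sketch of the re-export idiom used above.
# The package and module names are hypothetical, not part of pyspark-stubs.

# In a stub, a plain "from pkg.mod import Thing" is treated as private:
# under PEP 484 the name would not be re-exported to users of this package.

# The redundant "X as X" alias marks an explicit re-export, so
# "from pkg import Thing" type-checks in downstream code. flake8 still
# counts the name as unused inside this file, hence "# noqa: F401".
from pkg.mod import Thing as Thing  # noqa: F401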
2 changes: 1 addition & 1 deletion third_party/3/pyspark/_typing.pyi
@@ -16,7 +16,7 @@
# specific language governing permissions and limitations
# under the License.

from typing import Any, Generic, Iterable, List, Optional, Sized, TypeVar, Union
from typing import Iterable, Sized, TypeVar, Union
from typing_extensions import Protocol

T = TypeVar("T", covariant=True)
1 change: 0 additions & 1 deletion third_party/3/pyspark/accumulators.pyi
@@ -29,7 +29,6 @@ T = TypeVar("T")
U = TypeVar("U", bound=SupportsIAdd)

import socketserver as SocketServer
from typing import Any

class Accumulator(Generic[T]):
    aid: int
86 changes: 0 additions & 86 deletions third_party/3/pyspark/cloudpickle.pyi

This file was deleted.

2 changes: 1 addition & 1 deletion third_party/3/pyspark/conf.pyi
@@ -20,7 +20,7 @@
#

from typing import overload
from typing import Any, List, Optional, Tuple
from typing import List, Optional, Tuple

from py4j.java_gateway import JVMView, JavaObject # type: ignore[import]

2 changes: 1 addition & 1 deletion third_party/3/pyspark/context.pyi
@@ -26,7 +26,7 @@ from py4j.java_gateway import JavaGateway, JavaObject  # type: ignore[import]
from pyspark.accumulators import Accumulator, AccumulatorParam
from pyspark.broadcast import Broadcast
from pyspark.conf import SparkConf
from pyspark.profiler import Profiler
from pyspark.profiler import Profiler # noqa: F401
from pyspark.resource.information import ResourceInformation
from pyspark.rdd import RDD
from pyspark.serializers import Serializer
7 changes: 6 additions & 1 deletion third_party/3/pyspark/daemon.pyi
@@ -16,7 +16,12 @@
# specific language governing permissions and limitations
# under the License.

from pyspark.serializers import UTF8Deserializer as UTF8Deserializer, read_int as read_int, write_int as write_int, write_with_length as write_with_length # type: ignore[attr-defined]
from pyspark.serializers import ( # noqa: F401
    UTF8Deserializer as UTF8Deserializer,
    read_int as read_int,
    write_int as write_int,
    write_with_length as write_with_length,
)
from typing import Any

def compute_real_exit_code(exit_code: Any): ...
5 changes: 1 addition & 4 deletions third_party/3/pyspark/join.pyi
@@ -16,10 +16,7 @@
# specific language governing permissions and limitations
# under the License.

# Stubs for pyspark.join (Python 3)
#

from typing import Hashable, Iterable, List, Optional, Tuple, TypeVar
from typing import Hashable, Iterable, Optional, Tuple, TypeVar

from pyspark.resultiterable import ResultIterable
import pyspark.rdd
9 changes: 6 additions & 3 deletions third_party/3/pyspark/ml/__init__.pyi
@@ -16,7 +16,7 @@
# specific language governing permissions and limitations
# under the License.

from pyspark.ml import (
from pyspark.ml import (  # noqa: F401
    classification as classification,
    clustering as clustering,
    evaluation as evaluation,
@@ -31,12 +31,15 @@ from pyspark.ml import (
    tuning as tuning,
    util as util,
)
from pyspark.ml.base import (
from pyspark.ml.base import (  # noqa: F401
    Estimator as Estimator,
    Model as Model,
    PredictionModel as PredictionModel,
    Predictor as Predictor,
    Transformer as Transformer,
    UnaryTransformer as UnaryTransformer,
)
from pyspark.ml.pipeline import Pipeline as Pipeline, PipelineModel as PipelineModel
from pyspark.ml.pipeline import (  # noqa: F401
    Pipeline as Pipeline,
    PipelineModel as PipelineModel,
)
24 changes: 14 additions & 10 deletions third_party/3/pyspark/ml/base.pyi
@@ -18,35 +18,39 @@

from typing import overload
from typing import (
    Any,
    Callable,
    Dict,
    Generic,
    Iterable,
    List,
    Optional,
    Tuple,
    Type,
    TypeVar,
)
from pyspark.ml._typing import M, P, T, ParamMap

import _thread

import abc
from abc import abstractmethod
from pyspark.ml.common import inherit_doc as inherit_doc
from pyspark.ml.param import Params, Param
from pyspark.ml.param.shared import *
from pyspark.sql.column import Column
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.functions import udf as udf
from pyspark.sql.types import (
from pyspark import since as since  # noqa: F401
from pyspark.ml.common import inherit_doc as inherit_doc  # noqa: F401
from pyspark.ml.param.shared import (
    HasFeaturesCol as HasFeaturesCol,
    HasInputCol as HasInputCol,
    HasLabelCol as HasLabelCol,
    HasOutputCol as HasOutputCol,
    HasPredictionCol as HasPredictionCol,
    Params as Params,
)
from pyspark.sql.functions import udf as udf  # noqa: F401
from pyspark.sql.types import (  # noqa: F401
    DataType,
    StructField as StructField,
    StructType as StructType,
)

from pyspark.sql.dataframe import DataFrame

class _FitMultipleIterator:
    fitSingleModel: Callable[[int], Transformer]
    numModel: int
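
Here and in `classification.pyi` below, `from pyspark.ml.param.shared import *` and `from pyspark.ml.util import *` give way to explicit name lists. Star imports are what #517 targets: pyflakes cannot tell which names a wildcard binds (F403) and flags later uses as possibly undefined (F405), and the stub's own exported surface becomes unpredictable. A minimal before/after sketch with hypothetical names:

# star_import_cleanup.py -- illustration only; "mod" and its names are
# hypothetical stand-ins, not pyspark modules.

# Before: pyflakes reports F403 on the wildcard and F405 on each use,
# because it cannot see which names the star actually binds.
#   from mod import *

# After: the dependency surface is explicit. A name that goes unused
# shows up as F401, and an upstream rename fails loudly right here.
from mod import (
    HasInputCol,
    HasOutputCol,
)

class MyParams(HasInputCol, HasOutputCol): ...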
48 changes: 30 additions & 18 deletions third_party/3/pyspark/ml/classification.pyi
@@ -16,36 +16,48 @@
# specific language governing permissions and limitations
# under the License.

from typing import Any, Dict, List, Optional, Type, TypeVar
from typing import Any, List, Optional, Type
from pyspark.ml._typing import JM, M, P, T, ParamMap

import abc
from abc import abstractmethod
from pyspark.ml.base import Estimator, Model, Transformer, PredictionModel, Predictor
from pyspark.ml import Estimator, Model, PredictionModel, Predictor, Transformer
from pyspark.ml.base import _PredictorParams
from pyspark.ml.linalg import Matrix, Vector
from pyspark.ml.param.shared import *
from pyspark.ml.param.shared import (
    HasAggregationDepth,
    HasBlockSize,
    HasElasticNetParam,
    HasFitIntercept,
    HasMaxIter,
    HasParallelism,
    HasProbabilityCol,
    HasRawPredictionCol,
    HasRegParam,
    HasSeed,
    HasSolver,
    HasStandardization,
    HasStepSize,
    HasThreshold,
    HasThresholds,
    HasTol,
    HasWeightCol,
)
from pyspark.ml.regression import _FactorizationMachinesParams
from pyspark.ml.tree import (
    _DecisionTreeModel,
    _DecisionTreeParams,
    _TreeEnsembleModel,
    _RandomForestParams,
    _GBTParams,
    _HasVarianceImpurity,
    _RandomForestParams,
    _TreeClassifierParams,
    _TreeEnsembleParams,
)
from pyspark.ml.regression import (
    _FactorizationMachinesParams,
    DecisionTreeRegressionModel,
)
from pyspark.ml.util import *
from pyspark.ml.wrapper import (
    JavaPredictionModel,
    JavaPredictor,
    JavaWrapper,
    JavaTransformer,
    _TreeEnsembleModel,
)
from pyspark.ml.util import HasTrainingSummary, JavaMLReadable, JavaMLWritable
from pyspark.ml.wrapper import JavaPredictionModel, JavaPredictor, JavaWrapper

from pyspark.ml.linalg import Matrix, Vector
from pyspark.ml.param import Param
from pyspark.ml.regression import DecisionTreeRegressionModel
from pyspark.sql.dataframe import DataFrame

class _ClassifierParams(HasRawPredictionCol, _PredictorParams): ...