Skip to content
This repository has been archived by the owner on Nov 22, 2022. It is now read-only.

Commit

Permalink
Use more precise type ignores (#401)
Browse files Browse the repository at this point in the history
* Use type: ignore[override] for Column.__ne__ and Column.__eq__

* Use type: ignore[import] for py4j imports

* Use type: ignore[import] for pandas imports

* Use type: ignore[import] for numpy imports
  • Loading branch information
zero323 committed Apr 28, 2020
1 parent 0184a69 commit 16d11e6
Show file tree
Hide file tree
Showing 28 changed files with 37 additions and 39 deletions.
2 changes: 1 addition & 1 deletion third_party/3/pyspark/conf.pyi
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@
from typing import overload
from typing import Any, List, Optional, Tuple

from py4j.java_gateway import JVMView, JavaObject # type: ignore
from py4j.java_gateway import JVMView, JavaObject # type: ignore[import]

class SparkConf:
def __init__(
Expand Down
2 changes: 1 addition & 1 deletion third_party/3/pyspark/context.pyi
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@

from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, TypeVar

from py4j.java_gateway import JavaGateway, JavaObject # type: ignore
from py4j.java_gateway import JavaGateway, JavaObject # type: ignore[import]

from pyspark.accumulators import Accumulator, AccumulatorParam
from pyspark.broadcast import Broadcast
Expand Down
2 changes: 1 addition & 1 deletion third_party/3/pyspark/ml/clustering.pyi
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@ from pyspark.ml.param.shared import *
from pyspark.ml.stat import MultivariateGaussian
from pyspark.sql.dataframe import DataFrame

from numpy import ndarray # type: ignore
from numpy import ndarray # type: ignore[import]

class ClusteringSummary(JavaWrapper):
@property
Expand Down
2 changes: 1 addition & 1 deletion third_party/3/pyspark/ml/image.pyi
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@ from typing import Any, Dict, List

from pyspark.sql.types import Row, StructType

from numpy import ndarray # type: ignore
from numpy import ndarray # type: ignore[import]

class _ImageSchema:
def __init__(self) -> None: ...
Expand Down
2 changes: 1 addition & 1 deletion third_party/3/pyspark/ml/linalg/__init__.pyi
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@ from typing import Any, Dict, Iterable, List, Optional, Tuple, Union
from pyspark.ml import linalg as newlinalg
from pyspark.sql.types import StructType, UserDefinedType

from numpy import float64, ndarray # type: ignore
from numpy import float64, ndarray # type: ignore[import]

class VectorUDT(UserDefinedType):
@classmethod
Expand Down
2 changes: 1 addition & 1 deletion third_party/3/pyspark/ml/stat.pyi
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@ from pyspark.ml.wrapper import JavaWrapper
from pyspark.sql.column import Column
from pyspark.sql.dataframe import DataFrame

from py4j.java_gateway import JavaObject # type: ignore
from py4j.java_gateway import JavaObject # type: ignore[import]

class ChiSquareTest:
@staticmethod
Expand Down
2 changes: 1 addition & 1 deletion third_party/3/pyspark/mllib/_typing.pyi
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
from typing import Any, Iterable, List, Optional, Tuple, TypeVar, Union
from pyspark.mllib.linalg import Vector
from numpy import ndarray # type: ignore
from numpy import ndarray # type: ignore[import]

VectorLike = Union[Vector, List[float], Tuple[float, ...]]
7 changes: 3 additions & 4 deletions third_party/3/pyspark/mllib/classification.pyi
Original file line number Diff line number Diff line change
@@ -1,16 +1,15 @@
# Stubs for pyspark.mllib.classification (Python 3.5)
#

from typing import overload
from typing import Any, Optional, Union

from pyspark.context import SparkContext
from pyspark.rdd import RDD
from pyspark.mllib._typing import VectorLike
from pyspark.mllib.linalg import Vector
from pyspark.mllib.regression import LabeledPoint, LinearModel, StreamingLinearAlgorithm
from pyspark.mllib.util import Saveable, Loader
from pyspark.streaming.dstream import DStream
from numpy import float64, ndarray # type: ignore

from numpy import float64, ndarray # type: ignore[import]

class LinearClassificationModel(LinearModel):
def __init__(self, weights: Vector, intercept: float) -> None: ...
Expand Down
4 changes: 2 additions & 2 deletions third_party/3/pyspark/mllib/clustering.pyi
Original file line number Diff line number Diff line change
Expand Up @@ -6,8 +6,8 @@ from typing import Any, List, NamedTuple, Optional, Tuple, TypeVar

import array

from numpy import float64, int64, ndarray # type: ignore
from py4j.java_gateway import JavaObject # type: ignore
from numpy import float64, int64, ndarray # type: ignore[import]
from py4j.java_gateway import JavaObject # type: ignore[import]

from pyspark.mllib._typing import VectorLike
from pyspark.context import SparkContext
Expand Down
4 changes: 3 additions & 1 deletion third_party/3/pyspark/mllib/feature.pyi
Original file line number Diff line number Diff line change
Expand Up @@ -3,14 +3,16 @@

from typing import overload
from typing import Any, Iterable, Hashable, List, Tuple

from pyspark.mllib._typing import VectorLike
from pyspark.context import SparkContext
from pyspark.rdd import RDD
from pyspark.mllib.common import JavaModelWrapper
from pyspark.mllib.linalg import Vector
from pyspark.mllib.regression import LabeledPoint
from pyspark.mllib.util import JavaLoader, JavaSaveable
from py4j.java_collections import JavaMap # type: ignore

from py4j.java_collections import JavaMap # type: ignore[import]

class VectorTransformer:
@overload
Expand Down
2 changes: 1 addition & 1 deletion third_party/3/pyspark/mllib/linalg/__init__.pyi
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@ from typing import overload
from typing import Any, Dict, Generic, Iterable, List, Optional, Tuple, TypeVar, Union
from pyspark.ml import linalg as newlinalg
from pyspark.sql.types import StructType, UserDefinedType
from numpy import float64, ndarray # type: ignore
from numpy import float64, ndarray # type: ignore[import]

QT = TypeVar("QT")
RT = TypeVar("RT")
Expand Down
2 changes: 1 addition & 1 deletion third_party/3/pyspark/mllib/linalg/distributed.pyi
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@ from pyspark.storagelevel import StorageLevel
from pyspark.mllib.common import JavaModelWrapper
from pyspark.mllib.linalg import Vector, Matrix, QRDecomposition
from pyspark.mllib.stat import MultivariateStatisticalSummary
from numpy import ndarray # type: ignore
from numpy import ndarray # type: ignore[import]

VectorLike = Union[Vector, Sequence[Union[float, int]]]

Expand Down
2 changes: 1 addition & 1 deletion third_party/3/pyspark/mllib/regression.pyi
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@ from pyspark.context import SparkContext
from pyspark.mllib.linalg import Vector
from pyspark.mllib.util import Saveable, Loader
from pyspark.streaming.dstream import DStream
from numpy import ndarray # type: ignore
from numpy import ndarray # type: ignore[import]

K = TypeVar("K")

Expand Down
2 changes: 1 addition & 1 deletion third_party/3/pyspark/mllib/stat/KernelDensity.pyi
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@

from typing import Any, Iterable
from pyspark.rdd import RDD
from numpy import ndarray # type: ignore
from numpy import ndarray # type: ignore[import]

xrange: range

Expand Down
2 changes: 1 addition & 1 deletion third_party/3/pyspark/mllib/stat/_statistics.pyi
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@
from typing import List, Optional, overload, Union
from typing_extensions import Literal

from numpy import ndarray # type: ignore
from numpy import ndarray # type: ignore[import]

from pyspark.mllib.common import JavaModelWrapper
from pyspark.mllib.linalg import Vector, Matrix
Expand Down
4 changes: 2 additions & 2 deletions third_party/3/pyspark/rdd.pyi
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@ from typing import (
)
from typing_extensions import Literal

from numpy import int32, int64, float32, float64, ndarray # type: ignore
from numpy import int32, int64, float32, float64, ndarray # type: ignore[import]

from pyspark._typing import SupportsOrdering
from pyspark.sql.pandas._typing import (
Expand All @@ -34,7 +34,7 @@ from pyspark.statcounter import StatCounter
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.types import StructType
from pyspark.sql._typing import RowLike
from py4j.java_gateway import JavaObject # type: ignore
from py4j.java_gateway import JavaObject # type: ignore[import]

T = TypeVar("T")
U = TypeVar("U")
Expand Down
4 changes: 2 additions & 2 deletions third_party/3/pyspark/sql/_typing.pyi
Original file line number Diff line number Diff line change
Expand Up @@ -21,8 +21,8 @@ import pyspark.sql.types
from pyspark.sql.column import Column

from pyspark.sql.pandas._typing import DataFrameLike, SeriesLike
import pandas.core.frame # type: ignore
import pandas.core.series # type: ignore
import pandas.core.frame # type: ignore[import]
import pandas.core.series # type: ignore[import]

ColumnOrName = Union[pyspark.sql.column.Column, str]
DecimalLiteral = decimal.Decimal
Expand Down
6 changes: 3 additions & 3 deletions third_party/3/pyspark/sql/column.pyi
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@ from pyspark.sql._typing import LiteralType, DecimalLiteral, DateTimeLiteral
from pyspark.sql.types import *
from pyspark.sql.window import WindowSpec

from py4j.java_gateway import JavaObject # type: ignore
from py4j.java_gateway import JavaObject # type: ignore[import]

class Column:
def __init__(self, JavaObject) -> None: ...
Expand All @@ -29,8 +29,8 @@ class Column:
def __rmod__(self, other: Union[bool, int, float, DecimalLiteral]) -> Column: ...
def __pow__(self, other: Union[Column, LiteralType, DecimalLiteral]) -> Column: ...
def __rpow__(self, other: Union[LiteralType, DecimalLiteral]) -> Column: ...
def __eq__(self, other: Union[Column, LiteralType, DateTimeLiteral, DecimalLiteral]) -> Column: ... # type: ignore
def __ne__(self, other: Any) -> Column: ... # type: ignore
def __eq__(self, other: Union[Column, LiteralType, DateTimeLiteral, DecimalLiteral]) -> Column: ... # type: ignore[override]
def __ne__(self, other: Any) -> Column: ... # type: ignore[override]
def __lt__(
self, other: Union[Column, LiteralType, DateTimeLiteral, DecimalLiteral]
) -> Column: ...
Expand Down
2 changes: 1 addition & 1 deletion third_party/3/pyspark/sql/conf.pyi
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
#

from typing import Any, Optional
from py4j.java_gateway import JavaObject # type: ignore
from py4j.java_gateway import JavaObject # type: ignore[import]

class RuntimeConfig:
def __init__(self, jconf: JavaObject) -> None: ...
Expand Down
2 changes: 1 addition & 1 deletion third_party/3/pyspark/sql/context.pyi
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@
from typing import overload
from typing import Any, Callable, Iterable, List, Optional, Tuple, TypeVar, Union

from py4j.java_gateway import JavaObject # type: ignore
from py4j.java_gateway import JavaObject # type: ignore[import]

from pyspark.sql._typing import (
DateTimeLiteral,
Expand Down
4 changes: 2 additions & 2 deletions third_party/3/pyspark/sql/dataframe.pyi
Original file line number Diff line number Diff line change
Expand Up @@ -11,8 +11,8 @@ from typing import (
Union,
)

import pandas.core.frame # type: ignore
from py4j.java_gateway import JavaObject # type: ignore
import pandas.core.frame # type: ignore[import]
from py4j.java_gateway import JavaObject # type: ignore[import]

from pyspark.sql._typing import ColumnOrName, LiteralType
from pyspark.sql.pandas._typing import MapIterPandasUserDefinedFunction
Expand Down
3 changes: 0 additions & 3 deletions third_party/3/pyspark/sql/functions.pyi
Original file line number Diff line number Diff line change
Expand Up @@ -3,9 +3,6 @@
from typing import overload
from typing import Any, Optional, Union, Dict, Callable

import pandas.core.frame # type: ignore
import pandas.core.series # type: ignore

from pyspark.sql._typing import (
AtomicDataTypeOrString,
ColumnOrName,
Expand Down
2 changes: 1 addition & 1 deletion third_party/3/pyspark/sql/group.pyi
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@ from pyspark.sql.column import Column
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.pandas.group_ops import PandasGroupedOpsMixin
from pyspark.sql.types import *
from py4j.java_gateway import JavaObject # type: ignore
from py4j.java_gateway import JavaObject # type: ignore[import]

class GroupedData(PandasGroupedOpsMixin):
sql_ctx: SQLContext
Expand Down
2 changes: 1 addition & 1 deletion third_party/3/pyspark/sql/session.pyi
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@
from typing import overload
from typing import Any, Iterable, List, Optional, Tuple, TypeVar, Union

from py4j.java_gateway import JavaObject # type: ignore
from py4j.java_gateway import JavaObject # type: ignore[import]

from pyspark.sql._typing import DateTimeLiteral, LiteralType, DecimalLiteral, RowLike
from pyspark.sql.pandas._typing import DataFrameLike
Expand Down
2 changes: 1 addition & 1 deletion third_party/3/pyspark/sql/streaming.pyi
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@ from pyspark.sql.readwriter import OptionUtils
from pyspark.sql.types import Row, StructType
from pyspark.sql.utils import StreamingQueryException

from py4j.java_gateway import JavaObject # type: ignore
from py4j.java_gateway import JavaObject # type: ignore[import]

class StreamingQuery:
def __init__(self, jsq: JavaObject) -> None: ...
Expand Down
2 changes: 1 addition & 1 deletion third_party/3/pyspark/sql/window.pyi
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@

from typing import Any, Union
from pyspark.sql._typing import ColumnOrName
from py4j.java_gateway import JavaObject # type: ignore
from py4j.java_gateway import JavaObject # type: ignore[import]

class Window:
unboundedPreceding: int
Expand Down
2 changes: 1 addition & 1 deletion third_party/3/pyspark/status.pyi
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
#

from typing import Any, List, NamedTuple, Optional
from py4j.java_gateway import JavaArray, JavaObject # type: ignore
from py4j.java_gateway import JavaArray, JavaObject # type: ignore[import]

class SparkJobInfo(NamedTuple):
jobId: int
Expand Down
2 changes: 1 addition & 1 deletion third_party/3/pyspark/streaming/context.pyi
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@

from typing import Any, Callable, List, Optional, TypeVar, Union

from py4j.java_gateway import JavaObject # type: ignore
from py4j.java_gateway import JavaObject # type: ignore[import]

from pyspark.context import SparkContext
from pyspark.rdd import RDD
Expand Down

0 comments on commit 16d11e6

Please sign in to comment.