From ad0240a867bb1622c79abac850e1dc30cb4b3be1 Mon Sep 17 00:00:00 2001 From: luzpaz Date: Thu, 15 Jul 2021 19:46:58 -0400 Subject: [PATCH] Fix various typos (#2617) Found via `codespell -q 3 -L ans,hist,whis` --- doc/Makefile | 2 +- doc/docstrings/histplot.ipynb | 2 +- doc/docstrings/swarmplot.ipynb | 2 +- doc/introduction.ipynb | 2 +- doc/releases/v0.5.0.txt | 2 +- doc/releases/v0.6.0.txt | 2 +- doc/releases/v0.8.0.txt | 2 +- doc/sphinxext/gallery_generator.py | 2 +- doc/tutorial/color_palettes.ipynb | 6 +++--- doc/tutorial/data_structure.ipynb | 6 +++--- doc/tutorial/distributions.ipynb | 8 ++++---- doc/tutorial/function_overview.ipynb | 6 +++--- seaborn/_core.py | 10 +++++----- seaborn/_statistics.py | 2 +- seaborn/categorical.py | 10 +++++----- seaborn/distributions.py | 8 ++++---- seaborn/external/kde.py | 2 +- seaborn/matrix.py | 8 ++++---- seaborn/relational.py | 4 ++-- seaborn/tests/test_algorithms.py | 2 +- seaborn/tests/test_categorical.py | 2 +- seaborn/tests/test_core.py | 4 ++-- seaborn/tests/test_relational.py | 2 +- seaborn/tests/test_utils.py | 2 +- 24 files changed, 49 insertions(+), 49 deletions(-) diff --git a/doc/Makefile b/doc/Makefile index 8ed9484cd5..4a7fad0fdf 100644 --- a/doc/Makefile +++ b/doc/Makefile @@ -18,7 +18,7 @@ I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . help: @echo "Please use \`make ' where is one of" - @echo " clean to remove genrated output" + @echo " clean to remove generated output" @echo " html to make standalone HTML files" @echo " notebooks to make the Jupyter notebook-based tutorials" @echo " dirhtml to make HTML files named index.html in directories" diff --git a/doc/docstrings/histplot.ipynb b/doc/docstrings/histplot.ipynb index 99ed6c551d..d715849205 100644 --- a/doc/docstrings/histplot.ipynb +++ b/doc/docstrings/histplot.ipynb @@ -179,7 +179,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "To compare the distribution of subsets that differ substantially in size, use indepdendent density normalization:" + "To compare the distribution of subsets that differ substantially in size, use independent density normalization:" ] }, { diff --git a/doc/docstrings/swarmplot.ipynb b/doc/docstrings/swarmplot.ipynb index ebc74b92ab..b47560fc1f 100644 --- a/doc/docstrings/swarmplot.ipynb +++ b/doc/docstrings/swarmplot.ipynb @@ -147,7 +147,7 @@ "cell_type": "raw", "metadata": {}, "source": [ - "The \"orientation\" of the plot (defined as the direction along which quantitative relationships are preserved) is usualy inferred automatically. But in ambiguous cases, such as when both axis variables are numeric, it can be specified:" + "The \"orientation\" of the plot (defined as the direction along which quantitative relationships are preserved) is usually inferred automatically. 
But in ambiguous cases, such as when both axis variables are numeric, it can be specified:" ] }, { diff --git a/doc/introduction.ipynb b/doc/introduction.ipynb index ec58f6126f..9de6e5c9f9 100644 --- a/doc/introduction.ipynb +++ b/doc/introduction.ipynb @@ -431,7 +431,7 @@ "Relationship to matplotlib\n", "--------------------------\n", "\n", - "Seaborn's integration with matplotlib allows you to use it across the many environments that matplotlib supports, inlcuding exploratory analysis in notebooks, real-time interaction in GUI applications, and archival output in a number of raster and vector formats.\n", + "Seaborn's integration with matplotlib allows you to use it across the many environments that matplotlib supports, including exploratory analysis in notebooks, real-time interaction in GUI applications, and archival output in a number of raster and vector formats.\n", "\n", "While you can be productive using only seaborn functions, full customization of your graphics will require some knowledge of matplotlib's concepts and API. One aspect of the learning curve for new users of seaborn will be knowing when dropping down to the matplotlib layer is necessary to achieve a particular customization. On the other hand, users coming from matplotlib will find that much of their knowledge transfers.\n", "\n", diff --git a/doc/releases/v0.5.0.txt b/doc/releases/v0.5.0.txt index 0fd1aefdb5..53af8c58a6 100644 --- a/doc/releases/v0.5.0.txt +++ b/doc/releases/v0.5.0.txt @@ -13,7 +13,7 @@ Plotting functions - :func:`lmplot` and :func:`pairplot` get a new keyword argument, ``markers``. This can be a single kind of marker or a list of different markers for each level of the ``hue`` variable. Using different markers for different hues should let plots be more comprehensible when reproduced to black-and-white (i.e. when printed). See the `github pull request (#323) `_ for examples. -- More generally, there is a new keyword argument in :class:`FacetGrid` and :class:`PairGrid`, ``hue_kws``. This similarly lets plot aesthetics vary across the levels of the hue variable, but more flexibily. ``hue_kws`` should be a dictionary that maps the name of keyword arguments to lists of values that are as long as the number of levels of the hue variable. +- More generally, there is a new keyword argument in :class:`FacetGrid` and :class:`PairGrid`, ``hue_kws``. This similarly lets plot aesthetics vary across the levels of the hue variable, but more flexibly. ``hue_kws`` should be a dictionary that maps the name of keyword arguments to lists of values that are as long as the number of levels of the hue variable. - The argument ``subplot_kws`` has been added to ``FacetGrid``. This allows for faceted plots with custom projections, including `maps with Cartopy `_. diff --git a/doc/releases/v0.6.0.txt b/doc/releases/v0.6.0.txt index 0f2bb122c6..a8d0da6634 100644 --- a/doc/releases/v0.6.0.txt +++ b/doc/releases/v0.6.0.txt @@ -13,7 +13,7 @@ In version 0.6, the "categorical" plots have been unified with a common API. Thi The categorical functions now each accept the same formats of input data and can be invoked in the same way. They can plot using long- or wide-form data, and can be drawn vertically or horizontally. When long-form data is used, the orientation of the plots is inferred from the types of the input data. Additionally, all functions natively take a ``hue`` variable to add a second layer of categorization. -With the (in some cases new) API, these functions can all be drawn correctly by :class:`FacetGrid`. 
However, ``factorplot`` can also now create faceted verisons of any of these kinds of plots, so in most cases it will be unnecessary to use :class:`FacetGrid` directly. By default, ``factorplot`` draws a point plot, but this is controlled by the ``kind`` parameter. +With the (in some cases new) API, these functions can all be drawn correctly by :class:`FacetGrid`. However, ``factorplot`` can also now create faceted versions of any of these kinds of plots, so in most cases it will be unnecessary to use :class:`FacetGrid` directly. By default, ``factorplot`` draws a point plot, but this is controlled by the ``kind`` parameter. Here are details on what has changed in the process of unifying these APIs: diff --git a/doc/releases/v0.8.0.txt b/doc/releases/v0.8.0.txt index 073d81a862..4de9bef67d 100644 --- a/doc/releases/v0.8.0.txt +++ b/doc/releases/v0.8.0.txt @@ -14,7 +14,7 @@ v0.8.0 (July 2017) - Added ``"auto"`` as a (default) option for tick labels in :func:`heatmap` and :func:`clustermap`. This will try to estimate how many ticks can be labeled without the text objects overlapping, which should improve performance for larger matrices. -- Added the ``dodge`` parameter to :func:`boxplot`, :func:`violinplot`, and :func:`barplot` to allow use of ``hue`` without changing the position or width of the plot elements, as when the ``hue`` varible is not nested within the main categorical variable. +- Added the ``dodge`` parameter to :func:`boxplot`, :func:`violinplot`, and :func:`barplot` to allow use of ``hue`` without changing the position or width of the plot elements, as when the ``hue`` variable is not nested within the main categorical variable. - Correspondingly, the ``split`` parameter for :func:`stripplot` and :func:`swarmplot` has been renamed to ``dodge`` for consistency with the other categorical functions (and for differentiation from the meaning of ``split`` in :func:`violinplot`). diff --git a/doc/sphinxext/gallery_generator.py b/doc/sphinxext/gallery_generator.py index ff546a8419..9f6bd26106 100644 --- a/doc/sphinxext/gallery_generator.py +++ b/doc/sphinxext/gallery_generator.py @@ -109,7 +109,7 @@ def execfile(filename, globals=None, locals=None): }} .caption {{ - position: absolue; + position: absolute; width: 180px; top: 170px; text-align: center !important; diff --git a/doc/tutorial/color_palettes.ipynb b/doc/tutorial/color_palettes.ipynb index 616318db46..05f73ac2fe 100644 --- a/doc/tutorial/color_palettes.ipynb +++ b/doc/tutorial/color_palettes.ipynb @@ -298,9 +298,9 @@ "cell_type": "raw", "metadata": {}, "source": [ - "With the hue-based palette, it's quite difficult to ascertain the shape of the bivariate distribution. In contrast, the luminance palette makes it much more clear that there are two prominant peaks.\n", + "With the hue-based palette, it's quite difficult to ascertain the shape of the bivariate distribution. In contrast, the luminance palette makes it much more clear that there are two prominent peaks.\n", "\n", - "Varying luminance helps you see structure in data, and changes in luminance are more intuitively processed as changes in importance. But the plot on the right does not use a grayscale colormap. Its colorfulness makes it more interesting, and the subtle hue variation increases the perceptual distance between two values. As a result, small differencess slightly easier to resolve.\n", + "Varying luminance helps you see structure in data, and changes in luminance are more intuitively processed as changes in importance. 
But the plot on the right does not use a grayscale colormap. Its colorfulness makes it more interesting, and the subtle hue variation increases the perceptual distance between two values. As a result, small differences slightly easier to resolve.\n", "\n", "These examples show that color palette choices are about more than aesthetics: the colors you choose can reveal patterns in your data if used effectively or hide them if used poorly. There is not one optimal palette, but there are palettes that are better or worse for particular datasets and visualization approaches.\n", "\n", @@ -626,7 +626,7 @@ "cell_type": "raw", "metadata": {}, "source": [ - "Interally, seaborn uses the discrete version for categorical data and the continuous version when in numeric mapping mode. Discrete sequential colormaps can be well-suited for visualizing categorical data with an intrinsic ordering, especially if there is some hue variation." + "Internally, seaborn uses the discrete version for categorical data and the continuous version when in numeric mapping mode. Discrete sequential colormaps can be well-suited for visualizing categorical data with an intrinsic ordering, especially if there is some hue variation." ] }, { diff --git a/doc/tutorial/data_structure.ipynb b/doc/tutorial/data_structure.ipynb index 69bd2b19e4..e7d67ede5a 100644 --- a/doc/tutorial/data_structure.ipynb +++ b/doc/tutorial/data_structure.ipynb @@ -97,7 +97,7 @@ "cell_type": "raw", "metadata": {}, "source": [ - "The advantage of long-form data is that it lends itself well to this explicit specification of the plot. It can accomodate datasets of arbitrary complexity, so long as the variables and observations can be clearly defined. But this format takes some getting used to, because it is often not the model of the data that one has in their head.\n", + "The advantage of long-form data is that it lends itself well to this explicit specification of the plot. It can accommodate datasets of arbitrary complexity, so long as the variables and observations can be clearly defined. But this format takes some getting used to, because it is often not the model of the data that one has in their head.\n", "\n", "Wide-form data\n", "~~~~~~~~~~~~~~\n", @@ -145,7 +145,7 @@ "cell_type": "raw", "metadata": {}, "source": [ - "This plot looks very similar to the one before. Seaborn has assigned the index of the dataframe to ``x``, the values of the dataframe to ``y``, and it has drawn a separate line for each month. There is a notable difference between the two plots, however. When the dataset went through the \"pivot\" operation that converted it from long-form to wide-form, the information about what the values mean was lost. As a result, there is no y axis label. (The lines also have dashes here, becuase :func:`relplot` has mapped the column variable to both the ``hue`` and ``style`` semantic so that the plot is more accessible. We didn't do that in the long-form case, but we could have by setting ``style=\"month\"``).\n", + "This plot looks very similar to the one before. Seaborn has assigned the index of the dataframe to ``x``, the values of the dataframe to ``y``, and it has drawn a separate line for each month. There is a notable difference between the two plots, however. When the dataset went through the \"pivot\" operation that converted it from long-form to wide-form, the information about what the values mean was lost. As a result, there is no y axis label. 
(The lines also have dashes here, because :func:`relplot` has mapped the column variable to both the ``hue`` and ``style`` semantic so that the plot is more accessible. We didn't do that in the long-form case, but we could have by setting ``style=\"month\"``).\n", "\n", "Thus far, we did much less typing while using wide-form data and made nearly the same plot. This seems easier! But a big advantage of long-form data is that, once you have the data in the correct format, you no longer need to think about its *structure*. You can design your plots by thinking only about the variables contained within it. For example, to draw lines that represent the monthly time series for each year, simply reassign the variables:" ] @@ -181,7 +181,7 @@ "source": [ "(This example also illustrates another wrinkle, which is that seaborn currently considers the column variable in a wide-form dataset to be categorical regardless of its datatype, whereas, because the long-form variable is numeric, it is assigned a quantitative color palette and legend. This may change in the future).\n", "\n", - "The absence of explicit variable assignments also means that each plot type needs to define a fixed mapping between the dimensions of the wide-form data and the roles in the plot. Because ths natural mapping may vary across plot types, the results are less predictable when using wide-form data. For example, the :ref:`categorical ` plots assign the *column* dimension of the table to ``x`` and then aggregate across the rows (ignoring the index):" + "The absence of explicit variable assignments also means that each plot type needs to define a fixed mapping between the dimensions of the wide-form data and the roles in the plot. Because this natural mapping may vary across plot types, the results are less predictable when using wide-form data. For example, the :ref:`categorical ` plots assign the *column* dimension of the table to ``x`` and then aggregate across the rows (ignoring the index):" ] }, { diff --git a/doc/tutorial/distributions.ipynb b/doc/tutorial/distributions.ipynb index 59452c5a13..7cabe730c3 100644 --- a/doc/tutorial/distributions.ipynb +++ b/doc/tutorial/distributions.ipynb @@ -24,7 +24,7 @@ "\n", "The :ref:`distributions module ` contains several functions designed to answer questions such as these. The axes-level functions are :func:`histplot`, :func:`kdeplot`, :func:`ecdfplot`, and :func:`rugplot`. They are grouped together within the figure-level :func:`displot`, :func:`jointplot`, and :func:`pairplot` functions.\n", "\n", - "There are several different approaches to visualizing a distribution, and each has its relative advantages and drawbacks. It is important to understand theses factors so that you can choose the best approach for your particular aim." + "There are several different approaches to visualizing a distribution, and each has its relative advantages and drawbacks. It is important to understand these factors so that you can choose the best approach for your particular aim." ] }, { @@ -415,7 +415,7 @@ "Kernel density estimation pitfalls\n", "^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", "\n", - "KDE plots have many advantages. Important features of the data are easy to discern (central tendency, bimodality, skew), and they afford easy comparisons between subsets. But there are also situations where KDE poorly represents the underlying data. This is because the logic of KDE assumes that the underlying distribution is smooth and unbounded. 
One way this assumption can fail is when a varible reflects a quantity that is naturally bounded. If there are observations lying close to the bound (for example, small values of a variable that cannot be negative), the KDE curve may extend to unrealistic values:" + "KDE plots have many advantages. Important features of the data are easy to discern (central tendency, bimodality, skew), and they afford easy comparisons between subsets. But there are also situations where KDE poorly represents the underlying data. This is because the logic of KDE assumes that the underlying distribution is smooth and unbounded. One way this assumption can fail is when a variable reflects a quantity that is naturally bounded. If there are observations lying close to the bound (for example, small values of a variable that cannot be negative), the KDE curve may extend to unrealistic values:" ] }, { @@ -431,7 +431,7 @@ "cell_type": "raw", "metadata": {}, "source": [ - "This can be partially avoided with the ``cut`` parameter, which specifies how far the curve should extend beyond the extreme datapoints. But this influences only where the curve is drawn; the density estimate will still smooth over the range where no data can exist, causing it to be artifically low at the extremes of the distribution:" + "This can be partially avoided with the ``cut`` parameter, which specifies how far the curve should extend beyond the extreme datapoints. But this influences only where the curve is drawn; the density estimate will still smooth over the range where no data can exist, causing it to be artificially low at the extremes of the distribution:" ] }, { @@ -559,7 +559,7 @@ "cell_type": "raw", "metadata": {}, "source": [ - "A bivariate histogram bins the data within rectangles that tile the plot and then shows the count of observations within each rectangle with the fill color (analagous to a :func:`heatmap`). Similarly, a bivariate KDE plot smoothes the (x, y) observations with a 2D Gaussian. The default representation then shows the *contours* of the 2D density:" + "A bivariate histogram bins the data within rectangles that tile the plot and then shows the count of observations within each rectangle with the fill color (analogous to a :func:`heatmap`). Similarly, a bivariate KDE plot smoothes the (x, y) observations with a 2D Gaussian. The default representation then shows the *contours* of the 2D density:" ] }, { diff --git a/doc/tutorial/function_overview.ipynb b/doc/tutorial/function_overview.ipynb index ba5184830a..2189596450 100644 --- a/doc/tutorial/function_overview.ipynb +++ b/doc/tutorial/function_overview.ipynb @@ -198,7 +198,7 @@ "cell_type": "raw", "metadata": {}, "source": [ - "You'll notice that the figure-level plots look mostly like their axes-level counterparts, but there are a few differences. Notably, the legend is placed ouside the plot. They also have a slightly different shape (more on that shortly).\n", + "You'll notice that the figure-level plots look mostly like their axes-level counterparts, but there are a few differences. Notably, the legend is placed outside the plot. They also have a slightly different shape (more on that shortly).\n", "\n", "The most useful feature offered by the figure-level functions is that they can easily create figures with multiple subplots. 
For example, instead of stacking the three distributions for each species of penguins in the same axes, we can \"facet\" them by plotting each distribution across the columns of the figure:" ] @@ -437,7 +437,7 @@ "cell_type": "raw", "metadata": {}, "source": [ - "On balance, the figure-level functions add some additional complexity that can make things more confusing for beginners, but their distinct features give them additional power. The tutorial documentaion mostly uses the figure-level functions, because they produce slightly cleaner plots, and we generally recommend their use for most applications. The one situation where they are not a good choice is when you need to make a complex, standalone figure that composes multiple different plot kinds. At this point, it's recommended to set up the figure using matplotlib directly and to fill in the individual components using axes-level functions." + "On balance, the figure-level functions add some additional complexity that can make things more confusing for beginners, but their distinct features give them additional power. The tutorial documentation mostly uses the figure-level functions, because they produce slightly cleaner plots, and we generally recommend their use for most applications. The one situation where they are not a good choice is when you need to make a complex, standalone figure that composes multiple different plot kinds. At this point, it's recommended to set up the figure using matplotlib directly and to fill in the individual components using axes-level functions." ] }, { @@ -447,7 +447,7 @@ "Combining multiple views on the data\n", "------------------------------------\n", "\n", - "Two important plotting functions in seaborn don't fit cleanly into the classification scheme discussed above. These functions, :func:`jointplot` and :func:`pairplot`, employ multiple kinds of plots from different modules to represent mulitple aspects of a dataset in a single figure. Both plots are figure-level functions and create figures with multiple subplots by default. But they use different objects to manage the figure: :class:`JointGrid` and :class:`PairGrid`, respectively.\n", + "Two important plotting functions in seaborn don't fit cleanly into the classification scheme discussed above. These functions, :func:`jointplot` and :func:`pairplot`, employ multiple kinds of plots from different modules to represent multiple aspects of a dataset in a single figure. Both plots are figure-level functions and create figures with multiple subplots by default. But they use different objects to manage the figure: :class:`JointGrid` and :class:`PairGrid`, respectively.\n", "\n", ":func:`jointplot` plots the relationship or joint distribution of two variables while adding marginal axes that show the univariate distribution of each one separately:" ] diff --git a/seaborn/_core.py b/seaborn/_core.py index ed57a01450..dd14986ff8 100644 --- a/seaborn/_core.py +++ b/seaborn/_core.py @@ -869,7 +869,7 @@ def _assign_variables_longform(self, data=None, **kwargs): # want, whereas DataFrame.to_dict() gives a nested dict instead of # a dict of series. - # Variables can also be extraced from the index attribute + # Variables can also be extracted from the index attribute # TODO is this the most general way to enable it? # There is no index.to_dict on multiindex, unfortunately try: @@ -977,7 +977,7 @@ def iter_data( """ # TODO should this default to using all (non x/y?) semantics? - # or define groupping vars somewhere? + # or define grouping vars somewhere? 
if grouping_vars is None: grouping_vars = [] elif isinstance(grouping_vars, str): @@ -1047,7 +1047,7 @@ def iter_data( try: data_subset = grouped_data.get_group(pd_key) except KeyError: - # XXX we are adding this to allow backwards compatability + # XXX we are adding this to allow backwards compatibility # with the empty artists that old categorical plots would # add (before 0.12), which we may decide to break, in which # case this option could be removed @@ -1546,7 +1546,7 @@ def infer_orient(x=None, y=None, orient=None, require_numeric=True): Raises ------ ValueError: When `orient` is not None and does not start with "h" or "v" - TypeError: When dependant variable is not numeric, with `require_numeric` + TypeError: When dependent variable is not numeric, with `require_numeric` """ @@ -1630,7 +1630,7 @@ def unique_dashes(n): (5, 1, 1, 1), ] - # Now programatically build as many as we need + # Now programmatically build as many as we need p = 3 while len(dashes) < n: diff --git a/seaborn/_statistics.py b/seaborn/_statistics.py index 9a0f36ba77..537fbb4f5b 100644 --- a/seaborn/_statistics.py +++ b/seaborn/_statistics.py @@ -393,7 +393,7 @@ def __call__(self, x1, x2=None, weights=None): class ECDF: """Univariate empirical cumulative distribution estimator.""" def __init__(self, stat="proportion", complementary=False): - """Initialize the class with its paramters + """Initialize the class with its parameters Parameters ---------- diff --git a/seaborn/categorical.py b/seaborn/categorical.py index 7e8f8cb9fa..48eaa61105 100644 --- a/seaborn/categorical.py +++ b/seaborn/categorical.py @@ -103,7 +103,7 @@ def __init__( self.var_levels[self.cat_axis] = cat_levels def _hue_backcompat(self, color, palette, hue_order, force_hue=False): - """Implement backwards compatability for hue parametrization. + """Implement backwards compatibility for hue parametrization. Note: the force_hue parameter is used so that functions can be shown to pass existing tests during refactoring and then tested for new behavior. @@ -273,7 +273,7 @@ def plot_strips( else: points.set_edgecolors(edgecolor) - # TODO XXX fully impelement legend + # TODO XXX fully implement legend show_legend = not self._redundant_hue and self.input_format != "wide" if "hue" in self.variables and show_legend: for level in self._hue_map.levels: @@ -363,7 +363,7 @@ def draw(points, renderer, *, center=center): _draw_figure(ax.figure) - # TODO XXX fully impelment legend + # TODO XXX fully implement legend show_legend = not self._redundant_hue and self.input_format != "wide" if "hue" in self.variables and show_legend: # TODO and legend: for level in self._hue_map.levels: @@ -3655,7 +3655,7 @@ def catplot( if kind == "strip": - # TODO get these defaults programatically? + # TODO get these defaults programmatically? jitter = kwargs.pop("jitter", True) dodge = kwargs.pop("dodge", False) edgecolor = kwargs.pop("edgecolor", "gray") # XXX TODO default @@ -3677,7 +3677,7 @@ def catplot( elif kind == "swarm": - # TODO get these defaults programatically? + # TODO get these defaults programmatically? 
dodge = kwargs.pop("dodge", False) edgecolor = kwargs.pop("edgecolor", "gray") # XXX TODO default warn_thresh = kwargs.pop("warn_thresh", .05) diff --git a/seaborn/distributions.py b/seaborn/distributions.py index c613af6c85..abcf1aa6b5 100644 --- a/seaborn/distributions.py +++ b/seaborn/distributions.py @@ -685,7 +685,7 @@ def plot_univariate_histogram( linewidth = min(default_linewidth, max_linewidth) - # If not filling, don't let lines dissapear + # If not filling, don't let lines disappear if not fill: min_linewidth = .5 linewidth = max(linewidth, min_linewidth) @@ -1447,7 +1447,7 @@ def histplot( A histogram is a classic visualization tool that represents the distribution of one or more variables by counting the number of observations that fall within -disrete bins. +discrete bins. This function can normalize the statistic computed within each bin to estimate frequency, density or probability mass, and it can add a smooth curve obtained @@ -1750,7 +1750,7 @@ def kdeplot( Plot univariate or bivariate distributions using kernel density estimation. A kernel density estimate (KDE) plot is a method for visualizing the -distribution of observations in a dataset, analagous to a histogram. KDE +distribution of observations in a dataset, analogous to a histogram. KDE represents the data using a continuous probability density curve in one or more dimensions. @@ -2093,7 +2093,7 @@ def rugplot( Plot marginal distributions by drawing ticks along the x and y axes. This function is intended to complement other plots by showing the location -of individual observations in an unobstrusive way. +of individual observations in an unobtrusive way. Parameters ---------- diff --git a/seaborn/external/kde.py b/seaborn/external/kde.py index 1b1d04db31..6584048bc5 100644 --- a/seaborn/external/kde.py +++ b/seaborn/external/kde.py @@ -12,7 +12,7 @@ - logpdf - resample -Additionally, the numpy.linalg module was subsituted for scipy.linalg, +Additionally, the numpy.linalg module was substituted for scipy.linalg, and the examples section (with doctests) was removed from the docstring The original scipy license is copied below: diff --git a/seaborn/matrix.py b/seaborn/matrix.py index c47774c581..2f550026f5 100644 --- a/seaborn/matrix.py +++ b/seaborn/matrix.py @@ -109,7 +109,7 @@ def __init__(self, data, vmin, vmax, cmap, center, robust, annot, fmt, plot_data = np.asarray(data) data = pd.DataFrame(plot_data) - # Validate the mask and convet to DataFrame + # Validate the mask and convert to DataFrame mask = _matrix_mask(data, mask) plot_data = np.ma.masked_where(np.asarray(mask), plot_data) @@ -434,7 +434,7 @@ def heatmap( See Also -------- - clustermap : Plot a matrix using hierachical clustering to arrange the + clustermap : Plot a matrix using hierarchical clustering to arrange the rows and columns. Examples @@ -864,7 +864,7 @@ def __init__(self, data, pivot_kws=None, z_score=None, standard_scale=None, # Initialize the colorbar axes in the gridspec so that tight_layout # works. We will move it where it belongs later. This is a hack. 
self.ax_cbar = self.fig.add_subplot(self.gs[0, 0]) - self.cax = self.ax_cbar # Backwards compatability + self.cax = self.ax_cbar # Backwards compatibility self.cbar_pos = cbar_pos self.dendrogram_row = None @@ -1001,7 +1001,7 @@ def dim_ratios(self, colors, dendrogram_ratio, colors_ratio): ratios = [dendrogram_ratio] if colors is not None: - # Colors are encoded as rgb, so ther is an extra dimention + # Colors are encoded as rgb, so there is an extra dimension if np.ndim(colors) > 2: n_colors = len(colors) else: diff --git a/seaborn/relational.py b/seaborn/relational.py index 34a649a76c..86cf692b10 100644 --- a/seaborn/relational.py +++ b/seaborn/relational.py @@ -582,7 +582,7 @@ def plot(self, ax, kws): p = [self._style_map(val, "path") for val in data["style"]] points.set_paths(p) - # Apply dependant default attributes + # Apply dependent default attributes if "linewidth" not in kws: sizes = points.get_sizes() @@ -697,7 +697,7 @@ def lineplot( Whether to draw the confidence intervals with translucent error bands or discrete error bars. err_kws : dict of keyword arguments - Additional paramters to control the aesthetics of the error bars. The + Additional parameters to control the aesthetics of the error bars. The kwargs are passed either to :meth:`matplotlib.axes.Axes.fill_between` or :meth:`matplotlib.axes.Axes.errorbar`, depending on ``err_style``. {params.rel.legend} diff --git a/seaborn/tests/test_algorithms.py b/seaborn/tests/test_algorithms.py index e1ae1ffb7b..1baef034e4 100644 --- a/seaborn/tests/test_algorithms.py +++ b/seaborn/tests/test_algorithms.py @@ -35,7 +35,7 @@ def test_bootstrap_length(random): def test_bootstrap_range(random): - """Test that boostrapping a random array stays within the right range.""" + """Test that bootstrapping a random array stays within the right range.""" a_norm = np.random.randn(1000) amin, amax = a_norm.min(), a_norm.max() out = algo.bootstrap(a_norm) diff --git a/seaborn/tests/test_categorical.py b/seaborn/tests/test_categorical.py index a7d9f26e2d..57ae714224 100644 --- a/seaborn/tests/test_categorical.py +++ b/seaborn/tests/test_categorical.py @@ -1229,7 +1229,7 @@ def test_draw_to_density(self): # p.dwidth will be 1 for easier testing p.width = 2 - # Test verical plots + # Test vertical plots support = np.array([.2, .6]) density = np.array([.1, .4]) diff --git a/seaborn/tests/test_core.py b/seaborn/tests/test_core.py index 7f8173d210..0687224f4e 100644 --- a/seaborn/tests/test_core.py +++ b/seaborn/tests/test_core.py @@ -212,7 +212,7 @@ def test_hue_map_categorical(self, wide_df, long_df): assert m.levels == [pd.Timestamp(t) for t in long_df["t"].unique()] assert m.map_type == "datetime" - # Test excplicit categories + # Test explicit categories p = VectorPlotter(data=long_df, variables=dict(x="x", hue="a_cat")) m = HueMapping(p) assert m.levels == long_df["a_cat"].cat.categories.to_list() @@ -560,7 +560,7 @@ def test_map_style(self, long_df): assert m(key, "marker") == markers[key] assert m(key, "dashes") == dashes[key] - # Test excplicit categories + # Test explicit categories p = VectorPlotter(data=long_df, variables=dict(x="x", style="a_cat")) m = StyleMapping(p) assert m.levels == long_df["a_cat"].cat.categories.to_list() diff --git a/seaborn/tests/test_relational.py b/seaborn/tests/test_relational.py index 75c29c99bb..fadbc911e0 100644 --- a/seaborn/tests/test_relational.py +++ b/seaborn/tests/test_relational.py @@ -1125,7 +1125,7 @@ def test_nonmapped_dashes(self): ax = lineplot(x=[1, 2], y=[1, 2], dashes=(2, 1)) line = ax.lines[0] - 
# Not a great test, but lines don't expose the dash style publically
+        # Not a great test, but lines don't expose the dash style publicly
         assert line.get_linestyle() == "--"
 
     def test_lineplot_axes(self, wide_df):
diff --git a/seaborn/tests/test_utils.py b/seaborn/tests/test_utils.py
index cab8a357f2..ee8516340a 100644
--- a/seaborn/tests/test_utils.py
+++ b/seaborn/tests/test_utils.py
@@ -342,7 +342,7 @@ def check_load_dataset(name):
 
 
 def check_load_cached_dataset(name):
-    # Test the cacheing using a temporary file.
+    # Test the caching using a temporary file.
     with tempfile.TemporaryDirectory() as tmpdir:
        # download and cache
        ds = load_dataset(name, cache=True, data_home=tmpdir)
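
The subject line notes that these typos were found with codespell. For reviewers who want to verify that no flagged words remain, the command below is a sketch of how the same scan could be re-run from the repository root; it assumes codespell is installed (for example via pip), and the flags are copied from the subject line rather than introduced here.

    # Re-run the scan that produced this patch (same flags as the subject line).
    # -q 3 raises codespell's quiet level to suppress some informational warnings,
    # and -L lists words to skip because they are legitimate identifiers in this
    # codebase (for example, the `whis` parameter of boxplot).
    codespell -q 3 -L ans,hist,whis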