diff --git a/pandas/_config/__init__.py b/pandas/_config/__init__.py
index 463e8af7cc561..ee709eff2eeae 100644
--- a/pandas/_config/__init__.py
+++ b/pandas/_config/__init__.py
@@ -33,3 +33,8 @@ def using_string_dtype() -> bool:
     _mode_options = _global_config["future"]
     return _mode_options["infer_string"]
+
+
+def is_nan_na() -> bool:
+    _mode_options = _global_config["mode"]
+    return _mode_options["nan_is_na"]
diff --git a/pandas/_libs/missing.pyi b/pandas/_libs/missing.pyi
index 6bf30a03cef32..64256ae4b36ad 100644
--- a/pandas/_libs/missing.pyi
+++ b/pandas/_libs/missing.pyi
@@ -14,3 +14,4 @@ def isneginf_scalar(val: object) -> bool: ...
 def checknull(val: object) -> bool: ...
 def isnaobj(arr: np.ndarray) -> npt.NDArray[np.bool_]: ...
 def is_numeric_na(values: np.ndarray) -> npt.NDArray[np.bool_]: ...
+def is_pdna_or_none(values: np.ndarray) -> npt.NDArray[np.bool_]: ...
diff --git a/pandas/_libs/missing.pyx b/pandas/_libs/missing.pyx
index c7f905c4d0be0..164a47cb5adb7 100644
--- a/pandas/_libs/missing.pyx
+++ b/pandas/_libs/missing.pyx
@@ -249,6 +249,24 @@ cdef bint checknull_with_nat_and_na(object obj):
     return checknull_with_nat(obj) or obj is C_NA


+@cython.wraparound(False)
+@cython.boundscheck(False)
+def is_pdna_or_none(values: ndarray) -> ndarray:
+    cdef:
+        ndarray[uint8_t] result
+        Py_ssize_t i, N
+        object val
+
+    N = len(values)
+    result = np.zeros(N, dtype=np.uint8)
+
+    for i in range(N):
+        val = values[i]
+        if val is None or val is C_NA:
+            result[i] = True
+    return result.view(bool)
+
+
 @cython.wraparound(False)
 @cython.boundscheck(False)
 def is_numeric_na(values: ndarray) -> ndarray:
diff --git a/pandas/_libs/parsers.pyx b/pandas/_libs/parsers.pyx
index 5b94f45490da4..1f5813940c058 100644
--- a/pandas/_libs/parsers.pyx
+++ b/pandas/_libs/parsers.pyx
@@ -1461,7 +1461,7 @@ def _maybe_upcast(
         if isinstance(arr, IntegerArray) and arr.isna().all():
             # use null instead of int64 in pyarrow
             arr = arr.to_numpy(na_value=None)
-        arr = ArrowExtensionArray(pa.array(arr, from_pandas=True))
+        arr = ArrowExtensionArray(pa.array(arr))

     return arr
diff --git a/pandas/conftest.py b/pandas/conftest.py
index f81dc0af234e3..1b035cd82b74e 100644
--- a/pandas/conftest.py
+++ b/pandas/conftest.py
@@ -2122,3 +2122,10 @@ def temp_file(tmp_path):
 def monkeysession():
     with pytest.MonkeyPatch.context() as mp:
         yield mp
+
+
+@pytest.fixture(params=[True, False])
+def using_nan_is_na(request):
+    opt = request.param
+    with pd.option_context("mode.nan_is_na", opt):
+        yield opt
diff --git a/pandas/core/arrays/_utils.py b/pandas/core/arrays/_utils.py
index 6b46396d5efdf..e511b481887a9 100644
--- a/pandas/core/arrays/_utils.py
+++ b/pandas/core/arrays/_utils.py
@@ -7,7 +7,10 @@

 import numpy as np

+from pandas._config import is_nan_na
+
 from pandas._libs import lib
+from pandas._libs.missing import NA
 from pandas.errors import LossySetitemError

 from pandas.core.dtypes.cast import np_can_hold_element
@@ -21,7 +24,11 @@


 def to_numpy_dtype_inference(
-    arr: ArrayLike, dtype: npt.DTypeLike | None, na_value, hasna: bool
+    arr: ArrayLike,
+    dtype: npt.DTypeLike | None,
+    na_value,
+    hasna: bool,
+    is_pyarrow: bool = True,
 ) -> tuple[npt.DTypeLike, Any]:
     if dtype is None and is_numeric_dtype(arr.dtype):
         dtype_given = False
@@ -34,7 +41,11 @@ def to_numpy_dtype_inference(
             else:
                 dtype = arr.dtype.numpy_dtype  # type: ignore[union-attr]
             if na_value is lib.no_default:
-                na_value = np.nan
+                if is_pyarrow and not is_nan_na():
+                    na_value = NA
+                    dtype = np.dtype(object)
+                else:
+                    na_value = np.nan
         else:
             dtype = arr.dtype.numpy_dtype  # type: ignore[union-attr]
     elif dtype is not None:
diff --git a/pandas/core/arrays/arrow/array.py b/pandas/core/arrays/arrow/array.py
index ad1d576bfec32..7aeeefbe2913a 100644
--- a/pandas/core/arrays/arrow/array.py
+++ b/pandas/core/arrays/arrow/array.py
@@ -16,7 +16,10 @@

 import numpy as np

+from pandas._config import is_nan_na
+
 from pandas._libs import lib
+from pandas._libs.missing import is_pdna_or_none
 from pandas._libs.tslibs import (
     Timedelta,
     Timestamp,
@@ -32,6 +35,7 @@

 from pandas.core.dtypes.cast import (
     can_hold_element,
+    construct_1d_object_array_from_listlike,
     infer_dtype_from_scalar,
 )
 from pandas.core.dtypes.common import (
@@ -325,6 +329,11 @@ def _from_sequence_of_strings(
         """
         Construct a new ExtensionArray from a sequence of strings.
         """
+        mask = isna(strings)
+
+        if isinstance(strings, cls):
+            strings = strings._pa_array
+
         pa_type = to_pyarrow_type(dtype)
         if (
             pa_type is None
@@ -343,17 +352,21 @@ def _from_sequence_of_strings(
             from pandas.core.tools.datetimes import to_datetime

             scalars = to_datetime(strings, errors="raise").date
+
+            scalars = pa.array(scalars, mask=mask.view(bool), type=pa_type)
+
         elif pa.types.is_duration(pa_type):
             from pandas.core.tools.timedeltas import to_timedelta

             scalars = to_timedelta(strings, errors="raise")
+
             if pa_type.unit != "ns":
                 # GH51175: test_from_sequence_of_strings_pa_array
                 # attempt to parse as int64 reflecting pyarrow's
                 # duration to string casting behavior
                 mask = isna(scalars)
                 if not isinstance(strings, (pa.Array, pa.ChunkedArray)):
-                    strings = pa.array(strings, type=pa.string(), from_pandas=True)
+                    strings = pa.array(strings, type=pa.string(), mask=mask)
                 strings = pc.if_else(mask, None, strings)
                 try:
                     scalars = strings.cast(pa.int64())
@@ -374,7 +387,7 @@ def _from_sequence_of_strings(
             if isinstance(strings, (pa.Array, pa.ChunkedArray)):
                 scalars = strings
             else:
-                scalars = pa.array(strings, type=pa.string(), from_pandas=True)
+                scalars = pa.array(strings, type=pa.string(), mask=mask)
             scalars = pc.if_else(pc.equal(scalars, "1.0"), "1", scalars)
             scalars = pc.if_else(pc.equal(scalars, "0.0"), "0", scalars)
             scalars = scalars.cast(pa.bool_())
@@ -386,6 +399,11 @@ def _from_sequence_of_strings(
             from pandas.core.tools.numeric import to_numeric

             scalars = to_numeric(strings, errors="raise")
+            if isinstance(strings, (pa.Array, pa.ChunkedArray)):
+                scalars = strings.cast(pa_type)
+            elif mask is not None:
+                scalars = pa.array(scalars, mask=mask, type=pa_type)
+
         else:
             raise NotImplementedError(
                 f"Converting strings to {pa_type} is not implemented."
@@ -428,7 +446,7 @@ def _box_pa_scalar(cls, value, pa_type: pa.DataType | None = None) -> pa.Scalar:
         """
         if isinstance(value, pa.Scalar):
             pa_scalar = value
-        elif isna(value):
+        elif isna(value) and not (lib.is_float(value) and not is_nan_na()):
             pa_scalar = pa.scalar(None, type=pa_type)
         else:
             # Workaround https://github.com/apache/arrow/issues/37291
@@ -445,7 +463,7 @@ def _box_pa_scalar(cls, value, pa_type: pa.DataType | None = None) -> pa.Scalar:
                 value = value.as_unit(pa_type.unit)
             value = value._value

-        pa_scalar = pa.scalar(value, type=pa_type, from_pandas=True)
+        pa_scalar = pa.scalar(value, type=pa_type)

         if pa_type is not None and pa_scalar.type != pa_type:
             pa_scalar = pa_scalar.cast(pa_type)
@@ -477,6 +495,13 @@ def _box_pa_array(
             if copy:
                 value = value.copy()
             pa_array = value.__arrow_array__()
+
+        elif hasattr(value, "__arrow_array__"):
+            # e.g. StringArray
+            if copy:
+                value = value.copy()
+            pa_array = value.__arrow_array__()
+
         else:
             if (
                 isinstance(value, np.ndarray)
@@ -530,11 +555,32 @@ def _box_pa_array(
                     pa_array = pa.array(dta._ndarray, type=pa_type, mask=dta_mask)
                     return pa_array

+            mask = None
+            if is_nan_na():
+                try:
+                    arr_value = np.asarray(value)
+                    if arr_value.ndim > 1:
+                        # e.g. test_fixed_size_list we have list data. ndim > 1
+                        # means there were no scalar (NA) entries.
+                        mask = np.zeros(len(value), dtype=np.bool_)
+                    else:
+                        mask = isna(arr_value)
+                except ValueError:
+                    # Ragged data that numpy raises on
+                    arr_value = construct_1d_object_array_from_listlike(value)
+                    mask = isna(arr_value)
+            elif (
+                getattr(value, "dtype", None) is None or value.dtype.kind not in "iumMf"
+            ):
+                arr_value = np.asarray(value, dtype=object)
+                # similar to isna(value) but exclude NaN, NaT, nat-like, nan-like
+                mask = is_pdna_or_none(arr_value)  # type: ignore[assignment]
+
             try:
-                pa_array = pa.array(value, type=pa_type, from_pandas=True)
+                pa_array = pa.array(value, type=pa_type, mask=mask)
             except (pa.ArrowInvalid, pa.ArrowTypeError):
                 # GH50430: let pyarrow infer type, then cast
-                pa_array = pa.array(value, from_pandas=True)
+                pa_array = pa.array(value, mask=mask)

         if pa_type is None and pa.types.is_duration(pa_array.type):
             # Workaround https://github.com/apache/arrow/issues/37291
@@ -542,7 +588,7 @@ def _box_pa_array(
             value = to_timedelta(value)
             value = value.to_numpy()

-            pa_array = pa.array(value, type=pa_type, from_pandas=True)
+            pa_array = pa.array(value, type=pa_type)

         if pa.types.is_duration(pa_array.type) and pa_array.null_count > 0:
             # GH52843: upstream bug for duration types when originally
@@ -877,7 +923,13 @@ def _logical_method(self, other, op) -> Self:
         return self._evaluate_op_method(other, op, ARROW_LOGICAL_FUNCS)

     def _arith_method(self, other, op) -> Self:
-        return self._evaluate_op_method(other, op, ARROW_ARITHMETIC_FUNCS)
+        result = self._evaluate_op_method(other, op, ARROW_ARITHMETIC_FUNCS)
+        if is_nan_na() and result.dtype.kind == "f":
+            parr = result._pa_array
+            mask = pc.is_nan(parr).to_numpy()
+            arr = pc.replace_with_mask(parr, mask, pa.scalar(None, type=parr.type))
+            result = type(self)(arr)
+        return result

     def equals(self, other) -> bool:
         if not isinstance(other, ArrowExtensionArray):
@@ -1208,7 +1260,7 @@ def isin(self, values: ArrayLike) -> npt.NDArray[np.bool_]:
         if not len(values):
             return np.zeros(len(self), dtype=bool)

-        result = pc.is_in(self._pa_array, value_set=pa.array(values, from_pandas=True))
+        result = pc.is_in(self._pa_array, value_set=pa.array(values))
         # pyarrow 2.0.0 returned nulls, so we explicitly specify dtype to convert nulls
         # to False
         return np.array(result, dtype=np.bool_)
@@ -1460,7 +1512,9 @@ def to_numpy(
         na_value: object = lib.no_default,
     ) -> np.ndarray:
         original_na_value = na_value
-        dtype, na_value = to_numpy_dtype_inference(self, dtype, na_value, self._hasna)
+        dtype, na_value = to_numpy_dtype_inference(
+            self, dtype, na_value, self._hasna, is_pyarrow=True
+        )
         pa_type = self._pa_array.type
         if not self._hasna or isna(na_value) or pa.types.is_null(pa_type):
             data = self
@@ -1489,7 +1543,11 @@ def to_numpy(
             pa.types.is_floating(pa_type)
             and (
                 na_value is np.nan
-                or (original_na_value is lib.no_default and is_float_dtype(dtype))
+                or (
+                    original_na_value is lib.no_default
+                    and is_float_dtype(dtype)
+                    and is_nan_na()
+                )
             )
         ):
             result = data._pa_array.to_numpy()
@@ -2015,7 +2073,7 @@ def __setitem__(self, key, value) -> None:
                 raise ValueError("Length of indexer and values mismatch")
             chunks = [
                 *self._pa_array[:key].chunks,
-                pa.array([value], type=self._pa_array.type, from_pandas=True),
+                pa.array([value], type=self._pa_array.type),
                 *self._pa_array[key + 1 :].chunks,
             ]
             data = pa.chunked_array(chunks).combine_chunks()
@@ -2069,7 +2127,7 @@ def _rank_calc(
                 pa_type = pa.float64()
             else:
                 pa_type = pa.uint64()
-            result = pa.array(ranked, type=pa_type, from_pandas=True)
+            result = pa.array(ranked, type=pa_type)
             return result

         data = self._pa_array.combine_chunks()
@@ -2321,7 +2379,7 @@ def _to_numpy_and_type(value) -> tuple[np.ndarray, pa.DataType | None]:
         right, right_type = _to_numpy_and_type(right)
         pa_type = left_type or right_type
         result = np.where(cond, left, right)
-        return pa.array(result, type=pa_type, from_pandas=True)
+        return pa.array(result, type=pa_type)

     @classmethod
     def _replace_with_mask(
@@ -2362,9 +2420,10 @@ def _replace_with_mask(
             replacements = np.array(replacements, dtype=object)
         elif isinstance(replacements, pa.Scalar):
             replacements = replacements.as_py()
+
         result = np.array(values, dtype=object)
         result[mask] = replacements
-        return pa.array(result, type=values.type, from_pandas=True)
+        return pa.array(result, type=values.type)

     # ------------------------------------------------------------------
     # GroupBy Methods
@@ -2443,7 +2502,7 @@ def _groupby_op(
             return type(self)(pa_result)
         else:
             # DatetimeArray, TimedeltaArray
-            pa_result = pa.array(result, from_pandas=True)
+            pa_result = pa.array(result)
             return type(self)(pa_result)

     def _apply_elementwise(self, func: Callable) -> list[list[Any]]:
diff --git a/pandas/core/arrays/masked.py b/pandas/core/arrays/masked.py
index fefd70fef35c9..05591b2c225b7 100644
--- a/pandas/core/arrays/masked.py
+++ b/pandas/core/arrays/masked.py
@@ -496,7 +496,9 @@ def to_numpy(
         array([ True, False, False])
         """
         hasna = self._hasna
-        dtype, na_value = to_numpy_dtype_inference(self, dtype, na_value, hasna)
+        dtype, na_value = to_numpy_dtype_inference(
+            self, dtype, na_value, hasna, is_pyarrow=False
+        )

         if dtype is None:
             dtype = object
diff --git a/pandas/core/arrays/string_.py b/pandas/core/arrays/string_.py
index 198dc4c483277..719686ab71a29 100644
--- a/pandas/core/arrays/string_.py
+++ b/pandas/core/arrays/string_.py
@@ -502,6 +502,12 @@ def _str_map_str_or_object(
         if self.dtype.storage == "pyarrow":
             import pyarrow as pa

+            # TODO: shouldn't this already be caught by the passed mask?
+            # it isn't in test_extract_expand_capture_groups_index
+            # mask = mask | np.array(
+            #     [x is libmissing.NA for x in result], dtype=bool
+            # )
+
             result = pa.array(
                 result, mask=mask, type=pa.large_string(), from_pandas=True
             )
@@ -754,7 +760,7 @@ def __arrow_array__(self, type=None):

         values = self._ndarray.copy()
         values[self.isna()] = None
-        return pa.array(values, type=type, from_pandas=True)
+        return pa.array(values, type=type)

     def _values_for_factorize(self) -> tuple[np.ndarray, libmissing.NAType | float]:  # type: ignore[override]
         arr = self._ndarray
diff --git a/pandas/core/config_init.py b/pandas/core/config_init.py
index bf7e8fb02b58e..a8014afb225bb 100644
--- a/pandas/core/config_init.py
+++ b/pandas/core/config_init.py
@@ -427,6 +427,15 @@ def is_terminal() -> bool:
     validator=is_one_of_factory([True, False, "warn"]),
 )

+with cf.config_prefix("mode"):
+    cf.register_option(
+        "nan_is_na",
+        True,
+        "Whether to make ArrowDtype arrays consistently treat NaN as "
+        "interchangeable with pd.NA",
+        validator=is_one_of_factory([True, False]),
+    )
+

 # user warnings

 chained_assignment = """
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index cbd853886a0f4..a48c14aea4302 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -9893,7 +9893,7 @@ def where(
     def where(
         self,
         cond,
-        other=np.nan,
+        other=lib.no_default,
         *,
         inplace: bool = False,
         axis: Axis | None = None,
diff --git a/pandas/io/json/_json.py b/pandas/io/json/_json.py
index 59911a57acc02..1c79f24a9fd96 100644
--- a/pandas/io/json/_json.py
+++ b/pandas/io/json/_json.py
@@ -18,6 +18,8 @@

 import numpy as np

+from pandas._config import option_context
+
 from pandas._libs import lib
 from pandas._libs.json import (
     ujson_dumps,
@@ -994,9 +996,13 @@ def _read_ujson(self) -> DataFrame | Series:
         else:
             obj = self._get_object_parser(self.data)
         if self.dtype_backend is not lib.no_default:
-            return obj.convert_dtypes(
-                infer_objects=False, dtype_backend=self.dtype_backend
-            )
+            with option_context("mode.nan_is_na", True):
+                # The construction above takes "null" to NaN, which we want to
+                # convert to NA. But .convert_dtypes to pyarrow doesn't allow
+                # that, so we do a 2-step conversion through numpy-nullable.
+                return obj.convert_dtypes(
+                    infer_objects=False, dtype_backend=self.dtype_backend
+                )
         else:
             return obj
@@ -1071,9 +1077,13 @@ def __next__(self) -> DataFrame | Series:
                 raise ex

         if self.dtype_backend is not lib.no_default:
-            return obj.convert_dtypes(
-                infer_objects=False, dtype_backend=self.dtype_backend
-            )
+            with option_context("mode.nan_is_na", True):
+                # The construction above takes "null" to NaN, which we want to
+                # convert to NA. But .convert_dtypes to pyarrow doesn't allow
+ return obj.convert_dtypes( + infer_objects=False, dtype_backend=self.dtype_backend + ) else: return obj diff --git a/pandas/tests/extension/test_arrow.py b/pandas/tests/extension/test_arrow.py index 4d766d6664218..4d56edfa9ffae 100644 --- a/pandas/tests/extension/test_arrow.py +++ b/pandas/tests/extension/test_arrow.py @@ -276,14 +276,14 @@ def test_compare_scalar(self, data, comparison_op): self._compare_other(ser, data, comparison_op, data[0]) @pytest.mark.parametrize("na_action", [None, "ignore"]) - def test_map(self, data_missing, na_action): + def test_map(self, data_missing, na_action, using_nan_is_na): if data_missing.dtype.kind in "mM": result = data_missing.map(lambda x: x, na_action=na_action) expected = data_missing.to_numpy(dtype=object) tm.assert_numpy_array_equal(result, expected) else: result = data_missing.map(lambda x: x, na_action=na_action) - if data_missing.dtype == "float32[pyarrow]": + if data_missing.dtype == "float32[pyarrow]" and using_nan_is_na: # map roundtrips through objects, which converts to float64 expected = data_missing.to_numpy(dtype="float64", na_value=np.nan) else: @@ -700,7 +700,7 @@ def test_setitem_preserves_views(self, data): @pytest.mark.parametrize("dtype_backend", ["pyarrow", no_default]) @pytest.mark.parametrize("engine", ["c", "python"]) - def test_EA_types(self, engine, data, dtype_backend, request): + def test_EA_types(self, engine, data, dtype_backend, request, using_nan_is_na): pa_dtype = data.dtype.pyarrow_dtype if pa.types.is_decimal(pa_dtype): request.applymarker( @@ -721,7 +721,10 @@ def test_EA_types(self, engine, data, dtype_backend, request): pytest.mark.xfail(reason="CSV parsers don't correctly handle binary") ) df = pd.DataFrame({"with_dtype": pd.Series(data, dtype=str(data.dtype))}) - csv_output = df.to_csv(index=False, na_rep=np.nan) + if not using_nan_is_na: + csv_output = df.to_csv(index=False, na_rep="NA") + else: + csv_output = df.to_csv(index=False, na_rep=np.nan) if pa.types.is_binary(pa_dtype): csv_output = BytesIO(csv_output) else: @@ -1512,7 +1515,8 @@ def test_pickle_roundtrip(data): def test_astype_from_non_pyarrow(data): # GH49795 - pd_array = data._pa_array.to_pandas().array + np_arr = data.to_numpy() + pd_array = pd.array(np_arr, dtype=np_arr.dtype) result = pd_array.astype(data.dtype) assert not isinstance(pd_array.dtype, ArrowDtype) assert isinstance(result.dtype, ArrowDtype) @@ -1534,7 +1538,7 @@ def test_astype_errors_ignore(): tm.assert_frame_equal(result, expected) -def test_to_numpy_with_defaults(data): +def test_to_numpy_with_defaults(data, using_nan_is_na): # GH49973 result = data.to_numpy() @@ -1546,20 +1550,23 @@ def test_to_numpy_with_defaults(data): else: expected = np.array(data._pa_array) - if data._hasna and not is_numeric_dtype(data.dtype): + if data._hasna and (not is_numeric_dtype(data.dtype) or not using_nan_is_na): expected = expected.astype(object) expected[pd.isna(data)] = pd.NA tm.assert_numpy_array_equal(result, expected) -def test_to_numpy_int_with_na(): +def test_to_numpy_int_with_na(using_nan_is_na): # GH51227: ensure to_numpy does not convert int to float data = [1, None] arr = pd.array(data, dtype="int64[pyarrow]") result = arr.to_numpy() - expected = np.array([1, np.nan]) - assert isinstance(result[0], float) + if not using_nan_is_na: + expected = np.array([1, pd.NA], dtype=object) + else: + expected = np.array([1, np.nan]) + assert isinstance(result[0], float) tm.assert_numpy_array_equal(result, expected) @@ -2868,7 +2875,7 @@ def test_dt_components(): ) result = ser.dt.components 
     expected = pd.DataFrame(
-        [[1, 0, 0, 2, 0, 3, 4], [None, None, None, None, None, None, None]],
+        [[1, 0, 0, 2, 0, 3, 4], [pd.NA, pd.NA, pd.NA, pd.NA, pd.NA, pd.NA, pd.NA]],
         columns=[
             "days",
             "hours",
@@ -2893,7 +2900,10 @@ def test_dt_components_large_values():
     )
     result = ser.dt.components
     expected = pd.DataFrame(
-        [[365, 23, 59, 59, 999, 0, 0], [None, None, None, None, None, None, None]],
+        [
+            [365, 23, 59, 59, 999, 0, 0],
+            [pd.NA, pd.NA, pd.NA, pd.NA, pd.NA, pd.NA, pd.NA],
+        ],
         columns=[
             "days",
             "hours",
@@ -3517,10 +3527,13 @@ def test_cast_dictionary_different_value_dtype(arrow_type):
     assert result.dtypes.iloc[0] == data_type


-def test_map_numeric_na_action():
+def test_map_numeric_na_action(using_nan_is_na):
     ser = pd.Series([32, 40, None], dtype="int64[pyarrow]")
     result = ser.map(lambda x: 42, na_action="ignore")
-    expected = pd.Series([42.0, 42.0, np.nan], dtype="float64")
+    if not using_nan_is_na:
+        expected = pd.Series([42.0, 42.0, pd.NA], dtype="object")
+    else:
+        expected = pd.Series([42.0, 42.0, np.nan], dtype="float64")
     tm.assert_series_equal(result, expected)


@@ -3576,3 +3589,58 @@ def test_timestamp_dtype_matches_to_datetime():

     expected = pd.Series([ts], dtype=dtype1).convert_dtypes(dtype_backend="pyarrow")
     tm.assert_series_equal(result, expected)
+
+
+def test_ops_with_nan_is_na(using_nan_is_na):
+    # GH#61732
+    ser = pd.Series([-1, 0, 1], dtype="int64[pyarrow]")
+
+    result = ser - np.nan
+    if using_nan_is_na:
+        assert result.isna().all()
+    else:
+        assert not result.isna().any()
+
+    result = ser * np.nan
+    if using_nan_is_na:
+        assert result.isna().all()
+    else:
+        assert not result.isna().any()
+
+    result = ser / 0
+    if using_nan_is_na:
+        assert result.isna()[1]
+    else:
+        assert not result.isna()[1]
+
+
+def test_setitem_float_nan_is_na(using_nan_is_na):
+    # GH#61732
+    import pyarrow as pa
+
+    ser = pd.Series([-1, 0, 1], dtype="int64[pyarrow]")
+
+    if using_nan_is_na:
+        ser[1] = np.nan
+        assert ser.isna()[1]
+    else:
+        msg = "Could not convert nan with type float: tried to convert to int64"
+        with pytest.raises(pa.lib.ArrowInvalid, match=msg):
+            ser[1] = np.nan
+
+    ser = pd.Series([-1, np.nan, 1], dtype="float64[pyarrow]")
+    if using_nan_is_na:
+        assert ser.isna()[1]
+        assert ser[1] is pd.NA
+
+        ser[1] = np.nan
+        assert ser[1] is pd.NA
+
+    else:
+        assert not ser.isna()[1]
+        assert isinstance(ser[1], float)
+        assert np.isnan(ser[1])
+
+        ser[2] = np.nan
+        assert isinstance(ser[2], float)
+        assert np.isnan(ser[2])
diff --git a/pandas/tests/frame/methods/test_convert_dtypes.py b/pandas/tests/frame/methods/test_convert_dtypes.py
index ab847e2f8e81e..cd850f8019ea1 100644
--- a/pandas/tests/frame/methods/test_convert_dtypes.py
+++ b/pandas/tests/frame/methods/test_convert_dtypes.py
@@ -59,7 +59,7 @@ def test_convert_dtypes_retain_column_names(self):
         tm.assert_index_equal(result.columns, df.columns)
         assert result.columns.name == "cols"

-    def test_pyarrow_dtype_backend(self):
+    def test_pyarrow_dtype_backend(self, using_nan_is_na):
         pa = pytest.importorskip("pyarrow")
         df = pd.DataFrame(
             {
@@ -73,6 +73,8 @@ def test_pyarrow_dtype_backend(self):
             }
         )
         result = df.convert_dtypes(dtype_backend="pyarrow")
+
+        item = None if using_nan_is_na else np.nan
         expected = pd.DataFrame(
             {
                 "a": pd.arrays.ArrowExtensionArray(
@@ -80,7 +82,7 @@ def test_pyarrow_dtype_backend(self):
                 ),
                 "b": pd.arrays.ArrowExtensionArray(pa.array(["x", "y", None])),
                 "c": pd.arrays.ArrowExtensionArray(pa.array([True, False, None])),
-                "d": pd.arrays.ArrowExtensionArray(pa.array([None, 100.5, 200.0])),
+                "d": pd.arrays.ArrowExtensionArray(pa.array([item, 100.5, 200.0])),
                 "e": pd.arrays.ArrowExtensionArray(
                     pa.array(
                         [
diff --git a/pandas/tests/groupby/methods/test_kurt.py b/pandas/tests/groupby/methods/test_kurt.py
index 21b7c50c3c5aa..7aac23c2147fb 100644
--- a/pandas/tests/groupby/methods/test_kurt.py
+++ b/pandas/tests/groupby/methods/test_kurt.py
@@ -43,7 +43,7 @@ def test_groupby_kurt_arrow_float64(dtype):
     # Test groupby.kurt() with float64[pyarrow] and Float64 dtypes
     df = pd.DataFrame(
         {
-            "x": [1.0, np.nan, 3.2, 4.8, 2.3, 1.9, 8.9],
+            "x": [1.0, pd.NA, 3.2, 4.8, 2.3, 1.9, 8.9],
             "y": [1.6, 3.3, 3.2, 6.8, 1.3, 2.9, 9.0],
         },
         dtype=dtype,
diff --git a/pandas/tests/groupby/test_reductions.py b/pandas/tests/groupby/test_reductions.py
index 014558bbf4bba..08cf1047f316c 100644
--- a/pandas/tests/groupby/test_reductions.py
+++ b/pandas/tests/groupby/test_reductions.py
@@ -381,8 +381,10 @@ def test_first_last_skipna(any_real_nullable_dtype, sort, skipna, how):
     df = DataFrame(
         {
             "a": [2, 1, 1, 2, 3, 3],
-            "b": [na_value, 3.0, na_value, 4.0, np.nan, np.nan],
-            "c": [na_value, 3.0, na_value, 4.0, np.nan, np.nan],
+            # TODO: test that mixes na_value and NaN, either working for
+            #  float or raising for int?
+            "b": [na_value, 3.0, na_value, 4.0, na_value, na_value],
+            "c": [na_value, 3.0, na_value, 4.0, na_value, na_value],
         },
         dtype=any_real_nullable_dtype,
     )
diff --git a/pandas/tests/io/test_stata.py b/pandas/tests/io/test_stata.py
index 90fda2c10962b..3ebf4416f7289 100644
--- a/pandas/tests/io/test_stata.py
+++ b/pandas/tests/io/test_stata.py
@@ -2056,9 +2056,10 @@ def test_writer_118_exceptions(self, temp_file):
         ["numpy_nullable", pytest.param("pyarrow", marks=td.skip_if_no("pyarrow"))],
     )
     def test_read_write_ea_dtypes(self, dtype_backend, temp_file, tmp_path):
+        dtype = "Int64" if dtype_backend == "numpy_nullable" else "int64[pyarrow]"
         df = DataFrame(
             {
-                "a": [1, 2, None],
+                "a": pd.array([1, 2, None], dtype=dtype),
                 "b": ["a", "b", "c"],
                 "c": [True, False, None],
                 "d": [1.5, 2.5, 3.5],
diff --git a/pandas/tests/series/methods/test_rank.py b/pandas/tests/series/methods/test_rank.py
index 7c6a7893ba3a0..580f747817dea 100644
--- a/pandas/tests/series/methods/test_rank.py
+++ b/pandas/tests/series/methods/test_rank.py
@@ -269,14 +269,35 @@ def test_rank_signature(self):
         with pytest.raises(ValueError, match=msg):
             s.rank("average")

-    def test_rank_tie_methods(self, ser, results, dtype, using_infer_string):
+    def test_rank_tie_methods(
+        self, ser, results, dtype, using_infer_string, using_nan_is_na
+    ):
         method, exp = results
-        if dtype == "int64" or (not using_infer_string and dtype == "str"):
+        if (
+            dtype == "int64"
+            or (dtype in ["int64[pyarrow]", "uint64[pyarrow]"] and not using_nan_is_na)
+            or (not using_infer_string and dtype == "str")
+        ):
             pytest.skip("int64/str does not support NaN")

         ser = ser if dtype is None else ser.astype(dtype)
         result = ser.rank(method=method)
-        tm.assert_series_equal(result, Series(exp, dtype=expected_dtype(dtype, method)))
+        if dtype == "float64[pyarrow]" and not using_nan_is_na:
+            # the NaNs are not treated as NA
+            exp = exp.copy()
+            if method == "average":
+                exp[np.isnan(ser)] = 9.5
+            elif method == "dense":
+                exp[np.isnan(ser)] = 6
+            elif method == "max":
+                exp[np.isnan(ser)] = 10
+            elif method == "min":
+                exp[np.isnan(ser)] = 9
+            elif method == "first":
+                exp[np.isnan(ser)] = [9, 10]
+
+        expected = Series(exp, dtype=expected_dtype(dtype, method))
+        tm.assert_series_equal(result, expected)

     @pytest.mark.parametrize("na_option", ["top", "bottom", "keep"])
     @pytest.mark.parametrize(
@@ -321,6 +342,8 @@ def test_rank_tie_methods_on_infs_nans(
             order = [ranks[1], ranks[0], ranks[2]]
         elif na_option == "bottom":
             order = [ranks[0], ranks[2], ranks[1]]
+        elif dtype == "float64[pyarrow]":
+            order = [ranks[0], [NA] * chunk, ranks[1]]
         else:
             order = [ranks[0], [np.nan] * chunk, ranks[1]]
         expected = order if ascending else order[::-1]
@@ -384,10 +407,16 @@ def test_rank_dense_method(self, dtype, ser, exp):
         expected = Series(exp).astype(expected_dtype(dtype, "dense"))
         tm.assert_series_equal(result, expected)

-    def test_rank_descending(self, ser, results, dtype, using_infer_string):
+    def test_rank_descending(
+        self, ser, results, dtype, using_infer_string, using_nan_is_na
+    ):
         method, _ = results
-        if dtype == "int64" or (not using_infer_string and dtype == "str"):
-            s = ser.dropna()
+        if (
+            dtype == "int64"
+            or (dtype in ["int64[pyarrow]"] and not using_nan_is_na)
+            or (not using_infer_string and dtype == "str")
+        ):
+            s = ser.dropna().astype(dtype)
         else:
             s = ser.astype(dtype)

diff --git a/pandas/tests/series/test_npfuncs.py b/pandas/tests/series/test_npfuncs.py
index 11a51c4700d5c..b72ac8efbaa6d 100644
--- a/pandas/tests/series/test_npfuncs.py
+++ b/pandas/tests/series/test_npfuncs.py
@@ -38,9 +38,15 @@ def test_numpy_argwhere(index):


 @td.skip_if_no("pyarrow")
-def test_log_arrow_backed_missing_value():
+def test_log_arrow_backed_missing_value(using_nan_is_na):
     # GH#56285
     ser = Series([1, 2, None], dtype="float64[pyarrow]")
-    result = np.log(ser)
-    expected = np.log(Series([1, 2, None], dtype="float64"))
-    tm.assert_series_equal(result, expected)
+    if using_nan_is_na:
+        result = np.log(ser)
+        expected = np.log(Series([1, 2, None], dtype="float64"))
+        tm.assert_series_equal(result, expected)
+    else:
+        # we get cast to object which raises
+        msg = "loop of ufunc does not support argument"
+        with pytest.raises(TypeError, match=msg):
+            np.log(ser)
diff --git a/pandas/tests/tools/test_to_numeric.py b/pandas/tests/tools/test_to_numeric.py
index 893f526fb3eb0..e3471c2e3ac0d 100644
--- a/pandas/tests/tools/test_to_numeric.py
+++ b/pandas/tests/tools/test_to_numeric.py
@@ -898,7 +898,7 @@ def test_to_numeric_dtype_backend_error(dtype_backend):
         dtype = "double[pyarrow]"
     else:
         dtype = "Float64"
-    expected = Series([np.nan, np.nan, np.nan], dtype=dtype)
+    expected = Series([pd.NA, pd.NA, pd.NA], dtype=dtype)
     tm.assert_series_equal(result, expected)
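
Illustrative usage (not part of the patch): a minimal sketch of the option semantics this diff introduces, mirroring the assertions in test_ops_with_nan_is_na above.

import numpy as np
import pandas as pd

ser = pd.Series([-1, 0, 1], dtype="int64[pyarrow]")

# Default behavior (mode.nan_is_na=True): NaN is treated as missing,
# so arithmetic involving NaN yields NA entries.
with pd.option_context("mode.nan_is_na", True):
    assert (ser - np.nan).isna().all()

# With mode.nan_is_na=False, NaN stays a regular float value in the
# float result and is not considered missing.
with pd.option_context("mode.nan_is_na", False):
    assert not (ser - np.nan).isna().any()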
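
Similarly, a small sketch of what the new is_pdna_or_none helper reports (assuming a build that includes the missing.pyx change above): only None and pd.NA are flagged, while NaN is deliberately excluded so it can be kept as a regular float when mode.nan_is_na is disabled.

import numpy as np
import pandas as pd
from pandas._libs.missing import is_pdna_or_none

arr = np.array([None, pd.NA, np.nan, 1.5], dtype=object)
print(is_pdna_or_none(arr))  # [ True  True False False]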