Skip to content

Commit c263888

Browse files
authored
update phi::errors to common::errors [fluid_ops] (#76638)
1 parent 7235a00 commit c263888

File tree

18 files changed

+50
-50
lines changed

18 files changed

+50
-50
lines changed

paddle/fluid/eager/activation_offloader.cc

Lines changed: 5 additions & 5 deletions
Original file line number | Diff line number | Diff line change
@@ -103,10 +103,10 @@ void ActivationOffloaderWithPlace::SetSkipTensors(
103103
for (auto &t : tensors) {
104104
auto dense_tensor = GetDenseTensorImpl(t);
105105
if (dense_tensor != nullptr && dense_tensor->place() == place_) {
106-
PADDLE_ENFORCE_EQ(
107-
dense_tensor->meta().is_contiguous(),
108-
true,
109-
phi::errors::InvalidArgument("Only contiguous tensor is supported."));
106+
PADDLE_ENFORCE_EQ(dense_tensor->meta().is_contiguous(),
107+
true,
108+
common::errors::InvalidArgument(
109+
"Only contiguous tensor is supported."));
110110
VLOG(10) << "SetSkip " << GetTensorMetaString(dense_tensor);
111111
skip_tensors_.insert(std::move(dense_tensor));
112112
}
@@ -161,7 +161,7 @@ size_t ActivationOffloaderWithPlace::Offload(size_t size) {
161161
PADDLE_ENFORCE_GE(
162162
cnt,
163163
1,
164-
phi::errors::InvalidArgument("Invalid reference count %d", cnt));
164+
common::errors::InvalidArgument("Invalid reference count %d", cnt));
165165
if (ref_cnt > cnt) {
166166
VLOG(7) << "Cannot offload tensor because its reference is not unique: "
167167
<< GetTensorMetaString(dense_tensor)

paddle/fluid/pybind/args_mapper.cc

Lines changed: 12 additions & 12 deletions
Original file line number | Diff line number | Diff line change
@@ -86,11 +86,11 @@ void ArgMaxMinMapper(PyObject* args,
8686
raise ValueError(
8787
"the value of 'dtype' in argmax could not be None, but received None")
8888
*/
89-
PADDLE_ENFORCE_NE(
90-
dtype_obj,
91-
Py_None,
92-
phi::errors::InvalidArgument("the value of 'dtype' in argmax and argmin "
93-
"could not be None, but received None"));
89+
PADDLE_ENFORCE_NE(dtype_obj,
90+
Py_None,
91+
common::errors::InvalidArgument(
92+
"the value of 'dtype' in argmax and argmin "
93+
"could not be None, but received None"));
9494
*dtype = CastPyArg2DataType(dtype_obj, "argmax", 3, phi::DataType::INT64);
9595
// Check Remaining Params validity if needed
9696
CheckRemainingParamsValidity(args, kwargs, remaining_kwargs, nargs);
@@ -145,11 +145,11 @@ void ArgMaxMinMapper(PyObject* args,
145145
}
146146
*keepdims = CastPyArg2Boolean(keepdims_obj, "argmax", 2, false);
147147

148-
PADDLE_ENFORCE_NE(
149-
dtype_obj,
150-
Py_None,
151-
phi::errors::InvalidArgument("the value of 'dtype' in argmax and argmin "
152-
"could not be None, but received None"));
148+
PADDLE_ENFORCE_NE(dtype_obj,
149+
Py_None,
150+
common::errors::InvalidArgument(
151+
"the value of 'dtype' in argmax and argmin "
152+
"could not be None, but received None"));
153153
*dtype = CastPyArg2DataType(dtype_obj, "argmax", 3, phi::DataType::INT64);
154154

155155
// Check Remaining Params validity if needed
@@ -314,7 +314,7 @@ void GeluMapper(PyObject* args,
314314
approximate = nullptr;
315315
PADDLE_ENFORCE_NE(approximate,
316316
nullptr,
317-
phi::errors::InvalidArgument(
317+
common::errors::InvalidArgument(
318318
"the value of approximate in gelu should be 'tanh' "
319319
"or 'none', but received %s",
320320
approximate_str.c_str()));
@@ -357,7 +357,7 @@ void GeluMapper(PyObject* args,
357357
approximate = nullptr;
358358
PADDLE_ENFORCE_NE(approximate,
359359
nullptr,
360-
phi::errors::InvalidArgument(
360+
common::errors::InvalidArgument(
361361
"the value of approximate in gelu should be 'tanh' "
362362
"or 'none', but received %s",
363363
approximate_str.c_str()));

paddle/fluid/pybind/eager_py_layer.cc

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -169,7 +169,7 @@ static void PyLayerAddOffloadActivation(PyLayerObject* ctx,
169169
const std::string& name) {
170170
PADDLE_ENFORCE_NOT_NULL(
171171
ctx,
172-
phi::errors::InvalidArgument("PyLayerObject should not be nullptr."));
172+
common::errors::InvalidArgument("PyLayerObject should not be nullptr."));
173173
if (ctx->container_be_packed) {
174174
VLOG(10) << "Return directly because of packed value";
175175
return;
@@ -644,7 +644,7 @@ PyObject* pylayer_method_apply(PyObject* cls,
644644
auto grad_node = ctx->grad_node.lock();
645645
PADDLE_ENFORCE_NOT_NULL(
646646
grad_node,
647-
phi::errors::InvalidArgument("%s : Cannot be null", classname));
647+
common::errors::InvalidArgument("%s : Cannot be null", classname));
648648
PyLayerAddOffloadActivation(ctx, grad_node->name());
649649
}
650650
#endif

paddle/phi/api/include/compat/ATen/core/TensorBase.h

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -152,11 +152,11 @@ class PADDLE_API TensorBase {
152152
bool copy = false,
153153
std::optional<at::MemoryFormat> memory_format = std::nullopt) const {
154154
if (options.device_opt().has_value()) {
155-
PADDLE_THROW(phi::errors::Unimplemented(
155+
PADDLE_THROW(common::errors::Unimplemented(
156156
"The `to` method with device option is not supported yet."));
157157
}
158158
if (memory_format.has_value()) {
159-
PADDLE_THROW(phi::errors::Unimplemented(
159+
PADDLE_THROW(common::errors::Unimplemented(
160160
"The `to` method with memory_format option is not supported yet."));
161161
}
162162
return paddle::experimental::cast(

paddle/phi/core/dense_tensor.h

Lines changed: 5 additions & 5 deletions
Original file line number | Diff line number | Diff line change
@@ -89,30 +89,30 @@ class PADDLE_API DenseTensor : public TensorBase,
8989
/// Supports negative indices, which count from the last dimension.
9090
/// \param dim The dimension index to retrieve. Must be in the range [0, ndim)
9191
/// or [-ndim, -1]. \return The size of the tensor along the given dimension.
92-
/// \throws phi::errors::OutOfRange if the tensor is empty or the index is out
93-
/// of range.
92+
/// \throws common::errors::OutOfRange if the tensor is empty or the index is
93+
/// out of range.
9494
const int64_t dims(int dim) const {
9595
int ndim = meta_.dims.size();
9696

9797
// Ensure the tensor has at least one dimension
9898
PADDLE_ENFORCE_GE(ndim,
9999
1,
100-
phi::errors::OutOfRange(
100+
common::errors::OutOfRange(
101101
"dims expects at least a 1-dimensional tensor"));
102102

103103
// Check if the index is within the valid range [-ndim, ndim)
104104
PADDLE_ENFORCE_GE(
105105
dim,
106106
-ndim,
107-
phi::errors::OutOfRange(
107+
common::errors::OutOfRange(
108108
"dims: dimension index (%d) must be in range [-%d, %d)",
109109
dim,
110110
ndim,
111111
ndim));
112112
PADDLE_ENFORCE_LT(
113113
dim,
114114
ndim,
115-
phi::errors::OutOfRange(
115+
common::errors::OutOfRange(
116116
"dims: dimension index (%d) must be in range [-%d, %d)",
117117
dim,
118118
ndim,

paddle/phi/core/platform/device/xpu/xpu_resource_pool.cc

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -74,7 +74,7 @@ XpuEventResourcePool::XpuEventResourcePool() {
7474
if (xpu_event_query(event) == XPU_SUCCESS) {
7575
xpu_event_destroy(event);
7676
} else {
77-
PADDLE_THROW(phi::errors::InvalidArgument(
77+
PADDLE_THROW(common::errors::InvalidArgument(
7878
"event not finished, can not destroy"));
7979
}
8080
};

paddle/phi/kernels/cpu/random_kernel.cc

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -41,7 +41,7 @@ void RandomKernel(const Context& dev_ctx,
4141

4242
PADDLE_ENFORCE_LT(from,
4343
to,
44-
phi::errors::InvalidArgument(
44+
common::errors::InvalidArgument(
4545
"random expects 'from' casted to dtype to be less "
4646
"than 'to' casted to dtype, but got from=%d >= to=%d",
4747
from,

paddle/phi/kernels/cpu/range_kernel.cc

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -66,7 +66,7 @@ void RangeKernel(const Context& dev_ctx,
6666
T step_value = step.to<T>();
6767
if constexpr (std::is_floating_point_v<T>) {
6868
if (std::isnan(end_value)) {
69-
PADDLE_THROW(phi::errors::InvalidArgument(
69+
PADDLE_THROW(common::errors::InvalidArgument(
7070
"The end value of range cannot be NaN. Please check your input."));
7171
}
7272
}

paddle/phi/kernels/funcs/radix_sort.cu

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -54,7 +54,7 @@ void RadixSortPairsImpl(const phi::GPUContext& dev_ctx,
5454
PADDLE_ENFORCE_LE(
5555
n,
5656
std::numeric_limits<int>::max(),
57-
phi::errors::InvalidArgument(
57+
common::errors::InvalidArgument(
5858
"CUB sort does not support sorting more than INT_MAX elements"));
5959

6060
using key_t_ = typename CudaType<key_t>::type;

paddle/phi/kernels/funcs/radix_sort.h

Lines changed: 3 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -53,19 +53,19 @@ void RadixSortPairs(const phi::GPUContext& dev_ctx,
5353
PADDLE_ENFORCE_EQ(
5454
std::is_trivially_copyable<value_t>::value,
5555
true,
56-
phi::errors::InvalidArgument(
56+
common::errors::InvalidArgument(
5757
"RadixSortPairs value type must be trivially copyable"));
5858

5959
using opaque_t = OpaqueTypeRadix<sizeof(value_t)>;
6060
PADDLE_ENFORCE_EQ(
6161
sizeof(value_t) <= 8 && (sizeof(value_t) & (sizeof(value_t) - 1)) == 0,
6262
true,
63-
phi::errors::InvalidArgument(
63+
common::errors::InvalidArgument(
6464
"Unsupported value_t size (must be 1, 2, 4, or 8 bytes)"));
6565
PADDLE_ENFORCE_EQ(
6666
sizeof(value_t),
6767
alignof(value_t),
68-
phi::errors::InvalidArgument("Expected value_t to be size-aligned"));
68+
common::errors::InvalidArgument("Expected value_t to be size-aligned"));
6969

7070
RadixSortPairsImpl<key_t, sizeof(value_t)>(
7171
dev_ctx,

0 commit comments

Comments (0)