diff --git a/tensorflow_text/core/kernels/constrained_sequence_kernel.cc b/tensorflow_text/core/kernels/constrained_sequence_kernel.cc
index ceb02fa09..160af2494 100644
--- a/tensorflow_text/core/kernels/constrained_sequence_kernel.cc
+++ b/tensorflow_text/core/kernels/constrained_sequence_kernel.cc
@@ -62,8 +62,7 @@ absl::Status ValidateConstraintTensor(const Tensor &tensor,
                                       const bool use_start_end_states,
                                       const string &name) {
   if (tensor.shape().dims() != 2) {
-    return InvalidArgument(
-        tensorflow::strings::StrCat(name, " must be of rank 2"));
+    return InvalidArgument(absl::StrCat(name, " must be of rank 2"));
   }
   int expected_size = use_start_end_states ? num_states + 1 : num_states;
   if (tensor.shape().dim_size(0) != expected_size) {
@@ -110,7 +109,7 @@ class ConstrainedSequence : public OpKernel {
     const int num_scores = scores.num_scores();
 
     OP_REQUIRES(context, lengths_tensor.NumElements() == batch_size,
-                InvalidArgument(tensorflow::strings::StrCat(
+                InvalidArgument(absl::StrCat(
                     "There should be exactly one length for every batch "
                     "element. Found ",
                     lengths_tensor.NumElements(),
diff --git a/tensorflow_text/core/kernels/split_merge_tokenize_kernel.cc b/tensorflow_text/core/kernels/split_merge_tokenize_kernel.cc
index 6a5372dd0..0ee4b1e28 100644
--- a/tensorflow_text/core/kernels/split_merge_tokenize_kernel.cc
+++ b/tensorflow_text/core/kernels/split_merge_tokenize_kernel.cc
@@ -65,24 +65,24 @@ bool IsBreakChar(absl::string_view text) {
   return u_isUWhiteSpace(c);
 }
 
-Status TokenizeByLabel(const absl::string_view& text,
-                       const Tensor& labels_tensor,
-                       bool force_split_at_break_character,
-                       std::vector<std::string>* tokens,
-                       std::vector<int>* begin_offset,
-                       std::vector<int>* end_offset, int* num_tokens) {
+absl::Status TokenizeByLabel(const absl::string_view& text,
+                             const Tensor& labels_tensor,
+                             bool force_split_at_break_character,
+                             std::vector<std::string>* tokens,
+                             std::vector<int>* begin_offset,
+                             std::vector<int>* end_offset, int* num_tokens) {
   std::vector<absl::string_view> chars;
   if (!GetUTF8Chars(text, &chars)) {
-    return Status(static_cast<::absl::StatusCode>(
-                      absl::StatusCode::kInvalidArgument),
-                  absl::StrCat("Input string is not utf8 valid: ", text));
+    return absl::Status(
+        static_cast<::absl::StatusCode>(absl::StatusCode::kInvalidArgument),
+        absl::StrCat("Input string is not utf8 valid: ", text));
   }
 
   if (chars.size() > labels_tensor.dim_size(0)) {
-    return Status(static_cast<::absl::StatusCode>(
-                      absl::StatusCode::kInvalidArgument),
-                  absl::StrCat("Number of labels ", labels_tensor.dim_size(0),
-                               " is insufficient for text ", text));
+    return absl::Status(
+        static_cast<::absl::StatusCode>(absl::StatusCode::kInvalidArgument),
+        absl::StrCat("Number of labels ", labels_tensor.dim_size(0),
+                     " is insufficient for text ", text));
   }
 
   const int split_label = 0;
diff --git a/tensorflow_text/core/kernels/tokenizer_from_logits_kernel.cc b/tensorflow_text/core/kernels/tokenizer_from_logits_kernel.cc
index 49b4ff840..c58aa51fd 100644
--- a/tensorflow_text/core/kernels/tokenizer_from_logits_kernel.cc
+++ b/tensorflow_text/core/kernels/tokenizer_from_logits_kernel.cc
@@ -68,22 +68,22 @@ bool IsBreakChar(absl::string_view text) {
 // allows us to retrieve the corresponding data from logits.  I.e., the logits
 // for the i-th character from text are logits(batch_index, i, 0) (for the
 // "split" action) and logits(batch_index, i, 1) (for the "merge" action).
-Status TokenizeByLogits(const absl::string_view& text,
-                        const TTypes<float, 3>::Tensor& logits,
-                        int batch_index,
-                        bool force_split_at_break_character,
-                        std::vector<std::string>* tokens,
-                        std::vector<int>* begin_offset,
-                        std::vector<int>* end_offset, int* num_tokens) {
+absl::Status TokenizeByLogits(const absl::string_view& text,
+                              const TTypes<float, 3>::Tensor& logits,
+                              int batch_index,
+                              bool force_split_at_break_character,
+                              std::vector<std::string>* tokens,
+                              std::vector<int>* begin_offset,
+                              std::vector<int>* end_offset, int* num_tokens) {
   std::vector<absl::string_view> chars;
   if (!GetUTF8Chars(text, &chars)) {
-    return Status(
+    return absl::Status(
         static_cast<absl::StatusCode>(absl::StatusCode::kInvalidArgument),
         absl::StrCat("Input string is not utf8 valid: ", text));
   }
 
   if (chars.size() > logits.dimension(1)) {
-    return Status(
+    return absl::Status(
         static_cast<absl::StatusCode>(absl::StatusCode::kInvalidArgument),
         absl::StrCat("Number of logits, ", logits.dimension(1),
                      ", is insufficient for text \"", text, "\""));
diff --git a/tensorflow_text/core/kernels/wordpiece_kernel.cc b/tensorflow_text/core/kernels/wordpiece_kernel.cc
index b86ad0f54..634675e1c 100644
--- a/tensorflow_text/core/kernels/wordpiece_kernel.cc
+++ b/tensorflow_text/core/kernels/wordpiece_kernel.cc
@@ -82,8 +82,8 @@ bool GetSplitUnknownCharacters(OpKernelConstruction* ctx) {
   return split_unknown_characters;
 }
 
-Status GetTableHandle(const string& input_name, OpKernelContext* ctx,
-                      string* container, string* table_handle) {
+absl::Status GetTableHandle(const string& input_name, OpKernelContext* ctx,
+                            string* container, string* table_handle) {
   {
     mutex* mu;
     TF_RETURN_IF_ERROR(ctx->input_ref_mutex(input_name, &mu));
@@ -105,8 +105,8 @@ Status GetTableHandle(const string& input_name, OpKernelContext* ctx,
 // Gets the LookupTable stored in the ctx->resource_manager() with key
 // passed by attribute with name input_name, returns null if the table
 // doesn't exist.
-Status GetLookupTable(const string& input_name, OpKernelContext* ctx,
-                      lookup::LookupInterface** table) {
+absl::Status GetLookupTable(const string& input_name, OpKernelContext* ctx,
+                            lookup::LookupInterface** table) {
   string container;
   string table_handle;
   DataType handle_dtype;
@@ -135,7 +135,7 @@ class LookupTableVocab : public WordpieceVocab {
   Tensor default_value_;
 };
 
-Status ToStatus(const LookupStatus& status) {
+absl::Status ToStatus(const LookupStatus& status) {
   if (status.success) {
     return absl::OkStatus();
   }