diff --git a/dipu/SupportedDiopiFunctions.txt b/dipu/SupportedDiopiFunctions.txt
index 547e75955..0f6a3b0ff 100644
--- a/dipu/SupportedDiopiFunctions.txt
+++ b/dipu/SupportedDiopiFunctions.txt
@@ -101,6 +101,7 @@ diopiForeachmulInpTensor
 diopiForeachmulScalar
 diopiForeachmulTensor
 diopiForeachnormScalar
+diopiFusedAdamW
 diopiGather
 diopiGe
 diopiGeInp
diff --git a/dipu/scripts/autogen_diopi_wrapper/diopi_functions.yaml b/dipu/scripts/autogen_diopi_wrapper/diopi_functions.yaml
index 2759f7fb6..8e3b03ff0 100755
--- a/dipu/scripts/autogen_diopi_wrapper/diopi_functions.yaml
+++ b/dipu/scripts/autogen_diopi_wrapper/diopi_functions.yaml
@@ -1325,6 +1325,42 @@
     ::diopiConstTensorHandle_t self_dtype_diopi = dipu::diopi_helper::toDiopiTensorHandle(self_dtype);
   interface: diopiProd(ctx, out, self_dtype_diopi, nullptr)
 
+- schema: "_fused_adamw_(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> ()"
+  custom_code_at_the_beginning: |
+    std::vector<diopiTensorHandle_t> diopiTensorHandles_self(self.size());
+    for(size_t i=0; i < self.size(); ++i){
+        diopiConstTensorHandle_t const_handle = dipu::diopi_helper::toDiopiTensorHandle(self.at(i));
+        diopiTensorHandle_t handle = const_cast<diopiTensorHandle_t>(const_handle);
+        diopiTensorHandles_self[i] = handle;
+    }
+    std::vector<diopiConstTensorHandle_t> diopiTensorHandles_grads(grads.size());
+    for(size_t i=0; i < grads.size(); ++i){
+        diopiTensorHandles_grads[i] = dipu::diopi_helper::toDiopiTensorHandle(grads.at(i));
+    }
+    std::vector<diopiTensorHandle_t> diopiTensorHandles_exp_avgs(exp_avgs.size());
+    for(size_t i=0; i < exp_avgs.size(); ++i){
+        diopiConstTensorHandle_t const_handle = dipu::diopi_helper::toDiopiTensorHandle(exp_avgs.at(i));
+        diopiTensorHandle_t handle = const_cast<diopiTensorHandle_t>(const_handle);
+        diopiTensorHandles_exp_avgs[i] = handle;
+    }
+    std::vector<diopiTensorHandle_t> diopiTensorHandles_exp_avg_sqs(exp_avg_sqs.size());
+    for(size_t i=0; i < exp_avg_sqs.size(); ++i){
+        diopiConstTensorHandle_t const_handle = dipu::diopi_helper::toDiopiTensorHandle(exp_avg_sqs.at(i));
+        diopiTensorHandle_t handle = const_cast<diopiTensorHandle_t>(const_handle);
+        diopiTensorHandles_exp_avg_sqs[i] = handle;
+    }
+    std::vector<diopiTensorHandle_t> diopiTensorHandles_max_exp_avg_sqs(max_exp_avg_sqs.size());
+    for(size_t i=0; i < max_exp_avg_sqs.size(); ++i){
+        diopiConstTensorHandle_t const_handle = dipu::diopi_helper::toDiopiTensorHandle(max_exp_avg_sqs.at(i));
+        diopiTensorHandle_t handle = const_cast<diopiTensorHandle_t>(const_handle);
+        diopiTensorHandles_max_exp_avg_sqs[i] = handle;
+    }
+    std::vector<diopiConstTensorHandle_t> diopiTensorHandles_state_steps(state_steps.size(), nullptr);
+    for(size_t i=0; i < state_steps.size(); ++i){
+        diopiTensorHandles_state_steps[i] = dipu::diopi_helper::toDiopiTensorHandle(state_steps.at(i));
+    }
+  interface: diopiFusedAdamW(ctx, diopiTensorHandles_self.data(), diopiTensorHandles_grads.data(), diopiTensorHandles_exp_avgs.data(), diopiTensorHandles_exp_avg_sqs.data(), diopiTensorHandles_max_exp_avg_sqs.data(), diopiTensorHandles_state_steps.data(), static_cast<int64_t>(self.size()), lr, beta1, beta2, eps, weight_decay, amsgrad, maximize)
+
 - schema: prod.int_out(Tensor self, int dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
   custom_code_at_the_beginning: |
     const auto self_dtype = at::native::to(self, dtype);
diff --git a/dipu/third_party/DIOPI b/dipu/third_party/DIOPI
index 65930a539..02f03c6ab 160000
--- a/dipu/third_party/DIOPI
+++ b/dipu/third_party/DIOPI
@@ -1 +1 @@
-Subproject commit 65930a539938b692a84ba77027e91686b3d2516d
+Subproject commit 02f03c6abb20aa39d1d978436a53a2e4ec242d65