!46788 Fix a compile error, the SparseReorder bprop error, and a doc error

Merge pull request !46788 from zong_shuai/compile_bp_doc
i-robot, 2022-12-15 09:45:06 +00:00 (committed by Gitee)
commit 9ef3ac407e
4 changed files with 8 additions and 11 deletions
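
In short: the Cdist forward/backward CUDA kernels now route every pow call through double via explicit static_cast (and drop their <math.h> includes), which resolves the compile error; the SparseReorder bprop returns zeros_like(indices) and zeros_like(shape) instead of None for its non-differentiable inputs; and the SparseApplyAdagradDA docstring gets a corrected TypeError entry.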


@@ -15,7 +15,6 @@
  */
 #include <stdlib.h>
-#include <math.h>
 #include "cdist_grad_impl.cuh"
 #include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/util.cuh"
@@ -44,7 +43,6 @@ __global__ void CdistGradOne(T *grad, T *dist, T *t1, T *t2, T *res, double p, i
     return;
   }
   const T grad_k = grad[current];
-  const T dist_k = dist[current];
   const int current_l = current / r_size;
   const int current_k = current % r_size;
@@ -88,9 +86,10 @@ __global__ void CdistGradLessthanTwo(T *grad, T *dist, T *t1, T *t2, T *res, dou
   const T * self_m = start + current_i;
   const T * self_n = t2 + current_l * x2_size + n * col + current_i;
   T * res_m = res + current_l * x1_size + m * col + current_i;
+  double dist_k_pow = pow(static_cast<double>(dist_k), p - 1);
   for (; self_m < end; self_m += stride, self_n += stride, res_m += stride) {
     const T diff = *self_m - *self_n;
-    T res = (sign(diff) * pow(abs(diff), p - 1) * (grad_k) / pow(dist_k, p - 1));
+    T res = static_cast<T>(sign(diff) * pow(static_cast<double>(abs(diff)), p - 1) * grad_k / dist_k_pow);
     MsAtomicAdd(res_m, res);
   }
 }
@@ -152,10 +151,10 @@ __global__ void CdistGradP(T *grad, T *dist, T *t1, T *t2, T *res, double p, int
   const T * self_m = start + current_i;
   const T * self_n = t2 + current_l * x2_size + n * col + current_i;
   T * res_m = res + current_l * x1_size + m * col + current_i;
-  T dist_k_pow = pow(dist_k, p - 1);
+  double dist_k_pow = pow(static_cast<double>(dist_k), p - 1);
   for (; self_m < end; self_m += stride, self_n += stride, res_m += stride) {
     const T diff = *self_m - *self_n;
-    T res_num = diff * pow(abs(diff), p - 2) * grad_k / pow(dist_k, p - 1);
+    T res_num = static_cast<T>(diff * pow(static_cast<double>(abs(diff)), p - 2) * grad_k / dist_k_pow);
     MsAtomicAdd(res_m, res_num);
   }
 }
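
All of the rewritten lines above follow one idiom: promote the operand to double, call pow(double, double), and narrow the result back to T, hoisting the loop-invariant pow(dist_k, p - 1) into dist_k_pow. Below is a minimal standalone sketch of that idiom, assuming the compile error came from pow overload resolution when T is half (my reading of the diff, not stated in the commit); PowViaDouble and DemoKernel are hypothetical names, not MindSpore code.

#include <cstdio>
#include <cuda_runtime.h>

// Sketch only: the cast-to-double pow idiom used in the kernels above.
template <typename T>
__device__ __forceinline__ T PowViaDouble(T base, double exp) {
  // Promote to double so ::pow(double, double) is selected unambiguously
  // (for T = half there is no pow(half, double) overload), then narrow to T.
  return static_cast<T>(pow(static_cast<double>(base), exp));
}

__global__ void DemoKernel(const float *diff, float *out, double p, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) {
    // Mirrors the shape of the rewritten lines: |diff|^(p - 1), computed in double.
    out[i] = PowViaDouble(fabsf(diff[i]), p - 1);
  }
}

int main() {
  const int n = 4;
  float h_diff[n] = {-2.0f, -1.0f, 0.5f, 3.0f}, h_out[n];
  float *d_diff = nullptr, *d_out = nullptr;
  cudaMalloc(&d_diff, n * sizeof(float));
  cudaMalloc(&d_out, n * sizeof(float));
  cudaMemcpy(d_diff, h_diff, n * sizeof(float), cudaMemcpyHostToDevice);
  DemoKernel<<<1, 32>>>(d_diff, d_out, 3.0, n);
  cudaMemcpy(h_out, d_out, n * sizeof(float), cudaMemcpyDeviceToHost);
  for (int i = 0; i < n; ++i) printf("%f\n", h_out[i]);  // prints |diff|^2
  cudaFree(d_diff);
  cudaFree(d_out);
  return 0;
}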


@@ -16,7 +16,6 @@
 #include "cdist_impl.cuh"
 #include <float.h>
-#include <math.h>
 static const int forward_threads = 256;
@@ -134,7 +133,7 @@ __global__ void CdistP(T *x1, T *x2, T *result, double p, const int64_t r2, cons
   const T *b = x2 + l * l2_size + j * m + threadIdx.x;
   T res = 0.0;
   for (; a < end; a += stride, b += stride) {
-    res += pow(abs(*a - *b), p);
+    res += static_cast<T>(pow(static_cast<double>(abs(*a - *b)), p));
   }
   for (int offset = warpSize / 2; offset > 0; offset /= 2) {
@@ -157,7 +156,7 @@ __global__ void CdistP(T *x1, T *x2, T *result, double p, const int64_t r2, cons
   }
   if (threadIdx.x == 0) {
-    result[blockIdx.x] = pow(res, 1.0 / p);
+    result[blockIdx.x] = static_cast<T>(pow(static_cast<double>(res), 1.0 / p));
   }
   return;
 }
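
The forward kernel gets the same treatment: both the |a - b|^p accumulation and the final 1/p root are evaluated in double and cast back to T, so a single template instantiation covers half, float, and double without relying on the removed <math.h> overloads.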


@@ -380,7 +380,7 @@ def get_bprop_sparse_reorder(self):
         output = sparse_reorder_op(indices, entry_indices, shape)
         inverted_permutation = F.sort(output[1].astype(mstype.float32))[1]
         axis = 0
-        return None, gather_op(dout[1], inverted_permutation, axis), None
+        return zeros_like(indices), gather_op(dout[1], inverted_permutation, axis), zeros_like(shape)
     return bprop
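
For context (my inference from the change, not stated in the commit): a MindSpore bprop must return one gradient per forward input, so returning None for the non-differentiable indices and shape slots breaks SparseReorder's backward pass; zeros_like supplies correctly shaped zero gradients as placeholders.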


@@ -9791,8 +9791,7 @@ class SparseApplyAdagradDA(Primitive):
         TypeError: If `grad` is not a Tensor.
         TypeError: If `lr`, `l1`, `l2` or `global_step` is neither a Number nor a Tensor.
         TypeError: If use_locking is not a bool.
-        TypeError: If dtype of `var`, `grad_accum`, `grad_square_accum`, `grad_accum`,
-            `lr`, `l1`, `l2` is neither float16 nor float32.
+        TypeError: If dtype of `var`, `grad_accum`, `grad_square_accum`, `grad_accum` is not the same.
         TypeError: If dtype of `grad_accum`, `grad_square_accum`, `grad_accum`
             is not same as `var`.
         TypeError: If dtype of `indices` is neither int32 nor int64.