!27949 fix mac compile and fix st
Merge pull request !27949 from xulei/fix_mac
commit d12b05f1f8
@@ -1202,7 +1202,7 @@ template <typename T>
 Status TensorRowReplace(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output, int row) {
   RETURN_UNEXPECTED_IF_NULL(output);
   auto iter_in = input->begin<T>();
-  auto iter_out = (*output)->begin<T>() + (*output)->shape()[-1] * row;
+  auto iter_out = (*output)->begin<T>() + static_cast<ptrdiff_t>((*output)->shape()[-1] * row);
   CHECK_FAIL_RETURN_UNEXPECTED(iter_out <= (*output)->end<T>(), "TensorRowReplace: pointer out of bounds");
   CHECK_FAIL_RETURN_UNEXPECTED(input->Size() <= (*output)->shape()[-1], "TensorRowReplace: pointer out of bounds");
   for (; iter_in != input->end<T>(); ++iter_in, ++iter_out) {
@@ -1215,7 +1215,7 @@ template <typename T>
 Status TensorRowAt(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output, int rank_index) {
   RETURN_UNEXPECTED_IF_NULL(output);
   RETURN_IF_NOT_OK(Tensor::CreateEmpty(TensorShape({input->shape()[-1]}), input->type(), output));
-  auto iter_in = input->begin<T>() + input->shape()[-1] * rank_index;
+  auto iter_in = input->begin<T>() + static_cast<ptrdiff_t>(input->shape()[-1] * rank_index);
   auto iter_out = (*output)->begin<T>();
   CHECK_FAIL_RETURN_UNEXPECTED(iter_in <= input->end<T>(), "TensorRowAt: pointer out of bounds");
   for (; iter_out != (*output)->end<T>(); ++iter_in, ++iter_out) {
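The two hunks above add an explicit static_cast<ptrdiff_t> around the row offset before it is added to the tensor iterator. A plausible reading of the macOS compile failure is that the offset's integer type does not match the iterator's difference type, so the implicit conversion is rejected when warnings are treated as errors on that toolchain. A minimal standalone sketch of the pattern, with illustrative names and types rather than the MindSpore API:

#include <cstddef>
#include <cstdint>
#include <vector>

int main() {
  std::vector<float> flat(12);   // stand-in for a 3 x 4 tensor buffer
  std::uint64_t row_len = 4;     // stand-in for shape()[-1]
  int row = 2;
  // Casting the offset to ptrdiff_t keeps the iterator arithmetic in the
  // signed difference type instead of relying on an implicit conversion.
  auto iter_out = flat.begin() + static_cast<std::ptrdiff_t>(row_len * row);
  *iter_out = 1.0f;              // first element of row 2
  return 0;
}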
@@ -59,7 +59,7 @@ def test_time_distributed_maxpool2d():
     time_distributed = TestTimeDistributed(pool, time_axis=1, reshape_with_axis=0)
     output = time_distributed(Tensor(inputs, mindspore.float32)).asnumpy()
     for i in range(output.shape[1]):
-        assert np.all(output[:, i, :] == output_expect)
+        assert np.all(np.abs(output[:, i, :] - output_expect) < 1e-5)
     print("MaxPooling2D layer wrapped successful")
@@ -74,7 +74,7 @@ def test_time_distributed_dense():
     time_distributed = TestTimeDistributed(dense, time_axis=1, reshape_with_axis=0)
     output = time_distributed(Tensor(inputs, mindspore.float32)).asnumpy()
     for i in range(output.shape[1]):
-        assert np.all(output[:, i, :] == output_expect)
+        assert np.all(np.abs(output[:, i, :] - output_expect) < 1e-5)
     print("Dense layer wrapped successful")
@@ -90,7 +90,7 @@ def test_time_distributed_dense_pynative():
     time_distributed = TestTimeDistributed(dense, time_axis=1, reshape_with_axis=0)
     output = time_distributed(Tensor(inputs, mindspore.float32)).asnumpy()
     for i in range(output.shape[1]):
-        assert np.all(output[:, i, :] == output_expect)
+        assert np.all(np.abs(output[:, i, :] - output_expect) < 1e-5)
     print("Dense layer with pynative mode wrapped successful")
@@ -105,7 +105,7 @@ def test_time_distributed_dense_with_reshape_axis_not_first():
     time_distributed = TestTimeDistributed(dense, time_axis=0, reshape_with_axis=1)
     output = time_distributed(Tensor(inputs, mindspore.float32)).asnumpy()
     for i in range(output.shape[0]):
-        assert np.all(output[i, :] == output_expect)
+        assert np.all(np.abs(output[i, :] - output_expect) < 1e-5)
     print("Dense layer wrapped successful")
@@ -120,7 +120,7 @@ def test_time_distributed_argmax():
     time_distributed = TestTimeDistributed(argmax, time_axis=1, reshape_with_axis=0)
     output = time_distributed(Tensor(inputs, mindspore.float32)).asnumpy()
     for i in range(output.shape[1]):
-        assert np.all(output[:, i] == output_expect)
+        assert np.all(np.abs(output[:, i] - output_expect) < 1e-5)
     print("Argmax op wrapped successful")
@@ -135,7 +135,7 @@ def test_time_distributed_flatten():
     time_distributed = TestTimeDistributed(flatten, time_axis=1, reshape_with_axis=0)
     output = time_distributed(Tensor(inputs, mindspore.float32)).asnumpy()
     for i in range(output.shape[1]):
-        assert np.all(output[:, i, :] == output_expect)
+        assert np.all(np.abs(output[:, i, :] - output_expect) < 1e-5)
     print("Flatten op wrapped successful")
@@ -150,7 +150,7 @@ def test_time_distributed_conv2d_no_reshape_axis():
     time_distributed = TestTimeDistributed(conv2d, time_axis=1)
     output = time_distributed(Tensor(inputs, mindspore.float32)).asnumpy()
     for i in range(output.shape[1]):
-        assert np.all(output[:, i, :] == output_expect)
+        assert np.all(np.abs(output[:, i, :] - output_expect) < 1e-5)
     print("Conv2D layer with no reshape axis wrapped successful")
@@ -165,7 +165,7 @@ def test_time_distributed_maxpool2d_no_reshape_axis():
     time_distributed = TestTimeDistributed(pool, time_axis=1)
     output = time_distributed(Tensor(inputs, mindspore.float32)).asnumpy()
     for i in range(output.shape[1]):
-        assert np.all(output[:, i, :] == output_expect)
+        assert np.all(np.abs(output[:, i, :] - output_expect) < 1e-5)
     print("MaxPooling2D layer with no reshape axis wrapped successful")
@@ -180,7 +180,7 @@ def test_time_distributed_dense_no_reshape_axis():
     time_distributed = TestTimeDistributed(dense, time_axis=1)
     output = time_distributed(Tensor(inputs, mindspore.float32)).asnumpy()
     for i in range(output.shape[1]):
-        assert np.all(output[:, i, :] == output_expect)
+        assert np.all(np.abs(output[:, i, :] - output_expect) < 1e-5)
     print("Dense layer with no reshape axis wrapped successful")
@@ -195,7 +195,7 @@ def test_time_distributed_argmax_no_reshape_axis():
     time_distributed = TestTimeDistributed(argmax, time_axis=1)
     output = time_distributed(Tensor(inputs, mindspore.float32)).asnumpy()
     for i in range(output.shape[1]):
-        assert np.all(output[:, i] == output_expect)
+        assert np.all(np.abs(output[:, i] - output_expect) < 1e-5)
     print("Argmax op with no reshape axis wrapped successful")
@@ -210,5 +210,5 @@ def test_time_distributed_flatten_no_reshape_axis():
     time_distributed = TestTimeDistributed(flatten, time_axis=1)
     output = time_distributed(Tensor(inputs, mindspore.float32)).asnumpy()
     for i in range(output.shape[1]):
-        assert np.all(output[:, i, :] == output_expect)
+        assert np.all(np.abs(output[:, i, :] - output_expect) < 1e-5)
     print("Flatten op with no reshape axis wrapped successful")
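Every test hunk above makes the same change: the exact equality check against output_expect becomes an absolute-tolerance check of 1e-5, the usual way to compare floating-point results that can differ from the reference by rounding error. A standalone sketch of the same check, kept in C++ to match the compile fix above (the AllClose helper is hypothetical, not part of the test suite):

#include <cassert>
#include <cmath>
#include <cstddef>

// Compare two float buffers element-wise within an absolute tolerance,
// mirroring: assert np.all(np.abs(output - output_expect) < 1e-5).
bool AllClose(const float *a, const float *b, std::size_t n, float eps = 1e-5f) {
  for (std::size_t i = 0; i < n; ++i) {
    if (std::fabs(a[i] - b[i]) >= eps) {
      return false;
    }
  }
  return true;
}

int main() {
  const float output[3] = {1.0f, 2.0000001f, 3.0f};
  const float expect[3] = {1.0f, 2.0f, 3.0f};
  assert(AllClose(output, expect, 3));   // passes: differences stay below 1e-5
  return 0;
}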