forked from mindspore-Ecosystem/mindspore

remove _quant_ops.py from __init__.py

parent  934bdce432
commit  bbce6faff9
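In effect, the commit stops re-exporting the quantization primitives through the ops package's __init__.py, so every consumer now imports the private _quant_ops module directly under a Q alias instead of reaching those ops through the usual P alias. A minimal before/after sketch of the import pattern (illustrative only; the bn_fold lines are hypothetical call sites, not part of the diff below):

    # Before this commit: _quant_ops was wildcard-imported in ops/operations/__init__.py,
    # so the quant primitives were reachable through the standard P alias.
    from mindspore.ops import operations as P
    bn_fold = P.BatchNormFold(momentum=0.9, epsilon=1e-5, is_training=True, freeze_bn=0)

    # After this commit: the wildcard re-export is removed, so callers import the
    # private module explicitly and construct the same primitives from Q.
    from mindspore.ops.operations import _quant_ops as Q
    bn_fold = Q.BatchNormFold(momentum=0.9, epsilon=1e-5, is_training=True, freeze_bn=0)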
@@ -32,7 +32,7 @@ from .activation import get_activation
 from ..cell import Cell
 from . import conv, basic
 from ..._checkparam import ParamValidator as validator

+from ...ops.operations import _quant_ops as Q

 __all__ = [
     'Conv2dBnAct',
@@ -242,11 +242,11 @@ class BatchNormFoldCell(Cell):
         self.epsilon = epsilon
         self.is_gpu = context.get_context('device_target') == "GPU"
         if self.is_gpu:
-            self.bn_train = P.BatchNormFold(momentum, epsilon, is_training=True, freeze_bn=freeze_bn)
-            self.bn_infer = P.BatchNormFold(momentum, epsilon, is_training=False, freeze_bn=freeze_bn)
+            self.bn_train = Q.BatchNormFold(momentum, epsilon, is_training=True, freeze_bn=freeze_bn)
+            self.bn_infer = Q.BatchNormFold(momentum, epsilon, is_training=False, freeze_bn=freeze_bn)
         else:
             self.bn_reduce = P.BNTrainingReduce()
-            self.bn_update = P.BatchNormFoldD(momentum, epsilon, is_training=True, freeze_bn=freeze_bn)
+            self.bn_update = Q.BatchNormFoldD(momentum, epsilon, is_training=True, freeze_bn=freeze_bn)

     def construct(self, x, mean, variance, global_step):
         if self.is_gpu:
@@ -337,11 +337,11 @@ class FakeQuantWithMinMax(Cell):

         # init fake quant relative op
         if per_channel:
-            quant_fun = partial(P.FakeQuantPerChannel, channel_axis=self.channel_axis)
-            ema_fun = partial(P.FakeQuantMinMaxPerChannelUpdate, channel_axis=self.channel_axis)
+            quant_fun = partial(Q.FakeQuantPerChannel, channel_axis=self.channel_axis)
+            ema_fun = partial(Q.FakeQuantMinMaxPerChannelUpdate, channel_axis=self.channel_axis)
         else:
-            quant_fun = P.FakeQuantPerLayer
-            ema_fun = P.FakeQuantMinMaxPerLayerUpdate
+            quant_fun = Q.FakeQuantPerLayer
+            ema_fun = Q.FakeQuantMinMaxPerLayerUpdate

         if self.is_ascend:
             self.fake_quant = quant_fun(num_bits=self.num_bits,
@@ -510,13 +510,13 @@ class Conv2dBatchNormQuant(Cell):
                                               symmetric=symmetric,
                                               narrow_range=narrow_range)
         self.batchnorm_fold = BatchNormFoldCell(epsilon=eps, momentum=momentum, freeze_bn=freeze_bn)
-        self.correct_mul = P.CorrectionMul(channel_axis)
+        self.correct_mul = Q.CorrectionMul(channel_axis)
         if context.get_context('device_target') == "Ascend":
-            self.batchnorm_fold2_train = P.BatchNormFold2_D(freeze_bn=freeze_bn)
-            self.batchnorm_fold2_infer = P.BatchNormFold2_D(freeze_bn=0)
+            self.batchnorm_fold2_train = Q.BatchNormFold2_D(freeze_bn=freeze_bn)
+            self.batchnorm_fold2_infer = Q.BatchNormFold2_D(freeze_bn=0)
         elif context.get_context('device_target') == "GPU":
-            self.batchnorm_fold2_train = P.BatchNormFold2(freeze_bn=freeze_bn)
-            self.batchnorm_fold2_infer = P.BatchNormFold2(freeze_bn=0)
+            self.batchnorm_fold2_train = Q.BatchNormFold2(freeze_bn=freeze_bn)
+            self.batchnorm_fold2_infer = Q.BatchNormFold2(freeze_bn=0)
         else:
             raise ValueError("Unsupported platform: {}".format(context.get_context('device_target')))
         self.step = Parameter(initializer('normal', [1], dtype=mstype.int32), name='step', requires_grad=False)
@@ -16,15 +16,16 @@
 """Generate bprop for aware quantization ops"""

 from .. import operations as P
+from ..operations import _quant_ops as Q
 from .grad_base import bprop_getters
 from ..composite.multitype_ops.zeros_like_impl import zeros_like
 from ... import context


-@bprop_getters.register(P.FakeQuantPerLayer)
+@bprop_getters.register(Q.FakeQuantPerLayer)
 def get_bprop_fakequant_with_minmax(self):
     """Generate bprop for FakeQuantPerLayer for GPU and Ascend"""
-    op = P.FakeQuantPerLayerGrad(
+    op = Q.FakeQuantPerLayerGrad(
         num_bits=self.num_bits, quant_delay=self.quant_delay)

     def bprop(x, x_min, x_max, out, dout):
@@ -34,10 +35,10 @@ def get_bprop_fakequant_with_minmax(self):
     return bprop


-@bprop_getters.register(P.FakeQuantPerChannel)
+@bprop_getters.register(Q.FakeQuantPerChannel)
 def get_bprop_fakequant_with_minmax_perchannel(self):
     """Generate bprop for FakeQuantPerChannel"""
-    op = P.FakeQuantPerChannelGrad(num_bits=self.num_bits,
+    op = Q.FakeQuantPerChannelGrad(num_bits=self.num_bits,
                                    quant_delay=self.quant_delay,
                                    symmetric=self.symmetric,
                                    narrow_range=self.symmetric,
@@ -50,10 +51,10 @@ def get_bprop_fakequant_with_minmax_perchannel(self):
     return bprop


-@bprop_getters.register(P.BatchNormFold)
+@bprop_getters.register(Q.BatchNormFold)
 def get_bprop_batchnorm_fold(self):
     """Generate bprop for BatchNormFold for GPU"""
-    op = P.BatchNormFoldGrad(self.epsilon, self.is_training, self.freeze_bn)
+    op = Q.BatchNormFoldGrad(self.epsilon, self.is_training, self.freeze_bn)

     def bprop(x, mean, variance, global_step, out, dout):
         dx = op(dout[0], dout[1], x, out[0], out[1], global_step)
@@ -62,11 +63,11 @@ def get_bprop_batchnorm_fold(self):
     return bprop


-@bprop_getters.register(P.CorrectionMul)
+@bprop_getters.register(Q.CorrectionMul)
 def get_bprop_correction_mul(self):
     """Generate bprop for CorrectionMul for Ascend and GPU"""
-    grad_dx = P.CorrectionMulGrad(self.channel_axis)
-    grad_d_batch_std = P.CorrectionMulGradReduce(self.channel_axis)
+    grad_dx = Q.CorrectionMulGrad(self.channel_axis)
+    grad_d_batch_std = Q.CorrectionMulGradReduce(self.channel_axis)

     def bprop(x, batch_std, running_std, out, dout):
         dx, d_batch_std = grad_dx(dout, x, batch_std, running_std)
@@ -83,10 +84,10 @@ def get_bprop_correction_mul(self):
     return bprop


-@bprop_getters.register(P.BatchNormFold2)
+@bprop_getters.register(Q.BatchNormFold2)
 def get_bprop_batchnorm_fold2(self):
     """Generate bprop for BatchNormFold2 for GPU"""
-    op_f = P.BatchNormFold2Grad(freeze_bn=self.freeze_bn)
+    op_f = Q.BatchNormFold2Grad(freeze_bn=self.freeze_bn)

     def bprop(x, beta, gamma, batch_std, batch_mean, running_std, running_mean, global_step, out, dout):
         d_batch_std, d_batch_mean, d_beta, d_gamma, d_x = op_f(dout, x, gamma, batch_std, batch_mean, running_std,
@@ -97,10 +98,10 @@ def get_bprop_batchnorm_fold2(self):
     return bprop


-@bprop_getters.register(P.BatchNormFoldD)
+@bprop_getters.register(Q.BatchNormFoldD)
 def get_bprop_BatchNormFold(self):
     """Generate bprop for BatchNormFold for Ascend"""
-    op = P.BatchNormFoldGradD(self.epsilon, self.is_training, self.freeze_bn)
+    op = Q.BatchNormFoldGradD(self.epsilon, self.is_training, self.freeze_bn)

     def bprop(x, x_sum, x_square_sum, mean, variance, out, dout):
         dx = op(dout[1], dout[2], x, out[1], out[2])
@@ -117,11 +118,11 @@ def get_bprop_BNTrainingReduce(self):
     return bprop


-@bprop_getters.register(P.BatchNormFold2_D)
+@bprop_getters.register(Q.BatchNormFold2_D)
 def get_bprop_batchnorm_fold2_(self):
     """Generate bprop for BatchNormFold2 for Ascend"""
-    op_reduce = P.BatchNormFold2GradReduce(freeze_bn=self.freeze_bn)
-    op_f = P.BatchNormFold2GradD(freeze_bn=self.freeze_bn)
+    op_reduce = Q.BatchNormFold2GradReduce(freeze_bn=self.freeze_bn)
+    op_f = Q.BatchNormFold2GradD(freeze_bn=self.freeze_bn)

     def bprop(x, beta, gamma, batch_std, batch_mean, running_std, out, dout):
         dout_reduce, dout_x_reduce = op_reduce(dout, x)
@@ -132,7 +133,7 @@ def get_bprop_batchnorm_fold2_(self):
     return bprop


-@bprop_getters.register(P.FakeQuantMinMaxPerLayerUpdate)
+@bprop_getters.register(Q.FakeQuantMinMaxPerLayerUpdate)
 def get_bprop_fakequant_with_minmax_per_layer_update(self):
     """Generate bprop for FakeQuantMinMaxPerLayerUpdate for Ascend"""

@@ -142,7 +143,7 @@ def get_bprop_fakequant_with_minmax_per_layer_update(self):
     return bprop


-@bprop_getters.register(P.FakeQuantMinMaxPerChannelUpdate)
+@bprop_getters.register(Q.FakeQuantMinMaxPerChannelUpdate)
 def get_bprop_fakequant_with_minmax_per_channel_update(self):
     """Generate bprop for FakeQuantMinMaxPerChannelUpdate for Ascend"""

@@ -76,7 +76,6 @@ from .nn_ops import (LSTM, SGD, Adam, SparseApplyAdam, SparseApplyLazyAdam, Appl
                     ApplyRMSProp, ApplyCenteredRMSProp, BasicLSTMCell, InTopK)
 from .other_ops import (Assign, IOU, BoundingBoxDecode, BoundingBoxEncode,
                         CheckValid, MakeRefKey, Partial, Depend, CheckBprop)
-from ._quant_ops import *
 from .thor_ops import *

 __all__ = [
@@ -69,7 +69,7 @@ class FakeQuantPerLayer(PrimitiveWithInfer):
         >>> input_tensor = Tensor(np.random.rand(3, 16, 5, 5), mstype.float32)
         >>> min_tensor = Tensor(np.array([-6]), mstype.float32)
         >>> max_tensor = Tensor(np.array([6]), mstype.float32)
-        >>> output_tensor = P.FakeQuantPerLayer(num_bits=8)(input_tensor, min_tensor, max_tensor)
+        >>> output_tensor = FakeQuantPerLayer(num_bits=8)(input_tensor, min_tensor, max_tensor)
     """
     support_quant_bit = [4, 7, 8]

@@ -129,7 +129,7 @@ class FakeQuantPerLayerGrad(PrimitiveWithInfer):
     Performs grad of FakeQuantPerLayerGrad operation.

     Examples:
-        >>> fake_min_max_grad = P.FakeQuantPerLayerGrad()
+        >>> fake_min_max_grad = FakeQuantPerLayerGrad()
         >>> dout = Tensor(np.array([[-2.3, 1.2], [5.7, 0.2]]), mindspore.float32)
         >>> input_x = Tensor(np.array([[18, -23], [0.2, 6]]), mindspore.float32)
         >>> _min = Tensor(np.array([-4]), mindspore.float32)
@@ -206,7 +206,7 @@ class FakeQuantPerChannel(PrimitiveWithInfer):
         - Tensor, has the same type as input.

     Examples:
-        >>> fake_quant = P.FakeQuantPerChannel()
+        >>> fake_quant = FakeQuantPerChannel()
         >>> input_x = Tensor(np.array([3, 4, 5, -2, -3, -1]).reshape(3, 2), mindspore.float32)
         >>> _min = Tensor(np.linspace(-2, 2, 12).reshape(3, 2, 2), mindspore.float32)
         >>> _max = Tensor(np.linspace(8, 12, 12).reshape(3, 2, 2), mindspore.float32)
@@ -275,7 +275,7 @@ class FakeQuantPerChannelGrad(PrimitiveWithInfer):
     Performs grad of FakeQuantPerChannelGrad operation.

     Examples:
-        >>> fqmmpc_grad = P.FakeQuantPerChannelGrad()
+        >>> fqmmpc_grad = FakeQuantPerChannelGrad()
         >>> input_x = Tensor(np.random.randint(-4, 4, (2, 3, 4)), mindspore.float32)
         >>> dout = Tensor(np.random.randint(-2, 2, (2, 3, 4)), mindspore.float32)
         >>> _min = Tensor(np.random.randint(-8, 2, (2, 3, 4)), mindspore.float32)
@@ -858,7 +858,7 @@ class FakeQuantMinMaxPerLayerUpdate(PrimitiveWithInfer):
         >>> input_tensor = Tensor(np.random.rand(3, 16, 5, 5), mstype.float32)
         >>> min_tensor = Tensor(np.array([-6]), mstype.float32)
         >>> max_tensor = Tensor(np.array([6]), mstype.float32)
-        >>> output_tensor = P.FakeQuantWithMinMax(num_bits=8)(input_tensor, min_tensor, max_tensor)
+        >>> output_tensor = FakeQuantWithMinMax(num_bits=8)(input_tensor, min_tensor, max_tensor)
     """
     support_quant_bit = [4, 7, 8]

@@ -932,7 +932,7 @@ class FakeQuantMinMaxPerChannelUpdate(PrimitiveWithInfer):
         >>> x = Tensor(np.random.rand(3, 16, 5, 5), mstype.float32)
         >>> min = Tensor(np.random.uniform(-1, 1, size=16), mstype.float32)
         >>> max = Tensor(np.random.uniform(-1, 1, size=16), mstype.float32)
-        >>> output_tensor = P.FakeQuantWithMinMax(num_bits=8)(x, min, max)
+        >>> output_tensor = FakeQuantWithMinMax(num_bits=8)(x, min, max)
     """
     support_quant_bit = [4, 7, 8]

@@ -253,7 +253,7 @@ def convert_quant_network(network,
         symmetric (bool): Quantization algorithm use symmetric or not. Default: False.
         narrow_range (bool): Quantization algorithm use narrow range or not. Default: False.

-    returns:
+    Returns:
         Cell, Network which has change to aware quantization training network.
     """
     net = ConvertToQuantNetwork(
@@ -21,6 +21,7 @@ import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore.common.api import ms_function
 from mindspore.ops import operations as P
+from mindspore.ops.operations import _quant_ops as Q

 context.set_context(device_target='GPU')

@@ -28,7 +29,7 @@ context.set_context(device_target='GPU')
 class Net(nn.Cell):
     def __init__(self):
         super(Net, self).__init__()
-        self.op = P.BatchNormFold2(100000)
+        self.op = Q.BatchNormFold2(100000)

     @ms_function
     def construct(self, x, beta, gamma, batch_std, batch_mean, running_std, running_mean, current_step):
@@ -20,7 +20,7 @@ import mindspore.context as context
 import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore.common.api import ms_function
-from mindspore.ops import operations as P
+from mindspore.ops.operations import _quant_ops as Q

 context.set_context(device_target='GPU')

@@ -28,7 +28,7 @@ context.set_context(device_target='GPU')
 class Net(nn.Cell):
     def __init__(self):
         super(Net, self).__init__()
-        self.op = P.BatchNormFoldGrad(freeze_bn=10)
+        self.op = Q.BatchNormFoldGrad(freeze_bn=10)

     @ms_function
     def construct(self, d_batch_mean, d_batch_std, x, batch_mean, batch_std, current_step):
@@ -20,7 +20,7 @@ import mindspore.context as context
 import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore.common.api import ms_function
-from mindspore.ops import operations as P
+from mindspore.ops.operations import _quant_ops as Q

 context.set_context(device_target='GPU')

@@ -28,7 +28,7 @@ context.set_context(device_target='GPU')
 class Net(nn.Cell):
     def __init__(self):
         super(Net, self).__init__()
-        self.op = P.BatchNormFold(momentum=0.9, freeze_bn=10)
+        self.op = Q.BatchNormFold(momentum=0.9, freeze_bn=10)

     @ms_function
     def construct(self, x, mean, variance, current_step):
@@ -20,7 +20,7 @@ import mindspore.context as context
 import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore.common.api import ms_function
-from mindspore.ops import operations as P
+from mindspore.ops.operations import _quant_ops as Q

 context.set_context(device_target='GPU')

@@ -28,7 +28,7 @@ context.set_context(device_target='GPU')
 class Net(nn.Cell):
     def __init__(self):
         super(Net, self).__init__()
-        self.op_w = P.CorrectionMulGrad()
+        self.op_w = Q.CorrectionMulGrad()

     @ms_function
     def construct(self, dy, x, batch_std, running_std):
@@ -20,7 +20,7 @@ import mindspore.context as context
 import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore.common.api import ms_function
-from mindspore.ops import operations as P
+from mindspore.ops.operations import _quant_ops as Q

 context.set_context(device_target='GPU')

@@ -28,7 +28,7 @@ context.set_context(device_target='GPU')
 class Net(nn.Cell):
     def __init__(self):
         super(Net, self).__init__()
-        self.op = P.CorrectionMul()
+        self.op = Q.CorrectionMul()

     @ms_function
     def construct(self, x, batch_var, moving_var):
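Mirroring the docstring examples updated above, a small usage sketch of one relocated primitive on its new import path (illustrative only; assumes a GPU or Ascend device target where FakeQuantPerLayer is supported):

    import numpy as np
    from mindspore import Tensor
    from mindspore.ops.operations import _quant_ops as Q

    # Build the per-layer fake-quant primitive from its new home and apply it
    # to a dummy activation with scalar min/max, as in the updated docstring.
    fake_quant = Q.FakeQuantPerLayer(num_bits=8)
    x = Tensor(np.random.rand(3, 16, 5, 5).astype(np.float32))
    x_min = Tensor(np.array([-6], dtype=np.float32))
    x_max = Tensor(np.array([6], dtype=np.float32))
    output = fake_quant(x, x_min, x_max)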