change package name akg to _akg
This commit is contained in:
parent 268d358a1d
commit 8cd3308a4c
@@ -18,7 +18,7 @@ import sys
 import os
 
 def AKGAddPath():
-    """akg add path."""
+    """_akg add path."""
     pwd = os.path.dirname(os.path.realpath(__file__))
     tvm_path = os.path.realpath(pwd)
     if tvm_path not in sys.path:
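The hunk above is cut off at the `if`; for readability, here is a hedged reconstruction of the whole helper. The body under the `if` is an assumption about the truncated context, not part of this diff:

```python
import os
import sys

def AKGAddPath():
    """_akg add path (sketch; the final line is assumed, not shown in the diff)."""
    pwd = os.path.dirname(os.path.realpath(__file__))
    tvm_path = os.path.realpath(pwd)
    if tvm_path not in sys.path:
        # Assumed continuation: make the bundled tvm/topi importable.
        sys.path.insert(0, tvm_path)
```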
@@ -32,12 +32,12 @@ class AKGMetaPathFinder:
     """class AKGMetaPath finder."""
 
     def find_module(self, fullname, path=None):
-        """method akg find module."""
-        if fullname.startswith("akg.tvm"):
-            rname = fullname[4:]
+        """method _akg find module."""
+        if fullname.startswith("_akg.tvm"):
+            rname = fullname[5:]
             return AKGMetaPathLoader(rname)
-        if fullname.startswith("akg.topi"):
-            rname = fullname[4:]
+        if fullname.startswith("_akg.topi"):
+            rname = fullname[5:]
             return AKGMetaPathLoader(rname)
         return None
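For context: this finder only takes effect once registered on `sys.meta_path`, where it intercepts any `import _akg.tvm...` or `import _akg.topi...` and strips the leading package prefix, which is why the slice widens from `fullname[4:]` (`"akg."`) to `fullname[5:]` (`"_akg."`). A minimal sketch of the mechanism; the loader body and the registration line are assumptions, not part of this diff:

```python
import importlib
import sys

class AKGMetaPathLoader:
    """Hypothetical loader: alias an "_akg."-prefixed name to the real module."""

    def __init__(self, rname):
        self.rname = rname  # stripped name, e.g. "tvm" or "topi.generic"

    def load_module(self, fullname):
        if fullname in sys.modules:
            return sys.modules[fullname]
        module = importlib.import_module(self.rname)
        sys.modules[fullname] = module  # cache under the _akg-prefixed name
        return module

# Assumed registration, done once at package import time:
# sys.meta_path.insert(0, AKGMetaPathFinder())
```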
@@ -14,9 +14,9 @@
 
 """cast"""
 import logging
-import akg.tvm
-from akg.ops.math import cast
-from akg.topi.generic import schedule_elemwise
+import _akg.tvm
+from _akg.ops.math import cast
+from _akg.topi.generic import schedule_elemwise
 
 def Cast(x, dst_type):
     """cast."""
@@ -34,10 +34,10 @@ def gpu_schedule_Cast(outs):
         sch (schedule.Schedule): The created schedule.
     """
     device = 'cuda'
-    ctx = akg.tvm.context(device, 0)
+    ctx = _akg.tvm.context(device, 0)
     if not ctx.exist:
         logging.info("Skip because %s is not enabled", device)
         return None
-    with akg.tvm.target.create(device):
+    with _akg.tvm.target.create(device):
         sch = schedule_elemwise(outs)
     return sch
@@ -15,7 +15,7 @@
 """default schedule function for GPU"""
 from queue import Queue
 
-import akg.tvm as tvm
+import _akg.tvm as tvm
 
 DEFAULT_GPU_THREAD = 1024
 
@@ -31,7 +31,7 @@ def default_schedule(outs):
         sch (schedule.Schedule): The created schedule.
     """
     if not isinstance(outs, tvm.tensor.Tensor) and not isinstance(outs, list):
-        raise ValueError("outs should be list of akg.tvm.tensor.Tensor or akg.tvm.tensor.Tensor")
+        raise ValueError("outs should be list of _akg.tvm.tensor.Tensor or _akg.tvm.tensor.Tensor")
     device = 'cuda'
     ctx = tvm.context(device, 0)
     if not ctx.exist:
@@ -12,9 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 """equal"""
-import akg.tvm
-from akg.ops.math import equal
-from akg.topi.generic import schedule_elemwise
+import _akg.tvm
+from _akg.ops.math import equal
+from _akg.topi.generic import schedule_elemwise
 
 def Equal(x, y):
     """equal."""
@@ -32,9 +32,9 @@ def gpu_schedule_Equal(outs):
         sch (schedule.Schedule): The created schedule.
     """
     device = 'cuda'
-    ctx = akg.tvm.context(device, 0)
+    ctx = _akg.tvm.context(device, 0)
     if not ctx.exist:
         raise SystemError("Skip because %s is not enabled" % device)
-    with akg.tvm.target.create(device):
+    with _akg.tvm.target.create(device):
         sch = schedule_elemwise(outs)
     return sch
@@ -13,8 +13,8 @@
 # limitations under the License.
 
 """mean op compute and schedule"""
-import akg.tvm as tvm
-from akg.ops.math.mean import mean
+import _akg.tvm as tvm
+from _akg.ops.math.mean import mean
 from .default_schedule import DEFAULT_GPU_THREAD
 
 def Mean(x, axis=None, keepdims=True):
@@ -13,9 +13,9 @@
 # limitations under the License.
 
 """mean_grad"""
-import akg.tvm as tvm
-import akg
-from akg.ops.math import mean
+import _akg.tvm as tvm
+import _akg
+from _akg.ops.math import mean
 from .default_schedule import DEFAULT_GPU_THREAD
 
 
@@ -30,7 +30,7 @@ def mean_ad(head, input_shape, axis, keepdims):
     if tensor_b.op.name == "mean_output":
         tensor_b = tensor_b.op.input_tensors[0]
 
-    jacs = list(akg.differentiate(tensor_b, [tensor_a], head))
+    jacs = list(_akg.differentiate(tensor_b, [tensor_a], head))
     return jacs[0]
 
 
@@ -13,9 +13,9 @@
 # limitations under the License.
 
 """mul"""
-import akg.topi as topi
-import akg.tvm as tvm
-from akg.ops.math import mul
+import _akg.topi as topi
+import _akg.tvm as tvm
+from _akg.ops.math import mul
 
 def Mul(x, y):
     """mul."""
@@ -13,9 +13,9 @@
 # limitations under the License.
 
 """relu6"""
-import akg.topi as topi
-import akg.tvm as tvm
-from akg.topi import tag
+import _akg.topi as topi
+import _akg.tvm as tvm
+from _akg.topi import tag
 
 @tvm.tag_scope(tag=tag.ELEMWISE)
 def topi_nn_relu6(x):
@@ -13,8 +13,8 @@
 # limitations under the License.
 
 """relu6 grad"""
-import akg.topi as topi
-import akg.tvm as tvm
+import _akg.topi as topi
+import _akg.tvm as tvm
 
 def ReLU6Grad(y_grad, x):
     """
@@ -13,8 +13,8 @@
 # limitations under the License.
 
 """squeeze"""
-import akg.topi as topi
-import akg.tvm as tvm
+import _akg.topi as topi
+import _akg.tvm as tvm
 
 def Squeeze(x, axis=None):
     """
@@ -13,7 +13,7 @@
 # limitations under the License.
 
 """squeeze grad"""
-import akg.topi as topi
+import _akg.topi as topi
 
 def SqueezeGrad(y_grad, x_shape, axis=None):
     """
@@ -12,9 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 """tile"""
-import akg.tvm
-from akg.ops.array import tile
-from akg.topi.generic import schedule_elemwise
+import _akg.tvm
+from _akg.ops.array import tile
+from _akg.topi.generic import schedule_elemwise
 
 def Tile(x, multiples):
     """tile."""
@@ -31,9 +31,9 @@ def gpu_schedule_Tile(outs):
         sch (schedule.Schedule): The created schedule.
     """
     device = 'cuda'
-    ctx = akg.tvm.context(device, 0)
+    ctx = _akg.tvm.context(device, 0)
     if not ctx.exist:
         raise SystemError("Skip because %s is not enabled" % device)
-    with akg.tvm.target.create(device):
+    with _akg.tvm.target.create(device):
         s = schedule_elemwise(outs)
     return s
@@ -20,9 +20,9 @@ import logging
 import traceback
 import os.path
 from pathlib import Path
-import akg.tvm
-from akg.utils import validation_check as vc_util
-from akg.utils.dsl_create import TensorUtils
+import _akg.tvm
+from _akg.utils import validation_check as vc_util
+from _akg.utils.dsl_create import TensorUtils
 from . import gpu
 from . import op_build
 
@@ -67,7 +67,7 @@ def compilewithjson(json_str):
             tensor_shape = input_desc[0]['shape']
             tensor_shape = (1,) if not tensor_shape else tensor_shape
             vc_util.shape_dtype_max_size_check(tensor_shape)
-            args[input_desc[0]['name']] = akg.tvm.placeholder(
+            args[input_desc[0]['name']] = _akg.tvm.placeholder(
                 shape=tensor_shape, name=input_desc[0]['tensor_name'], dtype=input_desc[0]['data_type'])
             tsr.append(args[input_desc[0]['name']])
         else:
@@ -76,7 +76,7 @@ def compilewithjson(json_str):
                 tensor_shape = tmp_desc['shape']
                 tensor_shape = (1,) if not tensor_shape else tensor_shape
                 vc_util.shape_dtype_max_size_check(tensor_shape)
-                tmp_input.append(akg.tvm.placeholder(
+                tmp_input.append(_akg.tvm.placeholder(
                     shape=tensor_shape, name=tmp_desc['tensor_name'], dtype=tmp_desc['data_type']))
             args[input_desc[0]['name']] = tmp_input
             tsr = tsr + tmp_input
@@ -19,10 +19,10 @@ import types
 import typing
 import logging
 import traceback
-import akg.tvm
-import akg
-from akg import save_gpu_param as gpu_utils
-from akg.utils import validation_check as vc_util
+import _akg.tvm
+import _akg
+from _akg import save_gpu_param as gpu_utils
+from _akg.utils import validation_check as vc_util
 
 MS_CUDA_KERNEL_PATH = "/tmp/cuda_meta/"
 
@@ -38,21 +38,21 @@ def op_build(opnames, computes, args, custom_schedule, device, kernel_name, attr
         return None
 
     schedule_name = 'gpu_schedule_' + opnames[0]
-    schedule_func = getattr(akg.gpu, schedule_name)
+    schedule_func = getattr(_akg.gpu, schedule_name)
     if not isinstance(schedule_func, (types.FunctionType, typing.Callable)):
         logging.error("no schedule func found %s", str(schedule_name))
         return None
 
     ptx_file = os.path.realpath(MS_CUDA_KERNEL_PATH + kernel_name + ".ptx")
     if os.path.exists(ptx_file):
         os.remove(ptx_file)
     os.chmod(ptx_file, 0o600)
     try:
         with open(ptx_file, 'at') as file:
             fcntl.flock(file.fileno(), fcntl.LOCK_EX)
             file.seek(0, 2)
             if file.tell() == 0:
                 s = schedule_func(computes)
-                foo = akg.tvm.build(s, args, device, name=kernel_name)
+                foo = _akg.tvm.build(s, args, device, name=kernel_name)
                 ptx_code = foo.imported_modules[0].get_source("ptx")
                 file.write(ptx_code)
         json_file = os.path.realpath(MS_CUDA_KERNEL_PATH + kernel_name + ".json")
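The unchanged context above also documents the caching scheme `op_build` relies on: the PTX for a kernel is written at most once, guarded by an exclusive `fcntl.flock`, so concurrent compile processes do not clobber each other's output. A standalone sketch of that write-once pattern (the names `write_once` and `produce` are illustrative, not from this repo):

```python
import fcntl

def write_once(path, produce):
    """Write produce() to path unless another process already did."""
    with open(path, 'at') as file:
        fcntl.flock(file.fileno(), fcntl.LOCK_EX)  # block until we own the file
        file.seek(0, 2)                            # jump to the end of the file
        if file.tell() == 0:                       # empty file: we are the first writer
            file.write(produce())                  # e.g. compile and dump the PTX
        # the lock is released when the file is closed
```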
@@ -13,12 +13,12 @@
 # limitations under the License.
 
 """operator dsl function: tile"""
-import akg.tvm
-import akg.topi
-from akg.utils import validation_check as vc_util
+import _akg.tvm
+import _akg.topi
+from _akg.utils import validation_check as vc_util
 
 
-@vc_util.check_input_type(akg.tvm.tensor.Tensor, (list, tuple))
+@vc_util.check_input_type(_akg.tvm.tensor.Tensor, (list, tuple))
 def tile(data, multiples):
     """
     Repeats the data in the specified dimensions according to the multiples.
@@ -32,5 +32,5 @@ def tile(data, multiples):
     """
     vc_util.check_shape(data.shape)
     vc_util.check_int_list(multiples, "multiples")
-    output = akg.topi.tile(data, multiples)
+    output = _akg.topi.tile(data, multiples)
     return output
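`vc_util.check_input_type`, decorating `tile` above and the other dsl functions below, validates argument types before the op body runs; only its call sites change in this diff. A minimal sketch of what such a decorator might look like (an assumption for illustration, not the repo's actual implementation):

```python
from functools import wraps

def check_input_type(*type_specs):
    """Reject calls whose positional args don't match the declared types."""
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            for arg, spec in zip(args, type_specs):
                if not isinstance(arg, spec):  # spec may be a type or a tuple of types
                    raise TypeError("%s expects %s, got %s"
                                    % (func.__name__, spec, type(arg).__name__))
            return func(*args, **kwargs)
        return wrapper
    return decorator
```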
@@ -13,12 +13,12 @@
 # limitations under the License.
 
 """operator dsl function: cast"""
-import akg.tvm
-import akg.topi
-from akg.utils import validation_check as vc_util
+import _akg.tvm
+import _akg.topi
+from _akg.utils import validation_check as vc_util
 
 
-@vc_util.check_input_type(akg.tvm.tensor.Tensor, str)
+@vc_util.check_input_type(_akg.tvm.tensor.Tensor, str)
 def cast(data, dst_type):
     """
     cast data to target type.
@@ -31,6 +31,6 @@ def cast(data, dst_type):
         tvm.tensor.Tensor, type is dst_type.
     """
     vc_util.check_shape(data.shape)
-    out = akg.topi.cast(data, dst_type)
+    out = _akg.topi.cast(data, dst_type)
 
     return out
@@ -13,13 +13,13 @@
 # limitations under the License.
 
 """operator dsl function: equal"""
-import akg.tvm
-import akg.topi
-from akg.utils.dsl_create import produce_shapes
-from akg.utils import validation_check as vc_util
+import _akg.tvm
+import _akg.topi
+from _akg.utils.dsl_create import produce_shapes
+from _akg.utils import validation_check as vc_util
 
 
-@vc_util.check_input_type(akg.tvm.tensor.Tensor, akg.tvm.tensor.Tensor)
+@vc_util.check_input_type(_akg.tvm.tensor.Tensor, _akg.tvm.tensor.Tensor)
 def equal(input1, input2):
     """
     check whether input1 equals to input2.
@@ -42,13 +42,13 @@ def equal(input1, input2):
     dtype = input1.dtype
 
     # get equal compute
-    t_value = akg.tvm.compute(shape, lambda *indice: akg.tvm.const(1, dtype), "T")
-    f_value = akg.tvm.compute(shape, lambda *indice: akg.tvm.const(0, dtype), "F")
+    t_value = _akg.tvm.compute(shape, lambda *indice: _akg.tvm.const(1, dtype), "T")
+    f_value = _akg.tvm.compute(shape, lambda *indice: _akg.tvm.const(0, dtype), "F")
 
-    input1_bro = akg.topi.broadcast_to(input1, shape)
-    input2_bro = akg.topi.broadcast_to(input2, shape)
-    c_out = akg.tvm.compute(shape, lambda *indice: akg.tvm.expr.Select(input1_bro[indice] == input2_bro[indice],
-                                                                       t_value[indice], f_value[indice]), name="C")
-    res = akg.tvm.compute(shape, lambda *indice: c_out(*indice).astype("bool"), name="res")
+    input1_bro = _akg.topi.broadcast_to(input1, shape)
+    input2_bro = _akg.topi.broadcast_to(input2, shape)
+    c_out = _akg.tvm.compute(shape, lambda *indice: _akg.tvm.expr.Select(input1_bro[indice] == input2_bro[indice],
+                                                                         t_value[indice], f_value[indice]), name="C")
+    res = _akg.tvm.compute(shape, lambda *indice: c_out(*indice).astype("bool"), name="res")
 
     return res
@@ -13,14 +13,14 @@
 # limitations under the License.
 
 """operator dsl function: mean"""
-import akg.topi
-import akg.tvm
-from akg.utils import format_transform as ft_util
-from akg.utils import validation_check as vc_util
-from akg.ops.math import sum
+import _akg.topi
+import _akg.tvm
+from _akg.utils import format_transform as ft_util
+from _akg.utils import validation_check as vc_util
+from _akg.ops.math import sum
 
 
-@vc_util.check_input_type(akg.tvm.tensor.Tensor, (list, tuple, int, type(None)), (bool, type(None)))
+@vc_util.check_input_type(_akg.tvm.tensor.Tensor, (list, tuple, int, type(None)), (bool, type(None)))
 def mean(data, axis=None, keepdims=False):
     """
     Computes the mean of the values of a Tensor over the whole dataset.
@@ -42,6 +42,6 @@ def mean(data, axis=None, keepdims=False):
     for i in axis:
         count *= shape[i]
     output, _ = sum.sum_value(data, axis, keepdims)
-    res = akg.topi.divide(output, count)
+    res = _akg.topi.divide(output, count)
 
     return res
@@ -13,11 +13,11 @@
 # limitations under the License.
 
 """operator dsl function: mul"""
-import akg.topi
-from akg.utils import validation_check as vc_util
+import _akg.topi
+from _akg.utils import validation_check as vc_util
 
 
-@vc_util.check_input_type(akg.tvm.tensor.Tensor, akg.tvm.tensor.Tensor)
+@vc_util.check_input_type(_akg.tvm.tensor.Tensor, _akg.tvm.tensor.Tensor)
 def mul(l_input, r_input):
     """
     Calculate x * y element-wise.
@@ -38,6 +38,6 @@ def mul(l_input, r_input):
     vc_util.check_shape(shape2)
     vc_util.auto_broadcast_check(shape1, shape2)
     vc_util.elemwise_dtype_check(l_input.dtype, r_input.dtype)
-    output = akg.topi.multiply(l_input, r_input)
+    output = _akg.topi.multiply(l_input, r_input)
 
     return output
@@ -13,12 +13,12 @@
 # limitations under the License.
 
 """operator dsl function: sub"""
-import akg.topi
-import akg.tvm
-from akg.utils import validation_check as vc_util
+import _akg.topi
+import _akg.tvm
+from _akg.utils import validation_check as vc_util
 
 
-@vc_util.check_input_type(akg.tvm.tensor.Tensor, akg.tvm.tensor.Tensor)
+@vc_util.check_input_type(_akg.tvm.tensor.Tensor, _akg.tvm.tensor.Tensor)
 def sub(data1, data2):
     """
     Computes data1 - data2 elementwise, broadcast is supported.
@@ -35,6 +35,6 @@ def sub(data1, data2):
     vc_util.check_shape(data2.shape)
     vc_util.auto_broadcast_check(data1.shape, data2.shape)
 
-    res = akg.topi.subtract(data1, data2)
+    res = _akg.topi.subtract(data1, data2)
 
     return res
@@ -14,13 +14,13 @@
 
 """operator dsl function: sum"""
 
-import akg.topi
-import akg.tvm
-from akg.utils import format_transform as ft_util
-from akg.utils import validation_check as vc_util
+import _akg.topi
+import _akg.tvm
+from _akg.utils import format_transform as ft_util
+from _akg.utils import validation_check as vc_util
 
 
-@vc_util.check_input_type(akg.tvm.tensor.Tensor, (list, tuple, int, type(None)), (bool, type(None)))
+@vc_util.check_input_type(_akg.tvm.tensor.Tensor, (list, tuple, int, type(None)), (bool, type(None)))
 def sum_value(inputs, axis=None, keepdims=False):
     """
     Compute the sum of elements across dimensions of a tensor.
@@ -38,8 +38,8 @@ def sum_value(inputs, axis=None, keepdims=False):
     vc_util.check_shape(inputs.shape)
 
     if not axis:
-        output = akg.topi.identity(inputs)
+        output = _akg.topi.identity(inputs)
     else:
-        output = akg.topi.sum(inputs, axis=axis, keepdims=keepdims)
+        output = _akg.topi.sum(inputs, axis=axis, keepdims=keepdims)
 
     return output
@@ -15,9 +15,9 @@
 """save gpu param"""
 import os
 import hashlib
-import akg.tvm
-from akg.tvm import schedule
-from akg.utils import validation_check as vc_util
+import _akg.tvm
+from _akg.tvm import schedule
+from _akg.utils import validation_check as vc_util
 
 
 def get_dim(dim, axis=True):
@@ -66,7 +66,7 @@ def save_gpu_params(s, args, kernel_info):
     ptx_code = kernel_info[0]
     file_name = kernel_info[1]
     kernel_name = kernel_info[2]
-    ir = str(akg.tvm.lower(s, args, simple_mode=True))
+    ir = str(_akg.tvm.lower(s, args, simple_mode=True))
     file_path = os.path.realpath(file_name)
     if os.path.exists(file_path):
         os.remove(file_path)
@@ -13,8 +13,8 @@
 # limitations under the License.
 
 """dsl create helping function"""
-import akg
-from akg.utils import format_transform as ft_util
+import _akg
+from _akg.utils import format_transform as ft_util
 
 class TensorUtils:
     """Class for creating tensor."""
@@ -33,11 +33,11 @@ class TensorUtils:
         """update tensor attrs."""
         tensor_attrs = cls.get_tensor_attrs(tensor)
         tensor_attrs.update(attrs)
-        tensor = akg.tvm.compute(tensor.shape,
-                                 lambda *indice: tensor[indice],
-                                 name=tensor.op.name,
-                                 tag=tensor.op.tag,
-                                 attrs=tensor_attrs)
+        tensor = _akg.tvm.compute(tensor.shape,
+                                  lambda *indice: tensor[indice],
+                                  name=tensor.op.name,
+                                  tag=tensor.op.tag,
+                                  attrs=tensor_attrs)
         return tensor
 
     @classmethod
@@ -61,7 +61,7 @@ class TensorUtils:
             raise RuntimeError("Shape of the input_tensor and the output_tensor should be equal, "
                                "but got %s and %s"%(input_tensor_shape, output_tensor_shape))
         output_tensor = cls.update_tensor_attrs(output_tensor, {cls.CREATE_SCH_ONLY: 1})
-        data_buf = akg.tvm.decl_buffer(input_tensor.shape, input_tensor.dtype, name=buffer_name)
+        data_buf = _akg.tvm.decl_buffer(input_tensor.shape, input_tensor.dtype, name=buffer_name)
         binds_info = {input_tensor: data_buf, output_tensor: data_buf}
         return output_tensor, binds_info
 
@@ -13,7 +13,7 @@
 # limitations under the License.
 
 """format transform function"""
-import akg
+import _akg
 
 def refine_reduce_axis(input, axis):
     """make reduce axis legal."""
@@ -43,11 +43,11 @@ def refine_reduce_axis(input, axis):
 
 
 def get_shape_from_tensor(data):
-    """translate akg.tvm.shape to list type in python."""
+    """translate _akg.tvm.shape to list type in python."""
     tvm_shape = data.shape
     py_shape = []
     for i in tvm_shape:
-        if isinstance(i, akg.tvm.expr.Var):
+        if isinstance(i, _akg.tvm.expr.Var):
             py_shape.append(i)
         else:
             py_shape.append(i.value)
@@ -55,10 +55,10 @@ def get_shape_from_tensor(data):
 
 
 def tvm_shape_to_list(tvm_shape):
-    """translate akg.tvm.shape to list type in python."""
+    """translate _akg.tvm.shape to list type in python."""
     py_shape = []
     for i in tvm_shape:
-        if isinstance(i, akg.tvm.expr.Var):
+        if isinstance(i, _akg.tvm.expr.Var):
             py_shape.append(i)
         else:
             py_shape.append(i.value)
@@ -67,9 +67,9 @@ def tvm_shape_to_list(tvm_shape):
 
 def get_shape(data):
     """get shape and save it as list."""
-    if isinstance(data, akg.tvm.tensor.Tensor):
+    if isinstance(data, _akg.tvm.tensor.Tensor):
         shape = get_shape_from_tensor(data)
-    elif isinstance(data, akg.tvm.container.Array):
+    elif isinstance(data, _akg.tvm.container.Array):
         shape = tvm_shape_to_list(data)
     elif isinstance(data, int):
         shape = [data]
@@ -14,7 +14,7 @@
 
 """validation check functions"""
 from functools import wraps, reduce
-from akg.utils.format_transform import get_shape
+from _akg.utils.format_transform import get_shape
 
 MAX_DATA_SIZE = 2 ** 31
 
@@ -32,7 +32,7 @@ def _compiletask(platform, *jsons):
 
     """
    if platform == "AKG":
-        p = __import__("akg", globals(), locals(), ['ms'], 0)
+        p = __import__("_akg", globals(), locals(), ['ms'], 0)
        func = getattr(p.ms, "compilewithjson")
        for json_item in jsons:
            res = func(json_item)
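This call site is why the rename must be applied by hand here: `__import__` receives the package name as a string literal, so no import statement mentions `akg` and a purely syntactic rename of imports would miss it. For reference, the two spellings below are equivalent ways to resolve `_akg.ms.compilewithjson` dynamically (assuming an importable `_akg` package with an `ms` submodule, as in this repo):

```python
import importlib

# Style used in the diff: returns the top-level _akg package,
# with the 'ms' submodule loaded because it appears in the fromlist.
p = __import__("_akg", globals(), locals(), ['ms'], 0)
func = getattr(p.ms, "compilewithjson")

# Modern equivalent of the same lookup.
ms = importlib.import_module("_akg.ms")
func = getattr(ms, "compilewithjson")
```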
@ -37,7 +37,7 @@ constexpr auto kProcessorCuda = "cuda";
|
|||
constexpr auto kJsonSuffix = ".json";
|
||||
constexpr auto kInfoSuffix = ".info";
|
||||
constexpr unsigned int AUTODIFF_COMPILE_OVERTIME = 600;
|
||||
constexpr auto kAkgModule = "akg";
|
||||
constexpr auto kAkgModule = "_akg";
|
||||
constexpr auto kArgDataformat = "data_format";
|
||||
|
||||
const std::vector<std::string> support_devices = {"aicore", "aicpu", "cuda"};
|
||||
|
|
|
@@ -77,11 +77,11 @@ cp -rf "${BUILD_PATH}/../mindspore/ops" "${PACKAGE_PATH}/mindspore"
 cp -rf "${BUILD_PATH}/../mindspore/communication" "${PACKAGE_PATH}/mindspore"
 
 if [[ "X$2" = "Xgpu" ]]; then
-    echo "package akg when gpu enable."
-    cp -rf "${BASEPATH}/mindspore/akg" "${PACKAGE_PATH}"
+    echo "package _akg when gpu enable."
+    cp -rf "${BASEPATH}/mindspore/_akg" "${PACKAGE_PATH}"
     if [[ -d "${BUILD_PATH}/mindspore/incubator-tvm" ]]; then
-        cp -rf "${BUILD_PATH}/mindspore/incubator-tvm/topi/python/topi" "${PACKAGE_PATH}/akg"
-        cp -rf "${BUILD_PATH}/mindspore/incubator-tvm/python/tvm" "${PACKAGE_PATH}/akg"
+        cp -rf "${BUILD_PATH}/mindspore/incubator-tvm/topi/python/topi" "${PACKAGE_PATH}/_akg"
+        cp -rf "${BUILD_PATH}/mindspore/incubator-tvm/python/tvm" "${PACKAGE_PATH}/_akg"
     fi
 fi
 
setup.py
@@ -137,7 +137,7 @@ class BuildPy(build_py):
         super().run()
         mindspore_dir = os.path.join(pkg_dir, 'build', 'lib', 'mindspore')
         update_permissions(mindspore_dir)
-        mindspore_dir = os.path.join(pkg_dir, 'build', 'lib', 'akg')
+        mindspore_dir = os.path.join(pkg_dir, 'build', 'lib', '_akg')
         update_permissions(mindspore_dir)
 