forked from mindspore-Ecosystem/mindspore
delete export macro in plugin
Signed-off-by: zhoufeng <zhoufeng54@huawei.com>
parent 19735e8afd
commit 143ccea93f
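For context: `BACKEND_EXPORT` is a symbol-visibility macro, and annotating a class or method with it keeps that symbol exported across shared-library boundaries. The code touched below now lives inside the Ascend plugin and is only referenced from within that same shared object, so the annotation does no work; removing it is the whole of this commit. A minimal sketch of how such a macro is typically defined (the guard name `BUILDING_BACKEND_DLL` is a hypothetical stand-in, not MindSpore's actual build flag):

// Typical export-macro definition (sketch; guard names are assumptions).
#ifndef BACKEND_EXPORT
#if defined(_WIN32)
#ifdef BUILDING_BACKEND_DLL  // hypothetical: defined while compiling the backend DLL itself
#define BACKEND_EXPORT __declspec(dllexport)
#else  // consumers of the DLL import the symbols instead
#define BACKEND_EXPORT __declspec(dllimport)
#endif
#else
// On ELF platforms compiled with -fvisibility=hidden, this re-exposes the symbol.
#define BACKEND_EXPORT __attribute__((visibility("default")))
#endif
#endif

With the macro gone, the affected symbols fall back to the build's default visibility, which is what a self-contained plugin wants.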
@@ -33,7 +33,7 @@
 
 namespace mindspore {
 namespace device {
-class BACKEND_EXPORT AscendDataQueueDynamic : public DataQueue {
+class AscendDataQueueDynamic : public DataQueue {
  public:
   explicit AscendDataQueueDynamic(const std::string &channel_name, const size_t capacity);
   ~AscendDataQueueDynamic() override = default;
@@ -39,7 +39,7 @@ ORIGIN_METHOD(GetDeviceId, int);
 ORIGIN_METHOD(CreateCommForGroup, bool, const std::string &, const std::vector<unsigned int> &);
 ORIGIN_METHOD(DestroyHcclComm, void);
 
-class BACKEND_EXPORT HcclCollectiveGroup {
+class HcclCollectiveGroup {
  public:
   HcclCollectiveGroup(HcclCollectiveGroup const &) = delete;
   HcclCollectiveGroup &operator=(const HcclCollectiveGroup &) = delete;
@@ -108,8 +108,8 @@ class MemoryProfiling {
     return instance;
   }
 
-  BACKEND_EXPORT std::shared_ptr<GraphMemory> AddGraphMemoryNode(uint32_t graph_id);
-  BACKEND_EXPORT std::shared_ptr<GraphMemory> GetGraphMemoryNode(uint32_t graph_id) const;
+  std::shared_ptr<GraphMemory> AddGraphMemoryNode(uint32_t graph_id);
+  std::shared_ptr<GraphMemory> GetGraphMemoryNode(uint32_t graph_id) const;
   void SetDeviceMemSize(uint64_t size) { device_mem_size_ = size; }
   bool MemoryToPB();
   void SaveMemoryProfiling();
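The `return instance;` / `}` context above suggests the usual function-local-static singleton accessor. A sketch of that shape (an assumption about the surrounding code, not copied from the repository) shows why only the out-of-line members ever carried a per-method `BACKEND_EXPORT`:

// Sketch only: the accessor shape implied by the MemoryProfiling hunk above.
#include <cstdint>
#include <memory>

class GraphMemory;  // forward declaration; the real type lives elsewhere

class MemoryProfiling {
 public:
  static MemoryProfiling &GetInstance() {
    static MemoryProfiling instance;  // constructed once; thread-safe since C++11
    return instance;
  }
  // Out-of-line members such as this one are defined in a .cc file, so they
  // were the symbols annotated individually -- the two lines removed above.
  std::shared_ptr<GraphMemory> AddGraphMemoryNode(uint32_t graph_id);

 private:
  MemoryProfiling() = default;
};

Because `GetInstance()` is inline in the header, it never needed an export of its own; only the members defined out of line crossed the library boundary.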
@@ -31,10 +31,10 @@ namespace profiler {
 namespace ascend {
 class ParallelStrategy {
  public:
-  BACKEND_EXPORT static std::shared_ptr<ParallelStrategy> &GetInstance();
+  static std::shared_ptr<ParallelStrategy> &GetInstance();
   ParallelStrategy() = default;
   ~ParallelStrategy() {}
-  BACKEND_EXPORT void DumpProfileParallelStrategy(const FuncGraphPtr &func_graph);
+  void DumpProfileParallelStrategy(const FuncGraphPtr &func_graph);
   void SaveParallelStrategyToFile();
   std::string GetParallelStrategyForReport();
 
@@ -22,7 +22,7 @@
 
 namespace mindspore {
 namespace opt {
-class BACKEND_EXPORT InsertPlaceholderForDynamicRNN : public PatternProcessPass {
+class InsertPlaceholderForDynamicRNN : public PatternProcessPass {
  public:
   explicit InsertPlaceholderForDynamicRNN(bool multigraph = true)
       : PatternProcessPass("add_placeholder_for_dynamic_rnn", multigraph) {}
@@ -26,7 +26,7 @@
 // This pass will split `ClipByNorm` op to smaller ops, such as `square`, `sqrt`, `reducesum` to achieve same function
 namespace mindspore {
 namespace opt {
-class BACKEND_EXPORT ClipByNormFissionGe : public PatternProcessPass {
+class ClipByNormFissionGe : public PatternProcessPass {
  public:
   explicit ClipByNormFissionGe(bool multigraph = true) : PatternProcessPass("clip_by_norm_fission_ge", multigraph) {}
   ~ClipByNormFissionGe() override = default;
@@ -25,7 +25,7 @@
 
 // This pass will split `ClipByNorm` op to smaller ops, such as `square`, `sqrt`, `reducesum` to achieve same function
 namespace mindspore::opt {
-class BACKEND_EXPORT AscendClipByNormFission : public PatternProcessPass {
+class AscendClipByNormFission : public PatternProcessPass {
  public:
   explicit AscendClipByNormFission(bool multigraph = true)
       : PatternProcessPass("ascend_clip_by_norm_fission", multigraph) {}
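For reference, both ClipByNorm fission passes above lean on the same decomposition: the op scales its input by the clip norm over the input's L2 norm, and the norm factors into exactly the smaller ops the in-code comment names (`square`, `reducesum`, `sqrt`). Written out under the standard ClipByNorm semantics (an assumption; not verified against this repository's kernels):

\mathrm{ClipByNorm}(x) = x \cdot \frac{\mathrm{clip\_norm}}{\max\left(\lVert x \rVert_2,\ \mathrm{clip\_norm}\right)}, \qquad \lVert x \rVert_2 = \sqrt{\sum\nolimits_i x_i^2}

so a fission pass can emit Square, then ReduceSum, then Sqrt to materialize \lVert x \rVert_2, followed by a maximum and a division/multiplication for the scaling.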