forked from mindspore-Ecosystem/mindspore

Clean up the view code

parent 295e55aec7
commit f2e34b2eaf
@@ -121,8 +121,6 @@ Status MapOp::GenerateWorkerJob(const std::unique_ptr<MapWorkerJob> *worker_job)
   for (size_t i = 0; i < tfuncs_.size(); i++) {
     // Currently we only have CPU as the device target
     // In the future, we will have heuristic or control from user to select target device
-    // MapTargetDevice target_device;
-    // RETURN_IF_NOT_OK(SelectTarget(tfuncs_[i], &target_device));
     MapTargetDevice target_device = MapTargetDevice::kCpu;

     switch (target_device) {
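Note: the kept lines hard-code the CPU target that drives the switch above. A minimal standalone sketch of that switch-on-target pattern follows; the Job/CpuJob/BuildJob names are illustrative stand-ins rather than MindSpore's API, and only MapTargetDevice::kCpu comes from the hunk itself.

#include <iostream>
#include <memory>

// Hypothetical stand-ins for illustration only.
enum class MapTargetDevice { kCpu, kGpu, kAscend };

struct Job {
  virtual ~Job() = default;
  virtual void Run() = 0;
};

struct CpuJob : Job {
  void Run() override { std::cout << "running transforms on CPU\n"; }
};

std::unique_ptr<Job> BuildJob(MapTargetDevice target) {
  switch (target) {
    case MapTargetDevice::kCpu:
      return std::make_unique<CpuJob>();
    default:
      // Until a heuristic or user control exists, everything falls back to CPU,
      // mirroring the hard-coded kCpu in the hunk above.
      return std::make_unique<CpuJob>();
  }
}

int main() {
  MapTargetDevice target_device = MapTargetDevice::kCpu;
  BuildJob(target_device)->Run();
}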
@@ -222,7 +220,6 @@ Status MapOp::operator()() {
       RETURN_IF_NOT_OK(child_[0]->GetNextBuffer(&buff, 0));
     }
     // End() is commented out because it might never be called due to the lack of EOF when EpochCtrl is -1
-    // RETURN_IF_NOT_OK(callback_manager_.End(CallbackParam(op_current_epochs_, ep_step, total_step)));
     // Handle eof logic, this code might never be reached if epoch_ctrl = -1.
     std::unique_ptr<MapWorkerJob> worker_job = std::make_unique<MapWorkerJob>(std::move(buff));
     RETURN_IF_NOT_OK(local_queues_[num_buf++ % num_workers_]->Add(std::move(worker_job)));
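Note: the last kept line spreads work by round-robin indexing into per-worker queues. A minimal sketch of that pattern with generic containers (std::deque and strings stand in for the real Queue and MapWorkerJob types):

#include <cstddef>
#include <deque>
#include <iostream>
#include <string>
#include <vector>

int main() {
  const size_t num_workers = 3;
  std::vector<std::deque<std::string>> local_queues(num_workers);

  size_t num_buf = 0;
  for (int i = 0; i < 7; ++i) {
    // Same idea as local_queues_[num_buf++ % num_workers_]->Add(...):
    // buffer i goes to worker (i % num_workers), so load spreads evenly.
    local_queues[num_buf++ % num_workers].push_back("buffer-" + std::to_string(i));
  }

  for (size_t w = 0; w < num_workers; ++w) {
    std::cout << "worker " << w << " got " << local_queues[w].size() << " buffers\n";
  }
}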
@@ -155,8 +155,6 @@ Status Sampler::AddChild(std::shared_ptr<Sampler> child) {

   child_.push_back(child);

-  // doesn't work, protected?
-  // child->AddParent(this);
   return Status::OK();
 }

@@ -51,7 +51,6 @@ Status GraphDataServer::Init() {
 #else
   set_state(kGdsInitializing);
   RETURN_IF_NOT_OK(async_server_->Run());
-  // RETURN_IF_NOT_OK(InitGraphDataImpl());
   RETURN_IF_NOT_OK(tg_->CreateAsyncTask("init graph data impl", std::bind(&GraphDataServer::InitGraphDataImpl, this)));
   for (int32_t i = 0; i < num_workers_; ++i) {
     RETURN_IF_NOT_OK(
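Note: the kept CreateAsyncTask call schedules InitGraphDataImpl through std::bind. A minimal sketch of binding a member function and running it off-thread; std::async stands in for the real task-group API, and the class name here is hypothetical.

#include <functional>
#include <future>
#include <iostream>

class GraphDataServerLike {  // illustrative stand-in, not the real GraphDataServer
 public:
  int InitGraphDataImpl() {
    std::cout << "initializing graph data...\n";
    return 0;  // 0 == success, mirroring a Status-style return
  }
};

int main() {
  GraphDataServerLike server;
  // std::bind(&Class::Method, &instance) yields a zero-argument callable,
  // which is what a "create async task" style API typically expects.
  auto task = std::bind(&GraphDataServerLike::InitGraphDataImpl, &server);
  auto fut = std::async(std::launch::async, task);
  std::cout << "init returned " << fut.get() << "\n";
}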
@@ -101,7 +101,6 @@ grpc::Status GraphDataServiceImpl::ClientUnRegister(grpc::ServerContext *context

 grpc::Status GraphDataServiceImpl::GetGraphData(grpc::ServerContext *context, const GnnGraphDataRequestPb *request,
                                                 GnnGraphDataResponsePb *response) {
-  // MS_LOG(INFO) << "#### receive GetGraphData:" << request->op_name();
   Status s;
   auto iter = g_get_graph_data_func_.find(request->op_name());
   if (iter != g_get_graph_data_func_.end()) {
@@ -115,7 +114,6 @@ grpc::Status GraphDataServiceImpl::GetGraphData(grpc::ServerContext *context, co
   } else {
     response->set_error_msg("Invalid op name.");
   }
-  // MS_LOG(INFO) << "#### end receive GetGraphData:" << request->op_name();
   return ::grpc::Status::OK;
 }

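Note: GetGraphData dispatches on request->op_name() through the g_get_graph_data_func_ lookup and reports "Invalid op name." on a miss. A minimal sketch of that name-to-handler map; the op names and handler bodies below are made up for illustration.

#include <functional>
#include <iostream>
#include <string>
#include <unordered_map>

using Handler = std::function<int(const std::string &)>;

int main() {
  // Stand-in for g_get_graph_data_func_: op name -> handler.
  std::unordered_map<std::string, Handler> handlers = {
      {"GET_ALL_NODES", [](const std::string &req) { std::cout << "all nodes for " << req << "\n"; return 0; }},
      {"GET_ALL_EDGES", [](const std::string &req) { std::cout << "all edges for " << req << "\n"; return 0; }},
  };

  for (const std::string op_name : {"GET_ALL_NODES", "BOGUS_OP"}) {
    auto iter = handlers.find(op_name);
    if (iter != handlers.end()) {
      iter->second("request");
    } else {
      std::cout << "Invalid op name: " << op_name << "\n";
    }
  }
}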
@@ -40,6 +40,7 @@ LiteMat::LiteMat(int width, LDataType data_type) {
   dims_ = 0;
   data_type_ = LDataType::UINT8;
   ref_count_ = 0;
+  size_ = 0;
   Init(width, data_type);
 }

@@ -53,6 +54,7 @@ LiteMat::LiteMat(int width, int height, LDataType data_type) {
   dims_ = 0;
   data_type_ = LDataType::UINT8;
   ref_count_ = 0;
+  size_ = 0;
   Init(width, height, data_type);
 }

@@ -66,6 +68,7 @@ LiteMat::LiteMat(int width, int height, int channel, LDataType data_type) {
   dims_ = 0;
   data_type_ = LDataType::UINT8;
   ref_count_ = 0;
+  size_ = 0;
   Init(width, height, channel, data_type);
 }

@@ -87,6 +90,7 @@ LiteMat::LiteMat(const LiteMat &m) {
   dims_ = m.dims_;
   data_type_ = m.data_type_;
   ref_count_ = m.ref_count_;
+  size_ = 0;
   if (ref_count_) {
     addRef(ref_count_, 1);
   }
@@ -111,6 +115,7 @@ LiteMat &LiteMat::operator=(const LiteMat &m) {
   dims_ = m.dims_;
   data_type_ = m.data_type_;
   ref_count_ = m.ref_count_;
+  size_ = 0;
   return *this;
 }

@@ -180,6 +185,7 @@ void LiteMat::Release() {
   channel_ = 0;
   c_step_ = 0;
   ref_count_ = 0;
+  size_ = 0;
 }

 void *LiteMat::AlignMalloc(unsigned int size) {
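Note: each LiteMat hunk adds size_ = 0; so that every constructor, assignment, and Release path leaves the member in a defined state, while the copy constructor bumps a shared reference count via addRef. A minimal sketch of that intrusive ref-count idea with simplified fields (not the real LiteMat):

#include <atomic>
#include <iostream>

class Mat {  // simplified stand-in for LiteMat
 public:
  Mat() = default;
  Mat(const Mat &m) : size_(0), ref_count_(m.ref_count_) {
    // Copies share the same counter; every constructor path sets size_ explicitly
    // so no member is left uninitialized.
    if (ref_count_) ref_count_->fetch_add(1);
  }
  ~Mat() { Release(); }

  void Init(int size) {
    size_ = size;
    ref_count_ = new std::atomic<int>(1);
  }

  void Release() {
    // Last owner frees the counter; everyone resets their own fields.
    if (ref_count_ && ref_count_->fetch_sub(1) == 1) delete ref_count_;
    ref_count_ = nullptr;
    size_ = 0;
  }

  int use_count() const { return ref_count_ ? ref_count_->load() : 0; }

 private:
  int size_ = 0;
  std::atomic<int> *ref_count_ = nullptr;
};

int main() {
  Mat a;
  a.Init(16);
  Mat b(a);  // shares the counter, bumps it to 2
  std::cout << "use_count = " << a.use_count() << "\n";  // prints 2
}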
@@ -48,7 +48,6 @@ enum ColumnDataType {
   ColumnNoDataType = 6
 };

-// mapping as {"bytes", "string", "int32", "int64", "float32", "float64"};
 const uint32_t ColumnDataTypeSize[kDataTypes] = {1, 1, 4, 8, 4, 8};

 const std::vector<std::string> ColumnDataTypeNameNormalized = {"uint8", "string", "int32",
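Note: ColumnDataTypeSize and ColumnDataTypeNameNormalized are parallel tables indexed by the ColumnDataType enum value. A minimal sketch of that pattern; the enum constants other than ColumnNoDataType = 6, the value of kDataTypes, and the tail of the name list are assumptions reconstructed from the removed mapping comment.

#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

// Hypothetical reconstruction for illustration only.
enum ColumnDataType {
  ColumnBytes = 0,
  ColumnString = 1,
  ColumnInt32 = 2,
  ColumnInt64 = 3,
  ColumnFloat32 = 4,
  ColumnFloat64 = 5,
  ColumnNoDataType = 6
};

const int kDataTypes = 6;
const uint32_t ColumnDataTypeSize[kDataTypes] = {1, 1, 4, 8, 4, 8};
const std::vector<std::string> ColumnDataTypeNameNormalized = {"uint8", "string", "int32",
                                                               "int64", "float32", "float64"};

int main() {
  ColumnDataType t = ColumnInt64;
  // The enum value doubles as the index into both parallel tables.
  std::cout << ColumnDataTypeNameNormalized[t] << " takes "
            << ColumnDataTypeSize[t] << " bytes\n";
}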
@@ -199,9 +199,6 @@ class ShardReader {
   /// \return null
   void SetAllInIndex(bool all_in_index) { all_in_index_ = all_in_index; }

-  /// \brief get NLP flag
-  bool GetNlpFlag();
-
   /// \brief get all classes
   MSRStatus GetAllClasses(const std::string &category_field, std::set<std::string> &categories);

@@ -264,9 +261,6 @@ class ShardReader {
   MSRStatus CreateTasks(const std::vector<std::tuple<int, int, int, uint64_t>> &row_group_summary,
                         const std::vector<std::shared_ptr<ShardOperator>> &operators);

-  /// \brief set NLP flag
-  void CheckNlp();
-
   /// \brief check if all specified columns are in index table
   void CheckIfColumnInIndex(const std::vector<std::string> &columns);

@@ -257,7 +257,9 @@ std::pair<MSRStatus, sqlite3 *> ShardIndexGenerator::CreateDatabase(int shard_no
     sql += ",INC_" + std::to_string(field_no++) + " INT, " + ret.second + " " + type;
   }
   sql += ", PRIMARY KEY(ROW_ID";
-  for (uint64_t i = 0; i < fields_.size(); ++i) sql += ",INC_" + std::to_string(i);
+  for (uint64_t i = 0; i < fields_.size(); ++i) {
+    sql += ",INC_" + std::to_string(i);
+  }
   sql += "));";
   if (ExecuteSQL(sql, db, "create table successfully.") != SUCCESS) {
     return {FAILED, nullptr};
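Note: the only change in this hunk is adding braces around the single-statement loop, but the loop itself is worth seeing end to end: it appends one INC_&lt;i&gt; column per indexed field to the composite PRIMARY KEY clause. A standalone sketch with made-up table and field names (only the two sql += lines mirror the diff):

#include <iostream>
#include <string>
#include <vector>

int main() {
  // Hypothetical indexed fields; the real list comes from fields_ in ShardIndexGenerator.
  std::vector<std::string> fields = {"label", "file_name"};

  std::string sql = "CREATE TABLE IF NOT EXISTS INDEXES(ROW_ID INT";
  int field_no = 0;
  for (const auto &f : fields) {
    sql += ",INC_" + std::to_string(field_no++) + " INT, " + f + " TEXT";
  }
  sql += ", PRIMARY KEY(ROW_ID";
  for (uint64_t i = 0; i < fields.size(); ++i) {
    sql += ",INC_" + std::to_string(i);  // one INC_<i> per field joins the composite key
  }
  sql += "));";

  std::cout << sql << "\n";
  // CREATE TABLE IF NOT EXISTS INDEXES(ROW_ID INT,INC_0 INT, label TEXT,INC_1 INT,
  //   file_name TEXT, PRIMARY KEY(ROW_ID,INC_0,INC_1));
}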
@@ -35,17 +35,16 @@ Type StringToNum(const std::string &str) {
   return num;
 }

-ShardReader::ShardReader() {
-  task_id_ = 0;
-  deliver_id_ = 0;
-  shard_count_ = 0;
-  n_consumer_ = 0;
-  page_size_ = 0;
-  header_size_ = 0;
-  num_rows_ = 0;
-  total_blob_size_ = 0;
-  num_padded_ = 0;
-}
+ShardReader::ShardReader()
+    : header_size_(0),
+      page_size_(0),
+      shard_count_(0),
+      n_consumer_(0),
+      num_padded_(0),
+      num_rows_(0),
+      total_blob_size_(0),
+      task_id_(0),
+      deliver_id_(0) {}

 std::pair<MSRStatus, std::vector<std::string>> ShardReader::GetMeta(const std::string &file_path, json &meta_data) {
   if (!IsLegalFile(file_path)) {
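Note: the rewritten constructor swaps in-body assignments for a member-initializer list, which initializes each member exactly once and should follow the declaration order of the members. A minimal before/after sketch with a cut-down field set (the real ShardReader has more members):

#include <cstdint>
#include <iostream>

// Cut-down illustration only.
class ReaderBefore {
 public:
  ReaderBefore() {  // members are default-initialized, then assigned in the body
    header_size_ = 0;
    page_size_ = 0;
    task_id_ = 0;
  }
  uint64_t header_size_, page_size_;
  int task_id_;
};

class ReaderAfter {
 public:
  // Member-initializer list: each member is initialized directly, in the
  // order the members are declared in the class.
  ReaderAfter() : header_size_(0), page_size_(0), task_id_(0) {}
  uint64_t header_size_, page_size_;
  int task_id_;
};

int main() {
  ReaderAfter r;
  std::cout << r.header_size_ << " " << r.page_size_ << " " << r.task_id_ << "\n";
}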
@@ -880,13 +879,7 @@ MSRStatus ShardReader::Open(const std::vector<std::string> &file_paths, bool loa
   if (n_consumer < kMinConsumerCount) {
     n_consumer = kMinConsumerCount;
   }
-  vector<std::string> blob_fields = GetBlobFields().second;
-  for (unsigned int i = 0; i < selected_columns.size(); ++i) {
-    if (!std::any_of(blob_fields.begin(), blob_fields.end(),
-                     [&selected_columns, i](std::string item) { return selected_columns[i] == item; })) {
-      selected_columns_.push_back(selected_columns[i]);
-    }
-  }
   selected_columns_ = selected_columns;

   if (CheckColumnList(selected_columns_) == FAILED) {
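Note: the removed block kept only the columns that are not blob fields, using std::any_of with a capturing lambda, and the cleanup replaces it with a plain assignment. The filtering idiom on its own, with made-up sample data:

#include <algorithm>
#include <iostream>
#include <string>
#include <vector>

int main() {
  // Hypothetical inputs; in ShardReader::Open these come from GetBlobFields()
  // and the caller's selected_columns.
  std::vector<std::string> blob_fields = {"image", "mask"};
  std::vector<std::string> selected_columns = {"image", "label", "file_name"};

  std::vector<std::string> non_blob_columns;
  for (unsigned int i = 0; i < selected_columns.size(); ++i) {
    // std::any_of is true if any blob field equals the current column name.
    bool is_blob = std::any_of(blob_fields.begin(), blob_fields.end(),
                               [&selected_columns, i](const std::string &item) {
                                 return selected_columns[i] == item;
                               });
    if (!is_blob) non_blob_columns.push_back(selected_columns[i]);
  }

  for (const auto &c : non_blob_columns) std::cout << c << "\n";  // label, file_name
}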
@@ -16,7 +16,6 @@
 """

 import copy
-# from mindspore._c_dataengine import CacheClient

 from ..core.validator_helpers import type_check, check_uint32, check_uint64

@@ -39,7 +38,6 @@ class DatasetCache:
         self.port = port
         self.prefetch_size = prefetch_size
         # temporary disable cache feature in the current release
-        # self.cache_client = CacheClient(session_id, size, spilling, hostname, port, prefetch_size)
         self.cache_client = None

     def GetStat(self):
@@ -89,7 +89,6 @@ class GraphData:
             while self._graph_data.is_stoped() is not True:
                 time.sleep(1)
         except KeyboardInterrupt:
-            # self._graph_data.stop()
             raise Exception("Graph data server receives KeyboardInterrupt")

     @check_gnn_get_all_nodes
@@ -1235,5 +1235,4 @@ def check_cache_option(cache):
     """Sanity check for cache parameter"""
     if cache is not None:
         # temporary disable cache feature in the current release
-        # type_check(cache, (cache_client.DatasetCache,), "cache")
         raise ValueError("Caching is disabled in the current release")

@@ -12,9 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ==============================================================================
-import mindspore.dataset as ds
 import numpy as np
 import pytest
+import mindspore.dataset as ds

 DATA_FILE = '../data/dataset/testCSV/1.csv'

@@ -16,9 +16,9 @@
 Testing Epoch Control op in DE
 """
 import itertools
-import cv2
 import numpy as np
 import pytest
+import cv2

 import mindspore.dataset as ds
 import mindspore.dataset.vision.c_transforms as vision
@@ -17,11 +17,11 @@ This is the test module for saveOp.
 """
 import os
 from string import punctuation
+import numpy as np
+import pytest
 import mindspore.dataset as ds
 from mindspore import log as logger
 from mindspore.mindrecord import FileWriter
-import numpy as np
-import pytest

 CV_FILE_NAME1 = "../data/mindrecord/testMindDataSet/temp.mindrecord"
 CV_FILE_NAME2 = "../data/mindrecord/testMindDataSet/auto.mindrecord"
@@ -12,8 +12,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ==============================================================================
-import mindspore._c_dataengine as cde
 import numpy as np
+import mindspore._c_dataengine as cde

 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
@@ -18,9 +18,9 @@ import json
 import os
 import itertools
 from enum import Enum
+import numpy as np
 import matplotlib.pyplot as plt
 import matplotlib.patches as patches
-import numpy as np
 # import jsbeautifier
 import mindspore.dataset as ds
 from mindspore import log as logger