fix warning

shenwei41 2021-08-09 10:07:04 +08:00
parent 4d71b3bd76
commit 59efbb88ec
40 changed files with 239 additions and 140 deletions

View File

@@ -61,27 +61,27 @@ Status PyDSCallback::ExecutePyfunc(py::function f, const CallbackParam &cb_param
   }
   return Status::OK();
 }
-void PyDSCallback::setBegin(py::function f) {
+void PyDSCallback::setBegin(const py::function &f) {
   begin_func_ = f;
   begin_needed_ = true;
 }
-void PyDSCallback::setEnd(py::function f) {
+void PyDSCallback::setEnd(const py::function &f) {
   end_func_ = f;
   end_needed_ = true;
 }
-void PyDSCallback::setEpochBegin(py::function f) {
+void PyDSCallback::setEpochBegin(const py::function &f) {
   epoch_begin_func_ = f;
   epoch_begin_needed_ = true;
 }
-void PyDSCallback::setEpochEnd(py::function f) {
+void PyDSCallback::setEpochEnd(const py::function &f) {
   epoch_end_func_ = f;
   epoch_end_needed_ = true;
 }
-void PyDSCallback::setStepBegin(py::function f) {
+void PyDSCallback::setStepBegin(const py::function &f) {
   step_begin_func_ = f;
   step_begin_needed_ = true;
 }
-void PyDSCallback::setStepEnd(py::function f) {
+void PyDSCallback::setStepEnd(const py::function &f) {
   step_end_func_ = f;
   step_end_needed_ = true;
 }

View File

@@ -44,12 +44,12 @@ class PyDSCallback : public DSCallback {
   ~PyDSCallback() = default;
-  void setBegin(py::function f);
-  void setEnd(py::function f);
-  void setEpochBegin(py::function f);
-  void setEpochEnd(py::function f);
-  void setStepBegin(py::function f);
-  void setStepEnd(py::function f);
+  void setBegin(const py::function &f);
+  void setEnd(const py::function &f);
+  void setEpochBegin(const py::function &f);
+  void setEpochEnd(const py::function &f);
+  void setStepBegin(const py::function &f);
+  void setStepEnd(const py::function &f);
   /// \brief actual callback function for begin, needs to be overridden in the derived class
   /// \param cb_param, callback parameter passed in from DatasetOp when calling the callback

View File

@@ -508,7 +508,9 @@ Status Tensor::GetItemPtr(uchar **ptr, const std::vector<dsize_t> &index, offset
     RETURN_IF_NOT_OK(shape_.ToFlatIndex(index, &flat_idx));
     offset_t length_temp = 0;
     RETURN_IF_NOT_OK(GetStringAt(flat_idx, ptr, &length_temp));
-    if (length != nullptr) *length = length_temp;
+    if (length != nullptr) {
+      *length = length_temp;
+    }
     return Status::OK();
   } else {
     std::string err = "data type not compatible";
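The brace fix above is a recurring pattern in this commit: a single-statement if body on the same line trips readability checkers and invites dangling-else bugs when the code is later extended. A small sketch with hypothetical names:

// Sketch of the braces-around-single-statement fix applied above.
void CopyLength(int *length, int length_temp) {
  if (length != nullptr) {  // was: if (length != nullptr) *length = length_temp;
    *length = length_temp;
  }
}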

View File

@@ -68,7 +68,7 @@ class Tensor {
   Tensor(const Tensor &other) = delete;
   Tensor &operator=(const Tensor &other) = delete;
-  /// Create a tensor using shape and type. This constructor should not be used directly, use CreateFromTensor instead
+  /// Create a tensor using shape and type. This constructor should not be used directly, use CreateFromTensor instead.
   /// \note The shape and type information should be known and valid
   /// \note The constructor does not allocate data
   /// \param shape TensorShape

View File

@@ -73,7 +73,7 @@ Status CacheClientGreeter::DoServiceStop() {
     void *tag;
     while (cq_.Next(&tag, &success)) {
       auto r = reinterpret_cast<CacheClientRequestTag *>(tag);
-      req_.erase(r->seqNo_);
+      (void)req_.erase(r->seqNo_);
     }
   }
   return Status::OK();
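The (void) cast above is another pattern this commit applies throughout: std::map::erase returns the number of erased elements, and static analyzers flag a silently ignored return value, so the cast documents that the discard is deliberate. A self-contained sketch with hypothetical names:

#include <cstdint>
#include <map>

// Minimal sketch of the (void)-discard pattern: erase() returns size_type,
// and the cast tells the analyzer (and the reader) we don't need it.
void RemoveTag(std::map<int64_t, int> *requests, int64_t seq_no) {
  (void)requests->erase(seq_no);
}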

View File

@@ -408,7 +408,7 @@ void DataSchema::Print(std::ostream &out) const {
 // Adds a column descriptor to the schema
 Status DataSchema::AddColumn(const ColDescriptor &cd) {
   // Sanity check there's not a duplicate name before adding the column
-  for (int32_t i = 0; i < col_descs_.size(); ++i) {
+  for (auto i = 0; i < col_descs_.size(); ++i) {
     if (col_descs_[i].name() == cd.name()) {
       std::ostringstream ss;
       ss << "column name '" << cd.name() << "' already exists in schema.";

View File

@@ -118,7 +118,7 @@ bool AlbumOp::CheckImageType(const std::string &file_name, bool *valid) {
   return true;
 }
-Status AlbumOp::LoadImageTensor(const std::string &image_file_path, uint32_t col_num, TensorRow *row) {
+Status AlbumOp::LoadImageTensor(const std::string &image_file_path, int32_t col_num, TensorRow *row) {
   TensorPtr image;
   std::ifstream fs;
   fs.open(image_file_path, std::ios::binary | std::ios::in);
@@ -168,7 +168,7 @@ Status AlbumOp::LoadImageTensor(const std::string &image_file_path, uint32_t col
   return Status::OK();
 }
-Status AlbumOp::LoadStringArrayTensor(const nlohmann::json &json_obj, uint32_t col_num, TensorRow *row) {
+Status AlbumOp::LoadStringArrayTensor(const nlohmann::json &json_obj, int32_t col_num, TensorRow *row) {
   std::vector<std::string> data = json_obj;
   MS_LOG(INFO) << "String array label found: " << data << ".";
@@ -178,7 +178,7 @@ Status AlbumOp::LoadStringArrayTensor(const nlohmann::json &json_obj, uint32_t c
   return Status::OK();
 }
-Status AlbumOp::LoadStringTensor(const nlohmann::json &json_obj, uint32_t col_num, TensorRow *row) {
+Status AlbumOp::LoadStringTensor(const nlohmann::json &json_obj, int32_t col_num, TensorRow *row) {
   std::string data = json_obj;
   // now we iterate over the elements in json
@@ -189,7 +189,7 @@ Status AlbumOp::LoadStringTensor(const nlohmann::json &json_obj, uint32_t col_nu
   return Status::OK();
 }
-Status AlbumOp::LoadIntArrayTensor(const nlohmann::json &json_obj, uint32_t col_num, TensorRow *row) {
+Status AlbumOp::LoadIntArrayTensor(const nlohmann::json &json_obj, int32_t col_num, TensorRow *row) {
   TensorPtr label;
   // consider templating this function to handle all ints
   if (data_schema_->column(col_num).type() == DataType::DE_INT64) {
@@ -218,7 +218,7 @@ Status AlbumOp::LoadIntArrayTensor(const nlohmann::json &json_obj, uint32_t col_
   return Status::OK();
 }
-Status AlbumOp::LoadFloatArrayTensor(const nlohmann::json &json_obj, uint32_t col_num, TensorRow *row) {
+Status AlbumOp::LoadFloatArrayTensor(const nlohmann::json &json_obj, int32_t col_num, TensorRow *row) {
   TensorPtr float_array;
   // consider templating this function to handle all ints
   if (data_schema_->column(col_num).type() == DataType::DE_FLOAT64) {
@@ -247,7 +247,7 @@ Status AlbumOp::LoadFloatArrayTensor(const nlohmann::json &json_obj, uint32_t co
   return Status::OK();
 }
-Status AlbumOp::LoadIDTensor(const std::string &file, uint32_t col_num, TensorRow *row) {
+Status AlbumOp::LoadIDTensor(const std::string &file, int32_t col_num, TensorRow *row) {
   if (data_schema_->column(col_num).type() == DataType::DE_STRING) {
     TensorPtr id;
     RETURN_IF_NOT_OK(Tensor::CreateScalar<std::string>(file, &id));
@@ -263,7 +263,7 @@ Status AlbumOp::LoadIDTensor(const std::string &file, uint32_t col_num, TensorRo
   return Status::OK();
 }
-Status AlbumOp::LoadEmptyTensor(uint32_t col_num, TensorRow *row) {
+Status AlbumOp::LoadEmptyTensor(int32_t col_num, TensorRow *row) {
   // hack to get the file name without extension, the 1 is to get rid of the backslash character
   TensorPtr empty_tensor;
   RETURN_IF_NOT_OK(Tensor::CreateEmpty(TensorShape({0}), data_schema_->column(col_num).type(), &empty_tensor));
@@ -275,7 +275,7 @@ Status AlbumOp::LoadEmptyTensor(uint32_t col_num, TensorRow *row) {
 // So we actually have to check what type we want to fill the tensor with.
 // Float64 doesn't work with reinterpret cast here. Otherwise we limit the float in the schema to
 // only be float32, seems like a weird limitation to impose
-Status AlbumOp::LoadFloatTensor(const nlohmann::json &json_obj, uint32_t col_num, TensorRow *row) {
+Status AlbumOp::LoadFloatTensor(const nlohmann::json &json_obj, int32_t col_num, TensorRow *row) {
   TensorPtr float_tensor;
   if (data_schema_->column(col_num).type() == DataType::DE_FLOAT64) {
     double data = json_obj;
@@ -291,7 +291,7 @@ Status AlbumOp::LoadFloatTensor(const nlohmann::json &json_obj, uint32_t col_num
 }
 // Loads a tensor with int value, we have to cast the value to type specified in the schema.
-Status AlbumOp::LoadIntTensor(const nlohmann::json &json_obj, uint32_t col_num, TensorRow *row) {
+Status AlbumOp::LoadIntTensor(const nlohmann::json &json_obj, int32_t col_num, TensorRow *row) {
   TensorPtr int_tensor;
   if (data_schema_->column(col_num).type() == DataType::DE_INT64) {
     int64_t data = json_obj;
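All of these Load*Tensor signatures flip col_num from uint32_t to int32_t because the schema APIs they compare against (e.g. NumColumns()) return int32_t, so an unsigned index forced a -Wsign-compare warning or an implicit conversion at every call site. A hypothetical sketch of the mismatch and the fix (NumColumns here is a stand-in, not the real schema object):

#include <cstdint>

int32_t NumColumns() { return 4; }  // stand-in for the schema's column count

// Sketch: with int32_t col_num (was uint32_t), both sides of the comparison
// have the same signedness and the warning disappears.
bool ValidColumn(int32_t col_num) {
  return col_num >= 0 && col_num < NumColumns();
}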

View File

@@ -88,62 +88,62 @@ class AlbumOp : public MappableLeafOp {
   /// \param[in] col_num Column num in schema
   /// \param[in, out] row Tensor row to push to
   /// \return Status The status code returned
-  Status LoadImageTensor(const std::string &image_file, uint32_t col_num, TensorRow *row);
+  Status LoadImageTensor(const std::string &image_file, int32_t col_num, TensorRow *row);
   /// \brief Load vector of ints to tensor, append tensor to tensor row
   /// \param[in] json_obj Json object containing multi-dimensional label
   /// \param[in] col_num Column num in schema
   /// \param[in, out] row Tensor row to push to
   /// \return Status The status code returned
-  Status LoadIntArrayTensor(const nlohmann::json &json_obj, uint32_t col_num, TensorRow *row);
+  Status LoadIntArrayTensor(const nlohmann::json &json_obj, int32_t col_num, TensorRow *row);
   /// \brief Load vector of floats to tensor, append tensor to tensor row
   /// \param[in] json_obj Json object containing array data
   /// \param[in] col_num Column num in schema
   /// \param[in, out] row Tensor row to push to
   /// \return Status The status code returned
-  Status LoadFloatArrayTensor(const nlohmann::json &json_obj, uint32_t col_num, TensorRow *row);
+  Status LoadFloatArrayTensor(const nlohmann::json &json_obj, int32_t col_num, TensorRow *row);
   /// \brief Load string array into a tensor, append tensor to tensor row
   /// \param[in] json_obj Json object containing string tensor
   /// \param[in] col_num Column num in schema
   /// \param[in, out] row Tensor row to push to
   /// \return Status The status code returned
-  Status LoadStringArrayTensor(const nlohmann::json &json_obj, uint32_t col_num, TensorRow *row);
+  Status LoadStringArrayTensor(const nlohmann::json &json_obj, int32_t col_num, TensorRow *row);
   /// \brief Load string into a tensor, append tensor to tensor row
   /// \param[in] json_obj Json object containing string tensor
   /// \param[in] col_num Column num in schema
   /// \param[in, out] row Tensor row to push to
   /// \return Status The status code returned
-  Status LoadStringTensor(const nlohmann::json &json_obj, uint32_t col_num, TensorRow *row);
+  Status LoadStringTensor(const nlohmann::json &json_obj, int32_t col_num, TensorRow *row);
   /// \brief Load float value to tensor row
   /// \param[in] json_obj Json object containing float
   /// \param[in] col_num Column num in schema
   /// \param[in, out] row Tensor row to push to
   /// \return Status The status code returned
-  Status LoadFloatTensor(const nlohmann::json &json_obj, uint32_t col_num, TensorRow *row);
+  Status LoadFloatTensor(const nlohmann::json &json_obj, int32_t col_num, TensorRow *row);
   /// \brief Load int value to tensor row
   /// \param[in] json_obj Json object containing int
   /// \param[in] col_num Column num in schema
   /// \param[in, out] row Tensor row to push to
   /// \return Status The status code returned
-  Status LoadIntTensor(const nlohmann::json &json_obj, uint32_t col_num, TensorRow *row);
+  Status LoadIntTensor(const nlohmann::json &json_obj, int32_t col_num, TensorRow *row);
   /// \brief Load empty tensor to tensor row
   /// \param[in] col_num Column num in schema
   /// \param[in, out] row Tensor row to push to
   /// \return Status The status code returned
-  Status LoadEmptyTensor(uint32_t col_num, TensorRow *row);
+  Status LoadEmptyTensor(int32_t col_num, TensorRow *row);
   /// \brief Load id from file name to tensor row
   /// \param[in] file The file name to get ID from
   /// \param[in] col_num Column num in schema
   /// \param[in, out] row Tensor row to push to
   /// \return Status The status code returned
-  Status LoadIDTensor(const std::string &file, uint32_t col_num, TensorRow *row);
+  Status LoadIDTensor(const std::string &file, int32_t col_num, TensorRow *row);
   /// \brief Load a tensor row according to a json file
   /// \param[in] row_id_type row_id - id for this tensor row

View File

@@ -368,7 +368,7 @@ Status CifarOp::CountTotalRows(const std::string &dir, const std::string &usage,
 Status CifarOp::ComputeColMap() {
   // set the column name map (base class field)
   if (column_name_id_map_.empty()) {
-    for (uint32_t i = 0; i < data_schema_->NumColumns(); ++i) {
+    for (int32_t i = 0; i < data_schema_->NumColumns(); ++i) {
       column_name_id_map_[data_schema_->column(i).name()] = i;
     }
   } else {

View File

@@ -223,7 +223,7 @@ Status MindRecordOp::GetRowFromReader(TensorRow *fetched_row, uint64_t row_id, i
 Status MindRecordOp::LoadTensorRow(TensorRow *tensor_row, const std::vector<uint8_t> &columns_blob,
                                    const mindrecord::json &columns_json, const mindrecord::TaskType task_type) {
-  for (uint32_t i_col = 0; i_col < columns_to_load_.size(); i_col++) {
+  for (int32_t i_col = 0; i_col < columns_to_load_.size(); i_col++) {
     auto column_name = columns_to_load_[i_col];
     // Initialize column parameters

View File

@@ -31,10 +31,18 @@ Status DatasetCacheImpl::Build() {
   CacheClient::Builder builder;
   builder.SetSessionId(session_id_).SetCacheMemSz(cache_mem_sz_).SetSpill(spill_);
-  if (hostname_) builder.SetHostname(hostname_.value());
-  if (port_) builder.SetPort(port_.value());
-  if (num_connections_) builder.SetNumConnections(num_connections_.value());
-  if (prefetch_sz_) builder.SetPrefetchSize(prefetch_sz_.value());
+  if (hostname_) {
+    (void)builder.SetHostname(hostname_.value());
+  }
+  if (port_) {
+    (void)builder.SetPort(port_.value());
+  }
+  if (num_connections_) {
+    (void)builder.SetNumConnections(num_connections_.value());
+  }
+  if (prefetch_sz_) {
+    (void)builder.SetPrefetchSize(prefetch_sz_.value());
+  }
   return builder.Build(&cache_client_);
 }

View File

@@ -71,13 +71,13 @@ Status EpochCtrlNode::ValidateParams() {
 }
 // Visitor accepting method for IRNodePass
-Status EpochCtrlNode::Accept(IRNodePass *p, bool *const modified) {
+Status EpochCtrlNode::Accept(IRNodePass *const p, bool *const modified) {
   // Downcast shared pointer then call visitor
   return p->Visit(shared_from_base<EpochCtrlNode>(), modified);
 }
 // Visitor accepting method for IRNodePass
-Status EpochCtrlNode::AcceptAfter(IRNodePass *p, bool *const modified) {
+Status EpochCtrlNode::AcceptAfter(IRNodePass *const p, bool *const modified) {
   // Downcast shared pointer then call visitor
   return p->VisitAfter(shared_from_base<EpochCtrlNode>(), modified);
 }

View File

@@ -67,13 +67,13 @@ class EpochCtrlNode : public RepeatNode {
   /// \param[in] p The node to visit
   /// \param[out] modified Indicator if the node was modified
   /// \return Status of the node visit
-  Status Accept(IRNodePass *p, bool *const modified) override;
+  Status Accept(IRNodePass *const p, bool *const modified) override;
   /// \brief Base-class override for accepting IRNodePass visitor
   /// \param[in] p The node to visit
   /// \param[out] modified Indicator if the node was modified
   /// \return Status of the node visit
-  Status AcceptAfter(IRNodePass *p, bool *const modified) override;
+  Status AcceptAfter(IRNodePass *const p, bool *const modified) override;
 };
 }  // namespace dataset

View File

@@ -83,7 +83,7 @@ Status AlbumNode::Build(std::vector<std::shared_ptr<DatasetOp>> *const node_ops)
 }
 // Get the shard id of node
-Status AlbumNode::GetShardId(int32_t *shard_id) {
+Status AlbumNode::GetShardId(int32_t *const shard_id) {
   *shard_id = sampler_->ShardId();
   return Status::OK();

View File

@@ -59,7 +59,7 @@ class AlbumNode : public MappableSourceNode {
   /// \brief Get the shard id of node
   /// \return Status Status::OK() if get shard id successfully
-  Status GetShardId(int32_t *shard_id) override;
+  Status GetShardId(int32_t *const shard_id) override;
   /// \brief Base-class override for GetDatasetSize
   /// \param[in] size_getter Shared pointer to DatasetSizeGetter

View File

@@ -70,7 +70,7 @@ Status MnistNode::Build(std::vector<std::shared_ptr<DatasetOp>> *const node_ops)
 }
 // Get the shard id of node
-Status MnistNode::GetShardId(int32_t *shard_id) {
+Status MnistNode::GetShardId(int32_t *const shard_id) {
   *shard_id = sampler_->ShardId();
   return Status::OK();

View File

@@ -58,7 +58,7 @@ class MnistNode : public MappableSourceNode {
   /// \brief Get the shard id of node
   /// \return Status Status::OK() if get shard id successfully
-  Status GetShardId(int32_t *shard_id) override;
+  Status GetShardId(int32_t *const shard_id) override;
   /// \brief Base-class override for GetDatasetSize
   /// \param[in] size_getter Shared pointer to DatasetSizeGetter

View File

@@ -118,7 +118,7 @@ Status RandomNode::Build(std::vector<std::shared_ptr<DatasetOp>> *const node_ops
 }
 // Get the shard id of node
-Status RandomNode::GetShardId(int32_t *shard_id) {
+Status RandomNode::GetShardId(int32_t *const shard_id) {
   // RandomDataset doesn't support multiple shards
   *shard_id = 0;
   return Status::OK();

View File

@@ -80,7 +80,7 @@ class RandomNode : public NonMappableSourceNode {
   /// \brief Get the shard id of node
   /// \return Status Status::OK() if get shard id successfully
-  Status GetShardId(int32_t *shard_id) override;
+  Status GetShardId(int32_t *const shard_id) override;
   /// \brief Base-class override for GetDatasetSize
   /// \param[in] size_getter Shared pointer to DatasetSizeGetter

View File

@@ -156,7 +156,7 @@ Status TFRecordNode::Build(std::vector<std::shared_ptr<DatasetOp>> *const node_o
 }
 // Get the shard id of node
-Status TFRecordNode::GetShardId(int32_t *shard_id) {
+Status TFRecordNode::GetShardId(int32_t *const shard_id) {
   *shard_id = shard_id_;
   return Status::OK();
@@ -259,7 +259,7 @@ Status TFRecordNode::Accept(IRNodePass *p, bool *const modified) {
 }
 // Visitor accepting method for IRNodePass
-Status TFRecordNode::AcceptAfter(IRNodePass *p, bool *const modified) {
+Status TFRecordNode::AcceptAfter(IRNodePass *const p, bool *const modified) {
   // Downcast shared pointer then call visitor
   return p->VisitAfter(shared_from_base<TFRecordNode>(), modified);
 }

View File

@@ -95,7 +95,7 @@ class TFRecordNode : public NonMappableSourceNode {
   /// \brief Get the shard id of node
   /// \return Status Status::OK() if get shard id successfully
-  Status GetShardId(int32_t *shard_id) override;
+  Status GetShardId(int32_t *const shard_id) override;
   /// \brief Base-class override for GetDatasetSize
   /// \param[in] size_getter Shared pointer to DatasetSizeGetter
@@ -152,7 +152,7 @@ class TFRecordNode : public NonMappableSourceNode {
   /// \param[in] p The node to visit
   /// \param[out] modified Indicator if the node was modified
   /// \return Status of the node visit
-  Status AcceptAfter(IRNodePass *p, bool *const modified) override;
+  Status AcceptAfter(IRNodePass *const p, bool *const modified) override;
  private:
   std::vector<std::string> dataset_files_;

View File

@@ -192,7 +192,7 @@ Status RepeatPass::VisitAfter(std::shared_ptr<TransferNode> node, bool *const mo
 }
 // Adds an operator to the cached operator stack save area
-void RepeatPass::AddToCachedNodeStack(std::shared_ptr<DatasetNode> node) { cached_node_stacks_.push(node); }
+void RepeatPass::AddToCachedNodeStack(const std::shared_ptr<DatasetNode> &node) { cached_node_stacks_.push(node); }
 // Pops an operator from the cached operator stack save area
 std::shared_ptr<DatasetNode> RepeatPass::PopFromCachedNodeStack() {

View File

@@ -112,7 +112,7 @@ class RepeatPass : public IRNodePass {
   /// \brief Adds an operator to the cached stack save area
   /// \param node - The dataset node to add to cached stack
   /// \return Status The status code returned
-  void AddToCachedNodeStack(std::shared_ptr<DatasetNode> node);
+  void AddToCachedNodeStack(const std::shared_ptr<DatasetNode> &node);
   /// \brief Pops an operator from the cached stack save area
   /// \return shared_ptr to the popped dataset node
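Passing std::shared_ptr by value, as the old signature did, costs an atomic refcount increment on every call; when the callee only stores the pointer, a const reference defers the single copy to the push() that actually needs it. A self-contained sketch of the same shape as the fix:

#include <memory>
#include <stack>

struct Node {};

// Sketch of the shared_ptr-by-const-reference fix above: the one refcount
// bump happens inside push(), where the pointer is actually stored.
class Pass {
 public:
  void AddToStack(const std::shared_ptr<Node> &node) { stack_.push(node); }

 private:
  std::stack<std::shared_ptr<Node>> stack_;
};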

View File

@@ -41,9 +41,15 @@ Status CropOp::OutputShape(const std::vector<TensorShape> &inputs, std::vector<T
   RETURN_IF_NOT_OK(TensorOp::OutputShape(inputs, outputs));
   outputs.clear();
   TensorShape out = TensorShape{height_, width_};
-  if (inputs[0].Rank() == 2) outputs.emplace_back(out);
-  if (inputs[0].Rank() == 3) outputs.emplace_back(out.AppendDim(inputs[0][2]));
-  if (!outputs.empty()) return Status::OK();
+  if (inputs[0].Rank() == 2) {
+    (void)outputs.emplace_back(out);
+  }
+  if (inputs[0].Rank() == 3) {
+    (void)outputs.emplace_back(out.AppendDim(inputs[0][2]));
+  }
+  if (!outputs.empty()) {
+    return Status::OK();
+  }
   return Status(StatusCode::kMDUnexpectedError,
                 "Crop: invalid input shape, expected 2D or 3D input, but got input dimension is:" +
                   std::to_string(inputs[0].Rank()));

View File

@@ -31,8 +31,12 @@ Status HwcToChwOp::OutputShape(const std::vector<TensorShape> &inputs, std::vect
   outputs.clear();
   TensorShape in = inputs[0];
   TensorShape out = TensorShape{in[2], in[0], in[1]};
-  if (inputs[0].Rank() == 3) outputs.emplace_back(out);
-  if (!outputs.empty()) return Status::OK();
+  if (inputs[0].Rank() == 3) {
+    (void)outputs.emplace_back(out);
+  }
+  if (!outputs.empty()) {
+    return Status::OK();
+  }
   return Status(
     StatusCode::kMDUnexpectedError,
     "HWC2CHW: invalid input shape, expected 3D input, but got input dimension is:" + std::to_string(inputs[0].Rank()));

View File

@@ -1015,7 +1015,7 @@ std::vector<std::vector<float>> GetDefaultBoxes(BoxesConfig config) {
   }
   scales.push_back(1.0f);
   std::vector<std::vector<float>> default_boxes;
-  for (int i = 0; i < config.feature_size.size(); i++) {
+  for (auto i = 0; i < config.feature_size.size(); i++) {
     float sk1 = scales[i];
     float sk2 = scales[i + 1];
     float sk3 = sqrt(sk1 * sk2);
@@ -1069,10 +1069,10 @@ void ConvertBoxes(std::vector<std::vector<float>> &boxes, const std::vector<std:
 std::vector<int> ApplyNms(const std::vector<std::vector<float>> &all_boxes, std::vector<float> &all_scores, float thres,
                           int max_boxes) {
-  int boxes_num = all_boxes.size();
+  size_t boxes_num = all_boxes.size();
   std::vector<float> areas(boxes_num);
   std::vector<int> order(boxes_num);
-  for (int i = 0; i < boxes_num; i++) {
+  for (auto i = 0; i < boxes_num; i++) {
     if (all_boxes[i].size() < 4) {
       return {};
     }

View File

@@ -410,7 +410,7 @@ bool WarpAffineBilinear(const LiteMat &src, LiteMat &dst, const LiteMat &M, int
   int *a = &_a[0], *b = a + dst.width_;
   const int SCALE = 1 << 10;
   const int B_SIZE = 64;
-  int16_t WH[B_SIZE * B_SIZE * 2];
+  int16_t *WH = new int16_t[B_SIZE * B_SIZE * 2];
   int16_t A_Ptr[B_SIZE * B_SIZE];
   int r_delta = SCALE / kTabSz / 2;
   int x, y, x1, y1;
@@ -449,7 +449,7 @@ bool WarpAffineBilinear(const LiteMat &src, LiteMat &dst, const LiteMat &M, int
       Remap(src, lite_part, _HW, _matA, borderType, borderValue);
     }
   }
+  delete[] WH;
  delete[] _a;
  return true;
 }
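The WH change above moves a 64 * 64 * 2 * sizeof(int16_t) = 16 KB buffer off the stack, which is what a -Wstack-usage / -Wframe-larger-than warning asks for. The commit uses raw new[]/delete[], which must be freed on every exit path; an alternative sketch (not the commit's approach) using std::unique_ptr gets the same stack-usage fix without that risk:

#include <cstdint>
#include <memory>

// Alternative sketch: unique_ptr frees the buffer automatically, even on
// early returns, while keeping plain-array indexing.
bool WarpSketch() {
  constexpr int B_SIZE = 64;
  auto WH = std::make_unique<int16_t[]>(B_SIZE * B_SIZE * 2);  // heap, not stack
  WH[0] = 1;  // use like a plain array
  return true;
}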

View File

@@ -61,9 +61,15 @@ Status RandomCropAndResizeOp::OutputShape(const std::vector<TensorShape> &inputs
   RETURN_IF_NOT_OK(TensorOp::OutputShape(inputs, outputs));
   outputs.clear();
   TensorShape out = TensorShape{target_height_, target_width_};
-  if (inputs[0].Rank() == 2) outputs.emplace_back(out);
-  if (inputs[0].Rank() == 3) outputs.emplace_back(out.AppendDim(inputs[0][2]));
-  if (!outputs.empty()) return Status::OK();
+  if (inputs[0].Rank() == 2) {
+    (void)outputs.emplace_back(out);
+  }
+  if (inputs[0].Rank() == 3) {
+    (void)outputs.emplace_back(out.AppendDim(inputs[0][2]));
+  }
+  if (!outputs.empty()) {
+    return Status::OK();
+  }
   return Status(StatusCode::kMDUnexpectedError, "RandomCropAndResize: invalid input shape");
 }
 Status RandomCropAndResizeOp::GetCropBox(int h_in, int w_in, int *x, int *y, int *crop_height, int *crop_width) {
Status RandomCropAndResizeOp::GetCropBox(int h_in, int w_in, int *x, int *y, int *crop_height, int *crop_width) { Status RandomCropAndResizeOp::GetCropBox(int h_in, int w_in, int *x, int *y, int *crop_height, int *crop_width) {

View File

@@ -143,9 +143,15 @@ Status RandomCropOp::OutputShape(const std::vector<TensorShape> &inputs, std::ve
   RETURN_IF_NOT_OK(TensorOp::OutputShape(inputs, outputs));
   outputs.clear();
   TensorShape out = TensorShape{crop_height_, crop_width_};
-  if (inputs[0].Rank() == 2) outputs.emplace_back(out);
-  if (inputs[0].Rank() == 3) outputs.emplace_back(out.AppendDim(inputs[0][2]));
-  if (!outputs.empty()) return Status::OK();
+  if (inputs[0].Rank() == 2) {
+    (void)outputs.emplace_back(out);
+  }
+  if (inputs[0].Rank() == 3) {
+    (void)outputs.emplace_back(out.AppendDim(inputs[0][2]));
+  }
+  if (!outputs.empty()) {
+    return Status::OK();
+  }
   return Status(StatusCode::kMDUnexpectedError,
                 "RandomCrop: invalid input shape, expected 2D or 3D input, but got input dimension is:" +
                   std::to_string(inputs[0].Rank()));

View File

@@ -61,9 +61,15 @@ Status ResizeOp::OutputShape(const std::vector<TensorShape> &inputs, std::vector
     outputW = size2_;
   }
   TensorShape out = TensorShape{outputH, outputW};
-  if (inputs[0].Rank() == 2) outputs.emplace_back(out);
-  if (inputs[0].Rank() == 3) outputs.emplace_back(out.AppendDim(inputs[0][2]));
-  if (!outputs.empty()) return Status::OK();
+  if (inputs[0].Rank() == 2) {
+    (void)outputs.emplace_back(out);
+  }
+  if (inputs[0].Rank() == 3) {
+    (void)outputs.emplace_back(out.AppendDim(inputs[0][2]));
+  }
+  if (!outputs.empty()) {
+    return Status::OK();
+  }
   return Status(StatusCode::kMDUnexpectedError, "Resize: invalid input wrong shape.");
 }
 }  // namespace dataset

View File

@@ -49,8 +49,8 @@ class UniformAugOp : public TensorOp {
   std::string Name() const override { return kUniformAugOp; }
  private:
-  int32_t num_ops_;
   std::vector<std::shared_ptr<TensorOp>> tensor_op_list_;
+  int32_t num_ops_;
   std::mt19937 rnd_;
 };
 }  // namespace dataset
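Reordering num_ops_ after tensor_op_list_ matters because non-static members are initialized in declaration order, not in constructor initializer-list order; if the constructor lists tensor_op_list_ first (an assumption here, since the constructor is not shown in this diff), -Wreorder fires until the declarations match. A hypothetical sketch:

#include <cstdint>
#include <memory>
#include <vector>

struct TensorOp {};

// Sketch of the -Wreorder fix: declaration order now matches the assumed
// initializer-list order (tensor_op_list_ first, then num_ops_).
class UniformAugSketch {
 public:
  UniformAugSketch(std::vector<std::shared_ptr<TensorOp>> ops, int32_t num)
      : tensor_op_list_(std::move(ops)), num_ops_(num) {}

 private:
  std::vector<std::shared_ptr<TensorOp>> tensor_op_list_;  // declared first...
  int32_t num_ops_;  // ...so it is also initialized first above
};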

View File

@@ -223,7 +223,7 @@ MSRStatus ShardIndexGenerator::CreateShardNameTable(sqlite3 *db, const std::stri
   sql = "INSERT INTO SHARD_NAME (NAME) VALUES (:SHARD_NAME);";
   sqlite3_stmt *stmt = nullptr;
   if (sqlite3_prepare_v2(db, common::SafeCStr(sql), -1, &stmt, 0) != SQLITE_OK) {
-    if (stmt) {
+    if (stmt != nullptr) {
       (void)sqlite3_finalize(stmt);
     }
     MS_LOG(ERROR) << "SQL error: could not prepare statement, sql: " << sql;

View File

@@ -877,7 +877,9 @@ std::pair<MSRStatus, std::vector<json>> ShardReader::GetLabels(int page_id, int
     sqlite3_free(errmsg);
   }
   std::vector<json> ret;
-  for (unsigned int i = 0; i < labels_ptr->size(); ++i) ret.emplace_back(json{});
+  for (unsigned int i = 0; i < labels_ptr->size(); ++i) {
+    (void)ret.emplace_back(json{});
+  }
   for (unsigned int i = 0; i < labels_ptr->size(); ++i) {
     json construct_json;
     for (unsigned int j = 0; j < columns.size(); ++j) {

View File

@@ -482,8 +482,6 @@ def check_filename(path):
     if filename.startswith(' ') or filename.endswith(' '):
         raise ValueError("filename should not start/end with space.")
-    return True
 def check_dir(dataset_dir):
     """

View File

@@ -838,7 +838,6 @@ def check_schema(method):
         [schema_file], _ = parse_user_args(method, *args, **kwargs)
         if schema_file is not None:
-            type_check(schema_file, (str,), "schema_file")
             check_file(schema_file)
         return method(self, *args, **kwargs)

View File

@@ -269,10 +269,6 @@ extern "C" int MDToDApi_GetNext(MDToDApi *pMDToDApi, MDToDResult_t *results) {
   MS_LOG(INFO) << "Start GetNext [1]" << pMDToDApi;
   // get next row for dataset
   std::unordered_map<std::string, std::shared_ptr<Tensor>> row;
-  if (pMDToDApi->_iter == nullptr) {
-    MS_LOG(ERROR) << "GetNext called with no iteratoe. abort";
-    return -1;
-  }
   // create Execute functions, this replaces Map in Pipeline
   bool ret = pMDToDApi->_iter->GetNextRow(&row);

View File

@@ -177,7 +177,7 @@ bool AlbumOp::IsReadColumn(const std::string &column_name) {
   return false;
 }
-Status AlbumOp::LoadImageTensor(const std::string &image_file_path, uint32_t col_num, TensorPtr *tensor) {
+Status AlbumOp::LoadImageTensor(const std::string &image_file_path, int32_t col_num, TensorPtr *tensor) {
   TensorPtr image;
   TensorPtr rotate_tensor;
   std::ifstream fs;
@@ -257,7 +257,7 @@ int AlbumOp::GetOrientation(const std::string &folder_path) {
   return code;
 }
-Status AlbumOp::LoadStringArrayTensor(const nlohmann::json &json_obj, uint32_t col_num, TensorPtr *tensor) {
+Status AlbumOp::LoadStringArrayTensor(const nlohmann::json &json_obj, int32_t col_num, TensorPtr *tensor) {
   std::vector<std::string> data = json_obj.get<std::vector<std::string>>();
   MS_LOG(INFO) << "String array label found: " << data << ".";
@@ -265,7 +265,7 @@ Status AlbumOp::LoadStringArrayTensor(const nlohmann::json &json_obj, uint32_t c
   return Status::OK();
 }
-Status AlbumOp::LoadStringTensor(const nlohmann::json &json_obj, uint32_t col_num, TensorPtr *tensor) {
+Status AlbumOp::LoadStringTensor(const nlohmann::json &json_obj, int32_t col_num, TensorPtr *tensor) {
   std::string data = json_obj;
   // now we iterate over the elements in json
@@ -275,7 +275,7 @@ Status AlbumOp::LoadStringTensor(const nlohmann::json &json_obj, uint32_t col_nu
   return Status::OK();
 }
-Status AlbumOp::LoadIntArrayTensor(const nlohmann::json &json_obj, uint32_t col_num, TensorPtr *tensor) {
+Status AlbumOp::LoadIntArrayTensor(const nlohmann::json &json_obj, int32_t col_num, TensorPtr *tensor) {
   // consider templating this function to handle all ints
   if (data_schema_->column(col_num).type() == DataType::DE_INT64) {
     std::vector<int64_t> data;
@@ -302,7 +302,7 @@ Status AlbumOp::LoadIntArrayTensor(const nlohmann::json &json_obj, uint32_t col_
   return Status::OK();
 }
-Status AlbumOp::LoadFloatArrayTensor(const nlohmann::json &json_obj, uint32_t col_num, TensorPtr *tensor) {
+Status AlbumOp::LoadFloatArrayTensor(const nlohmann::json &json_obj, int32_t col_num, TensorPtr *tensor) {
   // consider templating this function to handle all ints
   if (data_schema_->column(col_num).type() == DataType::DE_FLOAT64) {
     std::vector<double> data;
@@ -329,7 +329,7 @@ Status AlbumOp::LoadFloatArrayTensor(const nlohmann::json &json_obj, uint32_t co
   return Status::OK();
 }
-Status AlbumOp::LoadIDTensor(const std::string &file, uint32_t col_num, TensorPtr *tensor) {
+Status AlbumOp::LoadIDTensor(const std::string &file, int32_t col_num, TensorPtr *tensor) {
   if (data_schema_->column(col_num).type() == DataType::DE_STRING) {
     RETURN_IF_NOT_OK(Tensor::CreateScalar<std::string>(file, tensor));
     return Status::OK();
@@ -341,7 +341,7 @@ Status AlbumOp::LoadIDTensor(const std::string &file, uint32_t col_num, TensorPt
   return Status::OK();
 }
-Status AlbumOp::LoadEmptyTensor(uint32_t col_num, TensorPtr *tensor) {
+Status AlbumOp::LoadEmptyTensor(int32_t col_num, TensorPtr *tensor) {
   // hack to get the file name without extension, the 1 is to get rid of the backslash character
   RETURN_IF_NOT_OK(Tensor::CreateEmpty(TensorShape({0}), data_schema_->column(col_num).type(), tensor));
   return Status::OK();
@@ -351,7 +351,7 @@ Status AlbumOp::LoadEmptyTensor(uint32_t col_num, TensorPtr *tensor) {
 // So we actually have to check what type we want to fill the tensor with.
 // Float64 doesn't work with reinterpret cast here. Otherwise we limit the float in the schema to
 // only be float32, seems like a weird limitation to impose
-Status AlbumOp::LoadFloatTensor(const nlohmann::json &json_obj, uint32_t col_num, TensorPtr *tensor) {
+Status AlbumOp::LoadFloatTensor(const nlohmann::json &json_obj, int32_t col_num, TensorPtr *tensor) {
   if (data_schema_->column(col_num).type() == DataType::DE_FLOAT64) {
     double data = json_obj;
     MS_LOG(INFO) << "double found: " << json_obj << ".";
@@ -365,7 +365,7 @@ Status AlbumOp::LoadFloatTensor(const nlohmann::json &json_obj, uint32_t col_num
 }
 // Loads a tensor with int value, we have to cast the value to type specified in the schema.
-Status AlbumOp::LoadIntTensor(const nlohmann::json &json_obj, uint32_t col_num, TensorPtr *tensor) {
+Status AlbumOp::LoadIntTensor(const nlohmann::json &json_obj, int32_t col_num, TensorPtr *tensor) {
   if (data_schema_->column(col_num).type() == DataType::DE_INT64) {
     int64_t data = json_obj;
     MS_LOG(INFO) << "int64 found: " << json_obj << ".";

View File

@@ -93,62 +93,62 @@ class AlbumOp {
   /// \param[in] col_num Column num in schema
   /// \param[in,out] Tensor to push to
   /// \return Status The error code returned
-  Status LoadImageTensor(const std::string &image_file, uint32_t col_num, TensorPtr *tensor);
+  Status LoadImageTensor(const std::string &image_file, int32_t col_num, TensorPtr *tensor);
   /// \brief Load vector of ints to tensor, append tensor to tensor
   /// \param[in] json_obj Json object containing multi-dimensional label
   /// \param[in] col_num Column num in schema
   /// \param[in,out] Tensor to push to
   /// \return Status The error code returned
-  Status LoadIntArrayTensor(const nlohmann::json &json_obj, uint32_t col_num, TensorPtr *tensor);
+  Status LoadIntArrayTensor(const nlohmann::json &json_obj, int32_t col_num, TensorPtr *tensor);
   /// \brief Load vector of floats to tensor, append tensor to tensor
   /// \param[in] json_obj Json object containing array data
   /// \param[in] col_num Column num in schema
   /// \param[in,out] Tensor to push to
   /// \return Status The error code returned
-  Status LoadFloatArrayTensor(const nlohmann::json &json_obj, uint32_t col_num, TensorPtr *tensor);
+  Status LoadFloatArrayTensor(const nlohmann::json &json_obj, int32_t col_num, TensorPtr *tensor);
   /// \brief Load string array into a tensor, append tensor to tensor
   /// \param[in] json_obj Json object containing string tensor
   /// \param[in] col_num Column num in schema
   /// \param[in,out] Tensor to push to
   /// \return Status The error code returned
-  Status LoadStringArrayTensor(const nlohmann::json &json_obj, uint32_t col_num, TensorPtr *tensor);
+  Status LoadStringArrayTensor(const nlohmann::json &json_obj, int32_t col_num, TensorPtr *tensor);
   /// \brief Load string into a tensor, append tensor to tensor
   /// \param[in] json_obj Json object containing string tensor
   /// \param[in] col_num Column num in schema
   /// \param[in,out] Tensor to push to
   /// \return Status The error code returned
-  Status LoadStringTensor(const nlohmann::json &json_obj, uint32_t col_num, TensorPtr *tensor);
+  Status LoadStringTensor(const nlohmann::json &json_obj, int32_t col_num, TensorPtr *tensor);
   /// \brief Load float value to tensor
   /// \param[in] json_obj Json object containing float
   /// \param[in] col_num Column num in schema
   /// \param[in,out] Tensor to push to
   /// \return Status The error code returned
-  Status LoadFloatTensor(const nlohmann::json &json_obj, uint32_t col_num, TensorPtr *tensor);
+  Status LoadFloatTensor(const nlohmann::json &json_obj, int32_t col_num, TensorPtr *tensor);
   /// \brief Load int value to tensor
   /// \param[in] json_obj Json object containing int
   /// \param[in] col_num Column num in schema
   /// \param[in,out] Tensor to push to
   /// \return Status The error code returned
-  Status LoadIntTensor(const nlohmann::json &json_obj, uint32_t col_num, TensorPtr *tensor);
+  Status LoadIntTensor(const nlohmann::json &json_obj, int32_t col_num, TensorPtr *tensor);
   /// \brief Load empty tensor to tensor
   /// \param[in] col_num Column num in schema
   /// \param[in,out] Tensor to push to
   /// \return Status The error code returned
-  Status LoadEmptyTensor(uint32_t col_num, TensorPtr *tensor);
+  Status LoadEmptyTensor(int32_t col_num, TensorPtr *tensor);
   /// \brief Load id from file name to tensor
   /// \param[in] file The file name to get ID from
   /// \param[in] col_num Column num in schema
   /// \param[in,out] Tensor to push to
   /// \return Status The error code returned
-  Status LoadIDTensor(const std::string &file, uint32_t col_num, TensorPtr *tensor);
+  Status LoadIDTensor(const std::string &file, int32_t col_num, TensorPtr *tensor);
   /// \brief Load a tensor according to a json file
   /// \param[in] row_id_type row_id - id for this tensor row

View File

@@ -48,7 +48,7 @@ def test_schema_exception():
     with pytest.raises(TypeError) as info:
         ds.Schema(1)
-    assert "Argument schema_file with value 1 is not of type [<class 'str'>]" in str(info.value)
+    assert "path: 1 is not string" in str(info.value)
     with pytest.raises(RuntimeError) as info:
        schema = ds.Schema(SCHEMA_FILE)

View File

@@ -1,6 +1,6 @@
-diff -Npur sqlite-version-3.32.2/src/expr.c sqlite-version-3.32.2-patched/src/expr.c
---- sqlite-version-3.32.2/src/expr.c 2020-06-04 08:58:43.000000000 -0400
-+++ sqlite-version-3.32.2-patched/src/expr.c 2021-04-29 04:06:04.544208700 -0400
+diff -Npur sqlite-version-3.32.2-new/src/expr.c sqlite-version-3.32.2/src/expr.c
+--- sqlite-version-3.32.2-new/src/expr.c 2020-06-04 08:58:43.000000000 -0400
++++ sqlite-version-3.32.2/src/expr.c 2021-08-04 11:57:45.029230992 -0400
 @@ -3813,6 +3813,7 @@ expr_code_doover:
    AggInfo *pAggInfo = pExpr->pAggInfo;
    struct AggInfo_col *pCol;
@@ -32,9 +32,9 @@ diff -Npur sqlite-version-3.32.2/src/expr.c sqlite-version-3.32.2-patched/src/ex
    int i;
    struct SrcCount *p = pWalker->u.pSrcCount;
    SrcList *pSrc = p->pSrc;
-diff -Npur sqlite-version-3.32.2/src/global.c sqlite-version-3.32.2-patched/src/global.c
---- sqlite-version-3.32.2/src/global.c 2020-06-04 08:58:43.000000000 -0400
-+++ sqlite-version-3.32.2-patched/src/global.c 2021-04-29 04:06:04.544208700 -0400
+diff -Npur sqlite-version-3.32.2-new/src/global.c sqlite-version-3.32.2/src/global.c
+--- sqlite-version-3.32.2-new/src/global.c 2020-06-04 08:58:43.000000000 -0400
++++ sqlite-version-3.32.2/src/global.c 2021-08-04 11:57:45.033230992 -0400
 @@ -300,6 +300,11 @@ sqlite3_uint64 sqlite3NProfileCnt = 0;
  int sqlite3PendingByte = 0x40000000;
  #endif
@@ -47,9 +47,9 @@ diff -Npur sqlite-version-3.32.2/src/global.c sqlite-version-3.32.2-patched/src/
  #include "opcodes.h"
  /*
  ** Properties of opcodes. The OPFLG_INITIALIZER macro is
-diff -Npur sqlite-version-3.32.2/src/resolve.c sqlite-version-3.32.2-patched/src/resolve.c
---- sqlite-version-3.32.2/src/resolve.c 2020-06-04 08:58:43.000000000 -0400
-+++ sqlite-version-3.32.2-patched/src/resolve.c 2021-04-29 04:06:04.545208700 -0400
+diff -Npur sqlite-version-3.32.2-new/src/resolve.c sqlite-version-3.32.2/src/resolve.c
+--- sqlite-version-3.32.2-new/src/resolve.c 2020-06-04 08:58:43.000000000 -0400
++++ sqlite-version-3.32.2/src/resolve.c 2021-08-04 11:57:45.033230992 -0400
 @@ -1715,6 +1715,14 @@ static int resolveSelectStep(Walker *pWa
    return WRC_Abort;
  }
@@ -65,9 +65,9 @@ diff -Npur sqlite-version-3.32.2/src/resolve.c sqlite-version-3.32.2-patched/src
  }
  #endif
-diff -Npur sqlite-version-3.32.2/src/select.c sqlite-version-3.32.2-patched/src/select.c
---- sqlite-version-3.32.2/src/select.c 2020-06-04 08:58:43.000000000 -0400
-+++ sqlite-version-3.32.2-patched/src/select.c 2021-04-29 04:07:21.458212191 -0400
+diff -Npur sqlite-version-3.32.2-new/src/select.c sqlite-version-3.32.2/src/select.c
+--- sqlite-version-3.32.2-new/src/select.c 2020-06-04 08:58:43.000000000 -0400
++++ sqlite-version-3.32.2/src/select.c 2021-08-04 12:27:34.737267443 -0400
 @@ -15,20 +15,6 @@
  #include "sqliteInt.h"
@@ -89,7 +89,27 @@ diff -Npur sqlite-version-3.32.2/src/select.c sqlite-version-3.32.2-patched/src/
  ** An instance of the following object is used to record information about
  ** how to process the DISTINCT keyword, to simplify passing that information
  ** into the selectInnerLoop() routine.
-@@ -4426,11 +4412,14 @@ static int pushDownWhereTerms(
+@@ -2717,9 +2703,7 @@ static int multiSelect(
+       selectOpName(p->op)));
+       rc = sqlite3Select(pParse, p, &uniondest);
+       testcase( rc!=SQLITE_OK );
+-      /* Query flattening in sqlite3Select() might refill p->pOrderBy.
+-      ** Be sure to delete p->pOrderBy, therefore, to avoid a memory leak. */
+-      sqlite3ExprListDelete(db, p->pOrderBy);
++      assert( p->pOrderBy==0 );
+       pDelete = p->pPrior;
+       p->pPrior = pPrior;
+       p->pOrderBy = 0;
+@@ -4105,7 +4089,7 @@ static int flattenSubquery(
+   ** We look at every expression in the outer query and every place we see
+   ** "a" we substitute "x*3" and every place we see "b" we substitute "y+10".
+   */
+-  if( pSub->pOrderBy ){
++  if( pSub->pOrderBy && (pParent->selFlags & SF_NoopOrderBy)==0 ){
+     /* At this point, any non-zero iOrderByCol values indicate that the
+     ** ORDER BY column expression is identical to the iOrderByCol'th
+     ** expression returned by SELECT statement pSub. Since these values
+@@ -4426,11 +4410,14 @@ static int pushDownWhereTerms(
  ){
    Expr *pNew;
    int nChng = 0;
@ -105,7 +125,7 @@ diff -Npur sqlite-version-3.32.2/src/select.c sqlite-version-3.32.2-patched/src/
#endif #endif
#ifdef SQLITE_DEBUG #ifdef SQLITE_DEBUG
@@ -5553,7 +5542,9 @@ static void explainSimpleCount( @@ -5553,7 +5540,9 @@ static void explainSimpleCount(
static int havingToWhereExprCb(Walker *pWalker, Expr *pExpr){ static int havingToWhereExprCb(Walker *pWalker, Expr *pExpr){
if( pExpr->op!=TK_AND ){ if( pExpr->op!=TK_AND ){
Select *pS = pWalker->u.pSelect; Select *pS = pWalker->u.pSelect;
@ -116,7 +136,7 @@ diff -Npur sqlite-version-3.32.2/src/select.c sqlite-version-3.32.2-patched/src/
sqlite3 *db = pWalker->pParse->db; sqlite3 *db = pWalker->pParse->db;
Expr *pNew = sqlite3Expr(db, TK_INTEGER, "1"); Expr *pNew = sqlite3Expr(db, TK_INTEGER, "1");
if( pNew ){ if( pNew ){
@@ -5766,6 +5757,9 @@ int sqlite3Select( @@ -5766,6 +5755,9 @@ int sqlite3Select(
} }
if( sqlite3AuthCheck(pParse, SQLITE_SELECT, 0, 0, 0) ) return 1; if( sqlite3AuthCheck(pParse, SQLITE_SELECT, 0, 0, 0) ) return 1;
memset(&sAggInfo, 0, sizeof(sAggInfo)); memset(&sAggInfo, 0, sizeof(sAggInfo));
@ -126,7 +146,15 @@ diff -Npur sqlite-version-3.32.2/src/select.c sqlite-version-3.32.2-patched/src/
#if SELECTTRACE_ENABLED #if SELECTTRACE_ENABLED
SELECTTRACE(1,pParse,p, ("begin processing:\n", pParse->addrExplain)); SELECTTRACE(1,pParse,p, ("begin processing:\n", pParse->addrExplain));
if( sqlite3SelectTrace & 0x100 ){ if( sqlite3SelectTrace & 0x100 ){
@@ -5804,19 +5798,6 @@ int sqlite3Select( @@ -5787,6 +5779,7 @@ int sqlite3Select(
sqlite3ExprListDelete(db, p->pOrderBy);
p->pOrderBy = 0;
p->selFlags &= ~SF_Distinct;
+ p->selFlags |= SF_NoopOrderBy;
}
sqlite3SelectPrep(pParse, p, 0);
if( pParse->nErr || db->mallocFailed ){
@@ -5804,19 +5797,6 @@ int sqlite3Select(
generateColumnNames(pParse, p); generateColumnNames(pParse, p);
} }
@@ -146,7 +174,7 @@ diff -Npur sqlite-version-3.32.2/src/select.c sqlite-version-3.32.2-patched/src/
pTabList = p->pSrc; pTabList = p->pSrc;
isAgg = (p->selFlags & SF_Aggregate)!=0; isAgg = (p->selFlags & SF_Aggregate)!=0;
memset(&sSort, 0, sizeof(sSort)); memset(&sSort, 0, sizeof(sSort));
@@ -6144,7 +6125,7 @@ int sqlite3Select( @@ -6144,7 +6124,7 @@ int sqlite3Select(
if( (p->selFlags & (SF_Distinct|SF_Aggregate))==SF_Distinct if( (p->selFlags & (SF_Distinct|SF_Aggregate))==SF_Distinct
&& sqlite3ExprListCompare(sSort.pOrderBy, pEList, -1)==0 && sqlite3ExprListCompare(sSort.pOrderBy, pEList, -1)==0
#ifndef SQLITE_OMIT_WINDOWFUNC #ifndef SQLITE_OMIT_WINDOWFUNC
@@ -155,7 +183,7 @@ diff -Npur sqlite-version-3.32.2/src/select.c sqlite-version-3.32.2-patched/src/
#endif #endif
){ ){
p->selFlags &= ~SF_Distinct; p->selFlags &= ~SF_Distinct;
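The context here is SQLite's rewrite of a non-aggregate DISTINCT into a GROUP BY when the ORDER BY matches the result list (the sqlite3ExprListCompare() call above). As a rough illustration, assuming a hypothetical table t1(c1):

    -- DISTINCT here is dropped and handled as grouping:
    SELECT DISTINCT c1 FROM t1 ORDER BY c1;
    -- i.e. it is evaluated like:
    SELECT c1 FROM t1 GROUP BY c1 ORDER BY c1;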
@@ -6791,6 +6772,14 @@ int sqlite3Select( @@ -6791,6 +6771,14 @@ int sqlite3Select(
select_end: select_end:
sqlite3ExprListDelete(db, pMinMaxOrderBy); sqlite3ExprListDelete(db, pMinMaxOrderBy);
sqlite3DbFree(db, sAggInfo.aCol); sqlite3DbFree(db, sAggInfo.aCol);
@@ -170,9 +198,9 @@ diff -Npur sqlite-version-3.32.2/src/select.c sqlite-version-3.32.2-patched/src/
sqlite3DbFree(db, sAggInfo.aFunc); sqlite3DbFree(db, sAggInfo.aFunc);
#if SELECTTRACE_ENABLED #if SELECTTRACE_ENABLED
SELECTTRACE(0x1,pParse,p,("end processing\n")); SELECTTRACE(0x1,pParse,p,("end processing\n"));
diff -Npur sqlite-version-3.32.2/src/sqliteInt.h sqlite-version-3.32.2-patched/src/sqliteInt.h diff -Npur sqlite-version-3.32.2-new/src/sqliteInt.h sqlite-version-3.32.2/src/sqliteInt.h
--- sqlite-version-3.32.2/src/sqliteInt.h 2020-06-04 08:58:43.000000000 -0400 --- sqlite-version-3.32.2-new/src/sqliteInt.h 2020-06-04 08:58:43.000000000 -0400
+++ sqlite-version-3.32.2-patched/src/sqliteInt.h 2021-04-29 04:06:04.547208700 -0400 +++ sqlite-version-3.32.2/src/sqliteInt.h 2021-08-04 12:28:22.825268422 -0400
@@ -976,7 +976,12 @@ typedef INT16_TYPE LogEst; @@ -976,7 +976,12 @@ typedef INT16_TYPE LogEst;
*/ */
#if defined(SQLITE_ENABLE_SELECTTRACE) #if defined(SQLITE_ENABLE_SELECTTRACE)
@@ -211,7 +239,15 @@ diff -Npur sqlite-version-3.32.2/src/sqliteInt.h sqlite-version-3.32.2-patched/s
** The datatype ynVar is a signed integer, either 16-bit or 32-bit. ** The datatype ynVar is a signed integer, either 16-bit or 32-bit.
** Usually it is 16-bits. But if SQLITE_MAX_VARIABLE_NUMBER is greater ** Usually it is 16-bits. But if SQLITE_MAX_VARIABLE_NUMBER is greater
** than 32767 we have to make it 32-bit. 16-bit is preferred because ** than 32767 we have to make it 32-bit. 16-bit is preferred because
@@ -4546,10 +4566,11 @@ extern const unsigned char sqlite3UpperT @@ -3105,6 +3125,7 @@ struct Select {
#define SF_WhereBegin 0x0080000 /* Really a WhereBegin() call. Debug Only */
#define SF_WinRewrite 0x0100000 /* Window function rewrite accomplished */
#define SF_View 0x0200000 /* SELECT statement is a view */
+#define SF_NoopOrderBy 0x0400000 /* ORDER BY is ignored for this query */
/*
** The results of a SELECT can be distributed in several ways, as defined
@@ -4546,10 +4567,11 @@ extern const unsigned char sqlite3UpperT
extern const unsigned char sqlite3CtypeMap[]; extern const unsigned char sqlite3CtypeMap[];
extern SQLITE_WSD struct Sqlite3Config sqlite3Config; extern SQLITE_WSD struct Sqlite3Config sqlite3Config;
extern FuncDefHash sqlite3BuiltinFunctions; extern FuncDefHash sqlite3BuiltinFunctions;
@@ -224,9 +260,9 @@ diff -Npur sqlite-version-3.32.2/src/sqliteInt.h sqlite-version-3.32.2-patched/s
#ifdef VDBE_PROFILE #ifdef VDBE_PROFILE
extern sqlite3_uint64 sqlite3NProfileCnt; extern sqlite3_uint64 sqlite3NProfileCnt;
#endif #endif
diff -Npur sqlite-version-3.32.2/src/test1.c sqlite-version-3.32.2-patched/src/test1.c diff -Npur sqlite-version-3.32.2-new/src/test1.c sqlite-version-3.32.2/src/test1.c
--- sqlite-version-3.32.2/src/test1.c 2020-06-04 08:58:43.000000000 -0400 --- sqlite-version-3.32.2-new/src/test1.c 2020-06-04 08:58:43.000000000 -0400
+++ sqlite-version-3.32.2-patched/src/test1.c 2021-04-29 04:06:04.548208700 -0400 +++ sqlite-version-3.32.2/src/test1.c 2021-08-04 11:57:45.037230992 -0400
@@ -8164,7 +8164,7 @@ int Sqlitetest1_Init(Tcl_Interp *interp) @@ -8164,7 +8164,7 @@ int Sqlitetest1_Init(Tcl_Interp *interp)
#endif #endif
#endif #endif
@@ -236,9 +272,9 @@ diff -Npur sqlite-version-3.32.2/src/test1.c sqlite-version-3.32.2-patched/src/t
#endif #endif
for(i=0; i<sizeof(aCmd)/sizeof(aCmd[0]); i++){ for(i=0; i<sizeof(aCmd)/sizeof(aCmd[0]); i++){
diff -Npur sqlite-version-3.32.2/src/window.c sqlite-version-3.32.2-patched/src/window.c diff -Npur sqlite-version-3.32.2-new/src/window.c sqlite-version-3.32.2/src/window.c
--- sqlite-version-3.32.2/src/window.c 2020-06-04 08:58:43.000000000 -0400 --- sqlite-version-3.32.2-new/src/window.c 2020-06-04 08:58:43.000000000 -0400
+++ sqlite-version-3.32.2-patched/src/window.c 2021-04-29 04:06:04.548208700 -0400 +++ sqlite-version-3.32.2/src/window.c 2021-08-04 11:57:45.041230992 -0400
@@ -942,7 +942,7 @@ static int sqlite3WindowExtraAggFuncDept @@ -942,7 +942,7 @@ static int sqlite3WindowExtraAggFuncDept
*/ */
int sqlite3WindowRewrite(Parse *pParse, Select *p){ int sqlite3WindowRewrite(Parse *pParse, Select *p){
@@ -248,13 +284,13 @@ diff -Npur sqlite-version-3.32.2/src/window.c sqlite-version-3.32.2-patched/src/
Vdbe *v = sqlite3GetVdbe(pParse); Vdbe *v = sqlite3GetVdbe(pParse);
sqlite3 *db = pParse->db; sqlite3 *db = pParse->db;
Select *pSub = 0; /* The subquery */ Select *pSub = 0; /* The subquery */
diff -Npur sqlite-version-3.32.2/test/having.test sqlite-version-3.32.2-patched/test/having.test diff -Npur sqlite-version-3.32.2-new/test/having.test sqlite-version-3.32.2/test/having.test
--- sqlite-version-3.32.2/test/having.test 2020-06-04 08:58:43.000000000 -0400 --- sqlite-version-3.32.2-new/test/having.test 2020-06-04 08:58:43.000000000 -0400
+++ sqlite-version-3.32.2-patched/test/having.test 2021-04-29 04:08:11.785214475 -0400 +++ sqlite-version-3.32.2/test/having.test 2021-08-04 11:57:45.041230992 -0400
@@ -154,5 +154,24 @@ do_execsql_test 4.3 { @@ -154,5 +154,24 @@ do_execsql_test 4.3 {
SELECT a, sum(b) FROM t3 WHERE nondeter(a) GROUP BY a SELECT a, sum(b) FROM t3 WHERE nondeter(a) GROUP BY a
} {1 4 2 2} } {1 4 2 2}
+#------------------------------------------------------------------------- +#-------------------------------------------------------------------------
+reset_db +reset_db
+do_execsql_test 5.0 { +do_execsql_test 5.0 {
@@ -274,11 +310,41 @@ diff -Npur sqlite-version-3.32.2/test/having.test sqlite-version-3.32.2-patched/
+ SELECT x FROM t2 WHERE a=2 GROUP BY y HAVING 0 + SELECT x FROM t2 WHERE a=2 GROUP BY y HAVING 0
+ ) FROM t1; + ) FROM t1;
+} {b {}} +} {b {}}
finish_test finish_test
diff -Npur sqlite-version-3.32.2/test/window1.test sqlite-version-3.32.2-patched/test/window1.test diff -Npur sqlite-version-3.32.2-new/test/selectA.test sqlite-version-3.32.2/test/selectA.test
--- sqlite-version-3.32.2/test/window1.test 2020-06-04 08:58:43.000000000 -0400 --- sqlite-version-3.32.2-new/test/selectA.test 2020-06-04 08:58:43.000000000 -0400
+++ sqlite-version-3.32.2-patched/test/window1.test 2021-04-29 04:06:04.549208700 -0400 +++ sqlite-version-3.32.2/test/selectA.test 2021-08-04 12:29:43.021270055 -0400
@@ -1446,5 +1446,26 @@ do_execsql_test 6.1 {
SELECT * FROM (SELECT a FROM t1 UNION SELECT b FROM t2) WHERE a=a;
} {12345}
+# 2020-06-15 ticket 8f157e8010b22af0
+#
+reset_db
+do_execsql_test 7.1 {
+ CREATE TABLE t1(c1); INSERT INTO t1 VALUES(12),(123),(1234),(NULL),('abc');
+ CREATE TABLE t2(c2); INSERT INTO t2 VALUES(44),(55),(123);
+ CREATE TABLE t3(c3,c4); INSERT INTO t3 VALUES(66,1),(123,2),(77,3);
+ CREATE VIEW t4 AS SELECT c3 FROM t3;
+ CREATE VIEW t5 AS SELECT c3 FROM t3 ORDER BY c4;
+}
+do_execsql_test 7.2 {
+ SELECT * FROM t1, t2 WHERE c1=(SELECT 123 INTERSECT SELECT c2 FROM t4) AND c1=123;
+} {123 123}
+do_execsql_test 7.3 {
+ SELECT * FROM t1, t2 WHERE c1=(SELECT 123 INTERSECT SELECT c2 FROM t5) AND c1=123;
+} {123 123}
+do_execsql_test 7.4 {
+ CREATE TABLE a(b);
+ CREATE VIEW c(d) AS SELECT b FROM a ORDER BY b;
+ SELECT sum(d) OVER( PARTITION BY(SELECT 0 FROM c JOIN a WHERE b =(SELECT b INTERSECT SELECT d FROM c) AND b = 123)) FROM c;
+} {}
finish_test
diff -Npur sqlite-version-3.32.2-new/test/window1.test sqlite-version-3.32.2/test/window1.test
--- sqlite-version-3.32.2-new/test/window1.test 2020-06-04 08:58:43.000000000 -0400
+++ sqlite-version-3.32.2/test/window1.test 2021-08-04 11:57:45.041230992 -0400
@@ -1743,5 +1743,47 @@ do_execsql_test 53.0 { @@ -1743,5 +1743,47 @@ do_execsql_test 53.0 {
WHERE a.c); WHERE a.c);
} {4 4 4 4} } {4 4 4 4}