forked from mindspore-Ecosystem/mindspore
commit 8539de0e01
@@ -1175,9 +1175,9 @@ Status Blackman(std::shared_ptr<Tensor> *output, int len) {
   RETURN_IF_NOT_OK(Tensor::CreateEmpty(TensorShape({len}), DataType(DataType::DE_FLOAT32), output));
   // Blackman window function.
   auto iter = (*output)->begin<float>();
-  float alpha = 0.42;
-  float half = 0.5;
-  float delta = 0.08;
+  const float alpha = 0.42;
+  const float half = 0.5;
+  const float delta = 0.08;
   for (ptrdiff_t i = 0; i < len; ++i) {
     *(iter + i) = alpha - half * std::cos(TWO * PI * i / len) + delta * std::cos(TWO * TWO * PI * i / len);
   }

@@ -1200,7 +1200,7 @@ Status Hann(std::shared_ptr<Tensor> *output, int len) {
   RETURN_IF_NOT_OK(Tensor::CreateEmpty(TensorShape({len}), DataType(DataType::DE_FLOAT32), output));
   // Hann window function.
   auto iter = (*output)->begin<float>();
-  float half = 0.5;
+  const float half = 0.5;
   for (ptrdiff_t i = 0; i < len; ++i) {
     *(iter + i) = half - half * std::cos(TWO * PI * i / len);
   }

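Both hunks above only const-qualify the window coefficients; the arithmetic is unchanged. For reference, the loops compute the periodic Blackman window w(i) = 0.42 - 0.5*cos(2*pi*i/len) + 0.08*cos(4*pi*i/len) and the periodic Hann window w(i) = 0.5 - 0.5*cos(2*pi*i/len). Below is a minimal standalone sketch of the same computation; the Window helper and its vector return type are illustrative, not MindSpore API, and TWO and PI are named constants in the original source, written out here.

    #include <cmath>
    #include <vector>

    // One loop covers both windows touched above:
    //   Blackman: w(i) = 0.42 - 0.5*cos(2*pi*i/len) + 0.08*cos(4*pi*i/len)
    //   Hann:     w(i) = 0.5 - 0.5*cos(2*pi*i/len)
    std::vector<float> Window(int len, float alpha, float half, float delta) {
      const float pi = 3.14159265358979323846f;
      std::vector<float> w(len);
      for (int i = 0; i < len; ++i) {
        w[i] = alpha - half * std::cos(2.0f * pi * i / len) +
               delta * std::cos(2.0f * 2.0f * pi * i / len);
      }
      return w;
    }

Window(len, 0.42f, 0.5f, 0.08f) reproduces the Blackman hunk and Window(len, 0.5f, 0.5f, 0.0f) the Hann hunk; both divide by len rather than len - 1, i.e. the periodic rather than the symmetric form.
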
@@ -306,10 +306,10 @@ Status IWSLTOp::CleanXmlFile(const std::string &src_file_path, const std::string
   std::ofstream new_file(new_file_path);
   CHECK_FAIL_RETURN_UNEXPECTED(new_file.is_open(), "Invalid file, failed to open file: " + new_file_path);

-  while (src_doc && target_doc) {
+  while (src_doc != nullptr && target_doc != nullptr) {
     XMLElement *src_seg = src_doc->FirstChildElement("seg");
     XMLElement *target_seg = target_doc->FirstChildElement("seg");
-    while (src_seg && target_seg) {
+    while (src_seg != nullptr && target_seg != nullptr) {
       src_content = src_seg->GetText();
       target_content = target_seg->GetText();
       RETURN_IF_NOT_OK(Trim(&src_content, " "));

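The change here is purely stylistic: implicit pointer-to-bool tests become explicit nullptr comparisons. For context, the loop walks parallel <seg> elements of the source and target XML documents, presumably advancing via NextSiblingElement. A hedged sketch of that traversal pattern with tinyxml2 follows; CollectSegPairs and its parameters are illustrative, and the real method also trims each string and writes the pair to new_file.

    #include "tinyxml2.h"

    using tinyxml2::XMLElement;

    // Walk the parallel <seg> children of two documents with explicit
    // nullptr checks, as the commit prefers.
    void CollectSegPairs(XMLElement *src_doc, XMLElement *target_doc) {
      XMLElement *src_seg = src_doc->FirstChildElement("seg");
      XMLElement *target_seg = target_doc->FirstChildElement("seg");
      while (src_seg != nullptr && target_seg != nullptr) {
        const char *src_content = src_seg->GetText();  // nullptr for an empty <seg>
        const char *target_content = target_seg->GetText();
        (void)src_content;
        (void)target_content;  // placeholder: trim and persist the pair here
        src_seg = src_seg->NextSiblingElement("seg");
        target_seg = target_seg->NextSiblingElement("seg");
      }
    }
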
@@ -211,7 +211,7 @@ Status TedliumOp::ReadSph(const Path &file_sph_path, double start_time, double e
   for (int32_t i = 0; i < size; ++i) {
     char bh = temp[2 * i];
     char bl = temp[2 * i + 1];
-    // SPH aduio files is big-endian, so we should convert the two bytes of data into int16_t based
+    // SPH audio files is big-endian, so we should convert the two bytes of data into int16_t based
     // on the high 8 bits and the low 8 bits.
     int16_t s = static_cast<int16_t>(((bh & 0x00FF) << 8) | (bl & 0x00FF));
     // Data normalization: Convert the data from the interval [-32768,32767] to the interval [-1,1].

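Besides the typo fix (aduio -> audio), the conversion in this hunk deserves a note: masking each char with 0x00FF prevents sign extension when char is signed, so the high byte lands cleanly in the top 8 bits. A self-contained sketch is below; DecodeBigEndianSample and NormalizeSample are illustrative names, and the divisor 32768.0 for the [-1,1] mapping is an assumption, since the actual divisor lies outside the hunk.

    #include <cstdint>

    // Combine a big-endian byte pair into a signed 16-bit sample. The
    // 0x00FF masks keep a signed char from sign-extending into the result.
    int16_t DecodeBigEndianSample(char bh, char bl) {
      return static_cast<int16_t>(((bh & 0x00FF) << 8) | (bl & 0x00FF));
    }

    // Normalization described by the trailing comment: map [-32768, 32767]
    // to roughly [-1, 1]. Dividing by 32768.0 is one common convention
    // (an assumption here, not confirmed by the hunk).
    double NormalizeSample(int16_t s) { return s / 32768.0; }
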
@@ -113,7 +113,6 @@ class AGNewsNode : public NonMappableSourceNode {
  private:
  std::string dataset_dir_;
  std::string usage_;
- char field_delim_;
  std::vector<std::shared_ptr<CsvBase>> column_defaults_;
  std::vector<std::string> column_names_;
  int64_t num_samples_;

@@ -107,7 +107,6 @@ class AmazonReviewNode : public NonMappableSourceNode {
  private:
  std::string dataset_dir_;
  std::string usage_;
- char field_delim_;
  std::vector<std::shared_ptr<CsvBase>> column_defaults_;
  std::vector<std::string> column_names_;
  int64_t num_samples_;

@@ -122,7 +122,6 @@ class SogouNewsNode : public NonMappableSourceNode {
  private:
  std::string dataset_dir_;
  std::string usage_;
- char field_delim_;
  std::vector<std::shared_ptr<CsvBase>> column_defaults_;
  std::vector<std::string> column_names_;
  int64_t num_samples_;

@@ -122,7 +122,6 @@ class YelpReviewNode : public NonMappableSourceNode {
  private:
  std::string dataset_dir_;
  std::string usage_;
- char field_delim_;
  std::vector<std::shared_ptr<CsvBase>> column_defaults_;
  std::vector<std::string> column_names_;
  int64_t num_samples_;

@@ -95,7 +95,6 @@ class TransferNode : public DatasetNode {
   std::string queue_name_;
   int32_t device_id_;
   std::string device_type_;
-  int32_t prefetch_size_;
   bool send_epoch_end_;
   int32_t total_batch_;
   bool create_data_info_queue_;

@@ -33,6 +33,7 @@ AutoTune::AutoTune(TreeAdapter *tree_adap, ProfilingManager *profiling_mgr)
       profiling_manager_(profiling_mgr),
       leaf_op_id_(-1),
       cur_epoch_(1),
+      mode_(0),
       skip_bool_(true),
       last_step_profiled_(0) {
   tree_modifier_ = std::make_unique<TreeModifier>(tree_adapter_);

@@ -101,9 +101,11 @@ static bool ResizeBilinear3C(const unsigned char *src, int src_width, int src_he
   int16_t *y_weight = reinterpret_cast<int16_t *>(x_weight + dst_width);

   if (!InitBilinearWeight(x_offset, x_weight, scale_width, dst_width, src_width, 3)) {
+    delete[] data_buf;
     return false;
   }
   if (!InitBilinearWeight(y_offset, y_weight, scale_height, dst_height, src_height, 1)) {
+    delete[] data_buf;
     return false;
   }

@@ -192,9 +194,11 @@ static bool ResizeBilinear1C(const unsigned char *src, int src_width, int src_he
   int16_t *y_weight = reinterpret_cast<int16_t *>(x_weight + dst_width);

   if (!InitBilinearWeight(x_offset, x_weight, scale_width, dst_width, src_width, 1)) {
+    delete[] data_buf;
     return false;
   }
   if (!InitBilinearWeight(y_offset, y_weight, scale_height, dst_height, src_height, 1)) {
+    delete[] data_buf;
     return false;
   }

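These two hunks fix the same leak: data_buf appears to be the new[]-allocated scratch block that the offset and weight pointers index into, and both failure paths previously returned without freeing it. An alternative that rules such leaks out by construction is owning the buffer through std::unique_ptr, sketched below; ResizeSketch and its callback parameters are illustrative stand-ins for the real ResizeBilinear helpers, not the actual MindSpore code.

    #include <cstddef>
    #include <memory>

    // Owning the scratch buffer via unique_ptr means every early return
    // releases it automatically; no per-branch delete[] is needed.
    template <typename InitFn>
    bool ResizeSketch(std::size_t buf_size, InitFn init_x, InitFn init_y) {
      std::unique_ptr<int[]> data_buf(new int[buf_size]());
      if (!init_x(data_buf.get())) {
        return false;  // buffer freed here, mirroring the added delete[]
      }
      if (!init_y(data_buf.get())) {
        return false;
      }
      // ... the actual bilinear resize would consume data_buf here ...
      return true;
    }
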
@@ -647,6 +647,7 @@ class EnWik9Dataset(SourceDataset):
         return cde.EnWik9Node(self.dataset_dir, self.num_samples, self.shuffle_flag, self.num_shards,
                               self.shard_id)

+
 class IMDBDataset(MappableDataset):
     """
     A source dataset for reading and parsing Internet Movie Database (IMDb).

@@ -2214,7 +2214,8 @@ class ImageFolderDataset(MappableDataset):
         >>> dataset = ds.ImageFolderDataset(dataset_dir=image_folder_dataset_dir,
         ...                                 class_indexing={"cat":0, "dog":1})
         >>>
-        >>> # 3) Read all samples (image files) in image_folder_dataset_dir with extensions .JPEG and .png (case sensitive)
+        >>> # 3) Read all samples (image files) in image_folder_dataset_dir with extensions .JPEG
+        >>> # and .png (case sensitive)
         >>> dataset = ds.ImageFolderDataset(dataset_dir=image_folder_dataset_dir,
         ...                                 extensions=[".JPEG", ".png"])
