Modify "if xxx is True" to "if xxx" and "if xxx is not True" to "if not xxx"

muchenjin 2022-01-12 22:31:49 +08:00
parent a743cc8d88
commit 718862fd6f
20 changed files with 38 additions and 38 deletions
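All 38 changes apply the same pattern described in the title. The two forms are equivalent only when the tested value is a genuine bool: "x is True" passes solely for the bool singleton True, whereas "if x" passes for any truthy value. A minimal sketch of the difference (illustrative plain Python, not part of this commit):

    for value in (True, 1, "yes", [], None):
        print(value, value is True, bool(value))
    # True  -> True  True    identical outcome for real bools
    # 1     -> False True    truthy, but not the True singleton
    # "yes" -> False True
    # []    -> False False
    # None  -> False False

The flags touched below (requires_grad, use_moe, use_past, copy_flag, and so on) appear to be assigned real booleans, e.g. self.use_moe = (moe_config.expert_num > 1), so the shorter form should preserve behavior while reading more idiomatically.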

View File

@@ -388,7 +388,7 @@ class FileWriter:
if not self._writer.get_shard_header():
self._writer.set_shard_header(self._header)
ret = self._writer.commit()
-if self._index_generator is True:
+if self._index_generator:
if self._append:
self._generator = ShardIndexGenerator(self._file_name, self._append)
elif len(self._paths) >= 1:
@@ -470,7 +470,7 @@ class FileWriter:
return False, error
elif len(v) == 2 and 'type' in v:
res_1, res_2 = self._validate_array(k, v)
-if res_1 is not True:
+if not res_1:
return res_1, res_2
else:
error = "Field '{}' contains illegal attributes.".format(v)

View File

@@ -483,7 +483,7 @@ class Cell(Cell_):
item.init_data()
elif isinstance(item, numpy.ndarray):
raise TypeError("For 'Cell', inputs should not be numpy array.")
-if self.requires_grad is True:
+if self.requires_grad:
_pynative_executor.set_grad_flag(True)
_pynative_executor.new_graph(self, *args, **kwargs)
cast_inputs = self.auto_cast_inputs(args)
@@ -1518,7 +1518,7 @@ class Cell(Cell_):
def _set_recompute_scope(self, mode):
prefix = 'recompute_'
-if mode is True:
+if mode:
if self._scope is None:
self._scope = prefix
elif not self._scope.startswith(prefix):

View File

@@ -221,7 +221,7 @@ class _BatchNorm(Cell):
self.moving_mean,
self.moving_variance)[0]
-if self.use_batch_statistics is True:
+if self.use_batch_statistics:
return self.bn_train(x,
self.gamma,
self.beta,

View File

@@ -239,7 +239,7 @@ class ConfusionMatrixMetric(Metric):
y_pred = self._convert_data(inputs[0])
y = self._convert_data(inputs[1])
-if self.calculation_method is True:
+if self.calculation_method:
score, not_nans = self.confusion_matrix(y_pred, y)
not_nans = int(not_nans.item())
self._total_num += score.item() * not_nans
@@ -260,7 +260,7 @@ class ConfusionMatrixMetric(Metric):
ndarray, the computed result.
"""
-if self.calculation_method is True:
+if self.calculation_method:
if self._class_num == 0:
raise RuntimeError("The 'ConfusionMatrixMetric' can not be calculated, because the number of samples "
"is 0, please check whether your inputs(predicted value, true value) are empty, or "

View File

@@ -376,7 +376,7 @@ class _Linear(Cell):
def construct(self, x):
out_shape = P.Shape()(x)[:-1] + (self.out_channels,)
x = P.Reshape()(x, (-1, self.in_channels))
-if self.expert_flag is True:
+if self.expert_flag:
x = P.Reshape()(x, (self.expert_num, -1, self.in_channels))
weight = self.cast(self.weight, self.dtype)
x = self.matmul(x, weight)

View File

@@ -270,7 +270,7 @@ class Router(Cell):
def construct(self, input_tensor):
input_tensor = self.cast(input_tensor, mstype.float32)
-if self.noisy_policy == "jitter" and self.training is True:
+if self.noisy_policy == "jitter" and self.training:
# Here, we temporarily implement the multiplicative jitter this way,
# for the lack of UniformReal parallel operator.
input_tensor = self.mul(input_tensor, self.noise)

View File

@@ -1309,7 +1309,7 @@ class TransformerEncoderLayer(Cell):
parallel_config=parallel_config)
_check_moe_config(moe_config, parallel_config)
self.use_moe = (moe_config.expert_num > 1)
-if self.use_moe is True:
+if self.use_moe:
self.output = MoE(hidden_size=hidden_size,
dropout_rate=hidden_dropout_rate,
ffn_hidden_size=ffn_hidden_size,
@@ -1378,7 +1378,7 @@ class TransformerEncoderLayer(Cell):
output_x = self.layernorm2(x)
output_x = F.cast(output_x, self.dtype)
aux_loss = None
-if self.use_moe is True:
+if self.use_moe:
mlp_logit, aux_loss = self.output(output_x)
else:
mlp_logit = self.output(output_x)
@@ -1416,7 +1416,7 @@ class TransformerEncoderLayer(Cell):
output = self.add(x, mlp_logit)
output = F.reshape(output, x_shape)
-if self.use_moe is True:
+if self.use_moe:
return output, layer_present, aux_loss
return output, layer_present
@@ -1588,7 +1588,7 @@ class TransformerDecoderLayer(Cell):
"divisible by 'parallel_config.model_parallel', but got the ffn_hidden_size is {} "
"and parallel_config.model_parallel is {}."
.format(ffn_hidden_size, parallel_config.model_parallel))
-if use_past is True:
+if use_past:
raise ValueError(f"The {self.cls_name} does not support use_past=True.")
self.batch_size = batch_size
self.use_past = use_past
@@ -1632,7 +1632,7 @@ class TransformerDecoderLayer(Cell):
self.cross_attention_layernorm.shard(((parallel_config.data_parallel, 1),))
_check_moe_config(moe_config, parallel_config)
self.use_moe = (moe_config.expert_num > 1)
-if self.use_moe is True:
+if self.use_moe:
self.output = MoE(hidden_size=hidden_size,
dropout_rate=hidden_dropout_rate,
ffn_hidden_size=ffn_hidden_size,
@@ -1718,7 +1718,7 @@ class TransformerDecoderLayer(Cell):
output_x = self.layernorm2(x)
output_x = F.cast(output_x, self.dtype)
aux_loss = None
-if self.use_moe is True:
+if self.use_moe:
mlp_logit, aux_loss = self.output(output_x)
else:
mlp_logit = self.output(output_x)
@@ -1756,7 +1756,7 @@ class TransformerDecoderLayer(Cell):
output = self.add(x, mlp_logit)
output = F.reshape(output, hidden_shape)
-if self.use_moe is True:
+if self.use_moe:
return output, layer_present, aux_loss
return output, layer_present
@@ -2044,7 +2044,7 @@ class TransformerEncoder(Cell):
def construct(self, hidden_states, attention_mask, init_reset=True, batch_valid_length=None):
present_layer = ()
-if self.use_moe is True:
+if self.use_moe:
accum_loss = self.aux_loss
for i in range(self.num_layers):
hidden_states, present, aux_loss = self.blocks[i](hidden_states,
@@ -2242,7 +2242,7 @@ class TransformerDecoder(Cell):
def construct(self, hidden_states, attention_mask, encoder_output=None, memory_mask=None,
init_reset=True, batch_valid_length=None):
present_layer = ()
-if self.use_moe is True:
+if self.use_moe:
accum_loss = self.aux_loss
for i in range(self.num_layers):
hidden_states, present, aux_loss = self.blocks[i](hidden_states,
@@ -2433,7 +2433,7 @@ class Transformer(Cell):
if encoder_layers <= 0 < decoder_layers:
raise ValueError(f"Transformer doesn't support encoder layer {encoder_layers} and decoder "
f"layer {decoder_layers}, please use TransformerDecoder")
-if encoder_layers > 0 and decoder_layers > 0 and use_past is True:
+if encoder_layers > 0 and decoder_layers > 0 and use_past:
raise ValueError(f"The {self.cls_name} with encoder and decoder does not support use_past=True.")
if _get_parallel_mode() in (ParallelMode.AUTO_PARALLEL,):
raise RuntimeError(f"The {self.cls_name} does not support auto parallel mode now.")
@@ -2503,7 +2503,7 @@ class Transformer(Cell):
decoder_layer_present = None
accum_loss = self.aux_loss
if self.encoder is not None:
-if self.use_moe is True:
+if self.use_moe:
encoder_output, encoder_layer_present, encoder_aux_loss = self.encoder(encoder_inputs, encoder_masks,
init_reset, batch_valid_length)
accum_loss = self.add(accum_loss, encoder_aux_loss)
@@ -2514,7 +2514,7 @@ class Transformer(Cell):
if self.decoder is not None:
# decoder mask should be created outside of the model
-if self.use_moe is True:
+if self.use_moe:
decoder_output, decoder_layer_present, decoder_aux_loss = self.decoder(decoder_inputs, decoder_masks,
encoder_output, memory_mask,
init_reset, batch_valid_length)
@@ -2526,6 +2526,6 @@ class Transformer(Cell):
memory_mask, init_reset,
batch_valid_length)
output = decoder_output
-if self.use_moe is True:
+if self.use_moe:
return output, encoder_layer_present, decoder_layer_present, accum_loss
return output, encoder_layer_present, decoder_layer_present

View File

@@ -37,7 +37,7 @@ def get_bprop_masked_select(self):
dinput, dvalue = binop_grad_common(input_data, mask, dinput, dvalue)
dvalue = sum_op(dvalue)
dinput = F.cast(dinput, F.dtype(input_data))
-if is_instance_op(value, mstype.number) is True:
+if is_instance_op(value, mstype.number):
dvalue = 0
else:
dvalue = F.cast(dvalue, F.dtype(value))

View File

@@ -70,7 +70,7 @@ def get_bprop_index_lerp(self):
dend = mul_op(dout, weight)
dweight = mul_op(dout, sub_op(end, start))
dstart, dend = binop_grad_common(start, end, dstart, dend)
-if is_instance_op(weight, mstype.number) is True:
+if is_instance_op(weight, mstype.number):
dweight = 0
else:
_, dweight = binop_grad_common(start, weight, dstart, dweight)

View File

@@ -66,7 +66,7 @@ if __name__ == "__main__":
# If the specified bprop source directory is not on the mindspore installed path,
# copy the bprop source files to the installed path.
backup_suffix = "_generate_bak"
-if copy_flag is True:
+if copy_flag:
shutil.rmtree(bprop_installed_dir + backup_suffix, ignore_errors=True)
os.rename(bprop_installed_dir, bprop_installed_dir + backup_suffix)
os.mkdir(bprop_installed_dir)
@@ -81,7 +81,7 @@ if __name__ == "__main__":
# If the specified bprop source directory is not on the mindspore installed path,
# copy the generated mindir files to the mindir directory relative to the specified path.
-if copy_flag is True:
+if copy_flag:
shutil.rmtree(bprop_installed_dir)
os.rename(bprop_installed_dir + backup_suffix, bprop_installed_dir)
ls = os.listdir(bprop_mindir_export_dir)

View File

@@ -5683,7 +5683,7 @@ class IsClose(Primitive):
validator.check_value_type('rtol', rtol, [float], self.name)
validator.check_value_type('atol', atol, [float], self.name)
validator.check_value_type('equal_nan', equal_nan, [bool], self.name)
-if equal_nan is not True:
+if not equal_nan:
raise ValueError("For IsClose, the `equal_nan` must be True, but got False.")
validator.check_non_negative_float(rtol, 'rtol', self.name)
validator.check_non_negative_float(atol, 'atol', self.name)

View File

@@ -55,7 +55,7 @@ def fwrite_format(output_data_path, data_source=None, is_print=False, is_start=F
is_start (bool): Whether this is the first line of the output file; the old file will be removed if True.
"""
-if is_start is True and os.path.exists(output_data_path):
+if is_start and os.path.exists(output_data_path):
os.remove(output_data_path)
if isinstance(data_source, str) and data_source.startswith("title:"):

View File

@@ -446,7 +446,7 @@ class ModelCheckpoint(Callback):
return True
elif self._config.save_checkpoint_seconds and self._config.save_checkpoint_seconds > 0:
self._cur_time = time.time()
-if (self._cur_time - self._last_time) > self._config.save_checkpoint_seconds or force_to_save is True:
+if (self._cur_time - self._last_time) > self._config.save_checkpoint_seconds or force_to_save:
self._last_time = self._cur_time
return True

View File

@@ -756,7 +756,7 @@ class Model:
>>> model.train(2, dataset)
"""
dataset_sink_mode = Validator.check_bool(dataset_sink_mode)
-if isinstance(self._train_network, nn.GraphCell) and dataset_sink_mode is True:
+if isinstance(self._train_network, nn.GraphCell) and dataset_sink_mode:
raise ValueError("Dataset sink mode is currently not supported when training with a GraphCell.")
if hasattr(train_dataset, '_warmup_epoch') and train_dataset._warmup_epoch != epoch:
@@ -942,7 +942,7 @@ class Model:
if not self._metric_fns:
raise ValueError("The model argument 'metrics' can not be None or empty, "
"you should set the argument 'metrics' for model.")
-if isinstance(self._eval_network, nn.GraphCell) and dataset_sink_mode is True:
+if isinstance(self._eval_network, nn.GraphCell) and dataset_sink_mode:
raise ValueError("Sink mode is currently not supported when evaluating with a GraphCell.")
cb_params = _InternalCallbackParam()
@@ -1019,7 +1019,7 @@ class Model:
dataset_sink_mode = Validator.check_bool(dataset_sink_mode)
if not dataset_sink_mode:
raise ValueError("Only dataset sink mode is supported for now.")
-if isinstance(self._train_network, nn.GraphCell) and dataset_sink_mode is True:
+if isinstance(self._train_network, nn.GraphCell) and dataset_sink_mode:
raise ValueError("Dataset sink mode is currently not supported when training with a GraphCell.")
Validator.check_is_int(sink_size)
dataset_size = train_dataset.get_dataset_size()

View File

@@ -97,7 +97,7 @@ class FakeData:
self.is_onehot = True
self.fakedata_mode = fakedata_mode
-if use_parallel is True:
+if use_parallel:
init(backend_name='nccl')
self.rank_size = get_group_size()
self.rank_id = get_rank()

View File

@@ -99,7 +99,7 @@ class FakeData:
self.is_onehot = True
self.fakedata_mode = fakedata_mode
-if use_parallel is True:
+if use_parallel:
init(backend_name='hccl')
self.rank_size = get_group_size()
self.rank_id = get_rank()

View File

@@ -102,7 +102,7 @@ class FakeData:
self.is_onehot = True
self.fakedata_mode = fakedata_mode
-if use_parallel is True:
+if use_parallel:
init(backend_name='hccl')
self.rank_size = get_group_size()
self.rank_id = get_rank()

View File

@@ -81,7 +81,7 @@ class FakeData:
self.is_onehot = True
self.fakedata_mode = fakedata_mode
-if use_parallel is True:
+if use_parallel:
init()
self.rank_size = get_group_size()
self.rank_id = get_rank()

View File

@@ -574,7 +574,7 @@ class Model:
>>> model.train(2, dataset)
"""
repeat_count = train_dataset.get_repeat_count()
-if epoch != repeat_count and dataset_sink_mode is True:
+if epoch != repeat_count and dataset_sink_mode:
logger.warning(f"The epoch_size {epoch} is not the same with dataset repeat_count {repeat_count}")
dataset_sink_mode = Validator.check_bool(dataset_sink_mode)
_device_number_check(self._parallel_mode, self._device_number)

View File

@@ -346,7 +346,7 @@ def vm_impl_momentum(self):
learning_rate = np.full(shape, learning_rate.asnumpy())
momentum = np.full(shape, momentum.asnumpy())
accumulation = accumulation * momentum + gradient
-if use_nesterov is True:
+if use_nesterov:
variable -= gradient * learning_rate + accumulation * momentum * learning_rate
else:
variable -= accumulation * learning_rate