Commit a346a1b2 authored by simson

Second round of the enhancement of API comments

Parent 72d2fc74
......@@ -206,23 +206,24 @@ class _MindSporeFunction:
def ms_function(fn=None, obj=None, input_signature=None):
"""
Creates a callable MindSpore graph from a python function.
Create a callable MindSpore graph from a python function.
This allows the MindSpore runtime to apply optimizations based on graph.
Args:
fn (Function): The Python function that will be run as a graph. Default: None.
obj (Object): The Python Object that provide information for identify compiled function. Default: None.
input_signature (MetaTensor): The MetaTensor to describe the input arguments. The MetaTensor specifies
obj (Object): The Python Object that provides the information for identifying the compiled function. Default:
None.
input_signature (MetaTensor): The MetaTensor which describes the input arguments. The MetaTensor specifies
the shape and dtype of the Tensor and they will be supplied to this function. If input_signature
is specified, every input to `fn` must be a `Tensor`. And the input parameters of `fn` cannot accept
`**kwargs`. The shape and dtype of actual inputs should keep same with input_signature, or TypeError
will be raised. Default: None.
is specified, each input to `fn` must be a `Tensor`, and the input parameters of `fn` cannot accept
`**kwargs`. The shape and dtype of actual inputs should be the same as input_signature. Otherwise,
TypeError will be raised. Default: None.
Returns:
Function, if `fn` is not None, returns a callable that will execute the compiled function; If `fn` is None,
returns a decorator and when this decorator invokes with a single `fn` argument, the callable is equal to the
case when `fn` is not None.
Function, if `fn` is not None, returns a callable function that will execute the compiled function; if `fn` is
None, returns a decorator and, when this decorator is invoked with a single `fn` argument, the callable function
is equal to the case when `fn` is not None.
Examples:
>>> def tensor_add(x, y):
......
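For reference, a minimal sketch of the two call styles the docstring describes, assuming a standard MindSpore install (`tensor_add` and `tensor_sub` are illustrative names):

```python
import numpy as np
from mindspore import Tensor, ms_function

@ms_function            # decorator style: fn is supplied later
def tensor_add(x, y):
    return x + y        # compiled and run as a MindSpore graph

def tensor_sub(x, y):
    return x - y

compiled_sub = ms_function(fn=tensor_sub)   # direct style: fn is not None

x = Tensor(np.ones([2, 3]).astype(np.float32))
y = Tensor(np.ones([2, 3]).astype(np.float32))
out_add = tensor_add(x, y)
out_sub = compiled_sub(x, y)
```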
......@@ -166,7 +166,7 @@ def pytype_to_dtype(obj):
def get_py_obj_dtype(obj):
"""
Get the corresponding MindSpore data type by python type or variable.
Get the MindSpore data type which corresponds to python type or variable.
Args:
obj: An object of python type, or a variable in python type.
......@@ -186,7 +186,7 @@ def get_py_obj_dtype(obj):
def dtype_to_nptype(type_):
"""
Get numpy data type corresponding to MindSpore dtype.
Convert MindSpore dtype to numpy data type.
Args:
type_ (:class:`mindspore.dtype`): MindSpore's dtype.
......@@ -213,7 +213,7 @@ def dtype_to_nptype(type_):
def dtype_to_pytype(type_):
"""
Get python type corresponding to MindSpore dtype.
Convert MindSpore dtype to python data type.
Args:
type_ (:class:`mindspore.dtype`): MindSpore's dtype.
......
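A short sketch of the three converters touched above, assuming they are imported from `mindspore.common.dtype`, where this hunk lives:

```python
import numpy as np
import mindspore as ms
from mindspore.common.dtype import (pytype_to_dtype, dtype_to_nptype,
                                    dtype_to_pytype)

assert pytype_to_dtype(bool) == ms.bool_          # python type -> MindSpore dtype
assert dtype_to_nptype(ms.float32) is np.float32  # MindSpore dtype -> numpy type
assert dtype_to_pytype(ms.int64) is int           # MindSpore dtype -> python type
```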
......@@ -53,7 +53,7 @@ class Parameter(MetaTensor):
name (str): Name of the child parameter.
requires_grad (bool): True if the parameter requires gradient. Default: True.
layerwise_parallel (bool): A kind of model parallel mode. When layerwise_parallel is true in parallel mode,
broadcast and gradients communication would not be applied on parameters. Default: False.
broadcast and gradients communication would not be applied to parameters. Default: False.
"""
__base_type__ = {}
......@@ -196,7 +196,7 @@ class Parameter(MetaTensor):
@property
def is_init(self):
"""Get init status of the parameter."""
"""Get the initialization status of the parameter."""
return self._is_init
@is_init.setter
......@@ -322,7 +322,7 @@ class Parameter(MetaTensor):
def init_data(self, layout=None, set_sliced=False):
"""
Init data of the parameter.
Initialize the parameter data.
Args:
layout (list[list[int]]): Parameter slice layout [dev_mat, tensor_map, slice_shape].
......@@ -330,11 +330,11 @@ class Parameter(MetaTensor):
- dev_mat (list[int]): Device matrix.
- tensor_map (list[int]): Tensor map.
- slice_shape (list[int]): Shape of slice.
set_sliced (bool): True if should set parameter sliced after init the data of initializer.
set_sliced (bool): If True, the parameter is set to be sliced after initializing the data.
Default: False.
Returns:
Parameter, the `Parameter` after init data. If current `Parameter` already initialized before,
Parameter, the `Parameter` after initializing data. If the current `Parameter` has already been initialized,
returns the same initialized `Parameter`.
"""
if self.init_mode is None:
......@@ -371,7 +371,7 @@ class ParameterTuple(tuple):
Class for storing a tuple of parameters.
Note:
Used to store the parameters of the network into the parameter tuple collection.
It is used to store the parameters of the network into the parameter tuple collection.
"""
def __new__(cls, iterable):
"""Create instance object of ParameterTuple."""
......
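A minimal sketch of the Parameter APIs discussed above (`is_init`, `init_data`) together with ParameterTuple, assuming a standard MindSpore setup:

```python
import numpy as np
import mindspore.nn as nn
from mindspore import Parameter, ParameterTuple, Tensor

w = Parameter(Tensor(np.zeros([2, 3]).astype(np.float32)),
              name="w", requires_grad=True)
w = w.init_data()        # returns the initialized Parameter
print(w.is_init)         # initialization status of the parameter

# store the parameters of a network into the parameter tuple collection
params = ParameterTuple(nn.Dense(3, 4).get_parameters())
```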
......@@ -29,14 +29,14 @@ np_types = (np.int8, np.int16, np.int32, np.int64,
class Tensor(Tensor_):
"""
Tensor for data storage.
Tensor is used for data storage.
Tensor inherits tensor object in C++ side, some functions are implemented
in C++ side and some functions are implemented in Python layer.
Tensor inherits from the tensor object in C++.
Some functions are implemented in C++ and others are implemented in Python.
Args:
input_data (Tensor, float, int, bool, tuple, list, numpy.ndarray): Input data of the tensor.
dtype (:class:`mindspore.dtype`): Should be None, bool or numeric type defined in `mindspore.dtype`.
dtype (:class:`mindspore.dtype`): Should be None, bool or a numeric type defined in `mindspore.dtype`.
The argument is used to define the data type of the output tensor. If it is None, the data type of the
output tensor will be the same as the `input_data`. Default: None.
......@@ -44,13 +44,13 @@ class Tensor(Tensor_):
Tensor, with the same shape as `input_data`.
Examples:
>>> # init a tensor with input data
>>> # initialize a tensor with input data
>>> t1 = Tensor(np.zeros([1, 2, 3]), mindspore.float32)
>>> assert isinstance(t1, Tensor)
>>> assert t1.shape == (1, 2, 3)
>>> assert t1.dtype == mindspore.float32
>>>
>>> # init a tensor with a float scalar
>>> # initialize a tensor with a float scalar
>>> t2 = Tensor(0.1)
>>> assert isinstance(t2, Tensor)
>>> assert t2.dtype == mindspore.float64
......@@ -280,18 +280,18 @@ class IndexedSlices:
The dense tensor `dense` represented by an IndexedSlices `slices` has
`dense[slices.indices[i], :, :, :, ...] = slices.values[i, :, :, :, ...]`.
IndexedSlices can only be used in `Cell`'s contruct method.
IndexedSlices can only be used in the `Cell`'s construct method.
Pynative mode not supported at the moment.
It is not supported in pynative mode at the moment.
Args:
indices (Tensor): A 1-D integer Tensor of shape [D0].
values (Tensor): A Tensor of any dtype of shape [D0, D1, ..., Dn].
dense_shape (tuple): A integer tuple containing the shape
dense_shape (tuple): An integer tuple which contains the shape
of the corresponding dense tensor.
Returns:
IndexedSlices, composed of `indices`, `values`, `dense_shape`.
IndexedSlices, composed of `indices`, `values`, and `dense_shape`.
Examples:
>>> class Net(nn.Cell):
......@@ -327,7 +327,7 @@ class SparseTensor:
"""
A sparse representation of a set of nonzero elements from a tensor at given indices.
SparseTensor can only be used in `Cell`'s contruct method.
SparseTensor can only be used in the `Cell`'s construct method.
It is not supported in pynative mode at the moment.
......@@ -344,7 +344,7 @@ class SparseTensor:
which specifies the dense_shape of the sparse tensor.
Returns:
SparseTensor, composed of `indices`, `values`, `dense_shape`.
SparseTensor, composed of `indices`, `values`, and `dense_shape`.
Examples:
>>> class Net(nn.Cell):
......
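A hedged sketch of the construct-only usage noted above, following the pattern of the truncated Examples block (graph mode assumed; the same restriction applies to IndexedSlices):

```python
import mindspore as ms
import mindspore.nn as nn
from mindspore import Tensor, SparseTensor, context

context.set_context(mode=context.GRAPH_MODE)

class Net(nn.Cell):
    def __init__(self, dense_shape):
        super(Net, self).__init__()
        self.dense_shape = dense_shape

    def construct(self, indices, values):
        # a SparseTensor may only be built inside construct
        return SparseTensor(indices, values, self.dense_shape)

indices = Tensor([[0, 1], [1, 2]], dtype=ms.int32)
values = Tensor([1.0, 2.0], dtype=ms.float32)
out = Net((3, 4))(indices, values)
```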
......@@ -220,7 +220,7 @@ class Optimizer(Cell):
"""Check weight decay, and convert int to float."""
if isinstance(weight_decay, (float, int)):
weight_decay = float(weight_decay)
validator.check_number_range("weight_decay", weight_decay, 0.0, 1.0, Rel.INC_BOTH, self.cls_name)
validator.check_number_range("weight_decay", weight_decay, 0.0, float("inf"), Rel.INC_LEFT, self.cls_name)
return weight_decay
raise TypeError("Weight decay should be int or float.")
......
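The validator change above relaxes the accepted range of weight_decay from [0.0, 1.0] to [0.0, inf); a quick sketch of its effect (nn.Momentum is one optimizer that routes through this check):

```python
import mindspore.nn as nn

net = nn.Dense(3, 4)
# weight_decay must still be non-negative, but values above 1.0 now pass
opt = nn.Momentum(net.trainable_params(), learning_rate=0.1,
                  momentum=0.9, weight_decay=2.0)
```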
......@@ -364,8 +364,8 @@ def prim_attr_register(fn):
def constexpr(fn=None, get_instance=True, name=None):
"""
Makes a PrimitiveWithInfer operator that can infer the value at compile time. We can define a function
to compute between constant variable and used in constructß.
Make a PrimitiveWithInfer operator that can infer the value at compile time. We can use it to define a function to
compute a constant value using the constants in the constructor.
Args:
fn (function): A `fn` used as the infer_value of the output operator.
......
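A minimal sketch of the decorator described above; the decorated function is evaluated at graph compile time when its arguments are constants (`compute_pad` is an illustrative name):

```python
from mindspore.ops import constexpr

@constexpr
def compute_pad(kernel_size):
    # runs during compilation; kernel_size must be a constant here,
    # e.g. when called as compute_pad(3) inside a Cell's construct
    return (kernel_size - 1) // 2
```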
......@@ -39,19 +39,20 @@ def _send_data_no_flag(dataset, epoch_num):
class DatasetHelper:
"""
Help function to use the Minddata dataset.
Help function to use the MindData dataset.
According to different context, change the iter of dataset, to use the same for loop in different context.
According to different contexts, change the iterator of the dataset, so that the same for loop can be used in
different contexts.
Note:
The iter of DatasetHelper will give one epoch data.
One iteration of DatasetHelper will provide one epoch of data.
Args:
dataset (DataSet): The training dataset iterator.
dataset_sink_mode (bool): If true, use GetNext to fetch the data; otherwise, feed the data from the host. Default: True.
sink_size (int): Control the amount of data each sink.
If sink_size=-1, sink the complete dataset each epoch.
If sink_size>0, sink sink_size data each epoch. Default: -1.
sink_size (int): Control the amount of data in each sink.
If sink_size=-1, sink the complete dataset for each epoch.
If sink_size>0, sink sink_size data for each epoch. Default: -1.
epoch_num (int): Control the number of epochs of data to send. Default: 1.
Examples:
......@@ -90,11 +91,11 @@ class DatasetHelper:
# A temp solution for loop sink. Delete later
def types_shapes(self):
"""Get the types and shapes from dataset on current config."""
"""Get the types and shapes from dataset on the current configuration."""
return self.iter.types_shapes()
def sink_size(self):
"""Get sink_size for every iteration."""
"""Get sink_size for each iteration."""
return self.iter.get_sink_size()
def stop_send(self):
......
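A sketch of the iteration contract from the Note above: one outer iteration yields one epoch of data (`dataset` and `network` are assumed to exist already; the import path follows this module's location):

```python
from mindspore.train.dataset_helper import DatasetHelper

helper = DatasetHelper(dataset, dataset_sink_mode=False, sink_size=-1)
for inputs in helper:          # one iteration == one epoch of data
    outputs = network(*inputs)
```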
......@@ -45,20 +45,20 @@ class Model:
`Model` groups layers into an object with training and inference features.
Args:
network (Cell): The training or testing network.
network (Cell): A training or testing network.
loss_fn (Cell): Objective function. If loss_fn is None, the network should contain the logic of loss
and gradients calculation, and the parallel logic if needed. Default: None.
optimizer (Cell): Optimizer for updating the weights. Default: None.
metrics (Union[dict, set]): Dict or set of metrics to be evaluated by the model during
metrics (Union[dict, set]): A dictionary or a set of metrics to be evaluated by the model during
training and testing, e.g. {'accuracy', 'recall'}. Default: None.
eval_network (Cell): Network for evaluation. If not defined, `network` and `loss_fn` would be wrapped as
`eval_network`. Default: None.
eval_indexes (list): In case of defining the `eval_network`, if `eval_indexes` is None, all outputs of
eval_indexes (list): When defining the `eval_network`, if `eval_indexes` is None, all outputs of the
`eval_network` would be passed to metrics, otherwise `eval_indexes` must contain three
elements, representing the positions of loss value, predict value and label, the loss
value would be passed to `Loss` metric, predict value and label would be passed to other
metric. Default: None.
elements, including the positions of loss value, predicted value and label. The loss
value would be passed to the `Loss` metric, and the predicted value and label would be passed
to other metrics. Default: None.
amp_level (str): Option for argument `level` in `mindspore.amp.build_train_network`, level for mixed
precision training. Supports [O0, O2, O3]. Default: "O0".
......@@ -68,10 +68,11 @@ class Model:
O2 is recommended on GPU, O3 is recommended on Ascend.
loss_scale_manager (Union[None, LossScaleManager]): If None, not scale the loss, or else
scale the loss by LossScaleManager. If it is set, overwrite the level setting. It's a eyword argument.
loss_scale_manager (Union[None, LossScaleManager]): If it is None, the loss would not be scaled. Otherwise,
scale the loss by LossScaleManager. It is a keyword argument.
e.g. Use `loss_scale_manager=None` to set the value.
keep_batchnorm_fp32 (bool): Keep Batchnorm run in `float32`. If set, overwrite the level setting. Default: True.
keep_batchnorm_fp32 (bool): Keep Batchnorm running in `float32`. If it is set, it overwrites the `level`
setting. Default: True.
Examples:
>>> class Net(nn.Cell):
......@@ -255,16 +256,16 @@ class Model:
def init(self, train_dataset=None, valid_dataset=None):
"""
Initializes compute graphs and data graphs with sink mode.
Initialize compute graphs and data graphs with the sink mode.
Note:
The pre-init process currently only supports `GRAPH_MODE` and the `Ascend` target.
Args:
train_dataset (Dataset): A training dataset iterator. If define `train_dataset`, training graphs will be
train_dataset (Dataset): A training dataset iterator. If `train_dataset` is defined, training graphs will be
initialized. Default: None.
valid_dataset (Dataset): A evaluating dataset iterator. If define `valid_dataset`, evaluation graphs will
be initialized, and `metrics` in `Model` can not be None. Default: None.
valid_dataset (Dataset): An evaluating dataset iterator. If `valid_dataset` is defined, evaluation graphs
will be initialized, and `metrics` in `Model` cannot be None. Default: None.
Examples:
>>> train_dataset = get_train_dataset()
......@@ -327,15 +328,16 @@ class Model:
Args:
epoch (int): Total number of iterations on the data.
train_dataset (Dataset): A training dataset iterator. If there is no
loss_fn, a tuple with multiply data (data1, data2, data3, ...) will be
loss_fn, a tuple with multiple data (data1, data2, data3, ...) will be
returned and passed to the network. Otherwise, a tuple (data, label) will
be returned, and the data and label are passed to the network and loss
be returned. The data and label would be passed to the network and loss
function respectively.
callbacks (list): List of callback object. Callbacks which should be executed while training. Default: None.
dataset_sink_mode (bool): Determines whether to pass the data through dataset channel. Default: True.
callbacks (list): List of callback objects which should be executed while training. Default: None.
dataset_sink_mode (bool): Determine whether the data should be passed through the dataset channel.
Default: True.
If pynative mode is configured, the training process will be performed without
dataset sinking.
sink_size (int): Control the amount of data each sink. Default: -1.
sink_size (int): Control the amount of data in each sink. Default: -1.
"""
epoch = check_int_positive(epoch)
self._train_network.set_train()
......@@ -392,13 +394,13 @@ class Model:
Args:
epoch (int): Total number of iterations on the data.
train_dataset (Dataset): A training dataset iterator. If there is no
loss_fn, a tuple with multiply data (data1, data2, data3, ...) should be
loss_fn, a tuple with multiple data (data1, data2, data3, ...) should be
returned and passed to the network. Otherwise, a tuple (data, label) should
be returned, and the data and label are passed to the network and loss
be returned. The data and label would be passed to the network and loss
function respectively.
list_callback (Callback): Executor of callback list. Default: None.
cb_params (_InternalCallbackParam): Callback parameters. Default: None.
sink_size (int): Control the amount of data each sink. Default: -1.
sink_size (int): Control the amount of data in each sink. Default: -1.
"""
if sink_size == -1:
epoch_num = epoch
......@@ -450,9 +452,9 @@ class Model:
Args:
epoch (int): Total number of iterations on the data.
train_dataset (Dataset): A training dataset iterator. If there is no
loss_fn, a tuple with multiply data (data1, data2, data3, ...) should be
loss_fn, a tuple with multiple data (data1, data2, data3, ...) should be
returned and passed to the network. Otherwise, a tuple (data, label) should
be returned, and the data and label are passed to the network and loss
be returned. The data and label would be passed to the network and loss
function respectively.
list_callback (Callback): Executor of callback list. Default: None.
cb_params (_InternalCallbackParam): Callback parameters. Default: None.
......@@ -518,7 +520,7 @@ class Model:
CPU is not supported when dataset_sink_mode is true.
If dataset_sink_mode is True, epoch of training should be equal to the count of repeat
operation in dataset processing. Otherwise, errors could occur since the amount of data
is not the amount training requires.
is not equal to the amount of data required for training.
If dataset_sink_mode is True, data will be sent to device. If device is Ascend, features
of data will be transferred one by one. The limit of data transmission at a time is 256M.
......@@ -527,18 +529,18 @@ class Model:
When dataset_sink_mode is set to true and sink_size>0, each epoch sinks sink_size
steps of data instead of the total number of iterations.
train_dataset (Dataset): A training dataset iterator. If there is no
loss_fn, a tuple with multiply data (data1, data2, data3, ...) should be
loss_fn, a tuple with multiple data (data1, data2, data3, ...) should be
returned and passed to the network. Otherwise, a tuple (data, label) should
be returned, and the data and label are passed to the network and loss
be returned. The data and label would be passed to the network and loss
function respectively.
callbacks (list): List of callback object. Callbacks which should be excuted while training. Default: None.
callbacks (list): List of callback objects which should be executed while training. Default: None.
dataset_sink_mode (bool): Determines whether to pass the data through dataset channel. Default: True.
If pynative mode is configured, the training process will be performed without
dataset sinking.
sink_size (int): Control the amount of data each sink.
If sink_size=-1, sink the complete dataset each epoch.
If sink_size>0, sink sink_size data each epoch.
If dataset_sink_mode is False, set sink_size invalid. Default: -1.
sink_size (int): Control the amount of data in each sink.
If sink_size=-1, sink the complete dataset for each epoch.
If sink_size>0, sink sink_size data for each epoch.
If dataset_sink_mode is False, sink_size is invalid. Default: -1.
Examples:
>>> dataset = get_dataset()
......@@ -573,7 +575,7 @@ class Model:
cb_params (_InternalCallbackParam): Callback parameters. Default: None.
Returns:
Dict, returns the loss value & metrics values for the model in test mode.
Dict, the loss value and metrics values for the model in test mode.
"""
run_context = RunContext(cb_params)
......@@ -612,7 +614,7 @@ class Model:
cb_params (_InternalCallbackParam): Callback parameters. Default: None.
Returns:
Dict, returns the loss value & metrics values for the model in test mode.
Dict, the loss value and metrics values for the model in test mode.
"""
run_context = RunContext(cb_params)
list_callback.begin(run_context)
......@@ -650,12 +652,11 @@ class Model:
Args:
valid_dataset (Dataset): Dataset to evaluate the model.
callbacks (list): List of callback object. Callbacks which should be excuted
while training. Default: None.
callbacks (list): List of callback objects which should be executed while training. Default: None.
dataset_sink_mode (bool): Determines whether to pass the data through dataset channel. Default: True.
Returns:
Dict, returns the loss value & metrics values for the model in test mode.
Dict, the loss value and metrics values for the model in test mode.
Examples:
>>> dataset = get_dataset()
......@@ -690,9 +691,9 @@ class Model:
def predict(self, *predict_data):
"""
Generates output predictions for the input samples.
Generate output predictions for the input samples.
Data could be single tensor, or list of tensor, tuple of tensor.
Data could be a single tensor, a list of tensors, or a tuple of tensors.
Note:
Batch data should be put together in one tensor.
......
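An end-to-end sketch tying the Model APIs in this file together; `Net` and `get_dataset` are the placeholders used by the docstring examples, not real definitions:

```python
import numpy as np
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.train.model import Model

net = Net()
loss = nn.SoftmaxCrossEntropyWithLogits()
optim = nn.Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
model = Model(net, loss_fn=loss, optimizer=optim, metrics={'accuracy'})

dataset = get_dataset()
model.train(2, dataset, dataset_sink_mode=True, sink_size=-1)
acc = model.eval(dataset, dataset_sink_mode=True)

# predict: batch data is put together in one tensor
out = model.predict(Tensor(np.ones([1, 3, 224, 224]).astype(np.float32)))
```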