# to_string.py
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import numpy as np

from ..framework import core
from paddle.fluid.data_feeder import check_type, convert_dtype

__all__ = []

class PrintOptions(object):
    """Container for the global Tensor print settings.

    A single module-level instance, ``DEFAULT_PRINT_OPTIONS``, holds the
    options consulted by every formatting helper in this file.
    """
    # Digits printed after the decimal point for floating elements.
    precision = 8
    # Summarize with "..." once the element count exceeds this.
    threshold = 1000
    # Elements kept at each end of a summarized dimension.
    edgeitems = 3
    # Maximum characters per printed line.
    linewidth = 80
    # Use scientific notation for floats when True.
    sci_mode = False


# Shared options instance read (and mutated by set_printoptions) below.
DEFAULT_PRINT_OPTIONS = PrintOptions()


def set_printoptions(precision=None,
                     threshold=None,
                     edgeitems=None,
                     sci_mode=None,
                     linewidth=None):
    """Set the printing options for Tensor.

    Args:
        precision (int, optional): Number of digits of the floating number, default 8.
        threshold (int, optional): Total number of elements printed, default 1000.
        edgeitems (int, optional): Number of elements in summary at the beginning and ending of each dimension, default 3.
        sci_mode (bool, optional): Format the floating number with scientific notation or not, default False.
        linewidth (int, optional): Number of characters each line, default 80.

    Returns:
        None.

    Examples:
        .. code-block:: python

            import paddle

            paddle.seed(10)
            a = paddle.rand([10, 20])
            paddle.set_printoptions(4, 100, 3)
            print(a)

            '''
            Tensor(shape=[10, 20], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
                   [[0.0002, 0.8503, 0.0135, ..., 0.9508, 0.2621, 0.6661],
                    [0.9710, 0.2605, 0.9950, ..., 0.4427, 0.9241, 0.9363],
                    [0.0948, 0.3226, 0.9955, ..., 0.1198, 0.0889, 0.9231],
                    ...,
                    [0.7206, 0.0941, 0.5292, ..., 0.4856, 0.1379, 0.0351],
                    [0.1745, 0.5621, 0.3602, ..., 0.2998, 0.4011, 0.1764],
                    [0.0728, 0.7786, 0.0314, ..., 0.2583, 0.1654, 0.0637]])
            '''
    """
    # Collect only the explicitly supplied options so the C++ layer keeps
    # its own defaults for everything left as None.
    kwargs = {}

    if precision is not None:
        # Plain `int` here: the original `(int)` parentheses were a no-op
        # (not a tuple) and read misleadingly.
        check_type(precision, 'precision', int, 'set_printoptions')
        DEFAULT_PRINT_OPTIONS.precision = precision
        kwargs['precision'] = precision
    if threshold is not None:
        check_type(threshold, 'threshold', int, 'set_printoptions')
        DEFAULT_PRINT_OPTIONS.threshold = threshold
        kwargs['threshold'] = threshold
    if edgeitems is not None:
        check_type(edgeitems, 'edgeitems', int, 'set_printoptions')
        DEFAULT_PRINT_OPTIONS.edgeitems = edgeitems
        kwargs['edgeitems'] = edgeitems
    if linewidth is not None:
        check_type(linewidth, 'linewidth', int, 'set_printoptions')
        DEFAULT_PRINT_OPTIONS.linewidth = linewidth
        kwargs['linewidth'] = linewidth
    if sci_mode is not None:
        check_type(sci_mode, 'sci_mode', bool, 'set_printoptions')
        DEFAULT_PRINT_OPTIONS.sci_mode = sci_mode
        kwargs['sci_mode'] = sci_mode
    # Mirror the Python-side options into the C++ printing layer.
    core.set_printoptions(**kwargs)


def _to_summary(var):
    """Trim numpy array *var* to at most ``2 * edgeitems`` entries per
    dimension, so that width measurement only inspects the elements that
    will actually be printed."""
    edgeitems = DEFAULT_PRINT_OPTIONS.edgeitems

    # A zero-sized shape such as [0, 2] or [3, 0, 3] has nothing to show.
    if np.prod(var.shape) == 0:
        return np.array([])

    ndim = len(var.shape)
    if ndim == 0:
        # 0-d array: nothing to trim.
        return var
    if ndim == 1:
        if var.shape[0] <= 2 * edgeitems:
            return var
        return np.concatenate([var[:edgeitems], var[(-1 * edgeitems):]])
    # ndim >= 2: keep the leading/trailing rows, then recurse into each.
    if var.shape[0] > 2 * edgeitems:
        rows = list(var[:edgeitems]) + list(var[(-1 * edgeitems):])
    else:
        rows = list(var)
    return np.stack([_to_summary(row) for row in rows])
119 120


def _format_item(np_var, max_width=0, signed=False):
    """Render one scalar element as text.

    Floats honor ``sci_mode``/``precision`` from DEFAULT_PRINT_OPTIONS;
    whole-valued floats are shown as e.g. ``3.``. When ``max_width`` is
    non-zero the result is left-justified to that width, reserving a
    leading space for the sign column when ``signed`` is True.
    """
    is_float = (np_var.dtype == np.float32 or np_var.dtype == np.float64
                or np_var.dtype == np.float16)
    if is_float:
        if DEFAULT_PRINT_OPTIONS.sci_mode:
            item_str = '{{:.{}e}}'.format(
                DEFAULT_PRINT_OPTIONS.precision).format(np_var)
        elif np.ceil(np_var) == np_var:
            # Whole-valued float: print a trailing dot instead of zeros.
            item_str = '{:.0f}.'.format(np_var)
        else:
            item_str = '{{:.{}f}}'.format(
                DEFAULT_PRINT_OPTIONS.precision).format(np_var)
    else:
        item_str = '{}'.format(np_var)

    if max_width <= len(item_str):
        # Measurement pass (_get_max_width) calls with max_width == 0.
        return item_str
    if not signed:
        return item_str.ljust(max_width)
    # Sign-aware padding for tensors with negative items: non-negative
    # values get a blank sign column.
    if np_var < 0:
        return item_str.ljust(max_width)
    return ' ' + item_str.ljust(max_width - 1)


def _get_max_width(var):
    """Return ``(max_width, signed)`` for numpy array *var*: the widest
    unpadded item string among its elements, and whether any element is
    negative."""
    max_width = 0
    signed = False
    for item in var.flatten():
        if not signed and item < 0:
            signed = True
        # Unpadded render (max_width defaults to 0) to measure true width.
        max_width = max(max_width, len(_format_item(item)))

    return max_width, signed
def _format_tensor(var, summary, indent=0, max_width=0, signed=False):
    """
    Format a tensor (a numpy array) as nested bracketed text.

    Args:
        var(Tensor): The tensor to be formatted.
        summary(bool): Do summary or not. If true, some elements will not be printed, and be replaced with "...".
        indent(int): The indent of each line.
        max_width(int): The max width of each elements in var.
        signed(bool): Print +/- or not.
    """
    edgeitems = DEFAULT_PRINT_OPTIONS.edgeitems
    linewidth = DEFAULT_PRINT_OPTIONS.linewidth

    if len(var.shape) == 0:
        # currently, shape = [], i.e., scaler tensor is not supported.
        # If it is supported, it should be formatted like this.
        return _format_item(var, max_width, signed)
    elif len(var.shape) == 1:
        # One item plus the ", " separator; fit as many items per line as
        # `linewidth` allows (at least one).
        item_length = max_width + 2
        items_per_line = (linewidth - indent) // item_length
        items_per_line = max(1, items_per_line)

        if summary and var.shape[0] > 2 * edgeitems:
            # Keep `edgeitems` items at each end with '...' in between.
            items = [
                _format_item(item, max_width, signed)
                for item in list(var)[:edgeitems]
            ] + ['...'] + [
                _format_item(item, max_width, signed)
                for item in list(var)[(-1 * edgeitems):]
            ]
        else:
            items = [
                _format_item(item, max_width, signed) for item in list(var)
            ]
        # Chunk the rendered items into rows of `items_per_line`.
        lines = [
            items[i:i + items_per_line]
            for i in range(0, len(items), items_per_line)
        ]
        # Continuation rows are aligned one column past the opening '['.
        s = (',\n' + ' ' * (indent + 1)).join(
            [', '.join(line) for line in lines])
        return '[' + s + ']'
    else:
        # recursively handle all dimensions
        if summary and var.shape[0] > 2 * edgeitems:
            # Summarize the leading dimension, '...' between the two ends.
            vars = [
                _format_tensor(x, summary, indent + 1, max_width, signed)
                for x in var[:edgeitems]
            ] + ['...'] + [
                _format_tensor(x, summary, indent + 1, max_width, signed)
                for x in var[(-1 * edgeitems):]
            ]
        else:
            vars = [
                _format_tensor(x, summary, indent + 1, max_width, signed)
                for x in var
            ]

        # Sub-tensors are separated by (ndim - 1) newlines, so deeper
        # dimensions get visibly wider gaps, then aligned past the '['.
        return '[' + (',' + '\n' * (len(var.shape) - 1) + ' ' *
                      (indent + 1)).join(vars) + ']'


def to_string(var, prefix='Tensor'):
    """Build the printable representation of a dense Tensor variable.

    Args:
        var: the Tensor-like variable to render.
        prefix(str): leading tag of the representation, default 'Tensor'.

    Returns:
        str: the formatted representation, or "Tensor(Not initialized)"
        when the underlying storage has not been allocated.
    """
    indent = len(prefix) + 1

    # numpy has no bfloat16 counterpart, so report that dtype by name.
    dtype = convert_dtype(var.dtype)
    if var.dtype == core.VarDesc.VarType.BF16:
        dtype = 'bfloat16'

    _template = "{prefix}(shape={shape}, dtype={dtype}, place={place}, stop_gradient={stop_gradient},\n{indent}{data})"

    tensor = var.value().get_tensor()
    if not tensor._is_initialized():
        return "Tensor(Not initialized)"

    # Widen to float32 before .numpy(): numpy cannot hold bfloat16.
    if var.dtype == core.VarDesc.VarType.BF16:
        var = var.astype('float32')
    np_var = var.numpy()

    # Element count; a 0-d tensor counts as 0 and is never summarized.
    size = 0
    if len(var.shape) > 0:
        size = 1
        for dim in var.shape:
            size *= dim

    summary = size > DEFAULT_PRINT_OPTIONS.threshold

    # Measure only the elements that will actually be printed.
    max_width, signed = _get_max_width(_to_summary(np_var))

    data = _format_tensor(np_var,
                          summary,
                          indent=indent,
                          max_width=max_width,
                          signed=signed)

    return _template.format(prefix=prefix,
                            shape=var.shape,
                            dtype=dtype,
                            place=var._place_str,
                            stop_gradient=var.stop_gradient,
                            indent=' ' * indent,
                            data=data)
def _format_dense_tensor(tensor, indent):
    """Format the element data of a dense tensor (no header line), honoring
    the global summary threshold. Also used for the component tensors of
    the sparse formats."""
    # Widen to float32 before .numpy(): numpy cannot hold bfloat16.
    if tensor.dtype == core.VarDesc.VarType.BF16:
        tensor = tensor.astype('float32')

    np_tensor = tensor.numpy()

    # Element count; a 0-d tensor counts as 0 and is never summarized.
    size = 0
    if len(tensor.shape) > 0:
        size = 1
        for dim in tensor.shape:
            size *= dim

    # Renamed from the original misspelled local `sumary`.
    summarize = size > DEFAULT_PRINT_OPTIONS.threshold

    # Measure only the elements that will actually be printed.
    max_width, signed = _get_max_width(_to_summary(np_tensor))

    return _format_tensor(np_tensor,
                          summarize,
                          indent=indent,
                          max_width=max_width,
                          signed=signed)


def sparse_tensor_to_string(tensor, prefix='Tensor'):
    """Build the printable representation of a sparse (COO or CSR) Tensor,
    rendering each component tensor aligned under its own label."""
    indent = len(prefix) + 1

    def labelled(name, component):
        # Render "name=<data>" with the data indented past "name=".
        return name + '=' + _format_dense_tensor(component,
                                                 indent + len(name) + 1)

    if tensor.is_sparse_coo():
        _template = "{prefix}(shape={shape}, dtype={dtype}, place={place}, stop_gradient={stop_gradient}, \n{indent}{indices}, \n{indent}{values})"
        return _template.format(prefix=prefix,
                                shape=tensor.shape,
                                dtype=tensor.dtype,
                                place=tensor._place_str,
                                stop_gradient=tensor.stop_gradient,
                                indent=' ' * indent,
                                indices=labelled('indices', tensor.indices()),
                                values=labelled('values', tensor.values()))
    else:
        # CSR layout: compressed row pointers, column indices, values.
        _template = "{prefix}(shape={shape}, dtype={dtype}, place={place}, stop_gradient={stop_gradient}, \n{indent}{crows}, \n{indent}{cols}, \n{indent}{values})"
        return _template.format(prefix=prefix,
                                shape=tensor.shape,
                                dtype=tensor.dtype,
                                place=tensor._place_str,
                                stop_gradient=tensor.stop_gradient,
                                indent=' ' * indent,
                                crows=labelled('crows', tensor.crows()),
                                cols=labelled('cols', tensor.cols()),
                                values=labelled('values', tensor.values()))
def tensor_to_string(tensor, prefix='Tensor'):
    """Entry point for printing an eager Tensor: dispatches sparse tensors
    to ``sparse_tensor_to_string`` and renders dense ones here."""
    indent = len(prefix) + 1

    # bfloat16 is reported by name; convert_dtype cannot label it.
    dtype = convert_dtype(tensor.dtype)
    if tensor.dtype == core.VarDesc.VarType.BF16:
        dtype = 'bfloat16'

    _template = "{prefix}(shape={shape}, dtype={dtype}, place={place}, stop_gradient={stop_gradient},\n{indent}{data})"

    if tensor.is_sparse():
        return sparse_tensor_to_string(tensor, prefix)

    if not tensor._is_dense_tensor_hold_allocation():
        return "Tensor(Not initialized)"

    return _template.format(prefix=prefix,
                            shape=tensor.shape,
                            dtype=dtype,
                            place=tensor._place_str,
                            stop_gradient=tensor.stop_gradient,
                            indent=' ' * indent,
                            data=_format_dense_tensor(tensor, indent))