# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import collections
import itertools
import os
import re
import string

import numpy as np
import opt_einsum

from paddle import _C_ops, _legacy_C_ops

from ..fluid.data_feeder import check_type, check_variable_and_dtype
from ..fluid.framework import _in_legacy_dygraph, in_dygraph_mode
from ..fluid.layer_helper import LayerHelper
from .linalg import matmul, transpose
from .manipulation import reshape, squeeze, unsqueeze
from .math import multiply
from .math import sum as paddle_sum

__all__ = []


def parse_op_labels(labelstr, operand):
    '''
    Parse labels for an input operand.

    Parameters
    ----------
    labelstr:
        the input label string
    operand:
        the input operand

    Returns
    -------
    the input operand's full label string, in which all anonymous dimensions
    are labeled with dots
    '''
    # Sanity checks
    for c in labelstr.replace('.', ''):
        assert (
            c.isalpha()
        ), f"Invalid equation: {c} is not a valid label, which should be letters."

    assert (
        labelstr.replace('...', '', 1).find('.') == -1
    ), "Invalid equation: `.` is found outside of an ellipsis."

    # Check shape. Note that in Paddle a tensor's rank is always nonzero
    ndims = len(operand.shape)
    assert ndims > 0

    full_labelstr = labelstr.replace('...', '.' * (ndims - len(labelstr) + 3))

    assert (
        len(full_labelstr) == ndims
    ), f"Invalid equation: the label string '{labelstr}' misses dimensions."

    return full_labelstr
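
# For illustration, a hedged sketch of how the ellipsis expands (the
# paddle.rand calls below are assumed example inputs, not part of this module):
#
#     parse_op_labels('ij...', paddle.rand([2, 3, 4, 5]))  # -> 'ij..'
#     parse_op_labels('...jk', paddle.rand([2, 3, 4, 5]))  # -> '..jk'
#
# Each anonymous (broadcast) dimension becomes a single dot.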


def parse_labels(labelstr, operands):
    '''
    Parse label strings for all input operands.

    Parameters
    ----------
    labelstr:
        The equation's label string
    operands:
        The input operands

    Returns
    -------
    list of full label strings for all input operands
    '''

    nop_labels = labelstr.split(',')
    assert len(nop_labels) == len(operands), (
        f"Invalid equation: the number of operands is {len(operands)}, "
        f"but found {len(nop_labels)} segments in the label equation."
    )

    return list(map(parse_op_labels, nop_labels, operands))
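
# For example (an illustrative sketch with assumed operands):
#
#     parse_labels('ij,jk', [paddle.rand([2, 3]), paddle.rand([3, 4])])
#     # -> ['ij', 'jk'], one full label string per operand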


def validate_rhs(rhs, input_labels, n_bcast_dims):
    '''
    Check whether the equation's right hand side is valid
    '''
    # Sanity check.
    if n_bcast_dims > 0:
        assert (
            '...' in rhs
        ), "Invalid equation: missing ellipsis in output labels."

    rhs = rhs.replace('...', '')
    rhs_set = set(rhs)

    # Hidden assumption: available labels don't include '.'
    assert '.' not in input_labels

    # Verify that output labels all come from the set of input labels
    non_input_labels = rhs_set.difference(input_labels)
    assert not non_input_labels, (
        f"Invalid equation: "
        f"output label {sorted(non_input_labels)} not used by any input."
    )
    # Verify that there are no duplicate output labels
    assert len(rhs) == len(
        rhs_set
    ), "Invalid equation: duplicate output labels are found."


def build_view(in_labels, out_labels):
    '''
    Build an inverse map of dimension indices. Three conditions must hold for
    the result to be meaningful.
    First, no duplicate letter labels in each label string.
    Second, the number of dots in out_labels >= that in in_labels.
    Third, dots are contiguous in each label string.

    Parameters
    ----------
    in_labels:
        The dimension labels to map to
    out_labels:
        The dimension labels to map from

    Returns
    -------
    The inverse map from out_labels to in_labels. The length of the inverse map
    equals that of out_labels. -1 is filled if there's no matching input
    dimension for a specific label.

    Examples
    --------
    in_labels = 'ij..', out_labels = '..ji'
    inv_map = [2, 3, 1, 0]
    in_labels = 'ij..', out_labels = '..kji'
    inv_map = [2, 3, -1, 1, 0]
    '''

    inv_map = [-1] * len(out_labels)

    # First build the broadcast dimension mapping
    # Find the broadcast index range in out_labels
    r = re.search(r'\.+', out_labels)
    if r:
        start, end = r.start(), r.end()
        s = re.search(r'\.+', in_labels)
        # fill the broadcast dimension indices from right to left.
        if s:
            for ax, dim in zip(
                range(start, end)[::-1], range(s.start(), s.end())[::-1]
            ):
                inv_map[ax] = dim

    # Now work on non-broadcast dimensions
    if r:
        it = itertools.chain(range(start), range(end, len(out_labels)))
    else:
        it = iter(range(len(out_labels)))

    for i in it:
        inv_map[i] = in_labels.find(out_labels[i])

    return inv_map


def build_global_view(nop_labels, rhs, n_bcast_dims):
    '''
    Build the global view, which is a layout of all dimension labels
    plus an index table that maps from the layout to the dimensions
    in each operand. In the global view, the dimensions are arranged
    such that output ones are put on the left and contraction ones
    are put on the right.

    Parameters
    ----------
    nop_labels:
        The input full label strings of all input operands
    rhs:
        The equation right hand side
    n_bcast_dims:
        The maximum number of broadcast dimensions

    Returns
    -------
    A tuple of g_labels, g_view, g_nout, g_count
    g_labels:
        the layout of all labels in a string
    g_view:
        the index table
    g_nout:
        the number of output dimensions
    g_count:
        the counter array for dimension contractions
    '''
    # Put all labels in alphabetical order
    concat = sorted(''.join(nop_labels).replace('.', ''))
    labels, count = [], []
    for a, b in zip(['.'] + concat, concat):
        if a != b:
            labels.append(b)
            count.append(1)
        else:
            count[-1] += 1

    if rhs is not None:
        validate_rhs(rhs, labels, n_bcast_dims)
        g_labels_out = rhs.replace('...', '.' * n_bcast_dims)
    else:
        g_labels_out = '.' * n_bcast_dims + ''.join(
            l for l, c in zip(labels, count) if c == 1
        )

    for i in range(len(count))[::-1]:
        if labels[i] in g_labels_out:
            labels.pop(i)
            count.pop(i)

    g_labels_sum = ''.join(labels)
    g_labels = g_labels_out + g_labels_sum
    g_view = list(map(lambda i: build_view(i, g_labels), nop_labels))
    g_nout = len(g_labels_out)
    g_count = count

    return g_labels, g_view, g_nout, g_count
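
# A worked example (a sketch under assumed inputs): for the equation 'ij,jk'
# with no explicit output labels and no broadcasting,
#
#     build_global_view(['ij', 'jk'], None, 0)
#
# returns g_labels='ikj' (output labels 'ik' first, the contracted 'j' last),
# g_view=[[0, -1, 1], [-1, 1, 0]], g_nout=2 and g_count=[2].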


def build_global_shape(g_view, g_labels, op_shapes):
    '''
    The global shape is the shape of all dimensions rearranged and broadcast
    to the global view. It's a reference data structure for einsum planning.

    Parameters
    ----------
    g_view:
        the global view
    g_labels:
        the global dimension labels
    op_shapes:
        the shapes of all the operands

    Returns
    -------
    g_shape:
        the global shape vector
    g_masks:
        list of shape masks for each operand. A dimension's shape mask is a boolean
        indicating whether its size > 1, in other words, it's not squeezable
    '''
    view_shapes = []
    g_masks = []

    for view, op_shape in zip(g_view, op_shapes):
        view_shapes.append([op_shape[dim] if dim > -1 else 1 for dim in view])

    g_shape = [set(sizes_per_ax) - {1} for sizes_per_ax in zip(*view_shapes)]

    non_bcastable = [ax for ax, sizes in enumerate(g_shape) if len(sizes) > 1]

    assert not non_bcastable, (
        f"Invalid operands: label {g_labels[non_bcastable[0]]} "
        f"corresponds to non-broadcastable dimensions."
    )

    g_shape = [sizes.pop() if len(sizes) > 0 else 1 for sizes in g_shape]

    g_masks = [
        [s > 1 or s == -1 for s in view_shape] for view_shape in view_shapes
    ]

    return g_shape, g_masks
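
# Continuing the 'ij,jk' sketch above with assumed operand shapes [2, 3] and
# [3, 4]:
#
#     build_global_shape([[0, -1, 1], [-1, 1, 0]], 'ikj', [[2, 3], [3, 4]])
#
# yields g_shape=[2, 4, 3] and g_masks=[[True, False, True],
# [False, True, True]].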


def has_duplicated_labels(labels):
    '''
    Returns True if there is any duplicate label.
    '''
    labels = labels.replace('.', '')
    return len(labels) > len(set(labels))


def diagonalize(labels, operand):
    '''
    Merge dimensions with duplicate labels.

    For dimensions with duplicate labels, merge them into one dimension
    which represents the diagonal elements. This requires that the
    dimensions with duplicate labels be equal sized.

    Examples
    --------
    'ijj...i' would be merged into 'ij...'
    '''
    assert not has_duplicated_labels(
        labels
    ), 'Duplicate labels are not supported.'

    return labels, operand


def plan_reduce(plan, op, reduce_dims, keepdim):
    '''
    Add reduce to the plan
    '''
    varname = f'op{op}'

    f = lambda var, dims: paddle_sum(var, dims, keepdim=keepdim)
    step = f, [varname], varname, reduce_dims
    plan.add_step(step)
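
# E.g. (a sketch, with `plan` an assumed Plan instance): plan_reduce(plan, 1,
# [2, 3], keepdim=True) records a step that replaces variable 'op1' with
# paddle_sum(op1, [2, 3], keepdim=True) when the plan executes.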


def plan_scalar_prod(plan, op1, op2):
    varnames = [f'op{op1}', f'op{op2}']
    f = lambda var1, var2: paddle_sum(var1) * var2
    step = f, varnames, varnames[1]
    plan.add_step(step)


def plan_matmul(plan, g_view, op1, op2, g_supports, g_shape, I, J1, J2, K):
    '''
    plan matmul
    '''
    # Transpose and re-shape op1 and op2 in I, J1, K and I, J2, K
    # Then apply matmul(x, y, transpose_x=False, transpose_y=True)
    var1, var2 = f'op{op1}', f'op{op2}'

    op1_view, op2_view = [g_view[op] for op in (op1, op2)]

    I1 = [idx for idx in I if op1_view[idx] >= 0]
    I2 = [idx for idx in I if op2_view[idx] >= 0]
    op1_view = np.array(op1_view)
    op1_dims = op1_view[I1 + J1 + K]

    op2_view = np.array(op2_view)
    op2_dims = op2_view[I2 + J2 + K]

    op1_mask, op2_mask = [g_supports[op] for op in (op1, op2)]
    op1_vshape = np.array([s if m else 1 for s, m in zip(g_shape, op1_mask)])
    op2_vshape = np.array([s if m else 1 for s, m in zip(g_shape, op2_mask)])
    vshape = np.maximum(op1_vshape, op2_vshape)

    i1, i2, j1, j2, k = map(len, (I1, I2, J1, J2, K))

    if any(op1_dims != np.arange(len(op1_dims))):
        step = transpose, [var1], var1, list(op1_dims)
        plan.add_step(step)

    if any(op2_dims != np.arange(len(op2_dims))):
        step = transpose, [var2], var2, list(op2_dims)
        plan.add_step(step)

    # Check if conditions hold for turning the operation into a matmul
    if (
        j1 + j2 > 0
        and k > 0
        and -1 not in np.concatenate((op1_vshape, op2_vshape))
    ):
        op1_shape = (
            list(op1_vshape[I])
            + [np.prod(op1_vshape[J1])]
            + [np.prod(op1_vshape[K])]
        )
        op2_shape = (
            list(op2_vshape[I])
            + [np.prod(op2_vshape[J2])]
            + [np.prod(op2_vshape[K])]
        )

        # Merge J dims and K dims by reshaping
        step = reshape, [var1], var1, op1_shape
        plan.add_step(step)
        step = reshape, [var2], var2, op2_shape
        plan.add_step(step)

        # Matmul
        step = matmul, [var1, var2], var2, False, True
        plan.add_step(step)

        # Reshape back
        shape = list(vshape[I + J1 + J2])
        step = reshape, [var2], var2, shape
        plan.add_step(step)

    elif j1 == j2 == k == 1:
        # Can still do matmul even if unknown shapes are present
        step = matmul, [var1, var2], var2, False, True
        plan.add_step(step)

    # In the remaining cases we opt for ops other than matmul
    else:
        # unsqueeze operands include J1...J2... dimensions
        if j2:
            fill = list(range(i1 + j1, i1 + j1 + j2))
            step = unsqueeze, [var1], var1, fill
            plan.add_step(step)
        if j1:
            fill = list(range(i2, i2 + j1))
            step = unsqueeze, [var2], var2, fill
            plan.add_step(step)
        # In case of no dimensions to contract, do an elementwise multiply
        if k == 0:
            # make broadcast
            step = multiply, [var1, var2], var2
            plan.add_step(step)
        # Contract and no join, turn into a dot
        elif j1 + j2 == 0 and k == 1:
            step = unsqueeze, [var1], var1, [-2]
            plan.add_step(step)
            step = unsqueeze, [var2], var2, [-1]
            plan.add_step(step)
            step = matmul, [var1, var2], var2
            plan.add_step(step)
            step = squeeze, [var2], var2, [-1, -2]
            plan.add_step(step)
        elif j1 + j2 == 0 and -1 not in np.concatenate(
            (op1_vshape[K], op2_vshape[K])
        ):
            assert all(op1_vshape[K] == op2_vshape[K])
            step = (
                reshape,
                [var1],
                var1,
                list(op1_vshape[I]) + [1] + [np.prod(op1_vshape[K])],
            )
            plan.add_step(step)
            step = (
                reshape,
                [var2],
                var2,
                list(op2_vshape[I]) + [1] + [np.prod(op2_vshape[K])],
            )
            plan.add_step(step)
            step = matmul, [var1, var2], var2, False, True
            plan.add_step(step)
            step = squeeze, [var2], var2, [-1, -2]
            plan.add_step(step)
        else:
            step = multiply, [var1, var2], var2
            plan.add_step(step)
            reduce_dims = list(range(-k, 0))
            plan_reduce(plan, op2, reduce_dims, keepdim=False)

    # Wrap up, updating auxiliary data
    # Updating g_mask for I and J axes
    for ax in I + J1 + J2:
        op2_mask[ax] = vshape[ax] > 1 or vshape[ax] == -1

    for ax in K:
        op2_mask[ax] = False

    for ax in range(len(op2_view)):
        op2_view[ax] = -1
    dim = 0
    for ax in I + J1 + J2:
        op2_view[ax], dim = dim, dim + 1

    g_view[op2] = list(op2_view)


def plan_summation(
    plan, g_view, op1, op2, g_supports, g_shape, g_count, n_bcast
):
    '''
    Plan various kinds of summation
    '''
    op1_view, op2_view = g_view[op1], g_view[op2]
    op1_mask, op2_mask = g_supports[op1], g_supports[op2]

    ndim = len(op1_view)
    nout = ndim - len(g_count)

    count = [0] * nout + g_count

    I, K, J1, J2 = list(range(n_bcast)), [], [], []

    for ax, dim1, dim2 in zip(
        range(n_bcast, ndim), op1_view[n_bcast:], op2_view[n_bcast:]
    ):

        if (dim1 != -1) != (dim2 != -1):
            if dim1 != -1:
                J1.append(ax)
            else:
                J2.append(ax)
        elif dim1 != -1:
            fold = int(op1_mask[ax]) + int(op2_mask[ax])
            if ax >= nout and fold == count[ax]:
                # Ready to fold the dimensions
                K.append(ax)
                count[ax] -= fold
            else:
                I.append(ax)
                count[ax] -= max(fold - 1, 0)

    # Update g_count
    g_count[:] = count[nout:]

    # Now it's OK to merge the K dims as the same shape holds
    plan_matmul(plan, g_view, op1, op2, g_supports, g_shape, I, J1, J2, K)


def rearrange(axes):
    perm, fill = [], []
    for ax, dim in enumerate(axes):
        if dim < 0:
            fill.append(ax)
        else:
            perm.append(dim)
    # Trivial permutation returns []
    if all(i == dim for i, dim in enumerate(perm)):
        perm = []

    return perm, fill
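
# For instance (an assumed example): rearrange([2, -1, 0]) returns
# perm=[2, 0] and fill=[1], meaning: transpose the operand's dims into the
# order (2, 0), then unsqueeze axis 1. An identity permutation is returned
# as the empty list [].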


def plan_broadcast(plan, operands, nop_axes):
    '''
    Plan broadcasting across all the operands
    '''
    nop = len(operands)
    varnames = [f'op{i}' for i in range(nop)]

    for i, op_axes in zip(range(nop), nop_axes):
        # Re-arrange the dimensions according to the global layout
        perm, fill = rearrange(op_axes)
        var = varnames[i]
        if perm:
            step = transpose, [var], var, perm
            plan.add_step(step)
        if fill:
            step = unsqueeze, [var], var, fill
            plan.add_step(step)

    def f(*args):
        expr = ' * '.join(varnames)
        return eval(expr, dict(zip(varnames, args)))

    step = f, varnames, None
    plan.add_step(step)


class Plan:
    def __init__(self):
        self.env = {}
        self.steps = []

    def add_step(self, step):
        self.steps.append(step)

    def get_var(self, varname):
        return self.env[varname] if varname in self.env else None

    def set_var(self, varname, var):
        self.env[varname] = var

    def show(self):
        res = None
        for f, in_varnames, out_varname, *args in self.steps:
            print(repr((out_varname, f, *in_varnames, *args)))
        return res

    def execute(self):
        res = None
        for f, in_varnames, out_varname, *args in self.steps:
            res = f(*map(self.get_var, in_varnames), *args)
            if out_varname:
                self.set_var(out_varname, res)
        return res
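
# A minimal usage sketch of Plan (x is an assumed input tensor):
#
#     plan = Plan()
#     plan.set_var('op0', x)
#     plan.add_step((paddle_sum, ['op0'], 'op0', [0]))  # op0 = paddle_sum(op0, [0])
#     result = plan.execute()
#
# Each step is a tuple (callable, input varnames, output varname, *extra args);
# execute() threads the environment through the steps and returns the last result.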


def plan_einsum(operands, g_view, g_shape, g_supports, g_count, n_bcast):
    '''
    Plans the actual execution steps.

    Returns
    -------
    the execution plan
    '''
    nop = len(operands)
    ndim = len(g_view[0])
    nout = ndim - len(g_count)

    # Initialize a plan with an environment
    plan = Plan()
    op_names = [f'op{i}' for i in range(nop)]
    list(map(plan.set_var, op_names, operands))

    # In case no dimensions to combine, do broadcast straight across
    if not g_count:
        plan_broadcast(plan, operands, g_view)
        return plan

    # Down count degenerate contraction dimensions.
    for view, support in zip(g_view, g_supports):
        # To collect the down count number, we use a type casting trick
        down_count = [
            int((d + 1) and (not s))
            for d, s in zip(view[nout:], support[nout:])
        ]
        for i, count in enumerate(down_count):
            g_count[i] -= count

    # Reduce any dimension for which g_support is set and g_count == 1
    for i, view, mask in zip(range(nop), g_view, g_supports):
        to_reduce = []
        for dim, masked, count in zip(view[nout:], mask[nout:], g_count):
            to_reduce.append(dim if (masked and count == 1) else -1)

        reduce_dims = list(filter(lambda x: x > -1, to_reduce))
        if reduce_dims:
            plan_reduce(plan, i, reduce_dims, keepdim=True)

        # Unset mask and decrease g_count for the reduced dimensions
        for i, d in enumerate(to_reduce):
            ax = i + nout
            mask[ax] = mask[ax] and (d == -1)
            g_count[i] -= 0 if d == -1 else 1

    # Plan the summations over the operand sequence
    for i in range(nop):
        # plan a single step

        if i == 0:
            continue

        # We'd like to arrange the dimensions in the following way:
        # [I...  J... K...]
        # [I...  J... K...]
        # where
        #       I... are aligned and not to be combined immediately
        #       J... are not aligned and not to be combined immediately
        #       K... are aligned and should be immediately combined
        # At this point the non-trivial broadcast dimensions in K are already reduced
        # and removed. That means all K dimensions are aligned and their sizes are not 1.
        # We then inspect the layout of I,J,K plus the above observation to make
        # specialization decisions. The current strategy is set as follows:
        #  (1) if I... J... K... are all empty, it's multiplying a scalar
        #  (2) if K... are empty, better use a broadcast
        #  (3) if I... J... empty and K... not empty, a vector-vector multiply (or a dot)
        #  (4) Otherwise, either I... or J... not empty, and K... not empty, use a general matmul

        # Resolve the summation kind: dot, matmul or *
        if not any(g_supports[i - 1]):
            # op1 is a one element tensor.
            plan_scalar_prod(plan, i - 1, i)
        else:
            plan_summation(
                plan, g_view, i - 1, i, g_supports, g_shape, g_count, n_bcast
            )

    assert all(not masked for masked in g_supports[nop - 1][nout:])

    view = g_view[-1]
    if any(ax != dim for ax, dim in enumerate(view[:nout])):
        perm = [dim for dim in view if dim >= 0]
        if sorted(perm) != perm:
            varname = f'op{nop-1}'
            step = transpose, [varname], varname, perm
            plan.add_step(step)
        dim = 0
        unsqueeze_dims = []
        for ax, d in enumerate(view):
            if d != -1:
                view[ax], dim = dim, dim + 1
        for ax, d in enumerate(view[:nout]):
            if d == -1:
                unsqueeze_dims.append(ax)
        if unsqueeze_dims:
            varname = f'op{nop-1}'
            step = unsqueeze, [varname], varname, unsqueeze_dims
            plan.add_step(step)

    squeeze_dims = [dim for dim in view[nout:] if dim != -1]
    if squeeze_dims:
        varname = f'op{nop-1}'
        step = squeeze, [varname], varname, squeeze_dims
        plan.add_step(step)

    return plan


def preprocess(equation, *operands):
    """
    Check the equation and raise errors; generate default right-hand-side labels if absent.
    """
    equation = equation.replace(" ", "")
    nop = len(operands)
    assert nop > 0, (
        "Required at least one operand in Einsum API, but received %s " % nop
    )

    # Part the equation to left hand side and right hand side
    lhs, *rhs = equation.lower().split('->')
    assert len(rhs) < 2, "Invalid equation: multiple `->` were found."

    labels = parse_labels(lhs, operands)
    # Note, we distinguish between 'ij->' and 'ij' by setting rhs to '' and None
    rhs = rhs[0] if rhs else None
    if rhs is None:
        rhs = rhs_inference(lhs)

    assert len(lhs.split(',')) == len(operands), (
        f"Invalid equation: the number of operands is {len(operands)}, "
        f"but found {len(lhs.split(','))} segments in the label equation."
    )

    assert not (
        '...' in lhs and '...' not in rhs
    ), 'Invalid equation: missing ellipsis in output labels.'

    assert not (
        len(list(filter(has_duplicated_labels, lhs.split(',')))) > 0
    ), 'Duplicate labels are not supported.'

    assert not has_duplicated_labels(
        rhs
    ), 'Invalid equation: duplicate output labels are found.'

    return lhs, rhs, labels
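
# For example (illustrative, with x and y assumed tensors of shapes [2, 3] and
# [3, 4]): preprocess('ij,jk', x, y) returns lhs='ij,jk', the inferred rhs='ik'
# and labels=['ij', 'jk'].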


def parse_fake_shape(equation, operands, labels):
    """
    This shape is only used for operand planning and may differ from the
    original shape. For example:
    ... is replaced by 1
    -1  is replaced by 1
    Returns
    -------
    list of shapes
    """
    shaped = collections.namedtuple('shaped', ['shape'])

    def fake_shape(label, op):
        assert len(op.shape) == len(label), (
            "length of shape and length of label must be the same, but received %d != %d"
            % (len(op.shape), len(label))
        )
        fakes = [s for i, (l, s) in enumerate(zip(label, op.shape)) if l != '.']
        fakes = list(map(abs, fakes))  # make -1 -> 1
        if '.' in label:
            fakes.insert(label.index('.'), 1)
        return shaped(fakes)

    out = list(map(fake_shape, labels, operands))
    return out
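
# As a sketch (one assumed operand of shape [5, 2, -1] labeled 'i.k'): the
# dotted dimension's size is replaced by 1 and the unknown -1 becomes 1, so
# the fake shape used for planning is [5, 1, 1].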


def rhs_inference(lhs):
    def is_free(key):
        return cnt.get(key) == 1 and key not in ['.', ',']

    cnt = collections.Counter(lhs)
    rhs = "..." if '...' in lhs else ""
    rhs = rhs + "".join(filter(is_free, sorted(cnt.elements())))
    return rhs
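
# For example (illustrative): rhs_inference('ij,jk') -> 'ik', since i and k
# occur once (free) while j occurs twice (dummy); rhs_inference('...ij,...jk')
# -> '...ik', with the broadcast labels put first.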


def gen_equation_for_opteinsum(lhs, rhs):
    """
    1. Generate rhs if rhs is None.
    2. Replace '...' with an unused single-letter label.
    """

    def get_used_label(counter):
        used = set(counter.elements())
        for c in string.ascii_lowercase:
            if c not in used:
                return c
        raise ValueError(
            "You have used all `a` - `z`, there can't find a unused for einsum optimization"
        )

    cnt = collections.Counter(lhs)
    broadcast_label = get_used_label(cnt)
    if rhs is None:
        rhs = rhs_inference(lhs)
    lhs = lhs.replace("...", broadcast_label)
    rhs = rhs.replace("...", broadcast_label)
    return lhs + "->" + rhs, broadcast_label
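
# A short example (assumed inputs): gen_equation_for_opteinsum('...ij,...jk',
# '...ik') picks the first unused letter, here 'a', and returns
# ('aij,ajk->aik', 'a'), so opt_einsum never sees an ellipsis.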


def einsum_v2(equation, *operands):
    """
    einsum v2 implementation.
    1. Implemented by the C++ EinsumOp.
    2. V2 creates the EinsumOp to do the calculation, so only a little
       verification work is done in Python.
    3. V2 uses opt_einsum.contract_path to optimize the multivariable einsum.
    """
    n_op = len(operands)
    lhs, rhs, labels = preprocess(equation, *operands)

    if n_op <= 2:
        return gen_einsum_op(lhs + '->' + rhs, *operands)

    shapes = parse_fake_shape(lhs, operands, labels)
    opt_equation, broadcast_label = gen_equation_for_opteinsum(lhs, rhs)
    _, cons = opt_einsum.contract_path(opt_equation, *shapes, einsum_call=True)
    var_list = list(operands)
    for path in cons:
        (a, b), _, eq, *__ = path
        assert (
            a > b
        ), "Assume the first var_idx is smaller than the second_idx. opt_einsum can guarantee it."
        var_s = [var_list.pop(a), var_list.pop(b)]
        eq = eq.replace(broadcast_label, "...")
        var_list.append(gen_einsum_op(eq, *var_s))
    assert (
        len(var_list) == 1
    ), "There must be one elements in list, but received %d." % len(var_list)
    return var_list[0]


def gen_einsum_op(equation, *operands):
    """
    EinsumOp Python Interface:
836 837 838
    """
    assert len(operands) <= 2, "EinsumOp only supports at most two operands."
    if in_dygraph_mode():
        return _C_ops.einsum(operands, equation)[0]

    if _in_legacy_dygraph():
        # legacy dygraph
        return _legacy_C_ops.einsum(
            operands, len(operands), len(operands), 'equation', equation
        )[0]

    for inp in operands:
        check_variable_and_dtype(inp, 'dtype', ['float32', 'float64'], 'einsum')
    check_type(equation, 'equation', str, 'einsum')
    helper = LayerHelper('einsum', **locals())
    out = helper.create_variable_for_type_inference(dtype=operands[0].dtype)
    attrs = dict()
    attrs['equation'] = equation
    caches = [
        helper.create_variable_for_type_inference(dtype=operands[0].dtype)
        for i in range(len(operands))
    ]
    xshape = [
        helper.create_variable_for_type_inference(dtype=operands[0].dtype)
        for i in range(len(operands))
    ]
    helper.append_op(
        type='einsum',
        inputs={'Operands': operands},
        outputs={'Out': out, "InnerCache": caches, "XShape": xshape},
        attrs=attrs,
    )
    return out


def einsum(equation, *operands):
    r"""
    einsum(equation, *operands)

    The current version of this API should be used in dygraph mode only.

    Einsum offers a tensor operation API which allows using the Einstein summation
    convention or Einstein notation. It takes as input one or multiple tensors and
    produces as output one tensor.

    Einsum is able to perform a variety of tensor operations. The following lists a few:

        - for single operand
            - trace
            - diagonal
            - transpose
            - sum
        - for double operands
            - dot
            - outer
            - broadcasting and elementwise multiply
            - matrix multiply
            - batched matrix multiply
        - for many operands
            - broadcasting multiply
            - chained matrix multiply

    **The summation notation**

        - The tensor dimensions are labeled using uncased English letters. E.g., `ijk`
        relates to a three dimensional tensor whose dimensions are labeled i, j, and k.
        - The equation is `,` separated into terms, each being a distinct input's
        dimension label string.
        - Ellipsis `...` enables broadcasting by automatically converting the unlabeled
        dimensions into broadcasting dimensions.
        - Singular labels are called free labels, duplicates are dummy labels. Dummy labeled
        dimensions will be reduced and removed in the output.
        - Output labels can be explicitly specified on the right hand side of `->` or omitted.
        In the latter case, the output labels will be inferred from the input labels.
            - Inference of output labels
                - Broadcasting label `...`, if present, is put on the leftmost position.
                - Free labels are reordered alphabetically and put after `...`.
            - On explicit output labels
                - If broadcasting is enabled, then `...` must be present.
                - The output labels can be empty, indicating that the output is the scalar
                sum over the original output.
                - Non-input labels are invalid.
                - Duplicate labels are invalid.
                - For any dummy label which is present for the output, it's promoted to
                a free label.
                - For any free label which is not present for the output, it's lowered to
                a dummy label.
        - Examples
            - '...ij, ...jk', where i and k are free labels, j is dummy. The output label
            string is '...ik'
            - 'ij -> i', where i is a free label and j is a dummy label.
            - '...ij, ...jk -> ...ijk', where i, j and k are all free labels.
            - '...ij, ...jk -> ij', an invalid equation since `...` is not present for
            the output.

    **The summation rule**

    The summation procedure can be outlined as follows, although the actual steps taken
    may vary significantly due to implementation specific optimization.

        - Step 1: preparation for broadcasting, that is, transposing and unsqueezing
        the input operands to have each resulting dimension identically labeled across
        all the input operands.
        - Step 2: broadcasting multiply all the resulting operands from step 1.
        - Step 3: reducing dummy labeled dimensions.
        - Step 4: transposing the result tensor to match the output labels.

    **On trace and diagonal**

    The trace and diagonal are planned yet unimplemented features.

    Args:
        equation (`str`):
            The summation terms using the Einstein summation notation.
        operands (`list|Tensor`):
            The input tensors over which to compute the Einstein summation. The number of
            operands should equal the number of input terms in the equation.

    Returns:
        result (`Tensor`): the result tensor.

    Examples:
        .. code-block:: python

        import paddle
        paddle.seed(102)
        x = paddle.rand([4])
        y = paddle.rand([5])

        # sum
        print(paddle.einsum('i->', x))
        # Tensor(shape=[], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
        #   1.95791852)

        # dot
        print(paddle.einsum('i,i->', x, x))
        # Tensor(shape=[1], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
        #   [1.45936954])

        # outer
        print(paddle.einsum("i,j->ij", x, y))
        # Tensor(shape=[4, 5], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
        #   [[0.00079869, 0.00120950, 0.00136844, 0.00187187, 0.00192194],
        #    [0.23455200, 0.35519385, 0.40186870, 0.54970956, 0.56441545],
        #    [0.11773264, 0.17828843, 0.20171674, 0.27592498, 0.28330654],
        #    [0.32897076, 0.49817693, 0.56364071, 0.77099484, 0.79162055]])

        A = paddle.rand([2, 3, 2])
        B = paddle.rand([2, 2, 3])

        # transpose
        print(paddle.einsum('ijk->kji', A))
        #  Tensor(shape=[2, 3, 2], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
        #   [[[0.95649719, 0.49684682],
        #     [0.80071914, 0.46258664],
        #     [0.49814570, 0.33383518]],
        #
        #    [[0.07637714, 0.29374704],
        #     [0.51470858, 0.51907635],
        #     [0.99066722, 0.55802226]]])

        # batch matrix multiplication
        print(paddle.einsum('ijk, ikl->ijl', A,B))
        # Tensor(shape=[2, 3, 3], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
        #   [[[0.32172769, 0.50617385, 0.41394392],
        #     [0.51736701, 0.49921003, 0.38730967],
        #     [0.69078457, 0.42282537, 0.30161136]],
        #
        #    [[0.32043904, 0.18164253, 0.27810261],
        #     [0.50226176, 0.24512935, 0.39881429],
        #     [0.51476848, 0.23367381, 0.39229113]]])

        # Ellipsis transpose
        print(paddle.einsum('...jk->...kj', A))
        # Tensor(shape=[2, 2, 3], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
        #   [[[0.95649719, 0.80071914, 0.49814570],
        #     [0.07637714, 0.51470858, 0.99066722]],
        #
        #    [[0.49684682, 0.46258664, 0.33383518],
        #     [0.29374704, 0.51907635, 0.55802226]]])

        # Ellipsis batch matrix multiplication
        print(paddle.einsum('...jk, ...kl->...jl', A,B))
        # Tensor(shape=[2, 3, 3], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
        #   [[[0.32172769, 0.50617385, 0.41394392],
        #     [0.51736701, 0.49921003, 0.38730967],
        #     [0.69078457, 0.42282537, 0.30161136]],
        #
        #    [[0.32043904, 0.18164253, 0.27810261],
        #     [0.50226176, 0.24512935, 0.39881429],
        #     [0.51476848, 0.23367381, 0.39229113]]])
    """
    if int(os.environ.get('FLAGS_new_einsum', "1")):
        return einsum_v2(equation, *operands)

    nop = len(operands)
    assert nop > 0, "At least one operand is expected."

    # Part the equation to left hand side and right hand side
    lhs, *rhs = equation.lower().replace(' ', '').split('->')
    assert len(rhs) < 2, "Invalid equation: multiple `->` were found."

    # Note, we distinguish between 'ij->' and 'ij' by setting rhs to '' and None
    rhs = rhs[0] if rhs else None

    # Parse labels for each operand and count the number of occurrences for each alphabet label
    nop_labels = parse_labels(lhs, operands)

    # Diagonalize the operands which have duplicate labels
    nop_labels, operands = list(zip(*map(diagonalize, nop_labels, operands)))

    # To handle broadcasting, we should first know how many dimensions are there
    # We need to use that number to generate output labels
    # e.g. 1 for ['ij', 'i.', '.k']
    n_bcast_dims = max(map(lambda s: s.count('.'), nop_labels))

    # Build the data structures for planning. It's helpful to think of all the operands
    # broadcasting together from a global view. In this view, dimensions from multiple
    # operands are mapped to the same position if they are labeled uniquely. Broadcasting
    # dimensions are mapped to adjacent positions with the right bound fixed. Subject to
    # each operand, the map is injective but for all operands the map is onto.
    # g_labels:
    #   The labels of the global view
    # g_view:
    #   Includes a list of maps from each operand's dimensions to the global view's dimensions
    #   which we refer to as ax or axes in the code to distinguish from operand's dims
    # g_shape:
    #   The shape of the global view. The size of each dimension is what the aligned dimensions
    #   should broadcast to
    # g_nout:
    #   Number of output axes
    # g_supports:
    #   Booleans indicating each operand's non-trivial dimensions
    # g_count:
    #   Counting how many non-trivial dimensions remain for each ax

    g_labels, g_view, g_nout, g_count = build_global_view(
        nop_labels, rhs, n_bcast_dims
    )
    g_shape, g_supports = build_global_shape(
        g_view, g_labels, [op.shape for op in operands]
    )

    # Now we're ready to build up an execution plan
    args = operands, g_view, g_shape, g_supports, g_count, n_bcast_dims
    plan = plan_einsum(*args)
    result = plan.execute()

    return result