# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import collections
import itertools
import re
import string

import numpy as np
import opt_einsum

from paddle import _C_ops, _legacy_C_ops
from paddle.common_ops_import import dygraph_only

from .linalg import dot, matmul, transpose
from .manipulation import squeeze, unsqueeze, reshape
from .math import multiply
from .math import sum as paddle_sum
from ..fluid.data_feeder import (
    check_variable_and_dtype,
    check_type,
    check_dtype,
)
from ..fluid.layer_helper import LayerHelper
from ..fluid.framework import (
    _non_static_mode,
    in_dygraph_mode,
    _in_legacy_dygraph,
)


__all__ = []


def parse_op_labels(labelstr, operand):
    '''
    Parse labels for an input operand.

    Parameters
    ----------
    labelstr:
        the input label string
    operand:
        the input operand

    Returns
    -------
    the input operand's full label string, in which all anonymous dimensions are
    labeled with dots.
    '''
    # Sanity checks
    for c in labelstr.replace('.', ''):
        assert (
            c.isalpha()
        ), f"Invalid equation: {c} is not a valid label; labels must be letters."

    assert (
        labelstr.replace('...', '', 1).find('.') == -1
    ), "Invalid equation: `.` found outside of an ellipsis."

    # Check shape. Note, in Paddle a tensor rank is always nonzero
    ndims = len(operand.shape)
    assert ndims > 0

    full_labelstr = labelstr.replace('...', '.' * (ndims - len(labelstr) + 3))

    assert (
        len(full_labelstr) == ndims
    ), f"Invalid equation: the label string '{labelstr}' does not match the operand's rank."

    return full_labelstr
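
# Illustrative example (a sketch, not part of the API; assumes a rank-4
# operand): the ellipsis expands into one dot per anonymous dimension,
#
#     parse_op_labels('ij...', paddle.rand([2, 3, 4, 5]))  # -> 'ij..'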


def parse_labels(labelstr, operands):
    '''
    Parse label strings for all input operands.

    Parameters
    ----------
    labelstr:
        The equation's label string
    operands:
        The input operands

    Returns
    -------
    list of full label strings for all input operands
    '''

    nop_labels = labelstr.split(',')
    assert len(nop_labels) == len(operands), (
        f"Invalid equation: the number of operands is {len(operands)}, "
        f"but found {len(nop_labels)} segments in the label equation."
    )

    return list(map(parse_op_labels, nop_labels, operands))
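
# Illustrative example (assuming operands of ranks 3 and 2):
#
#     parse_labels('i...,...k', (paddle.rand([5, 2, 3]), paddle.rand([3, 7])))
#     # -> ['i..', '.k']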


def validate_rhs(rhs, input_labels, n_bcast_dims):
    '''
    Check whether the equation's right-hand side is valid.
    '''
    # Sanity check.
    if n_bcast_dims > 0:
        assert (
            '...' in rhs
        ), "Invalid equation: missing ellipsis in output labels."

    rhs = rhs.replace('...', '')
    rhs_set = set(rhs)

    # Hidden assumption: available labels don't include '.'
    assert '.' not in input_labels

    # Verify that output labels all come from the set of input labels
    non_input_labels = rhs_set.difference(input_labels)
    assert not non_input_labels, (
        f"Invalid equation: "
        f"output label {sorted(non_input_labels)} not used by any input."
    )
    # Verify that output labels are not duplicated
    assert len(rhs) == len(
        rhs_set
    ), "Invalid equation: duplicate output labels found."


def build_view(in_labels, out_labels):
    '''
    Build an inverse map of dimension indices. Three conditions must hold for
    the result to be meaningful.
    First, no duplicate letter labels in each label string.
    Second, the number of dots in out_labels >= that in in_labels.
    Third, dots are contiguous in each label string.

    Parameters
    ----------
    in_labels:
        The dimension labels to map to
    out_labels:
        The dimension labels to map from

    Returns
    -------
    The inverse map from out_labels to in_labels. The length of the inverse map
    equals that of out_labels. -1 is filled if there's no matching input
    dimension for a specific label.

    Examples
    --------
    in_labels = 'ij..', out_labels = '..ji'
    inv_map = [2, 3, 1, 0]
    in_labels = 'ij..', out_labels = '..kji'
    inv_map = [2, 3, -1, 1, 0]
    '''

    inv_map = [-1] * len(out_labels)

    # First build the broadcast dimension mapping
    # Find the broadcast index range in out_labels
    r = re.search(r'\.+', out_labels)
    if r:
        start, end = r.start(), r.end()
        s = re.search(r'\.+', in_labels)
        # fill the broadcast dimension indices from right to left.
        if s:
            for ax, dim in zip(
                range(start, end)[::-1], range(s.start(), s.end())[::-1]
            ):
                inv_map[ax] = dim

    # Now work on non-broadcast dimensions
    if r:
        it = itertools.chain(range(start), range(end, len(out_labels)))
    else:
        it = iter(range(len(out_labels)))

    for i in it:
        inv_map[i] = in_labels.find(out_labels[i])

    return inv_map


def build_global_view(nop_labels, rhs, n_bcast_dims):
    '''
    Build the global view, which is a layout of all dimension labels
    plus an index table that maps from the layout to the dimensions
    in each operand. In the global view, the dimensions are arranged
    such that output ones are put on the left and contraction ones
    are put on the right.

    Parameters
    ----------
    nop_labels:
        The input full label strings of all input operands
    rhs:
        The equation right hand side
    n_bcast_dims:
        The maximum number of broadcast dimensions

    Returns
    -------
    A tuple of g_labels, g_view, g_nout, g_count
    g_labels:
        the layout of all labels in a string
    g_view:
        the index table
    g_nout:
        the number of output dimensions
    g_count:
        the counter array for dimension contractions
    '''
    # Put all labels in alphabetical order
    concat = sorted(''.join(nop_labels).replace('.', ''))
    labels, count = [], []
    for a, b in zip(['.'] + concat, concat):
        if a != b:
            labels.append(b)
            count.append(1)
        else:
            count[-1] += 1

    if rhs is not None:
        validate_rhs(rhs, labels, n_bcast_dims)
        g_labels_out = rhs.replace('...', '.' * n_bcast_dims)
    else:
        g_labels_out = '.' * n_bcast_dims + ''.join(
            l for l, c in zip(labels, count) if c == 1
        )

    for i in range(len(count))[::-1]:
        if labels[i] in g_labels_out:
            labels.pop(i)
            count.pop(i)

    g_labels_sum = ''.join(labels)
    g_labels = g_labels_out + g_labels_sum
    g_view = list(map(lambda i: build_view(i, g_labels), nop_labels))
    g_nout = len(g_labels_out)
    g_count = count

    return g_labels, g_view, g_nout, g_count
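
# Illustrative example: for nop_labels=['ij', 'jk'], rhs='ik', n_bcast_dims=0,
# output labels are laid out on the left and contraction labels on the right:
#
#     g_labels == 'ikj'
#     g_view   == [[0, -1, 1], [-1, 1, 0]]  # per-operand dims, -1 if absent
#     g_nout   == 2
#     g_count  == [2]                       # 'j' appears in two operands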


def build_global_shape(g_view, g_labels, op_shapes):
    '''
    The global shape is the shape of all dimensions rearranged and broadcast
    to the global view. It's a reference data structure for einsum planning.

    Parameters
    ----------
    g_view:
        the global view
    g_labels:
        the global label string
    op_shapes:
        the shapes of all the operands

    Returns
    -------
    g_shape:
        the global shape vector
    g_masks:
        list of shape masks for each operand. A dimension's shape mask is a boolean
        indicating whether its size > 1, in other words, it's not squeezable
    '''
    view_shapes = []
    g_masks = []

    for view, op_shape in zip(g_view, op_shapes):
        view_shapes.append([op_shape[dim] if dim > -1 else 1 for dim in view])

    g_shape = [set(sizes_per_ax) - {1} for sizes_per_ax in zip(*view_shapes)]

    non_bcastable = [ax for ax, sizes in enumerate(g_shape) if len(sizes) > 1]

    assert not non_bcastable, (
        f"Invalid operands: label {g_labels[non_bcastable[0]]} "
        f"corresponds to non-broadcastable dimensions."
    )

    g_shape = [sizes.pop() if len(sizes) > 0 else 1 for sizes in g_shape]

    g_masks = [
        [s > 1 or s == -1 for s in view_shape] for view_shape in view_shapes
    ]

    return g_shape, g_masks
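
# Continuing the illustrative example above, with operand shapes [2, 3] and [3, 4]:
#
#     g_shape == [2, 4, 3]              # sizes of 'i', 'k', 'j'
#     g_masks == [[True, False, True],  # op0 covers 'i' and 'j'
#                 [False, True, True]]  # op1 covers 'k' and 'j'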


def has_duplicated_labels(labels):
    '''
    Returns True if there is any duplicate label.
    '''
    labels = labels.replace('.', '')
    return len(labels) > len(set(labels))


def diagonalize(labels, operand):
    '''
    Merges dimensions with duplicate labels.

    For those dimensions with duplicate labels, merge them into one dimension
    which represents the diagonal elements. This requires the dimensions with
    duplicate labels to be equal sized.

    Examples
    --------
    'ijj...i' would be merged into 'ij...'
    '''
    assert not has_duplicated_labels(
        labels
    ), 'Duplicate labels are not supported.'

    return labels, operand


def plan_reduce(plan, op, reduce_dims, keepdim):
    '''
    Add a reduce step to the plan
    '''
    varname = f'op{op}'

    f = lambda var, dims: paddle_sum(var, dims, keepdim=keepdim)
    step = f, [varname], varname, reduce_dims
    plan.add_step(step)


def plan_scalar_prod(plan, op1, op2):
    varnames = [f'op{op1}', f'op{op2}']
    f = lambda var1, var2: paddle_sum(var1) * var2
    step = f, varnames, varnames[1]
    plan.add_step(step)


def plan_matmul(plan, g_view, op1, op2, g_supports, g_shape, I, J1, J2, K):
    '''
    Plan a matmul. I denotes the aligned (batch-like) axes, J1/J2 the axes
    private to op1/op2, and K the axes to be contracted.
    '''
    # Transpose and re-shape op1 and op2 in I, J1, K and I, J2, K
    # Then apply matmul(x, y, transpose_x=False, transpose_y=True)
    var1, var2 = f'op{op1}', f'op{op2}'

    op1_view, op2_view = [g_view[op] for op in (op1, op2)]

    I1 = [idx for idx in I if op1_view[idx] >= 0]
    I2 = [idx for idx in I if op2_view[idx] >= 0]
    op1_view = np.array(op1_view)
    op1_dims = op1_view[I1 + J1 + K]

    op2_view = np.array(op2_view)
    op2_dims = op2_view[I2 + J2 + K]

    op1_mask, op2_mask = [g_supports[op] for op in (op1, op2)]
    op1_vshape = np.array([s if m else 1 for s, m in zip(g_shape, op1_mask)])
    op2_vshape = np.array([s if m else 1 for s, m in zip(g_shape, op2_mask)])
    vshape = np.maximum(op1_vshape, op2_vshape)

    i1, i2, j1, j2, k = map(len, (I1, I2, J1, J2, K))

    if any(op1_dims != np.arange(len(op1_dims))):
        step = transpose, [var1], var1, list(op1_dims)
        plan.add_step(step)

    if any(op2_dims != np.arange(len(op2_dims))):
        step = transpose, [var2], var2, list(op2_dims)
        plan.add_step(step)

    # Check if conditions hold for turning the operation into a matmul
    if (
        j1 + j2 > 0
        and k > 0
        and -1 not in np.concatenate((op1_vshape, op2_vshape))
    ):
        op1_shape = (
            list(op1_vshape[I])
            + [np.prod(op1_vshape[J1])]
            + [np.prod(op1_vshape[K])]
        )
        op2_shape = (
            list(op2_vshape[I])
            + [np.prod(op2_vshape[J2])]
            + [np.prod(op2_vshape[K])]
        )

        # Merge J dims and K dims by reshaping
        step = reshape, [var1], var1, op1_shape
        plan.add_step(step)
        step = reshape, [var2], var2, op2_shape
        plan.add_step(step)

        # Matmul
        step = matmul, [var1, var2], var2, False, True
        plan.add_step(step)

        # Reshape back
        shape = list(vshape[I + J1 + J2])
        step = reshape, [var2], var2, shape
        plan.add_step(step)

    elif j1 == j2 == k == 1:
        # Can still do matmul even when unknown shapes are present
        step = matmul, [var1, var2], var2, False, True
        plan.add_step(step)

    # In the remaining cases we opt for ops other than matmul
    else:
        # unsqueeze the operands to include the J1... and J2... dimensions
        if j2:
            fill = list(range(i1 + j1, i1 + j1 + j2))
            step = unsqueeze, [var1], var1, fill
            plan.add_step(step)
        if j1:
            fill = list(range(i2, i2 + j1))
            step = unsqueeze, [var2], var2, fill
            plan.add_step(step)
        # In case of no dimensions to contract, do an elementwise multiply
        if k == 0:
            # make broadcast
            step = multiply, [var1, var2], var2
            plan.add_step(step)
        # Contract and no join, turn into a dot
        elif j1 + j2 == 0 and k == 1:
            step = unsqueeze, [var1], var1, [-2]
            plan.add_step(step)
            step = unsqueeze, [var2], var2, [-1]
            plan.add_step(step)
            step = matmul, [var1, var2], var2
            plan.add_step(step)
            step = squeeze, [var2], var2, [-1, -2]
            plan.add_step(step)
        elif j1 + j2 == 0 and -1 not in np.concatenate(
            (op1_vshape[K], op2_vshape[K])
        ):
            assert all(op1_vshape[K] == op2_vshape[K])
            step = (
                reshape,
                [var1],
                var1,
                list(op1_vshape[I]) + [1] + [np.prod(op1_vshape[K])],
            )
            plan.add_step(step)
            step = (
                reshape,
                [var2],
                var2,
                list(op2_vshape[I]) + [1] + [np.prod(op2_vshape[K])],
            )
            plan.add_step(step)
            step = matmul, [var1, var2], var2, False, True
            plan.add_step(step)
            step = squeeze, [var2], var2, [-1, -2]
            plan.add_step(step)
        else:
            step = multiply, [var1, var2], var2
            plan.add_step(step)
            reduce_dims = list(range(-k, 0))
            plan_reduce(plan, op2, reduce_dims, keepdim=False)

    # Wrap up, updating auxiliary data
    # Updating g_mask for I and J axes
    for ax in I + J1 + J2:
        op2_mask[ax] = vshape[ax] > 1 or vshape[ax] == -1

    for ax in K:
        op2_mask[ax] = False

    for ax in range(len(op2_view)):
        op2_view[ax] = -1
    dim = 0
    for ax in I + J1 + J2:
        op2_view[ax], dim = dim, dim + 1

    g_view[op2] = list(op2_view)


def plan_summation(
    plan, g_view, op1, op2, g_supports, g_shape, g_count, n_bcast
):
    '''
    Plan various kinds of summation
    '''
    op1_view, op2_view = g_view[op1], g_view[op2]
    op1_mask, op2_mask = g_supports[op1], g_supports[op2]

    ndim = len(op1_view)
    nout = ndim - len(g_count)

    count = [0] * nout + g_count

    I, K, J1, J2 = list(range(n_bcast)), [], [], []

    for ax, dim1, dim2 in zip(
        range(n_bcast, ndim), op1_view[n_bcast:], op2_view[n_bcast:]
    ):

        if (dim1 != -1) != (dim2 != -1):
            if dim1 != -1:
                J1.append(ax)
            else:
                J2.append(ax)
        elif dim1 != -1:
            fold = int(op1_mask[ax]) + int(op2_mask[ax])
            if ax >= nout and fold == count[ax]:
                # Ready to fold the dimensions
                K.append(ax)
                count[ax] -= fold
            else:
                I.append(ax)
                count[ax] -= max(fold - 1, 0)

    # Update g_count
    g_count[:] = count[nout:]

    # Now it's OK to merge the K dims as the same shape holds
    plan_matmul(plan, g_view, op1, op2, g_supports, g_shape, I, J1, J2, K)


def rearrange(axes):
    perm, fill = [], []
    for ax, dim in enumerate(axes):
        if dim < 0:
            fill.append(ax)
        else:
            perm.append(dim)
    # Trivial permutation returns []
    if all(i == dim for i, dim in enumerate(perm)):
        perm = []

    return perm, fill
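
# Illustrative example: rearrange([2, -1, 0, 1]) returns ([2, 0, 1], [1]);
# axes mapped to a nonnegative dim form the permutation, while axes mapped
# to -1 are collected for a later unsqueeze.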


def plan_broadcast(plan, operands, nop_axes):
    '''
    Plan a broadcasting multiply across all operands
    '''
    nop = len(operands)
    varnames = [f'op{i}' for i in range(nop)]

    for i, op_axes in zip(range(nop), nop_axes):
        # Re-arrange the dimensions according to the global layout
        perm, fill = rearrange(op_axes)
        var = varnames[i]
        if perm:
            step = transpose, [var], var, perm
            plan.add_step(step)
        if fill:
            step = unsqueeze, [var], var, fill
            plan.add_step(step)

    def f(*args):
        expr = ' * '.join(varnames)
        return eval(expr, dict(zip(varnames, args)))

    step = f, varnames, None
    plan.add_step(step)


class Plan:
    def __init__(self):
        self.env = {}
        self.steps = []

    def add_step(self, step):
        self.steps.append(step)

    def get_var(self, varname):
        return self.env[varname] if varname in self.env else None

    def set_var(self, varname, var):
        self.env[varname] = var

    def show(self):
        res = None
        for f, in_varnames, out_varname, *args in self.steps:
            print(repr((out_varname, f, *in_varnames, *args)))
        return res

    def execute(self):
        res = None
        for f, in_varnames, out_varname, *args in self.steps:
            res = f(*map(self.get_var, in_varnames), *args)
            if out_varname:
                self.set_var(out_varname, res)
        return res
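
# Illustrative sketch of how a Plan is assembled and run (not part of the API):
# each step is a tuple (f, in_varnames, out_varname, *args), and `execute`
# threads the named variables through `env`, returning the last step's result.
#
#     plan = Plan()
#     plan.set_var('op0', paddle.rand([2, 3]))
#     plan.add_step((paddle_sum, ['op0'], 'op0', [1]))  # sum over dim 1
#     out = plan.execute()                              # a tensor of shape [2]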


def plan_einsum(operands, g_view, g_shape, g_supports, g_count, n_bcast):
    '''
    Plans the actual execution steps.
    Returns
    -------
    the execution plan
    '''
    nop = len(operands)
    ndim = len(g_view[0])
    nout = ndim - len(g_count)

    # Initialize a plan with an environment
    plan = Plan()
    op_names = [f'op{i}' for i in range(nop)]
    list(map(plan.set_var, op_names, operands))

    # In case no dimensions to combine, do broadcast straight across
    if not g_count:
        plan_broadcast(plan, operands, g_view)
        return plan

    # Down count degenerate contraction dimensions.
    for view, support in zip(g_view, g_supports):
        # To collect the down count number, we use a type casting trick
        down_count = [
            int((d + 1) and (not s))
            for d, s in zip(view[nout:], support[nout:])
        ]
        for i, count in enumerate(down_count):
            g_count[i] -= count

    # Reduce any dimension for which g_support is set and g_count == 1
    for i, view, mask in zip(range(nop), g_view, g_supports):
        to_reduce = []
        for dim, masked, count in zip(view[nout:], mask[nout:], g_count):
            to_reduce.append(dim if (masked and count == 1) else -1)

        reduce_dims = list(filter(lambda x: x > -1, to_reduce))
        if reduce_dims:
            plan_reduce(plan, i, reduce_dims, keepdim=True)

        # Unset mask and decrease g_count for the reduced dimensions
        for i, d in enumerate(to_reduce):
            ax = i + nout
            mask[ax] = mask[ax] and (d == -1)
            g_count[i] -= 0 if d == -1 else 1

    # Plan the summations over the operand sequence
    for i in range(nop):
        # plan a single step

        if i == 0:
            continue

        # We'd like to arrange the dimensions in the following way:
        # [I...  J... K...]
        # [I...  J... K...]
        # where
        #       I... are aligned and not to be combined immediately
        #       J... are not aligned and not to be combined immediately
        #       K... are aligned and should be immediately combined
        # At this point the non-trivial broadcast dimensions in K are already reduced
        # and removed. That means all K dimensions are aligned and their sizes are not 1.
        # We then inspect the layout of I,J,K plus the above observation to make
        # specialization decisions.  The current strategy is set as follows:
        #  (1) if I... J... K... are all empty, it's multiplying a scalar
        #  (2) if K... are empty, better use a broadcast
        #  (3) if I... J... empty and K... not empty, a vector-vector multiply (or a dot)
        #  (4) Otherwise, either I... or J... is not empty, and K... is not empty; use a general matmul

        # Resolve the summation kind: dot, matmul or *
        if not any(g_supports[i - 1]):
            # op1 is a one element tensor.
            plan_scalar_prod(plan, i - 1, i)
        else:
            plan_summation(
                plan, g_view, i - 1, i, g_supports, g_shape, g_count, n_bcast
            )

    # for ax, dim in enumerate(g_view[nop-1][:nout]):
    #     assert dim == ax
    assert all(not masked for masked in g_supports[nop - 1][nout:])

    view = g_view[-1]
    if any(ax != dim for ax, dim in enumerate(view[:nout])):
        perm = [dim for dim in view if dim >= 0]
        if sorted(perm) != perm:
            varname = f'op{nop-1}'
            step = transpose, [varname], varname, perm
            plan.add_step(step)
        dim = 0
        unsqueeze_dims = []
        for ax, d in enumerate(view):
            if d != -1:
                view[ax], dim = dim, dim + 1
        for ax, d in enumerate(view[:nout]):
            if d == -1:
                unsqueeze_dims.append(ax)
        if unsqueeze_dims:
            varname = f'op{nop-1}'
            step = unsqueeze, [varname], varname, unsqueeze_dims
            plan.add_step(step)

    squeeze_dims = [dim for dim in view[nout:] if dim != -1]
    if squeeze_dims:
        # plan_reduce(plan, nop-1, reduce_dims, keepdim=False)
        varname = f'op{nop-1}'
        step = squeeze, [varname], varname, squeeze_dims
        plan.add_step(step)

    return plan


def preprocess(equation, *operands):
    """
    Check the equation, raise errors if invalid, and generate default output labels.
    """
    equation = equation.replace(" ", "")
    nop = len(operands)
    assert nop > 0, (
        "At least one operand is required in the Einsum API, but received %s" % nop
    )

    # Split the equation into left-hand side and right-hand side
    lhs, *rhs = equation.lower().split('->')
    assert len(rhs) < 2, "Invalid equation: multiple `->` were found."

    labels = parse_labels(lhs, operands)
    # Note, we distinguish between 'ij->' and 'ij' by setting rhs to '' and None
    rhs = rhs[0] if rhs else None
    if rhs is None:
        rhs = rhs_inference(lhs)

    assert len(lhs.split(',')) == len(operands), (
        f"Invalid equation: the number of operands is {len(operands)}, "
        f"but found {len(lhs.split(','))} segments in the label equation."
    )

    assert not (
        '...' in lhs and '...' not in rhs
    ), 'Invalid equation: missing ellipsis in output labels.'

    assert not any(
        has_duplicated_labels(term) for term in lhs.split(',')
    ), 'Duplicate labels are not supported.'

    assert not has_duplicated_labels(
        rhs
    ), 'Invalid equation: duplicate output labels found.'

    return lhs, rhs, labels
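
# Illustrative example (assuming two rank-2 tensors x and y):
#
#     preprocess('ij,jk', x, y)  # -> ('ij,jk', 'ik', ['ij', 'jk'])
#
# The output labels 'ik' are inferred since the equation has no '->'.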


def parse_fake_shape(equation, operands, labels):
    """
    This shape is just used for operand planning and may differ from the
    original shape. For example:
    - `...` is replaced by 1
    - -1 is replaced by 1

    Returns
    -------
    list of shapes
    """
    shaped = collections.namedtuple('shaped', ['shape'])

    def fake_shape(label, op):
        assert len(op.shape) == len(label), (
            "length of shape and length of label must be the same, but received %d != %d"
            % (len(op.shape), len(label))
        )
        fakes = [s for l, s in zip(label, op.shape) if l != '.']
        fakes = list(map(abs, fakes))  # make -1 -> 1
        if '.' in label:
            fakes.insert(label.index('.'), 1)
        return shaped(fakes)

    out = list(map(fake_shape, labels, operands))
    return out
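
# Illustrative example: for the full label string '.ij' and an operand of
# shape [-1, 3, 4], the fake shape is [1, 3, 4]; the broadcast dimension
# collapses to 1 and the dynamic size -1 is treated as 1 for planning.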


def rhs_inference(lhs):
    def is_free(key):
        return cnt.get(key) == 1 and key not in ['.', ',']

    cnt = collections.Counter(lhs)
    rhs = "..." if '...' in lhs else ""
    rhs = rhs + "".join(filter(is_free, sorted(cnt.elements())))
    return rhs
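
# Illustrative example: rhs_inference('i...j,jk') == '...ik'; the broadcast
# labels lead, the free labels i and k follow alphabetically, and the dummy
# label j is dropped.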


def gen_equation_for_opteinsum(lhs, rhs):
    """
    1. gen rhs if rhs is None
    2. replace '...' with an unused single-letter label
    """

    def get_used_label(counter):
        used = set(counter.elements())
        for c in string.ascii_lowercase:
            if c not in used:
                return c
        raise ValueError(
            "All labels `a`-`z` are already in use; no unused label is left for einsum optimization"
        )

    cnt = collections.Counter(lhs)
    broadcast_label = get_used_label(cnt)
    if rhs is None:
        rhs = rhs_inference(lhs)
    lhs = lhs.replace("...", broadcast_label)
    rhs = rhs.replace("...", broadcast_label)
    return lhs + "->" + rhs, broadcast_label
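
# Illustrative example: gen_equation_for_opteinsum('...ij,...jk', '...ik')
# returns ('aij,ajk->aik', 'a'), with 'a' being the first unused letter that
# stands in for the ellipsis.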


def einsum_v2(equation, *operands):
    """
    einsum v2 implementation.
    1. Implemented as a C++ EinsumOp.
    2. V2 creates the EinsumOp to do the computation, so only a little verification work is done in Python.
    3. V2 uses opt_einsum.contract_path to optimize the multivariable einsum.
    """
    n_op = len(operands)
    lhs, rhs, labels = preprocess(equation, *operands)

    if n_op <= 2:
        return gen_einsum_op(lhs + '->' + rhs, *operands)

    shapes = parse_fake_shape(lhs, operands, labels)
    opt_equation, broadcast_label = gen_equation_for_opteinsum(lhs, rhs)
    _, cons = opt_einsum.contract_path(opt_equation, *shapes, einsum_call=True)
    var_list = list(operands)
    for path in cons:
        (a, b), _, eq, *__ = path
        assert (
            a > b
        ), "Assume the first var_idx is larger than the second one so the pops don't interfere. opt_einsum can guarantee it."
        var_s = [var_list.pop(a), var_list.pop(b)]
        eq = eq.replace(broadcast_label, "...")
        var_list.append(gen_einsum_op(eq, *var_s))
    assert (
        len(var_list) == 1
    ), "There must be exactly one element left in the list, but received %d." % len(var_list)
    return var_list[0]


def gen_einsum_op(equation, *operands):
    """
    EinsumOp Python interface
    """
    assert len(operands) <= 2, "Only support two operands in EinsumOp."
    if in_dygraph_mode():
        return _C_ops.einsum(operands, equation)[0]

    if _in_legacy_dygraph():
        # legacy dygraph
        return _legacy_C_ops.einsum(
            operands, len(operands), len(operands), 'equation', equation
        )[0]

    for inp in operands:
        check_variable_and_dtype(inp, 'dtype', ['float32', 'float64'], 'einsum')
    check_type(equation, 'equation', str, 'einsum')
    helper = LayerHelper('einsum', **locals())
    out = helper.create_variable_for_type_inference(dtype=operands[0].dtype)
    attrs = dict()
    attrs['equation'] = equation
    caches = [
        helper.create_variable_for_type_inference(dtype=operands[0].dtype)
        for i in range(len(operands))
    ]
    xshape = [
        helper.create_variable_for_type_inference(dtype=operands[0].dtype)
        for i in range(len(operands))
    ]
    helper.append_op(
        type='einsum',
        inputs={'Operands': operands},
        outputs={'Out': out, "InnerCache": caches, "XShape": xshape},
        attrs=attrs,
    )
    return out


def einsum(equation, *operands):
    r"""

    einsum(equation, *operands)

    The current version of this API should be used in dygraph mode only.

    Einsum offers a tensor operation API which allows using the Einstein summation
    convention or Einstein notation. It takes as input one or multiple tensors and
    produces as output one tensor.

    Einsum is able to perform a variety of tensor operations. The following lists a few:

        - for single operand
            - trace
            - diagonal
            - transpose
            - sum
        - for double operands
            - dot
            - outer
            - broadcasting and elementwise multiply
            - matrix multiply
            - batched matrix multiply
        - for many operands
            - broadcasting multiply
            - chained matrix multiply

    **The summation notation**

        - The tensor dimensions are labeled using uncased English letters. E.g., `ijk`
          relates to a three dimensional tensor whose dimensions are labeled i, j, and k.
        - The equation is `,` separated into terms, each being a distinct input's
          dimension label string.
        - Ellipsis `...` enables broadcasting by automatically converting the unlabeled
          dimensions into broadcasting dimensions.
        - Singular labels are called free labels, duplicates are dummy labels. Dummy labeled
          dimensions will be reduced and removed in the output.
        - Output labels can be explicitly specified on the right hand side of `->` or omitted. In the latter case, the output labels will be inferred from the input labels.
            - Inference of output labels
                - Broadcasting label `...`, if present, is put on the leftmost position.
                - Free labels are reordered alphabetically and put after `...`.
            - On explicit output labels
                - If broadcasting is enabled, then `...` must be present.
                - The output labels can be empty, indicating that the output is
                  the scalar sum over the inferred output.
                - Non-input labels are invalid.
                - Duplicate labels are invalid.
                - For any dummy label which is present for the output, it's promoted to
                  a free label.
                - For any free label which is not present for the output, it's lowered to
                  a dummy label.

T
Z
zhiboniu 已提交
937
            - '...ij, ...jk', where i and k are free labels, j is dummy. The output label
938 939
              string is '...ik'
            - 'ij -> i', where i is a free label and j is a dummy label.
Z
zhiboniu 已提交
940
            - '...ij, ...jk -> ...ijk', where i, j and k are all free labels.
T
942
              the output.
T
    **The summation rule**

    The summation procedure can be outlined as follows, although the actual steps taken
    may vary significantly due to implementation-specific optimization.

        - Step 1: preparation for broadcasting, that is, transposing and unsqueezing
          the input operands to have each resulting dimension identically labeled across
          all the input operands.
        - Step 2: broadcasting multiply all the resulting operands from step 1.
        - Step 3: reducing dummy labeled dimensions.
        - Step 4: transposing the result tensor to match the output labels.

    **On trace and diagonal**

    The trace and diagonal are planned yet unimplemented features.

    Args:
        equation (`str`):
            The summation terms using the Einstein summation notation.
        operands (`list|Tensor`):
            The input tensors over which to compute the Einstein summation. The number of
            operands should equal the number of input terms in the equation.

    Returns:
        result (`Tensor`), the result tensor.

    Examples:
        .. code-block:: python

            import paddle
            paddle.seed(102)
            x = paddle.rand([4])
            y = paddle.rand([5])

            # sum
            print(paddle.einsum('i->', x))
            # Tensor(shape=[], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
            #   1.95791852)

            # dot
            print(paddle.einsum('i,i->', x, x))
            # Tensor(shape=[1], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
            #   [1.45936954])

            # outer
            print(paddle.einsum("i,j->ij", x, y))
            # Tensor(shape=[4, 5], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
            #   [[0.00079869, 0.00120950, 0.00136844, 0.00187187, 0.00192194],
            #    [0.23455200, 0.35519385, 0.40186870, 0.54970956, 0.56441545],
            #    [0.11773264, 0.17828843, 0.20171674, 0.27592498, 0.28330654],
            #    [0.32897076, 0.49817693, 0.56364071, 0.77099484, 0.79162055]])

            A = paddle.rand([2, 3, 2])
            B = paddle.rand([2, 2, 3])

            # transpose
            print(paddle.einsum('ijk->kji', A))
            #  Tensor(shape=[2, 3, 2], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
            #   [[[0.95649719, 0.49684682],
            #     [0.80071914, 0.46258664],
            #     [0.49814570, 0.33383518]],
            #
            #    [[0.07637714, 0.29374704],
            #     [0.51470858, 0.51907635],
            #     [0.99066722, 0.55802226]]])

            # batch matrix multiplication
            print(paddle.einsum('ijk, ikl->ijl', A,B))
            # Tensor(shape=[2, 3, 3], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
            #   [[[0.32172769, 0.50617385, 0.41394392],
            #     [0.51736701, 0.49921003, 0.38730967],
            #     [0.69078457, 0.42282537, 0.30161136]],
            #
            #    [[0.32043904, 0.18164253, 0.27810261],
            #     [0.50226176, 0.24512935, 0.39881429],
            #     [0.51476848, 0.23367381, 0.39229113]]])

            # Ellipsis transpose
            print(paddle.einsum('...jk->...kj', A))
            # Tensor(shape=[2, 2, 3], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
            #   [[[0.95649719, 0.80071914, 0.49814570],
            #     [0.07637714, 0.51470858, 0.99066722]],
            #
            #    [[0.49684682, 0.46258664, 0.33383518],
            #     [0.29374704, 0.51907635, 0.55802226]]])

            # Ellipsis batch matrix multiplication
            print(paddle.einsum('...jk, ...kl->...jl', A,B))
            # Tensor(shape=[2, 3, 3], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
            #   [[[0.32172769, 0.50617385, 0.41394392],
            #     [0.51736701, 0.49921003, 0.38730967],
            #     [0.69078457, 0.42282537, 0.30161136]],
            #
            #    [[0.32043904, 0.18164253, 0.27810261],
            #     [0.50226176, 0.24512935, 0.39881429],
            #     [0.51476848, 0.23367381, 0.39229113]]])

    """
    import os

    if int(os.environ.get('FLAGS_new_einsum', "1")):
        return einsum_v2(equation, *operands)

    nop = len(operands)
    assert nop > 0, "At least one operand is expected."

    # Split the equation into left-hand side and right-hand side
    lhs, *rhs = equation.lower().replace(' ', '').split('->')
    assert len(rhs) < 2, "Invalid equation: multiple `->` were found."

    # Note, we distinguish between 'ij->' and 'ij' by setting rhs to '' and None
    rhs = rhs[0] if rhs else None

    # Parse labels for each operand and count the number of occurrences for each alphabet label
    nop_labels = parse_labels(lhs, operands)

    # Diagonalize the operands which have duplicate labels
    nop_labels, operands = list(zip(*map(diagonalize, nop_labels, operands)))

    # To handle broadcasting, we should first know how many dimensions are there
    # We need to use that number to generate output labels
    # e.g. 1 for ['ij', 'i.', '.k']
    n_bcast_dims = max(map(lambda s: s.count('.'), nop_labels))

    # Build the data structures for planning. It's helpful to think of all the operands
    # broadcasting together from a global view. In this view, dimensions from multiple
    # operands are mapped to the same position if they are labeled uniquely. Broadcasting
    # dimensions are mapped to adjacent positions with the right bound fixed. Subject to
    # each operand, the map is injective but for all operands the map is onto.
    # g_labels:
    #   The labels of the global view
    # g_view:
    #   Includes a list of maps from each operand's dimensions to the global view's dimensions
    #   which we refer to as ax or axes in the code to distinguish from operand's dims
    # g_shape:
    #   The shape of the global view. The size of each dimension is what the aligned dimensions
    #   should broadcast to
    # g_nout:
    #   Number of output axes
    # g_supports
    #   Booleans indicating each operand's non-trivial dimensions
    # g_count
    #   Counting how many non-trivial dimensions remain for each ax

    g_labels, g_view, g_nout, g_count = build_global_view(
        nop_labels, rhs, n_bcast_dims
    )
    g_shape, g_supports = build_global_shape(
        g_view, g_labels, [op.shape for op in operands]
    )

    # Now we're ready to build up an execution plan
    args = operands, g_view, g_shape, g_supports, g_count, n_bcast_dims
    plan = plan_einsum(*args)
    result = plan.execute()

    return result