# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from . import control_flow
from . import tensor
from . import nn
import math
import numpy as np
import warnings
import paddle

from ..data_feeder import (
    convert_dtype,
    check_variable_and_dtype,
    check_type,
    check_dtype,
)

__all__ = ['Uniform', 'Normal', 'Categorical', 'MultivariateNormalDiag']


class Distribution:
    """
    Distribution is the abstract base class for probability distributions.
    """

    def sample(self):
        """Sampling from the distribution."""
        raise NotImplementedError

    def entropy(self):
        """The entropy of the distribution."""
        raise NotImplementedError

    def kl_divergence(self, other):
        """The KL-divergence between self distributions and other."""
        raise NotImplementedError

    def log_prob(self, value):
        """Log probability density/mass function."""
        raise NotImplementedError

    def _validate_args(self, *args):
        """
        Argument validation for distribution args
        Args:
            value (float, list, numpy.ndarray, Variable)
        Raises
            ValueError: if one argument is Variable, all arguments should be Variable
        """
        is_variable = False
        is_number = False
        for arg in args:
            if isinstance(arg, tensor.Variable):
                is_variable = True
            else:
                is_number = True

        if is_variable and is_number:
            raise ValueError(
                'if one argument is Variable, all arguments should be Variable'
            )

        return is_variable

    def _to_variable(self, *args):
        """
        Argument convert args to Variable

        Args:
            value (float, list, numpy.ndarray, Variable)
        Returns:
            Variable of args.
        """
        numpy_args = []
        variable_args = []
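        # Accumulate the numpy arguments into `tmp` so that `tmp` ends up with
        # the broadcast shape and common dtype of all arguments; each argument
        # is later broadcast against `tmp` before being assigned to a Variable.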
        tmp = 0.0

        for arg in args:
            valid_arg = False
            for cls in [float, list, np.ndarray, tensor.Variable]:
                if isinstance(arg, cls):
                    valid_arg = True
                    break
            assert (
                valid_arg
            ), "type of input args must be float, list, numpy.ndarray or Variable."
            if isinstance(arg, float):
                arg = np.zeros(1) + arg
            arg_np = np.array(arg)
            arg_dtype = arg_np.dtype
            if str(arg_dtype) not in ['float32']:
                warnings.warn(
                    "data type of argument only support float32, your argument will be convert to float32."
                )
                arg_np = arg_np.astype('float32')
            tmp = tmp + arg_np
            numpy_args.append(arg_np)

        dtype = tmp.dtype
        for arg in numpy_args:
            arg_broadcasted, _ = np.broadcast_arrays(arg, tmp)
            arg_variable = tensor.create_tensor(dtype=dtype)
            tensor.assign(arg_broadcasted, arg_variable)
            variable_args.append(arg_variable)

        return tuple(variable_args)


class Uniform(Distribution):
    r"""Uniform distribution with `low` and `high` parameters.

    Mathematical Details

    The probability density function (pdf) is,

    .. math::

        pdf(x; a, b) = \frac{1}{Z}, \quad a \leq x < b

    .. math::

        Z = b - a

    In the above equation:

    * :math:`low = a`,
    * :math:`high = b`,
    * :math:`Z`: is the normalizing constant.

    The parameters `low` and `high` must be shaped in a way that supports
    broadcasting (e.g., `high - low` is a valid operation).

    Args:
        low(float|list|numpy.ndarray|Variable): The lower boundary of uniform distribution. The data type is float32.
        high(float|list|numpy.ndarray|Variable): The upper boundary of uniform distribution. The data type is float32.

    Examples:
        .. code-block:: python

          import numpy as np
          from paddle.fluid import layers
          from paddle.fluid.layers import Uniform

          # Without broadcasting, a single uniform distribution [3, 4]:
          u1 = Uniform(low=3.0, high=4.0)
          # 2 distributions [1, 3], [2, 4]
          u2 = Uniform(low=[1.0, 2.0],
                       high=[3.0, 4.0])
          # 4 distributions
          u3 = Uniform(low=[[1.0, 2.0],
                            [3.0, 4.0]],
                       high=[[1.5, 2.5],
                             [3.5, 4.5]])

          # With broadcasting:
          u4 = Uniform(low=3.0, high=[5.0, 6.0, 7.0])

          # Complete example
          value_npdata = np.array([0.8], dtype="float32")
          value_tensor = layers.create_tensor(dtype="float32")
          layers.assign(value_npdata, value_tensor)

          uniform = Uniform([0.], [2.])

          sample = uniform.sample([2])
          # a random tensor created by uniform distribution with shape: [2, 1]
          entropy = uniform.entropy()
          # [0.6931472] with shape: [1]
          lp = uniform.log_prob(value_tensor)
          # [-0.6931472] with shape: [1]
    """

    def __init__(self, low, high):
        check_type(
            low, 'low', (float, np.ndarray, tensor.Variable, list), 'Uniform'
        )
        check_type(
            high, 'high', (float, np.ndarray, tensor.Variable, list), 'Uniform'
        )

        self.all_arg_is_float = False
        self.batch_size_unknown = False
        if self._validate_args(low, high):
            self.batch_size_unknown = True
            self.low = low
            self.high = high
        else:
            if isinstance(low, float) and isinstance(high, float):
                self.all_arg_is_float = True
            self.low, self.high = self._to_variable(low, high)

    def sample(self, shape, seed=0):
        """Generate samples of the specified shape.

        Args:
          shape (list): 1D `int32`. Shape of the generated samples.
          seed (int): Python integer number.

        Returns:
          Variable: A tensor with prepended dimensions shape. The data type is float32.

        """
        check_type(shape, 'shape', (list), 'sample')
        check_type(seed, 'seed', (int), 'sample')

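        # When the parameters are user-supplied Variables, the leading (batch)
        # dimension may be dynamic, so the sample is built with the
        # *_batch_size_like ops, which resolve the shape at runtime.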
        batch_shape = list((self.low + self.high).shape)
        if self.batch_size_unknown:
            output_shape = shape + batch_shape
            zero_tmp = tensor.fill_constant_batch_size_like(
                self.low + self.high, batch_shape + shape, self.low.dtype, 0.0
            )
            uniform_random_tmp = (
                paddle.tensor.random.uniform_random_batch_size_like(
                    zero_tmp, zero_tmp.shape, min=0.0, max=1.0, seed=seed
                )
            )
            output = (
                uniform_random_tmp * (zero_tmp + self.high - self.low)
                + self.low
            )
            return paddle.reshape(output, output_shape)
        else:
            output_shape = shape + batch_shape
            output = (
                nn.uniform_random(output_shape, seed=seed)
                * (
                    tensor.zeros(output_shape, dtype=self.low.dtype)
                    + (self.high - self.low)
                )
                + self.low
            )
            if self.all_arg_is_float:
                return paddle.reshape(output, shape)
            else:
                return output

    def log_prob(self, value):
        """Log probability density/mass function.

        Args:
          value (Variable): The input tensor.

        Returns:
          Variable: log probability. The data type is the same as value.

        """
        check_variable_and_dtype(
            value, 'value', ['float32', 'float64'], 'log_prob'
        )

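        # lb * ub is 1 inside the open interval (low, high) and 0 outside, so
        # nn.log(lb * ub) is 0 on the support and -inf off it; subtracting
        # log(high - low) then gives the uniform log-density.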
        lb_bool = control_flow.less_than(self.low, value)
        ub_bool = control_flow.less_than(value, self.high)
        lb = tensor.cast(lb_bool, dtype=value.dtype)
        ub = tensor.cast(ub_bool, dtype=value.dtype)
        return nn.log(lb * ub) - nn.log(self.high - self.low)

    def entropy(self):
        """Shannon entropy in nats.

        Returns:
          Variable: Shannon entropy of uniform distribution. The data type is float32.

        """
        return nn.log(self.high - self.low)


class Normal(Distribution):
    r"""The Normal distribution with location `loc` and `scale` parameters.

    Mathematical details

    The probability density function (pdf) is,

    .. math::

        pdf(x; \mu, \sigma) = \frac{1}{Z} e^{\frac{-0.5 (x - \mu)^2}{\sigma^2}}

    .. math::

        Z = (2 \pi \sigma^2)^{0.5}

    In the above equation:

    * :math:`loc = \mu`: is the mean.
    * :math:`scale = \sigma`: is the std.
    * :math:`Z`: is the normalization constant.

    Args:
        loc(float|list|numpy.ndarray|Variable): The mean of normal distribution. The data type is float32.
        scale(float|list|numpy.ndarray|Variable): The std of normal distribution. The data type is float32.

    Examples:
        .. code-block:: python

          import numpy as np
          from paddle.fluid import layers
          from paddle.fluid.layers import Normal

          # Define a single scalar Normal distribution.
          dist = Normal(loc=0., scale=3.)
          # Define a batch of two scalar valued Normals.
          # The first has mean 1 and standard deviation 11, the second 2 and 22.
          dist = Normal(loc=[1., 2.], scale=[11., 22.])
          # Get 3 samples, returning a 3 x 2 tensor.
          dist.sample([3])

          # Define a batch of two scalar valued Normals.
          # Both have mean 1, but different standard deviations.
          dist = Normal(loc=1., scale=[11., 22.])

          # Complete example
          value_npdata = np.array([0.8], dtype="float32")
          value_tensor = layers.create_tensor(dtype="float32")
          layers.assign(value_npdata, value_tensor)

          normal_a = Normal([0.], [1.])
          normal_b = Normal([0.5], [2.])

          sample = normal_a.sample([2])
          # a random tensor created by normal distribution with shape: [2, 1]
          entropy = normal_a.entropy()
          # [1.4189385] with shape: [1]
          lp = normal_a.log_prob(value_tensor)
          # [-1.2389386] with shape: [1]
          kl = normal_a.kl_divergence(normal_b)
          # [0.34939718] with shape: [1]
    """

    def __init__(self, loc, scale):
        check_type(
            loc, 'loc', (float, np.ndarray, tensor.Variable, list), 'Normal'
        )
        check_type(
            scale, 'scale', (float, np.ndarray, tensor.Variable, list), 'Normal'
        )

        self.batch_size_unknown = False
        self.all_arg_is_float = False
        if self._validate_args(loc, scale):
            self.batch_size_unknown = True
            self.loc = loc
            self.scale = scale
        else:
            if isinstance(loc, float) and isinstance(scale, float):
                self.all_arg_is_float = True
            self.loc, self.scale = self._to_variable(loc, scale)

    def sample(self, shape, seed=0):
        """Generate samples of the specified shape.

        Args:
          shape (list): 1D `int32`. Shape of the generated samples.
          seed (int): Python integer number.

        Returns:
          Variable: A tensor with prepended dimensions shape. The data type is float32.

        """

        check_type(shape, 'shape', (list), 'sample')
        check_type(seed, 'seed', (int), 'sample')

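        # As in Uniform.sample, the dynamic-batch path builds the shape with
        # the *_batch_size_like ops; samples are drawn from N(0, 1) and then
        # scaled by `scale` and shifted by `loc`.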
        batch_shape = list((self.loc + self.scale).shape)

        if self.batch_size_unknown:
            output_shape = shape + batch_shape
            zero_tmp = tensor.fill_constant_batch_size_like(
                self.loc + self.scale, batch_shape + shape, self.loc.dtype, 0.0
            )
            zero_tmp_shape = nn.shape(zero_tmp)
            normal_random_tmp = nn.gaussian_random(
                zero_tmp_shape, mean=0.0, std=1.0, seed=seed
            )
            output = normal_random_tmp * (zero_tmp + self.scale) + self.loc
            return paddle.reshape(output, output_shape)
        else:
            output_shape = shape + batch_shape
            output = (
                nn.gaussian_random(output_shape, mean=0.0, std=1.0, seed=seed)
                * (
                    tensor.zeros(output_shape, dtype=self.loc.dtype)
                    + self.scale
                )
                + self.loc
            )
            if self.all_arg_is_float:
                return paddle.reshape(output, shape)
            else:
                return output

    def entropy(self):
        """Shannon entropy in nats.

        Returns:
          Variable: Shannon entropy of normal distribution. The data type is float32.

        """
        batch_shape = list((self.loc + self.scale).shape)
        zero_tmp = tensor.fill_constant_batch_size_like(
            self.loc + self.scale, batch_shape, self.loc.dtype, 0.0
        )
        return (
            0.5 + 0.5 * math.log(2 * math.pi) + nn.log((self.scale + zero_tmp))
        )

    def log_prob(self, value):
        """Log probability density/mass function.

        Args:
          value (Variable): The input tensor.

        Returns:
          Variable: log probability. The data type is the same as value.

        """
        check_variable_and_dtype(
            value, 'value', ['float32', 'float64'], 'log_prob'
        )

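        # log p(x) = -(x - mu)^2 / (2 * sigma^2) - log(sigma) - 0.5 * log(2 * pi)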
        var = self.scale * self.scale
        log_scale = nn.log(self.scale)
        return (
            -1.0 * ((value - self.loc) * (value - self.loc)) / (2.0 * var)
            - log_scale
            - math.log(math.sqrt(2.0 * math.pi))
        )

    def kl_divergence(self, other):
        """The KL-divergence between two normal distributions.

        Args:
            other (Normal): instance of Normal.

        Returns:
            Variable: kl-divergence between two normal distributions. The data type is float32.

        """

        check_type(other, 'other', Normal, 'kl_divergence')

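        # KL(N_p || N_q) = 0.5 * ((sigma_p / sigma_q)^2
        #                         + ((mu_p - mu_q) / sigma_q)^2
        #                         - 1 - log((sigma_p / sigma_q)^2));
        # e.g. KL(N(0, 1) || N(0.5, 2)) = 0.5 * (0.25 + 0.0625 - 1 - log(0.25))
        # = 0.34939718, matching the class docstring example.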
        var_ratio = self.scale / other.scale
        var_ratio = var_ratio * var_ratio
        t1 = (self.loc - other.loc) / other.scale
        t1 = t1 * t1
        return 0.5 * (var_ratio + t1 - 1.0 - nn.log(var_ratio))


class Categorical(Distribution):
    r"""
    Categorical distribution is a discrete probability distribution that
    describes the possible results of a random variable that can take on
    one of K possible categories, with the probability of each category
    separately specified.

    The probability mass function (pmf) is:

    .. math::

        pmf(k; p_i) = \prod_{i=1}^{k} p_i^{[x=i]}

    In the above equation:

    * :math:`[x=i]` : it evaluates to 1 if :math:`x==i`, otherwise 0.

    Args:
        logits(list|numpy.ndarray|Variable): The logits input of categorical distribution. The data type is float32.

    Examples:
        .. code-block:: python

          import numpy as np
          from paddle.fluid import layers
          from paddle.fluid.layers import Categorical

          a_logits_npdata = np.array([-0.602,-0.602], dtype="float32")
          a_logits_tensor = layers.create_tensor(dtype="float32")
          layers.assign(a_logits_npdata, a_logits_tensor)

          b_logits_npdata = np.array([-0.102,-0.112], dtype="float32")
          b_logits_tensor = layers.create_tensor(dtype="float32")
          layers.assign(b_logits_npdata, b_logits_tensor)

          a = Categorical(a_logits_tensor)
          b = Categorical(b_logits_tensor)

          a.entropy()
          # [0.6931472] with shape: [1]

          b.entropy()
          # [0.6931347] with shape: [1]

          a.kl_divergence(b)
          # [1.2516975e-05] with shape: [1]

    """

    def __init__(self, logits):
        """
        Args:
            logits(list|numpy.ndarray|Variable): The logits input of categorical distribution. The data type is float32.
        """
        check_type(
            logits, 'logits', (np.ndarray, tensor.Variable, list), 'Categorical'
        )

        if self._validate_args(logits):
            self.logits = logits
        else:
            self.logits = self._to_variable(logits)[0]

    def kl_divergence(self, other):
        """The KL-divergence between two Categorical distributions.

        Args:
            other (Categorical): instance of Categorical. The data type is float32.

        Returns:
            Variable: kl-divergence between two Categorical distributions.

        """
        check_type(other, 'other', Categorical, 'kl_divergence')

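        # Shift both sets of logits by their per-row maximum before
        # exponentiating (log-sum-exp stabilization) so paddle.exp cannot
        # overflow; the shift cancels out in the normalized probabilities.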
        logits = self.logits - paddle.max(self.logits, axis=-1, keepdim=True)
        other_logits = other.logits - paddle.max(
            other.logits, axis=-1, keepdim=True
        )
        e_logits = paddle.exp(logits)
        other_e_logits = paddle.exp(other_logits)
        z = paddle.sum(e_logits, axis=-1, keepdim=True)
        other_z = paddle.sum(other_e_logits, axis=-1, keepdim=True)
        prob = e_logits / z
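        # KL(p || q) = sum_i p_i * (log p_i - log q_i), where
        # log p_i = logits_i - log(z) and log q_i = other_logits_i - log(other_z).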
        kl = paddle.sum(
            prob * (logits - nn.log(z) - other_logits + nn.log(other_z)),
            axis=-1,
            keepdim=True,
        )

        return kl

    def entropy(self):
        """Shannon entropy in nats.

        Returns:
          Variable: Shannon entropy of Categorical distribution. The data type is float32.

        """
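        # H(p) = -sum_i p_i * log(p_i), with log p_i = logits_i - log(z)
        # computed from max-shifted logits for numerical stability.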
        logits = self.logits - paddle.max(self.logits, axis=-1, keepdim=True)
        e_logits = paddle.exp(logits)
        z = paddle.sum(e_logits, axis=-1, keepdim=True)

        prob = e_logits / z
        entropy = -1.0 * paddle.sum(
            prob * (logits - nn.log(z)), axis=-1, keepdim=True
        )

        return entropy


class MultivariateNormalDiag(Distribution):
    r"""
    A multivariate normal (also called Gaussian) distribution parameterized by a mean vector
    and a covariance matrix.

    The probability density function (pdf) is:

    .. math::

        pdf(x; loc, scale) = \frac{e^{-\frac{||y||^2}{2}}}{Z}

    where:

    .. math::

        y = inv(scale) @ (x - loc)

        Z = (2\pi)^{0.5k} |det(scale)|


    In the above equation:

    * :math:`inv` : denotes taking the inverse of the matrix.
    * :math:`@` : denotes matrix multiplication.
    * :math:`det` : denotes evaluating the determinant.

    Args:
        loc(list|numpy.ndarray|Variable): The mean of the multivariate normal distribution with shape :math:`[k]`.
            The data type is float32.
        scale(list|numpy.ndarray|Variable): The positive definite diagonal covariance matrix of the multivariate
            normal distribution with shape :math:`[k, k]`. All elements are 0 except diagonal elements. The data
            type is float32.

    Examples:
        .. code-block:: python

            import numpy as np
            from paddle.fluid import layers
            from paddle.fluid.layers import MultivariateNormalDiag

            a_loc_npdata = np.array([0.3,0.5],dtype="float32")
            a_loc_tensor = layers.create_tensor(dtype="float32")
            layers.assign(a_loc_npdata, a_loc_tensor)


            a_scale_npdata = np.array([[0.4,0],[0,0.5]],dtype="float32")
            a_scale_tensor = layers.create_tensor(dtype="float32")
            layers.assign(a_scale_npdata, a_scale_tensor)

            b_loc_npdata = np.array([0.2,0.4],dtype="float32")
            b_loc_tensor = layers.create_tensor(dtype="float32")
            layers.assign(b_loc_npdata, b_loc_tensor)

            b_scale_npdata = np.array([[0.3,0],[0,0.4]],dtype="float32")
            b_scale_tensor = layers.create_tensor(dtype="float32")
            layers.assign(b_scale_npdata, b_scale_tensor)

            a = MultivariateNormalDiag(a_loc_tensor, a_scale_tensor)
            b = MultivariateNormalDiag(b_loc_tensor, b_scale_tensor)

            a.entropy()
            # [2.033158] with shape: [1]
            b.entropy()
            # [1.7777451] with shape: [1]

            a.kl_divergence(b)
            # [0.06542051] with shape: [1]

    """

    def __init__(self, loc, scale):
        check_type(
            loc,
            'loc',
            (np.ndarray, tensor.Variable, list),
            'MultivariateNormalDiag',
        )
        check_type(
            scale,
            'scale',
            (np.ndarray, tensor.Variable, list),
            'MultivariateNormalDiag',
        )

        if self._validate_args(loc, scale):
            self.loc = loc
            self.scale = scale
        else:
            self.loc, self.scale = self._to_variable(loc, scale)

    def _det(self, value):
        # Determinant of a diagonal covariance matrix: `value + one_all -
        # one_diag` turns the off-diagonal zeros into ones, so paddle.prod
        # over every entry equals the product of the diagonal entries.
        batch_shape = list(value.shape)
        one_all = tensor.ones(shape=batch_shape, dtype=self.loc.dtype)
        one_diag = tensor.diag(
            tensor.ones(shape=[batch_shape[0]], dtype=self.loc.dtype)
        )
        det_diag = paddle.prod(value + one_all - one_diag)

        return det_diag

    def _inv(self, value):
        # Inverse of a diagonal covariance matrix: raising the diagonal
        # entries to the power -1 (exponent one_all - 2 * one_diag) and the
        # off-diagonal zeros to the power 1 leaves a diagonal of reciprocals.
        batch_shape = list(value.shape)
        one_all = tensor.ones(shape=batch_shape, dtype=self.loc.dtype)
        one_diag = tensor.diag(
            tensor.ones(shape=[batch_shape[0]], dtype=self.loc.dtype)
        )
        inv_diag = paddle.pow(value, (one_all - 2 * one_diag))

        return inv_diag

    def entropy(self):
        """Shannon entropy in nats.

        Returns:
          Variable: Shannon entropy of Multivariate Normal distribution. The data type is float32.

        """
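        # H(N(loc, Sigma)) = 0.5 * (k * (1 + log(2 * pi)) + log(det(Sigma))),
        # where k = self.scale.shape[0]; e.g. Sigma = diag([0.4, 0.5]) gives
        # 0.5 * (2 * (1 + log(2 * pi)) + log(0.2)) = 2.033158, as in the
        # class docstring example.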
        entropy = 0.5 * (
            self.scale.shape[0] * (1.0 + math.log(2 * math.pi))
            + nn.log(self._det(self.scale))
        )

        return entropy

    def kl_divergence(self, other):
        """The KL-divergence between two Multivariate Normal distributions.

        Args:
            other (MultivariateNormalDiag): instance of Multivariate Normal.

        Returns:
            Variable: kl-divergence between two Multivariate Normal distributions. The data type is float32.

        """
        check_type(other, 'other', MultivariateNormalDiag, 'kl_divergence')

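        # KL(N_p || N_q) = 0.5 * (tr(Sigma_q^-1 Sigma_p)
        #                         + (mu_q - mu_p)^T Sigma_q^-1 (mu_q - mu_p)
        #                         - k + log(det(Sigma_q) / det(Sigma_p)))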
        tr_cov_matmul = paddle.sum(self._inv(other.scale) * self.scale)
        loc_matmul_cov = nn.matmul(
            (other.loc - self.loc), self._inv(other.scale)
        )
        tri_matmul = nn.matmul(loc_matmul_cov, (other.loc - self.loc))
        k = list(self.scale.shape)[0]
        ln_cov = nn.log(self._det(other.scale)) - nn.log(self._det(self.scale))
        kl = 0.5 * (tr_cov_matmul + tri_matmul - k + ln_cov)

        return kl