From 33ced53e5234a79f235ebe6d4ab160b5878b5c81 Mon Sep 17 00:00:00 2001
From: Varuna Jayasiri
Date: Wed, 22 Jun 2022 17:23:28 +0530
Subject: [PATCH] experiment links

---
 docs/diffusion/ddpm/experiment.html          |  2 +-
 docs/diffusion/ddpm/index.html               |  2 +-
 docs/diffusion/ddpm/readme.html              |  4 ++--
 docs/sitemap.xml                             |  4 ++--
 labml_nn/activations/fta/__init__.py         |  6 ++---
 labml_nn/activations/fta/experiment.py       |  6 ++---
 labml_nn/capsule_networks/__init__.py        |  1 -
 labml_nn/capsule_networks/mnist.py           |  2 --
 labml_nn/diffusion/ddpm/__init__.py          |  6 ++---
 labml_nn/diffusion/ddpm/experiment.ipynb     | 22 ++++++++++++++-----
 labml_nn/diffusion/ddpm/experiment.py        |  6 ++---
 labml_nn/diffusion/ddpm/readme.md            |  5 +++--
 labml_nn/normalization/deep_norm/__init__.py |  7 +++---
 .../normalization/deep_norm/experiment.ipynb |  1 -
 .../normalization/deep_norm/experiment.py    |  1 -
 15 files changed, 41 insertions(+), 34 deletions(-)

diff --git a/docs/diffusion/ddpm/experiment.html b/docs/diffusion/ddpm/experiment.html
index a41c71f4..59d9c1f8 100644
--- a/docs/diffusion/ddpm/experiment.html
+++ b/docs/diffusion/ddpm/experiment.html
@@ -70,10 +70,10 @@ #

Denoising Diffusion Probabilistic Models (DDPM) training

+ Open In Colab Open In Comet

This trains a DDPM-based model on the CelebA HQ dataset. You can find the download instructions in this discussion on fast.ai. Save the images inside the data/celebA folder.

The paper used an exponential moving average of the model with a decay of $0.9999$. We have skipped this for simplicity.
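For reference, here is a minimal sketch of such an EMA of the model weights — a hypothetical helper, not part of this patch; only the decay value comes from the paper:

```python
import copy

import torch


class EMA:
    """Exponential moving average of a model's parameters."""

    def __init__(self, model: torch.nn.Module, decay: float = 0.9999):
        self.decay = decay
        # Frozen copy whose weights track the moving average
        self.shadow = copy.deepcopy(model).eval()
        for p in self.shadow.parameters():
            p.requires_grad_(False)

    @torch.no_grad()
    def update(self, model: torch.nn.Module):
        # shadow <- decay * shadow + (1 - decay) * current weights
        for s, p in zip(self.shadow.parameters(), model.parameters()):
            s.mul_(self.decay).add_(p, alpha=1.0 - self.decay)
```

One would call `update()` after each optimizer step and sample from the shadow model.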

- Open In Colab Open In Comet

diff --git a/docs/diffusion/ddpm/index.html b/docs/diffusion/ddpm/index.html
index 4dac1061..8ba7e412 100644
--- a/docs/diffusion/ddpm/index.html
+++ b/docs/diffusion/ddpm/index.html
@@ -70,6 +70,7 @@ #

Denoising Diffusion Probabilistic Models (DDPM)

+ Open In Colab Open In Comet

This is a PyTorch implementation/tutorial of the paper Denoising Diffusion Probabilistic Models.

In simple terms, we get an image from data and add noise step by step. Then we train a model to predict that noise at each step and use the model to generate images.
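As a concrete sketch of that forward (noising) step — here `alpha_bar` holds $\bar\alpha_t = \prod_{s=1}^t (1 - \beta_s)$, and the names are illustrative, not this repo's API:

```python
import torch


def q_sample(x0: torch.Tensor, t: torch.Tensor, alpha_bar: torch.Tensor):
    """Sample x_t ~ q(x_t | x_0) = N(sqrt(alpha_bar_t) x_0, (1 - alpha_bar_t) I)."""
    eps = torch.randn_like(x0)
    # Pick alpha_bar_t per sample and broadcast over (channels, height, width)
    a = alpha_bar[t].view(-1, 1, 1, 1)
    xt = a.sqrt() * x0 + (1 - a).sqrt() * eps
    return xt, eps  # eps is the target the model learns to predict
```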

The following definitions and derivations show how this works. For details please refer to the paper.

@@ -297,7 +298,6 @@
This minimizes $-\log p_\theta(x_0 \vert x_1)$ when $t=1$, and $L_{t-1}$ for $t > 1$, discarding the weighting in $L_{t-1}$. Discarding the weights increases the weight given to higher $t$ (which have higher noise levels), therefore increasing the sample quality.

This file implements the loss calculation and a basic sampling method that we use to generate images during training.
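A hedged sketch of that loss calculation — it assumes a noise-predicting `model(x_t, t)` and an `alpha_bar` buffer as above, not the file's actual structure:

```python
import torch
import torch.nn.functional as F


def ddpm_loss(model, x0: torch.Tensor, alpha_bar: torch.Tensor, n_steps: int):
    """Simplified DDPM objective: MSE between the true and the predicted noise."""
    # Random timestep for each image in the batch
    t = torch.randint(0, n_steps, (x0.shape[0],), device=x0.device)
    a = alpha_bar[t].view(-1, 1, 1, 1)
    eps = torch.randn_like(x0)
    xt = a.sqrt() * x0 + (1 - a).sqrt() * eps  # x_t ~ q(x_t | x_0)
    return F.mse_loss(model(xt, t), eps)       # || eps - eps_theta(x_t, t) ||^2
```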

Here is the UNet model that gives $\epsilon_\theta(x_t, t)$ and the training code. This file can generate samples and interpolations from a trained model.
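And a sketch of one reverse (sampling) step under the paper's choice $\sigma_t^2 = \beta_t$; again, the argument names are assumptions for illustration:

```python
import torch


@torch.no_grad()
def p_sample(model, xt: torch.Tensor, t: int,
             alpha: torch.Tensor, alpha_bar: torch.Tensor, beta: torch.Tensor):
    """One reverse step x_t -> x_{t-1} using the predicted noise."""
    t_batch = xt.new_full((xt.shape[0],), t, dtype=torch.long)
    eps_theta = model(xt, t_batch)
    # mu_theta = (x_t - beta_t / sqrt(1 - alpha_bar_t) * eps_theta) / sqrt(alpha_t)
    mean = (xt - beta[t] / (1 - alpha_bar[t]).sqrt() * eps_theta) / alpha[t].sqrt()
    noise = torch.randn_like(xt) if t > 0 else torch.zeros_like(xt)  # no noise at t = 0
    return mean + beta[t].sqrt() * noise
```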

- Open In Colab Open In Comet

diff --git a/docs/diffusion/ddpm/readme.html b/docs/diffusion/ddpm/readme.html
index 568c4d2e..1e99395c 100644
--- a/docs/diffusion/ddpm/readme.html
+++ b/docs/diffusion/ddpm/readme.html
@@ -70,10 +70,10 @@ #

Denoising Diffusion Probabilistic Models (DDPM)

+ Open In Colab Open In Comet

This is a PyTorch implementation/tutorial of the paper Denoising Diffusion Probabilistic Models.

In simple terms, we get an image from data and add noise step by step. Then we train a model to predict that noise at each step and use the model to generate images.

- Here is the UNet model that predicts the noise, and the training code. This file can generate samples and interpolations from a trained model.

- View Run

+ Here is the UNet model that predicts the noise, and the training code. This file can generate samples and interpolations from a trained model.

diff --git a/docs/sitemap.xml b/docs/sitemap.xml index d4c4d364..2cf57f05 100644 --- a/docs/sitemap.xml +++ b/docs/sitemap.xml @@ -372,7 +372,7 @@ https://nn.labml.ai/diffusion/ddpm/unet.html - 2021-10-24T16:30:00+00:00 + 2022-06-09T16:30:00+00:00 1.00 @@ -400,7 +400,7 @@ https://nn.labml.ai/diffusion/ddpm/evaluate.html - 2021-10-24T16:30:00+00:00 + 2022-06-09T16:30:00+00:00 1.00 diff --git a/labml_nn/activations/fta/__init__.py b/labml_nn/activations/fta/__init__.py index fd24407e..664d2196 100644 --- a/labml_nn/activations/fta/__init__.py +++ b/labml_nn/activations/fta/__init__.py @@ -8,6 +8,9 @@ summary: > # Fuzzy Tiling Activations (FTA) +[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/labmlai/annotated_deep_learning_paper_implementations/blob/master/labml_nn/activations/fta/experiment.ipynb) +[![Open In Comet](https://images.labml.ai/images/comet.svg?experiment=capsule_networks&file=model)](https://www.comet.ml/labml/fta/69be11f83693407f82a86dcbb232bcfe?experiment-tab=chart&showOutliers=true&smoothing=0&transformY=smoothing&viewId=rlJOpXDGtL8zbkcX66R77P5me&xAxis=step) + This is a [PyTorch](https://pytorch.org) implementation/tutorial of [Fuzzy Tiling Activations: A Simple Approach to Learning Sparse Representations Online](https://papers.labml.ai/paper/aca66d8edc8911eba3db37f65e372566). @@ -54,9 +57,6 @@ FTA uses this to create soft boundaries between bins. $$\phi_\eta(z) = 1 - I_{\eta,+} \big( \max(\mathbf{c} - z, 0) + \max(z - \delta - \mathbf{c}, 0) \big)$$ [Here's a simple experiment](experiment.html) that uses FTA in a transformer. - -[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/labmlai/annotated_deep_learning_paper_implementations/blob/master/labml_nn/activations/fta/experiment.ipynb) -[![Open In Comet](https://images.labml.ai/images/comet.svg?experiment=capsule_networks&file=model)](https://www.comet.ml/labml/fta/69be11f83693407f82a86dcbb232bcfe?experiment-tab=chart&showOutliers=true&smoothing=0&transformY=smoothing&viewId=rlJOpXDGtL8zbkcX66R77P5me&xAxis=step) """ import torch diff --git a/labml_nn/activations/fta/experiment.py b/labml_nn/activations/fta/experiment.py index be740a02..f5f53789 100644 --- a/labml_nn/activations/fta/experiment.py +++ b/labml_nn/activations/fta/experiment.py @@ -7,6 +7,9 @@ summary: > # [Fuzzy Tiling Activation](index.html) Experiment +[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/labmlai/annotated_deep_learning_paper_implementations/blob/master/labml_nn/activations/fta/experiment.ipynb) +[![Open In Comet](https://images.labml.ai/images/comet.svg?experiment=capsule_networks&file=model)](https://www.comet.ml/labml/fta/69be11f83693407f82a86dcbb232bcfe?experiment-tab=chart&showOutliers=true&smoothing=0&transformY=smoothing&viewId=rlJOpXDGtL8zbkcX66R77P5me&xAxis=step) + Here we train a transformer that uses [Fuzzy Tiling Activation](index.html) in the [Feed-Forward Network](../../transformers/feed_forward.html). We use it for a language model and train it on Tiny Shakespeare dataset @@ -14,9 +17,6 @@ for demonstration. However, this is probably not the ideal task for FTA, and we believe FTA is more suitable for modeling data with continuous variables. 
- -[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/labmlai/annotated_deep_learning_paper_implementations/blob/master/labml_nn/activations/fta/experiment.ipynb) -[![Open In Comet](https://images.labml.ai/images/comet.svg?experiment=capsule_networks&file=model)](https://www.comet.ml/labml/fta/69be11f83693407f82a86dcbb232bcfe?experiment-tab=chart&showOutliers=true&smoothing=0&transformY=smoothing&viewId=rlJOpXDGtL8zbkcX66R77P5me&xAxis=step) """ import copy diff --git a/labml_nn/capsule_networks/__init__.py b/labml_nn/capsule_networks/__init__.py index 145052d9..1c0836e3 100644 --- a/labml_nn/capsule_networks/__init__.py +++ b/labml_nn/capsule_networks/__init__.py @@ -28,7 +28,6 @@ Here's a notebook for training a Capsule Network on MNIST dataset. [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/labmlai/annotated_deep_learning_paper_implementations/blob/master/labml_nn/capsule_networks/mnist.ipynb) [![View Run](https://img.shields.io/badge/labml-experiment-brightgreen)](https://app.labml.ai/run/e7c08e08586711ebb3e30242ac1c0002) -[![Open In Comet](https://images.labml.ai/images/comet.svg?experiment=capsule_networks&file=model)](https://www.comet.ml/labml/capsule-networks/reports/capsule-networks) """ import torch.nn as nn diff --git a/labml_nn/capsule_networks/mnist.py b/labml_nn/capsule_networks/mnist.py index 24504e8c..f9eb475f 100644 --- a/labml_nn/capsule_networks/mnist.py +++ b/labml_nn/capsule_networks/mnist.py @@ -10,8 +10,6 @@ This is an annotated PyTorch code to classify MNIST digits with PyTorch. This paper implements the experiment described in paper [Dynamic Routing Between Capsules](https://papers.labml.ai/paper/1710.09829). - -[![Open In Comet](https://images.labml.ai/images/comet.svg?experiment=capsule_networks&file=mnist)](https://www.comet.ml/labml/capsule-networks/reports/capsule-networks) """ from typing import Any diff --git a/labml_nn/diffusion/ddpm/__init__.py b/labml_nn/diffusion/ddpm/__init__.py index de1f24ac..e4fc9684 100644 --- a/labml_nn/diffusion/ddpm/__init__.py +++ b/labml_nn/diffusion/ddpm/__init__.py @@ -8,6 +8,9 @@ summary: > # Denoising Diffusion Probabilistic Models (DDPM) +[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/labmlai/annotated_deep_learning_paper_implementations/blob/master/labml_nn/diffusion/ddpm/experiment.ipynb) +[![Open In Comet](https://images.labml.ai/images/comet.svg?experiment=capsule_networks&file=model)](https://www.comet.ml/labml/diffuse/view/FknjSiKWotr8fgZerpC1sV1cy/panels) + This is a [PyTorch](https://pytorch.org) implementation/tutorial of the paper [Denoising Diffusion Probabilistic Models](https://papers.labml.ai/paper/2006.11239). @@ -156,9 +159,6 @@ training. Here is the [UNet model](unet.html) that gives $\textcolor{lightgreen}{\epsilon_\theta}(x_t, t)$ and [training code](experiment.html). [This file](evaluate.html) can generate samples and interpolations from a trained model. 
- -[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/labmlai/annotated_deep_learning_paper_implementations/blob/master/labml_nn/diffusion/ddpm/experiment.ipynb) -[![Open In Comet](https://images.labml.ai/images/comet.svg?experiment=capsule_networks&file=model)](https://www.comet.ml/labml/diffuse/1260757bcd6148e084ad3a46c38ac5c4?experiment-tab=chart&showOutliers=true&smoothing=0&transformY=smoothing&xAxis=step) """ from typing import Tuple, Optional diff --git a/labml_nn/diffusion/ddpm/experiment.ipynb b/labml_nn/diffusion/ddpm/experiment.ipynb index ea7b9ffd..0126ff4f 100644 --- a/labml_nn/diffusion/ddpm/experiment.ipynb +++ b/labml_nn/diffusion/ddpm/experiment.ipynb @@ -11,7 +11,7 @@ "source": [ "[![Github](https://img.shields.io/github/stars/labmlai/annotated_deep_learning_paper_implementations?style=social)](https://github.com/labmlai/annotated_deep_learning_paper_implementations)\n", "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/labmlai/annotated_deep_learning_paper_implementations/blob/master/labml_nn/diffusion/ddpm/experiment.ipynb)\n", - "[![Open In Comet](https://images.labml.ai/images/comet.svg?experiment=capsule_networks&file=model)](https://www.comet.ml/labml/diffuse/1260757bcd6148e084ad3a46c38ac5c4?experiment-tab=chart&showOutliers=true&smoothing=0&transformY=smoothing&xAxis=step)\n", + "[![Open In Comet](https://images.labml.ai/images/comet.svg?experiment=capsule_networks&file=model)](https://www.comet.ml/labml/diffuse/view/FknjSiKWotr8fgZerpC1sV1cy/panels)\n", "\n", "## [Denoising Diffusion Probabilistic Models (DDPM)](https://nn.labml.ai/diffusion/ddpm/index.html)\n", "\n", @@ -201,7 +201,11 @@ }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "Initializ" ] @@ -209,7 +213,11 @@ { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "configs.init()" @@ -282,7 +290,11 @@ { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [] } @@ -314,4 +326,4 @@ }, "nbformat": 4, "nbformat_minor": 4 -} +} \ No newline at end of file diff --git a/labml_nn/diffusion/ddpm/experiment.py b/labml_nn/diffusion/ddpm/experiment.py index e34d710f..e2681cea 100644 --- a/labml_nn/diffusion/ddpm/experiment.py +++ b/labml_nn/diffusion/ddpm/experiment.py @@ -8,15 +8,15 @@ summary: > # [Denoising Diffusion Probabilistic Models (DDPM)](index.html) training +[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/labmlai/annotated_deep_learning_paper_implementations/blob/master/labml_nn/diffusion/ddpm/experiment.ipynb) +[![Open In Comet](https://images.labml.ai/images/comet.svg?experiment=capsule_networks&file=model)](https://www.comet.ml/labml/diffuse/view/FknjSiKWotr8fgZerpC1sV1cy/panels) + This trains a DDPM based model on CelebA HQ dataset. You can find the download instruction in this [discussion on fast.ai](https://forums.fast.ai/t/download-celeba-hq-dataset/45873/3). Save the images inside [`data/celebA` folder](#dataset_path). The paper had used a exponential moving average of the model with a decay of $0.9999$. We have skipped this for simplicity. 
- -[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/labmlai/annotated_deep_learning_paper_implementations/blob/master/labml_nn/diffusion/ddpm/experiment.ipynb) -[![Open In Comet](https://images.labml.ai/images/comet.svg?experiment=capsule_networks&file=model)](https://www.comet.ml/labml/diffuse/1260757bcd6148e084ad3a46c38ac5c4?experiment-tab=chart&showOutliers=true&smoothing=0&transformY=smoothing&xAxis=step) """ from typing import List diff --git a/labml_nn/diffusion/ddpm/readme.md b/labml_nn/diffusion/ddpm/readme.md index 378b5af8..ff92474f 100644 --- a/labml_nn/diffusion/ddpm/readme.md +++ b/labml_nn/diffusion/ddpm/readme.md @@ -1,5 +1,8 @@ # [Denoising Diffusion Probabilistic Models (DDPM)](https://nn.labml.ai/diffusion/ddpm/index.html) +[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/labmlai/annotated_deep_learning_paper_implementations/blob/master/labml_nn/diffusion/ddpm/experiment.ipynb) +[![Open In Comet](https://images.labml.ai/images/comet.svg?experiment=capsule_networks&file=model)](https://www.comet.ml/labml/diffuse/view/FknjSiKWotr8fgZerpC1sV1cy/panels) + This is a [PyTorch](https://pytorch.org) implementation/tutorial of the paper [Denoising Diffusion Probabilistic Models](https://papers.labml.ai/paper/2006.11239). @@ -11,5 +14,3 @@ Here is the [UNet model](https://nn.labml.ai/diffusion/ddpm/unet.html) that pred [training code](https://nn.labml.ai/diffusion/ddpm/experiment.html). [This file](https://nn.labml.ai/diffusion/ddpm/evaluate.html) can generate samples and interpolations from a trained model. - -[![View Run](https://img.shields.io/badge/labml-experiment-brightgreen)](https://app.labml.ai/run/a44333ea251411ec8007d1a1762ed686) diff --git a/labml_nn/normalization/deep_norm/__init__.py b/labml_nn/normalization/deep_norm/__init__.py index bc7842fb..2222d2d4 100644 --- a/labml_nn/normalization/deep_norm/__init__.py +++ b/labml_nn/normalization/deep_norm/__init__.py @@ -7,6 +7,9 @@ summary: > # DeepNorm +[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/labmlai/annotated_deep_learning_paper_implementations/blob/master/labml_nn/normalization/deep_norm/experiment.ipynb) +[![Open In Comet](https://images.labml.ai/images/comet.svg?experiment=deep_norm&file=model)](https://www.comet.ml/labml/deep-norm/61d817f80ff143c8825fba4aacd431d4?experiment-tab=chart&showOutliers=true&smoothing=0&transformY=smoothing&xAxis=step) + This is a [PyTorch](https://pytorch.org) implementation of the DeepNorm from the paper [DeepNet: Scaling Transformers to 1,000 Layers](https://papers.labml.ai/paper/2203.00555). @@ -66,10 +69,6 @@ Where $N$ is the number of layers in the encoder and $M$ is the number of layers Refer to [the paper](https://papers.labml.ai/paper/2203.00555) for derivation. [Here is an experiment implementation](experiment.html) that uses DeepNorm. 
- -[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/labmlai/annotated_deep_learning_paper_implementations/blob/master/labml_nn/normalization/deep_norm/experiment.ipynb) -[![View Run](https://img.shields.io/badge/labml-experiment-brightgreen)](https://app.labml.ai/run/ec8e4dacb7f311ec8d1cd37d50b05c3d) -[![Open In Comet](https://images.labml.ai/images/comet.svg?experiment=deep_norm&file=model)](https://www.comet.ml/labml/deep-norm/61d817f80ff143c8825fba4aacd431d4?experiment-tab=chart&showOutliers=true&smoothing=0&transformY=smoothing&xAxis=step) """ from typing import Union, List diff --git a/labml_nn/normalization/deep_norm/experiment.ipynb b/labml_nn/normalization/deep_norm/experiment.ipynb index df5f443c..9dd84b4f 100644 --- a/labml_nn/normalization/deep_norm/experiment.ipynb +++ b/labml_nn/normalization/deep_norm/experiment.ipynb @@ -11,7 +11,6 @@ "source": [ "[![Github](https://img.shields.io/github/stars/labmlai/annotated_deep_learning_paper_implementations?style=social)](https://github.com/labmlai/annotated_deep_learning_paper_implementations)\n", "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/labmlai/annotated_deep_learning_paper_implementations/blob/master/labml_nn/normalization/deep_norm/experiment.ipynb)\n", - "[![View Run](https://img.shields.io/badge/labml-experiment-brightgreen)](https://app.labml.ai/run/ec8e4dacb7f311ec8d1cd37d50b05c3d)\n", "[![Open In Comet](https://images.labml.ai/images/comet.svg?experiment=deep_norm&file=colab)](https://www.comet.ml/labml/deep-norm/61d817f80ff143c8825fba4aacd431d4?experiment-tab=chart&showOutliers=true&smoothing=0&transformY=smoothing&xAxis=step)\n", "\n", "## DeepNorm\n", diff --git a/labml_nn/normalization/deep_norm/experiment.py b/labml_nn/normalization/deep_norm/experiment.py index ade34a97..845397bf 100644 --- a/labml_nn/normalization/deep_norm/experiment.py +++ b/labml_nn/normalization/deep_norm/experiment.py @@ -8,7 +8,6 @@ summary: > # [DeepNorm](index.html) Experiment [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/labmlai/annotated_deep_learning_paper_implementations/blob/master/labml_nn/normalization/deep_norm/experiment.ipynb) -[![View Run](https://img.shields.io/badge/labml-experiment-brightgreen)](https://app.labml.ai/run/ec8e4dacb7f311ec8d1cd37d50b05c3d) [![Open In Comet](https://images.labml.ai/images/comet.svg?experiment=deep_norm&file=experiment)](https://www.comet.ml/labml/deep-norm/61d817f80ff143c8825fba4aacd431d4?experiment-tab=chart&showOutliers=true&smoothing=0&transformY=smoothing&xAxis=step) """ -- GitLab