diff --git a/tensor_regression_layer.ipynb b/tensor_regression_layer.ipynb
index b5ba132..52725bd 100644
--- a/tensor_regression_layer.ipynb
+++ b/tensor_regression_layer.ipynb
@@ -6,7 +6,7 @@
    "source": [
     "# Tensor Regression Networks with ``TensorLy`` and ``PyTorch`` as a backend\n",
     "\n",
-    "In this notebook, we will show how to combine TensorLy and MXNet in to implement the tensor regression layer, as defined in **Tensor Contraction & Regression Networks**, _Jean Kossaifi, Zachary C. Lipton, Aran Khanna, Tommaso Furlanello and Anima Anandkumar_, [ArXiV pre-publication](https://arxiv.org/abs/1707.08308).\n",
+    "In this notebook, we will show how to combine TensorLy and PyTorch to implement the tensor regression layer, as defined in **Tensor Contraction & Regression Networks**, _Jean Kossaifi, Zachary C. Lipton, Aran Khanna, Tommaso Furlanello and Anima Anandkumar_, [arXiv preprint](https://arxiv.org/abs/1707.08308).\n",
     "\n",
     "\n",
     "Specifically, we use [TensorLy](http://tensorly.org/dev/index.html) for the tensor operations, with the [PyTorch](http://pytorch.org/) backend.\n",
@@ -124,7 +124,7 @@
     "    return F.log_softmax(x)\n",
     "```\n",
     "\n",
-    "In this notebook, we will demonstrate how to implement easily the TRL using TensorLy and MXNet."
+    "In this notebook, we will demonstrate how to easily implement the TRL using TensorLy and PyTorch."
    ]
   },
   {
@@ -142,16 +142,14 @@
    "source": [
     "import torch\n",
     "import torch.nn as nn\n",
-    "from torch.autograd import Variable\n",
     "import torch.optim as optim\n",
     "from torchvision import datasets, transforms\n",
     "import torch.nn.functional as F\n",
     "\n",
     "import numpy as np\n",
     "\n",
     "import tensorly as tl\n",
-    "from tensorly.tenalg import inner\n",
-    "from tensorly.random import check_random_state"
+    "from tensorly.tenalg import inner"
    ]
   },
   {
@@ -187,7 +185,7 @@
     "batch_size = 16\n",
     "device = 'cuda:0'\n",
     "# to run on CPU, uncomment the following line:\n",
-    "device = 'cpu'"
+    "# device = 'cpu'"
    ]
   },
   {
@@ -274,7 +272,7 @@
     "    def penalty(self, order=2):\n",
     "        penalty = tl.norm(self.core, order)\n",
     "        for f in self.factors:\n",
-    "            penatly = penalty + tl.norm(f, order)\n",
+    "            penalty = penalty + tl.norm(f, order)\n",
     "        return penalty\n"
    ]
   },
@@ -395,7 +393,7 @@
    ],
    "source": [
     "n_epoch = 5 # Number of epochs\n",
-    "regularizer = 0.001\n",
+    "regularizer = 0.0001\n",
     "\n",
     "model = model.to(device)\n",
     "\n",
diff --git a/tt-compression.ipynb b/tt-compression.ipynb
index 1893ad6..24f7ca9 100644
--- a/tt-compression.ipynb
+++ b/tt-compression.ipynb
@@ -32,8 +32,7 @@
     "\n",
     "import numpy as np\n",
     "\n",
-    "import tensorly as tl\n",
-    "from tensorly.random import check_random_state"
+    "import tensorly as tl"
    ]
   },
   {
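A note on the `penatly` → `penalty` fix in `tensor_regression_layer.ipynb`: before this change, the loop body assigned to a misspelled dead variable, so the factor norms were computed and silently discarded and `penalty()` returned only the norm of the core. With the fix, the regularizer penalizes the core and every factor. Below is a minimal sketch of how the corrected method plugs into the regularized loss; the training loop, `train_loader`, `optimizer`, and the assumption that the `penalty` method is reachable as `model.penalty` are illustrative, not the notebook's exact code:

```python
import torch.nn.functional as F

# Hypothetical training step; `model`, `optimizer`, `train_loader`,
# `device` and `regularizer` are assumed set up as in the notebook.
for data, target in train_loader:
    data, target = data.to(device), target.to(device)
    optimizer.zero_grad()
    output = model(data)  # the network ends in F.log_softmax, hence nll_loss
    # With the typo fixed, penalty(2) sums the L2 norms of the
    # Tucker core *and* all factors instead of the core alone.
    loss = F.nll_loss(output, target) + regularizer * model.penalty(2)
    loss.backward()
    optimizer.step()
```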
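Relatedly, the import cleanup keeps `from tensorly.tenalg import inner` while dropping the unused `check_random_state`, since `inner` is the generalized inner product at the heart of the TRL's forward pass. A quick, self-contained sanity check of its semantics (the shapes here are made up for illustration):

```python
import numpy as np
import tensorly as tl
from tensorly.tenalg import inner

tl.set_backend('pytorch')

x = tl.tensor(np.random.rand(4, 5, 6))  # e.g. a batch of 4 activation tensors
w = tl.tensor(np.random.rand(5, 6, 3))  # e.g. a regression weight tensor
# Contract the last 2 modes of x with the first 2 modes of w:
y = inner(x, w, n_modes=2)
print(y.shape)  # torch.Size([4, 3])
```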