Commit 1fd1344

Authored by denkle, mikeheddes, and github-actions[bot]

Add methods to form thermometer codes (#93)

* Add methods to form thermometer codes
* Add revisions after review
* Formatting and micro optimizations
* Modify code to allow flexible number of hypervectors
* [github-action] formatting fixes
* Modify Thermometer embedding to accept HD/VSA model type
* Remove model option from embeddings

Co-authored-by: mikeheddes <[email protected]>
Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>

1 parent f6ea455 commit 1fd1344

File tree

5 files changed: +162 -0 lines changed

    docs/embeddings.rst
    docs/torchhd.rst
    torchhd/__init__.py
    torchhd/embeddings.py
    torchhd/functional.py

docs/embeddings.rst

Lines changed: 1 addition & 0 deletions

@@ -12,6 +12,7 @@ torchhd.embeddings
     Identity
     Random
     Level
+    Thermometer
     Circular
     Projection
     Sinusoid

docs/torchhd.rst

Lines changed: 1 addition & 0 deletions

@@ -18,6 +18,7 @@ Basis-hypervector sets
     identity_hv
     random_hv
     level_hv
+    thermometer_hv
     circular_hv

torchhd/__init__.py

Lines changed: 2 additions & 0 deletions

@@ -16,6 +16,7 @@
     identity_hv,
     random_hv,
     level_hv,
+    thermometer_hv,
     circular_hv,
     bind,
     bundle,
@@ -62,6 +63,7 @@
     "identity_hv",
     "random_hv",
     "level_hv",
+    "thermometer_hv",
     "circular_hv",
     "bind",
     "bundle",

torchhd/embeddings.py

Lines changed: 63 additions & 0 deletions

@@ -1,16 +1,19 @@
 import math
+from typing import Type, Union
 import torch
 import torch.nn as nn
 import torch.nn.functional as F
 from torch import Tensor
 
 import torchhd.functional as functional
+from torchhd.base import VSA_Model
 from torchhd.map import MAP
 
 __all__ = [
     "Identity",
     "Random",
     "Level",
+    "Thermometer",
     "Circular",
     "Projection",
     "Sinusoid",
@@ -169,6 +172,66 @@ def forward(self, input: torch.Tensor) -> torch.Tensor:
         return super(Level, self).forward(indices).as_subclass(MAP)
 
 
+class Thermometer(nn.Embedding):
+    """Embedding wrapper around :func:`~torchhd.functional.thermometer_hv`.
+
+    Class inherits from `Embedding <https://pytorch.org/docs/stable/generated/torch.nn.Embedding.html>`_ and supports the same keyword arguments.
+
+    Args:
+        num_embeddings (int): the number of hypervectors to generate.
+        embedding_dim (int): the dimensionality of the hypervectors.
+        low (float, optional): The lower bound of the real number range that the levels represent. Default: ``0.0``
+        high (float, optional): The upper bound of the real number range that the levels represent. Default: ``1.0``
+        requires_grad (bool, optional): If autograd should record operations on the returned tensor. Default: ``False``.
+
+    Examples::
+
+        >>> emb = embeddings.Thermometer(11, 10, low=-1, high=2)
+        >>> x = torch.FloatTensor([0.3, 1.9, -0.8])
+        >>> emb(x)
+        tensor([[ 1.,  1.,  1.,  1., -1., -1., -1., -1., -1., -1.],
+                [ 1.,  1.,  1.,  1.,  1.,  1.,  1.,  1.,  1.,  1.],
+                [ 1., -1., -1., -1., -1., -1., -1., -1., -1., -1.]])
+
+    """
+
+    def __init__(
+        self,
+        num_embeddings,
+        embedding_dim,
+        low=0.0,
+        high=1.0,
+        requires_grad=False,
+        **kwargs
+    ):
+        self.low_value = low
+        self.high_value = high
+
+        super(Thermometer, self).__init__(num_embeddings, embedding_dim, **kwargs)
+        self.weight.requires_grad = requires_grad
+
+    def reset_parameters(self):
+        factory_kwargs = {
+            "device": self.weight.data.device,
+            "dtype": self.weight.data.dtype,
+        }
+
+        self.weight.data.copy_(
+            functional.thermometer_hv(
+                self.num_embeddings, self.embedding_dim, **factory_kwargs
+            )
+        )
+
+        self._fill_padding_idx_with_zero()
+
+    def forward(self, input: torch.Tensor) -> torch.Tensor:
+        indices = functional.value_to_index(
+            input, self.low_value, self.high_value, self.num_embeddings
+        ).clamp(0, self.num_embeddings - 1)
+
+        return super(Thermometer, self).forward(indices).as_subclass(MAP)
+
+
 class Circular(nn.Embedding):
     """Embedding wrapper around :func:`~torchhd.functional.circular_hv`.

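For orientation, here is a minimal usage sketch of the new Thermometer embedding; it is not part of the commit, and the number of levels, dimensionality, value range, and sample inputs are illustrative assumptions.

    import torch
    from torchhd import embeddings

    # 16 thermometer levels of 1,000 dimensions covering the value range [0, 1]
    # (illustrative parameters, not taken from the commit).
    emb = embeddings.Thermometer(16, 1000, low=0.0, high=1.0)

    # Encode a batch of scalar values into MAP hypervectors.
    values = torch.tensor([0.12, 0.50, 0.53, 0.97])
    hv = emb(values)  # shape: (4, 1000)

    # Nearby values map to nearby levels and therefore share most of their +1
    # components; similarity decreases as the encoded values move apart.
    sim = torch.nn.functional.cosine_similarity(hv[1:2], hv)
    print(sim)
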
torchhd/functional.py

Lines changed: 95 additions & 0 deletions

@@ -16,6 +16,7 @@
     "identity_hv",
     "random_hv",
     "level_hv",
+    "thermometer_hv",
     "circular_hv",
     "bind",
     "bundle",
@@ -326,6 +327,100 @@ def level_hv(
     return hv.as_subclass(model)
 
 
+def thermometer_hv(
+    num_vectors: int,
+    dimensions: int,
+    model: Type[VSA_Model] = MAP,
+    *,
+    requires_grad=False,
+    **kwargs,
+) -> VSA_Model:
+    """Creates a thermometer code for the given dimensionality.
+
+    Implements similarity-preserving hypervectors as described in `Sparse Binary Distributed Encoding of Scalars <https://doi.org/10.1615/J%20Automat%20Inf%20Scien.v37.i6.20>`_.
+
+    Args:
+        num_vectors (int): the number of hypervectors to generate.
+        dimensions (int): the dimensionality of the hypervectors.
+        model (``Type[VSA_Model]``, optional): specifies the hypervector type to be instantiated. Default: ``torchhd.MAP``.
+        dtype (``torch.dtype``, optional): the desired data type of returned tensor. Default: if ``None``, depends on VSA_Model.
+        device (``torch.device``, optional): the desired device of returned tensor. Default: if ``None``, uses the current device for the default tensor type (see torch.set_default_tensor_type()). ``device`` will be the CPU for CPU tensor types and the current CUDA device for CUDA tensor types.
+        requires_grad (bool, optional): If autograd should record operations on the returned tensor. Default: ``False``.
+
+    Examples::
+
+        >>> torchhd.thermometer_hv(7, 6, torchhd.BSC)
+        tensor([[False, False, False, False, False, False],
+                [ True, False, False, False, False, False],
+                [ True,  True, False, False, False, False],
+                [ True,  True,  True, False, False, False],
+                [ True,  True,  True,  True, False, False],
+                [ True,  True,  True,  True,  True, False],
+                [ True,  True,  True,  True,  True,  True]])
+
+        >>> torchhd.thermometer_hv(4, 6, torchhd.MAP)
+        tensor([[-1., -1., -1., -1., -1., -1.],
+                [ 1.,  1., -1., -1., -1., -1.],
+                [ 1.,  1.,  1.,  1., -1., -1.],
+                [ 1.,  1.,  1.,  1.,  1.,  1.]])
+
+        >>> torchhd.thermometer_hv(6, 6, torchhd.FHRR)
+        tensor([[-1.+0.j, -1.+0.j, -1.+0.j, -1.+0.j, -1.+0.j, -1.+0.j],
+                [ 1.+0.j, -1.+0.j, -1.+0.j, -1.+0.j, -1.+0.j, -1.+0.j],
+                [ 1.+0.j,  1.+0.j, -1.+0.j, -1.+0.j, -1.+0.j, -1.+0.j],
+                [ 1.+0.j,  1.+0.j,  1.+0.j, -1.+0.j, -1.+0.j, -1.+0.j],
+                [ 1.+0.j,  1.+0.j,  1.+0.j,  1.+0.j, -1.+0.j, -1.+0.j],
+                [ 1.+0.j,  1.+0.j,  1.+0.j,  1.+0.j,  1.+0.j, -1.+0.j]])
+
+    """
+    # Check if the requested number of vectors can be accommodated
+    if num_vectors > dimensions + 1:
+        raise ValueError(
+            f"For the given dimensionality: {dimensions}, the thermometer code cannot create more than {dimensions + 1} hypervectors."
+        )
+    else:
+        # Based on num_vectors and dimensions, compute the step between neighboring hypervectors
+        step = 0
+        if num_vectors > 1:
+            step = dimensions // (num_vectors - 1)
+
+        # Generate a random vector as a placeholder to get dtype and device
+        rand_hv = model.random_hv(
+            1,
+            dimensions,
+            **kwargs,
+        )
+
+        if model == BSC:
+            # Use binary vectors
+            hv = torch.zeros(
+                num_vectors,
+                dimensions,
+                dtype=rand_hv.dtype,
+                device=rand_hv.device,
+            )
+        elif (model == MAP) | (model == FHRR):
+            # Use bipolar vectors
+            hv = torch.full(
+                (
+                    num_vectors,
+                    dimensions,
+                ),
+                -1,
+                dtype=rand_hv.dtype,
+                device=rand_hv.device,
+            )
+        else:
+            raise ValueError(f"{model} HD/VSA model is not defined.")
+
+        # Create hypervectors using the obtained step
+        for i in range(1, num_vectors):
+            hv[i, 0 : i * step] = 1
+
+        hv.requires_grad = requires_grad
+        return hv.as_subclass(model)
+
+
 def circular_hv(
     num_vectors: int,
     dimensions: int,

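As a quick check of the step logic in thermometer_hv above, here is a small sketch that is not part of the commit; it reuses the MAP example from the docstring and only assumes plain PyTorch for the similarity call.

    import torch
    import torchhd

    # Four levels of six dimensions: step = 6 // (4 - 1) = 2, so each level
    # turns two more components to +1 than the previous one.
    hv = torchhd.thermometer_hv(4, 6, torchhd.MAP)
    print(hv)
    # tensor([[-1., -1., -1., -1., -1., -1.],
    #         [ 1.,  1., -1., -1., -1., -1.],
    #         [ 1.,  1.,  1.,  1., -1., -1.],
    #         [ 1.,  1.,  1.,  1.,  1.,  1.]])

    # Similarity to the first level falls off linearly with the level index,
    # which is the similarity-preserving property the thermometer code targets.
    sim = torch.nn.functional.cosine_similarity(hv[0:1], hv)
    print(sim)  # tensor([ 1.0000,  0.3333, -0.3333, -1.0000])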