Skip to content

Commit 20d2bca

Browse files
gmarcusm, jhoydis, faycalaa, SebastianCa, lollodealma
committed
Release v1.2.1
Signed-off-by: The Sionna Team <sionna@nvidia.com>
Merged-by: Guillermo Marcus <4169784+gmarcusm@users.noreply.github.com>
Co-authored-by: Jakob Hoydis <5190129+jhoydis@users.noreply.github.com>
Co-authored-by: Fayçal Ait-Aoudia <43564757+faycalaa@users.noreply.github.com>
Co-authored-by: Sebastian Cammerer <18167671+SebastianCa@users.noreply.github.com>
Co-authored-by: Lorenzo Maggi <34341780+lollodealma@users.noreply.github.com>
Co-authored-by: Merlin Nimier-David <merlin.nimier@gmail.com>
Co-authored-by: Baptiste Nicolet <40777524+bathal1@users.noreply.github.com>
Co-authored-by: Guillermo Marcus <4169784+gmarcusm@users.noreply.github.com>
1 parent 302c5b8 commit 20d2bca

File tree

10 files changed

+51
-54
lines changed

10 files changed

+51
-54
lines changed

README.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -122,6 +122,6 @@ If you use this software, please cite it as:
122122
author = {Hoydis, Jakob and Cammerer, Sebastian and {Ait Aoudia}, Fayçal and Nimier-David, Merlin and Maggi, Lorenzo and Marcus, Guillermo and Vem, Avinash and Keller, Alexander},
123123
note = {https://nvlabs.github.io/sionna/},
124124
year = {2022},
125-
version = {1.2.0}
125+
version = {1.2.1}
126126
}
127127
```

doc/source/citation.rst

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -11,5 +11,5 @@ If you use this software, please cite it as:
1111
Alexander},
1212
note = {https://nvlabs.github.io/sionna/},
1313
year = {2022},
14-
version = {1.2.0}
14+
version = {1.2.1}
1515
}

doc/source/index.rst

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -25,7 +25,7 @@ rapidly prototyped by connecting the desired blocks.
2525
NVIDIA GPU acceleration provides orders-of-magnitude faster simulation, enabling
2626
the interactive exploration of such systems, for example, in `Jupyter notebooks <https://jupyter.org/>`_ that can be run on cloud services such as `Google Colab <https://colab.research.google.com>`_. If no GPU is available, Sionna will run on the CPU.
2727

28-
The `Sionna Research Kit (SRK) <rk/index.html>`_ allows to deploy trained AI/ML components in a real software-defined 5G NR radio access network (RAN). It is based on the `OpenAirInterface <https://gitlab.eurecom.fr/oai/openairinterface5g>`_ project and is powered by the `NVIDIA Jetson AGX Orin platform <https://www.nvidia.com/en-us/autonomous-machines/embedded-systems/jetson-orin/>`_.
28+
The `Sionna Research Kit (SRK) <rk/index.html>`_ allows to deploy trained AI/ML components in a real software-defined 5G NR radio access network (RAN). It is based on the `OpenAirInterface <https://gitlab.eurecom.fr/oai/openairinterface5g>`_ project and is powered by the `NVIDIA Jetson AGX Thor platform <https://www.nvidia.com/en-us/autonomous-machines/embedded-systems/jetson-thor/>`_.
2929

3030
Sionna is developed, continuously extended, and used by NVIDIA to drive 5G and 6G research.
3131

pyproject.toml

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -15,7 +15,7 @@ exclude = ["sionna.rt*"]
1515

1616
[project]
1717
name = "sionna"
18-
version = "1.2.0"
18+
version = "1.2.1"
1919
description = "Sionna - A hardware-accelerated differentiable open-source library for research on communication systems"
2020
readme = "README.md"
2121
license = {text = "Apache-2.0"}
@@ -51,7 +51,7 @@ classifiers = [
5151
]
5252
requires-python = ">=3.10"
5353
dependencies = [
54-
"sionna-rt==1.2.0",
54+
"sionna-rt==1.2.1",
5555
"tensorflow(>=2.14, !=2.16, !=2.17)",
5656
"numpy(>=1.26, <2.0)",
5757
"scipy>=1.14.1",

src/sionna/__init__.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,7 @@
55

66
import importlib
77

8-
__version__ = "1.2.0"
8+
__version__ = "1.2.1"
99

1010
# pylint: disable=invalid-name
1111
def __getattr__(name):

src/sionna/phy/nr/utils.py

Lines changed: 30 additions & 34 deletions
Original file line numberDiff line numberDiff line change
@@ -631,48 +631,44 @@ def calculate_tb_size(modulation_order,
631631
target_tb_size, tf.cast(num_coded_bits, target_tb_size.dtype),
632632
message="target_tb_size must be less than num_coded_bits.")
633633

634-
# no quantization for user input
635-
n_info_q = target_tb_size
636-
637634
else:
638635
# Compute n info bits (Target TB size)
639636
target_tb_size = target_coderate * tf.cast(num_coded_bits, rdtype)
640637

641-
# quantize target_tb_size
642-
def n_info_q_if_target_tbs_greater_3824():
643-
# Compute quantized n. info bits if target TB size > 3824
644-
# Step 4 of 38.214 5.3.1.2
645-
log2_n_info_minus_24 = tf.math.log(
646-
target_tb_size - tf.cast(24, target_tb_size.dtype)) \
647-
/ tf.math.log(tf.cast(2.0, target_tb_size.dtype))
648-
n = tf.math.floor(log2_n_info_minus_24) - 5.
649-
n_info_q = tf.math.maximum(
650-
tf.cast(3840.0, rdtype),
651-
tf.cast(2**n * tf.math.round((target_tb_size - 24) / 2**n), rdtype))
652-
return n_info_q
653-
654-
def n_info_q_if_target_tbs_smaller_3824():
655-
# Compute quantized n. info bits if target TB size <= 3824
656-
log2_n_info = tf.math.log(target_tb_size) \
657-
/ tf.cast(tf.math.log(2.0), target_tb_size.dtype)
658-
n = tf.math.maximum(tf.cast(3.0, rdtype),
659-
tf.cast(tf.math.floor(log2_n_info) - 6, rdtype))
660-
n_info_q = tf.math.maximum(
661-
tf.cast(24.0, rdtype),
662-
tf.cast(2**n * tf.math.floor(target_tb_size / 2**n), rdtype))
663-
return n_info_q
664-
665-
# ----------------------------- #
666-
# Quantized n. information bits #
667-
# ----------------------------- #
668-
n_info_q = tf.where(target_tb_size <= 3824,
669-
n_info_q_if_target_tbs_smaller_3824(),
670-
n_info_q_if_target_tbs_greater_3824())
638+
# quantize target_tb_size
639+
def n_info_q_if_target_tbs_greater_3824():
640+
# Compute quantized n. info bits if target TB size > 3824
641+
# Step 4 of 38.214 5.3.1.2
642+
log2_n_info_minus_24 = tf.math.log(
643+
target_tb_size - tf.cast(24, target_tb_size.dtype)) \
644+
/ tf.math.log(tf.cast(2.0, target_tb_size.dtype))
645+
n = tf.math.floor(log2_n_info_minus_24) - 5.
646+
n_info_q = tf.math.maximum(
647+
tf.cast(3840.0, rdtype),
648+
tf.cast(2**n * tf.math.round((target_tb_size - 24) / 2**n), rdtype))
649+
return n_info_q
650+
651+
def n_info_q_if_target_tbs_smaller_3824():
652+
# Compute quantized n. info bits if target TB size <= 3824
653+
log2_n_info = tf.math.log(target_tb_size) \
654+
/ tf.cast(tf.math.log(2.0), target_tb_size.dtype)
655+
n = tf.math.maximum(tf.cast(3.0, rdtype),
656+
tf.cast(tf.math.floor(log2_n_info) - 6, rdtype))
657+
n_info_q = tf.math.maximum(
658+
tf.cast(24.0, rdtype),
659+
tf.cast(2**n * tf.math.floor(target_tb_size / 2**n), rdtype))
660+
return n_info_q
661+
662+
# ----------------------------- #
663+
# Quantized n. information bits #
664+
# ----------------------------- #
665+
n_info_q = tf.where(target_tb_size <= 3824,
666+
n_info_q_if_target_tbs_smaller_3824(),
667+
n_info_q_if_target_tbs_greater_3824())
671668
# ------------------- #
672669
# Auxiliary functions #
673670
# ------------------- #
674671

675-
676672
def tbs_if_target_tbs_higher_3824():
677673
# Compute TB size if target_tb_size>3824
678674
tbs = 8 * num_cb * tf.math.ceil((n_info_q + 24) / (8 * num_cb)) - 24

src/sionna/sys/utils.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -87,7 +87,6 @@ def get_pathloss(h_freq,
8787
batch_size = h_freq.shape[:-6]
8888
lbs = len(batch_size)
8989
num_ofdm_symbols = h_freq.shape[-2]
90-
num_ut = tf.reduce_sum(rx_tx_association)
9190

9291
# Compute RX power
9392
# [..., num_rx, num_rx_ant, num_tx, num_tx_ant, num_ofdm_symbols, num_subcarriers]
@@ -109,6 +108,9 @@ def get_pathloss(h_freq,
109108
True,
110109
message="rx_tx_association must contain binary values")
111110

111+
# Number of UTs
112+
num_ut = tf.reduce_sum(rx_tx_association)
113+
112114
# Extract pathloss for serving TX only, for each RX
113115
rx_tx_association = rx_tx_association == 1
114116
# [batch_size, num_rx, num_tx]

test/unit/nr/test_pusch_transmitter.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -51,7 +51,6 @@ def run_test(test_name):
5151
x_grid = tf.transpose(x_grid[0,0], perm=[2,1,0])
5252
return np.allclose(tf.squeeze(x_grid), grid)
5353

54-
@pytest.mark.usefixtures("only_gpu")
5554
@pytest.mark.parametrize("test_id", list(range(0,83)))
5655
def tests_against_reference(test_id):
5756
"""Test PUSCHTransmitter output against reference"""

test/unit/nr/utils.py

Lines changed: 11 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -326,17 +326,17 @@ def calculate_tb_size_numpy(modulation_order,
326326
# include tb_scaling as defined in Tab. 5.1.3.2-2 38.214
327327
n_info = target_coderate * num_coded_bits
328328

329-
# apply quantization of info bit
330-
if n_info <= 3824:
331-
# step3 in 38.214 5.1.3.2
332-
n = max(3, np.floor(np.log2(n_info)) - 6)
333-
n_info_q = max(24, 2**n * np.floor(n_info/2**n))
334-
else:
335-
# step 4 in 38.212 5.3.1.2
336-
n = np.floor(np.log2(n_info-24)) - 5
337-
# "ties in the round function are broken towards next largest
338-
# integer"
339-
n_info_q = max(3840, 2**n * np.round((n_info-24)/2**n))
329+
# apply quantization of info bit
330+
if n_info <= 3824:
331+
# step3 in 38.214 5.1.3.2
332+
n = max(3, np.floor(np.log2(n_info)) - 6)
333+
n_info_q = max(24, 2**n * np.floor(n_info/2**n))
334+
else:
335+
# step 4 in 38.212 5.3.1.2
336+
n = np.floor(np.log2(n_info-24)) - 5
337+
# "ties in the round function are broken towards next largest
338+
# integer"
339+
n_info_q = max(3840, 2**n * np.round((n_info-24)/2**n))
340340

341341
if n_info_q <= 3824:
342342
c=1

0 commit comments

Comments (0)