# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: LicenseRef-NvidiaProprietary
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
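#
# This file combines an INI-style layout with Python-literal values
# (lists, dicts, booleans). A minimal, hypothetical loader sketch (the
# project's actual reader may differ, and tf.float32 is not a Python
# literal, so it would need special handling):
#
#   import ast, configparser
#   cp = configparser.ConfigParser(inline_comment_prefixes=("#",))
#   cp.read("nrx_large_64qam.cfg")
#   n_size_bwp = ast.literal_eval(cp["system"]["n_size_bwp"])  # -> 4
#   mcs_index = ast.literal_eval(cp["system"]["mcs_index"])    # -> [19]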
[global]
label = 'nrx_large_64qam' # all relevant files such as weights will use this label
ebno = True # activate rate-adjusted SNR
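# with ebno = True, the SNR values below are interpreted as Eb/No and
# converted to Es/No internally; a sketch of the standard conversion
# (an assumption about this codebase, not verified against its source):
#   esno_db = ebno_db + 10 * log10(coderate * num_bits_per_symbol)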
[system]
n_size_bwp = 4
num_rx_antennas = 4
mcs_index = [19]
mcs_table = 1
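# per 3GPP TS 38.214 Table 5.1.3.1-1, mcs_index 19 in mcs_table 1 maps to
# 64QAM (6 bits per symbol), hence the '64qam' label of this config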
carrier_frequency = 2140000000.0
subcarrier_spacing = 30000.0
n_start_grid = 0
slot_number = 0
frame_number = 0
cyclic_prefix = 'normal'
precoding = 'codebook'
n_cell_id = 1
tpmi = 2
symbol_allocation = [0, 14]
num_antenna_ports = 2
dmrs_mapping_type = "A"
dmrs_config_type = 1
dmrs_type_a_position = 2
dmrs_additional_position = 1
dmrs_length = 1
dmrs_nid = [[1, 1], [1, 1]]
n_scid = 1
num_cdm_groups_without_data = 2
verbose = False
dmrs_port_sets = [[0], [2]]
n_rntis = [1, 1]
n_ids = [1, 1]
[baseline]
demapping_type = 'maxlog'
num_bp_iter = 20
cn_type = 'boxplus'
# For a large number of PRBs (>100), a reduced-complexity LMMSE
# estimator is used, where the LMMSE is only performed over blocks of
# lmmse_num_prbs PRBs.
# If set to -1, the splitting parameters are calculated via a heuristic;
# the target of the heuristic is to find the best split such that
# the resulting LMMSE is done over at least 20 PRBs.
lmmse_num_prbs = -1 # n_size_bwp must be a multiple of this value
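# A minimal sketch of such a splitting heuristic (an assumption, not
# necessarily the exact implementation): pick the smallest divisor of
# the bandwidth part that still spans at least 20 PRBs:
#   def lmmse_split(n_size_bwp, min_prbs=20):
#       for d in range(min_prbs, n_size_bwp + 1):
#           if n_size_bwp % d == 0:
#               return d  # smallest block size covering >= min_prbs PRBs
#       return n_size_bwp  # no valid split; use a single block
# e.g., for n_size_bwp_eval = 132 this would select blocks of 22 PRBs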
[neural_receiver]
num_nrx_iter = 8 # defines number of cgnn_it stages
num_nrx_iter_eval = 8 # iterations used for evaluation; must be <= num_nrx_iter
d_s = 56 # feature space dimension; effectively defines the number of filter kernels (or neurons) for all components
num_units_init = [128, 128] # num filter kernels for input CNN (each list entry defines one layer)
num_units_agg = [[64], [64], [64], [64], [64], [64], [64], [64]] # number of neurons of state aggregation MLP (each list entry defines one layer)
num_units_state = [[128, 128], [128, 128], [128, 128], [128, 128], [128, 128], [128, 128], [128, 128], [128, 128]] # num filter kernels for state update CNN (each list entry defines one layer)
num_units_readout = [128] # number of neurons of readout MLP (each list entry defines one layer)
max_num_tx = 2 # max number of active DMRS ports
min_num_tx = 1 # only relevant during training for random user sampling
initial_chest = "ls" # "None" deactivates initial LS estimation
custom_constellation = False # activates trainable transmitter
mask_pilots = False # mask DMRS positions for e2e experiments
# quantization and other custom layer types
layer_type_dense = "dense"
layer_type_conv = "sepconv" # or "conv"
layer_type_readout = "dense"
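# "sepconv" refers to depthwise-separable convolutions (as in
# tf.keras.layers.SeparableConv2D), which factor a full KxK convolution
# into a depthwise and a pointwise step and typically require far fewer
# parameters and FLOPs than a regular "conv" at the same filter count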
nrx_dtype = tf.float32
[training]
# each list position in the training schedule below defines one training stage:
# number of SGD iterations, learning rate, batch size, trainable constellation, SNR ranges, etc.
training_schedule = {
"num_iter": [1e6, 9e6],
"learning_rate": [0.001, 0.001],
"batch_size": [128, 128],
"train_tx": [False, False],
"min_training_snr_db": [[0., 0.],[2., 2.]], # 1 / 2 active UEs, is Eb/No [dB] if ebno==True
"max_training_snr_db": [[15., 20.],[15., 15.]], # 1 / 2 active UEs, is Eb/No [dB] if ebno==True
"double_readout": [True, True], # use additional MSE loss on h_hat
"apply_multiloss": [True, True],
"weighting_double_readout": [0.02, 0.01]} # weighting between MSE & BCE loss
num_iter_train_save = 1000
max_ut_velocity = 56.
min_ut_velocity = 0.
channel_norm = False
cfo_offset_ppm = 0.0 # randomly sampled in [-cfo_offset_ppm, cfo_offset_ppm]
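# a CFO of x ppm corresponds to an absolute offset of
# x * 1e-6 * carrier_frequency, i.e., 0.1 ppm at 2.14 GHz is 214 Hz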
# UMi
channel_type = 'UMi'
eval_ebno_db_arr = [4.0] # Eb/No at which the model is evaluated for each MCS every 1k training iterations
# TDL (for two UEs)
#channel_type = 'DoubleTDLlow'
xla = True # Activate XLA for the training loop
tfrecord_filename = "na" # only relevant if training is done with a dataset
[evaluation]
# the following parameters are used during evaluation
snr_db_eval_min = -2
snr_db_eval_max = 10
snr_db_eval_stepsize = 1
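# together these define the evaluation SNR grid, e.g. (assuming an
# inclusive upper edge):
#   snrs_db = np.arange(snr_db_eval_min,
#                       snr_db_eval_max + snr_db_eval_stepsize,
#                       snr_db_eval_stepsize)  # -2, -1, ..., 10 dB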
max_ut_velocity_eval = 56
min_ut_velocity_eval = 56
cfo_offset_ppm_eval = 0.0
tfrecord_filename_eval = "na"
channel_type_eval = "DoubleTDLlow" # 2 Users
#channel_type_eval = 'TDL-B100' # 1 User
channel_norm_eval = False
n_size_bwp_eval = 132
batch_size_eval = 30
batch_size_eval_small = 3 # for k-best-based baselines