1""" 2/* Copyright (c) 2023 Amazon 3 Written by Jan Buethe */ 4/* 5 Redistribution and use in source and binary forms, with or without 6 modification, are permitted provided that the following conditions 7 are met: 8 9 - Redistributions of source code must retain the above copyright 10 notice, this list of conditions and the following disclaimer. 11 12 - Redistributions in binary form must reproduce the above copyright 13 notice, this list of conditions and the following disclaimer in the 14 documentation and/or other materials provided with the distribution. 15 16 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17 ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER 20 OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 21 EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 22 PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 23 PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 24 LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 25 NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 26 SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27*/ 28""" 29import sys 30sys.path.append('../dnntools') 31import numbers 32 33 34import torch 35from torch import nn 36import torch.nn.functional as F 37from torch.nn.utils import weight_norm 38 39from utils.complexity import _conv1d_flop_count 40 41from dnntools.quantization.softquant import soft_quant 42from dnntools.sparsification import mark_for_sparsification 43 44class SilkFeatureNetPL(nn.Module): 45 """ feature net with partial lookahead """ 46 def __init__(self, 47 feature_dim=47, 48 num_channels=256, 49 hidden_feature_dim=64, 50 softquant=False, 51 sparsify=True, 52 sparsification_density=0.5, 53 apply_weight_norm=False): 54 55 super(SilkFeatureNetPL, self).__init__() 56 57 if isinstance(sparsification_density, numbers.Number): 58 sparsification_density = 4 * [sparsification_density] 59 60 self.feature_dim = feature_dim 61 self.num_channels = num_channels 62 self.hidden_feature_dim = hidden_feature_dim 63 64 norm = weight_norm if apply_weight_norm else lambda x, name=None: x 65 66 self.conv1 = norm(nn.Conv1d(feature_dim, self.hidden_feature_dim, 1)) 67 self.conv2 = norm(nn.Conv1d(4 * self.hidden_feature_dim, num_channels, 2)) 68 self.tconv = norm(nn.ConvTranspose1d(num_channels, num_channels, 4, 4)) 69 self.gru = norm(norm(nn.GRU(num_channels, num_channels, batch_first=True), name='weight_hh_l0'), name='weight_ih_l0') 70 71 if softquant: 72 self.conv2 = soft_quant(self.conv2) 73 self.tconv = soft_quant(self.tconv) 74 self.gru = soft_quant(self.gru, names=['weight_hh_l0', 'weight_ih_l0']) 75 76 77 if sparsify: 78 mark_for_sparsification(self.conv2, (sparsification_density[0], [8, 4])) 79 mark_for_sparsification(self.tconv, (sparsification_density[1], [8, 4])) 80 mark_for_sparsification( 81 self.gru, 82 { 83 'W_ir' : (sparsification_density[2], [8, 4], False), 84 'W_iz' : (sparsification_density[2], [8, 4], False), 85 'W_in' : (sparsification_density[2], [8, 4], False), 86 'W_hr' : (sparsification_density[3], [8, 4], True), 87 'W_hz' : (sparsification_density[3], [8, 4], True), 88 'W_hn' : (sparsification_density[3], [8, 4], True), 89 } 90 ) 91 92 93 def flop_count(self, rate=200): 94 count = 0 95 for conv in self.conv1, self.conv2, self.tconv: 96 
count += _conv1d_flop_count(conv, rate) 97 98 count += 2 * (3 * self.gru.input_size * self.gru.hidden_size + 3 * self.gru.hidden_size * self.gru.hidden_size) * rate 99 100 return count 101 102 103 def forward(self, features, state=None): 104 """ features shape: (batch_size, num_frames, feature_dim) """ 105 106 batch_size = features.size(0) 107 num_frames = features.size(1) 108 109 if state is None: 110 state = torch.zeros((1, batch_size, self.num_channels), device=features.device) 111 112 features = features.permute(0, 2, 1) 113 # dimensionality reduction 114 c = torch.tanh(self.conv1(features)) 115 116 # frame accumulation 117 c = c.permute(0, 2, 1) 118 c = c.reshape(batch_size, num_frames // 4, -1).permute(0, 2, 1) 119 c = torch.tanh(self.conv2(F.pad(c, [1, 0]))) 120 121 # upsampling 122 c = torch.tanh(self.tconv(c)) 123 c = c.permute(0, 2, 1) 124 125 c, _ = self.gru(c, state) 126 127 return c
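

if __name__ == '__main__':
    # Minimal smoke-test sketch (illustrative only, not part of the original
    # module). It assumes the repo layout makes utils.complexity and dnntools
    # importable, since both are imported at module level above. num_frames
    # must be a multiple of 4: conv1 outputs are regrouped 4 frames at a time
    # before conv2, and tconv upsamples back by a factor of 4.
    net = SilkFeatureNetPL(feature_dim=47, num_channels=256,
                           softquant=False, sparsify=False)
    features = torch.randn(2, 16, 47)  # (batch_size, num_frames, feature_dim)
    output = net(features)
    print(output.shape)  # expected: torch.Size([2, 16, 256])
    print(f"complexity at 200 frames/s: {net.flop_count(rate=200) / 1e6:.2f} MFLOPS")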