/*
 * Copyright (c) Facebook, Inc. and its affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

#include <assert.h>
#include <inttypes.h>
#include <math.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

#include <pytorch_qnnpack.h>
#include <qnnpack/log.h>
#include <qnnpack/operator.h>
18 
/*
 * Creates a quantized (Q8) Sigmoid operator over `channels` channels.
 *
 * The operator is implemented as a 256-entry lookup table mapping every
 * possible quantized input byte to its quantized sigmoid output, so input
 * quantization parameters are baked in at creation time.
 *
 * @param channels           number of channels per batch element; must be non-zero.
 * @param input_zero_point   zero point of the quantized input.
 * @param input_scale        scale of the quantized input; must be finite and positive.
 * @param output_zero_point  must be 0 (only supported value).
 * @param output_scale       must be exactly 1/256 (only supported value).
 * @param output_min         lower clamp bound for the quantized output.
 * @param output_max         upper clamp bound; must be strictly above output_min.
 * @param flags              currently unused.
 * @param sigmoid_out        receives the created operator on success; caller
 *                           releases it with pytorch_qnnp_delete_operator.
 * @return pytorch_qnnp_status_success, or an error status with *sigmoid_out
 *         left unmodified.
 */
enum pytorch_qnnp_status pytorch_qnnp_create_sigmoid_nc_q8(
    size_t channels,
    uint8_t input_zero_point,
    float input_scale,
    uint8_t output_zero_point,
    float output_scale,
    uint8_t output_min,
    uint8_t output_max,
    uint32_t flags,
    pytorch_qnnp_operator_t* sigmoid_out) {
  pytorch_qnnp_operator_t sigmoid_op = NULL;
  enum pytorch_qnnp_status status = pytorch_qnnp_status_uninitialized;

  if (!pytorch_qnnp_params.initialized) {
    pytorch_qnnp_log_error(
        "pytorch_qnnp_create_sigmoid_nc_q8 failed because QNNPACK is not properly initialized");
    goto error;
  }

  status = pytorch_qnnp_status_invalid_parameter;

  if (channels == 0) {
    pytorch_qnnp_log_error(
        "failed to create Sigmoid operator with %zu channels: number of channels must be non-zero",
        channels);
    goto error;
  }

  /* isnormal() rejects zero, subnormals, infinities, and NaN. */
  if (input_scale <= 0.0f || !isnormal(input_scale)) {
    pytorch_qnnp_log_error(
        "failed to create Sigmoid operator with %.7g input scale: scale must be finite and positive",
        input_scale);
    goto error;
  }

  if (output_scale <= 0.0f || !isnormal(output_scale)) {
    pytorch_qnnp_log_error(
        "failed to create Sigmoid operator with %.7g output scale: scale must be finite and positive",
        output_scale);
    goto error;
  }

  if (output_min >= output_max) {
    pytorch_qnnp_log_error(
        "failed to create Sigmoid operator with [%" PRIu8 ", %" PRIu8
        "] output range: range min must be below range max",
        output_min,
        output_max);
    goto error;
  }

  status = pytorch_qnnp_status_unsupported_parameter;

  /* The LUT below assumes the output maps [0, 1) onto [0, 256) exactly,
   * i.e. output scale 2^-8 with zero point 0. */
  if (output_scale != 0x1.0p-8f) {
    pytorch_qnnp_log_error(
        "failed to create Sigmoid operator with %.7g output scale: only output scale of 1/256 is supported",
        output_scale);
    goto error;
  }

  if (output_zero_point != 0) {
    pytorch_qnnp_log_error(
        "failed to create Sigmoid operator with %" PRIu8
        " output zero point: only output zero point of 0 is supported",
        output_zero_point);
    goto error;
  }

  status = pytorch_qnnp_status_out_of_memory;

  sigmoid_op = calloc(1, sizeof(struct pytorch_qnnp_operator));
  if (sigmoid_op == NULL) {
    pytorch_qnnp_log_error(
        "failed to allocate %zu bytes for pytorch_qnnp_operator structure",
        sizeof(struct pytorch_qnnp_operator));
    goto error;
  }

  sigmoid_op->lookup_table = malloc(256 * sizeof(uint8_t));
  if (sigmoid_op->lookup_table == NULL) {
    pytorch_qnnp_log_error(
        "failed to allocate 256 bytes for Sigmoid lookup table");
    /* sigmoid_op itself is reclaimed by pytorch_qnnp_delete_operator below. */
    goto error;
  }

  /* Precompute sigmoid for every possible quantized input value, clamping
   * the scaled result to the caller-provided output range. */
  uint8_t* lookup_table = sigmoid_op->lookup_table;
  const float scaled_min = (float)(int32_t)output_min;
  const float scaled_max = (float)(int32_t)output_max;
  for (int32_t i = 0; i < 256; i++) {
    /* Dequantize the input byte. */
    const float x =
        input_scale * (float)(i - (int32_t)(uint32_t)input_zero_point);
    /* Scale sigmoid(x) by 1 / output scale = 256.0 */
    float scaled_sigmoid_x = 256.0f / (1.0f + expf(-x));
    if (scaled_sigmoid_x < scaled_min) {
      scaled_sigmoid_x = scaled_min;
    }
    if (scaled_sigmoid_x > scaled_max) {
      scaled_sigmoid_x = scaled_max;
    }
    lookup_table[(uint32_t)i] = (uint8_t)lrintf(scaled_sigmoid_x);
  }

  sigmoid_op->channels = channels;

  sigmoid_op->ukernel_type = pytorch_qnnp_ukernel_type_lut;
  sigmoid_op->format = pytorch_qnnp_format_quint8;

  *sigmoid_out = sigmoid_op;
  return pytorch_qnnp_status_success;

error:
  /* Safe on NULL; also frees the lookup table if it was allocated. */
  pytorch_qnnp_delete_operator(sigmoid_op);
  return status;
}
133 
/*
 * Binds input/output buffers and strides to a previously created Sigmoid
 * operator ahead of pytorch_qnnp_run_operator.
 *
 * @param sigmoid       operator created by pytorch_qnnp_create_sigmoid_nc_q8.
 * @param batch_size    number of batch elements; 0 makes the run a no-op.
 * @param input         pointer to the first input element.
 * @param input_stride  stride, in elements, between consecutive batch inputs.
 * @param output        pointer to the first output element.
 * @param output_stride stride, in elements, between consecutive batch outputs.
 * @return pytorch_qnnp_status_success, or
 *         pytorch_qnnp_status_uninitialized if QNNPACK was never initialized.
 */
enum pytorch_qnnp_status pytorch_qnnp_setup_sigmoid_nc_q8(
    pytorch_qnnp_operator_t sigmoid,
    size_t batch_size,
    const uint8_t* input,
    size_t input_stride,
    uint8_t* output,
    size_t output_stride) {
  if (!pytorch_qnnp_params.initialized) {
    pytorch_qnnp_log_error(
        "pytorch_qnnp_setup_sigmoid_nc_q8 failed because QNNPACK is not properly initialized");
    return pytorch_qnnp_status_uninitialized;
  }

  /* Record the batch size first; an empty batch needs no buffer bindings. */
  sigmoid->batch_size = batch_size;
  if (batch_size == 0) {
    return pytorch_qnnp_status_success;
  }

  sigmoid->input = input;
  sigmoid->input_pixel_stride = input_stride;
  sigmoid->output = output;
  sigmoid->output_pixel_stride = output_stride;
  return pytorch_qnnp_status_success;
}
160