path: root/src/u32-vlog/scalar.c.in
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert BATCH_TILE >= 1
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#include <xnnpack/math.h>
#include <xnnpack/vlog.h>

extern XNN_INTERNAL const uint16_t xnn_table_vlog[129];

// Calculate the integer logarithm, 32-bit version
static uint32_t xnn_u32_log32(uint32_t x, uint32_t out_scale) {
  const uint32_t log_scale = 65536;     // Q16 fixed-point scale factor (1 << 16)
  const uint32_t log_scale_log2 = 16;   // log2 of log_scale
  const uint32_t log_coeff = 45426;     // round(ln(2) * 65536), converts log2 to ln
  const uint32_t log2x = math_clz_nonzero_u32(x) ^ 31;  // log2 of x
  assert(log2x < 32);
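  // For example: x = 1000 has 22 leading zeros, and 22 ^ 31 == 31 - 22 == 9,
  // which is floor(log2(1000)).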

  // Base-2 log of the number of segments in the log lookup table. The table
  // has 2^log_segments_log2 + 1 = 129 entries (with some padding).
  const int log_segments_log2 = 7;

  // Part 1: strip the leading set bit to isolate the fractional part of x
  uint32_t frac = x - (UINT32_C(1) << log2x);

  // Shift the fractional part into the most significant bits of a 16-bit (Q16) fraction
  frac = XNN_UNPREDICTABLE(log2x < log_scale_log2) ?
      (frac << (log_scale_log2 - log2x)) :
      (frac >> (log2x - log_scale_log2));
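  // Continuing the example: frac = 1000 - 512 = 488 and log2x = 9 < 16, so
  // frac becomes 488 << 7 = 62464, i.e. x == 2^9 * (1 + 62464/65536) exactly.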

  // Part 2: piecewise-linear interpolation through the lookup table
  const uint32_t base_seg = frac >> (log_scale_log2 - log_segments_log2);
  const uint32_t seg_unit = (UINT32_C(1) << log_scale_log2) >> log_segments_log2;

  assert(128 == (UINT32_C(1) << log_segments_log2));
  assert(base_seg < (UINT32_C(1) << log_segments_log2));
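  // xnn_table_vlog has 129 = 2^7 + 1 16-bit entries, one per segment endpoint
  // (128 segments plus the right edge), so indexing base_seg + 1 stays in range.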

  const uint32_t c0 = xnn_table_vlog[base_seg];
  const uint32_t c1 = xnn_table_vlog[base_seg + 1];
  const uint32_t seg_base = seg_unit * base_seg;
  const uint32_t rel_pos = ((c1 - c0) * (frac - seg_base)) >> log_scale_log2;
  const uint32_t fraction = frac + c0 + rel_pos;

  const uint32_t log2 = (log2x << log_scale_log2) + fraction;
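  // log2 now holds log2(x) in Q16 fixed point: the integer part from the
  // leading bit position, the fractional part from the interpolated lookup.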
  const uint32_t round = log_scale >> 1;
  const uint32_t loge = (math_mulext_u32(log_coeff, log2) + round) >> log_scale_log2;
  // Finally, scale to the requested output scale
  const uint32_t loge_scaled = (out_scale * loge + round) >> log_scale_log2;
  return loge_scaled;
}
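
// Illustrative reference model (a sketch, not part of XNNPACK): because
// log_coeff == round(ln(2) * 65536), xnn_u32_log32 approximates
// out_scale * ln(x). A floating-point equivalent for spot-checking could be:
//
//   #include <math.h>
//   static uint32_t u32_log32_reference(uint32_t x, uint32_t out_scale) {
//     // Hypothetical helper: out_scale * ln(x), rounded to nearest.
//     return (uint32_t) lrint((double) out_scale * log((double) x));
//   }
//
// e.g. xnn_u32_log32(1000, 500) should be close to 500 * ln(1000) ~= 3454.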

void xnn_u32_vlog_ukernel__scalar_x${BATCH_TILE}(
    size_t batch,
    const uint32_t* input,
    uint32_t input_lshift,
    uint32_t output_scale,
    uint16_t* output) {

  assert(batch != 0);
  assert(input != NULL);
  assert(input_lshift < 32);
  assert(output != NULL);

  $if BATCH_TILE > 1:
    for (; batch >= ${BATCH_TILE}; batch -= ${BATCH_TILE}) {
      $for N in range(BATCH_TILE):
        const uint32_t vi${N} = input[${N}];
      input += ${BATCH_TILE};

      $for N in range(BATCH_TILE):
        const uint32_t scaled${N} = vi${N} << input_lshift;

      $for N in range(BATCH_TILE):
        const uint32_t log_value${N} = scaled${N} ? xnn_u32_log32(scaled${N}, output_scale) : 0;

        const uint32_t vout${N} = math_min_u32(log_value${N}, (uint32_t) INT16_MAX);  // signed max value
        output[${N}] = (uint16_t) vout${N};

      output += ${BATCH_TILE};
    }

  if XNN_UNLIKELY(batch != 0) {
    do {
      const uint32_t vi = *input++;
      const uint32_t scaled = vi << input_lshift;

      const uint32_t log_value = scaled ? xnn_u32_log32(scaled, output_scale) : 0;

      const uint32_t vout = math_min_u32(log_value, (uint32_t) INT16_MAX);
      *output++ = (uint16_t) vout;
    } while (--batch != 0);
  }
}
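
// Example usage of a generated ukernel (an illustrative sketch: the x2 name
// follows the ${BATCH_TILE} template and the argument values are hypothetical):
//
//   const uint32_t input[2] = { 1, 1000 };
//   uint16_t output[2];
//   // output[n] ~= min(500 * ln(input[n] << 4), INT16_MAX)
//   xnn_u32_vlog_ukernel__scalar_x2(2, input, /*input_lshift=*/4,
//                                   /*output_scale=*/500, output);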