/*
 * Common code for checksum implementations
 *
 * Copyright (c) 2020, Arm Limited.
 * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
 */

#ifndef CHKSUM_COMMON_H
#define CHKSUM_COMMON_H

#if __BYTE_ORDER__ != __ORDER_LITTLE_ENDIAN__
#error Only little endian supported
#endif

#include <limits.h>
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

/* Assertions must be explicitly enabled */
#if WANT_ASSERT
#undef NDEBUG
#include <assert.h>
#define Assert(exp) assert(exp)
#else
#define Assert(exp) (void) (exp)
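/* Note: unlike assert() under NDEBUG, this fallback still evaluates
 * exp (the result is discarded via the void cast), preserving any
 * side effects and silencing "set but not used" warnings. */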
#endif

#ifdef __GNUC__
#define likely(x)     __builtin_expect(!!(x), 1)
#define unlikely(x)   __builtin_expect(!!(x), 0)
#define may_alias     __attribute__((__may_alias__))
#define always_inline __attribute__((always_inline))
#ifdef __clang__
#define no_unroll_loops
#else
#define no_unroll_loops  __attribute__((optimize("no-unroll-loops")))
#endif
#define bswap16(x)    __builtin_bswap16((x))
#else
#define likely(x)     (x)
#define unlikely(x)   (x)
#define may_alias
#define always_inline
#define no_unroll_loops
#define bswap16(x)    ((uint8_t)((x) >> 8) | ((uint8_t)(x) << 8))
#endif
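
/*
 * Illustrative use of the wrappers above (a sketch, not part of the
 * original header):
 *
 *     if (unlikely(nbytes == 0))     // hint: branch rarely taken
 *         return 0;
 *     uint16_t be = bswap16(0x1234); // 0x1234 -> 0x3412
 */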

#define ALL_ONES ~UINT64_C(0)

static inline
uint64_t load64(const void *ptr)
{
    /* GCC will optimise this to a normal load instruction */
    uint64_t v;
    memcpy(&v, ptr, sizeof v);
    return v;
}

static inline
uint32_t load32(const void *ptr)
{
    /* GCC will optimise this to a normal load instruction */
    uint32_t v;
    memcpy(&v, ptr, sizeof v);
    return v;
}

static inline
uint16_t load16(const void *ptr)
{
    /* GCC will optimise this to a normal load instruction */
    uint16_t v;
    memcpy(&v, ptr, sizeof v);
    return v;
}
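
/*
 * Example (illustrative): the memcpy-based loads above may read from
 * any byte offset without violating C's alignment or effective-type
 * (strict aliasing) rules, unlike a direct pointer cast:
 *
 *     const unsigned char pkt[8] = { 0 };
 *     uint32_t w = load32(pkt + 1);               // OK: unaligned load
 *     uint32_t x = *(const uint32_t *) (pkt + 1); // undefined behaviour
 */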

/* slurp_small() is for small buffers, don't waste cycles on alignment */
no_unroll_loops
always_inline
static inline uint64_t
slurp_small(const void *ptr, uint32_t nbytes)
{
    const unsigned char *cptr = ptr;
    uint64_t sum = 0;
    while (nbytes >= 4)
    {
	sum += load32(cptr);
	cptr += 4;
	nbytes -= 4;
    }
    if (nbytes & 2)
    {
	sum += load16(cptr);
	cptr += 2;
    }
    if (nbytes & 1)
    {
	sum += (uint8_t) *cptr;
    }
    return sum;
}
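
/*
 * Example (illustrative): for nbytes == 7 the loop above performs one
 * 32-bit load, then the tail tests add one 16-bit load and one byte:
 *
 *     uint64_t partial = slurp_small(pkt, 7); // 4 + 2 + 1 bytes
 *
 * Carries accumulate in the upper bits of the 64-bit sum; they are
 * folded back in by fold_and_swap() below.
 */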

static inline const void *
align_ptr(const void *ptr, size_t bytes)
{
    return (void *) ((uintptr_t) ptr & -(uintptr_t) bytes);
}
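
/*
 * Example (illustrative): bytes must be a power of two; negating it
 * forms an all-ones mask above the alignment bits, rounding down:
 *
 *     align_ptr((const void *) 0x1003, 8) == (const void *) 0x1000
 */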

always_inline
static inline uint16_t
fold_and_swap(uint64_t sum, bool swap)
{
    /* Fold 64-bit sum to 32 bits */
    sum = (sum & 0xffffffff) + (sum >> 32);
    sum = (sum & 0xffffffff) + (sum >> 32);
    Assert(sum == (uint32_t) sum);

    /* Fold 32-bit sum to 16 bits */
    sum = (sum & 0xffff) + (sum >> 16);
    sum = (sum & 0xffff) + (sum >> 16);
    Assert(sum == (uint16_t) sum);

    if (unlikely(swap)) /* Odd base pointer is unexpected */
    {
	sum = bswap16(sum);
    }

    return (uint16_t) sum;
}
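
/*
 * Worked example (illustrative): a running sum of 0x1FFFF passes the
 * 32-bit folds unchanged; the first 16-bit fold yields
 * 0xFFFF + 0x1 == 0x10000, which itself carries, so the second fold
 * reduces it to 0x0000 + 0x1 == 0x0001. Each stage runs twice because
 * the first addition can overflow into the bit above.
 *
 * Putting the helpers together (a sketch, not part of the original
 * header; CHKSUM_COMMON_EXAMPLE is a hypothetical guard):
 */
#ifdef CHKSUM_COMMON_EXAMPLE
/* RFC 1071 style Internet checksum of a small buffer. The final
   one's complement (~) follows the RFC; whether caller or library
   applies it is outside the scope of this header. */
static inline uint16_t
example_chksum_small(const void *ptr, uint32_t nbytes)
{
    /* No realignment is done here, so 16-bit words are summed in
       buffer order and no final byte swap is needed. */
    uint64_t sum = slurp_small(ptr, nbytes);
    return (uint16_t) ~fold_and_swap(sum, false);
}
#endif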

#endif