summaryrefslogtreecommitdiffstats
path: root/include/asm-generic/unaligned.h
blob: c856a43e3b455c30a654e0c2a16cc34c0ec1455c (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
#ifndef _ASM_GENERIC_UNALIGNED_H_
#define _ASM_GENERIC_UNALIGNED_H_

/*
 * For the benefit of those who are trying to port Linux to another
 * architecture, here are some C-language equivalents. 
 *
 * This is based almost entirely upon Richard Henderson's
 * asm-alpha/unaligned.h implementation.  Some comments were
 * taken from David Mosberger's asm-ia64/unaligned.h header.
 */

#include <linux/types.h>

/* 
 * The main single-value unaligned transfer routines.
 *
 * get_unaligned(ptr) reads the (possibly misaligned) object *ptr;
 * put_unaligned(x, ptr) stores x to it.  The value is funnelled
 * through __u64 — the widest supported size — so a 64-bit quantity
 * is not truncated at this cast on 32-bit platforms, where
 * unsigned long is only 32 bits wide.
 */
#define get_unaligned(ptr) \
	((__typeof__(*(ptr)))__get_unaligned((ptr), sizeof(*(ptr))))
#define put_unaligned(x,ptr) \
	__put_unaligned((__u64)(x), (ptr), sizeof(*(ptr)))

/*
 * This function doesn't actually exist.  The idea is that when
 * someone uses the macros below with an unsupported size (datatype),
 * the linker will alert us to the problem via an unresolved reference
 * error.
 */
extern void bad_unaligned_access_length(void) __attribute__((noreturn));

/*
 * Single-member wrapper structs whose member is marked packed.
 * Accessing the member through a pointer to one of these structs
 * tells the compiler the access may be misaligned, so it emits
 * whatever instruction sequence the architecture needs for an
 * unaligned load or store.
 */
struct __una_u64 { __u64 x __attribute__((packed)); };
struct __una_u32 { __u32 x __attribute__((packed)); };
struct __una_u16 { __u16 x __attribute__((packed)); };

/*
 * Elemental unaligned loads 
 */

/*
 * Load a 64-bit value from a possibly misaligned address.
 *
 * Returns __u64 rather than unsigned long: on 32-bit platforms
 * unsigned long is only 32 bits, which would silently truncate the
 * upper half of the loaded value.
 */
static inline __u64 __uldq(const __u64 *addr)
{
	const struct __una_u64 *ptr = (const struct __una_u64 *) addr;
	return ptr->x;
}

/* Load a 32-bit value from a possibly misaligned address. */
static inline unsigned long __uldl(const __u32 *addr)
{
	return ((const struct __una_u32 *) addr)->x;
}

/* Load a 16-bit value from a possibly misaligned address. */
static inline unsigned long __uldw(const __u16 *addr)
{
	return ((const struct __una_u16 *) addr)->x;
}

/*
 * Elemental unaligned stores 
 */

/* Store a 64-bit value to a possibly misaligned address. */
static inline void __ustq(__u64 val, __u64 *addr)
{
	((struct __una_u64 *) addr)->x = val;
}

/* Store a 32-bit value to a possibly misaligned address. */
static inline void __ustl(__u32 val, __u32 *addr)
{
	((struct __una_u32 *) addr)->x = val;
}

/* Store a 16-bit value to a possibly misaligned address. */
static inline void __ustw(__u16 val, __u16 *addr)
{
	((struct __una_u16 *) addr)->x = val;
}

static inline unsigned long __get_unaligned(const void *ptr, size_t size)
{
	unsigned long val;
	switch (size) {
	case 1:
		val = *(const __u8 *)ptr;
		break;
	case 2:
		val = __uldw((const __u16 *)ptr);
		break;
	case 4:
		val = __uldl((const __u32 *)ptr);
		break;
	case 8:
		val = __uldq((const __u64 *)ptr);
		break;
	default:
		bad_unaligned_access_length();
	};
	return val;
}

static inline void __put_unaligned(unsigned long val, void *ptr, size_t size)
{
	switch (size) {
	case 1:
		*(__u8 *)ptr = val;
	        break;
	case 2:
		__ustw(val, (__u16 *)ptr);
		break;
	case 4:
		__ustl(val, (__u32 *)ptr);
		break;
	case 8:
		__ustq(val, (__u64 *)ptr);
		break;
	default:
	    	bad_unaligned_access_length();
	};
}

#endif /* _ASM_GENERIC_UNALIGNED_H_ */