#ifndef _LINUX_UNALIGNED_ACCESS_OK_H
#define _LINUX_UNALIGNED_ACCESS_OK_H
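
/*
 * Accessors for integers at possibly unaligned addresses.  This variant
 * simply casts the pointer and dereferences it, so it is only suitable
 * for architectures where unaligned memory access is safe and cheap
 * (hence "access_ok").
 *
 * Illustrative use, assuming a byte buffer "buf" with a 32-bit
 * little-endian length field at offset 2 (hypothetical layout):
 *
 *	u32 len = get_unaligned_le32(buf + 2);
 *	put_unaligned_le32(len + 1, buf + 2);
 */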
#include <linux/kernel.h>
#include <asm/byteorder.h>
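
/*
 * Load a little-endian value from a possibly unaligned pointer and
 * return it in CPU byte order.
 */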
static __always_inline u16 get_unaligned_le16(const void *p)
{
	return le16_to_cpup((__le16 *)p);
}

static __always_inline u32 get_unaligned_le32(const void *p)
{
	return le32_to_cpup((__le32 *)p);
}

static __always_inline u64 get_unaligned_le64(const void *p)
{
	return le64_to_cpup((__le64 *)p);
}
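
/*
 * Load a big-endian value from a possibly unaligned pointer and
 * return it in CPU byte order.
 */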
static __always_inline u16 get_unaligned_be16(const void *p)
{
	return be16_to_cpup((__be16 *)p);
}

static __always_inline u32 get_unaligned_be32(const void *p)
{
	return be32_to_cpup((__be32 *)p);
}

static __always_inline u64 get_unaligned_be64(const void *p)
{
	return be64_to_cpup((__be64 *)p);
}
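
/*
 * Store a CPU-order value as little-endian at a possibly unaligned
 * pointer.
 */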
static __always_inline void put_unaligned_le16(u16 val, void *p)
{
	*((__le16 *)p) = cpu_to_le16(val);
}

static __always_inline void put_unaligned_le32(u32 val, void *p)
{
	*((__le32 *)p) = cpu_to_le32(val);
}

static __always_inline void put_unaligned_le64(u64 val, void *p)
{
	*((__le64 *)p) = cpu_to_le64(val);
}
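
/*
 * Store a CPU-order value as big-endian at a possibly unaligned
 * pointer.
 */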
static __always_inline void put_unaligned_be16(u16 val, void *p)
{
	*((__be16 *)p) = cpu_to_be16(val);
}

static __always_inline void put_unaligned_be32(u32 val, void *p)
{
	*((__be32 *)p) = cpu_to_be32(val);
}

static __always_inline void put_unaligned_be64(u64 val, void *p)
{
	*((__be64 *)p) = cpu_to_be64(val);
}
#endif /* _LINUX_UNALIGNED_ACCESS_OK_H */