path: root/net/smc/smc.h
blob: 878313f8d6c1772849d0877f50e4190d9fe2170b (plain)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 *  Definitions for the SMC module (socket related)
 *
 *  Copyright IBM Corp. 2016
 *
 *  Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
 */
#ifndef __SMC_H
#define __SMC_H

#include <linux/socket.h>
#include <linux/types.h>
#include <linux/compiler.h> /* __aligned */
#include <net/sock.h>

#include "smc_ib.h"

#define SMCPROTO_SMC		0	/* SMC protocol, IPv4 */
#define SMCPROTO_SMC6		1	/* SMC protocol, IPv6 */

extern struct proto smc_proto;
extern struct proto smc_proto6;

#ifdef ATOMIC64_INIT
#define KERNEL_HAS_ATOMIC64
#endif

enum smc_state {		/* possible states of an SMC socket */
	SMC_ACTIVE	= 1,
	SMC_INIT	= 2,
	SMC_CLOSED	= 7,
	SMC_LISTEN	= 10,
	/* normal close */
	SMC_PEERCLOSEWAIT1	= 20,
	SMC_PEERCLOSEWAIT2	= 21,
	SMC_APPFINCLOSEWAIT	= 24,
	SMC_APPCLOSEWAIT1	= 22,
	SMC_APPCLOSEWAIT2	= 23,
	SMC_PEERFINCLOSEWAIT	= 25,
	/* abnormal close */
	SMC_PEERABORTWAIT	= 26,
	SMC_PROCESSABORT	= 27,
};

struct smc_link_group;

struct smc_wr_rx_hdr {	/* common prefix part of LLC and CDC to demultiplex */
	u8			type;
} __aligned(1);

struct smc_cdc_conn_state_flags {
#if defined(__BIG_ENDIAN_BITFIELD)
	u8	peer_done_writing : 1;	/* Sending done indicator */
	u8	peer_conn_closed : 1;	/* Peer connection closed indicator */
	u8	peer_conn_abort : 1;	/* Abnormal close indicator */
	u8	reserved : 5;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
	u8	reserved : 5;
	u8	peer_conn_abort : 1;
	u8	peer_conn_closed : 1;
	u8	peer_done_writing : 1;
#endif
};

struct smc_cdc_producer_flags {
#if defined(__BIG_ENDIAN_BITFIELD)
	u8	write_blocked : 1;	/* Writing Blocked, no rx buf space */
	u8	urg_data_pending : 1;	/* Urgent Data Pending */
	u8	urg_data_present : 1;	/* Urgent Data Present */
	u8	cons_curs_upd_req : 1;	/* cursor update requested */
	u8	failover_validation : 1;/* message replay due to failover */
	u8	reserved : 3;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
	u8	reserved : 3;
	u8	failover_validation : 1;
	u8	cons_curs_upd_req : 1;
	u8	urg_data_present : 1;
	u8	urg_data_pending : 1;
	u8	write_blocked : 1;
#endif
};

/* in host byte order */
union smc_host_cursor {	/* SMC cursor - an offset in an RMBE */
	struct {
		u16	reserved;
		u16	wrap;		/* window wrap sequence number */
		u32	count;		/* cursor (= offset) part */
	};
#ifdef KERNEL_HAS_ATOMIC64
	atomic64_t		acurs;	/* for atomic processing */
#else
	u64			acurs;	/* for atomic processing */
#endif
} __aligned(8);
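
/*
 * Illustrative note (not part of the original header): where atomic64 is
 * available a cursor is typically copied in one shot, roughly
 *	atomic64_set(&dst->acurs, atomic64_read(&src->acurs));
 * otherwise the wrap/count fields are copied under conn->acurs_lock
 * (see struct smc_connection below).
 */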

/* in host byte order, except for flag bitfields in network byte order */
struct smc_host_cdc_msg {		/* Connection Data Control message */
	struct smc_wr_rx_hdr		common; /* .type = 0xFE */
	u8				len;	/* length = 44 */
	u16				seqno;	/* connection seq # */
	u32				token;	/* alert_token */
	union smc_host_cursor		prod;		/* producer cursor */
	union smc_host_cursor		cons;		/* consumer cursor,
							 * piggy backed "ack"
							 */
	struct smc_cdc_producer_flags	prod_flags;	/* conn. tx/rx status */
	struct smc_cdc_conn_state_flags	conn_state_flags; /* peer conn. status*/
	u8				reserved[18];
} __aligned(8);

enum smc_urg_state {
	SMC_URG_VALID	= 1,			/* data present */
	SMC_URG_NOTYET	= 2,			/* data pending */
	SMC_URG_READ	= 3,			/* data was already read */
};

struct smc_connection {
	struct rb_node		alert_node;
	struct smc_link_group	*lgr;		/* link group of connection */
	u32			alert_token_local; /* unique conn. id */
	u8			peer_rmbe_idx;	/* from tcp handshake */
	int			peer_rmbe_size;	/* size of peer rx buffer */
	atomic_t		peer_rmbe_space;/* remaining free bytes in peer
						 * rmbe
						 */
	int			rtoken_idx;	/* idx to peer RMB rkey/addr */

	struct smc_buf_desc	*sndbuf_desc;	/* send buffer descriptor */
	struct smc_buf_desc	*rmb_desc;	/* RMBE descriptor */
	int			rmbe_size_short;/* compressed notation */
	int			rmbe_update_limit;
						/* lower limit for consumer
						 * cursor update
						 */

	struct smc_host_cdc_msg	local_tx_ctrl;	/* host byte order staging
						 * buffer for CDC msg send
						 * .prod cf. TCP snd_nxt
						 * .cons cf. TCP sends ack
						 */
	union smc_host_cursor	tx_curs_prep;	/* tx - prepared data
						 * snd_max..wmem_alloc
						 */
	union smc_host_cursor	tx_curs_sent;	/* tx - sent data
						 * snd_nxt ?
						 */
	union smc_host_cursor	tx_curs_fin;	/* tx - confirmed by peer
						 * snd-wnd-begin ?
						 */
	atomic_t		sndbuf_space;	/* remaining space in sndbuf */
	u16			tx_cdc_seq;	/* sequence # for CDC send */
	spinlock_t		send_lock;	/* protect wr_sends */
	struct delayed_work	tx_work;	/* retry of smc_cdc_msg_send */
	u32			tx_off;		/* base offset in peer rmb */

	struct smc_host_cdc_msg	local_rx_ctrl;	/* filled during event_handl.
						 * .prod cf. TCP rcv_nxt
						 * .cons cf. TCP snd_una
						 */
	union smc_host_cursor	rx_curs_confirmed; /* confirmed to peer
						    * source of snd_una ?
						    */
	union smc_host_cursor	urg_curs;	/* points at urgent byte */
	enum smc_urg_state	urg_state;
	bool			urg_tx_pend;	/* urgent data staged */
	bool			urg_rx_skip_pend;
						/* indicate urgent oob data
						 * read, but previous regular
						 * data still pending
						 */
	char			urg_rx_byte;	/* urgent byte */
	atomic_t		bytes_to_rcv;	/* arrived data,
						 * not yet received
						 */
	atomic_t		splice_pending;	/* number of spliced bytes
						 * pending processing
						 */
#ifndef KERNEL_HAS_ATOMIC64
	spinlock_t		acurs_lock;	/* protect cursors */
#endif
	struct work_struct	close_work;	/* peer sent some closing */
	struct tasklet_struct	rx_tsklet;	/* Receiver tasklet for SMC-D */
	u8			rx_off;		/* receive offset:
						 * 0 for SMC-R, 32 for SMC-D
						 */
	u64			peer_token;	/* SMC-D token of peer */
};

struct smc_sock {				/* smc sock container */
	struct sock		sk;
	struct socket		*clcsock;	/* internal tcp socket */
	struct smc_connection	conn;		/* smc connection */
	struct smc_sock		*listen_smc;	/* listen parent */
	struct work_struct	connect_work;	/* handle non-blocking connect*/
	struct work_struct	tcp_listen_work;/* handle tcp socket accepts */
	struct work_struct	smc_listen_work;/* prepare new accept socket */
	struct list_head	accept_q;	/* sockets to be accepted */
	spinlock_t		accept_q_lock;	/* protects accept_q */
	bool			use_fallback;	/* fallback to tcp */
	int			fallback_rsn;	/* reason for fallback */
	u32			peer_diagnosis; /* decline reason from peer */
	int			sockopt_defer_accept;
						/* sockopt TCP_DEFER_ACCEPT
						 * value
						 */
	u8			wait_close_tx_prepared : 1;
						/* shutdown wr or close
						 * started, waiting for unsent
						 * data to be sent
						 */
	u8			connect_nonblock : 1;
						/* non-blocking connect in
						 * flight
						 */
	struct mutex            clcsock_release_lock;
						/* protects clcsock of a listen
						 * socket
						 */
};

static inline struct smc_sock *smc_sk(const struct sock *sk)
{
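	/* valid because 'sk' is the first member of struct smc_sock */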
	return (struct smc_sock *)sk;
}

#define SMC_SYSTEMID_LEN		8

extern u8	local_systemid[SMC_SYSTEMID_LEN]; /* unique system identifier */

/* convert a u32 value into network byte order, store it into a 3 byte field */
static inline void hton24(u8 *net, u32 host)
{
	__be32 t;

	t = cpu_to_be32(host);
	memcpy(net, ((u8 *)&t) + 1, 3);
}

/* convert a received 3 byte field into host byte order */
static inline u32 ntoh24(u8 *net)
{
	__be32 t = 0;

	memcpy(((u8 *)&t) + 1, net, 3);
	return be32_to_cpu(t);
}
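
/*
 * Example (illustrative only): hton24(net, 0x00123456) stores the bytes
 * {0x12, 0x34, 0x56} into net[0..2]; ntoh24() on that field returns
 * 0x00123456 again. Values above 0x00ffffff do not fit the 3 byte field
 * and lose their most significant byte.
 */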

#ifdef CONFIG_XFRM
static inline bool using_ipsec(struct smc_sock *smc)
{
	return (smc->clcsock->sk->sk_policy[0] ||
		smc->clcsock->sk->sk_policy[1]) ? true : false;
}
#else
static inline bool using_ipsec(struct smc_sock *smc)
{
	return false;
}
#endif

struct sock *smc_accept_dequeue(struct sock *parent, struct socket *new_sock);
void smc_close_non_accepted(struct sock *sk);

#endif	/* __SMC_H */
"p">, 15, 2, false) /* EMAC function */ MUX_CFG(DA850, MII_TXEN, 2, 4, 15, 8, false) MUX_CFG(DA850, MII_TXCLK, 2, 8, 15, 8, false) MUX_CFG(DA850, MII_COL, 2, 12, 15, 8, false) MUX_CFG(DA850, MII_TXD_3, 2, 16, 15, 8, false) MUX_CFG(DA850, MII_TXD_2, 2, 20, 15, 8, false) MUX_CFG(DA850, MII_TXD_1, 2, 24, 15, 8, false) MUX_CFG(DA850, MII_TXD_0, 2, 28, 15, 8, false) MUX_CFG(DA850, MII_RXCLK, 3, 0, 15, 8, false) MUX_CFG(DA850, MII_RXDV, 3, 4, 15, 8, false) MUX_CFG(DA850, MII_RXER, 3, 8, 15, 8, false) MUX_CFG(DA850, MII_CRS, 3, 12, 15, 8, false) MUX_CFG(DA850, MII_RXD_3, 3, 16, 15, 8, false) MUX_CFG(DA850, MII_RXD_2, 3, 20, 15, 8, false) MUX_CFG(DA850, MII_RXD_1, 3, 24, 15, 8, false) MUX_CFG(DA850, MII_RXD_0, 3, 28, 15, 8, false) MUX_CFG(DA850, MDIO_CLK, 4, 0, 15, 8, false) MUX_CFG(DA850, MDIO_D, 4, 4, 15, 8, false) MUX_CFG(DA850, RMII_TXD_0, 14, 12, 15, 8, false) MUX_CFG(DA850, RMII_TXD_1, 14, 8, 15, 8, false) MUX_CFG(DA850, RMII_TXEN, 14, 16, 15, 8, false) MUX_CFG(DA850, RMII_CRS_DV, 15, 4, 15, 8, false) MUX_CFG(DA850, RMII_RXD_0, 14, 24, 15, 8, false) MUX_CFG(DA850, RMII_RXD_1, 14, 20, 15, 8, false) MUX_CFG(DA850, RMII_RXER, 14, 28, 15, 8, false) MUX_CFG(DA850, RMII_MHZ_50_CLK, 15, 0, 15, 0, false) /* McASP function */ MUX_CFG(DA850, ACLKR, 0, 0, 15, 1, false) MUX_CFG(DA850, ACLKX, 0, 4, 15, 1, false) MUX_CFG(DA850, AFSR, 0, 8, 15, 1, false) MUX_CFG(DA850, AFSX, 0, 12, 15, 1, false) MUX_CFG(DA850, AHCLKR, 0, 16, 15, 1, false) MUX_CFG(DA850, AHCLKX, 0, 20, 15, 1, false) MUX_CFG(DA850, AMUTE, 0, 24, 15, 1, false) MUX_CFG(DA850, AXR_15, 1, 0, 15, 1, false) MUX_CFG(DA850, AXR_14, 1, 4, 15, 1, false) MUX_CFG(DA850, AXR_13, 1, 8, 15, 1, false) MUX_CFG(DA850, AXR_12, 1, 12, 15, 1, false) MUX_CFG(DA850, AXR_11, 1, 16, 15, 1, false) MUX_CFG(DA850, AXR_10, 1, 20, 15, 1, false) MUX_CFG(DA850, AXR_9, 1, 24, 15, 1, false) MUX_CFG(DA850, AXR_8, 1, 28, 15, 1, false) MUX_CFG(DA850, AXR_7, 2, 0, 15, 1, false) MUX_CFG(DA850, AXR_6, 2, 4, 15, 1, false) MUX_CFG(DA850, AXR_5, 2, 8, 15, 1, false) MUX_CFG(DA850, AXR_4, 2, 12, 15, 1, false) MUX_CFG(DA850, AXR_3, 2, 16, 15, 1, false) MUX_CFG(DA850, AXR_2, 2, 20, 15, 1, false) MUX_CFG(DA850, AXR_1, 2, 24, 15, 1, false) MUX_CFG(DA850, AXR_0, 2, 28, 15, 1, false) /* LCD function */ MUX_CFG(DA850, LCD_D_7, 16, 8, 15, 2, false) MUX_CFG(DA850, LCD_D_6, 16, 12, 15, 2, false) MUX_CFG(DA850, LCD_D_5, 16, 16, 15, 2, false) MUX_CFG(DA850, LCD_D_4, 16, 20, 15, 2, false) MUX_CFG(DA850, LCD_D_3, 16, 24, 15, 2, false) MUX_CFG(DA850, LCD_D_2, 16, 28, 15, 2, false) MUX_CFG(DA850, LCD_D_1, 17, 0, 15, 2, false) MUX_CFG(DA850, LCD_D_0, 17, 4, 15, 2, false) MUX_CFG(DA850, LCD_D_15, 17, 8, 15, 2, false) MUX_CFG(DA850, LCD_D_14, 17, 12, 15, 2, false) MUX_CFG(DA850, LCD_D_13, 17, 16, 15, 2, false) MUX_CFG(DA850, LCD_D_12, 17, 20, 15, 2, false) MUX_CFG(DA850, LCD_D_11, 17, 24, 15, 2, false) MUX_CFG(DA850, LCD_D_10, 17, 28, 15, 2, false) MUX_CFG(DA850, LCD_D_9, 18, 0, 15, 2, false) MUX_CFG(DA850, LCD_D_8, 18, 4, 15, 2, false) MUX_CFG(DA850, LCD_PCLK, 18, 24, 15, 2, false) MUX_CFG(DA850, LCD_HSYNC, 19, 0, 15, 2, false) MUX_CFG(DA850, LCD_VSYNC, 19, 4, 15, 2, false) MUX_CFG(DA850, NLCD_AC_ENB_CS, 19, 24, 15, 2, false) /* MMC/SD0 function */ MUX_CFG(DA850, MMCSD0_DAT_0, 10, 8, 15, 2, false) MUX_CFG(DA850, MMCSD0_DAT_1, 10, 12, 15, 2, false) MUX_CFG(DA850, MMCSD0_DAT_2, 10, 16, 15, 2, false) MUX_CFG(DA850, MMCSD0_DAT_3, 10, 20, 15, 2, false) MUX_CFG(DA850, MMCSD0_CLK, 10, 0, 15, 2, false) MUX_CFG(DA850, MMCSD0_CMD, 10, 4, 15, 2, false) /* MMC/SD1 function */ MUX_CFG(DA850, MMCSD1_DAT_0, 18, 8, 15, 
2, false) MUX_CFG(DA850, MMCSD1_DAT_1, 19, 16, 15, 2, false) MUX_CFG(DA850, MMCSD1_DAT_2, 19, 12, 15, 2, false) MUX_CFG(DA850, MMCSD1_DAT_3, 19, 8, 15, 2, false) MUX_CFG(DA850, MMCSD1_CLK, 18, 12, 15, 2, false) MUX_CFG(DA850, MMCSD1_CMD, 18, 16, 15, 2, false) /* EMIF2.5/EMIFA function */ MUX_CFG(DA850, EMA_D_7, 9, 0, 15, 1, false) MUX_CFG(DA850, EMA_D_6, 9, 4, 15, 1, false) MUX_CFG(DA850, EMA_D_5, 9, 8, 15, 1, false) MUX_CFG(DA850, EMA_D_4, 9, 12, 15, 1, false) MUX_CFG(DA850, EMA_D_3, 9, 16, 15, 1, false) MUX_CFG(DA850, EMA_D_2, 9, 20, 15, 1, false) MUX_CFG(DA850, EMA_D_1, 9, 24, 15, 1, false) MUX_CFG(DA850, EMA_D_0, 9, 28, 15, 1, false) MUX_CFG(DA850, EMA_A_1, 12, 24, 15, 1, false) MUX_CFG(DA850, EMA_A_2, 12, 20, 15, 1, false) MUX_CFG(DA850, NEMA_CS_3, 7, 4, 15, 1, false) MUX_CFG(DA850, NEMA_CS_4, 7, 8, 15, 1, false) MUX_CFG(DA850, NEMA_WE, 7, 16, 15, 1, false) MUX_CFG(DA850, NEMA_OE, 7, 20, 15, 1, false) MUX_CFG(DA850, EMA_A_0, 12, 28, 15, 1, false) MUX_CFG(DA850, EMA_A_3, 12, 16, 15, 1, false) MUX_CFG(DA850, EMA_A_4, 12, 12, 15, 1, false) MUX_CFG(DA850, EMA_A_5, 12, 8, 15, 1, false) MUX_CFG(DA850, EMA_A_6, 12, 4, 15, 1, false) MUX_CFG(DA850, EMA_A_7, 12, 0, 15, 1, false) MUX_CFG(DA850, EMA_A_8, 11, 28, 15, 1, false) MUX_CFG(DA850, EMA_A_9, 11, 24, 15, 1, false) MUX_CFG(DA850, EMA_A_10, 11, 20, 15, 1, false) MUX_CFG(DA850, EMA_A_11, 11, 16, 15, 1, false) MUX_CFG(DA850, EMA_A_12, 11, 12, 15, 1, false) MUX_CFG(DA850, EMA_A_13, 11, 8, 15, 1, false) MUX_CFG(DA850, EMA_A_14, 11, 4, 15, 1, false) MUX_CFG(DA850, EMA_A_15, 11, 0, 15, 1, false) MUX_CFG(DA850, EMA_A_16, 10, 28, 15, 1, false) MUX_CFG(DA850, EMA_A_17, 10, 24, 15, 1, false) MUX_CFG(DA850, EMA_A_18, 10, 20, 15, 1, false) MUX_CFG(DA850, EMA_A_19, 10, 16, 15, 1, false) MUX_CFG(DA850, EMA_A_20, 10, 12, 15, 1, false) MUX_CFG(DA850, EMA_A_21, 10, 8, 15, 1, false) MUX_CFG(DA850, EMA_A_22, 10, 4, 15, 1, false) MUX_CFG(DA850, EMA_A_23, 10, 0, 15, 1, false) MUX_CFG(DA850, EMA_D_8, 8, 28, 15, 1, false) MUX_CFG(DA850, EMA_D_9, 8, 24, 15, 1, false) MUX_CFG(DA850, EMA_D_10, 8, 20, 15, 1, false) MUX_CFG(DA850, EMA_D_11, 8, 16, 15, 1, false) MUX_CFG(DA850, EMA_D_12, 8, 12, 15, 1, false) MUX_CFG(DA850, EMA_D_13, 8, 8, 15, 1, false) MUX_CFG(DA850, EMA_D_14, 8, 4, 15, 1, false) MUX_CFG(DA850, EMA_D_15, 8, 0, 15, 1, false) MUX_CFG(DA850, EMA_BA_1, 5, 24, 15, 1, false) MUX_CFG(DA850, EMA_CLK, 6, 0, 15, 1, false) MUX_CFG(DA850, EMA_WAIT_1, 6, 24, 15, 1, false) MUX_CFG(DA850, NEMA_CS_2, 7, 0, 15, 1, false) /* GPIO function */ MUX_CFG(DA850, GPIO2_4, 6, 12, 15, 8, false) MUX_CFG(DA850, GPIO2_6, 6, 4, 15, 8, false) MUX_CFG(DA850, GPIO2_8, 5, 28, 15, 8, false) MUX_CFG(DA850, GPIO2_15, 5, 0, 15, 8, false) MUX_CFG(DA850, GPIO3_12, 7, 12, 15, 8, false) MUX_CFG(DA850, GPIO3_13, 7, 8, 15, 8, false) MUX_CFG(DA850, GPIO4_0, 10, 28, 15, 8, false) MUX_CFG(DA850, GPIO4_1, 10, 24, 15, 8, false) MUX_CFG(DA850, GPIO6_9, 13, 24, 15, 8, false) MUX_CFG(DA850, GPIO6_10, 13, 20, 15, 8, false) MUX_CFG(DA850, GPIO6_13, 13, 8, 15, 8, false) MUX_CFG(DA850, RTC_ALARM, 0, 28, 15, 2, false) /* VPIF Capture */ MUX_CFG(DA850, VPIF_DIN0, 15, 4, 15, 1, false) MUX_CFG(DA850, VPIF_DIN1, 15, 0, 15, 1, false) MUX_CFG(DA850, VPIF_DIN2, 14, 28, 15, 1, false) MUX_CFG(DA850, VPIF_DIN3, 14, 24, 15, 1, false) MUX_CFG(DA850, VPIF_DIN4, 14, 20, 15, 1, false) MUX_CFG(DA850, VPIF_DIN5, 14, 16, 15, 1, false) MUX_CFG(DA850, VPIF_DIN6, 14, 12, 15, 1, false) MUX_CFG(DA850, VPIF_DIN7, 14, 8, 15, 1, false) MUX_CFG(DA850, VPIF_DIN8, 16, 4, 15, 1, false) MUX_CFG(DA850, VPIF_DIN9, 16, 0, 15, 1, false) 
MUX_CFG(DA850, VPIF_DIN10, 15, 28, 15, 1, false) MUX_CFG(DA850, VPIF_DIN11, 15, 24, 15, 1, false) MUX_CFG(DA850, VPIF_DIN12, 15, 20, 15, 1, false) MUX_CFG(DA850, VPIF_DIN13, 15, 16, 15, 1, false) MUX_CFG(DA850, VPIF_DIN14, 15, 12, 15, 1, false) MUX_CFG(DA850, VPIF_DIN15, 15, 8, 15, 1, false) MUX_CFG(DA850, VPIF_CLKIN0, 14, 0, 15, 1, false) MUX_CFG(DA850, VPIF_CLKIN1, 14, 4, 15, 1, false) MUX_CFG(DA850, VPIF_CLKIN2, 19, 8, 15, 1, false) MUX_CFG(DA850, VPIF_CLKIN3, 19, 16, 15, 1, false) /* VPIF Display */ MUX_CFG(DA850, VPIF_DOUT0, 17, 4, 15, 1, false) MUX_CFG(DA850, VPIF_DOUT1, 17, 0, 15, 1, false) MUX_CFG(DA850, VPIF_DOUT2, 16, 28, 15, 1, false) MUX_CFG(DA850, VPIF_DOUT3, 16, 24, 15, 1, false) MUX_CFG(DA850, VPIF_DOUT4, 16, 20, 15, 1, false) MUX_CFG(DA850, VPIF_DOUT5, 16, 16, 15, 1, false) MUX_CFG(DA850, VPIF_DOUT6, 16, 12, 15, 1, false) MUX_CFG(DA850, VPIF_DOUT7, 16, 8, 15, 1, false) MUX_CFG(DA850, VPIF_DOUT8, 18, 4, 15, 1, false) MUX_CFG(DA850, VPIF_DOUT9, 18, 0, 15, 1, false) MUX_CFG(DA850, VPIF_DOUT10, 17, 28, 15, 1, false) MUX_CFG(DA850, VPIF_DOUT11, 17, 24, 15, 1, false) MUX_CFG(DA850, VPIF_DOUT12, 17, 20, 15, 1, false) MUX_CFG(DA850, VPIF_DOUT13, 17, 16, 15, 1, false) MUX_CFG(DA850, VPIF_DOUT14, 17, 12, 15, 1, false) MUX_CFG(DA850, VPIF_DOUT15, 17, 8, 15, 1, false) MUX_CFG(DA850, VPIF_CLKO2, 19, 12, 15, 1, false) MUX_CFG(DA850, VPIF_CLKO3, 19, 20, 15, 1, false) #endif }; const short da850_i2c0_pins[] __initconst = { DA850_I2C0_SDA, DA850_I2C0_SCL, -1 }; const short da850_i2c1_pins[] __initconst = { DA850_I2C1_SCL, DA850_I2C1_SDA, -1 }; const short da850_lcdcntl_pins[] __initconst = { DA850_LCD_D_0, DA850_LCD_D_1, DA850_LCD_D_2, DA850_LCD_D_3, DA850_LCD_D_4, DA850_LCD_D_5, DA850_LCD_D_6, DA850_LCD_D_7, DA850_LCD_D_8, DA850_LCD_D_9, DA850_LCD_D_10, DA850_LCD_D_11, DA850_LCD_D_12, DA850_LCD_D_13, DA850_LCD_D_14, DA850_LCD_D_15, DA850_LCD_PCLK, DA850_LCD_HSYNC, DA850_LCD_VSYNC, DA850_NLCD_AC_ENB_CS, -1 }; const short da850_vpif_capture_pins[] __initconst = { DA850_VPIF_DIN0, DA850_VPIF_DIN1, DA850_VPIF_DIN2, DA850_VPIF_DIN3, DA850_VPIF_DIN4, DA850_VPIF_DIN5, DA850_VPIF_DIN6, DA850_VPIF_DIN7, DA850_VPIF_DIN8, DA850_VPIF_DIN9, DA850_VPIF_DIN10, DA850_VPIF_DIN11, DA850_VPIF_DIN12, DA850_VPIF_DIN13, DA850_VPIF_DIN14, DA850_VPIF_DIN15, DA850_VPIF_CLKIN0, DA850_VPIF_CLKIN1, DA850_VPIF_CLKIN2, DA850_VPIF_CLKIN3, -1 }; const short da850_vpif_display_pins[] __initconst = { DA850_VPIF_DOUT0, DA850_VPIF_DOUT1, DA850_VPIF_DOUT2, DA850_VPIF_DOUT3, DA850_VPIF_DOUT4, DA850_VPIF_DOUT5, DA850_VPIF_DOUT6, DA850_VPIF_DOUT7, DA850_VPIF_DOUT8, DA850_VPIF_DOUT9, DA850_VPIF_DOUT10, DA850_VPIF_DOUT11, DA850_VPIF_DOUT12, DA850_VPIF_DOUT13, DA850_VPIF_DOUT14, DA850_VPIF_DOUT15, DA850_VPIF_CLKO2, DA850_VPIF_CLKO3, -1 }; static struct map_desc da850_io_desc[] = { { .virtual = IO_VIRT, .pfn = __phys_to_pfn(IO_PHYS), .length = IO_SIZE, .type = MT_DEVICE }, { .virtual = DA8XX_CP_INTC_VIRT, .pfn = __phys_to_pfn(DA8XX_CP_INTC_BASE), .length = DA8XX_CP_INTC_SIZE, .type = MT_DEVICE }, }; /* Contents of JTAG ID register used to identify exact cpu type */ static struct davinci_id da850_ids[] = { { .variant = 0x0, .part_no = 0xb7d1, .manufacturer = 0x017, /* 0x02f >> 1 */ .cpu_id = DAVINCI_CPU_ID_DA850, .name = "da850/omap-l138", }, { .variant = 0x1, .part_no = 0xb7d1, .manufacturer = 0x017, /* 0x02f >> 1 */ .cpu_id = DAVINCI_CPU_ID_DA850, .name = "da850/omap-l138/am18x", }, }; /* * Bottom half of timer 0 is used for clock_event, top half for * clocksource. 
*/ static const struct davinci_timer_cfg da850_timer_cfg = { .reg = DEFINE_RES_IO(DA8XX_TIMER64P0_BASE, SZ_4K), .irq = { DEFINE_RES_IRQ(DAVINCI_INTC_IRQ(IRQ_DA8XX_TINT12_0)), DEFINE_RES_IRQ(DAVINCI_INTC_IRQ(IRQ_DA8XX_TINT34_0)), }, }; #ifdef CONFIG_CPU_FREQ /* * Notes: * According to the TRM, minimum PLLM results in maximum power savings. * The OPP definitions below should keep the PLLM as low as possible. * * The output of the PLLM must be between 300 to 600 MHz. */ struct da850_opp { unsigned int freq; /* in KHz */ unsigned int prediv; unsigned int mult; unsigned int postdiv; unsigned int cvdd_min; /* in uV */ unsigned int cvdd_max; /* in uV */ }; static const struct da850_opp da850_opp_456 = { .freq = 456000, .prediv = 1, .mult = 19, .postdiv = 1, .cvdd_min = 1300000, .cvdd_max = 1350000, }; static const struct da850_opp da850_opp_408 = { .freq = 408000, .prediv = 1, .mult = 17, .postdiv = 1, .cvdd_min = 1300000, .cvdd_max = 1350000, }; static const struct da850_opp da850_opp_372 = { .freq = 372000, .prediv = 2, .mult = 31, .postdiv = 1, .cvdd_min = 1200000, .cvdd_max = 1320000, }; static const struct da850_opp da850_opp_300 = { .freq = 300000, .prediv = 1, .mult = 25, .postdiv = 2, .cvdd_min = 1200000, .cvdd_max = 1320000, }; static const struct da850_opp da850_opp_200 = { .freq = 200000, .prediv = 1, .mult = 25, .postdiv = 3, .cvdd_min = 1100000, .cvdd_max = 1160000, }; static const struct da850_opp da850_opp_96 = { .freq = 96000, .prediv = 1, .mult = 20, .postdiv = 5, .cvdd_min = 1000000, .cvdd_max = 1050000, }; #define OPP(freq) \ { \ .driver_data = (unsigned int) &da850_opp_##freq, \ .frequency = freq * 1000, \ } static struct cpufreq_frequency_table da850_freq_table[] = { OPP(456), OPP(408), OPP(372), OPP(300), OPP(200), OPP(96), { .driver_data = 0, .frequency = CPUFREQ_TABLE_END, }, }; #ifdef CONFIG_REGULATOR static int da850_set_voltage(unsigned int index); static int da850_regulator_init(void); #endif static struct davinci_cpufreq_config cpufreq_info = { .freq_table = da850_freq_table, #ifdef CONFIG_REGULATOR .init = da850_regulator_init, .set_voltage = da850_set_voltage, #endif }; #ifdef CONFIG_REGULATOR static struct regulator *cvdd; static int da850_set_voltage(unsigned int index) { struct da850_opp *opp; if (!cvdd) return -ENODEV; opp = (struct da850_opp *) cpufreq_info.freq_table[index].driver_data; return regulator_set_voltage(cvdd, opp->cvdd_min, opp->cvdd_max); } static int da850_regulator_init(void) { cvdd = regulator_get(NULL, "cvdd"); if (WARN(IS_ERR(cvdd), "Unable to obtain voltage regulator for CVDD;" " voltage scaling unsupported\n")) { return PTR_ERR(cvdd); } return 0; } #endif static struct platform_device da850_cpufreq_device = { .name = "cpufreq-davinci", .dev = { .platform_data = &cpufreq_info, }, .id = -1, }; unsigned int da850_max_speed = 300000; int da850_register_cpufreq(char *async_clk) { int i; /* cpufreq driver can help keep an "async" clock constant */ if (async_clk) clk_add_alias("async", da850_cpufreq_device.name, async_clk, NULL); for (i = 0; i < ARRAY_SIZE(da850_freq_table); i++) { if (da850_freq_table[i].frequency <= da850_max_speed) { cpufreq_info.freq_table = &da850_freq_table[i]; break; } } return platform_device_register(&da850_cpufreq_device); } #else int __init da850_register_cpufreq(char *async_clk) { return 0; } #endif /* VPIF resource, platform data */ static u64 da850_vpif_dma_mask = DMA_BIT_MASK(32); static struct resource da850_vpif_resource[] = { { .start = DA8XX_VPIF_BASE, .end = DA8XX_VPIF_BASE + 0xfff, .flags = IORESOURCE_MEM, } 
}; static struct platform_device da850_vpif_dev = { .name = "vpif", .id = -1, .dev = { .dma_mask = &da850_vpif_dma_mask, .coherent_dma_mask = DMA_BIT_MASK(32), }, .resource = da850_vpif_resource, .num_resources = ARRAY_SIZE(da850_vpif_resource), }; static struct resource da850_vpif_display_resource[] = { { .start = DAVINCI_INTC_IRQ(IRQ_DA850_VPIFINT), .end = DAVINCI_INTC_IRQ(IRQ_DA850_VPIFINT), .flags = IORESOURCE_IRQ, }, }; static struct platform_device da850_vpif_display_dev = { .name = "vpif_display", .id = -1, .dev = { .dma_mask = &da850_vpif_dma_mask, .coherent_dma_mask = DMA_BIT_MASK(32), }, .resource = da850_vpif_display_resource, .num_resources = ARRAY_SIZE(da850_vpif_display_resource), }; static struct resource da850_vpif_capture_resource[] = { { .start = DAVINCI_INTC_IRQ(IRQ_DA850_VPIFINT), .end = DAVINCI_INTC_IRQ(IRQ_DA850_VPIFINT), .flags = IORESOURCE_IRQ, }, { .start = DAVINCI_INTC_IRQ(IRQ_DA850_VPIFINT), .end = DAVINCI_INTC_IRQ(IRQ_DA850_VPIFINT), .flags = IORESOURCE_IRQ, }, }; static struct platform_device da850_vpif_capture_dev = { .name = "vpif_capture", .id = -1, .dev = { .dma_mask = &da850_vpif_dma_mask, .coherent_dma_mask = DMA_BIT_MASK(32), }, .resource = da850_vpif_capture_resource, .num_resources = ARRAY_SIZE(da850_vpif_capture_resource), }; int __init da850_register_vpif(void) { return platform_device_register(&da850_vpif_dev); } int __init da850_register_vpif_display(struct vpif_display_config *display_config) { da850_vpif_display_dev.dev.platform_data = display_config; return platform_device_register(&da850_vpif_display_dev); } int __init da850_register_vpif_capture(struct vpif_capture_config *capture_config) { da850_vpif_capture_dev.dev.platform_data = capture_config; return platform_device_register(&da850_vpif_capture_dev); } static struct davinci_gpio_platform_data da850_gpio_platform_data = { .no_auto_base = true, .base = 0, .ngpio = 144, }; int __init da850_register_gpio(void) { return da8xx_register_gpio(&da850_gpio_platform_data); } static const struct davinci_soc_info davinci_soc_info_da850 = { .io_desc = da850_io_desc, .io_desc_num = ARRAY_SIZE(da850_io_desc), .jtag_id_reg = DA8XX_SYSCFG0_BASE + DA8XX_JTAG_ID_REG, .ids = da850_ids, .ids_num = ARRAY_SIZE(da850_ids), .pinmux_base = DA8XX_SYSCFG0_BASE + 0x120, .pinmux_pins = da850_pins, .pinmux_pins_num = ARRAY_SIZE(da850_pins), .emac_pdata = &da8xx_emac_pdata, .sram_dma = DA8XX_SHARED_RAM_BASE, .sram_len = SZ_128K, }; void __init da850_init(void) { davinci_common_init(&davinci_soc_info_da850); da8xx_syscfg0_base = ioremap(DA8XX_SYSCFG0_BASE, SZ_4K); if (WARN(!da8xx_syscfg0_base, "Unable to map syscfg0 module")) return; da8xx_syscfg1_base = ioremap(DA8XX_SYSCFG1_BASE, SZ_4K); WARN(!da8xx_syscfg1_base, "Unable to map syscfg1 module"); } static const struct davinci_cp_intc_config da850_cp_intc_config = { .reg = { .start = DA8XX_CP_INTC_BASE, .end = DA8XX_CP_INTC_BASE + SZ_8K - 1, .flags = IORESOURCE_MEM, }, .num_irqs = DA850_N_CP_INTC_IRQ, }; void __init da850_init_irq(void) { davinci_cp_intc_init(&da850_cp_intc_config); } void __init da850_init_time(void) { void __iomem *pll0; struct regmap *cfgchip; struct clk *clk; int rv; clk_register_fixed_rate(NULL, "ref_clk", NULL, 0, DA850_REF_FREQ); pll0 = ioremap(DA8XX_PLL0_BASE, SZ_4K); cfgchip = da8xx_get_cfgchip(); da850_pll0_init(NULL, pll0, cfgchip); clk = clk_get(NULL, "timer0"); if (WARN_ON(IS_ERR(clk))) { pr_err("Unable to get the timer clock\n"); return; } rv = davinci_timer_register(clk, &da850_timer_cfg); WARN(rv, "Unable to register the timer: %d\n", rv); 
} static struct resource da850_pll1_resources[] = { { .start = DA850_PLL1_BASE, .end = DA850_PLL1_BASE + SZ_4K - 1, .flags = IORESOURCE_MEM, }, }; static struct davinci_pll_platform_data da850_pll1_pdata; static struct platform_device da850_pll1_device = { .name = "da850-pll1", .id = -1, .resource = da850_pll1_resources, .num_resources = ARRAY_SIZE(da850_pll1_resources), .dev = { .platform_data = &da850_pll1_pdata, }, }; static struct resource da850_psc0_resources[] = { { .start = DA8XX_PSC0_BASE, .end = DA8XX_PSC0_BASE + SZ_4K - 1, .flags = IORESOURCE_MEM, }, }; static struct platform_device da850_psc0_device = { .name = "da850-psc0", .id = -1, .resource = da850_psc0_resources, .num_resources = ARRAY_SIZE(da850_psc0_resources), }; static struct resource da850_psc1_resources[] = { { .start = DA8XX_PSC1_BASE, .end = DA8XX_PSC1_BASE + SZ_4K - 1, .flags = IORESOURCE_MEM, }, }; static struct platform_device da850_psc1_device = { .name = "da850-psc1", .id = -1, .resource = da850_psc1_resources, .num_resources = ARRAY_SIZE(da850_psc1_resources), }; static struct da8xx_cfgchip_clk_platform_data da850_async1_pdata; static struct platform_device da850_async1_clksrc_device = { .name = "da850-async1-clksrc", .id = -1, .dev = { .platform_data = &da850_async1_pdata, }, }; static struct da8xx_cfgchip_clk_platform_data da850_async3_pdata; static struct platform_device da850_async3_clksrc_device = { .name = "da850-async3-clksrc", .id = -1, .dev = { .platform_data = &da850_async3_pdata, }, }; static struct da8xx_cfgchip_clk_platform_data da850_tbclksync_pdata; static struct platform_device da850_tbclksync_device = { .name = "da830-tbclksync", .id = -1, .dev = { .platform_data = &da850_tbclksync_pdata, }, }; void __init da850_register_clocks(void) { /* PLL0 is registered in da850_init_time() */ da850_pll1_pdata.cfgchip = da8xx_get_cfgchip(); platform_device_register(&da850_pll1_device); da850_async1_pdata.cfgchip = da8xx_get_cfgchip(); platform_device_register(&da850_async1_clksrc_device); da850_async3_pdata.cfgchip = da8xx_get_cfgchip(); platform_device_register(&da850_async3_clksrc_device); platform_device_register(&da850_psc0_device); platform_device_register(&da850_psc1_device); da850_tbclksync_pdata.cfgchip = da8xx_get_cfgchip(); platform_device_register(&da850_tbclksync_device); }