path: root/drivers/net/ethernet/qualcomm/qca_uart.c
/*
 *   Copyright (c) 2011, 2012, Qualcomm Atheros Communications Inc.
 *   Copyright (c) 2017, I2SE GmbH
 *
 *   Permission to use, copy, modify, and/or distribute this software
 *   for any purpose with or without fee is hereby granted, provided
 *   that the above copyright notice and this permission notice appear
 *   in all copies.
 *
 *   THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 *   WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 *   WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
 *   THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
 *   CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
 *   LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
 *   NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 *   CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*   This module implements the Qualcomm Atheros UART protocol for a
 *   kernel-based UART device; it is essentially an Ethernet-to-UART
 *   serial converter.
 */

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_net.h>
#include <linux/sched.h>
#include <linux/serdev.h>
#include <linux/skbuff.h>
#include <linux/types.h>

#include "qca_7k_common.h"

#define QCAUART_DRV_VERSION "0.1.0"
#define QCAUART_DRV_NAME "qcauart"
#define QCAUART_TX_TIMEOUT (1 * HZ)

struct qcauart {
	struct net_device *net_dev;
	spinlock_t lock;			/* transmit lock */
	struct work_struct tx_work;		/* Flushes transmit buffer   */

	struct serdev_device *serdev;		/* attached serial device    */
	struct qcafrm_handle frm_handle;	/* QCA7000 framing state     */
	struct sk_buff *rx_skb;			/* frame being received      */

	unsigned char *tx_head;			/* pointer to next XMIT byte */
	int tx_left;				/* bytes left in XMIT queue  */
	unsigned char *tx_buffer;		/* framed transmit buffer    */
};

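/* serdev receive callback: run every incoming byte through the QCA7000
 * framing state machine and pass each completed frame up the network
 * stack. Returns the number of bytes consumed.
 */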
static int
qca_tty_receive(struct serdev_device *serdev, const unsigned char *data,
		size_t count)
{
	struct qcauart *qca = serdev_device_get_drvdata(serdev);
	struct net_device *netdev = qca->net_dev;
	struct net_device_stats *n_stats = &netdev->stats;
	size_t i;

	if (!qca->rx_skb) {
		qca->rx_skb = netdev_alloc_skb_ip_align(netdev,
							netdev->mtu +
							VLAN_ETH_HLEN);
		if (!qca->rx_skb) {
			n_stats->rx_errors++;
			n_stats->rx_dropped++;
			return 0;
		}
	}

	for (i = 0; i < count; i++) {
		s32 retcode;

		retcode = qcafrm_fsm_decode(&qca->frm_handle,
					    qca->rx_skb->data,
					    skb_tailroom(qca->rx_skb),
					    data[i]);

		switch (retcode) {
		case QCAFRM_GATHER:
		case QCAFRM_NOHEAD:
			break;
		case QCAFRM_NOTAIL:
			netdev_dbg(netdev, "recv: no RX tail\n");
			n_stats->rx_errors++;
			n_stats->rx_dropped++;
			break;
		case QCAFRM_INVLEN:
			netdev_dbg(netdev, "recv: invalid RX length\n");
			n_stats->rx_errors++;
			n_stats->rx_dropped++;
			break;
		default:
			n_stats->rx_packets++;
			n_stats->rx_bytes += retcode;
			skb_put(qca->rx_skb, retcode);
			qca->rx_skb->protocol = eth_type_trans(
						qca->rx_skb, qca->rx_skb->dev);
			qca->rx_skb->ip_summed = CHECKSUM_UNNECESSARY;
			netif_rx_ni(qca->rx_skb);
			qca->rx_skb = netdev_alloc_skb_ip_align(netdev,
								netdev->mtu +
								VLAN_ETH_HLEN);
			if (!qca->rx_skb) {
				netdev_dbg(netdev, "recv: out of RX resources\n");
				n_stats->rx_errors++;
				return i;
			}
		}
	}

	return i;
}

/* Write out any remaining transmit buffer. Scheduled when the serial
 * device can accept more data.
 */
static void qcauart_transmit(struct work_struct *work)
{
	struct qcauart *qca = container_of(work, struct qcauart, tx_work);
	struct net_device_stats *n_stats = &qca->net_dev->stats;
	int written;

	spin_lock_bh(&qca->lock);

	/* First make sure we're connected. */
	if (!netif_running(qca->net_dev)) {
		spin_unlock_bh(&qca->lock);
		return;
	}

	if (qca->tx_left <= 0)  {
		/* Now serial buffer is almost free & we can start
		 * transmission of another packet
		 */
		n_stats->tx_packets++;
		spin_unlock_bh(&qca->lock);
		netif_wake_queue(qca->net_dev);
		return;
	}

	written = serdev_device_write_buf(qca->serdev, qca->tx_head,
					  qca->tx_left);
	if (written > 0) {
		qca->tx_left -= written;
		qca->tx_head += written;
	}
	spin_unlock_bh(&qca->lock);
}

/* Called by the driver when there's room for more data.
 * Schedule the transmit.
 */
static void qca_tty_wakeup(struct serdev_device *serdev)
{
	struct qcauart *qca = serdev_device_get_drvdata(serdev);

	schedule_work(&qca->tx_work);
}

static const struct serdev_device_ops qca_serdev_ops = {
	.receive_buf = qca_tty_receive,
	.write_wakeup = qca_tty_wakeup,
};

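/* The serdev line was already opened during probe, so opening the
 * network device only has to let the stack start queueing packets.
 */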
static int qcauart_netdev_open(struct net_device *dev)
{
	struct qcauart *qca = netdev_priv(dev);

	netif_start_queue(qca->net_dev);

	return 0;
}

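/* Stop the transmit queue, wait for any scheduled transmit work to
 * finish and discard whatever is still pending in the TX buffer.
 */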
static int qcauart_netdev_close(struct net_device *dev)
{
	struct qcauart *qca = netdev_priv(dev);

	netif_stop_queue(dev);
	flush_work(&qca->tx_work);

	spin_lock_bh(&qca->lock);
	qca->tx_left = 0;
	spin_unlock_bh(&qca->lock);

	return 0;
}

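/* Frame the skb into tx_buffer (header, payload padded up to
 * QCAFRM_MIN_LEN if needed, footer) and write as much as the serdev
 * accepts; qcauart_transmit() sends the rest on the next write wakeup.
 */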
static netdev_tx_t
qcauart_netdev_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct net_device_stats *n_stats = &dev->stats;
	struct qcauart *qca = netdev_priv(dev);
	u8 pad_len = 0;
	int written;
	u8 *pos;

	spin_lock(&qca->lock);

	WARN_ON(qca->tx_left);

	if (!netif_running(dev))  {
		spin_unlock(&qca->lock);
		netdev_warn(qca->net_dev, "xmit: iface is down\n");
		goto out;
	}

	pos = qca->tx_buffer;

	if (skb->len < QCAFRM_MIN_LEN)
		pad_len = QCAFRM_MIN_LEN - skb->len;

	pos += qcafrm_create_header(pos, skb->len + pad_len);

	memcpy(pos, skb->data, skb->len);
	pos += skb->len;

	if (pad_len) {
		memset(pos, 0, pad_len);
		pos += pad_len;
	}

	pos += qcafrm_create_footer(pos);

	netif_stop_queue(qca->net_dev);

	written = serdev_device_write_buf(qca->serdev, qca->tx_buffer,
					  pos - qca->tx_buffer);
	if (written > 0) {
		qca->tx_left = (pos - qca->tx_buffer) - written;
		qca->tx_head = qca->tx_buffer + written;
		n_stats->tx_bytes += written;
	}
	spin_unlock(&qca->lock);

	netif_trans_update(dev);
out:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

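/* Watchdog callback: log the stalled transmission and account it as a
 * TX error and drop.
 */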
static void qcauart_netdev_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct qcauart *qca = netdev_priv(dev);

	netdev_info(qca->net_dev, "Transmit timeout at %ld, latency %ld\n",
		    jiffies, dev_trans_start(dev));
	dev->stats.tx_errors++;
	dev->stats.tx_dropped++;
}

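/* ndo_init: allocate the TX staging buffer (header + maximum frame +
 * footer) and the initial RX skb before the device goes live.
 */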
static int qcauart_netdev_init(struct net_device *dev)
{
	struct qcauart *qca = netdev_priv(dev);
	size_t len;

	/* Finish setting up the device info. */
	dev->mtu = QCAFRM_MAX_MTU;
	dev->type = ARPHRD_ETHER;

	len = QCAFRM_HEADER_LEN + QCAFRM_MAX_LEN + QCAFRM_FOOTER_LEN;
	qca->tx_buffer = devm_kmalloc(&qca->serdev->dev, len, GFP_KERNEL);
	if (!qca->tx_buffer)
		return -ENOMEM;

	qca->rx_skb = netdev_alloc_skb_ip_align(qca->net_dev,
						qca->net_dev->mtu +
						VLAN_ETH_HLEN);
	if (!qca->rx_skb)
		return -ENOBUFS;

	return 0;
}

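/* ndo_uninit: free the RX skb; the TX buffer is devm-managed and is
 * released together with the serdev device.
 */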
static void qcauart_netdev_uninit(struct net_device *dev)
{
	struct qcauart *qca = netdev_priv(dev);

	dev_kfree_skb(qca->rx_skb);
}

static const struct net_device_ops qcauart_netdev_ops = {
	.ndo_init = qcauart_netdev_init,
	.ndo_uninit = qcauart_netdev_uninit,
	.ndo_open = qcauart_netdev_open,
	.ndo_stop = qcauart_netdev_close,
	.ndo_start_xmit = qcauart_netdev_xmit,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_tx_timeout = qcauart_netdev_tx_timeout,
	.ndo_validate_addr = eth_validate_addr,
};

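/* One-time net_device configuration applied at probe time. */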
static void qcauart_netdev_setup(struct net_device *dev)
{
	dev->netdev_ops = &qcauart_netdev_ops;
	dev->watchdog_timeo = QCAUART_TX_TIMEOUT;
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->tx_queue_len = 100;

	/* MTU range: 46 - 1500 */
	dev->min_mtu = QCAFRM_MIN_MTU;
	dev->max_mtu = QCAFRM_MAX_MTU;
}

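/* Illustrative device tree fragment (a sketch, not taken from this
 * file): the QCA7000 sits as a child of its UART controller. The node
 * names and the baud rate below are assumptions; only the
 * "qca,qca7000" compatible and the optional "current-speed" property
 * are consumed by this driver.
 *
 *	&uart1 {
 *		status = "okay";
 *
 *		ethernet {
 *			compatible = "qca,qca7000";
 *			current-speed = <115200>;
 *		};
 *	};
 */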
static const struct of_device_id qca_uart_of_match[] = {
	{
	 .compatible = "qca,qca7000",
	},
	{}
};
MODULE_DEVICE_TABLE(of, qca_uart_of_match);

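/* Bind to a serdev device: set up the net_device and the framing state,
 * take the optional "current-speed" and MAC address from the device
 * tree, open and configure the UART and finally register the
 * net_device.
 */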
static int qca_uart_probe(struct serdev_device *serdev)
{
	struct net_device *qcauart_dev = alloc_etherdev(sizeof(struct qcauart));
	struct qcauart *qca;
	u32 speed = 115200;
	int ret;

	if (!qcauart_dev)
		return -ENOMEM;

	qcauart_netdev_setup(qcauart_dev);
	SET_NETDEV_DEV(qcauart_dev, &serdev->dev);

	qca = netdev_priv(qcauart_dev);
	if (!qca) {
		pr_err("qca_uart: Fail to retrieve private structure\n");
		ret = -ENOMEM;
		goto free;
	}
	qca->net_dev = qcauart_dev;
	qca->serdev = serdev;
	qcafrm_fsm_init_uart(&qca->frm_handle);

	spin_lock_init(&qca->lock);
	INIT_WORK(&qca->tx_work, qcauart_transmit);

	of_property_read_u32(serdev->dev.of_node, "current-speed", &speed);

	ret = of_get_mac_address(serdev->dev.of_node, qca->net_dev->dev_addr);
	if (ret) {
		eth_hw_addr_random(qca->net_dev);
		dev_info(&serdev->dev, "Using random MAC address: %pM\n",
			 qca->net_dev->dev_addr);
	}

	netif_carrier_on(qca->net_dev);
	serdev_device_set_drvdata(serdev, qca);
	serdev_device_set_client_ops(serdev, &qca_serdev_ops);

	ret = serdev_device_open(serdev);
	if (ret) {
		dev_err(&serdev->dev, "Unable to open device %s\n",
			qcauart_dev->name);
		goto free;
	}

	speed = serdev_device_set_baudrate(serdev, speed);
	dev_info(&serdev->dev, "Using baudrate: %u\n", speed);

	serdev_device_set_flow_control(serdev, false);

	ret = register_netdev(qcauart_dev);
	if (ret) {
		dev_err(&serdev->dev, "Unable to register net device %s\n",
			qcauart_dev->name);
		serdev_device_close(serdev);
		cancel_work_sync(&qca->tx_work);
		goto free;
	}

	return 0;

free:
	free_netdev(qcauart_dev);
	return ret;
}

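/* Undo probe: unregister the net_device, close the serdev line, stop
 * the deferred transmit work and free the net_device.
 */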
static void qca_uart_remove(struct serdev_device *serdev)
{
	struct qcauart *qca = serdev_device_get_drvdata(serdev);

	unregister_netdev(qca->net_dev);

	/* Flush any pending characters in the driver. */
	serdev_device_close(serdev);
	cancel_work_sync(&qca->tx_work);

	free_netdev(qca->net_dev);
}

static struct serdev_device_driver qca_uart_driver = {
	.probe = qca_uart_probe,
	.remove = qca_uart_remove,
	.driver = {
		.name = QCAUART_DRV_NAME,
		.of_match_table = of_match_ptr(qca_uart_of_match),
	},
};

module_serdev_device_driver(qca_uart_driver);

MODULE_DESCRIPTION("Qualcomm Atheros QCA7000 UART Driver");
MODULE_AUTHOR("Qualcomm Atheros Communications");
MODULE_AUTHOR("Stefan Wahren <stefan.wahren@i2se.com>");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(QCAUART_DRV_VERSION);