path: root/drivers/clocksource/vf_pit_timer.c
blob: 0f92089ec08c79b23508641e507ee6dc86062e88 (plain)
/*
 * Copyright 2012-2013 Freescale Semiconductor, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 */

#include <linux/interrupt.h>
#include <linux/clockchips.h>
#include <linux/clk.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/sched_clock.h>

/*
 * Each PIT channel occupies 0x10 bytes of register space
 */
#define PITMCR		0x00
#define PIT0_OFFSET	0x100
#define PITn_OFFSET(n)	(PIT0_OFFSET + 0x10 * (n))
#define PITLDVAL	0x00
#define PITCVAL		0x04
#define PITTCTRL	0x08
#define PITTFLG		0x0c
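/*
 * Example: PITn_OFFSET(2) = 0x120 and PITn_OFFSET(3) = 0x130; LDVAL,
 * CVAL, TCTRL and TFLG above are offsets from that per-channel base.
 */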

#define PITMCR_MDIS	(0x1 << 1)

#define PITTCTRL_TEN	(0x1 << 0)
#define PITTCTRL_TIE	(0x1 << 1)
#define PITCTRL_CHN	(0x1 << 2)

#define PITTFLG_TIF	0x1

static void __iomem *clksrc_base;
static void __iomem *clkevt_base;
static unsigned long cycle_per_jiffy;

static inline void pit_timer_enable(void)
{
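	/*
	 * TEN starts the down counter; TIE additionally enables the
	 * timer interrupt. Both bits live in this channel's PITTCTRL.
	 */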
	__raw_writel(PITTCTRL_TEN | PITTCTRL_TIE, clkevt_base + PITTCTRL);
}

static inline void pit_timer_disable(void)
{
	__raw_writel(0, clkevt_base + PITTCTRL);
}

static inline void pit_irq_acknowledge(void)
{
	__raw_writel(PITTFLG_TIF, clkevt_base + PITTFLG);
}

static u64 notrace pit_read_sched_clock(void)
{
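	/* Complement the down counter so sched_clock() sees it count up */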
	return ~__raw_readl(clksrc_base + PITCVAL);
}

static int __init pit_clocksource_init(unsigned long rate)
{
	/* set the max load value and start the clock source counter */
	__raw_writel(0, clksrc_base + PITTCTRL);
	__raw_writel(~0UL, clksrc_base + PITLDVAL);
	__raw_writel(PITTCTRL_TEN, clksrc_base + PITTCTRL);

	sched_clock_register(pit_read_sched_clock, 32, rate);
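	/*
	 * clocksource_mmio_readl_down complements the raw PITCVAL value,
	 * so the down counter is exposed as an increasing clocksource.
	 */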
	return clocksource_mmio_init(clksrc_base + PITCVAL, "vf-pit", rate,
			300, 32, clocksource_mmio_readl_down);
}

static int pit_set_next_event(unsigned long delta,
				struct clock_event_device *unused)
{
	/*
	 * Writing a new value to the PITLDVAL register does not restart
	 * the timer. To abort the current cycle and start a period with
	 * the new value, the timer must be disabled and enabled again.
	 * PITLDVAL must be set to delta minus one, as the hardware
	 * requires.
	 */
	pit_timer_disable();
	__raw_writel(delta - 1, clkevt_base + PITLDVAL);
	pit_timer_enable();

	return 0;
}

static int pit_shutdown(struct clock_event_device *evt)
{
	pit_timer_disable();
	return 0;
}

static int pit_set_periodic(struct clock_event_device *evt)
{
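	/*
	 * One jiffy worth of cycles is enough: the hardware reloads
	 * PITLDVAL automatically whenever the counter reaches zero, so
	 * periodic mode needs no re-arming from the interrupt handler.
	 */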
	pit_set_next_event(cycle_per_jiffy, evt);
	return 0;
}

static irqreturn_t pit_timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	pit_irq_acknowledge();

	/*
	 * The PIT hardware has no native one-shot mode: when PITCVAL
	 * reaches zero it raises an interrupt, reloads the counter from
	 * PITLDVAL, and keeps counting. Software must therefore disable
	 * the timer to stop the reload loop when in ONESHOT mode.
	 */
	if (likely(clockevent_state_oneshot(evt)))
		pit_timer_disable();

	evt->event_handler(evt);

	return IRQ_HANDLED;
}

static struct clock_event_device clockevent_pit = {
	.name		= "VF pit timer",
	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
	.set_state_shutdown = pit_shutdown,
	.set_state_periodic = pit_set_periodic,
	.set_next_event	= pit_set_next_event,
	.rating		= 300,
};

static struct irqaction pit_timer_irq = {
	.name		= "VF pit timer",
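	/* IRQF_TIMER: timer interrupt, not force-threaded and not
	 * disabled during suspend; IRQF_IRQPOLL: usable by irqpoll. */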
	.flags		= IRQF_TIMER | IRQF_IRQPOLL,
	.handler	= pit_timer_interrupt,
	.dev_id		= &clockevent_pit,
};

static int __init pit_clockevent_init(unsigned long rate, int irq)
{
	__raw_writel(0, clkevt_base + PITTCTRL);
	__raw_writel(PITTFLG_TIF, clkevt_base + PITTFLG);

	BUG_ON(setup_irq(irq, &pit_timer_irq));

	clockevent_pit.cpumask = cpumask_of(0);
	clockevent_pit.irq = irq;
	/*
	 * The value for the LDVAL register trigger is calculated as:
	 *	LDVAL trigger = (period / clock period) - 1
	 * The PIT is a 32-bit down-counting timer that generates an
	 * interrupt when the counter reaches 0, so the minimal LDVAL
	 * trigger value is 1. The min_delta is therefore the minimal
	 * LDVAL trigger value + 1, and the max_delta is the full 32 bits.
	 */
	clockevents_config_and_register(&clockevent_pit, rate, 2, 0xffffffff);

	return 0;
}

static int __init pit_timer_init(struct device_node *np)
{
	struct clk *pit_clk;
	void __iomem *timer_base;
	unsigned long clk_rate;
	int irq, ret;

	timer_base = of_iomap(np, 0);
	if (!timer_base) {
		pr_err("Failed to iomap\n");
		return -ENXIO;
	}

	/*
	 * PIT0 and PIT1 can be chained to build a 64-bit timer, so use
	 * PIT2 as the clocksource and PIT3 as the clockevent device,
	 * leaving PIT0 and PIT1 free for anyone else who needs them.
	 */
	clksrc_base = timer_base + PITn_OFFSET(2);
	clkevt_base = timer_base + PITn_OFFSET(3);

	irq = irq_of_parse_and_map(np, 0);
	if (irq <= 0)
		return -EINVAL;

	pit_clk = of_clk_get(np, 0);
	if (IS_ERR(pit_clk))
		return PTR_ERR(pit_clk);

	ret = clk_prepare_enable(pit_clk);
	if (ret)
		return ret;

	clk_rate = clk_get_rate(pit_clk);
	cycle_per_jiffy = clk_rate / (HZ);
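	/*
	 * Worked example with illustrative numbers (not read from the
	 * hardware): a 66 MHz bus clock and HZ = 100 give
	 * cycle_per_jiffy = 66000000 / 100 = 660000, so periodic mode
	 * programs PITLDVAL = 659999.
	 */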

	/* enable the pit module by clearing the module disable (MDIS) bit */
	__raw_writel(~PITMCR_MDIS, timer_base + PITMCR);

	ret = pit_clocksource_init(clk_rate);
	if (ret)
		return ret;

	return pit_clockevent_init(clk_rate, irq);
}
TIMER_OF_DECLARE(vf610, "fsl,vf610-pit", pit_timer_init);
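
/*
 * Minimal sketch of a device tree node this driver would bind to; the
 * unit address, interrupt and clock specifiers are assumptions for
 * illustration, not copied from a particular board:
 *
 *	pit@40037000 {
 *		compatible = "fsl,vf610-pit";
 *		reg = <0x40037000 0x1000>;
 *		interrupts = <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>;
 *		clocks = <&clks VF610_CLK_PIT>;
 *	};
 */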
ass="n">u8 disable_elevator; u8 force_scan_complete; u8 scsi_transfer_mode; u8 force_narrow; u8 rebuild_priority; u8 expand_priority; u8 host_sdb_asic_fix; u8 pdpi_burst_from_host_disabled; char software_name[64]; char hardware_name[32]; u8 bridge_revision; u8 snapshot_priority; u32 os_specific; u8 post_prompt_timeout; u8 automatic_drive_slamming; u8 reserved1; u8 nvram_flags; #define HBA_MODE_ENABLED_FLAG (1 << 3) u8 cache_nvram_flags; u8 drive_config_flags; u16 reserved2; u8 temp_warning_level; u8 temp_shutdown_level; u8 temp_condition_reset; u8 max_coalesce_commands; u32 max_coalesce_delay; u8 orca_password[4]; u8 access_id[16]; u8 reserved[356]; }; #pragma pack() struct ctlr_info { int ctlr; char devname[8]; char *product_name; struct pci_dev *pdev; u32 board_id; void __iomem *vaddr; unsigned long paddr; int nr_cmds; /* Number of commands allowed on this controller */ struct CfgTable __iomem *cfgtable; int interrupts_enabled; int max_commands; atomic_t commands_outstanding; # define PERF_MODE_INT 0 # define DOORBELL_INT 1 # define SIMPLE_MODE_INT 2 # define MEMQ_MODE_INT 3 unsigned int intr[MAX_REPLY_QUEUES]; unsigned int msix_vector; unsigned int msi_vector; int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */ struct access_method access; char hba_mode_enabled; /* queue and queue Info */ struct list_head reqQ; struct list_head cmpQ; unsigned int Qdepth; unsigned int maxSG; spinlock_t lock; int maxsgentries; u8 max_cmd_sg_entries; int chainsize; struct SGDescriptor **cmd_sg_list; /* pointers to command and error info pool */ struct CommandList *cmd_pool; dma_addr_t cmd_pool_dhandle; struct io_accel1_cmd *ioaccel_cmd_pool; dma_addr_t ioaccel_cmd_pool_dhandle; struct io_accel2_cmd *ioaccel2_cmd_pool; dma_addr_t ioaccel2_cmd_pool_dhandle; struct ErrorInfo *errinfo_pool; dma_addr_t errinfo_pool_dhandle; unsigned long *cmd_pool_bits; int scan_finished; spinlock_t scan_lock; wait_queue_head_t scan_wait_queue; struct Scsi_Host *scsi_host; spinlock_t devlock; /* to protect hba[ctlr]->dev[]; */ int ndevices; /* number of used elements in .dev[] array. */ struct hpsa_scsi_dev_t *dev[HPSA_MAX_DEVICES]; /* * Performant mode tables. */ u32 trans_support; u32 trans_offset; struct TransTable_struct __iomem *transtable; unsigned long transMethod; /* cap concurrent passthrus at some reasonable maximum */ #define HPSA_MAX_CONCURRENT_PASSTHRUS (20) spinlock_t passthru_count_lock; /* protects passthru_count */ int passthru_count; /* * Performant mode completion buffers */ size_t reply_queue_size; struct reply_queue_buffer reply_queue[MAX_REPLY_QUEUES]; u8 nreply_queues; u32 *blockFetchTable; u32 *ioaccel1_blockFetchTable; u32 *ioaccel2_blockFetchTable; u32 __iomem *ioaccel2_bft2_regs; unsigned char *hba_inquiry_data; u32 driver_support; u32 fw_support; int ioaccel_support; int ioaccel_maxsg; u64 last_intr_timestamp; u32 last_heartbeat; u64 last_heartbeat_timestamp; u32 heartbeat_sample_interval; atomic_t firmware_flash_in_progress; u32 __percpu *lockup_detected; struct delayed_work monitor_ctlr_work; int remove_in_progress; u32 fifo_recently_full; /* Address of h->q[x] is passed to intr handler to know which queue */ u8 q[MAX_REPLY_QUEUES]; u32 TMFSupportFlags; /* cache what task mgmt funcs are supported. 
*/ #define HPSATMF_BITS_SUPPORTED (1 << 0) #define HPSATMF_PHYS_LUN_RESET (1 << 1) #define HPSATMF_PHYS_NEX_RESET (1 << 2) #define HPSATMF_PHYS_TASK_ABORT (1 << 3) #define HPSATMF_PHYS_TSET_ABORT (1 << 4) #define HPSATMF_PHYS_CLEAR_ACA (1 << 5) #define HPSATMF_PHYS_CLEAR_TSET (1 << 6) #define HPSATMF_PHYS_QRY_TASK (1 << 7) #define HPSATMF_PHYS_QRY_TSET (1 << 8) #define HPSATMF_PHYS_QRY_ASYNC (1 << 9) #define HPSATMF_MASK_SUPPORTED (1 << 16) #define HPSATMF_LOG_LUN_RESET (1 << 17) #define HPSATMF_LOG_NEX_RESET (1 << 18) #define HPSATMF_LOG_TASK_ABORT (1 << 19) #define HPSATMF_LOG_TSET_ABORT (1 << 20) #define HPSATMF_LOG_CLEAR_ACA (1 << 21) #define HPSATMF_LOG_CLEAR_TSET (1 << 22) #define HPSATMF_LOG_QRY_TASK (1 << 23) #define HPSATMF_LOG_QRY_TSET (1 << 24) #define HPSATMF_LOG_QRY_ASYNC (1 << 25) u32 events; #define CTLR_STATE_CHANGE_EVENT (1 << 0) #define CTLR_ENCLOSURE_HOT_PLUG_EVENT (1 << 1) #define CTLR_STATE_CHANGE_EVENT_PHYSICAL_DRV (1 << 4) #define CTLR_STATE_CHANGE_EVENT_LOGICAL_DRV (1 << 5) #define CTLR_STATE_CHANGE_EVENT_REDUNDANT_CNTRL (1 << 6) #define CTLR_STATE_CHANGE_EVENT_AIO_ENABLED_DISABLED (1 << 30) #define CTLR_STATE_CHANGE_EVENT_AIO_CONFIG_CHANGE (1 << 31) #define RESCAN_REQUIRED_EVENT_BITS \ (CTLR_ENCLOSURE_HOT_PLUG_EVENT | \ CTLR_STATE_CHANGE_EVENT_PHYSICAL_DRV | \ CTLR_STATE_CHANGE_EVENT_LOGICAL_DRV | \ CTLR_STATE_CHANGE_EVENT_AIO_ENABLED_DISABLED | \ CTLR_STATE_CHANGE_EVENT_AIO_CONFIG_CHANGE) spinlock_t offline_device_lock; struct list_head offline_device_list; int acciopath_status; int drv_req_rescan; /* flag for driver to request rescan event */ int raid_offload_debug; }; struct offline_device_entry { unsigned char scsi3addr[8]; struct list_head offline_list; }; #define HPSA_ABORT_MSG 0 #define HPSA_DEVICE_RESET_MSG 1 #define HPSA_RESET_TYPE_CONTROLLER 0x00 #define HPSA_RESET_TYPE_BUS 0x01 #define HPSA_RESET_TYPE_TARGET 0x03 #define HPSA_RESET_TYPE_LUN 0x04 #define HPSA_MSG_SEND_RETRY_LIMIT 10 #define HPSA_MSG_SEND_RETRY_INTERVAL_MSECS (10000) /* Maximum time in seconds driver will wait for command completions * when polling before giving up. */ #define HPSA_MAX_POLL_TIME_SECS (20) /* During SCSI error recovery, HPSA_TUR_RETRY_LIMIT defines * how many times to retry TEST UNIT READY on a device * while waiting for it to become ready before giving up. * HPSA_MAX_WAIT_INTERVAL_SECS is the max wait interval * between sending TURs while waiting for a device * to become ready. */ #define HPSA_TUR_RETRY_LIMIT (20) #define HPSA_MAX_WAIT_INTERVAL_SECS (30) /* HPSA_BOARD_READY_WAIT_SECS is how long to wait for a board * to become ready, in seconds, before giving up on it. * HPSA_BOARD_READY_POLL_INTERVAL_MSECS * is how long to wait * between polling the board to see if it is ready, in * milliseconds. HPSA_BOARD_READY_POLL_INTERVAL and * HPSA_BOARD_READY_ITERATIONS are derived from those. 
*/ #define HPSA_BOARD_READY_WAIT_SECS (120) #define HPSA_BOARD_NOT_READY_WAIT_SECS (100) #define HPSA_BOARD_READY_POLL_INTERVAL_MSECS (100) #define HPSA_BOARD_READY_POLL_INTERVAL \ ((HPSA_BOARD_READY_POLL_INTERVAL_MSECS * HZ) / 1000) #define HPSA_BOARD_READY_ITERATIONS \ ((HPSA_BOARD_READY_WAIT_SECS * 1000) / \ HPSA_BOARD_READY_POLL_INTERVAL_MSECS) #define HPSA_BOARD_NOT_READY_ITERATIONS \ ((HPSA_BOARD_NOT_READY_WAIT_SECS * 1000) / \ HPSA_BOARD_READY_POLL_INTERVAL_MSECS) #define HPSA_POST_RESET_PAUSE_MSECS (3000) #define HPSA_POST_RESET_NOOP_RETRIES (12) /* Defining the diffent access_menthods */ /* * Memory mapped FIFO interface (SMART 53xx cards) */ #define SA5_DOORBELL 0x20 #define SA5_REQUEST_PORT_OFFSET 0x40 #define SA5_REPLY_INTR_MASK_OFFSET 0x34 #define SA5_REPLY_PORT_OFFSET 0x44 #define SA5_INTR_STATUS 0x30 #define SA5_SCRATCHPAD_OFFSET 0xB0 #define SA5_CTCFG_OFFSET 0xB4 #define SA5_CTMEM_OFFSET 0xB8 #define SA5_INTR_OFF 0x08 #define SA5B_INTR_OFF 0x04 #define SA5_INTR_PENDING 0x08 #define SA5B_INTR_PENDING 0x04 #define FIFO_EMPTY 0xffffffff #define HPSA_FIRMWARE_READY 0xffff0000 /* value in scratchpad register */ #define HPSA_ERROR_BIT 0x02 /* Performant mode flags */ #define SA5_PERF_INTR_PENDING 0x04 #define SA5_PERF_INTR_OFF 0x05 #define SA5_OUTDB_STATUS_PERF_BIT 0x01 #define SA5_OUTDB_CLEAR_PERF_BIT 0x01 #define SA5_OUTDB_CLEAR 0xA0 #define SA5_OUTDB_CLEAR_PERF_BIT 0x01 #define SA5_OUTDB_STATUS 0x9C #define HPSA_INTR_ON 1 #define HPSA_INTR_OFF 0 /* * Inbound Post Queue offsets for IO Accelerator Mode 2 */ #define IOACCEL2_INBOUND_POSTQ_32 0x48 #define IOACCEL2_INBOUND_POSTQ_64_LOW 0xd0 #define IOACCEL2_INBOUND_POSTQ_64_HI 0xd4 /* Send the command to the hardware */ static void SA5_submit_command(struct ctlr_info *h, struct CommandList *c) { writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET); (void) readl(h->vaddr + SA5_SCRATCHPAD_OFFSET); } static void SA5_submit_command_no_read(struct ctlr_info *h, struct CommandList *c) { writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET); } static void SA5_submit_command_ioaccel2(struct ctlr_info *h, struct CommandList *c) { if (c->cmd_type == CMD_IOACCEL2) writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32); else writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET); } /* * This card is the opposite of the other cards. * 0 turns interrupts on... * 0x08 turns them off... */ static void SA5_intr_mask(struct ctlr_info *h, unsigned long val) { if (val) { /* Turn interrupts on */ h->interrupts_enabled = 1; writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); } else { /* Turn them off */ h->interrupts_enabled = 0; writel(SA5_INTR_OFF, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); } } static void SA5_performant_intr_mask(struct ctlr_info *h, unsigned long val) { if (val) { /* turn on interrupts */ h->interrupts_enabled = 1; writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); } else { h->interrupts_enabled = 0; writel(SA5_PERF_INTR_OFF, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); } } static unsigned long SA5_performant_completed(struct ctlr_info *h, u8 q) { struct reply_queue_buffer *rq = &h->reply_queue[q]; unsigned long register_value = FIFO_EMPTY; /* msi auto clears the interrupt pending bit. */ if (!(h->msi_vector || h->msix_vector)) { /* flush the controller write of the reply queue by reading * outbound doorbell status register. 
*/ register_value = readl(h->vaddr + SA5_OUTDB_STATUS); writel(SA5_OUTDB_CLEAR_PERF_BIT, h->vaddr + SA5_OUTDB_CLEAR); /* Do a read in order to flush the write to the controller * (as per spec.) */ register_value = readl(h->vaddr + SA5_OUTDB_STATUS); } if ((rq->head[rq->current_entry] & 1) == rq->wraparound) { register_value = rq->head[rq->current_entry]; rq->current_entry++; atomic_dec(&h->commands_outstanding); } else { register_value = FIFO_EMPTY; } /* Check for wraparound */ if (rq->current_entry == h->max_commands) { rq->current_entry = 0; rq->wraparound ^= 1; } return register_value; } /* * Returns true if fifo is full. * */ static unsigned long SA5_fifo_full(struct ctlr_info *h) { return atomic_read(&h->commands_outstanding) >= h->max_commands; } /* * returns value read from hardware. * returns FIFO_EMPTY if there is nothing to read */ static unsigned long SA5_completed(struct ctlr_info *h, __attribute__((unused)) u8 q) { unsigned long register_value = readl(h->vaddr + SA5_REPLY_PORT_OFFSET); if (register_value != FIFO_EMPTY) atomic_dec(&h->commands_outstanding); #ifdef HPSA_DEBUG if (register_value != FIFO_EMPTY) dev_dbg(&h->pdev->dev, "Read %lx back from board\n", register_value); else dev_dbg(&h->pdev->dev, "FIFO Empty read\n"); #endif return register_value; } /* * Returns true if an interrupt is pending.. */ static bool SA5_intr_pending(struct ctlr_info *h) { unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS); return register_value & SA5_INTR_PENDING; } static bool SA5_performant_intr_pending(struct ctlr_info *h) { unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS); if (!register_value) return false; if (h->msi_vector || h->msix_vector) return true; /* Read outbound doorbell to flush */ register_value = readl(h->vaddr + SA5_OUTDB_STATUS); return register_value & SA5_OUTDB_STATUS_PERF_BIT; } #define SA5_IOACCEL_MODE1_INTR_STATUS_CMP_BIT 0x100 static bool SA5_ioaccel_mode1_intr_pending(struct ctlr_info *h) { unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS); return (register_value & SA5_IOACCEL_MODE1_INTR_STATUS_CMP_BIT) ? true : false; } #define IOACCEL_MODE1_REPLY_QUEUE_INDEX 0x1A0 #define IOACCEL_MODE1_PRODUCER_INDEX 0x1B8 #define IOACCEL_MODE1_CONSUMER_INDEX 0x1BC #define IOACCEL_MODE1_REPLY_UNUSED 0xFFFFFFFFFFFFFFFFULL static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q) { u64 register_value; struct reply_queue_buffer *rq = &h->reply_queue[q]; BUG_ON(q >= h->nreply_queues); register_value = rq->head[rq->current_entry]; if (register_value != IOACCEL_MODE1_REPLY_UNUSED) { rq->head[rq->current_entry] = IOACCEL_MODE1_REPLY_UNUSED; if (++rq->current_entry == rq->size) rq->current_entry = 0; /* * @todo * * Don't really need to write the new index after each command, * but with current driver design this is easiest. 
*/ wmb(); writel((q << 24) | rq->current_entry, h->vaddr + IOACCEL_MODE1_CONSUMER_INDEX); atomic_dec(&h->commands_outstanding); } return (unsigned long) register_value; } static struct access_method SA5_access = { SA5_submit_command, SA5_intr_mask, SA5_fifo_full, SA5_intr_pending, SA5_completed, }; static struct access_method SA5_ioaccel_mode1_access = { SA5_submit_command, SA5_performant_intr_mask, SA5_fifo_full, SA5_ioaccel_mode1_intr_pending, SA5_ioaccel_mode1_completed, }; static struct access_method SA5_ioaccel_mode2_access = { SA5_submit_command_ioaccel2, SA5_performant_intr_mask, SA5_fifo_full, SA5_performant_intr_pending, SA5_performant_completed, }; static struct access_method SA5_performant_access = { SA5_submit_command, SA5_performant_intr_mask, SA5_fifo_full, SA5_performant_intr_pending, SA5_performant_completed, }; static struct access_method SA5_performant_access_no_read = { SA5_submit_command_no_read, SA5_performant_intr_mask, SA5_fifo_full, SA5_performant_intr_pending, SA5_performant_completed, }; struct board_type { u32 board_id; char *product_name; struct access_method *access; }; #endif /* HPSA_H */