aboutsummaryrefslogtreecommitdiffstats
path: root/recipes-kernel/linux/files/clanton.patch
diff options
context:
space:
mode:
Diffstat (limited to 'recipes-kernel/linux/files/clanton.patch')
-rw-r--r--recipes-kernel/linux/files/clanton.patch9719
1 files changed, 6284 insertions, 3435 deletions
diff --git a/recipes-kernel/linux/files/clanton.patch b/recipes-kernel/linux/files/clanton.patch
index 437b591..ed2d1fb 100644
--- a/recipes-kernel/linux/files/clanton.patch
+++ b/recipes-kernel/linux/files/clanton.patch
@@ -168,10 +168,10 @@ index f0ffc27..e56f074 100644
;------------------------------------------------------------------------------
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
-index 0694d09..7509992 100644
+index 0694d09..2c881ac 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
-@@ -417,6 +417,14 @@ config X86_INTEL_CE
+@@ -417,6 +417,15 @@ config X86_INTEL_CE
This option compiles in support for the CE4100 SOC for settop
boxes and media devices.
@@ -179,6 +179,7 @@ index 0694d09..7509992 100644
+ bool "Intel Quark X1000 SOC support"
+ depends on M586TSC
+ select ARCH_REQUIRE_GPIOLIB
++ select I2C
+ ---help---
+ Quark X1000 SOC support . This option enables probing for various
+ PCI-IDs of several on-chip devices provided by the X1000
@@ -186,7 +187,7 @@ index 0694d09..7509992 100644
config X86_WANT_INTEL_MID
bool "Intel MID platform support"
depends on X86_32
-@@ -500,6 +508,13 @@ config X86_SUPPORTS_MEMORY_FAILURE
+@@ -500,6 +509,13 @@ config X86_SUPPORTS_MEMORY_FAILURE
depends on X86_64 || !SPARSEMEM
select ARCH_SUPPORTS_MEMORY_FAILURE
@@ -200,12 +201,12 @@ index 0694d09..7509992 100644
config X86_VISWS
bool "SGI 320/540 (Visual Workstation)"
depends on X86_32 && PCI && X86_MPPARSE && PCI_GODIRECT
-@@ -1524,6 +1539,13 @@ config EFI_STUB
+@@ -1524,6 +1540,13 @@ config EFI_STUB
See Documentation/x86/efi-stub.txt for more information.
+config EFI_CAPSULE
-+ bool "EFI capsule update support"
++ tristate "EFI capsule update support"
+ depends on EFI
+ ---help---
+ This kernel feature allows for loading of EFI capsule code
@@ -409,14 +410,14 @@ index 8b24289..c963186 100644
printk(KERN_INFO "Command line: %s\n", boot_command_line);
#endif
diff --git a/arch/x86/platform/efi/Makefile b/arch/x86/platform/efi/Makefile
-index 6db1cc4..41ac17c 100644
+index 6db1cc4..03a4329 100644
--- a/arch/x86/platform/efi/Makefile
+++ b/arch/x86/platform/efi/Makefile
@@ -1,2 +1,3 @@
obj-$(CONFIG_EFI) += efi.o efi_$(BITS).o efi_stub_$(BITS).o
-obj-$(CONFIG_ACPI_BGRT) += efi-bgrt.o
+obj-$(CONFIG_ACPI_BGRT) += efi-bgrt.o
-+obj-$(CONFIG_EFI_CAPSULE) += efi_capsule.o
++obj-$(CONFIG_EFI_CAPSULE) += efi_capsule_update.o
diff --git a/arch/x86/platform/efi/efi-bgrt.c b/arch/x86/platform/efi/efi-bgrt.c
index d9c1b95..9fd5168 100644
--- a/arch/x86/platform/efi/efi-bgrt.c
@@ -457,7 +458,7 @@ index d9c1b95..9fd5168 100644
if (bgrt_tab->header.length < sizeof(*bgrt_tab))
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
-index e2cd38f..0e22b5f4 100644
+index e2cd38f..0e22b5f 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -847,6 +847,7 @@ void __init efi_enter_virtual_mode(void)
@@ -493,12 +494,12 @@ index e2cd38f..0e22b5f4 100644
continue;
size = md->num_pages << EFI_PAGE_SHIFT;
-diff --git a/arch/x86/platform/efi/efi_capsule.c b/arch/x86/platform/efi/efi_capsule.c
+diff --git a/arch/x86/platform/efi/efi_capsule_update.c b/arch/x86/platform/efi/efi_capsule_update.c
new file mode 100644
-index 0000000..d329d6d
+index 0000000..ccd4268
--- /dev/null
-+++ b/arch/x86/platform/efi/efi_capsule.c
-@@ -0,0 +1,320 @@
++++ b/arch/x86/platform/efi/efi_capsule_update.c
+@@ -0,0 +1,332 @@
+/*
+ * Copyright(c) 2013 Intel Corporation. All rights reserved.
+ *
@@ -518,9 +519,10 @@ index 0000000..d329d6d
+ * Contact Information:
+ * Intel Corporation
+ */
-+
++#define DEBUG
+#include <asm/cln.h>
+#include <linux/errno.h>
++#include <linux/firmware.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
@@ -541,8 +543,10 @@ index 0000000..d329d6d
+} efi_blk_desc_t;
+
+static struct kobject * efi_capsule_kobj;
++static struct device *dev;
+static struct list_head sg_list;
+static char fpath[MAX_PATH];
++static bool path_set = false;
+static int csh_jump = CSH_HDR_SIZE; /* Clanton EDK wants CSH jump */
+
+/**
@@ -552,30 +556,26 @@ index 0000000..d329d6d
+ */
+static int efi_capsule_trigger_update(void)
+{
-+ struct file *fp = NULL;
-+ mm_segment_t old_fs = get_fs();
++ const struct firmware *fw_entry;
+ int ret = 0;
+ u32 nblocks = 0, i = 0, total_size = 0, data_len = 0, offset = 0;
+ efi_capsule_header_t *chdr = NULL;
+ efi_blk_desc_t * desc_block = NULL;
-+ u8 ** data = NULL;
++ //u8 ** data = NULL;
++ u8 * data = NULL;
+
-+ set_fs (KERNEL_DS);
-+ fp = filp_open(fpath, O_RDONLY, 0);
++ if (path_set == false)
++ return -ENODEV;
+
-+ /* Sanity check input */
-+ if (IS_ERR(fp)|| fp->f_op == NULL ||fp->f_op->read == NULL ||
-+ fp->f_dentry->d_inode->i_size == 0){
-+ pr_err(PFX"file open [%s] error!\n", fpath);
-+ ret = -EINVAL;
-+ goto done;
++ ret = request_firmware(&fw_entry, fpath, dev);
++ if (ret){
++ pr_err(PFX"unable to load firmware %s\n", fpath);
++ return ret;
+ }
+
+ /* Determine necessary sizes */
-+ nblocks = (fp->f_dentry->d_inode->i_size/MAX_CHUNK) + 2;
-+ total_size = fp->f_dentry->d_inode->i_size;
-+
-+ pr_info(PFX "nblocks %d total_size %d\n", nblocks, total_size);
++ nblocks = (fw_entry->size/MAX_CHUNK) + 2;
++ total_size = fw_entry->size;
+
+ /* Allocate array of descriptor blocks + 1 for terminator */
+ desc_block = (efi_blk_desc_t*)kzalloc(nblocks * sizeof(efi_blk_desc_t), GFP_KERNEL);
@@ -588,35 +588,19 @@ index 0000000..d329d6d
+ pr_info(PFX"File %s size %u descriptor blocks %u\n",
+ fpath, total_size, nblocks);
+
-+ data = kmalloc(nblocks, GFP_KERNEL);
-+ if (data == NULL){
-+ ret = -ENOMEM;
-+ pr_info("Failed to allocate %d bytes\n", nblocks);
-+ goto done;
-+ }
-+
-+ for (i = 0; i < nblocks; i++){
-+ data[i] = kmalloc(MAX_CHUNK, GFP_KERNEL);
-+ if (data[i] == NULL){
-+ ret = -ENOMEM;
-+ pr_info("Alloc fail %d bytes entry %d\n",
-+ nblocks, i);
-+ goto done;
-+ }
-+
-+ }
-+
+ /* Read in data */
+ for (i = 0; i < nblocks && offset < total_size; i++){
+ /* Determine read len */
+ data_len = offset < total_size - MAX_CHUNK ?
+ MAX_CHUNK : total_size - offset;
-+ ret = fp->f_op->read(fp, data[i], data_len, &fp->f_pos);
-+ if (ret < 0){
-+ pr_err(PFX"Error reading @ data %u\n", offset);
-+ ret = -EIO;
++ data = kmalloc(MAX_CHUNK, GFP_KERNEL);
++ if (data == NULL){
++ ret = -ENOMEM;
++ pr_info("Alloc fail %d bytes entry %d\n",
++ nblocks, i);
+ goto done;
-+ }
++ }
++ memcpy(data, fw_entry->data + offset, data_len);
+ offset += data_len;
+
+ /* Sanity check */
@@ -628,21 +612,20 @@ index 0000000..d329d6d
+
+ /* Validate header as appropriate */
+ if (chdr == NULL){
-+ chdr = (efi_capsule_header_t*)&data[i][csh_jump];
-+ desc_block[i].data_block = __pa(&data[i][csh_jump]);
++ chdr = (efi_capsule_header_t*)&data[csh_jump];
++ desc_block[i].data_block = __pa(&data[csh_jump]);
+ desc_block[i].length = data_len - csh_jump;
-+ pr_info(PFX"hdr offset in file %d bytes\n", csh_jump);
-+ pr_info(PFX"hdr size %u flags 0x%08x imagesize 0x%08x\n",
++ pr_debug(PFX"hdr offset in file %d bytes\n", csh_jump);
++ pr_debug(PFX"hdr size %u flags 0x%08x imagesize 0x%08x\n",
+ chdr->headersize, chdr->flags, chdr->imagesize);
+
+ }else{
-+ desc_block[i].data_block = __pa(&data[i][0]);
++ desc_block[i].data_block = __pa(data);
+ desc_block[i].length = data_len;
+ }
-+
-+ pr_info(PFX "block %d length %u data @ phys 0x%08x\n",
++ pr_debug(PFX "block %d length %u data @ phys 0x%08x virt %x\n",
+ i, (int)desc_block[i].length,
-+ (unsigned int)desc_block[i].data_block);
++ (unsigned int)desc_block[i].data_block, data);
+ }
+
+ if (i > nblocks-1){
@@ -651,35 +634,31 @@ index 0000000..d329d6d
+ goto done;
+ }
+
-+ pr_info(PFX"submitting capsule to EDKII firmware\n");
++ pr_debug(PFX"submitting capsule to EDKII firmware\n");
+
+ ret = efi.update_capsule(&chdr, 1, __pa(desc_block));
+ if(ret != EFI_SUCCESS) {
+ pr_err(PFX"submission fail err=0x%08x\n", ret);
+ }else{
-+ pr_info(PFX"submission success\n");
++ pr_debug(PFX"submission success\n");
+ ret = 0;
+ }
+
+ if (chdr != NULL && chdr->flags & 0x10000){
-+ pr_info(PFX"capsule persist across S3 skipping capsule free\n");
++ pr_debug(PFX"capsule persist across S3 skipping capsule free\n");
+ goto done_close;
+ }
+done:
++
+ for (i = 0; i < nblocks; i++){
-+ if (data && data[i])
-+ kfree(data[i]);
++ if (desc_block[i].data_block != 0)
++ kfree(phys_to_virt((u32)desc_block[i].data_block));
+ }
-+ if (data)
-+ kfree(data);
+
+ if (desc_block != NULL)
+ kfree(desc_block);
+done_close:
-+ if (!IS_ERR(fp))
-+ filp_close(fp, NULL);
-+
-+ set_fs (old_fs);
++ release_firmware(fw_entry);
+ return ret;
+}
+
@@ -736,6 +715,7 @@ index 0000000..d329d6d
+
+ memset(fpath, 0x00, sizeof(fpath));
+ memcpy(fpath, buf, count);
++ path_set = true;
+
+ return count;
+}
@@ -759,6 +739,11 @@ index 0000000..d329d6d
+static struct kobj_attribute efi_capsule_update_attr =
+ __ATTR(capsule_update, 0644, NULL, efi_capsule_update_store);
+
++static void efi_capsule_device_release(struct device *dev)
++{
++ kfree(dev);
++}
++
+#define SYSFS_ERRTXT "Error adding sysfs entry!\n"
+/**
+ * intel_cln_capsule_update_init
@@ -775,31 +760,59 @@ index 0000000..d329d6d
+ INIT_LIST_HEAD(&sg_list);
+
+ /* efi_capsule_kobj subordinate of firmware @ /sys/firmware/efi */
-+ efi_capsule_kobj = kobject_create_and_add("efi", firmware_kobj);
++ efi_capsule_kobj = kobject_create_and_add("efi_capsule", firmware_kobj);
+ if (!efi_capsule_kobj) {
+ pr_err(PFX"kset create error\n");
+ retval = -ENODEV;
+ goto err;
+ }
+
++ dev = kzalloc(sizeof(struct device), GFP_KERNEL);
++ if (!dev) {
++ retval = -ENOMEM;
++ goto err_name;
++ }
++
++ retval = dev_set_name(dev, "%s", DRIVER_NAME);
++ if (retval < 0){
++ pr_err(PFX"dev_set_name err\n");
++ goto err_dev_reg;
++ }
++
++ dev->kobj.parent = efi_capsule_kobj;
++ dev->groups = NULL;
++ dev->release = efi_capsule_device_release;
++
++ retval = device_register(dev);
++ if (retval < 0){
++ pr_err(PFX"device_register error\n");
++ goto err_dev_reg;
++ }
++
+ if(sysfs_create_file(efi_capsule_kobj, &efi_capsule_path_attr.attr)) {
+ pr_err(PFX SYSFS_ERRTXT);
+ retval = -ENODEV;
-+ goto err;
++ goto err_dev_reg;
+ }
+ if(sysfs_create_file(efi_capsule_kobj, &efi_capsule_update_attr.attr)) {
+ pr_err(PFX SYSFS_ERRTXT);
+ retval = -ENODEV;
-+ goto err;
++ goto err_dev_reg;
+
+ }
+ if(sysfs_create_file(efi_capsule_kobj, &efi_capsule_csh_jump_attr.attr)) {
+ pr_err(PFX SYSFS_ERRTXT);
+ retval = -ENODEV;
-+ goto err;
++ goto err_dev_reg;
+
+ }
++ return 0;
+
++err_dev_reg:
++ put_device(dev);
++ dev = NULL;
++err_name:
++ kfree(dev);
+err:
+ return retval;
+}
@@ -851,120 +864,44 @@ index a235085..dda0c7f 100644
return dev->devt == *devt;
}
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
-index d4c1218..955d8fe 100644
+index d4c1218..9867547 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
-@@ -34,18 +34,46 @@ if DMADEVICES
+@@ -34,13 +34,13 @@ if DMADEVICES
comment "DMA Devices"
config INTEL_MID_DMAC
-+ tristate "Intel DMAC Moorsetown/Medfield/Clanton DMA controllers"
-+ depends on X86
-+ default n
-+ help
-+ Enable support for the Intel(R) MID/Clanton DMA engine present
-+ in Intel MID chipsets and Clanton SOC devices
-+
-+ Say Y here if you have such a chipset.
-+
-+ If unsure, say N.
-+
-+if INTEL_MID_DMAC
-+
-+config INTEL_MID_PCI
- tristate "Intel MID DMA support for Peripheral DMA controllers"
-- depends on PCI && X86
-+ depends on X86 && PCI
+- tristate "Intel MID DMA support for Peripheral DMA controllers"
++ tristate "Intel DMAC Moorsetown/Medfield/Quark DMA controllers"
+ depends on PCI && X86
select DMA_ENGINE
default n
help
- Enable support for the Intel(R) MID DMA engine present
+- Enable support for the Intel(R) MID DMA engine present
- in Intel MID chipsets.
-+ in Intel MID chipsets
++ Enable support for the Intel(R) MID/Quark DMA engine present
++ in Intel MID chipsets and Quark SOC devices
Say Y here if you have such a chipset.
- If unsure, say N.
-
-+config INTEL_CLN_DMAC
-+ tristate "Intel CLN DMA support for Peripheral DMA controllers"
-+ depends on PCI && X86 && INTEL_QUARK_X1000_SOC
-+ default n
-+ help
-+ Enable support for the Intel(R) Clanton DMA engine present
-+ in Intel Clanton DMA enabled UART. This is not a generic DMA
-+ driver, instead this enables DMAC regs for Clanton's UART alone
-+
-+ Say Y here if you have a Clanton processor.
-+
-+ If unsure, say N.
-+endif
-+
- config ASYNC_TX_ENABLE_CHANNEL_SWITCH
- bool
-
-diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c
-index a0de82e..91883c2 100644
---- a/drivers/dma/intel_mid_dma.c
-+++ b/drivers/dma/intel_mid_dma.c
-@@ -30,9 +30,9 @@
- #include <linux/module.h>
-
- #include "dmaengine.h"
-+#include "intel_mid_dma_regs.h"
-
- #define MAX_CHAN 4 /*max ch across controllers*/
--#include "intel_mid_dma_regs.h"
-
- #define INTEL_MID_DMAC1_ID 0x0814
- #define INTEL_MID_DMAC2_ID 0x0813
-@@ -43,6 +43,14 @@
- #define LNW_PERIPHRAL_STATUS 0x0
- #define LNW_PERIPHRAL_MASK 0x8
-
-+/**
-+ * struct intel_mid_dma_probe_info
-+ *
-+ * @max_chan: maximum channels to probe
-+ * @ch_base: offset from register base
-+ * @block_size: TBD
-+ * @pimr_mask: indicates if mask registers to be mapped
-+ */
- struct intel_mid_dma_probe_info {
- u8 max_chan;
- u8 ch_base;
-@@ -1015,7 +1023,7 @@ static void dma_tasklet2(unsigned long data)
- * See if this is our interrupt if so then schedule the tasklet
- * otherwise ignore
- */
--static irqreturn_t intel_mid_dma_interrupt(int irq, void *data)
-+irqreturn_t intel_mid_dma_interrupt(int irq, void *data)
- {
- struct middma_device *mid = data;
- u32 tfr_status, err_status;
-@@ -1048,6 +1056,7 @@ static irqreturn_t intel_mid_dma_interrupt(int irq, void *data)
-
- return IRQ_HANDLED;
- }
-+EXPORT_SYMBOL(intel_mid_dma_interrupt);
-
- static irqreturn_t intel_mid_dma_interrupt1(int irq, void *data)
- {
-diff --git a/drivers/dma/intel_mid_dma/Makefile b/drivers/dma/intel_mid_dma/Makefile
-new file mode 100644
-index 0000000..567eca5
---- /dev/null
-+++ b/drivers/dma/intel_mid_dma/Makefile
-@@ -0,0 +1,3 @@
-+obj-$(CONFIG_INTEL_MID_DMAC) += intel_mid_dma.o
+diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
+index 7428fea..425587f 100644
+--- a/drivers/dma/Makefile
++++ b/drivers/dma/Makefile
+@@ -5,6 +5,7 @@ obj-$(CONFIG_DMA_ENGINE) += dmaengine.o
+ obj-$(CONFIG_DMA_VIRTUAL_CHANNELS) += virt-dma.o
+ obj-$(CONFIG_NET_DMA) += iovlock.o
+ obj-$(CONFIG_INTEL_MID_DMAC) += intel_mid_dma.o
+intel_mid_dma-objs:= intel_mid_dma_core.o intel_cln_dma_pci.o intel_mid_dma_pci.o
-+
-diff --git a/drivers/dma/intel_mid_dma/intel_cln_dma_pci.c b/drivers/dma/intel_mid_dma/intel_cln_dma_pci.c
+ obj-$(CONFIG_DMATEST) += dmatest.o
+ obj-$(CONFIG_INTEL_IOATDMA) += ioat/
+ obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o
+diff --git a/drivers/dma/intel_cln_dma_pci.c b/drivers/dma/intel_cln_dma_pci.c
new file mode 100644
-index 0000000..442c2f2
+index 0000000..cb3d6e7
--- /dev/null
-+++ b/drivers/dma/intel_mid_dma/intel_cln_dma_pci.c
-@@ -0,0 +1,153 @@
++++ b/drivers/dma/intel_cln_dma_pci.c
+@@ -0,0 +1,155 @@
+/*
+ * Copyright(c) 2013 Intel Corporation. All rights reserved.
+ *
@@ -986,10 +923,10 @@ index 0000000..442c2f2
+ */
+
+/*
-+ * intel_mid_dma.c - Intel Langwell DMA Drivers
++ * intel_quark_dma_pci.c
+ *
-+ * Author: Vinod Koul <bryan.odonoghue@linux.intel.com>
-+ * This is an entry point for Intel Clanton based DMAC on Clanton's UART
++ * Author: Bryan O'Donoghue <bryan.odonoghue@intel.com>
++ * This is an entry point for Intel Quark based DMAC on Quark's UART
+ * specifically we don't have a dedicated PCI function, instead we have DMAC
+ * regs hung off of a PCI BAR. This entry/exit allows re-use of the core
+ * DMA API for MID devices manipulated to suit our BAR setup
@@ -1001,7 +938,7 @@ index 0000000..442c2f2
+#include <linux/intel_mid_dma.h>
+#include <linux/module.h>
+
-+#include "intel_mid_dma_core.h"
++//#include "intel_mid_dma_core.h"
+#include "intel_mid_dma_regs.h"
+
+/**
@@ -1010,21 +947,20 @@ index 0000000..442c2f2
+ * @id: pci device id structure
+ *
+ * Initialize the PCI device, map BARs, query driver data.
-+ * Call intel_setup_dma to complete contoller and chan initilzation
++ * Call mid_setup_dma to complete contoller and chan initilzation
+ */
-+int __devinit intel_cln_dma_probe(struct pci_dev *pdev,
++int intel_cln_dma_probe(struct pci_dev *pdev,
+ struct middma_device *device)
+{
+ u32 base_addr, bar_size;
+ int err;
+
-+ pr_debug("MDMA: probe for %x\n", pdev->device);
-+ pr_debug("MDMA: CH %d, base %d, block len %d, Periphral mask %x\n",
++ dev_info(&pdev->dev, "MDMA: probe for %x\n", pdev->device);
++ dev_info(&pdev->dev, "MDMA: CH %d, base %d, block len %d, Periphral mask %x\n",
+ device->max_chan, device->chan_base,
+ device->block_size, device->pimr_mask);
+
+ device->pdev = pci_dev_get(pdev);
-+ device->ispci_fn = true;
+
+ base_addr = pci_resource_start(pdev, 1);
+ bar_size = pci_resource_len(pdev, 1);
@@ -1035,7 +971,10 @@ index 0000000..442c2f2
+ goto err_ioremap;
+ }
+
-+ err = intel_mid_dma_setup(pdev, device, false);
++ dev_info(&pdev->dev, "Remapped BAR 0x%08x to virt 0x%p\n",
++ base_addr, device->dma_base);
++
++ err = mid_setup_dma(pdev, device);
+ if (err)
+ goto err_dma;
+
@@ -1056,9 +995,9 @@ index 0000000..442c2f2
+ * Free up all resources and data
+ * Call shutdown_dma to complete contoller and chan cleanup
+ */
-+void __devexit intel_cln_dma_remove(struct pci_dev *pdev, struct middma_device *device)
++void intel_cln_dma_remove(struct pci_dev *pdev, struct middma_device *device)
+{
-+ intel_mid_dma_shutdown(pdev, device);
++ //middma_shutdown(pdev, device);
+}
+EXPORT_SYMBOL(intel_cln_dma_remove);
+
@@ -1080,7 +1019,9 @@ index 0000000..442c2f2
+ if (device->ch[i].in_use)
+ return -EAGAIN;
+ }
++#if 0
+ dmac1_mask_periphral_intr(device);
++#endif
+ device->state = SUSPENDED;
+ return 0;
+}
@@ -1095,9 +1036,7 @@ index 0000000..442c2f2
+*/
+int intel_cln_dma_resume(struct middma_device *device)
+{
-+ pr_debug("MDMA: dma_resume called\n");
-+ device->state = RUNNING;
-+ iowrite32(REG_BIT0, device->dma_base + DMA_CFG);
++ //return middma_resume(device);
+ return 0;
+}
+EXPORT_SYMBOL(intel_cln_dma_resume);
@@ -1118,12 +1057,2788 @@ index 0000000..442c2f2
+EXPORT_SYMBOL(intel_cln_dma_runtime_resume);
+
+
-diff --git a/drivers/dma/intel_mid_dma/intel_mid_dma_pci.c b/drivers/dma/intel_mid_dma/intel_mid_dma_pci.c
+diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c
+deleted file mode 100644
+index a0de82e..0000000
+--- a/drivers/dma/intel_mid_dma.c
++++ /dev/null
+@@ -1,1460 +0,0 @@
+-/*
+- * intel_mid_dma.c - Intel Langwell DMA Drivers
+- *
+- * Copyright (C) 2008-10 Intel Corp
+- * Author: Vinod Koul <vinod.koul@intel.com>
+- * The driver design is based on dw_dmac driver
+- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License as published by
+- * the Free Software Foundation; version 2 of the License.
+- *
+- * This program is distributed in the hope that it will be useful, but
+- * WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+- * General Public License for more details.
+- *
+- * You should have received a copy of the GNU General Public License along
+- * with this program; if not, write to the Free Software Foundation, Inc.,
+- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+- *
+- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+- *
+- *
+- */
+-#include <linux/pci.h>
+-#include <linux/interrupt.h>
+-#include <linux/pm_runtime.h>
+-#include <linux/intel_mid_dma.h>
+-#include <linux/module.h>
+-
+-#include "dmaengine.h"
+-
+-#define MAX_CHAN 4 /*max ch across controllers*/
+-#include "intel_mid_dma_regs.h"
+-
+-#define INTEL_MID_DMAC1_ID 0x0814
+-#define INTEL_MID_DMAC2_ID 0x0813
+-#define INTEL_MID_GP_DMAC2_ID 0x0827
+-#define INTEL_MFLD_DMAC1_ID 0x0830
+-#define LNW_PERIPHRAL_MASK_BASE 0xFFAE8008
+-#define LNW_PERIPHRAL_MASK_SIZE 0x10
+-#define LNW_PERIPHRAL_STATUS 0x0
+-#define LNW_PERIPHRAL_MASK 0x8
+-
+-struct intel_mid_dma_probe_info {
+- u8 max_chan;
+- u8 ch_base;
+- u16 block_size;
+- u32 pimr_mask;
+-};
+-
+-#define INFO(_max_chan, _ch_base, _block_size, _pimr_mask) \
+- ((kernel_ulong_t)&(struct intel_mid_dma_probe_info) { \
+- .max_chan = (_max_chan), \
+- .ch_base = (_ch_base), \
+- .block_size = (_block_size), \
+- .pimr_mask = (_pimr_mask), \
+- })
+-
+-/*****************************************************************************
+-Utility Functions*/
+-/**
+- * get_ch_index - convert status to channel
+- * @status: status mask
+- * @base: dma ch base value
+- *
+- * Modify the status mask and return the channel index needing
+- * attention (or -1 if neither)
+- */
+-static int get_ch_index(int *status, unsigned int base)
+-{
+- int i;
+- for (i = 0; i < MAX_CHAN; i++) {
+- if (*status & (1 << (i + base))) {
+- *status = *status & ~(1 << (i + base));
+- pr_debug("MDMA: index %d New status %x\n", i, *status);
+- return i;
+- }
+- }
+- return -1;
+-}
+-
+-/**
+- * get_block_ts - calculates dma transaction length
+- * @len: dma transfer length
+- * @tx_width: dma transfer src width
+- * @block_size: dma controller max block size
+- *
+- * Based on src width calculate the DMA trsaction length in data items
+- * return data items or FFFF if exceeds max length for block
+- */
+-static int get_block_ts(int len, int tx_width, int block_size)
+-{
+- int byte_width = 0, block_ts = 0;
+-
+- switch (tx_width) {
+- case DMA_SLAVE_BUSWIDTH_1_BYTE:
+- byte_width = 1;
+- break;
+- case DMA_SLAVE_BUSWIDTH_2_BYTES:
+- byte_width = 2;
+- break;
+- case DMA_SLAVE_BUSWIDTH_4_BYTES:
+- default:
+- byte_width = 4;
+- break;
+- }
+-
+- block_ts = len/byte_width;
+- if (block_ts > block_size)
+- block_ts = 0xFFFF;
+- return block_ts;
+-}
+-
+-/*****************************************************************************
+-DMAC1 interrupt Functions*/
+-
+-/**
+- * dmac1_mask_periphral_intr - mask the periphral interrupt
+- * @mid: dma device for which masking is required
+- *
+- * Masks the DMA periphral interrupt
+- * this is valid for DMAC1 family controllers only
+- * This controller should have periphral mask registers already mapped
+- */
+-static void dmac1_mask_periphral_intr(struct middma_device *mid)
+-{
+- u32 pimr;
+-
+- if (mid->pimr_mask) {
+- pimr = readl(mid->mask_reg + LNW_PERIPHRAL_MASK);
+- pimr |= mid->pimr_mask;
+- writel(pimr, mid->mask_reg + LNW_PERIPHRAL_MASK);
+- }
+- return;
+-}
+-
+-/**
+- * dmac1_unmask_periphral_intr - unmask the periphral interrupt
+- * @midc: dma channel for which masking is required
+- *
+- * UnMasks the DMA periphral interrupt,
+- * this is valid for DMAC1 family controllers only
+- * This controller should have periphral mask registers already mapped
+- */
+-static void dmac1_unmask_periphral_intr(struct intel_mid_dma_chan *midc)
+-{
+- u32 pimr;
+- struct middma_device *mid = to_middma_device(midc->chan.device);
+-
+- if (mid->pimr_mask) {
+- pimr = readl(mid->mask_reg + LNW_PERIPHRAL_MASK);
+- pimr &= ~mid->pimr_mask;
+- writel(pimr, mid->mask_reg + LNW_PERIPHRAL_MASK);
+- }
+- return;
+-}
+-
+-/**
+- * enable_dma_interrupt - enable the periphral interrupt
+- * @midc: dma channel for which enable interrupt is required
+- *
+- * Enable the DMA periphral interrupt,
+- * this is valid for DMAC1 family controllers only
+- * This controller should have periphral mask registers already mapped
+- */
+-static void enable_dma_interrupt(struct intel_mid_dma_chan *midc)
+-{
+- dmac1_unmask_periphral_intr(midc);
+-
+- /*en ch interrupts*/
+- iowrite32(UNMASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_TFR);
+- iowrite32(UNMASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_ERR);
+- return;
+-}
+-
+-/**
+- * disable_dma_interrupt - disable the periphral interrupt
+- * @midc: dma channel for which disable interrupt is required
+- *
+- * Disable the DMA periphral interrupt,
+- * this is valid for DMAC1 family controllers only
+- * This controller should have periphral mask registers already mapped
+- */
+-static void disable_dma_interrupt(struct intel_mid_dma_chan *midc)
+-{
+- /*Check LPE PISR, make sure fwd is disabled*/
+- iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_BLOCK);
+- iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_TFR);
+- iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_ERR);
+- return;
+-}
+-
+-/*****************************************************************************
+-DMA channel helper Functions*/
+-/**
+- * mid_desc_get - get a descriptor
+- * @midc: dma channel for which descriptor is required
+- *
+- * Obtain a descriptor for the channel. Returns NULL if none are free.
+- * Once the descriptor is returned it is private until put on another
+- * list or freed
+- */
+-static struct intel_mid_dma_desc *midc_desc_get(struct intel_mid_dma_chan *midc)
+-{
+- struct intel_mid_dma_desc *desc, *_desc;
+- struct intel_mid_dma_desc *ret = NULL;
+-
+- spin_lock_bh(&midc->lock);
+- list_for_each_entry_safe(desc, _desc, &midc->free_list, desc_node) {
+- if (async_tx_test_ack(&desc->txd)) {
+- list_del(&desc->desc_node);
+- ret = desc;
+- break;
+- }
+- }
+- spin_unlock_bh(&midc->lock);
+- return ret;
+-}
+-
+-/**
+- * mid_desc_put - put a descriptor
+- * @midc: dma channel for which descriptor is required
+- * @desc: descriptor to put
+- *
+- * Return a descriptor from lwn_desc_get back to the free pool
+- */
+-static void midc_desc_put(struct intel_mid_dma_chan *midc,
+- struct intel_mid_dma_desc *desc)
+-{
+- if (desc) {
+- spin_lock_bh(&midc->lock);
+- list_add_tail(&desc->desc_node, &midc->free_list);
+- spin_unlock_bh(&midc->lock);
+- }
+-}
+-/**
+- * midc_dostart - begin a DMA transaction
+- * @midc: channel for which txn is to be started
+- * @first: first descriptor of series
+- *
+- * Load a transaction into the engine. This must be called with midc->lock
+- * held and bh disabled.
+- */
+-static void midc_dostart(struct intel_mid_dma_chan *midc,
+- struct intel_mid_dma_desc *first)
+-{
+- struct middma_device *mid = to_middma_device(midc->chan.device);
+-
+- /* channel is idle */
+- if (midc->busy && test_ch_en(midc->dma_base, midc->ch_id)) {
+- /*error*/
+- pr_err("ERR_MDMA: channel is busy in start\n");
+- /* The tasklet will hopefully advance the queue... */
+- return;
+- }
+- midc->busy = true;
+- /*write registers and en*/
+- iowrite32(first->sar, midc->ch_regs + SAR);
+- iowrite32(first->dar, midc->ch_regs + DAR);
+- iowrite32(first->lli_phys, midc->ch_regs + LLP);
+- iowrite32(first->cfg_hi, midc->ch_regs + CFG_HIGH);
+- iowrite32(first->cfg_lo, midc->ch_regs + CFG_LOW);
+- iowrite32(first->ctl_lo, midc->ch_regs + CTL_LOW);
+- iowrite32(first->ctl_hi, midc->ch_regs + CTL_HIGH);
+- pr_debug("MDMA:TX SAR %x,DAR %x,CFGL %x,CFGH %x,CTLH %x, CTLL %x\n",
+- (int)first->sar, (int)first->dar, first->cfg_hi,
+- first->cfg_lo, first->ctl_hi, first->ctl_lo);
+- first->status = DMA_IN_PROGRESS;
+-
+- iowrite32(ENABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN);
+-}
+-
+-/**
+- * midc_descriptor_complete - process completed descriptor
+- * @midc: channel owning the descriptor
+- * @desc: the descriptor itself
+- *
+- * Process a completed descriptor and perform any callbacks upon
+- * the completion. The completion handling drops the lock during the
+- * callbacks but must be called with the lock held.
+- */
+-static void midc_descriptor_complete(struct intel_mid_dma_chan *midc,
+- struct intel_mid_dma_desc *desc)
+- __releases(&midc->lock) __acquires(&midc->lock)
+-{
+- struct dma_async_tx_descriptor *txd = &desc->txd;
+- dma_async_tx_callback callback_txd = NULL;
+- struct intel_mid_dma_lli *llitem;
+- void *param_txd = NULL;
+-
+- dma_cookie_complete(txd);
+- callback_txd = txd->callback;
+- param_txd = txd->callback_param;
+-
+- if (desc->lli != NULL) {
+- /*clear the DONE bit of completed LLI in memory*/
+- llitem = desc->lli + desc->current_lli;
+- llitem->ctl_hi &= CLEAR_DONE;
+- if (desc->current_lli < desc->lli_length-1)
+- (desc->current_lli)++;
+- else
+- desc->current_lli = 0;
+- }
+- spin_unlock_bh(&midc->lock);
+- if (callback_txd) {
+- pr_debug("MDMA: TXD callback set ... calling\n");
+- callback_txd(param_txd);
+- }
+- if (midc->raw_tfr) {
+- desc->status = DMA_SUCCESS;
+- if (desc->lli != NULL) {
+- pci_pool_free(desc->lli_pool, desc->lli,
+- desc->lli_phys);
+- pci_pool_destroy(desc->lli_pool);
+- desc->lli = NULL;
+- }
+- list_move(&desc->desc_node, &midc->free_list);
+- midc->busy = false;
+- }
+- spin_lock_bh(&midc->lock);
+-
+-}
+-/**
+- * midc_scan_descriptors - check the descriptors in channel
+- * mark completed when tx is completete
+- * @mid: device
+- * @midc: channel to scan
+- *
+- * Walk the descriptor chain for the device and process any entries
+- * that are complete.
+- */
+-static void midc_scan_descriptors(struct middma_device *mid,
+- struct intel_mid_dma_chan *midc)
+-{
+- struct intel_mid_dma_desc *desc = NULL, *_desc = NULL;
+-
+- /*tx is complete*/
+- list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
+- if (desc->status == DMA_IN_PROGRESS)
+- midc_descriptor_complete(midc, desc);
+- }
+- return;
+- }
+-/**
+- * midc_lli_fill_sg - Helper function to convert
+- * SG list to Linked List Items.
+- *@midc: Channel
+- *@desc: DMA descriptor
+- *@sglist: Pointer to SG list
+- *@sglen: SG list length
+- *@flags: DMA transaction flags
+- *
+- * Walk through the SG list and convert the SG list into Linked
+- * List Items (LLI).
+- */
+-static int midc_lli_fill_sg(struct intel_mid_dma_chan *midc,
+- struct intel_mid_dma_desc *desc,
+- struct scatterlist *sglist,
+- unsigned int sglen,
+- unsigned int flags)
+-{
+- struct intel_mid_dma_slave *mids;
+- struct scatterlist *sg;
+- dma_addr_t lli_next, sg_phy_addr;
+- struct intel_mid_dma_lli *lli_bloc_desc;
+- union intel_mid_dma_ctl_lo ctl_lo;
+- union intel_mid_dma_ctl_hi ctl_hi;
+- int i;
+-
+- pr_debug("MDMA: Entered midc_lli_fill_sg\n");
+- mids = midc->mid_slave;
+-
+- lli_bloc_desc = desc->lli;
+- lli_next = desc->lli_phys;
+-
+- ctl_lo.ctl_lo = desc->ctl_lo;
+- ctl_hi.ctl_hi = desc->ctl_hi;
+- for_each_sg(sglist, sg, sglen, i) {
+- /*Populate CTL_LOW and LLI values*/
+- if (i != sglen - 1) {
+- lli_next = lli_next +
+- sizeof(struct intel_mid_dma_lli);
+- } else {
+- /*Check for circular list, otherwise terminate LLI to ZERO*/
+- if (flags & DMA_PREP_CIRCULAR_LIST) {
+- pr_debug("MDMA: LLI is configured in circular mode\n");
+- lli_next = desc->lli_phys;
+- } else {
+- lli_next = 0;
+- ctl_lo.ctlx.llp_dst_en = 0;
+- ctl_lo.ctlx.llp_src_en = 0;
+- }
+- }
+- /*Populate CTL_HI values*/
+- ctl_hi.ctlx.block_ts = get_block_ts(sg_dma_len(sg),
+- desc->width,
+- midc->dma->block_size);
+- /*Populate SAR and DAR values*/
+- sg_phy_addr = sg_dma_address(sg);
+- if (desc->dirn == DMA_MEM_TO_DEV) {
+- lli_bloc_desc->sar = sg_phy_addr;
+- lli_bloc_desc->dar = mids->dma_slave.dst_addr;
+- } else if (desc->dirn == DMA_DEV_TO_MEM) {
+- lli_bloc_desc->sar = mids->dma_slave.src_addr;
+- lli_bloc_desc->dar = sg_phy_addr;
+- }
+- /*Copy values into block descriptor in system memroy*/
+- lli_bloc_desc->llp = lli_next;
+- lli_bloc_desc->ctl_lo = ctl_lo.ctl_lo;
+- lli_bloc_desc->ctl_hi = ctl_hi.ctl_hi;
+-
+- lli_bloc_desc++;
+- }
+- /*Copy very first LLI values to descriptor*/
+- desc->ctl_lo = desc->lli->ctl_lo;
+- desc->ctl_hi = desc->lli->ctl_hi;
+- desc->sar = desc->lli->sar;
+- desc->dar = desc->lli->dar;
+-
+- return 0;
+-}
+-/*****************************************************************************
+-DMA engine callback Functions*/
+-/**
+- * intel_mid_dma_tx_submit - callback to submit DMA transaction
+- * @tx: dma engine descriptor
+- *
+- * Submit the DMA transaction for this descriptor, start if ch idle
+- */
+-static dma_cookie_t intel_mid_dma_tx_submit(struct dma_async_tx_descriptor *tx)
+-{
+- struct intel_mid_dma_desc *desc = to_intel_mid_dma_desc(tx);
+- struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(tx->chan);
+- dma_cookie_t cookie;
+-
+- spin_lock_bh(&midc->lock);
+- cookie = dma_cookie_assign(tx);
+-
+- if (list_empty(&midc->active_list))
+- list_add_tail(&desc->desc_node, &midc->active_list);
+- else
+- list_add_tail(&desc->desc_node, &midc->queue);
+-
+- midc_dostart(midc, desc);
+- spin_unlock_bh(&midc->lock);
+-
+- return cookie;
+-}
+-
+-/**
+- * intel_mid_dma_issue_pending - callback to issue pending txn
+- * @chan: chan where pending trascation needs to be checked and submitted
+- *
+- * Call for scan to issue pending descriptors
+- */
+-static void intel_mid_dma_issue_pending(struct dma_chan *chan)
+-{
+- struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
+-
+- spin_lock_bh(&midc->lock);
+- if (!list_empty(&midc->queue))
+- midc_scan_descriptors(to_middma_device(chan->device), midc);
+- spin_unlock_bh(&midc->lock);
+-}
+-
+-/**
+- * intel_mid_dma_tx_status - Return status of txn
+- * @chan: chan for where status needs to be checked
+- * @cookie: cookie for txn
+- * @txstate: DMA txn state
+- *
+- * Return status of DMA txn
+- */
+-static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan,
+- dma_cookie_t cookie,
+- struct dma_tx_state *txstate)
+-{
+- struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
+- enum dma_status ret;
+-
+- ret = dma_cookie_status(chan, cookie, txstate);
+- if (ret != DMA_SUCCESS) {
+- spin_lock_bh(&midc->lock);
+- midc_scan_descriptors(to_middma_device(chan->device), midc);
+- spin_unlock_bh(&midc->lock);
+-
+- ret = dma_cookie_status(chan, cookie, txstate);
+- }
+-
+- return ret;
+-}
+-
+-static int dma_slave_control(struct dma_chan *chan, unsigned long arg)
+-{
+- struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
+- struct dma_slave_config *slave = (struct dma_slave_config *)arg;
+- struct intel_mid_dma_slave *mid_slave;
+-
+- BUG_ON(!midc);
+- BUG_ON(!slave);
+- pr_debug("MDMA: slave control called\n");
+-
+- mid_slave = to_intel_mid_dma_slave(slave);
+-
+- BUG_ON(!mid_slave);
+-
+- midc->mid_slave = mid_slave;
+- return 0;
+-}
+-/**
+- * intel_mid_dma_device_control - DMA device control
+- * @chan: chan for DMA control
+- * @cmd: control cmd
+- * @arg: cmd arg value
+- *
+- * Perform DMA control command
+- */
+-static int intel_mid_dma_device_control(struct dma_chan *chan,
+- enum dma_ctrl_cmd cmd, unsigned long arg)
+-{
+- struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
+- struct middma_device *mid = to_middma_device(chan->device);
+- struct intel_mid_dma_desc *desc, *_desc;
+- union intel_mid_dma_cfg_lo cfg_lo;
+-
+- if (cmd == DMA_SLAVE_CONFIG)
+- return dma_slave_control(chan, arg);
+-
+- if (cmd != DMA_TERMINATE_ALL)
+- return -ENXIO;
+-
+- spin_lock_bh(&midc->lock);
+- if (midc->busy == false) {
+- spin_unlock_bh(&midc->lock);
+- return 0;
+- }
+- /*Suspend and disable the channel*/
+- cfg_lo.cfg_lo = ioread32(midc->ch_regs + CFG_LOW);
+- cfg_lo.cfgx.ch_susp = 1;
+- iowrite32(cfg_lo.cfg_lo, midc->ch_regs + CFG_LOW);
+- iowrite32(DISABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN);
+- midc->busy = false;
+- /* Disable interrupts */
+- disable_dma_interrupt(midc);
+- midc->descs_allocated = 0;
+-
+- spin_unlock_bh(&midc->lock);
+- list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
+- if (desc->lli != NULL) {
+- pci_pool_free(desc->lli_pool, desc->lli,
+- desc->lli_phys);
+- pci_pool_destroy(desc->lli_pool);
+- desc->lli = NULL;
+- }
+- list_move(&desc->desc_node, &midc->free_list);
+- }
+- return 0;
+-}
+-
+-
+-/**
+- * intel_mid_dma_prep_memcpy - Prep memcpy txn
+- * @chan: chan for DMA transfer
+- * @dest: destn address
+- * @src: src address
+- * @len: DMA transfer len
+- * @flags: DMA flags
+- *
+- * Perform a DMA memcpy. Note we support slave periphral DMA transfers only
+- * The periphral txn details should be filled in slave structure properly
+- * Returns the descriptor for this txn
+- */
+-static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
+- struct dma_chan *chan, dma_addr_t dest,
+- dma_addr_t src, size_t len, unsigned long flags)
+-{
+- struct intel_mid_dma_chan *midc;
+- struct intel_mid_dma_desc *desc = NULL;
+- struct intel_mid_dma_slave *mids;
+- union intel_mid_dma_ctl_lo ctl_lo;
+- union intel_mid_dma_ctl_hi ctl_hi;
+- union intel_mid_dma_cfg_lo cfg_lo;
+- union intel_mid_dma_cfg_hi cfg_hi;
+- enum dma_slave_buswidth width;
+-
+- pr_debug("MDMA: Prep for memcpy\n");
+- BUG_ON(!chan);
+- if (!len)
+- return NULL;
+-
+- midc = to_intel_mid_dma_chan(chan);
+- BUG_ON(!midc);
+-
+- mids = midc->mid_slave;
+- BUG_ON(!mids);
+-
+- pr_debug("MDMA:called for DMA %x CH %d Length %zu\n",
+- midc->dma->pci_id, midc->ch_id, len);
+- pr_debug("MDMA:Cfg passed Mode %x, Dirn %x, HS %x, Width %x\n",
+- mids->cfg_mode, mids->dma_slave.direction,
+- mids->hs_mode, mids->dma_slave.src_addr_width);
+-
+- /*calculate CFG_LO*/
+- if (mids->hs_mode == LNW_DMA_SW_HS) {
+- cfg_lo.cfg_lo = 0;
+- cfg_lo.cfgx.hs_sel_dst = 1;
+- cfg_lo.cfgx.hs_sel_src = 1;
+- } else if (mids->hs_mode == LNW_DMA_HW_HS)
+- cfg_lo.cfg_lo = 0x00000;
+-
+- /*calculate CFG_HI*/
+- if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) {
+- /*SW HS only*/
+- cfg_hi.cfg_hi = 0;
+- } else {
+- cfg_hi.cfg_hi = 0;
+- if (midc->dma->pimr_mask) {
+- cfg_hi.cfgx.protctl = 0x0; /*default value*/
+- cfg_hi.cfgx.fifo_mode = 1;
+- if (mids->dma_slave.direction == DMA_MEM_TO_DEV) {
+- cfg_hi.cfgx.src_per = 0;
+- if (mids->device_instance == 0)
+- cfg_hi.cfgx.dst_per = 3;
+- if (mids->device_instance == 1)
+- cfg_hi.cfgx.dst_per = 1;
+- } else if (mids->dma_slave.direction == DMA_DEV_TO_MEM) {
+- if (mids->device_instance == 0)
+- cfg_hi.cfgx.src_per = 2;
+- if (mids->device_instance == 1)
+- cfg_hi.cfgx.src_per = 0;
+- cfg_hi.cfgx.dst_per = 0;
+- }
+- } else {
+- cfg_hi.cfgx.protctl = 0x1; /*default value*/
+- cfg_hi.cfgx.src_per = cfg_hi.cfgx.dst_per =
+- midc->ch_id - midc->dma->chan_base;
+- }
+- }
+-
+- /*calculate CTL_HI*/
+- ctl_hi.ctlx.reser = 0;
+- ctl_hi.ctlx.done = 0;
+- width = mids->dma_slave.src_addr_width;
+-
+- ctl_hi.ctlx.block_ts = get_block_ts(len, width, midc->dma->block_size);
+- pr_debug("MDMA:calc len %d for block size %d\n",
+- ctl_hi.ctlx.block_ts, midc->dma->block_size);
+- /*calculate CTL_LO*/
+- ctl_lo.ctl_lo = 0;
+- ctl_lo.ctlx.int_en = 1;
+- ctl_lo.ctlx.dst_msize = mids->dma_slave.src_maxburst;
+- ctl_lo.ctlx.src_msize = mids->dma_slave.dst_maxburst;
+-
+- /*
+- * Here we need some translation from "enum dma_slave_buswidth"
+- * to the format for our dma controller
+- * standard intel_mid_dmac's format
+- * 1 Byte 0b000
+- * 2 Bytes 0b001
+- * 4 Bytes 0b010
+- */
+- ctl_lo.ctlx.dst_tr_width = mids->dma_slave.dst_addr_width / 2;
+- ctl_lo.ctlx.src_tr_width = mids->dma_slave.src_addr_width / 2;
+-
+- if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) {
+- ctl_lo.ctlx.tt_fc = 0;
+- ctl_lo.ctlx.sinc = 0;
+- ctl_lo.ctlx.dinc = 0;
+- } else {
+- if (mids->dma_slave.direction == DMA_MEM_TO_DEV) {
+- ctl_lo.ctlx.sinc = 0;
+- ctl_lo.ctlx.dinc = 2;
+- ctl_lo.ctlx.tt_fc = 1;
+- } else if (mids->dma_slave.direction == DMA_DEV_TO_MEM) {
+- ctl_lo.ctlx.sinc = 2;
+- ctl_lo.ctlx.dinc = 0;
+- ctl_lo.ctlx.tt_fc = 2;
+- }
+- }
+-
+- pr_debug("MDMA:Calc CTL LO %x, CTL HI %x, CFG LO %x, CFG HI %x\n",
+- ctl_lo.ctl_lo, ctl_hi.ctl_hi, cfg_lo.cfg_lo, cfg_hi.cfg_hi);
+-
+- enable_dma_interrupt(midc);
+-
+- desc = midc_desc_get(midc);
+- if (desc == NULL)
+- goto err_desc_get;
+- desc->sar = src;
+- desc->dar = dest ;
+- desc->len = len;
+- desc->cfg_hi = cfg_hi.cfg_hi;
+- desc->cfg_lo = cfg_lo.cfg_lo;
+- desc->ctl_lo = ctl_lo.ctl_lo;
+- desc->ctl_hi = ctl_hi.ctl_hi;
+- desc->width = width;
+- desc->dirn = mids->dma_slave.direction;
+- desc->lli_phys = 0;
+- desc->lli = NULL;
+- desc->lli_pool = NULL;
+- return &desc->txd;
+-
+-err_desc_get:
+- pr_err("ERR_MDMA: Failed to get desc\n");
+- midc_desc_put(midc, desc);
+- return NULL;
+-}
+-/**
+- * intel_mid_dma_prep_slave_sg - Prep slave sg txn
+- * @chan: chan for DMA transfer
+- * @sgl: scatter gather list
+- * @sg_len: length of sg txn
+- * @direction: DMA transfer dirtn
+- * @flags: DMA flags
+- * @context: transfer context (ignored)
+- *
+- * Prepares LLI based periphral transfer
+- */
+-static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg(
+- struct dma_chan *chan, struct scatterlist *sgl,
+- unsigned int sg_len, enum dma_transfer_direction direction,
+- unsigned long flags, void *context)
+-{
+- struct intel_mid_dma_chan *midc = NULL;
+- struct intel_mid_dma_slave *mids = NULL;
+- struct intel_mid_dma_desc *desc = NULL;
+- struct dma_async_tx_descriptor *txd = NULL;
+- union intel_mid_dma_ctl_lo ctl_lo;
+-
+- pr_debug("MDMA: Prep for slave SG\n");
+-
+- if (!sg_len) {
+- pr_err("MDMA: Invalid SG length\n");
+- return NULL;
+- }
+- midc = to_intel_mid_dma_chan(chan);
+- BUG_ON(!midc);
+-
+- mids = midc->mid_slave;
+- BUG_ON(!mids);
+-
+- if (!midc->dma->pimr_mask) {
+- /* We can still handle sg list with only one item */
+- if (sg_len == 1) {
+- txd = intel_mid_dma_prep_memcpy(chan,
+- mids->dma_slave.dst_addr,
+- mids->dma_slave.src_addr,
+- sg_dma_len(sgl),
+- flags);
+- return txd;
+- } else {
+- pr_warn("MDMA: SG list is not supported by this controller\n");
+- return NULL;
+- }
+- }
+-
+- pr_debug("MDMA: SG Length = %d, direction = %d, Flags = %#lx\n",
+- sg_len, direction, flags);
+-
+- txd = intel_mid_dma_prep_memcpy(chan, 0, 0, sg_dma_len(sgl), flags);
+- if (NULL == txd) {
+- pr_err("MDMA: Prep memcpy failed\n");
+- return NULL;
+- }
+-
+- desc = to_intel_mid_dma_desc(txd);
+- desc->dirn = direction;
+- ctl_lo.ctl_lo = desc->ctl_lo;
+- ctl_lo.ctlx.llp_dst_en = 1;
+- ctl_lo.ctlx.llp_src_en = 1;
+- desc->ctl_lo = ctl_lo.ctl_lo;
+- desc->lli_length = sg_len;
+- desc->current_lli = 0;
+- /* DMA coherent memory pool for LLI descriptors*/
+- desc->lli_pool = pci_pool_create("intel_mid_dma_lli_pool",
+- midc->dma->pdev,
+- (sizeof(struct intel_mid_dma_lli)*sg_len),
+- 32, 0);
+- if (NULL == desc->lli_pool) {
+- pr_err("MID_DMA:LLI pool create failed\n");
+- return NULL;
+- }
+-
+- desc->lli = pci_pool_alloc(desc->lli_pool, GFP_KERNEL, &desc->lli_phys);
+- if (!desc->lli) {
+- pr_err("MID_DMA: LLI alloc failed\n");
+- pci_pool_destroy(desc->lli_pool);
+- return NULL;
+- }
+-
+- midc_lli_fill_sg(midc, desc, sgl, sg_len, flags);
+- if (flags & DMA_PREP_INTERRUPT) {
+- iowrite32(UNMASK_INTR_REG(midc->ch_id),
+- midc->dma_base + MASK_BLOCK);
+- pr_debug("MDMA:Enabled Block interrupt\n");
+- }
+- return &desc->txd;
+-}
+-
+-/**
+- * intel_mid_dma_free_chan_resources - Frees dma resources
+- * @chan: chan requiring attention
+- *
+- * Frees the allocated resources on this DMA chan
+- */
+-static void intel_mid_dma_free_chan_resources(struct dma_chan *chan)
+-{
+- struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
+- struct middma_device *mid = to_middma_device(chan->device);
+- struct intel_mid_dma_desc *desc, *_desc;
+-
+- if (true == midc->busy) {
+- /*trying to free ch in use!!!!!*/
+- pr_err("ERR_MDMA: trying to free ch in use\n");
+- }
+- spin_lock_bh(&midc->lock);
+- midc->descs_allocated = 0;
+- list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
+- list_del(&desc->desc_node);
+- pci_pool_free(mid->dma_pool, desc, desc->txd.phys);
+- }
+- list_for_each_entry_safe(desc, _desc, &midc->free_list, desc_node) {
+- list_del(&desc->desc_node);
+- pci_pool_free(mid->dma_pool, desc, desc->txd.phys);
+- }
+- list_for_each_entry_safe(desc, _desc, &midc->queue, desc_node) {
+- list_del(&desc->desc_node);
+- pci_pool_free(mid->dma_pool, desc, desc->txd.phys);
+- }
+- spin_unlock_bh(&midc->lock);
+- midc->in_use = false;
+- midc->busy = false;
+- /* Disable CH interrupts */
+- iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_BLOCK);
+- iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_ERR);
+- pm_runtime_put(&mid->pdev->dev);
+-}
+-
+-/**
+- * intel_mid_dma_alloc_chan_resources - Allocate dma resources
+- * @chan: chan requiring attention
+- *
+- * Allocates DMA resources on this chan
+- * Return the descriptors allocated
+- */
+-static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan)
+-{
+- struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
+- struct middma_device *mid = to_middma_device(chan->device);
+- struct intel_mid_dma_desc *desc;
+- dma_addr_t phys;
+- int i = 0;
+-
+- pm_runtime_get_sync(&mid->pdev->dev);
+-
+- if (mid->state == SUSPENDED) {
+- if (dma_resume(&mid->pdev->dev)) {
+- pr_err("ERR_MDMA: resume failed");
+- return -EFAULT;
+- }
+- }
+-
+- /* ASSERT: channel is idle */
+- if (test_ch_en(mid->dma_base, midc->ch_id)) {
+- /*ch is not idle*/
+- pr_err("ERR_MDMA: ch not idle\n");
+- pm_runtime_put(&mid->pdev->dev);
+- return -EIO;
+- }
+- dma_cookie_init(chan);
+-
+- spin_lock_bh(&midc->lock);
+- while (midc->descs_allocated < DESCS_PER_CHANNEL) {
+- spin_unlock_bh(&midc->lock);
+- desc = pci_pool_alloc(mid->dma_pool, GFP_KERNEL, &phys);
+- if (!desc) {
+- pr_err("ERR_MDMA: desc failed\n");
+- pm_runtime_put(&mid->pdev->dev);
+- return -ENOMEM;
+- /*check*/
+- }
+- dma_async_tx_descriptor_init(&desc->txd, chan);
+- desc->txd.tx_submit = intel_mid_dma_tx_submit;
+- desc->txd.flags = DMA_CTRL_ACK;
+- desc->txd.phys = phys;
+- spin_lock_bh(&midc->lock);
+- i = ++midc->descs_allocated;
+- list_add_tail(&desc->desc_node, &midc->free_list);
+- }
+- spin_unlock_bh(&midc->lock);
+- midc->in_use = true;
+- midc->busy = false;
+- pr_debug("MID_DMA: Desc alloc done ret: %d desc\n", i);
+- return i;
+-}
+-
+-/**
+- * midc_handle_error - Handle DMA txn error
+- * @mid: controller where error occurred
+- * @midc: chan where error occurred
+- *
+- * Scan the descriptor for error
+- */
+-static void midc_handle_error(struct middma_device *mid,
+- struct intel_mid_dma_chan *midc)
+-{
+- midc_scan_descriptors(mid, midc);
+-}
+-
+-/**
+- * dma_tasklet - DMA interrupt tasklet
+- * @data: tasklet arg (the controller structure)
+- *
+- * Scan the controller for interrupts for completion/error
+- * Clear the interrupt and call for handling completion/error
+- */
+-static void dma_tasklet(unsigned long data)
+-{
+- struct middma_device *mid = NULL;
+- struct intel_mid_dma_chan *midc = NULL;
+- u32 status, raw_tfr, raw_block;
+- int i;
+-
+- mid = (struct middma_device *)data;
+- if (mid == NULL) {
+- pr_err("ERR_MDMA: tasklet Null param\n");
+- return;
+- }
+- pr_debug("MDMA: in tasklet for device %x\n", mid->pci_id);
+- raw_tfr = ioread32(mid->dma_base + RAW_TFR);
+- raw_block = ioread32(mid->dma_base + RAW_BLOCK);
+- status = raw_tfr | raw_block;
+- status &= mid->intr_mask;
+- while (status) {
+- /*txn interrupt*/
+- i = get_ch_index(&status, mid->chan_base);
+- if (i < 0) {
+- pr_err("ERR_MDMA:Invalid ch index %x\n", i);
+- return;
+- }
+- midc = &mid->ch[i];
+- if (midc == NULL) {
+- pr_err("ERR_MDMA:Null param midc\n");
+- return;
+- }
+- pr_debug("MDMA:Tx complete interrupt %x, Ch No %d Index %d\n",
+- status, midc->ch_id, i);
+- midc->raw_tfr = raw_tfr;
+- midc->raw_block = raw_block;
+- spin_lock_bh(&midc->lock);
+- /*clearing this interrupts first*/
+- iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_TFR);
+- if (raw_block) {
+- iowrite32((1 << midc->ch_id),
+- mid->dma_base + CLEAR_BLOCK);
+- }
+- midc_scan_descriptors(mid, midc);
+- pr_debug("MDMA:Scan of desc... complete, unmasking\n");
+- iowrite32(UNMASK_INTR_REG(midc->ch_id),
+- mid->dma_base + MASK_TFR);
+- if (raw_block) {
+- iowrite32(UNMASK_INTR_REG(midc->ch_id),
+- mid->dma_base + MASK_BLOCK);
+- }
+- spin_unlock_bh(&midc->lock);
+- }
+-
+- status = ioread32(mid->dma_base + RAW_ERR);
+- status &= mid->intr_mask;
+- while (status) {
+- /*err interrupt*/
+- i = get_ch_index(&status, mid->chan_base);
+- if (i < 0) {
+- pr_err("ERR_MDMA:Invalid ch index %x\n", i);
+- return;
+- }
+- midc = &mid->ch[i];
+- if (midc == NULL) {
+- pr_err("ERR_MDMA:Null param midc\n");
+- return;
+- }
+- pr_debug("MDMA:Tx complete interrupt %x, Ch No %d Index %d\n",
+- status, midc->ch_id, i);
+-
+- iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_ERR);
+- spin_lock_bh(&midc->lock);
+- midc_handle_error(mid, midc);
+- iowrite32(UNMASK_INTR_REG(midc->ch_id),
+- mid->dma_base + MASK_ERR);
+- spin_unlock_bh(&midc->lock);
+- }
+- pr_debug("MDMA:Exiting takslet...\n");
+- return;
+-}
+-
+-static void dma_tasklet1(unsigned long data)
+-{
+- pr_debug("MDMA:in takslet1...\n");
+- return dma_tasklet(data);
+-}
+-
+-static void dma_tasklet2(unsigned long data)
+-{
+- pr_debug("MDMA:in takslet2...\n");
+- return dma_tasklet(data);
+-}
+-
+-/**
+- * intel_mid_dma_interrupt - DMA ISR
+- * @irq: IRQ where interrupt occurred
+- * @data: ISR cllback data (the controller structure)
+- *
+- * See if this is our interrupt if so then schedule the tasklet
+- * otherwise ignore
+- */
+-static irqreturn_t intel_mid_dma_interrupt(int irq, void *data)
+-{
+- struct middma_device *mid = data;
+- u32 tfr_status, err_status;
+- int call_tasklet = 0;
+-
+- tfr_status = ioread32(mid->dma_base + RAW_TFR);
+- err_status = ioread32(mid->dma_base + RAW_ERR);
+- if (!tfr_status && !err_status)
+- return IRQ_NONE;
+-
+- /*DMA Interrupt*/
+- pr_debug("MDMA:Got an interrupt on irq %d\n", irq);
+- pr_debug("MDMA: Status %x, Mask %x\n", tfr_status, mid->intr_mask);
+- tfr_status &= mid->intr_mask;
+- if (tfr_status) {
+- /*need to disable intr*/
+- iowrite32((tfr_status << INT_MASK_WE), mid->dma_base + MASK_TFR);
+- iowrite32((tfr_status << INT_MASK_WE), mid->dma_base + MASK_BLOCK);
+- pr_debug("MDMA: Calling tasklet %x\n", tfr_status);
+- call_tasklet = 1;
+- }
+- err_status &= mid->intr_mask;
+- if (err_status) {
+- iowrite32((err_status << INT_MASK_WE),
+- mid->dma_base + MASK_ERR);
+- call_tasklet = 1;
+- }
+- if (call_tasklet)
+- tasklet_schedule(&mid->tasklet);
+-
+- return IRQ_HANDLED;
+-}
+-
+-static irqreturn_t intel_mid_dma_interrupt1(int irq, void *data)
+-{
+- return intel_mid_dma_interrupt(irq, data);
+-}
+-
+-static irqreturn_t intel_mid_dma_interrupt2(int irq, void *data)
+-{
+- return intel_mid_dma_interrupt(irq, data);
+-}
+-
+-/**
+- * mid_setup_dma - Setup the DMA controller
+- * @pdev: Controller PCI device structure
+- *
+- * Initialize the DMA controller, channels, registers with DMA engine,
+- * ISR. Initialize DMA controller channels.
+- */
+-static int mid_setup_dma(struct pci_dev *pdev)
+-{
+- struct middma_device *dma = pci_get_drvdata(pdev);
+- int err, i;
+-
+- /* DMA coherent memory pool for DMA descriptor allocations */
+- dma->dma_pool = pci_pool_create("intel_mid_dma_desc_pool", pdev,
+- sizeof(struct intel_mid_dma_desc),
+- 32, 0);
+- if (NULL == dma->dma_pool) {
+- pr_err("ERR_MDMA:pci_pool_create failed\n");
+- err = -ENOMEM;
+- goto err_dma_pool;
+- }
+-
+- INIT_LIST_HEAD(&dma->common.channels);
+- dma->pci_id = pdev->device;
+- if (dma->pimr_mask) {
+- dma->mask_reg = ioremap(LNW_PERIPHRAL_MASK_BASE,
+- LNW_PERIPHRAL_MASK_SIZE);
+- if (dma->mask_reg == NULL) {
+- pr_err("ERR_MDMA:Can't map periphral intr space !!\n");
+- err = -ENOMEM;
+- goto err_ioremap;
+- }
+- } else
+- dma->mask_reg = NULL;
+-
+- pr_debug("MDMA:Adding %d channel for this controller\n", dma->max_chan);
+- /*init CH structures*/
+- dma->intr_mask = 0;
+- dma->state = RUNNING;
+- for (i = 0; i < dma->max_chan; i++) {
+- struct intel_mid_dma_chan *midch = &dma->ch[i];
+-
+- midch->chan.device = &dma->common;
+- dma_cookie_init(&midch->chan);
+- midch->ch_id = dma->chan_base + i;
+- pr_debug("MDMA:Init CH %d, ID %d\n", i, midch->ch_id);
+-
+- midch->dma_base = dma->dma_base;
+- midch->ch_regs = dma->dma_base + DMA_CH_SIZE * midch->ch_id;
+- midch->dma = dma;
+- dma->intr_mask |= 1 << (dma->chan_base + i);
+- spin_lock_init(&midch->lock);
+-
+- INIT_LIST_HEAD(&midch->active_list);
+- INIT_LIST_HEAD(&midch->queue);
+- INIT_LIST_HEAD(&midch->free_list);
+- /*mask interrupts*/
+- iowrite32(MASK_INTR_REG(midch->ch_id),
+- dma->dma_base + MASK_BLOCK);
+- iowrite32(MASK_INTR_REG(midch->ch_id),
+- dma->dma_base + MASK_SRC_TRAN);
+- iowrite32(MASK_INTR_REG(midch->ch_id),
+- dma->dma_base + MASK_DST_TRAN);
+- iowrite32(MASK_INTR_REG(midch->ch_id),
+- dma->dma_base + MASK_ERR);
+- iowrite32(MASK_INTR_REG(midch->ch_id),
+- dma->dma_base + MASK_TFR);
+-
+- disable_dma_interrupt(midch);
+- list_add_tail(&midch->chan.device_node, &dma->common.channels);
+- }
+- pr_debug("MDMA: Calc Mask as %x for this controller\n", dma->intr_mask);
+-
+- /*init dma structure*/
+- dma_cap_zero(dma->common.cap_mask);
+- dma_cap_set(DMA_MEMCPY, dma->common.cap_mask);
+- dma_cap_set(DMA_SLAVE, dma->common.cap_mask);
+- dma_cap_set(DMA_PRIVATE, dma->common.cap_mask);
+- dma->common.dev = &pdev->dev;
+-
+- dma->common.device_alloc_chan_resources =
+- intel_mid_dma_alloc_chan_resources;
+- dma->common.device_free_chan_resources =
+- intel_mid_dma_free_chan_resources;
+-
+- dma->common.device_tx_status = intel_mid_dma_tx_status;
+- dma->common.device_prep_dma_memcpy = intel_mid_dma_prep_memcpy;
+- dma->common.device_issue_pending = intel_mid_dma_issue_pending;
+- dma->common.device_prep_slave_sg = intel_mid_dma_prep_slave_sg;
+- dma->common.device_control = intel_mid_dma_device_control;
+-
+- /*enable dma cntrl*/
+- iowrite32(REG_BIT0, dma->dma_base + DMA_CFG);
+-
+- /*register irq */
+- if (dma->pimr_mask) {
+- pr_debug("MDMA:Requesting irq shared for DMAC1\n");
+- err = request_irq(pdev->irq, intel_mid_dma_interrupt1,
+- IRQF_SHARED, "INTEL_MID_DMAC1", dma);
+- if (0 != err)
+- goto err_irq;
+- } else {
+- dma->intr_mask = 0x03;
+- pr_debug("MDMA:Requesting irq for DMAC2\n");
+- err = request_irq(pdev->irq, intel_mid_dma_interrupt2,
+- IRQF_SHARED, "INTEL_MID_DMAC2", dma);
+- if (0 != err)
+- goto err_irq;
+- }
+- /*register device w/ engine*/
+- err = dma_async_device_register(&dma->common);
+- if (0 != err) {
+- pr_err("ERR_MDMA:device_register failed: %d\n", err);
+- goto err_engine;
+- }
+- if (dma->pimr_mask) {
+- pr_debug("setting up tasklet1 for DMAC1\n");
+- tasklet_init(&dma->tasklet, dma_tasklet1, (unsigned long)dma);
+- } else {
+- pr_debug("setting up tasklet2 for DMAC2\n");
+- tasklet_init(&dma->tasklet, dma_tasklet2, (unsigned long)dma);
+- }
+- return 0;
+-
+-err_engine:
+- free_irq(pdev->irq, dma);
+-err_irq:
+- if (dma->mask_reg)
+- iounmap(dma->mask_reg);
+-err_ioremap:
+- pci_pool_destroy(dma->dma_pool);
+-err_dma_pool:
+- pr_err("ERR_MDMA:setup_dma failed: %d\n", err);
+- return err;
+-
+-}
+-
+-/**
+- * middma_shutdown - Shutdown the DMA controller
+- * @pdev: Controller PCI device structure
+- *
+- * Called by remove
+- * Unregister DMa controller, clear all structures and free interrupt
+- */
+-static void middma_shutdown(struct pci_dev *pdev)
+-{
+- struct middma_device *device = pci_get_drvdata(pdev);
+-
+- dma_async_device_unregister(&device->common);
+- pci_pool_destroy(device->dma_pool);
+- if (device->mask_reg)
+- iounmap(device->mask_reg);
+- if (device->dma_base)
+- iounmap(device->dma_base);
+- free_irq(pdev->irq, device);
+- return;
+-}
+-
+-/**
+- * intel_mid_dma_probe - PCI Probe
+- * @pdev: Controller PCI device structure
+- * @id: pci device id structure
+- *
+- * Initialize the PCI device, map BARs, query driver data.
+- * Call setup_dma to complete contoller and chan initilzation
+- */
+-static int intel_mid_dma_probe(struct pci_dev *pdev,
+- const struct pci_device_id *id)
+-{
+- struct middma_device *device;
+- u32 base_addr, bar_size;
+- struct intel_mid_dma_probe_info *info;
+- int err;
+-
+- pr_debug("MDMA: probe for %x\n", pdev->device);
+- info = (void *)id->driver_data;
+- pr_debug("MDMA: CH %d, base %d, block len %d, Periphral mask %x\n",
+- info->max_chan, info->ch_base,
+- info->block_size, info->pimr_mask);
+-
+- err = pci_enable_device(pdev);
+- if (err)
+- goto err_enable_device;
+-
+- err = pci_request_regions(pdev, "intel_mid_dmac");
+- if (err)
+- goto err_request_regions;
+-
+- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+- if (err)
+- goto err_set_dma_mask;
+-
+- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+- if (err)
+- goto err_set_dma_mask;
+-
+- device = kzalloc(sizeof(*device), GFP_KERNEL);
+- if (!device) {
+- pr_err("ERR_MDMA:kzalloc failed probe\n");
+- err = -ENOMEM;
+- goto err_kzalloc;
+- }
+- device->pdev = pci_dev_get(pdev);
+-
+- base_addr = pci_resource_start(pdev, 0);
+- bar_size = pci_resource_len(pdev, 0);
+- device->dma_base = ioremap_nocache(base_addr, DMA_REG_SIZE);
+- if (!device->dma_base) {
+- pr_err("ERR_MDMA:ioremap failed\n");
+- err = -ENOMEM;
+- goto err_ioremap;
+- }
+- pci_set_drvdata(pdev, device);
+- pci_set_master(pdev);
+- device->max_chan = info->max_chan;
+- device->chan_base = info->ch_base;
+- device->block_size = info->block_size;
+- device->pimr_mask = info->pimr_mask;
+-
+- err = mid_setup_dma(pdev);
+- if (err)
+- goto err_dma;
+-
+- pm_runtime_put_noidle(&pdev->dev);
+- pm_runtime_allow(&pdev->dev);
+- return 0;
+-
+-err_dma:
+- iounmap(device->dma_base);
+-err_ioremap:
+- pci_dev_put(pdev);
+- kfree(device);
+-err_kzalloc:
+-err_set_dma_mask:
+- pci_release_regions(pdev);
+- pci_disable_device(pdev);
+-err_request_regions:
+-err_enable_device:
+- pr_err("ERR_MDMA:Probe failed %d\n", err);
+- return err;
+-}
+-
+-/**
+- * intel_mid_dma_remove - PCI remove
+- * @pdev: Controller PCI device structure
+- *
+- * Free up all resources and data
+- * Call shutdown_dma to complete contoller and chan cleanup
+- */
+-static void intel_mid_dma_remove(struct pci_dev *pdev)
+-{
+- struct middma_device *device = pci_get_drvdata(pdev);
+-
+- pm_runtime_get_noresume(&pdev->dev);
+- pm_runtime_forbid(&pdev->dev);
+- middma_shutdown(pdev);
+- pci_dev_put(pdev);
+- kfree(device);
+- pci_release_regions(pdev);
+- pci_disable_device(pdev);
+-}
+-
+-/* Power Management */
+-/*
+-* dma_suspend - PCI suspend function
+-*
+-* @pci: PCI device structure
+-* @state: PM message
+-*
+-* This function is called by OS when a power event occurs
+-*/
+-static int dma_suspend(struct device *dev)
+-{
+- struct pci_dev *pci = to_pci_dev(dev);
+- int i;
+- struct middma_device *device = pci_get_drvdata(pci);
+- pr_debug("MDMA: dma_suspend called\n");
+-
+- for (i = 0; i < device->max_chan; i++) {
+- if (device->ch[i].in_use)
+- return -EAGAIN;
+- }
+- dmac1_mask_periphral_intr(device);
+- device->state = SUSPENDED;
+- pci_save_state(pci);
+- pci_disable_device(pci);
+- pci_set_power_state(pci, PCI_D3hot);
+- return 0;
+-}
+-
+-/**
+-* dma_resume - PCI resume function
+-*
+-* @pci: PCI device structure
+-*
+-* This function is called by OS when a power event occurs
+-*/
+-int dma_resume(struct device *dev)
+-{
+- struct pci_dev *pci = to_pci_dev(dev);
+- int ret;
+- struct middma_device *device = pci_get_drvdata(pci);
+-
+- pr_debug("MDMA: dma_resume called\n");
+- pci_set_power_state(pci, PCI_D0);
+- pci_restore_state(pci);
+- ret = pci_enable_device(pci);
+- if (ret) {
+- pr_err("MDMA: device can't be enabled for %x\n", pci->device);
+- return ret;
+- }
+- device->state = RUNNING;
+- iowrite32(REG_BIT0, device->dma_base + DMA_CFG);
+- return 0;
+-}
+-
+-static int dma_runtime_suspend(struct device *dev)
+-{
+- struct pci_dev *pci_dev = to_pci_dev(dev);
+- struct middma_device *device = pci_get_drvdata(pci_dev);
+-
+- device->state = SUSPENDED;
+- return 0;
+-}
+-
+-static int dma_runtime_resume(struct device *dev)
+-{
+- struct pci_dev *pci_dev = to_pci_dev(dev);
+- struct middma_device *device = pci_get_drvdata(pci_dev);
+-
+- device->state = RUNNING;
+- iowrite32(REG_BIT0, device->dma_base + DMA_CFG);
+- return 0;
+-}
+-
+-static int dma_runtime_idle(struct device *dev)
+-{
+- struct pci_dev *pdev = to_pci_dev(dev);
+- struct middma_device *device = pci_get_drvdata(pdev);
+- int i;
+-
+- for (i = 0; i < device->max_chan; i++) {
+- if (device->ch[i].in_use)
+- return -EAGAIN;
+- }
+-
+- return pm_schedule_suspend(dev, 0);
+-}
+-
+-/******************************************************************************
+-* PCI stuff
+-*/
+-static struct pci_device_id intel_mid_dma_ids[] = {
+- { PCI_VDEVICE(INTEL, INTEL_MID_DMAC1_ID), INFO(2, 6, 4095, 0x200020)},
+- { PCI_VDEVICE(INTEL, INTEL_MID_DMAC2_ID), INFO(2, 0, 2047, 0)},
+- { PCI_VDEVICE(INTEL, INTEL_MID_GP_DMAC2_ID), INFO(2, 0, 2047, 0)},
+- { PCI_VDEVICE(INTEL, INTEL_MFLD_DMAC1_ID), INFO(4, 0, 4095, 0x400040)},
+- { 0, }
+-};
+-MODULE_DEVICE_TABLE(pci, intel_mid_dma_ids);
+-
+-static const struct dev_pm_ops intel_mid_dma_pm = {
+- .runtime_suspend = dma_runtime_suspend,
+- .runtime_resume = dma_runtime_resume,
+- .runtime_idle = dma_runtime_idle,
+- .suspend = dma_suspend,
+- .resume = dma_resume,
+-};
+-
+-static struct pci_driver intel_mid_dma_pci_driver = {
+- .name = "Intel MID DMA",
+- .id_table = intel_mid_dma_ids,
+- .probe = intel_mid_dma_probe,
+- .remove = intel_mid_dma_remove,
+-#ifdef CONFIG_PM
+- .driver = {
+- .pm = &intel_mid_dma_pm,
+- },
+-#endif
+-};
+-
+-static int __init intel_mid_dma_init(void)
+-{
+- pr_debug("INFO_MDMA: LNW DMA Driver Version %s\n",
+- INTEL_MID_DMA_DRIVER_VERSION);
+- return pci_register_driver(&intel_mid_dma_pci_driver);
+-}
+-fs_initcall(intel_mid_dma_init);
+-
+-static void __exit intel_mid_dma_exit(void)
+-{
+- pci_unregister_driver(&intel_mid_dma_pci_driver);
+-}
+-module_exit(intel_mid_dma_exit);
+-
+-MODULE_AUTHOR("Vinod Koul <vinod.koul@intel.com>");
+-MODULE_DESCRIPTION("Intel (R) MID DMAC Driver");
+-MODULE_LICENSE("GPL v2");
+-MODULE_VERSION(INTEL_MID_DMA_DRIVER_VERSION);
+diff --git a/drivers/dma/intel_mid_dma/Makefile b/drivers/dma/intel_mid_dma/Makefile
new file mode 100644
-index 0000000..be7705b
+index 0000000..6b38f9f
--- /dev/null
-+++ b/drivers/dma/intel_mid_dma/intel_mid_dma_pci.c
-@@ -0,0 +1,287 @@
++++ b/drivers/dma/intel_mid_dma/Makefile
+@@ -0,0 +1,3 @@
++obj-$(CONFIG_INTEL_MID_DMAC) += intel_mid_dma.o
++intel_mid_dma-objs:= intel_cln_dma_pci.o intel_mid_dma_pci.o
++
+diff --git a/drivers/dma/intel_mid_dma_core.c b/drivers/dma/intel_mid_dma_core.c
+new file mode 100644
+index 0000000..aeb7fd3
+--- /dev/null
++++ b/drivers/dma/intel_mid_dma_core.c
+@@ -0,0 +1,1295 @@
++/*
++ * intel_mid_dma_core.c - Intel Langwell DMA Drivers
++ *
++ * Copyright (C) 2008-14 Intel Corp
++ * Author: Vinod Koul <vinod.koul@intel.com>
++ * The driver design is based on dw_dmac driver
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ *
++ */
++#include <linux/pci.h>
++#include <linux/interrupt.h>
++#include <linux/pm_runtime.h>
++#include <linux/intel_mid_dma.h>
++#include <linux/module.h>
++
++#include "dmaengine.h"
++#include "intel_mid_dma_regs.h"
++
++#define MAX_CHAN 4 /*max ch across controllers*/
++
++#define INTEL_MID_DMAC1_ID 0x0814
++#define INTEL_MID_DMAC2_ID 0x0813
++#define INTEL_MID_GP_DMAC2_ID 0x0827
++#define INTEL_MFLD_DMAC1_ID 0x0830
++#define LNW_PERIPHRAL_MASK_BASE 0xFFAE8008
++#define LNW_PERIPHRAL_MASK_SIZE 0x10
++#define LNW_PERIPHRAL_STATUS 0x0
++#define LNW_PERIPHRAL_MASK 0x8
++
++#define INFO(_max_chan, _ch_base, _block_size, _pimr_mask) \
++ ((kernel_ulong_t)&(struct intel_mid_dma_probe_info) { \
++ .max_chan = (_max_chan), \
++ .ch_base = (_ch_base), \
++ .block_size = (_block_size), \
++ .pimr_mask = (_pimr_mask), \
++ })
++
++/*****************************************************************************
++Utility Functions*/
++/**
++ * get_ch_index - convert status to channel
++ * @status: status mask
++ * @base: dma ch base value
++ *
++ * Modify the status mask and return the channel index needing
++ * attention (or -1 if no channel is flagged)
++ */
++static int get_ch_index(int *status, unsigned int base)
++{
++ int i;
++ for (i = 0; i < MAX_CHAN; i++) {
++ if (*status & (1 << (i + base))) {
++ *status = *status & ~(1 << (i + base));
++ pr_debug("MDMA: index %d New status %x\n", i, *status);
++ return i;
++ }
++ }
++ return -1;
++}
++
++/**
++ * get_block_ts - calculates dma transaction length
++ * @len: dma transfer length
++ * @tx_width: dma transfer src width
++ * @block_size: dma controller max block size
++ *
++ * Based on src width calculate the DMA transaction length in data items
++ * return data items, or 0xFFFF if it exceeds the max block length
++ */
++static int get_block_ts(int len, int tx_width, int block_size)
++{
++ int byte_width = 0, block_ts = 0;
++
++ switch (tx_width) {
++ case DMA_SLAVE_BUSWIDTH_1_BYTE:
++ byte_width = 1;
++ break;
++ case DMA_SLAVE_BUSWIDTH_2_BYTES:
++ byte_width = 2;
++ break;
++ case DMA_SLAVE_BUSWIDTH_4_BYTES:
++ default:
++ byte_width = 4;
++ break;
++ }
++
++ block_ts = len/byte_width;
++ if (block_ts > block_size)
++ block_ts = 0xFFFF;
++ return block_ts;
++}
++
++/*****************************************************************************
++DMAC1 interrupt Functions*/
++
++/**
++ * dmac1_mask_periphral_intr - mask the peripheral interrupt
++ * @mid: dma device for which masking is required
++ *
++ * Masks the DMA peripheral interrupt
++ * this is valid for DMAC1 family controllers only
++ * This controller should have peripheral mask registers already mapped
++ */
++void dmac1_mask_periphral_intr(struct middma_device *mid)
++{
++ u32 pimr;
++
++ if (mid->pimr_mask) {
++ pimr = readl(mid->mask_reg + LNW_PERIPHRAL_MASK);
++ pimr |= mid->pimr_mask;
++ writel(pimr, mid->mask_reg + LNW_PERIPHRAL_MASK);
++ }
++ return;
++}
++
++/**
++ * dmac1_unmask_periphral_intr - unmask the peripheral interrupt
++ * @midc: dma channel for which masking is required
++ *
++ * UnMasks the DMA peripheral interrupt,
++ * this is valid for DMAC1 family controllers only
++ * This controller should have peripheral mask registers already mapped
++ */
++void dmac1_unmask_periphral_intr(struct intel_mid_dma_chan *midc)
++{
++ u32 pimr;
++ struct middma_device *mid = to_middma_device(midc->chan.device);
++
++ if (mid->pimr_mask) {
++ pimr = readl(mid->mask_reg + LNW_PERIPHRAL_MASK);
++ pimr &= ~mid->pimr_mask;
++ writel(pimr, mid->mask_reg + LNW_PERIPHRAL_MASK);
++ }
++ return;
++}
++
++/**
++ * enable_dma_interrupt - enable the peripheral interrupt
++ * @midc: dma channel for which enable interrupt is required
++ *
++ * Enable the DMA peripheral interrupt,
++ * this is valid for DMAC1 family controllers only
++ * This controller should have peripheral mask registers already mapped
++ */
++static void enable_dma_interrupt(struct intel_mid_dma_chan *midc)
++{
++ dmac1_unmask_periphral_intr(midc);
++
++ /*en ch interrupts*/
++ iowrite32(UNMASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_TFR);
++ iowrite32(UNMASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_ERR);
++ return;
++}
++
++/**
++ * disable_dma_interrupt - disable the peripheral interrupt
++ * @midc: dma channel for which disable interrupt is required
++ *
++ * Disable the DMA peripheral interrupt,
++ * this is valid for DMAC1 family controllers only
++ * This controller should have peripheral mask registers already mapped
++ */
++static void disable_dma_interrupt(struct intel_mid_dma_chan *midc)
++{
++ /*Check LPE PISR, make sure fwd is disabled*/
++ iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_BLOCK);
++ iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_TFR);
++ iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_ERR);
++ return;
++}
++
++/*****************************************************************************
++DMA channel helper Functions*/
++/**
++ * midc_desc_get - get a descriptor
++ * @midc: dma channel for which descriptor is required
++ *
++ * Obtain a descriptor for the channel. Returns NULL if none are free.
++ * Once the descriptor is returned it is private until put on another
++ * list or freed
++ */
++static struct intel_mid_dma_desc *midc_desc_get(struct intel_mid_dma_chan *midc)
++{
++ struct intel_mid_dma_desc *desc, *_desc;
++ struct intel_mid_dma_desc *ret = NULL;
++
++ spin_lock_bh(&midc->lock);
++ list_for_each_entry_safe(desc, _desc, &midc->free_list, desc_node) {
++ if (async_tx_test_ack(&desc->txd)) {
++ list_del(&desc->desc_node);
++ ret = desc;
++ break;
++ }
++ }
++ spin_unlock_bh(&midc->lock);
++ return ret;
++}
++
++/**
++ * midc_desc_put - put a descriptor
++ * @midc: dma channel for which descriptor is required
++ * @desc: descriptor to put
++ *
++ * Return a descriptor obtained from midc_desc_get back to the free pool
++ */
++static void midc_desc_put(struct intel_mid_dma_chan *midc,
++ struct intel_mid_dma_desc *desc)
++{
++ if (desc) {
++ spin_lock_bh(&midc->lock);
++ list_add_tail(&desc->desc_node, &midc->free_list);
++ spin_unlock_bh(&midc->lock);
++ }
++}
++/**
++ * midc_dostart - begin a DMA transaction
++ * @midc: channel for which txn is to be started
++ * @first: first descriptor of series
++ *
++ * Load a transaction into the engine. This must be called with midc->lock
++ * held and bh disabled.
++ */
++static void midc_dostart(struct intel_mid_dma_chan *midc,
++ struct intel_mid_dma_desc *first)
++{
++ struct middma_device *mid = to_middma_device(midc->chan.device);
++
++ /* channel is idle */
++ if (midc->busy && test_ch_en(midc->dma_base, midc->ch_id)) {
++ /*error*/
++ pr_err("ERR_MDMA: channel is busy in start\n");
++ /* The tasklet will hopefully advance the queue... */
++ return;
++ }
++ midc->busy = true;
++ /*write registers and en*/
++ iowrite32(first->sar, midc->ch_regs + SAR);
++ iowrite32(first->dar, midc->ch_regs + DAR);
++ iowrite32(first->lli_phys, midc->ch_regs + LLP);
++ iowrite32(first->cfg_hi, midc->ch_regs + CFG_HIGH);
++ iowrite32(first->cfg_lo, midc->ch_regs + CFG_LOW);
++ iowrite32(first->ctl_lo, midc->ch_regs + CTL_LOW);
++ iowrite32(first->ctl_hi, midc->ch_regs + CTL_HIGH);
++ pr_debug("MDMA:TX SAR %x,DAR %x,CFGL %x,CFGH %x,CTLH %x, CTLL %x\n",
++ (int)first->sar, (int)first->dar, first->cfg_hi,
++ first->cfg_lo, first->ctl_hi, first->ctl_lo);
++ first->status = DMA_IN_PROGRESS;
++
++ iowrite32(ENABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN);
++}
++
++/**
++ * midc_descriptor_complete - process completed descriptor
++ * @midc: channel owning the descriptor
++ * @desc: the descriptor itself
++ *
++ * Process a completed descriptor and perform any callbacks upon
++ * the completion. The completion handling drops the lock during the
++ * callbacks but must be called with the lock held.
++ */
++static void midc_descriptor_complete(struct intel_mid_dma_chan *midc,
++ struct intel_mid_dma_desc *desc)
++ __releases(&midc->lock) __acquires(&midc->lock)
++{
++ struct dma_async_tx_descriptor *txd = &desc->txd;
++ dma_async_tx_callback callback_txd = NULL;
++ struct intel_mid_dma_lli *llitem;
++ void *param_txd = NULL;
++
++ dma_cookie_complete(txd);
++ callback_txd = txd->callback;
++ param_txd = txd->callback_param;
++
++ if (desc->lli != NULL) {
++ /*clear the DONE bit of completed LLI in memory*/
++ llitem = desc->lli + desc->current_lli;
++ llitem->ctl_hi &= CLEAR_DONE;
++ if (desc->current_lli < desc->lli_length-1)
++ (desc->current_lli)++;
++ else
++ desc->current_lli = 0;
++ }
++ spin_unlock_bh(&midc->lock);
++ if (callback_txd) {
++ pr_debug("MDMA: TXD callback set ... calling\n");
++ callback_txd(param_txd);
++ }
++ if (midc->raw_tfr) {
++ desc->status = DMA_SUCCESS;
++ if (desc->lli != NULL) {
++ pci_pool_free(desc->lli_pool, desc->lli,
++ desc->lli_phys);
++ pci_pool_destroy(desc->lli_pool);
++ desc->lli = NULL;
++ }
++ list_move(&desc->desc_node, &midc->free_list);
++ midc->busy = false;
++ }
++ spin_lock_bh(&midc->lock);
++
++}
++/**
++ * midc_scan_descriptors - check the descriptors in channel
++ * mark completed when tx is complete
++ * @mid: device
++ * @midc: channel to scan
++ *
++ * Walk the descriptor chain for the device and process any entries
++ * that are complete.
++ */
++static void midc_scan_descriptors(struct middma_device *mid,
++ struct intel_mid_dma_chan *midc)
++{
++ struct intel_mid_dma_desc *desc = NULL, *_desc = NULL;
++
++ /*tx is complete*/
++ list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
++ if (desc->status == DMA_IN_PROGRESS)
++ midc_descriptor_complete(midc, desc);
++ }
++ return;
++ }
++/**
++ * midc_lli_fill_sg - Helper function to convert
++ * SG list to Linked List Items.
++ *@midc: Channel
++ *@desc: DMA descriptor
++ *@sglist: Pointer to SG list
++ *@sglen: SG list length
++ *@flags: DMA transaction flags
++ *
++ * Walk through the SG list and convert the SG list into Linked
++ * List Items (LLI).
++ */
++static int midc_lli_fill_sg(struct intel_mid_dma_chan *midc,
++ struct intel_mid_dma_desc *desc,
++ struct scatterlist *sglist,
++ unsigned int sglen,
++ unsigned int flags)
++{
++ struct intel_mid_dma_slave *mids;
++ struct scatterlist *sg;
++ dma_addr_t lli_next, sg_phy_addr;
++ struct intel_mid_dma_lli *lli_bloc_desc;
++ union intel_mid_dma_ctl_lo ctl_lo;
++ union intel_mid_dma_ctl_hi ctl_hi;
++ int i;
++
++ pr_debug("MDMA: Entered midc_lli_fill_sg\n");
++ mids = midc->mid_slave;
++
++ lli_bloc_desc = desc->lli;
++ lli_next = desc->lli_phys;
++
++ ctl_lo.ctl_lo = desc->ctl_lo;
++ ctl_hi.ctl_hi = desc->ctl_hi;
++ for_each_sg(sglist, sg, sglen, i) {
++ /*Populate CTL_LOW and LLI values*/
++ if (i != sglen - 1) {
++ lli_next = lli_next +
++ sizeof(struct intel_mid_dma_lli);
++ } else {
++ /*Check for circular list, otherwise terminate LLI to ZERO*/
++ if (flags & DMA_PREP_CIRCULAR_LIST) {
++ pr_debug("MDMA: LLI is configured in circular mode\n");
++ lli_next = desc->lli_phys;
++ } else {
++ lli_next = 0;
++ ctl_lo.ctlx.llp_dst_en = 0;
++ ctl_lo.ctlx.llp_src_en = 0;
++ }
++ }
++ /*Populate CTL_HI values*/
++ ctl_hi.ctlx.block_ts = get_block_ts(sg_dma_len(sg),
++ desc->width,
++ midc->dma->block_size);
++ /*Populate SAR and DAR values*/
++ sg_phy_addr = sg_dma_address(sg);
++ if (desc->dirn == DMA_MEM_TO_DEV) {
++ lli_bloc_desc->sar = sg_phy_addr;
++ lli_bloc_desc->dar = mids->dma_slave.dst_addr;
++ } else if (desc->dirn == DMA_DEV_TO_MEM) {
++ lli_bloc_desc->sar = mids->dma_slave.src_addr;
++ lli_bloc_desc->dar = sg_phy_addr;
++ }
++ /*Copy values into block descriptor in system memroy*/
++ lli_bloc_desc->llp = lli_next;
++ lli_bloc_desc->ctl_lo = ctl_lo.ctl_lo;
++ lli_bloc_desc->ctl_hi = ctl_hi.ctl_hi;
++
++ lli_bloc_desc++;
++ }
++ /*Copy very first LLI values to descriptor*/
++ desc->ctl_lo = desc->lli->ctl_lo;
++ desc->ctl_hi = desc->lli->ctl_hi;
++ desc->sar = desc->lli->sar;
++ desc->dar = desc->lli->dar;
++
++ return 0;
++}
++/*****************************************************************************
++DMA engine callback Functions*/
++/**
++ * intel_mid_dma_tx_submit - callback to submit DMA transaction
++ * @tx: dma engine descriptor
++ *
++ * Submit the DMA transaction for this descriptor, start if ch idle
++ */
++static dma_cookie_t intel_mid_dma_tx_submit(struct dma_async_tx_descriptor *tx)
++{
++ struct intel_mid_dma_desc *desc = to_intel_mid_dma_desc(tx);
++ struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(tx->chan);
++ dma_cookie_t cookie;
++
++ spin_lock_bh(&midc->lock);
++ cookie = dma_cookie_assign(tx);
++
++ if (list_empty(&midc->active_list))
++ list_add_tail(&desc->desc_node, &midc->active_list);
++ else
++ list_add_tail(&desc->desc_node, &midc->queue);
++
++ midc_dostart(midc, desc);
++ spin_unlock_bh(&midc->lock);
++
++ return cookie;
++}
++
++/**
++ * intel_mid_dma_issue_pending - callback to issue pending txn
++ * @chan: chan where pending transaction needs to be checked and submitted
++ *
++ * Call for scan to issue pending descriptors
++ */
++static void intel_mid_dma_issue_pending(struct dma_chan *chan)
++{
++ struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
++
++ spin_lock_bh(&midc->lock);
++ if (!list_empty(&midc->queue))
++ midc_scan_descriptors(to_middma_device(chan->device), midc);
++ spin_unlock_bh(&midc->lock);
++}
++
++/**
++ * intel_mid_dma_tx_status - Return status of txn
++ * @chan: chan for where status needs to be checked
++ * @cookie: cookie for txn
++ * @txstate: DMA txn state
++ *
++ * Return status of DMA txn
++ */
++static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan,
++ dma_cookie_t cookie,
++ struct dma_tx_state *txstate)
++{
++ struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
++ enum dma_status ret;
++
++ ret = dma_cookie_status(chan, cookie, txstate);
++ if (ret != DMA_SUCCESS) {
++ spin_lock_bh(&midc->lock);
++ midc_scan_descriptors(to_middma_device(chan->device), midc);
++ spin_unlock_bh(&midc->lock);
++
++ ret = dma_cookie_status(chan, cookie, txstate);
++ }
++
++ return ret;
++}
++
++static int dma_slave_control(struct dma_chan *chan, unsigned long arg)
++{
++ struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
++ struct dma_slave_config *slave = (struct dma_slave_config *)arg;
++ struct intel_mid_dma_slave *mid_slave;
++
++ BUG_ON(!midc);
++ BUG_ON(!slave);
++ pr_debug("MDMA: slave control called\n");
++
++ mid_slave = to_intel_mid_dma_slave(slave);
++
++ BUG_ON(!mid_slave);
++
++ midc->mid_slave = mid_slave;
++ return 0;
++}
++/**
++ * intel_mid_dma_device_control - DMA device control
++ * @chan: chan for DMA control
++ * @cmd: control cmd
++ * @arg: cmd arg value
++ *
++ * Perform DMA control command
++ */
++static int intel_mid_dma_device_control(struct dma_chan *chan,
++ enum dma_ctrl_cmd cmd, unsigned long arg)
++{
++ struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
++ struct middma_device *mid = to_middma_device(chan->device);
++ struct intel_mid_dma_desc *desc, *_desc;
++ union intel_mid_dma_cfg_lo cfg_lo;
++
++ if (cmd == DMA_SLAVE_CONFIG)
++ return dma_slave_control(chan, arg);
++
++ if (cmd != DMA_TERMINATE_ALL)
++ return -ENXIO;
++
++ spin_lock_bh(&midc->lock);
++ if (midc->busy == false) {
++ spin_unlock_bh(&midc->lock);
++ return 0;
++ }
++ /*Suspend and disable the channel*/
++ cfg_lo.cfg_lo = ioread32(midc->ch_regs + CFG_LOW);
++ cfg_lo.cfgx.ch_susp = 1;
++ iowrite32(cfg_lo.cfg_lo, midc->ch_regs + CFG_LOW);
++ iowrite32(DISABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN);
++ midc->busy = false;
++ /* Disable interrupts */
++ disable_dma_interrupt(midc);
++ midc->descs_allocated = 0;
++
++ spin_unlock_bh(&midc->lock);
++ list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
++ if (desc->lli != NULL) {
++ pci_pool_free(desc->lli_pool, desc->lli,
++ desc->lli_phys);
++ pci_pool_destroy(desc->lli_pool);
++ desc->lli = NULL;
++ }
++ list_move(&desc->desc_node, &midc->free_list);
++ }
++ return 0;
++}
++
++
++/**
++ * intel_mid_dma_prep_memcpy - Prep memcpy txn
++ * @chan: chan for DMA transfer
++ * @dest: destn address
++ * @src: src address
++ * @len: DMA transfer len
++ * @flags: DMA flags
++ *
++ * Perform a DMA memcpy. Note we support slave peripheral DMA transfers only
++ * The peripheral txn details should be filled in slave structure properly
++ * Returns the descriptor for this txn
++ */
++static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
++ struct dma_chan *chan, dma_addr_t dest,
++ dma_addr_t src, size_t len, unsigned long flags)
++{
++ struct intel_mid_dma_chan *midc;
++ struct intel_mid_dma_desc *desc = NULL;
++ struct intel_mid_dma_slave *mids;
++ union intel_mid_dma_ctl_lo ctl_lo;
++ union intel_mid_dma_ctl_hi ctl_hi;
++ union intel_mid_dma_cfg_lo cfg_lo;
++ union intel_mid_dma_cfg_hi cfg_hi;
++ enum dma_slave_buswidth width;
++
++ pr_debug("MDMA: Prep for memcpy\n");
++ BUG_ON(!chan);
++ if (!len)
++ return NULL;
++
++ midc = to_intel_mid_dma_chan(chan);
++ BUG_ON(!midc);
++
++ mids = midc->mid_slave;
++ BUG_ON(!mids);
++
++ pr_debug("MDMA:called for DMA %x CH %d Length %zu\n",
++ midc->dma->pci_id, midc->ch_id, len);
++ pr_debug("MDMA:Cfg passed Mode %x, Dirn %x, HS %x, Width %x\n",
++ mids->cfg_mode, mids->dma_slave.direction,
++ mids->hs_mode, mids->dma_slave.src_addr_width);
++
++ /*calculate CFG_LO*/
++ if (mids->hs_mode == LNW_DMA_SW_HS) {
++ cfg_lo.cfg_lo = 0;
++ cfg_lo.cfgx.hs_sel_dst = 1;
++ cfg_lo.cfgx.hs_sel_src = 1;
++ } else if (mids->hs_mode == LNW_DMA_HW_HS)
++ cfg_lo.cfg_lo = 0x00000;
++
++ /*calculate CFG_HI*/
++ if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) {
++ /*SW HS only*/
++ cfg_hi.cfg_hi = 0;
++ } else {
++ cfg_hi.cfg_hi = 0;
++ if (midc->dma->pimr_mask) {
++ cfg_hi.cfgx.protctl = 0x0; /*default value*/
++ cfg_hi.cfgx.fifo_mode = 1;
++ if (mids->dma_slave.direction == DMA_MEM_TO_DEV) {
++ cfg_hi.cfgx.src_per = 0;
++ if (mids->device_instance == 0)
++ cfg_hi.cfgx.dst_per = 3;
++ if (mids->device_instance == 1)
++ cfg_hi.cfgx.dst_per = 1;
++ } else if (mids->dma_slave.direction == DMA_DEV_TO_MEM) {
++ if (mids->device_instance == 0)
++ cfg_hi.cfgx.src_per = 2;
++ if (mids->device_instance == 1)
++ cfg_hi.cfgx.src_per = 0;
++ cfg_hi.cfgx.dst_per = 0;
++ }
++ } else {
++ cfg_hi.cfgx.protctl = 0x1; /*default value*/
++ cfg_hi.cfgx.src_per = cfg_hi.cfgx.dst_per =
++ midc->ch_id - midc->dma->chan_base;
++ }
++ }
++
++ /*calculate CTL_HI*/
++ ctl_hi.ctlx.reser = 0;
++ ctl_hi.ctlx.done = 0;
++ width = mids->dma_slave.src_addr_width;
++
++ ctl_hi.ctlx.block_ts = get_block_ts(len, width, midc->dma->block_size);
++ pr_debug("MDMA:calc len %d for block size %d\n",
++ ctl_hi.ctlx.block_ts, midc->dma->block_size);
++ /*calculate CTL_LO*/
++ ctl_lo.ctl_lo = 0;
++ ctl_lo.ctlx.int_en = 1;
++ ctl_lo.ctlx.dst_msize = mids->dma_slave.src_maxburst;
++ ctl_lo.ctlx.src_msize = mids->dma_slave.dst_maxburst;
++
++ /*
++ * Here we need some translation from "enum dma_slave_buswidth"
++ * to the format for our dma controller
++ * standard intel_mid_dmac's format
++ * 1 Byte 0b000
++ * 2 Bytes 0b001
++ * 4 Bytes 0b010
++ */
++ ctl_lo.ctlx.dst_tr_width = mids->dma_slave.dst_addr_width / 2;
++ ctl_lo.ctlx.src_tr_width = mids->dma_slave.src_addr_width / 2;
++
++ if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) {
++ ctl_lo.ctlx.tt_fc = 0;
++ ctl_lo.ctlx.sinc = 0;
++ ctl_lo.ctlx.dinc = 0;
++ } else {
++ if (mids->dma_slave.direction == DMA_MEM_TO_DEV) {
++ ctl_lo.ctlx.sinc = 0;
++ ctl_lo.ctlx.dinc = 2;
++ ctl_lo.ctlx.tt_fc = 1;
++ } else if (mids->dma_slave.direction == DMA_DEV_TO_MEM) {
++ ctl_lo.ctlx.sinc = 2;
++ ctl_lo.ctlx.dinc = 0;
++ ctl_lo.ctlx.tt_fc = 2;
++ }
++ }
++
++ pr_debug("MDMA:Calc CTL LO %x, CTL HI %x, CFG LO %x, CFG HI %x\n",
++ ctl_lo.ctl_lo, ctl_hi.ctl_hi, cfg_lo.cfg_lo, cfg_hi.cfg_hi);
++
++ enable_dma_interrupt(midc);
++
++ desc = midc_desc_get(midc);
++ if (desc == NULL)
++ goto err_desc_get;
++ desc->sar = src;
++ desc->dar = dest ;
++ desc->len = len;
++ desc->cfg_hi = cfg_hi.cfg_hi;
++ desc->cfg_lo = cfg_lo.cfg_lo;
++ desc->ctl_lo = ctl_lo.ctl_lo;
++ desc->ctl_hi = ctl_hi.ctl_hi;
++ desc->width = width;
++ desc->dirn = mids->dma_slave.direction;
++ desc->lli_phys = 0;
++ desc->lli = NULL;
++ desc->lli_pool = NULL;
++ return &desc->txd;
++
++err_desc_get:
++ pr_err("ERR_MDMA: Failed to get desc\n");
++ midc_desc_put(midc, desc);
++ return NULL;
++}
++/**
++ * intel_mid_dma_prep_slave_sg - Prep slave sg txn
++ * @chan: chan for DMA transfer
++ * @sgl: scatter gather list
++ * @sg_len: length of sg txn
++ * @direction: DMA transfer direction
++ * @flags: DMA flags
++ * @context: transfer context (ignored)
++ *
++ * Prepares LLI based peripheral transfer
++ */
++static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg(
++ struct dma_chan *chan, struct scatterlist *sgl,
++ unsigned int sg_len, enum dma_transfer_direction direction,
++ unsigned long flags, void *context)
++{
++ struct intel_mid_dma_chan *midc = NULL;
++ struct intel_mid_dma_slave *mids = NULL;
++ struct intel_mid_dma_desc *desc = NULL;
++ struct dma_async_tx_descriptor *txd = NULL;
++ union intel_mid_dma_ctl_lo ctl_lo;
++
++ pr_debug("MDMA: Prep for slave SG\n");
++
++ if (!sg_len) {
++ pr_err("MDMA: Invalid SG length\n");
++ return NULL;
++ }
++ midc = to_intel_mid_dma_chan(chan);
++ BUG_ON(!midc);
++
++ mids = midc->mid_slave;
++ BUG_ON(!mids);
++
++ if (!midc->dma->pimr_mask) {
++ /* We can still handle sg list with only one item */
++ if (sg_len == 1) {
++ txd = intel_mid_dma_prep_memcpy(chan,
++ mids->dma_slave.dst_addr,
++ mids->dma_slave.src_addr,
++ sg_dma_len(sgl),
++ flags);
++ return txd;
++ } else {
++ pr_warn("MDMA: SG list is not supported by this controller\n");
++ return NULL;
++ }
++ }
++
++ pr_debug("MDMA: SG Length = %d, direction = %d, Flags = %#lx\n",
++ sg_len, direction, flags);
++
++ txd = intel_mid_dma_prep_memcpy(chan, 0, 0, sg_dma_len(sgl), flags);
++ if (NULL == txd) {
++ pr_err("MDMA: Prep memcpy failed\n");
++ return NULL;
++ }
++
++ desc = to_intel_mid_dma_desc(txd);
++ desc->dirn = direction;
++ ctl_lo.ctl_lo = desc->ctl_lo;
++ ctl_lo.ctlx.llp_dst_en = 1;
++ ctl_lo.ctlx.llp_src_en = 1;
++ desc->ctl_lo = ctl_lo.ctl_lo;
++ desc->lli_length = sg_len;
++ desc->current_lli = 0;
++ /* DMA coherent memory pool for LLI descriptors*/
++ desc->lli_pool = pci_pool_create("intel_mid_dma_lli_pool",
++ midc->dma->pdev,
++ (sizeof(struct intel_mid_dma_lli)*sg_len),
++ 32, 0);
++ if (NULL == desc->lli_pool) {
++ pr_err("MID_DMA:LLI pool create failed\n");
++ return NULL;
++ }
++
++ desc->lli = pci_pool_alloc(desc->lli_pool, GFP_KERNEL, &desc->lli_phys);
++ if (!desc->lli) {
++ pr_err("MID_DMA: LLI alloc failed\n");
++ pci_pool_destroy(desc->lli_pool);
++ return NULL;
++ }
++
++ midc_lli_fill_sg(midc, desc, sgl, sg_len, flags);
++ if (flags & DMA_PREP_INTERRUPT) {
++ iowrite32(UNMASK_INTR_REG(midc->ch_id),
++ midc->dma_base + MASK_BLOCK);
++ pr_debug("MDMA:Enabled Block interrupt\n");
++ }
++ return &desc->txd;
++}
++
++/**
++ * intel_mid_dma_free_chan_resources - Frees dma resources
++ * @chan: chan requiring attention
++ *
++ * Frees the allocated resources on this DMA chan
++ */
++static void intel_mid_dma_free_chan_resources(struct dma_chan *chan)
++{
++ struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
++ struct middma_device *mid = to_middma_device(chan->device);
++ struct intel_mid_dma_desc *desc, *_desc;
++
++ if (true == midc->busy) {
++ /*trying to free ch in use!!!!!*/
++ pr_err("ERR_MDMA: trying to free ch in use\n");
++ }
++ spin_lock_bh(&midc->lock);
++ midc->descs_allocated = 0;
++ list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
++ list_del(&desc->desc_node);
++ pci_pool_free(mid->dma_pool, desc, desc->txd.phys);
++ }
++ list_for_each_entry_safe(desc, _desc, &midc->free_list, desc_node) {
++ list_del(&desc->desc_node);
++ pci_pool_free(mid->dma_pool, desc, desc->txd.phys);
++ }
++ list_for_each_entry_safe(desc, _desc, &midc->queue, desc_node) {
++ list_del(&desc->desc_node);
++ pci_pool_free(mid->dma_pool, desc, desc->txd.phys);
++ }
++ spin_unlock_bh(&midc->lock);
++ midc->in_use = false;
++ midc->busy = false;
++ /* Disable CH interrupts */
++ iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_BLOCK);
++ iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_ERR);
++ pm_runtime_put(&mid->pdev->dev);
++}
++
++/**
++ * intel_mid_dma_alloc_chan_resources - Allocate dma resources
++ * @chan: chan requiring attention
++ *
++ * Allocates DMA resources on this chan
++ * Return the descriptors allocated
++ */
++static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan)
++{
++ struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
++ struct middma_device *mid = to_middma_device(chan->device);
++ struct intel_mid_dma_desc *desc;
++ dma_addr_t phys;
++ int i = 0;
++
++ pm_runtime_get_sync(&mid->pdev->dev);
++
++ if (mid->state == SUSPENDED) {
++ if (dma_resume(&mid->pdev->dev)) {
++ pr_err("ERR_MDMA: resume failed");
++ return -EFAULT;
++ }
++ }
++
++ /* ASSERT: channel is idle */
++ if (test_ch_en(mid->dma_base, midc->ch_id)) {
++ /*ch is not idle*/
++ pr_err("ERR_MDMA: ch not idle\n");
++ pm_runtime_put(&mid->pdev->dev);
++ return -EIO;
++ }
++ dma_cookie_init(chan);
++
++ spin_lock_bh(&midc->lock);
++ while (midc->descs_allocated < DESCS_PER_CHANNEL) {
++ spin_unlock_bh(&midc->lock);
++ desc = pci_pool_alloc(mid->dma_pool, GFP_KERNEL, &phys);
++ if (!desc) {
++ pr_err("ERR_MDMA: desc failed\n");
++ pm_runtime_put(&mid->pdev->dev);
++ return -ENOMEM;
++ /*check*/
++ }
++ dma_async_tx_descriptor_init(&desc->txd, chan);
++ desc->txd.tx_submit = intel_mid_dma_tx_submit;
++ desc->txd.flags = DMA_CTRL_ACK;
++ desc->txd.phys = phys;
++ spin_lock_bh(&midc->lock);
++ i = ++midc->descs_allocated;
++ list_add_tail(&desc->desc_node, &midc->free_list);
++ }
++ spin_unlock_bh(&midc->lock);
++ midc->in_use = true;
++ midc->busy = false;
++ pr_debug("MID_DMA: Desc alloc done ret: %d desc\n", i);
++ return i;
++}
++
++/**
++ * midc_handle_error - Handle DMA txn error
++ * @mid: controller where error occurred
++ * @midc: chan where error occurred
++ *
++ * Scan the descriptor for error
++ */
++static void midc_handle_error(struct middma_device *mid,
++ struct intel_mid_dma_chan *midc)
++{
++ midc_scan_descriptors(mid, midc);
++}
++
++/**
++ * dma_tasklet - DMA interrupt tasklet
++ * @data: tasklet arg (the controller structure)
++ *
++ * Scan the controller for interrupts for completion/error
++ * Clear the interrupt and call for handling completion/error
++ */
++static void dma_tasklet(unsigned long data)
++{
++ struct middma_device *mid = NULL;
++ struct intel_mid_dma_chan *midc = NULL;
++ u32 status, raw_tfr, raw_block;
++ int i;
++
++ mid = (struct middma_device *)data;
++ if (mid == NULL) {
++ pr_err("ERR_MDMA: tasklet Null param\n");
++ return;
++ }
++ pr_debug("MDMA: in tasklet for device %x\n", mid->pci_id);
++ raw_tfr = ioread32(mid->dma_base + RAW_TFR);
++ raw_block = ioread32(mid->dma_base + RAW_BLOCK);
++ status = raw_tfr | raw_block;
++ status &= mid->intr_mask;
++ while (status) {
++ /*txn interrupt*/
++ i = get_ch_index(&status, mid->chan_base);
++ if (i < 0) {
++ pr_err("ERR_MDMA:Invalid ch index %x\n", i);
++ return;
++ }
++ midc = &mid->ch[i];
++ if (midc == NULL) {
++ pr_err("ERR_MDMA:Null param midc\n");
++ return;
++ }
++ pr_debug("MDMA:Tx complete interrupt %x, Ch No %d Index %d\n",
++ status, midc->ch_id, i);
++ midc->raw_tfr = raw_tfr;
++ midc->raw_block = raw_block;
++ spin_lock_bh(&midc->lock);
++ /*clearing this interrupts first*/
++ iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_TFR);
++ if (raw_block) {
++ iowrite32((1 << midc->ch_id),
++ mid->dma_base + CLEAR_BLOCK);
++ }
++ midc_scan_descriptors(mid, midc);
++ pr_debug("MDMA:Scan of desc... complete, unmasking\n");
++ iowrite32(UNMASK_INTR_REG(midc->ch_id),
++ mid->dma_base + MASK_TFR);
++ if (raw_block) {
++ iowrite32(UNMASK_INTR_REG(midc->ch_id),
++ mid->dma_base + MASK_BLOCK);
++ }
++ spin_unlock_bh(&midc->lock);
++ }
++
++ status = ioread32(mid->dma_base + RAW_ERR);
++ status &= mid->intr_mask;
++ while (status) {
++ /*err interrupt*/
++ i = get_ch_index(&status, mid->chan_base);
++ if (i < 0) {
++ pr_err("ERR_MDMA:Invalid ch index %x\n", i);
++ return;
++ }
++ midc = &mid->ch[i];
++ if (midc == NULL) {
++ pr_err("ERR_MDMA:Null param midc\n");
++ return;
++ }
++ pr_debug("MDMA:Tx complete interrupt %x, Ch No %d Index %d\n",
++ status, midc->ch_id, i);
++
++ iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_ERR);
++ spin_lock_bh(&midc->lock);
++ midc_handle_error(mid, midc);
++ iowrite32(UNMASK_INTR_REG(midc->ch_id),
++ mid->dma_base + MASK_ERR);
++ spin_unlock_bh(&midc->lock);
++ }
++ pr_debug("MDMA:Exiting takslet...\n");
++ return;
++}
++
++static void dma_tasklet1(unsigned long data)
++{
++ pr_debug("MDMA:in takslet1...\n");
++ return dma_tasklet(data);
++}
++
++static void dma_tasklet2(unsigned long data)
++{
++ pr_debug("MDMA:in takslet2...\n");
++ return dma_tasklet(data);
++}
++
++/**
++ * intel_mid_dma_interrupt - DMA ISR
++ * @irq: IRQ where interrupt occurred
++ * @data: ISR callback data (the controller structure)
++ *
++ * See if this is our interrupt if so then schedule the tasklet
++ * otherwise ignore
++ */
++irqreturn_t intel_mid_dma_interrupt(int irq, void *data)
++{
++ struct middma_device *mid = data;
++ u32 tfr_status, err_status;
++ int call_tasklet = 0;
++
++ tfr_status = ioread32(mid->dma_base + RAW_TFR);
++ err_status = ioread32(mid->dma_base + RAW_ERR);
++ if (!tfr_status && !err_status)
++ return IRQ_NONE;
++
++ /*DMA Interrupt*/
++#if 0
++ pr_debug("MDMA:Got an interrupt on irq %d\n", irq);
++ pr_debug("MDMA: Status %x, Mask %x\n", tfr_status, mid->intr_mask);
++#else
++ pr_info("MDMA:Got an interrupt on irq %d\n", irq);
++ pr_info("MDMA: Status %x, Mask %x\n", tfr_status, mid->intr_mask);
++
++#endif
++ tfr_status &= mid->intr_mask;
++ if (tfr_status) {
++ /*need to disable intr*/
++ iowrite32((tfr_status << INT_MASK_WE), mid->dma_base + MASK_TFR);
++ iowrite32((tfr_status << INT_MASK_WE), mid->dma_base + MASK_BLOCK);
++ pr_debug("MDMA: Calling tasklet %x\n", tfr_status);
++ call_tasklet = 1;
++ }
++ err_status &= mid->intr_mask;
++ if (err_status) {
++ iowrite32((err_status << INT_MASK_WE),
++ mid->dma_base + MASK_ERR);
++ call_tasklet = 1;
++ }
++ if (call_tasklet)
++ tasklet_schedule(&mid->tasklet);
++
++ return IRQ_HANDLED;
++}
++EXPORT_SYMBOL(intel_mid_dma_interrupt);
++
++static irqreturn_t intel_mid_dma_interrupt1(int irq, void *data)
++{
++ return intel_mid_dma_interrupt(irq, data);
++}
++
++static irqreturn_t intel_mid_dma_interrupt2(int irq, void *data)
++{
++ return intel_mid_dma_interrupt(irq, data);
++}
++
++/**
++ * mid_setup_dma - Setup the DMA controller
++ * @pdev: Controller PCI device structure
++ *
++ * Initialize the DMA controller, channels, registers with DMA engine,
++ * ISR. Initialize DMA controller channels.
++ */
++int mid_setup_dma(struct pci_dev *pdev, struct middma_device *dma)
++{
++ int err, i;
++
++ /* DMA coherent memory pool for DMA descriptor allocations */
++ dma->dma_pool = pci_pool_create("intel_mid_dma_desc_pool", pdev,
++ sizeof(struct intel_mid_dma_desc),
++ 32, 0);
++ if (NULL == dma->dma_pool) {
++ pr_err("ERR_MDMA:pci_pool_create failed\n");
++ err = -ENOMEM;
++ goto err_dma_pool;
++ }
++
++ INIT_LIST_HEAD(&dma->common.channels);
++ dma->pci_id = pdev->device;
++ if (dma->pimr_mask) {
++ dma->mask_reg = ioremap(LNW_PERIPHRAL_MASK_BASE,
++ LNW_PERIPHRAL_MASK_SIZE);
++ if (dma->mask_reg == NULL) {
++ pr_err("ERR_MDMA:Can't map periphral intr space !!\n");
++ err = -ENOMEM;
++ goto err_ioremap;
++ }
++ } else
++ dma->mask_reg = NULL;
++
++ pr_debug("MDMA:Adding %d channel for this controller\n", dma->max_chan);
++ /*init CH structures*/
++ dma->intr_mask = 0;
++ dma->state = RUNNING;
++ for (i = 0; i < dma->max_chan; i++) {
++ struct intel_mid_dma_chan *midch = &dma->ch[i];
++
++ midch->chan.device = &dma->common;
++ dma_cookie_init(&midch->chan);
++ midch->ch_id = dma->chan_base + i;
++ pr_debug("MDMA:Init CH %d, ID %d\n", i, midch->ch_id);
++
++ midch->dma_base = dma->dma_base;
++ midch->ch_regs = dma->dma_base + DMA_CH_SIZE * midch->ch_id;
++ midch->dma = dma;
++ dma->intr_mask |= 1 << (dma->chan_base + i);
++ spin_lock_init(&midch->lock);
++
++ INIT_LIST_HEAD(&midch->active_list);
++ INIT_LIST_HEAD(&midch->queue);
++ INIT_LIST_HEAD(&midch->free_list);
++ /*mask interrupts*/
++ iowrite32(MASK_INTR_REG(midch->ch_id),
++ dma->dma_base + MASK_BLOCK);
++ iowrite32(MASK_INTR_REG(midch->ch_id),
++ dma->dma_base + MASK_SRC_TRAN);
++ iowrite32(MASK_INTR_REG(midch->ch_id),
++ dma->dma_base + MASK_DST_TRAN);
++ iowrite32(MASK_INTR_REG(midch->ch_id),
++ dma->dma_base + MASK_ERR);
++ iowrite32(MASK_INTR_REG(midch->ch_id),
++ dma->dma_base + MASK_TFR);
++
++ disable_dma_interrupt(midch);
++ list_add_tail(&midch->chan.device_node, &dma->common.channels);
++ }
++ pr_debug("MDMA: Calc Mask as %x for this controller\n", dma->intr_mask);
++
++ /*init dma structure*/
++ dma_cap_zero(dma->common.cap_mask);
++ dma_cap_set(DMA_MEMCPY, dma->common.cap_mask);
++ dma_cap_set(DMA_SLAVE, dma->common.cap_mask);
++ dma_cap_set(DMA_PRIVATE, dma->common.cap_mask);
++ dma->common.dev = &pdev->dev;
++
++ dma->common.device_alloc_chan_resources =
++ intel_mid_dma_alloc_chan_resources;
++ dma->common.device_free_chan_resources =
++ intel_mid_dma_free_chan_resources;
++
++ dma->common.device_tx_status = intel_mid_dma_tx_status;
++ dma->common.device_prep_dma_memcpy = intel_mid_dma_prep_memcpy;
++ dma->common.device_issue_pending = intel_mid_dma_issue_pending;
++ dma->common.device_prep_slave_sg = intel_mid_dma_prep_slave_sg;
++ dma->common.device_control = intel_mid_dma_device_control;
++
++ /*enable dma cntrl*/
++ iowrite32(REG_BIT0, dma->dma_base + DMA_CFG);
++
++ /*register irq */
++ if (dma->pimr_mask) {
++ pr_debug("MDMA:Requesting irq shared for DMAC1\n");
++ err = request_irq(pdev->irq, intel_mid_dma_interrupt1,
++ IRQF_SHARED, "INTEL_MID_DMAC1", dma);
++ if (0 != err)
++ goto err_irq;
++ } else {
++ dma->intr_mask = 0x03;
++ pr_debug("MDMA:Requesting irq for DMAC2\n");
++ err = request_irq(pdev->irq, intel_mid_dma_interrupt2,
++ IRQF_SHARED, "INTEL_MID_DMAC2", dma);
++ if (0 != err)
++ goto err_irq;
++ }
++ /*register device w/ engine*/
++ err = dma_async_device_register(&dma->common);
++ if (0 != err) {
++ pr_err("ERR_MDMA:device_register failed: %d\n", err);
++ goto err_engine;
++ }
++ if (dma->pimr_mask) {
++ pr_debug("setting up tasklet1 for DMAC1\n");
++ tasklet_init(&dma->tasklet, dma_tasklet1, (unsigned long)dma);
++ } else {
++ pr_debug("setting up tasklet2 for DMAC2\n");
++ tasklet_init(&dma->tasklet, dma_tasklet2, (unsigned long)dma);
++ }
++ return 0;
++
++err_engine:
++ free_irq(pdev->irq, dma);
++err_irq:
++ if (dma->mask_reg)
++ iounmap(dma->mask_reg);
++err_ioremap:
++ pci_pool_destroy(dma->dma_pool);
++err_dma_pool:
++ pr_err("ERR_MDMA:setup_dma failed: %d\n", err);
++ return err;
++
++}
++/**
++ * middma_shutdown - Shutdown the DMA controller
++ * @pdev: Controller PCI device structure
++ *
++ * Called by remove
++ * Unregister DMa controller, clear all structures and free interrupt
++ */
++void middma_shutdown(struct pci_dev *pdev, struct middma_device *device)
++{
++ dma_async_device_unregister(&device->common);
++ pci_pool_destroy(device->dma_pool);
++ if (device->mask_reg)
++ iounmap(device->mask_reg);
++ if (device->dma_base)
++ iounmap(device->dma_base);
++ free_irq(pdev->irq, device);
++ return;
++}
++
++/* Power Management */
++/*
++* dma_suspend - PCI suspend function
++*
++* @pci: PCI device structure
++* @state: PM message
++*
++* This function is called by OS when a power event occurs
++*/
++static int dma_suspend(struct device *dev)
++{
++ struct pci_dev *pci = to_pci_dev(dev);
++ int i;
++ struct middma_device *device = pci_get_drvdata(pci);
++ pr_debug("MDMA: dma_suspend called\n");
++
++ for (i = 0; i < device->max_chan; i++) {
++ if (device->ch[i].in_use)
++ return -EAGAIN;
++ }
++#if 0
++ dmac1_mask_periphral_intr(device);
++#endif
++ device->state = SUSPENDED;
++ pci_save_state(pci);
++ pci_disable_device(pci);
++ pci_set_power_state(pci, PCI_D3hot);
++ return 0;
++}
++
++/**
++* dma_resume - PCI resume function
++*
++* @pci: PCI device structure
++*
++* This function is called by OS when a power event occurs
++*/
++int middma_resume(struct device *dev)
++{
++ struct pci_dev *pci_dev = to_pci_dev(dev);
++ struct middma_device *device = pci_get_drvdata(pci_dev);
++
++ device->state = RUNNING;
++ iowrite32(REG_BIT0, device->dma_base + DMA_CFG);
++ return 0;
++}
++
++static int dma_runtime_suspend(struct device *dev)
++{
++ struct pci_dev *pci_dev = to_pci_dev(dev);
++ struct middma_device *device = pci_get_drvdata(pci_dev);
++
++ device->state = SUSPENDED;
++ return 0;
++}
++
++static int dma_runtime_resume(struct device *dev)
++{
++ struct pci_dev *pci_dev = to_pci_dev(dev);
++ struct middma_device *device = pci_get_drvdata(pci_dev);
++
++ device->state = RUNNING;
++ iowrite32(REG_BIT0, device->dma_base + DMA_CFG);
++ return 0;
++}
++
++static int dma_runtime_idle(struct device *dev)
++{
++ struct pci_dev *pdev = to_pci_dev(dev);
++ struct middma_device *device = pci_get_drvdata(pdev);
++ int i;
++
++ for (i = 0; i < device->max_chan; i++) {
++ if (device->ch[i].in_use)
++ return -EAGAIN;
++ }
++
++ return pm_schedule_suspend(dev, 0);
++}
++
+diff --git a/drivers/dma/intel_mid_dma_pci.c b/drivers/dma/intel_mid_dma_pci.c
+new file mode 100644
+index 0000000..bd753b9
+--- /dev/null
++++ b/drivers/dma/intel_mid_dma_pci.c
+@@ -0,0 +1,290 @@
+/*
+ * intel_mid_dma.c - Intel Langwell DMA Drivers
+ *
@@ -1156,7 +3871,7 @@ index 0000000..be7705b
+#include <linux/module.h>
+
+#include "intel_mid_dma_regs.h"
-+#include "intel_mid_dma_core.h"
++//#include "intel_mid_dma_core.h"
+
+#define INTEL_MID_DMAC1_ID 0x0814
+#define INTEL_MID_DMAC2_ID 0x0813
@@ -1232,7 +3947,7 @@ index 0000000..be7705b
+ device->block_size = info->block_size;
+ device->pimr_mask = info->pimr_mask;
+
-+ err = mid_setup_dma(pdev);
++ err = mid_setup_dma(pdev, device);
+ if (err)
+ goto err_dma;
+
@@ -1268,7 +3983,9 @@ index 0000000..be7705b
+
+ pm_runtime_get_noresume(&pdev->dev);
+ pm_runtime_forbid(&pdev->dev);
-+ middma_shutdown(pdev);
++#if 0
++ middma_shutdown(pdev, device);
++#endif
+ pci_dev_put(pdev);
+ kfree(device);
+ pci_release_regions(pdev);
@@ -1295,7 +4012,9 @@ index 0000000..be7705b
+ if (device->ch[i].in_use)
+ return -EAGAIN;
+ }
++#if 0
+ dmac1_mask_periphral_intr(device);
++#endif
+ device->state = SUSPENDED;
+ pci_save_state(pci);
+ pci_disable_device(pci);
@@ -1314,7 +4033,6 @@ index 0000000..be7705b
+{
+ struct pci_dev *pci = to_pci_dev(dev);
+ int ret;
-+ struct middma_device *device = pci_get_drvdata(pci);
+
+ pr_debug("MDMA: dma_resume called\n");
+ pci_set_power_state(pci, PCI_D0);
@@ -1324,8 +4042,8 @@ index 0000000..be7705b
+ pr_err("MDMA: device can't be enabled for %x\n", pci->device);
+ return ret;
+ }
-+ intel_mid_dma_resume(device);
-+ return 0;
++
++ return middma_resume(dev);
+}
+
+static int dma_runtime_suspend(struct device *dev)
@@ -1385,7 +4103,7 @@ index 0000000..be7705b
+ .name = "Intel MID DMA",
+ .id_table = intel_mid_dma_ids,
+ .probe = intel_mid_dma_probe,
-+ .remove = __devexit_p(intel_mid_dma_remove),
++ .remove = intel_mid_dma_remove,
+#ifdef CONFIG_PM
+ .driver = {
+ .pm = &intel_mid_dma_pm,
@@ -1412,7 +4130,7 @@ index 0000000..be7705b
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(INTEL_MID_DMA_DRIVER_VERSION);
diff --git a/drivers/dma/intel_mid_dma_regs.h b/drivers/dma/intel_mid_dma_regs.h
-index 17b4219..4be7a01 100644
+index 17b4219..4b2ba69 100644
--- a/drivers/dma/intel_mid_dma_regs.h
+++ b/drivers/dma/intel_mid_dma_regs.h
@@ -27,6 +27,7 @@
@@ -1539,6 +4257,21 @@ index 17b4219..4be7a01 100644
struct intel_mid_dma_lli {
dma_addr_t sar;
dma_addr_t dar;
+@@ -294,6 +197,14 @@ static inline struct intel_mid_dma_slave *to_intel_mid_dma_slave
+ }
+
+
++int mid_setup_dma(struct pci_dev *pdev, struct middma_device *dma);
++#if 0
++void middma_shutdown(struct pci_dev *pdev, struct middma_device *device);
++void dmac1_mask_periphral_intr(struct middma_device *mid);
++void dmac1_unmask_periphral_intr(struct intel_mid_dma_chan *midc);
++#endif
++int middma_resume(struct device *dev);
++
+ int dma_resume(struct device *dev);
+
+ #endif /*__INTEL_MID_DMAC_REGS_H__*/
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 682de75..97e951b 100644
--- a/drivers/gpio/Kconfig
@@ -2204,23 +4937,14 @@ index 6add851..62ad7dc 100644
if (r)
goto err_iounmap;
diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig
-index 05e996f..168989b 100644
+index 05e996f..49a66c3 100644
--- a/drivers/iio/accel/Kconfig
+++ b/drivers/iio/accel/Kconfig
-@@ -3,15 +3,12 @@
- #
- menu "Accelerometers"
-
--config HID_SENSOR_ACCEL_3D
-- depends on HID_SENSOR_HUB
-- select IIO_BUFFER
-- select IIO_TRIGGERED_BUFFER
-- select HID_SENSOR_IIO_COMMON
-- select HID_SENSOR_IIO_TRIGGER
-- tristate "HID Accelerometers 3D"
-- help
-- Say yes here to build support for the HID SENSOR
-- accelerometers 3D.
+@@ -13,5 +13,13 @@ config HID_SENSOR_ACCEL_3D
+ help
+ Say yes here to build support for the HID SENSOR
+ accelerometers 3D.
++
+config IIO_LIS331DLH_INTEL_CLN
+ tristate "STMicroelectronics LIS331DLH accelerometer i2c driver for Intel Clanton platform"
+ depends on INTEL_QUARK_X1000_SOC
@@ -2231,34 +4955,21 @@ index 05e996f..168989b 100644
endmenu
diff --git a/drivers/iio/accel/Makefile b/drivers/iio/accel/Makefile
-index 5bc6855..c82ce57 100644
+index 5bc6855..2ac0908 100644
--- a/drivers/iio/accel/Makefile
+++ b/drivers/iio/accel/Makefile
-@@ -1,5 +1,4 @@
- #
- # Makefile for industrial I/O accelerometer drivers
+@@ -3,3 +3,5 @@
#
--
--obj-$(CONFIG_HID_SENSOR_ACCEL_3D) += hid-sensor-accel-3d.o
+
+ obj-$(CONFIG_HID_SENSOR_ACCEL_3D) += hid-sensor-accel-3d.o
++
+obj-$(CONFIG_IIO_LIS331DLH_INTEL_CLN) += lis331dlh_intel_cln.o
-diff --git a/drivers/iio/accel/hid-sensor-accel-3d.c b/drivers/iio/accel/hid-sensor-accel-3d.c
-index 0b0c3c6..563a2fe 100644
---- a/drivers/iio/accel/hid-sensor-accel-3d.c
-+++ b/drivers/iio/accel/hid-sensor-accel-3d.c
-@@ -45,6 +45,7 @@ enum accel_3d_channel {
- struct accel_3d_state {
- struct hid_sensor_hub_callbacks callbacks;
- struct hid_sensor_iio_common common_attributes;
-+ struct hid_sensor_common common_attributes;
- struct hid_sensor_hub_attribute_info accel[ACCEL_3D_CHANNEL_MAX];
- u32 accel_val[ACCEL_3D_CHANNEL_MAX];
- };
diff --git a/drivers/iio/accel/lis331dlh_intel_cln.c b/drivers/iio/accel/lis331dlh_intel_cln.c
new file mode 100644
-index 0000000..57998d0
+index 0000000..c76c30a
--- /dev/null
+++ b/drivers/iio/accel/lis331dlh_intel_cln.c
-@@ -0,0 +1,675 @@
+@@ -0,0 +1,735 @@
+/*
+ * Intel Clanton Hill platform accelerometer driver
+ *
@@ -2296,18 +5007,15 @@ index 0000000..57998d0
+
+#include <linux/kernel.h>
+#include <linux/module.h>
-+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/gpio.h>
-+#include <linux/irq.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
-+#include <linux/iio/trigger.h>
-+#include <linux/iio/buffer.h>
++#include <linux/iio/events.h>
+
+#include <linux/iio/common/st_sensors.h>
+#include <linux/iio/common/st_sensors_i2c.h>
@@ -2374,8 +5082,33 @@ index 0000000..57998d0
+#define ST_ACCEL_2_INT_CFG_ZHIE_EN 0x20
+
+#define ST_ACCEL_2_MULTIREAD_BIT true
++#define ST_ACCEL_2_THRESH_VAL_MIN 0x00
++#define ST_ACCEL_2_THRESH_VAL_MAX 0x7f
+#define CLN_ACCEL_INT2_WAKEUP_THRESH_VAL 0x7f
+
++#define CLN_ACCEL_INT1_DISABLED 0
++#define CLN_ACCEL_INT1_ENABLED 1
++
++#define CLN_ACCEL_LSM_CHANNELS(device_type, index, mod, endian, bits, addr) \
++{ \
++ .type = device_type, \
++ .modified = 1, \
++ .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | \
++ IIO_CHAN_INFO_SCALE_SEPARATE_BIT, \
++ .scan_index = index, \
++ .channel = mod, \
++ .channel2 = mod, \
++ .address = addr, \
++ .scan_type = { \
++ .sign = 's', \
++ .realbits = bits, \
++ .shift = 16 - bits, \
++ .storagebits = 16, \
++ .endianness = endian, \
++ }, \
++ .event_mask = IIO_EV_BIT(IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING), \
++}
++
+static const u8 iio_modifier_map[] = {
+ IIO_NO_MOD,
+ IIO_MOD_X,
@@ -2430,11 +5163,11 @@ index 0000000..57998d0
+ iio_modifier = iio_modifier_map[iio_modifier];
+
+ iio_push_event(private,
-+ IIO_EVENT_CODE(IIO_ACCEL,
-+ 0, /* non differential */
++ IIO_MOD_EVENT_CODE(IIO_ACCEL,
++ 0,
+ iio_modifier,
+ IIO_EV_TYPE_THRESH,
-+ IIO_EV_DIR_RISING, 0, 0, 0),
++ IIO_EV_DIR_RISING),
+ timestamp);
+ }
+
@@ -2442,6 +5175,27 @@ index 0000000..57998d0
+ return IRQ_HANDLED;
+}
+
++static inline int lis331dlh_intel_cln_read_info_raw(struct iio_dev *indio_dev,
++ struct iio_chan_spec const *ch, int *val)
++{
++ int err;
++
++ mutex_lock(&indio_dev->mlock);
++ err = st_sensors_read_axis_data(indio_dev, ch->address, val);
++
++ if (unlikely(err < 0))
++ goto read_error;
++
++ *val = *val >> ch->scan_type.shift;
++ mutex_unlock(&indio_dev->mlock);
++
++ return err;
++
++read_error:
++ mutex_unlock(&indio_dev->mlock);
++ return err;
++}
++
+static int lis331dlh_intel_cln_read_raw(
+ struct iio_dev *indio_dev,
+ struct iio_chan_spec const *ch,
@@ -2452,8 +5206,8 @@ index 0000000..57998d0
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
-+ err = st_sensors_read_info_raw(indio_dev, ch, val);
-+ if (err < 0)
++ err = lis331dlh_intel_cln_read_info_raw(indio_dev, ch, val);
++ if (unlikely(err < 0))
+ goto read_error;
+
+ return IIO_VAL_INT;
@@ -2487,7 +5241,6 @@ index 0000000..57998d0
+ return err;
+}
+
-+
+static ST_SENSOR_DEV_ATTR_SAMP_FREQ();
+static ST_SENSORS_DEV_ATTR_SAMP_FREQ_AVAIL();
+static ST_SENSORS_DEV_ATTR_SCALE_AVAIL(in_accel_scale_available);
@@ -2528,7 +5281,8 @@ index 0000000..57998d0
+ struct st_sensor_data *sdata;
+
+ /* range check */
-+ if ((val < 0) || (val > 0x7f))
++ if (unlikely((val < ST_ACCEL_2_THRESH_VAL_MIN) ||
++ (val > ST_ACCEL_2_THRESH_VAL_MAX)))
+ return -EINVAL;
+
+ sdata = iio_priv(indio_dev);
@@ -2537,36 +5291,36 @@ index 0000000..57998d0
+ ST_ACCEL_2_INT1_THRESH_ADDR, val);
+
+ return err;
-+
+}
+
+/* Configure the INT1 pin to fire an interrupt on a high threshold event.
+ */
+static int lis331dlh_intel_cln_configure_threshold_interrupt(
-+ struct iio_dev *indio_dev, bool state)
++ struct iio_dev *indio_dev, u8 state)
+{
+ int err = 0;
+ struct st_sensor_data *sdata = iio_priv(indio_dev);
+
-+ if (sdata->int_thresh == state)
++ if (sdata->sensor->drdy_irq.ig1.en_mask == state)
+ return 0;
+
-+ if (state) {
++ if (state == CLN_ACCEL_INT1_ENABLED) {
+ err = request_threaded_irq(sdata->get_irq_data_ready(indio_dev),
+ NULL,
+ lis331dlh_intel_cln_threshold_event_handler,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ "lis331dlh_intel_cln_threshold",
+ indio_dev);
-+ if (err == 0) {
-+ sdata->int_thresh = true;
++ if (likely(err == 0)) {
++ sdata->sensor->drdy_irq.ig1.en_mask =
++ CLN_ACCEL_INT1_ENABLED;
+ err = sdata->tf->write_byte(
+ &sdata->tb, sdata->dev,
+ ST_ACCEL_2_INT1_DURATION_ADDR, 1);
+ }
+ } else {
+ free_irq(sdata->get_irq_data_ready(indio_dev), indio_dev);
-+ sdata->int_thresh = false;
++ sdata->sensor->drdy_irq.ig1.en_mask = CLN_ACCEL_INT1_DISABLED;
+ }
+
+ return err;
@@ -2604,21 +5358,23 @@ index 0000000..57998d0
+ mask = 1 << ((IIO_EVENT_CODE_EXTRACT_MODIFIER(event_code) << 1) - 1);
+
+ err = st_sensors_write_data_with_mask(indio_dev,
-+ ST_ACCEL_2_INT1_CFG_ADDR,
-+ mask, state);
++ ST_ACCEL_2_INT1_CFG_ADDR,
++ mask, state);
++ if (unlikely(err < 0))
++ goto write_event_err;
+
-+ if (err == 0)
-+ err = sdata->tf->read_byte(&sdata->tb, sdata->dev,
-+ ST_ACCEL_2_INT1_CFG_ADDR, &data);
++ err = sdata->tf->read_byte(&sdata->tb, sdata->dev,
++ ST_ACCEL_2_INT1_CFG_ADDR, &data);
++ if (unlikely(err < 0))
++ goto write_event_err;
+
-+ if (err == 0) {
-+ new_int_state = data & (ST_ACCEL_2_INT_CFG_XHIE_EN |
++ new_int_state = data & (ST_ACCEL_2_INT_CFG_XHIE_EN |
+ ST_ACCEL_2_INT_CFG_YHIE_EN |
+ ST_ACCEL_2_INT_CFG_ZHIE_EN);
-+ err = lis331dlh_intel_cln_configure_threshold_interrupt(
++ err = lis331dlh_intel_cln_configure_threshold_interrupt(
+ indio_dev, new_int_state);
-+ }
+
++write_event_err:
+ return err;
+}
+
@@ -2632,33 +5388,38 @@ index 0000000..57998d0
+ u8 data;
+ struct st_sensor_data *sdata = iio_priv(indio_dev);
+
-+ if (err == 0)
-+ err = sdata->tf->write_byte(&sdata->tb, sdata->dev,
++ err = sdata->tf->write_byte(&sdata->tb, sdata->dev,
+ ST_ACCEL_2_INT2_THRESH_ADDR,
+ CLN_ACCEL_INT2_WAKEUP_THRESH_VAL);
++ if (unlikely(err < 0))
++ goto enable_wakeup_int_err;
+
+ /* Latch interrupt request on INT2 */
-+ if (err == 0)
-+ err = st_sensors_write_data_with_mask(
++ err = st_sensors_write_data_with_mask(
+ indio_dev, ST_ACCEL_2_DRDY_IRQ_ADDR,
+ ST_ACCEL_2_INT_LIR_MASK, 1);
++ if (unlikely(err < 0))
++ goto enable_wakeup_int_err;
+
-+ if (err == 0)
-+ err = sdata->tf->write_byte(&sdata->tb, sdata->dev,
-+ ST_ACCEL_2_INT2_DURATION_ADDR, 0);
++ err = sdata->tf->write_byte(&sdata->tb, sdata->dev,
++ ST_ACCEL_2_INT2_DURATION_ADDR, 0);
++ if (unlikely(err < 0))
++ goto enable_wakeup_int_err;
+
-+ if (err == 0)
-+ err = sdata->tf->write_byte(&sdata->tb, sdata->dev,
-+ ST_ACCEL_2_INT2_CFG_ADDR,
-+ ST_ACCEL_2_INT_CFG_XHIE_EN |
-+ ST_ACCEL_2_INT_CFG_YHIE_EN);
++ err = sdata->tf->write_byte(&sdata->tb, sdata->dev,
++ ST_ACCEL_2_INT2_CFG_ADDR,
++ ST_ACCEL_2_INT_CFG_XHIE_EN |
++ ST_ACCEL_2_INT_CFG_YHIE_EN);
++ if (unlikely(err < 0))
++ goto enable_wakeup_int_err;
+
+ /* Clean ST_ACCEL_2_INT2_SRC */
-+ if (err == 0)
-+ err = sdata->tf->read_byte(&sdata->tb, sdata->dev,
-+ ST_ACCEL_2_INT2_SRC_ADDR,
-+ &data);
+
++ err = sdata->tf->read_byte(&sdata->tb, sdata->dev,
++ ST_ACCEL_2_INT2_SRC_ADDR,
++ &data);
++
++enable_wakeup_int_err:
+ return err;
+}
+
@@ -2669,17 +5430,20 @@ index 0000000..57998d0
+ u8 data;
+ struct st_sensor_data *sdata = iio_priv(indio_dev);
+
-+ if (err == 0)
-+ err = sdata->tf->write_byte(&sdata->tb, sdata->dev,
-+ ST_ACCEL_2_INT2_CFG_ADDR,
-+ 0);
++ err = sdata->tf->write_byte(&sdata->tb, sdata->dev,
++ ST_ACCEL_2_INT2_CFG_ADDR,
++ 0);
++ if (unlikely(err < 0))
++ goto disable_wakeup_int_err;
+
+ /* Clean ST_ACCEL_2_INT2_SRC */
-+ if (err == 0)
-+ err = sdata->tf->read_byte(&sdata->tb, sdata->dev,
-+ ST_ACCEL_2_INT2_SRC_ADDR,
-+ &data);
++ err = sdata->tf->read_byte(&sdata->tb, sdata->dev,
++ ST_ACCEL_2_INT2_SRC_ADDR,
++ &data);
++ if (unlikely(err < 0))
++ goto disable_wakeup_int_err;
+
++disable_wakeup_int_err:
+ return err;
+}
+
@@ -2694,18 +5458,20 @@ index 0000000..57998d0
+ err = sdata->tf->read_byte(&sdata->tb, sdata->dev,
+ ST_ACCEL_2_INT2_SRC_ADDR,
+ &data);
++ if (unlikely(err < 0))
++ goto handle_wakeup_int_err;
+
-+ if (err == 0)
-+ if (data & ST_ACCEL_2_INT_IA_MASK) {
-+ iio_push_event(indio_dev,
-+ IIO_EVENT_CODE(IIO_ACCEL,
-+ 0, /* non differential */
-+ IIO_MOD_X_OR_Y_OR_Z,
-+ IIO_EV_TYPE_THRESH,
-+ IIO_EV_DIR_EITHER, 0, 0, 0),
-+ timestamp);
-+ }
++ if (data & ST_ACCEL_2_INT_IA_MASK) {
++ iio_push_event(indio_dev,
++ IIO_MOD_EVENT_CODE(IIO_ACCEL,
++ 0,
++ IIO_MOD_X_OR_Y_OR_Z,
++ IIO_EV_TYPE_THRESH,
++ IIO_EV_DIR_EITHER),
++ timestamp);
++ }
+
++handle_wakeup_int_err:
+ return err;
+}
+
@@ -2721,11 +5487,11 @@ index 0000000..57998d0
+};
+
+static const struct iio_chan_spec st_accel_12bit_channels[] = {
-+ ST_SENSORS_LSM_CHANNELS(IIO_ACCEL, ST_SENSORS_SCAN_X, IIO_MOD_X, IIO_LE,
++ CLN_ACCEL_LSM_CHANNELS(IIO_ACCEL, ST_SENSORS_SCAN_X, IIO_MOD_X, IIO_LE,
+ ST_SENSORS_DEFAULT_12_REALBITS, ST_ACCEL_DEFAULT_OUT_X_L_ADDR),
-+ ST_SENSORS_LSM_CHANNELS(IIO_ACCEL, ST_SENSORS_SCAN_Y, IIO_MOD_Y, IIO_LE,
++ CLN_ACCEL_LSM_CHANNELS(IIO_ACCEL, ST_SENSORS_SCAN_Y, IIO_MOD_Y, IIO_LE,
+ ST_SENSORS_DEFAULT_12_REALBITS, ST_ACCEL_DEFAULT_OUT_Y_L_ADDR),
-+ ST_SENSORS_LSM_CHANNELS(IIO_ACCEL, ST_SENSORS_SCAN_Z, IIO_MOD_Z, IIO_LE,
++ CLN_ACCEL_LSM_CHANNELS(IIO_ACCEL, ST_SENSORS_SCAN_Z, IIO_MOD_Z, IIO_LE,
+ ST_SENSORS_DEFAULT_12_REALBITS, ST_ACCEL_DEFAULT_OUT_Z_L_ADDR),
+ IIO_CHAN_SOFT_TIMESTAMP(3)
+};
@@ -2799,7 +5565,7 @@ index 0000000..57998d0
+ int ret = 0;
+
+ indio_dev = iio_device_alloc(sizeof(*adata));
-+ if (indio_dev == NULL) {
++ if (unlikely(indio_dev == NULL)) {
+ ret = -ENOMEM;
+ goto iio_device_alloc_error;
+ }
@@ -2812,13 +5578,13 @@ index 0000000..57998d0
+ adata->dev = &client->dev;
+
+ pdata = client->dev.platform_data;
-+ if (!pdata) {
++ if (unlikely(!pdata)) {
+ pr_err("No platform data provided\n");
+ goto lis331dlh_intel_cln_init_err;
+ }
+
+ ret = gpio_to_irq(pdata->irq1_pin);
-+ if (ret < 0) {
++ if (unlikely(ret < 0)) {
+ pr_err(
+ "Failed to obtain valid IRQ for GPIO %d, "
+ "gpio_to_irq returned %d\n",
@@ -2833,8 +5599,8 @@ index 0000000..57998d0
+ indio_dev->info = &accel_info;
+
+ ret = st_sensors_check_device_support(indio_dev,
-+ 1, &lis331dlh_intel_cln_sensor);
-+ if (ret < 0)
++ 1, &lis331dlh_intel_cln_sensor);
++ if (unlikely(ret < 0))
+ goto lis331dlh_intel_cln_init_err;
+
+ indio_dev->channels = adata->sensor->ch;
@@ -2844,14 +5610,19 @@ index 0000000..57998d0
+ adata->current_fullscale = (struct st_sensor_fullscale_avl *)
+ &adata->sensor->fs.fs_avl[0];
+ adata->odr = adata->sensor->odr.odr_avl[0].hz;
-+ adata->int_thresh = false;
++
++ adata->sensor->drdy_irq.ig1.en_mask = CLN_ACCEL_INT1_DISABLED;
+
+ ret = st_sensors_init_sensor(indio_dev);
-+ if (ret < 0)
++ if (unlikely(ret < 0))
++ goto lis331dlh_intel_cln_init_err;
++
++ ret = st_sensors_set_enable(indio_dev, true);
++ if (unlikely(ret < 0))
+ goto lis331dlh_intel_cln_init_err;
+
+ ret = iio_device_register(indio_dev);
-+ if (ret)
++ if (unlikely(ret))
+ goto lis331dlh_intel_cln_init_err;
+
+ return 0;
@@ -2870,7 +5641,7 @@ index 0000000..57998d0
+
+ st_sensors_set_enable(indio_dev, false);
+
-+ if (adata->int_thresh)
++ if (adata->sensor->drdy_irq.ig1.en_mask == CLN_ACCEL_INT1_ENABLED)
+ free_irq(adata->get_irq_data_ready(indio_dev), indio_dev);
+
+ iio_device_unregister(indio_dev);
@@ -2994,53 +5765,44 @@ index b34d754..60491e4 100644
if (st->ext_ref) {
st->reg = regulator_get(&spi->dev, "vref");
diff --git a/drivers/iio/common/Kconfig b/drivers/iio/common/Kconfig
-index ed45ee5..3c77727 100644
+index ed45ee5..64bcb14 100644
--- a/drivers/iio/common/Kconfig
+++ b/drivers/iio/common/Kconfig
-@@ -2,4 +2,4 @@
- # IIO common modules
+@@ -3,3 +3,4 @@
#
--source "drivers/iio/common/hid-sensors/Kconfig"
+ source "drivers/iio/common/hid-sensors/Kconfig"
+source "drivers/iio/common/st_sensors/Kconfig"
\ No newline at end of file
diff --git a/drivers/iio/common/Makefile b/drivers/iio/common/Makefile
-index 8158400..4d22a20 100644
+index 8158400..c2352be 100644
--- a/drivers/iio/common/Makefile
+++ b/drivers/iio/common/Makefile
-@@ -6,4 +6,4 @@
- # instead of duplicating in each module.
+@@ -7,3 +7,4 @@
#
--obj-y += hid-sensors/
+ obj-y += hid-sensors/
+obj-y += st_sensors/
-\ No newline at end of file
diff --git a/drivers/iio/common/st_sensors/Kconfig b/drivers/iio/common/st_sensors/Kconfig
new file mode 100644
-index 0000000..d1b474a
+index 0000000..865f1ca
--- /dev/null
+++ b/drivers/iio/common/st_sensors/Kconfig
-@@ -0,0 +1,20 @@
+@@ -0,0 +1,14 @@
+#
+# STMicroelectronics sensors common library
+#
+
-+menu "STMicro sensors"
+config IIO_ST_SENSORS_I2C
-+ tristate "IIO ST SENSORS"
-+ help
-+ Enable SENSORS I2C option
++ tristate
+
+config IIO_ST_SENSORS_SPI
-+ tristate "IIO SENSORS SPI"
-+ help
-+ Enable IIO_ST_SENSORS_SPI
++ tristate
+
+config IIO_ST_SENSORS_CORE
-+ tristate "IIO SENSORS CORE"
++ tristate
+ select IIO_ST_SENSORS_I2C if I2C
+ select IIO_ST_SENSORS_SPI if SPI_MASTER
-+endmenu
diff --git a/drivers/iio/common/st_sensors/Makefile b/drivers/iio/common/st_sensors/Makefile
new file mode 100644
index 0000000..9f3e24f
@@ -3181,10 +5943,10 @@ index 0000000..09b236d
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c
new file mode 100644
-index 0000000..9873869
+index 0000000..945a55b
--- /dev/null
+++ b/drivers/iio/common/st_sensors/st_sensors_core.c
-@@ -0,0 +1,435 @@
+@@ -0,0 +1,447 @@
+/*
+ * STMicroelectronics sensors core library driver
+ *
@@ -3387,7 +6149,7 @@ index 0000000..9873869
+
+ mutex_init(&sdata->tb.buf_lock);
+
-+ err = st_sensors_set_enable(indio_dev, true);
++ err = st_sensors_set_enable(indio_dev, false);
+ if (err < 0)
+ goto init_error;
+
@@ -3460,7 +6222,7 @@ index 0000000..9873869
+}
+EXPORT_SYMBOL(st_sensors_set_fullscale_by_gain);
+
-+static int st_sensors_read_axis_data(struct iio_dev *indio_dev,
++int st_sensors_read_axis_data(struct iio_dev *indio_dev,
+ u8 ch_addr, int *data)
+{
+ int err;
@@ -3478,18 +6240,30 @@ index 0000000..9873869
+read_error:
+ return err;
+}
++EXPORT_SYMBOL(st_sensors_read_axis_data);
+
+int st_sensors_read_info_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *ch, int *val)
+{
+ int err;
++ struct st_sensor_data *sdata = iio_priv(indio_dev);
+
+ mutex_lock(&indio_dev->mlock);
-+ err = st_sensors_read_axis_data(indio_dev, ch->address, val);
-+ if (err < 0)
++ if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
++ err = -EBUSY;
+ goto read_error;
++ } else {
++ err = st_sensors_set_enable(indio_dev, true);
++ if (err < 0)
++ goto read_error;
+
-+ *val = *val >> ch->scan_type.shift;
++ msleep((sdata->sensor->bootime * 1000) / sdata->odr);
++ err = st_sensors_read_axis_data(indio_dev, ch->address, val);
++ if (err < 0)
++ goto read_error;
++
++ *val = *val >> ch->scan_type.shift;
++ }
+ mutex_unlock(&indio_dev->mlock);
+
+ return err;
@@ -3622,10 +6396,10 @@ index 0000000..9873869
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/common/st_sensors/st_sensors_i2c.c b/drivers/iio/common/st_sensors/st_sensors_i2c.c
new file mode 100644
-index 0000000..f8e82c5
+index 0000000..38af944
--- /dev/null
+++ b/drivers/iio/common/st_sensors/st_sensors_i2c.c
-@@ -0,0 +1,80 @@
+@@ -0,0 +1,81 @@
+/*
+ * STMicroelectronics sensors i2c library driver
+ *
@@ -3643,6 +6417,7 @@ index 0000000..f8e82c5
+
+#include <linux/iio/common/st_sensors_i2c.h>
+
++
+#define ST_SENSORS_I2C_MULTIREAD 0x80
+
+static unsigned int st_sensors_i2c_get_irq(struct iio_dev *indio_dev)
@@ -3842,10 +6617,10 @@ index 0000000..f0aa2f1
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/common/st_sensors/st_sensors_trigger.c b/drivers/iio/common/st_sensors/st_sensors_trigger.c
new file mode 100644
-index 0000000..8b4dd48
+index 0000000..139ed03
--- /dev/null
+++ b/drivers/iio/common/st_sensors/st_sensors_trigger.c
-@@ -0,0 +1,78 @@
+@@ -0,0 +1,77 @@
+/*
+ * STMicroelectronics sensors trigger library driver
+ *
@@ -3882,8 +6657,7 @@ index 0000000..8b4dd48
+ err = request_threaded_irq(sdata->get_irq_data_ready(indio_dev),
+ iio_trigger_generic_data_rdy_poll,
+ NULL,
-+ IRQF_SHARED | /* sharing with the accelerometer events*/
-+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
++ IRQF_TRIGGER_RISING,
+ sdata->trig->name,
+ sdata->trig);
+ if (err)
@@ -3969,7 +6743,7 @@ index 3e24571..da30c5c 100644
return 1;
}
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
-index ff553ba..be79f4a 100644
+index ff553ba..edd3b4c 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -907,6 +907,44 @@ config MFD_TIMBERDALE
@@ -3978,11 +6752,12 @@ index ff553ba..be79f4a 100644
+config CY8C9540A
+ tristate "Cypress CY8C9540 GPIO/PWM expander"
-+ depends on INTEL_CLN_GIP
++ depends on GPIOLIB
++ depends on I2C
+ depends on PWM
+ help
-+ I/O expander providing GPIO/PWM functionality.
-+ This is Clanton-specific for now.
++ Select this option to enable support for the CY8C9540 I/O expander.
++ This device provides 40 interrupt-capable GPIOs, 8 PWMs and an EEPROM.
+
+config INTEL_CLN_GIP
+ tristate "Intel Clanton GIP"
@@ -3999,7 +6774,6 @@ index ff553ba..be79f4a 100644
+config INTEL_CLN_GIP_TEST
+ tristate "Intel Clanton GIP support for Integration Testing"
+ depends on INTEL_CLN_GIP
-+ depends on INTEL_QUARK_X1000_SOC_FPGAEMU || INTEL_QUARK_X1000_SOC_SVP
+ select I2C_CHARDEV
+ select GPIO_SYSFS
+ select SPI
@@ -4018,10 +6792,10 @@ index ff553ba..be79f4a 100644
tristate "Intel SCH LPC"
depends on PCI
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
-index 8b977f8..2fb8008 100644
+index 8b977f8..fda016c 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
-@@ -123,6 +123,13 @@ obj-$(CONFIG_MFD_DB8500_PRCMU) += db8500-prcmu.o
+@@ -123,6 +123,14 @@ obj-$(CONFIG_MFD_DB8500_PRCMU) += db8500-prcmu.o
obj-$(CONFIG_AB8500_CORE) += ab8500-core.o ab8500-sysctrl.o
obj-$(CONFIG_MFD_TIMBERDALE) += timberdale.o
obj-$(CONFIG_PMIC_ADP5520) += adp5520.o
@@ -4031,16 +6805,17 @@ index 8b977f8..2fb8008 100644
+ intel_cln_gip_gpio.o \
+ intel_cln_gip_i2c.o \
+ ../i2c/busses/i2c-designware-core.o
++obj-$(CONFIG_INTEL_QUARK_X1000_SOC) += intel_cln_gip_pdata.o
+obj-$(CONFIG_INTEL_CLN_GIP_TEST)+=intel_cln_gip_test.o
obj-$(CONFIG_LPC_SCH) += lpc_sch.o
obj-$(CONFIG_LPC_ICH) += lpc_ich.o
obj-$(CONFIG_MFD_RDC321X) += rdc321x-southbridge.o
diff --git a/drivers/mfd/cy8c9540a.c b/drivers/mfd/cy8c9540a.c
new file mode 100644
-index 0000000..9d83e32
+index 0000000..0e4ea5e
--- /dev/null
+++ b/drivers/mfd/cy8c9540a.c
-@@ -0,0 +1,1022 @@
+@@ -0,0 +1,970 @@
+/*
+ * Copyright(c) 2013 Intel Corporation. All rights reserved.
+ *
@@ -4064,7 +6839,10 @@ index 0000000..9d83e32
+/*
+ * Driver for Cypress CY8C9540A I/O Expander and PWM
+ *
-+ * Izmir-specific.
++ * The I/O Expander is I2C-controlled and provides 40 interrupt-capable GPIOs,
++ * 8 PWMs and an EEPROM.
++ * Note the device only supports I2C standard speed 100kHz.
++ *
+ * Based on gpio-adp5588.
+ */
+
@@ -4074,6 +6852,7 @@ index 0000000..9d83e32
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/kernel.h>
++#include <linux/mfd/cy8c9540a.h>
+#include <linux/module.h>
+#include <linux/pwm.h>
+#include <linux/slab.h>
@@ -4082,13 +6861,12 @@ index 0000000..9d83e32
+
+/* CY8C9540A settings */
+#define NGPIO 40
-+#define NPWM 8
+#define PWM_MAX_PERIOD 0xff
+#define DEVID_FAMILY_CY8C9540A 0x40
+#define DEVID_FAMILY_MASK 0xf0
+#define NPORTS 6
-+#define I2C_A0_ADDR_MASK 0x0001
-+#define POR_SETTINGS_LEN 147
++#define PWM_CLK 0x00 /* see resulting PWM_TCLK_NS */
++#define PWM_TCLK_NS 31250 /* 32kHz */
+
+/* Register offset */
+#define REG_INPUT_PORT0 0x00
@@ -4110,44 +6888,25 @@ index 0000000..9d83e32
+/* Commands */
+#define CMD_W_EEPROM_POR 0x03
+#define CMD_R_EEPROM_POR 0x04
-+#define CMD_RECONF_DEV 0x07
++#define CMD_RECONF 0x07
++
++/* Max retries after I2C NAK */
++#define MAX_RETRIES 3
++
++/*
++ * Wait time for device to be ready.
++ * Note the time the part takes depends on the user configuration (mainly on
++ * the number of active interrupts). The minimum delay here covers the
++ * worst-case scenario.
++ */
++#define SLEEP_US_MIN 4000
++#define SLEEP_US_MAX 4500
+
-+/* Galileo-specific POR default settings */
++/* Command string to store platform POR settings */
+#define POR_CMD_W_OFFS 2
-+static u8 por_default[POR_SETTINGS_LEN + POR_CMD_W_OFFS] = {
-+ REG_CMD, CMD_W_EEPROM_POR,
-+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* Output */
-+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* Interrupt mask */
-+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* PWM */
-+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* Inversion */
-+ 0xe0, 0xe0, 0xff, 0xf3, 0x00, 0xff, 0xff, 0xff, /* Direction */
-+ 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, /* Port0 drive mode */
-+ 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, /* Port1 drive mode */
-+ 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* Port2 drive mode */
-+ 0xf3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, /* Port3 drive mode */
-+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, /* Port4 drive mode */
-+ 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* Port5 drive mode */
-+ 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* Port6 drive mode */
-+ 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* Port7 drive mode */
-+ 0x00, 0xff, 0x00, /* PWM0 */
-+ 0x00, 0xff, 0x00, /* PWM1 */
-+ 0x00, 0xff, 0x00, /* PWM2 */
-+ 0x00, 0xff, 0x00, /* PWM3 */
-+ 0x00, 0xff, 0x00, /* PWM4 */
-+ 0x00, 0xff, 0x00, /* PWM5 */
-+ 0x00, 0xff, 0x00, /* PWM6 */
-+ 0x00, 0xff, 0x00, /* PWM7 */
-+ 0x00, 0xff, 0x00, /* PWM8 */
-+ 0x00, 0xff, 0x00, /* PWM9 */
-+ 0x00, 0xff, 0x00, /* PWM10 */
-+ 0x00, 0xff, 0x00, /* PWM11 */
-+ 0x00, 0xff, 0x00, /* PWM12 */
-+ 0x00, 0xff, 0x00, /* PWM13 */
-+ 0x00, 0xff, 0x00, /* PWM14 */
-+ 0x00, 0xff, 0x00, /* PWM15 */
-+ 0xff, /* PWM CLK divider */
-+ 0x02, /* EEPROM enable */
-+ 0x00 /* CRC (placeholder) */
++static u8 por_set[CY8C9540A_POR_SETTINGS_LEN + POR_CMD_W_OFFS] = {
++ [0] = REG_CMD,
++ [1] = CMD_W_EEPROM_POR,
+};
+
+struct cy8c9540a {
@@ -4155,6 +6914,8 @@ index 0000000..9d83e32
+ struct gpio_chip gpio_chip;
+ struct pwm_chip pwm_chip;
+ struct mutex lock;
++ /* IRQ base stored from platform data */
++ int irq_base;
+ /* protect serialized access to the interrupt controller bus */
+ struct mutex irq_lock;
+ /* cached output registers */
@@ -4166,7 +6927,9 @@ index 0000000..9d83e32
+ /* Descriptor for raw i2c transactions */
+ struct i2c_msg i2c_segments[2];
+ /* POR settings stored in the EEPROM */
-+ u8 por_stored[POR_SETTINGS_LEN];
++ u8 por_stored[CY8C9540A_POR_SETTINGS_LEN];
++ /* PWM-to-GPIO mapping (0 == first gpio pin) */
++ int pwm2gpio_mapping[CY8C9540A_NPWM];
+};
+
+/* Per-port GPIO offset */
@@ -4179,29 +6942,6 @@ index 0000000..9d83e32
+ 36,
+};
+
-+/* Galileo-specific data. */
-+
-+#define GPIO_BASE_ID 16
-+#define GPIO_IRQBASE 64
-+#define PWM_BASE_ID 0
-+#define PWM_CLK 0x00 /* see resulting PWM_TCLK_NS */
-+#define PWM_TCLK_NS 31250 /* 32kHz */
-+#define SOC_GPIO_INT_PIN 13
-+#define SOC_GPIO_I2C_A0 7
-+
-+/* PWM-to-GPIO mapping (0 == first Cypress GPIO). */
-+#define PWM_UNUSED -1
-+static const int pwm2gpio_mapping[] = {
-+ PWM_UNUSED,
-+ 3,
-+ PWM_UNUSED,
-+ 2,
-+ 9,
-+ 1,
-+ 8,
-+ 0,
-+};
-+
+static inline u8 cypress_get_port(unsigned gpio)
+{
+ u8 i = 0;
@@ -4267,7 +7007,7 @@ index 0000000..9d83e32
+static int cy8c9540a_gpio_set_drive(struct gpio_chip *chip, unsigned gpio,
+ unsigned mode)
+{
-+ int ret = 0;
++ s32 ret = 0;
+ struct cy8c9540a *dev =
+ container_of(chip, struct cy8c9540a, gpio_chip);
+ struct i2c_client *client = dev->client;
@@ -4420,7 +7160,7 @@ index 0000000..9d83e32
+static void cy8c9540a_irq_mask(struct irq_data *d)
+{
+ struct cy8c9540a *dev = irq_data_get_irq_chip_data(d);
-+ unsigned gpio = d->irq - GPIO_IRQBASE;
++ unsigned gpio = d->irq - dev->irq_base;
+ u8 port = cypress_get_port(gpio);
+
+ dev->irq_mask[port] |= BIT(cypress_get_offs(gpio, port));
@@ -4429,7 +7169,7 @@ index 0000000..9d83e32
+static void cy8c9540a_irq_unmask(struct irq_data *d)
+{
+ struct cy8c9540a *dev = irq_data_get_irq_chip_data(d);
-+ unsigned gpio = d->irq - GPIO_IRQBASE;
++ unsigned gpio = d->irq - dev->irq_base;
+ u8 port = cypress_get_port(gpio);
+
+ dev->irq_mask[port] &= ~BIT(cypress_get_offs(gpio, port));
@@ -4437,7 +7177,9 @@ index 0000000..9d83e32
+
+static int cy8c9540a_gpio_to_irq(struct gpio_chip *chip, unsigned gpio)
+{
-+ return GPIO_IRQBASE + gpio;
++ struct cy8c9540a *dev =
++ container_of(chip, struct cy8c9540a, gpio_chip);
++ return dev->irq_base + gpio;
+}
+
+static int cy8c9540a_irq_set_type(struct irq_data *d, unsigned int type)
@@ -4491,7 +7233,7 @@ index 0000000..9d83e32
+ ret = IRQ_HANDLED;
+ gpio = __ffs(pending);
+ pending &= ~BIT(gpio);
-+ gpio_irq = GPIO_IRQBASE + cy8c9540a_port_offs[port]
++ gpio_irq = dev->irq_base + cy8c9540a_port_offs[port]
+ + gpio;
+ handle_nested_irq(gpio_irq);
+ }
@@ -4537,25 +7279,16 @@ index 0000000..9d83e32
+ }
+ }
+
-+ /* Allocate external interrupt GPIO pin */
-+
-+ ret = gpio_request(SOC_GPIO_INT_PIN, "cy8c9540a-int");
-+ if (ret) {
-+ dev_err(&client->dev, "failed to request gpio%u\n",
-+ SOC_GPIO_INT_PIN);
-+ goto err;
-+ }
-+
+ /* Allocate IRQ descriptors for Cypress GPIOs and set handler */
+
-+ ret = irq_alloc_descs(GPIO_IRQBASE, GPIO_IRQBASE, NGPIO, 0);
++ ret = irq_alloc_descs(dev->irq_base, dev->irq_base, NGPIO, 0);
+ if (ret < 0) {
+ dev_err(&client->dev, "failed to allocate IRQ numbers\n");
-+ goto err_free_gpio;
++ goto err;
+ }
+
+ for (gpio = 0; gpio < NGPIO; gpio++) {
-+ int irq = gpio + GPIO_IRQBASE;
++ int irq = gpio + dev->irq_base;
+ irq_set_chip_data(irq, dev);
+ irq_set_chip_and_handler(irq, &cy8c9540a_irq_chip,
+ handle_edge_irq);
@@ -4563,9 +7296,7 @@ index 0000000..9d83e32
+ irq_set_noprobe(irq);
+ }
+
-+ ret = request_threaded_irq(gpio_to_irq(SOC_GPIO_INT_PIN),
-+ NULL,
-+ cy8c9540a_irq_handler,
++ ret = request_threaded_irq(client->irq, NULL, cy8c9540a_irq_handler,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ dev_name(&client->dev), dev);
+ if (ret) {
@@ -4577,9 +7308,7 @@ index 0000000..9d83e32
+ return 0;
+
+err_free_irq_descs:
-+ irq_free_descs(GPIO_IRQBASE, NGPIO);
-+err_free_gpio:
-+ gpio_free(SOC_GPIO_INT_PIN);
++ irq_free_descs(dev->irq_base, NGPIO);
+err:
+ mutex_destroy(&dev->irq_lock);
+ return ret;
@@ -4587,10 +7316,11 @@ index 0000000..9d83e32
+
+static void cy8c9540a_irq_teardown(struct cy8c9540a *dev)
+{
++ struct i2c_client *client = dev->client;
++
++ irq_free_descs(dev->irq_base, NGPIO);
++ free_irq(client->irq, dev);
+ mutex_destroy(&dev->irq_lock);
-+ irq_free_descs(GPIO_IRQBASE, NGPIO);
-+ free_irq(gpio_to_irq(SOC_GPIO_INT_PIN), dev);
-+ gpio_free(SOC_GPIO_INT_PIN);
+}
+
+static int cy8c9540a_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
@@ -4602,7 +7332,7 @@ index 0000000..9d83e32
+ container_of(chip, struct cy8c9540a, pwm_chip);
+ struct i2c_client *client = dev->client;
+
-+ if (pwm->pwm > NPWM) {
++ if (pwm->pwm >= CY8C9540A_NPWM) {
+ return -EINVAL;
+ }
+
@@ -4647,7 +7377,7 @@ index 0000000..9d83e32
+
+static int cy8c9540a_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
-+ int ret = 0;
++ s32 ret = 0;
+ int gpio = 0;
+ int port = 0, pin = 0;
+ u8 out_reg = 0;
@@ -4656,11 +7386,11 @@ index 0000000..9d83e32
+ container_of(chip, struct cy8c9540a, pwm_chip);
+ struct i2c_client *client = dev->client;
+
-+ if (pwm->pwm > NPWM) {
++ if (pwm->pwm >= CY8C9540A_NPWM) {
+ return -EINVAL;
+ }
+
-+ gpio = pwm2gpio_mapping[pwm->pwm];
++ gpio = dev->pwm2gpio_mapping[pwm->pwm];
+ port = cypress_get_port(gpio);
+ pin = cypress_get_offs(gpio, port);
+ out_reg = REG_OUTPUT_PORT0 + port;
@@ -4675,13 +7405,12 @@ index 0000000..9d83e32
+ mutex_lock(&dev->lock);
+
+ /* Enable PWM */
-+ val = i2c_smbus_read_byte_data(client, REG_SELECT_PWM);
-+ if (val < 0) {
++ ret = i2c_smbus_read_byte_data(client, REG_SELECT_PWM);
++ if (ret < 0) {
+ dev_err(&client->dev, "can't read REG_SELECT_PWM\n");
-+ ret = val;
+ goto end;
+ }
-+ val |= BIT((u8)pin);
++ val = (u8)(ret | BIT((u8)pin));
+ ret = i2c_smbus_write_byte_data(client, REG_SELECT_PWM, val);
+ if (ret < 0) {
+ dev_err(&client->dev, "can't write to SELECT_PWM\n");
@@ -4696,7 +7425,7 @@ index 0000000..9d83e32
+
+static void cy8c9540a_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
-+ int ret = 0;
++ s32 ret = 0;
+ int gpio = 0;
+ int port = 0, pin = 0;
+ u8 val = 0;
@@ -4704,12 +7433,12 @@ index 0000000..9d83e32
+ container_of(chip, struct cy8c9540a, pwm_chip);
+ struct i2c_client *client = dev->client;
+
-+ if (pwm->pwm > NPWM) {
++ if (pwm->pwm >= CY8C9540A_NPWM) {
+ return;
+ }
+
-+ gpio = pwm2gpio_mapping[pwm->pwm];
-+ if (PWM_UNUSED == gpio) {
++ gpio = dev->pwm2gpio_mapping[pwm->pwm];
++ if (CY8C9540A_PWM_UNUSED == gpio) {
+ dev_err(&client->dev, "pwm%d is unused\n", pwm->pwm);
+ return;
+ }
@@ -4720,12 +7449,12 @@ index 0000000..9d83e32
+ mutex_lock(&dev->lock);
+
+ /* Disable PWM */
-+ val = i2c_smbus_read_byte_data(client, REG_SELECT_PWM);
-+ if (val < 0) {
++ ret = i2c_smbus_read_byte_data(client, REG_SELECT_PWM);
++ if (ret < 0) {
+ dev_err(&client->dev, "can't read REG_SELECT_PWM\n");
+ goto end;
+ }
-+ val &= ~BIT((u8)pin);
++ val = (u8)(ret & ~BIT((u8)pin));
+ ret = i2c_smbus_write_byte_data(client, REG_SELECT_PWM, val);
+ if (ret < 0) {
+ dev_err(&client->dev, "can't write to SELECT_PWM\n");
@@ -4738,7 +7467,7 @@ index 0000000..9d83e32
+}
+
+/*
-+ * Some PWMs are unavailable on Galileo. Prevent user from reserving them.
++ * Some PWMs may be unavailable. Prevent user from reserving them.
+ */
+static int cy8c9540a_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
+{
@@ -4747,12 +7476,12 @@ index 0000000..9d83e32
+ container_of(chip, struct cy8c9540a, pwm_chip);
+ struct i2c_client *client = dev->client;
+
-+ if (pwm->pwm > NPWM) {
++ if (pwm->pwm >= CY8C9540A_NPWM) {
+ return -EINVAL;
+ }
+
-+ gpio = pwm2gpio_mapping[pwm->pwm];
-+ if (PWM_UNUSED == gpio) {
++ gpio = dev->pwm2gpio_mapping[pwm->pwm];
++ if (CY8C9540A_PWM_UNUSED == gpio) {
+ dev_err(&client->dev, "pwm%d unavailable\n", pwm->pwm);
+ return -EINVAL;
+ }
@@ -4779,11 +7508,16 @@ index 0000000..9d83e32
+{
+ int ret = 0;
+ struct i2c_client *client = dev->client;
++ struct cy8c9540a_pdata *pdata = client->dev.platform_data;
+ int i = 0;
+ int segments = -1;
-+ int crc_index = sizeof(por_default) - 1;
++ int crc_index = sizeof(por_set) - 1;
+ u8 reg_cmd_r_por[] = { REG_CMD, CMD_R_EEPROM_POR };
+
++ /* Populate platform POR setting table */
++ memcpy(por_set + POR_CMD_W_OFFS, pdata->por_default,
++ sizeof(pdata->por_default));
++
+ /* Read POR settings stored in EEPROM */
+ dev->i2c_segments[0].addr = client->addr;
+ dev->i2c_segments[0].flags = 0; /* write */
@@ -4803,14 +7537,14 @@ index 0000000..9d83e32
+ }
+
+ /* Compute CRC for platform-defined POR settings */
-+ por_default[crc_index] = 0;
++ por_set[crc_index] = 0;
+ for (i = POR_CMD_W_OFFS; i < crc_index; i ++) {
-+ por_default[crc_index] ^= por_default[i];
++ por_set[crc_index] ^= por_set[i];
+ }
+
+ /* Compare POR settings with platform-defined ones */
+ for (i = 0; i < sizeof(dev->por_stored); i ++) {
-+ if (dev->por_stored[i] != por_default[i + POR_CMD_W_OFFS]) {
++ if (dev->por_stored[i] != por_set[i + POR_CMD_W_OFFS]) {
+ break;
+ }
+ }
@@ -4825,8 +7559,8 @@ index 0000000..9d83e32
+ /* Store default POR settings into EEPROM */
+ dev->i2c_segments[0].addr = client->addr;
+ dev->i2c_segments[0].flags = 0; /* write */
-+ dev->i2c_segments[0].len = sizeof(por_default);
-+ dev->i2c_segments[0].buf = por_default;
++ dev->i2c_segments[0].len = sizeof(por_set);
++ dev->i2c_segments[0].buf = por_set;
+ segments = 1;
+ ret = i2c_transfer(client->adapter, dev->i2c_segments, segments);
+ if (segments != ret) {
@@ -4836,14 +7570,18 @@ index 0000000..9d83e32
+ ret = 0;
+ }
+
-+ /* Let EEPROM terminate its last page write. 200ms as per datasheet. */
-+ mdelay(200);
-+
+ /* Reconfigure device with newly stored POR settings */
-+ ret = i2c_smbus_write_byte_data(client, REG_CMD, CMD_RECONF_DEV);
++ for (i = 0; i < MAX_RETRIES; i++) {
++ usleep_range(SLEEP_US_MIN, SLEEP_US_MAX);
++
++ ret = i2c_smbus_write_byte_data(client, REG_CMD, CMD_RECONF);
++ if (0 == ret) {
++ break;
++ }
++ }
++
+ if (ret < 0) {
+ dev_err(&client->dev, "can't reconfigure device\n");
-+ goto end;
+ }
+
+end:
@@ -4896,26 +7634,13 @@ index 0000000..9d83e32
+{
+ struct cy8c9540a *dev;
+ struct gpio_chip *gc;
++ struct cy8c9540a_pdata *pdata = client->dev.platform_data;
+ int ret = 0;
+ s32 dev_id = 0;
+
-+ ret = gpio_request(SOC_GPIO_I2C_A0, "cy8c9540a-addr0");
-+ if (ret) {
-+ pr_err("%s: failed to request gpio%u\n", __func__,
-+ SOC_GPIO_I2C_A0);
-+ return ret;
-+ }
-+
-+ /*
-+ * Galileo uses A0 Extendable Soft Addressing pin on the Cypress part.
-+ * The inverted value of A0 is exposed to a SoC GPIO.
-+ *
-+ * Work out the I2C address of the device based on A0.
-+ */
-+ if (gpio_get_value(SOC_GPIO_I2C_A0)) {
-+ client->addr &= ~I2C_A0_ADDR_MASK;
-+ } else {
-+ client->addr |= I2C_A0_ADDR_MASK;
++ if (NULL == pdata) {
++ pr_err("%s: platform data is missing\n", __func__);
++ return -EINVAL;
+ }
+
+ if (!i2c_check_functionality(client->adapter,
@@ -4924,18 +7649,17 @@ index 0000000..9d83e32
+ I2C_FUNC_SMBUS_BYTE_DATA)) {
+ dev_err(&client->dev, "i2c adapter doesn't support required "
+ "functionality\n");
-+ ret = -EIO;
-+ goto err_i2c_addr;
++ return -EIO;
+ }
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (dev == NULL) {
+ dev_err(&client->dev, "failed to alloc memory\n");
-+ ret = -ENOMEM;
-+ goto err_i2c_addr;
++ return -ENOMEM;
+ }
+
+ dev->client = client;
++ dev->irq_base = pdata->irq_base;
+
+ gc = &dev->gpio_chip;
+ gc->direction_input = cy8c9540a_gpio_direction_input;
@@ -4946,7 +7670,7 @@ index 0000000..9d83e32
+
+ gc->can_sleep = 1;
+
-+ gc->base = GPIO_BASE_ID;
++ gc->base = pdata->gpio_base;
+ gc->ngpio = NGPIO;
+ gc->label = client->name;
+ gc->owner = THIS_MODULE;
@@ -4986,8 +7710,11 @@ index 0000000..9d83e32
+
+ dev->pwm_chip.dev = &client->dev;
+ dev->pwm_chip.ops = &cy8c9540a_pwm_ops;
-+ dev->pwm_chip.base = PWM_BASE_ID;
-+ dev->pwm_chip.npwm = NPWM;
++ dev->pwm_chip.base = pdata->pwm_base;
++ dev->pwm_chip.npwm = CY8C9540A_NPWM;
++
++ /* Populate platform-specific PWM-to-GPIO mapping table */
++ memcpy(dev->pwm2gpio_mapping, pdata->pwm2gpio_mapping, sizeof(pdata->pwm2gpio_mapping));
+
+ ret = pwmchip_add(&dev->pwm_chip);
+ if (ret) {
@@ -5007,8 +7734,6 @@ index 0000000..9d83e32
+err:
+ mutex_destroy(&dev->lock);
+ kfree(dev);
-+err_i2c_addr:
-+ gpio_free(SOC_GPIO_I2C_A0);
+
+ return ret;
+}
@@ -5036,8 +7761,6 @@ index 0000000..9d83e32
+ mutex_destroy(&dev->lock);
+ kfree(dev);
+
-+ gpio_free(SOC_GPIO_I2C_A0);
-+
+ return err;
+}
+
@@ -5065,10 +7788,10 @@ index 0000000..9d83e32
+
diff --git a/drivers/mfd/intel_cln_gip.h b/drivers/mfd/intel_cln_gip.h
new file mode 100644
-index 0000000..18ab774
+index 0000000..80472ae
--- /dev/null
+++ b/drivers/mfd/intel_cln_gip.h
-@@ -0,0 +1,101 @@
+@@ -0,0 +1,104 @@
+/*
+ * Copyright(c) 2013 Intel Corporation. All rights reserved.
+ *
@@ -5096,6 +7819,7 @@ index 0000000..18ab774
+#define __INTEL_CLNGIP_H__
+
+#include <linux/i2c.h>
++#include <linux/mfd/intel_cln_gip_pdata.h>
+#include <linux/pci.h>
+#include "../i2c/busses/i2c-designware-core.h"
+
@@ -5152,12 +7876,14 @@ index 0000000..18ab774
+ * intel_cln_i2c_probe
+ * @param pdev: Pointer to GIP PCI device
+ * @param drvdata: private driver data
++ * @param pdata: GIP platform-specific settings
+ * @return 0 success < 0 failure
+ *
+ * Perform I2C-specific probing on behalf of the top-level GIP driver.
+ */
+int intel_cln_i2c_probe(struct pci_dev *pdev,
-+ struct dw_i2c_dev **drvdata);
++ struct dw_i2c_dev **drvdata,
++ struct intel_cln_gip_pdata *pdata);
+
+/**
+ * intel_cln_i2c_remove
@@ -5172,10 +7898,10 @@ index 0000000..18ab774
+#endif /* __INTEL_CLNGIP_H__ */
diff --git a/drivers/mfd/intel_cln_gip_core.c b/drivers/mfd/intel_cln_gip_core.c
new file mode 100644
-index 0000000..7464b32
+index 0000000..24f175d
--- /dev/null
+++ b/drivers/mfd/intel_cln_gip_core.c
-@@ -0,0 +1,327 @@
+@@ -0,0 +1,335 @@
+/*
+ * Copyright(c) 2013 Intel Corporation. All rights reserved.
+ *
@@ -5213,6 +7939,7 @@ index 0000000..7464b32
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
++#include <linux/mfd/intel_cln_gip_pdata.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include "intel_cln_gip.h"
@@ -5233,6 +7960,7 @@ index 0000000..7464b32
+struct intel_cln_gip_data {
+ struct pci_dev *pci_device;
+ struct dw_i2c_dev *i2c_drvdata;
++ struct intel_cln_gip_pdata *pdata;
+};
+
+/**
@@ -5389,6 +8117,11 @@ index 0000000..7464b32
+
+ gip_drvdata->pci_device = pdev;
+
++ /* Retrieve platform data if there is any */
++ if (*intel_cln_gip_get_pdata) {
++ gip_drvdata->pdata = intel_cln_gip_get_pdata();
++ }
++
+ if (gpio) {
+ retval = intel_cln_gpio_probe(pdev);
+ if (retval)
@@ -5397,7 +8130,8 @@ index 0000000..7464b32
+
+ if (i2c) {
+ retval = intel_cln_i2c_probe(pdev,
-+ (struct dw_i2c_dev **)&gip_drvdata->i2c_drvdata);
++ (struct dw_i2c_dev **)&gip_drvdata->i2c_drvdata,
++ gip_drvdata->pdata);
+ if (retval)
+ goto err_release_drvdata;
+ }
@@ -6171,10 +8905,10 @@ index 0000000..6e2bbbf
+}
diff --git a/drivers/mfd/intel_cln_gip_i2c.c b/drivers/mfd/intel_cln_gip_i2c.c
new file mode 100644
-index 0000000..3675e50
+index 0000000..279ebb3
--- /dev/null
+++ b/drivers/mfd/intel_cln_gip_i2c.c
-@@ -0,0 +1,204 @@
+@@ -0,0 +1,248 @@
+/*
+ * Copyright(c) 2013 Intel Corporation. All rights reserved.
+ *
@@ -6216,9 +8950,19 @@ index 0000000..3675e50
+ clanton_0,
+};
+
-+static unsigned int i2c_std_mode;
++/*
++ * By default, driver operates in fast mode (400kHz).
++ *
++ * Standard mode operation (100kHz) can be forced via, in order of priority:
++ * 1. setting the following i2c_std_mode module parameter to 1
++ * 2. setting the platform data i2c_std_mode parameter to 1
++ *
++ * Note in both cases, setting i2c_std_mode to 0 means forcing fast mode
++ * operation.
++ */
++static unsigned int i2c_std_mode = -1;
+module_param(i2c_std_mode, uint, S_IRUSR);
-+MODULE_PARM_DESC(i2c_std_mode, "Force I2C standard mode");
++MODULE_PARM_DESC(i2c_std_mode, "Set to 1 to force I2C standard mode");
+
+#define INTEL_CLN_STD_CFG (DW_IC_CON_MASTER | \
+ DW_IC_CON_SLAVE_DISABLE | \
@@ -6266,7 +9010,8 @@ index 0000000..3675e50
+ * Also call into I2C core driver routines for initiating the device.
+ */
+int intel_cln_i2c_probe(struct pci_dev *pdev,
-+ struct dw_i2c_dev **drvdata)
++ struct dw_i2c_dev **drvdata,
++ struct intel_cln_gip_pdata *pdata)
+{
+ int retval = 0;
+ resource_size_t start = 0, len = 0;
@@ -6274,12 +9019,45 @@ index 0000000..3675e50
+ struct i2c_adapter *adap = NULL;
+ void __iomem *reg_base = NULL;
+ struct dw_pci_controller *controller = NULL;
++ int std_mode = -1;
+
+ controller = &cln_gip_i2c_controller;
+
-+ /* Clanton default configuration is fast mode, unless otherwise asked */
-+ if (i2c_std_mode)
-+ controller->bus_cfg = INTEL_CLN_STD_CFG | DW_IC_CON_SPEED_STD;
++ /* Clanton default configuration is fast mode, unless otherwise forced */
++ if (-1 != i2c_std_mode) {
++ switch (i2c_std_mode) {
++ case 0:
++ case 1:
++ std_mode = i2c_std_mode;
++ break;
++ default:
++ dev_err(&pdev->dev, "invalid i2c_std_mode param val %d."
++ " Using driver default\n", i2c_std_mode);
++ break;
++ }
++ } else if (pdata) {
++ switch (pdata->i2c_std_mode) {
++ case 0:
++ case 1:
++ std_mode = pdata->i2c_std_mode;
++ break;
++ default:
++ dev_err(&pdev->dev, "invalid i2c_std_mode pdata val %d."
++ " Using driver default\n", pdata->i2c_std_mode);
++ break;
++ }
++ }
++ if (-1 != std_mode) {
++ if (0 == std_mode) {
++ controller->bus_cfg |= DW_IC_CON_SPEED_FAST;
++ controller->bus_cfg &= ~DW_IC_CON_SPEED_STD;
++ } else {
++ controller->bus_cfg &= ~DW_IC_CON_SPEED_FAST;
++ controller->bus_cfg |= DW_IC_CON_SPEED_STD;
++ }
++ dev_info(&pdev->dev, "i2c speed set to %skHz\n",
++ std_mode ? "100" : "400");
++ }
+
+ /* Determine the address of the I2C area */
+ start = pci_resource_start(pdev, GIP_I2C_BAR);
@@ -6379,12 +9157,43 @@ index 0000000..3675e50
+
+ kfree(dev);
+}
+diff --git a/drivers/mfd/intel_cln_gip_pdata.c b/drivers/mfd/intel_cln_gip_pdata.c
+new file mode 100644
+index 0000000..853efdd
+--- /dev/null
++++ b/drivers/mfd/intel_cln_gip_pdata.c
+@@ -0,0 +1,25 @@
++/*
++ * Copyright(c) 2013 Intel Corporation. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of version 2 of the GNU General Public License as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Contact Information:
++ * Intel Corporation
++ */
++
++#include <linux/module.h>
++#include <linux/mfd/intel_cln_gip_pdata.h>
++
++struct intel_cln_gip_pdata *(*intel_cln_gip_get_pdata)(void) = NULL;
++EXPORT_SYMBOL_GPL(intel_cln_gip_get_pdata);
diff --git a/drivers/mfd/intel_cln_gip_test.c b/drivers/mfd/intel_cln_gip_test.c
new file mode 100644
-index 0000000..ea8d846
+index 0000000..c88f95c
--- /dev/null
+++ b/drivers/mfd/intel_cln_gip_test.c
-@@ -0,0 +1,1202 @@
+@@ -0,0 +1,1131 @@
+/*
+ * Copyright(c) 2013 Intel Corporation. All rights reserved.
+ *
@@ -6417,13 +9226,11 @@ index 0000000..ea8d846
+#include <linux/fs.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
++#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/printk.h>
-+#include <linux/spi/spi.h>
-+#include <linux/spi/spi_bitbang.h>
-+#include <linux/spi/spi_gpio.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+
@@ -6466,6 +9273,8 @@ index 0000000..ea8d846
+/* Exercise callbacks for S0/S3 power-state transitions and vice-versa */
+#define IOCTL_CLN_GIP_SYSTEM_SUSPEND _IO(GIP_TEST_IOCTL_CODE, 0x17)
+#define IOCTL_CLN_GIP_SYSTEM_RESUME _IO(GIP_TEST_IOCTL_CODE, 0x18)
++#define IOCTL_CLN_GPIO_NMI_ENABLE _IO(GIP_TEST_IOCTL_CODE, 0x19)
++#define IOCTL_CLN_GPIO_NMI_DISABLE _IO(GIP_TEST_IOCTL_CODE, 0x1A)
+
+#define GPIO_INT_EDGE_POS_LABEL "gpio-edge-pos"
+#define GPIO_INT_EDGE_NEG_LABEL "gpio-edge-neg"
@@ -6481,17 +9290,9 @@ index 0000000..ea8d846
+#define SUT_GPIO_NC_0 0x00
+#define SUT_GPIO_NC_1 0x01
+#define SUT_GPIO_NC_2 0x02
-+#define SUT_GPIO_NC_3 0x03
-+#define SUT_GPIO_NC_4 0x04
-+#define SUT_GPIO_NC_5 0x05
-+#define SUT_GPIO_NC_6 0x06
+#define SUT_GPIO_NC_7 0x07
+#define SUT_GPIO_SC_0 0x08
+#define SUT_GPIO_SC_1 0x09
-+#define SUT_GPIO_SC_2 0x0A
-+#define SUT_GPIO_SC_3 0x0B
-+#define SUT_GPIO_SC_4 0x0C
-+#define SUT_GPIO_SC_5 0x0D
+#define SUT_GPIO_SC_6 0x0E
+#define SUT_GPIO_SC_7 0x0F
+
@@ -6520,10 +9321,6 @@ index 0000000..ea8d846
+static DEFINE_MUTEX(gip_test_mutex);
+static int gip_test_major;
+
-+/* Private pointers to NC/SC bitbanged SPI devices */
-+static struct platform_device *spi_gpio_nc_pdev = NULL;
-+static struct platform_device *spi_gpio_sc_pdev = NULL;
-+
+/*
+ * Level-triggered interrupt variables
+ */
@@ -6608,49 +9405,6 @@ index 0000000..ea8d846
+ gpio_set_value(SUT_GPIO_SC_7, level_high_triggered ? 1 : 0);
+}
+
-+/*
-+ * Define bitbanged SPI interface over Nort-Cluster South-Cluster GPIO blocks.
-+ * Assign GPIO to SCK/MOSI/MISO
-+ */
-+static struct spi_gpio_platform_data spi_gpio_nc_data = {
-+ .sck = SUT_GPIO_NC_3,
-+ .mosi = SUT_GPIO_NC_4,
-+ .miso = SUT_GPIO_NC_5,
-+ .num_chipselect = 1,
-+};
-+static struct spi_gpio_platform_data spi_gpio_sc_data = {
-+ .sck = SUT_GPIO_SC_2,
-+ .mosi = SUT_GPIO_SC_3,
-+ .miso = SUT_GPIO_SC_4,
-+ .num_chipselect = 1,
-+};
-+
-+/*
-+ * Board information for SPI devices.
-+ */
-+static struct spi_board_info spi_gpio_nc_board_info[] = {
-+ {
-+ .modalias = "spidev",
-+ .max_speed_hz = 1000,
-+ .bus_num = GPIO_NC_BITBANG_SPI_BUS,
-+ .mode = SPI_MODE_0,
-+ .platform_data = &spi_gpio_nc_data,
-+ /* Assign GPIO to CS */
-+ .controller_data = (void *)SUT_GPIO_NC_6,
-+ },
-+};
-+static struct spi_board_info spi_gpio_sc_board_info[] = {
-+ {
-+ .modalias = "spidev",
-+ .max_speed_hz = 1000,
-+ .bus_num = GPIO_SC_BITBANG_SPI_BUS,
-+ .mode = SPI_MODE_0,
-+ .platform_data = &spi_gpio_sc_data,
-+ /* Assign GPIO to CS */
-+ .controller_data = (void *)SUT_GPIO_SC_5,
-+ },
-+};
-+
+/**
+ * gpio_sc_level_int
+ *
@@ -7117,57 +9871,8 @@ index 0000000..ea8d846
+ */
+static int gpio_spidev_register(int north_cluster)
+{
-+ int err = -ENOMEM;
-+ struct platform_device *pdev = NULL;
-+ struct spi_gpio_platform_data *pdata =
-+ north_cluster ? &spi_gpio_nc_data : &spi_gpio_sc_data;
-+ struct spi_board_info *gpio_spi_board_info =
-+ (north_cluster ? spi_gpio_nc_board_info : spi_gpio_sc_board_info);
-+
-+ if (north_cluster) {
-+ spi_gpio_nc_pdev = NULL;
-+ } else {
-+ spi_gpio_sc_pdev = NULL;
-+ }
-+
-+ pdev = platform_device_alloc("spi_gpio",
-+ north_cluster ? GPIO_NC_BITBANG_SPI_BUS : GPIO_SC_BITBANG_SPI_BUS);
-+ if (NULL == pdev) {
-+ goto err_out;
-+ }
-+ err = platform_device_add_data(pdev, pdata, sizeof(*pdata));
-+ if (err) {
-+ goto err_put_pd;
-+ }
-+ err = platform_device_add(pdev);
-+ if (err) {
-+ goto err_put_pd;
-+ }
-+
-+ err = spi_register_board_info(gpio_spi_board_info,
-+ /*
-+ * Note I pass an array here instead of a pointer in order not
-+ * to break ARRAY_SIZE.
-+ */
-+ ARRAY_SIZE(spi_gpio_sc_board_info));
-+ if (err) {
-+ goto err_del_pd;
-+ }
-+
-+ if (north_cluster) {
-+ spi_gpio_nc_pdev = pdev;
-+ } else {
-+ spi_gpio_sc_pdev = pdev;
-+ }
-+
++ /* Not needed anymore */
+ return 0;
-+
-+err_del_pd:
-+ platform_device_del(pdev);
-+err_put_pd:
-+ platform_device_put(pdev);
-+err_out:
-+ return err;
+}
+
+/**
@@ -7178,31 +9883,8 @@ index 0000000..ea8d846
+ */
+static int gpio_spidev_unregister(int north_cluster)
+{
-+ int ret = 0;
-+
-+ struct platform_device *pdev =
-+ (north_cluster ? spi_gpio_nc_pdev : spi_gpio_sc_pdev);
-+ struct spi_board_info *gpio_spi_board_info =
-+ (north_cluster ? spi_gpio_nc_board_info : spi_gpio_sc_board_info);
-+
-+ ret = spi_unregister_board_info(gpio_spi_board_info,
-+ /*
-+ * Note I pass an array here instead of a pointer in order not
-+ * to break ARRAY_SIZE.
-+ */
-+ ARRAY_SIZE(spi_gpio_sc_board_info));
-+
-+ if (0 == ret) {
-+ platform_device_unregister(pdev);
-+ }
-+
-+ if (north_cluster) {
-+ spi_gpio_nc_pdev = NULL;
-+ } else {
-+ spi_gpio_sc_pdev = NULL;
-+ }
-+
-+ return ret;
++ /* Not needed anymore */
++ return 0;
+}
+
+/**
@@ -7237,6 +9919,56 @@ index 0000000..ea8d846
+}
+
+/**
++ * gpio_nmi_enable
++ *
++ * @param enable: 0 to disable, !0 to enable
++ * @return 0 success < 0 failure
++ *
++ * Hack the legacy GPIO hardware to enable rising-edge triggered NMI on Core
++ * Well gpio0.
++ *
++ */
++static int gpio_nmi_enable(int enable)
++{
++ unsigned int base_u32 = 0x0;
++ unsigned short base = 0x0;
++ struct pci_dev *ilb = pci_get_device(PCI_VENDOR_ID_INTEL,
++ PCI_DEVICE_ID_INTEL_CLANTON_ILB,
++ NULL);
++ /* Assume interrupts are disabled by default by BIOS */
++ unsigned char gpio = enable ? 0x01 : 0x00;
++
++ if (NULL == ilb) {
++ pr_err("can't find iLB device\n");
++ return -ENOENT;
++ }
++
++ /* The GPIO base address is @ offset 0x44. Sussed out from driver */
++ pci_read_config_dword(ilb, 0x44, &base_u32);
++ if (0x0 == base_u32) {
++ pr_err("can't read iLB GPIO baseaddr\n");
++ return -ENOENT;
++ }
++ base = (unsigned short)base_u32;
++
++ /*
++ * Prepare for rising edge NMI triggering. This assumes the pin
++ * is already set as input.
++ */
++#define CGTPE 0x0C /* Core Well trigger positive edge */
++#define CGTS 0x1C /* Core Well trigger status - W1C */
++#define CGNMIEN 0x40 /* Core Well NMI enable */
++ outb(0x01, base + CGTS);
++ outb(gpio, base + CGTPE);
++ outb(gpio, base + CGNMIEN);
++#undef CGTPE
++#undef CGTS
++#undef CGNMIEN
++
++ return 0;
++}
++
++/**
+ * gpio_sc_debounce
+ *
+ * Enable GPIO debounce functionality for SC_GPIO_1 (edge and level triggered)
@@ -7431,6 +10163,12 @@ index 0000000..ea8d846
+ case IOCTL_CLN_GIP_SYSTEM_RESUME:
+ ret = gip_system_power_transition(1);
+ break;
++ case IOCTL_CLN_GPIO_NMI_ENABLE:
++ ret = gpio_nmi_enable(1);
++ break;
++ case IOCTL_CLN_GPIO_NMI_DISABLE:
++ ret = gpio_nmi_enable(0);
++ break;
+ default:
+ break;
+ }
@@ -8903,12 +11641,14 @@ index b75f4b2..f74b542 100644
free_netdev(ndev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
-index 064eaac..38213d2 100644
+index 064eaac..bc1a2a5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
-@@ -24,29 +24,186 @@
+@@ -23,32 +23,194 @@
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
++#include <linux/dmi.h>
#include <linux/pci.h>
+#include <linux/platform_data/clanton.h>
#include "stmmac.h"
@@ -8936,89 +11676,108 @@ index 064eaac..38213d2 100644
+struct stmmac_cln_mac_data {
+ int phy_addr;
+ int bus_id;
-+ cln_plat_id_t plat_id;
++ const char * name;
+};
+
+static struct stmmac_cln_mac_data phy_data [] = {
+ {
+ .phy_addr = -1, /* not connected */
+ .bus_id = 1,
-+ .plat_id = CLANTON_EMULATION,
++ .name = "QuarkEmulation",
+ },
+ {
+ .phy_addr = 1,
+ .bus_id = 2,
-+ .plat_id = CLANTON_EMULATION,
++ .name = "QuarkEmulation",
+ },
+ {
+ .phy_addr = 3,
+ .bus_id = 1,
-+ .plat_id = CLANTON_PEAK,
++ .name = "ClantonPeakSVP",
+ },
+ {
+ .phy_addr = 1,
+ .bus_id = 2,
-+ .plat_id = CLANTON_PEAK,
++ .name = "ClantonPeakSVP",
+ },
+ {
+ .phy_addr = 1,
+ .bus_id = 1,
-+ .plat_id = KIPS_BAY,
++ .name = "KipsBay",
+ },
+ {
+ .phy_addr = -1, /* not connected */
+ .bus_id = 2,
-+ .plat_id = KIPS_BAY,
++ .name = "KipsBay",
+ },
+ {
+ .phy_addr = 1,
+ .bus_id = 1,
-+ .plat_id = CROSS_HILL,
++ .name = "CrossHill",
+ },
+ {
+ .phy_addr = 1,
+ .bus_id = 2,
-+ .plat_id = CROSS_HILL,
++ .name = "CrossHill",
+ },
+ {
+ .phy_addr = 1,
+ .bus_id = 1,
-+ .plat_id = CLANTON_HILL,
++ .name = "ClantonHill",
+ },
+ {
+ .phy_addr = 1,
+ .bus_id = 2,
-+ .plat_id = CLANTON_HILL,
++ .name = "ClantonHill",
+ },
+ {
+ .phy_addr = 1,
+ .bus_id = 1,
-+ .plat_id = IZMIR,
++ .name = "Galileo",
+ },
+ {
+ .phy_addr = -1, /* not connected */
+ .bus_id = 2,
-+ .plat_id = IZMIR,
++ .name = "Galileo",
+ },
+};
+
+
-+static int stmmac_find_phy_addr(int mdio_bus_id, cln_plat_id_t cln_plat_id)
++static int stmmac_find_phy_addr(int mdio_bus_id)
+{
+ int i = 0;
++ const char * board_name = dmi_get_system_info(DMI_BOARD_NAME);
++ if (board_name == NULL)
++ return -1;
+
+ for (; i < sizeof(phy_data)/sizeof(struct stmmac_cln_mac_data); i++){
-+ if ( phy_data[i].plat_id == cln_plat_id &&
++ if ((!strcmp(phy_data[i].name, board_name)) &&
+ phy_data[i].bus_id == mdio_bus_id)
+ return phy_data[i].phy_addr;
+ }
+
+ return -1;
+}
-+
+
+-static void stmmac_default_data(void)
+static int stmmac_default_data(struct plat_stmmacenet_data *plat_dat,
+ int mdio_bus_id, const struct pci_device_id *id)
-+{
+ {
+- memset(&plat_dat, 0, sizeof(struct plat_stmmacenet_data));
+- plat_dat.bus_id = 1;
+- plat_dat.phy_addr = 0;
+- plat_dat.interface = PHY_INTERFACE_MODE_GMII;
+- plat_dat.clk_csr = 2; /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
+- plat_dat.has_gmac = 1;
+- plat_dat.force_sf_dma_mode = 1;
+-
+- mdio_data.phy_reset = NULL;
+- mdio_data.phy_mask = 0;
+- plat_dat.mdio_bus_data = &mdio_data;
+-
+- dma_cfg.pbl = 32;
+- dma_cfg.burst_len = DMA_AXI_BLEN_256;
+- plat_dat.dma_cfg = &dma_cfg;
+ int phy_addr = 0;
+ memset(plat_dat, 0, sizeof(struct plat_stmmacenet_data));
+
@@ -9033,8 +11792,7 @@ index 064eaac..38213d2 100644
+
+ if (id->device == STMMAC_CLANTON_ID) {
+
-+ phy_addr = stmmac_find_phy_addr(mdio_bus_id,
-+ intel_cln_plat_get_id());
++ phy_addr = stmmac_find_phy_addr(mdio_bus_id);
+ if (phy_addr == -1)
+ return -ENODEV;
+
@@ -9072,9 +11830,9 @@ index 064eaac..38213d2 100644
+ }
+
+ return 0;
-+}
+ }
--static void stmmac_default_data(void)
++#if 0
+/**
+ * stmmac_pci_find_mac
+ *
@@ -9085,22 +11843,7 @@ index 064eaac..38213d2 100644
+ * a random one for itself in any case
+ */
+void stmmac_pci_find_mac (struct stmmac_priv * priv, unsigned int mdio_bus_id)
- {
-- memset(&plat_dat, 0, sizeof(struct plat_stmmacenet_data));
-- plat_dat.bus_id = 1;
-- plat_dat.phy_addr = 0;
-- plat_dat.interface = PHY_INTERFACE_MODE_GMII;
-- plat_dat.clk_csr = 2; /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
-- plat_dat.has_gmac = 1;
-- plat_dat.force_sf_dma_mode = 1;
--
-- mdio_data.phy_reset = NULL;
-- mdio_data.phy_mask = 0;
-- plat_dat.mdio_bus_data = &mdio_data;
--
-- dma_cfg.pbl = 32;
-- dma_cfg.burst_len = DMA_AXI_BLEN_256;
-- plat_dat.dma_cfg = &dma_cfg;
++{
+ unsigned int id = mdio_bus_id - 1;
+ if (priv == NULL || id >= MAX_INTERFACES)
+ return;
@@ -9109,10 +11852,13 @@ index 064eaac..38213d2 100644
+ (char*)&stmmac_mac_data[id]) == 0){
+ memcpy(priv->dev->dev_addr, &stmmac_mac_data[id], ETH_ALEN);
+ }
- }
-
++}
++#endif
++
/**
-@@ -67,8 +224,21 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
+ * stmmac_pci_probe
+ *
+@@ -67,8 +229,21 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
int ret = 0;
void __iomem *addr = NULL;
struct stmmac_priv *priv = NULL;
@@ -9134,7 +11880,7 @@ index 064eaac..38213d2 100644
/* Enable pci device */
ret = pci_enable_device(pdev);
if (ret) {
-@@ -96,30 +266,51 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
+@@ -96,30 +271,51 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
break;
}
pci_set_master(pdev);
@@ -9153,9 +11899,9 @@ index 064eaac..38213d2 100644
pr_err("%s: main driver probe failed", __func__);
goto err_out;
}
-+
++#if 0
+ stmmac_pci_find_mac(priv, bus_id);
-+
++#endif
priv->dev->irq = pdev->irq;
priv->wol_irq = pdev->irq;
-
@@ -9191,7 +11937,7 @@ index 064eaac..38213d2 100644
return ret;
}
-@@ -138,6 +329,21 @@ static void stmmac_pci_remove(struct pci_dev *pdev)
+@@ -138,6 +334,21 @@ static void stmmac_pci_remove(struct pci_dev *pdev)
stmmac_dvr_remove(ndev);
pci_set_drvdata(pdev, NULL);
@@ -9213,7 +11959,7 @@ index 064eaac..38213d2 100644
pci_iounmap(pdev, priv->ioaddr);
pci_release_regions(pdev);
pci_disable_device(pdev);
-@@ -167,12 +373,10 @@ static int stmmac_pci_resume(struct pci_dev *pdev)
+@@ -167,12 +378,10 @@ static int stmmac_pci_resume(struct pci_dev *pdev)
}
#endif
@@ -9995,10 +12741,10 @@ index bf7e4f9..fce76ef 100644
+obj-$(CONFIG_INTEL_QUARK_X1000_SOC) += quark/
diff --git a/drivers/platform/x86/quark/Kconfig b/drivers/platform/x86/quark/Kconfig
new file mode 100644
-index 0000000..5d47629
+index 0000000..dffa0fc
--- /dev/null
+++ b/drivers/platform/x86/quark/Kconfig
-@@ -0,0 +1,75 @@
+@@ -0,0 +1,41 @@
+config INTEL_CLN_ESRAM
+ bool "eSRAM - embedded SRAM driver for Intel Clanton platform"
+ depends on INTEL_QUARK_X1000_SOC && PM
@@ -10027,41 +12773,6 @@ index 0000000..5d47629
+ most people. Set to zero to disable - this is NOT recommended. Max 48
+ hours.
+
-+config INTEL_CLN_ECC_SCRUB
-+ bool "Hardware ECC Scrub - /proc interface for Intel Clanton platform"
-+ depends on INTEL_QUARK_X1000_SOC
-+ help
-+ Say Y here to enable support for accessing the hardware memory
-+ ECC Scrubber via the /proc interface.
-+
-+config INTEL_CLN_ECC_SCRUB_OVERRIDE_CONFIG
-+ bool "Hardware ECC Scrub - use config settings to override scrub vals"
-+ depends on INTEL_CLN_ECC_SCRUB
-+ help
-+ Say Y here to enable support to use config settings to override
-+ BIOS configured scrub values
-+
-+config INTEL_CLN_HW_ECC_REFRESH_RATE
-+ int "Choose DRAM ECC refresh rate"
-+ depends on INTEL_CLN_ECC_SCRUB_OVERRIDE_CONFIG
-+ default 20
-+ help
-+ Range 0 - 255 mSec
-+
-+config INTEL_CLN_HW_ECC_REFRESH_SIZE
-+ int "Choose DRAM ECC refresh size"
-+ depends on INTEL_CLN_ECC_SCRUB_OVERRIDE_CONFIG
-+ default 512
-+ help
-+ Range 64-512 bytes, multiples of 32
-+
-+config INTEL_CLN_ECC_SCRUB_S3_CONFIG
-+ bool "Hardware ECC Scrub - linux manages S3 entry/resume for scrub"
-+ depends on INTEL_CLN_ECC_SCRUB
-+ help
-+ Say Y here to enable linux to manage S3 entry/resume for the
-+ hardware memory ECC Scrubber.
-+
+config INTEL_CLN_THERMAL
+ bool "Thermal driver for Intel Clanton platform"
+ depends on INTEL_QUARK_X1000_SOC
@@ -10074,15 +12785,14 @@ index 0000000..5d47629
+ depends on INTEL_QUARK_X1000_SOC
+ help
+ Say Y here to enable Clanton's audio control driver
++
diff --git a/drivers/platform/x86/quark/Makefile b/drivers/platform/x86/quark/Makefile
new file mode 100644
-index 0000000..00c4ce7
+index 0000000..53bfc65
--- /dev/null
+++ b/drivers/platform/x86/quark/Makefile
-@@ -0,0 +1,18 @@
+@@ -0,0 +1,15 @@
+obj-$(CONFIG_INTEL_QUARK_X1000_SOC) += intel_cln_board_data.o
-+obj-$(CONFIG_INTEL_QUARK_X1000_SOC) += intel_cln_layout_data.o
-+obj-$(CONFIG_INTEL_QUARK_X1000_SOC) += intel_cln_plat_data.o
+obj-$(CONFIG_INTEL_QUARK_X1000_SOC) += intel_cln_plat_clanton_hill.o
+obj-$(CONFIG_INTEL_QUARK_X1000_SOC) += intel_cln_plat_clanton_peak.o
+obj-$(CONFIG_INTEL_QUARK_X1000_SOC) += intel_cln_plat_cross_hill.o
@@ -10095,7 +12805,6 @@ index 0000000..00c4ce7
+obj-$(CONFIG_INTEL_QUARK_X1000_SOC) += intel_cln_imr_test.o
+obj-$(CONFIG_INTEL_CLN_ESRAM) += intel_cln_esram_test.o
+#obj-$(CONFIG_INTEL_CLN_ESRAM) += intel_cln_smep_test.o
-+obj-$(CONFIG_INTEL_CLN_ECC_SCRUB) += intel_cln_ecc_scrub.o
+obj-$(CONFIG_INTEL_CLN_THERMAL) += intel_cln_thermal.o
+obj-$(CONFIG_INTEL_CLN_AUDIO_CTRL) += intel_cln_audio_ctrl.o
diff --git a/drivers/platform/x86/quark/intel_cln_audio_ctrl.c b/drivers/platform/x86/quark/intel_cln_audio_ctrl.c
@@ -10671,10 +13380,10 @@ index 0000000..b6c4692
+#endif /* __INTEL_CLN_AUDIO_CTRL_H__ */
diff --git a/drivers/platform/x86/quark/intel_cln_board_data.c b/drivers/platform/x86/quark/intel_cln_board_data.c
new file mode 100644
-index 0000000..da226c7
+index 0000000..3888e3e
--- /dev/null
+++ b/drivers/platform/x86/quark/intel_cln_board_data.c
-@@ -0,0 +1,207 @@
+@@ -0,0 +1,260 @@
+/*
+ * Copyright(c) 2013 Intel Corporation. All rights reserved.
+ *
@@ -10703,8 +13412,10 @@ index 0000000..da226c7
+ */
+
+#include <asm/io.h>
++#include <linux/dmi.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
++#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/printk.h>
@@ -10760,29 +13471,9 @@ index 0000000..da226c7
+ u32 res0;
+};
+
-+static struct resource conf_res __initdata = {
-+ .flags = IORESOURCE_MEM,
-+ .start = 0,
-+ .end = 0,
-+};
-+
-+static struct resource plat_res __initdata = {
-+ .flags = IORESOURCE_MEM,
-+ .start = 0,
-+ .end = 0,
-+};
-+
-+static struct platform_device conf_pdev = {
-+ .name = "cln-layout-conf",
-+ .id = -1,
-+ .resource = &conf_res,
-+};
-+
+struct kobject * board_data_kobj;
+EXPORT_SYMBOL_GPL(board_data_kobj);
+
-+static bool mfh_plat_found = false;
-+
+static long unsigned int flash_version_data;
+static ssize_t flash_version_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
@@ -10795,18 +13486,72 @@ index 0000000..da226c7
+
+extern int intel_cln_plat_probe(struct resource * pres);
+
++#define DEFAULT_BOARD "Galileo"
++
++static struct platform_device bsp_data [] = {
++ {
++ .name = "QuarkEmulation",
++ .id = -1,
++ },
++ {
++ .name = "ClantonPeakSVP",
++ .id = -1,
++ },
++ {
++ .name = "KipsBay",
++ .id = -1,
++ },
++ {
++ .name = "CrossHill",
++ .id = -1,
++ },
++ {
++ .name = "ClantonHill",
++ .id = -1,
++ },
++ {
++ .name = "Galileo",
++ .id = -1,
++ },
++
++};
++
+/**
-+ * intel_cln_board_data_init
++ * add_firmware_sysfs_entry
++ *
++ * Add an entry in sysfs consistent with Galileo IDE's expected location
++ * covers current software versions and legacy code < Intel Galileo BIOS 0.9.0
+ *
-+ * Module entry point
+ */
-+static int __init intel_cln_board_data_init(void)
++static int add_firmware_sysfs_entry(const char * board_name)
++{
++ pr_info("Intel Quark Board %s Firmware Version %#010lx\n",
++ board_name, flash_version_data);
++
++ /* board_data_kobj subordinate of firmware @ /sys/firmware/board_data */
++ board_data_kobj = kobject_create_and_add("board_data", firmware_kobj);
++ if (!board_data_kobj) {
++ pr_err(PFX"kset create error\n");
++ return -ENODEV;
++ }
++ return sysfs_create_file(board_data_kobj, &flash_version_attr.attr);
++}
++
++/**
++ * intel_cln_board_data_init_legacy
++ *
++ * Module entry point for older BIOS versions
++ * Allows more recent kernels to boot on Galileo boards with BIOS before release
++ * 0.9.0
++ */
++static int __init intel_cln_board_data_init_legacy(void)
+{
+ extern struct kobject * firmware_kobj;
+ struct intel_cln_mfh __iomem * mfh;
+ struct intel_cln_mfh_item __iomem * item;
+ struct platform_device * pdev;
-+ u32 count;
++ u32 i;
++ char * board_name = NULL;
+ void __iomem * spi_data;
+ int ret = 0;
+
@@ -10822,739 +13567,82 @@ index 0000000..da226c7
+ return -ENODEV;
+ }
+
++ pr_info(PFX"Booting on an old BIOS assuming %s board\n", DEFAULT_BOARD);
+ pr_info(PFX"mfh @ 0x%p: id 0x%08lx ver 0x%08lx entries 0x%08lx\n",
+ mfh, (unsigned long)mfh->id, (unsigned long)mfh->ver,
+ (unsigned long)mfh->item_count);
+ item = (struct intel_cln_mfh_item __iomem *)
+ &mfh->padding [sizeof(u32) * mfh->boot_priority_list];
-+
-+ /* board_data_kobj subordinate of firmware @ /sys/firmware/board_data */
-+ board_data_kobj = kobject_create_and_add("board_data", firmware_kobj);
-+ if (!board_data_kobj) {
-+ pr_err(PFX"kset create error\n");
-+ return -ENODEV;
++
++ /* Register a default board */
++ for (i = 0; i < sizeof(bsp_data)/sizeof(struct platform_device); i++){
++ if (!strcmp(bsp_data[i].name, DEFAULT_BOARD)){
++ board_name = (char*)bsp_data[i].name;
++ platform_device_register(&bsp_data[i]);
++ }
+ }
+
+ /* Register flash regions as seperate platform devices */
-+ for (count = 0; count < mfh->item_count; count++, item++){
++ for (i = 0; i < mfh->item_count; i++, item++){
+ pdev = NULL;
+
+ switch (item->type){
-+ case MFH_ITEM_BUILD_INFO:
-+ conf_res.start = item->addr;
-+ conf_res.end = item->addr + item->len;
-+ pdev = &conf_pdev;
-+ break;
+ case MFH_ITEM_VERSION:
+ flash_version_data = item->res0;
-+ if(sysfs_create_file(board_data_kobj,
-+ &flash_version_attr.attr)) {
-+ pr_err("failed to create sysfs entry for flash version\n");
-+ flash_version_data = 0;
-+ }
++ ret = add_firmware_sysfs_entry(board_name);
+ break;
+ default:
+ break;
+ }
-+
-+ if (pdev != NULL)
-+ platform_device_register(pdev);
-+ }
-+
-+ /* This ought to be encoded in the MFH ! */
-+ if (mfh_plat_found == false){
-+ pr_err(PFX"Warning platform data MFH missing - using hardcoded "
-+ "offsets\n");
-+
-+ /* Platform data */
-+ plat_res.start = SPIFLASH_BASEADDR + PLATFORM_DATA_OFFSET;
-+ count = *(uint32_t*)(spi_data + PLATFORM_DATA_OFFSET + sizeof(uint32_t));
-+ plat_res.end = count;
-+ ret = intel_cln_plat_probe(&plat_res);
+ }
-+
+ iounmap(spi_data);
+ return ret;
+}
+
-+MODULE_AUTHOR("Bryan O'Donoghue <bryan.odonoghue@intel.com>");
-+MODULE_DESCRIPTION("Intel Clanton SPI Data API");
-+MODULE_LICENSE("Dual BSD/GPL");
-+subsys_initcall(intel_cln_board_data_init);
-+
-diff --git a/drivers/platform/x86/quark/intel_cln_ecc_scrub.c b/drivers/platform/x86/quark/intel_cln_ecc_scrub.c
-new file mode 100644
-index 0000000..1fb46f6
---- /dev/null
-+++ b/drivers/platform/x86/quark/intel_cln_ecc_scrub.c
-@@ -0,0 +1,668 @@
-+/*
-+ * Copyright(c) 2013 Intel Corporation. All rights reserved.
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of version 2 of the GNU General Public License as
-+ * published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope that it will be useful, but
-+ * WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-+ *
-+ * Contact Information:
-+ * Intel Corporation
-+ */
-+/*
-+ * Intel Clanton DRAM ECC Scrub driver
-+ *
-+ * !!!!!!! Description
-+ *
-+ */
-+#include <asm-generic/uaccess.h>
-+#include <linux/intel_cln_sb.h>
-+#include <linux/kernel.h>
-+#include <linux/module.h>
-+#include <linux/printk.h>
-+#include <linux/platform_device.h>
-+#include <linux/proc_fs.h>
-+
-+#define DRIVER_NAME "intel-cln-ecc"
-+#define INTEL_CLN_ECC_SCRUB_PROCDIR "driver/ecc_scrub"
-+#define STATUS "status"
-+#define CONTROL "control"
-+#define INTERVAL "interval"
-+#define ECC_BLOCK_SIZE "block_size"
-+
-+#define CONTROL_USAGE "ECC Scrub Control: invalid setting. "\
-+ "Valid values are 1 or 0\n"
-+#define CONTROL_SCRUB_ON_STR "1\n"
-+#define CONTROL_SCRUB_OFF_STR "0\n"
-+#define CONTROL_ON_STR "on\n"
-+#define CONTROL_OFF_STR "off\n"
-+
-+#define INTERVAL_USAGE "ECC Scrub Interval: invalid setting. "\
-+ "Valid range is 1 - 255\n"
-+#define SIZE_USAGE "ECC Scrub Block Size: invalid setting. "\
-+ "Valid range is 64 - 512\n"
-+
-+#define OVERRIDE_CONFIG_PARM_DESC "Clanton ECC Scrub - "\
-+ "Override BIOS settings "\
-+ "for Scrub Config"
-+
-+#define OVERRIDE_START_PARM_DESC "Clanton ECC Scrub - "\
-+ "Override BIOS settings "\
-+ "for Scrub Start address"
-+
-+#define OVERRIDE_END_PARM_DESC "Clanton ECC Scrub - "\
-+ "Override BIOS settings "\
-+ "for Scrub End address"
-+
-+#define OVERRIDE_NEXT_PARM_DESC "Clanton ECC Scrub - "\
-+ "Override BIOS settings "\
-+ "for Scrub Next address"
-+
-+#define MAX_SCRUB_BLOCK_SIZE 512
-+#define MIN_SCRUB_BLOCK_SIZE 64
-+#define MAX_SCRUB_REFRESH 255
-+#define MIN_SCRUB_REFRESH 0
-+
-+#define NOT_OVERRIDDEN 0xfffffffful
-+
-+/* Shorten fn names to fit 80 char limit */
-+#ifndef sb_read
-+#define sb_read intel_cln_sb_read_reg
-+#endif
-+#ifndef sb_write
-+#define sb_write intel_cln_sb_write_reg
-+#endif
-+
-+/* Register ID */
-+#define ECC_SCRUB_CONFIG_REG (0x50)
-+#define ECC_SCRUB_START_MEM_REG (0x76)
-+#define ECC_SCRUB_END_MEM_REG (0x77)
-+#define ECC_SCRUB_NEXT_READ_REG (0x7C)
-+
-+
-+/* Reg commands */
-+#define THERMAL_CTRL_READ (0x10)
-+#define THERMAL_CTRL_WRITE (0x11)
-+#define THERMAL_RESUME_SCRUB (0xC2)
-+#define THERMAL_PAUSE_SCRUB (0xC3)
-+
-+/**
-+ * struct intel_cln_ecc_scrub_dev
-+ *
-+ * Structure to represent module state/data/etc
-+ */
-+struct intel_cln_ecc_scrub_dev {
-+
-+ /* Linux kernel structures */
-+ struct platform_device *pldev; /* Platform device */
-+
-+ /* Register copies */
-+ u32 start_address;
-+ u32 end_address;
-+ u32 next_address;
-+ u32 config;
-+
-+};
-+
-+static struct intel_cln_ecc_scrub_dev ecc_scrub_dev;
-+
-+static u32 ecc_scrub_config_override = NOT_OVERRIDDEN;
-+static u32 ecc_scrub_start_override = NOT_OVERRIDDEN;
-+static u32 ecc_scrub_end_override = NOT_OVERRIDDEN;
-+static u32 ecc_scrub_next_override = NOT_OVERRIDDEN;
-+
-+/**
-+ * intel_cln_ecc_scrub_stat_show
-+ *
-+ * @param dev: pointer to device
-+ * @param attr: attribute pointer
-+ * @param buf: output buffer
-+ * @return number of bytes successfully read
-+ *
-+ * Populates ecc_scrub state via /sys/device/platform/intel-cln-ecc/status
-+ */
-+static ssize_t
-+intel_cln_ecc_scrub_stat_show(struct device *dev, struct device_attribute *attr,
-+ char *buf)
-+{
-+ int len = 0, size = 0;
-+ unsigned int count = PAGE_SIZE;
-+ u32 reg_data = 0;
-+ char *scrub_status = CONTROL_OFF_STR;
-+
-+ /* Display start of memory address */
-+ sb_read(SB_ID_THERMAL, THERMAL_CTRL_READ, ECC_SCRUB_START_MEM_REG,
-+ &reg_data, 1);
-+ len += snprintf(buf + len, count - len,
-+ "ecc scrub mem start\t\t\t: 0x%08x\n", reg_data);
-+
-+
-+ /* Display end of memory address */
-+ sb_read(SB_ID_THERMAL, THERMAL_CTRL_READ, ECC_SCRUB_END_MEM_REG,
-+ &reg_data, 1);
-+ len += snprintf(buf + len, count - len,
-+ "ecc scrub mem end\t\t\t: 0x%08x\n", reg_data);
-+
-+ /* Display next address to be read */
-+ sb_read(SB_ID_THERMAL, THERMAL_CTRL_READ, ECC_SCRUB_NEXT_READ_REG,
-+ &reg_data, 1);
-+ len += snprintf(buf + len, count - len,
-+ "ecc scrub next read\t\t\t: 0x%08x\n", reg_data);
-+
-+ /* Display config settings */
-+ sb_read(SB_ID_THERMAL, THERMAL_CTRL_READ, ECC_SCRUB_CONFIG_REG,
-+ &reg_data, 1);
-+
-+ /* Interval is the lsbyte of the config reg, so mask out just
-+ * that byte in the data printed. */
-+ len += snprintf(buf + len, count - len,
-+ "ecc scrub interval\t\t\t: %d\n",
-+ (reg_data & 0x000000ff));
-+
-+ /* Size is indicated in bits 12:8 of register in
-+ * terms of 32 byte blocks. */
-+ size = ((reg_data & 0x00001f00) >> 8)*32;
-+ len += snprintf(buf + len, count - len,
-+ "ecc scrub block_size\t\t\t: %d\n", size);
-+
-+ /* Status is indicated in bit 13 of register. */
-+ if ((reg_data & 0x00002000) > 0)
-+ scrub_status = CONTROL_ON_STR;
-+
-+ len += snprintf(buf + len, count - len,
-+ "ecc scrub status\t\t\t: %s\n", scrub_status);
-+ return len;
-+}
-+
+/**
-+ * intel_cln_ecc_scrub_ctrl_show
-+ *
-+ * @param dev: pointer to device
-+ * @param attr: attribute pointer
-+ * @param buf: output buffer
-+ * @return number of bytes successfully read
-+ *
-+ * Populates ecc_scrub state via /sys/device/platform/intel-cln-ecc/control
-+ */
-+static ssize_t
-+intel_cln_ecc_scrub_ctrl_show(struct device *dev, struct device_attribute *attr,
-+ char *buf)
-+{
-+ unsigned int count = PAGE_SIZE;
-+ u32 reg_data = 0;
-+ char *on_or_off = CONTROL_SCRUB_OFF_STR;
-+
-+ sb_read(SB_ID_THERMAL, THERMAL_CTRL_READ, ECC_SCRUB_CONFIG_REG,
-+ &reg_data, 1);
-+
-+ /* Status is indicated in bit 13 of register. */
-+ if ((reg_data & 0x00002000) > 0)
-+ /* interval > 0 assume scrubbing on */
-+ on_or_off = CONTROL_SCRUB_ON_STR;
-+
-+ return snprintf(buf, count,"%s", on_or_off);
-+}
-+
-+/**
-+ * intel_cln_ecc_scrub_ctrl_store
-+ *
-+ * @param dev: pointer to device
-+ * @param attr: attribute pointer
-+ * @param buf: input buffer
-+ * @param size: size of input data
-+ * @return number of bytes successfully written
++ * intel_cln_board_data_init_legacy
+ *
-+ * Function allows user-space to switch on/off scrubbing with a simple
-+ * echo 1/0 > /sys/device/platform/intel-cln-ecc/control command
++ * Module entry point for older BIOS versions
+ */
-+static ssize_t
-+intel_cln_ecc_scrub_ctrl_store(struct device *dev,
-+ struct device_attribute *attr, const char *buf,
-+ size_t count)
-+{
-+ ssize_t ret = 0;
-+
-+ if (count <= 1)
-+ return -EINVAL;
-+
-+ ret = -EINVAL;
-+
-+ /* Check for command starting with "scrub"
-+ * and ending with "on" or "off" */
-+
-+ if (!strcmp(buf, CONTROL_SCRUB_ON_STR)) {
-+ sb_write(SB_ID_THERMAL, THERMAL_RESUME_SCRUB,
-+ 0, 0, 1);
-+ ret = 0;
-+ } else if (!strcmp(buf, CONTROL_SCRUB_OFF_STR)) {
-+ sb_write(SB_ID_THERMAL, THERMAL_PAUSE_SCRUB, 0,
-+ 0, 1);
-+ ret = 0;
-+ }
-+
-+
-+ if (ret == 0)
-+ ret = (ssize_t)count;
-+
-+ else if (ret == -EINVAL)
-+ printk(CONTROL_USAGE);
-+
-+ return ret;
-+}
-+
-+/**
-+ * intel_cln_ecc_scrub_intrvl_show
-+ *
-+ * @param dev: pointer to device
-+ * @param attr: attribute pointer
-+ * @param buf: output buffer
-+ * @return number of bytes successfully read
-+ *
-+ * Populates ecc_scrub state via /sys/device/platform/intel-cln-ecc/interval
-+ */
-+static ssize_t
-+intel_cln_ecc_scrub_intrvl_show(struct device *dev,
-+ struct device_attribute *attr, char *buf)
-+{
-+ u32 reg_data = 0;
-+
-+ /* Interval is the lsbyte of the config reg,
-+ * so mask out just that byte in the data printed. */
-+ sb_read(SB_ID_THERMAL, THERMAL_CTRL_READ, ECC_SCRUB_CONFIG_REG,
-+ &reg_data, 1);
-+
-+ return snprintf(buf, PAGE_SIZE, "%d\n", (reg_data & 0x000000ff));
-+}
-+
-+/**
-+ * intel_cln_ecc_scrub_intrvl_store
-+ *
-+ * @param dev: pointer to device
-+ * @param attr: attribute pointer
-+ * @param buf: input buffer
-+ * @param size: size of input data
-+ * @return number of bytes successfully written
-+ *
-+ * Function allows user-space to set scrub interval with a value of 1-255
-+ * echo 1-255 > /sys/device/platform/intel-cln-ecc/interval type command
-+ */
-+static ssize_t
-+intel_cln_ecc_scrub_intrvl_store(struct device *dev,
-+ struct device_attribute *attr, const char *buf,
-+ size_t count)
++static int __init intel_cln_board_data_init(void)
+{
-+ ssize_t ret = 0;
-+ unsigned long val = 0;
-+ u32 reg_data = 0;
-+ int ret_temp = 0;
-+
-+ if (count <= 1)
-+ return -EINVAL;
-+
-+ ret = -EINVAL;
-+
-+ ret_temp = kstrtoul(buf, 10, &val);
++ bool found = false;
++ const char * bios_version = dmi_get_system_info(DMI_BIOS_VERSION);
++ const char * board_name = dmi_get_system_info(DMI_BOARD_NAME);
++ extern struct kobject * firmware_kobj;
++ int ret = 0;
++ u32 i;
+
-+ if (ret_temp)
-+ return ret_temp;
++ /* BIOS later than version 0.9.0 contains the right DMI data */
++ for (i = 0; board_name != NULL && bios_version != NULL &&
++ i < sizeof(bsp_data)/sizeof(struct platform_device); i++){
++
++ if (!strcmp(bsp_data[i].name, board_name)){
+
-+ if (val > MIN_SCRUB_REFRESH && val <= MAX_SCRUB_REFRESH) {
-+ /* Need to read-modify-write config register. */
-+ sb_read(SB_ID_THERMAL, THERMAL_CTRL_READ,
-+ ECC_SCRUB_CONFIG_REG,
-+ &reg_data, 1);
++ /* Register board */
++ platform_device_register(&bsp_data[i]);
++ found = true;
+
-+ reg_data &= 0xffffff00; /* clear lsb. */
-+ reg_data |= val; /* now set interval. */
++ /* Galileo IDE expects this entry */
++ flash_version_data = simple_strtoul(bios_version, NULL, 16);
++ ret = add_firmware_sysfs_entry(bsp_data[i].name);
+
-+ sb_write(SB_ID_THERMAL, THERMAL_CTRL_WRITE,
-+ ECC_SCRUB_CONFIG_REG,
-+ reg_data, 1);
-+ ret = 0;
-+ } else {
-+ printk(INTERVAL_USAGE);
++ break;
++ }
+ }
+
-+ if (ret == 0)
-+ ret = (ssize_t)count;
-+ return ret;
-+}
-+
-+/**
-+ * intel_cln_ecc_scrub_size_show
-+ *
-+ * @param dev: pointer to device
-+ * @param attr: attribute pointer
-+ * @param buf: output buffer
-+ * @return number of bytes successfully read
-+ *
-+ * Populates ecc_scrub state via /sys/device/platform/intel-cln-ecc/block_size
-+ */
-+static ssize_t
-+intel_cln_ecc_scrub_size_show(struct device *dev, struct device_attribute *attr,
-+ char *buf)
-+{
-+ int size = 0;
-+ u32 reg_data = 0;
-+
-+ /* Size is indicated in bits 12:8 of config register
-+ * multiply x32 to get num bytes). */
-+ sb_read(SB_ID_THERMAL, THERMAL_CTRL_READ, ECC_SCRUB_CONFIG_REG,
-+ &reg_data, 1);
-+ size = ((reg_data & 0x00001f00) >> 8)*32;
-+
-+ return snprintf(buf, PAGE_SIZE, "%d\n", size);
-+}
-+
-+/**
-+ * intel_cln_ecc_scrub_size_store
-+ *
-+ * @param dev: pointer to device
-+ * @param attr: attribute pointer
-+ * @param buf: input buffer
-+ * @param size: size of input data
-+ * @return number of bytes successfully written
-+ *
-+ * Function allows user-space to set scrub block size of 64-512 with a simple
-+ * echo 64-512 > /sys/device/platform/intel-cln-ecc/block_size command
-+ */
-+static ssize_t
-+intel_cln_ecc_scrub_size_store(struct device *dev, struct device_attribute *attr,
-+ const char *buf, size_t count)
-+{
-+ ssize_t ret = 0;
-+ unsigned long val = 0;
-+ u32 reg_data = 0;
-+ int ret_temp = 0;
-+
-+ if (count <= 1)
-+ return -EINVAL;
-+
-+ ret = -EINVAL;
-+ ret_temp = kstrtoul(buf, 10, &val);
-+
-+ if (ret_temp)
-+ return ret_temp;
-+
-+ if (val >= MIN_SCRUB_BLOCK_SIZE && val <= MAX_SCRUB_BLOCK_SIZE){
-+
-+ /* Need to read-modify-write config register. */
-+ sb_read(SB_ID_THERMAL, THERMAL_CTRL_READ,
-+ ECC_SCRUB_CONFIG_REG,
-+ &reg_data, 1);
-+
-+ reg_data &= 0xfffffe0ff; /* clear bits 12:8 */
-+ reg_data |= (val/32)<<8; /* now set size */
-+
-+ sb_write(SB_ID_THERMAL, THERMAL_CTRL_WRITE,
-+ ECC_SCRUB_CONFIG_REG, reg_data, 1);
-+ ret = 0;
-+ } else {
-+ printk(SIZE_USAGE);
++ /* For older BIOS without DMI data we read the data directly from flash */
++ if (found == false){
++ ret = intel_cln_board_data_init_legacy();
+ }
+
-+ if (ret == 0)
-+ ret = (ssize_t)count;
-+
+ return ret;
+}
+
-+static struct device_attribute dev_attr_status = {
-+ .attr = {
-+ .name = "status",
-+ .mode = 0444,
-+ },
-+ .show = intel_cln_ecc_scrub_stat_show,
-+};
-+
-+static struct device_attribute dev_attr_control = {
-+ .attr = {
-+ .name = "control",
-+ .mode = 0644,
-+ },
-+ .show = intel_cln_ecc_scrub_ctrl_show,
-+ .store = intel_cln_ecc_scrub_ctrl_store,
-+};
-+
-+static struct device_attribute dev_attr_intrvl = {
-+ .attr = {
-+ .name = "interval",
-+ .mode = 0644,
-+ },
-+ .show = intel_cln_ecc_scrub_intrvl_show,
-+ .store = intel_cln_ecc_scrub_intrvl_store,
-+};
-+
-+static struct device_attribute dev_attr_block_size = {
-+ .attr = {
-+ .name = "block_size",
-+ .mode = 0644,
-+ },
-+ .show = intel_cln_ecc_scrub_size_show,
-+ .store = intel_cln_ecc_scrub_size_store,
-+};
-+
-+static struct attribute *platform_attributes[] = {
-+ &dev_attr_status.attr,
-+ &dev_attr_control.attr,
-+ &dev_attr_intrvl.attr,
-+ &dev_attr_block_size.attr,
-+ NULL,
-+};
-+
-+static struct attribute_group ecc_attrib_group = {
-+ .attrs = platform_attributes
-+};
-+
-+/*****************************************************************************
-+ * Module/PowerManagement hooks
-+ *****************************************************************************/
-+/**
-+ * intel_cln_ecc_probe
-+ *
-+ * @param pdev: Platform device
-+ * @return 0 success < 0 failure
-+ *
-+ * Callback from platform sub-system to probe
-+ *
-+ */
-+static int intel_cln_ecc_scrub_probe(struct platform_device *pdev)
-+{
-+ int value_overridden = 0;
-+
-+#ifdef CONFIG_INTEL_CLN_ECC_SCRUB_OVERRIDE_CONFIG
-+ u32 scrubber_refresh = 0;
-+ u32 scrubber_block_size = 0;
-+ u32 config_settings = 0;
-+#endif
-+
-+ memset(&ecc_scrub_dev, 0x00, sizeof(ecc_scrub_dev));
-+
-+ /* Update config settings, if directed so to do */
-+ if (ecc_scrub_start_override != NOT_OVERRIDDEN) {
-+ /* start of memory address */
-+ sb_write(SB_ID_THERMAL, THERMAL_CTRL_WRITE,
-+ ECC_SCRUB_START_MEM_REG, ecc_scrub_start_override, 1);
-+
-+ value_overridden = 1;
-+ }
-+ if (ecc_scrub_end_override != NOT_OVERRIDDEN) {
-+ /* end of memory address */
-+ sb_write(SB_ID_THERMAL, THERMAL_CTRL_WRITE,
-+ ECC_SCRUB_END_MEM_REG, ecc_scrub_end_override, 1);
-+
-+ value_overridden = 1;
-+ }
-+ if (ecc_scrub_next_override != NOT_OVERRIDDEN) {
-+ /* next address to be read */
-+ sb_write(SB_ID_THERMAL, THERMAL_CTRL_WRITE,
-+ ECC_SCRUB_NEXT_READ_REG, ecc_scrub_next_override, 1);
-+
-+ value_overridden = 1;
-+ }
-+ if (ecc_scrub_config_override != NOT_OVERRIDDEN) {
-+ sb_write(SB_ID_THERMAL, THERMAL_CTRL_WRITE,
-+ ECC_SCRUB_CONFIG_REG, ecc_scrub_config_override, 1);
-+
-+ value_overridden = 1;
-+ }
-+
-+ /* Config Reg can be updated by either command line or kconfig setting
-+ * in the case where we have both the command line takes precedence.*/
-+
-+ else {
-+#ifdef CONFIG_INTEL_CLN_ECC_SCRUB_OVERRIDE_CONFIG
-+ scrubber_refresh = CONFIG_INTEL_CLN_HW_ECC_REFRESH_RATE;
-+ scrubber_block_size = CONFIG_INTEL_CLN_HW_ECC_REFRESH_SIZE;
-+
-+ if (scrubber_block_size > MAX_SCRUB_BLOCK_SIZE)
-+ scrubber_block_size = MAX_SCRUB_BLOCK_SIZE;
-+
-+ else if (scrubber_block_size < MIN_SCRUB_BLOCK_SIZE)
-+ scrubber_block_size = MIN_SCRUB_BLOCK_SIZE;
-+
-+ if (scrubber_refresh > MAX_SCRUB_REFRESH)
-+ scrubber_refresh = MAX_SCRUB_REFRESH;
-+
-+
-+ /* adjust block size to multiples of 32 -
-+ * as that is what the register setting actually expects. */
-+ config_settings = scrubber_block_size/32;
-+ config_settings <<= 8;
-+ config_settings += scrubber_refresh;
-+
-+ /* config settings */
-+ sb_write(SB_ID_THERMAL, THERMAL_CTRL_WRITE,
-+ ECC_SCRUB_CONFIG_REG, config_settings, 1);
-+
-+ value_overridden = 1;
-+#endif
-+ }
-+
-+ if (value_overridden)
-+ sb_write(SB_ID_THERMAL, THERMAL_RESUME_SCRUB, 0, 0, 1);
-+
-+ return sysfs_create_group(&pdev->dev.kobj, &ecc_attrib_group);
-+}
-+
-+/**
-+ * intel_cln_ecc_scrub_suspend
-+ *
-+ * @param pdev: Platform device structure (unused)
-+ * @return 0 success < 0 failure
-+ *
-+ */
-+static int intel_cln_ecc_scrub_suspend(struct device *pdev)
-+{
-+#ifdef CONFIG_INTEL_CLN_ECC_SCRUB_S3_CONFIG
-+ u32 reg_data = 0;
-+
-+ /* Store off the 4 registers associated with scrubbing. */
-+ sb_read(SB_ID_THERMAL, THERMAL_CTRL_READ, ECC_SCRUB_START_MEM_REG,
-+ &reg_data, 1);
-+ ecc_scrub_dev.start_address = reg_data;
-+
-+ sb_read(SB_ID_THERMAL, THERMAL_CTRL_READ, ECC_SCRUB_END_MEM_REG,
-+ &reg_data, 1);
-+ ecc_scrub_dev.end_address = reg_data;
-+
-+ sb_read(SB_ID_THERMAL, THERMAL_CTRL_READ, ECC_SCRUB_NEXT_READ_REG,
-+ &reg_data, 1);
-+ ecc_scrub_dev.next_address = reg_data;
-+
-+ sb_read(SB_ID_THERMAL, THERMAL_CTRL_READ, ECC_SCRUB_CONFIG_REG,
-+ &reg_data, 1);
-+ ecc_scrub_dev.config = reg_data;
-+#endif
-+ return 0;
-+}
-+
-+/**
-+ * intel_cln_ecc_scrub_resume
-+ *
-+ * @param pdev: Platform device structure (unused)
-+ * @return 0 success < 0 failure
-+ */
-+static int intel_cln_ecc_scrub_resume(struct device *pdev)
-+{
-+#ifdef CONFIG_INTEL_CLN_ECC_SCRUB_S3_CONFIG
-+
-+ sb_write(SB_ID_THERMAL, THERMAL_CTRL_WRITE, ECC_SCRUB_START_MEM_REG,
-+ ecc_scrub_dev.start_address, 1);
-+
-+ sb_write(SB_ID_THERMAL, THERMAL_CTRL_WRITE, ECC_SCRUB_END_MEM_REG,
-+ ecc_scrub_dev.end_address, 1);
-+
-+ sb_write(SB_ID_THERMAL, THERMAL_CTRL_WRITE, ECC_SCRUB_NEXT_READ_REG,
-+ ecc_scrub_dev.next_address, 1);
-+
-+ sb_write(SB_ID_THERMAL, THERMAL_CTRL_WRITE, ECC_SCRUB_CONFIG_REG,
-+ ecc_scrub_dev.config, 1);
-+
-+ sb_write(SB_ID_THERMAL, THERMAL_RESUME_SCRUB, 0, 0, 1);
-+
-+#endif
-+ return 0;
-+}
-+
-+/**
-+ * intel_cln_ecc_scrub_remove
-+ *
-+ * @return 0 success < 0 failure
-+ *
-+ * Removes a platform device
-+ */
-+static int intel_cln_ecc_scrub_remove(struct platform_device *pdev)
-+{
-+ return sysfs_create_group(&pdev->dev.kobj, &ecc_attrib_group);
-+}
-+
-+/*
-+ * Power management operations
-+ */
-+static const struct dev_pm_ops intel_cln_ecc_scrub_pm_ops = {
-+ .suspend = intel_cln_ecc_scrub_suspend,
-+ .resume = intel_cln_ecc_scrub_resume,
-+};
-+
-+
-+/*
-+ * Platform structures useful for interface to PM subsystem
-+ */
-+static struct platform_driver intel_cln_ecc_scrub_driver = {
-+ .driver = {
-+ .name = DRIVER_NAME,
-+ .owner = THIS_MODULE,
-+ .pm = &intel_cln_ecc_scrub_pm_ops,
-+ },
-+ .probe = intel_cln_ecc_scrub_probe,
-+ .remove = intel_cln_ecc_scrub_remove,
-+};
-+
-+
-+MODULE_AUTHOR("Derek Harnett <derek.harnett@intel.com>");
-+MODULE_DESCRIPTION("Intel Clanton DRAM ECC-scrub driver");
++MODULE_AUTHOR("Bryan O'Donoghue <bryan.odonoghue@intel.com>");
++MODULE_DESCRIPTION("Intel Clanton SPI Data API");
+MODULE_LICENSE("Dual BSD/GPL");
-+
-+module_param(ecc_scrub_config_override, uint, 0644);
-+MODULE_PARM_DESC(ecc_scrub_config_override, OVERRIDE_CONFIG_PARM_DESC);
-+
-+module_param(ecc_scrub_start_override, uint, 0644);
-+MODULE_PARM_DESC(ecc_scrub_start_override, OVERRIDE_START_PARM_DESC);
-+
-+module_param(ecc_scrub_end_override, uint, 0644);
-+MODULE_PARM_DESC(ecc_scrub_end_override, OVERRIDE_END_PARM_DESC);
-+
-+module_param(ecc_scrub_next_override, uint, 0644);
-+MODULE_PARM_DESC(ecc_scrub_next_override, OVERRIDE_NEXT_PARM_DESC);
-+
-+module_platform_driver(intel_cln_ecc_scrub_driver);
++subsys_initcall(intel_cln_board_data_init);
+
diff --git a/drivers/platform/x86/quark/intel_cln_esram.c b/drivers/platform/x86/quark/intel_cln_esram.c
new file mode 100644
@@ -14735,118 +16823,12 @@ index 0000000..2d98507
+MODULE_DESCRIPTION("Clanton IMR test module");
+MODULE_LICENSE("Dual BSD/GPL");
+
-diff --git a/drivers/platform/x86/quark/intel_cln_layout_data.c b/drivers/platform/x86/quark/intel_cln_layout_data.c
-new file mode 100644
-index 0000000..124bccf
---- /dev/null
-+++ b/drivers/platform/x86/quark/intel_cln_layout_data.c
-@@ -0,0 +1,100 @@
-+/*
-+ * Copyright(c) 2013 Intel Corporation. All rights reserved.
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of version 2 of the GNU General Public License as
-+ * published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope that it will be useful, but
-+ * WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-+ *
-+ * Contact Information:
-+ * Intel Corporation
-+ */
-+/*
-+ * Intel Clanton Legacy Platform Data Layout.conf accessor
-+ *
-+ * Simple Legacy SPI flash access layer
-+ *
-+ * Author : Bryan O'Donoghue <bryan.odonoghue@linux.intel.com> 2013
-+ */
-+
-+#include <asm/io.h>
-+#include <linux/errno.h>
-+#include <linux/ioport.h>
-+#include <linux/module.h>
-+#include <linux/platform_device.h>
-+#include <linux/printk.h>
-+
-+#define DRIVER_NAME "cln-layout-conf"
-+static char __iomem * layout_conf_data;
-+static int len;
-+
-+static ssize_t layout_conf_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ ssize_t plen = len+1;
-+ if( plen > PAGE_SIZE )
-+ plen = PAGE_SIZE;
-+ memcpy(buf, layout_conf_data, plen);
-+ return plen;
-+}
-+
-+static struct kobj_attribute layout_conf_attr =
-+ __ATTR(layout_conf, 0644, layout_conf_show, NULL);
-+
-+static int intel_cln_layout_data_probe(struct platform_device *pdev)
-+{
-+ extern struct kobject * board_data_kobj;
-+ int ret = 0;
-+
-+ layout_conf_data = ioremap(pdev->resource->start,
-+ resource_size(pdev->resource));
-+ if (!layout_conf_data)
-+ return -ENODEV;
-+
-+ len = resource_size(pdev->resource);
-+ ret = sysfs_create_file(board_data_kobj, &layout_conf_attr.attr);
-+ if (ret != 0){
-+ pr_err("failed to create sysfs entry for layout config\n");
-+ iounmap(layout_conf_data);
-+ layout_conf_data = NULL;
-+ }
-+
-+ return ret;
-+}
-+
-+static int intel_cln_layout_data_remove(struct platform_device *pdev)
-+{
-+ extern struct kobject * board_data_kobj;
-+
-+ if (layout_conf_data){
-+ sysfs_remove_file(board_data_kobj, &layout_conf_attr.attr);
-+ iounmap(layout_conf_data);
-+
-+ }
-+ return 0;
-+}
-+
-+static struct platform_driver cln_layout_data_driver = {
-+ .driver = {
-+ .name = DRIVER_NAME,
-+ .owner = THIS_MODULE,
-+ },
-+ .probe = intel_cln_layout_data_probe,
-+ .remove = intel_cln_layout_data_remove,
-+};
-+
-+module_platform_driver(cln_layout_data_driver);
-+
-+MODULE_AUTHOR("Bryan O'Donoghue <bryan.odonoghue@intel.com>");
-+MODULE_DESCRIPTION("Intel Clanton SPI Data API");
-+MODULE_LICENSE("Dual BSD/GPL");
-+MODULE_ALIAS("platform:"DRIVER_NAME);
-+
diff --git a/drivers/platform/x86/quark/intel_cln_plat_clanton_hill.c b/drivers/platform/x86/quark/intel_cln_plat_clanton_hill.c
new file mode 100644
-index 0000000..90da1c8
+index 0000000..7dac528
--- /dev/null
+++ b/drivers/platform/x86/quark/intel_cln_plat_clanton_hill.c
-@@ -0,0 +1,197 @@
+@@ -0,0 +1,196 @@
+/*
+ * Copyright(c) 2013 Intel Corporation. All rights reserved.
+ *
@@ -14871,7 +16853,6 @@ index 0000000..90da1c8
+ *
+ * Simple Legacy SPI flash access layer
+ *
-+ * Author : Bryan O'Donoghue <bryan.odonoghue@linux.intel.com> 2013
+ */
+
+#include <linux/errno.h>
@@ -14885,8 +16866,8 @@ index 0000000..90da1c8
+#include <linux/spi/pxa2xx_spi.h>
+#include <linux/spi/spi.h>
+
-+#define DRIVER_NAME "cln-plat-clanton-hill"
-+#define GPIO_RESTRICT_NAME "cln-gpio-restrict-nc"
++#define DRIVER_NAME "ClantonHill"
++#define GPIO_RESTRICT_NAME "cln-gpio-restrict-nc"
+
+/******************************************************************************
+ * Analog Devices AD7298 SPI Device Platform Data
@@ -15046,10 +17027,10 @@ index 0000000..90da1c8
+
diff --git a/drivers/platform/x86/quark/intel_cln_plat_clanton_peak.c b/drivers/platform/x86/quark/intel_cln_plat_clanton_peak.c
new file mode 100644
-index 0000000..27ca0ea
+index 0000000..0341606
--- /dev/null
+++ b/drivers/platform/x86/quark/intel_cln_plat_clanton_peak.c
-@@ -0,0 +1,92 @@
+@@ -0,0 +1,227 @@
+/*
+ * Copyright(c) 2013 Intel Corporation. All rights reserved.
+ *
@@ -15070,11 +17051,8 @@ index 0000000..27ca0ea
+ * Intel Corporation
+ */
+/*
-+ * Intel Clanton Legacy Platform Data Layout.conf accessor
-+ *
-+ * Simple Legacy SPI flash access layer
++ * Clanton Peak board entry point
+ *
-+ * Author : Bryan O'Donoghue <bryan.odonoghue@linux.intel.com> 2013
+ */
+
+#include <linux/errno.h>
@@ -15085,8 +17063,26 @@ index 0000000..27ca0ea
+#include <linux/platform_device.h>
+#include <linux/printk.h>
+#include <linux/spi/spi.h>
++#include <linux/spi/spi_bitbang.h>
++#include <linux/spi/spi_gpio.h>
+
-+#define DRIVER_NAME "cln-plat-clanton-peak"
++#define DRIVER_NAME "ClantonPeakSVP"
++#define GPIO_RESTRICT_NAME_NC "cln-gpio-restrict-nc"
++#define GPIO_RESTRICT_NAME_SC "cln-gpio-restrict-sc"
++
++
++/* GPIO connected to Test Equipment */
++#define SUT_GPIO_NC_3 0x03
++#define SUT_GPIO_NC_4 0x04
++#define SUT_GPIO_NC_5 0x05
++#define SUT_GPIO_NC_6 0x06
++#define SUT_GPIO_SC_2 0x0A
++#define SUT_GPIO_SC_3 0x0B
++#define SUT_GPIO_SC_4 0x0C
++#define SUT_GPIO_SC_5 0x0D
++
++#define GPIO_NC_BITBANG_SPI_BUS 2
++#define GPIO_SC_BITBANG_SPI_BUS 3
+
+static struct spi_board_info spi_onboard_devs[] = {
+ {
@@ -15103,6 +17099,64 @@ index 0000000..27ca0ea
+ },
+};
+
++/*
++ * Define platform data for bitbanged SPI devices.
++ * Assign GPIO to SCK/MOSI/MISO
++ */
++static struct spi_gpio_platform_data spi_gpio_nc_data = {
++ .sck = SUT_GPIO_NC_3,
++ .mosi = SUT_GPIO_NC_4,
++ .miso = SUT_GPIO_NC_5,
++ .num_chipselect = 1,
++};
++static struct spi_gpio_platform_data spi_gpio_sc_data = {
++ .sck = SUT_GPIO_SC_2,
++ .mosi = SUT_GPIO_SC_3,
++ .miso = SUT_GPIO_SC_4,
++ .num_chipselect = 1,
++};
++
++/*
++ * Board information for bitbanged SPI devices.
++ */
++static const struct spi_board_info spi_gpio_nc_bi[] = {
++ {
++ .modalias = "spidev",
++ .max_speed_hz = 1000,
++ .bus_num = GPIO_NC_BITBANG_SPI_BUS,
++ .mode = SPI_MODE_0,
++ .platform_data = &spi_gpio_nc_data,
++ /* Assign GPIO to CS */
++ .controller_data = (void *)SUT_GPIO_NC_6,
++ },
++};
++static const struct spi_board_info spi_gpio_sc_bi[] = {
++ {
++ .modalias = "spidev",
++ .max_speed_hz = 1000,
++ .bus_num = GPIO_SC_BITBANG_SPI_BUS,
++ .mode = SPI_MODE_0,
++ .platform_data = &spi_gpio_sc_data,
++ /* Assign GPIO to CS */
++ .controller_data = (void *)SUT_GPIO_SC_5,
++ },
++};
++
++static struct platform_device spi_gpio_nc_pd = {
++ .name = "spi_gpio",
++ .id = GPIO_NC_BITBANG_SPI_BUS,
++ .dev = {
++ .platform_data = &spi_gpio_nc_data,
++ },
++};
++
++static struct platform_device spi_gpio_sc_pd = {
++ .name = "spi_gpio",
++ .id = GPIO_SC_BITBANG_SPI_BUS,
++ .dev = {
++ .platform_data = &spi_gpio_sc_data,
++ },
++};
+
+/**
+ * intel_cln_spi_add_onboard_devs
@@ -15117,8 +17171,70 @@ index 0000000..27ca0ea
+ ARRAY_SIZE(spi_onboard_devs));
+}
+
++static int register_bitbanged_spi(int nc)
++{
++ int ret = 0;
++
++ ret = platform_device_register(nc ? &spi_gpio_nc_pd : &spi_gpio_sc_pd);
++ if (ret)
++ goto err;
++
++ ret = spi_register_board_info(nc ? spi_gpio_nc_bi : spi_gpio_sc_bi,
++ nc ? ARRAY_SIZE(spi_gpio_nc_bi) :
++ ARRAY_SIZE(spi_gpio_sc_bi));
++ if (ret)
++ goto err_unregister;
++
++ return 0;
++
++err_unregister:
++ platform_device_unregister(nc ? &spi_gpio_nc_pd : &spi_gpio_sc_pd);
++err:
++ return ret;
++}
++
++static int intel_cln_gpio_restrict_probe_nc(struct platform_device *pdev)
++{
++ return register_bitbanged_spi(1);
++}
++
++static int intel_cln_gpio_restrict_probe_sc(struct platform_device *pdev)
++{
++ return register_bitbanged_spi(0);
++}
++
++static struct platform_driver gpio_restrict_pdriver_nc = {
++ .driver = {
++ .name = GPIO_RESTRICT_NAME_NC,
++ .owner = THIS_MODULE,
++ },
++ .probe = intel_cln_gpio_restrict_probe_nc,
++};
++
++static struct platform_driver gpio_restrict_pdriver_sc = {
++ .driver = {
++ .name = GPIO_RESTRICT_NAME_SC,
++ .owner = THIS_MODULE,
++ },
++ .probe = intel_cln_gpio_restrict_probe_sc,
++};
++
+static int intel_cln_plat_clanton_peak_probe(struct platform_device *pdev)
+{
++ int ret = 0;
++
++ ret = platform_driver_register(&gpio_restrict_pdriver_nc);
++ if (ret) {
++ pr_err("%s: couldn't register %s platform driver\n",
++ __func__, gpio_restrict_pdriver_nc.driver.name);
++ }
++
++ ret = platform_driver_register(&gpio_restrict_pdriver_sc);
++ if (ret) {
++ pr_err("%s: couldn't register %s platform driver\n",
++ __func__, gpio_restrict_pdriver_sc.driver.name);
++ }
++
+ return intel_cln_spi_add_onboard_devs();
+}
+
@@ -15144,10 +17260,10 @@ index 0000000..27ca0ea
+MODULE_ALIAS("platform:"DRIVER_NAME);
diff --git a/drivers/platform/x86/quark/intel_cln_plat_cross_hill.c b/drivers/platform/x86/quark/intel_cln_plat_cross_hill.c
new file mode 100644
-index 0000000..b79ce01
+index 0000000..263c07b
--- /dev/null
+++ b/drivers/platform/x86/quark/intel_cln_plat_cross_hill.c
-@@ -0,0 +1,387 @@
+@@ -0,0 +1,383 @@
+/*
+ * Copyright(c) 2013 Intel Corporation. All rights reserved.
+ *
@@ -15168,11 +17284,7 @@ index 0000000..b79ce01
+ * Intel Corporation
+ */
+/*
-+ * Intel Clanton Legacy Platform Data Layout.conf accessor
-+ *
-+ * Simple Legacy SPI flash access layer
-+ *
-+ * Author : Bryan O'Donoghue <bryan.odonoghue@linux.intel.com> 2013
++ * CrossHill board entry point
+ */
+
+#include <linux/errno.h>
@@ -15185,9 +17297,9 @@ index 0000000..b79ce01
+#include <linux/spi/pxa2xx_spi.h>
+#include <linux/spi/spi.h>
+
-+#define DRIVER_NAME "cln-plat-cross-hill"
-+#define GPIO_RESTRICT_NAME_NC "cln-gpio-restrict-nc"
-+#define GPIO_RESTRICT_NAME_SC "cln-gpio-restrict-sc"
++#define DRIVER_NAME "CrossHill"
++#define GPIO_RESTRICT_NAME_NC "cln-gpio-restrict-nc"
++#define GPIO_RESTRICT_NAME_SC "cln-gpio-restrict-sc"
+
+/*
+ * GPIO numbers to use for reading 4-bit Blackburn Peak SPI daughterboard ID
@@ -15535,473 +17647,12 @@ index 0000000..b79ce01
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_ALIAS("platform:"DRIVER_NAME);
+
-diff --git a/drivers/platform/x86/quark/intel_cln_plat_data.c b/drivers/platform/x86/quark/intel_cln_plat_data.c
-new file mode 100644
-index 0000000..059fcee
---- /dev/null
-+++ b/drivers/platform/x86/quark/intel_cln_plat_data.c
-@@ -0,0 +1,455 @@
-+/*
-+ * Copyright(c) 2013 Intel Corporation. All rights reserved.
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of version 2 of the GNU General Public License as
-+ * published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope that it will be useful, but
-+ * WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-+ *
-+ * Contact Information:
-+ * Intel Corporation
-+ */
-+/*
-+ * Intel Clanton Legacy Platform Data Layout.conf accessor
-+ *
-+ * Simple Legacy SPI flash access layer
-+ *
-+ * Author : Bryan O'Donoghue <bryan.odonoghue@linux.intel.com> 2013
-+ */
-+
-+#include <asm/io.h>
-+#include <linux/crc32.h>
-+#include <linux/crc32c.h>
-+#include <linux/errno.h>
-+#include <linux/ioport.h>
-+#include <linux/module.h>
-+#include <linux/platform_device.h>
-+#include <linux/platform_data/clanton.h>
-+#include <linux/printk.h>
-+#include <linux/slab.h>
-+
-+#define PREFIX "CLN-PLT: "
-+#define PLAT_MAGIC 0x54414450 /* PDAT */
-+#define DESC_LEN 0x0A
-+#define MAC_STRLEN 20
-+#define MAC_LEN 6
-+
-+struct cln_plat_dat_hdr {
-+ uint32_t magic;
-+ uint32_t length;
-+ uint32_t crc32;
-+};
-+
-+struct cln_plat_data {
-+ uint16_t plat_id;
-+ uint16_t length;
-+ uint8_t desc[DESC_LEN];
-+ uint16_t version;
-+};
-+
-+struct cln_bsp_reg {
-+ struct platform_device pdev;
-+ cln_plat_id_t id;
-+};
-+
-+static struct cln_bsp_reg bsp_data [] = {
-+ {
-+ .pdev.name = "cln-plat-clanton-peak",
-+ .pdev.id = -1,
-+ .id = CLANTON_PEAK,
-+ },
-+ {
-+ .pdev.name = "cln-plat-kips-bay",
-+ .pdev.id = -1,
-+ .id = KIPS_BAY,
-+ },
-+ {
-+ .pdev.name = "cln-plat-cross-hill",
-+ .pdev.id = -1,
-+ .id = CROSS_HILL,
-+ },
-+ {
-+ .pdev.name = "cln-plat-clanton-hill",
-+ .pdev.id = -1,
-+ .id = CLANTON_HILL,
-+ },
-+ {
-+ .pdev.name = "cln-plat-galileo",
-+ .pdev.id = -1,
-+ .id = IZMIR,
-+ },
-+
-+};
-+
-+/**
-+ * struct cln_plat_data_list
-+ *
-+ * Structure to hold a linked list of platform data refs
-+ */
-+struct cln_plat_data_list {
-+ char name[DESC_LEN+1];
-+ struct cln_plat_data * plat_data;
-+ struct kobj_attribute plat_attr;
-+ struct list_head list;
-+};
-+
-+static char __iomem * plat_data;
-+static char * plat_bin_name = "pdat_bin";
-+static unsigned int plat_bin_size;
-+static struct cln_plat_dat_hdr * plat_hdr;
-+static struct list_head entry_list;
-+
-+/**
-+ * intel_cln_plat_sysfs_show_bin
-+ *
-+ * Generic show routine for any of the sysfs entries of this module
-+ */
-+static ssize_t intel_cln_plat_sysfs_show_bin(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ ssize_t plen = plat_bin_size;
-+ if( plen > PAGE_SIZE )
-+ plen = PAGE_SIZE;
-+
-+ memcpy(buf, plat_data, plen);
-+ return plen;
-+}
-+
-+/**
-+ * intel_cln_plat_sysfs_show
-+ *
-+ * Generic show routine for any of the sysfs entries of this module
-+ */
-+static ssize_t intel_cln_plat_sysfs_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ unsigned char * data;
-+ char fmt[0x20];
-+ struct cln_plat_data_list * plat_item_list;
-+ ssize_t plen = 0;
-+
-+ list_for_each_entry(plat_item_list, &entry_list, list){
-+ if ( attr == &plat_item_list->plat_attr ){
-+
-+ /* Derive length */
-+ plen = plat_item_list->plat_data->length;
-+ if (unlikely(plen > PAGE_SIZE))
-+ plen = PAGE_SIZE;
-+
-+ /* Hook data */
-+ data =(char*)(plat_item_list->plat_data);
-+ data += +sizeof(struct cln_plat_data);
-+
-+ /* Enumrate return */
-+ switch (plat_item_list->plat_data->plat_id){
-+ case PLAT_DATA_ID:
-+ case PLAT_DATA_SN:
-+ snprintf(fmt, sizeof(fmt), "0x%%0%dx\n",
-+ plen*2);
-+ return sprintf(buf, fmt, *(int16_t*)data);
-+ case PLAT_DATA_MAC0:
-+ case PLAT_DATA_MAC1:
-+ if (unlikely(plen != MAC_LEN)){
-+ return sprintf(buf, "invalid mac\n");
-+ }
-+ return snprintf(buf, MAC_STRLEN,
-+ "%02x:%02x:%02x:%02x:%02x:%02x\n",
-+ data[0], data[1], data[2], data[3],
-+ data[4], data[5]);
-+ default:
-+ /* Treat as string data */
-+ return snprintf(buf, plen, "%s", data);
-+ }
-+ }
-+ }
-+ return 0;
-+}
-+
-+/**
-+ * intel_cln_plat_cleanup
-+ *
-+ * Generic cleanup code for the platform data interface
-+ *
-+ */
-+static void intel_cln_plat_cleanup (void)
-+{
-+ extern struct kobject * board_data_kobj;
-+ struct cln_plat_data_list * plat_item_list;
-+
-+ if (plat_data != NULL){
-+ iounmap(plat_data);
-+ plat_data = NULL;
-+ }
-+
-+ list_for_each_entry(plat_item_list, &entry_list, list){
-+ sysfs_remove_file(board_data_kobj,
-+ &plat_item_list->plat_attr.attr);
-+ kfree(plat_item_list);
-+ }
-+}
-+
-+/**
-+ * intel_cln_plat_get_desc_len
-+ *
-+ * @param desc: Pointer to desc string
-+ * @return len on success < 0 failure
-+ *
-+ * Function called to get a bounds checked desc field from platfrom data
-+ *
-+ */
-+static int intel_cln_plat_get_desc_len (char * desc)
-+{
-+ int len = 0;
-+ if (desc == NULL){
-+ return -EINVAL;
-+ }
-+
-+ for(; *desc != '\0' && len < DESC_LEN; desc++, len++);
-+ return len;
-+}
-+
-+/**
-+ * intel_cln_get_id
-+ *
-+ * @return platform id on success or < CLANTON_PLAT_UNDEFINED on error
-+ *
-+ * Function called to get platform id
-+ *
-+ */
-+cln_plat_id_t intel_cln_plat_get_id(void)
-+{
-+ unsigned char * data;
-+ struct cln_plat_data_list * plat_item_list;
-+
-+ if (plat_data == NULL)
-+ return CLANTON_PLAT_UNDEFINED;
-+
-+ list_for_each_entry(plat_item_list, &entry_list, list){
-+
-+ /* Enumrate return */
-+ if(plat_item_list->plat_data->plat_id == PLAT_DATA_ID){
-+
-+ /* Hook data */
-+ data =(char*)(plat_item_list->plat_data);
-+ data += +sizeof(struct cln_plat_data);
-+
-+ /* Return payload */
-+ return *(int16_t*)data;
-+ }
-+ }
-+ return CLANTON_PLAT_UNDEFINED;
-+}
-+EXPORT_SYMBOL(intel_cln_plat_get_id);
-+
-+/**
-+ * intel_cln_plat_get_mac
-+ *
-+ * @param id: Index of MAC address to find
-+ * @param mac: Output parameter for mac address
-+ *
-+ * @return 0 success < 0 failure
-+ *
-+ * Function called to remove the platfrom device from kernel space
-+ *
-+ */
-+int intel_cln_plat_get_mac(plat_dataid_t id, char * mac)
-+{
-+ unsigned char * data;
-+ unsigned int plen = 0;
-+ struct cln_plat_data_list * plat_item_list;
-+
-+ if ((id != PLAT_DATA_MAC0 && id != PLAT_DATA_MAC1) || mac == NULL){
-+ pr_err("invalid input id %d mac %p\n", id, mac);
-+ return -EINVAL;
-+ }
-+
-+ list_for_each_entry(plat_item_list, &entry_list, list){
-+ if(plat_item_list->plat_data->plat_id == id){
-+
-+ /* Derive length */
-+ plen = plat_item_list->plat_data->length;
-+ if (unlikely(plen != MAC_LEN)){
-+ pr_err("%s mac len invalid!\n", __func__);
-+ return -ENODEV;
-+ }
-+
-+ /* Hook data */
-+ data =(char*)(plat_item_list->plat_data);
-+ data += +sizeof(struct cln_plat_data);
-+
-+ /* Good to go */
-+ memcpy(mac, data, MAC_LEN);
-+ return 0;
-+ }
-+ }
-+ return -ENODEV;
-+}
-+EXPORT_SYMBOL(intel_cln_plat_get_mac);
-+
-+
-+/**
-+ * intel_cln_plat_probe
-+ *
-+ * @param pdev: Pointer to platform device
-+ * @return 0 success < 0 failure
-+ *
-+ * Function called to probe platform device "cln-plat"
-+ *
-+ */
-+int intel_cln_plat_probe(struct resource * pres)
-+{
-+ char __iomem * end_addr = NULL;
-+ char __iomem * data = NULL;
-+ cln_plat_id_t id = CLANTON_PLAT_UNDEFINED;
-+ extern struct kobject * board_data_kobj;
-+ struct cln_plat_data * plat_item = NULL;
-+ struct cln_plat_data_list * plat_item_list = NULL;
-+ u32 crc = 0;
-+ int ret = 0, i = 0;
-+
-+ INIT_LIST_HEAD(&entry_list);
-+ plat_hdr = ioremap(pres->start, resource_size(pres));
-+ end_addr = (char*)plat_hdr + resource_size(pres);
-+ plat_data = (char*)plat_hdr;
-+ if (!plat_hdr)
-+ return -ENODEV;
-+
-+ /* Verify header magic */
-+ if (plat_hdr->magic != PLAT_MAGIC){
-+ pr_err(PREFIX"Expected magic 0x%08x read 0x%08lx\n",
-+ PLAT_MAGIC, (unsigned long)plat_hdr->magic);
-+ }
-+
-+ /* Validate length is sane */
-+ if ((char*)plat_hdr + plat_hdr->length > end_addr ||
-+ plat_hdr->length < sizeof(struct cln_plat_data)){
-+ pr_err(PREFIX"Invalid length 0x%08lx\n",
-+ (unsigned long)plat_hdr->length);
-+ return -ENODEV;
-+ }
-+
-+ /* Point to real end addr */
-+ end_addr = (char*)plat_hdr +
-+ sizeof(struct cln_plat_dat_hdr) + plat_hdr->length;
-+ plat_bin_size = end_addr - plat_data;
-+
-+ /* Get pointer to start of data */
-+ plat_item = (struct cln_plat_data*)(plat_hdr+1);
-+ data = ((char*)(plat_item)+sizeof(struct cln_plat_data));
-+
-+ /* Validate CRC32 */
-+ crc = ~crc32(0xFFFFFFFF, plat_item, plat_hdr->length);
-+ if (crc != plat_hdr->crc32){
-+ pr_err(PREFIX"CRC 0x%08x header indicates 0x%08x - fatal!\n",
-+ crc, plat_hdr->crc32);
-+ return -EFAULT;
-+ }
-+
-+ /* /sys/firmware/board_data/plat_bin - dump entire platform binary */
-+ plat_item_list = kzalloc(sizeof(struct cln_plat_data_list),
-+ GFP_KERNEL);
-+ if (unlikely(plat_item_list == NULL)) {
-+ pr_err("kzalloc fail !\n");
-+ intel_cln_plat_cleanup();
-+ return -ENOMEM;
-+ }
-+ sysfs_attr_init(&plat_item_list->plat_attr.attr);
-+ plat_item_list->plat_attr.attr.name = plat_bin_name;
-+ plat_item_list->plat_attr.attr.mode = 0644;
-+ plat_item_list->plat_attr.show = intel_cln_plat_sysfs_show_bin;
-+
-+ ret = sysfs_create_file(board_data_kobj,
-+ &plat_item_list->plat_attr.attr);
-+ if (unlikely(ret != 0)){
-+ intel_cln_plat_cleanup();
-+ pr_err("failed to create sysfs entry\n");
-+ return ret;
-+ }
-+
-+ /* Add to list */
-+ list_add(&plat_item_list->list, &entry_list);
-+
-+ /* Iterate through each entry - add sysfs entry as appropriate */
-+ while ( (char*)plat_item < end_addr){
-+
-+ /* Bounds check */
-+ if (data + plat_item->length > end_addr){
-+ pr_err(PREFIX"Data 0x%p over-runs max-addr 0x%p\n",
-+ data, end_addr);
-+ break;
-+ }
-+
-+ /* Extract data */
-+ switch(plat_item->plat_id){
-+ case PLAT_DATA_ID:
-+ id = *((uint16_t*)data);
-+ pr_info(PREFIX"Clanton Platform ID = %d\n", id);
-+ break;
-+ case PLAT_DATA_SN:
-+ case PLAT_DATA_MAC0:
-+ case PLAT_DATA_MAC1:
-+ break;
-+ default:
-+ /* Unknown identifier */
-+ break;
-+ }
-+
-+ plat_item_list = kzalloc(sizeof(struct cln_plat_data_list),
-+ GFP_KERNEL);
-+ if (unlikely(plat_item_list == NULL)) {
-+ pr_err("kzalloc fail !\n");
-+ intel_cln_plat_cleanup();
-+ return -ENOMEM;
-+ }
-+
-+ /* Get name of entity */
-+ i = intel_cln_plat_get_desc_len(plat_item->desc);
-+ if (i <= 0){
-+ pr_err("desc len is %d!\n", i);
-+ intel_cln_plat_cleanup();
-+ return i;
-+ }
-+
-+ memcpy(plat_item_list->name, plat_item->desc, i);
-+ plat_item_list->plat_data = plat_item;
-+
-+ sysfs_attr_init(&plat_item_list->plat_attr.attr);
-+ plat_item_list->plat_attr.attr.name = plat_item_list->name;
-+ plat_item_list->plat_attr.attr.mode = 0644;
-+ plat_item_list->plat_attr.show = intel_cln_plat_sysfs_show;
-+
-+ ret = sysfs_create_file(board_data_kobj,
-+ &plat_item_list->plat_attr.attr);
-+ if (unlikely(ret != 0)){
-+ intel_cln_plat_cleanup();
-+ pr_err("failed to create sysfs entry\n");
-+ return ret;
-+ }
-+
-+ /* Add to list */
-+ list_add(&plat_item_list->list, &entry_list);
-+
-+ /* Next */
-+ plat_item = (struct cln_plat_data*)
-+ (((char*)plat_item) + plat_item->length + sizeof(struct cln_plat_data));
-+ data = ((char*)(plat_item) + sizeof(struct cln_plat_data));
-+ }
-+
-+ /* Register BSP enabling platform code */
-+ for (i = 0; i < sizeof(bsp_data)/sizeof(struct cln_bsp_reg); i++){
-+ if (bsp_data[i].id == id){
-+ platform_device_register(&bsp_data[i].pdev);
-+ }
-+ }
-+
-+ return ret;
-+}
-+EXPORT_SYMBOL(intel_cln_plat_probe);
diff --git a/drivers/platform/x86/quark/intel_cln_plat_galileo.c b/drivers/platform/x86/quark/intel_cln_plat_galileo.c
new file mode 100644
-index 0000000..48ce294
+index 0000000..364358c
--- /dev/null
+++ b/drivers/platform/x86/quark/intel_cln_plat_galileo.c
-@@ -0,0 +1,264 @@
+@@ -0,0 +1,395 @@
+/*
+ * Copyright(c) 2013 Intel Corporation. All rights reserved.
+ *
@@ -16036,6 +17687,8 @@ index 0000000..48ce294
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/module.h>
++#include <linux/mfd/cy8c9540a.h>
++#include <linux/mfd/intel_cln_gip_pdata.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/physmap.h>
+#include <linux/platform_device.h>
@@ -16045,12 +17698,85 @@ index 0000000..48ce294
+#include <linux/spi/flash.h>
+#include <linux/i2c/at24.h>
+
-+#define DRIVER_NAME "cln-plat-galileo"
++#define DRIVER_NAME "Galileo"
+#define GPIO_RESTRICT_NAME "cln-gpio-restrict-sc"
+#define LPC_SCH_SPINAME "spi-lpc-sch"
+
+#define CLN_SPI_MAX_CLK_DEFAULT 5000000
+
++/* GPIO line used to detect the LSB of the Cypress i2c address */
++#define GPIO_CYPRESS_A0 7
++/* GPIO line Cypress interrupts are routed to (in S0 power state) */
++#define GPIO_CYPRESS_INT_S0 13
++/* GPIO line Cypress interrupts are routed to (in S3 power state) */
++#define GPIO_CYPRESS_INT_S3 2
++
++/* Cypress i2c address depending on A0 value */
++#define CYPRESS_ADDR_A0_1 0x20
++#define CYPRESS_ADDR_A0_0 0x21
++
++/******************************************************************************
++ * Cypress I/O Expander Platform Data
++ ******************************************************************************/
++static struct cy8c9540a_pdata cy8c9540a_platform_data = {
++ .por_default = {
++ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* Output */
++ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* Int mask */
++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* PWM */
++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* Inversion */
++ 0xe0, 0xe0, 0xff, 0xf3, 0x00, 0xff, 0xff, 0xff, /* Direction */
++ 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, /* P0 drive */
++ 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, /* P1 drive */
++ 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* P2 drive */
++ 0xf3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, /* P3 drive */
++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, /* P4 drive */
++ 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* P5 drive */
++ 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* P6 drive */
++ 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* P7 drive */
++ 0x00, 0xff, 0x00, /* PWM0 */
++ 0x00, 0xff, 0x00, /* PWM1 */
++ 0x00, 0xff, 0x00, /* PWM2 */
++ 0x00, 0xff, 0x00, /* PWM3 */
++ 0x00, 0xff, 0x00, /* PWM4 */
++ 0x00, 0xff, 0x00, /* PWM5 */
++ 0x00, 0xff, 0x00, /* PWM6 */
++ 0x00, 0xff, 0x00, /* PWM7 */
++ 0x00, 0xff, 0x00, /* PWM8 */
++ 0x00, 0xff, 0x00, /* PWM9 */
++ 0x00, 0xff, 0x00, /* PWM10 */
++ 0x00, 0xff, 0x00, /* PWM11 */
++ 0x00, 0xff, 0x00, /* PWM12 */
++ 0x00, 0xff, 0x00, /* PWM13 */
++ 0x00, 0xff, 0x00, /* PWM14 */
++ 0x00, 0xff, 0x00, /* PWM15 */
++ 0xff, /* PWM CLKdiv */
++ 0x02, /* EEPROM en */
++ 0x00 /* CRC */
++ },
++ .pwm2gpio_mapping = {
++ CY8C9540A_PWM_UNUSED,
++ 3,
++ CY8C9540A_PWM_UNUSED,
++ 2,
++ 9,
++ 1,
++ 8,
++ 0,
++ },
++ .gpio_base = 16,
++ .pwm_base = 0,
++ .irq_base = 64,
++};
++
++/* Cypress expander requires i2c master to operate @100kHz 'standard mode' */
++static struct intel_cln_gip_pdata gip_pdata = {
++ .i2c_std_mode = 1,
++};
++static struct intel_cln_gip_pdata *galileo_gip_get_pdata(void)
++{
++ return &gip_pdata;
++}
++
+/******************************************************************************
+ * Analog Devices AD7298 SPI Device Platform Data
+ ******************************************************************************/
@@ -16076,16 +17802,18 @@ index 0000000..48ce294
+/******************************************************************************
+ * Intel Izmir i2c clients
+ ******************************************************************************/
-+static struct i2c_board_info __initdata galileo_i2c_board_info[] = {
-+ {
-+ /* Note following address may change at driver load time */
-+ I2C_BOARD_INFO("cy8c9540a", 0x20),
-+ },
++static struct i2c_board_info __initdata static_i2c_board_info[] = {
+ {
+ I2C_BOARD_INFO("at24", 0x50),
+ .platform_data = &at24_platform_data,
+ },
+};
++static struct i2c_board_info probed_i2c_cypress = {
++ .platform_data = &cy8c9540a_platform_data,
++};
++static struct i2c_adapter *i2c_adap;
++static const unsigned short cypress_i2c_addr[] =
++ { CYPRESS_ADDR_A0_1, CYPRESS_ADDR_A0_0, I2C_CLIENT_END };
+
+/******************************************************************************
+ * Intel Clanton SPI Controller Data
@@ -16100,11 +17828,6 @@ index 0000000..48ce294
+
+#define LPC_SCH_SPI_BUS_ID 0x03
+
-+static struct platform_device lpc_sch_spi = {
-+ .name = "spi-lpc-sch-drv",
-+ .id = LPC_SCH_SPI_BUS_ID,
-+};
-+
+/* TODO: extract this data from layout.conf encoded in flash */
+struct mtd_partition ilb_partitions [] = {
+ {
@@ -16166,6 +17889,33 @@ index 0000000..48ce294
+ },
+};
+
++static struct gpio reserved_gpios[] = {
++ {
++ GPIO_CYPRESS_A0,
++ GPIOF_IN,
++ "cy8c9540a-a0",
++ },
++ {
++ GPIO_CYPRESS_INT_S0,
++ GPIOF_IN,
++ "cy8c9540a-int-s0",
++ },
++ {
++ GPIO_CYPRESS_INT_S3,
++ GPIOF_IN,
++ "cy8c9540a-int-s3",
++ },
++};
++
++static int cypress_i2c_probe(struct i2c_adapter *adap, unsigned short addr)
++{
++ if (gpio_get_value(GPIO_CYPRESS_A0) && CYPRESS_ADDR_A0_1 == addr)
++ return 1;
++ if (!gpio_get_value(GPIO_CYPRESS_A0) && CYPRESS_ADDR_A0_0 == addr)
++ return 1;
++ return 0;
++}
++
+/**
+ * intel_cln_spi_add_onboard_devs
+ *
@@ -16184,15 +17934,59 @@ index 0000000..48ce294
+/**
+ * intel_cln_gpio_restrict_probe
+ *
-+ * Make GPIOs pertaining to Firmware inaccessible by requesting them. The
-+ * GPIOs are never released nor accessed by this driver.
++ * Register devices that depend on GPIOs.
++ * Note this function makes extensive use of the probe deferral mechanism:
++ * gpio_request() for a GPIO that is not yet available returns
++ * -EPROBE_DEFER.
+ */
+static int intel_cln_gpio_restrict_probe(struct platform_device *pdev)
+{
+ int ret = 0;
++ struct i2c_client *cypress = NULL;
++ static int spi_done;
++ static int gpios_done;
++
++ if (spi_done)
++ goto gpios;
+
+ ret = intel_cln_spi_add_onboard_devs();
++ if (ret)
++ goto end;
++
++ spi_done = 1;
++
++gpios:
++ if (gpios_done)
++ goto i2c;
++
++ ret = gpio_request_array(reserved_gpios, ARRAY_SIZE(reserved_gpios));
++ if (ret)
++ goto end;
++
++ probed_i2c_cypress.irq = gpio_to_irq(GPIO_CYPRESS_INT_S0);
++
++ gpios_done = 1;
++
++i2c:
++ i2c_adap = i2c_get_adapter(0);
++ if (NULL == i2c_adap) {
++ pr_info("%s: i2c adapter not ready yet. Deferring..\n",
++ __func__);
++ ret = -EPROBE_DEFER;
++ goto end;
++ }
++ strlcpy(probed_i2c_cypress.type, "cy8c9540a", I2C_NAME_SIZE);
++ cypress = i2c_new_probed_device(i2c_adap, &probed_i2c_cypress,
++ cypress_i2c_addr, cypress_i2c_probe);
++ i2c_put_adapter(i2c_adap);
+
++ if (NULL == cypress) {
++ pr_err("%s: can't probe Cypress Expander\n", __func__);
++ ret = -ENODEV;
++ goto end;
++ }
++
++end:
+ return ret;
+}
+
@@ -16204,28 +17998,16 @@ index 0000000..48ce294
+ .probe = intel_cln_gpio_restrict_probe,
+};
+
-+/* LPC SPI */
-+static int intel_cln_plat_galileo_lpcspi_probe(struct platform_device *pdev)
-+{
-+ lpc_sch_spi.resource = pdev->resource;
-+ return platform_device_register(&lpc_sch_spi);
-+}
-+
-+static struct platform_driver intel_cln_plat_galileo_lpcspi_pdriver = {
-+ .driver = {
-+ .name = LPC_SCH_SPINAME,
-+ .owner = THIS_MODULE,
-+ },
-+ .probe = intel_cln_plat_galileo_lpcspi_probe,
-+};
-+
+static int intel_cln_plat_galileo_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+
++ /* Assign GIP driver handle for board-specific settings */
++ intel_cln_gip_get_pdata = galileo_gip_get_pdata;
++
+ /* i2c */
-+ ret = i2c_register_board_info(0, galileo_i2c_board_info,
-+ ARRAY_SIZE(galileo_i2c_board_info));
++ ret = i2c_register_board_info(0, static_i2c_board_info,
++ ARRAY_SIZE(static_i2c_board_info));
+ if (ret) {
+ goto end;
+ }
@@ -16268,7 +18050,7 @@ index 0000000..48ce294
+
diff --git a/drivers/platform/x86/quark/intel_cln_plat_kips_bay.c b/drivers/platform/x86/quark/intel_cln_plat_kips_bay.c
new file mode 100644
-index 0000000..1f3deb2
+index 0000000..f701e69
--- /dev/null
+++ b/drivers/platform/x86/quark/intel_cln_plat_kips_bay.c
@@ -0,0 +1,176 @@
@@ -16309,8 +18091,8 @@ index 0000000..1f3deb2
+#include <linux/spi/pxa2xx_spi.h>
+#include <linux/spi/spi.h>
+
-+#define DRIVER_NAME "cln-plat-kips-bay"
-+#define GPIO_RESTRICT_NAME "cln-gpio-restrict-sc"
++#define DRIVER_NAME "KipsBay"
++#define GPIO_RESTRICT_NAME "cln-gpio-restrict-sc"
+
+static int gpio_cs = 1;
+
@@ -17004,7 +18786,7 @@ index 0000000..16f43db
+module_exit(intel_cln_smep_test_exit);
diff --git a/drivers/platform/x86/quark/intel_cln_thermal.c b/drivers/platform/x86/quark/intel_cln_thermal.c
new file mode 100644
-index 0000000..ce0da9cd
+index 0000000..ce0da9c
--- /dev/null
+++ b/drivers/platform/x86/quark/intel_cln_thermal.c
@@ -0,0 +1,360 @@
@@ -20241,10 +22023,10 @@ index d285596..2c4e7e1 100644
+obj-$(CONFIG_MAX78M6610_LMU) += max78m6610_lmu.o
diff --git a/drivers/staging/iio/adc/max78m6610_lmu.c b/drivers/staging/iio/adc/max78m6610_lmu.c
new file mode 100644
-index 0000000..f38ed57
+index 0000000..5a72f8e
--- /dev/null
+++ b/drivers/staging/iio/adc/max78m6610_lmu.c
-@@ -0,0 +1,837 @@
+@@ -0,0 +1,1113 @@
+/*
+ * max78m6610+lmu SPI protocol driver
+ *
@@ -20285,7 +22067,10 @@ index 0000000..f38ed57
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/spi/spi.h>
++#include <linux/spi/spidev.h>
+#include <linux/version.h>
++#include <linux/cdev.h>
++#include <linux/fs.h>
+
+#define INSTAN_VA 0x33 /* instaneous Voltage for VA source */
+#define INSTAN_IA 0x44 /* instaneous Current for IA source */
@@ -20324,6 +22109,7 @@ index 0000000..f38ed57
+#define SIGN_BIT_NUM 23
+#define SPI_MSG_LEN 5
+#define RX_OFFSET 1
++#define SPI_BBUFFER_LEN 4096
+
+/* SPI message Control byte */
+#define SPI_CB(x) ((SPI_CB_NBR_ACC << 4)\
@@ -20683,6 +22469,14 @@ index 0000000..f38ed57
+
+ u8 tx_buf[SPI_MSG_LEN * MAX_CHAN_NUM];
+ u8 rx_buf[SPI_MSG_LEN * MAX_CHAN_NUM + sizeof(s64)];
++
++
++ /* Char dev to provide ioctl interface for f/w upgrade
++ * or low-level register access */
++ struct cdev cdev;
++ dev_t cdev_no;
++ struct class *cl;
++ u8 *bbuffer;
+};
+
+/**
@@ -20957,6 +22751,264 @@ index 0000000..f38ed57
+ .driver_module = THIS_MODULE,
+};
+
++static int
++max78m6610_lmu_open(struct inode *inode, struct file *filp)
++{
++ struct max78m6610_lmu_state *st;
++ int ret = 0;
++
++ st = container_of(inode->i_cdev,
++ struct max78m6610_lmu_state,
++ cdev);
++ filp->private_data = st;
++
++ if (!st->bbuffer) {
++ st->bbuffer = kmalloc(SPI_BBUFFER_LEN, GFP_KERNEL);
++ if (!st->bbuffer) {
++ dev_dbg(&st->spi->dev, "open/ENOMEM\n");
++ ret = -ENOMEM;
++ }
++ }
++
++ return ret;
++}
++
++static int
++max78m6610_lmu_release(struct inode *inode, struct file *filp)
++{
++ struct max78m6610_lmu_state *st =
++ (struct max78m6610_lmu_state *)filp->private_data;
++
++ kfree(st->bbuffer);
++ st->bbuffer = NULL;
++
++ return 0;
++}
++
++static int spidev_message(struct max78m6610_lmu_state *st,
++ struct spi_ioc_transfer *u_xfers,
++ unsigned n_xfers)
++{
++ struct spi_message msg;
++ struct spi_transfer *k_xfers;
++ struct spi_transfer *k_tmp;
++ struct spi_ioc_transfer *u_tmp;
++ unsigned n, total;
++ u8 *buf;
++ int status = -EFAULT;
++
++ spi_message_init(&msg);
++ k_xfers = kcalloc(n_xfers, sizeof(*k_tmp), GFP_KERNEL);
++ if (k_xfers == NULL)
++ return -ENOMEM;
++
++ /* Construct spi_message, copying any tx data to bounce buffer.
++ * We walk the array of user-provided transfers, using each one
++ * to initialize a kernel version of the same transfer.
++ */
++ buf = st->bbuffer;
++ total = 0;
++ for (n = n_xfers, k_tmp = k_xfers, u_tmp = u_xfers;
++ n;
++ n--, k_tmp++, u_tmp++) {
++ k_tmp->len = u_tmp->len;
++
++ total += k_tmp->len;
++ if (total > SPI_BBUFFER_LEN) {
++ status = -EMSGSIZE;
++ goto done;
++ }
++
++ if (u_tmp->rx_buf) {
++ k_tmp->rx_buf = buf;
++ if (!access_ok(VERIFY_WRITE, (u8 __user *)
++ (uintptr_t) u_tmp->rx_buf,
++ u_tmp->len))
++ goto done;
++ }
++ if (u_tmp->tx_buf) {
++ k_tmp->tx_buf = buf;
++ if (copy_from_user(buf, (const u8 __user *)
++ (uintptr_t) u_tmp->tx_buf,
++ u_tmp->len))
++ goto done;
++ }
++ buf += k_tmp->len;
++
++ k_tmp->cs_change = !!u_tmp->cs_change;
++ k_tmp->bits_per_word = u_tmp->bits_per_word;
++ k_tmp->delay_usecs = u_tmp->delay_usecs;
++ k_tmp->speed_hz = u_tmp->speed_hz;
++#ifdef VERBOSE
++ dev_dbg(&st->spi->dev,
++ " xfer len %zd %s%s%s%dbits %u usec %uHz\n",
++ u_tmp->len,
++ u_tmp->rx_buf ? "rx " : "",
++ u_tmp->tx_buf ? "tx " : "",
++ u_tmp->cs_change ? "cs " : "",
++ u_tmp->bits_per_word ? : st->spi->bits_per_word,
++ u_tmp->delay_usecs,
++ u_tmp->speed_hz ? : st->spi->max_speed_hz);
++#endif
++ spi_message_add_tail(k_tmp, &msg);
++ }
++
++ status = spi_sync(st->spi, &msg);
++ if (status < 0)
++ goto done;
++
++ /* copy any rx data out of bounce buffer */
++ buf = st->bbuffer;
++ for (n = n_xfers, u_tmp = u_xfers; n; n--, u_tmp++) {
++ if (u_tmp->rx_buf) {
++ if (__copy_to_user((u8 __user *)
++ (uintptr_t) u_tmp->rx_buf, buf,
++ u_tmp->len)) {
++ status = -EFAULT;
++ goto done;
++ }
++ }
++ buf += u_tmp->len;
++ }
++ status = total;
++
++done:
++ kfree(k_xfers);
++ return status;
++}
++
++static long
++max78m6610_lmu_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
++{
++ struct max78m6610_lmu_state *st = filp->private_data;
++ struct iio_dev *indio_dev = spi_get_drvdata(st->spi);
++ u32 tmp;
++ unsigned n_ioc;
++ struct spi_ioc_transfer *ioc;
++ int ret = 0;
++
++ /* Check type and command number */
++ if (_IOC_TYPE(cmd) != SPI_IOC_MAGIC)
++ return -ENOTTY;
++
++ /* Check access direction once here; don't repeat below.
++ * IOC_DIR is from the user perspective, while access_ok is
++ * from the kernel perspective; so they look reversed.
++ */
++ if (_IOC_DIR(cmd) & _IOC_READ)
++ ret = !access_ok(VERIFY_WRITE,
++ (void __user *)arg, _IOC_SIZE(cmd));
++ if (ret == 0 && _IOC_DIR(cmd) & _IOC_WRITE)
++ ret = !access_ok(VERIFY_READ,
++ (void __user *)arg, _IOC_SIZE(cmd));
++ if (ret)
++ return -EFAULT;
++
++ ret = mutex_lock_interruptible(&indio_dev->mlock);
++ if (ret)
++ return ret;
++
++ /* segmented and/or full-duplex I/O request */
++ if (_IOC_NR(cmd) != _IOC_NR(SPI_IOC_MESSAGE(0))
++ || _IOC_DIR(cmd) != _IOC_WRITE) {
++ ret = -ENOTTY;
++ goto exit;
++ }
++
++ tmp = _IOC_SIZE(cmd);
++ if ((tmp % sizeof(struct spi_ioc_transfer)) != 0) {
++ ret = -EINVAL;
++ goto exit;
++ }
++ n_ioc = tmp / sizeof(struct spi_ioc_transfer);
++ if (n_ioc == 0)
++ goto exit;
++
++ /* copy into scratch area */
++ ioc = kmalloc(tmp, GFP_KERNEL);
++ if (!ioc) {
++ ret = -ENOMEM;
++ goto exit;
++ }
++ if (__copy_from_user(ioc, (void __user *)arg, tmp)) {
++ kfree(ioc);
++ ret = -EFAULT;
++ goto exit;
++ }
++
++ /* translate to spi_message, execute */
++ ret = spidev_message(st, ioc, n_ioc);
++ kfree(ioc);
++
++exit:
++ mutex_unlock(&indio_dev->mlock);
++
++ return ret;
++}
++
++static const struct file_operations max78m6610_lmu_fops = {
++ .owner = THIS_MODULE,
++ .open = max78m6610_lmu_open,
++ .release = max78m6610_lmu_release,
++ .unlocked_ioctl = max78m6610_lmu_ioctl,
++};
++
++static int
++max78m6610_lmu_chrdev_init(struct max78m6610_lmu_state *st)
++{
++ int ret;
++ struct device *dev;
++
++ ret = alloc_chrdev_region(&st->cdev_no, 0, 1,
++ "max78m6610_lmu");
++ if (ret) {
++ pr_err("Failed to alloc chrdev: %d", ret);
++ return ret;
++ }
++
++ cdev_init(&st->cdev, &max78m6610_lmu_fops);
++
++ ret = cdev_add(&st->cdev, st->cdev_no, 1);
++ if (ret) {
++ pr_err("Failed to add cdev: %d", ret);
++ unregister_chrdev_region(st->cdev_no, 1);
++ return ret;
++ }
++
++ st->cl = class_create(THIS_MODULE, "char");
++ if (IS_ERR(st->cl)) {
++ pr_err("Failed to create device class: %ld",
++ PTR_ERR(st->cl));
++ cdev_del(&st->cdev);
++ unregister_chrdev_region(st->cdev_no, 1);
++ return PTR_ERR(st->cl);
++ }
++
++ dev = device_create(st->cl, NULL, st->cdev_no, NULL,
++ "max78m6610_lmu");
++ if (IS_ERR(dev)) {
++ pr_err("Failed to create device: %ld",
++ PTR_ERR(st->cl));
++ class_destroy(st->cl);
++ cdev_del(&st->cdev);
++ unregister_chrdev_region(st->cdev_no, 1);
++ return PTR_ERR(dev);
++ }
++
++ return 0;
++}
++
++static int
++max78m6610_lmu_chrdev_remove(struct max78m6610_lmu_state *st)
++{
++ device_destroy(st->cl, st->cdev_no);
++ class_destroy(st->cl);
++ cdev_del(&st->cdev);
++ unregister_chrdev_region(st->cdev_no, 1);
++
++ return 0;
++}
++
+/**
+ * max78m6610_lmu_probe
+ *
@@ -21005,6 +23057,10 @@ index 0000000..f38ed57
+ if (ret)
+ goto error_cleanup_ring;
+
++ ret = max78m6610_lmu_chrdev_init(st);
++ if (ret)
++ goto error_cleanup_ring;
++
+ return 0;
+
+error_cleanup_ring:
@@ -21026,7 +23082,9 @@ index 0000000..f38ed57
+static int max78m6610_lmu_remove(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
++ struct max78m6610_lmu_state *st = iio_priv(indio_dev);
+
++ max78m6610_lmu_chrdev_remove(st);
+ iio_device_unregister(indio_dev);
+ iio_triggered_buffer_cleanup(indio_dev);
+ iio_device_free(indio_dev);
@@ -21082,6 +23140,326 @@ index 0000000..f38ed57
+MODULE_AUTHOR("Kai Ji <kai.ji@emutex.com>");
+MODULE_DESCRIPTION("Maxim 78M6610+LMU eADC");
+MODULE_LICENSE("GPL v2");
+diff --git a/drivers/staging/iio/trigger/Kconfig b/drivers/staging/iio/trigger/Kconfig
+index 7d32075..e9e837d 100644
+--- a/drivers/staging/iio/trigger/Kconfig
++++ b/drivers/staging/iio/trigger/Kconfig
+@@ -29,6 +29,17 @@ config IIO_SYSFS_TRIGGER
+
+ To compile this driver as a module, choose M here: the
+ module will be called iio-trig-sysfs.
++
++config IIO_HRTIMER_TRIGGER
++ tristate "HRTIMER trigger"
++ #depends on HRTIMER
++ select IRQ_WORK
++ help
++ Provides support for using HRTIMER entries as IIO triggers.
++ If unsure, say N (but it's safe to say "Y").
++
++ To compile this driver as a module, choose M here: the
++ module will be called iio-trig-hrtimer.
+
+ config IIO_BFIN_TMR_TRIGGER
+ tristate "Blackfin TIMER trigger"
+diff --git a/drivers/staging/iio/trigger/Makefile b/drivers/staging/iio/trigger/Makefile
+index b088b57..ad2f595 100644
+--- a/drivers/staging/iio/trigger/Makefile
++++ b/drivers/staging/iio/trigger/Makefile
+@@ -5,4 +5,5 @@
+ obj-$(CONFIG_IIO_PERIODIC_RTC_TRIGGER) += iio-trig-periodic-rtc.o
+ obj-$(CONFIG_IIO_GPIO_TRIGGER) += iio-trig-gpio.o
+ obj-$(CONFIG_IIO_SYSFS_TRIGGER) += iio-trig-sysfs.o
++obj-$(CONFIG_IIO_HRTIMER_TRIGGER) += iio-trig-hrtimer.o
+ obj-$(CONFIG_IIO_BFIN_TMR_TRIGGER) += iio-trig-bfin-timer.o
+diff --git a/drivers/staging/iio/trigger/iio-trig-hrtimer.c b/drivers/staging/iio/trigger/iio-trig-hrtimer.c
+new file mode 100644
+index 0000000..3f7d5b8
+--- /dev/null
++++ b/drivers/staging/iio/trigger/iio-trig-hrtimer.c
+@@ -0,0 +1,282 @@
++/*
++ * Industrial I/O - hrtimer trigger support
++ *
++ * Copyright 2013 STMicroelectronics Inc.
++ * Denis Ciocca <denis.ciocca@xxxxxx>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published by
++ * the Free Software Foundation.
++ */
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/hrtimer.h>
++#include <linux/ktime.h>
++#include <linux/slab.h>
++#include <linux/list.h>
++
++#include <linux/iio/iio.h>
++#include <linux/iio/trigger.h>
++
++struct iio_hrtimer_trigger_data {
++ struct iio_trigger *trig;
++ struct hrtimer timer;
++ struct list_head l;
++ ktime_t period;
++ u16 freq;
++ int id;
++};
++
++static LIST_HEAD(iio_hrtimer_trigger_list);
++static DEFINE_MUTEX(iio_hrtimer_trigger_list_mut);
++
++static int iio_hrtimer_trigger_probe(int id);
++static int iio_hrtimer_trigger_remove(int id);
++
++static ssize_t iio_sysfs_hrtimer_trig_add(struct device *dev,
++ struct device_attribute *attr, const char *buf, size_t len)
++{
++ int ret;
++ unsigned long input;
++
++ ret = kstrtoul(buf, 10, &input);
++ if (ret)
++ return ret;
++
++ ret = iio_hrtimer_trigger_probe(input);
++ if (ret)
++ return ret;
++
++ return len;
++}
++static DEVICE_ATTR(add_trigger, S_IWUSR, NULL, &iio_sysfs_hrtimer_trig_add);
++
++static ssize_t iio_sysfs_hrtimer_trig_remove(struct device *dev,
++ struct device_attribute *attr, const char *buf, size_t len)
++{
++ int ret;
++ unsigned long input;
++
++ ret = kstrtoul(buf, 10, &input);
++ if (ret)
++ return ret;
++
++ ret = iio_hrtimer_trigger_remove(input);
++ if (ret)
++ return ret;
++
++ return len;
++}
++static DEVICE_ATTR(remove_trigger, S_IWUSR,
++ NULL, &iio_sysfs_hrtimer_trig_remove);
++
++static struct attribute *iio_hrtimer_trig_attrs[] = {
++ &dev_attr_add_trigger.attr,
++ &dev_attr_remove_trigger.attr,
++ NULL,
++};
++
++static const struct attribute_group iio_hrtimer_trig_group = {
++ .attrs = iio_hrtimer_trig_attrs,
++};
++
++static const struct attribute_group *iio_hrtimer_trig_groups[] = {
++ &iio_hrtimer_trig_group,
++ NULL,
++};
++
++static struct device iio_hrtimer_trig_dev = {
++ .bus = &iio_bus_type,
++ .groups = iio_hrtimer_trig_groups,
++};
++
++static int iio_hrtimer_trig_set_state(struct iio_trigger *trig, bool state)
++{
++ struct iio_hrtimer_trigger_data *trig_data =
++ dev_get_drvdata(&trig->dev);
++
++ if (trig_data->freq == 0)
++ return -EINVAL;
++
++ if (state)
++ hrtimer_start(&trig_data->timer,
++ trig_data->period, HRTIMER_MODE_REL);
++ else
++ hrtimer_cancel(&trig_data->timer);
++
++ return 0;
++}
++
++static ssize_t iio_hrtimer_trigger_set_freq_value(struct device *dev,
++ struct device_attribute *attr, const char *buf, size_t len)
++{
++ int ret;
++ u16 frequency;
++ struct iio_trigger *trig = to_iio_trigger(dev);
++ struct iio_hrtimer_trigger_data *trig_data =
++ dev_get_drvdata(&trig->dev);
++
++ ret = kstrtou16(buf, 10, &frequency);
++ if (ret < 0)
++ return ret;
++
++ if (frequency > NSEC_PER_SEC)
++ return -EINVAL;
++
++ trig_data->freq = frequency;
++
++ if (frequency)
++ trig_data->period =
++ ktime_set(0, NSEC_PER_SEC / trig_data->freq);
++
++ return len;
++}
++
++static ssize_t iio_hrtimer_trigger_get_freq_value(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct iio_trigger *trig = to_iio_trigger(dev);
++ struct iio_hrtimer_trigger_data *trig_data =
++ dev_get_drvdata(&trig->dev);
++
++ return sprintf(buf, "%hu\n", trig_data->freq);
++}
++
++static DEVICE_ATTR(frequency, S_IWUSR | S_IRUGO,
++ iio_hrtimer_trigger_get_freq_value,
++ iio_hrtimer_trigger_set_freq_value);
++
++static struct attribute *iio_hrtimer_trigger_attrs[] = {
++ &dev_attr_frequency.attr,
++ NULL,
++};
++
++static const struct attribute_group iio_hrtimer_trigger_attr_group = {
++ .attrs = iio_hrtimer_trigger_attrs,
++};
++
++static const struct attribute_group *iio_hrtimer_trigger_attr_groups[] = {
++ &iio_hrtimer_trigger_attr_group,
++ NULL,
++};
++
++static const struct iio_trigger_ops iio_hrtimer_trigger_ops = {
++ .owner = THIS_MODULE,
++ .set_trigger_state = &iio_hrtimer_trig_set_state,
++};
++
++enum hrtimer_restart iio_hrtimer_trigger_func(struct hrtimer *timer)
++{
++ struct iio_hrtimer_trigger_data *trig_data;
++
++ trig_data = container_of(timer, struct iio_hrtimer_trigger_data, timer);
++
++ hrtimer_forward_now(timer, trig_data->period);
++ iio_trigger_poll(trig_data->trig, 0);
++
++ return HRTIMER_RESTART;
++}
++
++static int iio_hrtimer_trigger_probe(int id)
++{
++ int err;
++ bool foundit = false;
++ struct iio_hrtimer_trigger_data *trig_data;
++
++ mutex_lock(&iio_hrtimer_trigger_list_mut);
++ list_for_each_entry(trig_data, &iio_hrtimer_trigger_list, l) {
++ if (id == trig_data->id) {
++ foundit = true;
++ break;
++ }
++ }
++ if (foundit) {
++ err = -EINVAL;
++ goto iio_hrtimer_mutex_unlock;
++ }
++
++ trig_data = kmalloc(sizeof(*trig_data), GFP_KERNEL);
++ if (trig_data == NULL) {
++ err = -ENOMEM;
++ goto iio_hrtimer_mutex_unlock;
++ }
++
++ trig_data->id = id;
++ trig_data->trig = iio_trigger_alloc("hrtimer_trig%d", id);
++ if (!trig_data->trig) {
++ err = -ENOMEM;
++ goto iio_hrtimer_free_trig_data;
++ }
++
++ trig_data->trig->dev.groups = iio_hrtimer_trigger_attr_groups;
++ trig_data->trig->ops = &iio_hrtimer_trigger_ops;
++ trig_data->trig->dev.parent = &iio_hrtimer_trig_dev;
++ dev_set_drvdata(&trig_data->trig->dev, trig_data);
++
++ trig_data->freq = 0;
++ hrtimer_init(&trig_data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
++ trig_data->timer.function = &iio_hrtimer_trigger_func;
++
++ err = iio_trigger_register(trig_data->trig);
++ if (err)
++ goto iio_hrtimer_free_trig_data;
++
++ list_add(&trig_data->l, &iio_hrtimer_trigger_list);
++ __module_get(THIS_MODULE);
++ mutex_unlock(&iio_hrtimer_trigger_list_mut);
++
++ return 0;
++
++iio_hrtimer_free_trig_data:
++ kfree(trig_data);
++iio_hrtimer_mutex_unlock:
++ mutex_unlock(&iio_hrtimer_trigger_list_mut);
++ return err;
++}
++
++static int iio_hrtimer_trigger_remove(int id)
++{
++ bool foundit = false;
++ struct iio_hrtimer_trigger_data *trig_data;
++
++ mutex_lock(&iio_hrtimer_trigger_list_mut);
++ list_for_each_entry(trig_data, &iio_hrtimer_trigger_list, l) {
++ if (id == trig_data->id) {
++ foundit = true;
++ break;
++ }
++ }
++ if (!foundit) {
++ mutex_unlock(&iio_hrtimer_trigger_list_mut);
++ return -EINVAL;
++ }
++
++ iio_trigger_unregister(trig_data->trig);
++ iio_trigger_free(trig_data->trig);
++
++ list_del(&trig_data->l);
++ kfree(trig_data);
++ module_put(THIS_MODULE);
++ mutex_unlock(&iio_hrtimer_trigger_list_mut);
++
++ return 0;
++}
++
++static int __init iio_hrtimer_trig_init(void)
++{
++ device_initialize(&iio_hrtimer_trig_dev);
++ dev_set_name(&iio_hrtimer_trig_dev, "iio_hrtimer_trigger");
++ return device_add(&iio_hrtimer_trig_dev);
++}
++module_init(iio_hrtimer_trig_init);
++
++static void __exit iio_hrtimer_trig_exit(void)
++{
++ device_unregister(&iio_hrtimer_trig_dev);
++}
++module_exit(iio_hrtimer_trig_exit);
++
++MODULE_AUTHOR("Denis Ciocca <denis.ciocca@xxxxxx>");
++MODULE_DESCRIPTION("Hrtimer trigger for the iio subsystem");
++MODULE_LICENSE("GPL v2");
diff --git a/drivers/tty/serial/8250/8250.c b/drivers/tty/serial/8250/8250.c
index 733f22c..5f85dde 100644
--- a/drivers/tty/serial/8250/8250.c
@@ -21252,27 +23630,27 @@ index 5cdb092..9f5d030 100644
/*
* EKF addition for i960 Boards form EKF with serial port
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
-index 02e706e..a656d3b 100644
+index 02e706e..3a60e64 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -1327,6 +1327,26 @@ config SERIAL_IFX6X60
help
Support for the IFX6x60 modem devices on Intel MID platforms.
-+config SERIAL_CLN_UART
-+ tristate "Intel Clanton UART"
++config SERIAL_QUARK_UART
++ tristate "Quark High Speed UART support"
+ depends on PCI
+ select SERIAL_CORE
+ select DMADEVICES
-+ select INTEL_CLN_DMAC
++ select INTEL_MID_DMAC
+ help
-+ This driver is for Intel(R) Clanton UART with DMA enabled.
++ This driver is for Intel(R) Quark X1000 UART with DMA enabled.
+ If you don't want DMA then you should use the standard 8250_pci
+ driver.
+
-+config SERIAL_CLN_UART_CONSOLE
-+ bool "Support for console on Intel Clanton UART"
-+ depends on SERIAL_CLN_UART=y
++config SERIAL_QUARK_UART_CONSOLE
++ bool "Support for console on Intel(R) Quark X1000 UART"
++ depends on SERIAL_QUARK_UART=y
+ select SERIAL_CORE_CONSOLE
+ help
+ Say Y here if you wish to use the Clanton UART as the system console
@@ -21283,163 +23661,335 @@ index 02e706e..a656d3b 100644
tristate "Intel EG20T PCH/LAPIS Semicon IOH(ML7213/ML7223/ML7831) UART"
depends on PCI
diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile
-index df1b998..62a1833 100644
+index df1b998..ccbc063 100644
--- a/drivers/tty/serial/Makefile
+++ b/drivers/tty/serial/Makefile
@@ -74,6 +74,7 @@ obj-$(CONFIG_SERIAL_VT8500) += vt8500_serial.o
obj-$(CONFIG_SERIAL_MRST_MAX3110) += mrst_max3110.o
obj-$(CONFIG_SERIAL_MFD_HSU) += mfd.o
obj-$(CONFIG_SERIAL_IFX6X60) += ifx6x60.o
-+obj-$(CONFIG_SERIAL_CLN_UART) += intel_cln_uart.o
++obj-$(CONFIG_SERIAL_QUARK_UART) += intel_quark_uart.o
obj-$(CONFIG_SERIAL_PCH_UART) += pch_uart.o
obj-$(CONFIG_SERIAL_MSM_SMD) += msm_smd_tty.o
obj-$(CONFIG_SERIAL_MXS_AUART) += mxs-auart.o
-diff --git a/drivers/tty/serial/intel_cln_uart.c b/drivers/tty/serial/intel_cln_uart.c
+diff --git a/drivers/tty/serial/intel_quark_uart.c b/drivers/tty/serial/intel_quark_uart.c
new file mode 100644
-index 0000000..17454d0
+index 0000000..1377bdd
--- /dev/null
-+++ b/drivers/tty/serial/intel_cln_uart.c
-@@ -0,0 +1,1759 @@
-+/*
-+ * Copyright(c) 2013 Intel Corporation. All rights reserved.
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of version 2 of the GNU General Public License as
-+ * published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope that it will be useful, but
-+ * WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-+ *
-+ * Contact Information:
-+ * Intel Corporation
-+ */
++++ b/drivers/tty/serial/intel_quark_uart.c
+@@ -0,0 +1,2032 @@
+/*
-+ * Intel Clanton DMA-UART driver
-+ *
-+ * The hardware here consists of
-+ * 1 x MMIO BAR with 16550 compatible deisgnware UART regs - byte aligned
-+ * 1 x MMIO BAR with a designware DMAC - modified for byte aligned bursts
-+ * Lots of code stolen with pride from pch_uart.c/mfd.c
-+ *
-+ * DMA Config - set by hardware as a default
-+ *
-+ * Channel 0 : RX (Device to host)
-+ * CTL0_LO : 0x00304837
-+ * CTL0_HI : 0x00000002
-+ * CFG0_LO : 0x00000C00 (HS_DST_SRC | HS_SEL_SRC)
-+ * CFG0_HI : 0x00000004
++ *Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
++ *Copyright (C) 2014 Intel Corporation.
+ *
++ *This program is free software; you can redistribute it and/or modify
++ *it under the terms of the GNU General Public License as published by
++ *the Free Software Foundation; version 2 of the License.
+ *
-+ * Channel 1 : TX (Host to device)
-+ * CTL1_LO : 0x00304837
-+ * CTL1_HI : 0x00000002
-+ * CFG1_LO : 0x00000C20 (HS_DST_SRC | HS_SEL_SRC | CH_PRIOR:001)
-+ * CFG1_HI : 0x00000004 (PROTCTL = 001)
++ *This program is distributed in the hope that it will be useful,
++ *but WITHOUT ANY WARRANTY; without even the implied warranty of
++ *MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ *GNU General Public License for more details.
+ *
++ *You should have received a copy of the GNU General Public License
++ *along with this program; if not, write to the Free Software
++ *Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ */
-+
-+#include <asm/io.h>
-+#include <linux/console.h>
-+#include <linux/debugfs.h>
-+#include <linux/intel_mid_dma.h>
++#if defined(CONFIG_SERIAL_QUARK_UART_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
++#define SUPPORT_SYSRQ
++#endif
++#if defined(CONFIG_INTEL_QUARK_X1000_SOC)
++#include <asm/cln.h>
++#endif
++#include <linux/kernel.h>
++#include <linux/serial_reg.h>
++#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/pci.h>
-+#include <linux/pm_runtime.h>
++#include <linux/console.h>
+#include <linux/serial_core.h>
-+#include <linux/serial_reg.h>
-+#include <linux/slab.h>
-+#include <linux/spinlock.h>
++#include <linux/tty.h>
+#include <linux/tty_flip.h>
++#include <linux/interrupt.h>
++#include <linux/io.h>
++#include <linux/dmi.h>
++#include <linux/nmi.h>
++#include <linux/delay.h>
++#include <linux/intel_mid_dma.h>
++#include <linux/debugfs.h>
++#include <linux/dmaengine.h>
+
-+#define CLN_UART_MAX_INSTANCES 2
-+#define CLN_UART_DMA_CHANNELS 2
-+#define CLN_UART_DMA_TXCHAN 1
-+#define CLN_UART_DMA_RXCHAN 0
-+#define CLN_UART_FIFO_LEN 16
-+#define CLN_UART_DRIVER_DEVICE "ttyCLN"
-+#define CLN_UART_DMA_BUF_SIZE PAGE_SIZE
++enum {
++ QUARK_UART_HANDLED_RX_INT_SHIFT,
++ QUARK_UART_HANDLED_TX_INT_SHIFT,
++ QUARK_UART_HANDLED_RX_ERR_INT_SHIFT,
++ QUARK_UART_HANDLED_RX_TRG_INT_SHIFT,
++ QUARK_UART_HANDLED_MS_INT_SHIFT,
++ QUARK_UART_HANDLED_LS_INT_SHIFT,
++};
+
-+#define CLN_UART_MODE_MSI 0x00000001
-+#define CLN_UART_MODE_DMA 0x00000002
++enum {
++ QUARK_UART_8LINE,
++ QUARK_UART_2LINE,
++};
+
-+#define CLN_UART_DEFAULT_UARTCLK 1843200 /* 1.8432 MHz */
++#define INFO(_max_chan, _ch_base, _block_size, _pimr_mask) \
++ ((kernel_ulong_t)&(struct intel_mid_dma_probe_info) { \
++ .max_chan = (_max_chan), \
++ .ch_base = (_ch_base), \
++ .block_size = (_block_size), \
++ .pimr_mask = (_pimr_mask), \
++ })
++
++#define QUARK_UART_DRIVER_DEVICE "ttyQRK"
++#define QUARK_UART_FIFO_LEN 16
++//#define __QRK_DMA_DEBUG /* TODO: remove all code of this type */
+
-+/* IIR bits - TO is non-standard */
-+#define INTEL_CLN_UART_IIR_MS 0x00
-+#define INTEL_CLN_UART_IIR_NOIRQ 0x01
-+#define INTEL_CLN_UART_IIR_THRE 0x02
-+#define INTEL_CLN_UART_IIR_RXD 0x04
-+#define INTEL_CLN_UART_IIR_RLS 0x06
-+#define INTEL_CLN_UART_IIR_BUSY 0x07
-+#define INTEL_CLN_UART_IIR_TO 0x08
++/* Set the max number of UART ports supported by this driver.
++ * Intel Quark X1000: 2 ports.
++ * (Counts for the EG20T/pch_uart hardware this was derived from:
++ * LAPIS Semiconductor ML7213 IOH: 3 ports, ML7223 IOH: 2 ports.)
++*/
++#define QUARK_UART_NR 2
++
++#define QUARK_UART_HANDLED_RX_INT (1<<((QUARK_UART_HANDLED_RX_INT_SHIFT)<<1))
++#define QUARK_UART_HANDLED_TX_INT (1<<((QUARK_UART_HANDLED_TX_INT_SHIFT)<<1))
++#define QUARK_UART_HANDLED_RX_ERR_INT (1<<((\
++ QUARK_UART_HANDLED_RX_ERR_INT_SHIFT)<<1))
++#define QUARK_UART_HANDLED_RX_TRG_INT (1<<((\
++ QUARK_UART_HANDLED_RX_TRG_INT_SHIFT)<<1))
++#define QUARK_UART_HANDLED_MS_INT (1<<((QUARK_UART_HANDLED_MS_INT_SHIFT)<<1))
++
++#define QUARK_UART_HANDLED_LS_INT (1<<((QUARK_UART_HANDLED_LS_INT_SHIFT)<<1))
++
++#define QUARK_UART_RBR 0x00
++#define QUARK_UART_THR 0x00
++
++#define QUARK_UART_IER_MASK (QUARK_UART_IER_ERBFI|QUARK_UART_IER_ETBEI|\
++ QUARK_UART_IER_ELSI|QUARK_UART_IER_EDSSI)
++#define QUARK_UART_IER_ERBFI 0x00000001
++#define QUARK_UART_IER_ETBEI 0x00000002
++#define QUARK_UART_IER_ELSI 0x00000004
++#define QUARK_UART_IER_EDSSI 0x00000008
++
++#define QUARK_UART_IIR_IP 0x00000001
++#define QUARK_UART_IIR_IID 0x00000006
++#define QUARK_UART_IIR_MSI 0x00000000
++#define QUARK_UART_IIR_TRI 0x00000002
++#define QUARK_UART_IIR_RRI 0x00000004
++#define QUARK_UART_IIR_REI 0x00000006
++#define QUARK_UART_IIR_TOI 0x00000008
++#define QUARK_UART_IIR_FIFO256 0x00000020
++#define QUARK_UART_IIR_FIFO64 QUARK_UART_IIR_FIFO256
++#define QUARK_UART_IIR_FE 0x000000C0
++
++#define QUARK_UART_FCR_FIFOE 0x00000001
++#define QUARK_UART_FCR_RFR 0x00000002
++#define QUARK_UART_FCR_TFR 0x00000004
++#define QUARK_UART_FCR_DMS 0x00000008
++#define QUARK_UART_FCR_FIFO256 0x00000020
++#define QUARK_UART_FCR_RFTL 0x000000C0
++
++#define QUARK_UART_FCR_RFTL1 0x00000000
++#define QUARK_UART_FCR_RFTL64 0x00000040
++#define QUARK_UART_FCR_RFTL128 0x00000080
++#define QUARK_UART_FCR_RFTL224 0x000000C0
++#define QUARK_UART_FCR_RFTL16 QUARK_UART_FCR_RFTL64
++#define QUARK_UART_FCR_RFTL32 QUARK_UART_FCR_RFTL128
++#define QUARK_UART_FCR_RFTL56 QUARK_UART_FCR_RFTL224
++#define QUARK_UART_FCR_RFTL4 QUARK_UART_FCR_RFTL64
++#define QUARK_UART_FCR_RFTL8 QUARK_UART_FCR_RFTL128
++#define QUARK_UART_FCR_RFTL14 QUARK_UART_FCR_RFTL224
++#define QUARK_UART_FCR_RFTL_SHIFT 6
++
++#define QUARK_UART_LCR_WLS 0x00000003
++#define QUARK_UART_LCR_STB 0x00000004
++#define QUARK_UART_LCR_PEN 0x00000008
++#define QUARK_UART_LCR_EPS 0x00000010
++#define QUARK_UART_LCR_SP 0x00000020
++#define QUARK_UART_LCR_SB 0x00000040
++#define QUARK_UART_LCR_DLAB 0x00000080
++#define QUARK_UART_LCR_NP 0x00000000
++#define QUARK_UART_LCR_OP QUARK_UART_LCR_PEN
++#define QUARK_UART_LCR_EP (QUARK_UART_LCR_PEN | QUARK_UART_LCR_EPS)
++#define QUARK_UART_LCR_1P (QUARK_UART_LCR_PEN | QUARK_UART_LCR_SP)
++#define QUARK_UART_LCR_0P (QUARK_UART_LCR_PEN | QUARK_UART_LCR_EPS |\
++ QUARK_UART_LCR_SP)
++
++#define QUARK_UART_LCR_5BIT 0x00000000
++#define QUARK_UART_LCR_6BIT 0x00000001
++#define QUARK_UART_LCR_7BIT 0x00000002
++#define QUARK_UART_LCR_8BIT 0x00000003
++
++#define QUARK_UART_MCR_DTR 0x00000001
++#define QUARK_UART_MCR_RTS 0x00000002
++#define QUARK_UART_MCR_OUT 0x0000000C
++#define QUARK_UART_MCR_LOOP 0x00000010
++#define QUARK_UART_MCR_AFE 0x00000020
++
++#define QUARK_UART_LSR_DR 0x00000001
++#define QUARK_UART_LSR_ERR (1<<7)
++
++#define QUARK_UART_MSR_DCTS 0x00000001
++#define QUARK_UART_MSR_DDSR 0x00000002
++#define QUARK_UART_MSR_TERI 0x00000004
++#define QUARK_UART_MSR_DDCD 0x00000008
++#define QUARK_UART_MSR_CTS 0x00000010
++#define QUARK_UART_MSR_DSR 0x00000020
++#define QUARK_UART_MSR_RI 0x00000040
++#define QUARK_UART_MSR_DCD 0x00000080
++#define QUARK_UART_MSR_DELTA (QUARK_UART_MSR_DCTS | QUARK_UART_MSR_DDSR |\
++ QUARK_UART_MSR_TERI | QUARK_UART_MSR_DDCD)
++
++#define QUARK_UART_DLL 0x00
++#define QUARK_UART_DLM 0x01
++
++#define QUARK_UART_BRCSR 0x0E
++
++#define QUARK_UART_IID_RLS (QUARK_UART_IIR_REI)
++#define QUARK_UART_IID_RDR (QUARK_UART_IIR_RRI)
++#define QUARK_UART_IID_RDR_TO (QUARK_UART_IIR_RRI | QUARK_UART_IIR_TOI)
++#define QUARK_UART_IID_THRE (QUARK_UART_IIR_TRI)
++#define QUARK_UART_IID_MS (QUARK_UART_IIR_MSI)
++
++#define QUARK_UART_HAL_PARITY_NONE (QUARK_UART_LCR_NP)
++#define QUARK_UART_HAL_PARITY_ODD (QUARK_UART_LCR_OP)
++#define QUARK_UART_HAL_PARITY_EVEN (QUARK_UART_LCR_EP)
++#define QUARK_UART_HAL_PARITY_FIX1 (QUARK_UART_LCR_1P)
++#define QUARK_UART_HAL_PARITY_FIX0 (QUARK_UART_LCR_0P)
++#define QUARK_UART_HAL_5BIT (QUARK_UART_LCR_5BIT)
++#define QUARK_UART_HAL_6BIT (QUARK_UART_LCR_6BIT)
++#define QUARK_UART_HAL_7BIT (QUARK_UART_LCR_7BIT)
++#define QUARK_UART_HAL_8BIT (QUARK_UART_LCR_8BIT)
++#define QUARK_UART_HAL_STB1 0
++#define QUARK_UART_HAL_STB2 (QUARK_UART_LCR_STB)
++
++#define QUARK_UART_HAL_CLR_TX_FIFO (QUARK_UART_FCR_TFR)
++#define QUARK_UART_HAL_CLR_RX_FIFO (QUARK_UART_FCR_RFR)
++#define QUARK_UART_HAL_CLR_ALL_FIFO (QUARK_UART_HAL_CLR_TX_FIFO | \
++ QUARK_UART_HAL_CLR_RX_FIFO)
++
++#define QUARK_UART_HAL_DMA_MODE0 0
++#define QUARK_UART_HAL_FIFO_DIS 0
++#define QUARK_UART_HAL_FIFO16 (QUARK_UART_FCR_FIFOE)
++#define QUARK_UART_HAL_FIFO256 (QUARK_UART_FCR_FIFOE | \
++ QUARK_UART_FCR_FIFO256)
++#define QUARK_UART_HAL_FIFO64 (QUARK_UART_HAL_FIFO256)
++#define QUARK_UART_HAL_TRIGGER1 (QUARK_UART_FCR_RFTL1)
++#define QUARK_UART_HAL_TRIGGER64 (QUARK_UART_FCR_RFTL64)
++#define QUARK_UART_HAL_TRIGGER128 (QUARK_UART_FCR_RFTL128)
++#define QUARK_UART_HAL_TRIGGER224 (QUARK_UART_FCR_RFTL224)
++#define QUARK_UART_HAL_TRIGGER16 (QUARK_UART_FCR_RFTL16)
++#define QUARK_UART_HAL_TRIGGER32 (QUARK_UART_FCR_RFTL32)
++#define QUARK_UART_HAL_TRIGGER56 (QUARK_UART_FCR_RFTL56)
++#define QUARK_UART_HAL_TRIGGER4 (QUARK_UART_FCR_RFTL4)
++#define QUARK_UART_HAL_TRIGGER8 (QUARK_UART_FCR_RFTL8)
++#define QUARK_UART_HAL_TRIGGER14 (QUARK_UART_FCR_RFTL14)
++#define QUARK_UART_HAL_TRIGGER_L (QUARK_UART_FCR_RFTL64)
++#define QUARK_UART_HAL_TRIGGER_M (QUARK_UART_FCR_RFTL128)
++#define QUARK_UART_HAL_TRIGGER_H (QUARK_UART_FCR_RFTL224)
++
++#define QUARK_UART_HAL_RX_INT (QUARK_UART_IER_ERBFI)
++#define QUARK_UART_HAL_TX_INT (QUARK_UART_IER_ETBEI)
++#define QUARK_UART_HAL_RX_ERR_INT (QUARK_UART_IER_ELSI)
++#define QUARK_UART_HAL_MS_INT (QUARK_UART_IER_EDSSI)
++#define QUARK_UART_HAL_ALL_INT (QUARK_UART_IER_MASK)
++
++#define QUARK_UART_HAL_DTR (QUARK_UART_MCR_DTR)
++#define QUARK_UART_HAL_RTS (QUARK_UART_MCR_RTS)
++#define QUARK_UART_HAL_OUT (QUARK_UART_MCR_OUT)
++#define QUARK_UART_HAL_LOOP (QUARK_UART_MCR_LOOP)
++#define QUARK_UART_HAL_AFE (QUARK_UART_MCR_AFE)
++
++#define PCI_VENDOR_ID_ROHM 0x10DB
++
++#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
+
-+static bool dma_enable = false;
-+static int intel_cln_uart_port_ct = 0;
-+module_param(dma_enable, bool, 0644);
-+MODULE_PARM_DESC(dma_enable, "Enable/disable DMA - default true");
++#define DEFAULT_UARTCLK 44236800 /* 2.7648 MHz * 16 */
+
+/**
+ * struct inel_cln_uart_buffer
+ *
+ * Descriptor for a UART bufer
+ */
-+struct intel_cln_uart_buffer {
++struct quark_uart_buffer {
+ dma_addr_t dma_addr;
-+ unsigned char *buf_virt;
++ unsigned char *buf;
+ u32 offs;
+ int size;
+};
+
-+/**
-+ * struct intel_cln_uart
-+ *
-+ * Describes an individual UART
-+ */
-+struct intel_cln_uart {
-+ char name[10];
-+ int uartclk;
-+ int tx_dma_use;
-+ int start_tx;
-+ int start_rx;
-+ int dma_tx_nent;
-+ int tx_empty;
-+
-+ spinlock_t lock;
-+ struct dentry *debugfs;
-+ struct device *dev;
++struct x1000_port {
++ struct uart_port port;
++ int port_type;
++ void __iomem *membase;
++ resource_size_t mapbase;
++ struct pci_dev *pdev;
++ int fifo_size;
++ unsigned int uartclk;
++ int start_tx;
++ int start_rx;
++ int tx_empty;
++ int trigger;
++ int trigger_level;
++ unsigned int dmsr;
++ unsigned int fcr;
++ unsigned int mcr;
++ unsigned int use_dma;
+ struct dma_async_tx_descriptor *desc_tx;
+ struct dma_async_tx_descriptor *desc_rx;
-+ struct dma_chan *tx_chan;
-+ struct dma_chan *rx_chan;
++#if 1
++ struct dma_chan *chan_tx;
++ struct dma_chan *chan_rx;
+ struct middma_device mid_dma;
-+ struct intel_cln_uart_buffer txbuf;
-+ struct intel_cln_uart_buffer rxbuf;
++ struct quark_uart_buffer txbuf;
++ struct quark_uart_buffer rxbuf;
+ struct intel_mid_dma_slave dmas_rx;
+ struct intel_mid_dma_slave dmas_tx;
++#else
++ struct quark_dma_slave param_tx;
++ struct quark_dma_slave param_rx;
++ struct dma_chan *chan_tx;
++ struct dma_chan *chan_rx;
++#endif
+ struct scatterlist *sg_tx_p;
++ int nent;
+ struct scatterlist sg_rx;
-+ struct uart_port port;
-+
-+ unsigned char fcr;
-+ unsigned char ier;
-+ unsigned char lcr;
-+ unsigned char mcr;
-+
-+ unsigned long paddr;
-+ unsigned long iolen;
-+ unsigned long tx_trigger_level;
-+ unsigned long rx_trigger_level;
-+ u32 irq;
-+ u32 mode;
++ int tx_dma_use;
++ void *rx_buf_virt;
++ dma_addr_t rx_buf_dma;
++
++ struct dentry *debugfs;
++
++ /* protect the x1000_port private structure and io access to membase */
++ spinlock_t lock;
+};
+
+/**
++ * struct quark_uart_driver_data - private data structure for UART-DMA
++ * @port_type: The number of DMA channel
++ * @line_no: UART port line number (0, 1, 2...)
++ */
++struct quark_uart_driver_data {
++ int port_type;
++ int line_no;
++};
++
++#if 0
++static unsigned int mem_serial_in(struct uart_port *p, int offset)
++{
++ offset = offset << p->regshift;
++ return readb(p->membase + offset);
++}
++
++static void mem_serial_out(struct uart_port *p, int offset, int value)
++{
++ offset = offset << p->regshift;
++ writeb(value, p->membase + offset);
++}
++#endif
++
++/**
+ * serial_in
+ *
+ * @param up: pointer to uart descriptor
@@ -21447,9 +23997,11 @@ index 0000000..17454d0
+ *
+ * Reads a register @ offset
+ */
-+static inline unsigned int serial_in(struct intel_cln_uart *up, int offset)
++static inline unsigned int serial_in(struct x1000_port *up, int offset)
+{
-+ return (unsigned int)readb(up->port.membase + offset);
++ int soffset = offset << 2;
++
++ return (unsigned int)readb(up->membase + soffset);
+}
+
+/**
@@ -21460,97 +24012,72 @@ index 0000000..17454d0
+ *
+ * Writes a register @ offset
+ */
-+static inline void serial_out(struct intel_cln_uart *up, int offset, int value)
++static inline void serial_out(struct x1000_port *up, int offset, int value)
+{
+ unsigned char val = value & 0xff;
-+ writeb(val, up->port.membase + offset);
-+}
-+
-+/**
-+ * intel_cln_uart_handle_rx_to
-+ *
-+ * For FIFO RX timeout just read the data until nothing else to read
-+ */
-+static int intel_cln_uart_hal_read(struct intel_cln_uart *up, unsigned char *buf,
-+ int rx_size)
-+{
-+ int i;
-+ u8 rbr, lsr;
++ int soffset = offset << 2;
+
-+ lsr = serial_in(up, UART_LSR);
-+ for (i = 0, lsr = serial_in(up, UART_LSR);
-+ i < rx_size && lsr & UART_LSR_DR;
-+ lsr = serial_in(up, UART_LSR)) {
-+ rbr = serial_in(up, UART_RX);
-+ buf[i++] = rbr;
-+ }
-+ return i;
++ writeb(val, up->membase + soffset);
+}
+
-+/**
-+ * intel_cln_uart_hal_write
-+ *
-+ * For FIFO RX timeout just read the data until nothing else to read
-+ */
-+static void intel_cln_uart_hal_write(struct intel_cln_uart *up, unsigned char *buf,
-+ int tx_size)
-+{
-+ int i;
-+ unsigned int thr;
-+
-+ for (i = 0; i < tx_size;) {
-+ thr = buf[i++];
-+ serial_out(up, UART_TX, thr);
-+ }
-+}
++#ifdef CONFIG_SERIAL_QUARK_UART_CONSOLE
++static struct x1000_port *quark_uart_ports[QUARK_UART_NR];
++#endif
++static unsigned int default_baud = 115200;
++static const int trigger_level_256[4] = { 1, 64, 128, 224 };
++static const int trigger_level_64[4] = { 1, 16, 32, 56 };
++static const int trigger_level_16[4] = { 1, 4, 8, 14 };
++static const int trigger_level_1[4] = { 1, 1, 1, 1 };
+
+#ifdef CONFIG_DEBUG_FS
-+#define INTEL_CLN_UART_REGS_BUFSIZE 1024
+
-+/**
-+ * port_show_regs
-+ *
-+ * @param file: pointer to uart descriptor
-+ * @param user_buf: register offset
-+ * @param count:
-+ * @param ppos:
-+ *
-+ * Dump uart regs to string @ user_buf
-+ */
++#define QUARK_REGS_BUFSIZE 1024
++
++
+static ssize_t port_show_regs(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
-+ struct intel_cln_uart *up = file->private_data;
++ struct x1000_port *priv = file->private_data;
+ char *buf;
+ u32 len = 0;
+ ssize_t ret;
++ unsigned char lcr;
+
-+ buf = kzalloc(INTEL_CLN_UART_REGS_BUFSIZE, GFP_KERNEL);
++ buf = kzalloc(QUARK_REGS_BUFSIZE, GFP_KERNEL);
+ if (!buf)
+ return 0;
+
-+ len += snprintf(buf + len, INTEL_CLN_UART_REGS_BUFSIZE - len,
-+ "INTEL_CLN_UART port regs:\n");
++ len += snprintf(buf + len, QUARK_REGS_BUFSIZE - len,
++ "QUARK X1000 port[%d] regs:\n", priv->port.line);
+
-+ len += snprintf(buf + len, INTEL_CLN_UART_REGS_BUFSIZE - len,
++ len += snprintf(buf + len, QUARK_REGS_BUFSIZE - len,
+ "=================================\n");
-+ len += snprintf(buf + len, INTEL_CLN_UART_REGS_BUFSIZE - len,
-+ "IER: \t\t0x%08x\n", serial_in(up, UART_IER));
-+ len += snprintf(buf + len, INTEL_CLN_UART_REGS_BUFSIZE - len,
-+ "IIR: \t\t0x%08x\n", serial_in(up, UART_IIR));
-+ len += snprintf(buf + len, INTEL_CLN_UART_REGS_BUFSIZE - len,
-+ "LCR: \t\t0x%08x\n", serial_in(up, UART_LCR));
-+ len += snprintf(buf + len, INTEL_CLN_UART_REGS_BUFSIZE - len,
-+ "MCR: \t\t0x%08x\n", serial_in(up, UART_MCR));
-+ len += snprintf(buf + len, INTEL_CLN_UART_REGS_BUFSIZE - len,
-+ "LSR: \t\t0x%08x\n", serial_in(up, UART_LSR));
-+ len += snprintf(buf + len, INTEL_CLN_UART_REGS_BUFSIZE - len,
-+ "MSR: \t\t0x%08x\n", serial_in(up, UART_MSR));
-+ len += snprintf(buf + len, INTEL_CLN_UART_REGS_BUFSIZE - len,
-+ "FCR: \t\t0x%08x\n", serial_in(up, UART_FCR));
-+
-+ if (len > INTEL_CLN_UART_REGS_BUFSIZE)
-+ len = INTEL_CLN_UART_REGS_BUFSIZE;
++ len += snprintf(buf + len, QUARK_REGS_BUFSIZE - len,
++ "IER: \t0x%02x\n", serial_in(priv, UART_IER));
++ len += snprintf(buf + len, QUARK_REGS_BUFSIZE - len,
++ "IIR: \t0x%02x\n", serial_in(priv, UART_IIR));
++ len += snprintf(buf + len, QUARK_REGS_BUFSIZE - len,
++ "LCR: \t0x%02x\n", serial_in(priv, UART_LCR));
++ len += snprintf(buf + len, QUARK_REGS_BUFSIZE - len,
++ "MCR: \t0x%02x\n", serial_in(priv, UART_MCR));
++ len += snprintf(buf + len, QUARK_REGS_BUFSIZE - len,
++ "LSR: \t0x%02x\n", serial_in(priv, UART_LSR));
++ len += snprintf(buf + len, QUARK_REGS_BUFSIZE - len,
++ "MSR: \t0x%02x\n", serial_in(priv, UART_MSR));
++ len += snprintf(buf + len, QUARK_REGS_BUFSIZE - len,
++ "BRCSR: \t0x%02x\n",
++ serial_in(priv, QUARK_UART_BRCSR));
++
++ lcr = serial_in(priv, UART_LCR);
++ serial_out(priv, UART_LCR, QUARK_UART_LCR_DLAB);
++ len += snprintf(buf + len, QUARK_REGS_BUFSIZE - len,
++ "DLL: \t0x%02x\n", serial_in(priv, UART_DLL));
++ len += snprintf(buf + len, QUARK_REGS_BUFSIZE - len,
++ "DLM: \t0x%02x\n", serial_in(priv, UART_DLM));
++ serial_out(priv, UART_LCR, lcr);
++
++ if (len > QUARK_REGS_BUFSIZE)
++ len = QUARK_REGS_BUFSIZE;
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
@@ -21563,100 +24090,423 @@ index 0000000..17454d0
+ .read = port_show_regs,
+ .llseek = default_llseek,
+};
++#endif /* CONFIG_DEBUG_FS */
+
-+/**
-+ * intel_cln_uart_debugfs_init
-+ *
-+ * @param up: pointer to uart descriptor
-+ *
-+ * Create a debug FS entry for the UART and associated register entries
-+ */
-+static int intel_cln_uart_debugfs_init(struct intel_cln_uart *up)
++/* Return UART clock, checking for board specific clocks. */
++static unsigned int quark_uart_get_uartclk(void)
+{
-+ up->debugfs = debugfs_create_dir("intel_cln_uart", NULL);
-+ if (!up->debugfs)
-+ return -ENOMEM;
++ return DEFAULT_UARTCLK;
++}
++
++static void quark_uart_hal_enable_interrupt(struct x1000_port *priv,
++ unsigned int flag)
++{
++ u8 ier = serial_in(priv, UART_IER);
++#ifdef __QRK_DMA_DEBUG
++// pr_info("%s read IER %x\n", __func__, ier);
++#endif
++ ier |= flag & QUARK_UART_IER_MASK;
++ serial_out(priv, UART_IER, ier);
++#ifdef __QRK_DMA_DEBUG
++// pr_info("%s wrote IER %x\n", __func__, ier);
++#endif
++}
++
++static void quark_uart_hal_disable_interrupt(struct x1000_port *priv,
++ unsigned int flag)
++{
++#ifdef __QRK_DMA_DEBUG
++// pr_info("%s entry\n", __func__);
++#endif
++ u8 ier = serial_in(priv, UART_IER);
++ ier &= ~(flag & QUARK_UART_IER_MASK);
++ serial_out(priv, UART_IER, ier);
++}
++
++static int quark_uart_hal_set_line(struct x1000_port *priv, unsigned int baud,
++ unsigned int parity, unsigned int bits,
++ unsigned int stb)
++{
++ unsigned int dll, dlm, lcr;
++ int div;
++
++ div = DIV_ROUND_CLOSEST(priv->uartclk / 16, baud);
++ if (div < 0 || USHRT_MAX <= div) {
++ dev_err(priv->port.dev, "Invalid Baud(div=0x%x)\n", div);
++ return -EINVAL;
++ }
++
++ dll = (unsigned int)div & 0x00FFU;
++ dlm = ((unsigned int)div >> 8) & 0x00FFU;
++
++ if (parity & ~(QUARK_UART_LCR_PEN | QUARK_UART_LCR_EPS | QUARK_UART_LCR_SP)) {
++ dev_err(priv->port.dev, "Invalid parity(0x%x)\n", parity);
++ return -EINVAL;
++ }
++
++ if (bits & ~QUARK_UART_LCR_WLS) {
++ dev_err(priv->port.dev, "Invalid bits(0x%x)\n", bits);
++ return -EINVAL;
++ }
++
++ if (stb & ~QUARK_UART_LCR_STB) {
++ dev_err(priv->port.dev, "Invalid STB(0x%x)\n", stb);
++ return -EINVAL;
++ }
++
++ lcr = parity;
++ lcr |= bits;
++ lcr |= stb;
++
++#ifdef __QRK_DMA_DEBUG
++ /* TODO: change this back to dev_dbg - BOD */
++ dev_info(priv->port.dev, "%s:baud = %u, div = %04x, lcr = %02x (%lu)\n",
++ __func__, baud, div, lcr, jiffies);
++#endif
++ serial_out(priv, UART_LCR, QUARK_UART_LCR_DLAB);
++ serial_out(priv, QUARK_UART_DLL, dll);
++ serial_out(priv, QUARK_UART_DLM, dlm);
++ serial_out(priv, UART_LCR, lcr);
+
-+ debugfs_create_file(up->name, S_IFREG | S_IRUGO,
-+ up->debugfs, (void *)up, &port_regs_ops);
+ return 0;
+}
+
-+/**
-+ * intel_cln_uart_debugfs_remove
-+ *
-+ * @param up: pointer to uart descriptor
-+ *
-+ * Remove recursive debug FS entries for the UART
-+ */
-+static void intel_cln_uart_debugfs_remove(struct intel_cln_uart *intel_cln_uart)
++static int quark_uart_hal_fifo_reset(struct x1000_port *priv,
++ unsigned int flag)
+{
-+ if (intel_cln_uart->debugfs)
-+ debugfs_remove_recursive(intel_cln_uart->debugfs);
++ if (flag & ~(QUARK_UART_FCR_TFR | QUARK_UART_FCR_RFR)) {
++ dev_err(priv->port.dev, "%s:Invalid flag(0x%x)\n",
++ __func__, flag);
++ return -EINVAL;
++ }
++
++ serial_out(priv, UART_FCR, QUARK_UART_FCR_FIFOE | priv->fcr);
++ serial_out(priv,
++ UART_FCR, QUARK_UART_FCR_FIFOE | priv->fcr | flag);
++ serial_out(priv, UART_FCR, priv->fcr);
++
++ return 0;
+}
+
++static int quark_uart_hal_set_fifo(struct x1000_port *priv,
++ unsigned int dmamode,
++ unsigned int fifo_size, unsigned int trigger)
++{
++ u8 fcr;
++
++ if (dmamode & ~QUARK_UART_FCR_DMS) {
++ dev_err(priv->port.dev, "%s:Invalid DMA Mode(0x%x)\n",
++ __func__, dmamode);
++ return -EINVAL;
++ }
++
++ if (fifo_size & ~(QUARK_UART_FCR_FIFOE | QUARK_UART_FCR_FIFO256)) {
++ dev_err(priv->port.dev, "%s:Invalid FIFO SIZE(0x%x)\n",
++ __func__, fifo_size);
++ return -EINVAL;
++ }
++
++ if (trigger & ~QUARK_UART_FCR_RFTL) {
++ dev_err(priv->port.dev, "%s:Invalid TRIGGER(0x%x)\n",
++ __func__, trigger);
++ return -EINVAL;
++ }
++
++ switch (priv->fifo_size) {
++ case 256:
++ priv->trigger_level =
++ trigger_level_256[trigger >> QUARK_UART_FCR_RFTL_SHIFT];
++ break;
++ case 64:
++ priv->trigger_level =
++ trigger_level_64[trigger >> QUARK_UART_FCR_RFTL_SHIFT];
++ break;
++ case 16:
++ priv->trigger_level =
++ trigger_level_16[trigger >> QUARK_UART_FCR_RFTL_SHIFT];
++ break;
++ default:
++ priv->trigger_level =
++ trigger_level_1[trigger >> QUARK_UART_FCR_RFTL_SHIFT];
++ break;
++ }
++#if 0
++ fcr =
++ dmamode | fifo_size | trigger | QUARK_UART_FCR_RFR | QUARK_UART_FCR_TFR;
+#else
-+static inline int intel_cln_uart_debugfs_init(struct intel_cln_uart *intel_cln_uart)
++ fcr =
++ fifo_size | trigger | QUARK_UART_FCR_RFR | QUARK_UART_FCR_TFR;
++
++#endif
++ serial_out(priv, UART_FCR, QUARK_UART_FCR_FIFOE);
++ serial_out(priv,
++ UART_FCR, QUARK_UART_FCR_FIFOE | QUARK_UART_FCR_RFR | QUARK_UART_FCR_TFR);
++ serial_out(priv, UART_FCR, fcr);
++ priv->fcr = fcr;
++#ifdef __QRK_DMA_DEBUG
++ pr_info("%s FCR set to %x\n", __func__, priv->fcr);
++#endif
++ return 0;
++}
++
++static u8 quark_uart_hal_get_modem(struct x1000_port *priv)
+{
++ unsigned int msr = serial_in(priv, UART_MSR);
++ priv->dmsr = msr & QUARK_UART_MSR_DELTA;
++ return (u8)msr;
++}
++
++static void quark_uart_hal_write(struct x1000_port *priv,
++ const unsigned char *buf, int tx_size)
++{
++ int i;
++ unsigned int thr;
++
++ for (i = 0; i < tx_size;) {
++ thr = buf[i++];
++ serial_out(priv, QUARK_UART_THR, thr);
++ }
++}
++
++static int quark_uart_hal_read(struct x1000_port *priv, unsigned char *buf,
++ int rx_size)
++{
++ int i;
++ u8 rbr, lsr;
++ struct uart_port *port = &priv->port;
++
++ lsr = serial_in(priv, UART_LSR);
++ for (i = 0, lsr = serial_in(priv, UART_LSR);
++ i < rx_size && lsr & (UART_LSR_DR | UART_LSR_BI);
++ lsr = serial_in(priv, UART_LSR)) {
++ rbr = serial_in(priv, QUARK_UART_RBR);
++
++ if (lsr & UART_LSR_BI) {
++ port->icount.brk++;
++ if (uart_handle_break(port))
++ continue;
++ }
++#ifdef SUPPORT_SYSRQ
++ if (port->sysrq) {
++ if (uart_handle_sysrq_char(port, rbr))
++ continue;
++ }
++#endif
++
++ buf[i++] = rbr;
++ }
++ return i;
++}
++
++static unsigned char quark_uart_hal_get_iid(struct x1000_port *priv)
++{
++ return serial_in(priv, UART_IIR) &\
++ (QUARK_UART_IIR_IID | QUARK_UART_IIR_TOI | QUARK_UART_IIR_IP);
++}
++
++static u8 quark_uart_hal_get_line_status(struct x1000_port *priv)
++{
++ return serial_in(priv, UART_LSR);
++}
++
++static void quark_uart_hal_set_break(struct x1000_port *priv, int on)
++{
++ unsigned int lcr;
++
++ lcr = serial_in(priv, UART_LCR);
++ if (on)
++ lcr |= QUARK_UART_LCR_SB;
++ else
++ lcr &= ~QUARK_UART_LCR_SB;
++
++ serial_out(priv, UART_LCR, lcr);
++}
++
++static int push_rx(struct x1000_port *priv, const unsigned char *buf,
++ int size)
++{
++ struct uart_port *port = &priv->port;
++ struct tty_struct *tty = tty_port_tty_get(&port->state->port);
++
++ tty_insert_flip_string(tty, buf, size);
++ tty_flip_buffer_push(tty);
++
+ return 0;
+}
+
-+static inline void intel_cln_uart_debugfs_remove(struct intel_cln_uart *intel_cln_uart)
++static int pop_tx_x(struct x1000_port *priv, unsigned char *buf)
+{
++ int ret = 0;
++ struct uart_port *port = &priv->port;
++
++ if (port->x_char) {
++ dev_dbg(priv->port.dev, "%s:X character send %02x (%lu)\n",
++ __func__, port->x_char, jiffies);
++ buf[0] = port->x_char;
++ port->x_char = 0;
++ ret = 1;
++ }
++
++ return ret;
+}
-+#endif /* CONFIG_DEBUG_FS */
+
-+/**
-+ * intel_cln_uart_enable_ms
-+ *
-+ * @param up: pointer to uart port structure
-+ *
-+ * Enable the modem status interrupt
-+ */
-+static void intel_cln_uart_enable_ms(struct uart_port *port)
++static int dma_push_rx(struct x1000_port *priv, int size)
+{
-+ struct intel_cln_uart *up =
-+ container_of(port, struct intel_cln_uart, port);
++ int room;
++ struct uart_port *port = &priv->port;
++ struct tty_struct *tty = tty_port_tty_get(&port->state->port);
++
++ room = tty_buffer_request_room(tty, size);
+
-+ up->ier |= UART_IER_MSI;
-+ serial_out(up, UART_IER, up->ier);
++ if (room < size)
++ dev_warn(port->dev, "Rx overrun: dropping %u bytes\n",
++ size - room);
++ if (!room)
++ return 0;
++
++ tty_insert_flip_string(tty, sg_virt(&priv->sg_rx), size);
++
++ port->icount.rx += room;
++
++ return room;
+}
+
-+/**
-+ * intel_cln_uart_dma_tx_complete
-+ *
-+ * @param arg: Pointer to intel_cln_uart
-+ *
-+ * TX DMA completion callback
-+ */
-+static void intel_cln_uart_dma_tx_complete(void *arg)
++static void quark_free_dma(struct uart_port *port)
+{
-+ struct intel_cln_uart *up = arg;
-+ struct uart_port *port = &up->port;
++ struct x1000_port *priv;
++ priv = container_of(port, struct x1000_port, port);
++
++ if (priv->chan_tx) {
++ dma_release_channel(priv->chan_tx);
++ priv->chan_tx = NULL;
++ }
++ if (priv->chan_rx) {
++ dma_release_channel(priv->chan_rx);
++ priv->chan_rx = NULL;
++ }
++
++ if (priv->rx_buf_dma) {
++ dma_free_coherent(port->dev, port->fifosize, priv->rx_buf_virt,
++ priv->rx_buf_dma);
++ priv->rx_buf_virt = NULL;
++ priv->rx_buf_dma = 0;
++ }
++
++ return;
++}
++
++static bool filter(struct dma_chan *chan, void *slave)
++{
++ #if 0
++ struct quark_dma_slave *param = slave;
++
++ if ((chan->chan_id == param->chan_id) && (param->dma_dev ==
++ chan->device->dev)) {
++ chan->private = param;
++ return true;
++ } else {
++ return false;
++ }
++ #else
++ return true;
++ #endif
++}
++
++static void quark_request_dma(struct uart_port *port)
++{
++ dma_cap_mask_t mask;
++ struct dma_chan *chan;
++ struct pci_dev *dma_dev;
++#if 0
++ struct quark_dma_slave *param;
++#endif
++ struct x1000_port *priv =
++ container_of(port, struct x1000_port, port);
++ dma_cap_zero(mask);
++ dma_cap_set(DMA_SLAVE, mask);
++
++ dma_dev = pci_get_bus_and_slot(priv->pdev->bus->number,
++ PCI_DEVFN(0xa, 0)); /* Get DMA's dev
++ information */
++ /* Set Tx DMA */
++#if 0
++ param = &priv->param_tx;
++ param->dma_dev = &dma_dev->dev;
++ param->chan_id = priv->port.line * 2; /* Tx = 0, 2, 4, ... */
++
++ param->tx_reg = port->mapbase + UART_TX;
++#endif
++ chan = dma_request_channel(mask, filter, &priv->dmas_tx);
++ if (!chan) {
++ dev_err(priv->port.dev, "%s:dma_request_channel FAILS(Tx)\n",
++ __func__);
++ return;
++ }
++ priv->chan_tx = chan;
++#if 0
++ /* Set Rx DMA */
++ param = &priv->param_rx;
++ param->dma_dev = &dma_dev->dev;
++ param->chan_id = priv->port.line * 2 + 1; /* Rx = Tx + 1 */
++
++ param->rx_reg = port->mapbase + UART_RX;
++#endif
++ chan = dma_request_channel(mask, filter, &priv->dmas_rx);
++ if (!chan) {
++ dev_err(priv->port.dev, "%s:dma_request_channel FAILS(Rx)\n",
++ __func__);
++ dma_release_channel(priv->chan_tx);
++ priv->chan_tx = NULL;
++ return;
++ }
++
++ /* Get Consistent memory for DMA */
++ priv->rx_buf_virt = dma_alloc_coherent(port->dev, port->fifosize,
++ &priv->rx_buf_dma, GFP_KERNEL);
++ priv->chan_rx = chan;
++}
++
++static void quark_dma_rx_complete(void *arg)
++{
++ struct x1000_port *priv = arg;
++ struct uart_port *port = &priv->port;
++ struct tty_struct *tty = tty_port_tty_get(&port->state->port);
++ int count;
++
++ dma_sync_sg_for_cpu(port->dev, &priv->sg_rx, 1, DMA_FROM_DEVICE);
++ count = dma_push_rx(priv, priv->trigger_level);
++ if (count)
++ tty_flip_buffer_push(tty);
++ async_tx_ack(priv->desc_rx);
++ quark_uart_hal_enable_interrupt(priv, QUARK_UART_HAL_RX_INT |
++ QUARK_UART_HAL_RX_ERR_INT);
++}
++
++static void quark_dma_tx_complete(void *arg)
++{
++ struct x1000_port *priv = arg;
++ struct uart_port *port = &priv->port;
+ struct circ_buf *xmit = &port->state->xmit;
-+ struct scatterlist *sg = up->sg_tx_p;
++ struct scatterlist *sg = priv->sg_tx_p;
+ int i;
+
-+ for (i = 0; i < up->dma_tx_nent; i++, sg++) {
++ for (i = 0; i < priv->nent; i++, sg++) {
+ xmit->tail += sg_dma_len(sg);
+ port->icount.tx += sg_dma_len(sg);
+ }
+ xmit->tail &= UART_XMIT_SIZE - 1;
-+ async_tx_ack(up->desc_tx);
-+ dma_unmap_sg(port->dev, sg, up->dma_tx_nent, DMA_TO_DEVICE);
-+ up->tx_dma_use = 0;
-+ up->dma_tx_nent = 0;
-+ kfree(up->sg_tx_p);
-+
-+ /* TODO: move to function */
-+ up->ier |= UART_IER_THRI;
-+ serial_out(up, UART_IER, up->ier);
++ async_tx_ack(priv->desc_tx);
++ dma_unmap_sg(port->dev, sg, priv->nent, DMA_TO_DEVICE);
++ priv->tx_dma_use = 0;
++ priv->nent = 0;
++ kfree(priv->sg_tx_p);
++ quark_uart_hal_enable_interrupt(priv, QUARK_UART_HAL_TX_INT);
+}
+
-+static int pop_tx(struct intel_cln_uart *up, int size)
++static int pop_tx(struct x1000_port *priv, int size)
+{
+ int count = 0;
-+ struct uart_port *port = &up->port;
++ struct uart_port *port = &priv->port;
+ struct circ_buf *xmit = &port->state->xmit;
+
+ if (uart_tx_stopped(port) || uart_circ_empty(xmit) || count >= size)
@@ -21666,69 +24516,132 @@ index 0000000..17454d0
+ int cnt_to_end =
+ CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
+ int sz = min(size - count, cnt_to_end);
-+ intel_cln_uart_hal_write(up, &xmit->buf[xmit->tail], sz);
++ quark_uart_hal_write(priv, &xmit->buf[xmit->tail], sz);
+ xmit->tail = (xmit->tail + sz) & (UART_XMIT_SIZE - 1);
+ count += sz;
+ } while (!uart_circ_empty(xmit) && count < size);
+
+pop_tx_end:
-+ dev_dbg(up->port.dev, "%d characters. Remained %d characters.(%lu)\n",
++ dev_dbg(priv->port.dev, "%d characters. Remained %d characters.(%lu)\n",
+ count, size - count, jiffies);
+
+ return count;
+}
+
-+static int pop_tx_x(struct intel_cln_uart *up, unsigned char *buf)
++static int handle_rx_to(struct x1000_port *priv)
+{
-+ int ret = 0;
-+ struct uart_port *port = &up->port;
-+
-+ if (port->x_char) {
-+ dev_dbg(up->port.dev, "%s:X character send %02x (%lu)\n",
-+ __func__, port->x_char, jiffies);
-+ buf[0] = port->x_char;
-+ port->x_char = 0;
-+ ret = 1;
++ struct quark_uart_buffer *buf;
++ int rx_size;
++ int ret;
++ if (!priv->start_rx) {
++ quark_uart_hal_disable_interrupt(priv, QUARK_UART_HAL_RX_INT |
++ QUARK_UART_HAL_RX_ERR_INT);
++ return 0;
+ }
++ buf = &priv->rxbuf;
++ do {
++ rx_size = quark_uart_hal_read(priv, buf->buf, buf->size);
++ ret = push_rx(priv, buf->buf, rx_size);
++ if (ret)
++ return 0;
++ } while (rx_size == buf->size);
+
-+ return ret;
++ return QUARK_UART_HANDLED_RX_INT;
+}
+
-+static int push_rx(struct intel_cln_uart *up, const unsigned char *buf,
-+ int size)
++static int handle_rx(struct x1000_port *priv)
+{
-+ struct uart_port *port;
-+ struct tty_struct *tty;
++ return handle_rx_to(priv);
++}
+
-+ port = &up->port;
-+ tty = tty_port_tty_get(&port->state->port);
-+ if (!tty) {
-+ dev_dbg(up->port.dev, "%s:tty is busy now", __func__);
-+ return -EBUSY;
++static int dma_handle_rx(struct x1000_port *priv)
++{
++ struct uart_port *port = &priv->port;
++ struct dma_async_tx_descriptor *desc;
++ struct scatterlist *sg;
++
++ priv = container_of(port, struct x1000_port, port);
++ sg = &priv->sg_rx;
++
++ sg_init_table(&priv->sg_rx, 1); /* Initialize SG table */
++
++ sg_dma_len(sg) = priv->trigger_level;
++
++ sg_set_page(&priv->sg_rx, virt_to_page(priv->rx_buf_virt),
++ sg_dma_len(sg), (unsigned long)priv->rx_buf_virt &
++ ~PAGE_MASK);
++
++ sg_dma_address(sg) = priv->rx_buf_dma;
++
++ desc = dmaengine_prep_slave_sg(priv->chan_rx,
++ sg, 1, DMA_DEV_TO_MEM,
++ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
++
++ if (!desc)
++ return 0;
++
++ priv->desc_rx = desc;
++ desc->callback = quark_dma_rx_complete;
++ desc->callback_param = priv;
++ desc->tx_submit(desc);
++ dma_async_issue_pending(priv->chan_rx);
++
++ return QUARK_UART_HANDLED_RX_INT;
++}
++
++static unsigned int handle_tx(struct x1000_port *priv)
++{
++ struct uart_port *port = &priv->port;
++ struct circ_buf *xmit = &port->state->xmit;
++ int fifo_size;
++ int tx_size;
++ int size;
++ int tx_empty;
++
++ if (!priv->start_tx) {
++ dev_info(priv->port.dev, "%s:Tx isn't started. (%lu)\n",
++ __func__, jiffies);
++ quark_uart_hal_disable_interrupt(priv, QUARK_UART_HAL_TX_INT);
++ priv->tx_empty = 1;
++ return 0;
+ }
+
-+ tty_insert_flip_string(tty, buf, size);
-+ tty_flip_buffer_push(tty);
-+ tty_kref_put(tty);
++ fifo_size = max(priv->fifo_size, 1);
++ tx_empty = 1;
++ if (pop_tx_x(priv, xmit->buf)) {
++ quark_uart_hal_write(priv, xmit->buf, 1);
++ port->icount.tx++;
++ tx_empty = 0;
++ fifo_size--;
++ }
++ size = min(xmit->head - xmit->tail, fifo_size);
++ if (size < 0)
++ size = fifo_size;
+
-+ return 0;
++ tx_size = pop_tx(priv, size);
++ if (tx_size > 0) {
++ port->icount.tx += tx_size;
++ tx_empty = 0;
++ }
++
++ priv->tx_empty = tx_empty;
++
++ if (tx_empty) {
++ quark_uart_hal_disable_interrupt(priv, QUARK_UART_HAL_TX_INT);
++ uart_write_wakeup(port);
++ }
++
++ return QUARK_UART_HANDLED_TX_INT;
+}
+
-+/**
-+ * intel_cln_uart_dma_tx
-+ *
-+ * @param arg: Pointer to intel_cln_uart
-+ *
-+ * Initiate a TX DMA transaction
-+ */
-+void intel_cln_uart_dma_tx(struct intel_cln_uart *up)
++static unsigned int dma_handle_tx(struct x1000_port *priv)
+{
-+ struct uart_port *port = &up->port;
++ struct uart_port *port = &priv->port;
+ struct circ_buf *xmit = &port->state->xmit;
+ struct scatterlist *sg;
+ int nent;
+ int fifo_size;
-+ //int tx_empty;
++ int tx_empty;
+ struct dma_async_tx_descriptor *desc;
+ int num;
+ int i;
@@ -21736,34 +24649,28 @@ index 0000000..17454d0
+ int size;
+ int rem;
+
-+ if (!up->start_tx) {
-+ dev_info(up->port.dev, "%s:Tx isn't started. (%lu)\n",
++ if (!priv->start_tx) {
++ dev_info(priv->port.dev, "%s:Tx isn't started. (%lu)\n",
+ __func__, jiffies);
-+
-+ /* TODO: move to function */
-+ up->ier &= ~UART_IER_THRI;
-+ serial_out(up, UART_IER, up->ier);
-+
-+ up->tx_empty = 1;
-+ return;
++ quark_uart_hal_disable_interrupt(priv, QUARK_UART_HAL_TX_INT);
++ priv->tx_empty = 1;
++ return 0;
+ }
+
-+ if (up->tx_dma_use) {
-+ dev_dbg(up->port.dev, "%s:Tx is not completed. (%lu)\n",
++ if (priv->tx_dma_use) {
++ dev_dbg(priv->port.dev, "%s:Tx is not completed. (%lu)\n",
+ __func__, jiffies);
-+
-+ /* TODO: move to function */
-+ up->ier &= ~UART_IER_THRI;
-+ serial_out(up, UART_IER, up->ier);
-+
-+ up->tx_empty = 1;
-+ return;
++ quark_uart_hal_disable_interrupt(priv, QUARK_UART_HAL_TX_INT);
++ priv->tx_empty = 1;
++ return 0;
+ }
+
-+ fifo_size = max((int)port->fifosize, 1);
-+ if (pop_tx_x(up, xmit->buf)) {
-+ intel_cln_uart_hal_write(up, xmit->buf, 1);
++ fifo_size = max(priv->fifo_size, 1);
++ tx_empty = 1;
++ if (pop_tx_x(priv, xmit->buf)) {
++ quark_uart_hal_write(priv, xmit->buf, 1);
+ port->icount.tx++;
++ tx_empty = 0;
+ fifo_size--;
+ }
+
@@ -21771,14 +24678,10 @@ index 0000000..17454d0
+ UART_XMIT_SIZE), CIRC_CNT_TO_END(xmit->head,
+ xmit->tail, UART_XMIT_SIZE));
+ if (!bytes) {
-+ dev_dbg(up->port.dev, "%s 0 bytes return\n", __func__);
-+
-+ /* TODO: move to function */
-+ up->ier &= ~UART_IER_THRI;
-+ serial_out(up, UART_IER, up->ier);
-+
++ dev_dbg(priv->port.dev, "%s 0 bytes return\n", __func__);
++ quark_uart_hal_disable_interrupt(priv, QUARK_UART_HAL_TX_INT);
+ uart_write_wakeup(port);
-+ return;
++ return 0;
+ }
+
+ if (bytes > fifo_size) {
@@ -21791,15 +24694,19 @@ index 0000000..17454d0
+ rem = bytes;
+ }
+
-+ dev_dbg(up->port.dev, "%s num=%d size=%d rem=%d\n",
++ dev_dbg(priv->port.dev, "%s num=%d size=%d rem=%d\n",
+ __func__, num, size, rem);
+
-+ up->tx_dma_use = 1;
++ priv->tx_dma_use = 1;
+
-+ up->sg_tx_p = kzalloc(sizeof(struct scatterlist)*num, GFP_ATOMIC);
++ priv->sg_tx_p = kzalloc(sizeof(struct scatterlist)*num, GFP_ATOMIC);
++ if (!priv->sg_tx_p) {
++ dev_err(priv->port.dev, "%s:kzalloc Failed\n", __func__);
++ return 0;
++ }
+
-+ sg_init_table(up->sg_tx_p, num); /* Initialize SG table */
-+ sg = up->sg_tx_p;
++ sg_init_table(priv->sg_tx_p, num); /* Initialize SG table */
++ sg = priv->sg_tx_p;
+
+ for (i = 0; i < num; i++, sg++) {
+ if (i == (num - 1))
@@ -21810,13 +24717,13 @@ index 0000000..17454d0
+ size, fifo_size * i);
+ }
+
-+ sg = up->sg_tx_p;
++ sg = priv->sg_tx_p;
+ nent = dma_map_sg(port->dev, sg, num, DMA_TO_DEVICE);
+ if (!nent) {
-+ dev_err(up->port.dev, "%s:dma_map_sg Failed\n", __func__);
-+ return;
++ dev_err(priv->port.dev, "%s:dma_map_sg Failed\n", __func__);
++ return 0;
+ }
-+ up->dma_tx_nent = nent;
++ priv->nent = nent;
+
+ for (i = 0; i < nent; i++, sg++) {
+ sg->offset = (xmit->tail & (UART_XMIT_SIZE - 1)) +
@@ -21829,1236 +24736,980 @@ index 0000000..17454d0
+ sg_dma_len(sg) = size;
+ }
+
-+ desc = dmaengine_prep_slave_sg(up->tx_chan,
-+ up->sg_tx_p, nent, DMA_MEM_TO_DEV,
++ desc = dmaengine_prep_slave_sg(priv->chan_tx,
++ priv->sg_tx_p, nent, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc) {
-+ dev_err(up->port.dev, "%s:device_prep_slave_sg Failed\n",
++ dev_err(priv->port.dev, "%s:device_prep_slave_sg Failed\n",
+ __func__);
-+ return;
++ return 0;
+ }
-+ dma_sync_sg_for_device(port->dev, up->sg_tx_p, nent, DMA_TO_DEVICE);
-+ up->desc_tx = desc;
-+ desc->callback = intel_cln_uart_dma_tx_complete;
-+ desc->callback_param = up;
++ dma_sync_sg_for_device(port->dev, priv->sg_tx_p, nent, DMA_TO_DEVICE);
++ priv->desc_tx = desc;
++ desc->callback = quark_dma_tx_complete;
++ desc->callback_param = priv;
+
+ desc->tx_submit(desc);
+
-+ dma_async_issue_pending(up->tx_chan);
-+ up->tx_empty = 0;
++ dma_async_issue_pending(priv->chan_tx);
+
-+ return;
++ return QUARK_UART_HANDLED_TX_INT;
+}
+
-+/**
-+ * intel_cln_uart_start_tx
-+ *
-+ * @param arg: Pointer to intel_cln_uart
-+ *
-+ * Enable TX interrupts on the UART @ port
-+ */
-+static void intel_cln_uart_start_tx(struct uart_port *port)
-+{
-+ struct intel_cln_uart *up =
-+ container_of(port, struct intel_cln_uart, port);
-+
-+ up->start_tx = 1;
-+ up->ier |= UART_IER_THRI;
-+ serial_out(up, UART_IER, up->ier);
-+}
-+
-+/**
-+ * intel_cln_uart_stop_tx
-+ *
-+ * @param arg: Pointer to intel_cln_uart
-+ *
-+ * Disable TX interrupts on the UART @ port
-+ */
-+static void intel_cln_uart_stop_tx(struct uart_port *port)
-+{
-+ struct intel_cln_uart *up =
-+ container_of(port, struct intel_cln_uart, port);
-+
-+ up->start_tx = 0;
-+ up->tx_dma_use = 0;
-+ up->ier &= ~UART_IER_THRI;
-+ serial_out(up, UART_IER, up->ier);
-+}
-+
-+/**
-+ * intel_cln_uart_tx
-+ *
-+ * @up: pointer to UART instance
-+ *
-+ * Transmit characters in non-DMA mode
-+ */
-+static void intel_cln_uart_tx(struct intel_cln_uart *up)
++static void quark_uart_err_ir(struct x1000_port *priv, unsigned int lsr)
+{
-+ struct uart_port *port = &up->port;
-+ struct circ_buf *xmit = &port->state->xmit;
-+ int fifo_size;
-+ int tx_size;
-+ int size;
-+ int tx_empty;
-+
-+ if (!up->start_tx) {
-+ dev_info(up->port.dev, "%s:Tx isn't started. (%lu)\n",
-+ __func__, jiffies);
-+
-+ /* TODO: move to function */
-+ up->ier |= UART_IER_THRI;
-+ serial_out(up, UART_IER, up->ier);
-+
-+ up->tx_empty = 1;
-+ return;
-+ }
-+
-+ fifo_size = max((int)port->fifosize, 1);
-+ tx_empty = 1;
-+ if (pop_tx_x(up, xmit->buf)) {
-+ intel_cln_uart_hal_write(up, xmit->buf, 1);
-+ port->icount.tx++;
-+ tx_empty = 0;
-+ fifo_size--;
-+ }
-+ size = min(xmit->head - xmit->tail, fifo_size);
-+ if (size < 0)
-+ size = fifo_size;
-+
-+ tx_size = pop_tx(up, size);
-+ if (tx_size > 0) {
-+ port->icount.tx += tx_size;
-+ tx_empty = 0;
-+ }
-+
-+ up->tx_empty = tx_empty;
++ struct uart_port *port = &priv->port;
++ struct tty_struct *tty = tty_port_tty_get(&port->state->port);
++ char *error_msg[5] = {};
++ int i = 0;
+
-+ if (tx_empty) {
-+ /* TODO: move to function */
-+ up->ier |= UART_IER_THRI;
-+ serial_out(up, UART_IER, up->ier);
++ if (lsr & QUARK_UART_LSR_ERR)
++ error_msg[i++] = "Error data in FIFO\n";
+
-+ uart_write_wakeup(port);
++ if (lsr & UART_LSR_FE) {
++ port->icount.frame++;
++ error_msg[i++] = " Framing Error\n";
+ }
+
-+ return;
-+}
-+
-+/**
-+ * intel_cln_uart_stop_rx
-+ *
-+ * Stop RX on the given UART
-+ */
-+static void intel_cln_uart_stop_rx(struct uart_port *port)
-+{
-+ struct intel_cln_uart *up =
-+ container_of(port, struct intel_cln_uart, port);
-+
-+ up->start_rx = 0;
-+ up->ier &= ~UART_IER_RLSI;
-+ up->port.read_status_mask &= ~UART_LSR_DR;
-+ serial_out(up, UART_IER, up->ier);
-+}
-+
-+/**
-+ * intel_cln_uart_handle_rx_to
-+ *
-+ * For FIFO RX timeout just read the data until nothing else to read
-+ */
-+static int intel_cln_uart_rx_to(struct intel_cln_uart *up)
-+{
-+ struct intel_cln_uart_buffer *buf;
-+ int rx_size;
-+ int ret;
-+
-+ if (!up->start_rx) {
-+ up->ier &= ~UART_IER_RLSI;
-+ up->port.read_status_mask &= ~UART_LSR_DR;
-+ serial_out(up, UART_IER, up->ier);
-+ return 0;
++ if (lsr & UART_LSR_PE) {
++ port->icount.parity++;
++ error_msg[i++] = " Parity Error\n";
+ }
+
-+ buf = &up->rxbuf;
-+ do {
-+ rx_size = intel_cln_uart_hal_read(up, buf->buf_virt, buf->size);
-+ ret = push_rx(up, buf->buf_virt, rx_size);
-+ if (ret)
-+ return 0;
-+ } while (rx_size == buf->size);
-+
-+ return 0;
-+}
-+
-+/**
-+ * intel_cln_uart_dma_push_rx
-+ *
-+ * Take DMA RX data and push into the TTY layer
-+ */
-+static int intel_cln_uart_dma_push_rx(struct intel_cln_uart *up, int size)
-+{
-+ struct tty_struct *tty;
-+ int room;
-+ struct uart_port *port = &up->port;
-+
-+ port = &up->port;
-+ tty = tty_port_tty_get(&port->state->port);
-+ if (!tty) {
-+ dev_dbg(up->port.dev, "%s:tty is busy now", __func__);
-+ return 0;
++ if (lsr & UART_LSR_OE) {
++ port->icount.overrun++;
++ error_msg[i++] = " Overrun Error\n";
+ }
+
-+ room = tty_buffer_request_room(tty, size);
-+
-+ if (room < size)
-+ dev_warn(up->dev, "Rx overrun: dropping %u bytes\n",
-+ size - room);
-+ if (!room)
-+ return room;
-+
-+ tty_insert_flip_string(tty, sg_virt(&up->sg_rx), size);
-+
-+ port->icount.rx += room;
-+ tty_kref_put(tty);
-+
-+ return room;
-+}
-+
-+/**
-+ * intel_cln_uart_dma_rx_complete
-+ *
-+ * Called when a UART RX interrupt happens - initiates a DMA transaction
-+ */
-+static void intel_cln_uart_dma_rx_complete(void *arg)
-+{
-+ struct intel_cln_uart *up = arg;
-+ struct uart_port *port = &up->port;
-+ struct tty_struct *tty = tty_port_tty_get(&port->state->port);
-+ int count;
-+
-+ if (!tty) {
-+ dev_dbg(up->port.dev, "%s:tty is busy now", __func__);
-+ return;
++ if (tty == NULL) {
++ for (i = 0; error_msg[i] != NULL; i++)
++ dev_err(&priv->pdev->dev, error_msg[i]);
++ } else {
++ tty_kref_put(tty);
+ }
-+
-+ dma_sync_sg_for_cpu(up->dev, &up->sg_rx, 1, DMA_FROM_DEVICE);
-+ count = intel_cln_uart_dma_push_rx(up, up->rx_trigger_level);
-+ if (count)
-+ tty_flip_buffer_push(tty);
-+ tty_kref_put(tty);
-+ async_tx_ack(up->desc_rx);
-+}
-+
-+/**
-+ * intel_cln_uart_dma_rx
-+ *
-+ * Called when a UART RX interrupt happens - initiates a DMA transaction
-+ */
-+void intel_cln_uart_dma_rx(struct intel_cln_uart *up)
-+{
-+ struct dma_async_tx_descriptor *desc;
-+
-+ sg_init_table(&up->sg_rx, 1); /* Initialize SG table */
-+
-+ sg_dma_len(&up->sg_rx) = up->rx_trigger_level;
-+
-+ sg_set_page(&up->sg_rx, virt_to_page(up->rxbuf.buf_virt),
-+ sg_dma_len(&up->sg_rx), (unsigned long)up->rxbuf.buf_virt &
-+ ~PAGE_MASK);
-+
-+ sg_dma_address(&up->sg_rx) = up->rxbuf.dma_addr;
-+
-+ desc = dmaengine_prep_slave_sg(up->rx_chan,
-+ &up->sg_rx, 1, DMA_DEV_TO_MEM,
-+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
-+
-+ if (!desc)
-+ return;
-+
-+ up->desc_rx = desc;
-+ desc->callback = intel_cln_uart_dma_rx_complete;
-+ desc->callback_param = up;
-+ desc->tx_submit(desc);
-+ dma_async_issue_pending(up->rx_chan);
-+}
-+
-+/**
-+ * check_modem_status
-+ *
-+ * @param up: pointer to UART descriptor
-+ *
-+ * Check modem status
-+ */
-+static inline void check_modem_status(struct intel_cln_uart *up)
-+{
-+ int status;
-+
-+ status = serial_in(up, UART_MSR);
-+
-+ if ((status & UART_MSR_ANY_DELTA) == 0)
-+ return;
-+
-+ if (status & UART_MSR_TERI)
-+ up->port.icount.rng++;
-+ if (status & UART_MSR_DDSR)
-+ up->port.icount.dsr++;
-+ /* We may only get DDCD when HW init and reset */
-+ if (status & UART_MSR_DDCD)
-+ uart_handle_dcd_change(&up->port, status & UART_MSR_DCD);
-+ /* Will start/stop_tx accordingly */
-+ if (status & UART_MSR_DCTS)
-+ uart_handle_cts_change(&up->port, status & UART_MSR_CTS);
-+
-+ wake_up_interruptible(&up->port.state->port.delta_msr_wait);
+}
+
-+/**
-+ * intel_cln_uart_isr
-+ *
-+ * @param irq: interrupt identifier
-+ * @param dev_id: pointer to the device structure data
-+ *
-+ * This handles the interrupt from one port. And calls into the DMAC interrupt
-+ * handler directly which is what will run our asynchronous tx/rx DMA callbacks
-+ *
-+ */
-+static void intel_cln_uart_err_ir(struct intel_cln_uart *up, unsigned int lsr)
-+{
-+ up->fcr = serial_in(up, UART_FCR);
-+
-+ /* Reset FIFO */
-+ up->fcr |= UART_FCR_CLEAR_RCVR;
-+ serial_out(up, UART_FCR, up->fcr);
-+
-+ if (lsr & UART_LSR_FIFOE)
-+ dev_err(up->port.dev, "Error data in FIFO\n");
-+
-+ if (lsr & UART_LSR_FE)
-+ dev_err(up->port.dev, "Framing Error\n");
-+
-+ if (lsr & UART_LSR_PE)
-+ dev_err(up->port.dev, "Parity Error\n");
-+
-+ if (lsr & UART_LSR_OE)
-+ dev_err(up->port.dev, "Overrun Error\n");
-+}
++#if defined(CONFIG_INTEL_QUARK_X1000_SOC)
++ #define mask_pvm(x) cln_pci_pvm_mask(x)
++ #define unmask_pvm(x) cln_pci_pvm_unmask(x)
++#else
++ #define mask_pvm(x)
++ #define unmask_pvm(x)
++#endif
+
-+/**
-+ * intel_cln_uart_isr
-+ *
-+ * @param irq: interrupt identifier
-+ * @param dev_id: pointer to the device structure data
-+ *
-+ * This handles the interrupt from one port. And calls into the DMAC interrupt
-+ * handler directly which is what will run our asynchronous tx/rx DMA callbacks
-+ *
-+ */
-+static irqreturn_t intel_cln_uart_isr(int irq, void *dev_id)
++static irqreturn_t quark_uart_interrupt(int irq, void *dev_id)
+{
-+ struct intel_cln_uart *up = dev_id;
-+ unsigned int iid = 0, lsr, ret = IRQ_HANDLED;
++ struct x1000_port *priv = dev_id;
++ unsigned int handled;
++ u8 lsr;
++ int ret = 0;
++ unsigned char iid;
+ unsigned long flags;
-+
-+ if(likely(up->mode & CLN_UART_MODE_MSI)){
-+ /* TODO: see about moving this to the IO/APIC layer */
-+ }
-+
-+ spin_lock_irqsave(&up->port.lock, flags);
-+
-+ if (up->mode & CLN_UART_MODE_DMA) {
-+ /* Run the ISR for the DMA directly */
-+ intel_mid_dma_interrupt(irq, dev_id);
-+ }
-+
-+ while ((iid = serial_in(up, UART_IIR)) > 1) {
-+
++ int next = 1;
++ u8 msr;
++
++ spin_lock_irqsave(&priv->lock, flags);
++ handled = 0;
++ while (next) {
++ iid = quark_uart_hal_get_iid(priv);
++ if (iid & QUARK_UART_IIR_IP) /* No Interrupt */
++ break;
+ switch (iid) {
-+ case INTEL_CLN_UART_IIR_RLS:
-+ /* Receiver Line Status */
-+ lsr = serial_in(up, UART_LSR);
-+ if (lsr & (UART_LSR_FIFOE | UART_LSR_FE |
++ case QUARK_UART_IID_RLS: /* Receiver Line Status */
++ lsr = quark_uart_hal_get_line_status(priv);
++ if (lsr & (QUARK_UART_LSR_ERR | UART_LSR_FE |
+ UART_LSR_PE | UART_LSR_OE)) {
-+ intel_cln_uart_err_ir(up, lsr);
++ quark_uart_err_ir(priv, lsr);
++ ret = QUARK_UART_HANDLED_RX_ERR_INT;
++ } else {
++ ret = QUARK_UART_HANDLED_LS_INT;
+ }
+ break;
-+ case INTEL_CLN_UART_IIR_RXD:
-+ /* Received Data Ready */
-+ if(up->mode & CLN_UART_MODE_DMA){
-+ intel_cln_uart_dma_rx(up);
-+ }else{
-+ intel_cln_uart_rx_to(up);
++ case QUARK_UART_IID_RDR: /* Received Data Ready */
++ if (priv->use_dma) {
++ quark_uart_hal_disable_interrupt(priv,
++ QUARK_UART_HAL_RX_INT |
++ QUARK_UART_HAL_RX_ERR_INT);
++ ret = dma_handle_rx(priv);
++ if (!ret)
++ quark_uart_hal_enable_interrupt(priv,
++ QUARK_UART_HAL_RX_INT |
++ QUARK_UART_HAL_RX_ERR_INT);
++ } else {
++ ret = handle_rx(priv);
+ }
+ break;
-+ case INTEL_CLN_UART_IIR_TO:
-+ /* Received Data Ready (FIFO Timeout) */
-+ intel_cln_uart_rx_to(up);
++ case QUARK_UART_IID_RDR_TO: /* Received Data Ready
++ (FIFO Timeout) */
++ ret = handle_rx_to(priv);
+ break;
-+ case INTEL_CLN_UART_IIR_THRE:
-+ /* Transmitter Holding Register Empty */
-+ if(up->mode & CLN_UART_MODE_DMA){
-+ intel_cln_uart_dma_tx(up);
-+ }else{
-+ intel_cln_uart_tx(up);
-+ }
++ case QUARK_UART_IID_THRE: /* Transmitter Holding Register
++ Empty */
++ if (priv->use_dma)
++
++ ret = dma_handle_tx(priv);
++ else
++ ret = handle_tx(priv);
+ break;
-+ default:
-+ /* Never junp to this label */
-+ dev_err(up->port.dev, "%s:iid=%d (%lu)\n", __func__,
++ case QUARK_UART_IID_MS: /* Modem Status */
++ msr = quark_uart_hal_get_modem(priv);
++ next = 0; /* MS ir priority is the lowest. So, MS ir
++ means final interrupt */
++ if ((msr & UART_MSR_ANY_DELTA) == 0)
++ break;
++ ret |= QUARK_UART_HANDLED_MS_INT;
++ break;
++ default: /* Never jump to this label */
++ dev_err(priv->port.dev, "%s:iid=%02x (%lu)\n", __func__,
+ iid, jiffies);
+ ret = -1;
++ next = 0;
+ break;
+ }
++ handled |= (unsigned int)ret;
+ }
+
-+ check_modem_status(up);
-+
-+ spin_unlock_irqrestore(&up->port.lock, flags);
++ spin_unlock_irqrestore(&priv->lock, flags);
+
-+ if(likely(up->mode & CLN_UART_MODE_MSI)){
-+ /* TODO: see about moving this to the IO/APIC layer */
-+ }
-+
-+ return ret;
++ return IRQ_RETVAL(handled);
+}
+
-+static unsigned int intel_cln_uart_tx_empty(struct uart_port *port)
++/* This function tests whether the transmitter fifo and shifter for the port
++ described by 'port' is empty. */
++static unsigned int quark_uart_tx_empty(struct uart_port *port)
+{
-+ struct intel_cln_uart *up =
-+ container_of(port, struct intel_cln_uart, port);
-+ unsigned long flags;
-+ unsigned int ret;
-+
-+ spin_lock_irqsave(&up->port.lock, flags);
-+ ret = up->tx_empty;
-+ spin_unlock_irqrestore(&up->port.lock, flags);
++ struct x1000_port *priv;
+
-+ return ret;
++ priv = container_of(port, struct x1000_port, port);
++ if (priv->tx_empty)
++ return TIOCSER_TEMT;
++ else
++ return 0;
+}
+
-+static unsigned int intel_cln_uart_get_mctrl(struct uart_port *port)
++/* Returns the current state of modem control inputs. */
++static unsigned int quark_uart_get_mctrl(struct uart_port *port)
+{
-+ struct intel_cln_uart *up =
-+ container_of(port, struct intel_cln_uart, port);
-+ unsigned char status;
-+ unsigned int ret;
++ struct x1000_port *priv;
++ u8 modem;
++ unsigned int ret = 0;
+
-+ status = serial_in(up, UART_MSR);
++ priv = container_of(port, struct x1000_port, port);
++ modem = quark_uart_hal_get_modem(priv);
+
-+ ret = 0;
-+ if (status & UART_MSR_DCD)
++ if (modem & UART_MSR_DCD)
+ ret |= TIOCM_CAR;
-+ if (status & UART_MSR_RI)
++
++ if (modem & UART_MSR_RI)
+ ret |= TIOCM_RNG;
-+ if (status & UART_MSR_DSR)
++
++ if (modem & UART_MSR_DSR)
+ ret |= TIOCM_DSR;
-+ if (status & UART_MSR_CTS)
++
++ if (modem & UART_MSR_CTS)
+ ret |= TIOCM_CTS;
++
+ return ret;
+}
+
-+static void intel_cln_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
++static void quark_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
-+ struct intel_cln_uart *up =
-+ container_of(port, struct intel_cln_uart, port);
-+ unsigned char mcr = 0;
++ u32 mcr = 0;
++ struct x1000_port *priv = container_of(port, struct x1000_port, port);
+
-+ if (mctrl & TIOCM_RTS)
-+ mcr |= UART_MCR_RTS;
+ if (mctrl & TIOCM_DTR)
+ mcr |= UART_MCR_DTR;
-+ if (mctrl & TIOCM_OUT1)
-+ mcr |= UART_MCR_OUT1;
-+ if (mctrl & TIOCM_OUT2)
-+ mcr |= UART_MCR_OUT2;
++ if (mctrl & TIOCM_RTS)
++ mcr |= UART_MCR_RTS;
+ if (mctrl & TIOCM_LOOP)
+ mcr |= UART_MCR_LOOP;
+
-+ mcr |= up->mcr;
++ if (priv->mcr & UART_MCR_AFE)
++ mcr |= UART_MCR_AFE;
+
-+ serial_out(up, UART_MCR, mcr);
++ if (mctrl)
++ serial_out(priv, UART_MCR, mcr);
+}
+
-+static void intel_cln_uart_break_ctl(struct uart_port *port, int break_state)
++static void quark_uart_stop_tx(struct uart_port *port)
+{
-+ struct intel_cln_uart *up =
-+ container_of(port, struct intel_cln_uart, port);
-+ unsigned long flags;
++ struct x1000_port *priv;
++ priv = container_of(port, struct x1000_port, port);
++ priv->start_tx = 0;
++ priv->tx_dma_use = 0;
++}
+
-+ pr_info("%s entry\n", __FUNCTION__);
++static void quark_uart_start_tx(struct uart_port *port)
++{
++ struct x1000_port *priv;
+
-+ spin_lock_irqsave(&up->port.lock, flags);
-+ if (break_state == -1)
-+ up->lcr |= UART_LCR_SBC;
-+ else
-+ up->lcr &= ~UART_LCR_SBC;
-+ serial_out(up, UART_LCR, up->lcr);
-+ spin_unlock_irqrestore(&up->port.lock, flags);
++ priv = container_of(port, struct x1000_port, port);
++
++ if (priv->use_dma) {
++ if (priv->tx_dma_use) {
++ dev_dbg(priv->port.dev, "%s : Tx DMA is NOT empty.\n",
++ __func__);
++ return;
++ }
++ }
++
++#ifdef __QRK_DMA_DEBUG
++ unsigned char iid = quark_uart_hal_get_iid(priv);
++ pr_info("%s enable interrupt IER %x FCR %x iid %x\n", __func__, serial_in(priv, UART_IER),
++ serial_in(priv, UART_FCR), iid);
++#endif
++ priv->start_tx = 1;
++ quark_uart_hal_enable_interrupt(priv, QUARK_UART_HAL_TX_INT);
+}
+
-+/**
-+ * intel_cln_uart_startup
-+ *
-+ * @port: Pointer to the uart port to be started
-+ *
-+ */
-+static int intel_cln_uart_startup(struct uart_port *port)
++static void quark_uart_stop_rx(struct uart_port *port)
+{
-+ struct intel_cln_uart *up =
-+ container_of(port, struct intel_cln_uart, port);
-+ unsigned long flags;
-+
-+ pr_info("%s entry\n", __FUNCTION__);
++ struct x1000_port *priv;
++
++ priv = container_of(port, struct x1000_port, port);
++ priv->start_rx = 0;
++#ifdef __QRK_DMA_DEBUG
++ unsigned char iid;
++ iid = quark_uart_hal_get_iid(priv);
++ pr_info("%s IID is 0x%x USR 0x%x LSR 0x%x MSR 0x%x\n", __func__, iid, serial_in(priv,31), serial_in(priv, UART_LSR), serial_in(priv, UART_MSR));
++#endif
++ quark_uart_hal_disable_interrupt(priv, QUARK_UART_HAL_RX_INT |
++ QUARK_UART_HAL_RX_ERR_INT);
++}
+
-+ /*
-+ * Clear the FIFO buffers and disable them.
-+ * (they will be reenabled in set_termios())
-+ */
-+ serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO);
-+ serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO |
-+ UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
-+ serial_out(up, UART_FCR, 0);
++/* Enable the modem status interrupts. */
++static void quark_uart_enable_ms(struct uart_port *port)
++{
++ struct x1000_port *priv;
++ priv = container_of(port, struct x1000_port, port);
++ quark_uart_hal_enable_interrupt(priv, QUARK_UART_HAL_MS_INT);
++}
+
-+ /* Clear the interrupt registers. */
-+ (void) serial_in(up, UART_LSR);
-+ (void) serial_in(up, UART_RX);
-+ (void) serial_in(up, UART_IIR);
-+ (void) serial_in(up, UART_MSR);
++/* Control the transmission of a break signal. */
++static void quark_uart_break_ctl(struct uart_port *port, int ctl)
++{
++ struct x1000_port *priv;
++ unsigned long flags;
+
-+ /* Now, initialize the UART, default is 8n1 */
-+ serial_out(up, UART_LCR, UART_LCR_WLEN8);
++ priv = container_of(port, struct x1000_port, port);
++ spin_lock_irqsave(&priv->lock, flags);
++ quark_uart_hal_set_break(priv, ctl);
++ spin_unlock_irqrestore(&priv->lock, flags);
++}
+
-+ spin_lock_irqsave(&up->port.lock, flags);
++/* Grab any interrupt resources and initialise any low level driver state. */
++static int quark_uart_startup(struct uart_port *port)
++{
++ struct x1000_port *priv;
++ int ret;
++ int fifo_size;
++ int trigger_level;
+
-+ up->port.mctrl |= TIOCM_OUT2;
-+ intel_cln_uart_set_mctrl(&up->port, up->port.mctrl);
++ priv = container_of(port, struct x1000_port, port);
++ priv->tx_empty = 1;
+
-+ /*
-+ * Finally, enable interrupts. Note: Modem status interrupts
-+ * are set via set_termios(), which will be occurring imminently
-+ * anyway, so we don't enable them here.
-+ */
-+ if (!(up->mode & CLN_UART_MODE_DMA))
-+ up->ier = UART_IER_RLSI | UART_IER_RDI | UART_IER_RTOIE;
++ if (port->uartclk)
++ priv->uartclk = port->uartclk;
+ else
-+ up->ier = 0;
-+ serial_out(up, UART_IER, up->ier);
++ port->uartclk = priv->uartclk;
++#ifdef __QRK_DMA_DEBUG
++ pr_info("%s entry fifo size %d!\n", __func__, priv->fifo_size);
++#endif
++ quark_uart_hal_disable_interrupt(priv, QUARK_UART_HAL_ALL_INT);
++ ret = quark_uart_hal_set_line(priv, default_baud,
++ QUARK_UART_HAL_PARITY_NONE, QUARK_UART_HAL_8BIT,
++ QUARK_UART_HAL_STB1);
+
-+ /* And clear the interrupt registers again for luck. */
-+ (void) serial_in(up, UART_LSR);
-+ (void) serial_in(up, UART_RX);
-+ (void) serial_in(up, UART_IIR);
-+ (void) serial_in(up, UART_MSR);
++ if (ret)
++ return ret;
+
-+ up->start_rx = 1;
++ switch (priv->fifo_size) {
++ case 256:
++ fifo_size = QUARK_UART_HAL_FIFO256;
++ break;
++ case 64:
++ fifo_size = QUARK_UART_HAL_FIFO64;
++ break;
++ case 16:
++ fifo_size = QUARK_UART_HAL_FIFO16;
++ break;
++ case 1:
++ default:
++ fifo_size = QUARK_UART_HAL_FIFO_DIS;
++ break;
++ }
++
++ switch (priv->trigger) {
++ case QUARK_UART_HAL_TRIGGER1:
++ trigger_level = 1;
++ break;
++ case QUARK_UART_HAL_TRIGGER_L:
++ trigger_level = priv->fifo_size / 4;
++ break;
++ case QUARK_UART_HAL_TRIGGER_M:
++ trigger_level = priv->fifo_size / 2;
++ break;
++ case QUARK_UART_HAL_TRIGGER_H:
++ default:
++ trigger_level = priv->fifo_size - (priv->fifo_size / 8);
++ break;
++ }
+
-+ /* Coarse locking */
-+ spin_unlock_irqrestore(&up->port.lock, flags);
++ priv->trigger_level = trigger_level;
++#ifdef __QRK_DMA_DEBUG
++ pr_info("%s setting FCR fifo_size %d FIFO trig %d\n", __func__, fifo_size, priv->trigger);
++#endif
++ ret = quark_uart_hal_set_fifo(priv, QUARK_UART_HAL_DMA_MODE0,
++ fifo_size, priv->trigger);
++ if (ret < 0)
++ return ret;
++
++ if (priv->use_dma)
++ quark_request_dma(port);
+
++#ifdef __QRK_DMA_DEBUG
++ pr_info("%s enable interrupt IER %x FCR %x USR %x\n", __func__, serial_in(priv, UART_IER),
++ serial_in(priv, UART_FCR), serial_in(priv, 31));
++#endif
++ priv->start_rx = 1;
++ quark_uart_hal_enable_interrupt(priv, QUARK_UART_HAL_RX_INT |
++ QUARK_UART_HAL_RX_ERR_INT);
++ uart_update_timeout(port, CS8, default_baud);
++#ifdef __QRK_DMA_DEBUG
++ pr_info("%s exit IER %x FCR %x USR %x\n", __func__, serial_in(priv, UART_IER), serial_in(priv, UART_FCR), serial_in(priv, 31));
++#endif
+ return 0;
+}
+
-+static void intel_cln_uart_shutdown(struct uart_port *port)
++static void quark_uart_shutdown(struct uart_port *port)
+{
-+ struct intel_cln_uart *up =
-+ container_of(port, struct intel_cln_uart, port);
-+ unsigned long flags;
-+
-+ pr_info("%s entry\n", __FUNCTION__);
-+
-+ /* Disable interrupts from this port */
-+ up->ier = 0;
-+ up->start_tx = up->start_rx = 0;
-+ serial_out(up, UART_IER, 0);
-+
-+ spin_lock_irqsave(&up->port.lock, flags);
-+ up->port.mctrl &= ~TIOCM_OUT2;
-+ intel_cln_uart_set_mctrl(&up->port, up->port.mctrl);
-+ spin_unlock_irqrestore(&up->port.lock, flags);
-+
-+ /* Disable break condition and FIFOs */
-+ serial_out(up, UART_LCR, serial_in(up, UART_LCR) & ~UART_LCR_SBC);
-+ serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO |
-+ UART_FCR_CLEAR_RCVR |
-+ UART_FCR_CLEAR_XMIT);
-+ serial_out(up, UART_FCR, 0);
-+
-+ /* Unmap DMA */
-+ if (up->mode & CLN_UART_MODE_DMA) {
-+ dma_unmap_single(port->dev, up->txbuf.dma_addr,
-+ UART_XMIT_SIZE, DMA_TO_DEVICE);
++ struct x1000_port *priv;
++ int ret;
+
-+ dma_unmap_single(port->dev, up->rxbuf.dma_addr,
-+ CLN_UART_DMA_BUF_SIZE, DMA_FROM_DEVICE);
-+ }
++#ifdef __QRK_DMA_DEBUG
++ pr_info("%s called!\n", __func__);
++#endif
++ priv = container_of(port, struct x1000_port, port);
++ quark_uart_hal_disable_interrupt(priv, QUARK_UART_HAL_ALL_INT);
++ quark_uart_hal_fifo_reset(priv, QUARK_UART_HAL_CLR_ALL_FIFO);
++ ret = quark_uart_hal_set_fifo(priv, QUARK_UART_HAL_DMA_MODE0,
++ QUARK_UART_HAL_FIFO_DIS, QUARK_UART_HAL_TRIGGER1);
++ if (ret)
++ dev_err(priv->port.dev,
++ "quark_uart_hal_set_fifo Failed(ret=%d)\n", ret);
+
++ quark_free_dma(port);
+}
+
-+/**
-+ * intel_cln_uart_set_termios
-+ *
-+ * @param port: Pointer to UART structure
-+ * @termios: Pointer to termios control structure
-+ * @old: Pointer to old termios structure
-+ *
-+ * Set the UART into the mode specified by the termios structure
-+ */
-+static void
-+intel_cln_uart_set_termios(struct uart_port *port, struct ktermios *termios,
-+ struct ktermios *old)
++/* Change the port parameters, including word length, parity, stop
++ * bits. Update read_status_mask and ignore_status_mask to indicate
++ * the types of events we are interested in receiving. */
++static void quark_uart_set_termios(struct uart_port *port,
++ struct ktermios *termios, struct ktermios *old)
+{
-+ struct intel_cln_uart *up =
-+ container_of(port, struct intel_cln_uart, port);
-+ unsigned char cval;
++ int rtn;
++ unsigned int baud, parity, bits, stb;
++ struct x1000_port *priv;
+ unsigned long flags;
-+ unsigned int baud, quot;
-+// int div; TODO: on hardware
-+
-+ pr_info("%s up %p port %p termios %p ktermios %p\n",
-+ __FUNCTION__, up, port, termios, old);
+
++ priv = container_of(port, struct x1000_port, port);
+ switch (termios->c_cflag & CSIZE) {
+ case CS5:
-+ cval = UART_LCR_WLEN5;
++ bits = QUARK_UART_HAL_5BIT;
+ break;
+ case CS6:
-+ cval = UART_LCR_WLEN6;
++ bits = QUARK_UART_HAL_6BIT;
+ break;
+ case CS7:
-+ cval = UART_LCR_WLEN7;
++ bits = QUARK_UART_HAL_7BIT;
+ break;
-+ default:
-+ case CS8:
-+ cval = UART_LCR_WLEN8;
++ default: /* CS8 */
++ bits = QUARK_UART_HAL_8BIT;
+ break;
+ }
-+
+ if (termios->c_cflag & CSTOPB)
-+ cval |= UART_LCR_STOP;
-+ if (termios->c_cflag & PARENB)
-+ cval |= UART_LCR_PARITY;
-+ if (!(termios->c_cflag & PARODD))
-+ cval |= UART_LCR_EPAR;
++ stb = QUARK_UART_HAL_STB2;
++ else
++ stb = QUARK_UART_HAL_STB1;
+
-+ termios->c_cflag &= ~CMSPAR; /* Mark/Space parity is not supported */
++ if (termios->c_cflag & PARENB) {
++ if (termios->c_cflag & PARODD)
++ parity = QUARK_UART_HAL_PARITY_ODD;
++ else
++ parity = QUARK_UART_HAL_PARITY_EVEN;
+
-+ /*
-+ * Ask the core to calculate the divisor for us.
-+ */
-+ baud = uart_get_baud_rate(port, termios, old,
-+ port->uartclk / 16 / 0xffff,
-+ port->uartclk / 16);
-+ quot = uart_get_divisor(port, baud);
++ } else
++ parity = QUARK_UART_HAL_PARITY_NONE;
+
-+ pr_info("%s resulting baud rate was %d\n", __FUNCTION__, baud);
++ /* Only UART0 has auto hardware flow function */
++ if ((termios->c_cflag & CRTSCTS) && (priv->fifo_size == 256))
++ priv->mcr |= UART_MCR_AFE;
++ else
++ priv->mcr &= ~UART_MCR_AFE;
+
-+ /* Init to FIFO enabled mode - RX-trig (FIFO-2) TX-trig TX-trig (FIFO/2) */
-+ up->fcr = UART_FCR_ENABLE_FIFO | UART_FCR_T_TRIG_11 | UART_FCR_R_TRIG_11;
-+ if (up->mode & CLN_UART_MODE_DMA)
-+ up->fcr |= UART_FCR_DMA_SELECT;
++ termios->c_cflag &= ~CMSPAR; /* Mark/Space parity is not supported */
+
-+ up->rx_trigger_level = up->port.fifosize-2;
-+ up->tx_trigger_level = up->port.fifosize/2;
++ baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16);
+
-+ /*
-+ * Ok, we're now changing the port state. Do it with
-+ * interrupts disabled.
-+ */
-+ spin_lock_irqsave(&up->port.lock, flags);
++ spin_lock_irqsave(&priv->lock, flags);
++ spin_lock(&port->lock);
+
-+ /* Update the per-port timeout */
+ uart_update_timeout(port, termios->c_cflag, baud);
++ rtn = quark_uart_hal_set_line(priv, baud, parity, bits, stb);
++ if (rtn)
++ goto out;
+
-+ up->port.read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
-+ if (termios->c_iflag & INPCK)
-+ up->port.read_status_mask |= UART_LSR_FE | UART_LSR_PE;
-+ if (termios->c_iflag & (BRKINT | PARMRK))
-+ up->port.read_status_mask |= UART_LSR_BI;
-+
-+ /* Characters to ignore */
-+ up->port.ignore_status_mask = 0;
-+ if (termios->c_iflag & IGNPAR)
-+ up->port.ignore_status_mask |= UART_LSR_PE | UART_LSR_FE;
-+ if (termios->c_iflag & IGNBRK) {
-+ up->port.ignore_status_mask |= UART_LSR_BI;
-+ /*
-+ * If we're ignoring parity and break indicators,
-+ * ignore overruns too (for real raw support).
-+ */
-+ if (termios->c_iflag & IGNPAR)
-+ up->port.ignore_status_mask |= UART_LSR_OE;
-+ }
-+
-+ /* Ignore all characters if CREAD is not set */
-+ if ((termios->c_cflag & CREAD) == 0)
-+ up->port.ignore_status_mask |= UART_LSR_DR;
-+
-+ /*
-+ * CTS flow control flag and modem status interrupts, disable
-+ * MSI by default
-+ */
-+ up->ier &= ~UART_IER_MSI;
-+ if (UART_ENABLE_MS(&up->port, termios->c_cflag))
-+ up->ier |= UART_IER_MSI;
-+
-+ if (termios->c_cflag & CRTSCTS)
-+ up->mcr |= UART_MCR_AFE | UART_MCR_RTS;
-+ else
-+ up->mcr &= ~UART_MCR_AFE;
-+
-+ serial_out(up, UART_LCR, cval | UART_LCR_DLAB); /* set DLAB */
-+ serial_out(up, UART_DLL, quot & 0xff); /* LS of divisor */
-+ serial_out(up, UART_DLM, quot >> 8); /* MS of divisor */
-+ serial_out(up, UART_LCR, cval); /* reset DLAB */
-+ up->lcr = cval; /* Save LCR */
-+
-+ intel_cln_uart_set_mctrl(&up->port, up->port.mctrl);
-+ up->fcr = 0;
-+ serial_out(up, UART_FCR, up->fcr);
-+
-+ /* Set IER state */
-+ serial_out(up, UART_IER, up->ier);
++ quark_uart_set_mctrl(&priv->port, priv->port.mctrl);
++ /* Don't rewrite B0 */
++ if (tty_termios_baud_rate(termios))
++ tty_termios_encode_baud_rate(termios, baud, baud);
+
-+ /* Unlock spinlock */
-+ spin_unlock_irqrestore(&up->port.lock, flags);
++out:
++ spin_unlock(&port->lock);
++ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
-+static void
-+intel_cln_uart_pm(struct uart_port *port, unsigned int state,
-+ unsigned int oldstate)
++static const char *quark_uart_type(struct uart_port *port)
+{
++ return KBUILD_MODNAME;
+}
+
-+static void intel_cln_uart_release_port(struct uart_port *port)
++static void quark_uart_release_port(struct uart_port *port)
+{
++ struct x1000_port *priv;
++
++ priv = container_of(port, struct x1000_port, port);
++ pci_iounmap(priv->pdev, priv->membase);
++ pci_release_regions(priv->pdev);
+}
+
-+static int intel_cln_uart_request_port(struct uart_port *port)
++static int quark_uart_request_port(struct uart_port *port)
+{
++#if 0
++ struct x1000_port *priv;
++ int ret;
++ void __iomem *membase;
++
++ priv = container_of(port, struct x1000_port, port);
++ ret = pci_request_regions(priv->pdev, KBUILD_MODNAME);
++ if (ret < 0)
++ return -EBUSY;
++
++ membase = pci_iomap(priv->pdev, 1, 0);
++ if (!membase) {
++ pci_release_regions(priv->pdev);
++ return -EBUSY;
++ }
++ priv->membase = port->membase = membase;
++#endif
+ return 0;
+}
+
-+static void intel_cln_uart_config_port(struct uart_port *port, int flags)
++static void quark_uart_config_port(struct uart_port *port, int type)
+{
-+ struct intel_cln_uart *up =
-+ container_of(port, struct intel_cln_uart, port);
-+ up->port.type = PORT_MFD;
++ struct x1000_port *priv;
++
++#ifdef __QRK_DMA_DEBUG
++ pr_info("%s entry!\n", __func__);
++#endif
++ priv = container_of(port, struct x1000_port, port);
++ if (type & UART_CONFIG_TYPE) {
++ port->type = priv->port_type;
++ quark_uart_request_port(port);
++ }
+}
+
-+/**
-+ * intel_cln_uart_verify_port
-+ *
-+ * @param port: Pointer to UART descriptor
-+ * @param ser: Serail configuration structure
-+ *
-+ * Sets the port into hi-speed/lo-speed mode
-+ */
-+static int
-+intel_cln_uart_verify_port(struct uart_port *port, struct serial_struct *ser)
++static int quark_uart_verify_port(struct uart_port *port,
++ struct serial_struct *serinfo)
+{
-+ struct intel_cln_uart *up =
-+ container_of(port, struct intel_cln_uart, port);
-+
-+ if (ser->flags & UPF_LOW_LATENCY) {
-+ dev_info(up->port.dev,
-+ "CLN UART : Use PIO Mode (without DMA)\n");
-+ up->mode &= ~CLN_UART_MODE_DMA;
-+ ser->flags &= ~UPF_LOW_LATENCY;
++ struct x1000_port *priv;
++#ifdef __QRK_DMA_DEBUG
++ pr_info("%s entry point !\n", __func__);
++#endif
++ priv = container_of(port, struct x1000_port, port);
++ if (serinfo->flags & UPF_LOW_LATENCY) {
++ dev_info(priv->port.dev,
++ "QUARK UART : Use PIO Mode (without DMA)\n");
++ priv->use_dma = 0;
++ serinfo->flags &= ~UPF_LOW_LATENCY;
+ } else {
-+ up->mode |= CLN_UART_MODE_DMA;
-+ dev_info(up->port.dev, "CLN UART : Use DMA Mode\n");
++#ifndef CONFIG_QUARK_DMA
++ dev_err(priv->port.dev, "%s : QUARK DMA is not Loaded.\n",
++ __func__);
++ return -EOPNOTSUPP;
++#endif
++ dev_info(priv->port.dev, "QUARK UART : Use DMA Mode\n");
++ if (!priv->use_dma)
++ quark_request_dma(port);
++ priv->use_dma = 1;
+ }
+
+ return 0;
+}
+
-+/**
-+ * intel_cln_uart_type
-+ *
-+ * @param port: Pointer to UART descriptor
-+ *
-+ * Returns the type of the port
++#if defined(CONFIG_CONSOLE_POLL) || defined(CONFIG_SERIAL_QUARK_UART_CONSOLE)
++/*
++ * Wait for transmitter & holding register to empty
+ */
-+static const char *
-+intel_cln_uart_type(struct uart_port *port)
++static void wait_for_xmitr(struct x1000_port *up, int bits)
+{
-+ struct intel_cln_uart *up =
-+ container_of(port, struct intel_cln_uart, port);
-+ return up->name;
-+}
-+
-+/* Mainly for uart console use */
-+static struct uart_driver intel_cln_uart_driver;
-+
-+#ifdef CONFIG_INTEL_CLN_UART_CONSOLE
-+
-+static struct intel_cln_uart *intel_cln_uart_ports[2];
-+#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
-+
-+/* Wait for transmitter & holding register to empty */
-+static inline void wait_for_xmitr(struct intel_cln_uart *up)
-+{
-+ unsigned int status, tmout = 1000;
++ unsigned int status, tmout = 10000;
+
-+ /* Wait up to 1ms for the character to be sent. */
-+ do {
++ /* Wait up to 10ms for the character(s) to be sent. */
++ for (;;) {
+ status = serial_in(up, UART_LSR);
+
-+ if (status & UART_LSR_BI)
-+ up->lsr_break_flag = UART_LSR_BI;
-+
++ if ((status & bits) == bits)
++ break;
+ if (--tmout == 0)
+ break;
+ udelay(1);
-+ } while (!(status & BOTH_EMPTY));
++ }
+
+ /* Wait up to 1s for flow control if necessary */
+ if (up->port.flags & UPF_CONS_FLOW) {
-+ tmout = 1000000;
-+ while (--tmout &&
-+ ((serial_in(up, UART_MSR) & UART_MSR_CTS) == 0))
++ unsigned int tmout;
++ for (tmout = 1000000; tmout; tmout--) {
++ unsigned int msr = serial_in(up, UART_MSR);
++ if (msr & UART_MSR_CTS)
++ break;
+ udelay(1);
++ touch_nmi_watchdog();
++ }
+ }
+}
++#endif /* CONFIG_CONSOLE_POLL || CONFIG_SERIAL_QUARK_UART_CONSOLE */
++
++#ifdef CONFIG_CONSOLE_POLL
++/*
++ * Console polling routines for communicate via uart while
++ * in an interrupt or debug context.
++ */
++static int quark_uart_get_poll_char(struct uart_port *port)
++{
++ struct x1000_port *priv =
++ container_of(port, struct x1000_port, port);
++ u8 lsr = serial_in(priv, UART_LSR);
++
++ if (!(lsr & UART_LSR_DR))
++ return NO_POLL_CHAR;
++
++ return serial_in(priv, QUARK_UART_RBR);
++}
++
++
++static void quark_uart_put_poll_char(struct uart_port *port,
++ unsigned char c)
++{
++ unsigned int ier;
++ struct x1000_port *priv =
++ container_of(port, struct x1000_port, port);
++
++ /*
++ * First save the IER then disable the interrupts
++ */
++ ier = serial_in(priv, UART_IER);
++ quark_uart_hal_disable_interrupt(priv, QUARK_UART_HAL_ALL_INT);
++
++ wait_for_xmitr(priv, UART_LSR_THRE);
++ /*
++ * Send the character out.
++ * If a LF, also do CR...
++ */
++ serial_out(priv, QUARK_UART_THR, c);
++ if (c == 10) {
++ wait_for_xmitr(priv, UART_LSR_THRE);
++ serial_out(priv, QUARK_UART_THR, 13);
++ }
++
++ /*
++ * Finally, wait for transmitter to become empty
++ * and restore the IER
++ */
++ wait_for_xmitr(priv, BOTH_EMPTY);
++ serial_out(priv, UART_IER, ier);
++}
++#endif /* CONFIG_CONSOLE_POLL */
++
++static struct uart_ops quark_uart_ops = {
++ .tx_empty = quark_uart_tx_empty,
++ .set_mctrl = quark_uart_set_mctrl,
++ .get_mctrl = quark_uart_get_mctrl,
++ .stop_tx = quark_uart_stop_tx,
++ .start_tx = quark_uart_start_tx,
++ .stop_rx = quark_uart_stop_rx,
++ .enable_ms = quark_uart_enable_ms,
++ .break_ctl = quark_uart_break_ctl,
++ .startup = quark_uart_startup,
++ .shutdown = quark_uart_shutdown,
++ .set_termios = quark_uart_set_termios,
++/* .pm = quark_uart_pm, Not supported yet */
++/* .set_wake = quark_uart_set_wake, Not supported yet */
++ .type = quark_uart_type,
++ .release_port = quark_uart_release_port,
++ .request_port = quark_uart_request_port,
++ .config_port = quark_uart_config_port,
++ .verify_port = quark_uart_verify_port,
++#ifdef CONFIG_CONSOLE_POLL
++ .poll_get_char = quark_uart_get_poll_char,
++ .poll_put_char = quark_uart_put_poll_char,
++#endif
++};
++
++#ifdef CONFIG_SERIAL_QUARK_UART_CONSOLE
+
-+static void intel_cln_uart_console_putchar(struct uart_port *port, int ch)
++static void quark_console_putchar(struct uart_port *port, int ch)
+{
-+ struct intel_cln_uart *up =
-+ container_of(port, struct intel_cln_uart, port);
++ struct x1000_port *priv =
++ container_of(port, struct x1000_port, port);
+
-+ wait_for_xmitr(up);
-+ serial_out(up, UART_TX, ch);
++ wait_for_xmitr(priv, UART_LSR_THRE);
++ serial_out(priv, QUARK_UART_THR, ch);
+}
+
+/*
-+ * Print a string to the serial port trying not to disturb
-+ * any possible real use of the port...
++ * Print a string to the serial port trying not to disturb
++ * any possible real use of the port...
+ *
+ * The console_lock must be held when we get here.
+ */
+static void
-+intel_cln_uart_console_write(struct console *co, const char *s, unsigned int count)
++quark_console_write(struct console *co, const char *s, unsigned int count)
+{
-+ struct intel_cln_uart *up = &intel_cln_uart_ports[co->index];
++ struct x1000_port *priv;
+ unsigned long flags;
-+ unsigned int ier;
-+ int locked = 1;
++ int priv_locked = 1;
++ int port_locked = 1;
++ u8 ier;
++
++ priv = quark_uart_ports[co->index];
++
++ touch_nmi_watchdog();
+
+ local_irq_save(flags);
-+ if (up->port.sysrq)
-+ locked = 0;
-+ else if (oops_in_progress) {
-+ locked = spin_trylock(&up->port.lock);
-+ } else
-+ spin_lock(&up->port.lock);
++ if (priv->port.sysrq) {
++ /* call to uart_handle_sysrq_char already took the priv lock */
++ priv_locked = 0;
++ /* serial8250_handle_port() already took the port lock */
++ port_locked = 0;
++ } else if (oops_in_progress) {
++ priv_locked = spin_trylock(&priv->lock);
++ port_locked = spin_trylock(&priv->port.lock);
++ } else {
++ spin_lock(&priv->lock);
++ spin_lock(&priv->port.lock);
++ }
+
-+ /* First save the IER then disable the interrupts */
-+ ier = serial_in(up, UART_IER);
-+ serial_out(up, UART_IER, 0);
++ /*
++ * First save the IER then disable the interrupts
++ */
++ ier = serial_in(priv, UART_IER);
++
++ quark_uart_hal_disable_interrupt(priv, QUARK_UART_HAL_ALL_INT);
+
-+ uart_console_write(&up->port, s, count, intel_cln_uart_console_putchar);
++ uart_console_write(&priv->port, s, count, quark_console_putchar);
+
+ /*
-+ * Finally, wait for transmitter to become empty
-+ * and restore the IER
++ * Finally, wait for transmitter to become empty
++ * and restore the IER
+ */
-+ wait_for_xmitr(up);
-+ serial_out(up, UART_IER, ier);
++ wait_for_xmitr(priv, BOTH_EMPTY);
++ serial_out(priv, UART_IER, ier);
+
-+ if (locked)
-+ spin_unlock(&up->port.lock);
++ if (port_locked)
++ spin_unlock(&priv->port.lock);
++ if (priv_locked)
++ spin_unlock(&priv->lock);
+ local_irq_restore(flags);
+}
+
-+static struct console intel_cln_uart_console;
-+
-+static int __init
-+intel_cln_uart_console_setup(struct console *co, char *options)
++static int __init quark_console_setup(struct console *co, char *options)
+{
-+ struct intel_cln_uart *up;
-+ int baud = 115200;
++ struct uart_port *port;
++ int baud = default_baud;
+ int bits = 8;
+ int parity = 'n';
+ int flow = 'n';
+
-+ if (co->index == -1 || co->index >= intel_cln_uart_driver.nr)
++ /*
++ * Check whether an invalid uart number has been specified, and
++ * if so, search for the first available port that does have
++ * console support.
++ */
++ if (co->index >= QUARK_UART_NR)
+ co->index = 0;
-+ up = intel_cln_uart_ports[co->index];
-+ if (!up)
++ port = &quark_uart_ports[co->index]->port;
++
++ if (!port || !port->membase)
+ return -ENODEV;
+
++ port->uartclk = quark_uart_get_uartclk();
++
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+
-+ return uart_set_options(&up->port, co, baud, parity, bits, flow);
++ return uart_set_options(port, co, baud, parity, bits, flow);
+}
+
++static struct uart_driver quark_uart_driver;
+
-+static struct uart_driver intel_cln_uart_driver;
-+static struct console intel_cln_uart_console = {
-+ .name = "ttyCLN",
-+ .write = intel_cln_uart_console_write,
++static struct console quark_console = {
++ .name = QUARK_UART_DRIVER_DEVICE,
++ .write = quark_console_write,
+ .device = uart_console_device,
-+ .setup = intel_cln_uart_console_setup,
-+ .flags = CON_PRINTBUFFER,
++ .setup = quark_console_setup,
++ .flags = CON_PRINTBUFFER | CON_ANYTIME,
+ .index = -1,
-+ .data = &intel_cln_uart_driver,
++ .data = &quark_uart_driver,
+};
+
-+#define INTEL_CLN_UART_CONSOLE (&intel_cln_uart_console)
++#define QUARK_CONSOLE (&quark_console)
+#else
-+#define INTEL_CLN_UART_CONSOLE NULL
-+#endif
++#define QUARK_CONSOLE NULL
++#endif /* CONFIG_SERIAL_QUARK_UART_CONSOLE */
+
-+static struct uart_driver intel_cln_uart_driver = {
++static struct uart_driver quark_uart_driver = {
+ .owner = THIS_MODULE,
+ .driver_name = KBUILD_MODNAME,
-+ .dev_name = CLN_UART_DRIVER_DEVICE,
-+ .major = TTY_MAJOR,
-+ .minor = 129,
-+ .nr = CLN_UART_MAX_INSTANCES,
-+ .cons = INTEL_CLN_UART_CONSOLE,
-+};
-+
-+static struct uart_ops intel_cln_uart_ops = {
-+ .tx_empty = intel_cln_uart_tx_empty,
-+ .set_mctrl = intel_cln_uart_set_mctrl,
-+ .get_mctrl = intel_cln_uart_get_mctrl,
-+ .stop_tx = intel_cln_uart_stop_tx,
-+ .start_tx = intel_cln_uart_start_tx,
-+ .stop_rx = intel_cln_uart_stop_rx,
-+ .enable_ms = intel_cln_uart_enable_ms,
-+ .break_ctl = intel_cln_uart_break_ctl,
-+ .startup = intel_cln_uart_startup,
-+ .shutdown = intel_cln_uart_shutdown,
-+ .set_termios = intel_cln_uart_set_termios,
-+ .pm = intel_cln_uart_pm,
-+ .type = intel_cln_uart_type,
-+ .release_port = intel_cln_uart_release_port,
-+ .request_port = intel_cln_uart_request_port,
-+ .config_port = intel_cln_uart_config_port,
-+ .verify_port = intel_cln_uart_verify_port,
++ .dev_name = QUARK_UART_DRIVER_DEVICE,
++ .major = 0,
++ .minor = 0,
++ .nr = QUARK_UART_NR,
++ .cons = QUARK_CONSOLE,
+};
+
-+/**
-+ * intel_cln_dma_chan_filter
-+ *
-+ * Simple descriptor disjunct function
-+ */
-+static bool intel_cln_dma_chan_filter(struct dma_chan * chan, void *param)
++static struct x1000_port *quark_uart_init_port(struct pci_dev *pdev,
++ const struct pci_device_id *id)
+{
-+// struct intel_mid_dma_slave *dws = param;
++ struct x1000_port *priv;
++ int ret, len;
++ unsigned char *rxbuf;
++ char name[32]; /* for debugfs file name */
++ struct intel_mid_dma_probe_info * info = NULL;
+
-+// pr_info("%s compare device %p to %p\n", __FUNCTION__, dws->dma_dev, chan->device->dev);
++ dev_info(&pdev->dev,"QUARK UART-DMA (ID: %04x:%04x) pdev->irq %d\n",
++ pdev->vendor, pdev->device, pdev->irq);
+
-+ //return dws->dmac && (&dws->dmac->dev == chan->device->dev);
-+ return 1; // TBD
-+}
-+
-+/**
-+ * intel_cln_uart_probe
-+ *
-+ * @param dev: the PCI device matching
-+ * @param id: entry in the match table
-+ * @return 0
-+ *
-+ * Callback from PCI layer when dev/vendor ids match.
-+ * Sets up necessary resources
-+ */
-+static int intel_cln_uart_probe(struct pci_dev *pdev,
-+ const struct pci_device_id *id)
-+{
-+ dma_cap_mask_t mask;
-+ int ret = 0;
-+ struct intel_cln_uart *up = NULL;
-+ unsigned long flags = 0, len = 0;
-+
-+ printk(KERN_INFO "Intel Clanton UART-DMA (ID: %04x:%04x)\n",
-+ pdev->vendor, pdev->device);
-+
-+ /* Driver desc */
-+ up = kzalloc(sizeof(struct intel_cln_uart), GFP_KERNEL);
-+ if (up == NULL){
-+ ret = -ENOMEM;
-+ goto err;
-+ }
-+ up->mid_dma.pdev = pci_dev_get(pdev);
-+
-+ ret = pci_enable_device(pdev);
-+ if (ret){
-+ goto err;
-+ }
-+
-+ /* Attempt MSI enable */
-+ //if(pci_enable_msi(pdev)){
-+ if(1){
-+ dev_warn(&pdev->dev, "MSI enable fail\n");
-+ flags = IRQF_SHARED;
-+ }else{
-+ /*
-+ * MSI enable good - set IRQ type to level. This seems wrong
-+ * since PCI is an edge triggered interrupt system - but, the IP
-+ * block connected to the bridge is level triggered. Setting the
-+ * IRQ type to LEVEL_HIGH will trigger the
-+ * io_apic->irq_mask()/unmask() functions to be automagically
-+ * called by the kernel - which saves us from having to do nasty
-+ * PCI config space writes explicitely in the ISR - kernel
-+ * entry/exit functions will do that for us
-+ */
-+ irq_set_irq_type(pdev->irq, IRQ_TYPE_LEVEL_HIGH);
-+ up->mode |= CLN_UART_MODE_MSI;
-+ }
-+
-+ /* DMA hook */
-+ if(dma_enable == true){
-+ up->mode |= CLN_UART_MODE_DMA;
-+ }
-+ up->mode = 0;
++ info = (void*)id->driver_data;
++ dev_info(&pdev->dev,"QUARK UART-DMA : CH %d base %d block len %d per mask %x\n",
++ info->max_chan, info->ch_base, info->block_size, info->pimr_mask);
++#if 0
++ board = &drv_dat[id->driver_data];
++ port_type = board->port_type;
++#endif
++ priv = kzalloc(sizeof(struct x1000_port), GFP_KERNEL);
++ if (priv == NULL)
++ goto init_port_alloc_err;
+
-+ /* Hook an IRQ - in whichever mode */
-+ ret = request_irq(pdev->irq, intel_cln_uart_isr, flags, KBUILD_MODNAME,
-+ up);
-+ if (ret) {
-+ dev_err(&pdev->dev, "can not get IRQ\n");
-+ goto err_dev;
-+ }
++ rxbuf = (unsigned char *)__get_free_page(GFP_KERNEL);
++ if (!rxbuf)
++ goto init_port_free_txbuf;
+
-+ /* Add debugfs entries */
-+ intel_cln_uart_debugfs_init(up);
++ pci_set_master(pdev);
+
-+ /* Init spinlock */
-+ spin_lock_init(&up->lock);
++ spin_lock_init(&priv->lock);
+
-+ /* UART regs on BAR0 */
-+ up->port.mapbase = pci_resource_start(pdev, 0);
++ priv->mapbase = pci_resource_start(pdev, 0);
+ len = pci_resource_len(pdev, 0);
-+ up->port.membase = ioremap_nocache(up->port.mapbase, len);
-+ if(up->port.membase == NULL){
++ priv->membase = ioremap_nocache(priv->mapbase, len);
++ if(priv->membase == NULL){
+ ret = -ENODEV;
-+ goto err_dev;
++ goto init_port_free_txbuf;
+ }
+
-+ /* Init DMA driver */
-+ up->mid_dma.max_chan = CLN_UART_DMA_CHANNELS; /* Max channels */
-+ up->mid_dma.chan_base = 0; /* Index start */
-+ up->mid_dma.block_size = CLN_UART_FIFO_LEN; /* MAX DMA block */
-+ up->mid_dma.pimr_mask = 0; /* Per int regs bool */
-+
-+ ret = intel_cln_dma_probe(pdev, &up->mid_dma);
-+ if(ret != 0){
-+ dev_err(&pdev->dev, "Unable to init DMA sub-system\n");
-+ goto err_dev;
-+ }
++ priv->pdev = pdev;
++ priv->tx_empty = 1;
++ priv->rxbuf.buf = rxbuf;
++ priv->rxbuf.size = PAGE_SIZE;
++ priv->fifo_size = QUARK_UART_FIFO_LEN;
++ priv->uartclk = quark_uart_get_uartclk();
++ priv->port_type = PORT_MAX_8250 + 1; /* BOD what does this do ? TBD*/
++ priv->port.dev = &pdev->dev;
++ priv->port.membase = priv->membase;
++ priv->port.mapbase = priv->mapbase;
++ priv->port.irq = pdev->irq;
++ priv->port.iotype = UPIO_MEM;
++ priv->port.ops = &quark_uart_ops;
++ priv->port.flags = UPF_BOOT_AUTOCONF;
++ priv->port.fifosize = QUARK_UART_FIFO_LEN;
++ priv->port.line = pdev->dev.id;
++ priv->trigger = QUARK_UART_HAL_TRIGGER_M;
++
++ spin_lock_init(&priv->port.lock);
++ pci_set_drvdata(pdev, priv);
++ priv->trigger_level = 1;
++ priv->fcr = 0;
++
++ ret = request_irq(pdev->irq, quark_uart_interrupt, IRQF_SHARED,
++ KBUILD_MODNAME, priv);
++#ifdef __QRK_DMA_DEBUG
++ pr_info("%s request_irq %d use_dma %d irq=%d\n", __func__, ret, priv->use_dma, pdev->irq);
++#endif
++ if (ret < 0)
++ goto init_port_hal_free;
+
-+ /* Request DMA channels TODO: move to startup() once debugged on hw */
-+ dma_cap_zero(mask);
-+ dma_cap_set(DMA_SLAVE, mask);
++#ifdef CONFIG_SERIAL_QUARK_UART_CONSOLE
++ quark_uart_ports[board->line_no] = priv;
++#endif
++ ret = uart_add_one_port(&quark_uart_driver, &priv->port);
+
-+ up->rx_chan = dma_request_channel(mask, intel_cln_dma_chan_filter, &up->dmas_rx);
-+ if(up->rx_chan == NULL){
-+ dev_err(&pdev->dev, "Unable to hook DMA RX channel\n");
-+ goto err_bar0;
-+ };
-+ up->dmas_rx.hs_mode = LNW_DMA_SW_HS;
-+ up->dmas_rx.cfg_mode = LNW_DMA_PER_TO_MEM;
++ if (ret < 0)
++ goto init_port_hal_free;
+
-+ up->tx_chan = dma_request_channel(mask, intel_cln_dma_chan_filter, &up->dmas_tx);
-+ if(up->tx_chan == NULL){
-+ dev_err(&pdev->dev, "Unable to hook DMA RX channel\n");
-+ goto err_bar0;
-+ };
-+ up->dmas_tx.hs_mode = LNW_DMA_SW_HS;
-+ up->dmas_tx.cfg_mode = LNW_DMA_MEM_TO_PER;
-+
-+ dev_info(&pdev->dev, "using %s for DMA RX %s for DMA TX\n",
-+ dev_name(&up->rx_chan->dev->device), dev_name(&up->tx_chan->dev->device));
-+
-+ /* Enumerate port */
-+ up->irq = pdev->irq;
-+ up->dev = &pdev->dev;
-+ up->tx_empty = 1;
-+
-+// up->port_type = PORT_MAX_8250 + 10; /* TODO: add to include/linux/serial_core.h */
-+ up->uartclk = CLN_UART_DEFAULT_UARTCLK;
-+ up->port.uartclk = up->uartclk;
-+ up->port.dev = &pdev->dev;
-+ up->port.irq = pdev->irq;
-+ up->port.iotype = UPIO_MEM;
-+ up->port.ops = &intel_cln_uart_ops;
-+ up->port.flags = UPF_BOOT_AUTOCONF;
-+ up->port.fifosize = 16;
-+ up->port.line = pdev->dev.id;
-+ snprintf(up->name, sizeof(up->name), "cln_port%d", intel_cln_uart_port_ct++);
-+
-+ /* Get Consistent memory for DMA TODO: move to startup() once debugged on hw */
-+ up->rxbuf.buf_virt = dma_alloc_coherent(up->port.dev, up->port.fifosize,
-+ &up->rxbuf.dma_addr, GFP_KERNEL);
-+ up->rxbuf.size = up->port.fifosize;
-+
-+ /* Add UART */
-+ uart_add_one_port(&intel_cln_uart_driver, &up->port);
-+ pci_set_drvdata(pdev, up);
++#ifdef CONFIG_DEBUG_FS
++ snprintf(name, sizeof(name), "uart%d_regs", pdev->dev.id);
++ priv->debugfs = debugfs_create_file(name, S_IFREG | S_IRUGO,
++ NULL, priv, &port_regs_ops);
++#endif
+
-+ pm_runtime_put_noidle(&pdev->dev);
-+ pm_runtime_allow(&pdev->dev);
++ return priv;
+
-+ return 0;
++init_port_hal_free:
++#ifdef CONFIG_SERIAL_QUARK_UART_CONSOLE
++ quark_uart_ports[board->line_no] = NULL;
++#endif
++ free_page((unsigned long)rxbuf);
++init_port_free_txbuf:
++ kfree(priv);
++init_port_alloc_err:
+
-+err_bar0:
-+ iounmap(up->port.membase);
-+err_dev:
-+ free_irq(up->irq, NULL);
-+ pci_disable_device(pdev);
-+err:
-+ kfree(up);
-+ return ret;
++ return NULL;
+}
+
-+/**
-+ * uart_remove
-+ *
-+ * @param pdev: PCI device
-+ * @return nothing
-+ *
-+ * Callback from PCI sub-system upon PCI dev removal
-+ */
-+static void intel_cln_uart_remove(struct pci_dev *pdev)
++static void quark_uart_exit_port(struct x1000_port *priv)
+{
-+ struct intel_cln_uart *up = pci_get_drvdata(pdev);
-+ if (!up)
-+ return;
-+
-+ /* Shutdown DMA */
-+ intel_cln_dma_remove(pdev, &up->mid_dma);
+
-+ /* TODO: move to remove() when h/w proved out */
-+ if (up->tx_chan) {
-+ dma_release_channel(up->tx_chan);
-+ up->tx_chan = NULL;
-+ }
-+ if (up->rx_chan) {
-+ dma_release_channel(up->rx_chan);
-+ up->rx_chan = NULL;
-+ }
++#ifdef CONFIG_DEBUG_FS
++ if (priv->debugfs)
++ debugfs_remove(priv->debugfs);
++#endif
++ free_irq(priv->port.irq, priv);
++ uart_remove_one_port(&quark_uart_driver, &priv->port);
++ pci_set_drvdata(priv->pdev, NULL);
++ free_page((unsigned long)priv->rxbuf.buf);
++}
+
-+ if (sg_dma_address(&up->sg_rx))
-+ dma_free_coherent(up->port.dev, up->port.fifosize,
-+ sg_virt(&up->sg_rx),
-+ sg_dma_address(&up->sg_rx));
++static void quark_uart_pci_remove(struct pci_dev *pdev)
++{
++ struct x1000_port *priv = pci_get_drvdata(pdev);
+
-+ /* Remove UART */
-+ uart_remove_one_port(&intel_cln_uart_driver, &up->port);
++ pci_disable_msi(pdev);
+
-+ pci_set_drvdata(pdev, NULL);
-+ free_irq(up->irq, NULL);
++#ifdef CONFIG_SERIAL_QUARK_UART_CONSOLE
++ quark_uart_ports[priv->port.line] = NULL;
++#endif
++ quark_uart_exit_port(priv);
+ pci_disable_device(pdev);
-+
-+ /* Remove debugfs entries */
-+ intel_cln_uart_debugfs_remove(up);
-+
-+ kfree(up);
++ kfree(priv);
++ return;
+}
-+
+#ifdef CONFIG_PM
-+
-+static int intel_cln_uart_suspend(struct pci_dev *pdev, pm_message_t state)
++static int quark_uart_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+{
-+ struct intel_cln_uart *up = pci_get_drvdata(pdev);
-+
-+ /* Suspend DMA regs */
-+ intel_cln_dma_suspend(&up->mid_dma);
++ struct x1000_port *priv = pci_get_drvdata(pdev);
+
-+
-+ /* Suspend UART */
-+ uart_suspend_port(&intel_cln_uart_driver, &up->port);
++ uart_suspend_port(&quark_uart_driver, &priv->port);
+
+ pci_save_state(pdev);
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
-+ return 0;
++ return 0;
+}
+
-+static int intel_cln_uart_resume(struct pci_dev *pdev)
++static int quark_uart_pci_resume(struct pci_dev *pdev)
+{
-+ struct intel_cln_uart *up = pci_get_drvdata(pdev);
++ struct x1000_port *priv = pci_get_drvdata(pdev);
+ int ret;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+
+ ret = pci_enable_device(pdev);
-+ if (ret){
-+ dev_warn(&pdev->dev,
-+ "INTEL_CLN_UART: can't re-enable device, try to continue\n");
++ if (ret) {
++ dev_err(&pdev->dev,
++ "%s-pci_enable_device failed(ret=%d) ", __func__, ret);
++ return ret;
+ }
+
-+ uart_resume_port(&intel_cln_uart_driver, &up->port);
-+
-+ /* Resume DMA regs */
-+ intel_cln_dma_resume(&up->mid_dma);
++ uart_resume_port(&quark_uart_driver, &priv->port);
+
+ return 0;
+}
-+
+#else
-+
-+#define intel_cln_uart_suspend NULL
-+#define intel_cln_uart_resume NULL
-+
++#define quark_uart_pci_suspend NULL
++#define quark_uart_pci_resume NULL
+#endif
+
-+struct pci_device_id intel_cln_uart_ids[] = {
-+ { PCI_VDEVICE(INTEL, 0x0936), 0},
++struct pci_device_id quark_uart_pci_ids[] = {
++ /* channels = 2, offset = 0, block size = FIFO_LEN, pimr = 0 */
++ { PCI_VDEVICE(INTEL, 0x0936), INFO(2, 0, QUARK_UART_FIFO_LEN, 0)},
+ { 0 }
+};
+
-+MODULE_DEVICE_TABLE(pci, intel_cln_uart_ids);
++static int quark_uart_pci_probe(struct pci_dev *pdev,
++ const struct pci_device_id *id)
++{
++ int ret;
++ struct x1000_port *priv;
+
-+/* PCI callbacks */
-+static struct pci_driver intel_cln_uart_pci_desc = {
-+ .name = "intel_cln_uart",
-+ .id_table = intel_cln_uart_ids,
-+ .probe = intel_cln_uart_probe,
-+ .remove = intel_cln_uart_remove,
-+ .suspend = intel_cln_uart_suspend,
-+ .resume = intel_cln_uart_resume,
++ ret = pci_enable_device(pdev);
++ if (ret < 0)
++ goto probe_error;
++
++ priv = quark_uart_init_port(pdev, id);
++ if (!priv) {
++ ret = -EBUSY;
++ goto probe_disable_device;
++ }
++ pci_set_drvdata(pdev, priv);
++
++ return ret;
++
++probe_disable_device:
++ pci_disable_msi(pdev);
++ pci_disable_device(pdev);
++probe_error:
++ return ret;
++}
++
++static struct pci_driver quark_uart_pci_driver = {
++ .name = "quark_uart",
++ .id_table = quark_uart_pci_ids,
++ .probe = quark_uart_pci_probe,
++ .remove = quark_uart_pci_remove,
++ .suspend = quark_uart_pci_suspend,
++ .resume = quark_uart_pci_resume,
+};
+
-+/**
-+ * intel_cln_uart_init
-+ *
-+ * Module entry point
-+ */
-+static int __init intel_cln_uart_init(void)
++static int __init quark_uart_module_init(void)
+{
+ int ret;
+
+ /* register as UART driver */
-+ ret = uart_register_driver(&intel_cln_uart_driver);
++ ret = uart_register_driver(&quark_uart_driver);
+ if (ret < 0)
+ return ret;
+
+ /* register as PCI driver */
-+ ret = pci_register_driver(&intel_cln_uart_pci_desc);
++ ret = pci_register_driver(&quark_uart_pci_driver);
+ if (ret < 0)
-+ uart_unregister_driver(&intel_cln_uart_driver);
++ uart_unregister_driver(&quark_uart_driver);
+
+ return ret;
+}
++module_init(quark_uart_module_init);
+
-+/**
-+ * intel_cln_uart_exit
-+ *
-+ * Module exit
-+ */
-+static void __exit intel_cln_uart_exit(void)
++static void __exit quark_uart_module_exit(void)
+{
-+ pci_unregister_driver(&intel_cln_uart_pci_desc);
++ pci_unregister_driver(&quark_uart_pci_driver);
++ uart_unregister_driver(&quark_uart_driver);
+}
++module_exit(quark_uart_module_exit);
+
-+MODULE_AUTHOR("Bryan O'Donoghue <bryan.odonoghue@linux.intel.com>");
-+MODULE_DESCRIPTION("Intel Clanton UART-DMA driver");
-+MODULE_LICENSE("Dual BSD/GPL");
-+
-+module_init(intel_cln_uart_init);
-+module_exit(intel_cln_uart_exit);
++MODULE_LICENSE("GPL v2");
++MODULE_DESCRIPTION("Intel QUARK X1000 UART PCI Driver");
++module_param(default_baud, uint, S_IRUGO);
++MODULE_PARM_DESC(default_baud,
++ "Default BAUD for initial driver state and console (default 115200)");
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index da9fde8..6b20fd6 100644
--- a/drivers/tty/tty_io.c
@@ -23664,10 +26315,10 @@ index bfe6656..cadd9d2 100644
/* GPIO can never have been requested or set as {in,out}put */
diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h
new file mode 100644
-index 0000000..337da33
+index 0000000..56d0495
--- /dev/null
+++ b/include/linux/iio/common/st_sensors.h
-@@ -0,0 +1,290 @@
+@@ -0,0 +1,289 @@
+/*
+ * STMicroelectronics sensors library driver
+ *
@@ -23685,7 +26336,6 @@ index 0000000..337da33
+#include <linux/spi/spi.h>
+#include <linux/irqreturn.h>
+#include <linux/iio/trigger.h>
-+#include <linux/iio/events.h>
+
+#define ST_SENSORS_TX_MAX_LENGTH 2
+#define ST_SENSORS_RX_MAX_LENGTH 6
@@ -23719,7 +26369,6 @@ index 0000000..337da33
+ .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | \
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT, \
+ .scan_index = index, \
-+ .channel = mod, \
+ .channel2 = mod, \
+ .address = addr, \
+ .scan_type = { \
@@ -23729,7 +26378,6 @@ index 0000000..337da33
+ .storagebits = 16, \
+ .endianness = endian, \
+ }, \
-+ .event_mask = IIO_EV_BIT(IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING), \
+}
+
+#define ST_SENSOR_DEV_ATTR_SAMP_FREQ() \
@@ -23887,7 +26535,6 @@ index 0000000..337da33
+ struct st_sensor_fullscale_avl *current_fullscale;
+
+ bool enabled;
-+ bool int_thresh;
+ bool multiread_bit;
+
+ char *buffer_data;
@@ -23939,6 +26586,9 @@ index 0000000..337da33
+
+int st_sensors_set_fullscale_by_gain(struct iio_dev *indio_dev, int scale);
+
++int st_sensors_read_axis_data(struct iio_dev *indio_dev,
++ u8 ch_addr, int *data);
++
+int st_sensors_read_info_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *ch, int *val);
+
@@ -24107,7 +26757,7 @@ index 0000000..6092fc0
+
+#endif /* __INTEL_CLN_SB_H__ */
diff --git a/include/linux/intel_mid_dma.h b/include/linux/intel_mid_dma.h
-index 10496bd..fdb8085 100644
+index 10496bd..a723856 100644
--- a/include/linux/intel_mid_dma.h
+++ b/include/linux/intel_mid_dma.h
@@ -26,8 +26,10 @@
@@ -24121,7 +26771,7 @@ index 10496bd..fdb8085 100644
/*DMA mode configurations*/
enum intel_mid_dma_mode {
-@@ -73,4 +75,163 @@ struct intel_mid_dma_slave {
+@@ -73,4 +75,188 @@ struct intel_mid_dma_slave {
struct dma_slave_config dma_slave;
};
@@ -24226,6 +26876,22 @@ index 10496bd..fdb8085 100644
+};
+
+/**
++ * struct intel_mid_dma_probe_info
++ *
++ * @max_chan: maximum channels to probe
++ * @ch_base: offset from register base
++ * @block_size: TBD
++ * @pimr_mask: indicates if mask registers to be mapped
++ */
++struct intel_mid_dma_probe_info {
++ u8 max_chan;
++ u8 ch_base;
++ u16 block_size;
++ u32 pimr_mask;
++};
++
++
++/**
+ * intel_mid_dma_interrupt - DMA ISR
+ * @irq: IRQ where interrupt occurred
+ * @data: ISR cllback data (the controller structure)
@@ -24236,13 +26902,22 @@ index 10496bd..fdb8085 100644
+irqreturn_t intel_mid_dma_interrupt(int irq, void *data);
+
+/**
++ * mid_setup_dma - Setup DMA controller
++ * @pdev: Controller PCI device structure
++ *
++ * Called by probe
++ * Registers the DMA controller, initialises structures and requests the interrupt
++ */
++int mid_setup_dma(struct pci_dev *pdev, struct middma_device *dma);
++
++/**
+ * middma_shutdown - Shutdown the DMA controller
+ * @pdev: Controller PCI device structure
+ *
+ * Called by remove
+ * Unregister DMa controller, clear all structures and free interrupt
+ */
-+void intel_mid_dma_shutdown(struct pci_dev *pdev, struct middma_device *device);
++void middma_shutdown(struct pci_dev *pdev, struct middma_device *device);
+
+/**
+ * intel_mid_dma_probe - PCI Probe
@@ -24285,6 +26960,88 @@ index 10496bd..fdb8085 100644
+
+
#endif /*__INTEL_MID_DMA_H__*/
+diff --git a/include/linux/mfd/cy8c9540a.h b/include/linux/mfd/cy8c9540a.h
+new file mode 100644
+index 0000000..0fe1d70
+--- /dev/null
++++ b/include/linux/mfd/cy8c9540a.h
+@@ -0,0 +1,38 @@
++/*
++ * Copyright(c) 2013 Intel Corporation. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of version 2 of the GNU General Public License as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Contact Information:
++ * Intel Corporation
++ */
++
++#ifndef LINUX_CY8C9540A_PDATA_H
++#define LINUX_CY8C9540A_PDATA_H
++
++#include <linux/types.h>
++
++#define CY8C9540A_POR_SETTINGS_LEN 147
++#define CY8C9540A_NPWM 8
++#define CY8C9540A_PWM_UNUSED -1
++
++struct cy8c9540a_pdata {
++ u8 por_default[CY8C9540A_POR_SETTINGS_LEN];
++ int pwm2gpio_mapping[CY8C9540A_NPWM];
++ int gpio_base;
++ int pwm_base;
++ int irq_base;
++};
++
++#endif
+diff --git a/include/linux/mfd/intel_cln_gip_pdata.h b/include/linux/mfd/intel_cln_gip_pdata.h
+new file mode 100644
+index 0000000..183f881
+--- /dev/null
++++ b/include/linux/mfd/intel_cln_gip_pdata.h
+@@ -0,0 +1,32 @@
++/*
++ * Copyright(c) 2013 Intel Corporation. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of version 2 of the GNU General Public License as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Contact Information:
++ * Intel Corporation
++ */
++
++#ifndef LINUX_INTEL_CLN_GIP_DATA_H
++#define LINUX_INTEL_CLN_GIP_DATA_H
++
++struct pci_dev;
++
++struct intel_cln_gip_pdata {
++ int i2c_std_mode;
++};
++
++extern struct intel_cln_gip_pdata *(*intel_cln_gip_get_pdata)(void);
++
++#endif
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 0eb6579..060fce2 100644
--- a/include/linux/pci_ids.h
@@ -24329,10 +27086,10 @@ index fbf8adf..721ed6f 100644
#endif /* IIO_ADC_AD7298_H_ */
diff --git a/include/linux/platform_data/clanton.h b/include/linux/platform_data/clanton.h
new file mode 100644
-index 0000000..95b5e6d
+index 0000000..7f2622e
--- /dev/null
+++ b/include/linux/platform_data/clanton.h
-@@ -0,0 +1,58 @@
+@@ -0,0 +1,44 @@
+/*
+ * Copyright(c) 2013 Intel Corporation. All rights reserved.
+ *
@@ -24366,7 +27123,7 @@ index 0000000..95b5e6d
+ KIPS_BAY = 3,
+ CROSS_HILL = 4,
+ CLANTON_HILL = 5,
-+ IZMIR = 6,
++ GALILEO = 6,
+}cln_plat_id_t;
+
+typedef enum {
@@ -24376,20 +27133,6 @@ index 0000000..95b5e6d
+ PLAT_DATA_MAC1 = 4,
+}plat_dataid_t;
+
-+
-+#ifdef CONFIG_INTEL_QUARK_X1000_SOC
-+extern cln_plat_id_t intel_cln_plat_get_id(void);
-+extern int intel_cln_plat_get_mac(plat_dataid_t id, char * mac);
-+#else
-+static inline cln_plat_id_t intel_cln_plat_get_id(void)
-+{
-+ return CLANTON_PLAT_UNDEFINED;
-+}
-+static int intel_cln_plat_get_mac(plat_dataid_t id, char * mac)
-+{
-+ return -ENODEV;
-+}
-+#endif
+#endif /* _PDATA_CLANTON_H */
diff --git a/include/linux/platform_data/lis331dlh_intel_cln.h b/include/linux/platform_data/lis331dlh_intel_cln.h
new file mode 100644
@@ -24599,7 +27342,7 @@ index f366320..9810c71 100644
+int pxa_msi_enabled(void);
#endif
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
-index 9531845c..445fe6e 100644
+index 9531845..445fe6e 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -148,7 +148,7 @@ extern int rtc_initialize_alarm(struct rtc_device *rtc,
@@ -24714,12 +27457,12 @@ index 25596e4..9b2a1d5 100644
if (!rtc) {
printk(warn_no_rtc);
goto done;
-diff --git a/.meta/cfg/kernel-cache/bsp/clanton/clanton.cfg b/.meta/cfg/kernel-cache/bsp/clanton/clanton.cfg
+diff --git a/meta/cfg/kernel-cache/bsp/clanton/clanton.cfg b/meta/cfg/kernel-cache/bsp/clanton/clanton.cfg
new file mode 100644
-index 0000000..b4c8852
+index 0000000..3ba0d41
--- /dev/null
-+++ b/.meta/cfg/kernel-cache/bsp/clanton/clanton.cfg
-@@ -0,0 +1,2938 @@
++++ b/meta/cfg/kernel-cache/bsp/clanton/clanton.cfg
+@@ -0,0 +1,3044 @@
+#
+# Automatically generated file; DO NOT EDIT.
+# Linux/i386 3.8.7 Kernel Configuration
@@ -25002,7 +27745,11 @@ index 0000000..b4c8852
+CONFIG_DEFAULT_CFQ=y
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="cfq"
-+CONFIG_UNINLINE_SPIN_UNLOCK=y
++CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
++CONFIG_INLINE_READ_UNLOCK=y
++CONFIG_INLINE_READ_UNLOCK_IRQ=y
++CONFIG_INLINE_WRITE_UNLOCK=y
++CONFIG_INLINE_WRITE_UNLOCK_IRQ=y
+CONFIG_FREEZER=y
+
+#
@@ -25069,7 +27816,7 @@ index 0000000..b4c8852
+CONFIG_CPU_SUP_UMC_32=y
+CONFIG_HPET_TIMER=y
+CONFIG_HPET_EMULATE_RTC=y
-+# CONFIG_DMI is not set
++CONFIG_DMI=y
+CONFIG_NR_CPUS=1
+# CONFIG_PREEMPT_NONE is not set
+CONFIG_PREEMPT_VOLUNTARY=y
@@ -25121,7 +27868,7 @@ index 0000000..b4c8852
+CONFIG_HAVE_MEMBLOCK_NODE_MAP=y
+CONFIG_ARCH_DISCARD_MEMBLOCK=y
+CONFIG_PAGEFLAGS_EXTENDED=y
-+CONFIG_SPLIT_PTLOCK_CPUS=999999
++CONFIG_SPLIT_PTLOCK_CPUS=4
+# CONFIG_COMPACTION is not set
+CONFIG_PHYS_ADDR_T_64BIT=y
+CONFIG_ZONE_DMA_FLAG=0
@@ -25144,7 +27891,7 @@ index 0000000..b4c8852
+CONFIG_X86_SMAP=y
+CONFIG_EFI=y
+CONFIG_EFI_STUB=y
-+CONFIG_EFI_CAPSULE=y
++CONFIG_EFI_CAPSULE=m
+CONFIG_SECCOMP=y
+# CONFIG_CC_STACKPROTECTOR is not set
+CONFIG_HZ_100=y
@@ -25174,6 +27921,7 @@ index 0000000..b4c8852
+CONFIG_PM=y
+CONFIG_PM_DEBUG=y
+# CONFIG_PM_ADVANCED_DEBUG is not set
++# CONFIG_PM_TEST_SUSPEND is not set
+CONFIG_PM_SLEEP_DEBUG=y
+CONFIG_PM_TRACE=y
+CONFIG_PM_TRACE_RTC=y
@@ -25188,7 +27936,7 @@ index 0000000..b4c8852
+CONFIG_ACPI_BUTTON=y
+# CONFIG_ACPI_FAN is not set
+# CONFIG_ACPI_DOCK is not set
-+CONFIG_ACPI_I2C=m
++CONFIG_ACPI_I2C=y
+CONFIG_ACPI_PROCESSOR=y
+# CONFIG_ACPI_PROCESSOR_AGGREGATOR is not set
+CONFIG_ACPI_THERMAL=y
@@ -25258,6 +28006,7 @@ index 0000000..b4c8852
+# CONFIG_SCx200 is not set
+# CONFIG_ALIX is not set
+# CONFIG_NET5501 is not set
++# CONFIG_GEOS is not set
+CONFIG_AMD_NB=y
+# CONFIG_PCCARD is not set
+# CONFIG_HOTPLUG_PCI is not set
@@ -25951,14 +28700,13 @@ index 0000000..b4c8852
+# CONFIG_SERIAL_ALTERA_JTAGUART is not set
+# CONFIG_SERIAL_ALTERA_UART is not set
+# CONFIG_SERIAL_IFX6X60 is not set
-+# CONFIG_SERIAL_CLN_UART is not set
++CONFIG_SERIAL_QUARK_UART=m
+# CONFIG_SERIAL_PCH_UART is not set
+# CONFIG_SERIAL_ARC is not set
+# CONFIG_TTY_PRINTK is not set
+# CONFIG_IPMI_HANDLER is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_NVRAM is not set
-+CONFIG_RTC=y
+# CONFIG_R3964 is not set
+# CONFIG_APPLICOM is not set
+# CONFIG_SONYPI is not set
@@ -25972,7 +28720,7 @@ index 0000000..b4c8852
+# CONFIG_TCG_TPM is not set
+# CONFIG_TELCLOCK is not set
+CONFIG_DEVPORT=y
-+CONFIG_I2C=m
++CONFIG_I2C=y
+CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
+CONFIG_I2C_CHARDEV=m
@@ -26113,6 +28861,7 @@ index 0000000..b4c8852
+# CONFIG_GPIO_MAX732X is not set
+# CONFIG_GPIO_PCA953X is not set
+# CONFIG_GPIO_PCF857X is not set
++# CONFIG_GPIO_SX150X is not set
+# CONFIG_GPIO_ADP5588 is not set
+
+#
@@ -26190,22 +28939,50 @@ index 0000000..b4c8852
+# Multifunction device drivers
+#
+CONFIG_MFD_CORE=y
++# CONFIG_MFD_88PM860X is not set
++# CONFIG_MFD_88PM800 is not set
++# CONFIG_MFD_88PM805 is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_MFD_RTSX_PCI is not set
+# CONFIG_MFD_TI_AM335X_TSCADC is not set
+# CONFIG_HTC_PASIC3 is not set
++# CONFIG_HTC_I2CPLD is not set
+# CONFIG_MFD_LM3533 is not set
+# CONFIG_TPS6105X is not set
+# CONFIG_TPS65010 is not set
+# CONFIG_TPS6507X is not set
+# CONFIG_MFD_TPS65217 is not set
++# CONFIG_MFD_TPS6586X is not set
++# CONFIG_MFD_TPS65910 is not set
++# CONFIG_MFD_TPS65912_I2C is not set
+# CONFIG_MFD_TPS65912_SPI is not set
++# CONFIG_MFD_TPS80031 is not set
++# CONFIG_TWL4030_CORE is not set
++# CONFIG_TWL6040_CORE is not set
+# CONFIG_MFD_STMPE is not set
++# CONFIG_MFD_TC3589X is not set
+# CONFIG_MFD_TMIO is not set
++# CONFIG_MFD_SMSC is not set
++# CONFIG_PMIC_DA903X is not set
+# CONFIG_MFD_DA9052_SPI is not set
++# CONFIG_MFD_DA9052_I2C is not set
++# CONFIG_MFD_DA9055 is not set
++# CONFIG_PMIC_ADP5520 is not set
++# CONFIG_MFD_LP8788 is not set
++# CONFIG_MFD_MAX77686 is not set
++# CONFIG_MFD_MAX77693 is not set
++# CONFIG_MFD_MAX8907 is not set
++# CONFIG_MFD_MAX8925 is not set
++# CONFIG_MFD_MAX8997 is not set
++# CONFIG_MFD_MAX8998 is not set
++# CONFIG_MFD_SEC_CORE is not set
+# CONFIG_MFD_ARIZONA_I2C is not set
+# CONFIG_MFD_ARIZONA_SPI is not set
++# CONFIG_MFD_WM8400 is not set
++# CONFIG_MFD_WM831X_I2C is not set
+# CONFIG_MFD_WM831X_SPI is not set
++# CONFIG_MFD_WM8350_I2C is not set
++# CONFIG_MFD_WM8994 is not set
+# CONFIG_MFD_PCF50633 is not set
+# CONFIG_MFD_MC13XXX_SPI is not set
+# CONFIG_MFD_MC13XXX_I2C is not set
@@ -26215,14 +28992,20 @@ index 0000000..b4c8852
+# CONFIG_MFD_TIMBERDALE is not set
+CONFIG_CY8C9540A=m
+CONFIG_INTEL_CLN_GIP=m
++CONFIG_INTEL_CLN_GIP_TEST=m
+CONFIG_LPC_SCH=y
+# CONFIG_LPC_ICH is not set
+# CONFIG_MFD_RDC321X is not set
+# CONFIG_MFD_JANZ_CMODIO is not set
+# CONFIG_MFD_VX855 is not set
+# CONFIG_MFD_WL1273_CORE is not set
++# CONFIG_MFD_TPS65090 is not set
++# CONFIG_MFD_AAT2870_CORE is not set
++# CONFIG_MFD_RC5T583 is not set
++# CONFIG_MFD_PALMAS is not set
+# CONFIG_MFD_VIPERBOARD is not set
+# CONFIG_MFD_RETU is not set
++# CONFIG_MFD_AS3711 is not set
+# CONFIG_REGULATOR is not set
+CONFIG_MEDIA_SUPPORT=m
+
@@ -26414,7 +29197,6 @@ index 0000000..b4c8852
+# CONFIG_SND_MIXER_OSS is not set
+# CONFIG_SND_PCM_OSS is not set
+# CONFIG_SND_HRTIMER is not set
-+# CONFIG_SND_RTCTIMER is not set
+# CONFIG_SND_DYNAMIC_MINORS is not set
+CONFIG_SND_SUPPORT_OLD_API=y
+CONFIG_SND_VERBOSE_PROCFS=y
@@ -26682,7 +29464,7 @@ index 0000000..b4c8852
+# USB port drivers
+#
+CONFIG_USB_SERIAL=m
-+# CONFIG_USB_SERIAL_GENERIC is not set
++CONFIG_USB_SERIAL_GENERIC=y
+# CONFIG_USB_SERIAL_AIRCABLE is not set
+# CONFIG_USB_SERIAL_ARK3116 is not set
+# CONFIG_USB_SERIAL_BELKIN is not set
@@ -26857,6 +29639,7 @@ index 0000000..b4c8852
+# CONFIG_LEDS_PCA9633 is not set
+# CONFIG_LEDS_DAC124S085 is not set
+# CONFIG_LEDS_BD2802 is not set
++# CONFIG_LEDS_INTEL_SS4200 is not set
+# CONFIG_LEDS_LT3593 is not set
+# CONFIG_LEDS_TCA6507 is not set
+# CONFIG_LEDS_LM355x is not set
@@ -26882,7 +29665,79 @@ index 0000000..b4c8852
+# CONFIG_ACCESSIBILITY is not set
+# CONFIG_INFINIBAND is not set
+# CONFIG_EDAC is not set
-+# CONFIG_RTC_CLASS is not set
++CONFIG_RTC_LIB=y
++CONFIG_RTC_CLASS=y
++CONFIG_RTC_HCTOSYS=y
++CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
++# CONFIG_RTC_DEBUG is not set
++
++#
++# RTC interfaces
++#
++CONFIG_RTC_INTF_SYSFS=y
++CONFIG_RTC_INTF_PROC=y
++CONFIG_RTC_INTF_DEV=y
++# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
++# CONFIG_RTC_DRV_TEST is not set
++
++#
++# I2C RTC drivers
++#
++# CONFIG_RTC_DRV_DS1307 is not set
++# CONFIG_RTC_DRV_DS1374 is not set
++# CONFIG_RTC_DRV_DS1672 is not set
++# CONFIG_RTC_DRV_DS3232 is not set
++# CONFIG_RTC_DRV_MAX6900 is not set
++# CONFIG_RTC_DRV_RS5C372 is not set
++# CONFIG_RTC_DRV_ISL1208 is not set
++# CONFIG_RTC_DRV_ISL12022 is not set
++# CONFIG_RTC_DRV_X1205 is not set
++# CONFIG_RTC_DRV_PCF8523 is not set
++# CONFIG_RTC_DRV_PCF8563 is not set
++# CONFIG_RTC_DRV_PCF8583 is not set
++# CONFIG_RTC_DRV_M41T80 is not set
++# CONFIG_RTC_DRV_BQ32K is not set
++# CONFIG_RTC_DRV_S35390A is not set
++# CONFIG_RTC_DRV_FM3130 is not set
++# CONFIG_RTC_DRV_RX8581 is not set
++# CONFIG_RTC_DRV_RX8025 is not set
++# CONFIG_RTC_DRV_EM3027 is not set
++# CONFIG_RTC_DRV_RV3029C2 is not set
++
++#
++# SPI RTC drivers
++#
++# CONFIG_RTC_DRV_M41T93 is not set
++# CONFIG_RTC_DRV_M41T94 is not set
++# CONFIG_RTC_DRV_DS1305 is not set
++# CONFIG_RTC_DRV_DS1390 is not set
++# CONFIG_RTC_DRV_MAX6902 is not set
++# CONFIG_RTC_DRV_R9701 is not set
++# CONFIG_RTC_DRV_RS5C348 is not set
++# CONFIG_RTC_DRV_DS3234 is not set
++# CONFIG_RTC_DRV_PCF2123 is not set
++
++#
++# Platform RTC drivers
++#
++CONFIG_RTC_DRV_CMOS=y
++# CONFIG_RTC_DRV_DS1286 is not set
++# CONFIG_RTC_DRV_DS1511 is not set
++# CONFIG_RTC_DRV_DS1553 is not set
++# CONFIG_RTC_DRV_DS1742 is not set
++# CONFIG_RTC_DRV_STK17TA8 is not set
++# CONFIG_RTC_DRV_M48T86 is not set
++# CONFIG_RTC_DRV_M48T35 is not set
++# CONFIG_RTC_DRV_M48T59 is not set
++# CONFIG_RTC_DRV_MSM6242 is not set
++# CONFIG_RTC_DRV_BQ4802 is not set
++# CONFIG_RTC_DRV_RP5C01 is not set
++# CONFIG_RTC_DRV_V3020 is not set
++# CONFIG_RTC_DRV_DS2404 is not set
++
++#
++# on-CPU RTC drivers
++#
+CONFIG_DMADEVICES=y
+# CONFIG_DMADEVICES_DEBUG is not set
+
@@ -26890,8 +29745,6 @@ index 0000000..b4c8852
+# DMA Devices
+#
+CONFIG_INTEL_MID_DMAC=m
-+CONFIG_INTEL_MID_PCI=m
-+CONFIG_INTEL_CLN_DMAC=m
+# CONFIG_INTEL_IOATDMA is not set
+# CONFIG_TIMB_DMA is not set
+# CONFIG_PCH_DMA is not set
@@ -27052,8 +29905,10 @@ index 0000000..b4c8852
+#
+# Triggers - standalone
+#
++# CONFIG_IIO_PERIODIC_RTC_TRIGGER is not set
+# CONFIG_IIO_GPIO_TRIGGER is not set
+CONFIG_IIO_SYSFS_TRIGGER=m
++CONFIG_IIO_HRTIMER_TRIGGER=m
+# CONFIG_IIO_SIMPLE_DUMMY is not set
+# CONFIG_ZSMALLOC is not set
+# CONFIG_CRYSTALHD is not set
@@ -27097,9 +29952,6 @@ index 0000000..b4c8852
+# CONFIG_ACPI_CMPC is not set
+CONFIG_INTEL_CLN_ESRAM=y
+CONFIG_INTEL_CLN_ECC_REFRESH_PERIOD=24
-+CONFIG_INTEL_CLN_ECC_SCRUB=y
-+# CONFIG_INTEL_CLN_ECC_SCRUB_OVERRIDE_CONFIG is not set
-+# CONFIG_INTEL_CLN_ECC_SCRUB_S3_CONFIG is not set
+CONFIG_INTEL_CLN_THERMAL=y
+CONFIG_INTEL_CLN_AUDIO_CTRL=m
+# CONFIG_INTEL_IPS is not set
@@ -27157,7 +30009,7 @@ index 0000000..b4c8852
+# CONFIG_AD8366 is not set
+
+#
-+# STMicro sensors
++# Hid Sensor IIO Common
+#
+CONFIG_IIO_ST_SENSORS_I2C=m
+CONFIG_IIO_ST_SENSORS_SPI=m
@@ -27227,6 +30079,8 @@ index 0000000..b4c8852
+CONFIG_EFI_VARS=m
+# CONFIG_DELL_RBU is not set
+# CONFIG_DCDBAS is not set
++CONFIG_DMIID=y
++CONFIG_DMI_SYSFS=y
+# CONFIG_ISCSI_IBFT_FIND is not set
+# CONFIG_GOOGLE_FIRMWARE is not set
+
@@ -27391,19 +30245,14 @@ index 0000000..b4c8852
+# CONFIG_DEBUG_KMEMLEAK is not set
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
-+CONFIG_DEBUG_SPINLOCK=y
-+CONFIG_DEBUG_MUTEXES=y
-+CONFIG_DEBUG_LOCK_ALLOC=y
-+CONFIG_PROVE_LOCKING=y
-+# CONFIG_PROVE_RCU is not set
++# CONFIG_DEBUG_SPINLOCK is not set
++# CONFIG_DEBUG_MUTEXES is not set
++# CONFIG_DEBUG_LOCK_ALLOC is not set
++# CONFIG_PROVE_LOCKING is not set
+# CONFIG_SPARSE_RCU_POINTER is not set
-+CONFIG_LOCKDEP=y
+# CONFIG_LOCK_STAT is not set
-+# CONFIG_DEBUG_LOCKDEP is not set
-+CONFIG_TRACE_IRQFLAGS=y
+# CONFIG_DEBUG_ATOMIC_SLEEP is not set
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
-+CONFIG_STACKTRACE=y
+# CONFIG_DEBUG_STACK_USAGE is not set
+# CONFIG_DEBUG_KOBJECT is not set
+# CONFIG_DEBUG_HIGHMEM is not set
@@ -27658,11 +30507,11 @@ index 0000000..b4c8852
+CONFIG_AVERAGE=y
+# CONFIG_CORDIC is not set
+# CONFIG_DDR is not set
-diff --git a/.meta/cfg/kernel-cache/bsp/clanton/clanton_emutex.cfg b/.meta/cfg/kernel-cache/bsp/clanton/clanton_emutex.cfg
+diff --git a/meta/cfg/kernel-cache/bsp/clanton/clanton_emutex.cfg b/meta/cfg/kernel-cache/bsp/clanton/clanton_emutex.cfg
new file mode 100644
-index 0000000..f032487
+index 0000000..68c375e
--- /dev/null
-+++ b/.meta/cfg/kernel-cache/bsp/clanton/clanton_emutex.cfg
++++ b/meta/cfg/kernel-cache/bsp/clanton/clanton_emutex.cfg
@@ -0,0 +1,2938 @@
+#
+# Automatically generated file; DO NOT EDIT.
@@ -28661,7 +31510,7 @@ index 0000000..f032487
+CONFIG_NET_VENDOR_INTEL=y
+# CONFIG_E100 is not set
+CONFIG_E1000=m
-+CONFIG_E1000E=m
++# CONFIG_E1000E is not set
+# CONFIG_IGB is not set
+# CONFIG_IGBVF is not set
+# CONFIG_IXGB is not set
@@ -29332,8 +32181,7 @@ index 0000000..f032487
+# Graphics support
+#
+# CONFIG_AGP is not set
-+CONFIG_VGA_ARB=y
-+CONFIG_VGA_ARB_MAX_GPUS=16
++# CONFIG_VGA_ARB is not set
+# CONFIG_VGA_SWITCHEROO is not set
+# CONFIG_DRM is not set
+# CONFIG_STUB_POULSBO is not set
@@ -29995,6 +32843,7 @@ index 0000000..f032487
+#
+# CONFIG_IIO_GPIO_TRIGGER is not set
+CONFIG_IIO_SYSFS_TRIGGER=m
++CONFIG_IIO_HRTIMER_TRIGGER=m
+# CONFIG_IIO_SIMPLE_DUMMY is not set
+# CONFIG_ZSMALLOC is not set
+# CONFIG_CRYSTALHD is not set