Diffstat (limited to 'recipes-kernel/linux/files/0014-Quark-GPIO-2-2-quark.patch')
-rw-r--r--  recipes-kernel/linux/files/0014-Quark-GPIO-2-2-quark.patch | 922
1 file changed, 922 insertions, 0 deletions
diff --git a/recipes-kernel/linux/files/0014-Quark-GPIO-2-2-quark.patch b/recipes-kernel/linux/files/0014-Quark-GPIO-2-2-quark.patch
new file mode 100644
index 0000000..9017515
--- /dev/null
+++ b/recipes-kernel/linux/files/0014-Quark-GPIO-2-2-quark.patch
@@ -0,0 +1,922 @@
+From xxxx Mon Sep 17 00:00:00 2001
+From: Dan O'Donovan <danielx.o'donovan@intel.com>
+Date: Thu, 13 Feb 2014 16:42:31 +0000
+Subject: [PATCH 14/21] Quark GPIO 2/2
+
+---
+ drivers/gpio/gpio-sch.c | 749 ++++++++++++++++++++++++++++++++++++++++-------
+ 1 files changed, 638 insertions(+), 111 deletions(-)
+
+diff --git a/drivers/gpio/gpio-sch.c b/drivers/gpio/gpio-sch.c
+index edae963..faabd97 100644
+--- a/drivers/gpio/gpio-sch.c
++++ b/drivers/gpio/gpio-sch.c
+@@ -28,6 +28,8 @@
+ #include <linux/pci_ids.h>
+
+ #include <linux/gpio.h>
++#include <linux/interrupt.h>
++#include <linux/irq.h>
+
+ static DEFINE_SPINLOCK(gpio_lock);
+
+@@ -35,83 +37,168 @@ static DEFINE_SPINLOCK(gpio_lock);
+ #define CGIO (0x04)
+ #define CGLV (0x08)
+
++#define CGTPE (0x0C)
++#define CGTNE (0x10)
++#define CGGPE (0x14)
++#define CGSMI (0x18)
++#define CGTS (0x1C)
++
+ #define RGEN (0x20)
+ #define RGIO (0x24)
+ #define RGLV (0x28)
+
+-static unsigned short gpio_ba;
++#define RGTPE (0x2C)
++#define RGTNE (0x30)
++#define RGGPE (0x34)
++#define RGSMI (0x38)
++#define RGTS (0x3C)
+
+-static int sch_gpio_core_direction_in(struct gpio_chip *gc, unsigned gpio_num)
++#define CGNMIEN (0x40)
++#define RGNMIEN (0x44)
++
++#define RESOURCE_IRQ 9
++
++static unsigned long gpio_ba;
++
++static int irq_num;
++
++struct sch_gpio_core_int_regvals {
++ u8 cgtpe;
++ u8 cgtne;
++ u8 cggpe;
++ u8 cgsmi;
++ u8 cgnmien;
++};
++
++struct sch_gpio_resume_int_regvals {
++ u8 rgtpe;
++ u8 rgtne;
++ u8 rggpe;
++ u8 rgsmi;
++ u8 rgnmien;
++};
++
++struct sch_gpio {
++ int irq_base_core;
++ struct sch_gpio_core_int_regvals initial_core;
++ struct sch_gpio_core_int_regvals lp_core;
++ int irq_base_resume;
++ struct sch_gpio_resume_int_regvals initial_resume;
++ struct sch_gpio_resume_int_regvals lp_resume;
++};
++
++static struct sch_gpio *chip_ptr;
++
++static void qrk_gpio_restrict_release(struct device *dev) {}
++static struct platform_device qrk_gpio_restrict_pdev = {
++ .name = "qrk-gpio-restrict-nc",
++ .dev.release = qrk_gpio_restrict_release,
++};
++
++static void sch_gpio_reg_clear_if_set(unsigned short reg,
++ unsigned short gpio_num)
+ {
+ u8 curr_dirs;
+ unsigned short offset, bit;
+
+- spin_lock(&gpio_lock);
+-
+- offset = CGIO + gpio_num / 8;
++ offset = reg + gpio_num / 8;
+ bit = gpio_num % 8;
+
+ curr_dirs = inb(gpio_ba + offset);
+
+- if (!(curr_dirs & (1 << bit)))
+- outb(curr_dirs | (1 << bit), gpio_ba + offset);
+-
+- spin_unlock(&gpio_lock);
+- return 0;
++ if (curr_dirs & (1 << bit))
++ outb(curr_dirs & ~(1 << bit), gpio_ba + offset);
+ }
+
+-static int sch_gpio_core_get(struct gpio_chip *gc, unsigned gpio_num)
++static void sch_gpio_reg_set_if_clear(unsigned short reg,
++ unsigned short gpio_num)
+ {
+- int res;
++ u8 curr_dirs;
+ unsigned short offset, bit;
+
+- offset = CGLV + gpio_num / 8;
++ offset = reg + gpio_num / 8;
+ bit = gpio_num % 8;
+
+- res = !!(inb(gpio_ba + offset) & (1 << bit));
+- return res;
++ curr_dirs = inb(gpio_ba + offset);
++
++ if (!(curr_dirs & (1 << bit)))
++ outb(curr_dirs | (1 << bit), gpio_ba + offset);
+ }
+
+-static void sch_gpio_core_set(struct gpio_chip *gc, unsigned gpio_num, int val)
++static void sch_gpio_reg_set(unsigned short reg, unsigned short gpio_num,
++ int val)
+ {
+- u8 curr_vals;
++ u8 curr_dirs;
+ unsigned short offset, bit;
+
+- spin_lock(&gpio_lock);
+-
+- offset = CGLV + gpio_num / 8;
++ offset = reg + gpio_num / 8;
+ bit = gpio_num % 8;
+
+- curr_vals = inb(gpio_ba + offset);
++ curr_dirs = inb(gpio_ba + offset);
+
+ if (val)
+- outb(curr_vals | (1 << bit), gpio_ba + offset);
++ outb(curr_dirs | (1 << bit), gpio_ba + offset);
+ else
+- outb((curr_vals & ~(1 << bit)), gpio_ba + offset);
+- spin_unlock(&gpio_lock);
++ outb(curr_dirs & ~(1 << bit), gpio_ba + offset);
+ }
+
+-static int sch_gpio_core_direction_out(struct gpio_chip *gc,
+- unsigned gpio_num, int val)
++static unsigned short sch_gpio_reg_get(unsigned short reg,
++ unsigned short gpio_num)
+ {
+ u8 curr_dirs;
+ unsigned short offset, bit;
+
+- sch_gpio_core_set(gc, gpio_num, val);
++ offset = reg + gpio_num / 8;
++ bit = gpio_num % 8;
+
+- spin_lock(&gpio_lock);
++ curr_dirs = !!(inb(gpio_ba + offset) & (1 << bit));
+
+- offset = CGIO + gpio_num / 8;
+- bit = gpio_num % 8;
++ return curr_dirs;
++}
+
+- curr_dirs = inb(gpio_ba + offset);
+- if (curr_dirs & (1 << bit))
+- outb(curr_dirs & ~(1 << bit), gpio_ba + offset);
++static int sch_gpio_core_direction_in(struct gpio_chip *gc, unsigned gpio_num)
++{
++ unsigned long flags = 0;
++ spin_lock_irqsave(&gpio_lock, flags);
++ sch_gpio_reg_set_if_clear(CGIO, gpio_num);
++ spin_unlock_irqrestore(&gpio_lock, flags);
++
++ return 0;
++}
++
++static int sch_gpio_core_get(struct gpio_chip *gc, unsigned gpio_num)
++{
++ int res;
++ res = sch_gpio_reg_get(CGLV, gpio_num);
++
++ return res;
++}
++
++static void sch_gpio_core_set(struct gpio_chip *gc, unsigned gpio_num, int val)
++{
++ unsigned long flags = 0;
++ spin_lock_irqsave(&gpio_lock, flags);
++ sch_gpio_reg_set(CGLV, gpio_num, val);
++ spin_unlock_irqrestore(&gpio_lock, flags);
++
++}
++
++static int sch_gpio_core_direction_out(struct gpio_chip *gc,
++ unsigned gpio_num, int val)
++{
++ unsigned long flags = 0;
++ spin_lock_irqsave(&gpio_lock, flags);
++ sch_gpio_reg_clear_if_set(CGIO, gpio_num);
++ spin_unlock_irqrestore(&gpio_lock, flags);
+
+- spin_unlock(&gpio_lock);
+ return 0;
+ }
+
++static int sch_gpio_core_to_irq(struct gpio_chip *gc, unsigned offset)
++{
++ return chip_ptr->irq_base_core + offset;
++}
++
+ static struct gpio_chip sch_gpio_core = {
+ .label = "sch_gpio_core",
+ .owner = THIS_MODULE,
+@@ -119,63 +206,198 @@ static struct gpio_chip sch_gpio_core = {
+ .get = sch_gpio_core_get,
+ .direction_output = sch_gpio_core_direction_out,
+ .set = sch_gpio_core_set,
++ .to_irq = sch_gpio_core_to_irq,
+ };
+
+-static int sch_gpio_resume_direction_in(struct gpio_chip *gc,
+- unsigned gpio_num)
++static void sch_gpio_core_irq_enable(struct irq_data *d)
+ {
+- u8 curr_dirs;
++ u32 gpio_num = 0;
+
+- spin_lock(&gpio_lock);
++ gpio_num = d->irq - chip_ptr->irq_base_core;
++ sch_gpio_reg_set_if_clear(CGGPE, gpio_num);
++}
+
+- curr_dirs = inb(gpio_ba + RGIO);
++static void sch_gpio_core_irq_disable(struct irq_data *d)
++{
++ u32 gpio_num = 0;
+
+- if (!(curr_dirs & (1 << gpio_num)))
+- outb(curr_dirs | (1 << gpio_num) , gpio_ba + RGIO);
++ gpio_num = d->irq - chip_ptr->irq_base_core;
++ sch_gpio_reg_clear_if_set(CGGPE, gpio_num);
++}
+
+- spin_unlock(&gpio_lock);
+- return 0;
++static void sch_gpio_core_irq_ack(struct irq_data *d)
++{
++ u32 gpio_num = 0;
++
++ gpio_num = d->irq - chip_ptr->irq_base_core;
++ sch_gpio_reg_set(CGTS, gpio_num, 1);
+ }
+
+-static int sch_gpio_resume_get(struct gpio_chip *gc, unsigned gpio_num)
++static int sch_gpio_core_irq_type(struct irq_data *d, unsigned type)
+ {
+- return !!(inb(gpio_ba + RGLV) & (1 << gpio_num));
++ int ret = 0;
++ unsigned long flags = 0;
++ u32 gpio_num = 0;
++
++ if (NULL == d) {
++ pr_err("%s(): null irq_data\n", __func__);
++ return -EFAULT;
++ }
++
++ gpio_num = d->irq - chip_ptr->irq_base_core;
++
++ spin_lock_irqsave(&gpio_lock, flags);
++
++ switch (type) {
++ case IRQ_TYPE_EDGE_RISING:
++ sch_gpio_reg_clear_if_set(CGTNE, gpio_num);
++ sch_gpio_reg_set_if_clear(CGTPE, gpio_num);
++ break;
++ case IRQ_TYPE_EDGE_FALLING:
++ sch_gpio_reg_clear_if_set(CGTPE, gpio_num);
++ sch_gpio_reg_set_if_clear(CGTNE, gpio_num);
++ break;
++ case IRQ_TYPE_EDGE_BOTH:
++ sch_gpio_reg_set_if_clear(CGTPE, gpio_num);
++ sch_gpio_reg_set_if_clear(CGTNE, gpio_num);
++ break;
++ case IRQ_TYPE_NONE:
++ sch_gpio_reg_clear_if_set(CGTPE, gpio_num);
++ sch_gpio_reg_clear_if_set(CGTNE, gpio_num);
++ break;
++ default:
++ ret = -EINVAL;
++ break;
++ }
++
++ spin_unlock_irqrestore(&gpio_lock, flags);
++
++ return ret;
++}
++
++static struct irq_chip sch_irq_core = {
++ .irq_ack = sch_gpio_core_irq_ack,
++ .irq_set_type = sch_gpio_core_irq_type,
++ .irq_enable = sch_gpio_core_irq_enable,
++ .irq_disable = sch_gpio_core_irq_disable,
++};
++
++static void sch_gpio_core_irqs_init(struct sch_gpio *chip, unsigned int num)
++{
++ int i;
++
++ for (i = 0; i < num; i++) {
++ irq_set_chip_data(i + chip->irq_base_core, chip);
++ irq_set_chip_and_handler_name(i + chip->irq_base_core,
++ &sch_irq_core,
++ handle_simple_irq,
++ "sch_gpio_irq_core");
++ }
+ }
+
+-static void sch_gpio_resume_set(struct gpio_chip *gc,
+- unsigned gpio_num, int val)
++static void sch_gpio_core_irqs_deinit(struct sch_gpio *chip, unsigned int num)
+ {
+- u8 curr_vals;
++ int i;
+
+- spin_lock(&gpio_lock);
++ for (i = 0; i < num; i++) {
++ irq_set_chip_data(i + chip->irq_base_core, 0);
++ irq_set_chip_and_handler_name(i + chip->irq_base_core,
++ 0, 0, 0);
++ }
++}
+
+- curr_vals = inb(gpio_ba + RGLV);
++static void sch_gpio_core_irq_disable_all(struct sch_gpio *chip,
++ unsigned int num)
++{
++ unsigned long flags = 0;
++ u32 gpio_num = 0;
++
++ spin_lock_irqsave(&gpio_lock, flags);
++
++ for (gpio_num = 0; gpio_num < num; gpio_num++) {
++ sch_gpio_reg_clear_if_set(CGTPE, gpio_num);
++ sch_gpio_reg_clear_if_set(CGTNE, gpio_num);
++ sch_gpio_reg_clear_if_set(CGGPE, gpio_num);
++ sch_gpio_reg_clear_if_set(CGSMI, gpio_num);
++ sch_gpio_reg_clear_if_set(CGNMIEN, gpio_num);
++ /* clear any pending interrupt */
++ sch_gpio_reg_set(CGTS, gpio_num, 1);
++ }
+
+- if (val)
+- outb(curr_vals | (1 << gpio_num), gpio_ba + RGLV);
+- else
+- outb((curr_vals & ~(1 << gpio_num)), gpio_ba + RGLV);
++ spin_unlock_irqrestore(&gpio_lock, flags);
+
+- spin_unlock(&gpio_lock);
+ }
+
+-static int sch_gpio_resume_direction_out(struct gpio_chip *gc,
+- unsigned gpio_num, int val)
++void sch_gpio_core_save_state(struct sch_gpio_core_int_regvals *regs)
+ {
+- u8 curr_dirs;
++ unsigned long flags = 0;
++ spin_lock_irqsave(&gpio_lock, flags);
++
++ regs->cgtpe = inb(gpio_ba + CGTPE);
++ regs->cgtne = inb(gpio_ba + CGTNE);
++ regs->cggpe = inb(gpio_ba + CGGPE);
++ regs->cgsmi = inb(gpio_ba + CGSMI);
++ regs->cgnmien = inb(gpio_ba + CGNMIEN);
+
+- sch_gpio_resume_set(gc, gpio_num, val);
++ spin_unlock_irqrestore(&gpio_lock, flags);
++}
+
+- spin_lock(&gpio_lock);
++void sch_gpio_core_restore_state(struct sch_gpio_core_int_regvals *regs)
++{
++ unsigned long flags = 0;
++ spin_lock_irqsave(&gpio_lock, flags);
+
+- curr_dirs = inb(gpio_ba + RGIO);
+- if (curr_dirs & (1 << gpio_num))
+- outb(curr_dirs & ~(1 << gpio_num), gpio_ba + RGIO);
++ outb(regs->cgtpe, gpio_ba + CGTPE);
++ outb(regs->cgtne, gpio_ba + CGTNE);
++ outb(regs->cggpe, gpio_ba + CGGPE);
++ outb(regs->cgsmi, gpio_ba + CGSMI);
++ outb(regs->cgnmien, gpio_ba + CGNMIEN);
+
+- spin_unlock(&gpio_lock);
++ spin_unlock_irqrestore(&gpio_lock, flags);
++}
++
++static int sch_gpio_resume_direction_in(struct gpio_chip *gc,
++ unsigned gpio_num)
++{
++ unsigned long flags = 0;
++ spin_lock_irqsave(&gpio_lock, flags);
++ sch_gpio_reg_set_if_clear(RGIO, gpio_num);
++ spin_unlock_irqrestore(&gpio_lock, flags);
+ return 0;
+ }
+
++static int sch_gpio_resume_get(struct gpio_chip *gc, unsigned gpio_num)
++{
++ int res;
++ res = sch_gpio_reg_get(RGLV, gpio_num);
++ return res;
++}
++
++static void sch_gpio_resume_set(struct gpio_chip *gc, unsigned gpio_num,
++ int val)
++{
++ unsigned long flags = 0;
++ spin_lock_irqsave(&gpio_lock, flags);
++ sch_gpio_reg_set(RGLV, gpio_num, val);
++ spin_unlock_irqrestore(&gpio_lock, flags);
++
++}
++
++static int sch_gpio_resume_direction_out(struct gpio_chip *gc,
++ unsigned gpio_num, int val)
++{
++ unsigned long flags = 0;
++ spin_lock_irqsave(&gpio_lock, flags);
++ sch_gpio_reg_clear_if_set(RGIO, gpio_num);
++ spin_unlock_irqrestore(&gpio_lock, flags);
++ return 0;
++}
++
++static int sch_gpio_resume_to_irq(struct gpio_chip *gc, unsigned offset)
++{
++ return chip_ptr->irq_base_resume + offset;
++}
++
+ static struct gpio_chip sch_gpio_resume = {
+ .label = "sch_gpio_resume",
+ .owner = THIS_MODULE,
+@@ -183,13 +405,203 @@ static struct gpio_chip sch_gpio_resume = {
+ .get = sch_gpio_resume_get,
+ .direction_output = sch_gpio_resume_direction_out,
+ .set = sch_gpio_resume_set,
++ .to_irq = sch_gpio_resume_to_irq,
++};
++
++static void sch_gpio_resume_irq_enable(struct irq_data *d)
++{
++ u32 gpio_num = 0;
++
++ gpio_num = d->irq - chip_ptr->irq_base_resume;
++ sch_gpio_reg_set_if_clear(RGGPE, gpio_num);
++}
++
++static void sch_gpio_resume_irq_disable(struct irq_data *d)
++{
++ u32 gpio_num = 0;
++
++ gpio_num = d->irq - chip_ptr->irq_base_resume;
++ sch_gpio_reg_clear_if_set(RGGPE, gpio_num);
++}
++
++static void sch_gpio_resume_irq_ack(struct irq_data *d)
++{
++ u32 gpio_num = 0;
++
++ gpio_num = d->irq - chip_ptr->irq_base_resume;
++ sch_gpio_reg_set(RGTS, gpio_num, 1);
++}
++
++static int sch_gpio_resume_irq_type(struct irq_data *d, unsigned type)
++{
++ int ret = 0;
++ unsigned long flags = 0;
++ u32 gpio_num = 0;
++
++ if (NULL == d) {
++ pr_err("%s(): null irq_data\n", __func__);
++ return -EFAULT;
++ }
++
++ gpio_num = d->irq - chip_ptr->irq_base_resume;
++
++ spin_lock_irqsave(&gpio_lock, flags);
++
++ switch (type) {
++ case IRQ_TYPE_EDGE_RISING:
++ sch_gpio_reg_clear_if_set(RGTNE, gpio_num);
++ sch_gpio_reg_set_if_clear(RGTPE, gpio_num);
++ break;
++ case IRQ_TYPE_EDGE_FALLING:
++ sch_gpio_reg_clear_if_set(RGTPE, gpio_num);
++ sch_gpio_reg_set_if_clear(RGTNE, gpio_num);
++ break;
++ case IRQ_TYPE_EDGE_BOTH:
++ sch_gpio_reg_set_if_clear(RGTPE, gpio_num);
++ sch_gpio_reg_set_if_clear(RGTNE, gpio_num);
++ break;
++ case IRQ_TYPE_NONE:
++ sch_gpio_reg_clear_if_set(RGTPE, gpio_num);
++ sch_gpio_reg_clear_if_set(RGTNE, gpio_num);
++ break;
++ default:
++ ret = -EINVAL;
++ break;
++ }
++
++ spin_unlock_irqrestore(&gpio_lock, flags);
++
++ return ret;
++}
++
++static struct irq_chip sch_irq_resume = {
++ .irq_ack = sch_gpio_resume_irq_ack,
++ .irq_set_type = sch_gpio_resume_irq_type,
++ .irq_enable = sch_gpio_resume_irq_enable,
++ .irq_disable = sch_gpio_resume_irq_disable,
+ };
+
++static void sch_gpio_resume_irqs_init(struct sch_gpio *chip, unsigned int num)
++{
++ int i;
++
++ for (i = 0; i < num; i++) {
++ irq_set_chip_data(i + chip->irq_base_resume, chip);
++ irq_set_chip_and_handler_name(i + chip->irq_base_resume,
++ &sch_irq_resume,
++ handle_simple_irq,
++ "sch_gpio_irq_resume");
++ }
++}
++
++static void sch_gpio_resume_irqs_deinit(struct sch_gpio *chip, unsigned int num)
++{
++ int i;
++
++ for (i = 0; i < num; i++) {
++ irq_set_chip_data(i + chip->irq_base_core, 0);
++ irq_set_chip_and_handler_name(i + chip->irq_base_core,
++ 0, 0, 0);
++ }
++}
++
++static void sch_gpio_resume_irq_disable_all(struct sch_gpio *chip,
++ unsigned int num)
++{
++ unsigned long flags = 0;
++ u32 gpio_num = 0;
++
++ spin_lock_irqsave(&gpio_lock, flags);
++
++ for (gpio_num = 0; gpio_num < num; gpio_num++) {
++ sch_gpio_reg_clear_if_set(RGTPE, gpio_num);
++ sch_gpio_reg_clear_if_set(RGTNE, gpio_num);
++ sch_gpio_reg_clear_if_set(RGGPE, gpio_num);
++ sch_gpio_reg_clear_if_set(RGSMI, gpio_num);
++ sch_gpio_reg_clear_if_set(RGNMIEN, gpio_num);
++ /* clear any pending interrupt */
++ sch_gpio_reg_set(RGTS, gpio_num, 1);
++ }
++
++ spin_unlock_irqrestore(&gpio_lock, flags);
++}
++
++void sch_gpio_resume_save_state(struct sch_gpio_resume_int_regvals *regs)
++{
++ unsigned long flags = 0;
++
++ spin_lock_irqsave(&gpio_lock, flags);
++
++ regs->rgtpe = inb(gpio_ba + RGTPE);
++ regs->rgtne = inb(gpio_ba + RGTNE);
++ regs->rggpe = inb(gpio_ba + RGGPE);
++ regs->rgsmi = inb(gpio_ba + RGSMI);
++ regs->rgnmien = inb(gpio_ba + RGNMIEN);
++
++ spin_unlock_irqrestore(&gpio_lock, flags);
++}
++
++void sch_gpio_resume_restore_state(struct sch_gpio_resume_int_regvals *regs)
++{
++ unsigned long flags = 0;
++ spin_lock_irqsave(&gpio_lock, flags);
++
++ outb(regs->rgtpe, gpio_ba + RGTPE);
++ outb(regs->rgtne, gpio_ba + RGTNE);
++ outb(regs->rggpe, gpio_ba + RGGPE);
++ outb(regs->rgsmi, gpio_ba + RGSMI);
++ outb(regs->rgnmien, gpio_ba + RGNMIEN);
++
++ spin_unlock_irqrestore(&gpio_lock, flags);
++}
++
++static irqreturn_t sch_gpio_irq_handler(int irq, void *dev_id)
++{
++ int res;
++ int i, ret = IRQ_NONE;
++
++ for (i = 0; i < sch_gpio_core.ngpio; i++) {
++
++ res = sch_gpio_reg_get(CGTS, i);
++ if (res) {
++ /* clear by setting TS to 1 */
++ sch_gpio_reg_set(CGTS, i, 1);
++
++ generic_handle_irq(chip_ptr->irq_base_core + i);
++ ret = IRQ_HANDLED;
++ }
++ }
++
++ for (i = 0; i < sch_gpio_resume.ngpio; i++) {
++
++ res = sch_gpio_reg_get(RGTS, i);
++ if (res) {
++ /* clear by setting TS to 1 */
++ sch_gpio_reg_set(RGTS, i, 1);
++
++ generic_handle_irq(chip_ptr->irq_base_resume + i);
++ ret = IRQ_HANDLED;
++ }
++ }
++
++ return ret;
++}
++
+ static int sch_gpio_probe(struct platform_device *pdev)
+ {
+ struct resource *res;
++ struct sch_gpio *chip;
+ int err, id;
+
++ chip = kzalloc(sizeof(*chip), GFP_KERNEL);
++ if (chip == NULL)
++ return -ENOMEM;
++
++ chip_ptr = chip;
++
++ sch_gpio_core_save_state(&(chip->initial_core));
++ sch_gpio_resume_save_state(&(chip->initial_resume));
++
+ id = pdev->id;
+ if (!id)
+ return -ENODEV;
+@@ -203,46 +615,56 @@ static int sch_gpio_probe(struct platform_device *pdev)
+
+ gpio_ba = res->start;
+
++ irq_num = RESOURCE_IRQ;
++
+ switch (id) {
+- case PCI_DEVICE_ID_INTEL_SCH_LPC:
+- sch_gpio_core.base = 0;
+- sch_gpio_core.ngpio = 10;
+-
+- sch_gpio_resume.base = 10;
+- sch_gpio_resume.ngpio = 4;
+-
+- /*
+- * GPIO[6:0] enabled by default
+- * GPIO7 is configured by the CMC as SLPIOVR
+- * Enable GPIO[9:8] core powered gpios explicitly
+- */
+- outb(0x3, gpio_ba + CGEN + 1);
+- /*
+- * SUS_GPIO[2:0] enabled by default
+- * Enable SUS_GPIO3 resume powered gpio explicitly
+- */
+- outb(0x8, gpio_ba + RGEN);
+- break;
+-
+- case PCI_DEVICE_ID_INTEL_ITC_LPC:
+- sch_gpio_core.base = 0;
+- sch_gpio_core.ngpio = 5;
+-
+- sch_gpio_resume.base = 5;
+- sch_gpio_resume.ngpio = 9;
+- break;
+-
+- case PCI_DEVICE_ID_INTEL_CENTERTON_ILB:
+- sch_gpio_core.base = 0;
+- sch_gpio_core.ngpio = 21;
+-
+- sch_gpio_resume.base = 21;
+- sch_gpio_resume.ngpio = 9;
+- break;
+-
+- default:
+- err = -ENODEV;
+- goto err_sch_gpio_core;
++ case PCI_DEVICE_ID_INTEL_SCH_LPC:
++ sch_gpio_core.base = 0;
++ sch_gpio_core.ngpio = 10;
++
++ sch_gpio_resume.base = 10;
++ sch_gpio_resume.ngpio = 4;
++
++ /*
++ * GPIO[6:0] enabled by default
++ * GPIO7 is configured by the CMC as SLPIOVR
++ * Enable GPIO[9:8] core powered gpios explicitly
++ */
++ outb(0x3, gpio_ba + CGEN + 1);
++ /*
++ * SUS_GPIO[2:0] enabled by default
++ * Enable SUS_GPIO3 resume powered gpio explicitly
++ */
++ outb(0x8, gpio_ba + RGEN);
++ break;
++
++ case PCI_DEVICE_ID_INTEL_ITC_LPC:
++ sch_gpio_core.base = 0;
++ sch_gpio_core.ngpio = 5;
++
++ sch_gpio_resume.base = 5;
++ sch_gpio_resume.ngpio = 9;
++ break;
++
++ case PCI_DEVICE_ID_INTEL_CENTERTON_ILB:
++ sch_gpio_core.base = 0;
++ sch_gpio_core.ngpio = 21;
++
++ sch_gpio_resume.base = 21;
++ sch_gpio_resume.ngpio = 9;
++ break;
++
++ case PCI_DEVICE_ID_INTEL_QUARK_ILB:
++ sch_gpio_core.base = 0;
++ sch_gpio_core.ngpio = 2;
++
++ sch_gpio_resume.base = 2;
++ sch_gpio_resume.ngpio = 6;
++ break;
++
++ default:
++ err = -ENODEV;
++ goto err_sch_gpio_core;
+ }
+
+ sch_gpio_core.dev = &pdev->dev;
+@@ -256,44 +678,147 @@ static int sch_gpio_probe(struct platform_device *pdev)
+ if (err < 0)
+ goto err_sch_gpio_resume;
+
++ chip->irq_base_core = irq_alloc_descs(-1, 0,
++ sch_gpio_core.ngpio,
++ NUMA_NO_NODE);
++ if (chip->irq_base_core < 0) {
++ dev_err(&pdev->dev, "failure adding GPIO core IRQ descs\n");
++ chip->irq_base_core = -1;
++ goto err_sch_intr_core;
++ }
++
++ chip->irq_base_resume = irq_alloc_descs(-1, 0,
++ sch_gpio_resume.ngpio,
++ NUMA_NO_NODE);
++ if (chip->irq_base_resume < 0) {
++ dev_err(&pdev->dev, "failure adding GPIO resume IRQ descs\n");
++ chip->irq_base_resume = -1;
++ goto err_sch_intr_resume;
++ }
++
++ platform_set_drvdata(pdev, chip);
++
++ err = platform_device_register(&qrk_gpio_restrict_pdev);
++ if (err < 0)
++ goto err_sch_gpio_device_register;
++
++ /* disable interrupts */
++ sch_gpio_core_irq_disable_all(chip, sch_gpio_core.ngpio);
++ sch_gpio_resume_irq_disable_all(chip, sch_gpio_resume.ngpio);
++
++
++ err = request_irq(irq_num, sch_gpio_irq_handler,
++ IRQF_SHARED, KBUILD_MODNAME, chip);
++ if (err != 0) {
++ dev_err(&pdev->dev,
++ "%s request_irq failed\n", __func__);
++ goto err_sch_request_irq;
++ }
++
++ sch_gpio_core_irqs_init(chip, sch_gpio_core.ngpio);
++ sch_gpio_resume_irqs_init(chip, sch_gpio_resume.ngpio);
++
+ return 0;
+
++err_sch_request_irq:
++ platform_device_unregister(&qrk_gpio_restrict_pdev);
++
++err_sch_gpio_device_register:
++ irq_free_descs(chip->irq_base_resume, sch_gpio_resume.ngpio);
++
++err_sch_intr_resume:
++ irq_free_descs(chip->irq_base_core, sch_gpio_core.ngpio);
++
++err_sch_intr_core:
++ err = gpiochip_remove(&sch_gpio_resume);
++ if (err)
++ dev_err(&pdev->dev, "%s failed, %d\n",
++ "resume gpiochip_remove()", err);
++
+ err_sch_gpio_resume:
+ err = gpiochip_remove(&sch_gpio_core);
+ if (err)
+ dev_err(&pdev->dev, "%s failed, %d\n",
+- "gpiochip_remove()", err);
++ "core gpiochip_remove()", err);
+
+ err_sch_gpio_core:
+ release_region(res->start, resource_size(res));
+ gpio_ba = 0;
+
++ sch_gpio_resume_restore_state(&(chip->initial_resume));
++ sch_gpio_core_restore_state(&(chip->initial_core));
++
++ kfree(chip);
++ chip_ptr = 0;
++
+ return err;
+ }
+
+ static int sch_gpio_remove(struct platform_device *pdev)
+ {
++ int err = 0;
+ struct resource *res;
++
++ struct sch_gpio *chip = platform_get_drvdata(pdev);
++
+ if (gpio_ba) {
+- int err;
+
+- err = gpiochip_remove(&sch_gpio_core);
++ sch_gpio_resume_irqs_deinit(chip, sch_gpio_resume.ngpio);
++ sch_gpio_core_irqs_deinit(chip, sch_gpio_core.ngpio);
++
++ if (irq_num > 0)
++ free_irq(irq_num, chip);
++
++ platform_device_unregister(&qrk_gpio_restrict_pdev);
++
++ irq_free_descs(chip->irq_base_resume,
++ sch_gpio_resume.ngpio);
++
++ irq_free_descs(chip->irq_base_core, sch_gpio_core.ngpio);
++
++ err = gpiochip_remove(&sch_gpio_resume);
+ if (err)
+ dev_err(&pdev->dev, "%s failed, %d\n",
+- "gpiochip_remove()", err);
+- err = gpiochip_remove(&sch_gpio_resume);
++ "resume gpiochip_remove()", err);
++
++ err = gpiochip_remove(&sch_gpio_core);
+ if (err)
+ dev_err(&pdev->dev, "%s failed, %d\n",
+- "gpiochip_remove()", err);
++ "core gpiochip_remove()", err);
+
+ res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+
+ release_region(res->start, resource_size(res));
+ gpio_ba = 0;
+-
+- return err;
+ }
+
++ sch_gpio_resume_restore_state(&(chip->initial_resume));
++ sch_gpio_core_restore_state(&(chip->initial_core));
++
++ kfree(chip);
++
++ chip_ptr = 0;
++
++ return err;
++}
++
++static int sch_gpio_suspend_sys(struct platform_device *pdev,
++ pm_message_t state)
++{
++ sch_gpio_core_save_state(&(chip_ptr->lp_core));
++ sch_gpio_resume_save_state(&(chip_ptr->lp_resume));
++
++ sch_gpio_resume_restore_state(&(chip_ptr->initial_resume));
++ sch_gpio_core_restore_state(&(chip_ptr->initial_core));
++
++ return 0;
++}
++
++static int sch_gpio_resume_sys(struct platform_device *pdev)
++{
++ sch_gpio_resume_restore_state(&(chip_ptr->lp_resume));
++ sch_gpio_core_restore_state(&(chip_ptr->lp_core));
++
+ return 0;
+ }
+
+@@ -304,6 +829,8 @@ static struct platform_driver sch_gpio_driver = {
+ },
+ .probe = sch_gpio_probe,
+ .remove = sch_gpio_remove,
++ .suspend = sch_gpio_suspend_sys,
++ .resume = sch_gpio_resume_sys,
+ };
+
+ module_platform_driver(sch_gpio_driver);
+--
+1.7.4.1
+
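
The functional core of the patch above is the new .to_irq hook on both gpio_chips plus the shared handler on IRQ 9: once applied, a consumer driver can translate a Quark GPIO number into a Linux IRQ and register an edge handler in the usual way. The sketch below is illustrative only and is not part of the patch; GPIO 2 (the first resume-well pin in the Quark case above), the module name, and the falling-edge choice are assumptions for a hypothetical board, and it uses the legacy integer GPIO API consistent with the 3.8-era kernel this series targets.

/*
 * Illustrative consumer of the gpio-sch IRQ support added by this patch.
 * NOT part of the patch. GPIO 2 and all names here are assumptions for a
 * hypothetical board setup.
 */
#include <linux/module.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>

#define DEMO_GPIO 2	/* assumed: first resume-well GPIO on Quark (sch_gpio_resume.base) */

static int demo_irq;

static irqreturn_t demo_isr(int irq, void *dev_id)
{
	pr_info("gpio-sch demo: edge detected on GPIO %d\n", DEMO_GPIO);
	return IRQ_HANDLED;
}

static int __init demo_init(void)
{
	int ret;

	ret = gpio_request(DEMO_GPIO, "gpio-sch-demo");
	if (ret)
		return ret;

	gpio_direction_input(DEMO_GPIO);

	/* translation made possible by the .to_irq callback added above */
	demo_irq = gpio_to_irq(DEMO_GPIO);
	if (demo_irq < 0) {
		gpio_free(DEMO_GPIO);
		return demo_irq;
	}

	/* trigger flags end up in sch_gpio_resume_irq_type() via the irq core */
	ret = request_irq(demo_irq, demo_isr, IRQF_TRIGGER_FALLING,
			  "gpio-sch-demo", NULL);
	if (ret)
		gpio_free(DEMO_GPIO);
	return ret;
}

static void __exit demo_exit(void)
{
	free_irq(demo_irq, NULL);
	gpio_free(DEMO_GPIO);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

With a module like this loaded, an edge on the pin is routed from IRQ 9 through sch_gpio_irq_handler(), which acknowledges the status bit in RGTS and calls generic_handle_irq() on the descriptor allocated in sch_gpio_probe(), so demo_isr() runs.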