Diffstat (limited to 'include')
 -rw-r--r--  include/asm-arm/arch-s3c2410/spi.h    |   1
 -rw-r--r--  include/asm-generic/gpio.h            |   1
 -rw-r--r--  include/asm-generic/pgtable-nopmd.h   |   6
 -rw-r--r--  include/linux/buffer_head.h           |   2
 -rw-r--r--  include/linux/cpumask.h               |  63
 -rw-r--r--  include/linux/fs.h                    |  44
 -rw-r--r--  include/linux/mm.h                    |   3
 -rw-r--r--  include/linux/mm_types.h              |   4
 -rw-r--r--  include/linux/mmu_notifier.h          | 279
 -rw-r--r--  include/linux/pagemap.h               |   1
 -rw-r--r--  include/linux/rculist.h               |  28
 -rw-r--r--  include/linux/rmap.h                  |   8
 12 files changed, 379 insertions(+), 61 deletions(-)
diff --git a/include/asm-arm/arch-s3c2410/spi.h b/include/asm-arm/arch-s3c2410/spi.h
index 352d33860b63..442169887d3b 100644
--- a/include/asm-arm/arch-s3c2410/spi.h
+++ b/include/asm-arm/arch-s3c2410/spi.h
@@ -16,6 +16,7 @@
struct s3c2410_spi_info {
unsigned long pin_cs; /* simple gpio cs */
unsigned int num_cs; /* total chipselects */
+ int bus_num; /* bus number to use. */
void (*set_cs)(struct s3c2410_spi_info *spi, int cs, int pol);
};
diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h
index c764a8fcb058..0f99ad38b012 100644
--- a/include/asm-generic/gpio.h
+++ b/include/asm-generic/gpio.h
@@ -2,6 +2,7 @@
#define _ASM_GENERIC_GPIO_H
#include <linux/types.h>
+#include <linux/errno.h>
#ifdef CONFIG_GPIOLIB
diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
index 087325ede76c..a7cdc48e8b78 100644
--- a/include/asm-generic/pgtable-nopmd.h
+++ b/include/asm-generic/pgtable-nopmd.h
@@ -5,6 +5,8 @@
#include <asm-generic/pgtable-nopud.h>
+struct mm_struct;
+
#define __PAGETABLE_PMD_FOLDED
/*
@@ -54,7 +56,9 @@ static inline pmd_t * pmd_offset(pud_t * pud, unsigned long address)
* inside the pud, so has no extra memory associated with it.
*/
#define pmd_alloc_one(mm, address) NULL
-#define pmd_free(mm, x) do { } while (0)
+static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
+{
+}
#define __pmd_free_tlb(tlb, x) do { } while (0)
#undef pmd_addr_end
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 82aa36c53ea7..50cfe8ceb478 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -205,6 +205,8 @@ void block_invalidatepage(struct page *page, unsigned long offset);
int block_write_full_page(struct page *page, get_block_t *get_block,
struct writeback_control *wbc);
int block_read_full_page(struct page*, get_block_t*);
+int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
+ unsigned long from);
int block_write_begin(struct file *, struct address_space *,
loff_t, unsigned, unsigned,
struct page **, void **, get_block_t*);
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 1b5c98e7fef7..96d0509fb8d8 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -62,15 +62,7 @@
* int next_cpu_nr(cpu, mask) Next cpu past 'cpu', or nr_cpu_ids
*
* cpumask_t cpumask_of_cpu(cpu) Return cpumask with bit 'cpu' set
- *ifdef CONFIG_HAS_CPUMASK_OF_CPU
- * cpumask_of_cpu_ptr_declare(v) Declares cpumask_t *v
- * cpumask_of_cpu_ptr_next(v, cpu) Sets v = &cpumask_of_cpu_map[cpu]
- * cpumask_of_cpu_ptr(v, cpu) Combines above two operations
- *else
- * cpumask_of_cpu_ptr_declare(v) Declares cpumask_t _v and *v = &_v
- * cpumask_of_cpu_ptr_next(v, cpu) Sets _v = cpumask_of_cpu(cpu)
- * cpumask_of_cpu_ptr(v, cpu) Combines above two operations
- *endif
+ * (can be used as an lvalue)
* CPU_MASK_ALL Initializer - all bits set
* CPU_MASK_NONE Initializer - no bits set
* unsigned long *cpus_addr(mask) Array of unsigned long's in mask
@@ -273,37 +265,30 @@ static inline void __cpus_shift_left(cpumask_t *dstp,
bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
}
+/*
+ * Special-case data structure for "single bit set only" constant CPU masks.
+ *
+ * We pre-generate all the 64 (or 32) possible bit positions, with enough
+ * padding to the left and the right, and return the constant pointer
+ * appropriately offset.
+ */
+extern const unsigned long
+ cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)];
+
+static inline const cpumask_t *get_cpu_mask(unsigned int cpu)
+{
+ const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
+ p -= cpu / BITS_PER_LONG;
+ return (const cpumask_t *)p;
+}
+
+/*
+ * In cases where we take the address of the cpumask immediately,
+ * gcc optimizes it out (it's a constant) and there's no huge stack
+ * variable created:
+ */
+#define cpumask_of_cpu(cpu) ({ *get_cpu_mask(cpu); })
-#ifdef CONFIG_HAVE_CPUMASK_OF_CPU_MAP
-extern cpumask_t *cpumask_of_cpu_map;
-#define cpumask_of_cpu(cpu) (cpumask_of_cpu_map[cpu])
-#define cpumask_of_cpu_ptr(v, cpu) \
- const cpumask_t *v = &cpumask_of_cpu(cpu)
-#define cpumask_of_cpu_ptr_declare(v) \
- const cpumask_t *v
-#define cpumask_of_cpu_ptr_next(v, cpu) \
- v = &cpumask_of_cpu(cpu)
-#else
-#define cpumask_of_cpu(cpu) \
-({ \
- typeof(_unused_cpumask_arg_) m; \
- if (sizeof(m) == sizeof(unsigned long)) { \
- m.bits[0] = 1UL<<(cpu); \
- } else { \
- cpus_clear(m); \
- cpu_set((cpu), m); \
- } \
- m; \
-})
-#define cpumask_of_cpu_ptr(v, cpu) \
- cpumask_t _##v = cpumask_of_cpu(cpu); \
- const cpumask_t *v = &_##v
-#define cpumask_of_cpu_ptr_declare(v) \
- cpumask_t _##v; \
- const cpumask_t *v = &_##v
-#define cpumask_of_cpu_ptr_next(v, cpu) \
- _##v = cpumask_of_cpu(cpu)
-#endif
#define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS)
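To make the pointer arithmetic in get_cpu_mask() above concrete, here is a small userspace model of the scheme the new comment describes (the table initialisation and NR_CPUS value are illustrative assumptions, not the kernel's actual table from kernel/cpu.c): row i >= 1 holds only bit i-1 of its first word, so stepping that row's pointer back by cpu / BITS_PER_LONG words yields a window whose only set bit is bit 'cpu'.

	/*
	 * Illustrative userspace model of the cpu_bit_bitmap trick; NR_CPUS
	 * and the table initialisation here are assumptions, not the kernel's
	 * actual definition.
	 */
	#include <assert.h>
	#include <limits.h>
	#include <stdio.h>

	#define NR_CPUS			128
	#define BITS_PER_LONG		(CHAR_BIT * (int)sizeof(unsigned long))
	#define BITS_TO_LONGS(n)	(((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

	static unsigned long cpu_bit_bitmap[BITS_PER_LONG + 1][BITS_TO_LONGS(NR_CPUS)];

	static const unsigned long *get_cpu_mask(unsigned int cpu)
	{
		const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
		return p - cpu / BITS_PER_LONG;	/* window now has only bit 'cpu' set */
	}

	int main(void)
	{
		unsigned int row, word, cpu;

		/* row r (r >= 1) has exactly bit r-1 of its first word set */
		for (row = 1; row <= BITS_PER_LONG; row++)
			cpu_bit_bitmap[row][0] = 1UL << (row - 1);

		for (cpu = 0; cpu < NR_CPUS; cpu++) {
			const unsigned long *mask = get_cpu_mask(cpu);

			for (word = 0; word < BITS_TO_LONGS(NR_CPUS); word++) {
				unsigned long expect = (word == cpu / BITS_PER_LONG)
						? 1UL << (cpu % BITS_PER_LONG) : 0;
				assert(mask[word] == expect);
			}
		}
		printf("each cpu resolves to a constant single-bit mask\n");
		return 0;
	}
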
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 8252b045e624..580b513668fe 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -443,6 +443,27 @@ static inline size_t iov_iter_count(struct iov_iter *i)
return i->count;
}
+/*
+ * "descriptor" for what we're up to with a read.
+ * This allows us to use the same read code yet
+ * have multiple different users of the data that
+ * we read from a file.
+ *
+ * The simplest case just copies the data to user
+ * mode.
+ */
+typedef struct {
+ size_t written;
+ size_t count;
+ union {
+ char __user *buf;
+ void *data;
+ } arg;
+ int error;
+} read_descriptor_t;
+
+typedef int (*read_actor_t)(read_descriptor_t *, struct page *,
+ unsigned long, unsigned long);
struct address_space_operations {
int (*writepage)(struct page *page, struct writeback_control *wbc);
@@ -484,6 +505,8 @@ struct address_space_operations {
int (*migratepage) (struct address_space *,
struct page *, struct page *);
int (*launder_page) (struct page *);
+ int (*is_partially_uptodate) (struct page *, read_descriptor_t *,
+ unsigned long);
};
/*
@@ -1198,27 +1221,6 @@ struct block_device_operations {
struct module *owner;
};
-/*
- * "descriptor" for what we're up to with a read.
- * This allows us to use the same read code yet
- * have multiple different users of the data that
- * we read from a file.
- *
- * The simplest case just copies the data to user
- * mode.
- */
-typedef struct {
- size_t written;
- size_t count;
- union {
- char __user * buf;
- void *data;
- } arg;
- int error;
-} read_descriptor_t;
-
-typedef int (*read_actor_t)(read_descriptor_t *, struct page *, unsigned long, unsigned long);
-
/* These macros are for out of kernel modules to test that
* the kernel supports the unlocked_ioctl and compat_ioctl
* fields in struct file_operations. */
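As an illustration of how the relocated read_descriptor_t and read_actor_t types fit together, here is a hedged sketch of an actor that copies page contents into the user buffer carried in desc->arg.buf, loosely modelled on the kernel's file_read_actor(); the name demo_read_actor and the simplified error handling are illustrative only.

	#include <linux/fs.h>
	#include <linux/highmem.h>
	#include <linux/pagemap.h>
	#include <linux/uaccess.h>

	/*
	 * Sketch of a read_actor_t (hypothetical, simplified from
	 * file_read_actor): copy up to 'size' bytes at 'offset' in 'page'
	 * into the user buffer tracked by the descriptor and account for
	 * what was actually copied.
	 */
	static int demo_read_actor(read_descriptor_t *desc, struct page *page,
				   unsigned long offset, unsigned long size)
	{
		unsigned long left, count = desc->count;
		char *kaddr;

		if (size > count)
			size = count;

		kaddr = kmap(page);
		left = copy_to_user(desc->arg.buf, kaddr + offset, size);
		kunmap(page);

		if (left) {
			size -= left;
			desc->error = -EFAULT;
		}
		desc->count = count - size;
		desc->written += size;
		desc->arg.buf += size;
		return size;
	}
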
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 6e695eaab4ce..866a3dbe5c75 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1104,6 +1104,9 @@ extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
unsigned long addr, unsigned long len, pgoff_t pgoff);
extern void exit_mmap(struct mm_struct *);
+extern int mm_take_all_locks(struct mm_struct *mm);
+extern void mm_drop_all_locks(struct mm_struct *mm);
+
#ifdef CONFIG_PROC_FS
/* From fs/proc/base.c. callers must _not_ hold the mm's exe_file_lock */
extern void added_exe_file_vma(struct mm_struct *mm);
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 746f975b58ef..386edbe2cb4e 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -10,6 +10,7 @@
#include <linux/rbtree.h>
#include <linux/rwsem.h>
#include <linux/completion.h>
+#include <linux/cpumask.h>
#include <asm/page.h>
#include <asm/mmu.h>
@@ -253,6 +254,9 @@ struct mm_struct {
struct file *exe_file;
unsigned long num_exe_file_vmas;
#endif
+#ifdef CONFIG_MMU_NOTIFIER
+ struct mmu_notifier_mm *mmu_notifier_mm;
+#endif
};
#endif /* _LINUX_MM_TYPES_H */
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
new file mode 100644
index 000000000000..b77486d152cd
--- /dev/null
+++ b/include/linux/mmu_notifier.h
@@ -0,0 +1,279 @@
+#ifndef _LINUX_MMU_NOTIFIER_H
+#define _LINUX_MMU_NOTIFIER_H
+
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/mm_types.h>
+
+struct mmu_notifier;
+struct mmu_notifier_ops;
+
+#ifdef CONFIG_MMU_NOTIFIER
+
+/*
+ * The mmu_notifier_mm structure is allocated and installed in
+ * mm->mmu_notifier_mm inside the mm_take_all_locks() protected
+ * critical section and it's released only when mm_count reaches zero
+ * in mmdrop().
+ */
+struct mmu_notifier_mm {
+ /* all mmu notifiers registered in this mm are queued in this list */
+ struct hlist_head list;
+ /* to serialize the list modifications and hlist_unhashed */
+ spinlock_t lock;
+};
+
+struct mmu_notifier_ops {
+ /*
+ * Called either by mmu_notifier_unregister or when the mm is
+ * being destroyed by exit_mmap, always before all pages are
+ * freed. This can run concurrently with other mmu notifier
+ * methods (the ones invoked outside the mm context) and it
+ * should tear down all secondary mmu mappings and freeze the
+ * secondary mmu. If this method isn't implemented you have to
+ * be sure that nothing could possibly write to the pages
+ * through the secondary mmu by the time the last thread with
+ * tsk->mm == mm exits.
+ *
+ * As a side note: the pages freed after ->release returns could
+ * be immediately reallocated by the gart at an alias physical
+ * address with a different cache model, so if ->release isn't
+ * implemented because all _software_ driven memory accesses
+ * through the secondary mmu are terminated by the time the
+ * last thread of this mm quits, you also have to be sure that
+ * speculative _hardware_ operations can't allocate dirty
+ * cachelines in the cpu that could not be snooped and made
+ * coherent with the other read and write operations happening
+ * through the gart alias address, thus leading to memory
+ * corruption.
+ */
+ void (*release)(struct mmu_notifier *mn,
+ struct mm_struct *mm);
+
+ /*
+ * clear_flush_young is called after the VM has
+ * test-and-cleared the young/accessed bitflag in the
+ * pte. This way the VM will provide proper aging to the
+ * accesses to the page through the secondary MMUs and not
+ * only to the ones through the Linux pte.
+ */
+ int (*clear_flush_young)(struct mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long address);
+
+ /*
+ * Before this is invoked, any secondary MMU is still allowed to
+ * read/write the page previously pointed to by the Linux pte,
+ * because the page hasn't been freed yet and won't be freed
+ * until this returns. If required, set_page_dirty has to be
+ * called internally by this method.
+ */
+ void (*invalidate_page)(struct mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long address);
+
+ /*
+ * invalidate_range_start() and invalidate_range_end() must be
+ * paired and are called only when the mmap_sem and/or the
+ * locks protecting the reverse maps are held. The subsystem
+ * must guarantee that no additional references are taken to
+ * the pages in the range established between the call to
+ * invalidate_range_start() and the matching call to
+ * invalidate_range_end().
+ *
+ * Invalidation of multiple concurrent ranges may be
+ * optionally permitted by the driver. Either way the
+ * establishment of sptes is forbidden in the range passed to
+ * invalidate_range_start/end for the whole duration of the
+ * invalidate_range_start/end critical section.
+ *
+ * invalidate_range_start() is called when all pages in the
+ * range are still mapped and have at least a refcount of one.
+ *
+ * invalidate_range_end() is called when all pages in the
+ * range have been unmapped and the pages have been freed by
+ * the VM.
+ *
+ * The VM will remove the page table entries and potentially
+ * the page between invalidate_range_start() and
+ * invalidate_range_end(). If the page must not be freed
+ * because of pending I/O or other circumstances then the
+ * invalidate_range_start() callback (or the initial mapping
+ * by the driver) must make sure that the refcount is kept
+ * elevated.
+ *
+ * If the driver increases the refcount when the pages are
+ * initially mapped into an address space then either
+ * invalidate_range_start() or invalidate_range_end() may
+ * decrease the refcount. If the refcount is decreased on
+ * invalidate_range_start() then the VM can free pages as page
+ * table entries are removed. If the refcount is only
+ * dropped on invalidate_range_end() then the driver itself
+ * will drop the last refcount but it must take care to flush
+ * any secondary tlb before doing the final free on the
+ * page. Pages will no longer be referenced by the linux
+ * address space but may still be referenced by sptes until
+ * the last refcount is dropped.
+ */
+ void (*invalidate_range_start)(struct mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long start, unsigned long end);
+ void (*invalidate_range_end)(struct mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long start, unsigned long end);
+};
+
+/*
+ * The notifier chains are protected by mmap_sem and/or the reverse map
+ * semaphores. Notifier chains are only changed when all reverse maps and
+ * the mmap_sem locks are taken.
+ *
+ * Therefore notifier chains can only be traversed when either
+ *
+ * 1. mmap_sem is held.
+ * 2. One of the reverse map locks is held (i_mmap_lock or anon_vma->lock).
+ * 3. No other concurrent thread can access the list (release)
+ */
+struct mmu_notifier {
+ struct hlist_node hlist;
+ const struct mmu_notifier_ops *ops;
+};
+
+static inline int mm_has_notifiers(struct mm_struct *mm)
+{
+ return unlikely(mm->mmu_notifier_mm);
+}
+
+extern int mmu_notifier_register(struct mmu_notifier *mn,
+ struct mm_struct *mm);
+extern int __mmu_notifier_register(struct mmu_notifier *mn,
+ struct mm_struct *mm);
+extern void mmu_notifier_unregister(struct mmu_notifier *mn,
+ struct mm_struct *mm);
+extern void __mmu_notifier_mm_destroy(struct mm_struct *mm);
+extern void __mmu_notifier_release(struct mm_struct *mm);
+extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
+ unsigned long address);
+extern void __mmu_notifier_invalidate_page(struct mm_struct *mm,
+ unsigned long address);
+extern void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
+ unsigned long start, unsigned long end);
+extern void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
+ unsigned long start, unsigned long end);
+
+static inline void mmu_notifier_release(struct mm_struct *mm)
+{
+ if (mm_has_notifiers(mm))
+ __mmu_notifier_release(mm);
+}
+
+static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
+ unsigned long address)
+{
+ if (mm_has_notifiers(mm))
+ return __mmu_notifier_clear_flush_young(mm, address);
+ return 0;
+}
+
+static inline void mmu_notifier_invalidate_page(struct mm_struct *mm,
+ unsigned long address)
+{
+ if (mm_has_notifiers(mm))
+ __mmu_notifier_invalidate_page(mm, address);
+}
+
+static inline void mmu_notifier_invalidate_range_start(struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+ if (mm_has_notifiers(mm))
+ __mmu_notifier_invalidate_range_start(mm, start, end);
+}
+
+static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+ if (mm_has_notifiers(mm))
+ __mmu_notifier_invalidate_range_end(mm, start, end);
+}
+
+static inline void mmu_notifier_mm_init(struct mm_struct *mm)
+{
+ mm->mmu_notifier_mm = NULL;
+}
+
+static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
+{
+ if (mm_has_notifiers(mm))
+ __mmu_notifier_mm_destroy(mm);
+}
+
+/*
+ * These two macros will eventually replace ptep_clear_flush.
+ * ptep_clear_flush is itself implemented as a macro, so this is also
+ * implemented as a macro until ptep_clear_flush is converted to an
+ * inline function, to diminish the risk of compilation failure. The
+ * invalidate_page method can over time be moved outside the PT lock
+ * and these two macros can then be removed.
+ */
+#define ptep_clear_flush_notify(__vma, __address, __ptep) \
+({ \
+ pte_t __pte; \
+ struct vm_area_struct *___vma = __vma; \
+ unsigned long ___address = __address; \
+ __pte = ptep_clear_flush(___vma, ___address, __ptep); \
+ mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
+ __pte; \
+})
+
+#define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
+({ \
+ int __young; \
+ struct vm_area_struct *___vma = __vma; \
+ unsigned long ___address = __address; \
+ __young = ptep_clear_flush_young(___vma, ___address, __ptep); \
+ __young |= mmu_notifier_clear_flush_young(___vma->vm_mm, \
+ ___address); \
+ __young; \
+})
+
+#else /* CONFIG_MMU_NOTIFIER */
+
+static inline void mmu_notifier_release(struct mm_struct *mm)
+{
+}
+
+static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
+ unsigned long address)
+{
+ return 0;
+}
+
+static inline void mmu_notifier_invalidate_page(struct mm_struct *mm,
+ unsigned long address)
+{
+}
+
+static inline void mmu_notifier_invalidate_range_start(struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+}
+
+static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+}
+
+static inline void mmu_notifier_mm_init(struct mm_struct *mm)
+{
+}
+
+static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
+{
+}
+
+#define ptep_clear_flush_young_notify ptep_clear_flush_young
+#define ptep_clear_flush_notify ptep_clear_flush
+
+#endif /* CONFIG_MMU_NOTIFIER */
+
+#endif /* _LINUX_MMU_NOTIFIER_H */
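To show how a secondary-MMU driver is expected to consume the new API, here is a hedged sketch of registering a notifier with a minimal set of callbacks; the demo_* names and the driver structure are illustrative, not part of this patch.

	#include <linux/mmu_notifier.h>
	#include <linux/sched.h>

	/* Hypothetical driver state embedding the notifier. */
	struct demo_smmu {
		struct mmu_notifier notifier;
		/* secondary-MMU page table / TLB state would live here */
	};

	static void demo_release(struct mmu_notifier *mn, struct mm_struct *mm)
	{
		/* tear down all secondary mappings; the mm is going away */
	}

	static void demo_invalidate_range_start(struct mmu_notifier *mn,
						struct mm_struct *mm,
						unsigned long start,
						unsigned long end)
	{
		/* flush sptes/TLB entries for [start, end) and stop creating new ones */
	}

	static void demo_invalidate_range_end(struct mmu_notifier *mn,
					      struct mm_struct *mm,
					      unsigned long start,
					      unsigned long end)
	{
		/* pages are unmapped and freed by now; re-allow spte establishment */
	}

	static const struct mmu_notifier_ops demo_ops = {
		.release		= demo_release,
		.invalidate_range_start	= demo_invalidate_range_start,
		.invalidate_range_end	= demo_invalidate_range_end,
	};

	/*
	 * Register against the caller's mm; mmu_notifier_register() takes
	 * mmap_sem and mm_take_all_locks() internally.
	 */
	static int demo_attach(struct demo_smmu *smmu)
	{
		smmu->notifier.ops = &demo_ops;
		return mmu_notifier_register(&smmu->notifier, current->mm);
	}
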
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index a81d81890422..a39b38ccdc97 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -20,6 +20,7 @@
*/
#define AS_EIO (__GFP_BITS_SHIFT + 0) /* IO error on async write */
#define AS_ENOSPC (__GFP_BITS_SHIFT + 1) /* ENOSPC on async write */
+#define AS_MM_ALL_LOCKS (__GFP_BITS_SHIFT + 2) /* under mm_take_all_locks() */
static inline void mapping_set_error(struct address_space *mapping, int error)
{
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index b0f39be08b6c..eb4443c7e05b 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -98,6 +98,34 @@ static inline void list_del_rcu(struct list_head *entry)
}
/**
+ * hlist_del_init_rcu - deletes entry from hash list with re-initialization
+ * @n: the element to delete from the hash list.
+ *
+ * Note: hlist_unhashed() on the node returns true after this. It is
+ * useful for RCU-based lock-free read traversal if the writer side
+ * must know whether the list entry is still hashed or already unhashed.
+ *
+ * In particular, it means that we cannot poison the forward pointers
+ * that may still be used for walking the hash list and we can only
+ * zero the pprev pointer so that hlist_unhashed() will return true
+ * after this.
+ *
+ * The caller must take whatever precautions are necessary (such as
+ * holding appropriate locks) to avoid racing with another
+ * list-mutation primitive, such as hlist_add_head_rcu() or
+ * hlist_del_rcu(), running on this same list. However, it is
+ * perfectly legal to run concurrently with the _rcu list-traversal
+ * primitives, such as hlist_for_each_entry_rcu().
+ */
+static inline void hlist_del_init_rcu(struct hlist_node *n)
+{
+ if (!hlist_unhashed(n)) {
+ __hlist_del(n);
+ n->pprev = NULL;
+ }
+}
+
+/**
* list_replace_rcu - replace old entry by new one
* @old : the element to be replaced
* @new : the new element to insert
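A short hedged sketch of the writer-side pattern hlist_del_init_rcu() enables (the demo_* names are hypothetical): the entry is removed under the writer's lock and pprev is zeroed rather than poisoned, so later code can use hlist_unhashed() to tell whether the entry is still hashed while RCU readers keep traversing safely.

	#include <linux/rculist.h>
	#include <linux/spinlock.h>

	/* Hypothetical list element. */
	struct demo_item {
		struct hlist_node node;
	};

	static void demo_remove(struct demo_item *item, spinlock_t *lock)
	{
		spin_lock(lock);			/* serialize against other writers */
		if (!hlist_unhashed(&item->node))
			hlist_del_init_rcu(&item->node);	/* next kept, pprev = NULL */
		spin_unlock(lock);
		/* concurrent hlist_for_each_entry_rcu() walkers remain valid */
	}
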
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 1383692ac5bd..69407f85e10b 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -26,6 +26,14 @@
*/
struct anon_vma {
spinlock_t lock; /* Serialize access to vma list */
+ /*
+ * NOTE: the LSB of the head.next is set by
+ * mm_take_all_locks() _after_ taking the above lock. So the
+ * head must only be read/written after taking the above lock
+	 * to be sure to see a valid next pointer. The LSB itself
+	 * is serialized by a system-wide lock only visible to
+ * mm_take_all_locks() (mm_all_locks_mutex).
+ */
struct list_head head; /* List of private "related" vmas */
};