Diffstat (limited to 'meta-amdfalconx86/recipes-kernel/linux/files/0007-yocto-amd-backport-kernel-dependencies-for-amdgpu-driver.patch')
 meta-amdfalconx86/recipes-kernel/linux/files/0007-yocto-amd-backport-kernel-dependencies-for-amdgpu-driver.patch | 35302 ++++++++++++++++++
 1 file changed, 35302 insertions(+), 0 deletions(-)
diff --git a/meta-amdfalconx86/recipes-kernel/linux/files/0007-yocto-amd-backport-kernel-dependencies-for-amdgpu-driver.patch b/meta-amdfalconx86/recipes-kernel/linux/files/0007-yocto-amd-backport-kernel-dependencies-for-amdgpu-driver.patch
new file mode 100644
index 00000000..d2bcdaca
--- /dev/null
+++ b/meta-amdfalconx86/recipes-kernel/linux/files/0007-yocto-amd-backport-kernel-dependencies-for-amdgpu-driver.patch
@@ -0,0 +1,35302 @@
+Backport of the kernel dependencies required by the amdgpu driver to the
+Yocto kernel v3.14.24. These dependencies must be in place before the
+amdgpu driver can be compiled cleanly.
+
+Signed-off-by: Sanjay Mehta <sanju.mehta@amd.com>
+diff -Naur a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
+--- a/arch/x86/include/asm/cpufeature.h 2015-03-26 14:43:28.362436395 +0530
++++ b/arch/x86/include/asm/cpufeature.h 2015-03-26 14:42:37.686435401 +0530
+@@ -37,7 +37,7 @@
+ #define X86_FEATURE_PAT (0*32+16) /* Page Attribute Table */
+ #define X86_FEATURE_PSE36 (0*32+17) /* 36-bit PSEs */
+ #define X86_FEATURE_PN (0*32+18) /* Processor serial number */
+-#define X86_FEATURE_CLFLSH (0*32+19) /* "clflush" CLFLUSH instruction */
++#define X86_FEATURE_CLFLUSH (0*32+19) /* "clflush" CLFLUSH instruction */
+ #define X86_FEATURE_DS (0*32+21) /* "dts" Debug Store */
+ #define X86_FEATURE_ACPI (0*32+22) /* ACPI via MSR */
+ #define X86_FEATURE_MMX (0*32+23) /* Multimedia Extensions */
+@@ -224,6 +224,8 @@
+ #define X86_FEATURE_AVX512PF (9*32+26) /* AVX-512 Prefetch */
+ #define X86_FEATURE_AVX512ER (9*32+27) /* AVX-512 Exponential and Reciprocal */
+ #define X86_FEATURE_AVX512CD (9*32+28) /* AVX-512 Conflict Detection */
++#define X86_FEATURE_CLFLUSHOPT (9*32+23) /* CLFLUSHOPT instruction */
++
+
+ /*
+ * BUG word(s)
+@@ -317,7 +319,7 @@
+ #define cpu_has_pmm_enabled boot_cpu_has(X86_FEATURE_PMM_EN)
+ #define cpu_has_ds boot_cpu_has(X86_FEATURE_DS)
+ #define cpu_has_pebs boot_cpu_has(X86_FEATURE_PEBS)
+-#define cpu_has_clflush boot_cpu_has(X86_FEATURE_CLFLSH)
++#define cpu_has_clflush boot_cpu_has(X86_FEATURE_CLFLUSH)
+ #define cpu_has_bts boot_cpu_has(X86_FEATURE_BTS)
+ #define cpu_has_gbpages boot_cpu_has(X86_FEATURE_GBPAGES)
+ #define cpu_has_arch_perfmon boot_cpu_has(X86_FEATURE_ARCH_PERFMON)
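
The rename above is kernel-internal ABI: every user of the old
X86_FEATURE_CLFLSH spelling (see the hunks that follow) has to move to
X86_FEATURE_CLFLUSH. A minimal sketch of a caller gating on the renamed
bit (flush_buffer() is an illustrative name; cpu_has_clflush and
clflush_cache_range() are real kernel symbols):

	static void flush_buffer(void *vaddr, unsigned int size)
	{
		if (!cpu_has_clflush)	/* now backed by X86_FEATURE_CLFLUSH */
			return;		/* caller falls back to wbinvd() */
		clflush_cache_range(vaddr, size);
	}
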
+diff -Naur a/arch/x86/include/asm/disabled-features.h b/arch/x86/include/asm/disabled-features.h
+--- a/arch/x86/include/asm/disabled-features.h 1970-01-01 05:30:00.000000000 +0530
++++ b/arch/x86/include/asm/disabled-features.h 2015-03-26 14:42:37.686435401 +0530
+@@ -0,0 +1,45 @@
++#ifndef _ASM_X86_DISABLED_FEATURES_H
++#define _ASM_X86_DISABLED_FEATURES_H
++
++/* These features, although they might be available in a CPU,
++ * will not be used because the compile options to support
++ * them are not present.
++ *
++ * This code allows them to be checked and disabled at
++ * compile time without an explicit #ifdef. Use
++ * cpu_feature_enabled().
++ */
++
++#ifdef CONFIG_X86_INTEL_MPX
++# define DISABLE_MPX 0
++#else
++# define DISABLE_MPX (1<<(X86_FEATURE_MPX & 31))
++#endif
++
++#ifdef CONFIG_X86_64
++# define DISABLE_VME (1<<(X86_FEATURE_VME & 31))
++# define DISABLE_K6_MTRR (1<<(X86_FEATURE_K6_MTRR & 31))
++# define DISABLE_CYRIX_ARR (1<<(X86_FEATURE_CYRIX_ARR & 31))
++# define DISABLE_CENTAUR_MCR (1<<(X86_FEATURE_CENTAUR_MCR & 31))
++#else
++# define DISABLE_VME 0
++# define DISABLE_K6_MTRR 0
++# define DISABLE_CYRIX_ARR 0
++# define DISABLE_CENTAUR_MCR 0
++#endif /* CONFIG_X86_64 */
++
++/*
++ * Make sure to add features to the correct mask
++ */
++#define DISABLED_MASK0 (DISABLE_VME)
++#define DISABLED_MASK1 0
++#define DISABLED_MASK2 0
++#define DISABLED_MASK3 (DISABLE_CYRIX_ARR|DISABLE_CENTAUR_MCR|DISABLE_K6_MTRR)
++#define DISABLED_MASK4 0
++#define DISABLED_MASK5 0
++#define DISABLED_MASK6 0
++#define DISABLED_MASK7 0
++#define DISABLED_MASK8 0
++#define DISABLED_MASK9 (DISABLE_MPX)
++
++#endif /* _ASM_X86_DISABLED_FEATURES_H */
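
The consumer of these masks is cpu_feature_enabled(), added to
cpufeature.h by the same upstream series; paraphrased here, as that hunk
is not part of this excerpt. A feature disabled via DISABLED_MASK*
folds to a compile-time 0, everything else takes the runtime test:

	/* paraphrase of the companion cpufeature.h addition */
	#define cpu_feature_enabled(bit)				\
		(__builtin_constant_p(bit) && DISABLED_MASK_BIT_SET(bit) ? \
		 0 : static_cpu_has(bit))
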
+diff -Naur a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
+--- a/arch/x86/include/asm/special_insns.h 2015-03-26 14:43:28.362436395 +0530
++++ b/arch/x86/include/asm/special_insns.h 2015-03-26 14:42:37.686435401 +0530
+@@ -191,6 +191,14 @@
+ asm volatile("clflush %0" : "+m" (*(volatile char __force *)__p));
+ }
+
++static inline void clflushopt(volatile void *__p)
++{
++ alternative_io(".byte " __stringify(NOP_DS_PREFIX) "; clflush %P0",
++ ".byte 0x66; clflush %P0",
++ X86_FEATURE_CLFLUSHOPT,
++ "+m" (*(volatile char __force *)__p));
++}
++
+ #define nop() asm volatile ("nop")
+
+
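
The alternative_io() above patches the call site at boot: CPUs without
X86_FEATURE_CLFLUSHOPT execute a DS-prefixed plain CLFLUSH, CPUs with it
get the 0x66-prefixed CLFLUSHOPT encoding. A minimal usage sketch
(flush_range() is illustrative; CLFLUSHOPT is weakly ordered, so the
loop must be fenced):

	static void flush_range(void *addr, unsigned long size)
	{
		const int cls = boot_cpu_data.x86_clflush_size;
		char *p;

		mb();			/* order prior stores vs. the flushes */
		for (p = addr; p < (char *)addr + size; p += cls)
			clflushopt(p);
		mb();			/* make the flushes globally visible */
	}
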
+diff -Naur a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+--- a/arch/x86/kernel/cpu/common.c 2015-03-26 14:43:28.394436396 +0530
++++ b/arch/x86/kernel/cpu/common.c 2015-03-26 14:42:37.686435401 +0530
+@@ -1025,7 +1025,7 @@
+
+ static __init int setup_noclflush(char *arg)
+ {
+- setup_clear_cpu_cap(X86_FEATURE_CLFLSH);
++ setup_clear_cpu_cap(X86_FEATURE_CLFLUSH);
+ return 1;
+ }
+ __setup("noclflush", setup_noclflush);
+diff -Naur a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
+--- a/arch/x86/kernel/smpboot.c 2015-03-26 14:43:28.398436396 +0530
++++ b/arch/x86/kernel/smpboot.c 2015-03-26 14:42:37.686435401 +0530
+@@ -1389,7 +1389,7 @@
+
+ if (!this_cpu_has(X86_FEATURE_MWAIT))
+ return;
+- if (!this_cpu_has(X86_FEATURE_CLFLSH))
++ if (!this_cpu_has(X86_FEATURE_CLFLUSH))
+ return;
+ if (__this_cpu_read(cpu_info.cpuid_level) < CPUID_MWAIT_LEAF)
+ return;
+diff -Naur a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
+--- a/arch/x86/kvm/cpuid.c 2015-03-26 14:43:28.430436396 +0530
++++ b/arch/x86/kvm/cpuid.c 2015-03-26 14:42:37.690435401 +0530
+@@ -263,7 +263,7 @@
+ F(TSC) | F(MSR) | F(PAE) | F(MCE) |
+ F(CX8) | F(APIC) | 0 /* Reserved */ | F(SEP) |
+ F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
+- F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLSH) |
++ F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLUSH) |
+ 0 /* Reserved, DS, ACPI */ | F(MMX) |
+ F(FXSR) | F(XMM) | F(XMM2) | F(SELFSNOOP) |
+ 0 /* HTT, TM, Reserved, PBE */;
+diff -Naur a/Documentation/atomic_ops.txt b/Documentation/atomic_ops.txt
+--- a/Documentation/atomic_ops.txt 2015-03-26 14:43:29.098436410 +0530
++++ b/Documentation/atomic_ops.txt 2015-03-26 14:42:37.690435401 +0530
+@@ -7,12 +7,13 @@
+ maintainers on how to implement atomic counter, bitops, and spinlock
+ interfaces properly.
+
+- The atomic_t type should be defined as a signed integer.
+-Also, it should be made opaque such that any kind of cast to a normal
+-C integer type will fail. Something like the following should
+-suffice:
++ The atomic_t type should be defined as a signed integer and
++the atomic_long_t type as a signed long integer. Also, they should
++be made opaque such that any kind of cast to a normal C integer type
++will fail. Something like the following should suffice:
+
+ typedef struct { int counter; } atomic_t;
++ typedef struct { long counter; } atomic_long_t;
+
+ Historically, counter has been declared volatile. This is now discouraged.
+ See Documentation/volatile-considered-harmful.txt for the complete rationale.
+@@ -37,6 +38,9 @@
+ proper implicit or explicit read memory barrier is needed before reading the
+ value with atomic_read from another thread.
+
++As with all of the atomic_ interfaces, replace the leading "atomic_"
++with "atomic_long_" to operate on atomic_long_t.
++
+ The second interface can be used at runtime, as in:
+
+ struct foo { atomic_t counter; };
+@@ -285,15 +289,13 @@
+ operation which does not return a value, a set of interfaces are
+ defined which accomplish this:
+
+- void smp_mb__before_atomic_dec(void);
+- void smp_mb__after_atomic_dec(void);
+- void smp_mb__before_atomic_inc(void);
+- void smp_mb__after_atomic_inc(void);
++ void smp_mb__before_atomic(void);
++ void smp_mb__after_atomic(void);
+
+-For example, smp_mb__before_atomic_dec() can be used like so:
++For example, smp_mb__before_atomic() can be used like so:
+
+ obj->dead = 1;
+- smp_mb__before_atomic_dec();
++ smp_mb__before_atomic();
+ atomic_dec(&obj->ref_count);
+
+ It makes sure that all memory operations preceding the atomic_dec()
+@@ -302,15 +304,10 @@
+ "1" to obj->dead will be globally visible to other cpus before the
+ atomic counter decrement.
+
+-Without the explicit smp_mb__before_atomic_dec() call, the
++Without the explicit smp_mb__before_atomic() call, the
+ implementation could legally allow the atomic counter update visible
+ to other cpus before the "obj->dead = 1;" assignment.
+
+-The other three interfaces listed are used to provide explicit
+-ordering with respect to memory operations after an atomic_dec() call
+-(smp_mb__after_atomic_dec()) and around atomic_inc() calls
+-(smp_mb__{before,after}_atomic_inc()).
+-
+ A missing memory barrier in the cases where they are required by the
+ atomic_t implementation above can have disastrous results. Here is
+ an example, which follows a pattern occurring frequently in the Linux
+@@ -487,12 +484,12 @@
+ Which returns a boolean indicating if bit "nr" is set in the bitmask
+ pointed to by "addr".
+
+-If explicit memory barriers are required around clear_bit() (which
+-does not return a value, and thus does not need to provide memory
+-barrier semantics), two interfaces are provided:
++If explicit memory barriers are required around {set,clear}_bit() (which do
++not return a value, and thus do not need to provide memory barrier
++semantics), two interfaces are provided:
+
+- void smp_mb__before_clear_bit(void);
+- void smp_mb__after_clear_bit(void);
++ void smp_mb__before_atomic(void);
++ void smp_mb__after_atomic(void);
+
+ They are used as follows, and are akin to their atomic_t operation
+ brothers:
+@@ -500,13 +497,13 @@
+ /* All memory operations before this call will
+ * be globally visible before the clear_bit().
+ */
+- smp_mb__before_clear_bit();
++ smp_mb__before_atomic();
+ clear_bit( ... );
+
+ /* The clear_bit() will be visible before all
+ * subsequent memory operations.
+ */
+- smp_mb__after_clear_bit();
++ smp_mb__after_atomic();
+
+ There are two special bitops with lock barrier semantics (acquire/release,
+ same as spinlocks). These operate in the same way as their non-_lock/unlock
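
For driver code being carried across this rename, the mechanical
rewrite looks as follows (obj and its fields are illustrative):

	/* old (pre-rename) spelling */
	obj->dead = 1;
	smp_mb__before_atomic_dec();
	atomic_dec(&obj->ref_count);

	/* new spelling documented above: one pair serves atomics and bitops */
	obj->dead = 1;
	smp_mb__before_atomic();
	atomic_dec(&obj->ref_count);
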
+diff -Naur a/drivers/acpi/video.c b/drivers/acpi/video.c
+--- a/drivers/acpi/video.c 2015-03-26 14:43:30.918436445 +0530
++++ b/drivers/acpi/video.c 2015-03-26 14:42:37.690435401 +0530
+@@ -240,13 +240,14 @@
+ return use_native_backlight_dmi;
+ }
+
+-static bool acpi_video_verify_backlight_support(void)
++bool acpi_video_verify_backlight_support(void)
+ {
+ if (acpi_osi_is_win8() && acpi_video_use_native_backlight() &&
+ backlight_device_registered(BACKLIGHT_RAW))
+ return false;
+ return acpi_video_backlight_support();
+ }
++EXPORT_SYMBOL_GPL(acpi_video_verify_backlight_support);
+
+ /* backlight device sysfs support */
+ static int acpi_video_get_brightness(struct backlight_device *bd)
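
Exporting acpi_video_verify_backlight_support() lets a GPU driver ask
whether ACPI video should own the backlight before registering a native
one. A hedged sketch of such a call site (the surrounding driver logic
is illustrative; only the exported function is from this hunk):

	if (acpi_video_verify_backlight_support()) {
		/* ACPI video provides the backlight interface;
		 * do not register a native BACKLIGHT_RAW device. */
		return 0;
	}
	/* otherwise register the driver's own backlight device here */
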
+diff -Naur a/drivers/base/fence.c b/drivers/base/fence.c
+--- a/drivers/base/fence.c 1970-01-01 05:30:00.000000000 +0530
++++ b/drivers/base/fence.c 2015-03-26 14:42:37.690435401 +0530
+@@ -0,0 +1,431 @@
++/*
++ * Fence mechanism for dma-buf and to allow for asynchronous dma access
++ *
++ * Copyright (C) 2012 Canonical Ltd
++ * Copyright (C) 2012 Texas Instruments
++ *
++ * Authors:
++ * Rob Clark <robdclark@gmail.com>
++ * Maarten Lankhorst <maarten.lankhorst@canonical.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published by
++ * the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ */
++
++#include <linux/slab.h>
++#include <linux/export.h>
++#include <linux/atomic.h>
++#include <linux/fence.h>
++
++#define CREATE_TRACE_POINTS
++#include <trace/events/fence.h>
++
++EXPORT_TRACEPOINT_SYMBOL(fence_annotate_wait_on);
++EXPORT_TRACEPOINT_SYMBOL(fence_emit);
++
++/*
++ * fence context counter: each execution context should have its own
++ * fence context, this allows checking if fences belong to the same
++ * context or not. One device can have multiple separate contexts,
++ * and they're used if some engine can run independently of another.
++ */
++static atomic_t fence_context_counter = ATOMIC_INIT(0);
++
++/**
++ * fence_context_alloc - allocate an array of fence contexts
++ * @num: [in] number of contexts to allocate
++ *
++ * This function returns the first index of the @num contexts allocated.
++ * The fence context is used for setting fence->context to a unique number.
++ */
++unsigned fence_context_alloc(unsigned num)
++{
++ BUG_ON(!num);
++ return atomic_add_return(num, &fence_context_counter) - num;
++}
++EXPORT_SYMBOL(fence_context_alloc);
++
++/**
++ * fence_signal_locked - signal completion of a fence
++ * @fence: the fence to signal
++ *
++ * Signal completion for software callbacks on a fence, this will unblock
++ * fence_wait() calls and run all the callbacks added with
++ * fence_add_callback(). Can be called multiple times, but since a fence
++ * can only go from unsignaled to signaled state, it will only be effective
++ * the first time.
++ *
++ * Unlike fence_signal, this function must be called with fence->lock held.
++ */
++int fence_signal_locked(struct fence *fence)
++{
++ struct fence_cb *cur, *tmp;
++ int ret = 0;
++
++ if (WARN_ON(!fence))
++ return -EINVAL;
++
++ if (!ktime_to_ns(fence->timestamp)) {
++ fence->timestamp = ktime_get();
++ smp_mb__before_atomic_inc();
++ }
++
++ if (test_and_set_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
++ ret = -EINVAL;
++
++ /*
++ * we might have raced with the unlocked fence_signal,
++ * still run through all callbacks
++ */
++ } else
++ trace_fence_signaled(fence);
++
++ list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
++ list_del_init(&cur->node);
++ cur->func(fence, cur);
++ }
++ return ret;
++}
++EXPORT_SYMBOL(fence_signal_locked);
++
++/**
++ * fence_signal - signal completion of a fence
++ * @fence: the fence to signal
++ *
++ * Signal completion for software callbacks on a fence, this will unblock
++ * fence_wait() calls and run all the callbacks added with
++ * fence_add_callback(). Can be called multiple times, but since a fence
++ * can only go from unsignaled to signaled state, it will only be effective
++ * the first time.
++ */
++int fence_signal(struct fence *fence)
++{
++ unsigned long flags;
++
++ if (!fence)
++ return -EINVAL;
++
++ if (!ktime_to_ns(fence->timestamp)) {
++ fence->timestamp = ktime_get();
++ smp_mb__before_atomic_inc();
++ }
++
++ if (test_and_set_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
++ return -EINVAL;
++
++ trace_fence_signaled(fence);
++
++ if (test_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags)) {
++ struct fence_cb *cur, *tmp;
++
++ spin_lock_irqsave(fence->lock, flags);
++ list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
++ list_del_init(&cur->node);
++ cur->func(fence, cur);
++ }
++ spin_unlock_irqrestore(fence->lock, flags);
++ }
++ return 0;
++}
++EXPORT_SYMBOL(fence_signal);
++
++/**
++ * fence_wait_timeout - sleep until the fence gets signaled
++ * or until timeout elapses
++ * @fence: [in] the fence to wait on
++ * @intr: [in] if true, do an interruptible wait
++ * @timeout: [in] timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
++ *
++ * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the
++ * remaining timeout in jiffies on success. Other error values may be
++ * returned on custom implementations.
++ *
++ * Performs a synchronous wait on this fence. It is assumed the caller
++ * directly or indirectly (buf-mgr between reservation and committing)
++ * holds a reference to the fence, otherwise the fence might be
++ * freed before return, resulting in undefined behavior.
++ */
++signed long
++fence_wait_timeout(struct fence *fence, bool intr, signed long timeout)
++{
++ signed long ret;
++
++ if (WARN_ON(timeout < 0))
++ return -EINVAL;
++
++ trace_fence_wait_start(fence);
++ ret = fence->ops->wait(fence, intr, timeout);
++ trace_fence_wait_end(fence);
++ return ret;
++}
++EXPORT_SYMBOL(fence_wait_timeout);
++
++void fence_release(struct kref *kref)
++{
++ struct fence *fence =
++ container_of(kref, struct fence, refcount);
++
++ trace_fence_destroy(fence);
++
++ BUG_ON(!list_empty(&fence->cb_list));
++
++ if (fence->ops->release)
++ fence->ops->release(fence);
++ else
++ fence_free(fence);
++}
++EXPORT_SYMBOL(fence_release);
++
++void fence_free(struct fence *fence)
++{
++ kfree_rcu(fence, rcu);
++}
++EXPORT_SYMBOL(fence_free);
++
++/**
++ * fence_enable_sw_signaling - enable signaling on fence
++ * @fence: [in] the fence to enable
++ *
++ * This requests sw signaling to be enabled, to make the fence
++ * complete as soon as possible.
++ */
++void fence_enable_sw_signaling(struct fence *fence)
++{
++ unsigned long flags;
++
++ if (!test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags) &&
++ !test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
++ trace_fence_enable_signal(fence);
++
++ spin_lock_irqsave(fence->lock, flags);
++
++ if (!fence->ops->enable_signaling(fence))
++ fence_signal_locked(fence);
++
++ spin_unlock_irqrestore(fence->lock, flags);
++ }
++}
++EXPORT_SYMBOL(fence_enable_sw_signaling);
++
++/**
++ * fence_add_callback - add a callback to be called when the fence
++ * is signaled
++ * @fence: [in] the fence to wait on
++ * @cb: [in] the callback to register
++ * @func: [in] the function to call
++ *
++ * cb will be initialized by fence_add_callback, no initialization
++ * by the caller is required. Any number of callbacks can be registered
++ * to a fence, but a callback can only be registered to one fence at a time.
++ *
++ * Note that the callback can be called from an atomic context. If
++ * fence is already signaled, this function will return -ENOENT (and
++ * *not* call the callback).
++ *
++ * Add a software callback to the fence. Same restrictions apply to
++ * refcount as it does to fence_wait, however the caller doesn't need to
++ * keep a refcount to fence afterwards: when software access is enabled,
++ * the creator of the fence is required to keep the fence alive until
++ * after it signals with fence_signal. The callback itself can be called
++ * from irq context.
++ *
++ */
++int fence_add_callback(struct fence *fence, struct fence_cb *cb,
++ fence_func_t func)
++{
++ unsigned long flags;
++ int ret = 0;
++ bool was_set;
++
++ if (WARN_ON(!fence || !func))
++ return -EINVAL;
++
++ if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
++ INIT_LIST_HEAD(&cb->node);
++ return -ENOENT;
++ }
++
++ spin_lock_irqsave(fence->lock, flags);
++
++ was_set = test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags);
++
++ if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
++ ret = -ENOENT;
++ else if (!was_set) {
++ trace_fence_enable_signal(fence);
++
++ if (!fence->ops->enable_signaling(fence)) {
++ fence_signal_locked(fence);
++ ret = -ENOENT;
++ }
++ }
++
++ if (!ret) {
++ cb->func = func;
++ list_add_tail(&cb->node, &fence->cb_list);
++ } else
++ INIT_LIST_HEAD(&cb->node);
++ spin_unlock_irqrestore(fence->lock, flags);
++
++ return ret;
++}
++EXPORT_SYMBOL(fence_add_callback);
++
++/**
++ * fence_remove_callback - remove a callback from the signaling list
++ * @fence: [in] the fence to wait on
++ * @cb: [in] the callback to remove
++ *
++ * Remove a previously queued callback from the fence. This function returns
++ * true if the callback is successfully removed, or false if the fence has
++ * already been signaled.
++ *
++ * *WARNING*:
++ * Cancelling a callback should only be done if you really know what you're
++ * doing, since deadlocks and race conditions could occur all too easily. For
++ * this reason, it should only ever be done on hardware lockup recovery,
++ * with a reference held to the fence.
++ */
++bool
++fence_remove_callback(struct fence *fence, struct fence_cb *cb)
++{
++ unsigned long flags;
++ bool ret;
++
++ spin_lock_irqsave(fence->lock, flags);
++
++ ret = !list_empty(&cb->node);
++ if (ret)
++ list_del_init(&cb->node);
++
++ spin_unlock_irqrestore(fence->lock, flags);
++
++ return ret;
++}
++EXPORT_SYMBOL(fence_remove_callback);
++
++struct default_wait_cb {
++ struct fence_cb base;
++ struct task_struct *task;
++};
++
++static void
++fence_default_wait_cb(struct fence *fence, struct fence_cb *cb)
++{
++ struct default_wait_cb *wait =
++ container_of(cb, struct default_wait_cb, base);
++
++ wake_up_state(wait->task, TASK_NORMAL);
++}
++
++/**
++ * fence_default_wait - default sleep until the fence gets signaled
++ * or until timeout elapses
++ * @fence: [in] the fence to wait on
++ * @intr: [in] if true, do an interruptible wait
++ * @timeout: [in] timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
++ *
++ * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the
++ * remaining timeout in jiffies on success.
++ */
++signed long
++fence_default_wait(struct fence *fence, bool intr, signed long timeout)
++{
++ struct default_wait_cb cb;
++ unsigned long flags;
++ signed long ret = timeout;
++ bool was_set;
++
++ if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
++ return timeout;
++
++ spin_lock_irqsave(fence->lock, flags);
++
++ if (intr && signal_pending(current)) {
++ ret = -ERESTARTSYS;
++ goto out;
++ }
++
++ was_set = test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags);
++
++ if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
++ goto out;
++
++ if (!was_set) {
++ trace_fence_enable_signal(fence);
++
++ if (!fence->ops->enable_signaling(fence)) {
++ fence_signal_locked(fence);
++ goto out;
++ }
++ }
++
++ cb.base.func = fence_default_wait_cb;
++ cb.task = current;
++ list_add(&cb.base.node, &fence->cb_list);
++
++ while (!test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags) && ret > 0) {
++ if (intr)
++ __set_current_state(TASK_INTERRUPTIBLE);
++ else
++ __set_current_state(TASK_UNINTERRUPTIBLE);
++ spin_unlock_irqrestore(fence->lock, flags);
++
++ ret = schedule_timeout(ret);
++
++ spin_lock_irqsave(fence->lock, flags);
++ if (ret > 0 && intr && signal_pending(current))
++ ret = -ERESTARTSYS;
++ }
++
++ if (!list_empty(&cb.base.node))
++ list_del(&cb.base.node);
++ __set_current_state(TASK_RUNNING);
++
++out:
++ spin_unlock_irqrestore(fence->lock, flags);
++ return ret;
++}
++EXPORT_SYMBOL(fence_default_wait);
++
++/**
++ * fence_init - Initialize a custom fence.
++ * @fence: [in] the fence to initialize
++ * @ops: [in] the fence_ops for operations on this fence
++ * @lock: [in] the irqsafe spinlock to use for locking this fence
++ * @context: [in] the execution context this fence is run on
++ * @seqno: [in] a linear increasing sequence number for this context
++ *
++ * Initializes an allocated fence, the caller doesn't have to keep its
++ * refcount after committing with this fence, but it will need to hold a
++ * refcount again if fence_ops.enable_signaling gets called. This can
++ * be used when implementing other types of fence.
++ *
++ * context and seqno are used for easy comparison between fences, allowing
++ * one to check which fence is later simply by using fence_later.
++ */
++void
++fence_init(struct fence *fence, const struct fence_ops *ops,
++ spinlock_t *lock, unsigned context, unsigned seqno)
++{
++ BUG_ON(!lock);
++ BUG_ON(!ops || !ops->wait || !ops->enable_signaling ||
++ !ops->get_driver_name || !ops->get_timeline_name);
++
++ kref_init(&fence->refcount);
++ fence->ops = ops;
++ INIT_LIST_HEAD(&fence->cb_list);
++ fence->lock = lock;
++ fence->context = context;
++ fence->seqno = seqno;
++ fence->flags = 0UL;
++
++ trace_fence_init(fence);
++}
++EXPORT_SYMBOL(fence_init);
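
Taken together, the exports above define the producer side of the API.
A minimal sketch of a driver using it (everything prefixed my_ is
illustrative; only the fence_* symbols come from this file):

	#include <linux/fence.h>
	#include <linux/slab.h>

	static const char *my_drv_name(struct fence *f) { return "mydrv"; }
	static const char *my_timeline(struct fence *f) { return "ring0"; }
	static bool my_enable_signaling(struct fence *f) { return true; }

	static const struct fence_ops my_fence_ops = {
		.get_driver_name   = my_drv_name,
		.get_timeline_name = my_timeline,
		.enable_signaling  = my_enable_signaling,
		.wait              = fence_default_wait,
	};

	static DEFINE_SPINLOCK(my_fence_lock);

	/* emission: context comes from fence_context_alloc(), seqno is
	 * monotonically increasing per context */
	struct fence *my_emit_fence(unsigned context, unsigned seqno)
	{
		struct fence *f = kzalloc(sizeof(*f), GFP_KERNEL);

		if (f)
			fence_init(f, &my_fence_ops, &my_fence_lock,
				   context, seqno);
		return f;
	}

	/* completion, e.g. from the driver's IRQ handler */
	void my_complete_fence(struct fence *f)
	{
		fence_signal(f);
		fence_put(f);	/* drop the emission reference */
	}
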
+diff -Naur a/drivers/base/Makefile b/drivers/base/Makefile
+--- a/drivers/base/Makefile 2015-03-26 14:43:30.858436444 +0530
++++ b/drivers/base/Makefile 2015-03-26 14:42:37.694435402 +0530
+@@ -10,7 +10,7 @@
+ obj-y += power/
+ obj-$(CONFIG_HAS_DMA) += dma-mapping.o
+ obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o
+-obj-$(CONFIG_DMA_SHARED_BUFFER) += dma-buf.o reservation.o
++obj-$(CONFIG_DMA_SHARED_BUFFER) += dma-buf.o fence.o reservation.o seqno-fence.o
+ obj-$(CONFIG_ISA) += isa.o
+ obj-$(CONFIG_FW_LOADER) += firmware_class.o
+ obj-$(CONFIG_NUMA) += node.o
+diff -Naur a/drivers/base/reservation.c b/drivers/base/reservation.c
+--- a/drivers/base/reservation.c 2015-03-26 14:43:30.862436444 +0530
++++ b/drivers/base/reservation.c 2015-03-26 14:42:37.694435402 +0530
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright (C) 2012-2013 Canonical Ltd
++ * Copyright (C) 2012-2014 Canonical Ltd (Maarten Lankhorst)
+ *
+ * Based on bo.c which bears the following copyright notice,
+ * but is dual licensed:
+@@ -37,3 +37,439 @@
+
+ DEFINE_WW_CLASS(reservation_ww_class);
+ EXPORT_SYMBOL(reservation_ww_class);
++
++struct lock_class_key reservation_seqcount_class;
++EXPORT_SYMBOL(reservation_seqcount_class);
++
++const char reservation_seqcount_string[] = "reservation_seqcount";
++EXPORT_SYMBOL(reservation_seqcount_string);
++/*
++ * Reserve space to add a shared fence to a reservation_object,
++ * must be called with obj->lock held.
++ */
++int reservation_object_reserve_shared(struct reservation_object *obj)
++{
++ struct reservation_object_list *fobj, *old;
++ u32 max;
++
++ old = reservation_object_get_list(obj);
++
++ if (old && old->shared_max) {
++ if (old->shared_count < old->shared_max) {
++ /* perform an in-place update */
++ kfree(obj->staged);
++ obj->staged = NULL;
++ return 0;
++ } else
++ max = old->shared_max * 2;
++ } else
++ max = 4;
++
++ /*
++ * resize obj->staged or allocate if it doesn't exist,
++ * noop if already correct size
++ */
++ fobj = krealloc(obj->staged, offsetof(typeof(*fobj), shared[max]),
++ GFP_KERNEL);
++ if (!fobj)
++ return -ENOMEM;
++
++ obj->staged = fobj;
++ fobj->shared_max = max;
++ return 0;
++}
++EXPORT_SYMBOL(reservation_object_reserve_shared);
++
++static void
++reservation_object_add_shared_inplace(struct reservation_object *obj,
++ struct reservation_object_list *fobj,
++ struct fence *fence)
++{
++ u32 i;
++
++ fence_get(fence);
++
++ preempt_disable();
++ write_seqcount_begin(&obj->seq);
++
++ for (i = 0; i < fobj->shared_count; ++i) {
++ struct fence *old_fence;
++
++ old_fence = rcu_dereference_protected(fobj->shared[i],
++ reservation_object_held(obj));
++
++ if (old_fence->context == fence->context) {
++ /* memory barrier is added by write_seqcount_begin */
++ RCU_INIT_POINTER(fobj->shared[i], fence);
++ write_seqcount_end(&obj->seq);
++ preempt_enable();
++
++ fence_put(old_fence);
++ return;
++ }
++ }
++
++ /*
++ * memory barrier is added by write_seqcount_begin,
++ * fobj->shared_count is protected by this lock too
++ */
++ RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence);
++ fobj->shared_count++;
++
++ write_seqcount_end(&obj->seq);
++ preempt_enable();
++}
++
++static void
++reservation_object_add_shared_replace(struct reservation_object *obj,
++ struct reservation_object_list *old,
++ struct reservation_object_list *fobj,
++ struct fence *fence)
++{
++ unsigned i;
++ struct fence *old_fence = NULL;
++
++ fence_get(fence);
++
++ if (!old) {
++ RCU_INIT_POINTER(fobj->shared[0], fence);
++ fobj->shared_count = 1;
++ goto done;
++ }
++
++ /*
++ * no need to bump fence refcounts, rcu_read access
++ * requires the use of kref_get_unless_zero, and the
++ * references from the old struct are carried over to
++ * the new.
++ */
++ fobj->shared_count = old->shared_count;
++
++ for (i = 0; i < old->shared_count; ++i) {
++ struct fence *check;
++
++ check = rcu_dereference_protected(old->shared[i],
++ reservation_object_held(obj));
++
++ if (!old_fence && check->context == fence->context) {
++ old_fence = check;
++ RCU_INIT_POINTER(fobj->shared[i], fence);
++ } else
++ RCU_INIT_POINTER(fobj->shared[i], check);
++ }
++ if (!old_fence) {
++ RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence);
++ fobj->shared_count++;
++ }
++
++done:
++ preempt_disable();
++ write_seqcount_begin(&obj->seq);
++ /*
++ * RCU_INIT_POINTER can be used here,
++ * seqcount provides the necessary barriers
++ */
++ RCU_INIT_POINTER(obj->fence, fobj);
++ write_seqcount_end(&obj->seq);
++ preempt_enable();
++
++ if (old)
++ kfree_rcu(old, rcu);
++
++ if (old_fence)
++ fence_put(old_fence);
++}
++
++/*
++ * Add a fence to a shared slot, obj->lock must be held, and
++ * reservation_object_reserve_shared_fence has been called.
++ */
++void reservation_object_add_shared_fence(struct reservation_object *obj,
++ struct fence *fence)
++{
++ struct reservation_object_list *old, *fobj = obj->staged;
++
++ old = reservation_object_get_list(obj);
++ obj->staged = NULL;
++
++ if (!fobj) {
++ BUG_ON(old->shared_count >= old->shared_max);
++ reservation_object_add_shared_inplace(obj, old, fence);
++ } else
++ reservation_object_add_shared_replace(obj, old, fobj, fence);
++}
++EXPORT_SYMBOL(reservation_object_add_shared_fence);
++
++void reservation_object_add_excl_fence(struct reservation_object *obj,
++ struct fence *fence)
++{
++ struct fence *old_fence = reservation_object_get_excl(obj);
++ struct reservation_object_list *old;
++ u32 i = 0;
++
++ old = reservation_object_get_list(obj);
++ if (old)
++ i = old->shared_count;
++
++ if (fence)
++ fence_get(fence);
++
++ preempt_disable();
++ write_seqcount_begin(&obj->seq);
++ /* write_seqcount_begin provides the necessary memory barrier */
++ RCU_INIT_POINTER(obj->fence_excl, fence);
++ if (old)
++ old->shared_count = 0;
++ write_seqcount_end(&obj->seq);
++ preempt_enable();
++
++ /* inplace update, no shared fences */
++ while (i--)
++ fence_put(rcu_dereference_protected(old->shared[i],
++ reservation_object_held(obj)));
++
++ if (old_fence)
++ fence_put(old_fence);
++}
++EXPORT_SYMBOL(reservation_object_add_excl_fence);
++
++int reservation_object_get_fences_rcu(struct reservation_object *obj,
++ struct fence **pfence_excl,
++ unsigned *pshared_count,
++ struct fence ***pshared)
++{
++ unsigned shared_count = 0;
++ unsigned retry = 1;
++ struct fence **shared = NULL, *fence_excl = NULL;
++ int ret = 0;
++
++ while (retry) {
++ struct reservation_object_list *fobj;
++ unsigned seq;
++
++ seq = read_seqcount_begin(&obj->seq);
++
++ rcu_read_lock();
++
++ fobj = rcu_dereference(obj->fence);
++ if (fobj) {
++ struct fence **nshared;
++ size_t sz = sizeof(*shared) * fobj->shared_max;
++
++ nshared = krealloc(shared, sz,
++ GFP_NOWAIT | __GFP_NOWARN);
++ if (!nshared) {
++ rcu_read_unlock();
++ nshared = krealloc(shared, sz, GFP_KERNEL);
++ if (nshared) {
++ shared = nshared;
++ continue;
++ }
++
++ ret = -ENOMEM;
++ shared_count = 0;
++ break;
++ }
++ shared = nshared;
++ memcpy(shared, fobj->shared, sz);
++ shared_count = fobj->shared_count;
++ } else
++ shared_count = 0;
++ fence_excl = rcu_dereference(obj->fence_excl);
++
++ retry = read_seqcount_retry(&obj->seq, seq);
++ if (retry)
++ goto unlock;
++
++ if (!fence_excl || fence_get_rcu(fence_excl)) {
++ unsigned i;
++
++ for (i = 0; i < shared_count; ++i) {
++ if (fence_get_rcu(shared[i]))
++ continue;
++
++ /* uh oh, refcount failed, abort and retry */
++ while (i--)
++ fence_put(shared[i]);
++
++ if (fence_excl) {
++ fence_put(fence_excl);
++ fence_excl = NULL;
++ }
++
++ retry = 1;
++ break;
++ }
++ } else
++ retry = 1;
++
++unlock:
++ rcu_read_unlock();
++ }
++ *pshared_count = shared_count;
++ if (shared_count)
++ *pshared = shared;
++ else {
++ *pshared = NULL;
++ kfree(shared);
++ }
++ *pfence_excl = fence_excl;
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(reservation_object_get_fences_rcu);
++
++long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
++ bool wait_all, bool intr,
++ unsigned long timeout)
++{
++ struct fence *fence;
++ unsigned seq, shared_count, i = 0;
++ long ret = timeout;
++
++retry:
++ fence = NULL;
++ shared_count = 0;
++ seq = read_seqcount_begin(&obj->seq);
++ rcu_read_lock();
++
++ if (wait_all) {
++ struct reservation_object_list *fobj = rcu_dereference(obj->fence);
++
++ if (fobj)
++ shared_count = fobj->shared_count;
++
++ if (read_seqcount_retry(&obj->seq, seq))
++ goto unlock_retry;
++
++ for (i = 0; i < shared_count; ++i) {
++ struct fence *lfence = rcu_dereference(fobj->shared[i]);
++
++ if (test_bit(FENCE_FLAG_SIGNALED_BIT, &lfence->flags))
++ continue;
++
++ if (!fence_get_rcu(lfence))
++ goto unlock_retry;
++
++ if (fence_is_signaled(lfence)) {
++ fence_put(lfence);
++ continue;
++ }
++
++ fence = lfence;
++ break;
++ }
++ }
++
++ if (!shared_count) {
++ struct fence *fence_excl = rcu_dereference(obj->fence_excl);
++
++ if (read_seqcount_retry(&obj->seq, seq))
++ goto unlock_retry;
++
++ if (fence_excl &&
++ !test_bit(FENCE_FLAG_SIGNALED_BIT, &fence_excl->flags)) {
++ if (!fence_get_rcu(fence_excl))
++ goto unlock_retry;
++
++ if (fence_is_signaled(fence_excl))
++ fence_put(fence_excl);
++ else
++ fence = fence_excl;
++ }
++ }
++
++ rcu_read_unlock();
++ if (fence) {
++ ret = fence_wait_timeout(fence, intr, ret);
++ fence_put(fence);
++ if (ret > 0 && wait_all && (i + 1 < shared_count))
++ goto retry;
++ }
++ return ret;
++
++unlock_retry:
++ rcu_read_unlock();
++ goto retry;
++}
++EXPORT_SYMBOL_GPL(reservation_object_wait_timeout_rcu);
++
++
++static inline int
++reservation_object_test_signaled_single(struct fence *passed_fence)
++{
++ struct fence *fence, *lfence = passed_fence;
++ int ret = 1;
++
++ if (!test_bit(FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) {
++ fence = fence_get_rcu(lfence);
++ if (!fence)
++ return -1;
++
++ ret = !!fence_is_signaled(fence);
++ fence_put(fence);
++ }
++ return ret;
++}
++
++bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
++ bool test_all)
++{
++ unsigned seq, shared_count;
++ int ret = true;
++
++retry:
++ shared_count = 0;
++ seq = read_seqcount_begin(&obj->seq);
++ rcu_read_lock();
++
++ if (test_all) {
++ unsigned i;
++
++ struct reservation_object_list *fobj = rcu_dereference(obj->fence);
++
++ if (fobj)
++ shared_count = fobj->shared_count;
++
++ if (read_seqcount_retry(&obj->seq, seq))
++ goto unlock_retry;
++
++ for (i = 0; i < shared_count; ++i) {
++ struct fence *fence = rcu_dereference(fobj->shared[i]);
++
++ ret = reservation_object_test_signaled_single(fence);
++ if (ret < 0)
++ goto unlock_retry;
++ else if (!ret)
++ break;
++ }
++
++ /*
++ * There could be a read_seqcount_retry here, but nothing cares
++ * about whether it's the old or newer fence pointers that are
++ * signaled. That race could still have happened after checking
++ * read_seqcount_retry. If you care, use ww_mutex_lock.
++ */
++ }
++
++ if (!shared_count) {
++ struct fence *fence_excl = rcu_dereference(obj->fence_excl);
++
++ if (read_seqcount_retry(&obj->seq, seq))
++ goto unlock_retry;
++
++ if (fence_excl) {
++ ret = reservation_object_test_signaled_single(fence_excl);
++ if (ret < 0)
++ goto unlock_retry;
++ }
++ }
++
++ rcu_read_unlock();
++ return ret;
++
++unlock_retry:
++ rcu_read_unlock();
++ goto retry;
++}
++EXPORT_SYMBOL_GPL(reservation_object_test_signaled_rcu);
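
The shared-fence path above is deliberately two-phase: reserve a slot
under the lock (which may allocate and can fail), then add the fence
(which cannot fail). A minimal sketch against a buffer object's
reservation_object (bo and fence are illustrative):

	struct reservation_object *resv = &bo->resv;
	int ret;

	ww_mutex_lock(&resv->lock, NULL);	/* or with a ww_acquire_ctx */

	ret = reservation_object_reserve_shared(resv);
	if (!ret)
		reservation_object_add_shared_fence(resv, fence);
	/* a writer would instead publish an exclusive fence:
	 *	reservation_object_add_excl_fence(resv, fence);
	 */

	ww_mutex_unlock(&resv->lock);
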
+diff -Naur a/drivers/base/seqno-fence.c b/drivers/base/seqno-fence.c
+--- a/drivers/base/seqno-fence.c 1970-01-01 05:30:00.000000000 +0530
++++ b/drivers/base/seqno-fence.c 2015-03-26 14:42:37.694435402 +0530
+@@ -0,0 +1,73 @@
++/*
++ * seqno-fence, using a dma-buf to synchronize fencing
++ *
++ * Copyright (C) 2012 Texas Instruments
++ * Copyright (C) 2012-2014 Canonical Ltd
++ * Authors:
++ * Rob Clark <robdclark@gmail.com>
++ * Maarten Lankhorst <maarten.lankhorst@canonical.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published by
++ * the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ */
++
++#include <linux/slab.h>
++#include <linux/export.h>
++#include <linux/seqno-fence.h>
++
++static const char *seqno_fence_get_driver_name(struct fence *fence)
++{
++ struct seqno_fence *seqno_fence = to_seqno_fence(fence);
++ return seqno_fence->ops->get_driver_name(fence);
++}
++
++static const char *seqno_fence_get_timeline_name(struct fence *fence)
++{
++ struct seqno_fence *seqno_fence = to_seqno_fence(fence);
++ return seqno_fence->ops->get_timeline_name(fence);
++}
++
++static bool seqno_enable_signaling(struct fence *fence)
++{
++ struct seqno_fence *seqno_fence = to_seqno_fence(fence);
++ return seqno_fence->ops->enable_signaling(fence);
++}
++
++static bool seqno_signaled(struct fence *fence)
++{
++ struct seqno_fence *seqno_fence = to_seqno_fence(fence);
++ return seqno_fence->ops->signaled && seqno_fence->ops->signaled(fence);
++}
++
++static void seqno_release(struct fence *fence)
++{
++ struct seqno_fence *f = to_seqno_fence(fence);
++
++ dma_buf_put(f->sync_buf);
++ if (f->ops->release)
++ f->ops->release(fence);
++ else
++ fence_free(&f->base);
++}
++
++static signed long seqno_wait(struct fence *fence, bool intr, signed long timeout)
++{
++ struct seqno_fence *f = to_seqno_fence(fence);
++ return f->ops->wait(fence, intr, timeout);
++}
++
++const struct fence_ops seqno_fence_ops = {
++ .get_driver_name = seqno_fence_get_driver_name,
++ .get_timeline_name = seqno_fence_get_timeline_name,
++ .enable_signaling = seqno_enable_signaling,
++ .signaled = seqno_signaled,
++ .wait = seqno_wait,
++ .release = seqno_release,
++};
++EXPORT_SYMBOL(seqno_fence_ops);
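
Every callback above simply unwraps with to_seqno_fence() and forwards
to the driver's own ops. A hedged sketch of constructing one, assuming
the seqno_fence_init() helper from the matching linux/seqno-fence.h in
this series (my_lock, sync_buf and my_driver_fence_ops are
illustrative):

	struct seqno_fence *sf = kzalloc(sizeof(*sf), GFP_KERNEL);

	if (!sf)
		return -ENOMEM;
	/* sync_buf is the dma-buf holding the hardware-visible seqno
	 * at byte offset seqno_ofs */
	seqno_fence_init(sf, &my_lock, sync_buf, context, seqno_ofs,
			 seqno, SEQNO_FENCE_WAIT_GEQUAL,
			 &my_driver_fence_ops);
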
+diff -Naur a/drivers/gpu/drm/ati_pcigart.c b/drivers/gpu/drm/ati_pcigart.c
+--- a/drivers/gpu/drm/ati_pcigart.c 2015-03-26 14:43:30.430436436 +0530
++++ b/drivers/gpu/drm/ati_pcigart.c 2015-03-26 14:42:38.702435421 +0530
+@@ -34,6 +34,8 @@
+ #include <linux/export.h>
+ #include <drm/drmP.h>
+
++#include <drm/ati_pcigart.h>
++
+ # define ATI_PCIGART_PAGE_SIZE 4096 /**< PCI GART page size */
+
+ static int drm_ati_alloc_pcigart_table(struct drm_device *dev,
+diff -Naur a/drivers/gpu/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c
+--- a/drivers/gpu/drm/drm_agpsupport.c 2015-03-26 14:43:30.490436437 +0530
++++ b/drivers/gpu/drm/drm_agpsupport.c 2015-03-26 14:42:38.702435421 +0530
+@@ -34,6 +34,7 @@
+ #include <drm/drmP.h>
+ #include <linux/module.h>
+ #include <linux/slab.h>
++#include "drm_legacy.h"
+
+ #if __OS_HAS_AGP
+
+diff -Naur a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
+--- a/drivers/gpu/drm/drm_atomic.c 1970-01-01 05:30:00.000000000 +0530
++++ b/drivers/gpu/drm/drm_atomic.c 2015-03-26 14:42:38.702435421 +0530
+@@ -0,0 +1,657 @@
++/*
++ * Copyright (C) 2014 Red Hat
++ * Copyright (C) 2014 Intel Corp.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ * Rob Clark <robdclark@gmail.com>
++ * Daniel Vetter <daniel.vetter@ffwll.ch>
++ */
++
++
++#include <drm/drmP.h>
++#include <drm/drm_atomic.h>
++#include <drm/drm_plane_helper.h>
++
++static void kfree_state(struct drm_atomic_state *state)
++{
++ kfree(state->connectors);
++ kfree(state->connector_states);
++ kfree(state->crtcs);
++ kfree(state->crtc_states);
++ kfree(state->planes);
++ kfree(state->plane_states);
++ kfree(state);
++}
++
++/**
++ * drm_atomic_state_alloc - allocate atomic state
++ * @dev: DRM device
++ *
++ * This allocates an empty atomic state to track updates.
++ */
++struct drm_atomic_state *
++drm_atomic_state_alloc(struct drm_device *dev)
++{
++ struct drm_atomic_state *state;
++
++ state = kzalloc(sizeof(*state), GFP_KERNEL);
++ if (!state)
++ return NULL;
++
++ state->num_connector = ACCESS_ONCE(dev->mode_config.num_connector);
++
++ state->crtcs = kcalloc(dev->mode_config.num_crtc,
++ sizeof(*state->crtcs), GFP_KERNEL);
++ if (!state->crtcs)
++ goto fail;
++ state->crtc_states = kcalloc(dev->mode_config.num_crtc,
++ sizeof(*state->crtc_states), GFP_KERNEL);
++ if (!state->crtc_states)
++ goto fail;
++ state->planes = kcalloc(dev->mode_config.num_total_plane,
++ sizeof(*state->planes), GFP_KERNEL);
++ if (!state->planes)
++ goto fail;
++ state->plane_states = kcalloc(dev->mode_config.num_total_plane,
++ sizeof(*state->plane_states), GFP_KERNEL);
++ if (!state->plane_states)
++ goto fail;
++ state->connectors = kcalloc(state->num_connector,
++ sizeof(*state->connectors),
++ GFP_KERNEL);
++ if (!state->connectors)
++ goto fail;
++ state->connector_states = kcalloc(state->num_connector,
++ sizeof(*state->connector_states),
++ GFP_KERNEL);
++ if (!state->connector_states)
++ goto fail;
++
++ state->dev = dev;
++
++ DRM_DEBUG_KMS("Allocate atomic state %p\n", state);
++
++ return state;
++fail:
++ kfree_state(state);
++
++ return NULL;
++}
++EXPORT_SYMBOL(drm_atomic_state_alloc);
++
++/**
++ * drm_atomic_state_clear - clear state object
++ * @state: atomic state
++ *
++ * When the w/w mutex algorithm detects a deadlock we need to back off and drop
++ * all locks. So someone else could sneak in and change the current modeset
++ * configuration. Which means that all the state assembled in @state is no
++ * longer an atomic update to the current state, but to some arbitrary earlier
++ * state. Which could break assumptions the driver's ->atomic_check likely
++ * relies on.
++ *
++ * Hence we must clear all cached state and completely start over, using this
++ * function.
++ */
++void drm_atomic_state_clear(struct drm_atomic_state *state)
++{
++ struct drm_device *dev = state->dev;
++ struct drm_mode_config *config = &dev->mode_config;
++ int i;
++
++ DRM_DEBUG_KMS("Clearing atomic state %p\n", state);
++
++ for (i = 0; i < state->num_connector; i++) {
++ struct drm_connector *connector = state->connectors[i];
++
++ if (!connector)
++ continue;
++
++ WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
++
++ connector->funcs->atomic_destroy_state(connector,
++ state->connector_states[i]);
++ }
++
++ for (i = 0; i < config->num_crtc; i++) {
++ struct drm_crtc *crtc = state->crtcs[i];
++
++ if (!crtc)
++ continue;
++
++ crtc->funcs->atomic_destroy_state(crtc,
++ state->crtc_states[i]);
++ }
++
++ for (i = 0; i < config->num_total_plane; i++) {
++ struct drm_plane *plane = state->planes[i];
++
++ if (!plane)
++ continue;
++
++ plane->funcs->atomic_destroy_state(plane,
++ state->plane_states[i]);
++ }
++}
++EXPORT_SYMBOL(drm_atomic_state_clear);
++
++/**
++ * drm_atomic_state_free - free all memory for an atomic state
++ * @state: atomic state to deallocate
++ *
++ * This frees all memory associated with an atomic state, including all the
++ * per-object state for planes, crtcs and connectors.
++ */
++void drm_atomic_state_free(struct drm_atomic_state *state)
++{
++ drm_atomic_state_clear(state);
++
++ DRM_DEBUG_KMS("Freeing atomic state %p\n", state);
++
++ kfree_state(state);
++}
++EXPORT_SYMBOL(drm_atomic_state_free);
++
++/**
++ * drm_atomic_get_crtc_state - get crtc state
++ * @state: global atomic state object
++ * @crtc: crtc to get state object for
++ *
++ * This function returns the crtc state for the given crtc, allocating it if
++ * needed. It will also grab the relevant crtc lock to make sure that the state
++ * is consistent.
++ *
++ * Returns:
++ *
++ * Either the allocated state or the error code encoded into the pointer. When
++ * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
++ * entire atomic sequence must be restarted. All other errors are fatal.
++ */
++struct drm_crtc_state *
++drm_atomic_get_crtc_state(struct drm_atomic_state *state,
++ struct drm_crtc *crtc)
++{
++ int ret, index;
++ struct drm_crtc_state *crtc_state;
++
++ index = drm_crtc_index(crtc);
++
++ if (state->crtc_states[index])
++ return state->crtc_states[index];
++
++ ret = drm_modeset_lock(&crtc->mutex, state->acquire_ctx);
++ if (ret)
++ return ERR_PTR(ret);
++
++ crtc_state = crtc->funcs->atomic_duplicate_state(crtc);
++ if (!crtc_state)
++ return ERR_PTR(-ENOMEM);
++
++ state->crtc_states[index] = crtc_state;
++ state->crtcs[index] = crtc;
++ crtc_state->state = state;
++
++ DRM_DEBUG_KMS("Added [CRTC:%d] %p state to %p\n",
++ crtc->base.id, crtc_state, state);
++
++ return crtc_state;
++}
++EXPORT_SYMBOL(drm_atomic_get_crtc_state);
++
++/**
++ * drm_atomic_get_plane_state - get plane state
++ * @state: global atomic state object
++ * @plane: plane to get state object for
++ *
++ * This function returns the plane state for the given plane, allocating it if
++ * needed. It will also grab the relevant plane lock to make sure that the state
++ * is consistent.
++ *
++ * Returns:
++ *
++ * Either the allocated state or the error code encoded into the pointer. When
++ * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
++ * entire atomic sequence must be restarted. All other errors are fatal.
++ */
++struct drm_plane_state *
++drm_atomic_get_plane_state(struct drm_atomic_state *state,
++ struct drm_plane *plane)
++{
++ int ret, index;
++ struct drm_plane_state *plane_state;
++
++ index = drm_plane_index(plane);
++
++ if (state->plane_states[index])
++ return state->plane_states[index];
++
++ ret = drm_modeset_lock(&plane->mutex, state->acquire_ctx);
++ if (ret)
++ return ERR_PTR(ret);
++
++ plane_state = plane->funcs->atomic_duplicate_state(plane);
++ if (!plane_state)
++ return ERR_PTR(-ENOMEM);
++
++ state->plane_states[index] = plane_state;
++ state->planes[index] = plane;
++ plane_state->state = state;
++
++ DRM_DEBUG_KMS("Added [PLANE:%d] %p state to %p\n",
++ plane->base.id, plane_state, state);
++
++ if (plane_state->crtc) {
++ struct drm_crtc_state *crtc_state;
++
++ crtc_state = drm_atomic_get_crtc_state(state,
++ plane_state->crtc);
++ if (IS_ERR(crtc_state))
++ return ERR_CAST(crtc_state);
++ }
++
++ return plane_state;
++}
++EXPORT_SYMBOL(drm_atomic_get_plane_state);
++
++/**
++ * drm_atomic_get_connector_state - get connector state
++ * @state: global atomic state object
++ * @connector: connector to get state object for
++ *
++ * This function returns the connector state for the given connector,
++ * allocating it if needed. It will also grab the relevant connector lock to
++ * make sure that the state is consistent.
++ *
++ * Returns:
++ *
++ * Either the allocated state or the error code encoded into the pointer. When
++ * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
++ * entire atomic sequence must be restarted. All other errors are fatal.
++ */
++struct drm_connector_state *
++drm_atomic_get_connector_state(struct drm_atomic_state *state,
++ struct drm_connector *connector)
++{
++ int ret, index;
++ struct drm_mode_config *config = &connector->dev->mode_config;
++ struct drm_connector_state *connector_state;
++
++ ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx);
++ if (ret)
++ return ERR_PTR(ret);
++
++ index = drm_connector_index(connector);
++
++ /*
++ * Construction of atomic state updates can race with a connector
++ * hot-add which might overflow. In this case flip the table and just
++ * restart the entire ioctl - no one is fast enough to livelock a cpu
++ * with physical hotplug events anyway.
++ *
++ * Note that we only grab the indexes once we have the right lock to
++ * prevent hotplug/unplugging of connectors. So removal is no problem,
++ * at most the array is a bit too large.
++ */
++ if (index >= state->num_connector) {
++ DRM_DEBUG_KMS("Hot-added connector would overflow state array, restarting\n");
++ return ERR_PTR(-EAGAIN);
++ }
++
++ if (state->connector_states[index])
++ return state->connector_states[index];
++
++ connector_state = connector->funcs->atomic_duplicate_state(connector);
++ if (!connector_state)
++ return ERR_PTR(-ENOMEM);
++
++ state->connector_states[index] = connector_state;
++ state->connectors[index] = connector;
++ connector_state->state = state;
++
++ DRM_DEBUG_KMS("Added [CONNECTOR:%d] %p state to %p\n",
++ connector->base.id, connector_state, state);
++
++ if (connector_state->crtc) {
++ struct drm_crtc_state *crtc_state;
++
++ crtc_state = drm_atomic_get_crtc_state(state,
++ connector_state->crtc);
++ if (IS_ERR(crtc_state))
++ return ERR_CAST(crtc_state);
++ }
++
++ return connector_state;
++}
++EXPORT_SYMBOL(drm_atomic_get_connector_state);
++
++/**
++ * drm_atomic_set_crtc_for_plane - set crtc for plane
++ * @state: the incoming atomic state
++ * @plane: the plane whose incoming state to update
++ * @crtc: crtc to use for the plane
++ *
++ * Changing the assigned crtc for a plane requires us to grab the lock and state
++ * for the new crtc, as needed. This function takes care of all these details
++ * besides updating the pointer in the state object itself.
++ *
++ * Returns:
++ * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
++ * then the w/w mutex code has detected a deadlock and the entire atomic
++ * sequence must be restarted. All other errors are fatal.
++ */
++int
++drm_atomic_set_crtc_for_plane(struct drm_atomic_state *state,
++ struct drm_plane *plane, struct drm_crtc *crtc)
++{
++ struct drm_plane_state *plane_state =
++ drm_atomic_get_plane_state(state, plane);
++ struct drm_crtc_state *crtc_state;
++
++ if (WARN_ON(IS_ERR(plane_state)))
++ return PTR_ERR(plane_state);
++
++ if (plane_state->crtc) {
++ crtc_state = drm_atomic_get_crtc_state(plane_state->state,
++ plane_state->crtc);
++ if (WARN_ON(IS_ERR(crtc_state)))
++ return PTR_ERR(crtc_state);
++
++ crtc_state->plane_mask &= ~(1 << drm_plane_index(plane));
++ }
++
++ plane_state->crtc = crtc;
++
++ if (crtc) {
++ crtc_state = drm_atomic_get_crtc_state(plane_state->state,
++ crtc);
++ if (IS_ERR(crtc_state))
++ return PTR_ERR(crtc_state);
++ crtc_state->plane_mask |= (1 << drm_plane_index(plane));
++ }
++
++ if (crtc)
++ DRM_DEBUG_KMS("Link plane state %p to [CRTC:%d]\n",
++ plane_state, crtc->base.id);
++ else
++ DRM_DEBUG_KMS("Link plane state %p to [NOCRTC]\n", plane_state);
++
++ return 0;
++}
++EXPORT_SYMBOL(drm_atomic_set_crtc_for_plane);
++
++/**
++ * drm_atomic_set_fb_for_plane - set framebuffer for plane
++ * @plane_state: atomic state object for the plane
++ * @fb: fb to use for the plane
++ *
++ * Changing the assigned framebuffer for a plane requires us to grab a reference
++ * to the new fb and drop the reference to the old fb, if there is one. This
++ * function takes care of all these details besides updating the pointer in the
++ * state object itself.
++ */
++void
++drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state,
++ struct drm_framebuffer *fb)
++{
++ if (plane_state->fb)
++ drm_framebuffer_unreference(plane_state->fb);
++ if (fb)
++ drm_framebuffer_reference(fb);
++ plane_state->fb = fb;
++
++ if (fb)
++ DRM_DEBUG_KMS("Set [FB:%d] for plane state %p\n",
++ fb->base.id, plane_state);
++ else
++ DRM_DEBUG_KMS("Set [NOFB] for plane state %p\n", plane_state);
++}
++EXPORT_SYMBOL(drm_atomic_set_fb_for_plane);
++
++/**
++ * drm_atomic_set_crtc_for_connector - set crtc for connector
++ * @conn_state: atomic state object for the connector
++ * @crtc: crtc to use for the connector
++ *
++ * Changing the assigned crtc for a connector requires us to grab the lock and
++ * state for the new crtc, as needed. This function takes care of all these
++ * details besides updating the pointer in the state object itself.
++ *
++ * Returns:
++ * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
++ * then the w/w mutex code has detected a deadlock and the entire atomic
++ * sequence must be restarted. All other errors are fatal.
++ */
++int
++drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
++ struct drm_crtc *crtc)
++{
++ struct drm_crtc_state *crtc_state;
++
++ if (crtc) {
++ crtc_state = drm_atomic_get_crtc_state(conn_state->state, crtc);
++ if (IS_ERR(crtc_state))
++ return PTR_ERR(crtc_state);
++ }
++
++ conn_state->crtc = crtc;
++
++ if (crtc)
++ DRM_DEBUG_KMS("Link connector state %p to [CRTC:%d]\n",
++ conn_state, crtc->base.id);
++ else
++ DRM_DEBUG_KMS("Link connector state %p to [NOCRTC]\n",
++ conn_state);
++
++ return 0;
++}
++EXPORT_SYMBOL(drm_atomic_set_crtc_for_connector);
++
++/**
++ * drm_atomic_add_affected_connectors - add connectors for crtc
++ * @state: atomic state
++ * @crtc: DRM crtc
++ *
++ * This function walks the current configuration and adds all connectors
++ * currently using @crtc to the atomic configuration @state. Note that this
++ * function must acquire the connection mutex. This can potentially cause
++ * unneeded serialization if the update is just for the planes on one crtc. Hence
++ * drivers and helpers should only call this when really needed (e.g. when a
++ * full modeset needs to happen due to some change).
++ *
++ * Returns:
++ * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
++ * then the w/w mutex code has detected a deadlock and the entire atomic
++ * sequence must be restarted. All other errors are fatal.
++ */
++int
++drm_atomic_add_affected_connectors(struct drm_atomic_state *state,
++ struct drm_crtc *crtc)
++{
++ struct drm_mode_config *config = &state->dev->mode_config;
++ struct drm_connector *connector;
++ struct drm_connector_state *conn_state;
++ int ret;
++
++ ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx);
++ if (ret)
++ return ret;
++
++ DRM_DEBUG_KMS("Adding all current connectors for [CRTC:%d] to %p\n",
++ crtc->base.id, state);
++
++ /*
++ * Changed connectors are already in @state, so only need to look at the
++ * current configuration.
++ */
++ list_for_each_entry(connector, &config->connector_list, head) {
++ if (connector->state->crtc != crtc)
++ continue;
++
++ conn_state = drm_atomic_get_connector_state(state, connector);
++ if (IS_ERR(conn_state))
++ return PTR_ERR(conn_state);
++ }
++
++ return 0;
++}
++EXPORT_SYMBOL(drm_atomic_add_affected_connectors);
++
++/**
++ * drm_atomic_connectors_for_crtc - count number of connected outputs
++ * @state: atomic state
++ * @crtc: DRM crtc
++ *
++ * This function counts all connectors which will be connected to @crtc
++ * according to @state. Useful to recompute the enable state for @crtc.
++ */
++int
++drm_atomic_connectors_for_crtc(struct drm_atomic_state *state,
++ struct drm_crtc *crtc)
++{
++ int i, num_connected_connectors = 0;
++
++ for (i = 0; i < state->num_connector; i++) {
++ struct drm_connector_state *conn_state;
++
++ conn_state = state->connector_states[i];
++
++ if (conn_state && conn_state->crtc == crtc)
++ num_connected_connectors++;
++ }
++
++ DRM_DEBUG_KMS("State %p has %i connectors for [CRTC:%d]\n",
++ state, num_connected_connectors, crtc->base.id);
++
++ return num_connected_connectors;
++}
++EXPORT_SYMBOL(drm_atomic_connectors_for_crtc);
++
++/**
++ * drm_atomic_legacy_backoff - locking backoff for legacy ioctls
++ * @state: atomic state
++ *
++ * This function should be used by legacy entry points which don't understand
++ * -EDEADLK semantics. For simplicity this one will grab all modeset locks after
++ * the slowpath completed.
++ */
++void drm_atomic_legacy_backoff(struct drm_atomic_state *state)
++{
++ int ret;
++
++retry:
++ drm_modeset_backoff(state->acquire_ctx);
++
++ ret = drm_modeset_lock(&state->dev->mode_config.connection_mutex,
++ state->acquire_ctx);
++ if (ret)
++ goto retry;
++ ret = drm_modeset_lock_all_crtcs(state->dev,
++ state->acquire_ctx);
++ if (ret)
++ goto retry;
++}
++EXPORT_SYMBOL(drm_atomic_legacy_backoff);
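++
++/*
++ * Illustrative usage (editor's sketch, not from the original patch): legacy
++ * entry points built on top of the atomic machinery typically pair this
++ * helper with drm_atomic_state_clear() in a retry loop:
++ *
++ * retry:
++ * ret = drm_atomic_commit(state);
++ * if (ret == -EDEADLK) {
++ * drm_atomic_state_clear(state);
++ * drm_atomic_legacy_backoff(state);
++ * goto retry;
++ * }
++ *
++ * This is exactly the pattern used by the helpers in drm_atomic_helper.c
++ * further down in this patch.
++ */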
++
++/**
++ * drm_atomic_check_only - check whether a given config would work
++ * @state: atomic configuration to check
++ *
++ * Note that this function can return -EDEADLK if the driver needed to acquire
++ * more locks but encountered a deadlock. The caller must then do the usual w/w
++ * backoff dance and restart. All other errors are fatal.
++ *
++ * Returns:
++ * 0 on success, negative error code on failure.
++ */
++int drm_atomic_check_only(struct drm_atomic_state *state)
++{
++ struct drm_mode_config *config = &state->dev->mode_config;
++
++ DRM_DEBUG_KMS("checking %p\n", state);
++
++ if (config->funcs->atomic_check)
++ return config->funcs->atomic_check(state->dev, state);
++ else
++ return 0;
++}
++EXPORT_SYMBOL(drm_atomic_check_only);
++
++/**
++ * drm_atomic_commit - commit configuration atomically
++ * @state: atomic configuration to check
++ *
++ * Note that this function can return -EDEADLK if the driver needed to acquire
++ * more locks but encountered a deadlock. The caller must then do the usual w/w
++ * backoff dance and restart. All other errors are fatal.
++ *
++ * Also note that on successful execution ownership of @state is transferred
++ * from the caller of this function to the function itself. The caller must not
++ * free or in any other way access @state. If the function fails then the caller
++ * must clean up @state itself.
++ *
++ * Returns:
++ * 0 on success, negative error code on failure.
++ */
++int drm_atomic_commit(struct drm_atomic_state *state)
++{
++ struct drm_mode_config *config = &state->dev->mode_config;
++ int ret;
++
++ ret = drm_atomic_check_only(state);
++ if (ret)
++ return ret;
++
++ DRM_DEBUG_KMS("commiting %p\n", state);
++
++ return config->funcs->atomic_commit(state->dev, state, false);
++}
++EXPORT_SYMBOL(drm_atomic_commit);
++
++/**
++ * drm_atomic_async_commit - atomic&async configuration commit
++ * @state: atomic configuration to check
++ *
++ * Note that this function can return -EDEADLK if the driver needed to acquire
++ * more locks but encountered a deadlock. The caller must then do the usual w/w
++ * backoff dance and restart. All other errors are fatal.
++ *
++ * Also note that on successful execution ownership of @state is transferred
++ * from the caller of this function to the function itself. The caller must not
++ * free or in any other way access @state. If the function fails then the caller
++ * must clean up @state itself.
++ *
++ * Returns:
++ * 0 on success, negative error code on failure.
++ */
++int drm_atomic_async_commit(struct drm_atomic_state *state)
++{
++ struct drm_mode_config *config = &state->dev->mode_config;
++ int ret;
++
++ ret = drm_atomic_check_only(state);
++ if (ret)
++ return ret;
++
++ DRM_DEBUG_KMS("commiting %p asynchronously\n", state);
++
++ return config->funcs->atomic_commit(state->dev, state, true);
++}
++EXPORT_SYMBOL(drm_atomic_async_commit);
+diff -Naur a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
+--- a/drivers/gpu/drm/drm_atomic_helper.c 1970-01-01 05:30:00.000000000 +0530
++++ b/drivers/gpu/drm/drm_atomic_helper.c 2015-03-26 14:42:38.706435421 +0530
+@@ -0,0 +1,1966 @@
++/*
++ * Copyright (C) 2014 Red Hat
++ * Copyright (C) 2014 Intel Corp.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ * Rob Clark <robdclark@gmail.com>
++ * Daniel Vetter <daniel.vetter@ffwll.ch>
++ */
++
++#include <drm/drmP.h>
++#include <drm/drm_atomic.h>
++#include <drm/drm_plane_helper.h>
++#include <drm/drm_crtc_helper.h>
++#include <drm/drm_atomic_helper.h>
++#include <linux/fence.h>
++
++/**
++ * DOC: overview
++ *
++ * This helper library provides implementations of check and commit functions on
++ * top of the CRTC modeset helper callbacks and the plane helper callbacks. It
++ * also provides convenience implementations for the atomic state handling
++ * callbacks for drivers which don't need to subclass the drm core structures to
++ * add their own additional internal state.
++ *
++ * This library also provides default implementations for the check callback in
++ * drm_atomic_helper_check and for the commit callback with
++ * drm_atomic_helper_commit. But the individual stages and callbacks are exposed
++ * to allow drivers to mix and match and e.g. use the plane helpers only
++ * together with a driver private modeset implementation.
++ *
++ * This library also provides implementations for all the legacy driver
++ * interfaces on top of the atomic interface. See drm_atomic_helper_set_config,
++ * drm_atomic_helper_update_plane, drm_atomic_helper_disable_plane and the
++ * various functions to implement set_property callbacks. New drivers must not
++ * implement these functions themselves but must use the provided helpers.
++ */
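++
++/*
++ * Hypothetical wiring example (editor's sketch, not part of the original
++ * patch): a driver with no extra global checks can use the helpers directly
++ * as its mode_config hooks:
++ *
++ * static const struct drm_mode_config_funcs foo_mode_config_funcs = {
++ * .fb_create = foo_fb_create,
++ * .atomic_check = drm_atomic_helper_check,
++ * .atomic_commit = drm_atomic_helper_commit,
++ * };
++ *
++ * foo_fb_create stands in for the driver's own ->fb_create implementation.
++ */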
++static void
++drm_atomic_helper_plane_changed(struct drm_atomic_state *state,
++ struct drm_plane_state *plane_state,
++ struct drm_plane *plane)
++{
++ struct drm_crtc_state *crtc_state;
++
++ if (plane->state->crtc) {
++ crtc_state = state->crtc_states[drm_crtc_index(plane->crtc)];
++
++ if (WARN_ON(!crtc_state))
++ return;
++
++ crtc_state->planes_changed = true;
++ }
++
++ if (plane_state->crtc) {
++ crtc_state =
++ state->crtc_states[drm_crtc_index(plane_state->crtc)];
++
++ if (WARN_ON(!crtc_state))
++ return;
++
++ crtc_state->planes_changed = true;
++ }
++}
++
++static struct drm_crtc *
++get_current_crtc_for_encoder(struct drm_device *dev,
++ struct drm_encoder *encoder)
++{
++ struct drm_mode_config *config = &dev->mode_config;
++ struct drm_connector *connector;
++
++ WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
++
++ list_for_each_entry(connector, &config->connector_list, head) {
++ if (connector->state->best_encoder != encoder)
++ continue;
++
++ return connector->state->crtc;
++ }
++
++ return NULL;
++}
++
++static int
++steal_encoder(struct drm_atomic_state *state,
++ struct drm_encoder *encoder,
++ struct drm_crtc *encoder_crtc)
++{
++ struct drm_mode_config *config = &state->dev->mode_config;
++ struct drm_crtc_state *crtc_state;
++ struct drm_connector *connector;
++ struct drm_connector_state *connector_state;
++ int ret;
++
++ /*
++ * We can only steal an encoder coming from a connector, which means we
++ * must already hold the connection_mutex.
++ */
++ WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
++
++ DRM_DEBUG_KMS("[ENCODER:%d:%s] in use on [CRTC:%d], stealing it\n",
++ encoder->base.id, encoder->name,
++ encoder_crtc->base.id);
++
++ crtc_state = drm_atomic_get_crtc_state(state, encoder_crtc);
++ if (IS_ERR(crtc_state))
++ return PTR_ERR(crtc_state);
++
++ crtc_state->mode_changed = true;
++
++ list_for_each_entry(connector, &config->connector_list, head) {
++ if (connector->state->best_encoder != encoder)
++ continue;
++
++ DRM_DEBUG_KMS("Stealing encoder from [CONNECTOR:%d:%s]\n",
++ connector->base.id,
++ connector->name);
++
++ connector_state = drm_atomic_get_connector_state(state,
++ connector);
++ if (IS_ERR(connector_state))
++ return PTR_ERR(connector_state);
++
++ ret = drm_atomic_set_crtc_for_connector(connector_state, NULL);
++ if (ret)
++ return ret;
++ connector_state->best_encoder = NULL;
++ }
++
++ return 0;
++}
++
++static int
++update_connector_routing(struct drm_atomic_state *state, int conn_idx)
++{
++ struct drm_connector_helper_funcs *funcs;
++ struct drm_encoder *new_encoder;
++ struct drm_crtc *encoder_crtc;
++ struct drm_connector *connector;
++ struct drm_connector_state *connector_state;
++ struct drm_crtc_state *crtc_state;
++ int idx, ret;
++
++ connector = state->connectors[conn_idx];
++ connector_state = state->connector_states[conn_idx];
++
++ if (!connector)
++ return 0;
++
++ DRM_DEBUG_KMS("Updating routing for [CONNECTOR:%d:%s]\n",
++ connector->base.id,
++ connector->name);
++
++ if (connector->state->crtc != connector_state->crtc) {
++ if (connector->state->crtc) {
++ idx = drm_crtc_index(connector->state->crtc);
++
++ crtc_state = state->crtc_states[idx];
++ crtc_state->mode_changed = true;
++ }
++
++ if (connector_state->crtc) {
++ idx = drm_crtc_index(connector_state->crtc);
++
++ crtc_state = state->crtc_states[idx];
++ crtc_state->mode_changed = true;
++ }
++ }
++
++ if (!connector_state->crtc) {
++ DRM_DEBUG_KMS("Disabling [CONNECTOR:%d:%s]\n",
++ connector->base.id,
++ connector->name);
++
++ connector_state->best_encoder = NULL;
++
++ return 0;
++ }
++
++ funcs = connector->helper_private;
++ new_encoder = funcs->best_encoder(connector);
++
++ if (!new_encoder) {
++ DRM_DEBUG_KMS("No suitable encoder found for [CONNECTOR:%d:%s]\n",
++ connector->base.id,
++ connector->name);
++ return -EINVAL;
++ }
++
++ if (new_encoder == connector_state->best_encoder) {
++ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] keeps [ENCODER:%d:%s], now on [CRTC:%d]\n",
++ connector->base.id,
++ connector->name,
++ new_encoder->base.id,
++ new_encoder->name,
++ connector_state->crtc->base.id);
++
++ return 0;
++ }
++
++ encoder_crtc = get_current_crtc_for_encoder(state->dev,
++ new_encoder);
++
++ if (encoder_crtc) {
++ ret = steal_encoder(state, new_encoder, encoder_crtc);
++ if (ret) {
++ DRM_DEBUG_KMS("Encoder stealing failed for [CONNECTOR:%d:%s]\n",
++ connector->base.id,
++ connector->name);
++ return ret;
++ }
++ }
++
++ connector_state->best_encoder = new_encoder;
++ idx = drm_crtc_index(connector_state->crtc);
++
++ crtc_state = state->crtc_states[idx];
++ crtc_state->mode_changed = true;
++
++ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d]\n",
++ connector->base.id,
++ connector->name,
++ new_encoder->base.id,
++ new_encoder->name,
++ connector_state->crtc->base.id);
++
++ return 0;
++}
++
++static int
++mode_fixup(struct drm_atomic_state *state)
++{
++ int ncrtcs = state->dev->mode_config.num_crtc;
++ struct drm_crtc_state *crtc_state;
++ struct drm_connector_state *conn_state;
++ int i;
++ bool ret;
++
++ for (i = 0; i < ncrtcs; i++) {
++ crtc_state = state->crtc_states[i];
++
++ if (!crtc_state || !crtc_state->mode_changed)
++ continue;
++
++ drm_mode_copy(&crtc_state->adjusted_mode, &crtc_state->mode);
++ }
++
++ for (i = 0; i < state->num_connector; i++) {
++ struct drm_encoder_helper_funcs *funcs;
++ struct drm_encoder *encoder;
++
++ conn_state = state->connector_states[i];
++
++ if (!conn_state)
++ continue;
++
++ WARN_ON(!!conn_state->best_encoder != !!conn_state->crtc);
++
++ if (!conn_state->crtc || !conn_state->best_encoder)
++ continue;
++
++ crtc_state =
++ state->crtc_states[drm_crtc_index(conn_state->crtc)];
++
++ /*
++ * Each encoder has at most one connector (since we always steal
++ * it away), so we won't call ->mode_fixup twice.
++ */
++ encoder = conn_state->best_encoder;
++ funcs = encoder->helper_private;
++
++ if (encoder->bridge && encoder->bridge->funcs->mode_fixup) {
++ ret = encoder->bridge->funcs->mode_fixup(
++ encoder->bridge, &crtc_state->mode,
++ &crtc_state->adjusted_mode);
++ if (!ret) {
++ DRM_DEBUG_KMS("Bridge fixup failed\n");
++ return -EINVAL;
++ }
++ }
++
++ ret = funcs->mode_fixup(encoder, &crtc_state->mode,
++ &crtc_state->adjusted_mode);
++ if (!ret) {
++ DRM_DEBUG_KMS("[ENCODER:%d:%s] fixup failed\n",
++ encoder->base.id, encoder->name);
++ return -EINVAL;
++ }
++ }
++
++ for (i = 0; i < ncrtcs; i++) {
++ struct drm_crtc_helper_funcs *funcs;
++ struct drm_crtc *crtc;
++
++ crtc_state = state->crtc_states[i];
++ crtc = state->crtcs[i];
++
++ if (!crtc_state || !crtc_state->mode_changed)
++ continue;
++
++ funcs = crtc->helper_private;
++ ret = funcs->mode_fixup(crtc, &crtc_state->mode,
++ &crtc_state->adjusted_mode);
++ if (!ret) {
++ DRM_DEBUG_KMS("[CRTC:%d] fixup failed\n",
++ crtc->base.id);
++ return -EINVAL;
++ }
++ }
++
++ return 0;
++}
++
++static int
++drm_atomic_helper_check_modeset(struct drm_device *dev,
++ struct drm_atomic_state *state)
++{
++ int ncrtcs = dev->mode_config.num_crtc;
++ struct drm_crtc *crtc;
++ struct drm_crtc_state *crtc_state;
++ int i, ret;
++
++ for (i = 0; i < ncrtcs; i++) {
++ crtc = state->crtcs[i];
++ crtc_state = state->crtc_states[i];
++
++ if (!crtc)
++ continue;
++
++ if (!drm_mode_equal(&crtc->state->mode, &crtc_state->mode)) {
++ DRM_DEBUG_KMS("[CRTC:%d] mode changed\n",
++ crtc->base.id);
++ crtc_state->mode_changed = true;
++ }
++
++ if (crtc->state->enable != crtc_state->enable) {
++ DRM_DEBUG_KMS("[CRTC:%d] enable changed\n",
++ crtc->base.id);
++ crtc_state->mode_changed = true;
++ }
++ }
++
++ for (i = 0; i < state->num_connector; i++) {
++ /*
++ * This only sets crtc->mode_changed for routing changes;
++ * drivers must set crtc->mode_changed themselves when connector
++ * properties need to be updated.
++ */
++ ret = update_connector_routing(state, i);
++ if (ret)
++ return ret;
++ }
++
++ /*
++ * After all the routing has been prepared we need to add in any
++ * connector which is itself unchanged, but whose crtc changes its
++ * configuration. This must be done before calling mode_fixup in case a
++ * crtc only changed its mode but has the same set of connectors.
++ */
++ for (i = 0; i < ncrtcs; i++) {
++ int num_connectors;
++
++ crtc = state->crtcs[i];
++ crtc_state = state->crtc_states[i];
++
++ if (!crtc || !crtc_state->mode_changed)
++ continue;
++
++ DRM_DEBUG_KMS("[CRTC:%d] needs full modeset, enable: %c\n",
++ crtc->base.id,
++ crtc_state->enable ? 'y' : 'n');
++
++ ret = drm_atomic_add_affected_connectors(state, crtc);
++ if (ret != 0)
++ return ret;
++
++ num_connectors = drm_atomic_connectors_for_crtc(state,
++ crtc);
++
++ if (crtc_state->enable != !!num_connectors) {
++ DRM_DEBUG_KMS("[CRTC:%d] enabled/connectors mismatch\n",
++ crtc->base.id);
++
++ return -EINVAL;
++ }
++ }
++
++ return mode_fixup(state);
++}
++
++/**
++ * drm_atomic_helper_check - validate state object
++ * @dev: DRM device
++ * @state: the driver state object
++ *
++ * Check the state object to see if the requested state is physically possible.
++ * Only crtcs and planes have check callbacks, so for any additional (global)
++ * checking that a driver needs it can simply wrap that around this function.
++ * Drivers without such needs can directly use this as their ->atomic_check()
++ * callback.
++ *
++ * RETURNS
++ * Zero for success or -errno
++ */
++int drm_atomic_helper_check(struct drm_device *dev,
++ struct drm_atomic_state *state)
++{
++ int nplanes = dev->mode_config.num_total_plane;
++ int ncrtcs = dev->mode_config.num_crtc;
++ int i, ret = 0;
++
++ for (i = 0; i < nplanes; i++) {
++ struct drm_plane_helper_funcs *funcs;
++ struct drm_plane *plane = state->planes[i];
++ struct drm_plane_state *plane_state = state->plane_states[i];
++
++ if (!plane)
++ continue;
++
++ funcs = plane->helper_private;
++
++ drm_atomic_helper_plane_changed(state, plane_state, plane);
++
++ if (!funcs || !funcs->atomic_check)
++ continue;
++
++ ret = funcs->atomic_check(plane, plane_state);
++ if (ret) {
++ DRM_DEBUG_KMS("[PLANE:%d] atomic check failed\n",
++ plane->base.id);
++ return ret;
++ }
++ }
++
++ for (i = 0; i < ncrtcs; i++) {
++ struct drm_crtc_helper_funcs *funcs;
++ struct drm_crtc *crtc = state->crtcs[i];
++
++ if (!crtc)
++ continue;
++
++ funcs = crtc->helper_private;
++
++ if (!funcs || !funcs->atomic_check)
++ continue;
++
++ ret = funcs->atomic_check(crtc, state->crtc_states[i]);
++ if (ret) {
++ DRM_DEBUG_KMS("[CRTC:%d] atomic check failed\n",
++ crtc->base.id);
++ return ret;
++ }
++ }
++
++ ret = drm_atomic_helper_check_modeset(dev, state);
++ if (ret)
++ return ret;
++
++ return ret;
++}
++EXPORT_SYMBOL(drm_atomic_helper_check);
++
++static void
++disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
++{
++ int ncrtcs = old_state->dev->mode_config.num_crtc;
++ int i;
++
++ for (i = 0; i < old_state->num_connector; i++) {
++ struct drm_connector_state *old_conn_state;
++ struct drm_connector *connector;
++ struct drm_encoder_helper_funcs *funcs;
++ struct drm_encoder *encoder;
++
++ old_conn_state = old_state->connector_states[i];
++ connector = old_state->connectors[i];
++
++ /* Shut down everything that's in the changeset and currently
++ * still on. So we need to check the old, saved state. */
++ if (!old_conn_state || !old_conn_state->crtc)
++ continue;
++
++ encoder = old_conn_state->best_encoder;
++
++ /* We shouldn't get this far if we didn't previously have
++ * an encoder... but WARN_ON() rather than explode.
++ */
++ if (WARN_ON(!encoder))
++ continue;
++
++ funcs = encoder->helper_private;
++
++ /*
++ * Each encoder has at most one connector (since we always steal
++ * it away), so we won't call the disable hooks twice.
++ */
++ if (encoder->bridge)
++ encoder->bridge->funcs->disable(encoder->bridge);
++
++ /* Right function depends upon target state. */
++ if (connector->state->crtc)
++ funcs->prepare(encoder);
++ else if (funcs->disable)
++ funcs->disable(encoder);
++ else
++ funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
++
++ if (encoder->bridge)
++ encoder->bridge->funcs->post_disable(encoder->bridge);
++ }
++
++ for (i = 0; i < ncrtcs; i++) {
++ struct drm_crtc_helper_funcs *funcs;
++ struct drm_crtc *crtc;
++
++ crtc = old_state->crtcs[i];
++
++ /* Shut down everything that needs a full modeset. */
++ if (!crtc || !crtc->state->mode_changed)
++ continue;
++
++ funcs = crtc->helper_private;
++
++ /* Right function depends upon target state. */
++ if (crtc->state->enable)
++ funcs->prepare(crtc);
++ else if (funcs->disable)
++ funcs->disable(crtc);
++ else
++ funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
++ }
++}
++
++static void
++set_routing_links(struct drm_device *dev, struct drm_atomic_state *old_state)
++{
++ int ncrtcs = old_state->dev->mode_config.num_crtc;
++ int i;
++
++ /* clear out existing links */
++ for (i = 0; i < old_state->num_connector; i++) {
++ struct drm_connector *connector;
++
++ connector = old_state->connectors[i];
++
++ if (!connector || !connector->encoder)
++ continue;
++
++ WARN_ON(!connector->encoder->crtc);
++
++ connector->encoder->crtc = NULL;
++ connector->encoder = NULL;
++ }
++
++ /* set new links */
++ for (i = 0; i < old_state->num_connector; i++) {
++ struct drm_connector *connector;
++
++ connector = old_state->connectors[i];
++
++ if (!connector || !connector->state->crtc)
++ continue;
++
++ if (WARN_ON(!connector->state->best_encoder))
++ continue;
++
++ connector->encoder = connector->state->best_encoder;
++ connector->encoder->crtc = connector->state->crtc;
++ }
++
++ /* set legacy state in the crtc structure */
++ for (i = 0; i < ncrtcs; i++) {
++ struct drm_crtc *crtc;
++
++ crtc = old_state->crtcs[i];
++
++ if (!crtc)
++ continue;
++
++ crtc->mode = crtc->state->mode;
++ crtc->enabled = crtc->state->enable;
++ crtc->x = crtc->primary->state->src_x >> 16;
++ crtc->y = crtc->primary->state->src_y >> 16;
++ }
++}
++
++static void
++crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
++{
++ int ncrtcs = old_state->dev->mode_config.num_crtc;
++ int i;
++
++ for (i = 0; i < ncrtcs; i++) {
++ struct drm_crtc_helper_funcs *funcs;
++ struct drm_crtc *crtc;
++
++ crtc = old_state->crtcs[i];
++
++ if (!crtc || !crtc->state->mode_changed)
++ continue;
++
++ funcs = crtc->helper_private;
++
++ if (crtc->state->enable)
++ funcs->mode_set_nofb(crtc);
++ }
++
++ for (i = 0; i < old_state->num_connector; i++) {
++ struct drm_connector *connector;
++ struct drm_crtc_state *new_crtc_state;
++ struct drm_encoder_helper_funcs *funcs;
++ struct drm_encoder *encoder;
++ struct drm_display_mode *mode, *adjusted_mode;
++
++ connector = old_state->connectors[i];
++
++ if (!connector || !connector->state->best_encoder)
++ continue;
++
++ encoder = connector->state->best_encoder;
++ funcs = encoder->helper_private;
++ new_crtc_state = connector->state->crtc->state;
++ mode = &new_crtc_state->mode;
++ adjusted_mode = &new_crtc_state->adjusted_mode;
++
++ /*
++ * Each encoder has at most one connector (since we always steal
++ * it away), so we won't call the mode_set hooks twice.
++ */
++ funcs->mode_set(encoder, mode, adjusted_mode);
++
++ if (encoder->bridge && encoder->bridge->funcs->mode_set)
++ encoder->bridge->funcs->mode_set(encoder->bridge,
++ mode, adjusted_mode);
++ }
++}
++
++/**
++ * drm_atomic_helper_commit_pre_planes - modeset commit before plane updates
++ * @dev: DRM device
++ * @state: atomic state
++ *
++ * This function commits the modeset changes that need to be committed before
++ * updating planes. It shuts down all the outputs that need to be shut down and
++ * prepares them (if required) with the new mode.
++ */
++void drm_atomic_helper_commit_pre_planes(struct drm_device *dev,
++ struct drm_atomic_state *state)
++{
++ disable_outputs(dev, state);
++ set_routing_links(dev, state);
++ crtc_set_mode(dev, state);
++}
++EXPORT_SYMBOL(drm_atomic_helper_commit_pre_planes);
++
++/**
++ * drm_atomic_helper_commit_post_planes - modeset commit after plane updates
++ * @dev: DRM device
++ * @old_state: atomic state object with old state structures
++ *
++ * This function commits the modeset changes that need to be committed after
++ * updating planes: It enables all the outputs with the new configuration which
++ * had to be turned off for the update.
++ */
++void drm_atomic_helper_commit_post_planes(struct drm_device *dev,
++ struct drm_atomic_state *old_state)
++{
++ int ncrtcs = old_state->dev->mode_config.num_crtc;
++ int i;
++
++ for (i = 0; i < ncrtcs; i++) {
++ struct drm_crtc_helper_funcs *funcs;
++ struct drm_crtc *crtc;
++
++ crtc = old_state->crtcs[i];
++
++ /* Need to filter out CRTCs where only planes change. */
++ if (!crtc || !crtc->state->mode_changed)
++ continue;
++
++ funcs = crtc->helper_private;
++
++ if (crtc->state->enable)
++ funcs->commit(crtc);
++ }
++
++ for (i = 0; i < old_state->num_connector; i++) {
++ struct drm_connector *connector;
++ struct drm_encoder_helper_funcs *funcs;
++ struct drm_encoder *encoder;
++
++ connector = old_state->connectors[i];
++
++ if (!connector || !connector->state->best_encoder)
++ continue;
++
++ encoder = connector->state->best_encoder;
++ funcs = encoder->helper_private;
++
++ /*
++ * Each encoder has at most one connector (since we always steal
++ * it away), so we won't call the enable hooks twice.
++ */
++ if (encoder->bridge)
++ encoder->bridge->funcs->pre_enable(encoder->bridge);
++
++ funcs->commit(encoder);
++
++ if (encoder->bridge)
++ encoder->bridge->funcs->enable(encoder->bridge);
++ }
++}
++EXPORT_SYMBOL(drm_atomic_helper_commit_post_planes);
++
++static void wait_for_fences(struct drm_device *dev,
++ struct drm_atomic_state *state)
++{
++ int nplanes = dev->mode_config.num_total_plane;
++ int i;
++
++ for (i = 0; i < nplanes; i++) {
++ struct drm_plane *plane = state->planes[i];
++
++ if (!plane || !plane->state->fence)
++ continue;
++
++ WARN_ON(!plane->state->fb);
++
++ fence_wait(plane->state->fence, false);
++ fence_put(plane->state->fence);
++ plane->state->fence = NULL;
++ }
++}
++
++static bool framebuffer_changed(struct drm_device *dev,
++ struct drm_atomic_state *old_state,
++ struct drm_crtc *crtc)
++{
++ struct drm_plane *plane;
++ struct drm_plane_state *old_plane_state;
++ int nplanes = old_state->dev->mode_config.num_total_plane;
++ int i;
++
++ for (i = 0; i < nplanes; i++) {
++ plane = old_state->planes[i];
++ old_plane_state = old_state->plane_states[i];
++
++ if (!plane)
++ continue;
++
++ if (plane->state->crtc != crtc &&
++ old_plane_state->crtc != crtc)
++ continue;
++
++ if (plane->state->fb != old_plane_state->fb)
++ return true;
++ }
++
++ return false;
++}
++
++/**
++ * drm_atomic_helper_wait_for_vblanks - wait for vblank on crtcs
++ * @dev: DRM device
++ * @old_state: atomic state object with old state structures
++ *
++ * Helper to, after atomic commit, wait for vblanks on all affected
++ * crtcs (i.e. before cleaning up old framebuffers using
++ * drm_atomic_helper_cleanup_planes()). It will only wait on crtcs where the
++ * framebuffers have actually changed to optimize for the legacy cursor and
++ * plane update use-case.
++ */
++void
++drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
++ struct drm_atomic_state *old_state)
++{
++ struct drm_crtc *crtc;
++ struct drm_crtc_state *old_crtc_state;
++ int ncrtcs = old_state->dev->mode_config.num_crtc;
++ int i, ret;
++
++ for (i = 0; i < ncrtcs; i++) {
++ crtc = old_state->crtcs[i];
++ old_crtc_state = old_state->crtc_states[i];
++
++ if (!crtc)
++ continue;
++
++ /* No one cares about the old state, so abuse it for tracking
++ * and store whether we hold a vblank reference (and should do a
++ * vblank wait) in the ->enable boolean. */
++ old_crtc_state->enable = false;
++
++ if (!crtc->state->enable)
++ continue;
++
++ if (!framebuffer_changed(dev, old_state, crtc))
++ continue;
++
++ ret = drm_crtc_vblank_get(crtc);
++ if (ret != 0)
++ continue;
++
++ old_crtc_state->enable = true;
++ old_crtc_state->last_vblank_count = drm_vblank_count(dev, i);
++ }
++
++ for (i = 0; i < ncrtcs; i++) {
++ crtc = old_state->crtcs[i];
++ old_crtc_state = old_state->crtc_states[i];
++
++ if (!crtc || !old_crtc_state->enable)
++ continue;
++
++ ret = wait_event_timeout(dev->vblank[i].queue,
++ old_crtc_state->last_vblank_count !=
++ drm_vblank_count(dev, i),
++ msecs_to_jiffies(50));
++
++ drm_crtc_vblank_put(crtc);
++ }
++}
++EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks);
++
++/**
++ * drm_atomic_helper_commit - commit validated state object
++ * @dev: DRM device
++ * @state: the driver state object
++ * @async: asynchronous commit
++ *
++ * This function commits a state object that has been pre-validated with
++ * drm_atomic_helper_check(). This can still fail when e.g. the framebuffer
++ * reservation fails. For now this doesn't implement asynchronous commits.
++ *
++ * RETURNS
++ * Zero for success or -errno.
++ */
++int drm_atomic_helper_commit(struct drm_device *dev,
++ struct drm_atomic_state *state,
++ bool async)
++{
++ int ret;
++
++ if (async)
++ return -EBUSY;
++
++ ret = drm_atomic_helper_prepare_planes(dev, state);
++ if (ret)
++ return ret;
++
++ /*
++ * This is the point of no return - everything below never fails except
++ * when the hw goes bonghits. Which means we can commit the new state on
++ * the software side now.
++ */
++
++ drm_atomic_helper_swap_state(dev, state);
++
++ /*
++ * Everything below can be run asynchronously without the need to grab
++ * any modeset locks at all under one condition: It must be guaranteed
++ * that the asynchronous work has either been cancelled (if the driver
++ * supports it, which at least requires that the framebuffers get
++ * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
++ * before the new state gets committed on the software side with
++ * drm_atomic_helper_swap_state().
++ *
++ * This scheme allows new atomic state updates to be prepared and
++ * checked in parallel to the asynchronous completion of the previous
++ * update. Which is important since compositors need to figure out the
++ * composition of the next frame right after having submitted the
++ * current layout.
++ */
++
++ wait_for_fences(dev, state);
++
++ drm_atomic_helper_commit_pre_planes(dev, state);
++
++ drm_atomic_helper_commit_planes(dev, state);
++
++ drm_atomic_helper_commit_post_planes(dev, state);
++
++ drm_atomic_helper_wait_for_vblanks(dev, state);
++
++ drm_atomic_helper_cleanup_planes(dev, state);
++
++ drm_atomic_state_free(state);
++
++ return 0;
++}
++EXPORT_SYMBOL(drm_atomic_helper_commit);
++
++/**
++ * DOC: implementing async commit
++ *
++ * For now the atomic helpers don't support async commit directly. If there is
++ * real need it could be added though, using the dma-buf fence infrastructure
++ * for generic synchronization with outstanding rendering.
++ *
++ * For now drivers have to implement async commit themselves, with the following
++ * sequence being the recommended one:
++ *
++ * 1. Run drm_atomic_helper_prepare_planes() first. This is the only function
++ * which commit needs to call which can fail, so we want to run it first and
++ * synchronously.
++ *
++ * 2. Synchronize with any outstanding asynchronous commit worker threads which
++ * might be affected by the new state update. This can be done by either cancelling
++ * or flushing the work items, depending upon whether the driver can deal with
++ * cancelled updates. Note that it is important to ensure that the framebuffer
++ * cleanup is still done when cancelling.
++ *
++ * For sufficient parallelism it is recommended to have a work item per crtc
++ * (for updates which don't touch global state) and a global one. Then we only
++ * need to synchronize with the crtc work items for changed crtcs and the global
++ * work item, which allows nice concurrent updates on disjoint sets of crtcs.
++ *
++ * 3. The software state is updated synchronously with
++ * drm_atomic_helper_swap_state. Doing this under the protection of all modeset
++ * locks means concurrent callers never see inconsistent state. And doing this
++ * while it's guaranteed that no relevant async worker runs means that async
++ * workers do not need to grab any locks. Actually they must not grab locks, for
++ * otherwise the work flushing will deadlock.
++ *
++ * 4. Schedule a work item to do all subsequent steps, using the split-out
++ * commit helpers: a) pre-plane commit b) plane commit c) post-plane commit and
++ * then cleaning up the framebuffers after the old framebuffer is no longer
++ * being displayed.
++ */
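++
++/*
++ * Editor's sketch of that sequence (hypothetical driver code; the
++ * foo->commit_wq workqueue and foo->commit_work worker are assumptions):
++ *
++ * ret = drm_atomic_helper_prepare_planes(dev, state); (step 1)
++ * if (ret)
++ * return ret;
++ * flush_workqueue(foo->commit_wq); (step 2)
++ * drm_atomic_helper_swap_state(dev, state); (step 3)
++ * queue_work(foo->commit_wq, &foo->commit_work); (step 4)
++ *
++ * The worker then runs the pre-plane, plane and post-plane commit helpers
++ * and finally cleans up the old framebuffers.
++ */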
++
++/**
++ * drm_atomic_helper_prepare_planes - prepare plane resources before commit
++ * @dev: DRM device
++ * @state: atomic state object with new state structures
++ *
++ * This function prepares plane state, specifically framebuffers, for the new
++ * configuration. If any failure is encountered this function will call
++ * ->cleanup_fb on any already successfully prepared framebuffer.
++ *
++ * Returns:
++ * 0 on success, negative error code on failure.
++ */
++int drm_atomic_helper_prepare_planes(struct drm_device *dev,
++ struct drm_atomic_state *state)
++{
++ int nplanes = dev->mode_config.num_total_plane;
++ int ret, i;
++
++ for (i = 0; i < nplanes; i++) {
++ struct drm_plane_helper_funcs *funcs;
++ struct drm_plane *plane = state->planes[i];
++ struct drm_framebuffer *fb;
++
++ if (!plane)
++ continue;
++
++ funcs = plane->helper_private;
++
++ fb = state->plane_states[i]->fb;
++
++ if (fb && funcs->prepare_fb) {
++ ret = funcs->prepare_fb(plane, fb);
++ if (ret)
++ goto fail;
++ }
++ }
++
++ return 0;
++
++fail:
++ for (i--; i >= 0; i--) {
++ struct drm_plane_helper_funcs *funcs;
++ struct drm_plane *plane = state->planes[i];
++ struct drm_framebuffer *fb;
++
++ if (!plane)
++ continue;
++
++ funcs = plane->helper_private;
++
++ fb = state->plane_states[i]->fb;
++
++ if (fb && funcs->cleanup_fb)
++ funcs->cleanup_fb(plane, fb);
++ }
++
++ return ret;
++}
++EXPORT_SYMBOL(drm_atomic_helper_prepare_planes);
++
++/**
++ * drm_atomic_helper_commit_planes - commit plane state
++ * @dev: DRM device
++ * @old_state: atomic state object with old state structures
++ *
++ * This function commits the new plane state using the plane and atomic helper
++ * functions for planes and crtcs. It assumes that the atomic state has already
++ * been pushed into the relevant object state pointers, since this step can no
++ * longer fail.
++ *
++ * It still requires the global state object @old_state to know which planes and
++ * crtcs need to be updated though.
++ */
++void drm_atomic_helper_commit_planes(struct drm_device *dev,
++ struct drm_atomic_state *old_state)
++{
++ int nplanes = dev->mode_config.num_total_plane;
++ int ncrtcs = dev->mode_config.num_crtc;
++ int i;
++
++ for (i = 0; i < ncrtcs; i++) {
++ struct drm_crtc_helper_funcs *funcs;
++ struct drm_crtc *crtc = old_state->crtcs[i];
++
++ if (!crtc)
++ continue;
++
++ funcs = crtc->helper_private;
++
++ if (!funcs || !funcs->atomic_begin)
++ continue;
++
++ funcs->atomic_begin(crtc);
++ }
++
++ for (i = 0; i < nplanes; i++) {
++ struct drm_plane_helper_funcs *funcs;
++ struct drm_plane *plane = old_state->planes[i];
++ struct drm_plane_state *old_plane_state;
++
++ if (!plane)
++ continue;
++
++ funcs = plane->helper_private;
++
++ if (!funcs || !funcs->atomic_update)
++ continue;
++
++ old_plane_state = old_state->plane_states[i];
++
++ funcs->atomic_update(plane, old_plane_state);
++ }
++
++ for (i = 0; i < ncrtcs; i++) {
++ struct drm_crtc_helper_funcs *funcs;
++ struct drm_crtc *crtc = old_state->crtcs[i];
++
++ if (!crtc)
++ continue;
++
++ funcs = crtc->helper_private;
++
++ if (!funcs || !funcs->atomic_flush)
++ continue;
++
++ funcs->atomic_flush(crtc);
++ }
++}
++EXPORT_SYMBOL(drm_atomic_helper_commit_planes);
++
++/**
++ * drm_atomic_helper_cleanup_planes - cleanup plane resources after commit
++ * @dev: DRM device
++ * @old_state: atomic state object with old state structures
++ *
++ * This function cleans up plane state, specifically framebuffers, from the old
++ * configuration. Hence the old configuration must be preserved in @old_state to
++ * be able to call this function.
++ *
++ * This function must also be called on the new state when the atomic update
++ * fails at any point after calling drm_atomic_helper_prepare_planes().
++ */
++void drm_atomic_helper_cleanup_planes(struct drm_device *dev,
++ struct drm_atomic_state *old_state)
++{
++ int nplanes = dev->mode_config.num_total_plane;
++ int i;
++
++ for (i = 0; i < nplanes; i++) {
++ struct drm_plane_helper_funcs *funcs;
++ struct drm_plane *plane = old_state->planes[i];
++ struct drm_framebuffer *old_fb;
++
++ if (!plane)
++ continue;
++
++ funcs = plane->helper_private;
++
++ old_fb = old_state->plane_states[i]->fb;
++
++ if (old_fb && funcs->cleanup_fb)
++ funcs->cleanup_fb(plane, old_fb);
++ }
++}
++EXPORT_SYMBOL(drm_atomic_helper_cleanup_planes);
++
++/**
++ * drm_atomic_helper_swap_state - store atomic state into current sw state
++ * @dev: DRM device
++ * @state: atomic state
++ *
++ * This function stores the atomic state into the current state pointers in all
++ * driver objects. It should be called after all steps which can fail have
++ * completed successfully, but before the actual hardware state is committed.
++ *
++ * For cleanup and error recovery the current state for all changed objects will
++ * be swapped into @state.
++ *
++ * With that sequence it fits perfectly into the plane prepare/cleanup sequence:
++ *
++ * 1. Call drm_atomic_helper_prepare_planes() with the staged atomic state.
++ *
++ * 2. Do any other steps that might fail.
++ *
++ * 3. Put the staged state into the current state pointers with this function.
++ *
++ * 4. Actually commit the hardware state.
++ *
++ * 5. Call drm_atomic_helper_cleanup_planes with @state, which since step 3
++ * contains the old state. Also do any other cleanup required with that state.
++ */
++void drm_atomic_helper_swap_state(struct drm_device *dev,
++ struct drm_atomic_state *state)
++{
++ int i;
++
++ for (i = 0; i < dev->mode_config.num_connector; i++) {
++ struct drm_connector *connector = state->connectors[i];
++
++ if (!connector)
++ continue;
++
++ connector->state->state = state;
++ swap(state->connector_states[i], connector->state);
++ connector->state->state = NULL;
++ }
++
++ for (i = 0; i < dev->mode_config.num_crtc; i++) {
++ struct drm_crtc *crtc = state->crtcs[i];
++
++ if (!crtc)
++ continue;
++
++ crtc->state->state = state;
++ swap(state->crtc_states[i], crtc->state);
++ crtc->state->state = NULL;
++ }
++
++ for (i = 0; i < dev->mode_config.num_total_plane; i++) {
++ struct drm_plane *plane = state->planes[i];
++
++ if (!plane)
++ continue;
++
++ plane->state->state = state;
++ swap(state->plane_states[i], plane->state);
++ plane->state->state = NULL;
++ }
++}
++EXPORT_SYMBOL(drm_atomic_helper_swap_state);
++
++/**
++ * drm_atomic_helper_update_plane - Helper for primary plane update using atomic
++ * @plane: plane object to update
++ * @crtc: owning CRTC of the plane
++ * @fb: framebuffer to flip onto plane
++ * @crtc_x: x offset of primary plane on crtc
++ * @crtc_y: y offset of primary plane on crtc
++ * @crtc_w: width of primary plane rectangle on crtc
++ * @crtc_h: height of primary plane rectangle on crtc
++ * @src_x: x offset of @fb for panning
++ * @src_y: y offset of @fb for panning
++ * @src_w: width of source rectangle in @fb
++ * @src_h: height of source rectangle in @fb
++ *
++ * Provides a default plane update handler using the atomic driver interface.
++ *
++ * RETURNS:
++ * Zero on success, error code on failure
++ */
++int drm_atomic_helper_update_plane(struct drm_plane *plane,
++ struct drm_crtc *crtc,
++ struct drm_framebuffer *fb,
++ int crtc_x, int crtc_y,
++ unsigned int crtc_w, unsigned int crtc_h,
++ uint32_t src_x, uint32_t src_y,
++ uint32_t src_w, uint32_t src_h)
++{
++ struct drm_atomic_state *state;
++ struct drm_plane_state *plane_state;
++ int ret = 0;
++
++ state = drm_atomic_state_alloc(plane->dev);
++ if (!state)
++ return -ENOMEM;
++
++ state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
++retry:
++ plane_state = drm_atomic_get_plane_state(state, plane);
++ if (IS_ERR(plane_state)) {
++ ret = PTR_ERR(plane_state);
++ goto fail;
++ }
++
++ ret = drm_atomic_set_crtc_for_plane(state, plane, crtc);
++ if (ret != 0)
++ goto fail;
++ drm_atomic_set_fb_for_plane(plane_state, fb);
++ plane_state->crtc_x = crtc_x;
++ plane_state->crtc_y = crtc_y;
++ plane_state->crtc_h = crtc_h;
++ plane_state->crtc_w = crtc_w;
++ plane_state->src_x = src_x;
++ plane_state->src_y = src_y;
++ plane_state->src_h = src_h;
++ plane_state->src_w = src_w;
++
++ ret = drm_atomic_commit(state);
++ if (ret != 0)
++ goto fail;
++
++ /* Driver takes ownership of state on successful commit. */
++ return 0;
++fail:
++ if (ret == -EDEADLK)
++ goto backoff;
++
++ drm_atomic_state_free(state);
++
++ return ret;
++backoff:
++ drm_atomic_state_clear(state);
++ drm_atomic_legacy_backoff(state);
++
++ /*
++ * Someone might have exchanged the framebuffer while we dropped locks
++ * in the backoff code. We need to fix up the fb refcount tracking the
++ * core does for us.
++ */
++ plane->old_fb = plane->fb;
++
++ goto retry;
++}
++EXPORT_SYMBOL(drm_atomic_helper_update_plane);
++
++/**
++ * drm_atomic_helper_disable_plane - Helper for primary plane disable using atomic
++ * @plane: plane to disable
++ *
++ * Provides a default plane disable handler using the atomic driver interface.
++ *
++ * RETURNS:
++ * Zero on success, error code on failure
++ */
++int drm_atomic_helper_disable_plane(struct drm_plane *plane)
++{
++ struct drm_atomic_state *state;
++ struct drm_plane_state *plane_state;
++ int ret = 0;
++
++ /*
++ * FIXME: Without plane->crtc set we can't get at the implicit legacy
++ * acquire context. The real fix will be to wire the acquire ctx through
++ * everywhere we need it, but meanwhile prevent chaos by just skipping
++ * this noop. The critical case is the cursor ioctls which a) only grab
++ * crtc/cursor-plane locks (so we need the crtc to get at the right
++ * acquire context) and b) can try to disable the plane multiple times.
++ */
++ if (!plane->crtc)
++ return 0;
++
++ state = drm_atomic_state_alloc(plane->dev);
++ if (!state)
++ return -ENOMEM;
++
++ state->acquire_ctx = drm_modeset_legacy_acquire_ctx(plane->crtc);
++retry:
++ plane_state = drm_atomic_get_plane_state(state, plane);
++ if (IS_ERR(plane_state)) {
++ ret = PTR_ERR(plane_state);
++ goto fail;
++ }
++
++ ret = drm_atomic_set_crtc_for_plane(state, plane, NULL);
++ if (ret != 0)
++ goto fail;
++ drm_atomic_set_fb_for_plane(plane_state, NULL);
++ plane_state->crtc_x = 0;
++ plane_state->crtc_y = 0;
++ plane_state->crtc_h = 0;
++ plane_state->crtc_w = 0;
++ plane_state->src_x = 0;
++ plane_state->src_y = 0;
++ plane_state->src_h = 0;
++ plane_state->src_w = 0;
++
++ ret = drm_atomic_commit(state);
++ if (ret != 0)
++ goto fail;
++
++ /* Driver takes ownership of state on successful commit. */
++ return 0;
++fail:
++ if (ret == -EDEADLK)
++ goto backoff;
++
++ drm_atomic_state_free(state);
++
++ return ret;
++backoff:
++ drm_atomic_state_clear(state);
++ drm_atomic_legacy_backoff(state);
++
++ /*
++ * Someone might have exchanged the framebuffer while we dropped locks
++ * in the backoff code. We need to fix up the fb refcount tracking the
++ * core does for us.
++ */
++ plane->old_fb = plane->fb;
++
++ goto retry;
++}
++EXPORT_SYMBOL(drm_atomic_helper_disable_plane);
++
++static int update_output_state(struct drm_atomic_state *state,
++ struct drm_mode_set *set)
++{
++ struct drm_device *dev = set->crtc->dev;
++ struct drm_connector_state *conn_state;
++ int ncrtcs = state->dev->mode_config.num_crtc;
++ int ret, i, j;
++
++ ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
++ state->acquire_ctx);
++ if (ret)
++ return ret;
++
++ /* First grab all affected connector/crtc states. */
++ for (i = 0; i < set->num_connectors; i++) {
++ conn_state = drm_atomic_get_connector_state(state,
++ set->connectors[i]);
++ if (IS_ERR(conn_state))
++ return PTR_ERR(conn_state);
++ }
++
++ for (i = 0; i < ncrtcs; i++) {
++ struct drm_crtc *crtc = state->crtcs[i];
++
++ if (!crtc)
++ continue;
++
++ ret = drm_atomic_add_affected_connectors(state, crtc);
++ if (ret)
++ return ret;
++ }
++
++ /* Then recompute connector->crtc links and crtc enabling state. */
++ for (i = 0; i < state->num_connector; i++) {
++ struct drm_connector *connector;
++
++ connector = state->connectors[i];
++ conn_state = state->connector_states[i];
++
++ if (!connector)
++ continue;
++
++ if (conn_state->crtc == set->crtc) {
++ ret = drm_atomic_set_crtc_for_connector(conn_state,
++ NULL);
++ if (ret)
++ return ret;
++ }
++
++ for (j = 0; j < set->num_connectors; j++) {
++ if (set->connectors[j] == connector) {
++ ret = drm_atomic_set_crtc_for_connector(conn_state,
++ set->crtc);
++ if (ret)
++ return ret;
++ break;
++ }
++ }
++ }
++
++ for (i = 0; i < ncrtcs; i++) {
++ struct drm_crtc *crtc = state->crtcs[i];
++ struct drm_crtc_state *crtc_state = state->crtc_states[i];
++
++ if (!crtc)
++ continue;
++
++ /* Don't update ->enable for the CRTC in the set_config request,
++ * since a mismatch would indicate a bug in the upper layers.
++ * The actual modeset code later on will catch any
++ * inconsistencies here. */
++ if (crtc == set->crtc)
++ continue;
++
++ crtc_state->enable =
++ drm_atomic_connectors_for_crtc(state, crtc);
++ }
++
++ return 0;
++}
++
++/**
++ * drm_atomic_helper_set_config - set a new config from userspace
++ * @set: mode set configuration
++ *
++ * Provides a default crtc set_config handler using the atomic driver interface.
++ *
++ * Returns:
++ * Returns 0 on success, negative errno numbers on failure.
++ */
++int drm_atomic_helper_set_config(struct drm_mode_set *set)
++{
++ struct drm_atomic_state *state;
++ struct drm_crtc *crtc = set->crtc;
++ struct drm_crtc_state *crtc_state;
++ struct drm_plane_state *primary_state;
++ int ret = 0;
++
++ state = drm_atomic_state_alloc(crtc->dev);
++ if (!state)
++ return -ENOMEM;
++
++ state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
++retry:
++ crtc_state = drm_atomic_get_crtc_state(state, crtc);
++ if (IS_ERR(crtc_state)) {
++ ret = PTR_ERR(crtc_state);
++ goto fail;
++ }
++
++ primary_state = drm_atomic_get_plane_state(state, crtc->primary);
++ if (IS_ERR(primary_state)) {
++ ret = PTR_ERR(primary_state);
++ goto fail;
++ }
++
++ if (!set->mode) {
++ WARN_ON(set->fb);
++ WARN_ON(set->num_connectors);
++
++ crtc_state->enable = false;
++
++ ret = drm_atomic_set_crtc_for_plane(state, crtc->primary, NULL);
++ if (ret != 0)
++ goto fail;
++
++ drm_atomic_set_fb_for_plane(primary_state, NULL);
++
++ goto commit;
++ }
++
++ WARN_ON(!set->fb);
++ WARN_ON(!set->num_connectors);
++
++ crtc_state->enable = true;
++ drm_mode_copy(&crtc_state->mode, set->mode);
++
++ ret = drm_atomic_set_crtc_for_plane(state, crtc->primary, crtc);
++ if (ret != 0)
++ goto fail;
++ drm_atomic_set_fb_for_plane(primary_state, set->fb);
++ primary_state->crtc_x = 0;
++ primary_state->crtc_y = 0;
++ primary_state->crtc_h = set->mode->vdisplay;
++ primary_state->crtc_w = set->mode->hdisplay;
++ primary_state->src_x = set->x << 16;
++ primary_state->src_y = set->y << 16;
++ primary_state->src_h = set->mode->vdisplay << 16;
++ primary_state->src_w = set->mode->hdisplay << 16;
++
++commit:
++ ret = update_output_state(state, set);
++ if (ret)
++ goto fail;
++
++ ret = drm_atomic_commit(state);
++ if (ret != 0)
++ goto fail;
++
++ /* Driver takes ownership of state on successful commit. */
++ return 0;
++fail:
++ if (ret == -EDEADLK)
++ goto backoff;
++
++ drm_atomic_state_free(state);
++
++ return ret;
++backoff:
++ drm_atomic_state_clear(state);
++ drm_atomic_legacy_backoff(state);
++
++ /*
++ * Someone might have exchanged the framebuffer while we dropped locks
++ * in the backoff code. We need to fix up the fb refcount tracking the
++ * core does for us.
++ */
++ crtc->primary->old_fb = crtc->primary->fb;
++
++ goto retry;
++}
++EXPORT_SYMBOL(drm_atomic_helper_set_config);
++
++/**
++ * drm_atomic_helper_crtc_set_property - helper for crtc properties
++ * @crtc: DRM crtc
++ * @property: DRM property
++ * @val: value of property
++ *
++ * Provides a default crtc set_property handler using the atomic driver interface.
++ *
++ * RETURNS:
++ * Zero on success, error code on failure
++ */
++int
++drm_atomic_helper_crtc_set_property(struct drm_crtc *crtc,
++ struct drm_property *property,
++ uint64_t val)
++{
++ struct drm_atomic_state *state;
++ struct drm_crtc_state *crtc_state;
++ int ret = 0;
++
++ state = drm_atomic_state_alloc(crtc->dev);
++ if (!state)
++ return -ENOMEM;
++
++ /* ->set_property is always called with all locks held. */
++ state->acquire_ctx = crtc->dev->mode_config.acquire_ctx;
++retry:
++ crtc_state = drm_atomic_get_crtc_state(state, crtc);
++ if (IS_ERR(crtc_state)) {
++ ret = PTR_ERR(crtc_state);
++ goto fail;
++ }
++
++ ret = crtc->funcs->atomic_set_property(crtc, crtc_state,
++ property, val);
++ if (ret)
++ goto fail;
++
++ ret = drm_atomic_commit(state);
++ if (ret != 0)
++ goto fail;
++
++ /* Driver takes ownership of state on successful commit. */
++ return 0;
++fail:
++ if (ret == -EDEADLK)
++ goto backoff;
++
++ drm_atomic_state_free(state);
++
++ return ret;
++backoff:
++ drm_atomic_state_clear(state);
++ drm_atomic_legacy_backoff(state);
++
++ goto retry;
++}
++EXPORT_SYMBOL(drm_atomic_helper_crtc_set_property);
++
++/**
++ * drm_atomic_helper_plane_set_property - helper for plane properties
++ * @plane: DRM plane
++ * @property: DRM property
++ * @val: value of property
++ *
++ * Provides a default plane set_property handler using the atomic driver interface.
++ *
++ * RETURNS:
++ * Zero on success, error code on failure
++ */
++int
++drm_atomic_helper_plane_set_property(struct drm_plane *plane,
++ struct drm_property *property,
++ uint64_t val)
++{
++ struct drm_atomic_state *state;
++ struct drm_plane_state *plane_state;
++ int ret = 0;
++
++ state = drm_atomic_state_alloc(plane->dev);
++ if (!state)
++ return -ENOMEM;
++
++ /* ->set_property is always called with all locks held. */
++ state->acquire_ctx = plane->dev->mode_config.acquire_ctx;
++retry:
++ plane_state = drm_atomic_get_plane_state(state, plane);
++ if (IS_ERR(plane_state)) {
++ ret = PTR_ERR(plane_state);
++ goto fail;
++ }
++
++ ret = plane->funcs->atomic_set_property(plane, plane_state,
++ property, val);
++ if (ret)
++ goto fail;
++
++ ret = drm_atomic_commit(state);
++ if (ret != 0)
++ goto fail;
++
++ /* Driver takes ownership of state on successful commit. */
++ return 0;
++fail:
++ if (ret == -EDEADLK)
++ goto backoff;
++
++ drm_atomic_state_free(state);
++
++ return ret;
++backoff:
++ drm_atomic_state_clear(state);
++ drm_atomic_legacy_backoff(state);
++
++ goto retry;
++}
++EXPORT_SYMBOL(drm_atomic_helper_plane_set_property);
++
++/**
++ * drm_atomic_helper_connector_set_property - helper for connector properties
++ * @connector: DRM connector
++ * @property: DRM property
++ * @val: value of property
++ *
++ * Provides a default connector set_property handler using the atomic driver interface.
++ *
++ * RETURNS:
++ * Zero on success, error code on failure
++ */
++int
++drm_atomic_helper_connector_set_property(struct drm_connector *connector,
++ struct drm_property *property,
++ uint64_t val)
++{
++ struct drm_atomic_state *state;
++ struct drm_connector_state *connector_state;
++ int ret = 0;
++
++ state = drm_atomic_state_alloc(connector->dev);
++ if (!state)
++ return -ENOMEM;
++
++ /* ->set_property is always called with all locks held. */
++ state->acquire_ctx = connector->dev->mode_config.acquire_ctx;
++retry:
++ connector_state = drm_atomic_get_connector_state(state, connector);
++ if (IS_ERR(connector_state)) {
++ ret = PTR_ERR(connector_state);
++ goto fail;
++ }
++
++ ret = connector->funcs->atomic_set_property(connector, connector_state,
++ property, val);
++ if (ret)
++ goto fail;
++
++ ret = drm_atomic_commit(state);
++ if (ret != 0)
++ goto fail;
++
++ /* Driver takes ownership of state on successful commit. */
++ return 0;
++fail:
++ if (ret == -EDEADLK)
++ goto backoff;
++
++ drm_atomic_state_free(state);
++
++ return ret;
++backoff:
++ drm_atomic_state_clear(state);
++ drm_atomic_legacy_backoff(state);
++
++ goto retry;
++}
++EXPORT_SYMBOL(drm_atomic_helper_connector_set_property);
++
++/**
++ * drm_atomic_helper_page_flip - execute a legacy page flip
++ * @crtc: DRM crtc
++ * @fb: DRM framebuffer
++ * @event: optional DRM event to signal upon completion
++ * @flags: flip flags for non-vblank sync'ed updates
++ *
++ * Provides a default page flip implementation using the atomic driver interface.
++ *
++ * Note that for now so-called async page flips (i.e. updates which are not
++ * synchronized to vblank) are not supported, since the atomic interfaces have
++ * no provisions for this yet.
++ *
++ * Returns:
++ * Returns 0 on success, negative errno numbers on failure.
++ */
++int drm_atomic_helper_page_flip(struct drm_crtc *crtc,
++ struct drm_framebuffer *fb,
++ struct drm_pending_vblank_event *event,
++ uint32_t flags)
++{
++ struct drm_plane *plane = crtc->primary;
++ struct drm_atomic_state *state;
++ struct drm_plane_state *plane_state;
++ struct drm_crtc_state *crtc_state;
++ int ret = 0;
++
++ if (flags & DRM_MODE_PAGE_FLIP_ASYNC)
++ return -EINVAL;
++
++ state = drm_atomic_state_alloc(plane->dev);
++ if (!state)
++ return -ENOMEM;
++
++ state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
++retry:
++ crtc_state = drm_atomic_get_crtc_state(state, crtc);
++ if (IS_ERR(crtc_state)) {
++ ret = PTR_ERR(crtc_state);
++ goto fail;
++ }
++ crtc_state->event = event;
++
++ plane_state = drm_atomic_get_plane_state(state, plane);
++ if (IS_ERR(plane_state)) {
++ ret = PTR_ERR(plane_state);
++ goto fail;
++ }
++
++ ret = drm_atomic_set_crtc_for_plane(state, plane, crtc);
++ if (ret != 0)
++ goto fail;
++ drm_atomic_set_fb_for_plane(plane_state, fb);
++
++ ret = drm_atomic_async_commit(state);
++ if (ret != 0)
++ goto fail;
++
++ /* TODO: ->page_flip is the only driver callback where the core
++ * doesn't update plane->fb. For now patch it up here. */
++ plane->fb = plane->state->fb;
++
++ /* Driver takes ownership of state on successful async commit. */
++ return 0;
++fail:
++ if (ret == -EDEADLK)
++ goto backoff;
++
++ drm_atomic_state_free(state);
++
++ return ret;
++backoff:
++ drm_atomic_state_clear(state);
++ drm_atomic_legacy_backoff(state);
++
++ /*
++ * Someone might have exchanged the framebuffer while we dropped locks
++ * in the backoff code. We need to fix up the fb refcount tracking the
++ * core does for us.
++ */
++ plane->old_fb = plane->fb;
++
++ goto retry;
++}
++EXPORT_SYMBOL(drm_atomic_helper_page_flip);
++
++/**
++ * DOC: atomic state reset and initialization
++ *
++ * Both the drm core and the atomic helpers assume that there is always the full
++ * and correct atomic software state for all connectors, CRTCs and planes
++ * available. Which is a bit of a problem on driver load and also after system
++ * suspend. One way to solve this is to have a hardware state read-out
++ * infrastructure which reconstructs the full software state (e.g. the i915
++ * driver).
++ *
++ * The simpler solution is to just reset the software state to everything off,
++ * which is easiest to do by calling drm_mode_config_reset(). To facilitate this
++ * the atomic helpers provide default reset implementations for all hooks.
++ */
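++
++/*
++ * Hypothetical wiring (editor's sketch, not part of the original patch):
++ * drivers opt into these defaults from their vtables, e.g. for a CRTC:
++ *
++ * static const struct drm_crtc_funcs foo_crtc_funcs = {
++ * .reset = drm_atomic_helper_crtc_reset,
++ * .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
++ * .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
++ * .set_config = drm_atomic_helper_set_config,
++ * .page_flip = drm_atomic_helper_page_flip,
++ * };
++ *
++ * Planes and connectors are wired up the same way with the matching
++ * plane/connector helpers below.
++ */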
++
++/**
++ * drm_atomic_helper_crtc_reset - default ->reset hook for CRTCs
++ * @crtc: drm CRTC
++ *
++ * Resets the atomic state for @crtc by freeing the state pointer (which might
++ * be NULL, e.g. at driver load time) and allocating a new empty state object.
++ */
++void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc)
++{
++ kfree(crtc->state);
++ crtc->state = kzalloc(sizeof(*crtc->state), GFP_KERNEL);
++}
++EXPORT_SYMBOL(drm_atomic_helper_crtc_reset);
++
++/**
++ * drm_atomic_helper_crtc_duplicate_state - default state duplicate hook
++ * @crtc: drm CRTC
++ *
++ * Default CRTC state duplicate hook for drivers which don't have their own
++ * subclassed CRTC state structure.
++ */
++struct drm_crtc_state *
++drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc)
++{
++ struct drm_crtc_state *state;
++
++ if (WARN_ON(!crtc->state))
++ return NULL;
++
++ state = kmemdup(crtc->state, sizeof(*crtc->state), GFP_KERNEL);
++
++ if (state) {
++ state->mode_changed = false;
++ state->planes_changed = false;
++ state->event = NULL;
++ }
++
++ return state;
++}
++EXPORT_SYMBOL(drm_atomic_helper_crtc_duplicate_state);
++
++/**
++ * drm_atomic_helper_crtc_destroy_state - default state destroy hook
++ * @crtc: drm CRTC
++ * @state: CRTC state object to release
++ *
++ * Default CRTC state destroy hook for drivers which don't have their own
++ * subclassed CRTC state structure.
++ */
++void drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
++ struct drm_crtc_state *state)
++{
++ kfree(state);
++}
++EXPORT_SYMBOL(drm_atomic_helper_crtc_destroy_state);
++
++/**
++ * drm_atomic_helper_plane_reset - default ->reset hook for planes
++ * @plane: drm plane
++ *
++ * Resets the atomic state for @plane by freeing the state pointer (which might
++ * be NULL, e.g. at driver load time) and allocating a new empty state object.
++ */
++void drm_atomic_helper_plane_reset(struct drm_plane *plane)
++{
++ if (plane->state && plane->state->fb)
++ drm_framebuffer_unreference(plane->state->fb);
++
++ kfree(plane->state);
++ plane->state = kzalloc(sizeof(*plane->state), GFP_KERNEL);
++}
++EXPORT_SYMBOL(drm_atomic_helper_plane_reset);
++
++/**
++ * drm_atomic_helper_plane_duplicate_state - default state duplicate hook
++ * @plane: drm plane
++ *
++ * Default plane state duplicate hook for drivers which don't have their own
++ * subclassed plane state structure.
++ */
++struct drm_plane_state *
++drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane)
++{
++ struct drm_plane_state *state;
++
++ if (WARN_ON(!plane->state))
++ return NULL;
++
++ state = kmemdup(plane->state, sizeof(*plane->state), GFP_KERNEL);
++
++ if (state && state->fb)
++ drm_framebuffer_reference(state->fb);
++
++ return state;
++}
++EXPORT_SYMBOL(drm_atomic_helper_plane_duplicate_state);
++
++/**
++ * drm_atomic_helper_plane_destroy_state - default state destroy hook
++ * @plane: drm plane
++ * @state: plane state object to release
++ *
++ * Default plane state destroy hook for drivers which don't have their own
++ * subclassed plane state structure.
++ */
++void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
++ struct drm_plane_state *state)
++{
++ if (state->fb)
++ drm_framebuffer_unreference(state->fb);
++
++ kfree(state);
++}
++EXPORT_SYMBOL(drm_atomic_helper_plane_destroy_state);
++
++/**
++ * drm_atomic_helper_connector_reset - default ->reset hook for connectors
++ * @connector: drm connector
++ *
++ * Resets the atomic state for @connector by freeing the state pointer (which
++ * might be NULL, e.g. at driver load time) and allocating a new empty state
++ * object.
++ */
++void drm_atomic_helper_connector_reset(struct drm_connector *connector)
++{
++ kfree(connector->state);
++ connector->state = kzalloc(sizeof(*connector->state), GFP_KERNEL);
++}
++EXPORT_SYMBOL(drm_atomic_helper_connector_reset);
++
++/**
++ * drm_atomic_helper_connector_duplicate_state - default state duplicate hook
++ * @connector: drm connector
++ *
++ * Default connector state duplicate hook for drivers which don't have their own
++ * subclassed connector state structure.
++ */
++struct drm_connector_state *
++drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector)
++{
++ if (WARN_ON(!connector->state))
++ return NULL;
++
++ return kmemdup(connector->state, sizeof(*connector->state), GFP_KERNEL);
++}
++EXPORT_SYMBOL(drm_atomic_helper_connector_duplicate_state);
++
++/**
++ * drm_atomic_helper_connector_destroy_state - default state destroy hook
++ * @connector: drm connector
++ * @state: connector state object to release
++ *
++ * Default connector state destroy hook for drivers which don't have their own
++ * subclassed connector state structure.
++ */
++void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
++ struct drm_connector_state *state)
++{
++ kfree(state);
++}
++EXPORT_SYMBOL(drm_atomic_helper_connector_destroy_state);
+diff -Naur a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c
+--- a/drivers/gpu/drm/drm_auth.c 2015-03-26 14:43:30.430436436 +0530
++++ b/drivers/gpu/drm/drm_auth.c 2015-03-26 14:42:38.706435421 +0530
+@@ -34,6 +34,13 @@
+ */
+
+ #include <drm/drmP.h>
++#include "drm_internal.h"
++
++struct drm_magic_entry {
++ struct list_head head;
++ struct drm_hash_item hash_item;
++ struct drm_file *priv;
++};
+
+ /**
+ * Find the file with the given magic number.
+diff -Naur a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
+--- a/drivers/gpu/drm/drm_bufs.c 2015-03-26 14:43:30.402436435 +0530
++++ b/drivers/gpu/drm/drm_bufs.c 2015-03-26 14:42:38.706435421 +0530
+@@ -1,18 +1,13 @@
+-/**
+- * \file drm_bufs.c
+- * Generic buffer template
+- *
+- * \author Rickard E. (Rik) Faith <faith@valinux.com>
+- * \author Gareth Hughes <gareth@valinux.com>
+- */
+-
+ /*
+- * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
++ * Legacy: Generic DRM Buffer Management
+ *
+ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
++ * Author: Rickard E. (Rik) Faith <faith@valinux.com>
++ * Author: Gareth Hughes <gareth@valinux.com>
++ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+@@ -39,6 +34,7 @@
+ #include <linux/export.h>
+ #include <asm/shmparam.h>
+ #include <drm/drmP.h>
++#include "drm_legacy.h"
+
+ static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
+ struct drm_local_map *map)
+@@ -363,11 +359,11 @@
+ list->master = dev->primary->master;
+ *maplist = list;
+ return 0;
+- }
++}
+
+-int drm_addmap(struct drm_device * dev, resource_size_t offset,
+- unsigned int size, enum drm_map_type type,
+- enum drm_map_flags flags, struct drm_local_map ** map_ptr)
++int drm_legacy_addmap(struct drm_device * dev, resource_size_t offset,
++ unsigned int size, enum drm_map_type type,
++ enum drm_map_flags flags, struct drm_local_map **map_ptr)
+ {
+ struct drm_map_list *list;
+ int rc;
+@@ -377,8 +373,7 @@
+ *map_ptr = list->map;
+ return rc;
+ }
+-
+-EXPORT_SYMBOL(drm_addmap);
++EXPORT_SYMBOL(drm_legacy_addmap);
+
+ /**
+ * Ioctl to specify a range of memory that is available for mapping by a
+@@ -391,8 +386,8 @@
+ * \return zero on success or a negative value on error.
+ *
+ */
+-int drm_addmap_ioctl(struct drm_device *dev, void *data,
+- struct drm_file *file_priv)
++int drm_legacy_addmap_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
+ {
+ struct drm_map *map = data;
+ struct drm_map_list *maplist;
+@@ -429,9 +424,9 @@
+ * it's being used, and frees any associated resources (such as MTRRs) if it's
+ * not in use.
+ *
+- * \sa drm_addmap
++ * \sa drm_legacy_addmap
+ */
+-int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
++int drm_legacy_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
+ {
+ struct drm_map_list *r_list = NULL, *list_t;
+ drm_dma_handle_t dmah;
+@@ -478,26 +473,26 @@
+ dmah.vaddr = map->handle;
+ dmah.busaddr = map->offset;
+ dmah.size = map->size;
+- __drm_pci_free(dev, &dmah);
++ __drm_legacy_pci_free(dev, &dmah);
+ break;
+ }
+ kfree(map);
+
+ return 0;
+ }
+-EXPORT_SYMBOL(drm_rmmap_locked);
++EXPORT_SYMBOL(drm_legacy_rmmap_locked);
+
+-int drm_rmmap(struct drm_device *dev, struct drm_local_map *map)
++int drm_legacy_rmmap(struct drm_device *dev, struct drm_local_map *map)
+ {
+ int ret;
+
+ mutex_lock(&dev->struct_mutex);
+- ret = drm_rmmap_locked(dev, map);
++ ret = drm_legacy_rmmap_locked(dev, map);
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
+ }
+-EXPORT_SYMBOL(drm_rmmap);
++EXPORT_SYMBOL(drm_legacy_rmmap);
+
+ /* The rmmap ioctl appears to be unnecessary. All mappings are torn down on
+ * the last close of the device, and this is necessary for cleanup when things
+@@ -514,8 +509,8 @@
+ * \param arg pointer to a struct drm_map structure.
+ * \return zero on success or a negative value on error.
+ */
+-int drm_rmmap_ioctl(struct drm_device *dev, void *data,
+- struct drm_file *file_priv)
++int drm_legacy_rmmap_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
+ {
+ struct drm_map *request = data;
+ struct drm_local_map *map = NULL;
+@@ -546,7 +541,7 @@
+ return 0;
+ }
+
+- ret = drm_rmmap_locked(dev, map);
++ ret = drm_legacy_rmmap_locked(dev, map);
+
+ mutex_unlock(&dev->struct_mutex);
+
+@@ -599,7 +594,8 @@
+ * reallocates the buffer list of the same size order to accommodate the new
+ * buffers.
+ */
+-int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
++int drm_legacy_addbufs_agp(struct drm_device *dev,
++ struct drm_buf_desc *request)
+ {
+ struct drm_device_dma *dma = dev->dma;
+ struct drm_buf_entry *entry;
+@@ -656,13 +652,13 @@
+ DRM_DEBUG("zone invalid\n");
+ return -EINVAL;
+ }
+- spin_lock(&dev->count_lock);
++ spin_lock(&dev->buf_lock);
+ if (dev->buf_use) {
+- spin_unlock(&dev->count_lock);
++ spin_unlock(&dev->buf_lock);
+ return -EBUSY;
+ }
+ atomic_inc(&dev->buf_alloc);
+- spin_unlock(&dev->count_lock);
++ spin_unlock(&dev->buf_lock);
+
+ mutex_lock(&dev->struct_mutex);
+ entry = &dma->bufs[order];
+@@ -759,10 +755,11 @@
+ atomic_dec(&dev->buf_alloc);
+ return 0;
+ }
+-EXPORT_SYMBOL(drm_addbufs_agp);
++EXPORT_SYMBOL(drm_legacy_addbufs_agp);
+ #endif /* __OS_HAS_AGP */
+
+-int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
++int drm_legacy_addbufs_pci(struct drm_device *dev,
++ struct drm_buf_desc *request)
+ {
+ struct drm_device_dma *dma = dev->dma;
+ int count;
+@@ -805,13 +802,13 @@
+ page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
+ total = PAGE_SIZE << page_order;
+
+- spin_lock(&dev->count_lock);
++ spin_lock(&dev->buf_lock);
+ if (dev->buf_use) {
+- spin_unlock(&dev->count_lock);
++ spin_unlock(&dev->buf_lock);
+ return -EBUSY;
+ }
+ atomic_inc(&dev->buf_alloc);
+- spin_unlock(&dev->count_lock);
++ spin_unlock(&dev->buf_lock);
+
+ mutex_lock(&dev->struct_mutex);
+ entry = &dma->bufs[order];
+@@ -964,9 +961,10 @@
+ return 0;
+
+ }
+-EXPORT_SYMBOL(drm_addbufs_pci);
++EXPORT_SYMBOL(drm_legacy_addbufs_pci);
+
+-static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request)
++static int drm_legacy_addbufs_sg(struct drm_device *dev,
++ struct drm_buf_desc *request)
+ {
+ struct drm_device_dma *dma = dev->dma;
+ struct drm_buf_entry *entry;
+@@ -1015,13 +1013,13 @@
+ if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
+ return -EINVAL;
+
+- spin_lock(&dev->count_lock);
++ spin_lock(&dev->buf_lock);
+ if (dev->buf_use) {
+- spin_unlock(&dev->count_lock);
++ spin_unlock(&dev->buf_lock);
+ return -EBUSY;
+ }
+ atomic_inc(&dev->buf_alloc);
+- spin_unlock(&dev->count_lock);
++ spin_unlock(&dev->buf_lock);
+
+ mutex_lock(&dev->struct_mutex);
+ entry = &dma->bufs[order];
+@@ -1135,8 +1133,8 @@
+ * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
+ * PCI memory respectively.
+ */
+-int drm_addbufs(struct drm_device *dev, void *data,
+- struct drm_file *file_priv)
++int drm_legacy_addbufs(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
+ {
+ struct drm_buf_desc *request = data;
+ int ret;
+@@ -1149,15 +1147,15 @@
+
+ #if __OS_HAS_AGP
+ if (request->flags & _DRM_AGP_BUFFER)
+- ret = drm_addbufs_agp(dev, request);
++ ret = drm_legacy_addbufs_agp(dev, request);
+ else
+ #endif
+ if (request->flags & _DRM_SG_BUFFER)
+- ret = drm_addbufs_sg(dev, request);
++ ret = drm_legacy_addbufs_sg(dev, request);
+ else if (request->flags & _DRM_FB_BUFFER)
+ ret = -EINVAL;
+ else
+- ret = drm_addbufs_pci(dev, request);
++ ret = drm_legacy_addbufs_pci(dev, request);
+
+ return ret;
+ }
+@@ -1175,12 +1173,12 @@
+ * \param arg pointer to a drm_buf_info structure.
+ * \return zero on success or a negative number on failure.
+ *
+- * Increments drm_device::buf_use while holding the drm_device::count_lock
++ * Increments drm_device::buf_use while holding the drm_device::buf_lock
+ * lock, preventing allocation of more buffers after this call. Information
+ * about each requested buffer is then copied into user space.
+ */
+-int drm_infobufs(struct drm_device *dev, void *data,
+- struct drm_file *file_priv)
++int drm_legacy_infobufs(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
+ {
+ struct drm_device_dma *dma = dev->dma;
+ struct drm_buf_info *request = data;
+@@ -1196,13 +1194,13 @@
+ if (!dma)
+ return -EINVAL;
+
+- spin_lock(&dev->count_lock);
++ spin_lock(&dev->buf_lock);
+ if (atomic_read(&dev->buf_alloc)) {
+- spin_unlock(&dev->count_lock);
++ spin_unlock(&dev->buf_lock);
+ return -EBUSY;
+ }
+ ++dev->buf_use; /* Can't allocate more after this call */
+- spin_unlock(&dev->count_lock);
++ spin_unlock(&dev->buf_lock);
+
+ for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
+ if (dma->bufs[i].buf_count)
+@@ -1217,7 +1215,6 @@
+ struct drm_buf_desc __user *to =
+ &request->list[count];
+ struct drm_buf_entry *from = &dma->bufs[i];
+- struct drm_freelist *list = &dma->bufs[i].freelist;
+ if (copy_to_user(&to->count,
+ &from->buf_count,
+ sizeof(from->buf_count)) ||
+@@ -1225,19 +1222,19 @@
+ &from->buf_size,
+ sizeof(from->buf_size)) ||
+ copy_to_user(&to->low_mark,
+- &list->low_mark,
+- sizeof(list->low_mark)) ||
++ &from->low_mark,
++ sizeof(from->low_mark)) ||
+ copy_to_user(&to->high_mark,
+- &list->high_mark,
+- sizeof(list->high_mark)))
++ &from->high_mark,
++ sizeof(from->high_mark)))
+ return -EFAULT;
+
+ DRM_DEBUG("%d %d %d %d %d\n",
+ i,
+ dma->bufs[i].buf_count,
+ dma->bufs[i].buf_size,
+- dma->bufs[i].freelist.low_mark,
+- dma->bufs[i].freelist.high_mark);
++ dma->bufs[i].low_mark,
++ dma->bufs[i].high_mark);
+ ++count;
+ }
+ }
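The count_lock to buf_lock conversion in these hunks preserves the original handshake: allocators back off once a mapping has started, and mappers back off while an allocation is still in flight. Distilled from the code above (not new logic, just the two sides of the protocol side by side):

	/* allocation side, as in drm_legacy_addbufs_*() */
	spin_lock(&dev->buf_lock);
	if (dev->buf_use) {			/* buffers already handed out */
		spin_unlock(&dev->buf_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);		/* allocation now in flight */
	spin_unlock(&dev->buf_lock);

	/* consumer side, as in drm_legacy_infobufs()/drm_legacy_mapbufs() */
	spin_lock(&dev->buf_lock);
	if (atomic_read(&dev->buf_alloc)) {	/* allocation still in flight */
		spin_unlock(&dev->buf_lock);
		return -EBUSY;
	}
	dev->buf_use++;				/* no more allocations allowed */
	spin_unlock(&dev->buf_lock);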
+@@ -1261,8 +1258,8 @@
+ *
+ * \note This ioctl is deprecated and mostly never used.
+ */
+-int drm_markbufs(struct drm_device *dev, void *data,
+- struct drm_file *file_priv)
++int drm_legacy_markbufs(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
+ {
+ struct drm_device_dma *dma = dev->dma;
+ struct drm_buf_desc *request = data;
+@@ -1290,8 +1287,8 @@
+ if (request->high_mark < 0 || request->high_mark > entry->buf_count)
+ return -EINVAL;
+
+- entry->freelist.low_mark = request->low_mark;
+- entry->freelist.high_mark = request->high_mark;
++ entry->low_mark = request->low_mark;
++ entry->high_mark = request->high_mark;
+
+ return 0;
+ }
+@@ -1308,8 +1305,8 @@
+ * Calls free_buffer() for each used buffer.
+ * This function is primarily used for debugging.
+ */
+-int drm_freebufs(struct drm_device *dev, void *data,
+- struct drm_file *file_priv)
++int drm_legacy_freebufs(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
+ {
+ struct drm_device_dma *dma = dev->dma;
+ struct drm_buf_free *request = data;
+@@ -1341,7 +1338,7 @@
+ task_pid_nr(current));
+ return -EINVAL;
+ }
+- drm_free_buffer(dev, buf);
++ drm_legacy_free_buffer(dev, buf);
+ }
+
+ return 0;
+@@ -1361,8 +1358,8 @@
+ * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
+ * drm_mmap_dma().
+ */
+-int drm_mapbufs(struct drm_device *dev, void *data,
+- struct drm_file *file_priv)
++int drm_legacy_mapbufs(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
+ {
+ struct drm_device_dma *dma = dev->dma;
+ int retcode = 0;
+@@ -1381,13 +1378,13 @@
+ if (!dma)
+ return -EINVAL;
+
+- spin_lock(&dev->count_lock);
++ spin_lock(&dev->buf_lock);
+ if (atomic_read(&dev->buf_alloc)) {
+- spin_unlock(&dev->count_lock);
++ spin_unlock(&dev->buf_lock);
+ return -EBUSY;
+ }
+ dev->buf_use++; /* Can't allocate more after this call */
+- spin_unlock(&dev->count_lock);
++ spin_unlock(&dev->buf_lock);
+
+ if (request->count >= dma->buf_count) {
+ if ((dev->agp && (dma->flags & _DRM_DMA_USE_AGP))
+@@ -1449,7 +1446,7 @@
+ return retcode;
+ }
+
+-int drm_dma_ioctl(struct drm_device *dev, void *data,
++int drm_legacy_dma_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+ {
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+@@ -1461,7 +1458,7 @@
+ return -EINVAL;
+ }
+
+-struct drm_local_map *drm_getsarea(struct drm_device *dev)
++struct drm_local_map *drm_legacy_getsarea(struct drm_device *dev)
+ {
+ struct drm_map_list *entry;
+
+@@ -1473,4 +1470,4 @@
+ }
+ return NULL;
+ }
+-EXPORT_SYMBOL(drm_getsarea);
++EXPORT_SYMBOL(drm_legacy_getsarea);
+diff -Naur a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
+--- a/drivers/gpu/drm/drm_cache.c 2015-03-26 14:43:30.402436435 +0530
++++ b/drivers/gpu/drm/drm_cache.c 2015-03-26 14:42:38.706435421 +0530
+@@ -32,6 +32,12 @@
+ #include <drm/drmP.h>
+
+ #if defined(CONFIG_X86)
++
++/*
++ * clflushopt is an unordered instruction which needs fencing with mfence or
++ * sfence to avoid ordering issues. For drm_clflush_page this fencing happens
++ * in the caller.
++ */
+ static void
+ drm_clflush_page(struct page *page)
+ {
+@@ -44,7 +50,7 @@
+
+ page_virtual = kmap_atomic(page);
+ for (i = 0; i < PAGE_SIZE; i += size)
+- clflush(page_virtual + i);
++ clflushopt(page_virtual + i);
+ kunmap_atomic(page_virtual);
+ }
+
+@@ -125,15 +131,15 @@
+ EXPORT_SYMBOL(drm_clflush_sg);
+
+ void
+-drm_clflush_virt_range(char *addr, unsigned long length)
++drm_clflush_virt_range(void *addr, unsigned long length)
+ {
+ #if defined(CONFIG_X86)
+ if (cpu_has_clflush) {
+- char *end = addr + length;
++ void *end = addr + length;
+ mb();
+ for (; addr < end; addr += boot_cpu_data.x86_clflush_size)
+- clflush(addr);
+- clflush(end - 1);
++ clflushopt(addr);
++ clflushopt(end - 1);
+ mb();
+ return;
+ }
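Because the mb() pair inside drm_clflush_virt_range() already fences the unordered clflushopt loop, callers need no barriers of their own. A hedged usage sketch (the foo_ name is illustrative):

	/* flush CPU writes to a kernel mapping before an uncached device reads it */
	static void foo_finish_cpu_access(void *vaddr, unsigned long len)
	{
		/* fencing happens internally; CPUs without clflush take the
		 * fallback path below the cpu_has_clflush check instead */
		drm_clflush_virt_range(vaddr, len);
	}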
+diff -Naur a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
+--- a/drivers/gpu/drm/drm_context.c 2015-03-26 14:43:30.490436437 +0530
++++ b/drivers/gpu/drm/drm_context.c 2015-03-26 14:42:38.706435421 +0530
+@@ -1,18 +1,13 @@
+-/**
+- * \file drm_context.c
+- * IOCTLs for generic contexts
+- *
+- * \author Rickard E. (Rik) Faith <faith@valinux.com>
+- * \author Gareth Hughes <gareth@valinux.com>
+- */
+-
+ /*
+- * Created: Fri Nov 24 18:31:37 2000 by gareth@valinux.com
++ * Legacy: Generic DRM Contexts
+ *
+ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
++ * Author: Rickard E. (Rik) Faith <faith@valinux.com>
++ * Author: Gareth Hughes <gareth@valinux.com>
++ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+@@ -33,14 +28,14 @@
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+-/*
+- * ChangeLog:
+- * 2001-11-16 Torsten Duwe <duwe@caldera.de>
+- * added context constructor/destructor hooks,
+- * needed by SiS driver's memory management.
+- */
+-
+ #include <drm/drmP.h>
++#include "drm_legacy.h"
++
++struct drm_ctx_list {
++ struct list_head head;
++ drm_context_t handle;
++ struct drm_file *tag;
++};
+
+ /******************************************************************/
+ /** \name Context bitmap support */
+@@ -56,7 +51,7 @@
+ * in drm_device::ctx_idr, while holding the drm_device::struct_mutex
+ * lock.
+ */
+-void drm_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
++void drm_legacy_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
+ {
+ mutex_lock(&dev->struct_mutex);
+ idr_remove(&dev->ctx_idr, ctx_handle);
+@@ -72,7 +67,7 @@
+ * Allocate a new idr from drm_device::ctx_idr while holding the
+ * drm_device::struct_mutex lock.
+ */
+-static int drm_ctxbitmap_next(struct drm_device * dev)
++static int drm_legacy_ctxbitmap_next(struct drm_device * dev)
+ {
+ int ret;
+
+@@ -90,7 +85,7 @@
+ *
+ * Initialise the drm_device::ctx_idr
+ */
+-int drm_ctxbitmap_init(struct drm_device * dev)
++int drm_legacy_ctxbitmap_init(struct drm_device * dev)
+ {
+ idr_init(&dev->ctx_idr);
+ return 0;
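The context "bitmap" is really an idr, so handle management reduces to the standard idr calls. A self-contained sketch of the same pattern (illustrative names, not the bodies of the functions in this file):

	#include <linux/idr.h>

	static DEFINE_IDR(foo_ctx_idr);

	static int foo_ctx_alloc(void)
	{
		/* returns the smallest free handle >= 1, or a negative errno */
		return idr_alloc(&foo_ctx_idr, NULL, 1, 0, GFP_KERNEL);
	}

	static void foo_ctx_free(int handle)
	{
		idr_remove(&foo_ctx_idr, handle);
	}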
+@@ -104,13 +99,43 @@
+ * Free all idr members using drm_ctx_sarea_free helper function
+ * while holding the drm_device::struct_mutex lock.
+ */
+-void drm_ctxbitmap_cleanup(struct drm_device * dev)
++void drm_legacy_ctxbitmap_cleanup(struct drm_device * dev)
+ {
+ mutex_lock(&dev->struct_mutex);
+ idr_destroy(&dev->ctx_idr);
+ mutex_unlock(&dev->struct_mutex);
+ }
+
++/**
++ * drm_ctxbitmap_flush() - Flush all contexts owned by a file
++ * @dev: DRM device to operate on
++ * @file: Open file to flush contexts for
++ *
++ * This iterates over all contexts on @dev and drops them if they're owned by
++ * @file. Note that after this call returns, new contexts might be added if
++ * the file is still alive.
++ */
++void drm_legacy_ctxbitmap_flush(struct drm_device *dev, struct drm_file *file)
++{
++ struct drm_ctx_list *pos, *tmp;
++
++ mutex_lock(&dev->ctxlist_mutex);
++
++ list_for_each_entry_safe(pos, tmp, &dev->ctxlist, head) {
++ if (pos->tag == file &&
++ pos->handle != DRM_KERNEL_CONTEXT) {
++ if (dev->driver->context_dtor)
++ dev->driver->context_dtor(dev, pos->handle);
++
++ drm_legacy_ctxbitmap_free(dev, pos->handle);
++ list_del(&pos->head);
++ kfree(pos);
++ }
++ }
++
++ mutex_unlock(&dev->ctxlist_mutex);
++}
++
+ /*@}*/
+
+ /******************************************************************/
+@@ -129,8 +154,8 @@
+ * Gets the map from drm_device::ctx_idr with the handle specified and
+ * returns its handle.
+ */
+-int drm_getsareactx(struct drm_device *dev, void *data,
+- struct drm_file *file_priv)
++int drm_legacy_getsareactx(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
+ {
+ struct drm_ctx_priv_map *request = data;
+ struct drm_local_map *map;
+@@ -173,8 +198,8 @@
+ * Searches the mapping specified in \p arg and update the entry in
+ * drm_device::ctx_idr with it.
+ */
+-int drm_setsareactx(struct drm_device *dev, void *data,
+- struct drm_file *file_priv)
++int drm_legacy_setsareactx(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
+ {
+ struct drm_ctx_priv_map *request = data;
+ struct drm_local_map *map = NULL;
+@@ -273,8 +298,8 @@
+ * \param arg user argument pointing to a drm_ctx_res structure.
+ * \return zero on success or a negative number on failure.
+ */
+-int drm_resctx(struct drm_device *dev, void *data,
+- struct drm_file *file_priv)
++int drm_legacy_resctx(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
+ {
+ struct drm_ctx_res *res = data;
+ struct drm_ctx ctx;
+@@ -304,16 +329,16 @@
+ *
+ * Get a new handle for the context and copy to userspace.
+ */
+-int drm_addctx(struct drm_device *dev, void *data,
+- struct drm_file *file_priv)
++int drm_legacy_addctx(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
+ {
+ struct drm_ctx_list *ctx_entry;
+ struct drm_ctx *ctx = data;
+
+- ctx->handle = drm_ctxbitmap_next(dev);
++ ctx->handle = drm_legacy_ctxbitmap_next(dev);
+ if (ctx->handle == DRM_KERNEL_CONTEXT) {
+ /* Skip kernel's context and get a new one. */
+- ctx->handle = drm_ctxbitmap_next(dev);
++ ctx->handle = drm_legacy_ctxbitmap_next(dev);
+ }
+ DRM_DEBUG("%d\n", ctx->handle);
+ if (ctx->handle == -1) {
+@@ -348,7 +373,8 @@
+ * \param arg user argument pointing to a drm_ctx structure.
+ * \return zero on success or a negative number on failure.
+ */
+-int drm_getctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
++int drm_legacy_getctx(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
+ {
+ struct drm_ctx *ctx = data;
+
+@@ -369,8 +395,8 @@
+ *
+ * Calls context_switch().
+ */
+-int drm_switchctx(struct drm_device *dev, void *data,
+- struct drm_file *file_priv)
++int drm_legacy_switchctx(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
+ {
+ struct drm_ctx *ctx = data;
+
+@@ -389,8 +415,8 @@
+ *
+ * Calls context_switch_complete().
+ */
+-int drm_newctx(struct drm_device *dev, void *data,
+- struct drm_file *file_priv)
++int drm_legacy_newctx(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
+ {
+ struct drm_ctx *ctx = data;
+
+@@ -411,8 +437,8 @@
+ *
+ * If not the special kernel context, calls ctxbitmap_free() to free the specified context.
+ */
+-int drm_rmctx(struct drm_device *dev, void *data,
+- struct drm_file *file_priv)
++int drm_legacy_rmctx(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
+ {
+ struct drm_ctx *ctx = data;
+
+@@ -420,7 +446,7 @@
+ if (ctx->handle != DRM_KERNEL_CONTEXT) {
+ if (dev->driver->context_dtor)
+ dev->driver->context_dtor(dev, ctx->handle);
+- drm_ctxbitmap_free(dev, ctx->handle);
++ drm_legacy_ctxbitmap_free(dev, ctx->handle);
+ }
+
+ mutex_lock(&dev->ctxlist_mutex);
+diff -Naur a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
+--- a/drivers/gpu/drm/drm_crtc.c 2015-03-26 14:43:30.426436436 +0530
++++ b/drivers/gpu/drm/drm_crtc.c 2015-03-26 14:42:38.710435421 +0530
+@@ -37,58 +37,14 @@
+ #include <drm/drm_crtc.h>
+ #include <drm/drm_edid.h>
+ #include <drm/drm_fourcc.h>
++#include <drm/drm_modeset_lock.h>
+
+-/**
+- * drm_modeset_lock_all - take all modeset locks
+- * @dev: drm device
+- *
+- * This function takes all modeset locks, suitable where a more fine-grained
+- * scheme isn't (yet) implemented.
+- */
+-void drm_modeset_lock_all(struct drm_device *dev)
+-{
+- struct drm_crtc *crtc;
+-
+- mutex_lock(&dev->mode_config.mutex);
+-
+- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+- mutex_lock_nest_lock(&crtc->mutex, &dev->mode_config.mutex);
+-}
+-EXPORT_SYMBOL(drm_modeset_lock_all);
+-
+-/**
+- * drm_modeset_unlock_all - drop all modeset locks
+- * @dev: device
+- */
+-void drm_modeset_unlock_all(struct drm_device *dev)
+-{
+- struct drm_crtc *crtc;
+-
+- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+- mutex_unlock(&crtc->mutex);
+-
+- mutex_unlock(&dev->mode_config.mutex);
+-}
+-EXPORT_SYMBOL(drm_modeset_unlock_all);
+-
+-/**
+- * drm_warn_on_modeset_not_all_locked - check that all modeset locks are locked
+- * @dev: device
+- */
+-void drm_warn_on_modeset_not_all_locked(struct drm_device *dev)
+-{
+- struct drm_crtc *crtc;
++#include "drm_crtc_internal.h"
++#include "drm_internal.h"
+
+- /* Locking is currently fubar in the panic handler. */
+- if (oops_in_progress)
+- return;
+-
+- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+- WARN_ON(!mutex_is_locked(&crtc->mutex));
+-
+- WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
+-}
+-EXPORT_SYMBOL(drm_warn_on_modeset_not_all_locked);
++static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev,
++ struct drm_mode_fb_cmd2 *r,
++ struct drm_file *file_priv);
+
+ /* Avoid boilerplate. I'm tired of typing. */
+ #define DRM_ENUM_NAME_FN(fnname, list) \
+@@ -114,6 +70,13 @@
+
+ DRM_ENUM_NAME_FN(drm_get_dpms_name, drm_dpms_enum_list)
+
++static const struct drm_prop_enum_list drm_plane_type_enum_list[] =
++{
++ { DRM_PLANE_TYPE_OVERLAY, "Overlay" },
++ { DRM_PLANE_TYPE_PRIMARY, "Primary" },
++ { DRM_PLANE_TYPE_CURSOR, "Cursor" },
++};
++
+ /*
+ * Optional properties
+ */
+@@ -125,6 +88,12 @@
+ { DRM_MODE_SCALE_ASPECT, "Full aspect" },
+ };
+
++static const struct drm_prop_enum_list drm_aspect_ratio_enum_list[] = {
++ { DRM_MODE_PICTURE_ASPECT_NONE, "Automatic" },
++ { DRM_MODE_PICTURE_ASPECT_4_3, "4:3" },
++ { DRM_MODE_PICTURE_ASPECT_16_9, "16:9" },
++};
++
+ /*
+ * Non-global properties, but "required" for certain connectors.
+ */
+@@ -213,6 +182,17 @@
+ { DRM_MODE_ENCODER_TVDAC, "TV" },
+ { DRM_MODE_ENCODER_VIRTUAL, "Virtual" },
+ { DRM_MODE_ENCODER_DSI, "DSI" },
++ { DRM_MODE_ENCODER_DPMST, "DP MST" },
++};
++
++static const struct drm_prop_enum_list drm_subpixel_enum_list[] =
++{
++ { SubPixelUnknown, "Unknown" },
++ { SubPixelHorizontalRGB, "Horizontal RGB" },
++ { SubPixelHorizontalBGR, "Horizontal BGR" },
++ { SubPixelVerticalRGB, "Vertical RGB" },
++ { SubPixelVerticalBGR, "Vertical BGR" },
++ { SubPixelNone, "None" },
+ };
+
+ void drm_connector_ida_init(void)
+@@ -231,28 +211,13 @@
+ ida_destroy(&drm_connector_enum_list[i].ida);
+ }
+
+-const char *drm_get_encoder_name(const struct drm_encoder *encoder)
+-{
+- static char buf[32];
+-
+- snprintf(buf, 32, "%s-%d",
+- drm_encoder_enum_list[encoder->encoder_type].name,
+- encoder->base.id);
+- return buf;
+-}
+-EXPORT_SYMBOL(drm_get_encoder_name);
+-
+-const char *drm_get_connector_name(const struct drm_connector *connector)
+-{
+- static char buf[32];
+-
+- snprintf(buf, 32, "%s-%d",
+- drm_connector_enum_list[connector->connector_type].name,
+- connector->connector_type_id);
+- return buf;
+-}
+-EXPORT_SYMBOL(drm_get_connector_name);
+-
++/**
++ * drm_get_connector_status_name - return a string for connector status
++ * @status: connector status to compute name of
++ *
++ * In contrast to the other drm_get_*_name functions this one returns a
++ * const pointer and hence is threadsafe.
++ */
+ const char *drm_get_connector_status_name(enum drm_connector_status status)
+ {
+ if (status == connector_status_connected)
+@@ -264,11 +229,33 @@
+ }
+ EXPORT_SYMBOL(drm_get_connector_status_name);
+
++/**
++ * drm_get_subpixel_order_name - return a string for a given subpixel enum
++ * @order: enum of subpixel_order
++ *
++ * Note you could abuse this and return something out of bounds, but that
++ * would be a caller error. No unscrubbed user data should make it here.
++ */
++const char *drm_get_subpixel_order_name(enum subpixel_order order)
++{
++ return drm_subpixel_enum_list[order].name;
++}
++EXPORT_SYMBOL(drm_get_subpixel_order_name);
++
+ static char printable_char(int c)
+ {
+ return isascii(c) && isprint(c) ? c : '?';
+ }
+
++/**
++ * drm_get_format_name - return a string for drm fourcc format
++ * @format: format to compute name of
++ *
++ * Note that the buffer used by this function is globally shared and owned by
++ * the function itself.
++ *
++ * FIXME: This isn't really multithreading safe.
++ */
+ const char *drm_get_format_name(uint32_t format)
+ {
+ static char buf[32];
+@@ -286,26 +273,19 @@
+ }
+ EXPORT_SYMBOL(drm_get_format_name);
+
+-/**
+- * drm_mode_object_get - allocate a new modeset identifier
+- * @dev: DRM device
+- * @obj: object pointer, used to generate unique ID
+- * @obj_type: object type
+- *
+- * Create a unique identifier based on @ptr in @dev's identifier space. Used
+- * for tracking modes, CRTCs and connectors.
+- *
+- * RETURNS:
+- * New unique (relative to other objects in @dev) integer identifier for the
+- * object.
++/*
++ * Internal function to assign a slot in the object idr and optionally
++ * register the object into the idr.
+ */
+-static int drm_mode_object_get(struct drm_device *dev,
+- struct drm_mode_object *obj, uint32_t obj_type)
++static int drm_mode_object_get_reg(struct drm_device *dev,
++ struct drm_mode_object *obj,
++ uint32_t obj_type,
++ bool register_obj)
+ {
+ int ret;
+
+ mutex_lock(&dev->mode_config.idr_mutex);
+- ret = idr_alloc(&dev->mode_config.crtc_idr, obj, 1, 0, GFP_KERNEL);
++ ret = idr_alloc(&dev->mode_config.crtc_idr, register_obj ? obj : NULL, 1, 0, GFP_KERNEL);
+ if (ret >= 0) {
+ /*
+ * Set up the object linking under the protection of the idr
+@@ -320,20 +300,70 @@
+ }
+
+ /**
++ * drm_mode_object_get - allocate a new modeset identifier
++ * @dev: DRM device
++ * @obj: object pointer, used to generate unique ID
++ * @obj_type: object type
++ *
++ * Create a unique identifier based on @obj in @dev's identifier space. Used
++ * for tracking modes, CRTCs and connectors. Note that despite the _get postfix
++ * modeset identifiers are _not_ reference counted. Hence don't use this for
++ * reference counted modeset objects like framebuffers.
++ *
++ * Returns:
++ * New unique (relative to other objects in @dev) integer identifier for the
++ * object.
++ */
++int drm_mode_object_get(struct drm_device *dev,
++ struct drm_mode_object *obj, uint32_t obj_type)
++{
++ return drm_mode_object_get_reg(dev, obj, obj_type, true);
++}
++
++static void drm_mode_object_register(struct drm_device *dev,
++ struct drm_mode_object *obj)
++{
++ mutex_lock(&dev->mode_config.idr_mutex);
++ idr_replace(&dev->mode_config.crtc_idr, obj, obj->id);
++ mutex_unlock(&dev->mode_config.idr_mutex);
++}
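drm_mode_object_get_reg() and drm_mode_object_register() together implement a two-phase publish: the ID is reserved immediately (so it is stable), but the idr slot stays NULL so lookups fail until the object is fully initialised. Connectors rely on this via drm_connector_init()/drm_connector_register() below. The bare pattern, with placeholder names:

	/* phase 1: reserve the id; idr_find() on it yields NULL for now */
	id = idr_alloc(&table, NULL, 1, 0, GFP_KERNEL);

	/* ... finish initialising obj ... */

	/* phase 2: publish; idr_find(&table, id) now returns obj */
	idr_replace(&table, obj, id);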
++
++/**
+ * drm_mode_object_put - free a modeset identifier
+ * @dev: DRM device
+ * @object: object to free
+ *
+- * Free @id from @dev's unique identifier pool.
++ * Free @id from @dev's unique identifier pool. Note that despite the _get
++ * postfix modeset identifiers are _not_ reference counted. Hence don't use this
++ * for reference counted modeset objects like framebuffers.
+ */
+-static void drm_mode_object_put(struct drm_device *dev,
+- struct drm_mode_object *object)
++void drm_mode_object_put(struct drm_device *dev,
++ struct drm_mode_object *object)
+ {
+ mutex_lock(&dev->mode_config.idr_mutex);
+ idr_remove(&dev->mode_config.crtc_idr, object->id);
+ mutex_unlock(&dev->mode_config.idr_mutex);
+ }
+
++static struct drm_mode_object *_object_find(struct drm_device *dev,
++ uint32_t id, uint32_t type)
++{
++ struct drm_mode_object *obj = NULL;
++
++ mutex_lock(&dev->mode_config.idr_mutex);
++ obj = idr_find(&dev->mode_config.crtc_idr, id);
++ if (obj && type != DRM_MODE_OBJECT_ANY && obj->type != type)
++ obj = NULL;
++ if (obj && obj->id != id)
++ obj = NULL;
++ /* don't leak out unref'd fb's */
++ if (obj && (obj->type == DRM_MODE_OBJECT_FB))
++ obj = NULL;
++ mutex_unlock(&dev->mode_config.idr_mutex);
++
++ return obj;
++}
++
+ /**
+ * drm_mode_object_find - look up a drm object with static lifetime
+ * @dev: drm device
+@@ -341,7 +371,9 @@
+ * @type: type of the mode object
+ *
+ * Note that framebuffers cannot be looked up with this function - since those
+- * are reference counted, they need special treatment.
++ * are reference counted, they need special treatment. This holds even with
++ * DRM_MODE_OBJECT_ANY (although in that case the lookup simply returns NULL
++ * rather than WARN_ON()-ing).
+ */
+ struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
+ uint32_t id, uint32_t type)
+@@ -351,13 +383,7 @@
+ /* Framebuffers are reference counted and need their own lookup
+ * function.*/
+ WARN_ON(type == DRM_MODE_OBJECT_FB);
+-
+- mutex_lock(&dev->mode_config.idr_mutex);
+- obj = idr_find(&dev->mode_config.crtc_idr, id);
+- if (!obj || (obj->type != type) || (obj->id != id))
+- obj = NULL;
+- mutex_unlock(&dev->mode_config.idr_mutex);
+-
++ obj = _object_find(dev, id, type);
+ return obj;
+ }
+ EXPORT_SYMBOL(drm_mode_object_find);
+@@ -377,7 +403,7 @@
+ * since all the fb attributes are invariant over its lifetime, no further
+ * locking but only correct reference counting is required.
+ *
+- * RETURNS:
++ * Returns:
+ * Zero on success, error code on failure.
+ */
+ int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
+@@ -395,9 +421,6 @@
+ if (ret)
+ goto out;
+
+- /* Grab the idr reference. */
+- drm_framebuffer_reference(fb);
+-
+ dev->mode_config.num_fb++;
+ list_add(&fb->head, &dev->mode_config.fb_list);
+ out:
+@@ -407,10 +430,34 @@
+ }
+ EXPORT_SYMBOL(drm_framebuffer_init);
+
++/* dev->mode_config.fb_lock must be held! */
++static void __drm_framebuffer_unregister(struct drm_device *dev,
++ struct drm_framebuffer *fb)
++{
++ mutex_lock(&dev->mode_config.idr_mutex);
++ idr_remove(&dev->mode_config.crtc_idr, fb->base.id);
++ mutex_unlock(&dev->mode_config.idr_mutex);
++
++ fb->base.id = 0;
++}
++
+ static void drm_framebuffer_free(struct kref *kref)
+ {
+ struct drm_framebuffer *fb =
+ container_of(kref, struct drm_framebuffer, refcount);
++ struct drm_device *dev = fb->dev;
++
++ /*
++ * The lookup idr holds a weak reference, which has not necessarily been
++ * removed at this point. Check for that.
++ */
++ mutex_lock(&dev->mode_config.fb_lock);
++ if (fb->base.id) {
++ /* Mark fb as reaped and drop idr ref. */
++ __drm_framebuffer_unregister(dev, fb);
++ }
++ mutex_unlock(&dev->mode_config.fb_lock);
++
+ fb->funcs->destroy(fb);
+ }
+
+@@ -438,7 +485,7 @@
+ *
+ * If successful, this grabs an additional reference to the framebuffer -
+ * callers need to make sure to eventually unreference the returned framebuffer
+- * again.
++ * again, using drm_framebuffer_unreference().
+ */
+ struct drm_framebuffer *drm_framebuffer_lookup(struct drm_device *dev,
+ uint32_t id)
+@@ -447,8 +494,10 @@
+
+ mutex_lock(&dev->mode_config.fb_lock);
+ fb = __drm_framebuffer_lookup(dev, id);
+- if (fb)
+- drm_framebuffer_reference(fb);
++ if (fb) {
++ if (!kref_get_unless_zero(&fb->refcount))
++ fb = NULL;
++ }
+ mutex_unlock(&dev->mode_config.fb_lock);
+
+ return fb;
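This lookup pairs with the new weak-reference check in drm_framebuffer_free() above: the idr does not hold a full reference, so kref_get_unless_zero() is what closes the race against a concurrent final unref. The generic shape of such a lookup, with placeholder names:

	mutex_lock(&table_lock);
	obj = idr_find(&table, id);
	if (obj && !kref_get_unless_zero(&obj->refcount))
		obj = NULL;	/* lost the race: the free is already in flight */
	mutex_unlock(&table_lock);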
+@@ -463,7 +512,7 @@
+ */
+ void drm_framebuffer_unreference(struct drm_framebuffer *fb)
+ {
+- DRM_DEBUG("FB ID: %d\n", fb->base.id);
++ DRM_DEBUG("%p: FB ID: %d (%d)\n", fb, fb->base.id, atomic_read(&fb->refcount.refcount));
+ kref_put(&fb->refcount, drm_framebuffer_free);
+ }
+ EXPORT_SYMBOL(drm_framebuffer_unreference);
+@@ -471,10 +520,12 @@
+ /**
+ * drm_framebuffer_reference - incr the fb refcnt
+ * @fb: framebuffer
++ *
++ * This functions increments the fb's refcount.
+ */
+ void drm_framebuffer_reference(struct drm_framebuffer *fb)
+ {
+- DRM_DEBUG("FB ID: %d\n", fb->base.id);
++ DRM_DEBUG("%p: FB ID: %d (%d)\n", fb, fb->base.id, atomic_read(&fb->refcount.refcount));
+ kref_get(&fb->refcount);
+ }
+ EXPORT_SYMBOL(drm_framebuffer_reference);
+@@ -486,23 +537,10 @@
+
+ static void __drm_framebuffer_unreference(struct drm_framebuffer *fb)
+ {
+- DRM_DEBUG("FB ID: %d\n", fb->base.id);
++ DRM_DEBUG("%p: FB ID: %d (%d)\n", fb, fb->base.id, atomic_read(&fb->refcount.refcount));
+ kref_put(&fb->refcount, drm_framebuffer_free_bug);
+ }
+
+-/* dev->mode_config.fb_lock must be held! */
+-static void __drm_framebuffer_unregister(struct drm_device *dev,
+- struct drm_framebuffer *fb)
+-{
+- mutex_lock(&dev->mode_config.idr_mutex);
+- idr_remove(&dev->mode_config.crtc_idr, fb->base.id);
+- mutex_unlock(&dev->mode_config.idr_mutex);
+-
+- fb->base.id = 0;
+-
+- __drm_framebuffer_unreference(fb);
+-}
+-
+ /**
+ * drm_framebuffer_unregister_private - unregister a private fb from the lookup idr
+ * @fb: fb to unregister
+@@ -527,8 +565,9 @@
+ * drm_framebuffer_cleanup - remove a framebuffer object
+ * @fb: framebuffer to remove
+ *
+- * Cleanup references to a user-created framebuffer. This function is intended
+- * to be used from the drivers ->destroy callback.
++ * Cleanup framebuffer. This function is intended to be used from the drivers
++ * ->destroy callback. It can also be used to clean up driver private
++ * framebuffers embedded into a larger structure.
+ *
+ * Note that this function does not remove the fb from active usage - if it is
+ * still used anywhere, hilarity can ensue since userspace could call getfb on
+@@ -591,7 +630,7 @@
+ drm_modeset_lock_all(dev);
+ /* remove from any CRTC */
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+- if (crtc->fb == fb) {
++ if (crtc->primary->fb == fb) {
+ /* should turn off the crtc */
+ memset(&set, 0, sizeof(struct drm_mode_set));
+ set.crtc = crtc;
+@@ -613,45 +652,54 @@
+ }
+ EXPORT_SYMBOL(drm_framebuffer_remove);
+
++DEFINE_WW_CLASS(crtc_ww_class);
++
+ /**
+- * drm_crtc_init - Initialise a new CRTC object
++ * drm_crtc_init_with_planes - Initialise a new CRTC object with
++ * specified primary and cursor planes.
+ * @dev: DRM device
+ * @crtc: CRTC object to init
++ * @primary: Primary plane for CRTC
++ * @cursor: Cursor plane for CRTC
+ * @funcs: callbacks for the new CRTC
+ *
+ * Inits a new object created as base part of a driver crtc object.
+ *
+- * RETURNS:
++ * Returns:
+ * Zero on success, error code on failure.
+ */
+-int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
+- const struct drm_crtc_funcs *funcs)
++int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
++ struct drm_plane *primary,
++ struct drm_plane *cursor,
++ const struct drm_crtc_funcs *funcs)
+ {
++ struct drm_mode_config *config = &dev->mode_config;
+ int ret;
+
+ crtc->dev = dev;
+ crtc->funcs = funcs;
+ crtc->invert_dimensions = false;
+
+- drm_modeset_lock_all(dev);
+- mutex_init(&crtc->mutex);
+- mutex_lock_nest_lock(&crtc->mutex, &dev->mode_config.mutex);
+-
++ drm_modeset_lock_init(&crtc->mutex);
+ ret = drm_mode_object_get(dev, &crtc->base, DRM_MODE_OBJECT_CRTC);
+ if (ret)
+- goto out;
++ return ret;
+
+ crtc->base.properties = &crtc->properties;
+
+- list_add_tail(&crtc->head, &dev->mode_config.crtc_list);
+- dev->mode_config.num_crtc++;
++ list_add_tail(&crtc->head, &config->crtc_list);
++ config->num_crtc++;
+
+- out:
+- drm_modeset_unlock_all(dev);
++ crtc->primary = primary;
++ crtc->cursor = cursor;
++ if (primary)
++ primary->possible_crtcs = 1 << drm_crtc_index(crtc);
++ if (cursor)
++ cursor->possible_crtcs = 1 << drm_crtc_index(crtc);
+
+- return ret;
++ return 0;
+ }
+-EXPORT_SYMBOL(drm_crtc_init);
++EXPORT_SYMBOL(drm_crtc_init_with_planes);
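A sketch of the expected call, assuming the driver has already initialised @primary as a DRM_PLANE_TYPE_PRIMARY plane (the foo_ names are hypothetical):

	ret = drm_crtc_init_with_planes(dev, &foo->crtc, foo->primary,
					NULL /* no cursor plane */,
					&foo_crtc_funcs);
	if (ret)
		return ret;
	/* possible_crtcs of the primary plane now names exactly this CRTC */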
+
+ /**
+ * drm_crtc_cleanup - Clean up the core crtc usage
+@@ -668,9 +716,17 @@
+ kfree(crtc->gamma_store);
+ crtc->gamma_store = NULL;
+
++ drm_modeset_lock_fini(&crtc->mutex);
++
+ drm_mode_object_put(dev, &crtc->base);
+ list_del(&crtc->head);
+ dev->mode_config.num_crtc--;
++
++ WARN_ON(crtc->state && !crtc->funcs->atomic_destroy_state);
++ if (crtc->state && crtc->funcs->atomic_destroy_state)
++ crtc->funcs->atomic_destroy_state(crtc, crtc->state);
++
++ memset(crtc, 0, sizeof(*crtc));
+ }
+ EXPORT_SYMBOL(drm_crtc_cleanup);
+
+@@ -697,20 +753,6 @@
+ }
+ EXPORT_SYMBOL(drm_crtc_index);
+
+-/**
+- * drm_mode_probed_add - add a mode to a connector's probed mode list
+- * @connector: connector the new mode
+- * @mode: mode data
+- *
+- * Add @mode to @connector's mode list for later use.
+- */
+-void drm_mode_probed_add(struct drm_connector *connector,
+- struct drm_display_mode *mode)
+-{
+- list_add_tail(&mode->head, &connector->probed_modes);
+-}
+-EXPORT_SYMBOL(drm_mode_probed_add);
+-
+ /*
+ * drm_mode_remove - remove and free a mode
+ * @connector: connector list to modify
+@@ -726,6 +768,58 @@
+ }
+
+ /**
++ * drm_connector_get_cmdline_mode - reads the user's cmdline mode
++ * @connector: connector to query
++ *
++ * The kernel supports per-connector configuration of its consoles through
++ * use of the video= parameter. This function parses that option and
++ * extracts the user's specified mode (or enable/disable status) for a
++ * particular connector. This is typically only used during the early fbdev
++ * setup.
++ */
++static void drm_connector_get_cmdline_mode(struct drm_connector *connector)
++{
++ struct drm_cmdline_mode *mode = &connector->cmdline_mode;
++ char *option = NULL;
++
++ if (fb_get_options(connector->name, &option))
++ return;
++
++ if (!drm_mode_parse_command_line_for_connector(option,
++ connector,
++ mode))
++ return;
++
++ if (mode->force) {
++ const char *s;
++
++ switch (mode->force) {
++ case DRM_FORCE_OFF:
++ s = "OFF";
++ break;
++ case DRM_FORCE_ON_DIGITAL:
++ s = "ON - dig";
++ break;
++ default:
++ case DRM_FORCE_ON:
++ s = "ON";
++ break;
++ }
++
++ DRM_INFO("forcing %s connector %s\n", connector->name, s);
++ connector->force = mode->force;
++ }
++
++ DRM_DEBUG_KMS("cmdline mode for connector %s %dx%d@%dHz%s%s%s\n",
++ connector->name,
++ mode->xres, mode->yres,
++ mode->refresh_specified ? mode->refresh : 60,
++ mode->rb ? " reduced blanking" : "",
++ mode->margins ? " with margins" : "",
++ mode->interlace ? " interlaced" : "");
++}
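For reference, the video= option parsed here uses the modedb syntax from Documentation/fb/modedb.txt, keyed on connector->name, for example:

	video=HDMI-A-1:1920x1080@60	(pick a mode)
	video=VGA-1:d			(force the connector off)
	video=DVI-I-1:e			(force the connector on)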
++
++/**
+ * drm_connector_init - Init a preallocated connector
+ * @dev: DRM device
+ * @connector: the connector to init
+@@ -735,7 +829,7 @@
+ * Initialises a preallocated connector. Connectors should be
+ * subclassed as part of driver connector objects.
+ *
+- * RETURNS:
++ * Returns:
+ * Zero on success, error code on failure.
+ */
+ int drm_connector_init(struct drm_device *dev,
+@@ -749,9 +843,9 @@
+
+ drm_modeset_lock_all(dev);
+
+- ret = drm_mode_object_get(dev, &connector->base, DRM_MODE_OBJECT_CONNECTOR);
++ ret = drm_mode_object_get_reg(dev, &connector->base, DRM_MODE_OBJECT_CONNECTOR, false);
+ if (ret)
+- goto out;
++ goto out_unlock;
+
+ connector->base.properties = &connector->properties;
+ connector->dev = dev;
+@@ -761,14 +855,26 @@
+ ida_simple_get(connector_ida, 1, 0, GFP_KERNEL);
+ if (connector->connector_type_id < 0) {
+ ret = connector->connector_type_id;
+- drm_mode_object_put(dev, &connector->base);
+- goto out;
++ goto out_put;
+ }
++ connector->name =
++ kasprintf(GFP_KERNEL, "%s-%d",
++ drm_connector_enum_list[connector_type].name,
++ connector->connector_type_id);
++ if (!connector->name) {
++ ret = -ENOMEM;
++ goto out_put;
++ }
++
+ INIT_LIST_HEAD(&connector->probed_modes);
+ INIT_LIST_HEAD(&connector->modes);
+ connector->edid_blob_ptr = NULL;
+ connector->status = connector_status_unknown;
+
++ drm_connector_get_cmdline_mode(connector);
++
++ /* We should add connectors at the end to avoid upsetting the connector
++ * index too much. */
+ list_add_tail(&connector->head, &dev->mode_config.connector_list);
+ dev->mode_config.num_connector++;
+
+@@ -780,7 +886,13 @@
+ drm_object_attach_property(&connector->base,
+ dev->mode_config.dpms_property, 0);
+
+- out:
++ connector->debugfs_entry = NULL;
++
++out_put:
++ if (ret)
++ drm_mode_object_put(dev, &connector->base);
++
++out_unlock:
+ drm_modeset_unlock_all(dev);
+
+ return ret;
+@@ -798,6 +910,11 @@
+ struct drm_device *dev = connector->dev;
+ struct drm_display_mode *mode, *t;
+
++ if (connector->tile_group) {
++ drm_mode_put_tile_group(dev, connector->tile_group);
++ connector->tile_group = NULL;
++ }
++
+ list_for_each_entry_safe(mode, t, &connector->probed_modes, head)
+ drm_mode_remove(connector, mode);
+
+@@ -808,22 +925,120 @@
+ connector->connector_type_id);
+
+ drm_mode_object_put(dev, &connector->base);
++ kfree(connector->name);
++ connector->name = NULL;
+ list_del(&connector->head);
+ dev->mode_config.num_connector--;
++
++ WARN_ON(connector->state && !connector->funcs->atomic_destroy_state);
++ if (connector->state && connector->funcs->atomic_destroy_state)
++ connector->funcs->atomic_destroy_state(connector,
++ connector->state);
++
++ memset(connector, 0, sizeof(*connector));
+ }
+ EXPORT_SYMBOL(drm_connector_cleanup);
+
++/**
++ * drm_connector_index - find the index of a registered connector
++ * @connector: connector to find index for
++ *
++ * Given a registered connector, return the index of that connector within a DRM
++ * device's list of connectors.
++ */
++unsigned int drm_connector_index(struct drm_connector *connector)
++{
++ unsigned int index = 0;
++ struct drm_connector *tmp;
++ struct drm_mode_config *config = &connector->dev->mode_config;
++
++ WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
++
++ list_for_each_entry(tmp, &connector->dev->mode_config.connector_list, head) {
++ if (tmp == connector)
++ return index;
++
++ index++;
++ }
++
++ BUG();
++}
++EXPORT_SYMBOL(drm_connector_index);
++
++/**
++ * drm_connector_register - register a connector
++ * @connector: the connector to register
++ *
++ * Register userspace interfaces for a connector
++ *
++ * Returns:
++ * Zero on success, error code on failure.
++ */
++int drm_connector_register(struct drm_connector *connector)
++{
++ int ret;
++
++ drm_mode_object_register(connector->dev, &connector->base);
++
++ ret = drm_sysfs_connector_add(connector);
++ if (ret)
++ return ret;
++
++ ret = drm_debugfs_connector_add(connector);
++ if (ret) {
++ drm_sysfs_connector_remove(connector);
++ return ret;
++ }
++
++ return 0;
++}
++EXPORT_SYMBOL(drm_connector_register);
++
++/**
++ * drm_connector_unregister - unregister a connector
++ * @connector: the connector to unregister
++ *
++ * Unregister userspace interfaces for a connector
++ */
++void drm_connector_unregister(struct drm_connector *connector)
++{
++ drm_sysfs_connector_remove(connector);
++ drm_debugfs_connector_remove(connector);
++}
++EXPORT_SYMBOL(drm_connector_unregister);
++
++
++/**
++ * drm_connector_unplug_all - unregister connector userspace interfaces
++ * @dev: drm device
++ *
++ * This function unregisters all connector userspace interfaces in sysfs. Should
++ * be called when the device is disconnected, e.g. from a USB driver's
++ * ->disconnect callback.
++ */
+ void drm_connector_unplug_all(struct drm_device *dev)
+ {
+ struct drm_connector *connector;
+
+ /* taking the mode config mutex ends up in a clash with sysfs */
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+- drm_sysfs_connector_remove(connector);
++ drm_connector_unregister(connector);
+
+ }
+ EXPORT_SYMBOL(drm_connector_unplug_all);
+
++/**
++ * drm_bridge_init - initialize a drm transcoder/bridge
++ * @dev: drm device
++ * @bridge: transcoder/bridge to set up
++ * @funcs: bridge function table
++ *
++ * Initialises a preallocated bridge. Bridges should be
++ * subclassed as part of driver connector objects.
++ *
++ * Returns:
++ * Zero on success, error code on failure.
++ */
+ int drm_bridge_init(struct drm_device *dev, struct drm_bridge *bridge,
+ const struct drm_bridge_funcs *funcs)
+ {
+@@ -847,6 +1062,12 @@
+ }
+ EXPORT_SYMBOL(drm_bridge_init);
+
++/**
++ * drm_bridge_cleanup - cleans up an initialised bridge
++ * @bridge: bridge to cleanup
++ *
++ * Cleans up the bridge but doesn't free the object.
++ */
+ void drm_bridge_cleanup(struct drm_bridge *bridge)
+ {
+ struct drm_device *dev = bridge->dev;
+@@ -856,9 +1077,24 @@
+ list_del(&bridge->head);
+ dev->mode_config.num_bridge--;
+ drm_modeset_unlock_all(dev);
++
++ memset(bridge, 0, sizeof(*bridge));
+ }
+ EXPORT_SYMBOL(drm_bridge_cleanup);
+
++/**
++ * drm_encoder_init - Init a preallocated encoder
++ * @dev: drm device
++ * @encoder: the encoder to init
++ * @funcs: callbacks for this encoder
++ * @encoder_type: user visible type of the encoder
++ *
++ * Initialises a preallocated encoder. Encoder should be
++ * subclassed as part of driver encoder objects.
++ *
++ * Returns:
++ * Zero on success, error code on failure.
++ */
+ int drm_encoder_init(struct drm_device *dev,
+ struct drm_encoder *encoder,
+ const struct drm_encoder_funcs *funcs,
+@@ -870,61 +1106,81 @@
+
+ ret = drm_mode_object_get(dev, &encoder->base, DRM_MODE_OBJECT_ENCODER);
+ if (ret)
+- goto out;
++ goto out_unlock;
+
+ encoder->dev = dev;
+ encoder->encoder_type = encoder_type;
+ encoder->funcs = funcs;
++ encoder->name = kasprintf(GFP_KERNEL, "%s-%d",
++ drm_encoder_enum_list[encoder_type].name,
++ encoder->base.id);
++ if (!encoder->name) {
++ ret = -ENOMEM;
++ goto out_put;
++ }
+
+ list_add_tail(&encoder->head, &dev->mode_config.encoder_list);
+ dev->mode_config.num_encoder++;
+
+- out:
++out_put:
++ if (ret)
++ drm_mode_object_put(dev, &encoder->base);
++
++out_unlock:
+ drm_modeset_unlock_all(dev);
+
+ return ret;
+ }
+ EXPORT_SYMBOL(drm_encoder_init);
+
++/**
++ * drm_encoder_cleanup - cleans up an initialised encoder
++ * @encoder: encoder to cleanup
++ *
++ * Cleans up the encoder but doesn't free the object.
++ */
+ void drm_encoder_cleanup(struct drm_encoder *encoder)
+ {
+ struct drm_device *dev = encoder->dev;
+ drm_modeset_lock_all(dev);
+ drm_mode_object_put(dev, &encoder->base);
++ kfree(encoder->name);
+ list_del(&encoder->head);
+ dev->mode_config.num_encoder--;
+ drm_modeset_unlock_all(dev);
++
++ memset(encoder, 0, sizeof(*encoder));
+ }
+ EXPORT_SYMBOL(drm_encoder_cleanup);
+
+ /**
+- * drm_plane_init - Initialise a new plane object
++ * drm_universal_plane_init - Initialize a new universal plane object
+ * @dev: DRM device
+ * @plane: plane object to init
+ * @possible_crtcs: bitmask of possible CRTCs
+ * @funcs: callbacks for the new plane
+ * @formats: array of supported formats (%DRM_FORMAT_*)
+ * @format_count: number of elements in @formats
+- * @priv: plane is private (hidden from userspace)?
++ * @type: type of plane (overlay, primary, cursor)
+ *
+- * Inits a new object created as base part of a driver plane object.
++ * Initializes a plane object of type @type.
+ *
+- * RETURNS:
++ * Returns:
+ * Zero on success, error code on failure.
+ */
+-int drm_plane_init(struct drm_device *dev, struct drm_plane *plane,
+- unsigned long possible_crtcs,
+- const struct drm_plane_funcs *funcs,
+- const uint32_t *formats, uint32_t format_count,
+- bool priv)
++int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
++ unsigned long possible_crtcs,
++ const struct drm_plane_funcs *funcs,
++ const uint32_t *formats, uint32_t format_count,
++ enum drm_plane_type type)
+ {
+ int ret;
+
+- drm_modeset_lock_all(dev);
+-
+ ret = drm_mode_object_get(dev, &plane->base, DRM_MODE_OBJECT_PLANE);
+ if (ret)
+- goto out;
++ return ret;
++
++ drm_modeset_lock_init(&plane->mutex);
+
+ plane->base.properties = &plane->properties;
+ plane->dev = dev;
+@@ -934,29 +1190,55 @@
+ if (!plane->format_types) {
+ DRM_DEBUG_KMS("out of memory when allocating plane\n");
+ drm_mode_object_put(dev, &plane->base);
+- ret = -ENOMEM;
+- goto out;
++ return -ENOMEM;
+ }
+
+ memcpy(plane->format_types, formats, format_count * sizeof(uint32_t));
+ plane->format_count = format_count;
+ plane->possible_crtcs = possible_crtcs;
++ plane->type = type;
+
+- /* private planes are not exposed to userspace, but depending on
+- * display hardware, might be convenient to allow sharing programming
+- * for the scanout engine with the crtc implementation.
+- */
+- if (!priv) {
+- list_add_tail(&plane->head, &dev->mode_config.plane_list);
+- dev->mode_config.num_plane++;
+- } else {
+- INIT_LIST_HEAD(&plane->head);
+- }
++ list_add_tail(&plane->head, &dev->mode_config.plane_list);
++ dev->mode_config.num_total_plane++;
++ if (plane->type == DRM_PLANE_TYPE_OVERLAY)
++ dev->mode_config.num_overlay_plane++;
++
++ drm_object_attach_property(&plane->base,
++ dev->mode_config.plane_type_property,
++ plane->type);
+
+- out:
+- drm_modeset_unlock_all(dev);
++ return 0;
++}
++EXPORT_SYMBOL(drm_universal_plane_init);
+
+- return ret;
++/**
++ * drm_plane_init - Initialize a legacy plane
++ * @dev: DRM device
++ * @plane: plane object to init
++ * @possible_crtcs: bitmask of possible CRTCs
++ * @funcs: callbacks for the new plane
++ * @formats: array of supported formats (%DRM_FORMAT_*)
++ * @format_count: number of elements in @formats
++ * @is_primary: plane type (primary vs overlay)
++ *
++ * Legacy API to initialize a DRM plane.
++ *
++ * New drivers should call drm_universal_plane_init() instead.
++ *
++ * Returns:
++ * Zero on success, error code on failure.
++ */
++int drm_plane_init(struct drm_device *dev, struct drm_plane *plane,
++ unsigned long possible_crtcs,
++ const struct drm_plane_funcs *funcs,
++ const uint32_t *formats, uint32_t format_count,
++ bool is_primary)
++{
++ enum drm_plane_type type;
++
++ type = is_primary ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
++ return drm_universal_plane_init(dev, plane, possible_crtcs, funcs,
++ formats, format_count, type);
+ }
+ EXPORT_SYMBOL(drm_plane_init);
+
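The legacy entry point is now a thin wrapper, so the following two calls are equivalent:

	drm_plane_init(dev, plane, possible_crtcs, &funcs,
		       formats, format_count, false);
	drm_universal_plane_init(dev, plane, possible_crtcs, &funcs,
				 formats, format_count, DRM_PLANE_TYPE_OVERLAY);

Only drm_universal_plane_init() can create DRM_PLANE_TYPE_CURSOR planes, which is one reason new drivers should prefer it.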
+@@ -975,16 +1257,47 @@
+ drm_modeset_lock_all(dev);
+ kfree(plane->format_types);
+ drm_mode_object_put(dev, &plane->base);
+- /* if not added to a list, it must be a private plane */
+- if (!list_empty(&plane->head)) {
+- list_del(&plane->head);
+- dev->mode_config.num_plane--;
+- }
++
++ BUG_ON(list_empty(&plane->head));
++
++ list_del(&plane->head);
++ dev->mode_config.num_total_plane--;
++ if (plane->type == DRM_PLANE_TYPE_OVERLAY)
++ dev->mode_config.num_overlay_plane--;
+ drm_modeset_unlock_all(dev);
++
++ WARN_ON(plane->state && !plane->funcs->atomic_destroy_state);
++ if (plane->state && plane->funcs->atomic_destroy_state)
++ plane->funcs->atomic_destroy_state(plane, plane->state);
++
++ memset(plane, 0, sizeof(*plane));
+ }
+ EXPORT_SYMBOL(drm_plane_cleanup);
+
+ /**
++ * drm_plane_index - find the index of a registered plane
++ * @plane: plane to find index for
++ *
++ * Given a registered plane, return the index of that plane within a DRM
++ * device's list of planes.
++ */
++unsigned int drm_plane_index(struct drm_plane *plane)
++{
++ unsigned int index = 0;
++ struct drm_plane *tmp;
++
++ list_for_each_entry(tmp, &plane->dev->mode_config.plane_list, head) {
++ if (tmp == plane)
++ return index;
++
++ index++;
++ }
++
++ BUG();
++}
++EXPORT_SYMBOL(drm_plane_index);
++
++/**
+ * drm_plane_force_disable - Forcibly disable a plane
+ * @plane: plane to disable
+ *
+@@ -1000,64 +1313,26 @@
+ if (!plane->fb)
+ return;
+
++ plane->old_fb = plane->fb;
+ ret = plane->funcs->disable_plane(plane);
+- if (ret)
++ if (ret) {
+ DRM_ERROR("failed to disable plane with busy fb\n");
++ plane->old_fb = NULL;
++ return;
++ }
+ /* disconnect the plane from the fb and crtc: */
+- __drm_framebuffer_unreference(plane->fb);
++ __drm_framebuffer_unreference(plane->old_fb);
++ plane->old_fb = NULL;
+ plane->fb = NULL;
+ plane->crtc = NULL;
+ }
+ EXPORT_SYMBOL(drm_plane_force_disable);
+
+-/**
+- * drm_mode_create - create a new display mode
+- * @dev: DRM device
+- *
+- * Create a new drm_display_mode, give it an ID, and return it.
+- *
+- * RETURNS:
+- * Pointer to new mode on success, NULL on error.
+- */
+-struct drm_display_mode *drm_mode_create(struct drm_device *dev)
+-{
+- struct drm_display_mode *nmode;
+-
+- nmode = kzalloc(sizeof(struct drm_display_mode), GFP_KERNEL);
+- if (!nmode)
+- return NULL;
+-
+- if (drm_mode_object_get(dev, &nmode->base, DRM_MODE_OBJECT_MODE)) {
+- kfree(nmode);
+- return NULL;
+- }
+-
+- return nmode;
+-}
+-EXPORT_SYMBOL(drm_mode_create);
+-
+-/**
+- * drm_mode_destroy - remove a mode
+- * @dev: DRM device
+- * @mode: mode to remove
+- *
+- * Free @mode's unique identifier, then free it.
+- */
+-void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode)
+-{
+- if (!mode)
+- return;
+-
+- drm_mode_object_put(dev, &mode->base);
+-
+- kfree(mode);
+-}
+-EXPORT_SYMBOL(drm_mode_destroy);
+-
+ static int drm_mode_create_standard_connector_properties(struct drm_device *dev)
+ {
+ struct drm_property *edid;
+ struct drm_property *dpms;
++ struct drm_property *dev_path;
+
+ /*
+ * Standard properties (apply to all connectors)
+@@ -1072,6 +1347,32 @@
+ ARRAY_SIZE(drm_dpms_enum_list));
+ dev->mode_config.dpms_property = dpms;
+
++ dev_path = drm_property_create(dev,
++ DRM_MODE_PROP_BLOB |
++ DRM_MODE_PROP_IMMUTABLE,
++ "PATH", 0);
++ dev->mode_config.path_property = dev_path;
++
++ dev->mode_config.tile_property = drm_property_create(dev,
++ DRM_MODE_PROP_BLOB |
++ DRM_MODE_PROP_IMMUTABLE,
++ "TILE", 0);
++
++ return 0;
++}
++
++static int drm_mode_create_standard_plane_properties(struct drm_device *dev)
++{
++ struct drm_property *type;
++
++ /*
++ * Standard properties (apply to all planes)
++ */
++ type = drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
++ "type", drm_plane_type_enum_list,
++ ARRAY_SIZE(drm_plane_type_enum_list));
++ dev->mode_config.plane_type_property = type;
++
+ return 0;
+ }
+
+@@ -1117,12 +1418,13 @@
+ * responsible for allocating a list of format names and passing them to
+ * this routine.
+ */
+-int drm_mode_create_tv_properties(struct drm_device *dev, int num_modes,
++int drm_mode_create_tv_properties(struct drm_device *dev,
++ unsigned int num_modes,
+ char *modes[])
+ {
+ struct drm_property *tv_selector;
+ struct drm_property *tv_subconnector;
+- int i;
++ unsigned int i;
+
+ if (dev->mode_config.tv_select_subconnector_property)
+ return 0;
+@@ -1213,6 +1515,33 @@
+ EXPORT_SYMBOL(drm_mode_create_scaling_mode_property);
+
+ /**
++ * drm_mode_create_aspect_ratio_property - create aspect ratio property
++ * @dev: DRM device
++ *
++ * Called by a driver the first time it's needed; must be attached to desired
++ * connectors.
++ *
++ * Returns:
++ * Zero on success, negative errno on failure.
++ */
++int drm_mode_create_aspect_ratio_property(struct drm_device *dev)
++{
++ if (dev->mode_config.aspect_ratio_property)
++ return 0;
++
++ dev->mode_config.aspect_ratio_property =
++ drm_property_create_enum(dev, 0, "aspect ratio",
++ drm_aspect_ratio_enum_list,
++ ARRAY_SIZE(drm_aspect_ratio_enum_list));
++
++ if (dev->mode_config.aspect_ratio_property == NULL)
++ return -ENOMEM;
++
++ return 0;
++}
++EXPORT_SYMBOL(drm_mode_create_aspect_ratio_property);
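As the comment above says, a driver creates the shared property once and then
attaches it to each connector that supports it. A hedged driver-side sketch; the
function name is invented and DRM_MODE_PICTURE_ASPECT_NONE as initial value is an
assumption, not something this patch mandates:

	#include <drm/drm_crtc.h>

	/* Sketch: create the device-wide property, then attach per connector. */
	static int example_attach_aspect_ratio(struct drm_connector *connector)
	{
		struct drm_device *dev = connector->dev;
		int ret;

		ret = drm_mode_create_aspect_ratio_property(dev);
		if (ret)
			return ret;

		/* Assumed initial value: no forced aspect ratio. */
		drm_object_attach_property(&connector->base,
					   dev->mode_config.aspect_ratio_property,
					   DRM_MODE_PICTURE_ASPECT_NONE);
		return 0;
	}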
++
++/**
+ * drm_mode_create_dirty_property - create dirty property
+ * @dev: DRM device
+ *
+@@ -1237,6 +1566,30 @@
+ }
+ EXPORT_SYMBOL(drm_mode_create_dirty_info_property);
+
++/**
++ * drm_mode_create_suggested_offset_properties - create suggested offset properties
++ * @dev: DRM device
++ *
++ * Create the suggested x/y offset properties for connectors.
++ *
++ * Returns:
++ * Zero on success, negative errno on failure.
++ */
++int drm_mode_create_suggested_offset_properties(struct drm_device *dev)
++{
++ if (dev->mode_config.suggested_x_property && dev->mode_config.suggested_y_property)
++ return 0;
++
++ dev->mode_config.suggested_x_property =
++ drm_property_create_range(dev, DRM_MODE_PROP_IMMUTABLE, "suggested X", 0, 0xffffffff);
++
++ dev->mode_config.suggested_y_property =
++ drm_property_create_range(dev, DRM_MODE_PROP_IMMUTABLE, "suggested Y", 0, 0xffffffff);
++
++ if (dev->mode_config.suggested_x_property == NULL ||
++ dev->mode_config.suggested_y_property == NULL)
++ return -ENOMEM;
++ return 0;
++}
++EXPORT_SYMBOL(drm_mode_create_suggested_offset_properties);
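A driver that knows where a connector's output should be placed can attach these
properties with position hints. A minimal sketch, assuming hypothetical x/y
values computed elsewhere by the driver:

	#include <drm/drm_crtc.h>

	static int example_attach_suggested_offsets(struct drm_connector *connector,
						    uint32_t x, uint32_t y)
	{
		struct drm_mode_config *config = &connector->dev->mode_config;
		int ret;

		ret = drm_mode_create_suggested_offset_properties(connector->dev);
		if (ret)
			return ret;

		drm_object_attach_property(&connector->base,
					   config->suggested_x_property, x);
		drm_object_attach_property(&connector->base,
					   config->suggested_y_property, y);
		return 0;
	}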
++
+ static int drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *group)
+ {
+ uint32_t total_objects = 0;
+@@ -1257,6 +1610,16 @@
+ return 0;
+ }
+
++void drm_mode_group_destroy(struct drm_mode_group *group)
++{
++ kfree(group->id_list);
++ group->id_list = NULL;
++}
++
++/*
++ * NOTE: Drivers shouldn't ever call drm_mode_group_init_legacy_group - it is
++ * the drm core's responsibility to set up mode control groups.
++ */
+ int drm_mode_group_init_legacy_group(struct drm_device *dev,
+ struct drm_mode_group *group)
+ {
+@@ -1289,6 +1652,15 @@
+ }
+ EXPORT_SYMBOL(drm_mode_group_init_legacy_group);
+
++void drm_reinit_primary_mode_group(struct drm_device *dev)
++{
++ drm_modeset_lock_all(dev);
++ drm_mode_group_destroy(&dev->primary->mode_group);
++ drm_mode_group_init_legacy_group(dev, &dev->primary->mode_group);
++ drm_modeset_unlock_all(dev);
++}
++EXPORT_SYMBOL(drm_reinit_primary_mode_group);
++
+ /**
+ * drm_crtc_convert_to_umode - convert a drm_display_mode into a modeinfo
+ * @out: drm_mode_modeinfo struct to return to the user
+@@ -1333,8 +1705,8 @@
+ * Convert a drm_mode_modeinfo into a drm_display_mode structure to return to
+ * the caller.
+ *
+- * RETURNS:
+- * Zero on success, errno on failure.
++ * Returns:
++ * Zero on success, negative errno on failure.
+ */
+ static int drm_crtc_convert_umode(struct drm_display_mode *out,
+ const struct drm_mode_modeinfo *in)
+@@ -1376,8 +1748,8 @@
+ *
+ * Called by the user via ioctl.
+ *
+- * RETURNS:
+- * Zero on success, errno on failure.
++ * Returns:
++ * Zero on success, negative errno on failure.
+ */
+ int drm_mode_getresources(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+@@ -1428,10 +1800,12 @@
+ card_res->count_fbs = fb_count;
+ mutex_unlock(&file_priv->fbs_lock);
+
+- drm_modeset_lock_all(dev);
+- mode_group = &file_priv->master->minor->mode_group;
+- if (file_priv->master->minor->type == DRM_MINOR_CONTROL) {
++ /* mode_config.mutex protects the connector list against e.g. DP MST
++ * connector hot-adding. CRTC/Plane lists are invariant. */
++ mutex_lock(&dev->mode_config.mutex);
++ if (!drm_is_primary_client(file_priv)) {
+
++ mode_group = NULL;
+ list_for_each(lh, &dev->mode_config.crtc_list)
+ crtc_count++;
+
+@@ -1442,6 +1816,7 @@
+ encoder_count++;
+ } else {
+
++ mode_group = &file_priv->master->minor->mode_group;
+ crtc_count = mode_group->num_crtcs;
+ connector_count = mode_group->num_connectors;
+ encoder_count = mode_group->num_encoders;
+@@ -1456,7 +1831,7 @@
+ if (card_res->count_crtcs >= crtc_count) {
+ copied = 0;
+ crtc_id = (uint32_t __user *)(unsigned long)card_res->crtc_id_ptr;
+- if (file_priv->master->minor->type == DRM_MINOR_CONTROL) {
++ if (!mode_group) {
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list,
+ head) {
+ DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
+@@ -1483,12 +1858,12 @@
+ if (card_res->count_encoders >= encoder_count) {
+ copied = 0;
+ encoder_id = (uint32_t __user *)(unsigned long)card_res->encoder_id_ptr;
+- if (file_priv->master->minor->type == DRM_MINOR_CONTROL) {
++ if (!mode_group) {
+ list_for_each_entry(encoder,
+ &dev->mode_config.encoder_list,
+ head) {
+ DRM_DEBUG_KMS("[ENCODER:%d:%s]\n", encoder->base.id,
+- drm_get_encoder_name(encoder));
++ encoder->name);
+ if (put_user(encoder->base.id, encoder_id +
+ copied)) {
+ ret = -EFAULT;
+@@ -1514,13 +1889,13 @@
+ if (card_res->count_connectors >= connector_count) {
+ copied = 0;
+ connector_id = (uint32_t __user *)(unsigned long)card_res->connector_id_ptr;
+- if (file_priv->master->minor->type == DRM_MINOR_CONTROL) {
++ if (!mode_group) {
+ list_for_each_entry(connector,
+ &dev->mode_config.connector_list,
+ head) {
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+ connector->base.id,
+- drm_get_connector_name(connector));
++ connector->name);
+ if (put_user(connector->base.id,
+ connector_id + copied)) {
+ ret = -EFAULT;
+@@ -1547,7 +1922,7 @@
+ card_res->count_connectors, card_res->count_encoders);
+
+ out:
+- drm_modeset_unlock_all(dev);
++ mutex_unlock(&dev->mode_config.mutex);
+ return ret;
+ }
+
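From userspace this ioctl follows the two-call pattern described in the comments:
query the counts first, allocate, then fetch the ids. A hedged raw-ioctl sketch
(error handling trimmed; real code would normally go through libdrm's
drmModeGetResources() instead):

	#include <stdint.h>
	#include <stdlib.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <drm/drm.h>

	static uint32_t *example_get_crtc_ids(int fd, uint32_t *count)
	{
		struct drm_mode_card_res res;
		uint32_t *ids;

		memset(&res, 0, sizeof(res));
		if (ioctl(fd, DRM_IOCTL_MODE_GETRESOURCES, &res)) /* 1st: counts */
			return NULL;

		ids = calloc(res.count_crtcs, sizeof(*ids));
		if (!ids)
			return NULL;

		/* Only ask for CRTC ids; zero the other counts so nothing else
		 * is copied. Real code should retry if the counts grew. */
		res.count_fbs = res.count_connectors = res.count_encoders = 0;
		res.crtc_id_ptr = (uintptr_t)ids;
		if (ioctl(fd, DRM_IOCTL_MODE_GETRESOURCES, &res)) { /* 2nd: fill */
			free(ids);
			return NULL;
		}

		*count = res.count_crtcs;
		return ids;
	}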
+@@ -1561,35 +1936,28 @@
+ *
+ * Called by the user via ioctl.
+ *
+- * RETURNS:
+- * Zero on success, errno on failure.
++ * Returns:
++ * Zero on success, negative errno on failure.
+ */
+ int drm_mode_getcrtc(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+ {
+ struct drm_mode_crtc *crtc_resp = data;
+ struct drm_crtc *crtc;
+- struct drm_mode_object *obj;
+- int ret = 0;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+- drm_modeset_lock_all(dev);
+-
+- obj = drm_mode_object_find(dev, crtc_resp->crtc_id,
+- DRM_MODE_OBJECT_CRTC);
+- if (!obj) {
+- ret = -ENOENT;
+- goto out;
+- }
+- crtc = obj_to_crtc(obj);
++ crtc = drm_crtc_find(dev, crtc_resp->crtc_id);
++ if (!crtc)
++ return -ENOENT;
+
++ drm_modeset_lock_crtc(crtc, crtc->primary);
+ crtc_resp->x = crtc->x;
+ crtc_resp->y = crtc->y;
+ crtc_resp->gamma_size = crtc->gamma_size;
+- if (crtc->fb)
+- crtc_resp->fb_id = crtc->fb->base.id;
++ if (crtc->primary->fb)
++ crtc_resp->fb_id = crtc->primary->fb->base.id;
+ else
+ crtc_resp->fb_id = 0;
+
+@@ -1601,10 +1969,9 @@
+ } else {
+ crtc_resp->mode_valid = 0;
+ }
++ drm_modeset_unlock_crtc(crtc);
+
+-out:
+- drm_modeset_unlock_all(dev);
+- return ret;
++ return 0;
+ }
+
+ static bool drm_mode_expose_to_userspace(const struct drm_display_mode *mode,
+@@ -1620,6 +1987,15 @@
+ return true;
+ }
+
++static struct drm_encoder *drm_connector_get_encoder(struct drm_connector *connector)
++{
++ /* For atomic drivers only state objects are synchronously updated and
++ * protected by modeset locks, so check those first. */
++ if (connector->state)
++ return connector->state->best_encoder;
++ return connector->encoder;
++}
++
+ /**
+ * drm_mode_getconnector - get connector configuration
+ * @dev: drm device for the ioctl
+@@ -1630,15 +2006,15 @@
+ *
+ * Called by the user via ioctl.
+ *
+- * RETURNS:
+- * Zero on success, errno on failure.
++ * Returns:
++ * Zero on success, negative errno on failure.
+ */
+ int drm_mode_getconnector(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+ {
+ struct drm_mode_get_connector *out_resp = data;
+- struct drm_mode_object *obj;
+ struct drm_connector *connector;
++ struct drm_encoder *encoder;
+ struct drm_display_mode *mode;
+ int mode_count = 0;
+ int props_count = 0;
+@@ -1661,13 +2037,11 @@
+
+ mutex_lock(&dev->mode_config.mutex);
+
+- obj = drm_mode_object_find(dev, out_resp->connector_id,
+- DRM_MODE_OBJECT_CONNECTOR);
+- if (!obj) {
++ connector = drm_connector_find(dev, out_resp->connector_id);
++ if (!connector) {
+ ret = -ENOENT;
+ goto out;
+ }
+- connector = obj_to_connector(obj);
+
+ props_count = connector->properties.count;
+
+@@ -1695,10 +2069,14 @@
+ out_resp->mm_height = connector->display_info.height_mm;
+ out_resp->subpixel = connector->display_info.subpixel_order;
+ out_resp->connection = connector->status;
+- if (connector->encoder)
+- out_resp->encoder_id = connector->encoder->base.id;
++ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
++
++ encoder = drm_connector_get_encoder(connector);
++ if (encoder)
++ out_resp->encoder_id = encoder->base.id;
+ else
+ out_resp->encoder_id = 0;
++ drm_modeset_unlock(&dev->mode_config.connection_mutex);
+
+ /*
+ * This ioctl is called twice, once to determine how much space is
+@@ -1765,116 +2143,167 @@
+ return ret;
+ }
+
++static struct drm_crtc *drm_encoder_get_crtc(struct drm_encoder *encoder)
++{
++ struct drm_connector *connector;
++ struct drm_device *dev = encoder->dev;
++ bool uses_atomic = false;
++
++ /* For atomic drivers only state objects are synchronously updated and
++ * protected by modeset locks, so check those first. */
++ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
++ if (!connector->state)
++ continue;
++
++ uses_atomic = true;
++
++ if (connector->state->best_encoder != encoder)
++ continue;
++
++ return connector->state->crtc;
++ }
++
++ /* Don't return stale data (e.g. pending async disable). */
++ if (uses_atomic)
++ return NULL;
++
++ return encoder->crtc;
++}
++
++/**
++ * drm_mode_getencoder - get encoder configuration
++ * @dev: drm device for the ioctl
++ * @data: data pointer for the ioctl
++ * @file_priv: drm file for the ioctl call
++ *
++ * Construct an encoder configuration structure to return to the user.
++ *
++ * Called by the user via ioctl.
++ *
++ * Returns:
++ * Zero on success, negative errno on failure.
++ */
+ int drm_mode_getencoder(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+ {
+ struct drm_mode_get_encoder *enc_resp = data;
+- struct drm_mode_object *obj;
+ struct drm_encoder *encoder;
+- int ret = 0;
++ struct drm_crtc *crtc;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+- drm_modeset_lock_all(dev);
+- obj = drm_mode_object_find(dev, enc_resp->encoder_id,
+- DRM_MODE_OBJECT_ENCODER);
+- if (!obj) {
+- ret = -ENOENT;
+- goto out;
+- }
+- encoder = obj_to_encoder(obj);
++ encoder = drm_encoder_find(dev, enc_resp->encoder_id);
++ if (!encoder)
++ return -ENOENT;
+
+- if (encoder->crtc)
++ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
++ crtc = drm_encoder_get_crtc(encoder);
++ if (crtc)
++ enc_resp->crtc_id = crtc->base.id;
++ else if (encoder->crtc)
+ enc_resp->crtc_id = encoder->crtc->base.id;
+ else
+ enc_resp->crtc_id = 0;
++ drm_modeset_unlock(&dev->mode_config.connection_mutex);
++
+ enc_resp->encoder_type = encoder->encoder_type;
+ enc_resp->encoder_id = encoder->base.id;
+ enc_resp->possible_crtcs = encoder->possible_crtcs;
+ enc_resp->possible_clones = encoder->possible_clones;
+
+-out:
+- drm_modeset_unlock_all(dev);
+- return ret;
++ return 0;
+ }
+
+ /**
+- * drm_mode_getplane_res - get plane info
++ * drm_mode_getplane_res - enumerate all plane resources
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+- * Return an plane count and set of IDs.
++ * Construct a list of plane ids to return to the user.
++ *
++ * Called by the user via ioctl.
++ *
++ * Returns:
++ * Zero on success, negative errno on failure.
+ */
+ int drm_mode_getplane_res(struct drm_device *dev, void *data,
+- struct drm_file *file_priv)
++ struct drm_file *file_priv)
+ {
+ struct drm_mode_get_plane_res *plane_resp = data;
+ struct drm_mode_config *config;
+ struct drm_plane *plane;
+ uint32_t __user *plane_ptr;
+- int copied = 0, ret = 0;
++ int copied = 0;
++ unsigned num_planes;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+- drm_modeset_lock_all(dev);
+ config = &dev->mode_config;
+
++ if (file_priv->universal_planes)
++ num_planes = config->num_total_plane;
++ else
++ num_planes = config->num_overlay_plane;
++
+ /*
+ * This ioctl is called twice, once to determine how much space is
+ * needed, and the 2nd time to fill it.
+ */
+- if (config->num_plane &&
+- (plane_resp->count_planes >= config->num_plane)) {
++ if (num_planes &&
++ (plane_resp->count_planes >= num_planes)) {
+ plane_ptr = (uint32_t __user *)(unsigned long)plane_resp->plane_id_ptr;
+
++ /* Plane lists are invariant, no locking needed. */
+ list_for_each_entry(plane, &config->plane_list, head) {
+- if (put_user(plane->base.id, plane_ptr + copied)) {
+- ret = -EFAULT;
+- goto out;
+- }
++ /*
++ * Unless userspace set the 'universal planes'
++ * capability bit, only advertise overlays.
++ */
++ if (plane->type != DRM_PLANE_TYPE_OVERLAY &&
++ !file_priv->universal_planes)
++ continue;
++
++ if (put_user(plane->base.id, plane_ptr + copied))
++ return -EFAULT;
+ copied++;
+ }
+ }
+- plane_resp->count_planes = config->num_plane;
++ plane_resp->count_planes = num_planes;
+
+-out:
+- drm_modeset_unlock_all(dev);
+- return ret;
++ return 0;
+ }
+
+ /**
+- * drm_mode_getplane - get plane info
++ * drm_mode_getplane - get plane configuration
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+- * Return plane info, including formats supported, gamma size, any
+- * current fb, etc.
++ * Construct a plane configuration structure to return to the user.
++ *
++ * Called by the user via ioctl.
++ *
++ * Returns:
++ * Zero on success, negative errno on failure.
+ */
+ int drm_mode_getplane(struct drm_device *dev, void *data,
+- struct drm_file *file_priv)
++ struct drm_file *file_priv)
+ {
+ struct drm_mode_get_plane *plane_resp = data;
+- struct drm_mode_object *obj;
+ struct drm_plane *plane;
+ uint32_t __user *format_ptr;
+- int ret = 0;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+- drm_modeset_lock_all(dev);
+- obj = drm_mode_object_find(dev, plane_resp->plane_id,
+- DRM_MODE_OBJECT_PLANE);
+- if (!obj) {
+- ret = -ENOENT;
+- goto out;
+- }
+- plane = obj_to_plane(obj);
++ plane = drm_plane_find(dev, plane_resp->plane_id);
++ if (!plane)
++ return -ENOENT;
+
++ drm_modeset_lock(&plane->mutex, NULL);
+ if (plane->crtc)
+ plane_resp->crtc_id = plane->crtc->base.id;
+ else
+@@ -1884,6 +2313,7 @@
+ plane_resp->fb_id = plane->fb->base.id;
+ else
+ plane_resp->fb_id = 0;
++ drm_modeset_unlock(&plane->mutex);
+
+ plane_resp->plane_id = plane->base.id;
+ plane_resp->possible_crtcs = plane->possible_crtcs;
+@@ -1899,80 +2329,53 @@
+ if (copy_to_user(format_ptr,
+ plane->format_types,
+ sizeof(uint32_t) * plane->format_count)) {
+- ret = -EFAULT;
+- goto out;
++ return -EFAULT;
+ }
+ }
+ plane_resp->count_format_types = plane->format_count;
+
+-out:
+- drm_modeset_unlock_all(dev);
+- return ret;
++ return 0;
+ }
+
+-/**
+- * drm_mode_setplane - set up or tear down an plane
+- * @dev: DRM device
+- * @data: ioctl data*
+- * @file_priv: DRM file info
++/*
++ * setplane_internal - setplane handler for internal callers
+ *
+- * Set plane info, including placement, fb, scaling, and other factors.
+- * Or pass a NULL fb to disable.
+- */
+-int drm_mode_setplane(struct drm_device *dev, void *data,
+- struct drm_file *file_priv)
++ * Note that we assume an extra reference has already been taken on fb. If the
++ * update fails, this reference will be dropped before return; if it succeeds,
++ * the previous framebuffer (if any) will be unreferenced instead.
++ *
++ * src_{x,y,w,h} are provided in 16.16 fixed point format
++ */
++static int __setplane_internal(struct drm_plane *plane,
++ struct drm_crtc *crtc,
++ struct drm_framebuffer *fb,
++ int32_t crtc_x, int32_t crtc_y,
++ uint32_t crtc_w, uint32_t crtc_h,
++ /* src_{x,y,w,h} values are 16.16 fixed point */
++ uint32_t src_x, uint32_t src_y,
++ uint32_t src_w, uint32_t src_h)
+ {
+- struct drm_mode_set_plane *plane_req = data;
+- struct drm_mode_object *obj;
+- struct drm_plane *plane;
+- struct drm_crtc *crtc;
+- struct drm_framebuffer *fb = NULL, *old_fb = NULL;
+ int ret = 0;
+ unsigned int fb_width, fb_height;
+- int i;
+-
+- if (!drm_core_check_feature(dev, DRIVER_MODESET))
+- return -EINVAL;
+-
+- /*
+- * First, find the plane, crtc, and fb objects. If not available,
+- * we don't bother to call the driver.
+- */
+- obj = drm_mode_object_find(dev, plane_req->plane_id,
+- DRM_MODE_OBJECT_PLANE);
+- if (!obj) {
+- DRM_DEBUG_KMS("Unknown plane ID %d\n",
+- plane_req->plane_id);
+- return -ENOENT;
+- }
+- plane = obj_to_plane(obj);
++ unsigned int i;
+
+ /* No fb means shut it down */
+- if (!plane_req->fb_id) {
+- drm_modeset_lock_all(dev);
+- old_fb = plane->fb;
+- plane->funcs->disable_plane(plane);
+- plane->crtc = NULL;
+- plane->fb = NULL;
+- drm_modeset_unlock_all(dev);
+- goto out;
+- }
+-
+- obj = drm_mode_object_find(dev, plane_req->crtc_id,
+- DRM_MODE_OBJECT_CRTC);
+- if (!obj) {
+- DRM_DEBUG_KMS("Unknown crtc ID %d\n",
+- plane_req->crtc_id);
+- ret = -ENOENT;
++ if (!fb) {
++ plane->old_fb = plane->fb;
++ ret = plane->funcs->disable_plane(plane);
++ if (!ret) {
++ plane->crtc = NULL;
++ plane->fb = NULL;
++ } else {
++ plane->old_fb = NULL;
++ }
+ goto out;
+ }
+- crtc = obj_to_crtc(obj);
+
+- fb = drm_framebuffer_lookup(dev, plane_req->fb_id);
+- if (!fb) {
+- DRM_DEBUG_KMS("Unknown framebuffer ID %d\n",
+- plane_req->fb_id);
+- ret = -ENOENT;
++ /* Check whether this plane is usable on this CRTC */
++ if (!(plane->possible_crtcs & drm_crtc_mask(crtc))) {
++ DRM_DEBUG_KMS("Invalid crtc for plane\n");
++ ret = -EINVAL;
+ goto out;
+ }
+
+@@ -1991,24 +2394,86 @@
+ fb_height = fb->height << 16;
+
+ /* Make sure source coordinates are inside the fb. */
+- if (plane_req->src_w > fb_width ||
+- plane_req->src_x > fb_width - plane_req->src_w ||
+- plane_req->src_h > fb_height ||
+- plane_req->src_y > fb_height - plane_req->src_h) {
++ if (src_w > fb_width ||
++ src_x > fb_width - src_w ||
++ src_h > fb_height ||
++ src_y > fb_height - src_h) {
+ DRM_DEBUG_KMS("Invalid source coordinates "
+ "%u.%06ux%u.%06u+%u.%06u+%u.%06u\n",
+- plane_req->src_w >> 16,
+- ((plane_req->src_w & 0xffff) * 15625) >> 10,
+- plane_req->src_h >> 16,
+- ((plane_req->src_h & 0xffff) * 15625) >> 10,
+- plane_req->src_x >> 16,
+- ((plane_req->src_x & 0xffff) * 15625) >> 10,
+- plane_req->src_y >> 16,
+- ((plane_req->src_y & 0xffff) * 15625) >> 10);
++ src_w >> 16, ((src_w & 0xffff) * 15625) >> 10,
++ src_h >> 16, ((src_h & 0xffff) * 15625) >> 10,
++ src_x >> 16, ((src_x & 0xffff) * 15625) >> 10,
++ src_y >> 16, ((src_y & 0xffff) * 15625) >> 10);
+ ret = -ENOSPC;
+ goto out;
+ }
+
++ plane->old_fb = plane->fb;
++ ret = plane->funcs->update_plane(plane, crtc, fb,
++ crtc_x, crtc_y, crtc_w, crtc_h,
++ src_x, src_y, src_w, src_h);
++ if (!ret) {
++ plane->crtc = crtc;
++ plane->fb = fb;
++ fb = NULL;
++ } else {
++ plane->old_fb = NULL;
++ }
++
++out:
++ if (fb)
++ drm_framebuffer_unreference(fb);
++ if (plane->old_fb)
++ drm_framebuffer_unreference(plane->old_fb);
++ plane->old_fb = NULL;
++
++ return ret;
++}
++
++static int setplane_internal(struct drm_plane *plane,
++ struct drm_crtc *crtc,
++ struct drm_framebuffer *fb,
++ int32_t crtc_x, int32_t crtc_y,
++ uint32_t crtc_w, uint32_t crtc_h,
++ /* src_{x,y,w,h} values are 16.16 fixed point */
++ uint32_t src_x, uint32_t src_y,
++ uint32_t src_w, uint32_t src_h)
++{
++ int ret;
++
++ drm_modeset_lock_all(plane->dev);
++ ret = __setplane_internal(plane, crtc, fb,
++ crtc_x, crtc_y, crtc_w, crtc_h,
++ src_x, src_y, src_w, src_h);
++ drm_modeset_unlock_all(plane->dev);
++
++ return ret;
++}
++
++/**
++ * drm_mode_setplane - configure a plane
++ * @dev: DRM device
++ * @data: ioctl data
++ * @file_priv: DRM file info
++ *
++ * Set plane configuration, including placement, fb, scaling, and other factors.
++ * Or pass a NULL fb to disable (planes may be disabled without providing a
++ * valid crtc).
++ *
++ * Returns:
++ * Zero on success, negative errno on failure.
++ */
++int drm_mode_setplane(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_mode_set_plane *plane_req = data;
++ struct drm_plane *plane;
++ struct drm_crtc *crtc = NULL;
++ struct drm_framebuffer *fb = NULL;
++
++ if (!drm_core_check_feature(dev, DRIVER_MODESET))
++ return -EINVAL;
++
+ /* Give drivers some help against integer overflows */
+ if (plane_req->crtc_w > INT_MAX ||
+ plane_req->crtc_x > INT_MAX - (int32_t) plane_req->crtc_w ||
+@@ -2017,31 +2482,45 @@
+ DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n",
+ plane_req->crtc_w, plane_req->crtc_h,
+ plane_req->crtc_x, plane_req->crtc_y);
+- ret = -ERANGE;
+- goto out;
++ return -ERANGE;
+ }
+
+- drm_modeset_lock_all(dev);
+- ret = plane->funcs->update_plane(plane, crtc, fb,
+- plane_req->crtc_x, plane_req->crtc_y,
+- plane_req->crtc_w, plane_req->crtc_h,
+- plane_req->src_x, plane_req->src_y,
+- plane_req->src_w, plane_req->src_h);
+- if (!ret) {
+- old_fb = plane->fb;
+- plane->crtc = crtc;
+- plane->fb = fb;
+- fb = NULL;
++ /*
++ * First, find the plane, crtc, and fb objects. If not available,
++ * we don't bother to call the driver.
++ */
++ plane = drm_plane_find(dev, plane_req->plane_id);
++ if (!plane) {
++ DRM_DEBUG_KMS("Unknown plane ID %d\n",
++ plane_req->plane_id);
++ return -ENOENT;
+ }
+- drm_modeset_unlock_all(dev);
+
+-out:
+- if (fb)
+- drm_framebuffer_unreference(fb);
+- if (old_fb)
+- drm_framebuffer_unreference(old_fb);
++ if (plane_req->fb_id) {
++ fb = drm_framebuffer_lookup(dev, plane_req->fb_id);
++ if (!fb) {
++ DRM_DEBUG_KMS("Unknown framebuffer ID %d\n",
++ plane_req->fb_id);
++ return -ENOENT;
++ }
+
+- return ret;
++ crtc = drm_crtc_find(dev, plane_req->crtc_id);
++ if (!crtc) {
++ DRM_DEBUG_KMS("Unknown crtc ID %d\n",
++ plane_req->crtc_id);
++ return -ENOENT;
++ }
++ }
++
++ /*
++ * setplane_internal will take care of deref'ing either the old or new
++ * framebuffer depending on success.
++ */
++ return setplane_internal(plane, crtc, fb,
++ plane_req->crtc_x, plane_req->crtc_y,
++ plane_req->crtc_w, plane_req->crtc_h,
++ plane_req->src_x, plane_req->src_y,
++ plane_req->src_w, plane_req->src_h);
+ }
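The src_* fields are 16.16 fixed point, so whole-pixel source coordinates must be
shifted left by 16 before the ioctl. A hedged userspace sketch that scans out a
full framebuffer on a plane (the function name and parameters are illustrative):

	#include <stdint.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <drm/drm.h>

	static int example_set_plane(int fd, uint32_t plane_id, uint32_t crtc_id,
				     uint32_t fb_id, uint32_t w, uint32_t h)
	{
		struct drm_mode_set_plane req;

		memset(&req, 0, sizeof(req));
		req.plane_id = plane_id;
		req.crtc_id = crtc_id;
		req.fb_id = fb_id;	/* 0 would disable the plane */
		req.crtc_w = w;		/* CRTC coords are whole pixels */
		req.crtc_h = h;
		req.src_w = w << 16;	/* source coords are 16.16 fixed point */
		req.src_h = h << 16;

		return ioctl(fd, DRM_IOCTL_MODE_SETPLANE, &req);
	}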
+
+ /**
+@@ -2050,6 +2529,9 @@
+ *
+ * This is a little helper to wrap internal calls to the ->set_config driver
+ * interface. The only thing it adds is correct refcounting dance.
++ *
++ * Returns:
++ * Zero on success, negative errno on failure.
+ */
+ int drm_mode_set_config_internal(struct drm_mode_set *set)
+ {
+@@ -2064,35 +2546,41 @@
+ * crtcs. Atomic modeset will have saner semantics ...
+ */
+ list_for_each_entry(tmp, &crtc->dev->mode_config.crtc_list, head)
+- tmp->old_fb = tmp->fb;
++ tmp->primary->old_fb = tmp->primary->fb;
+
+ fb = set->fb;
+
+ ret = crtc->funcs->set_config(set);
+ if (ret == 0) {
+- /* crtc->fb must be updated by ->set_config, enforces this. */
+- WARN_ON(fb != crtc->fb);
++ crtc->primary->crtc = crtc;
++ crtc->primary->fb = fb;
+ }
+
+ list_for_each_entry(tmp, &crtc->dev->mode_config.crtc_list, head) {
+- if (tmp->fb)
+- drm_framebuffer_reference(tmp->fb);
+- if (tmp->old_fb)
+- drm_framebuffer_unreference(tmp->old_fb);
++ if (tmp->primary->fb)
++ drm_framebuffer_reference(tmp->primary->fb);
++ if (tmp->primary->old_fb)
++ drm_framebuffer_unreference(tmp->primary->old_fb);
++ tmp->primary->old_fb = NULL;
+ }
+
+ return ret;
+ }
+ EXPORT_SYMBOL(drm_mode_set_config_internal);
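Drivers use this wrapper instead of calling ->set_config directly so the
framebuffer refcounting dance above stays correct. A minimal sketch of a driver
re-applying a saved configuration; all names are assumptions, not from this
patch:

	#include <drm/drm_crtc.h>

	/* Sketch: re-apply a previously saved configuration. The caller must
	 * hold the modeset locks, as in the setcrtc ioctl path. */
	static int example_restore_config(struct drm_crtc *crtc,
					  struct drm_display_mode *mode,
					  struct drm_framebuffer *fb,
					  struct drm_connector **connectors,
					  size_t num_connectors)
	{
		struct drm_mode_set set = {
			.crtc = crtc,
			.mode = mode,
			.fb = fb,
			.connectors = connectors,
			.num_connectors = num_connectors,
		};

		return drm_mode_set_config_internal(&set);
	}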
+
+-/*
+- * Checks that the framebuffer is big enough for the CRTC viewport
+- * (x, y, hdisplay, vdisplay)
+- */
+-static int drm_crtc_check_viewport(const struct drm_crtc *crtc,
+- int x, int y,
+- const struct drm_display_mode *mode,
+- const struct drm_framebuffer *fb)
++/**
++ * drm_crtc_check_viewport - Checks that a framebuffer is big enough for the
++ * CRTC viewport
++ * @crtc: CRTC that framebuffer will be displayed on
++ * @x: x panning
++ * @y: y panning
++ * @mode: mode that framebuffer will be displayed under
++ * @fb: framebuffer to check size of
++ */
++int drm_crtc_check_viewport(const struct drm_crtc *crtc,
++ int x, int y,
++ const struct drm_display_mode *mode,
++ const struct drm_framebuffer *fb)
+
+ {
+ int hdisplay, vdisplay;
+@@ -2123,6 +2611,7 @@
+
+ return 0;
+ }
++EXPORT_SYMBOL(drm_crtc_check_viewport);
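Now that the check is exported, drivers can reuse it, for example to validate
that a framebuffer is large enough before accepting a page flip against the
current mode. A hedged sketch:

	#include <drm/drm_crtc.h>

	/* Sketch: reject a flip whose fb is too small for the viewport. */
	static int example_validate_flip(struct drm_crtc *crtc,
					 struct drm_framebuffer *fb)
	{
		return drm_crtc_check_viewport(crtc, crtc->x, crtc->y,
					       &crtc->mode, fb);
	}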
+
+ /**
+ * drm_mode_setcrtc - set CRTC configuration
+@@ -2134,15 +2623,14 @@
+ *
+ * Called by the user via ioctl.
+ *
+- * RETURNS:
+- * Zero on success, errno on failure.
++ * Returns:
++ * Zero on success, negative errno on failure.
+ */
+ int drm_mode_setcrtc(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+ {
+ struct drm_mode_config *config = &dev->mode_config;
+ struct drm_mode_crtc *crtc_req = data;
+- struct drm_mode_object *obj;
+ struct drm_crtc *crtc;
+ struct drm_connector **connector_set = NULL, *connector;
+ struct drm_framebuffer *fb = NULL;
+@@ -2160,26 +2648,24 @@
+ return -ERANGE;
+
+ drm_modeset_lock_all(dev);
+- obj = drm_mode_object_find(dev, crtc_req->crtc_id,
+- DRM_MODE_OBJECT_CRTC);
+- if (!obj) {
++ crtc = drm_crtc_find(dev, crtc_req->crtc_id);
++ if (!crtc) {
+ DRM_DEBUG_KMS("Unknown CRTC ID %d\n", crtc_req->crtc_id);
+ ret = -ENOENT;
+ goto out;
+ }
+- crtc = obj_to_crtc(obj);
+ DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
+
+ if (crtc_req->mode_valid) {
+ /* If we have a mode we need a framebuffer. */
+ /* If we pass -1, set the mode with the currently bound fb */
+ if (crtc_req->fb_id == -1) {
+- if (!crtc->fb) {
++ if (!crtc->primary->fb) {
+ DRM_DEBUG_KMS("CRTC doesn't have current FB\n");
+ ret = -EINVAL;
+ goto out;
+ }
+- fb = crtc->fb;
++ fb = crtc->primary->fb;
+ /* Make refcounting symmetric with the lookup path. */
+ drm_framebuffer_reference(fb);
+ } else {
+@@ -2250,39 +2736,132 @@
+ goto out;
+ }
+
+- obj = drm_mode_object_find(dev, out_id,
+- DRM_MODE_OBJECT_CONNECTOR);
+- if (!obj) {
++ connector = drm_connector_find(dev, out_id);
++ if (!connector) {
+ DRM_DEBUG_KMS("Connector id %d unknown\n",
+ out_id);
+ ret = -ENOENT;
+ goto out;
+ }
+- connector = obj_to_connector(obj);
+- DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+- connector->base.id,
+- drm_get_connector_name(connector));
++ DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
++ connector->base.id,
++ connector->name);
++
++ connector_set[i] = connector;
++ }
++ }
++
++ set.crtc = crtc;
++ set.x = crtc_req->x;
++ set.y = crtc_req->y;
++ set.mode = mode;
++ set.connectors = connector_set;
++ set.num_connectors = crtc_req->count_connectors;
++ set.fb = fb;
++ ret = drm_mode_set_config_internal(&set);
++
++out:
++ if (fb)
++ drm_framebuffer_unreference(fb);
++
++ kfree(connector_set);
++ drm_mode_destroy(dev, mode);
++ drm_modeset_unlock_all(dev);
++ return ret;
++}
++
++/**
++ * drm_mode_cursor_universal - translate legacy cursor ioctl call into a
++ * universal plane handler call
++ * @crtc: crtc to update cursor for
++ * @req: data pointer for the ioctl
++ * @file_priv: drm file for the ioctl call
++ *
++ * Legacy cursor ioctl's work directly with driver buffer handles. To
++ * translate legacy ioctl calls into universal plane handler calls, we need to
++ * wrap the native buffer handle in a drm_framebuffer.
++ *
++ * Note that we assume any handle passed to the legacy ioctls was a 32-bit ARGB
++ * buffer with a pitch of 4*width; the universal plane interface should be used
++ * directly in cases where the hardware can support other buffer settings and
++ * userspace wants to make use of these capabilities.
++ *
++ * Returns:
++ * Zero on success, negative errno on failure.
++ */
++static int drm_mode_cursor_universal(struct drm_crtc *crtc,
++ struct drm_mode_cursor2 *req,
++ struct drm_file *file_priv)
++{
++ struct drm_device *dev = crtc->dev;
++ struct drm_framebuffer *fb = NULL;
++ struct drm_mode_fb_cmd2 fbreq = {
++ .width = req->width,
++ .height = req->height,
++ .pixel_format = DRM_FORMAT_ARGB8888,
++ .pitches = { req->width * 4 },
++ .handles = { req->handle },
++ };
++ int32_t crtc_x, crtc_y;
++ uint32_t crtc_w = 0, crtc_h = 0;
++ uint32_t src_w = 0, src_h = 0;
++ int ret = 0;
++
++ BUG_ON(!crtc->cursor);
++ WARN_ON(crtc->cursor->crtc != crtc && crtc->cursor->crtc != NULL);
++
++ /*
++ * Obtain fb we'll be using (either new or existing) and take an extra
++ * reference to it if fb != NULL. setplane will take care of dropping
++ * the reference if the plane update fails.
++ */
++ if (req->flags & DRM_MODE_CURSOR_BO) {
++ if (req->handle) {
++ fb = add_framebuffer_internal(dev, &fbreq, file_priv);
++ if (IS_ERR(fb)) {
++ DRM_DEBUG_KMS("failed to wrap cursor buffer in drm framebuffer\n");
++ return PTR_ERR(fb);
++ }
+
+- connector_set[i] = connector;
++ drm_framebuffer_reference(fb);
++ } else {
++ fb = NULL;
+ }
++ } else {
++ fb = crtc->cursor->fb;
++ if (fb)
++ drm_framebuffer_reference(fb);
+ }
+
+- set.crtc = crtc;
+- set.x = crtc_req->x;
+- set.y = crtc_req->y;
+- set.mode = mode;
+- set.connectors = connector_set;
+- set.num_connectors = crtc_req->count_connectors;
+- set.fb = fb;
+- ret = drm_mode_set_config_internal(&set);
++ if (req->flags & DRM_MODE_CURSOR_MOVE) {
++ crtc_x = req->x;
++ crtc_y = req->y;
++ } else {
++ crtc_x = crtc->cursor_x;
++ crtc_y = crtc->cursor_y;
++ }
+
+-out:
+- if (fb)
+- drm_framebuffer_unreference(fb);
++ if (fb) {
++ crtc_w = fb->width;
++ crtc_h = fb->height;
++ src_w = fb->width << 16;
++ src_h = fb->height << 16;
++ }
++
++ /*
++ * setplane_internal will take care of deref'ing either the old or new
++ * framebuffer depending on success.
++ */
++ ret = __setplane_internal(crtc->cursor, crtc, fb,
++ crtc_x, crtc_y, crtc_w, crtc_h,
++ 0, 0, src_w, src_h);
++
++ /* Update successful; save new cursor position, if necessary */
++ if (ret == 0 && req->flags & DRM_MODE_CURSOR_MOVE) {
++ crtc->cursor_x = req->x;
++ crtc->cursor_y = req->y;
++ }
+
+- kfree(connector_set);
+- drm_mode_destroy(dev, mode);
+- drm_modeset_unlock_all(dev);
+ return ret;
+ }
+
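Seen from userspace, the legacy ioctl that lands here passes a bare buffer
handle, which is why the code above has to assume a 32-bit ARGB layout with a
4*width pitch. A hedged sketch of setting and positioning a cursor in one call:

	#include <stdint.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <drm/drm.h>

	/* Assumes 'handle' names a w*h ARGB8888 buffer with pitch = 4*w,
	 * matching the wrapping done by drm_mode_cursor_universal(). */
	static int example_set_cursor(int fd, uint32_t crtc_id, uint32_t handle,
				      uint32_t w, uint32_t h,
				      int32_t x, int32_t y)
	{
		struct drm_mode_cursor req;

		memset(&req, 0, sizeof(req));
		req.flags = DRM_MODE_CURSOR_BO | DRM_MODE_CURSOR_MOVE;
		req.crtc_id = crtc_id;
		req.handle = handle;	/* 0 hides the cursor */
		req.width = w;
		req.height = h;
		req.x = x;
		req.y = y;

		return ioctl(fd, DRM_IOCTL_MODE_CURSOR, &req);
	}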
+@@ -2290,7 +2869,6 @@
+ struct drm_mode_cursor2 *req,
+ struct drm_file *file_priv)
+ {
+- struct drm_mode_object *obj;
+ struct drm_crtc *crtc;
+ int ret = 0;
+
+@@ -2300,14 +2878,22 @@
+ if (!req->flags || (~DRM_MODE_CURSOR_FLAGS & req->flags))
+ return -EINVAL;
+
+- obj = drm_mode_object_find(dev, req->crtc_id, DRM_MODE_OBJECT_CRTC);
+- if (!obj) {
++ crtc = drm_crtc_find(dev, req->crtc_id);
++ if (!crtc) {
+ DRM_DEBUG_KMS("Unknown CRTC ID %d\n", req->crtc_id);
+ return -ENOENT;
+ }
+- crtc = obj_to_crtc(obj);
+
+- mutex_lock(&crtc->mutex);
++ /*
++ * If this crtc has a universal cursor plane, call that plane's update
++ * handler rather than using legacy cursor handlers.
++ */
++ drm_modeset_lock_crtc(crtc, crtc->cursor);
++ if (crtc->cursor) {
++ ret = drm_mode_cursor_universal(crtc, req, file_priv);
++ goto out;
++ }
++
+ if (req->flags & DRM_MODE_CURSOR_BO) {
+ if (!crtc->funcs->cursor_set && !crtc->funcs->cursor_set2) {
+ ret = -ENXIO;
+@@ -2331,13 +2917,28 @@
+ }
+ }
+ out:
+- mutex_unlock(&crtc->mutex);
++ drm_modeset_unlock_crtc(crtc);
+
+ return ret;
+
+ }
++
++
++/**
++ * drm_mode_cursor_ioctl - set CRTC's cursor configuration
++ * @dev: drm device for the ioctl
++ * @data: data pointer for the ioctl
++ * @file_priv: drm file for the ioctl call
++ *
++ * Set the cursor configuration based on user request.
++ *
++ * Called by the user via ioctl.
++ *
++ * Returns:
++ * Zero on success, negative errno on failure.
++ */
+ int drm_mode_cursor_ioctl(struct drm_device *dev,
+- void *data, struct drm_file *file_priv)
++ void *data, struct drm_file *file_priv)
+ {
+ struct drm_mode_cursor *req = data;
+ struct drm_mode_cursor2 new_req;
+@@ -2348,6 +2949,21 @@
+ return drm_mode_cursor_common(dev, &new_req, file_priv);
+ }
+
++/**
++ * drm_mode_cursor2_ioctl - set CRTC's cursor configuration
++ * @dev: drm device for the ioctl
++ * @data: data pointer for the ioctl
++ * @file_priv: drm file for the ioctl call
++ *
++ * Set the cursor configuration based on user request. This implements the 2nd
++ * version of the cursor ioctl, which allows userspace to additionally specify
++ * the hotspot of the pointer.
++ *
++ * Called by the user via ioctl.
++ *
++ * Returns:
++ * Zero on success, negative errno on failure.
++ */
+ int drm_mode_cursor2_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+ {
+@@ -2355,7 +2971,14 @@
+ return drm_mode_cursor_common(dev, req, file_priv);
+ }
+
+-/* Original addfb only supported RGB formats, so figure out which one */
++/**
++ * drm_mode_legacy_fb_format - compute drm fourcc code from legacy description
++ * @bpp: bits per pixel
++ * @depth: bit depth per pixel
++ *
++ * Computes a drm fourcc pixel format code for the given @bpp/@depth values.
++ * Useful in fbdev emulation code, since that deals in those values.
++ */
+ uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth)
+ {
+ uint32_t fmt;
+@@ -2397,23 +3020,22 @@
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
+ *
+- * Add a new FB to the specified CRTC, given a user request.
++ * Add a new FB to the specified CRTC, given a user request. This is the
++ * original addfb ioctl which only supported RGB formats.
+ *
+ * Called by the user via ioctl.
+ *
+- * RETURNS:
+- * Zero on success, errno on failure.
++ * Returns:
++ * Zero on success, negative errno on failure.
+ */
+ int drm_mode_addfb(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+ {
+ struct drm_mode_fb_cmd *or = data;
+ struct drm_mode_fb_cmd2 r = {};
+- struct drm_mode_config *config = &dev->mode_config;
+- struct drm_framebuffer *fb;
+- int ret = 0;
++ int ret;
+
+- /* Use new struct with format internally */
++ /* convert to new format and call new ioctl */
+ r.fb_id = or->fb_id;
+ r.width = or->width;
+ r.height = or->height;
+@@ -2421,28 +3043,13 @@
+ r.pixel_format = drm_mode_legacy_fb_format(or->bpp, or->depth);
+ r.handles[0] = or->handle;
+
+- if (!drm_core_check_feature(dev, DRIVER_MODESET))
+- return -EINVAL;
+-
+- if ((config->min_width > r.width) || (r.width > config->max_width))
+- return -EINVAL;
+-
+- if ((config->min_height > r.height) || (r.height > config->max_height))
+- return -EINVAL;
+-
+- fb = dev->mode_config.funcs->fb_create(dev, file_priv, &r);
+- if (IS_ERR(fb)) {
+- DRM_DEBUG_KMS("could not create framebuffer\n");
+- return PTR_ERR(fb);
+- }
++ ret = drm_mode_addfb2(dev, &r, file_priv);
++ if (ret)
++ return ret;
+
+- mutex_lock(&file_priv->fbs_lock);
+- or->fb_id = fb->base.id;
+- list_add(&fb->filp_head, &file_priv->fbs);
+- DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
+- mutex_unlock(&file_priv->fbs_lock);
++ or->fb_id = r.fb_id;
+
+- return ret;
++ return 0;
+ }
+
+ static int format_check(const struct drm_mode_fb_cmd2 *r)
+@@ -2534,7 +3141,7 @@
+ num_planes = drm_format_num_planes(r->pixel_format);
+
+ if (r->width == 0 || r->width % hsub) {
+- DRM_DEBUG_KMS("bad framebuffer width %u\n", r->height);
++ DRM_DEBUG_KMS("bad framebuffer width %u\n", r->width);
+ return -EINVAL;
+ }
+
+@@ -2568,54 +3175,38 @@
+ return 0;
+ }
+
+-/**
+- * drm_mode_addfb2 - add an FB to the graphics configuration
+- * @dev: drm device for the ioctl
+- * @data: data pointer for the ioctl
+- * @file_priv: drm file for the ioctl call
+- *
+- * Add a new FB to the specified CRTC, given a user request with format.
+- *
+- * Called by the user via ioctl.
+- *
+- * RETURNS:
+- * Zero on success, errno on failure.
+- */
+-int drm_mode_addfb2(struct drm_device *dev,
+- void *data, struct drm_file *file_priv)
++static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev,
++ struct drm_mode_fb_cmd2 *r,
++ struct drm_file *file_priv)
+ {
+- struct drm_mode_fb_cmd2 *r = data;
+ struct drm_mode_config *config = &dev->mode_config;
+ struct drm_framebuffer *fb;
+ int ret;
+
+- if (!drm_core_check_feature(dev, DRIVER_MODESET))
+- return -EINVAL;
+-
+ if (r->flags & ~DRM_MODE_FB_INTERLACED) {
+ DRM_DEBUG_KMS("bad framebuffer flags 0x%08x\n", r->flags);
+- return -EINVAL;
++ return ERR_PTR(-EINVAL);
+ }
+
+ if ((config->min_width > r->width) || (r->width > config->max_width)) {
+ DRM_DEBUG_KMS("bad framebuffer width %d, should be >= %d && <= %d\n",
+ r->width, config->min_width, config->max_width);
+- return -EINVAL;
++ return ERR_PTR(-EINVAL);
+ }
+ if ((config->min_height > r->height) || (r->height > config->max_height)) {
+ DRM_DEBUG_KMS("bad framebuffer height %d, should be >= %d && <= %d\n",
+ r->height, config->min_height, config->max_height);
+- return -EINVAL;
++ return ERR_PTR(-EINVAL);
+ }
+
+ ret = framebuffer_check(r);
+ if (ret)
+- return ret;
++ return ERR_PTR(ret);
+
+ fb = dev->mode_config.funcs->fb_create(dev, file_priv, r);
+ if (IS_ERR(fb)) {
+ DRM_DEBUG_KMS("could not create framebuffer\n");
+- return PTR_ERR(fb);
++ return fb;
+ }
+
+ mutex_lock(&file_priv->fbs_lock);
+@@ -2624,8 +3215,37 @@
+ DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
+ mutex_unlock(&file_priv->fbs_lock);
+
++ return fb;
++}
++
++/**
++ * drm_mode_addfb2 - add an FB to the graphics configuration
++ * @dev: drm device for the ioctl
++ * @data: data pointer for the ioctl
++ * @file_priv: drm file for the ioctl call
++ *
++ * Add a new FB to the specified CRTC, given a user request with format. This is
++ * the 2nd version of the addfb ioctl, which supports multi-planar framebuffers
++ * and uses fourcc codes as pixel format specifiers.
++ *
++ * Called by the user via ioctl.
++ *
++ * Returns:
++ * Zero on success, negative errno on failure.
++ */
++int drm_mode_addfb2(struct drm_device *dev,
++ void *data, struct drm_file *file_priv)
++{
++ struct drm_framebuffer *fb;
+
+- return ret;
++ if (!drm_core_check_feature(dev, DRIVER_MODESET))
++ return -EINVAL;
++
++ fb = add_framebuffer_internal(dev, data, file_priv);
++ if (IS_ERR(fb))
++ return PTR_ERR(fb);
++
++ return 0;
+ }
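Userspace reaches add_framebuffer_internal() through the ADDFB2 ioctl, passing a
fourcc format code directly (legacy callers get one computed by
drm_mode_legacy_fb_format() above). A hedged sketch for a single-plane
XRGB8888 buffer:

	#include <stdint.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <drm/drm.h>
	#include <drm/drm_fourcc.h>

	static int example_addfb2(int fd, uint32_t handle, uint32_t width,
				  uint32_t height, uint32_t pitch,
				  uint32_t *fb_id)
	{
		struct drm_mode_fb_cmd2 r;

		memset(&r, 0, sizeof(r));
		r.width = width;
		r.height = height;
		r.pixel_format = DRM_FORMAT_XRGB8888; /* fourcc, not bpp/depth */
		r.handles[0] = handle;
		r.pitches[0] = pitch;

		if (ioctl(fd, DRM_IOCTL_MODE_ADDFB2, &r))
			return -1;

		*fb_id = r.fb_id;
		return 0;
	}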
+
+ /**
+@@ -2638,8 +3258,8 @@
+ *
+ * Called by the user via ioctl.
+ *
+- * RETURNS:
+- * Zero on success, errno on failure.
++ * Returns:
++ * Zero on success, negative errno on failure.
+ */
+ int drm_mode_rmfb(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+@@ -2692,8 +3312,8 @@
+ *
+ * Called by the user via ioctl.
+ *
+- * RETURNS:
+- * Zero on success, errno on failure.
++ * Returns:
++ * Zero on success, negative errno on failure.
+ */
+ int drm_mode_getfb(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+@@ -2715,7 +3335,8 @@
+ r->bpp = fb->bits_per_pixel;
+ r->pitch = fb->pitches[0];
+ if (fb->funcs->create_handle) {
+- if (file_priv->is_master || capable(CAP_SYS_ADMIN)) {
++ if (file_priv->is_master || capable(CAP_SYS_ADMIN) ||
++ drm_is_control_client(file_priv)) {
+ ret = fb->funcs->create_handle(fb, file_priv,
+ &r->handle);
+ } else {
+@@ -2736,6 +3357,25 @@
+ return ret;
+ }
+
++/**
++ * drm_mode_dirtyfb_ioctl - flush frontbuffer rendering on an FB
++ * @dev: drm device for the ioctl
++ * @data: data pointer for the ioctl
++ * @file_priv: drm file for the ioctl call
++ *
++ * Look up the FB and flush out the damaged area supplied by userspace as a clip
++ * rectangle list. Generic userspace which does frontbuffer rendering must call
++ * this ioctl to flush out the changes on manual-update display outputs, e.g.
++ * USB DisplayLink, MIPI manual-update panels or eDP panel self-refresh modes.
++ *
++ * Modesetting drivers which always update the frontbuffer do not need to
++ * implement the corresponding ->dirty framebuffer callback.
++ *
++ * Called by the user via ioctl.
++ *
++ * Returns:
++ * Zero on success, negative errno on failure.
++ */
+ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+ {
+@@ -2813,15 +3453,24 @@
+ *
+ * Called by the user via ioctl.
+ *
+- * RETURNS:
+- * Zero on success, errno on failure.
+ */
+ void drm_fb_release(struct drm_file *priv)
+ {
+ struct drm_device *dev = priv->minor->dev;
+ struct drm_framebuffer *fb, *tfb;
+
+- mutex_lock(&priv->fbs_lock);
++ /*
++ * When the file gets released that means no one else can access the fb
++ * list any more, so no need to grab fpriv->fbs_lock. And we need to
++ * avoid upsetting lockdep since the universal cursor code adds a
++ * framebuffer while holding mutex locks.
++ *
++ * Note that a real deadlock between fpriv->fbs_lock and the modeset
++ * locks is impossible here since no one else but this function can get
++ * at it any more.
++ */
+ list_for_each_entry_safe(fb, tfb, &priv->fbs, filp_head) {
+
+ mutex_lock(&dev->mode_config.fb_lock);
+@@ -2834,9 +3483,26 @@
+ /* This will also drop the fpriv->fbs reference. */
+ drm_framebuffer_remove(fb);
+ }
+- mutex_unlock(&priv->fbs_lock);
+ }
+
++/**
++ * drm_property_create - create a new property type
++ * @dev: drm device
++ * @flags: flags specifying the property type
++ * @name: name of the property
++ * @num_values: number of pre-defined values
++ *
++ * This creates a new generic drm property which can then be attached to a drm
++ * object with drm_object_attach_property. The returned property object must be
++ * freed with drm_property_destroy.
++ *
++ * Note that the DRM core keeps a per-device list of properties and that, if
++ * drm_mode_config_cleanup() is called, it will destroy all properties created
++ * by the driver.
++ *
++ * Returns:
++ * A pointer to the newly created property on success, NULL on failure.
++ */
+ struct drm_property *drm_property_create(struct drm_device *dev, int flags,
+ const char *name, int num_values)
+ {
+@@ -2847,6 +3513,8 @@
+ if (!property)
+ return NULL;
+
++ property->dev = dev;
++
+ if (num_values) {
+ property->values = kzalloc(sizeof(uint64_t)*num_values, GFP_KERNEL);
+ if (!property->values)
+@@ -2859,7 +3527,7 @@
+
+ property->flags = flags;
+ property->num_values = num_values;
+- INIT_LIST_HEAD(&property->enum_blob_list);
++ INIT_LIST_HEAD(&property->enum_list);
+
+ if (name) {
+ strncpy(property->name, name, DRM_PROP_NAME_LEN);
+@@ -2867,6 +3535,9 @@
+ }
+
+ list_add_tail(&property->head, &dev->mode_config.property_list);
++
++ WARN_ON(!drm_property_type_valid(property));
++
+ return property;
+ fail:
+ kfree(property->values);
+@@ -2875,6 +3546,24 @@
+ }
+ EXPORT_SYMBOL(drm_property_create);
+
++/**
++ * drm_property_create_enum - create a new enumeration property type
++ * @dev: drm device
++ * @flags: flags specifying the property type
++ * @name: name of the property
++ * @props: enumeration lists with property values
++ * @num_values: number of pre-defined values
++ *
++ * This creates a new generic drm property which can then be attached to a drm
++ * object with drm_object_attach_property. The returned property object must be
++ * freed with drm_property_destroy.
++ *
++ * Userspace is only allowed to set one of the predefined values for enumeration
++ * properties.
++ *
++ * Returns:
++ * A pointer to the newly created property on success, NULL on failure.
++ */
+ struct drm_property *drm_property_create_enum(struct drm_device *dev, int flags,
+ const char *name,
+ const struct drm_prop_enum_list *props,
+@@ -2903,22 +3592,50 @@
+ }
+ EXPORT_SYMBOL(drm_property_create_enum);
+
++/**
++ * drm_property_create_bitmask - create a new bitmask property type
++ * @dev: drm device
++ * @flags: flags specifying the property type
++ * @name: name of the property
++ * @props: enumeration lists with property bitflags
++ * @num_props: size of the @props array
++ * @supported_bits: bitmask of all supported enumeration values
++ *
++ * This creates a new bitmask drm property which can then be attached to a drm
++ * object with drm_object_attach_property. The returned property object must be
++ * freed with drm_property_destroy.
++ *
++ * Compared to plain enumeration properties userspace is allowed to set any
++ * or'ed together combination of the predefined property bitflag values.
++ *
++ * Returns:
++ * A pointer to the newly created property on success, NULL on failure.
++ */
+ struct drm_property *drm_property_create_bitmask(struct drm_device *dev,
+ int flags, const char *name,
+ const struct drm_prop_enum_list *props,
+- int num_values)
++ int num_props,
++ uint64_t supported_bits)
+ {
+ struct drm_property *property;
+- int i, ret;
++ int i, ret, index = 0;
++ int num_values = hweight64(supported_bits);
+
+ flags |= DRM_MODE_PROP_BITMASK;
+
+ property = drm_property_create(dev, flags, name, num_values);
+ if (!property)
+ return NULL;
++ for (i = 0; i < num_props; i++) {
++ if (!(supported_bits & (1ULL << props[i].type)))
++ continue;
+
+- for (i = 0; i < num_values; i++) {
+- ret = drm_property_add_enum(property, i,
++ if (WARN_ON(index >= num_values)) {
++ drm_property_destroy(dev, property);
++ return NULL;
++ }
++
++ ret = drm_property_add_enum(property, index++,
+ props[i].type,
+ props[i].name);
+ if (ret) {
+@@ -2931,14 +3648,12 @@
+ }
+ EXPORT_SYMBOL(drm_property_create_bitmask);
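With the new supported_bits argument, a driver can pass its full enum table once
and mask off the bits the hardware lacks; only the supported entries are added.
A hedged sketch whose property name and bit values are illustrative, not from
this patch:

	#include <linux/kernel.h>
	#include <drm/drm_crtc.h>

	/* Illustrative enum table; .type doubles as the bit position (0..63). */
	static const struct drm_prop_enum_list example_feature_list[] = {
		{ 0, "featureA" },
		{ 1, "featureB" },
		{ 2, "featureC" },
	};

	static struct drm_property *example_create_features(struct drm_device *dev)
	{
		/* Only featureA and featureC are supported here; featureB is
		 * masked out by supported_bits and never exposed. */
		return drm_property_create_bitmask(dev, 0, "features",
						   example_feature_list,
						   ARRAY_SIZE(example_feature_list),
						   (1ULL << 0) | (1ULL << 2));
	}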
+
+-struct drm_property *drm_property_create_range(struct drm_device *dev, int flags,
+- const char *name,
++static struct drm_property *property_create_range(struct drm_device *dev,
++ int flags, const char *name,
+ uint64_t min, uint64_t max)
+ {
+ struct drm_property *property;
+
+- flags |= DRM_MODE_PROP_RANGE;
+-
+ property = drm_property_create(dev, flags, name, 2);
+ if (!property)
+ return NULL;
+@@ -2948,25 +3663,94 @@
+
+ return property;
+ }
++
++/**
++ * drm_property_create_range - create a new ranged property type
++ * @dev: drm device
++ * @flags: flags specifying the property type
++ * @name: name of the property
++ * @min: minimum value of the property
++ * @max: maximum value of the property
++ *
++ * This creates a new generic drm property which can then be attached to a drm
++ * object with drm_object_attach_property. The returned property object must be
++ * freed with drm_property_destroy.
++ *
++ * Userspace is allowed to set any integer value in the [min, max] range,
++ * inclusive.
++ *
++ * Returns:
++ * A pointer to the newly created property on success, NULL on failure.
++ */
++struct drm_property *drm_property_create_range(struct drm_device *dev, int flags,
++ const char *name,
++ uint64_t min, uint64_t max)
++{
++ return property_create_range(dev, DRM_MODE_PROP_RANGE | flags,
++ name, min, max);
++}
+ EXPORT_SYMBOL(drm_property_create_range);
+
++struct drm_property *drm_property_create_signed_range(struct drm_device *dev,
++ int flags, const char *name,
++ int64_t min, int64_t max)
++{
++ return property_create_range(dev, DRM_MODE_PROP_SIGNED_RANGE | flags,
++ name, I642U64(min), I642U64(max));
++}
++EXPORT_SYMBOL(drm_property_create_signed_range);
++
++struct drm_property *drm_property_create_object(struct drm_device *dev,
++ int flags, const char *name, uint32_t type)
++{
++ struct drm_property *property;
++
++ flags |= DRM_MODE_PROP_OBJECT;
++
++ property = drm_property_create(dev, flags, name, 1);
++ if (!property)
++ return NULL;
++
++ property->values[0] = type;
++
++ return property;
++}
++EXPORT_SYMBOL(drm_property_create_object);
++
++/**
++ * drm_property_add_enum - add a possible value to an enumeration property
++ * @property: enumeration property to change
++ * @index: index of the new enumeration
++ * @value: value of the new enumeration
++ * @name: symbolic name of the new enumeration
++ *
++ * This function adds enumerations to a property.
++ *
++ * Its use is deprecated; drivers should use one of the more specific helpers
++ * to directly create the property with all enumerations already attached.
++ *
++ * Returns:
++ * Zero on success, error code on failure.
++ */
+ int drm_property_add_enum(struct drm_property *property, int index,
+ uint64_t value, const char *name)
+ {
+ struct drm_property_enum *prop_enum;
+
+- if (!(property->flags & (DRM_MODE_PROP_ENUM | DRM_MODE_PROP_BITMASK)))
++ if (!(drm_property_type_is(property, DRM_MODE_PROP_ENUM) ||
++ drm_property_type_is(property, DRM_MODE_PROP_BITMASK)))
+ return -EINVAL;
+
+ /*
+ * Bitmask enum properties have the additional constraint of values
+ * from 0 to 63
+ */
+- if ((property->flags & DRM_MODE_PROP_BITMASK) && (value > 63))
++ if (drm_property_type_is(property, DRM_MODE_PROP_BITMASK) &&
++ (value > 63))
+ return -EINVAL;
+
+- if (!list_empty(&property->enum_blob_list)) {
+- list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
++ if (!list_empty(&property->enum_list)) {
++ list_for_each_entry(prop_enum, &property->enum_list, head) {
+ if (prop_enum->value == value) {
+ strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN);
+ prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0';
+@@ -2984,16 +3768,24 @@
+ prop_enum->value = value;
+
+ property->values[index] = value;
+- list_add_tail(&prop_enum->head, &property->enum_blob_list);
++ list_add_tail(&prop_enum->head, &property->enum_list);
+ return 0;
+ }
+ EXPORT_SYMBOL(drm_property_add_enum);
+
++/**
++ * drm_property_destroy - destroy a drm property
++ * @dev: drm device
++ * @property: property to destroy
++ *
++ * This function frees a property including any attached resources like
++ * enumeration values.
++ */
+ void drm_property_destroy(struct drm_device *dev, struct drm_property *property)
+ {
+ struct drm_property_enum *prop_enum, *pt;
+
+- list_for_each_entry_safe(prop_enum, pt, &property->enum_blob_list, head) {
++ list_for_each_entry_safe(prop_enum, pt, &property->enum_list, head) {
+ list_del(&prop_enum->head);
+ kfree(prop_enum);
+ }
+@@ -3006,6 +3798,16 @@
+ }
+ EXPORT_SYMBOL(drm_property_destroy);
+
++/**
++ * drm_object_attach_property - attach a property to a modeset object
++ * @obj: drm modeset object
++ * @property: property to attach
++ * @init_val: initial value of the property
++ *
++ * This attaches the given property to the modeset object with the given initial
++ * value. Currently this function cannot fail since the properties are stored in
++ * a statically sized array.
++ */
+ void drm_object_attach_property(struct drm_mode_object *obj,
+ struct drm_property *property,
+ uint64_t init_val)
+@@ -3026,6 +3828,19 @@
+ }
+ EXPORT_SYMBOL(drm_object_attach_property);
+
++/**
++ * drm_object_property_set_value - set the value of a property
++ * @obj: drm mode object to set property value for
++ * @property: property to set
++ * @val: value the property should be set to
++ *
++ * This function sets a given property on a given object. It only changes the
++ * software state of the property; it does not call into the driver's
++ * ->set_property callback.
++ *
++ * Returns:
++ * Zero on success, error code on failure.
++ */
+ int drm_object_property_set_value(struct drm_mode_object *obj,
+ struct drm_property *property, uint64_t val)
+ {
+@@ -3042,6 +3857,20 @@
+ }
+ EXPORT_SYMBOL(drm_object_property_set_value);
+
++/**
++ * drm_object_property_get_value - retrieve the value of a property
++ * @obj: drm mode object to get property value from
++ * @property: property to retrieve
++ * @val: storage for the property value
++ *
++ * This function retrieves the software state of the given property for the
++ * given object. Since there is no driver callback to retrieve the current
++ * property value this might be out of sync with the hardware, depending upon
++ * the driver and property.
++ *
++ * Returns:
++ * Zero on success, error code on failure.
++ */
+ int drm_object_property_get_value(struct drm_mode_object *obj,
+ struct drm_property *property, uint64_t *val)
+ {
+@@ -3058,41 +3887,49 @@
+ }
+ EXPORT_SYMBOL(drm_object_property_get_value);
+
++/**
++ * drm_mode_getproperty_ioctl - get the property metadata
++ * @dev: DRM device
++ * @data: ioctl data
++ * @file_priv: DRM file info
++ *
++ * This function retrieves the metadata for a given property, like the different
++ * possible values for an enum property or the limits for a range property.
++ *
++ * Blob properties are special: the property itself only carries metadata, and
++ * the value stored on an object is a blob object id which userspace must
++ * resolve with the GETPROPBLOB ioctl (see drm_mode_getblob_ioctl()).
++ *
++ * Called by the user via ioctl.
++ *
++ * Returns:
++ * Zero on success, negative errno on failure.
++ */
+ int drm_mode_getproperty_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+ {
+- struct drm_mode_object *obj;
+ struct drm_mode_get_property *out_resp = data;
+ struct drm_property *property;
+ int enum_count = 0;
+- int blob_count = 0;
+ int value_count = 0;
+ int ret = 0, i;
+ int copied;
+ struct drm_property_enum *prop_enum;
+ struct drm_mode_property_enum __user *enum_ptr;
+- struct drm_property_blob *prop_blob;
+- uint32_t __user *blob_id_ptr;
+ uint64_t __user *values_ptr;
+- uint32_t __user *blob_length_ptr;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ drm_modeset_lock_all(dev);
+- obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY);
+- if (!obj) {
++ property = drm_property_find(dev, out_resp->prop_id);
++ if (!property) {
+ ret = -ENOENT;
+ goto done;
+ }
+- property = obj_to_property(obj);
+
+- if (property->flags & (DRM_MODE_PROP_ENUM | DRM_MODE_PROP_BITMASK)) {
+- list_for_each_entry(prop_enum, &property->enum_blob_list, head)
++ if (drm_property_type_is(property, DRM_MODE_PROP_ENUM) ||
++ drm_property_type_is(property, DRM_MODE_PROP_BITMASK)) {
++ list_for_each_entry(prop_enum, &property->enum_list, head)
+ enum_count++;
+- } else if (property->flags & DRM_MODE_PROP_BLOB) {
+- list_for_each_entry(prop_blob, &property->enum_blob_list, head)
+- blob_count++;
+ }
+
+ value_count = property->num_values;
+@@ -3112,11 +3949,12 @@
+ }
+ out_resp->count_values = value_count;
+
+- if (property->flags & (DRM_MODE_PROP_ENUM | DRM_MODE_PROP_BITMASK)) {
++ if (drm_property_type_is(property, DRM_MODE_PROP_ENUM) ||
++ drm_property_type_is(property, DRM_MODE_PROP_BITMASK)) {
+ if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
+ copied = 0;
+ enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
+- list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
++ list_for_each_entry(prop_enum, &property->enum_list, head) {
+
+ if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
+ ret = -EFAULT;
+@@ -3124,45 +3962,34 @@
+ }
+
+ if (copy_to_user(&enum_ptr[copied].name,
+- &prop_enum->name, DRM_PROP_NAME_LEN)) {
+- ret = -EFAULT;
+- goto done;
+- }
+- copied++;
+- }
+- }
+- out_resp->count_enum_blobs = enum_count;
+- }
+-
+- if (property->flags & DRM_MODE_PROP_BLOB) {
+- if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
+- copied = 0;
+- blob_id_ptr = (uint32_t __user *)(unsigned long)out_resp->enum_blob_ptr;
+- blob_length_ptr = (uint32_t __user *)(unsigned long)out_resp->values_ptr;
+-
+- list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
+- if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
+- ret = -EFAULT;
+- goto done;
+- }
+-
+- if (put_user(prop_blob->length, blob_length_ptr + copied)) {
++ &prop_enum->name, DRM_PROP_NAME_LEN)) {
+ ret = -EFAULT;
+ goto done;
+ }
+-
+ copied++;
+ }
+ }
+- out_resp->count_enum_blobs = blob_count;
++ out_resp->count_enum_blobs = enum_count;
+ }
++
++ /*
++ * NOTE: The idea seems to have been to use this to read all the blob
++ * property values. But nothing ever added them to the corresponding
++ * list, userspace always used the special-purpose get_blob ioctl to
++ * read the value for a blob property. It also doesn't make a lot of
++ * sense to return values here when everything else is just metadata for
++ * the property itself.
++ */
++ if (drm_property_type_is(property, DRM_MODE_PROP_BLOB))
++ out_resp->count_enum_blobs = 0;
+ done:
+ drm_modeset_unlock_all(dev);
+ return ret;
+ }
+
+-static struct drm_property_blob *drm_property_create_blob(struct drm_device *dev, int length,
+- void *data)
++static struct drm_property_blob *
++drm_property_create_blob(struct drm_device *dev, size_t length,
++ const void *data)
+ {
+ struct drm_property_blob *blob;
+ int ret;
+@@ -3196,10 +4023,23 @@
+ kfree(blob);
+ }
+
++/**
++ * drm_mode_getblob_ioctl - get the contents of a blob property value
++ * @dev: DRM device
++ * @data: ioctl data
++ * @file_priv: DRM file info
++ *
++ * This function retrieves the contents of a blob property. The value stored in
++ * an object's blob property is just a normal modeset object id.
++ *
++ * Called by the user via ioctl.
++ *
++ * Returns:
++ * Zero on success, negative errno on failure.
++ */
+ int drm_mode_getblob_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+ {
+- struct drm_mode_object *obj;
+ struct drm_mode_get_blob *out_resp = data;
+ struct drm_property_blob *blob;
+ int ret = 0;
+@@ -3209,12 +4049,11 @@
+ return -EINVAL;
+
+ drm_modeset_lock_all(dev);
+- obj = drm_mode_object_find(dev, out_resp->blob_id, DRM_MODE_OBJECT_BLOB);
+- if (!obj) {
++ blob = drm_property_blob_find(dev, out_resp->blob_id);
++ if (!blob) {
+ ret = -ENOENT;
+ goto done;
+ }
+- blob = obj_to_blob(obj);
+
+ if (out_resp->length == blob->length) {
+ blob_ptr = (void __user *)(unsigned long)out_resp->data;
+@@ -3230,11 +4069,105 @@
+ return ret;
+ }
+
+-int drm_mode_connector_update_edid_property(struct drm_connector *connector,
+- struct edid *edid)
++/**
++ * drm_mode_connector_set_path_property - set path property on connector
++ * @connector: connector to set property on.
++ * @path: path to use for property.
++ *
++ * This creates a property to expose to userspace to specify a
++ * connector path. This is mainly used for DisplayPort MST where
++ * connectors have a topology and we want to allow userspace to give
++ * them more meaningful names.
++ *
++ * Returns:
++ * Zero on success, negative errno on failure.
++ */
++int drm_mode_connector_set_path_property(struct drm_connector *connector,
++ const char *path)
++{
++ struct drm_device *dev = connector->dev;
++ size_t size = strlen(path) + 1;
++ int ret;
++
++ connector->path_blob_ptr = drm_property_create_blob(connector->dev,
++ size, path);
++ if (!connector->path_blob_ptr)
++ return -EINVAL;
++
++ ret = drm_object_property_set_value(&connector->base,
++ dev->mode_config.path_property,
++ connector->path_blob_ptr->base.id);
++ return ret;
++}
++EXPORT_SYMBOL(drm_mode_connector_set_path_property);
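As a rough usage sketch (not part of this patch): a DP MST driver would call this helper from its connector-creation hook, passing along the topology-derived path string the MST core hands it. The foo_* name and the example path format are hypothetical.

static int foo_mst_connector_init(struct drm_connector *connector,
				  const char *pathprop)
{
	int ret;

	/* pathprop: topology-derived string, e.g. "mst:10-1" (assumed) */
	ret = drm_mode_connector_set_path_property(connector, pathprop);
	if (ret)
		DRM_DEBUG_KMS("failed to set path property: %d\n", ret);

	return ret;
}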
++
++/**
++ * drm_mode_connector_set_tile_property - set tile property on connector
++ * @connector: connector to set property on.
++ *
++ * This looks up the tile information for a connector, and creates a
++ * property for userspace to parse if it exists. The property value is a
++ * string of 8 integers using ':' as a separator.
++ *
++ * Returns:
++ * Zero on success, errno on failure.
++ */
++int drm_mode_connector_set_tile_property(struct drm_connector *connector)
+ {
+ struct drm_device *dev = connector->dev;
+ int ret, size;
++ char tile[256];
++
++ if (connector->tile_blob_ptr)
++ drm_property_destroy_blob(dev, connector->tile_blob_ptr);
++
++ if (!connector->has_tile) {
++ connector->tile_blob_ptr = NULL;
++ ret = drm_object_property_set_value(&connector->base,
++ dev->mode_config.tile_property, 0);
++ return ret;
++ }
++
++ snprintf(tile, 256, "%d:%d:%d:%d:%d:%d:%d:%d",
++ connector->tile_group->id, connector->tile_is_single_monitor,
++ connector->num_h_tile, connector->num_v_tile,
++ connector->tile_h_loc, connector->tile_v_loc,
++ connector->tile_h_size, connector->tile_v_size);
++ size = strlen(tile) + 1;
++
++ connector->tile_blob_ptr = drm_property_create_blob(connector->dev,
++ size, tile);
++ if (!connector->tile_blob_ptr)
++ return -EINVAL;
++
++ ret = drm_object_property_set_value(&connector->base,
++ dev->mode_config.tile_property,
++ connector->tile_blob_ptr->base.id);
++ return ret;
++}
++EXPORT_SYMBOL(drm_mode_connector_set_tile_property);
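Since the blob created above is a plain ':'-separated string, userspace can recover the eight fields with a single sscanf(); a minimal sketch (parameter names mirror the snprintf() arguments above):

#include <stdio.h>

/* Parse the 8-integer tile blob written by the kernel above. */
static int parse_tile_blob(const char *blob, int *group_id,
			   int *single_monitor, int *num_h, int *num_v,
			   int *h_loc, int *v_loc, int *h_size, int *v_size)
{
	if (sscanf(blob, "%d:%d:%d:%d:%d:%d:%d:%d",
		   group_id, single_monitor, num_h, num_v,
		   h_loc, v_loc, h_size, v_size) != 8)
		return -1;	/* malformed blob */

	return 0;
}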
++
++/**
++ * drm_mode_connector_update_edid_property - update the edid property of a connector
++ * @connector: drm connector
++ * @edid: new value of the edid property
++ *
++ * This function creates a new blob modeset object and assigns its id to the
++ * connector's edid property.
++ *
++ * Returns:
++ * Zero on success, negative errno on failure.
++ */
++int drm_mode_connector_update_edid_property(struct drm_connector *connector,
++ const struct edid *edid)
++{
++ struct drm_device *dev = connector->dev;
++ size_t size;
++ int ret;
++
++ /* ignore requests to set edid when overridden */
++ if (connector->override_edid)
++ return 0;
+
+ if (connector->edid_blob_ptr)
+ drm_property_destroy_blob(dev, connector->edid_blob_ptr);
+@@ -3265,19 +4198,40 @@
+ {
+ if (property->flags & DRM_MODE_PROP_IMMUTABLE)
+ return false;
+- if (property->flags & DRM_MODE_PROP_RANGE) {
++
++ if (drm_property_type_is(property, DRM_MODE_PROP_RANGE)) {
+ if (value < property->values[0] || value > property->values[1])
+ return false;
+ return true;
+- } else if (property->flags & DRM_MODE_PROP_BITMASK) {
++ } else if (drm_property_type_is(property, DRM_MODE_PROP_SIGNED_RANGE)) {
++ int64_t svalue = U642I64(value);
++ if (svalue < U642I64(property->values[0]) ||
++ svalue > U642I64(property->values[1]))
++ return false;
++ return true;
++ } else if (drm_property_type_is(property, DRM_MODE_PROP_BITMASK)) {
+ int i;
+ uint64_t valid_mask = 0;
+ for (i = 0; i < property->num_values; i++)
+ valid_mask |= (1ULL << property->values[i]);
+ return !(value & ~valid_mask);
+- } else if (property->flags & DRM_MODE_PROP_BLOB) {
++ } else if (drm_property_type_is(property, DRM_MODE_PROP_BLOB)) {
+ /* Only the driver knows */
+ return true;
++ } else if (drm_property_type_is(property, DRM_MODE_PROP_OBJECT)) {
++ struct drm_mode_object *obj;
++ /* a zero value for an object property translates to null: */
++ if (value == 0)
++ return true;
++ /*
++ * NOTE: use _object_find() directly to bypass restriction on
++ * looking up refcnt'd objects (ie. fb's). For a refcnt'd
++ * object this could race against object finalization, so it
++ * simply tells us that the object *was* valid. Which is good
++ * enough.
++ */
++ obj = _object_find(property->dev, value, property->values[0]);
++ return obj != NULL;
+ } else {
+ int i;
+ for (i = 0; i < property->num_values; i++)
+@@ -3287,6 +4241,20 @@
+ }
+ }
+
++/**
++ * drm_mode_connector_property_set_ioctl - set the current value of a connector property
++ * @dev: DRM device
++ * @data: ioctl data
++ * @file_priv: DRM file info
++ *
++ * This function sets the current value for a connector's property. It also
++ * calls into a driver's ->set_property callback to update the hardware state.
++ *
++ * Called by the user via ioctl.
++ *
++ * Returns:
++ * Zero on success, negative errno on failure.
++ */
+ int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+ {
+@@ -3338,12 +4306,25 @@
+ return ret;
+ }
+
+-static int drm_mode_plane_set_obj_prop(struct drm_mode_object *obj,
+- struct drm_property *property,
+- uint64_t value)
++/**
++ * drm_mode_plane_set_obj_prop - set the value of a property
++ * @plane: drm plane object to set property value for
++ * @property: property to set
++ * @value: value the property should be set to
++ *
++ * This function sets a given property on a given plane object. It calls the
++ * driver's ->set_property callback and changes the software state of
++ * the property if the callback succeeds.
++ *
++ * Returns:
++ * Zero on success, error code on failure.
++ */
++int drm_mode_plane_set_obj_prop(struct drm_plane *plane,
++ struct drm_property *property,
++ uint64_t value)
+ {
+ int ret = -EINVAL;
+- struct drm_plane *plane = obj_to_plane(obj);
++ struct drm_mode_object *obj = &plane->base;
+
+ if (plane->funcs->set_property)
+ ret = plane->funcs->set_property(plane, property, value);
+@@ -3352,7 +4333,23 @@
+
+ return ret;
+ }
++EXPORT_SYMBOL(drm_mode_plane_set_obj_prop);
+
++/**
++ * drm_mode_obj_get_properties_ioctl - get the current value of an object's property
++ * @dev: DRM device
++ * @data: ioctl data
++ * @file_priv: DRM file info
++ *
++ * This function retrieves the current value for an object's property. Compared
++ * to the connector specific ioctl this one is extended to also work on crtc and
++ * plane objects.
++ *
++ * Called by the user via ioctl.
++ *
++ * Returns:
++ * Zero on success, negative errno on failure.
++ */
+ int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+ {
+@@ -3409,6 +4406,22 @@
+ return ret;
+ }
+
++/**
++ * drm_mode_obj_set_property_ioctl - set the current value of an object's property
++ * @dev: DRM device
++ * @data: ioctl data
++ * @file_priv: DRM file info
++ *
++ * This function sets the current value for an object's property. It also calls
++ * into a driver's ->set_property callback to update the hardware state.
++ * Compared to the connector specific ioctl this one is extended to also work on
++ * crtc and plane objects.
++ *
++ * Called by the user via ioctl.
++ *
++ * Returns:
++ * Zero on success, negative errno on failure.
++ */
+ int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+ {
+@@ -3459,7 +4472,8 @@
+ ret = drm_mode_crtc_set_obj_prop(arg_obj, property, arg->value);
+ break;
+ case DRM_MODE_OBJECT_PLANE:
+- ret = drm_mode_plane_set_obj_prop(arg_obj, property, arg->value);
++ ret = drm_mode_plane_set_obj_prop(obj_to_plane(arg_obj),
++ property, arg->value);
+ break;
+ }
+
+@@ -3468,6 +4482,18 @@
+ return ret;
+ }
+
++/**
++ * drm_mode_connector_attach_encoder - attach a connector to an encoder
++ * @connector: connector to attach
++ * @encoder: encoder to attach @connector to
++ *
++ * This function links up a connector to an encoder. Note that the routing
++ * restrictions between encoders and crtcs are exposed to userspace through the
++ * possible_clones and possible_crtcs bitmasks.
++ *
++ * Returns:
++ * Zero on success, negative errno on failure.
++ */
+ int drm_mode_connector_attach_encoder(struct drm_connector *connector,
+ struct drm_encoder *encoder)
+ {
+@@ -3483,23 +4509,20 @@
+ }
+ EXPORT_SYMBOL(drm_mode_connector_attach_encoder);
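In a driver's output setup this attach is typically the last step after both objects are initialized; a hedged sketch where struct foo_output and the foo_* funcs tables are hypothetical:

static int foo_output_init(struct drm_device *dev, struct foo_output *out)
{
	int ret;

	ret = drm_encoder_init(dev, &out->encoder, &foo_encoder_funcs,
			       DRM_MODE_ENCODER_TMDS);
	if (ret)
		return ret;

	ret = drm_connector_init(dev, &out->connector, &foo_connector_funcs,
				 DRM_MODE_CONNECTOR_HDMIA);
	if (ret)
		return ret;

	return drm_mode_connector_attach_encoder(&out->connector,
						 &out->encoder);
}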
+
+-void drm_mode_connector_detach_encoder(struct drm_connector *connector,
+- struct drm_encoder *encoder)
+-{
+- int i;
+- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+- if (connector->encoder_ids[i] == encoder->base.id) {
+- connector->encoder_ids[i] = 0;
+- if (connector->encoder == encoder)
+- connector->encoder = NULL;
+- break;
+- }
+- }
+-}
+-EXPORT_SYMBOL(drm_mode_connector_detach_encoder);
+-
++/**
++ * drm_mode_crtc_set_gamma_size - set the gamma table size
++ * @crtc: CRTC to set the gamma table size for
++ * @gamma_size: size of the gamma table
++ *
++ * Drivers which support gamma tables should set this to the supported gamma
++ * table size when initializing the CRTC. Currently the drm core only supports a
++ * fixed gamma table size.
++ *
++ * Returns:
++ * Zero on success, negative errno on failure.
++ */
+ int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
+- int gamma_size)
++ int gamma_size)
+ {
+ crtc->gamma_size = gamma_size;
+
+@@ -3513,11 +4536,24 @@
+ }
+ EXPORT_SYMBOL(drm_mode_crtc_set_gamma_size);
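Drivers normally call this right after drm_crtc_init(); 256 entries is the common legacy LUT size. A minimal sketch (foo_crtc_funcs is a hypothetical driver table):

static int foo_crtc_init(struct drm_device *dev, struct drm_crtc *crtc)
{
	int ret;

	ret = drm_crtc_init(dev, crtc, &foo_crtc_funcs);
	if (ret)
		return ret;

	/* advertise a 256-entry gamma LUT on the new CRTC */
	return drm_mode_crtc_set_gamma_size(crtc, 256);
}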
+
++/**
++ * drm_mode_gamma_set_ioctl - set the gamma table
++ * @dev: DRM device
++ * @data: ioctl data
++ * @file_priv: DRM file info
++ *
++ * Set the gamma table of a CRTC to the one passed in by the user. Userspace can
++ * inquire the required gamma table size through drm_mode_gamma_get_ioctl.
++ *
++ * Called by the user via ioctl.
++ *
++ * Returns:
++ * Zero on success, negative errno on failure.
++ */
+ int drm_mode_gamma_set_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+ {
+ struct drm_mode_crtc_lut *crtc_lut = data;
+- struct drm_mode_object *obj;
+ struct drm_crtc *crtc;
+ void *r_base, *g_base, *b_base;
+ int size;
+@@ -3527,12 +4563,11 @@
+ return -EINVAL;
+
+ drm_modeset_lock_all(dev);
+- obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC);
+- if (!obj) {
++ crtc = drm_crtc_find(dev, crtc_lut->crtc_id);
++ if (!crtc) {
+ ret = -ENOENT;
+ goto out;
+ }
+- crtc = obj_to_crtc(obj);
+
+ if (crtc->funcs->gamma_set == NULL) {
+ ret = -ENOSYS;
+@@ -3572,11 +4607,25 @@
+
+ }
+
++/**
++ * drm_mode_gamma_get_ioctl - get the gamma table
++ * @dev: DRM device
++ * @data: ioctl data
++ * @file_priv: DRM file info
++ *
++ * Copy the current gamma table into the storage provided. This also provides
++ * the gamma table size the driver expects, which can be used to size the
++ * allocated storage.
++ *
++ * Called by the user via ioctl.
++ *
++ * Returns:
++ * Zero on success, negative errno on failure.
++ */
+ int drm_mode_gamma_get_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+ {
+ struct drm_mode_crtc_lut *crtc_lut = data;
+- struct drm_mode_object *obj;
+ struct drm_crtc *crtc;
+ void *r_base, *g_base, *b_base;
+ int size;
+@@ -3586,12 +4635,11 @@
+ return -EINVAL;
+
+ drm_modeset_lock_all(dev);
+- obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC);
+- if (!obj) {
++ crtc = drm_crtc_find(dev, crtc_lut->crtc_id);
++ if (!crtc) {
+ ret = -ENOENT;
+ goto out;
+ }
+- crtc = obj_to_crtc(obj);
+
+ /* memcpy into gamma store */
+ if (crtc_lut->gamma_size != crtc->gamma_size) {
+@@ -3622,13 +4670,30 @@
+ return ret;
+ }
+
++/**
++ * drm_mode_page_flip_ioctl - schedule an asynchronous fb update
++ * @dev: DRM device
++ * @data: ioctl data
++ * @file_priv: DRM file info
++ *
++ * This schedules an asynchronous update on a given CRTC, called page flip.
++ * Optionally a drm event is generated to signal the completion of the event.
++ * Generic drivers cannot assume that a pageflip with changed framebuffer
++ * properties (including driver specific metadata like tiling layout) will work,
++ * but some drivers support e.g. pixel format changes through the pageflip
++ * ioctl.
++ *
++ * Called by the user via ioctl.
++ *
++ * Returns:
++ * Zero on success, negative errno on failure.
++ */
+ int drm_mode_page_flip_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+ {
+ struct drm_mode_crtc_page_flip *page_flip = data;
+- struct drm_mode_object *obj;
+ struct drm_crtc *crtc;
+- struct drm_framebuffer *fb = NULL, *old_fb = NULL;
++ struct drm_framebuffer *fb = NULL;
+ struct drm_pending_vblank_event *e = NULL;
+ unsigned long flags;
+ int ret = -EINVAL;
+@@ -3640,13 +4705,12 @@
+ if ((page_flip->flags & DRM_MODE_PAGE_FLIP_ASYNC) && !dev->mode_config.async_page_flip)
+ return -EINVAL;
+
+- obj = drm_mode_object_find(dev, page_flip->crtc_id, DRM_MODE_OBJECT_CRTC);
+- if (!obj)
++ crtc = drm_crtc_find(dev, page_flip->crtc_id);
++ if (!crtc)
+ return -ENOENT;
+- crtc = obj_to_crtc(obj);
+
+- mutex_lock(&crtc->mutex);
+- if (crtc->fb == NULL) {
++ drm_modeset_lock_crtc(crtc, crtc->primary);
++ if (crtc->primary->fb == NULL) {
+ /* The framebuffer is currently unbound, presumably
+ * due to a hotplug event, that userspace has not
+ * yet discovered.
+@@ -3668,7 +4732,7 @@
+ if (ret)
+ goto out;
+
+- if (crtc->fb->pixel_format != fb->pixel_format) {
++ if (crtc->primary->fb->pixel_format != fb->pixel_format) {
+ DRM_DEBUG_KMS("Page flip is not allowed to change frame buffer format.\n");
+ ret = -EINVAL;
+ goto out;
+@@ -3701,7 +4765,7 @@
+ (void (*) (struct drm_pending_event *)) kfree;
+ }
+
+- old_fb = crtc->fb;
++ crtc->primary->old_fb = crtc->primary->fb;
+ ret = crtc->funcs->page_flip(crtc, fb, e, page_flip->flags);
+ if (ret) {
+ if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
+@@ -3711,7 +4775,7 @@
+ kfree(e);
+ }
+ /* Keep the old fb, don't unref it. */
+- old_fb = NULL;
++ crtc->primary->old_fb = NULL;
+ } else {
+ /*
+ * Warn if the driver hasn't properly updated the crtc->fb
+@@ -3719,7 +4783,7 @@
+ * Failing to do so will screw with the reference counting
+ * on framebuffers.
+ */
+- WARN_ON(crtc->fb != fb);
++ WARN_ON(crtc->primary->fb != fb);
+ /* Unref only the old framebuffer. */
+ fb = NULL;
+ }
+@@ -3727,19 +4791,33 @@
+ out:
+ if (fb)
+ drm_framebuffer_unreference(fb);
+- if (old_fb)
+- drm_framebuffer_unreference(old_fb);
+- mutex_unlock(&crtc->mutex);
++ if (crtc->primary->old_fb)
++ drm_framebuffer_unreference(crtc->primary->old_fb);
++ crtc->primary->old_fb = NULL;
++ drm_modeset_unlock_crtc(crtc);
+
+ return ret;
+ }
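From userspace the usual pattern, shown here as a libdrm-based sketch, is to request a completion event with the flip and then block on the DRM fd until the handler fires:

#include <stdint.h>
#include <poll.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

static void flip_done(int fd, unsigned int seq, unsigned int tv_sec,
		      unsigned int tv_usec, void *user_data)
{
	*(int *)user_data = 1;	/* mark the flip complete */
}

/* Schedule a flip to fb_id on crtc_id and wait for the event. */
static int flip_and_wait(int fd, uint32_t crtc_id, uint32_t fb_id)
{
	drmEventContext evctx = {
		.version = DRM_EVENT_CONTEXT_VERSION,
		.page_flip_handler = flip_done,
	};
	struct pollfd pfd = { .fd = fd, .events = POLLIN };
	int done = 0, ret;

	ret = drmModePageFlip(fd, crtc_id, fb_id,
			      DRM_MODE_PAGE_FLIP_EVENT, &done);
	if (ret)
		return ret;

	while (!done) {
		poll(&pfd, 1, -1);
		drmHandleEvent(fd, &evctx);
	}

	return 0;
}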
+
++/**
++ * drm_mode_config_reset - call ->reset callbacks
++ * @dev: drm device
++ *
++ * This function calls the ->reset callback of all crtcs, encoders and
++ * connectors. Drivers can use this in e.g. their driver load or resume code to
++ * reset hardware and software state.
++ */
+ void drm_mode_config_reset(struct drm_device *dev)
+ {
+ struct drm_crtc *crtc;
++ struct drm_plane *plane;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+
++ list_for_each_entry(plane, &dev->mode_config.plane_list, head)
++ if (plane->funcs->reset)
++ plane->funcs->reset(plane);
++
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+ if (crtc->funcs->reset)
+ crtc->funcs->reset(crtc);
+@@ -3757,16 +4835,77 @@
+ }
+ EXPORT_SYMBOL(drm_mode_config_reset);
+
++/**
++ * drm_mode_create_dumb_ioctl - create a dumb backing storage buffer
++ * @dev: DRM device
++ * @data: ioctl data
++ * @file_priv: DRM file info
++ *
++ * This creates a new dumb buffer in the driver's backing storage manager (GEM,
++ * TTM or something else entirely) and returns the resulting buffer handle. This
++ * handle can then be wrapped up into a framebuffer modeset object.
++ *
++ * Note that userspace is not allowed to use such objects for render
++ * acceleration - drivers must create their own private ioctls for such a use
++ * case.
++ *
++ * Called by the user via ioctl.
++ *
++ * Returns:
++ * Zero on success, negative errno on failure.
++ */
+ int drm_mode_create_dumb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+ {
+ struct drm_mode_create_dumb *args = data;
++ u32 cpp, stride, size;
+
+ if (!dev->driver->dumb_create)
+ return -ENOSYS;
++ if (!args->width || !args->height || !args->bpp)
++ return -EINVAL;
++
++ /* overflow checks for 32bit size calculations */
++ /* NOTE: DIV_ROUND_UP() can overflow */
++ cpp = DIV_ROUND_UP(args->bpp, 8);
++ if (!cpp || cpp > 0xffffffffU / args->width)
++ return -EINVAL;
++ stride = cpp * args->width;
++ if (args->height > 0xffffffffU / stride)
++ return -EINVAL;
++
++ /* test for wrap-around */
++ size = args->height * stride;
++ if (PAGE_ALIGN(size) == 0)
++ return -EINVAL;
++
++ /*
++ * handle, pitch and size are output parameters. Zero them out to
++ * prevent drivers from accidentally using uninitialized data. Since
++ * not all existing userspace is clearing these fields properly we
++ * cannot reject IOCTLs with garbage in them.
++ */
++ args->handle = 0;
++ args->pitch = 0;
++ args->size = 0;
++
+ return dev->driver->dumb_create(file_priv, dev, args);
+ }
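Driven from userspace, the ioctl pair looks roughly like this raw-ioctl sketch, which creates and maps a 32bpp buffer:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/drm.h>
#include <drm/drm_mode.h>

/* Create a width x height 32bpp dumb buffer and mmap it. */
static void *create_dumb_map(int fd, uint32_t width, uint32_t height,
			     struct drm_mode_create_dumb *creq)
{
	struct drm_mode_map_dumb mreq;

	memset(creq, 0, sizeof(*creq));
	creq->width = width;
	creq->height = height;
	creq->bpp = 32;
	if (ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, creq))
		return MAP_FAILED;

	memset(&mreq, 0, sizeof(mreq));
	mreq.handle = creq->handle;
	if (ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &mreq))
		return MAP_FAILED;

	return mmap(NULL, creq->size, PROT_READ | PROT_WRITE,
		    MAP_SHARED, fd, mreq.offset);
}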
+
++/**
++ * drm_mode_mmap_dumb_ioctl - create an mmap offset for a dumb backing storage buffer
++ * @dev: DRM device
++ * @data: ioctl data
++ * @file_priv: DRM file info
++ *
++ * Allocate an offset in the drm device node's address space to be able to
++ * memory map a dumb buffer.
++ *
++ * Called by the user via ioctl.
++ *
++ * Returns:
++ * Zero on success, negative errno on failure.
++ */
+ int drm_mode_mmap_dumb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+ {
+@@ -3779,6 +4918,21 @@
+ return dev->driver->dumb_map_offset(file_priv, dev, args->handle, &args->offset);
+ }
+
++/**
++ * drm_mode_destroy_dumb_ioctl - destroy a dumb backing storage buffer
++ * @dev: DRM device
++ * @data: ioctl data
++ * @file_priv: DRM file info
++ *
++ * This destroys the userspace handle for the given dumb backing storage buffer.
++ * Since buffer objects must be reference counted in the kernel a buffer object
++ * won't be immediately freed if a framebuffer modeset object still uses it.
++ *
++ * Called by the user via ioctl.
++ *
++ * Returns:
++ * Zero on success, negative errno on failure.
++ */
+ int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+ {
+@@ -3790,9 +4944,14 @@
+ return dev->driver->dumb_destroy(file_priv, dev, args->handle);
+ }
+
+-/*
+- * Just need to support RGB formats here for compat with code that doesn't
+- * use pixel formats directly yet.
++/**
++ * drm_fb_get_bpp_depth - get the bpp/depth values for format
++ * @format: pixel format (DRM_FORMAT_*)
++ * @depth: storage for the depth value
++ * @bpp: storage for the bpp value
++ *
++ * This only supports RGB formats here for compat with code that doesn't use
++ * pixel formats directly yet.
+ */
+ void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
+ int *bpp)
+@@ -3864,7 +5023,7 @@
+ * drm_format_num_planes - get the number of planes for format
+ * @format: pixel format (DRM_FORMAT_*)
+ *
+- * RETURNS:
++ * Returns:
+ * The number of planes used by the specified pixel format.
+ */
+ int drm_format_num_planes(uint32_t format)
+@@ -3899,7 +5058,7 @@
+ * @format: pixel format (DRM_FORMAT_*)
+ * @plane: plane index
+ *
+- * RETURNS:
++ * Returns:
+ * The bytes per pixel value for the specified plane.
+ */
+ int drm_format_plane_cpp(uint32_t format, int plane)
+@@ -3945,7 +5104,7 @@
+ * drm_format_horz_chroma_subsampling - get the horizontal chroma subsampling factor
+ * @format: pixel format (DRM_FORMAT_*)
+ *
+- * RETURNS:
++ * Returns:
+ * The horizontal chroma subsampling factor for the
+ * specified pixel format.
+ */
+@@ -3980,7 +5139,7 @@
+ * drm_format_vert_chroma_subsampling - get the vertical chroma subsampling factor
+ * @format: pixel format (DRM_FORMAT_*)
+ *
+- * RETURNS:
++ * Returns:
+ * The vertical chroma subsampling factor for the
+ * specified pixel format.
+ */
+@@ -4002,6 +5161,36 @@
+ EXPORT_SYMBOL(drm_format_vert_chroma_subsampling);
+
+ /**
++ * drm_rotation_simplify() - Try to simplify the rotation
++ * @rotation: Rotation to be simplified
++ * @supported_rotations: Supported rotations
++ *
++ * Attempt to simplify the rotation to a form that is supported.
++ * Eg. if the hardware supports everything except DRM_REFLECT_X
++ * one could call this function like this:
++ *
++ * drm_rotation_simplify(rotation, BIT(DRM_ROTATE_0) |
++ * BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_180) |
++ * BIT(DRM_ROTATE_270) | BIT(DRM_REFLECT_Y));
++ *
++ * to eliminate the DRM_REFLECT_X flag. Depending on what kind of
++ * transforms the hardware supports, this function may not
++ * be able to produce a supported transform, so the caller should
++ * check the result afterwards.
++ */
++unsigned int drm_rotation_simplify(unsigned int rotation,
++ unsigned int supported_rotations)
++{
++ if (rotation & ~supported_rotations) {
++ rotation ^= BIT(DRM_REFLECT_X) | BIT(DRM_REFLECT_Y);
++ rotation = (rotation & ~0xf) | BIT((ffs(rotation & 0xf) + 1) % 4);
++ }
++
++ return rotation;
++}
++EXPORT_SYMBOL(drm_rotation_simplify);
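A worked example of the transform: reflect-x equals rotate-180 composed with reflect-y, which is exactly what toggling both reflect bits and rotating the rotation part by 180 degrees produces. Sketch:

/* Hardware supports everything except reflect-x. */
static void foo_rotation_demo(void)
{
	unsigned int supported = BIT(DRM_ROTATE_0) | BIT(DRM_ROTATE_90) |
				 BIT(DRM_ROTATE_180) | BIT(DRM_ROTATE_270) |
				 BIT(DRM_REFLECT_Y);
	unsigned int rotation = BIT(DRM_ROTATE_0) | BIT(DRM_REFLECT_X);

	rotation = drm_rotation_simplify(rotation, supported);
	/* rotation is now BIT(DRM_ROTATE_180) | BIT(DRM_REFLECT_Y): the
	 * same transform expressed without reflect-x. The caller still
	 * has to check the result against the supported mask.
	 */
	WARN_ON(rotation & ~supported);
}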
++
++/**
+ * drm_mode_config_init - initialize DRM mode_configuration structure
+ * @dev: DRM device
+ *
+@@ -4016,6 +5205,7 @@
+ void drm_mode_config_init(struct drm_device *dev)
+ {
+ mutex_init(&dev->mode_config.mutex);
++ drm_modeset_lock_init(&dev->mode_config.connection_mutex);
+ mutex_init(&dev->mode_config.idr_mutex);
+ mutex_init(&dev->mode_config.fb_lock);
+ INIT_LIST_HEAD(&dev->mode_config.fb_list);
+@@ -4027,9 +5217,11 @@
+ INIT_LIST_HEAD(&dev->mode_config.property_blob_list);
+ INIT_LIST_HEAD(&dev->mode_config.plane_list);
+ idr_init(&dev->mode_config.crtc_idr);
++ idr_init(&dev->mode_config.tile_idr);
+
+ drm_modeset_lock_all(dev);
+ drm_mode_create_standard_connector_properties(dev);
++ drm_mode_create_standard_plane_properties(dev);
+ drm_modeset_unlock_all(dev);
+
+ /* Just to be sure */
+@@ -4037,6 +5229,8 @@
+ dev->mode_config.num_connector = 0;
+ dev->mode_config.num_crtc = 0;
+ dev->mode_config.num_encoder = 0;
++ dev->mode_config.num_overlay_plane = 0;
++ dev->mode_config.num_total_plane = 0;
+ }
+ EXPORT_SYMBOL(drm_mode_config_init);
+
+@@ -4111,6 +5305,123 @@
+ crtc->funcs->destroy(crtc);
+ }
+
++ idr_destroy(&dev->mode_config.tile_idr);
+ idr_destroy(&dev->mode_config.crtc_idr);
++ drm_modeset_lock_fini(&dev->mode_config.connection_mutex);
+ }
+ EXPORT_SYMBOL(drm_mode_config_cleanup);
++
++struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev,
++ unsigned int supported_rotations)
++{
++ static const struct drm_prop_enum_list props[] = {
++ { DRM_ROTATE_0, "rotate-0" },
++ { DRM_ROTATE_90, "rotate-90" },
++ { DRM_ROTATE_180, "rotate-180" },
++ { DRM_ROTATE_270, "rotate-270" },
++ { DRM_REFLECT_X, "reflect-x" },
++ { DRM_REFLECT_Y, "reflect-y" },
++ };
++
++ return drm_property_create_bitmask(dev, 0, "rotation",
++ props, ARRAY_SIZE(props),
++ supported_rotations);
++}
++EXPORT_SYMBOL(drm_mode_create_rotation_property);
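A plane driver would create the property once and attach it to each rotatable plane; hedged sketch (the foo_* name is hypothetical):

/* Attach a rotation property supporting 0/180 degrees, default rotate-0. */
static int foo_plane_add_rotation(struct drm_plane *plane)
{
	struct drm_property *prop;

	prop = drm_mode_create_rotation_property(plane->dev,
						 BIT(DRM_ROTATE_0) |
						 BIT(DRM_ROTATE_180));
	if (!prop)
		return -ENOMEM;

	drm_object_attach_property(&plane->base, prop, BIT(DRM_ROTATE_0));

	return 0;
}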
++
++/**
++ * DOC: Tile group
++ *
++ * Tile groups are used to represent tiled monitors with a unique
++ * integer identifier. Tiled monitors using DisplayID v1.3 have
++ * a unique 8-byte handle; we store this in a tile group so we
++ * have a common identifier for all tiles in a monitor group.
++ */
++static void drm_tile_group_free(struct kref *kref)
++{
++ struct drm_tile_group *tg = container_of(kref, struct drm_tile_group, refcount);
++ struct drm_device *dev = tg->dev;
++ mutex_lock(&dev->mode_config.idr_mutex);
++ idr_remove(&dev->mode_config.tile_idr, tg->id);
++ mutex_unlock(&dev->mode_config.idr_mutex);
++ kfree(tg);
++}
++
++/**
++ * drm_mode_put_tile_group - drop a reference to a tile group.
++ * @dev: DRM device
++ * @tg: tile group to drop reference to.
++ *
++ * Drop a reference to the tile group; it is freed once the refcount reaches zero.
++ */
++void drm_mode_put_tile_group(struct drm_device *dev,
++ struct drm_tile_group *tg)
++{
++ kref_put(&tg->refcount, drm_tile_group_free);
++}
++
++/**
++ * drm_mode_get_tile_group - get a reference to an existing tile group
++ * @dev: DRM device
++ * @topology: 8 bytes of data, unique per monitor.
++ *
++ * Use the unique bytes to get a reference to an existing tile group.
++ *
++ * RETURNS:
++ * tile group or NULL if not found.
++ */
++struct drm_tile_group *drm_mode_get_tile_group(struct drm_device *dev,
++ char topology[8])
++{
++ struct drm_tile_group *tg;
++ int id;
++ mutex_lock(&dev->mode_config.idr_mutex);
++ idr_for_each_entry(&dev->mode_config.tile_idr, tg, id) {
++ if (!memcmp(tg->group_data, topology, 8)) {
++ if (!kref_get_unless_zero(&tg->refcount))
++ tg = NULL;
++ mutex_unlock(&dev->mode_config.idr_mutex);
++ return tg;
++ }
++ }
++ mutex_unlock(&dev->mode_config.idr_mutex);
++ return NULL;
++}
++
++/**
++ * drm_mode_create_tile_group - create a tile group from a displayid description
++ * @dev: DRM device
++ * @topology: 8 bytes of data, unique per monitor.
++ *
++ * Create a tile group for the unique monitor, and get a unique
++ * identifier for the tile group.
++ *
++ * RETURNS:
++ * new tile group or error.
++ */
++struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev,
++ char topology[8])
++{
++ struct drm_tile_group *tg;
++ int ret;
++
++ tg = kzalloc(sizeof(*tg), GFP_KERNEL);
++ if (!tg)
++ return ERR_PTR(-ENOMEM);
++
++ kref_init(&tg->refcount);
++ memcpy(tg->group_data, topology, 8);
++ tg->dev = dev;
++
++ mutex_lock(&dev->mode_config.idr_mutex);
++ ret = idr_alloc(&dev->mode_config.tile_idr, tg, 1, 0, GFP_KERNEL);
++ if (ret >= 0) {
++ tg->id = ret;
++ } else {
++ kfree(tg);
++ tg = ERR_PTR(ret);
++ }
++
++ mutex_unlock(&dev->mode_config.idr_mutex);
++ return tg;
++}
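EDID/DisplayID parsing code looks up an existing group first and only creates one for an unseen topology; a sketch of that get-or-create pattern:

/* Resolve the tile group for an 8-byte DisplayID topology id. */
static struct drm_tile_group *foo_get_tile_group(struct drm_device *dev,
						 char topology[8])
{
	struct drm_tile_group *tg;

	tg = drm_mode_get_tile_group(dev, topology);	/* takes a ref */
	if (!tg)
		tg = drm_mode_create_tile_group(dev, topology);

	return tg;	/* may be an ERR_PTR() on allocation failure */
}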
+diff -Naur a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
+--- a/drivers/gpu/drm/drm_crtc_helper.c 2015-03-26 14:43:30.402436435 +0530
++++ b/drivers/gpu/drm/drm_crtc_helper.c 2015-03-26 14:42:38.714435422 +0530
+@@ -29,16 +29,40 @@
+ * Jesse Barnes <jesse.barnes@intel.com>
+ */
+
++#include <linux/kernel.h>
+ #include <linux/export.h>
+ #include <linux/moduleparam.h>
+
+ #include <drm/drmP.h>
++#include <drm/drm_atomic.h>
+ #include <drm/drm_crtc.h>
+ #include <drm/drm_fourcc.h>
+ #include <drm/drm_crtc_helper.h>
+ #include <drm/drm_fb_helper.h>
++#include <drm/drm_plane_helper.h>
++#include <drm/drm_atomic_helper.h>
+ #include <drm/drm_edid.h>
+
++/**
++ * DOC: overview
++ *
++ * The CRTC modeset helper library provides a default set_config implementation
++ * in drm_crtc_helper_set_config(). Plus a few other convenience functions using
++ * the same callbacks which drivers can use to e.g. restore the modeset
++ * configuration on resume with drm_helper_resume_force_mode().
++ *
++ * The driver callbacks are mostly compatible with the atomic modeset helpers,
++ * except for the handling of the primary plane: Atomic helpers require that the
++ * primary plane is implemented as a real standalone plane and not directly tied
++ * to the CRTC state. For easier transition this library provides functions to
++ * implement the old semantics required by the CRTC helpers using the new plane
++ * and atomic helper callbacks.
++ *
++ * Drivers are strongly urged to convert to the atomic helpers (by way of first
++ * converting to the plane helpers). New drivers must not use these functions
++ * but need to implement the atomic interface instead, potentially using the
++ * atomic helpers for that.
++ */
+ MODULE_AUTHOR("David Airlie, Jesse Barnes");
+ MODULE_DESCRIPTION("DRM KMS helper");
+ MODULE_LICENSE("GPL and additional rights");
+@@ -72,165 +96,31 @@
+ }
+ EXPORT_SYMBOL(drm_helper_move_panel_connectors_to_head);
+
+-static bool drm_kms_helper_poll = true;
+-module_param_named(poll, drm_kms_helper_poll, bool, 0600);
+-
+-static void drm_mode_validate_flag(struct drm_connector *connector,
+- int flags)
+-{
+- struct drm_display_mode *mode;
+-
+- if (flags == (DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_INTERLACE |
+- DRM_MODE_FLAG_3D_MASK))
+- return;
+-
+- list_for_each_entry(mode, &connector->modes, head) {
+- if ((mode->flags & DRM_MODE_FLAG_INTERLACE) &&
+- !(flags & DRM_MODE_FLAG_INTERLACE))
+- mode->status = MODE_NO_INTERLACE;
+- if ((mode->flags & DRM_MODE_FLAG_DBLSCAN) &&
+- !(flags & DRM_MODE_FLAG_DBLSCAN))
+- mode->status = MODE_NO_DBLESCAN;
+- if ((mode->flags & DRM_MODE_FLAG_3D_MASK) &&
+- !(flags & DRM_MODE_FLAG_3D_MASK))
+- mode->status = MODE_NO_STEREO;
+- }
+-
+- return;
+-}
+-
+-/**
+- * drm_helper_probe_single_connector_modes - get complete set of display modes
+- * @connector: connector to probe
+- * @maxX: max width for modes
+- * @maxY: max height for modes
+- *
+- * LOCKING:
+- * Caller must hold mode config lock.
+- *
+- * Based on the helper callbacks implemented by @connector try to detect all
+- * valid modes. Modes will first be added to the connector's probed_modes list,
+- * then culled (based on validity and the @maxX, @maxY parameters) and put into
+- * the normal modes list.
+- *
+- * Intended to be use as a generic implementation of the ->fill_modes()
+- * @connector vfunc for drivers that use the crtc helpers for output mode
+- * filtering and detection.
+- *
+- * RETURNS:
+- * Number of modes found on @connector.
+- */
+-int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
+- uint32_t maxX, uint32_t maxY)
+-{
+- struct drm_device *dev = connector->dev;
+- struct drm_display_mode *mode;
+- struct drm_connector_helper_funcs *connector_funcs =
+- connector->helper_private;
+- int count = 0;
+- int mode_flags = 0;
+- bool verbose_prune = true;
+-
+- DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
+- drm_get_connector_name(connector));
+- /* set all modes to the unverified state */
+- list_for_each_entry(mode, &connector->modes, head)
+- mode->status = MODE_UNVERIFIED;
+-
+- if (connector->force) {
+- if (connector->force == DRM_FORCE_ON)
+- connector->status = connector_status_connected;
+- else
+- connector->status = connector_status_disconnected;
+- if (connector->funcs->force)
+- connector->funcs->force(connector);
+- } else {
+- connector->status = connector->funcs->detect(connector, true);
+- }
+-
+- /* Re-enable polling in case the global poll config changed. */
+- if (drm_kms_helper_poll != dev->mode_config.poll_running)
+- drm_kms_helper_poll_enable(dev);
+-
+- dev->mode_config.poll_running = drm_kms_helper_poll;
+-
+- if (connector->status == connector_status_disconnected) {
+- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n",
+- connector->base.id, drm_get_connector_name(connector));
+- drm_mode_connector_update_edid_property(connector, NULL);
+- verbose_prune = false;
+- goto prune;
+- }
+-
+-#ifdef CONFIG_DRM_LOAD_EDID_FIRMWARE
+- count = drm_load_edid_firmware(connector);
+- if (count == 0)
+-#endif
+- count = (*connector_funcs->get_modes)(connector);
+-
+- if (count == 0 && connector->status == connector_status_connected)
+- count = drm_add_modes_noedid(connector, 1024, 768);
+- if (count == 0)
+- goto prune;
+-
+- drm_mode_connector_list_update(connector);
+-
+- if (maxX && maxY)
+- drm_mode_validate_size(dev, &connector->modes, maxX,
+- maxY, 0);
+-
+- if (connector->interlace_allowed)
+- mode_flags |= DRM_MODE_FLAG_INTERLACE;
+- if (connector->doublescan_allowed)
+- mode_flags |= DRM_MODE_FLAG_DBLSCAN;
+- if (connector->stereo_allowed)
+- mode_flags |= DRM_MODE_FLAG_3D_MASK;
+- drm_mode_validate_flag(connector, mode_flags);
+-
+- list_for_each_entry(mode, &connector->modes, head) {
+- if (mode->status == MODE_OK)
+- mode->status = connector_funcs->mode_valid(connector,
+- mode);
+- }
+-
+-prune:
+- drm_mode_prune_invalid(dev, &connector->modes, verbose_prune);
+-
+- if (list_empty(&connector->modes))
+- return 0;
+-
+- list_for_each_entry(mode, &connector->modes, head)
+- mode->vrefresh = drm_mode_vrefresh(mode);
+-
+- drm_mode_sort(&connector->modes);
+-
+- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] probed modes :\n", connector->base.id,
+- drm_get_connector_name(connector));
+- list_for_each_entry(mode, &connector->modes, head) {
+- drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
+- drm_mode_debug_printmodeline(mode);
+- }
+-
+- return count;
+-}
+-EXPORT_SYMBOL(drm_helper_probe_single_connector_modes);
+-
+ /**
+ * drm_helper_encoder_in_use - check if a given encoder is in use
+ * @encoder: encoder to check
+ *
+- * LOCKING:
+- * Caller must hold mode config lock.
++ * Checks whether @encoder is used by any connector in the current mode setting
++ * output configuration. This doesn't mean that it is actually enabled since
++ * the DPMS state is tracked separately.
+ *
+- * Walk @encoders's DRM device's mode_config and see if it's in use.
+- *
+- * RETURNS:
+- * True if @encoder is part of the mode_config, false otherwise.
++ * Returns:
++ * True if @encoder is used, false otherwise.
+ */
+ bool drm_helper_encoder_in_use(struct drm_encoder *encoder)
+ {
+ struct drm_connector *connector;
+ struct drm_device *dev = encoder->dev;
++
++ /*
++ * We can expect this mutex to be locked if we are not panicking.
++ * Locking is currently fubar in the panic handler.
++ */
++ if (!oops_in_progress) {
++ WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
++ WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
++ }
++
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+ if (connector->encoder == encoder)
+ return true;
+@@ -242,19 +132,25 @@
+ * drm_helper_crtc_in_use - check if a given CRTC is in a mode_config
+ * @crtc: CRTC to check
+ *
+- * LOCKING:
+- * Caller must hold mode config lock.
+- *
+- * Walk @crtc's DRM device's mode_config and see if it's in use.
++ * Checks whether @crtc is used by any connector in the current mode setting
++ * output configuration. This doesn't mean that it is actually enabled since
++ * the DPMS state is tracked separately.
+ *
+- * RETURNS:
+- * True if @crtc is part of the mode_config, false otherwise.
++ * Returns:
++ * True if @crtc is used, false otherwise.
+ */
+ bool drm_helper_crtc_in_use(struct drm_crtc *crtc)
+ {
+ struct drm_encoder *encoder;
+ struct drm_device *dev = crtc->dev;
+- /* FIXME: Locking around list access? */
++
++ /*
++ * We can expect this mutex to be locked if we are not panicking.
++ * Locking is currently fubar in the panic handler.
++ */
++ if (!oops_in_progress)
++ WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
++
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
+ if (encoder->crtc == crtc && drm_helper_encoder_in_use(encoder))
+ return true;
+@@ -279,33 +175,17 @@
+ encoder->bridge->funcs->post_disable(encoder->bridge);
+ }
+
+-/**
+- * drm_helper_disable_unused_functions - disable unused objects
+- * @dev: DRM device
+- *
+- * LOCKING:
+- * Caller must hold mode config lock.
+- *
+- * If an connector or CRTC isn't part of @dev's mode_config, it can be disabled
+- * by calling its dpms function, which should power it off.
+- */
+-void drm_helper_disable_unused_functions(struct drm_device *dev)
++static void __drm_helper_disable_unused_functions(struct drm_device *dev)
+ {
+ struct drm_encoder *encoder;
+- struct drm_connector *connector;
+ struct drm_crtc *crtc;
+
+- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+- if (!connector->encoder)
+- continue;
+- if (connector->status == connector_status_disconnected)
+- connector->encoder = NULL;
+- }
++ drm_warn_on_modeset_not_all_locked(dev);
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (!drm_helper_encoder_in_use(encoder)) {
+ drm_encoder_disable(encoder);
+- /* disconnector encoder from any connector */
++ /* disconnect encoder from any connector */
+ encoder->crtc = NULL;
+ }
+ }
+@@ -318,10 +198,27 @@
+ (*crtc_funcs->disable)(crtc);
+ else
+ (*crtc_funcs->dpms)(crtc, DRM_MODE_DPMS_OFF);
+- crtc->fb = NULL;
++ crtc->primary->fb = NULL;
+ }
+ }
+ }
++
++/**
++ * drm_helper_disable_unused_functions - disable unused objects
++ * @dev: DRM device
++ *
++ * This function walks through the entire mode setting configuration of @dev. It
++ * will remove any crtc links of unused encoders and encoder links of
++ * disconnected connectors. Then it will disable all unused encoders and crtcs
++ * either by calling their disable callback if available or by calling their
++ * dpms callback with DRM_MODE_DPMS_OFF.
++ */
++void drm_helper_disable_unused_functions(struct drm_device *dev)
++{
++ drm_modeset_lock_all(dev);
++ __drm_helper_disable_unused_functions(dev);
++ drm_modeset_unlock_all(dev);
++}
+ EXPORT_SYMBOL(drm_helper_disable_unused_functions);
+
+ /*
+@@ -355,9 +252,6 @@
+ * @y: vertical offset into the surface
+ * @old_fb: old framebuffer, for cleanup
+ *
+- * LOCKING:
+- * Caller must hold mode config lock.
+- *
+ * Try to set @mode on @crtc. Give @crtc and its associated connectors a chance
+ * to fixup or reject the mode prior to trying to set it. This is an internal
+ * helper that drivers could e.g. use to update properties that require the
+@@ -367,8 +261,8 @@
+ * drm_crtc_helper_set_config() helper function to drive the mode setting
+ * sequence.
+ *
+- * RETURNS:
+- * True if the mode was set successfully, or false otherwise.
++ * Returns:
++ * True if the mode was set successfully, false otherwise.
+ */
+ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+@@ -384,6 +278,8 @@
+ struct drm_encoder *encoder;
+ bool ret = true;
+
++ drm_warn_on_modeset_not_all_locked(dev);
++
+ saved_enabled = crtc->enabled;
+ crtc->enabled = drm_helper_crtc_in_use(crtc);
+ if (!crtc->enabled)
+@@ -472,7 +368,7 @@
+ continue;
+
+ DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n",
+- encoder->base.id, drm_get_encoder_name(encoder),
++ encoder->base.id, encoder->name,
+ mode->base.id, mode->name);
+ encoder_funcs = encoder->helper_private;
+ encoder_funcs->mode_set(encoder, mode, adjusted_mode);
+@@ -523,8 +419,7 @@
+ }
+ EXPORT_SYMBOL(drm_crtc_helper_set_mode);
+
+-
+-static int
++static void
+ drm_crtc_helper_disable(struct drm_crtc *crtc)
+ {
+ struct drm_device *dev = crtc->dev;
+@@ -552,25 +447,21 @@
+ }
+ }
+
+- drm_helper_disable_unused_functions(dev);
+- return 0;
++ __drm_helper_disable_unused_functions(dev);
+ }
+
+ /**
+ * drm_crtc_helper_set_config - set a new config from userspace
+ * @set: mode set configuration
+ *
+- * LOCKING:
+- * Caller must hold mode config lock.
+- *
+ * Setup a new configuration, provided by the upper layers (either an ioctl call
+- * from userspace or internally e.g. from the fbdev suppport code) in @set, and
++ * from userspace or internally e.g. from the fbdev support code) in @set, and
+ * enable it. This is the main helper functions for drivers that implement
+ * kernel mode setting with the crtc helper functions and the assorted
+ * ->prepare(), ->modeset() and ->commit() helper callbacks.
+ *
+- * RETURNS:
+- * Returns 0 on success, -ERRNO on failure.
++ * Returns:
++ * Returns 0 on success, negative errno numbers on failure.
+ */
+ int drm_crtc_helper_set_config(struct drm_mode_set *set)
+ {
+@@ -607,11 +498,14 @@
+ (int)set->num_connectors, set->x, set->y);
+ } else {
+ DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
+- return drm_crtc_helper_disable(set->crtc);
++ drm_crtc_helper_disable(set->crtc);
++ return 0;
+ }
+
+ dev = set->crtc->dev;
+
++ drm_warn_on_modeset_not_all_locked(dev);
++
+ /*
+ * Allocate space for the backup of all (non-pointer) encoder and
+ * connector data.
+@@ -647,19 +541,19 @@
+ save_set.mode = &set->crtc->mode;
+ save_set.x = set->crtc->x;
+ save_set.y = set->crtc->y;
+- save_set.fb = set->crtc->fb;
++ save_set.fb = set->crtc->primary->fb;
+
+ /* We should be able to check here if the fb has the same properties
+ * and then just flip_or_move it */
+- if (set->crtc->fb != set->fb) {
++ if (set->crtc->primary->fb != set->fb) {
+ /* If we have no fb then treat it as a full mode set */
+- if (set->crtc->fb == NULL) {
++ if (set->crtc->primary->fb == NULL) {
+ DRM_DEBUG_KMS("crtc has no fb, full mode set\n");
+ mode_changed = true;
+ } else if (set->fb == NULL) {
+ mode_changed = true;
+ } else if (set->fb->pixel_format !=
+- set->crtc->fb->pixel_format) {
++ set->crtc->primary->fb->pixel_format) {
+ mode_changed = true;
+ } else
+ fb_changed = true;
+@@ -689,12 +583,13 @@
+ if (new_encoder == NULL)
+ /* don't break so fail path works correct */
+ fail = 1;
+- break;
+
+ if (connector->dpms != DRM_MODE_DPMS_ON) {
+ DRM_DEBUG_KMS("connector dpms not on, full mode switch\n");
+ mode_changed = true;
+ }
++
++ break;
+ }
+ }
+
+@@ -743,11 +638,11 @@
+ }
+ if (new_crtc) {
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
+- connector->base.id, drm_get_connector_name(connector),
++ connector->base.id, connector->name,
+ new_crtc->base.id);
+ } else {
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n",
+- connector->base.id, drm_get_connector_name(connector));
++ connector->base.id, connector->name);
+ }
+ }
+
+@@ -760,34 +655,34 @@
+ DRM_DEBUG_KMS("attempting to set mode from"
+ " userspace\n");
+ drm_mode_debug_printmodeline(set->mode);
+- set->crtc->fb = set->fb;
++ set->crtc->primary->fb = set->fb;
+ if (!drm_crtc_helper_set_mode(set->crtc, set->mode,
+ set->x, set->y,
+ save_set.fb)) {
+ DRM_ERROR("failed to set mode on [CRTC:%d]\n",
+ set->crtc->base.id);
+- set->crtc->fb = save_set.fb;
++ set->crtc->primary->fb = save_set.fb;
+ ret = -EINVAL;
+ goto fail;
+ }
+ DRM_DEBUG_KMS("Setting connector DPMS state to on\n");
+ for (i = 0; i < set->num_connectors; i++) {
+ DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id,
+- drm_get_connector_name(set->connectors[i]));
++ set->connectors[i]->name);
+ set->connectors[i]->funcs->dpms(set->connectors[i], DRM_MODE_DPMS_ON);
+ }
+ }
+- drm_helper_disable_unused_functions(dev);
++ __drm_helper_disable_unused_functions(dev);
+ } else if (fb_changed) {
+ set->crtc->x = set->x;
+ set->crtc->y = set->y;
+- set->crtc->fb = set->fb;
++ set->crtc->primary->fb = set->fb;
+ ret = crtc_funcs->mode_set_base(set->crtc,
+ set->x, set->y, save_set.fb);
+ if (ret != 0) {
+ set->crtc->x = save_set.x;
+ set->crtc->y = save_set.y;
+- set->crtc->fb = save_set.fb;
++ set->crtc->primary->fb = save_set.fb;
+ goto fail;
+ }
+ }
+@@ -924,8 +819,16 @@
+ }
+ EXPORT_SYMBOL(drm_helper_connector_dpms);
+
+-int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
+- struct drm_mode_fb_cmd2 *mode_cmd)
++/**
++ * drm_helper_mode_fill_fb_struct - fill out framebuffer metadata
++ * @fb: drm_framebuffer object to fill out
++ * @mode_cmd: metadata from the userspace fb creation request
++ *
++ * This helper can be used in a driver's fb_create callback to pre-fill the fb's
++ * metadata fields.
++ */
++void drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
++ struct drm_mode_fb_cmd2 *mode_cmd)
+ {
+ int i;
+
+@@ -938,26 +841,48 @@
+ drm_fb_get_bpp_depth(mode_cmd->pixel_format, &fb->depth,
+ &fb->bits_per_pixel);
+ fb->pixel_format = mode_cmd->pixel_format;
+-
+- return 0;
++ fb->flags = mode_cmd->flags;
+ }
+ EXPORT_SYMBOL(drm_helper_mode_fill_fb_struct);
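A typical ->fb_create implementation fills the metadata with this helper before registering the framebuffer. A hedged sketch with a hypothetical foo_framebuffer wrapper; real drivers also look up and validate the backing GEM objects, which is omitted here:

static struct drm_framebuffer *
foo_fb_create(struct drm_device *dev, struct drm_file *file_priv,
	      struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct foo_framebuffer *fb;
	int ret;

	fb = kzalloc(sizeof(*fb), GFP_KERNEL);
	if (!fb)
		return ERR_PTR(-ENOMEM);

	drm_helper_mode_fill_fb_struct(&fb->base, mode_cmd);

	ret = drm_framebuffer_init(dev, &fb->base, &foo_fb_funcs);
	if (ret) {
		kfree(fb);
		return ERR_PTR(ret);
	}

	return &fb->base;
}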
+
+-int drm_helper_resume_force_mode(struct drm_device *dev)
++/**
++ * drm_helper_resume_force_mode - force-restore mode setting configuration
++ * @dev: drm_device which should be restored
++ *
++ * Drivers which use the mode setting helpers can use this function to
++ * force-restore the mode setting configuration e.g. on resume or when something
++ * else might have trampled over the hw state (like some overzealous old BIOSen
++ * tended to do).
++ *
++ * This helper doesn't provide an error return value since restoring the old
++ * config should never fail due to resource allocation issues since the driver
++ * has successfully set the restored configuration already. Hence this should
++ * boil down to the equivalent of a few dpms on calls, which also don't provide
++ * an error code.
++ *
++ * Drivers where simply restoring an old configuration again might fail (e.g.
++ * due to slight differences in allocating shared resources when the
++ * configuration is restored in a different order than when userspace set it up)
++ * need to use their own restore logic.
++ */
++void drm_helper_resume_force_mode(struct drm_device *dev)
+ {
+ struct drm_crtc *crtc;
+ struct drm_encoder *encoder;
+ struct drm_crtc_helper_funcs *crtc_funcs;
+- int ret, encoder_dpms;
++ int encoder_dpms;
++ bool ret;
+
++ drm_modeset_lock_all(dev);
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+
+ if (!crtc->enabled)
+ continue;
+
+ ret = drm_crtc_helper_set_mode(crtc, &crtc->mode,
+- crtc->x, crtc->y, crtc->fb);
++ crtc->x, crtc->y, crtc->primary->fb);
+
++ /* Restoring the old config should never fail! */
+ if (ret == false)
+ DRM_ERROR("failed to set mode on crtc %p\n", crtc);
+
+@@ -980,155 +905,118 @@
+ drm_helper_choose_crtc_dpms(crtc));
+ }
+ }
++
+ /* disable the unused connectors while restoring the modesetting */
+- drm_helper_disable_unused_functions(dev);
+- return 0;
++ __drm_helper_disable_unused_functions(dev);
++ drm_modeset_unlock_all(dev);
+ }
+ EXPORT_SYMBOL(drm_helper_resume_force_mode);
+
+-void drm_kms_helper_hotplug_event(struct drm_device *dev)
+-{
+- /* send a uevent + call fbdev */
+- drm_sysfs_hotplug_event(dev);
+- if (dev->mode_config.funcs->output_poll_changed)
+- dev->mode_config.funcs->output_poll_changed(dev);
+-}
+-EXPORT_SYMBOL(drm_kms_helper_hotplug_event);
+-
+-#define DRM_OUTPUT_POLL_PERIOD (10*HZ)
+-static void output_poll_execute(struct work_struct *work)
++/**
++ * drm_helper_crtc_mode_set - mode_set implementation for atomic plane helpers
++ * @crtc: DRM CRTC
++ * @mode: DRM display mode which userspace requested
++ * @adjusted_mode: DRM display mode adjusted by ->mode_fixup callbacks
++ * @x: x offset of the CRTC scanout area on the underlying framebuffer
++ * @y: y offset of the CRTC scanout area on the underlying framebuffer
++ * @old_fb: previous framebuffer
++ *
++ * This function implements a callback usable as the ->mode_set callback
++ * required by the crtc helpers. Besides the atomic plane helper functions for
++ * the primary plane the driver must also provide the ->mode_set_nofb callback
++ * to set up the crtc.
++ *
++ * This is a transitional helper useful for converting drivers to the atomic
++ * interfaces.
++ */
++int drm_helper_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
++ struct drm_display_mode *adjusted_mode, int x, int y,
++ struct drm_framebuffer *old_fb)
+ {
+- struct delayed_work *delayed_work = to_delayed_work(work);
+- struct drm_device *dev = container_of(delayed_work, struct drm_device, mode_config.output_poll_work);
+- struct drm_connector *connector;
+- enum drm_connector_status old_status;
+- bool repoll = false, changed = false;
+-
+- if (!drm_kms_helper_poll)
+- return;
+-
+- mutex_lock(&dev->mode_config.mutex);
+- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+-
+- /* Ignore forced connectors. */
+- if (connector->force)
+- continue;
+-
+- /* Ignore HPD capable connectors and connectors where we don't
+- * want any hotplug detection at all for polling. */
+- if (!connector->polled || connector->polled == DRM_CONNECTOR_POLL_HPD)
+- continue;
++ struct drm_crtc_state *crtc_state;
++ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
++ int ret;
+
+- repoll = true;
++ if (crtc->funcs->atomic_duplicate_state)
++ crtc_state = crtc->funcs->atomic_duplicate_state(crtc);
++ else if (crtc->state)
++ crtc_state = kmemdup(crtc->state, sizeof(*crtc_state),
++ GFP_KERNEL);
++ else
++ crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
++ if (!crtc_state)
++ return -ENOMEM;
+
+- old_status = connector->status;
+- /* if we are connected and don't want to poll for disconnect
+- skip it */
+- if (old_status == connector_status_connected &&
+- !(connector->polled & DRM_CONNECTOR_POLL_DISCONNECT))
+- continue;
++ crtc_state->enable = true;
++ crtc_state->planes_changed = true;
++ crtc_state->mode_changed = true;
++ drm_mode_copy(&crtc_state->mode, mode);
++ drm_mode_copy(&crtc_state->adjusted_mode, adjusted_mode);
+
+- connector->status = connector->funcs->detect(connector, false);
+- if (old_status != connector->status) {
+- const char *old, *new;
+-
+- old = drm_get_connector_status_name(old_status);
+- new = drm_get_connector_status_name(connector->status);
+-
+- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] "
+- "status updated from %s to %s\n",
+- connector->base.id,
+- drm_get_connector_name(connector),
+- old, new);
++ if (crtc_funcs->atomic_check) {
++ ret = crtc_funcs->atomic_check(crtc, crtc_state);
++ if (ret) {
++ kfree(crtc_state);
+
+- changed = true;
++ return ret;
+ }
+ }
+
+- mutex_unlock(&dev->mode_config.mutex);
+-
+- if (changed)
+- drm_kms_helper_hotplug_event(dev);
+-
+- if (repoll)
+- schedule_delayed_work(delayed_work, DRM_OUTPUT_POLL_PERIOD);
+-}
++ swap(crtc->state, crtc_state);
+
+-void drm_kms_helper_poll_disable(struct drm_device *dev)
+-{
+- if (!dev->mode_config.poll_enabled)
+- return;
+- cancel_delayed_work_sync(&dev->mode_config.output_poll_work);
+-}
+-EXPORT_SYMBOL(drm_kms_helper_poll_disable);
++ crtc_funcs->mode_set_nofb(crtc);
+
+-void drm_kms_helper_poll_enable(struct drm_device *dev)
+-{
+- bool poll = false;
+- struct drm_connector *connector;
+-
+- if (!dev->mode_config.poll_enabled || !drm_kms_helper_poll)
+- return;
+-
+- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+- if (connector->polled & (DRM_CONNECTOR_POLL_CONNECT |
+- DRM_CONNECTOR_POLL_DISCONNECT))
+- poll = true;
++ if (crtc_state) {
++ if (crtc->funcs->atomic_destroy_state)
++ crtc->funcs->atomic_destroy_state(crtc, crtc_state);
++ else
++ kfree(crtc_state);
+ }
+
+- if (poll)
+- schedule_delayed_work(&dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD);
+-}
+-EXPORT_SYMBOL(drm_kms_helper_poll_enable);
+-
+-void drm_kms_helper_poll_init(struct drm_device *dev)
+-{
+- INIT_DELAYED_WORK(&dev->mode_config.output_poll_work, output_poll_execute);
+- dev->mode_config.poll_enabled = true;
+-
+- drm_kms_helper_poll_enable(dev);
++ return drm_helper_crtc_mode_set_base(crtc, x, y, old_fb);
+ }
+-EXPORT_SYMBOL(drm_kms_helper_poll_init);
++EXPORT_SYMBOL(drm_helper_crtc_mode_set);
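A converting driver wires this helper and drm_helper_crtc_mode_set_base (defined below) into its crtc helper funcs and supplies ->mode_set_nofb plus the atomic plane hooks for the primary plane; a sketch with hypothetical foo_* callbacks:

static const struct drm_crtc_helper_funcs foo_crtc_helper_funcs = {
	.dpms		= foo_crtc_dpms,
	.prepare	= foo_crtc_prepare,
	.commit		= foo_crtc_commit,
	.mode_fixup	= foo_crtc_mode_fixup,
	/* transitional helpers drive the primary plane atomically */
	.mode_set	= drm_helper_crtc_mode_set,
	.mode_set_base	= drm_helper_crtc_mode_set_base,
	.mode_set_nofb	= foo_crtc_mode_set_nofb,
};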
+
+-void drm_kms_helper_poll_fini(struct drm_device *dev)
+-{
+- drm_kms_helper_poll_disable(dev);
+-}
+-EXPORT_SYMBOL(drm_kms_helper_poll_fini);
+-
+-bool drm_helper_hpd_irq_event(struct drm_device *dev)
++/**
++ * drm_helper_crtc_mode_set_base - mode_set_base implementation for atomic plane helpers
++ * @crtc: DRM CRTC
++ * @x: x offset of the CRTC scanout area on the underlying framebuffer
++ * @y: y offset of the CRTC scanout area on the underlying framebuffer
++ * @old_fb: previous framebuffer
++ *
++ * This function implements a callback usable as the ->mode_set_base callback
++ * required by the crtc helpers. The driver must provide the atomic plane helper
++ * functions for the primary plane.
++ *
++ * This is a transitional helper useful for converting drivers to the atomic
++ * interfaces.
++ */
++int drm_helper_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
++ struct drm_framebuffer *old_fb)
+ {
+- struct drm_connector *connector;
+- enum drm_connector_status old_status;
+- bool changed = false;
+-
+- if (!dev->mode_config.poll_enabled)
+- return false;
++ struct drm_plane_state *plane_state;
++ struct drm_plane *plane = crtc->primary;
+
+- mutex_lock(&dev->mode_config.mutex);
+- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+-
+- /* Only handle HPD capable connectors. */
+- if (!(connector->polled & DRM_CONNECTOR_POLL_HPD))
+- continue;
+-
+- old_status = connector->status;
+-
+- connector->status = connector->funcs->detect(connector, false);
+- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
+- connector->base.id,
+- drm_get_connector_name(connector),
+- drm_get_connector_status_name(old_status),
+- drm_get_connector_status_name(connector->status));
+- if (old_status != connector->status)
+- changed = true;
+- }
+-
+- mutex_unlock(&dev->mode_config.mutex);
++ if (plane->funcs->atomic_duplicate_state)
++ plane_state = plane->funcs->atomic_duplicate_state(plane);
++ else if (plane->state)
++ plane_state = drm_atomic_helper_plane_duplicate_state(plane);
++ else
++ plane_state = kzalloc(sizeof(*plane_state), GFP_KERNEL);
++ if (!plane_state)
++ return -ENOMEM;
+
+- if (changed)
+- drm_kms_helper_hotplug_event(dev);
++ plane_state->crtc = crtc;
++ drm_atomic_set_fb_for_plane(plane_state, crtc->primary->fb);
++ plane_state->crtc_x = 0;
++ plane_state->crtc_y = 0;
++ plane_state->crtc_h = crtc->mode.vdisplay;
++ plane_state->crtc_w = crtc->mode.hdisplay;
++ plane_state->src_x = x << 16;
++ plane_state->src_y = y << 16;
++ plane_state->src_h = crtc->mode.vdisplay << 16;
++ plane_state->src_w = crtc->mode.hdisplay << 16;
+
+- return changed;
++ return drm_plane_helper_commit(plane, plane_state, old_fb);
+ }
+-EXPORT_SYMBOL(drm_helper_hpd_irq_event);
++EXPORT_SYMBOL(drm_helper_crtc_mode_set_base);
+diff -Naur a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h
+--- a/drivers/gpu/drm/drm_crtc_internal.h 1970-01-01 05:30:00.000000000 +0530
++++ b/drivers/gpu/drm/drm_crtc_internal.h 2015-03-26 14:42:38.714435422 +0530
+@@ -0,0 +1,38 @@
++/*
++ * Copyright © 2006 Keith Packard
++ * Copyright © 2007-2008 Dave Airlie
++ * Copyright © 2007-2008 Intel Corporation
++ * Jesse Barnes <jesse.barnes@intel.com>
++ * Copyright © 2014 Intel Corporation
++ * Daniel Vetter <daniel.vetter@ffwll.ch>
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++/*
++ * This header file contains mode setting related functions and definitions
++ * which are only used within the drm module as internal implementation details
++ * and are not exported to drivers.
++ */
++
++int drm_mode_object_get(struct drm_device *dev,
++ struct drm_mode_object *obj, uint32_t obj_type);
++void drm_mode_object_put(struct drm_device *dev,
++ struct drm_mode_object *object);
++
+diff -Naur a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
+--- a/drivers/gpu/drm/drm_debugfs.c 2015-03-26 14:43:30.402436435 +0530
++++ b/drivers/gpu/drm/drm_debugfs.c 2015-03-26 14:42:38.714435422 +0530
+@@ -35,6 +35,8 @@
+ #include <linux/slab.h>
+ #include <linux/export.h>
+ #include <drm/drmP.h>
++#include <drm/drm_edid.h>
++#include "drm_internal.h"
+
+ #if defined(CONFIG_DEBUG_FS)
+
+@@ -48,9 +50,7 @@
+ {"clients", drm_clients_info, 0},
+ {"bufs", drm_bufs_info, 0},
+ {"gem_names", drm_gem_name_info, DRIVER_GEM},
+-#if DRM_DEBUG_CODE
+ {"vma", drm_vma_info, 0},
+-#endif
+ };
+ #define DRM_DEBUGFS_ENTRIES ARRAY_SIZE(drm_debugfs_list)
+
+@@ -237,5 +237,186 @@
+ return 0;
+ }
+
++static int connector_show(struct seq_file *m, void *data)
++{
++ struct drm_connector *connector = m->private;
++ const char *status;
++
++ switch (connector->force) {
++ case DRM_FORCE_ON:
++ status = "on\n";
++ break;
++
++ case DRM_FORCE_ON_DIGITAL:
++ status = "digital\n";
++ break;
++
++ case DRM_FORCE_OFF:
++ status = "off\n";
++ break;
++
++ case DRM_FORCE_UNSPECIFIED:
++ status = "unspecified\n";
++ break;
++
++ default:
++ return 0;
++ }
++
++ seq_puts(m, status);
++
++ return 0;
++}
++
++static int connector_open(struct inode *inode, struct file *file)
++{
++ struct drm_connector *dev = inode->i_private;
++
++ return single_open(file, connector_show, dev);
++}
++
++static ssize_t connector_write(struct file *file, const char __user *ubuf,
++ size_t len, loff_t *offp)
++{
++ struct seq_file *m = file->private_data;
++ struct drm_connector *connector = m->private;
++ char buf[12];
++
++ if (len > sizeof(buf) - 1)
++ return -EINVAL;
++
++ if (copy_from_user(buf, ubuf, len))
++ return -EFAULT;
++
++ buf[len] = '\0';
++
++ if (!strcmp(buf, "on"))
++ connector->force = DRM_FORCE_ON;
++ else if (!strcmp(buf, "digital"))
++ connector->force = DRM_FORCE_ON_DIGITAL;
++ else if (!strcmp(buf, "off"))
++ connector->force = DRM_FORCE_OFF;
++ else if (!strcmp(buf, "unspecified"))
++ connector->force = DRM_FORCE_UNSPECIFIED;
++ else
++ return -EINVAL;
++
++ return len;
++}
++
++static int edid_show(struct seq_file *m, void *data)
++{
++ struct drm_connector *connector = m->private;
++ struct drm_property_blob *edid = connector->edid_blob_ptr;
++
++ if (connector->override_edid && edid)
++ seq_write(m, edid->data, edid->length);
++
++ return 0;
++}
++
++static int edid_open(struct inode *inode, struct file *file)
++{
++ struct drm_connector *dev = inode->i_private;
++
++ return single_open(file, edid_show, dev);
++}
++
++static ssize_t edid_write(struct file *file, const char __user *ubuf,
++ size_t len, loff_t *offp)
++{
++ struct seq_file *m = file->private_data;
++ struct drm_connector *connector = m->private;
++ char *buf;
++ struct edid *edid;
++ int ret;
++
++ buf = memdup_user(ubuf, len);
++ if (IS_ERR(buf))
++ return PTR_ERR(buf);
++
++ edid = (struct edid *) buf;
++
++ if (len == 5 && !strncmp(buf, "reset", 5)) {
++ connector->override_edid = false;
++ ret = drm_mode_connector_update_edid_property(connector, NULL);
++ } else if (len < EDID_LENGTH ||
++ EDID_LENGTH * (1 + edid->extensions) > len)
++ ret = -EINVAL;
++ else {
++ connector->override_edid = false;
++ ret = drm_mode_connector_update_edid_property(connector, edid);
++ if (!ret)
++ connector->override_edid = true;
++ }
++
++ kfree(buf);
++
++ return (ret) ? ret : len;
++}
++
++static const struct file_operations drm_edid_fops = {
++ .owner = THIS_MODULE,
++ .open = edid_open,
++ .read = seq_read,
++ .llseek = seq_lseek,
++ .release = single_release,
++ .write = edid_write
++};
++
++
++static const struct file_operations drm_connector_fops = {
++ .owner = THIS_MODULE,
++ .open = connector_open,
++ .read = seq_read,
++ .llseek = seq_lseek,
++ .release = single_release,
++ .write = connector_write
++};
++
++int drm_debugfs_connector_add(struct drm_connector *connector)
++{
++ struct drm_minor *minor = connector->dev->primary;
++ struct dentry *root, *ent;
++
++ if (!minor->debugfs_root)
++ return -1;
++
++ root = debugfs_create_dir(connector->name, minor->debugfs_root);
++ if (!root)
++ return -ENOMEM;
++
++ connector->debugfs_entry = root;
++
++ /* force */
++ ent = debugfs_create_file("force", S_IRUGO | S_IWUSR, root, connector,
++ &drm_connector_fops);
++ if (!ent)
++ goto error;
++
++ /* edid */
++ ent = debugfs_create_file("edid_override", S_IRUGO | S_IWUSR, root,
++ connector, &drm_edid_fops);
++ if (!ent)
++ goto error;
++
++ return 0;
++
++error:
++ debugfs_remove_recursive(connector->debugfs_entry);
++ connector->debugfs_entry = NULL;
++ return -ENOMEM;
++}
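(Usage note, with illustrative paths that are not taken from the patch itself: for DRM minor 0 and a connector named DP-1, this function creates /sys/kernel/debug/dri/0/DP-1/force and /sys/kernel/debug/dri/0/DP-1/edid_override. Writing one of "on", "digital", "off" or "unspecified" to force goes through connector_write() above, while writing either "reset" or a raw EDID blob to edid_override exercises edid_write().)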
++
++void drm_debugfs_connector_remove(struct drm_connector *connector)
++{
++ if (!connector->debugfs_entry)
++ return;
++
++ debugfs_remove_recursive(connector->debugfs_entry);
++
++ connector->debugfs_entry = NULL;
++}
++
+ #endif /* CONFIG_DEBUG_FS */
+
+diff -Naur a/drivers/gpu/drm/drm_dma.c b/drivers/gpu/drm/drm_dma.c
+--- a/drivers/gpu/drm/drm_dma.c 2015-03-26 14:43:30.418436435 +0530
++++ b/drivers/gpu/drm/drm_dma.c 2015-03-26 14:42:38.714435422 +0530
+@@ -35,6 +35,7 @@
+
+ #include <linux/export.h>
+ #include <drm/drmP.h>
++#include "drm_legacy.h"
+
+ /**
+ * Initialize the DMA data.
+@@ -124,7 +125,7 @@
+ *
+ * Resets the fields of \p buf.
+ */
+-void drm_free_buffer(struct drm_device *dev, struct drm_buf * buf)
++void drm_legacy_free_buffer(struct drm_device *dev, struct drm_buf * buf)
+ {
+ if (!buf)
+ return;
+@@ -142,8 +143,8 @@
+ *
+ * Frees each buffer associated with \p file_priv not already on the hardware.
+ */
+-void drm_core_reclaim_buffers(struct drm_device *dev,
+- struct drm_file *file_priv)
++void drm_legacy_reclaim_buffers(struct drm_device *dev,
++ struct drm_file *file_priv)
+ {
+ struct drm_device_dma *dma = dev->dma;
+ int i;
+@@ -154,7 +155,7 @@
+ if (dma->buflist[i]->file_priv == file_priv) {
+ switch (dma->buflist[i]->list) {
+ case DRM_LIST_NONE:
+- drm_free_buffer(dev, dma->buflist[i]);
++ drm_legacy_free_buffer(dev, dma->buflist[i]);
+ break;
+ case DRM_LIST_WAIT:
+ dma->buflist[i]->list = DRM_LIST_RECLAIM;
+@@ -166,5 +167,3 @@
+ }
+ }
+ }
+-
+-EXPORT_SYMBOL(drm_core_reclaim_buffers);
+diff -Naur a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
+--- a/drivers/gpu/drm/drm_dp_helper.c 2015-03-26 14:43:30.410436435 +0530
++++ b/drivers/gpu/drm/drm_dp_helper.c 2015-03-26 14:42:38.714435422 +0530
+@@ -39,194 +39,6 @@
+ * blocks, ...
+ */
+
+-/* Run a single AUX_CH I2C transaction, writing/reading data as necessary */
+-static int
+-i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode,
+- uint8_t write_byte, uint8_t *read_byte)
+-{
+- struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+- int ret;
+-
+- ret = (*algo_data->aux_ch)(adapter, mode,
+- write_byte, read_byte);
+- return ret;
+-}
+-
+-/*
+- * I2C over AUX CH
+- */
+-
+-/*
+- * Send the address. If the I2C link is running, this 'restarts'
+- * the connection with the new address, this is used for doing
+- * a write followed by a read (as needed for DDC)
+- */
+-static int
+-i2c_algo_dp_aux_address(struct i2c_adapter *adapter, u16 address, bool reading)
+-{
+- struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+- int mode = MODE_I2C_START;
+- int ret;
+-
+- if (reading)
+- mode |= MODE_I2C_READ;
+- else
+- mode |= MODE_I2C_WRITE;
+- algo_data->address = address;
+- algo_data->running = true;
+- ret = i2c_algo_dp_aux_transaction(adapter, mode, 0, NULL);
+- return ret;
+-}
+-
+-/*
+- * Stop the I2C transaction. This closes out the link, sending
+- * a bare address packet with the MOT bit turned off
+- */
+-static void
+-i2c_algo_dp_aux_stop(struct i2c_adapter *adapter, bool reading)
+-{
+- struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+- int mode = MODE_I2C_STOP;
+-
+- if (reading)
+- mode |= MODE_I2C_READ;
+- else
+- mode |= MODE_I2C_WRITE;
+- if (algo_data->running) {
+- (void) i2c_algo_dp_aux_transaction(adapter, mode, 0, NULL);
+- algo_data->running = false;
+- }
+-}
+-
+-/*
+- * Write a single byte to the current I2C address, the
+- * the I2C link must be running or this returns -EIO
+- */
+-static int
+-i2c_algo_dp_aux_put_byte(struct i2c_adapter *adapter, u8 byte)
+-{
+- struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+- int ret;
+-
+- if (!algo_data->running)
+- return -EIO;
+-
+- ret = i2c_algo_dp_aux_transaction(adapter, MODE_I2C_WRITE, byte, NULL);
+- return ret;
+-}
+-
+-/*
+- * Read a single byte from the current I2C address, the
+- * I2C link must be running or this returns -EIO
+- */
+-static int
+-i2c_algo_dp_aux_get_byte(struct i2c_adapter *adapter, u8 *byte_ret)
+-{
+- struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+- int ret;
+-
+- if (!algo_data->running)
+- return -EIO;
+-
+- ret = i2c_algo_dp_aux_transaction(adapter, MODE_I2C_READ, 0, byte_ret);
+- return ret;
+-}
+-
+-static int
+-i2c_algo_dp_aux_xfer(struct i2c_adapter *adapter,
+- struct i2c_msg *msgs,
+- int num)
+-{
+- int ret = 0;
+- bool reading = false;
+- int m;
+- int b;
+-
+- for (m = 0; m < num; m++) {
+- u16 len = msgs[m].len;
+- u8 *buf = msgs[m].buf;
+- reading = (msgs[m].flags & I2C_M_RD) != 0;
+- ret = i2c_algo_dp_aux_address(adapter, msgs[m].addr, reading);
+- if (ret < 0)
+- break;
+- if (reading) {
+- for (b = 0; b < len; b++) {
+- ret = i2c_algo_dp_aux_get_byte(adapter, &buf[b]);
+- if (ret < 0)
+- break;
+- }
+- } else {
+- for (b = 0; b < len; b++) {
+- ret = i2c_algo_dp_aux_put_byte(adapter, buf[b]);
+- if (ret < 0)
+- break;
+- }
+- }
+- if (ret < 0)
+- break;
+- }
+- if (ret >= 0)
+- ret = num;
+- i2c_algo_dp_aux_stop(adapter, reading);
+- DRM_DEBUG_KMS("dp_aux_xfer return %d\n", ret);
+- return ret;
+-}
+-
+-static u32
+-i2c_algo_dp_aux_functionality(struct i2c_adapter *adapter)
+-{
+- return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
+- I2C_FUNC_SMBUS_READ_BLOCK_DATA |
+- I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
+- I2C_FUNC_10BIT_ADDR;
+-}
+-
+-static const struct i2c_algorithm i2c_dp_aux_algo = {
+- .master_xfer = i2c_algo_dp_aux_xfer,
+- .functionality = i2c_algo_dp_aux_functionality,
+-};
+-
+-static void
+-i2c_dp_aux_reset_bus(struct i2c_adapter *adapter)
+-{
+- (void) i2c_algo_dp_aux_address(adapter, 0, false);
+- (void) i2c_algo_dp_aux_stop(adapter, false);
+-}
+-
+-static int
+-i2c_dp_aux_prepare_bus(struct i2c_adapter *adapter)
+-{
+- adapter->algo = &i2c_dp_aux_algo;
+- adapter->retries = 3;
+- i2c_dp_aux_reset_bus(adapter);
+- return 0;
+-}
+-
+-/**
+- * i2c_dp_aux_add_bus() - register an i2c adapter using the aux ch helper
+- * @adapter: i2c adapter to register
+- *
+- * This registers an i2c adapater that uses dp aux channel as it's underlaying
+- * transport. The driver needs to fill out the &i2c_algo_dp_aux_data structure
+- * and store it in the algo_data member of the @adapter argument. This will be
+- * used by the i2c over dp aux algorithm to drive the hardware.
+- *
+- * RETURNS:
+- * 0 on success, -ERRNO on failure.
+- */
+-int
+-i2c_dp_aux_add_bus(struct i2c_adapter *adapter)
+-{
+- int error;
+-
+- error = i2c_dp_aux_prepare_bus(adapter);
+- if (error)
+- return error;
+- error = i2c_add_adapter(adapter);
+- return error;
+-}
+-EXPORT_SYMBOL(i2c_dp_aux_add_bus);
+-
+ /* Helpers for DP link training */
+ static u8 dp_link_status(const u8 link_status[DP_LINK_STATUS_SIZE], int r)
+ {
+@@ -346,3 +158,424 @@
+ }
+ }
+ EXPORT_SYMBOL(drm_dp_bw_code_to_link_rate);
++
++/**
++ * DOC: dp helpers
++ *
++ * The DisplayPort AUX channel is an abstraction to allow generic, driver-
++ * independent access to AUX functionality. Drivers can take advantage of
++ * this by filling in the fields of the drm_dp_aux structure.
++ *
++ * Transactions are described using a hardware-independent drm_dp_aux_msg
++ * structure, which is passed into a driver's .transfer() implementation.
++ * Both native and I2C-over-AUX transactions are supported.
++ */
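To make the abstraction described above concrete, here is a minimal sketch of a driver wiring up struct drm_dp_aux. The my_* names are invented for illustration, and my_aux_transfer() merely stands in for real AUX register programming; only the drm_dp_aux and drm_dp_aux_msg fields come from the helper code in this patch.

#include <linux/device.h>
#include <drm/drm_dp_helper.h>

struct my_output {				/* hypothetical driver state */
	struct drm_dp_aux aux;
};

static ssize_t my_aux_transfer(struct drm_dp_aux *aux,
			       struct drm_dp_aux_msg *msg)
{
	/*
	 * A real implementation programs the AUX channel from msg->request,
	 * msg->address, msg->buffer and msg->size, fills in msg->reply from
	 * the hardware status, and returns the number of bytes transferred
	 * or a negative error code.
	 */
	msg->reply = DP_AUX_NATIVE_REPLY_ACK;
	return msg->size;
}

static int my_output_init(struct my_output *out, struct device *dev)
{
	out->aux.name = "my-dp-aux";	/* becomes the i2c adapter name */
	out->aux.dev = dev;
	out->aux.transfer = my_aux_transfer;

	/* also creates the I2C-over-AUX adapter at out->aux.ddc */
	return drm_dp_aux_register(&out->aux);
}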
++
++static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
++ unsigned int offset, void *buffer, size_t size)
++{
++ struct drm_dp_aux_msg msg;
++ unsigned int retry;
++ int err;
++
++ memset(&msg, 0, sizeof(msg));
++ msg.address = offset;
++ msg.request = request;
++ msg.buffer = buffer;
++ msg.size = size;
++
++ /*
++ * The specification doesn't give any recommendation on how often to
++ * retry native transactions. We used to retry 7 times like for
++ * aux i2c transactions, but for real-world devices this wasn't
++ * sufficient; bump to 32, which makes Dell 4k monitors happier.
++ */
++ for (retry = 0; retry < 32; retry++) {
++
++ mutex_lock(&aux->hw_mutex);
++ err = aux->transfer(aux, &msg);
++ mutex_unlock(&aux->hw_mutex);
++ if (err < 0) {
++ if (err == -EBUSY)
++ continue;
++
++ return err;
++ }
++
++ switch (msg.reply & DP_AUX_NATIVE_REPLY_MASK) {
++ case DP_AUX_NATIVE_REPLY_ACK:
++ if (err < size)
++ return -EPROTO;
++ return err;
++
++ case DP_AUX_NATIVE_REPLY_NACK:
++ return -EIO;
++
++ case DP_AUX_NATIVE_REPLY_DEFER:
++ usleep_range(400, 500);
++ break;
++ }
++ }
++
++ DRM_DEBUG_KMS("too many retries, giving up\n");
++ return -EIO;
++}
++
++/**
++ * drm_dp_dpcd_read() - read a series of bytes from the DPCD
++ * @aux: DisplayPort AUX channel
++ * @offset: address of the (first) register to read
++ * @buffer: buffer to store the register values
++ * @size: number of bytes in @buffer
++ *
++ * Returns the number of bytes transferred on success, or a negative error
++ * code on failure. -EIO is returned if the request was NAKed by the sink or
++ * if the retry count was exceeded. If not all bytes were transferred, this
++ * function returns -EPROTO. Errors from the underlying AUX channel transfer
++ * function, with the exception of -EBUSY (which causes the transaction to
++ * be retried), are propagated to the caller.
++ */
++ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset,
++ void *buffer, size_t size)
++{
++ return drm_dp_dpcd_access(aux, DP_AUX_NATIVE_READ, offset, buffer,
++ size);
++}
++EXPORT_SYMBOL(drm_dp_dpcd_read);
++
++/**
++ * drm_dp_dpcd_write() - write a series of bytes to the DPCD
++ * @aux: DisplayPort AUX channel
++ * @offset: address of the (first) register to write
++ * @buffer: buffer containing the values to write
++ * @size: number of bytes in @buffer
++ *
++ * Returns the number of bytes transferred on success, or a negative error
++ * code on failure. -EIO is returned if the request was NAKed by the sink or
++ * if the retry count was exceeded. If not all bytes were transferred, this
++ * function returns -EPROTO. Errors from the underlying AUX channel transfer
++ * function, with the exception of -EBUSY (which causes the transaction to
++ * be retried), are propagated to the caller.
++ */
++ssize_t drm_dp_dpcd_write(struct drm_dp_aux *aux, unsigned int offset,
++ void *buffer, size_t size)
++{
++ return drm_dp_dpcd_access(aux, DP_AUX_NATIVE_WRITE, offset, buffer,
++ size);
++}
++EXPORT_SYMBOL(drm_dp_dpcd_write);
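As a usage sketch (the wrapper name is hypothetical), reading a single DPCD register with the helper above looks like this; the error semantics match the kernel-doc:

#include <drm/drmP.h>
#include <drm/drm_dp_helper.h>

static int my_log_dpcd_rev(struct drm_dp_aux *aux)
{
	u8 rev;
	ssize_t err;

	/* single-byte read of DPCD register 0x000 (DP_DPCD_REV) */
	err = drm_dp_dpcd_read(aux, DP_DPCD_REV, &rev, sizeof(rev));
	if (err < 0)
		return err;	/* -EIO on NAK, -EPROTO on short transfer */

	DRM_DEBUG_KMS("DPCD revision %d.%d\n", rev >> 4, rev & 0xf);
	return 0;
}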
++
++/**
++ * drm_dp_dpcd_read_link_status() - read DPCD link status (bytes 0x202-0x207)
++ * @aux: DisplayPort AUX channel
++ * @status: buffer to store the link status in (must be at least 6 bytes)
++ *
++ * Returns the number of bytes transferred on success or a negative error
++ * code on failure.
++ */
++int drm_dp_dpcd_read_link_status(struct drm_dp_aux *aux,
++ u8 status[DP_LINK_STATUS_SIZE])
++{
++ return drm_dp_dpcd_read(aux, DP_LANE0_1_STATUS, status,
++ DP_LINK_STATUS_SIZE);
++}
++EXPORT_SYMBOL(drm_dp_dpcd_read_link_status);
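A common consumer of this helper is link monitoring. A hedged sketch, assuming lane_count is already known to the caller and using drm_dp_channel_eq_ok(), a pre-existing link-training helper in drm_dp_helper.h:

#include <drm/drm_dp_helper.h>

static bool my_link_needs_retrain(struct drm_dp_aux *aux, int lane_count)
{
	u8 status[DP_LINK_STATUS_SIZE];

	if (drm_dp_dpcd_read_link_status(aux, status) < 0)
		return true;	/* treat an AUX failure as needing attention */

	/* channel equalization lost -> the link should be retrained */
	return !drm_dp_channel_eq_ok(status, lane_count);
}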
++
++/**
++ * drm_dp_link_probe() - probe a DisplayPort link for capabilities
++ * @aux: DisplayPort AUX channel
++ * @link: pointer to structure in which to return link capabilities
++ *
++ * The structure filled in by this function can usually be passed directly
++ * into drm_dp_link_power_up() and drm_dp_link_configure() to power up and
++ * configure the link based on the link's capabilities.
++ *
++ * Returns 0 on success or a negative error code on failure.
++ */
++int drm_dp_link_probe(struct drm_dp_aux *aux, struct drm_dp_link *link)
++{
++ u8 values[3];
++ int err;
++
++ memset(link, 0, sizeof(*link));
++
++ err = drm_dp_dpcd_read(aux, DP_DPCD_REV, values, sizeof(values));
++ if (err < 0)
++ return err;
++
++ link->revision = values[0];
++ link->rate = drm_dp_bw_code_to_link_rate(values[1]);
++ link->num_lanes = values[2] & DP_MAX_LANE_COUNT_MASK;
++
++ if (values[2] & DP_ENHANCED_FRAME_CAP)
++ link->capabilities |= DP_LINK_CAP_ENHANCED_FRAMING;
++
++ return 0;
++}
++EXPORT_SYMBOL(drm_dp_link_probe);
++
++/**
++ * drm_dp_link_power_up() - power up a DisplayPort link
++ * @aux: DisplayPort AUX channel
++ * @link: pointer to a structure containing the link configuration
++ *
++ * Returns 0 on success or a negative error code on failure.
++ */
++int drm_dp_link_power_up(struct drm_dp_aux *aux, struct drm_dp_link *link)
++{
++ u8 value;
++ int err;
++
++ /* DP_SET_POWER register is only available on DPCD v1.1 and later */
++ if (link->revision < 0x11)
++ return 0;
++
++ err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
++ if (err < 0)
++ return err;
++
++ value &= ~DP_SET_POWER_MASK;
++ value |= DP_SET_POWER_D0;
++
++ err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
++ if (err < 0)
++ return err;
++
++ /*
++ * According to the DP 1.1 specification, a "Sink Device must exit the
++ * power saving state within 1 ms" (Section 2.5.3.1, Table 5-52, "Sink
++ * Control Field" (register 0x600).
++ */
++ usleep_range(1000, 2000);
++
++ return 0;
++}
++EXPORT_SYMBOL(drm_dp_link_power_up);
++
++/**
++ * drm_dp_link_configure() - configure a DisplayPort link
++ * @aux: DisplayPort AUX channel
++ * @link: pointer to a structure containing the link configuration
++ *
++ * Returns 0 on success or a negative error code on failure.
++ */
++int drm_dp_link_configure(struct drm_dp_aux *aux, struct drm_dp_link *link)
++{
++ u8 values[2];
++ int err;
++
++ values[0] = drm_dp_link_rate_to_bw_code(link->rate);
++ values[1] = link->num_lanes;
++
++ if (link->capabilities & DP_LINK_CAP_ENHANCED_FRAMING)
++ values[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
++
++ err = drm_dp_dpcd_write(aux, DP_LINK_BW_SET, values, sizeof(values));
++ if (err < 0)
++ return err;
++
++ return 0;
++}
++EXPORT_SYMBOL(drm_dp_link_configure);
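Taken together, drm_dp_link_probe(), drm_dp_link_power_up() and drm_dp_link_configure() form a natural bring-up sequence. A sketch, with a hypothetical wrapper name:

#include <drm/drm_dp_helper.h>

static int my_link_bringup(struct drm_dp_aux *aux)
{
	struct drm_dp_link link;
	int err;

	err = drm_dp_link_probe(aux, &link);	/* rate, lane count, caps */
	if (err < 0)
		return err;

	err = drm_dp_link_power_up(aux, &link);	/* DP_SET_POWER -> D0 */
	if (err < 0)
		return err;

	/* writes DP_LINK_BW_SET and the lane count / enhanced framing */
	return drm_dp_link_configure(aux, &link);
}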
++
++/*
++ * I2C-over-AUX implementation
++ */
++
++static u32 drm_dp_i2c_functionality(struct i2c_adapter *adapter)
++{
++ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
++ I2C_FUNC_SMBUS_READ_BLOCK_DATA |
++ I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
++ I2C_FUNC_10BIT_ADDR;
++}
++
++/*
++ * Transfer a single I2C-over-AUX message and handle various error conditions,
++ * retrying the transaction as appropriate. It is assumed that the
++ * aux->transfer function does not modify anything in the msg other than the
++ * reply field.
++ */
++static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
++{
++ unsigned int retry;
++ int err;
++
++ /*
++ * DP1.2 sections 2.7.7.1.5.6.1 and 2.7.7.1.6.6.1: A DP Source device
++ * is required to retry at least seven times upon receiving AUX_DEFER
++ * before giving up the AUX transaction.
++ */
++ for (retry = 0; retry < 7; retry++) {
++ mutex_lock(&aux->hw_mutex);
++ err = aux->transfer(aux, msg);
++ mutex_unlock(&aux->hw_mutex);
++ if (err < 0) {
++ if (err == -EBUSY)
++ continue;
++
++ DRM_DEBUG_KMS("transaction failed: %d\n", err);
++ return err;
++ }
++
++ switch (msg->reply & DP_AUX_NATIVE_REPLY_MASK) {
++ case DP_AUX_NATIVE_REPLY_ACK:
++ /*
++ * For I2C-over-AUX transactions this isn't enough, we
++ * need to check for the I2C ACK reply.
++ */
++ break;
++
++ case DP_AUX_NATIVE_REPLY_NACK:
++ DRM_DEBUG_KMS("native nack\n");
++ return -EREMOTEIO;
++
++ case DP_AUX_NATIVE_REPLY_DEFER:
++ DRM_DEBUG_KMS("native defer");
++ /*
++ * We could check for I2C bit rate capabilities and if
++ * available adjust this interval. We could also be
++ * more careful with DP-to-legacy adapters where a
++ * long legacy cable may force very low I2C bit rates.
++ *
++ * For now just defer for long enough to hopefully be
++ * safe for all use-cases.
++ */
++ usleep_range(500, 600);
++ continue;
++
++ default:
++ DRM_ERROR("invalid native reply %#04x\n", msg->reply);
++ return -EREMOTEIO;
++ }
++
++ switch (msg->reply & DP_AUX_I2C_REPLY_MASK) {
++ case DP_AUX_I2C_REPLY_ACK:
++ /*
++ * Both native ACK and I2C ACK replies received. We
++ * can assume the transfer was successful.
++ */
++ if (err < msg->size)
++ return -EPROTO;
++ return 0;
++
++ case DP_AUX_I2C_REPLY_NACK:
++ DRM_DEBUG_KMS("I2C nack\n");
++ aux->i2c_nack_count++;
++ return -EREMOTEIO;
++
++ case DP_AUX_I2C_REPLY_DEFER:
++ DRM_DEBUG_KMS("I2C defer\n");
++ aux->i2c_defer_count++;
++ usleep_range(400, 500);
++ continue;
++
++ default:
++ DRM_ERROR("invalid I2C reply %#04x\n", msg->reply);
++ return -EREMOTEIO;
++ }
++ }
++
++ DRM_DEBUG_KMS("too many retries, giving up\n");
++ return -EREMOTEIO;
++}
++
++static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
++ int num)
++{
++ struct drm_dp_aux *aux = adapter->algo_data;
++ unsigned int i, j;
++ struct drm_dp_aux_msg msg;
++ int err = 0;
++
++ memset(&msg, 0, sizeof(msg));
++
++ for (i = 0; i < num; i++) {
++ msg.address = msgs[i].addr;
++ msg.request = (msgs[i].flags & I2C_M_RD) ?
++ DP_AUX_I2C_READ :
++ DP_AUX_I2C_WRITE;
++ msg.request |= DP_AUX_I2C_MOT;
++ /* Send a bare address packet to start the transaction.
++ * Zero sized messages specify an address only (bare
++ * address) transaction.
++ */
++ msg.buffer = NULL;
++ msg.size = 0;
++ err = drm_dp_i2c_do_msg(aux, &msg);
++ if (err < 0)
++ break;
++ /*
++ * Many hardware implementations support FIFOs larger than a
++ * single byte, but it has been empirically determined that
++ * transferring data in larger chunks can actually lead to
++ * decreased performance. Therefore each message is simply
++ * transferred byte-by-byte.
++ */
++ for (j = 0; j < msgs[i].len; j++) {
++ msg.buffer = msgs[i].buf + j;
++ msg.size = 1;
++
++ err = drm_dp_i2c_do_msg(aux, &msg);
++ if (err < 0)
++ break;
++ }
++ if (err < 0)
++ break;
++ }
++ if (err >= 0)
++ err = num;
++ /* Send a bare address packet to close out the transaction.
++ * Zero sized messages specify an address only (bare
++ * address) transaction.
++ */
++ msg.request &= ~DP_AUX_I2C_MOT;
++ msg.buffer = NULL;
++ msg.size = 0;
++ (void)drm_dp_i2c_do_msg(aux, &msg);
++
++ return err;
++}
++
++static const struct i2c_algorithm drm_dp_i2c_algo = {
++ .functionality = drm_dp_i2c_functionality,
++ .master_xfer = drm_dp_i2c_xfer,
++};
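Because drm_dp_i2c_algo plugs into a standard struct i2c_adapter, any stock I2C user works over AUX once drm_dp_aux_register() (below) has run. For example, fetching an EDID (a sketch; the function name is hypothetical, drm_get_edid() is the existing DRM helper):

#include <drm/drmP.h>
#include <drm/drm_edid.h>
#include <drm/drm_dp_helper.h>

static struct edid *my_get_dp_edid(struct drm_connector *connector,
				   struct drm_dp_aux *aux)
{
	/*
	 * drm_get_edid() issues plain I2C reads at the DDC address; the
	 * drm_dp_i2c_algo above turns each one into I2C-over-AUX messages.
	 */
	return drm_get_edid(connector, &aux->ddc);
}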
++
++/**
++ * drm_dp_aux_register() - initialise and register aux channel
++ * @aux: DisplayPort AUX channel
++ *
++ * Returns 0 on success or a negative error code on failure.
++ */
++int drm_dp_aux_register(struct drm_dp_aux *aux)
++{
++ mutex_init(&aux->hw_mutex);
++
++ aux->ddc.algo = &drm_dp_i2c_algo;
++ aux->ddc.algo_data = aux;
++ aux->ddc.retries = 3;
++
++ aux->ddc.class = I2C_CLASS_DDC;
++ aux->ddc.owner = THIS_MODULE;
++ aux->ddc.dev.parent = aux->dev;
++ aux->ddc.dev.of_node = aux->dev->of_node;
++
++ strlcpy(aux->ddc.name, aux->name ? aux->name : dev_name(aux->dev),
++ sizeof(aux->ddc.name));
++
++ return i2c_add_adapter(&aux->ddc);
++}
++EXPORT_SYMBOL(drm_dp_aux_register);
++
++/**
++ * drm_dp_aux_unregister() - unregister an AUX adapter
++ * @aux: DisplayPort AUX channel
++ */
++void drm_dp_aux_unregister(struct drm_dp_aux *aux)
++{
++ i2c_del_adapter(&aux->ddc);
++}
++EXPORT_SYMBOL(drm_dp_aux_unregister);
+diff -Naur a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
+--- a/drivers/gpu/drm/drm_dp_mst_topology.c 1970-01-01 05:30:00.000000000 +0530
++++ b/drivers/gpu/drm/drm_dp_mst_topology.c 2015-03-26 14:42:38.718435422 +0530
+@@ -0,0 +1,2789 @@
++/*
++ * Copyright © 2014 Red Hat
++ *
++ * Permission to use, copy, modify, distribute, and sell this software and its
++ * documentation for any purpose is hereby granted without fee, provided that
++ * the above copyright notice appear in all copies and that both that copyright
++ * notice and this permission notice appear in supporting documentation, and
++ * that the name of the copyright holders not be used in advertising or
++ * publicity pertaining to distribution of the software without specific,
++ * written prior permission. The copyright holders make no representations
++ * about the suitability of this software for any purpose. It is provided "as
++ * is" without express or implied warranty.
++ *
++ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
++ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
++ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
++ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
++ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
++ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
++ * OF THIS SOFTWARE.
++ */
++
++#include <linux/kernel.h>
++#include <linux/delay.h>
++#include <linux/init.h>
++#include <linux/errno.h>
++#include <linux/sched.h>
++#include <linux/seq_file.h>
++#include <linux/i2c.h>
++#include <drm/drm_dp_mst_helper.h>
++#include <drm/drmP.h>
++
++#include <drm/drm_fixed.h>
++
++/**
++ * DOC: dp mst helper
++ *
++ * These functions contain parts of the DisplayPort 1.2a MultiStream Transport
++ * protocol. The helpers contain a topology manager and bandwidth manager.
++ * The helpers encapsulate the sending and receiving of sideband msgs.
++ */
++static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
++ char *buf);
++static int test_calc_pbn_mode(void);
++
++static void drm_dp_put_port(struct drm_dp_mst_port *port);
++
++static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
++ int id,
++ struct drm_dp_payload *payload);
++
++static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
++ struct drm_dp_mst_port *port,
++ int offset, int size, u8 *bytes);
++
++static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
++ struct drm_dp_mst_branch *mstb);
++static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
++ struct drm_dp_mst_branch *mstb,
++ struct drm_dp_mst_port *port);
++static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
++ u8 *guid);
++
++static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux);
++static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux);
++static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr);
++/* sideband msg handling */
++static u8 drm_dp_msg_header_crc4(const uint8_t *data, size_t num_nibbles)
++{
++ u8 bitmask = 0x80;
++ u8 bitshift = 7;
++ u8 array_index = 0;
++ int number_of_bits = num_nibbles * 4;
++ u8 remainder = 0;
++
++ while (number_of_bits != 0) {
++ number_of_bits--;
++ remainder <<= 1;
++ remainder |= (data[array_index] & bitmask) >> bitshift;
++ bitmask >>= 1;
++ bitshift--;
++ if (bitmask == 0) {
++ bitmask = 0x80;
++ bitshift = 7;
++ array_index++;
++ }
++ if ((remainder & 0x10) == 0x10)
++ remainder ^= 0x13;
++ }
++
++ number_of_bits = 4;
++ while (number_of_bits != 0) {
++ number_of_bits--;
++ remainder <<= 1;
++ if ((remainder & 0x10) != 0)
++ remainder ^= 0x13;
++ }
++
++ return remainder;
++}
++
++static u8 drm_dp_msg_data_crc4(const uint8_t *data, u8 number_of_bytes)
++{
++ u8 bitmask = 0x80;
++ u8 bitshift = 7;
++ u8 array_index = 0;
++ int number_of_bits = number_of_bytes * 8;
++ u16 remainder = 0;
++
++ while (number_of_bits != 0) {
++ number_of_bits--;
++ remainder <<= 1;
++ remainder |= (data[array_index] & bitmask) >> bitshift;
++ bitmask >>= 1;
++ bitshift--;
++ if (bitmask == 0) {
++ bitmask = 0x80;
++ bitshift = 7;
++ array_index++;
++ }
++ if ((remainder & 0x100) == 0x100)
++ remainder ^= 0xd5;
++ }
++
++ number_of_bits = 8;
++ while (number_of_bits != 0) {
++ number_of_bits--;
++ remainder <<= 1;
++ if ((remainder & 0x100) != 0)
++ remainder ^= 0xd5;
++ }
++
++ return remainder & 0xff;
++}
++static inline u8 drm_dp_calc_sb_hdr_size(struct drm_dp_sideband_msg_hdr *hdr)
++{
++ u8 size = 3;
++ size += (hdr->lct / 2);
++ return size;
++}
++
++static void drm_dp_encode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
++ u8 *buf, int *len)
++{
++ int idx = 0;
++ int i;
++ u8 crc4;
++ buf[idx++] = ((hdr->lct & 0xf) << 4) | (hdr->lcr & 0xf);
++ for (i = 0; i < (hdr->lct / 2); i++)
++ buf[idx++] = hdr->rad[i];
++ buf[idx++] = (hdr->broadcast << 7) | (hdr->path_msg << 6) |
++ (hdr->msg_len & 0x3f);
++ buf[idx++] = (hdr->somt << 7) | (hdr->eomt << 6) | (hdr->seqno << 4);
++
++ crc4 = drm_dp_msg_header_crc4(buf, (idx * 2) - 1);
++ buf[idx - 1] |= (crc4 & 0xf);
++
++ *len = idx;
++}
++
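A worked example of the encoder above (a sketch written as if from within this file, since the function is static): for a one-hop header the output is three bytes, with the CRC-4 (polynomial 0x13, i.e. x^4 + x + 1, per the XOR constant in drm_dp_msg_header_crc4()) packed into the low nibble of the last byte.

static void my_hdr_example(void)
{
	struct drm_dp_sideband_msg_hdr hdr = {
		.lct = 1,	/* talking to the directly attached branch */
		.lcr = 0,
		.msg_len = 5,	/* body length, including its CRC byte */
		.somt = 1,
		.eomt = 1,
		.seqno = 0,
	};
	u8 buf[16];
	int len;

	drm_dp_encode_sideband_msg_hdr(&hdr, buf, &len);
	/*
	 * len == 3 here: lct/2 == 0 RAD bytes, and the low nibble of
	 * buf[2] carries the CRC-4 computed over the first five nibbles.
	 */
}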
++static bool drm_dp_decode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
++ u8 *buf, int buflen, u8 *hdrlen)
++{
++ u8 crc4;
++ u8 len;
++ int i;
++ u8 idx;
++ if (buf[0] == 0)
++ return false;
++ len = 3;
++ len += ((buf[0] & 0xf0) >> 4) / 2;
++ if (len > buflen)
++ return false;
++ crc4 = drm_dp_msg_header_crc4(buf, (len * 2) - 1);
++
++ if ((crc4 & 0xf) != (buf[len - 1] & 0xf)) {
++ DRM_DEBUG_KMS("crc4 mismatch 0x%x 0x%x\n", crc4, buf[len - 1]);
++ return false;
++ }
++
++ hdr->lct = (buf[0] & 0xf0) >> 4;
++ hdr->lcr = (buf[0] & 0xf);
++ idx = 1;
++ for (i = 0; i < (hdr->lct / 2); i++)
++ hdr->rad[i] = buf[idx++];
++ hdr->broadcast = (buf[idx] >> 7) & 0x1;
++ hdr->path_msg = (buf[idx] >> 6) & 0x1;
++ hdr->msg_len = buf[idx] & 0x3f;
++ idx++;
++ hdr->somt = (buf[idx] >> 7) & 0x1;
++ hdr->eomt = (buf[idx] >> 6) & 0x1;
++ hdr->seqno = (buf[idx] >> 4) & 0x1;
++ idx++;
++ *hdrlen = idx;
++ return true;
++}
++
++static void drm_dp_encode_sideband_req(struct drm_dp_sideband_msg_req_body *req,
++ struct drm_dp_sideband_msg_tx *raw)
++{
++ int idx = 0;
++ int i;
++ u8 *buf = raw->msg;
++ buf[idx++] = req->req_type & 0x7f;
++
++ switch (req->req_type) {
++ case DP_ENUM_PATH_RESOURCES:
++ buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
++ idx++;
++ break;
++ case DP_ALLOCATE_PAYLOAD:
++ buf[idx] = (req->u.allocate_payload.port_number & 0xf) << 4 |
++ (req->u.allocate_payload.number_sdp_streams & 0xf);
++ idx++;
++ buf[idx] = (req->u.allocate_payload.vcpi & 0x7f);
++ idx++;
++ buf[idx] = (req->u.allocate_payload.pbn >> 8);
++ idx++;
++ buf[idx] = (req->u.allocate_payload.pbn & 0xff);
++ idx++;
++ for (i = 0; i < req->u.allocate_payload.number_sdp_streams / 2; i++) {
++ buf[idx] = ((req->u.allocate_payload.sdp_stream_sink[i * 2] & 0xf) << 4) |
++ (req->u.allocate_payload.sdp_stream_sink[i * 2 + 1] & 0xf);
++ idx++;
++ }
++ if (req->u.allocate_payload.number_sdp_streams & 1) {
++ i = req->u.allocate_payload.number_sdp_streams - 1;
++ buf[idx] = (req->u.allocate_payload.sdp_stream_sink[i] & 0xf) << 4;
++ idx++;
++ }
++ break;
++ case DP_QUERY_PAYLOAD:
++ buf[idx] = (req->u.query_payload.port_number & 0xf) << 4;
++ idx++;
++ buf[idx] = (req->u.query_payload.vcpi & 0x7f);
++ idx++;
++ break;
++ case DP_REMOTE_DPCD_READ:
++ buf[idx] = (req->u.dpcd_read.port_number & 0xf) << 4;
++ buf[idx] |= ((req->u.dpcd_read.dpcd_address & 0xf0000) >> 16) & 0xf;
++ idx++;
++ buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff00) >> 8;
++ idx++;
++ buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff);
++ idx++;
++ buf[idx] = (req->u.dpcd_read.num_bytes);
++ idx++;
++ break;
++
++ case DP_REMOTE_DPCD_WRITE:
++ buf[idx] = (req->u.dpcd_write.port_number & 0xf) << 4;
++ buf[idx] |= ((req->u.dpcd_write.dpcd_address & 0xf0000) >> 16) & 0xf;
++ idx++;
++ buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff00) >> 8;
++ idx++;
++ buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff);
++ idx++;
++ buf[idx] = (req->u.dpcd_write.num_bytes);
++ idx++;
++ memcpy(&buf[idx], req->u.dpcd_write.bytes, req->u.dpcd_write.num_bytes);
++ idx += req->u.dpcd_write.num_bytes;
++ break;
++ case DP_REMOTE_I2C_READ:
++ buf[idx] = (req->u.i2c_read.port_number & 0xf) << 4;
++ buf[idx] |= (req->u.i2c_read.num_transactions & 0x3);
++ idx++;
++ for (i = 0; i < (req->u.i2c_read.num_transactions & 0x3); i++) {
++ buf[idx] = req->u.i2c_read.transactions[i].i2c_dev_id & 0x7f;
++ idx++;
++ buf[idx] = req->u.i2c_read.transactions[i].num_bytes;
++ idx++;
++ memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes);
++ idx += req->u.i2c_read.transactions[i].num_bytes;
++
++ buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 5;
++ buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf);
++ idx++;
++ }
++ buf[idx] = (req->u.i2c_read.read_i2c_device_id) & 0x7f;
++ idx++;
++ buf[idx] = (req->u.i2c_read.num_bytes_read);
++ idx++;
++ break;
++
++ case DP_REMOTE_I2C_WRITE:
++ buf[idx] = (req->u.i2c_write.port_number & 0xf) << 4;
++ idx++;
++ buf[idx] = (req->u.i2c_write.write_i2c_device_id) & 0x7f;
++ idx++;
++ buf[idx] = (req->u.i2c_write.num_bytes);
++ idx++;
++ memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes);
++ idx += req->u.i2c_write.num_bytes;
++ break;
++ }
++ raw->cur_len = idx;
++}
++
++static void drm_dp_crc_sideband_chunk_req(u8 *msg, u8 len)
++{
++ u8 crc4;
++ crc4 = drm_dp_msg_data_crc4(msg, len);
++ msg[len] = crc4;
++}
++
++static void drm_dp_encode_sideband_reply(struct drm_dp_sideband_msg_reply_body *rep,
++ struct drm_dp_sideband_msg_tx *raw)
++{
++ int idx = 0;
++ u8 *buf = raw->msg;
++
++ buf[idx++] = (rep->reply_type & 0x1) << 7 | (rep->req_type & 0x7f);
++
++ raw->cur_len = idx;
++}
++
++/* this adds a chunk of msg to the builder to get the final msg */
++static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg,
++ u8 *replybuf, u8 replybuflen, bool hdr)
++{
++ int ret;
++ u8 crc4;
++
++ if (hdr) {
++ u8 hdrlen;
++ struct drm_dp_sideband_msg_hdr recv_hdr;
++ ret = drm_dp_decode_sideband_msg_hdr(&recv_hdr, replybuf, replybuflen, &hdrlen);
++ if (ret == false) {
++ print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16, 1, replybuf, replybuflen, false);
++ return false;
++ }
++
++ /* get length contained in this portion */
++ msg->curchunk_len = recv_hdr.msg_len;
++ msg->curchunk_hdrlen = hdrlen;
++
++ /* we have already gotten an SOMT - don't bother parsing */
++ if (recv_hdr.somt && msg->have_somt)
++ return false;
++
++ if (recv_hdr.somt) {
++ memcpy(&msg->initial_hdr, &recv_hdr, sizeof(struct drm_dp_sideband_msg_hdr));
++ msg->have_somt = true;
++ }
++ if (recv_hdr.eomt)
++ msg->have_eomt = true;
++
++ /* copy the bytes for the remainder of this header chunk */
++ msg->curchunk_idx = min(msg->curchunk_len, (u8)(replybuflen - hdrlen));
++ memcpy(&msg->chunk[0], replybuf + hdrlen, msg->curchunk_idx);
++ } else {
++ memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen);
++ msg->curchunk_idx += replybuflen;
++ }
++
++ if (msg->curchunk_idx >= msg->curchunk_len) {
++ /* do CRC */
++ crc4 = drm_dp_msg_data_crc4(msg->chunk, msg->curchunk_len - 1);
++ /* copy chunk into bigger msg */
++ memcpy(&msg->msg[msg->curlen], msg->chunk, msg->curchunk_len - 1);
++ msg->curlen += msg->curchunk_len - 1;
++ }
++ return true;
++}
++
++static bool drm_dp_sideband_parse_link_address(struct drm_dp_sideband_msg_rx *raw,
++ struct drm_dp_sideband_msg_reply_body *repmsg)
++{
++ int idx = 1;
++ int i;
++ memcpy(repmsg->u.link_addr.guid, &raw->msg[idx], 16);
++ idx += 16;
++ repmsg->u.link_addr.nports = raw->msg[idx] & 0xf;
++ idx++;
++ if (idx > raw->curlen)
++ goto fail_len;
++ for (i = 0; i < repmsg->u.link_addr.nports; i++) {
++ if (raw->msg[idx] & 0x80)
++ repmsg->u.link_addr.ports[i].input_port = 1;
++
++ repmsg->u.link_addr.ports[i].peer_device_type = (raw->msg[idx] >> 4) & 0x7;
++ repmsg->u.link_addr.ports[i].port_number = (raw->msg[idx] & 0xf);
++
++ idx++;
++ if (idx > raw->curlen)
++ goto fail_len;
++ repmsg->u.link_addr.ports[i].mcs = (raw->msg[idx] >> 7) & 0x1;
++ repmsg->u.link_addr.ports[i].ddps = (raw->msg[idx] >> 6) & 0x1;
++ if (repmsg->u.link_addr.ports[i].input_port == 0)
++ repmsg->u.link_addr.ports[i].legacy_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
++ idx++;
++ if (idx > raw->curlen)
++ goto fail_len;
++ if (repmsg->u.link_addr.ports[i].input_port == 0) {
++ repmsg->u.link_addr.ports[i].dpcd_revision = (raw->msg[idx]);
++ idx++;
++ if (idx > raw->curlen)
++ goto fail_len;
++ memcpy(repmsg->u.link_addr.ports[i].peer_guid, &raw->msg[idx], 16);
++ idx += 16;
++ if (idx > raw->curlen)
++ goto fail_len;
++ repmsg->u.link_addr.ports[i].num_sdp_streams = (raw->msg[idx] >> 4) & 0xf;
++ repmsg->u.link_addr.ports[i].num_sdp_stream_sinks = (raw->msg[idx] & 0xf);
++ idx++;
++
++ }
++ if (idx > raw->curlen)
++ goto fail_len;
++ }
++
++ return true;
++fail_len:
++ DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
++ return false;
++}
++
++static bool drm_dp_sideband_parse_remote_dpcd_read(struct drm_dp_sideband_msg_rx *raw,
++ struct drm_dp_sideband_msg_reply_body *repmsg)
++{
++ int idx = 1;
++ repmsg->u.remote_dpcd_read_ack.port_number = raw->msg[idx] & 0xf;
++ idx++;
++ if (idx > raw->curlen)
++ goto fail_len;
++ repmsg->u.remote_dpcd_read_ack.num_bytes = raw->msg[idx];
++ if (idx > raw->curlen)
++ goto fail_len;
++
++ memcpy(repmsg->u.remote_dpcd_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_dpcd_read_ack.num_bytes);
++ return true;
++fail_len:
++ DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
++ return false;
++}
++
++static bool drm_dp_sideband_parse_remote_dpcd_write(struct drm_dp_sideband_msg_rx *raw,
++ struct drm_dp_sideband_msg_reply_body *repmsg)
++{
++ int idx = 1;
++ repmsg->u.remote_dpcd_write_ack.port_number = raw->msg[idx] & 0xf;
++ idx++;
++ if (idx > raw->curlen)
++ goto fail_len;
++ return true;
++fail_len:
++ DRM_DEBUG_KMS("parse length fail %d %d\n", idx, raw->curlen);
++ return false;
++}
++
++static bool drm_dp_sideband_parse_remote_i2c_read_ack(struct drm_dp_sideband_msg_rx *raw,
++ struct drm_dp_sideband_msg_reply_body *repmsg)
++{
++ int idx = 1;
++
++ repmsg->u.remote_i2c_read_ack.port_number = (raw->msg[idx] & 0xf);
++ idx++;
++ if (idx > raw->curlen)
++ goto fail_len;
++ repmsg->u.remote_i2c_read_ack.num_bytes = raw->msg[idx];
++ idx++;
++ /* TODO check */
++ memcpy(repmsg->u.remote_i2c_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_i2c_read_ack.num_bytes);
++ return true;
++fail_len:
++ DRM_DEBUG_KMS("remote i2c reply parse length fail %d %d\n", idx, raw->curlen);
++ return false;
++}
++
++static bool drm_dp_sideband_parse_enum_path_resources_ack(struct drm_dp_sideband_msg_rx *raw,
++ struct drm_dp_sideband_msg_reply_body *repmsg)
++{
++ int idx = 1;
++ repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf;
++ idx++;
++ if (idx > raw->curlen)
++ goto fail_len;
++ repmsg->u.path_resources.full_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
++ idx += 2;
++ if (idx > raw->curlen)
++ goto fail_len;
++ repmsg->u.path_resources.avail_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
++ idx += 2;
++ if (idx > raw->curlen)
++ goto fail_len;
++ return true;
++fail_len:
++ DRM_DEBUG_KMS("enum resource parse length fail %d %d\n", idx, raw->curlen);
++ return false;
++}
++
++static bool drm_dp_sideband_parse_allocate_payload_ack(struct drm_dp_sideband_msg_rx *raw,
++ struct drm_dp_sideband_msg_reply_body *repmsg)
++{
++ int idx = 1;
++ repmsg->u.allocate_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
++ idx++;
++ if (idx > raw->curlen)
++ goto fail_len;
++ repmsg->u.allocate_payload.vcpi = raw->msg[idx];
++ idx++;
++ if (idx > raw->curlen)
++ goto fail_len;
++ repmsg->u.allocate_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
++ idx += 2;
++ if (idx > raw->curlen)
++ goto fail_len;
++ return true;
++fail_len:
++ DRM_DEBUG_KMS("allocate payload parse length fail %d %d\n", idx, raw->curlen);
++ return false;
++}
++
++static bool drm_dp_sideband_parse_query_payload_ack(struct drm_dp_sideband_msg_rx *raw,
++ struct drm_dp_sideband_msg_reply_body *repmsg)
++{
++ int idx = 1;
++ repmsg->u.query_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
++ idx++;
++ if (idx > raw->curlen)
++ goto fail_len;
++ repmsg->u.query_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
++ idx += 2;
++ if (idx > raw->curlen)
++ goto fail_len;
++ return true;
++fail_len:
++ DRM_DEBUG_KMS("query payload parse length fail %d %d\n", idx, raw->curlen);
++ return false;
++}
++
++static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw,
++ struct drm_dp_sideband_msg_reply_body *msg)
++{
++ memset(msg, 0, sizeof(*msg));
++ msg->reply_type = (raw->msg[0] & 0x80) >> 7;
++ msg->req_type = (raw->msg[0] & 0x7f);
++
++ if (msg->reply_type) {
++ memcpy(msg->u.nak.guid, &raw->msg[1], 16);
++ msg->u.nak.reason = raw->msg[17];
++ msg->u.nak.nak_data = raw->msg[18];
++ return false;
++ }
++
++ switch (msg->req_type) {
++ case DP_LINK_ADDRESS:
++ return drm_dp_sideband_parse_link_address(raw, msg);
++ case DP_QUERY_PAYLOAD:
++ return drm_dp_sideband_parse_query_payload_ack(raw, msg);
++ case DP_REMOTE_DPCD_READ:
++ return drm_dp_sideband_parse_remote_dpcd_read(raw, msg);
++ case DP_REMOTE_DPCD_WRITE:
++ return drm_dp_sideband_parse_remote_dpcd_write(raw, msg);
++ case DP_REMOTE_I2C_READ:
++ return drm_dp_sideband_parse_remote_i2c_read_ack(raw, msg);
++ case DP_ENUM_PATH_RESOURCES:
++ return drm_dp_sideband_parse_enum_path_resources_ack(raw, msg);
++ case DP_ALLOCATE_PAYLOAD:
++ return drm_dp_sideband_parse_allocate_payload_ack(raw, msg);
++ default:
++ DRM_ERROR("Got unknown reply 0x%02x\n", msg->req_type);
++ return false;
++ }
++}
++
++static bool drm_dp_sideband_parse_connection_status_notify(struct drm_dp_sideband_msg_rx *raw,
++ struct drm_dp_sideband_msg_req_body *msg)
++{
++ int idx = 1;
++
++ msg->u.conn_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
++ idx++;
++ if (idx > raw->curlen)
++ goto fail_len;
++
++ memcpy(msg->u.conn_stat.guid, &raw->msg[idx], 16);
++ idx += 16;
++ if (idx > raw->curlen)
++ goto fail_len;
++
++ msg->u.conn_stat.legacy_device_plug_status = (raw->msg[idx] >> 6) & 0x1;
++ msg->u.conn_stat.displayport_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
++ msg->u.conn_stat.message_capability_status = (raw->msg[idx] >> 4) & 0x1;
++ msg->u.conn_stat.input_port = (raw->msg[idx] >> 3) & 0x1;
++ msg->u.conn_stat.peer_device_type = (raw->msg[idx] & 0x7);
++ idx++;
++ return true;
++fail_len:
++ DRM_DEBUG_KMS("connection status reply parse length fail %d %d\n", idx, raw->curlen);
++ return false;
++}
++
++static bool drm_dp_sideband_parse_resource_status_notify(struct drm_dp_sideband_msg_rx *raw,
++ struct drm_dp_sideband_msg_req_body *msg)
++{
++ int idx = 1;
++
++ msg->u.resource_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
++ idx++;
++ if (idx > raw->curlen)
++ goto fail_len;
++
++ memcpy(msg->u.resource_stat.guid, &raw->msg[idx], 16);
++ idx += 16;
++ if (idx > raw->curlen)
++ goto fail_len;
++
++ msg->u.resource_stat.available_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
++ idx++;
++ return true;
++fail_len:
++ DRM_DEBUG_KMS("resource status reply parse length fail %d %d\n", idx, raw->curlen);
++ return false;
++}
++
++static bool drm_dp_sideband_parse_req(struct drm_dp_sideband_msg_rx *raw,
++ struct drm_dp_sideband_msg_req_body *msg)
++{
++ memset(msg, 0, sizeof(*msg));
++ msg->req_type = (raw->msg[0] & 0x7f);
++
++ switch (msg->req_type) {
++ case DP_CONNECTION_STATUS_NOTIFY:
++ return drm_dp_sideband_parse_connection_status_notify(raw, msg);
++ case DP_RESOURCE_STATUS_NOTIFY:
++ return drm_dp_sideband_parse_resource_status_notify(raw, msg);
++ default:
++ DRM_ERROR("Got unknown request 0x%02x\n", msg->req_type);
++ return false;
++ }
++}
++
++static int build_dpcd_write(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes, u8 *bytes)
++{
++ struct drm_dp_sideband_msg_req_body req;
++
++ req.req_type = DP_REMOTE_DPCD_WRITE;
++ req.u.dpcd_write.port_number = port_num;
++ req.u.dpcd_write.dpcd_address = offset;
++ req.u.dpcd_write.num_bytes = num_bytes;
++ req.u.dpcd_write.bytes = bytes;
++ drm_dp_encode_sideband_req(&req, msg);
++
++ return 0;
++}
++
++static int build_link_address(struct drm_dp_sideband_msg_tx *msg)
++{
++ struct drm_dp_sideband_msg_req_body req;
++
++ req.req_type = DP_LINK_ADDRESS;
++ drm_dp_encode_sideband_req(&req, msg);
++ return 0;
++}
++
++static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg, int port_num)
++{
++ struct drm_dp_sideband_msg_req_body req;
++
++ req.req_type = DP_ENUM_PATH_RESOURCES;
++ req.u.port_num.port_number = port_num;
++ drm_dp_encode_sideband_req(&req, msg);
++ msg->path_msg = true;
++ return 0;
++}
++
++static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_num,
++ u8 vcpi, uint16_t pbn)
++{
++ struct drm_dp_sideband_msg_req_body req;
++ memset(&req, 0, sizeof(req));
++ req.req_type = DP_ALLOCATE_PAYLOAD;
++ req.u.allocate_payload.port_number = port_num;
++ req.u.allocate_payload.vcpi = vcpi;
++ req.u.allocate_payload.pbn = pbn;
++ drm_dp_encode_sideband_req(&req, msg);
++ msg->path_msg = true;
++ return 0;
++}
++
++static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
++ struct drm_dp_vcpi *vcpi)
++{
++ int ret, vcpi_ret;
++
++ mutex_lock(&mgr->payload_lock);
++ ret = find_first_zero_bit(&mgr->payload_mask, mgr->max_payloads + 1);
++ if (ret > mgr->max_payloads) {
++ ret = -EINVAL;
++ DRM_DEBUG_KMS("out of payload ids %d\n", ret);
++ goto out_unlock;
++ }
++
++ vcpi_ret = find_first_zero_bit(&mgr->vcpi_mask, mgr->max_payloads + 1);
++ if (vcpi_ret > mgr->max_payloads) {
++ ret = -EINVAL;
++ DRM_DEBUG_KMS("out of vcpi ids %d\n", ret);
++ goto out_unlock;
++ }
++
++ set_bit(ret, &mgr->payload_mask);
++ set_bit(vcpi_ret, &mgr->vcpi_mask);
++ vcpi->vcpi = vcpi_ret + 1;
++ mgr->proposed_vcpis[ret - 1] = vcpi;
++out_unlock:
++ mutex_unlock(&mgr->payload_lock);
++ return ret;
++}
++
++static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr,
++ int vcpi)
++{
++ int i;
++ if (vcpi == 0)
++ return;
++
++ mutex_lock(&mgr->payload_lock);
++ DRM_DEBUG_KMS("putting payload %d\n", vcpi);
++ clear_bit(vcpi - 1, &mgr->vcpi_mask);
++
++ for (i = 0; i < mgr->max_payloads; i++) {
++ if (mgr->proposed_vcpis[i])
++ if (mgr->proposed_vcpis[i]->vcpi == vcpi) {
++ mgr->proposed_vcpis[i] = NULL;
++ clear_bit(i + 1, &mgr->payload_mask);
++ }
++ }
++ mutex_unlock(&mgr->payload_lock);
++}
++
++static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr,
++ struct drm_dp_sideband_msg_tx *txmsg)
++{
++ bool ret;
++ mutex_lock(&mgr->qlock);
++ ret = (txmsg->state == DRM_DP_SIDEBAND_TX_RX ||
++ txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT);
++ mutex_unlock(&mgr->qlock);
++ return ret;
++}
++
++static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
++ struct drm_dp_sideband_msg_tx *txmsg)
++{
++ struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
++ int ret;
++
++ ret = wait_event_timeout(mgr->tx_waitq,
++ check_txmsg_state(mgr, txmsg),
++ (4 * HZ));
++ mutex_lock(&mstb->mgr->qlock);
++ if (ret > 0) {
++ if (txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT) {
++ ret = -EIO;
++ goto out;
++ }
++ } else {
++ DRM_DEBUG_KMS("timedout msg send %p %d %d\n", txmsg, txmsg->state, txmsg->seqno);
++
++ /* dump some state */
++ ret = -EIO;
++
++ /* remove from q */
++ if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED ||
++ txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND) {
++ list_del(&txmsg->next);
++ }
++
++ if (txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
++ txmsg->state == DRM_DP_SIDEBAND_TX_SENT) {
++ mstb->tx_slots[txmsg->seqno] = NULL;
++ }
++ }
++out:
++ mutex_unlock(&mgr->qlock);
++
++ return ret;
++}
++
++static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad)
++{
++ struct drm_dp_mst_branch *mstb;
++
++ mstb = kzalloc(sizeof(*mstb), GFP_KERNEL);
++ if (!mstb)
++ return NULL;
++
++ mstb->lct = lct;
++ if (lct > 1)
++ memcpy(mstb->rad, rad, lct / 2);
++ INIT_LIST_HEAD(&mstb->ports);
++ kref_init(&mstb->kref);
++ return mstb;
++}
++
++static void drm_dp_destroy_mst_branch_device(struct kref *kref)
++{
++ struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
++ struct drm_dp_mst_port *port, *tmp;
++ bool wake_tx = false;
++
++ cancel_work_sync(&mstb->mgr->work);
++
++ /*
++ * destroy all ports - don't need lock
++ * as there are no more references to the mst branch
++ * device at this point.
++ */
++ list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
++ list_del(&port->next);
++ drm_dp_put_port(port);
++ }
++
++ /* drop any tx slots msg */
++ mutex_lock(&mstb->mgr->qlock);
++ if (mstb->tx_slots[0]) {
++ mstb->tx_slots[0]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
++ mstb->tx_slots[0] = NULL;
++ wake_tx = true;
++ }
++ if (mstb->tx_slots[1]) {
++ mstb->tx_slots[1]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
++ mstb->tx_slots[1] = NULL;
++ wake_tx = true;
++ }
++ mutex_unlock(&mstb->mgr->qlock);
++
++ if (wake_tx)
++ wake_up(&mstb->mgr->tx_waitq);
++ kfree(mstb);
++}
++
++static void drm_dp_put_mst_branch_device(struct drm_dp_mst_branch *mstb)
++{
++ kref_put(&mstb->kref, drm_dp_destroy_mst_branch_device);
++}
++
++
++static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt)
++{
++ struct drm_dp_mst_branch *mstb;
++
++ switch (old_pdt) {
++ case DP_PEER_DEVICE_DP_LEGACY_CONV:
++ case DP_PEER_DEVICE_SST_SINK:
++ /* remove i2c over sideband */
++ drm_dp_mst_unregister_i2c_bus(&port->aux);
++ break;
++ case DP_PEER_DEVICE_MST_BRANCHING:
++ mstb = port->mstb;
++ port->mstb = NULL;
++ drm_dp_put_mst_branch_device(mstb);
++ break;
++ }
++}
++
++static void drm_dp_destroy_port(struct kref *kref)
++{
++ struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref);
++ struct drm_dp_mst_topology_mgr *mgr = port->mgr;
++ if (!port->input) {
++ port->vcpi.num_slots = 0;
++
++ kfree(port->cached_edid);
++ if (port->connector)
++ (*port->mgr->cbs->destroy_connector)(mgr, port->connector);
++ drm_dp_port_teardown_pdt(port, port->pdt);
++
++ if (!port->input && port->vcpi.vcpi > 0)
++ drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
++ }
++ kfree(port);
++
++ (*mgr->cbs->hotplug)(mgr);
++}
++
++static void drm_dp_put_port(struct drm_dp_mst_port *port)
++{
++ kref_put(&port->kref, drm_dp_destroy_port);
++}
++
++static struct drm_dp_mst_branch *drm_dp_mst_get_validated_mstb_ref_locked(struct drm_dp_mst_branch *mstb, struct drm_dp_mst_branch *to_find)
++{
++ struct drm_dp_mst_port *port;
++ struct drm_dp_mst_branch *rmstb;
++ if (to_find == mstb) {
++ kref_get(&mstb->kref);
++ return mstb;
++ }
++ list_for_each_entry(port, &mstb->ports, next) {
++ if (port->mstb) {
++ rmstb = drm_dp_mst_get_validated_mstb_ref_locked(port->mstb, to_find);
++ if (rmstb)
++ return rmstb;
++ }
++ }
++ return NULL;
++}
++
++static struct drm_dp_mst_branch *drm_dp_get_validated_mstb_ref(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_branch *mstb)
++{
++ struct drm_dp_mst_branch *rmstb = NULL;
++ mutex_lock(&mgr->lock);
++ if (mgr->mst_primary)
++ rmstb = drm_dp_mst_get_validated_mstb_ref_locked(mgr->mst_primary, mstb);
++ mutex_unlock(&mgr->lock);
++ return rmstb;
++}
++
++static struct drm_dp_mst_port *drm_dp_mst_get_port_ref_locked(struct drm_dp_mst_branch *mstb, struct drm_dp_mst_port *to_find)
++{
++ struct drm_dp_mst_port *port, *mport;
++
++ list_for_each_entry(port, &mstb->ports, next) {
++ if (port == to_find) {
++ kref_get(&port->kref);
++ return port;
++ }
++ if (port->mstb) {
++ mport = drm_dp_mst_get_port_ref_locked(port->mstb, to_find);
++ if (mport)
++ return mport;
++ }
++ }
++ return NULL;
++}
++
++static struct drm_dp_mst_port *drm_dp_get_validated_port_ref(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
++{
++ struct drm_dp_mst_port *rport = NULL;
++ mutex_lock(&mgr->lock);
++ if (mgr->mst_primary)
++ rport = drm_dp_mst_get_port_ref_locked(mgr->mst_primary, port);
++ mutex_unlock(&mgr->lock);
++ return rport;
++}
++
++static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u8 port_num)
++{
++ struct drm_dp_mst_port *port;
++
++ list_for_each_entry(port, &mstb->ports, next) {
++ if (port->port_num == port_num) {
++ kref_get(&port->kref);
++ return port;
++ }
++ }
++
++ return NULL;
++}
++
++/*
++ * calculate a new RAD for this MST branch device
++ * if parent has an LCT of 2 then it has 1 nibble of RAD,
++ * if parent has an LCT of 3 then it has 2 nibbles of RAD,
++ */
++static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
++ u8 *rad)
++{
++ int lct = port->parent->lct;
++ int shift = 4;
++ int idx = lct / 2;
++ if (lct > 1) {
++ memcpy(rad, port->parent->rad, idx);
++ shift = (lct % 2) ? 4 : 0;
++ } else
++ rad[0] = 0;
++
++ rad[idx] |= port->port_num << shift;
++ return lct + 1;
++}
++
++/*
++ * returns true if the caller should send a link address for the new mstb
++ */
++static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port)
++{
++ int ret;
++ u8 rad[6], lct;
++ bool send_link = false;
++ switch (port->pdt) {
++ case DP_PEER_DEVICE_DP_LEGACY_CONV:
++ case DP_PEER_DEVICE_SST_SINK:
++ /* add i2c over sideband */
++ ret = drm_dp_mst_register_i2c_bus(&port->aux);
++ break;
++ case DP_PEER_DEVICE_MST_BRANCHING:
++ lct = drm_dp_calculate_rad(port, rad);
++
++ port->mstb = drm_dp_add_mst_branch_device(lct, rad);
++ port->mstb->mgr = port->mgr;
++ port->mstb->port_parent = port;
++
++ send_link = true;
++ break;
++ }
++ return send_link;
++}
++
++static void drm_dp_check_port_guid(struct drm_dp_mst_branch *mstb,
++ struct drm_dp_mst_port *port)
++{
++ int ret;
++ if (port->dpcd_rev >= 0x12) {
++ port->guid_valid = drm_dp_validate_guid(mstb->mgr, port->guid);
++ if (!port->guid_valid) {
++ ret = drm_dp_send_dpcd_write(mstb->mgr,
++ port,
++ DP_GUID,
++ 16, port->guid);
++ port->guid_valid = true;
++ }
++ }
++}
++
++static void build_mst_prop_path(struct drm_dp_mst_port *port,
++ struct drm_dp_mst_branch *mstb,
++ char *proppath,
++ size_t proppath_size)
++{
++ int i;
++ char temp[8];
++ snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id);
++ for (i = 0; i < (mstb->lct - 1); i++) {
++ int shift = (i % 2) ? 0 : 4;
++ int port_num = mstb->rad[i / 2] >> shift;
++ snprintf(temp, sizeof(temp), "-%d", port_num);
++ strlcat(proppath, temp, proppath_size);
++ }
++ snprintf(temp, sizeof(temp), "-%d", port->port_num);
++ strlcat(proppath, temp, proppath_size);
++}
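For example (values invented for illustration): with conn_base_id 5, a branch at lct 2 whose rad[0] is 0x10 (i.e. reached through port 1 of the primary branch device), and a port numbered 8, the loop above yields the connector path property "mst:5-1-8".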
++
++static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
++ struct device *dev,
++ struct drm_dp_link_addr_reply_port *port_msg)
++{
++ struct drm_dp_mst_port *port;
++ bool ret;
++ bool created = false;
++ int old_pdt = 0;
++ int old_ddps = 0;
++ port = drm_dp_get_port(mstb, port_msg->port_number);
++ if (!port) {
++ port = kzalloc(sizeof(*port), GFP_KERNEL);
++ if (!port)
++ return;
++ kref_init(&port->kref);
++ port->parent = mstb;
++ port->port_num = port_msg->port_number;
++ port->mgr = mstb->mgr;
++ port->aux.name = "DPMST";
++ port->aux.dev = dev;
++ created = true;
++ } else {
++ old_pdt = port->pdt;
++ old_ddps = port->ddps;
++ }
++
++ port->pdt = port_msg->peer_device_type;
++ port->input = port_msg->input_port;
++ port->mcs = port_msg->mcs;
++ port->ddps = port_msg->ddps;
++ port->ldps = port_msg->legacy_device_plug_status;
++ port->dpcd_rev = port_msg->dpcd_revision;
++ port->num_sdp_streams = port_msg->num_sdp_streams;
++ port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks;
++ memcpy(port->guid, port_msg->peer_guid, 16);
++
++ /* manage mstb port lists with mgr lock - take a reference
++ for this list */
++ if (created) {
++ mutex_lock(&mstb->mgr->lock);
++ kref_get(&port->kref);
++ list_add(&port->next, &mstb->ports);
++ mutex_unlock(&mstb->mgr->lock);
++ }
++
++ if (old_ddps != port->ddps) {
++ if (port->ddps) {
++ drm_dp_check_port_guid(mstb, port);
++ if (!port->input)
++ drm_dp_send_enum_path_resources(mstb->mgr, mstb, port);
++ } else {
++ port->guid_valid = false;
++ port->available_pbn = 0;
++ }
++ }
++
++ if (old_pdt != port->pdt && !port->input) {
++ drm_dp_port_teardown_pdt(port, old_pdt);
++
++ ret = drm_dp_port_setup_pdt(port);
++ if (ret == true) {
++ drm_dp_send_link_address(mstb->mgr, port->mstb);
++ port->mstb->link_address_sent = true;
++ }
++ }
++
++ if (created && !port->input) {
++ char proppath[255];
++ build_mst_prop_path(port, mstb, proppath, sizeof(proppath));
++ port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath);
++
++ if (port->port_num >= 8) {
++ port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc);
++ }
++ }
++
++ /* put reference to this port */
++ drm_dp_put_port(port);
++}
++
++static void drm_dp_update_port(struct drm_dp_mst_branch *mstb,
++ struct drm_dp_connection_status_notify *conn_stat)
++{
++ struct drm_dp_mst_port *port;
++ int old_pdt;
++ int old_ddps;
++ bool dowork = false;
++ port = drm_dp_get_port(mstb, conn_stat->port_number);
++ if (!port)
++ return;
++
++ old_ddps = port->ddps;
++ old_pdt = port->pdt;
++ port->pdt = conn_stat->peer_device_type;
++ port->mcs = conn_stat->message_capability_status;
++ port->ldps = conn_stat->legacy_device_plug_status;
++ port->ddps = conn_stat->displayport_device_plug_status;
++
++ if (old_ddps != port->ddps) {
++ if (port->ddps) {
++ drm_dp_check_port_guid(mstb, port);
++ dowork = true;
++ } else {
++ port->guid_valid = false;
++ port->available_pbn = 0;
++ }
++ }
++ if (old_pdt != port->pdt && !port->input) {
++ drm_dp_port_teardown_pdt(port, old_pdt);
++
++ if (drm_dp_port_setup_pdt(port))
++ dowork = true;
++ }
++
++ drm_dp_put_port(port);
++ if (dowork)
++ queue_work(system_long_wq, &mstb->mgr->work);
++
++}
++
++static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr,
++ u8 lct, u8 *rad)
++{
++ struct drm_dp_mst_branch *mstb;
++ struct drm_dp_mst_port *port;
++ int i;
++ /* find the port by iterating down */
++ mstb = mgr->mst_primary;
++
++ for (i = 0; i < lct - 1; i++) {
++ int shift = (i % 2) ? 0 : 4;
++ int port_num = rad[i / 2] >> shift;
++
++ list_for_each_entry(port, &mstb->ports, next) {
++ if (port->port_num == port_num) {
++ if (!port->mstb) {
++ DRM_ERROR("failed to lookup MSTB with lct %d, rad %02x\n", lct, rad[0]);
++ return NULL;
++ }
++
++ mstb = port->mstb;
++ break;
++ }
++ }
++ }
++ kref_get(&mstb->kref);
++ return mstb;
++}
++
++static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
++ struct drm_dp_mst_branch *mstb)
++{
++ struct drm_dp_mst_port *port;
++
++ if (!mstb->link_address_sent) {
++ drm_dp_send_link_address(mgr, mstb);
++ mstb->link_address_sent = true;
++ }
++ list_for_each_entry(port, &mstb->ports, next) {
++ if (port->input)
++ continue;
++
++ if (!port->ddps)
++ continue;
++
++ if (!port->available_pbn)
++ drm_dp_send_enum_path_resources(mgr, mstb, port);
++
++ if (port->mstb)
++ drm_dp_check_and_send_link_address(mgr, port->mstb);
++ }
++}
++
++static void drm_dp_mst_link_probe_work(struct work_struct *work)
++{
++ struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, work);
++
++ drm_dp_check_and_send_link_address(mgr, mgr->mst_primary);
++
++}
++
++static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
++ u8 *guid)
++{
++ static u8 zero_guid[16];
++
++ if (!memcmp(guid, zero_guid, 16)) {
++ u64 salt = get_jiffies_64();
++ memcpy(&guid[0], &salt, sizeof(u64));
++ memcpy(&guid[8], &salt, sizeof(u64));
++ return false;
++ }
++ return true;
++}
++
++#if 0
++static int build_dpcd_read(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes)
++{
++ struct drm_dp_sideband_msg_req_body req;
++
++ req.req_type = DP_REMOTE_DPCD_READ;
++ req.u.dpcd_read.port_number = port_num;
++ req.u.dpcd_read.dpcd_address = offset;
++ req.u.dpcd_read.num_bytes = num_bytes;
++ drm_dp_encode_sideband_req(&req, msg);
++
++ return 0;
++}
++#endif
++
++static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr,
++ bool up, u8 *msg, int len)
++{
++ int ret;
++ int regbase = up ? DP_SIDEBAND_MSG_UP_REP_BASE : DP_SIDEBAND_MSG_DOWN_REQ_BASE;
++ int tosend, total, offset;
++ int retries = 0;
++
++retry:
++ total = len;
++ offset = 0;
++ do {
++ tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total);
++
++ ret = drm_dp_dpcd_write(mgr->aux, regbase + offset,
++ &msg[offset],
++ tosend);
++ if (ret != tosend) {
++ if (ret == -EIO && retries < 5) {
++ retries++;
++ goto retry;
++ }
++ DRM_DEBUG_KMS("failed to dpcd write %d %d\n", tosend, ret);
++ WARN(1, "fail\n");
++
++ return -EIO;
++ }
++ offset += tosend;
++ total -= tosend;
++ } while (total > 0);
++ return 0;
++}
++
++static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
++ struct drm_dp_sideband_msg_tx *txmsg)
++{
++ struct drm_dp_mst_branch *mstb = txmsg->dst;
++
++ /* both msg slots are full */
++ if (txmsg->seqno == -1) {
++ if (mstb->tx_slots[0] && mstb->tx_slots[1]) {
++ DRM_DEBUG_KMS("%s: failed to find slot\n", __func__);
++ return -EAGAIN;
++ }
++ if (mstb->tx_slots[0] == NULL && mstb->tx_slots[1] == NULL) {
++ txmsg->seqno = mstb->last_seqno;
++ mstb->last_seqno ^= 1;
++ } else if (mstb->tx_slots[0] == NULL)
++ txmsg->seqno = 0;
++ else
++ txmsg->seqno = 1;
++ mstb->tx_slots[txmsg->seqno] = txmsg;
++ }
++ hdr->broadcast = 0;
++ hdr->path_msg = txmsg->path_msg;
++ hdr->lct = mstb->lct;
++ hdr->lcr = mstb->lct - 1;
++ if (mstb->lct > 1)
++ memcpy(hdr->rad, mstb->rad, mstb->lct / 2);
++ hdr->seqno = txmsg->seqno;
++ return 0;
++}
++/*
++ * process a single block of the next message in the sideband queue
++ */
++static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
++ struct drm_dp_sideband_msg_tx *txmsg,
++ bool up)
++{
++ u8 chunk[48];
++ struct drm_dp_sideband_msg_hdr hdr;
++ int len, space, idx, tosend;
++ int ret;
++
++ memset(&hdr, 0, sizeof(struct drm_dp_sideband_msg_hdr));
++
++ if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED) {
++ txmsg->seqno = -1;
++ txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND;
++ }
++
++ /* make hdr from dst mst - for replies use seqno
++ otherwise assign one */
++ ret = set_hdr_from_dst_qlock(&hdr, txmsg);
++ if (ret < 0)
++ return ret;
++
++ /* amount left to send in this message */
++ len = txmsg->cur_len - txmsg->cur_offset;
++
++ /* 48 - sideband msg size - 1 byte for data CRC, x header bytes */
++ space = 48 - 1 - drm_dp_calc_sb_hdr_size(&hdr);
++
++ tosend = min(len, space);
++ if (len == txmsg->cur_len)
++ hdr.somt = 1;
++ if (space >= len)
++ hdr.eomt = 1;
++
++
++ hdr.msg_len = tosend + 1;
++ drm_dp_encode_sideband_msg_hdr(&hdr, chunk, &idx);
++ memcpy(&chunk[idx], &txmsg->msg[txmsg->cur_offset], tosend);
++ /* add crc at end */
++ drm_dp_crc_sideband_chunk_req(&chunk[idx], tosend);
++ idx += tosend + 1;
++
++ ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx);
++ if (ret) {
++ DRM_DEBUG_KMS("sideband msg failed to send\n");
++ return ret;
++ }
++
++ txmsg->cur_offset += tosend;
++ if (txmsg->cur_offset == txmsg->cur_len) {
++ txmsg->state = DRM_DP_SIDEBAND_TX_SENT;
++ return 1;
++ }
++ return 0;
++}
++
++/* must be called holding qlock */
++static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
++{
++ struct drm_dp_sideband_msg_tx *txmsg;
++ int ret;
++
++ /* construct a chunk from the first msg in the tx_msg queue */
++ if (list_empty(&mgr->tx_msg_downq)) {
++ mgr->tx_down_in_progress = false;
++ return;
++ }
++ mgr->tx_down_in_progress = true;
++
++ txmsg = list_first_entry(&mgr->tx_msg_downq, struct drm_dp_sideband_msg_tx, next);
++ ret = process_single_tx_qlock(mgr, txmsg, false);
++ if (ret == 1) {
++ /* txmsg is sent; it should be in the slots now */
++ list_del(&txmsg->next);
++ } else if (ret) {
++ DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
++ list_del(&txmsg->next);
++ if (txmsg->seqno != -1)
++ txmsg->dst->tx_slots[txmsg->seqno] = NULL;
++ txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
++ wake_up(&mgr->tx_waitq);
++ }
++ if (list_empty(&mgr->tx_msg_downq)) {
++ mgr->tx_down_in_progress = false;
++ return;
++ }
++}
++
++/* called holding qlock */
++static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
++{
++ struct drm_dp_sideband_msg_tx *txmsg;
++ int ret;
++
++ /* construct a chunk from the first msg in the tx_msg queue */
++ if (list_empty(&mgr->tx_msg_upq)) {
++ mgr->tx_up_in_progress = false;
++ return;
++ }
++
++ txmsg = list_first_entry(&mgr->tx_msg_upq, struct drm_dp_sideband_msg_tx, next);
++ ret = process_single_tx_qlock(mgr, txmsg, true);
++ if (ret == 1) {
++ /* up txmsgs aren't put in slots - so free after we send it */
++ list_del(&txmsg->next);
++ kfree(txmsg);
++ } else if (ret)
++ DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
++ mgr->tx_up_in_progress = true;
++}
++
++static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
++ struct drm_dp_sideband_msg_tx *txmsg)
++{
++ mutex_lock(&mgr->qlock);
++ list_add_tail(&txmsg->next, &mgr->tx_msg_downq);
++ if (!mgr->tx_down_in_progress)
++ process_single_down_tx_qlock(mgr);
++ mutex_unlock(&mgr->qlock);
++}
++
++static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
++ struct drm_dp_mst_branch *mstb)
++{
++ int len;
++ struct drm_dp_sideband_msg_tx *txmsg;
++ int ret;
++
++ txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
++ if (!txmsg)
++ return -ENOMEM;
++
++ txmsg->dst = mstb;
++ len = build_link_address(txmsg);
++
++ drm_dp_queue_down_tx(mgr, txmsg);
++
++ ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
++ if (ret > 0) {
++ int i;
++
++ if (txmsg->reply.reply_type == 1)
++ DRM_DEBUG_KMS("link address nak received\n");
++ else {
++ DRM_DEBUG_KMS("link address reply: %d\n", txmsg->reply.u.link_addr.nports);
++ for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
++ DRM_DEBUG_KMS("port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n", i,
++ txmsg->reply.u.link_addr.ports[i].input_port,
++ txmsg->reply.u.link_addr.ports[i].peer_device_type,
++ txmsg->reply.u.link_addr.ports[i].port_number,
++ txmsg->reply.u.link_addr.ports[i].dpcd_revision,
++ txmsg->reply.u.link_addr.ports[i].mcs,
++ txmsg->reply.u.link_addr.ports[i].ddps,
++ txmsg->reply.u.link_addr.ports[i].legacy_device_plug_status,
++ txmsg->reply.u.link_addr.ports[i].num_sdp_streams,
++ txmsg->reply.u.link_addr.ports[i].num_sdp_stream_sinks);
++ }
++ for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
++ drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]);
++ }
++ (*mgr->cbs->hotplug)(mgr);
++ }
++ } else
++ DRM_DEBUG_KMS("link address failed %d\n", ret);
++
++ kfree(txmsg);
++ return 0;
++}
++
++static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
++ struct drm_dp_mst_branch *mstb,
++ struct drm_dp_mst_port *port)
++{
++ int len;
++ struct drm_dp_sideband_msg_tx *txmsg;
++ int ret;
++
++ txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
++ if (!txmsg)
++ return -ENOMEM;
++
++ txmsg->dst = mstb;
++ len = build_enum_path_resources(txmsg, port->port_num);
++
++ drm_dp_queue_down_tx(mgr, txmsg);
++
++ ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
++ if (ret > 0) {
++ if (txmsg->reply.reply_type == 1)
++ DRM_DEBUG_KMS("enum path resources nak received\n");
++ else {
++ if (port->port_num != txmsg->reply.u.path_resources.port_number)
++ DRM_ERROR("got incorrect port in response\n");
++ DRM_DEBUG_KMS("enum path resources %d: %d %d\n", txmsg->reply.u.path_resources.port_number, txmsg->reply.u.path_resources.full_payload_bw_number,
++ txmsg->reply.u.path_resources.avail_payload_bw_number);
++ port->available_pbn = txmsg->reply.u.path_resources.avail_payload_bw_number;
++ }
++ }
++
++ kfree(txmsg);
++ return 0;
++}
++
++static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
++ struct drm_dp_mst_port *port,
++ int id,
++ int pbn)
++{
++ struct drm_dp_sideband_msg_tx *txmsg;
++ struct drm_dp_mst_branch *mstb;
++ int len, ret;
++
++ mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
++ if (!mstb)
++ return -EINVAL;
++
++ txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
++ if (!txmsg) {
++ ret = -ENOMEM;
++ goto fail_put;
++ }
++
++ txmsg->dst = mstb;
++ len = build_allocate_payload(txmsg, port->port_num,
++ id,
++ pbn);
++
++ drm_dp_queue_down_tx(mgr, txmsg);
++
++ ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
++ if (ret > 0) {
++ if (txmsg->reply.reply_type == 1) {
++ ret = -EINVAL;
++ } else
++ ret = 0;
++ }
++ kfree(txmsg);
++fail_put:
++ drm_dp_put_mst_branch_device(mstb);
++ return ret;
++}
++
++static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
++ int id,
++ struct drm_dp_payload *payload)
++{
++ int ret;
++
++ ret = drm_dp_dpcd_write_payload(mgr, id, payload);
++ if (ret < 0) {
++ payload->payload_state = 0;
++ return ret;
++ }
++ payload->payload_state = DP_PAYLOAD_LOCAL;
++ return 0;
++}
++
++static int drm_dp_create_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
++ struct drm_dp_mst_port *port,
++ int id,
++ struct drm_dp_payload *payload)
++{
++ int ret;
++ ret = drm_dp_payload_send_msg(mgr, port, id, port->vcpi.pbn);
++ if (ret < 0)
++ return ret;
++ payload->payload_state = DP_PAYLOAD_REMOTE;
++ return ret;
++}
++
++static int drm_dp_destroy_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
++ struct drm_dp_mst_port *port,
++ int id,
++ struct drm_dp_payload *payload)
++{
++ DRM_DEBUG_KMS("\n");
++ /* it's okay for these to fail */
++ if (port) {
++ drm_dp_payload_send_msg(mgr, port, id, 0);
++ }
++
++ drm_dp_dpcd_write_payload(mgr, id, payload);
++ payload->payload_state = DP_PAYLOAD_DELETE_LOCAL;
++ return 0;
++}
++
++static int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
++ int id,
++ struct drm_dp_payload *payload)
++{
++ payload->payload_state = 0;
++ return 0;
++}
++
++/**
++ * drm_dp_update_payload_part1() - Execute payload update part 1
++ * @mgr: manager to use.
++ *
++ * This iterates over all proposed virtual channels, and tries to
++ * allocate space in the link for them. For 0->slots transitions,
++ * this step just writes the VCPI to the MST device. For slots->0
++ * transitions, this writes the updated VCPIs and removes the
++ * remote VC payloads.
++ *
++ * After calling this, the driver should generate ACT and payload
++ * packets.
++ */
++int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
++{
++ int i, j;
++ int cur_slots = 1;
++ struct drm_dp_payload req_payload;
++ struct drm_dp_mst_port *port;
++
++ mutex_lock(&mgr->payload_lock);
++ for (i = 0; i < mgr->max_payloads; i++) {
++ /* solve the current payloads - compare to the hw ones
++ - update the hw view */
++ req_payload.start_slot = cur_slots;
++ if (mgr->proposed_vcpis[i]) {
++ port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
++ req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots;
++ } else {
++ port = NULL;
++ req_payload.num_slots = 0;
++ }
++
++ if (mgr->payloads[i].start_slot != req_payload.start_slot) {
++ mgr->payloads[i].start_slot = req_payload.start_slot;
++ }
++ /* work out what is required to happen with this payload */
++ if (mgr->payloads[i].num_slots != req_payload.num_slots) {
++
++ /* need to push an update for this payload */
++ if (req_payload.num_slots) {
++ drm_dp_create_payload_step1(mgr, mgr->proposed_vcpis[i]->vcpi, &req_payload);
++ mgr->payloads[i].num_slots = req_payload.num_slots;
++ } else if (mgr->payloads[i].num_slots) {
++ mgr->payloads[i].num_slots = 0;
++ drm_dp_destroy_payload_step1(mgr, port, port->vcpi.vcpi, &mgr->payloads[i]);
++ req_payload.payload_state = mgr->payloads[i].payload_state;
++ mgr->payloads[i].start_slot = 0;
++ }
++ mgr->payloads[i].payload_state = req_payload.payload_state;
++ }
++ cur_slots += req_payload.num_slots;
++ }
++
++ for (i = 0; i < mgr->max_payloads; i++) {
++ if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
++ DRM_DEBUG_KMS("removing payload %d\n", i);
++ for (j = i; j < mgr->max_payloads - 1; j++) {
++ memcpy(&mgr->payloads[j], &mgr->payloads[j + 1], sizeof(struct drm_dp_payload));
++ mgr->proposed_vcpis[j] = mgr->proposed_vcpis[j + 1];
++ if (mgr->proposed_vcpis[j] && mgr->proposed_vcpis[j]->num_slots) {
++ set_bit(j + 1, &mgr->payload_mask);
++ } else {
++ clear_bit(j + 1, &mgr->payload_mask);
++ }
++ }
++ memset(&mgr->payloads[mgr->max_payloads - 1], 0, sizeof(struct drm_dp_payload));
++ mgr->proposed_vcpis[mgr->max_payloads - 1] = NULL;
++ clear_bit(mgr->max_payloads, &mgr->payload_mask);
++
++ }
++ }
++ mutex_unlock(&mgr->payload_lock);
++
++ return 0;
++}
++EXPORT_SYMBOL(drm_dp_update_payload_part1);
++
++/**
++ * drm_dp_update_payload_part2() - Execute payload update part 2
++ * @mgr: manager to use.
++ *
++ * This iterates over all proposed virtual channels, and tries to
++ * allocate space in the link for them. For 0->slots transitions,
++ * this step writes the remote VC payload commands. For slots->0
++ * this just resets some internal state.
++ */
++int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr)
++{
++ struct drm_dp_mst_port *port;
++ int i;
++ int ret = 0;
++ mutex_lock(&mgr->payload_lock);
++ for (i = 0; i < mgr->max_payloads; i++) {
++
++ if (!mgr->proposed_vcpis[i])
++ continue;
++
++ port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
++
++ DRM_DEBUG_KMS("payload %d %d\n", i, mgr->payloads[i].payload_state);
++ if (mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL) {
++ ret = drm_dp_create_payload_step2(mgr, port, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
++ } else if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
++ ret = drm_dp_destroy_payload_step2(mgr, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
++ }
++ if (ret) {
++ mutex_unlock(&mgr->payload_lock);
++ return ret;
++ }
++ }
++ mutex_unlock(&mgr->payload_lock);
++ return 0;
++}
++EXPORT_SYMBOL(drm_dp_update_payload_part2);
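
Taken together, parts 1 and 2 bracket the driver's own ACT trigger. A minimal sketch of the enable-time call order a driver might use (the function name is hypothetical; drm_dp_check_act_status() is defined further down in this file):

    #include <drm/drmP.h>
    #include <drm/drm_dp_mst_helper.h>

    /* Hypothetical enable sequence; "mgr" is the driver's topology manager. */
    static int example_mst_enable_payloads(struct drm_dp_mst_topology_mgr *mgr)
    {
            int ret;

            /* Part 1: program the local VCPI table, drop dead remote payloads. */
            ret = drm_dp_update_payload_part1(mgr);
            if (ret)
                    return ret;

            /*
             * The driver triggers ACT in its own hardware here, then waits
             * for the sink to acknowledge it.
             */
            ret = drm_dp_check_act_status(mgr);
            if (ret)
                    return ret;

            /* Part 2: send the remote ALLOCATE_PAYLOAD sideband messages. */
            return drm_dp_update_payload_part2(mgr);
    }
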
++
++#if 0 /* unused as of yet */
++static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
++ struct drm_dp_mst_port *port,
++ int offset, int size)
++{
++ int len;
++ struct drm_dp_sideband_msg_tx *txmsg;
++
++ txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
++ if (!txmsg)
++ return -ENOMEM;
++
++ len = build_dpcd_read(txmsg, port->port_num, 0, 8);
++ txmsg->dst = port->parent;
++
++ drm_dp_queue_down_tx(mgr, txmsg);
++
++ return 0;
++}
++#endif
++
++static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
++ struct drm_dp_mst_port *port,
++ int offset, int size, u8 *bytes)
++{
++ int len;
++ int ret;
++ struct drm_dp_sideband_msg_tx *txmsg;
++ struct drm_dp_mst_branch *mstb;
++
++ mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
++ if (!mstb)
++ return -EINVAL;
++
++ txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
++ if (!txmsg) {
++ ret = -ENOMEM;
++ goto fail_put;
++ }
++
++ len = build_dpcd_write(txmsg, port->port_num, offset, size, bytes);
++ txmsg->dst = mstb;
++
++ drm_dp_queue_down_tx(mgr, txmsg);
++
++ ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
++ if (ret > 0) {
++ if (txmsg->reply.reply_type == 1) {
++ ret = -EINVAL;
++ } else
++ ret = 0;
++ }
++ kfree(txmsg);
++fail_put:
++ drm_dp_put_mst_branch_device(mstb);
++ return ret;
++}
++
++static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req_type)
++{
++ struct drm_dp_sideband_msg_reply_body reply;
++
++ reply.reply_type = 1;
++ reply.req_type = req_type;
++ drm_dp_encode_sideband_reply(&reply, msg);
++ return 0;
++}
++
++static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
++ struct drm_dp_mst_branch *mstb,
++ int req_type, int seqno, bool broadcast)
++{
++ struct drm_dp_sideband_msg_tx *txmsg;
++
++ txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
++ if (!txmsg)
++ return -ENOMEM;
++
++ txmsg->dst = mstb;
++ txmsg->seqno = seqno;
++ drm_dp_encode_up_ack_reply(txmsg, req_type);
++
++ mutex_lock(&mgr->qlock);
++ list_add_tail(&txmsg->next, &mgr->tx_msg_upq);
++ if (!mgr->tx_up_in_progress) {
++ process_single_up_tx_qlock(mgr);
++ }
++ mutex_unlock(&mgr->qlock);
++ return 0;
++}
++
++static bool drm_dp_get_vc_payload_bw(int dp_link_bw,
++ int dp_link_count,
++ int *out)
++{
++ switch (dp_link_bw) {
++ default:
++ DRM_DEBUG_KMS("invalid link bandwidth in DPCD: %x (link count: %d)\n",
++ dp_link_bw, dp_link_count);
++ return false;
++
++ case DP_LINK_BW_1_62:
++ *out = 3 * dp_link_count;
++ break;
++ case DP_LINK_BW_2_7:
++ *out = 5 * dp_link_count;
++ break;
++ case DP_LINK_BW_5_4:
++ *out = 10 * dp_link_count;
++ break;
++ }
++ return true;
++}
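
The per-slot numbers above fall out of the spec's units: one PBN is 54/64 MByte/sec, and the link frame is divided into 64 MTP timeslots. A 1.62 Gbps lane carries 162 MByte/sec of payload after 8b/10b coding, so one timeslot carries 162/64 = 2.53125 MByte/sec, which is 2.53125 / (54/64) = 3 PBN; the 2.7 and 5.4 Gbps rates scale the same arithmetic to 5 and 10 PBN per slot, and the total is multiplied by the lane count.
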
++
++/**
++ * drm_dp_mst_topology_mgr_set_mst() - Set the MST state for a topology manager
++ * @mgr: manager to set state for
++ * @mst_state: true to enable MST on this connector - false to disable.
++ *
++ * This is called by the driver when it detects an MST capable device plugged
++ * into a DP MST capable port, or when a DP MST capable device is unplugged.
++ */
++int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state)
++{
++ int ret = 0;
++ struct drm_dp_mst_branch *mstb = NULL;
++
++ mutex_lock(&mgr->lock);
++ if (mst_state == mgr->mst_state)
++ goto out_unlock;
++
++ mgr->mst_state = mst_state;
++ /* set the device into MST mode */
++ if (mst_state) {
++ WARN_ON(mgr->mst_primary);
++
++ /* get dpcd info */
++ ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
++ if (ret != DP_RECEIVER_CAP_SIZE) {
++ DRM_DEBUG_KMS("failed to read DPCD\n");
++ goto out_unlock;
++ }
++
++ if (!drm_dp_get_vc_payload_bw(mgr->dpcd[1],
++ mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK,
++ &mgr->pbn_div)) {
++ ret = -EINVAL;
++ goto out_unlock;
++ }
++
++ mgr->total_pbn = 2560;
++ mgr->total_slots = DIV_ROUND_UP(mgr->total_pbn, mgr->pbn_div);
++ mgr->avail_slots = mgr->total_slots;
++
++ /* add initial branch device at LCT 1 */
++ mstb = drm_dp_add_mst_branch_device(1, NULL);
++ if (mstb == NULL) {
++ ret = -ENOMEM;
++ goto out_unlock;
++ }
++ mstb->mgr = mgr;
++
++ /* give this the main reference */
++ mgr->mst_primary = mstb;
++ kref_get(&mgr->mst_primary->kref);
++
++ {
++ struct drm_dp_payload reset_pay;
++ reset_pay.start_slot = 0;
++ reset_pay.num_slots = 0x3f;
++ drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
++ }
++
++ ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
++ DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
++ if (ret < 0) {
++ goto out_unlock;
++ }
++
++
++ /* sort out guid */
++ ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, mgr->guid, 16);
++ if (ret != 16) {
++ DRM_DEBUG_KMS("failed to read DP GUID %d\n", ret);
++ goto out_unlock;
++ }
++
++ mgr->guid_valid = drm_dp_validate_guid(mgr, mgr->guid);
++ if (!mgr->guid_valid) {
++ ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, mgr->guid, 16);
++ mgr->guid_valid = true;
++ }
++
++ queue_work(system_long_wq, &mgr->work);
++
++ ret = 0;
++ } else {
++ /* disable MST on the device */
++ mstb = mgr->mst_primary;
++ mgr->mst_primary = NULL;
++ /* this can fail if the device is gone */
++ drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
++ ret = 0;
++ memset(mgr->payloads, 0, mgr->max_payloads * sizeof(struct drm_dp_payload));
++ mgr->payload_mask = 0;
++ set_bit(0, &mgr->payload_mask);
++ mgr->vcpi_mask = 0;
++ }
++
++out_unlock:
++ mutex_unlock(&mgr->lock);
++ if (mstb)
++ drm_dp_put_mst_branch_device(mstb);
++ return ret;
++
++}
++EXPORT_SYMBOL(drm_dp_mst_topology_mgr_set_mst);
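
Drivers typically flip this state from their long-pulse/detect path once the sink's DPCD advertises MST. A rough sketch, assuming the DP_MSTM_CAP/DP_MST_CAP defines from a matching drm_dp_helper.h and the same context as the earlier enable sketch:

    /* Hypothetical probe step: enable MST mode iff the sink advertises it. */
    static int example_probe_mst(struct drm_dp_aux *aux,
                                 struct drm_dp_mst_topology_mgr *mgr)
    {
            u8 cap;

            if (drm_dp_dpcd_readb(aux, DP_MSTM_CAP, &cap) != 1)
                    return -EIO;

            /* Enabling kicks off the link-address probe work queued above. */
            return drm_dp_mst_topology_mgr_set_mst(mgr, cap & DP_MST_CAP);
    }
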
++
++/**
++ * drm_dp_mst_topology_mgr_suspend() - suspend the MST manager
++ * @mgr: manager to suspend
++ *
++ * This function tells the MST device that we can't handle UP messages
++ * anymore. This should stop it from sending any since we are suspended.
++ */
++void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
++{
++ mutex_lock(&mgr->lock);
++ drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
++ DP_MST_EN | DP_UPSTREAM_IS_SRC);
++ mutex_unlock(&mgr->lock);
++}
++EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);
++
++/**
++ * drm_dp_mst_topology_mgr_resume() - resume the MST manager
++ * @mgr: manager to resume
++ *
++ * This will fetch the DPCD and see if the device is still there;
++ * if it is, it will rewrite the MSTM control bits, and return.
++ *
++ * If the device fails this check, it returns -1, and the driver should do
++ * a full MST reprobe, in case we were undocked.
++ */
++int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
++{
++ int ret = 0;
++
++ mutex_lock(&mgr->lock);
++
++ if (mgr->mst_primary) {
++ int sret;
++ sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
++ if (sret != DP_RECEIVER_CAP_SIZE) {
++ DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
++ ret = -1;
++ goto out_unlock;
++ }
++
++ ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
++ DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
++ if (ret < 0) {
++ DRM_DEBUG_KMS("mst write failed - undocked during suspend?\n");
++ ret = -1;
++ goto out_unlock;
++ }
++ ret = 0;
++ } else
++ ret = -1;
++
++out_unlock:
++ mutex_unlock(&mgr->lock);
++ return ret;
++}
++EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
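
On the resume side, the -1 return is the cue to drop the topology and reprobe; one way a driver might react (mirroring what callers of this API commonly do, names hypothetical):

    static void example_mst_resume(struct drm_dp_mst_topology_mgr *mgr)
    {
            if (drm_dp_mst_topology_mgr_resume(mgr) < 0) {
                    /*
                     * The sink vanished or changed while suspended (undock):
                     * tear MST down and let the normal detect path reprobe.
                     */
                    drm_dp_mst_topology_mgr_set_mst(mgr, false);
            }
    }
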
++
++static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
++{
++ int len;
++ u8 replyblock[32];
++ int replylen, origlen, curreply;
++ int ret;
++ struct drm_dp_sideband_msg_rx *msg;
++ int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE : DP_SIDEBAND_MSG_DOWN_REP_BASE;
++ msg = up ? &mgr->up_req_recv : &mgr->down_rep_recv;
++
++ len = min(mgr->max_dpcd_transaction_bytes, 16);
++ ret = drm_dp_dpcd_read(mgr->aux, basereg,
++ replyblock, len);
++ if (ret != len) {
++ DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret);
++ return;
++ }
++ ret = drm_dp_sideband_msg_build(msg, replyblock, len, true);
++ if (!ret) {
++ DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]);
++ return;
++ }
++ replylen = msg->curchunk_len + msg->curchunk_hdrlen;
++
++ origlen = replylen;
++ replylen -= len;
++ curreply = len;
++ while (replylen > 0) {
++ len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16);
++ ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply,
++ replyblock, len);
++ if (ret != len) {
++ DRM_DEBUG_KMS("failed to read a chunk\n");
++ }
++ ret = drm_dp_sideband_msg_build(msg, replyblock, len, false);
++ if (ret == false)
++ DRM_DEBUG_KMS("failed to build sideband msg\n");
++ curreply += len;
++ replylen -= len;
++ }
++}
++
++static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
++{
++ int ret = 0;
++
++ drm_dp_get_one_sb_msg(mgr, false);
++
++ if (mgr->down_rep_recv.have_eomt) {
++ struct drm_dp_sideband_msg_tx *txmsg;
++ struct drm_dp_mst_branch *mstb;
++ int slot = -1;
++ mstb = drm_dp_get_mst_branch_device(mgr,
++ mgr->down_rep_recv.initial_hdr.lct,
++ mgr->down_rep_recv.initial_hdr.rad);
++
++ if (!mstb) {
++ DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->down_rep_recv.initial_hdr.lct);
++ memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
++ return 0;
++ }
++
++ /* find the message */
++ slot = mgr->down_rep_recv.initial_hdr.seqno;
++ mutex_lock(&mgr->qlock);
++ txmsg = mstb->tx_slots[slot];
++ /* remove from slots */
++ mutex_unlock(&mgr->qlock);
++
++ if (!txmsg) {
++ DRM_DEBUG_KMS("Got MST reply with no msg %p %d %d %02x %02x\n",
++ mstb,
++ mgr->down_rep_recv.initial_hdr.seqno,
++ mgr->down_rep_recv.initial_hdr.lct,
++ mgr->down_rep_recv.initial_hdr.rad[0],
++ mgr->down_rep_recv.msg[0]);
++ drm_dp_put_mst_branch_device(mstb);
++ memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
++ return 0;
++ }
++
++ drm_dp_sideband_parse_reply(&mgr->down_rep_recv, &txmsg->reply);
++ if (txmsg->reply.reply_type == 1) {
++ DRM_DEBUG_KMS("Got NAK reply: req 0x%02x, reason 0x%02x, nak data 0x%02x\n", txmsg->reply.req_type, txmsg->reply.u.nak.reason, txmsg->reply.u.nak.nak_data);
++ }
++
++ memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
++ drm_dp_put_mst_branch_device(mstb);
++
++ mutex_lock(&mgr->qlock);
++ txmsg->state = DRM_DP_SIDEBAND_TX_RX;
++ mstb->tx_slots[slot] = NULL;
++ mutex_unlock(&mgr->qlock);
++
++ wake_up(&mgr->tx_waitq);
++ }
++ return ret;
++}
++
++static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
++{
++ int ret = 0;
++ drm_dp_get_one_sb_msg(mgr, true);
++
++ if (mgr->up_req_recv.have_eomt) {
++ struct drm_dp_sideband_msg_req_body msg;
++ struct drm_dp_mst_branch *mstb;
++ bool seqno;
++ mstb = drm_dp_get_mst_branch_device(mgr,
++ mgr->up_req_recv.initial_hdr.lct,
++ mgr->up_req_recv.initial_hdr.rad);
++ if (!mstb) {
++ DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
++ memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
++ return 0;
++ }
++
++ seqno = mgr->up_req_recv.initial_hdr.seqno;
++ drm_dp_sideband_parse_req(&mgr->up_req_recv, &msg);
++
++ if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
++ drm_dp_send_up_ack_reply(mgr, mstb, msg.req_type, seqno, false);
++ drm_dp_update_port(mstb, &msg.u.conn_stat);
++ DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type);
++ (*mgr->cbs->hotplug)(mgr);
++
++ } else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
++ drm_dp_send_up_ack_reply(mgr, mstb, msg.req_type, seqno, false);
++ DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn);
++ }
++
++ drm_dp_put_mst_branch_device(mstb);
++ memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
++ }
++ return ret;
++}
++
++/**
++ * drm_dp_mst_hpd_irq() - MST hotplug IRQ notify
++ * @mgr: manager to notify irq for.
++ * @esi: 4 bytes from SINK_COUNT_ESI
++ * @handled: whether the hpd interrupt was consumed or not
++ *
++ * This should be called from the driver when it detects a short IRQ,
++ * along with the value of the DEVICE_SERVICE_IRQ_VECTOR_ESI0. The
++ * topology manager will process the sideband messages received as a result
++ * of this.
++ */
++int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled)
++{
++ int ret = 0;
++ int sc;
++ *handled = false;
++ sc = esi[0] & 0x3f;
++
++ if (sc != mgr->sink_count) {
++ mgr->sink_count = sc;
++ *handled = true;
++ }
++
++ if (esi[1] & DP_DOWN_REP_MSG_RDY) {
++ ret = drm_dp_mst_handle_down_rep(mgr);
++ *handled = true;
++ }
++
++ if (esi[1] & DP_UP_REQ_MSG_RDY) {
++ ret |= drm_dp_mst_handle_up_req(mgr);
++ *handled = true;
++ }
++
++ drm_dp_mst_kick_tx(mgr);
++ return ret;
++}
++EXPORT_SYMBOL(drm_dp_mst_hpd_irq);
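
The intended caller is the driver's short-pulse handler: read the ESI block from the sink, pass it in, then ack the serviced bits so the sink can raise further IRQs. A condensed sketch, assuming the ESI DPCD offsets from drm_dp_helper.h and a hypothetical aux/mgr pair:

    /* Hypothetical short-HPD handler body. */
    static void example_mst_irq(struct drm_dp_aux *aux,
                                struct drm_dp_mst_topology_mgr *mgr)
    {
            u8 esi[14];
            bool handled;

            /* SINK_COUNT_ESI plus the IRQ vector / link status ESI bytes. */
            if (drm_dp_dpcd_read(aux, DP_SINK_COUNT_ESI, esi, 14) != 14)
                    return;

            drm_dp_mst_hpd_irq(mgr, esi, &handled);
            if (handled)
                    drm_dp_dpcd_writeb(aux, DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0,
                                       esi[1]);
    }
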
++
++/**
++ * drm_dp_mst_detect_port() - get connection status for an MST port
++ * @connector: connector for this port
++ * @mgr: manager for this port
++ * @port: unverified pointer to a port
++ *
++ * This returns the current connection state for a port. It validates the
++ * port pointer still exists so the caller doesn't require a reference.
++ */
++enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector,
++ struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
++{
++ enum drm_connector_status status = connector_status_disconnected;
++
++ /* we need to search for the port in the mgr in case it's gone */
++ port = drm_dp_get_validated_port_ref(mgr, port);
++ if (!port)
++ return connector_status_disconnected;
++
++ if (!port->ddps)
++ goto out;
++
++ switch (port->pdt) {
++ case DP_PEER_DEVICE_NONE:
++ case DP_PEER_DEVICE_MST_BRANCHING:
++ break;
++
++ case DP_PEER_DEVICE_SST_SINK:
++ status = connector_status_connected;
++ /* for logical ports - cache the EDID */
++ if (port->port_num >= 8 && !port->cached_edid) {
++ port->cached_edid = drm_get_edid(connector, &port->aux.ddc);
++ }
++ break;
++ case DP_PEER_DEVICE_DP_LEGACY_CONV:
++ if (port->ldps)
++ status = connector_status_connected;
++ break;
++ }
++out:
++ drm_dp_put_port(port);
++ return status;
++}
++EXPORT_SYMBOL(drm_dp_mst_detect_port);
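
This plugs straight into a connector's .detect hook; a sketch with a hypothetical per-port connector wrapper:

    /* Hypothetical connector state for one MST port. */
    struct example_mst_connector {
            struct drm_connector base;
            struct drm_dp_mst_topology_mgr *mgr;
            struct drm_dp_mst_port *port;
    };

    static enum drm_connector_status
    example_detect(struct drm_connector *connector, bool force)
    {
            struct example_mst_connector *c =
                    container_of(connector, struct example_mst_connector, base);

            return drm_dp_mst_detect_port(connector, c->mgr, c->port);
    }
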
++
++/**
++ * drm_dp_mst_get_edid() - get EDID for an MST port
++ * @connector: top-level connector to get EDID for
++ * @mgr: manager for this port
++ * @port: unverified pointer to a port.
++ *
++ * This returns an EDID for the port connected to a connector.
++ * It validates the pointer still exists so the caller doesn't require a
++ * reference.
++ */
++struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
++{
++ struct edid *edid = NULL;
++
++ /* we need to search for the port in the mgr in case it's gone */
++ port = drm_dp_get_validated_port_ref(mgr, port);
++ if (!port)
++ return NULL;
++
++ if (port->cached_edid)
++ edid = drm_edid_duplicate(port->cached_edid);
++ else
++ edid = drm_get_edid(connector, &port->aux.ddc);
++
++ drm_mode_connector_set_tile_property(connector);
++ drm_dp_put_port(port);
++ return edid;
++}
++EXPORT_SYMBOL(drm_dp_mst_get_edid);
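
The matching .get_modes hook, reusing the hypothetical wrapper from the detect sketch above:

    static int example_get_modes(struct drm_connector *connector)
    {
            struct example_mst_connector *c =
                    container_of(connector, struct example_mst_connector, base);
            struct edid *edid;
            int count = 0;

            edid = drm_dp_mst_get_edid(connector, c->mgr, c->port);
            if (edid) {
                    drm_mode_connector_update_edid_property(connector, edid);
                    count = drm_add_edid_modes(connector, edid);
                    kfree(edid);
            }
            return count;
    }
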
++
++/**
++ * drm_dp_find_vcpi_slots() - find slots for this PBN value
++ * @mgr: manager to use
++ * @pbn: payload bandwidth to convert into slots.
++ */
++int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
++ int pbn)
++{
++ int num_slots;
++
++ num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
++
++ if (num_slots > mgr->avail_slots)
++ return -ENOSPC;
++ return num_slots;
++}
++EXPORT_SYMBOL(drm_dp_find_vcpi_slots);
++
++static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr,
++ struct drm_dp_vcpi *vcpi, int pbn)
++{
++ int num_slots;
++ int ret;
++
++ num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
++
++ if (num_slots > mgr->avail_slots)
++ return -ENOSPC;
++
++ vcpi->pbn = pbn;
++ vcpi->aligned_pbn = num_slots * mgr->pbn_div;
++ vcpi->num_slots = num_slots;
++
++ ret = drm_dp_mst_assign_payload_id(mgr, vcpi);
++ if (ret < 0)
++ return ret;
++ return 0;
++}
++
++/**
++ * drm_dp_mst_allocate_vcpi() - Allocate a virtual channel
++ * @mgr: manager for this port
++ * @port: port to allocate a virtual channel for.
++ * @pbn: payload bandwidth number to request
++ * @slots: returned number of slots for this PBN.
++ */
++bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, int pbn, int *slots)
++{
++ int ret;
++
++ port = drm_dp_get_validated_port_ref(mgr, port);
++ if (!port)
++ return false;
++
++ if (port->vcpi.vcpi > 0) {
++ DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n", port->vcpi.vcpi, port->vcpi.pbn, pbn);
++ if (pbn == port->vcpi.pbn) {
++ *slots = port->vcpi.num_slots;
++ return true;
++ }
++ }
++
++ ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn);
++ if (ret) {
++ DRM_DEBUG_KMS("failed to init vcpi %d %d %d\n", DIV_ROUND_UP(pbn, mgr->pbn_div), mgr->avail_slots, ret);
++ goto out;
++ }
++ DRM_DEBUG_KMS("initing vcpi for %d %d\n", pbn, port->vcpi.num_slots);
++ *slots = port->vcpi.num_slots;
++
++ drm_dp_put_port(port);
++ return true;
++out:
++ return false;
++}
++EXPORT_SYMBOL(drm_dp_mst_allocate_vcpi);
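
Paired with drm_dp_calc_pbn_mode() below, these form the bandwidth check (at mode validation time) and the allocation step (at enable time); a sketch with hypothetical callers:

    /* Hypothetical mode filter: reject modes that don't fit the link. */
    static enum drm_mode_status
    example_mode_valid(struct drm_dp_mst_topology_mgr *mgr,
                       const struct drm_display_mode *mode, int bpp)
    {
            int pbn = drm_dp_calc_pbn_mode(mode->clock, bpp);

            if (drm_dp_find_vcpi_slots(mgr, pbn) < 0)
                    return MODE_CLOCK_HIGH;
            return MODE_OK;
    }

    /* Hypothetical enable step: claim a VCPI before payload programming. */
    static int example_claim_vcpi(struct drm_dp_mst_topology_mgr *mgr,
                                  struct drm_dp_mst_port *port, int pbn)
    {
            int slots;

            if (!drm_dp_mst_allocate_vcpi(mgr, port, pbn, &slots))
                    return -ENOSPC;
            return slots; /* feeds the payload update sequence shown earlier */
    }
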
++
++/**
++ * drm_dp_mst_reset_vcpi_slots() - Reset number of slots to 0 for VCPI
++ * @mgr: manager for this port
++ * @port: unverified pointer to a port.
++ *
++ * This just resets the number of slots for the port's VCPI for later programming.
++ */
++void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
++{
++ port = drm_dp_get_validated_port_ref(mgr, port);
++ if (!port)
++ return;
++ port->vcpi.num_slots = 0;
++ drm_dp_put_port(port);
++}
++EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots);
++
++/**
++ * drm_dp_mst_deallocate_vcpi() - deallocate a VCPI
++ * @mgr: manager for this port
++ * @port: unverified port to deallocate vcpi for
++ */
++void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
++{
++ port = drm_dp_get_validated_port_ref(mgr, port);
++ if (!port)
++ return;
++
++ drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
++ port->vcpi.num_slots = 0;
++ port->vcpi.pbn = 0;
++ port->vcpi.aligned_pbn = 0;
++ port->vcpi.vcpi = 0;
++ drm_dp_put_port(port);
++}
++EXPORT_SYMBOL(drm_dp_mst_deallocate_vcpi);
++
++static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
++ int id, struct drm_dp_payload *payload)
++{
++ u8 payload_alloc[3], status;
++ int ret;
++ int retries = 0;
++
++ drm_dp_dpcd_writeb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
++ DP_PAYLOAD_TABLE_UPDATED);
++
++ payload_alloc[0] = id;
++ payload_alloc[1] = payload->start_slot;
++ payload_alloc[2] = payload->num_slots;
++
++ ret = drm_dp_dpcd_write(mgr->aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3);
++ if (ret != 3) {
++ DRM_DEBUG_KMS("failed to write payload allocation %d\n", ret);
++ goto fail;
++ }
++
++retry:
++ ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
++ if (ret < 0) {
++ DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
++ goto fail;
++ }
++
++ if (!(status & DP_PAYLOAD_TABLE_UPDATED)) {
++ retries++;
++ if (retries < 20) {
++ usleep_range(10000, 20000);
++ goto retry;
++ }
++ DRM_DEBUG_KMS("status not set after read payload table status %d\n", status);
++ ret = -EINVAL;
++ goto fail;
++ }
++ ret = 0;
++fail:
++ return ret;
++}
++
++
++/**
++ * drm_dp_check_act_status() - Check ACT handled status.
++ * @mgr: manager to use
++ *
++ * Check the payload status bits in the DPCD for ACT handled completion.
++ */
++int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr)
++{
++ u8 status;
++ int ret;
++ int count = 0;
++
++ do {
++ ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
++
++ if (ret < 0) {
++ DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
++ goto fail;
++ }
++
++ if (status & DP_PAYLOAD_ACT_HANDLED)
++ break;
++ count++;
++ udelay(100);
++
++ } while (count < 30);
++
++ if (!(status & DP_PAYLOAD_ACT_HANDLED)) {
++ DRM_DEBUG_KMS("failed to get ACT bit %d after %d retries\n", status, count);
++ ret = -EINVAL;
++ goto fail;
++ }
++ return 0;
++fail:
++ return ret;
++}
++EXPORT_SYMBOL(drm_dp_check_act_status);
++
++/**
++ * drm_dp_calc_pbn_mode() - Calculate the PBN for a mode.
++ * @clock: dot clock for the mode
++ * @bpp: bpp for the mode.
++ *
++ * This uses the formula in the spec to calculate the PBN value for a mode.
++ */
++int drm_dp_calc_pbn_mode(int clock, int bpp)
++{
++ fixed20_12 pix_bw;
++ fixed20_12 fbpp;
++ fixed20_12 result;
++ fixed20_12 margin, tmp;
++ u32 res;
++
++ pix_bw.full = dfixed_const(clock);
++ fbpp.full = dfixed_const(bpp);
++ tmp.full = dfixed_const(8);
++ fbpp.full = dfixed_div(fbpp, tmp);
++
++ result.full = dfixed_mul(pix_bw, fbpp);
++ margin.full = dfixed_const(54);
++ tmp.full = dfixed_const(64);
++ margin.full = dfixed_div(margin, tmp);
++ result.full = dfixed_div(result, margin);
++
++ margin.full = dfixed_const(1006);
++ tmp.full = dfixed_const(1000);
++ margin.full = dfixed_div(margin, tmp);
++ result.full = dfixed_mul(result, margin);
++
++ result.full = dfixed_div(result, tmp);
++ result.full = dfixed_ceil(result);
++ res = dfixed_trunc(result);
++ return res;
++}
++EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
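
Unrolled, the fixed-point sequence above computes PBN = ceil(clock * (bpp/8) * (64/54) * (1006/1000) / 1000), with clock in kHz: the stream bandwidth in MByte/sec, padded by a 0.6% margin and expressed in 54/64-MByte/sec PBN units. For the first self-test value below: 154000 * 30/8 = 577500; * 64/54 ≈ 684444; * 1.006 ≈ 688551; / 1000 = 688.55, which rounds up to 689.
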
++
++static int test_calc_pbn_mode(void)
++{
++ int ret;
++ ret = drm_dp_calc_pbn_mode(154000, 30);
++ if (ret != 689)
++ return -EINVAL;
++ ret = drm_dp_calc_pbn_mode(234000, 30);
++ if (ret != 1047)
++ return -EINVAL;
++ return 0;
++}
++
++/* we want to kick the TX after we've acked the up/down IRQs. */
++static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr)
++{
++ queue_work(system_long_wq, &mgr->tx_work);
++}
++
++static void drm_dp_mst_dump_mstb(struct seq_file *m,
++ struct drm_dp_mst_branch *mstb)
++{
++ struct drm_dp_mst_port *port;
++ int tabs = mstb->lct;
++ char prefix[10];
++ int i;
++
++ for (i = 0; i < tabs; i++)
++ prefix[i] = '\t';
++ prefix[i] = '\0';
++
++ seq_printf(m, "%smst: %p, %d\n", prefix, mstb, mstb->num_ports);
++ list_for_each_entry(port, &mstb->ports, next) {
++ seq_printf(m, "%sport: %d: ddps: %d ldps: %d, %p, conn: %p\n", prefix, port->port_num, port->ddps, port->ldps, port, port->connector);
++ if (port->mstb)
++ drm_dp_mst_dump_mstb(m, port->mstb);
++ }
++}
++
++static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
++ char *buf)
++{
++ int ret;
++ int i;
++ for (i = 0; i < 4; i++) {
++ ret = drm_dp_dpcd_read(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS + (i * 16), &buf[i * 16], 16);
++ if (ret != 16)
++ break;
++ }
++ if (i == 4)
++ return true;
++ return false;
++}
++
++/**
++ * drm_dp_mst_dump_topology(): dump topology to seq file.
++ * @m: seq_file to dump output to
++ * @mgr: manager to dump current topology for.
++ *
++ * Helper to dump the MST topology to a seq file for debugfs.
++ */
++void drm_dp_mst_dump_topology(struct seq_file *m,
++ struct drm_dp_mst_topology_mgr *mgr)
++{
++ int i;
++ struct drm_dp_mst_port *port;
++ mutex_lock(&mgr->lock);
++ if (mgr->mst_primary)
++ drm_dp_mst_dump_mstb(m, mgr->mst_primary);
++
++ /* dump VCPIs */
++ mutex_unlock(&mgr->lock);
++
++ mutex_lock(&mgr->payload_lock);
++ seq_printf(m, "vcpi: %lx %lx\n", mgr->payload_mask, mgr->vcpi_mask);
++
++ for (i = 0; i < mgr->max_payloads; i++) {
++ if (mgr->proposed_vcpis[i]) {
++ port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
++ seq_printf(m, "vcpi %d: %d %d %d\n", i, port->port_num, port->vcpi.vcpi, port->vcpi.num_slots);
++ } else
++ seq_printf(m, "vcpi %d: unused\n", i);
++ }
++ for (i = 0; i < mgr->max_payloads; i++) {
++ seq_printf(m, "payload %d: %d, %d, %d\n",
++ i,
++ mgr->payloads[i].payload_state,
++ mgr->payloads[i].start_slot,
++ mgr->payloads[i].num_slots);
++
++
++ }
++ mutex_unlock(&mgr->payload_lock);
++
++ mutex_lock(&mgr->lock);
++ if (mgr->mst_primary) {
++ u8 buf[64];
++ bool bret;
++ int ret;
++ ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, buf, DP_RECEIVER_CAP_SIZE);
++ seq_printf(m, "dpcd: ");
++ for (i = 0; i < DP_RECEIVER_CAP_SIZE; i++)
++ seq_printf(m, "%02x ", buf[i]);
++ seq_printf(m, "\n");
++ ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2);
++ seq_printf(m, "faux/mst: ");
++ for (i = 0; i < 2; i++)
++ seq_printf(m, "%02x ", buf[i]);
++ seq_printf(m, "\n");
++ ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1);
++ seq_printf(m, "mst ctrl: ");
++ for (i = 0; i < 1; i++)
++ seq_printf(m, "%02x ", buf[i]);
++ seq_printf(m, "\n");
++
++ bret = dump_dp_payload_table(mgr, buf);
++ if (bret == true) {
++ seq_printf(m, "payload table: ");
++ for (i = 0; i < 63; i++)
++ seq_printf(m, "%02x ", buf[i]);
++ seq_printf(m, "\n");
++ }
++
++ }
++
++ mutex_unlock(&mgr->lock);
++
++}
++EXPORT_SYMBOL(drm_dp_mst_dump_topology);
++
++static void drm_dp_tx_work(struct work_struct *work)
++{
++ struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);
++
++ mutex_lock(&mgr->qlock);
++ if (mgr->tx_down_in_progress)
++ process_single_down_tx_qlock(mgr);
++ mutex_unlock(&mgr->qlock);
++}
++
++/**
++ * drm_dp_mst_topology_mgr_init - initialise a topology manager
++ * @mgr: manager struct to initialise
++ * @dev: device providing this structure - for i2c addition.
++ * @aux: DP helper aux channel to talk to this device
++ * @max_dpcd_transaction_bytes: hw specific DPCD transaction limit
++ * @max_payloads: maximum number of payloads this GPU can source
++ * @conn_base_id: the connector object ID the MST device is connected to.
++ *
++ * Return 0 for success, or negative error code on failure
++ */
++int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
++ struct device *dev, struct drm_dp_aux *aux,
++ int max_dpcd_transaction_bytes,
++ int max_payloads, int conn_base_id)
++{
++ mutex_init(&mgr->lock);
++ mutex_init(&mgr->qlock);
++ mutex_init(&mgr->payload_lock);
++ INIT_LIST_HEAD(&mgr->tx_msg_upq);
++ INIT_LIST_HEAD(&mgr->tx_msg_downq);
++ INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
++ INIT_WORK(&mgr->tx_work, drm_dp_tx_work);
++ init_waitqueue_head(&mgr->tx_waitq);
++ mgr->dev = dev;
++ mgr->aux = aux;
++ mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes;
++ mgr->max_payloads = max_payloads;
++ mgr->conn_base_id = conn_base_id;
++ mgr->payloads = kcalloc(max_payloads, sizeof(struct drm_dp_payload), GFP_KERNEL);
++ if (!mgr->payloads)
++ return -ENOMEM;
++ mgr->proposed_vcpis = kcalloc(max_payloads, sizeof(struct drm_dp_vcpi *), GFP_KERNEL);
++ if (!mgr->proposed_vcpis)
++ return -ENOMEM;
++ set_bit(0, &mgr->payload_mask);
++ test_calc_pbn_mode();
++ return 0;
++}
++EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
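
A driver calls this once per DP port at load time (and drm_dp_mst_topology_mgr_destroy() on teardown). Note that the code above also dereferences mgr->cbs->add_connector and mgr->cbs->hotplug, so the driver must point mgr->cbs at its callback table before enabling MST. A sketch with hypothetical transfer and payload sizes:

    /* Hypothetical per-port init: 16-byte AUX transfers, 8 payload streams. */
    static int example_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
                                struct device *dev, struct drm_dp_aux *aux,
                                int conn_base_id)
    {
            return drm_dp_mst_topology_mgr_init(mgr, dev, aux,
                                                16 /* max DPCD transaction */,
                                                8 /* max payloads */,
                                                conn_base_id);
    }
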
++
++/**
++ * drm_dp_mst_topology_mgr_destroy() - destroy topology manager.
++ * @mgr: manager to destroy
++ */
++void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
++{
++ mutex_lock(&mgr->payload_lock);
++ kfree(mgr->payloads);
++ mgr->payloads = NULL;
++ kfree(mgr->proposed_vcpis);
++ mgr->proposed_vcpis = NULL;
++ mutex_unlock(&mgr->payload_lock);
++ mgr->dev = NULL;
++ mgr->aux = NULL;
++}
++EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy);
++
++/* I2C device */
++static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
++ int num)
++{
++ struct drm_dp_aux *aux = adapter->algo_data;
++ struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port, aux);
++ struct drm_dp_mst_branch *mstb;
++ struct drm_dp_mst_topology_mgr *mgr = port->mgr;
++ unsigned int i;
++ bool reading = false;
++ struct drm_dp_sideband_msg_req_body msg;
++ struct drm_dp_sideband_msg_tx *txmsg = NULL;
++ int ret;
++
++ mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
++ if (!mstb)
++ return -EREMOTEIO;
++
++ /* construct i2c msg */
++ /* see if last msg is a read */
++ if (msgs[num - 1].flags & I2C_M_RD)
++ reading = true;
++
++ if (!reading) {
++ DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n");
++ ret = -EIO;
++ goto out;
++ }
++
++ msg.req_type = DP_REMOTE_I2C_READ;
++ msg.u.i2c_read.num_transactions = num - 1;
++ msg.u.i2c_read.port_number = port->port_num;
++ for (i = 0; i < num - 1; i++) {
++ msg.u.i2c_read.transactions[i].i2c_dev_id = msgs[i].addr;
++ msg.u.i2c_read.transactions[i].num_bytes = msgs[i].len;
++ msg.u.i2c_read.transactions[i].bytes = msgs[i].buf;
++ }
++ msg.u.i2c_read.read_i2c_device_id = msgs[num - 1].addr;
++ msg.u.i2c_read.num_bytes_read = msgs[num - 1].len;
++
++ txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
++ if (!txmsg) {
++ ret = -ENOMEM;
++ goto out;
++ }
++
++ txmsg->dst = mstb;
++ drm_dp_encode_sideband_req(&msg, txmsg);
++
++ drm_dp_queue_down_tx(mgr, txmsg);
++
++ ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
++ if (ret > 0) {
++
++ if (txmsg->reply.reply_type == 1) { /* got a NAK back */
++ ret = -EREMOTEIO;
++ goto out;
++ }
++ if (txmsg->reply.u.remote_i2c_read_ack.num_bytes != msgs[num - 1].len) {
++ ret = -EIO;
++ goto out;
++ }
++ memcpy(msgs[num - 1].buf, txmsg->reply.u.remote_i2c_read_ack.bytes, msgs[num - 1].len);
++ ret = num;
++ }
++out:
++ kfree(txmsg);
++ drm_dp_put_mst_branch_device(mstb);
++ return ret;
++}
++
++static u32 drm_dp_mst_i2c_functionality(struct i2c_adapter *adapter)
++{
++ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
++ I2C_FUNC_SMBUS_READ_BLOCK_DATA |
++ I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
++ I2C_FUNC_10BIT_ADDR;
++}
++
++static const struct i2c_algorithm drm_dp_mst_i2c_algo = {
++ .functionality = drm_dp_mst_i2c_functionality,
++ .master_xfer = drm_dp_mst_i2c_xfer,
++};
++
++/**
++ * drm_dp_mst_register_i2c_bus() - register an I2C adapter for I2C-over-AUX
++ * @aux: DisplayPort AUX channel
++ *
++ * Returns 0 on success or a negative error code on failure.
++ */
++static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux)
++{
++ aux->ddc.algo = &drm_dp_mst_i2c_algo;
++ aux->ddc.algo_data = aux;
++ aux->ddc.retries = 3;
++
++ aux->ddc.class = I2C_CLASS_DDC;
++ aux->ddc.owner = THIS_MODULE;
++ aux->ddc.dev.parent = aux->dev;
++ aux->ddc.dev.of_node = aux->dev->of_node;
++
++ strlcpy(aux->ddc.name, aux->name ? aux->name : dev_name(aux->dev),
++ sizeof(aux->ddc.name));
++
++ return i2c_add_adapter(&aux->ddc);
++}
++
++/**
++ * drm_dp_mst_unregister_i2c_bus() - unregister an I2C-over-AUX adapter
++ * @aux: DisplayPort AUX channel
++ */
++static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux)
++{
++ i2c_del_adapter(&aux->ddc);
++}
+diff -Naur a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
+--- a/drivers/gpu/drm/drm_drv.c 2015-03-26 14:43:30.486436437 +0530
++++ b/drivers/gpu/drm/drm_drv.c 2015-03-26 14:42:38.718435422 +0530
+@@ -1,31 +1,11 @@
+-/**
+- * \file drm_drv.c
+- * Generic driver template
+- *
+- * \author Rickard E. (Rik) Faith <faith@valinux.com>
+- * \author Gareth Hughes <gareth@valinux.com>
+- *
+- * To use this template, you must at least define the following (samples
+- * given for the MGA driver):
+- *
+- * \code
+- * #define DRIVER_AUTHOR "VA Linux Systems, Inc."
+- *
+- * #define DRIVER_NAME "mga"
+- * #define DRIVER_DESC "Matrox G200/G400"
+- * #define DRIVER_DATE "20001127"
+- *
+- * #define drm_x mga_##x
+- * \endcode
+- */
+-
+ /*
+- * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
++ * Created: Fri Jan 19 10:48:35 2001 by faith@acm.org
+ *
+- * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
+- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
++ * Author Rickard E. (Rik) Faith <faith@valinux.com>
++ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+@@ -40,138 +20,835 @@
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+- * OTHER DEALINGS IN THE SOFTWARE.
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
+ */
+
+ #include <linux/debugfs.h>
++#include <linux/fs.h>
++#include <linux/module.h>
++#include <linux/moduleparam.h>
++#include <linux/mount.h>
+ #include <linux/slab.h>
+-#include <linux/export.h>
+ #include <drm/drmP.h>
+ #include <drm/drm_core.h>
++#include "drm_legacy.h"
++#include "drm_internal.h"
++
++unsigned int drm_debug = 0; /* 1 to enable debug output */
++EXPORT_SYMBOL(drm_debug);
++
++MODULE_AUTHOR(CORE_AUTHOR);
++MODULE_DESCRIPTION(CORE_DESC);
++MODULE_LICENSE("GPL and additional rights");
++MODULE_PARM_DESC(debug, "Enable debug output");
++MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs] (0: never disable, <0: disable immediately)");
++MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
++MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps");
++
++module_param_named(debug, drm_debug, int, 0600);
++
++static DEFINE_SPINLOCK(drm_minor_lock);
++static struct idr drm_minors_idr;
++
++struct class *drm_class;
++static struct dentry *drm_debugfs_root;
++
++void drm_err(const char *format, ...)
++{
++ struct va_format vaf;
++ va_list args;
++
++ va_start(args, format);
++
++ vaf.fmt = format;
++ vaf.va = &args;
++
++ printk(KERN_ERR "[" DRM_NAME ":%pf] *ERROR* %pV",
++ __builtin_return_address(0), &vaf);
++
++ va_end(args);
++}
++EXPORT_SYMBOL(drm_err);
++
++void drm_ut_debug_printk(const char *function_name, const char *format, ...)
++{
++ struct va_format vaf;
++ va_list args;
++
++ va_start(args, format);
++ vaf.fmt = format;
++ vaf.va = &args;
++
++ printk(KERN_DEBUG "[" DRM_NAME ":%s] %pV", function_name, &vaf);
++
++ va_end(args);
++}
++EXPORT_SYMBOL(drm_ut_debug_printk);
++
++#define DRM_MAGIC_HASH_ORDER 4 /**< Size of key hash table. Must be power of 2. */
++
++struct drm_master *drm_master_create(struct drm_minor *minor)
++{
++ struct drm_master *master;
++
++ master = kzalloc(sizeof(*master), GFP_KERNEL);
++ if (!master)
++ return NULL;
++
++ kref_init(&master->refcount);
++ spin_lock_init(&master->lock.spinlock);
++ init_waitqueue_head(&master->lock.lock_queue);
++ if (drm_ht_create(&master->magiclist, DRM_MAGIC_HASH_ORDER)) {
++ kfree(master);
++ return NULL;
++ }
++ INIT_LIST_HEAD(&master->magicfree);
++ master->minor = minor;
++
++ return master;
++}
++
++struct drm_master *drm_master_get(struct drm_master *master)
++{
++ kref_get(&master->refcount);
++ return master;
++}
++EXPORT_SYMBOL(drm_master_get);
++
++static void drm_master_destroy(struct kref *kref)
++{
++ struct drm_master *master = container_of(kref, struct drm_master, refcount);
++ struct drm_device *dev = master->minor->dev;
++ struct drm_map_list *r_list, *list_temp;
++
++ mutex_lock(&dev->struct_mutex);
++ if (dev->driver->master_destroy)
++ dev->driver->master_destroy(dev, master);
++
++ list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
++ if (r_list->master == master) {
++ drm_legacy_rmmap_locked(dev, r_list->map);
++ r_list = NULL;
++ }
++ }
++
++ if (master->unique) {
++ kfree(master->unique);
++ master->unique = NULL;
++ master->unique_len = 0;
++ }
++
++ drm_ht_remove(&master->magiclist);
++
++ mutex_unlock(&dev->struct_mutex);
++ kfree(master);
++}
++
++void drm_master_put(struct drm_master **master)
++{
++ kref_put(&(*master)->refcount, drm_master_destroy);
++ *master = NULL;
++}
++EXPORT_SYMBOL(drm_master_put);
++
++int drm_setmaster_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ int ret = 0;
++
++ mutex_lock(&dev->master_mutex);
++ if (file_priv->is_master)
++ goto out_unlock;
++
++ if (file_priv->minor->master) {
++ ret = -EINVAL;
++ goto out_unlock;
++ }
++
++ if (!file_priv->master) {
++ ret = -EINVAL;
++ goto out_unlock;
++ }
++
++ file_priv->minor->master = drm_master_get(file_priv->master);
++ file_priv->is_master = 1;
++ if (dev->driver->master_set) {
++ ret = dev->driver->master_set(dev, file_priv, false);
++ if (unlikely(ret != 0)) {
++ file_priv->is_master = 0;
++ drm_master_put(&file_priv->minor->master);
++ }
++ }
++
++out_unlock:
++ mutex_unlock(&dev->master_mutex);
++ return ret;
++}
++
++int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ int ret = -EINVAL;
++
++ mutex_lock(&dev->master_mutex);
++ if (!file_priv->is_master)
++ goto out_unlock;
++
++ if (!file_priv->minor->master)
++ goto out_unlock;
++
++ ret = 0;
++ if (dev->driver->master_drop)
++ dev->driver->master_drop(dev, file_priv, false);
++ drm_master_put(&file_priv->minor->master);
++ file_priv->is_master = 0;
++
++out_unlock:
++ mutex_unlock(&dev->master_mutex);
++ return ret;
++}
++
++/*
++ * DRM Minors
++ * A DRM device can provide several char-dev interfaces on the DRM-Major. Each
++ * of them is represented by a drm_minor object. Depending on the capabilities
++ * of the device-driver, different interfaces are registered.
++ *
++ * Minors can be accessed via dev->$minor_name. This pointer is either
++ * NULL or a valid drm_minor pointer and stays valid as long as the device is
++ * valid. This means, DRM minors have the same life-time as the underlying
++ * device. However, this doesn't mean that the minor is active. Minors are
++ * registered and unregistered dynamically according to device-state.
++ */
++
++static struct drm_minor **drm_minor_get_slot(struct drm_device *dev,
++ unsigned int type)
++{
++ switch (type) {
++ case DRM_MINOR_LEGACY:
++ return &dev->primary;
++ case DRM_MINOR_RENDER:
++ return &dev->render;
++ case DRM_MINOR_CONTROL:
++ return &dev->control;
++ default:
++ return NULL;
++ }
++}
++
++static int drm_minor_alloc(struct drm_device *dev, unsigned int type)
++{
++ struct drm_minor *minor;
++ unsigned long flags;
++ int r;
++
++ minor = kzalloc(sizeof(*minor), GFP_KERNEL);
++ if (!minor)
++ return -ENOMEM;
++
++ minor->type = type;
++ minor->dev = dev;
++
++ idr_preload(GFP_KERNEL);
++ spin_lock_irqsave(&drm_minor_lock, flags);
++ r = idr_alloc(&drm_minors_idr,
++ NULL,
++ 64 * type,
++ 64 * (type + 1),
++ GFP_NOWAIT);
++ spin_unlock_irqrestore(&drm_minor_lock, flags);
++ idr_preload_end();
++
++ if (r < 0)
++ goto err_free;
++
++ minor->index = r;
++
++ minor->kdev = drm_sysfs_minor_alloc(minor);
++ if (IS_ERR(minor->kdev)) {
++ r = PTR_ERR(minor->kdev);
++ goto err_index;
++ }
++
++ *drm_minor_get_slot(dev, type) = minor;
++ return 0;
++
++err_index:
++ spin_lock_irqsave(&drm_minor_lock, flags);
++ idr_remove(&drm_minors_idr, minor->index);
++ spin_unlock_irqrestore(&drm_minor_lock, flags);
++err_free:
++ kfree(minor);
++ return r;
++}
++
++static void drm_minor_free(struct drm_device *dev, unsigned int type)
++{
++ struct drm_minor **slot, *minor;
++ unsigned long flags;
+
++ slot = drm_minor_get_slot(dev, type);
++ minor = *slot;
++ if (!minor)
++ return;
++
++ drm_mode_group_destroy(&minor->mode_group);
++ put_device(minor->kdev);
++
++ spin_lock_irqsave(&drm_minor_lock, flags);
++ idr_remove(&drm_minors_idr, minor->index);
++ spin_unlock_irqrestore(&drm_minor_lock, flags);
+
+-static int drm_version(struct drm_device *dev, void *data,
+- struct drm_file *file_priv);
++ kfree(minor);
++ *slot = NULL;
++}
++
++static int drm_minor_register(struct drm_device *dev, unsigned int type)
++{
++ struct drm_minor *minor;
++ unsigned long flags;
++ int ret;
++
++ DRM_DEBUG("\n");
++
++ minor = *drm_minor_get_slot(dev, type);
++ if (!minor)
++ return 0;
++
++ ret = drm_debugfs_init(minor, minor->index, drm_debugfs_root);
++ if (ret) {
++ DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
++ return ret;
++ }
+
+-#define DRM_IOCTL_DEF(ioctl, _func, _flags) \
+- [DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0, .name = #ioctl}
++ ret = device_add(minor->kdev);
++ if (ret)
++ goto err_debugfs;
++
++ /* replace NULL with @minor so lookups will succeed from now on */
++ spin_lock_irqsave(&drm_minor_lock, flags);
++ idr_replace(&drm_minors_idr, minor, minor->index);
++ spin_unlock_irqrestore(&drm_minor_lock, flags);
+
+-/** Ioctl table */
+-static const struct drm_ioctl_desc drm_ioctls[] = {
+- DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+- DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
+- DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
+- DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
+- DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, DRM_UNLOCKED),
+- DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED),
+- DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, DRM_UNLOCKED),
+- DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+- DRM_IOCTL_DEF(DRM_IOCTL_SET_CLIENT_CAP, drm_setclientcap, 0),
+- DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER),
+-
+- DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+- DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+- DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+- DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER),
+-
+- DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+- DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_rmmap_ioctl, DRM_AUTH),
+-
+- DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+- DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_getsareactx, DRM_AUTH),
+-
+- DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_ROOT_ONLY),
+- DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_ROOT_ONLY),
+-
+- DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_ROOT_ONLY),
+- DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+- DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+- DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_getctx, DRM_AUTH),
+- DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+- DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+- DRM_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_resctx, DRM_AUTH),
+-
+- DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+- DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+-
+- DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_lock, DRM_AUTH),
+- DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_unlock, DRM_AUTH),
+-
+- DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH),
+-
+- DRM_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+- DRM_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+- DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_infobufs, DRM_AUTH),
+- DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_mapbufs, DRM_AUTH),
+- DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_freebufs, DRM_AUTH),
+- DRM_IOCTL_DEF(DRM_IOCTL_DMA, drm_dma_ioctl, DRM_AUTH),
+-
+- DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+-
+-#if __OS_HAS_AGP
+- DRM_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+- DRM_IOCTL_DEF(DRM_IOCTL_AGP_RELEASE, drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+- DRM_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+- DRM_IOCTL_DEF(DRM_IOCTL_AGP_INFO, drm_agp_info_ioctl, DRM_AUTH),
+- DRM_IOCTL_DEF(DRM_IOCTL_AGP_ALLOC, drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+- DRM_IOCTL_DEF(DRM_IOCTL_AGP_FREE, drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+- DRM_IOCTL_DEF(DRM_IOCTL_AGP_BIND, drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+- DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+-#endif
+-
+- DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+- DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+-
+- DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, DRM_UNLOCKED),
+-
+- DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),
+-
+- DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+-
+- DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+- DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH|DRM_UNLOCKED),
+- DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED),
+-
+- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+-
+- DRM_IOCTL_DEF(DRM_IOCTL_PRIME_HANDLE_TO_FD, drm_prime_handle_to_fd_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+- DRM_IOCTL_DEF(DRM_IOCTL_PRIME_FD_TO_HANDLE, drm_prime_fd_to_handle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+-
+- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+- DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+- DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPLANE, drm_mode_setplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+- DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_UNLOCKED),
+- DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER|DRM_UNLOCKED),
+- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+- DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+- DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+- DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+- DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+- DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+- DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+- DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+- DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+- DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+- DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+- DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+- DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+- DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_SETPROPERTY, drm_mode_obj_set_property_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+- DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR2, drm_mode_cursor2_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
++ DRM_DEBUG("new minor registered %d\n", minor->index);
++ return 0;
++
++err_debugfs:
++ drm_debugfs_cleanup(minor);
++ return ret;
++}
++
++static void drm_minor_unregister(struct drm_device *dev, unsigned int type)
++{
++ struct drm_minor *minor;
++ unsigned long flags;
++
++ minor = *drm_minor_get_slot(dev, type);
++ if (!minor || !device_is_registered(minor->kdev))
++ return;
++
++ /* replace @minor with NULL so lookups will fail from now on */
++ spin_lock_irqsave(&drm_minor_lock, flags);
++ idr_replace(&drm_minors_idr, NULL, minor->index);
++ spin_unlock_irqrestore(&drm_minor_lock, flags);
++
++ device_del(minor->kdev);
++ dev_set_drvdata(minor->kdev, NULL); /* safety belt */
++ drm_debugfs_cleanup(minor);
++}
++
++/**
++ * drm_minor_acquire - Acquire a DRM minor
++ * @minor_id: Minor ID of the DRM-minor
++ *
++ * Looks up the given minor-ID and returns the respective DRM-minor object. The
++ * reference-count of the underlying device is increased so you must release this
++ * object with drm_minor_release().
++ *
++ * As long as you hold this minor, it is guaranteed that the object and the
++ * minor->dev pointer will stay valid! However, the device may get unplugged and
++ * unregistered while you hold the minor.
++ *
++ * Returns:
++ * Pointer to minor-object with increased device-refcount, or PTR_ERR on
++ * failure.
++ */
++struct drm_minor *drm_minor_acquire(unsigned int minor_id)
++{
++ struct drm_minor *minor;
++ unsigned long flags;
++
++ spin_lock_irqsave(&drm_minor_lock, flags);
++ minor = idr_find(&drm_minors_idr, minor_id);
++ if (minor)
++ drm_dev_ref(minor->dev);
++ spin_unlock_irqrestore(&drm_minor_lock, flags);
++
++ if (!minor) {
++ return ERR_PTR(-ENODEV);
++ } else if (drm_device_is_unplugged(minor->dev)) {
++ drm_dev_unref(minor->dev);
++ return ERR_PTR(-ENODEV);
++ }
++
++ return minor;
++}
++
++/**
++ * drm_minor_release - Release DRM minor
++ * @minor: Pointer to DRM minor object
++ *
++ * Release a minor that was previously acquired via drm_minor_acquire().
++ */
++void drm_minor_release(struct drm_minor *minor)
++{
++ drm_dev_unref(minor->dev);
++}
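
A hedged usage sketch of the acquire/release pairing documented above;
drm_stub_open() later in this file follows the same shape, and minor_id
would typically come from iminor(inode):

    struct drm_minor *minor;

    minor = drm_minor_acquire(minor_id);  /* takes a drm_device reference */
    if (IS_ERR(minor))
            return PTR_ERR(minor);

    /* minor and minor->dev stay valid here, even across an unplug */

    drm_minor_release(minor);             /* drops the device reference */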
++
++/**
++ * drm_put_dev - Unregister and release a DRM device
++ * @dev: DRM device
++ *
++ * Called at module unload time or when a PCI device is unplugged.
++ *
++ * Use of this function is discouraged. It will eventually go away completely.
++ * Please use drm_dev_unregister() and drm_dev_unref() explicitly instead.
++ *
++ * Cleans up the DRM device, calling drm_lastclose().
++ */
++void drm_put_dev(struct drm_device *dev)
++{
++ DRM_DEBUG("\n");
++
++ if (!dev) {
++ DRM_ERROR("cleanup called no dev\n");
++ return;
++ }
++
++ drm_dev_unregister(dev);
++ drm_dev_unref(dev);
++}
++EXPORT_SYMBOL(drm_put_dev);
++
++void drm_unplug_dev(struct drm_device *dev)
++{
++ /* for a USB device */
++ drm_minor_unregister(dev, DRM_MINOR_LEGACY);
++ drm_minor_unregister(dev, DRM_MINOR_RENDER);
++ drm_minor_unregister(dev, DRM_MINOR_CONTROL);
++
++ mutex_lock(&drm_global_mutex);
++
++ drm_device_set_unplugged(dev);
++
++ if (dev->open_count == 0) {
++ drm_put_dev(dev);
++ }
++ mutex_unlock(&drm_global_mutex);
++}
++EXPORT_SYMBOL(drm_unplug_dev);
++
++/*
++ * DRM internal mount
++ * We want to be able to allocate our own "struct address_space" to control
++ * memory-mappings in VRAM (or stolen RAM, ...). However, core MM does not allow
++ * stand-alone address_space objects, so we need an underlying inode. As there
++ * is no way to allocate an independent inode easily, we need a fake internal
++ * VFS mount-point.
++ *
++ * The drm_fs_inode_new() function allocates a new inode, drm_fs_inode_free()
++ * frees it again. You are allowed to use iget() and iput() to get references to
++ * the inode. But each drm_fs_inode_new() call must be paired with exactly one
++ * drm_fs_inode_free() call (which does not have to be the last iput()).
++ * We use drm_fs_inode_*() to manage our internal VFS mount-point and share it
++ * between multiple inode-users. You could, technically, call
++ * iget() + drm_fs_inode_free() directly after alloc and sometime later do an
++ * iput(), but this way you'd end up with a new vfsmount for each inode.
++ */
++
++static int drm_fs_cnt;
++static struct vfsmount *drm_fs_mnt;
++
++static const struct dentry_operations drm_fs_dops = {
++ .d_dname = simple_dname,
++};
++
++static const struct super_operations drm_fs_sops = {
++ .statfs = simple_statfs,
+ };
+
+-#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
++static struct dentry *drm_fs_mount(struct file_system_type *fs_type, int flags,
++ const char *dev_name, void *data)
++{
++ return mount_pseudo(fs_type,
++ "drm:",
++ &drm_fs_sops,
++ &drm_fs_dops,
++ 0x010203ff);
++}
++
++static struct file_system_type drm_fs_type = {
++ .name = "drm",
++ .owner = THIS_MODULE,
++ .mount = drm_fs_mount,
++ .kill_sb = kill_anon_super,
++};
++
++static struct inode *drm_fs_inode_new(void)
++{
++ struct inode *inode;
++ int r;
++
++ r = simple_pin_fs(&drm_fs_type, &drm_fs_mnt, &drm_fs_cnt);
++ if (r < 0) {
++ DRM_ERROR("Cannot mount pseudo fs: %d\n", r);
++ return ERR_PTR(r);
++ }
++
++ inode = alloc_anon_inode(drm_fs_mnt->mnt_sb);
++ if (IS_ERR(inode))
++ simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
++
++ return inode;
++}
++
++static void drm_fs_inode_free(struct inode *inode)
++{
++ if (inode) {
++ iput(inode);
++ simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
++ }
++}
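
A minimal sketch of the pairing rule spelled out above, with error handling
elided; drm_dev_alloc() below manages dev->anon_inode in exactly this way:

    struct inode *inode;

    inode = drm_fs_inode_new();      /* pins the internal "drm" pseudo fs */
    if (IS_ERR(inode))
            return PTR_ERR(inode);

    /* inode->i_mapping can now back device memory mappings */

    drm_fs_inode_free(inode);        /* one free per _new(); unpins the fs */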
++
++/**
++ * drm_dev_alloc - Allocate new DRM device
++ * @driver: DRM driver to allocate device for
++ * @parent: Parent device object
++ *
++ * Allocate and initialize a new DRM device. No device registration is done.
++ * Call drm_dev_register() to advertise the device to user space and register it
++ * with other core subsystems.
++ *
++ * The initial ref-count of the object is 1. Use drm_dev_ref() and
++ * drm_dev_unref() to take and drop further ref-counts.
++ *
++ * Note that for purely virtual devices @parent can be NULL.
++ *
++ * Return:
++ * Pointer to new DRM device, or NULL if out of memory.
++ */
++struct drm_device *drm_dev_alloc(struct drm_driver *driver,
++ struct device *parent)
++{
++ struct drm_device *dev;
++ int ret;
++
++ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
++ if (!dev)
++ return NULL;
++
++ kref_init(&dev->ref);
++ dev->dev = parent;
++ dev->driver = driver;
++
++ INIT_LIST_HEAD(&dev->filelist);
++ INIT_LIST_HEAD(&dev->ctxlist);
++ INIT_LIST_HEAD(&dev->vmalist);
++ INIT_LIST_HEAD(&dev->maplist);
++ INIT_LIST_HEAD(&dev->vblank_event_list);
++
++ spin_lock_init(&dev->buf_lock);
++ spin_lock_init(&dev->event_lock);
++ mutex_init(&dev->struct_mutex);
++ mutex_init(&dev->ctxlist_mutex);
++ mutex_init(&dev->master_mutex);
++
++ dev->anon_inode = drm_fs_inode_new();
++ if (IS_ERR(dev->anon_inode)) {
++ ret = PTR_ERR(dev->anon_inode);
++ DRM_ERROR("Cannot allocate anonymous inode: %d\n", ret);
++ goto err_free;
++ }
++
++ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
++ ret = drm_minor_alloc(dev, DRM_MINOR_CONTROL);
++ if (ret)
++ goto err_minors;
++ }
++
++ if (drm_core_check_feature(dev, DRIVER_RENDER)) {
++ ret = drm_minor_alloc(dev, DRM_MINOR_RENDER);
++ if (ret)
++ goto err_minors;
++ }
++
++ ret = drm_minor_alloc(dev, DRM_MINOR_LEGACY);
++ if (ret)
++ goto err_minors;
++
++ if (drm_ht_create(&dev->map_hash, 12))
++ goto err_minors;
++
++ ret = drm_legacy_ctxbitmap_init(dev);
++ if (ret) {
++ DRM_ERROR("Cannot allocate memory for context bitmap.\n");
++ goto err_ht;
++ }
++
++ if (drm_core_check_feature(dev, DRIVER_GEM)) {
++ ret = drm_gem_init(dev);
++ if (ret) {
++ DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
++ goto err_ctxbitmap;
++ }
++ }
++
++ return dev;
++
++err_ctxbitmap:
++ drm_legacy_ctxbitmap_cleanup(dev);
++err_ht:
++ drm_ht_remove(&dev->map_hash);
++err_minors:
++ drm_minor_free(dev, DRM_MINOR_LEGACY);
++ drm_minor_free(dev, DRM_MINOR_RENDER);
++ drm_minor_free(dev, DRM_MINOR_CONTROL);
++ drm_fs_inode_free(dev->anon_inode);
++err_free:
++ mutex_destroy(&dev->master_mutex);
++ kfree(dev);
++ return NULL;
++}
++EXPORT_SYMBOL(drm_dev_alloc);
++
++static void drm_dev_release(struct kref *ref)
++{
++ struct drm_device *dev = container_of(ref, struct drm_device, ref);
++
++ if (drm_core_check_feature(dev, DRIVER_GEM))
++ drm_gem_destroy(dev);
++
++ drm_legacy_ctxbitmap_cleanup(dev);
++ drm_ht_remove(&dev->map_hash);
++ drm_fs_inode_free(dev->anon_inode);
++
++ drm_minor_free(dev, DRM_MINOR_LEGACY);
++ drm_minor_free(dev, DRM_MINOR_RENDER);
++ drm_minor_free(dev, DRM_MINOR_CONTROL);
++
++ mutex_destroy(&dev->master_mutex);
++ kfree(dev->unique);
++ kfree(dev);
++}
++
++/**
++ * drm_dev_ref - Take reference of a DRM device
++ * @dev: device to take reference of or NULL
++ *
++ * This increases the ref-count of @dev by one. You *must* already own a
++ * reference when calling this. Use drm_dev_unref() to drop this reference
++ * again.
++ *
++ * This function never fails. It does not, however, provide *any* guarantee
++ * that the device is alive or running. It only provides a
++ * reference to the object and the memory associated with it.
++ */
++void drm_dev_ref(struct drm_device *dev)
++{
++ if (dev)
++ kref_get(&dev->ref);
++}
++EXPORT_SYMBOL(drm_dev_ref);
++
++/**
++ * drm_dev_unref - Drop reference of a DRM device
++ * @dev: device to drop reference of or NULL
++ *
++ * This decreases the ref-count of @dev by one. The device is destroyed if the
++ * ref-count drops to zero.
++ */
++void drm_dev_unref(struct drm_device *dev)
++{
++ if (dev)
++ kref_put(&dev->ref, drm_dev_release);
++}
++EXPORT_SYMBOL(drm_dev_unref);
++
++/**
++ * drm_dev_register - Register DRM device
++ * @dev: Device to register
++ * @flags: Flags passed to the driver's .load() function
++ *
++ * Register the DRM device @dev with the system, advertise device to user-space
++ * and start normal device operation. @dev must be allocated via drm_dev_alloc()
++ * previously.
++ *
++ * Never call this twice on any device!
++ *
++ * Return:
++ * 0 on success, negative error code on failure.
++ */
++int drm_dev_register(struct drm_device *dev, unsigned long flags)
++{
++ int ret;
++
++ mutex_lock(&drm_global_mutex);
++
++ ret = drm_minor_register(dev, DRM_MINOR_CONTROL);
++ if (ret)
++ goto err_minors;
++
++ ret = drm_minor_register(dev, DRM_MINOR_RENDER);
++ if (ret)
++ goto err_minors;
++
++ ret = drm_minor_register(dev, DRM_MINOR_LEGACY);
++ if (ret)
++ goto err_minors;
++
++ if (dev->driver->load) {
++ ret = dev->driver->load(dev, flags);
++ if (ret)
++ goto err_minors;
++ }
++
++ /* setup grouping for legacy outputs */
++ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
++ ret = drm_mode_group_init_legacy_group(dev,
++ &dev->primary->mode_group);
++ if (ret)
++ goto err_unload;
++ }
++
++ ret = 0;
++ goto out_unlock;
++
++err_unload:
++ if (dev->driver->unload)
++ dev->driver->unload(dev);
++err_minors:
++ drm_minor_unregister(dev, DRM_MINOR_LEGACY);
++ drm_minor_unregister(dev, DRM_MINOR_RENDER);
++ drm_minor_unregister(dev, DRM_MINOR_CONTROL);
++out_unlock:
++ mutex_unlock(&drm_global_mutex);
++ return ret;
++}
++EXPORT_SYMBOL(drm_dev_register);
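
A sketch of the probe-side flow the kerneldoc above prescribes; my_driver
and parent are hypothetical stand-ins for a real driver's drm_driver and
parent struct device:

    struct drm_device *ddev;
    int ret;

    ddev = drm_dev_alloc(&my_driver, parent);  /* initial ref-count is 1 */
    if (!ddev)
            return -ENOMEM;

    ret = drm_dev_register(ddev, 0);           /* minors become visible */
    if (ret) {
            drm_dev_unref(ddev);               /* drop the initial ref */
            return ret;
    }
    return 0;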
++
++/**
++ * drm_dev_unregister - Unregister DRM device
++ * @dev: Device to unregister
++ *
++ * Unregister the DRM device from the system. This does the reverse of
++ * drm_dev_register() but does not deallocate the device. The caller must call
++ * drm_dev_unref() to drop their final reference.
++ */
++void drm_dev_unregister(struct drm_device *dev)
++{
++ struct drm_map_list *r_list, *list_temp;
++
++ drm_lastclose(dev);
++
++ if (dev->driver->unload)
++ dev->driver->unload(dev);
++
++ if (dev->agp)
++ drm_pci_agp_destroy(dev);
++
++ drm_vblank_cleanup(dev);
++
++ list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
++ drm_legacy_rmmap(dev, r_list->map);
++
++ drm_minor_unregister(dev, DRM_MINOR_LEGACY);
++ drm_minor_unregister(dev, DRM_MINOR_RENDER);
++ drm_minor_unregister(dev, DRM_MINOR_CONTROL);
++}
++EXPORT_SYMBOL(drm_dev_unregister);
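
And the matching teardown for the probe sketch above, as the kerneldoc
prescribes (again a sketch; ddev is the device allocated there):

    drm_dev_unregister(ddev);   /* reverse of drm_dev_register() */
    drm_dev_unref(ddev);        /* freed once the last reference drops */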
++
++/**
++ * drm_dev_set_unique - Set the unique name of a DRM device
++ * @dev: device of which to set the unique name
++ * @fmt: format string for unique name
++ *
++ * Sets the unique name of a DRM device using the specified format string and
++ * a variable list of arguments. Drivers can use this at driver probe time if
++ * the unique name of the devices they drive is static.
++ *
++ * Return: 0 on success or a negative error code on failure.
++ */
++int drm_dev_set_unique(struct drm_device *dev, const char *fmt, ...)
++{
++ va_list ap;
++
++ kfree(dev->unique);
++
++ va_start(ap, fmt);
++ dev->unique = kvasprintf(GFP_KERNEL, fmt, ap);
++ va_end(ap);
++
++ return dev->unique ? 0 : -ENOMEM;
++}
++EXPORT_SYMBOL(drm_dev_set_unique);
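
For example, a bus driver with a static device name might call it like this
(a sketch only; ddev, pdev and the err_unref label are hypothetical, pdev
being the parent struct platform_device):

    ret = drm_dev_set_unique(ddev, "platform:%s", dev_name(&pdev->dev));
    if (ret)                 /* -ENOMEM when the kvasprintf() copy fails */
            goto err_unref;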
++
++/*
++ * DRM Core
++ * The DRM core module initializes all global DRM objects and makes them
++ * available to drivers. Once set up, drivers can probe their respective
++ * devices.
++ * Currently, core management includes:
++ * - The "DRM-Global" key/value database
++ * - Global ID management for connectors
++ * - DRM major number allocation
++ * - DRM minor management
++ * - DRM sysfs class
++ * - DRM debugfs root
++ *
++ * Furthermore, the DRM core provides dynamic char-dev lookups. For each
++ * interface registered on a DRM device, you can request minor numbers from DRM
++ * core. DRM core takes care of major-number management and char-dev
++ * registration. A stub ->open() callback forwards any open() requests to the
++ * registered minor.
++ */
++
++static int drm_stub_open(struct inode *inode, struct file *filp)
++{
++ const struct file_operations *new_fops;
++ struct drm_minor *minor;
++ int err;
++
++ DRM_DEBUG("\n");
++
++ mutex_lock(&drm_global_mutex);
++ minor = drm_minor_acquire(iminor(inode));
++ if (IS_ERR(minor)) {
++ err = PTR_ERR(minor);
++ goto out_unlock;
++ }
++
++ new_fops = fops_get(minor->dev->driver->fops);
++ if (!new_fops) {
++ err = -ENODEV;
++ goto out_release;
++ }
++
++ replace_fops(filp, new_fops);
++ if (filp->f_op->open)
++ err = filp->f_op->open(inode, filp);
++ else
++ err = 0;
++
++out_release:
++ drm_minor_release(minor);
++out_unlock:
++ mutex_unlock(&drm_global_mutex);
++ return err;
++}
+
+-/** File operations structure */
+ static const struct file_operations drm_stub_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_stub_open,
+@@ -229,186 +906,3 @@
+
+ module_init(drm_core_init);
+ module_exit(drm_core_exit);
+-
+-/**
+- * Copy and IOCTL return string to user space
+- */
+-static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
+-{
+- int len;
+-
+- /* don't overflow userbuf */
+- len = strlen(value);
+- if (len > *buf_len)
+- len = *buf_len;
+-
+- /* let userspace know exact length of driver value (which could be
+- * larger than the userspace-supplied buffer) */
+- *buf_len = strlen(value);
+-
+- /* finally, try filling in the userbuf */
+- if (len && buf)
+- if (copy_to_user(buf, value, len))
+- return -EFAULT;
+- return 0;
+-}
+-
+-/**
+- * Get version information
+- *
+- * \param inode device inode.
+- * \param filp file pointer.
+- * \param cmd command.
+- * \param arg user argument, pointing to a drm_version structure.
+- * \return zero on success or negative number on failure.
+- *
+- * Fills in the version information in \p arg.
+- */
+-static int drm_version(struct drm_device *dev, void *data,
+- struct drm_file *file_priv)
+-{
+- struct drm_version *version = data;
+- int err;
+-
+- version->version_major = dev->driver->major;
+- version->version_minor = dev->driver->minor;
+- version->version_patchlevel = dev->driver->patchlevel;
+- err = drm_copy_field(version->name, &version->name_len,
+- dev->driver->name);
+- if (!err)
+- err = drm_copy_field(version->date, &version->date_len,
+- dev->driver->date);
+- if (!err)
+- err = drm_copy_field(version->desc, &version->desc_len,
+- dev->driver->desc);
+-
+- return err;
+-}
+-
+-/**
+- * Called whenever a process performs an ioctl on /dev/drm.
+- *
+- * \param inode device inode.
+- * \param file_priv DRM file private.
+- * \param cmd command.
+- * \param arg user argument.
+- * \return zero on success or negative number on failure.
+- *
+- * Looks up the ioctl function in the ::ioctls table, checking for root
+- * previleges if so required, and dispatches to the respective function.
+- */
+-long drm_ioctl(struct file *filp,
+- unsigned int cmd, unsigned long arg)
+-{
+- struct drm_file *file_priv = filp->private_data;
+- struct drm_device *dev;
+- const struct drm_ioctl_desc *ioctl = NULL;
+- drm_ioctl_t *func;
+- unsigned int nr = DRM_IOCTL_NR(cmd);
+- int retcode = -EINVAL;
+- char stack_kdata[128];
+- char *kdata = NULL;
+- unsigned int usize, asize;
+-
+- dev = file_priv->minor->dev;
+-
+- if (drm_device_is_unplugged(dev))
+- return -ENODEV;
+-
+- if ((nr >= DRM_CORE_IOCTL_COUNT) &&
+- ((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END)))
+- goto err_i1;
+- if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) &&
+- (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
+- u32 drv_size;
+- ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE];
+- drv_size = _IOC_SIZE(ioctl->cmd_drv);
+- usize = asize = _IOC_SIZE(cmd);
+- if (drv_size > asize)
+- asize = drv_size;
+- cmd = ioctl->cmd_drv;
+- }
+- else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) {
+- u32 drv_size;
+-
+- ioctl = &drm_ioctls[nr];
+-
+- drv_size = _IOC_SIZE(ioctl->cmd);
+- usize = asize = _IOC_SIZE(cmd);
+- if (drv_size > asize)
+- asize = drv_size;
+-
+- cmd = ioctl->cmd;
+- } else
+- goto err_i1;
+-
+- DRM_DEBUG("pid=%d, dev=0x%lx, auth=%d, %s\n",
+- task_pid_nr(current),
+- (long)old_encode_dev(file_priv->minor->device),
+- file_priv->authenticated, ioctl->name);
+-
+- /* Do not trust userspace, use our own definition */
+- func = ioctl->func;
+-
+- if (!func) {
+- DRM_DEBUG("no function\n");
+- retcode = -EINVAL;
+- } else if (((ioctl->flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)) ||
+- ((ioctl->flags & DRM_AUTH) && !drm_is_render_client(file_priv) && !file_priv->authenticated) ||
+- ((ioctl->flags & DRM_MASTER) && !file_priv->is_master) ||
+- (!(ioctl->flags & DRM_CONTROL_ALLOW) && (file_priv->minor->type == DRM_MINOR_CONTROL)) ||
+- (!(ioctl->flags & DRM_RENDER_ALLOW) && drm_is_render_client(file_priv))) {
+- retcode = -EACCES;
+- } else {
+- if (cmd & (IOC_IN | IOC_OUT)) {
+- if (asize <= sizeof(stack_kdata)) {
+- kdata = stack_kdata;
+- } else {
+- kdata = kmalloc(asize, GFP_KERNEL);
+- if (!kdata) {
+- retcode = -ENOMEM;
+- goto err_i1;
+- }
+- }
+- if (asize > usize)
+- memset(kdata + usize, 0, asize - usize);
+- }
+-
+- if (cmd & IOC_IN) {
+- if (copy_from_user(kdata, (void __user *)arg,
+- usize) != 0) {
+- retcode = -EFAULT;
+- goto err_i1;
+- }
+- } else
+- memset(kdata, 0, usize);
+-
+- if (ioctl->flags & DRM_UNLOCKED)
+- retcode = func(dev, kdata, file_priv);
+- else {
+- mutex_lock(&drm_global_mutex);
+- retcode = func(dev, kdata, file_priv);
+- mutex_unlock(&drm_global_mutex);
+- }
+-
+- if (cmd & IOC_OUT) {
+- if (copy_to_user((void __user *)arg, kdata,
+- usize) != 0)
+- retcode = -EFAULT;
+- }
+- }
+-
+- err_i1:
+- if (!ioctl)
+- DRM_DEBUG("invalid ioctl: pid=%d, dev=0x%lx, auth=%d, cmd=0x%02x, nr=0x%02x\n",
+- task_pid_nr(current),
+- (long)old_encode_dev(file_priv->minor->device),
+- file_priv->authenticated, cmd, nr);
+-
+- if (kdata != stack_kdata)
+- kfree(kdata);
+- if (retcode)
+- DRM_DEBUG("ret = %d\n", retcode);
+- return retcode;
+-}
+-EXPORT_SYMBOL(drm_ioctl);
+diff -Naur a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
+--- a/drivers/gpu/drm/drm_edid.c 2015-03-26 14:43:30.490436437 +0530
++++ b/drivers/gpu/drm/drm_edid.c 2015-03-26 14:42:38.722435422 +0530
+@@ -34,6 +34,7 @@
+ #include <linux/module.h>
+ #include <drm/drmP.h>
+ #include <drm/drm_edid.h>
++#include <drm/drm_displayid.h>
+
+ #define version_greater(edid, maj, min) \
+ (((edid)->version > (maj)) || \
+@@ -70,6 +71,8 @@
+ #define EDID_QUIRK_FORCE_REDUCED_BLANKING (1 << 7)
+ /* Force 8bpc */
+ #define EDID_QUIRK_FORCE_8BPC (1 << 8)
++/* Force 12bpc */
++#define EDID_QUIRK_FORCE_12BPC (1 << 9)
+
+ struct detailed_mode_closure {
+ struct drm_connector *connector;
+@@ -125,6 +128,9 @@
+ { "SAM", 596, EDID_QUIRK_PREFER_LARGE_60 },
+ { "SAM", 638, EDID_QUIRK_PREFER_LARGE_60 },
+
++ /* Sony PVM-2541A does up to 12 bpc, but only reports max 8 bpc */
++ { "SNY", 0x2541, EDID_QUIRK_FORCE_12BPC },
++
+ /* ViewSonic VA2026w */
+ { "VSC", 5020, EDID_QUIRK_FORCE_REDUCED_BLANKING },
+
+@@ -627,27 +633,27 @@
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+- /* 6 - 1440x480i@60Hz */
+- { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
+- 1602, 1716, 0, 480, 488, 494, 525, 0,
++ /* 6 - 720(1440)x480i@60Hz */
++ { DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 13500, 720, 739,
++ 801, 858, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
+- /* 7 - 1440x480i@60Hz */
+- { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
+- 1602, 1716, 0, 480, 488, 494, 525, 0,
++ /* 7 - 720(1440)x480i@60Hz */
++ { DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 13500, 720, 739,
++ 801, 858, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+- /* 8 - 1440x240@60Hz */
+- { DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
+- 1602, 1716, 0, 240, 244, 247, 262, 0,
++ /* 8 - 720(1440)x240@60Hz */
++ { DRM_MODE("720x240", DRM_MODE_TYPE_DRIVER, 13500, 720, 739,
++ 801, 858, 0, 240, 244, 247, 262, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_DBLCLK),
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
+- /* 9 - 1440x240@60Hz */
+- { DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
+- 1602, 1716, 0, 240, 244, 247, 262, 0,
++ /* 9 - 720(1440)x240@60Hz */
++ { DRM_MODE("720x240", DRM_MODE_TYPE_DRIVER, 13500, 720, 739,
++ 801, 858, 0, 240, 244, 247, 262, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_DBLCLK),
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+@@ -709,27 +715,27 @@
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+- /* 21 - 1440x576i@50Hz */
+- { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
+- 1590, 1728, 0, 576, 580, 586, 625, 0,
++ /* 21 - 720(1440)x576i@50Hz */
++ { DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 13500, 720, 732,
++ 795, 864, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
+- /* 22 - 1440x576i@50Hz */
+- { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
+- 1590, 1728, 0, 576, 580, 586, 625, 0,
++ /* 22 - 720(1440)x576i@50Hz */
++ { DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 13500, 720, 732,
++ 795, 864, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+- /* 23 - 1440x288@50Hz */
+- { DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
+- 1590, 1728, 0, 288, 290, 293, 312, 0,
++ /* 23 - 720(1440)x288@50Hz */
++ { DRM_MODE("720x288", DRM_MODE_TYPE_DRIVER, 13500, 720, 732,
++ 795, 864, 0, 288, 290, 293, 312, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_DBLCLK),
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
+- /* 24 - 1440x288@50Hz */
+- { DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
+- 1590, 1728, 0, 288, 290, 293, 312, 0,
++ /* 24 - 720(1440)x288@50Hz */
++ { DRM_MODE("720x288", DRM_MODE_TYPE_DRIVER, 13500, 720, 732,
++ 795, 864, 0, 288, 290, 293, 312, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_DBLCLK),
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+@@ -832,17 +838,17 @@
+ 796, 864, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+ .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+- /* 44 - 1440x576i@100Hz */
+- { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
+- 1590, 1728, 0, 576, 580, 586, 625, 0,
++ /* 44 - 720(1440)x576i@100Hz */
++ { DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
++ 795, 864, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+- DRM_MODE_FLAG_DBLCLK),
++ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
+- /* 45 - 1440x576i@100Hz */
+- { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
+- 1590, 1728, 0, 576, 580, 586, 625, 0,
++ /* 45 - 720(1440)x576i@100Hz */
++ { DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
++ 795, 864, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+- DRM_MODE_FLAG_DBLCLK),
++ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 46 - 1920x1080i@120Hz */
+ { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
+@@ -865,15 +871,15 @@
+ 798, 858, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+ .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+- /* 50 - 1440x480i@120Hz */
+- { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
+- 1602, 1716, 0, 480, 488, 494, 525, 0,
++ /* 50 - 720(1440)x480i@120Hz */
++ { DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 27000, 720, 739,
++ 801, 858, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
+- /* 51 - 1440x480i@120Hz */
+- { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
+- 1602, 1716, 0, 480, 488, 494, 525, 0,
++ /* 51 - 720(1440)x480i@120Hz */
++ { DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 27000, 720, 739,
++ 801, 858, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+@@ -887,15 +893,15 @@
+ 796, 864, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+ .vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+- /* 54 - 1440x576i@200Hz */
+- { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
+- 1590, 1728, 0, 576, 580, 586, 625, 0,
++ /* 54 - 720(1440)x576i@200Hz */
++ { DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
++ 795, 864, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ .vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
+- /* 55 - 1440x576i@200Hz */
+- { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
+- 1590, 1728, 0, 576, 580, 586, 625, 0,
++ /* 55 - 720(1440)x576i@200Hz */
++ { DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
++ 795, 864, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ .vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+@@ -909,15 +915,15 @@
+ 798, 858, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+ .vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+- /* 58 - 1440x480i@240 */
+- { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
+- 1602, 1716, 0, 480, 488, 494, 525, 0,
++ /* 58 - 720(1440)x480i@240 */
++ { DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 54000, 720, 739,
++ 801, 858, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ .vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
+- /* 59 - 1440x480i@240 */
+- { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
+- 1602, 1716, 0, 480, 488, 494, 525, 0,
++ /* 59 - 720(1440)x480i@240 */
++ { DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 54000, 720, 739,
++ 801, 858, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ .vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+@@ -984,9 +990,13 @@
+ 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
+ };
+
+- /*
+- * Sanity check the header of the base EDID block. Return 8 if the header
+- * is perfect, down to 0 if it's totally wrong.
++/**
++ * drm_edid_header_is_valid - sanity check the header of the base EDID block
++ * @raw_edid: pointer to raw base EDID block
++ *
++ * Sanity check the header of the base EDID block.
++ *
++ * Return: 8 if the header is perfect, down to 0 if it's totally wrong.
+ */
+ int drm_edid_header_is_valid(const u8 *raw_edid)
+ {
+@@ -1005,14 +1015,41 @@
+ MODULE_PARM_DESC(edid_fixup,
+ "Minimum number of valid EDID header bytes (0-8, default 6)");
+
+-/*
+- * Sanity check the EDID block (base or extension). Return 0 if the block
+- * doesn't check out, or 1 if it's valid.
+- */
+-bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid)
++static void drm_get_displayid(struct drm_connector *connector,
++ struct edid *edid);
++
++static int drm_edid_block_checksum(const u8 *raw_edid)
+ {
+ int i;
+ u8 csum = 0;
++ for (i = 0; i < EDID_LENGTH; i++)
++ csum += raw_edid[i];
++
++ return csum;
++}
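
The EDID specification makes every 128-byte block sum to zero modulo 256:
byte 127 is the two's complement of the sum of the first 127 bytes, so any
nonzero value returned above indicates corruption. A sketch of how a
generator would pick that final byte (block is a hypothetical
u8[EDID_LENGTH] buffer):

    u8 sum = 0;
    int i;

    for (i = 0; i < EDID_LENGTH - 1; i++)
            sum += block[i];
    block[EDID_LENGTH - 1] = (u8)-sum;   /* whole block now sums to 0 */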
++
++static bool drm_edid_is_zero(const u8 *in_edid, int length)
++{
++ if (memchr_inv(in_edid, 0, length))
++ return false;
++
++ return true;
++}
++
++/**
++ * drm_edid_block_valid - Sanity check the EDID block (base or extension)
++ * @raw_edid: pointer to raw EDID block
++ * @block: type of block to validate (0 for base, extension otherwise)
++ * @print_bad_edid: if true, dump bad EDID blocks to the console
++ *
++ * Validate a base or extension EDID block and optionally dump bad blocks to
++ * the console.
++ *
++ * Return: True if the block is valid, false otherwise.
++ */
++bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid)
++{
++ u8 csum;
+ struct edid *edid = (struct edid *)raw_edid;
+
+ if (WARN_ON(!raw_edid))
+@@ -1032,8 +1069,7 @@
+ }
+ }
+
+- for (i = 0; i < EDID_LENGTH; i++)
+- csum += raw_edid[i];
++ csum = drm_edid_block_checksum(raw_edid);
+ if (csum) {
+ if (print_bad_edid) {
+ DRM_ERROR("EDID checksum is invalid, remainder is %d\n", csum);
+@@ -1064,9 +1100,13 @@
+
+ bad:
+ if (print_bad_edid) {
+- printk(KERN_ERR "Raw EDID:\n");
+- print_hex_dump(KERN_ERR, " \t", DUMP_PREFIX_NONE, 16, 1,
++ if (drm_edid_is_zero(raw_edid, EDID_LENGTH)) {
++ printk(KERN_ERR "EDID block is all zeroes\n");
++ } else {
++ printk(KERN_ERR "Raw EDID:\n");
++ print_hex_dump(KERN_ERR, " \t", DUMP_PREFIX_NONE, 16, 1,
+ raw_edid, EDID_LENGTH, false);
++ }
+ }
+ return false;
+ }
+@@ -1077,6 +1117,8 @@
+ * @edid: EDID data
+ *
+ * Sanity-check an entire EDID record (including extensions)
++ *
++ * Return: True if the EDID data is valid, false otherwise.
+ */
+ bool drm_edid_is_valid(struct edid *edid)
+ {
+@@ -1096,25 +1138,27 @@
+
+ #define DDC_SEGMENT_ADDR 0x30
+ /**
+- * Get EDID information via I2C.
++ * drm_do_probe_ddc_edid() - get EDID information via I2C
++ * @data: I2C device adapter
++ * @buf: EDID data buffer to be filled
++ * @block: 128 byte EDID block to start fetching from
++ * @len: EDID data buffer length to fetch
+ *
+- * \param adapter : i2c device adaptor
+- * \param buf : EDID data buffer to be filled
+- * \param len : EDID data buffer length
+- * \return 0 on success or -1 on failure.
++ * Try to fetch EDID information by calling I2C driver functions.
+ *
+- * Try to fetch EDID information by calling i2c driver function.
++ * Return: 0 on success or -1 on failure.
+ */
+ static int
+-drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
+- int block, int len)
++drm_do_probe_ddc_edid(void *data, u8 *buf, unsigned int block, size_t len)
+ {
++ struct i2c_adapter *adapter = data;
+ unsigned char start = block * EDID_LENGTH;
+ unsigned char segment = block >> 1;
+ unsigned char xfers = segment ? 3 : 2;
+ int ret, retries = 5;
+
+- /* The core i2c driver will automatically retry the transfer if the
++ /*
++ * The core I2C driver will automatically retry the transfer if the
+ * adapter reports EAGAIN. However, we find that bit-banging transfers
+ * are susceptible to errors under a heavily loaded machine and
+ * generate spurious NAKs and timeouts. Retrying the transfer
+@@ -1140,10 +1184,10 @@
+ }
+ };
+
+- /*
+- * Avoid sending the segment addr to not upset non-compliant ddc
+- * monitors.
+- */
++ /*
++ * Avoid sending the segment addr to not upset non-compliant
++ * DDC monitors.
++ */
+ ret = i2c_transfer(adapter, &msgs[3 - xfers], xfers);
+
+ if (ret == -ENXIO) {
+@@ -1156,16 +1200,26 @@
+ return ret == xfers ? 0 : -1;
+ }
+
+-static bool drm_edid_is_zero(u8 *in_edid, int length)
+-{
+- if (memchr_inv(in_edid, 0, length))
+- return false;
+-
+- return true;
+-}
+-
+-static u8 *
+-drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
++/**
++ * drm_do_get_edid - get EDID data using a custom EDID block read function
++ * @connector: connector we're probing
++ * @get_edid_block: EDID block read function
++ * @data: private data passed to the block read function
++ *
++ * When the I2C adapter connected to the DDC bus is hidden behind a device that
++ * exposes a different interface to read EDID blocks this function can be used
++ * to get EDID data using a custom block read function.
++ *
++ * Since the DDC bus is in the general case accessible to the kernel at the
++ * I2C level, drivers must make all reasonable efforts to expose it as an I2C
++ * adapter and use drm_get_edid() instead of abusing this function.
++ *
++ * Return: Pointer to valid EDID or NULL if we couldn't find any.
++ */
++struct edid *drm_do_get_edid(struct drm_connector *connector,
++ int (*get_edid_block)(void *data, u8 *buf, unsigned int block,
++ size_t len),
++ void *data)
+ {
+ int i, j = 0, valid_extensions = 0;
+ u8 *block, *new;
+@@ -1176,7 +1230,7 @@
+
+ /* base block fetch */
+ for (i = 0; i < 4; i++) {
+- if (drm_do_probe_ddc_edid(adapter, block, 0, EDID_LENGTH))
++ if (get_edid_block(data, block, 0, EDID_LENGTH))
+ goto out;
+ if (drm_edid_block_valid(block, 0, print_bad_edid))
+ break;
+@@ -1190,7 +1244,7 @@
+
+ /* if there's no extensions, we're done */
+ if (block[0x7e] == 0)
+- return block;
++ return (struct edid *)block;
+
+ new = krealloc(block, (block[0x7e] + 1) * EDID_LENGTH, GFP_KERNEL);
+ if (!new)
+@@ -1199,7 +1253,7 @@
+
+ for (j = 1; j <= block[0x7e]; j++) {
+ for (i = 0; i < 4; i++) {
+- if (drm_do_probe_ddc_edid(adapter,
++ if (get_edid_block(data,
+ block + (valid_extensions + 1) * EDID_LENGTH,
+ j, EDID_LENGTH))
+ goto out;
+@@ -1212,7 +1266,7 @@
+ if (i == 4 && print_bad_edid) {
+ dev_warn(connector->dev->dev,
+ "%s: Ignoring invalid EDID block %d.\n",
+- drm_get_connector_name(connector), j);
++ connector->name, j);
+
+ connector->bad_edid_counter++;
+ }
+@@ -1227,12 +1281,12 @@
+ block = new;
+ }
+
+- return block;
++ return (struct edid *)block;
+
+ carp:
+ if (print_bad_edid) {
+ dev_warn(connector->dev->dev, "%s: EDID block %d invalid.\n",
+- drm_get_connector_name(connector), j);
++ connector->name, j);
+ }
+ connector->bad_edid_counter++;
+
+@@ -1240,12 +1294,13 @@
+ kfree(block);
+ return NULL;
+ }
++EXPORT_SYMBOL_GPL(drm_do_get_edid);
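
A hedged sketch of the custom block-read hook this export enables; my_sink
and my_sink_read() are hypothetical stand-ins for a transport that hides
the DDC bus, e.g. a vendor mailbox. The callback must return 0 on success
and nonzero on failure, as drm_do_get_edid() expects:

    static int my_get_edid_block(void *data, u8 *buf, unsigned int block,
                                 size_t len)
    {
            struct my_sink *sink = data;

            /* read len bytes starting at offset block * EDID_LENGTH */
            return my_sink_read(sink, block * EDID_LENGTH, buf, len);
    }

    /* then, from the connector's ->get_modes() path: */
    edid = drm_do_get_edid(connector, my_get_edid_block, sink);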
+
+ /**
+- * Probe DDC presence.
++ * drm_probe_ddc() - probe DDC presence
++ * @adapter: I2C adapter to probe
+ *
+- * \param adapter : i2c device adaptor
+- * \return 1 on success
++ * Return: True on success, false on failure.
+ */
+ bool
+ drm_probe_ddc(struct i2c_adapter *adapter)
+@@ -1259,21 +1314,24 @@
+ /**
+ * drm_get_edid - get EDID data, if available
+ * @connector: connector we're probing
+- * @adapter: i2c adapter to use for DDC
++ * @adapter: I2C adapter to use for DDC
+ *
+- * Poke the given i2c channel to grab EDID data if possible. If found,
++ * Poke the given I2C channel to grab EDID data if possible. If found,
+ * attach it to the connector.
+ *
+- * Return edid data or NULL if we couldn't find any.
++ * Return: Pointer to valid EDID or NULL if we couldn't find any.
+ */
+ struct edid *drm_get_edid(struct drm_connector *connector,
+ struct i2c_adapter *adapter)
+ {
+- struct edid *edid = NULL;
++ struct edid *edid;
+
+- if (drm_probe_ddc(adapter))
+- edid = (struct edid *)drm_do_get_edid(connector, adapter);
++ if (!drm_probe_ddc(adapter))
++ return NULL;
+
++ edid = drm_do_get_edid(connector, drm_do_probe_ddc_edid, adapter);
++ if (edid)
++ drm_get_displayid(connector, edid);
+ return edid;
+ }
+ EXPORT_SYMBOL(drm_get_edid);
+@@ -1282,7 +1340,7 @@
+ * drm_edid_duplicate - duplicate an EDID and the extensions
+ * @edid: EDID to duplicate
+ *
+- * Return duplicate edid or NULL on allocation failure.
++ * Return: Pointer to duplicated EDID or NULL on allocation failure.
+ */
+ struct edid *drm_edid_duplicate(const struct edid *edid)
+ {
+@@ -1405,7 +1463,8 @@
+ * @rb: Mode reduced-blanking-ness
+ *
+ * Walk the DMT mode list looking for a match for the given parameters.
+- * Return a newly allocated copy of the mode, or NULL if not found.
++ *
++ * Return: A newly allocated copy of the mode, or NULL if not found.
+ */
+ struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
+ int hsize, int vsize, int fresh,
+@@ -1586,15 +1645,16 @@
+
+ /**
+ * drm_mode_std - convert standard mode info (width, height, refresh) into mode
++ * @connector: connector for the EDID block
++ * @edid: EDID block to scan
+ * @t: standard timing params
+- * @timing_level: standard timing level
+ *
+ * Take the standard timing params (in this case width, aspect, and refresh)
+ * and convert them into a real mode using CVT/GTF/DMT.
+ */
+ static struct drm_display_mode *
+ drm_mode_std(struct drm_connector *connector, struct edid *edid,
+- struct std_timing *t, int revision)
++ struct std_timing *t)
+ {
+ struct drm_device *dev = connector->dev;
+ struct drm_display_mode *m, *mode = NULL;
+@@ -1615,7 +1675,7 @@
+ vrefresh_rate = vfreq + 60;
+ /* the vdisplay is calculated based on the aspect ratio */
+ if (aspect_ratio == 0) {
+- if (revision < 3)
++ if (edid->revision < 3)
+ vsize = hsize;
+ else
+ vsize = (hsize * 10) / 16;
+@@ -2081,7 +2141,8 @@
+ add_inferred_modes(struct drm_connector *connector, struct edid *edid)
+ {
+ struct detailed_mode_closure closure = {
+- connector, edid, 0, 0, 0
++ .connector = connector,
++ .edid = edid,
+ };
+
+ if (version_greater(edid, 1, 0))
+@@ -2132,6 +2193,7 @@
+
+ /**
+ * add_established_modes - get est. modes from EDID and add them
++ * @connector: connector to add mode(s) to
+ * @edid: EDID block to scan
+ *
+ * Each EDID block contains a bitmap of the supported "established modes" list
+@@ -2146,7 +2208,8 @@
+ ((edid->established_timings.mfg_rsvd & 0x80) << 9);
+ int i, modes = 0;
+ struct detailed_mode_closure closure = {
+- connector, edid, 0, 0, 0
++ .connector = connector,
++ .edid = edid,
+ };
+
+ for (i = 0; i <= EDID_EST_TIMINGS; i++) {
+@@ -2182,8 +2245,7 @@
+ struct drm_display_mode *newmode;
+
+ std = &data->data.timings[i];
+- newmode = drm_mode_std(connector, edid, std,
+- edid->revision);
++ newmode = drm_mode_std(connector, edid, std);
+ if (newmode) {
+ drm_mode_probed_add(connector, newmode);
+ closure->modes++;
+@@ -2194,6 +2256,7 @@
+
+ /**
+ * add_standard_modes - get std. modes from EDID and add them
++ * @connector: connector to add mode(s) to
+ * @edid: EDID block to scan
+ *
+ * Standard modes can be calculated using the appropriate standard (DMT,
+@@ -2204,15 +2267,15 @@
+ {
+ int i, modes = 0;
+ struct detailed_mode_closure closure = {
+- connector, edid, 0, 0, 0
++ .connector = connector,
++ .edid = edid,
+ };
+
+ for (i = 0; i < EDID_STD_TIMINGS; i++) {
+ struct drm_display_mode *newmode;
+
+ newmode = drm_mode_std(connector, edid,
+- &edid->standard_timings[i],
+- edid->revision);
++ &edid->standard_timings[i]);
+ if (newmode) {
+ drm_mode_probed_add(connector, newmode);
+ modes++;
+@@ -2291,7 +2354,8 @@
+ add_cvt_modes(struct drm_connector *connector, struct edid *edid)
+ {
+ struct detailed_mode_closure closure = {
+- connector, edid, 0, 0, 0
++ .connector = connector,
++ .edid = edid,
+ };
+
+ if (version_greater(edid, 1, 2))
+@@ -2335,11 +2399,10 @@
+ u32 quirks)
+ {
+ struct detailed_mode_closure closure = {
+- connector,
+- edid,
+- 1,
+- quirks,
+- 0
++ .connector = connector,
++ .edid = edid,
++ .preferred = 1,
++ .quirks = quirks,
+ };
+
+ if (closure.preferred && !version_greater(edid, 1, 3))
+@@ -2364,7 +2427,7 @@
+ /*
+ * Search EDID for CEA extension block.
+ */
+-static u8 *drm_find_cea_extension(struct edid *edid)
++static u8 *drm_find_edid_extension(struct edid *edid, int ext_id)
+ {
+ u8 *edid_ext = NULL;
+ int i;
+@@ -2376,7 +2439,7 @@
+ /* Find CEA extension */
+ for (i = 0; i < edid->extensions; i++) {
+ edid_ext = (u8 *)edid + EDID_LENGTH * (i + 1);
+- if (edid_ext[0] == CEA_EXT)
++ if (edid_ext[0] == ext_id)
+ break;
+ }
+
+@@ -2386,6 +2449,16 @@
+ return edid_ext;
+ }
+
++static u8 *drm_find_cea_extension(struct edid *edid)
++{
++ return drm_find_edid_extension(edid, CEA_EXT);
++}
++
++static u8 *drm_find_displayid_extension(struct edid *edid)
++{
++ return drm_find_edid_extension(edid, DISPLAYID_EXT);
++}
++
+ /*
+ * Calculate the alternate clock for the CEA mode
+ * (60Hz vs. 59.94Hz etc.)
+@@ -2415,7 +2488,7 @@
+ * drm_match_cea_mode - look for a CEA mode matching given mode
+ * @to_match: display mode
+ *
+- * Returns the CEA Video ID (VIC) of the mode or 0 if it isn't a CEA-861
++ * Return: The CEA Video ID (VIC) of the mode or 0 if it isn't a CEA-861
+ * mode.
+ */
+ u8 drm_match_cea_mode(const struct drm_display_mode *to_match)
+@@ -2442,6 +2515,22 @@
+ }
+ EXPORT_SYMBOL(drm_match_cea_mode);
+
++/**
++ * drm_get_cea_aspect_ratio - get the picture aspect ratio corresponding to
++ * the input VIC from the CEA mode list
++ * @video_code: ID given to each of the CEA modes
++ *
++ * Return: The picture aspect ratio for @video_code.
++ */
++enum hdmi_picture_aspect drm_get_cea_aspect_ratio(const u8 video_code)
++{
++ /* CEA VIC numbering starts at 1, so index the mode list with
++ * video_code - 1 to reach the right array element
++ */
++ return edid_cea_modes[video_code-1].picture_aspect_ratio;
++}
++EXPORT_SYMBOL(drm_get_cea_aspect_ratio);
++
+ /*
+ * Calculate the alternate clock for HDMI modes (those from the HDMI vendor
+ * specific block).
+@@ -2580,6 +2669,9 @@
+ return NULL;
+
+ newmode = drm_mode_duplicate(dev, &edid_cea_modes[cea_mode]);
++ if (!newmode)
++ return NULL;
++
+ newmode->vrefresh = 0;
+
+ return newmode;
+@@ -3010,11 +3102,9 @@
+ * @connector: connector corresponding to the HDMI/DP sink
+ * @edid: EDID to parse
+ *
+- * Fill the ELD (EDID-Like Data) buffer for passing to the audio driver.
+- * Some ELD fields are left to the graphics driver caller:
+- * - Conn_Type
+- * - HDCP
+- * - Port_ID
++ * Fill the ELD (EDID-Like Data) buffer for passing to the audio driver. The
++ * Conn_Type, HDCP and Port_ID ELD fields are left for the graphics driver to
++ * fill in.
+ */
+ void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
+ {
+@@ -3086,9 +3176,12 @@
+ }
+ }
+ eld[5] |= sad_count << 4;
+- eld[2] = (20 + mnl + sad_count * 3 + 3) / 4;
+
+- DRM_DEBUG_KMS("ELD size %d, SAD count %d\n", (int)eld[2], sad_count);
++ eld[DRM_ELD_BASELINE_ELD_LEN] =
++ DIV_ROUND_UP(drm_eld_calc_baseline_block_size(eld), 4);
++
++ DRM_DEBUG_KMS("ELD size %d, SAD count %d\n",
++ drm_eld_size(eld), sad_count);
+ }
+ EXPORT_SYMBOL(drm_edid_to_eld);
+
+@@ -3098,9 +3191,10 @@
+ * @sads: pointer that will be set to the extracted SADs
+ *
+ * Looks for CEA EDID block and extracts SADs (Short Audio Descriptors) from it.
+- * Note: returned pointer needs to be kfreed
+ *
+- * Return number of found SADs or negative number on error.
++ * Note: The returned pointer needs to be freed using kfree().
++ *
++ * Return: The number of found SADs or negative number on error.
+ */
+ int drm_edid_to_sad(struct edid *edid, struct cea_sad **sads)
+ {
+@@ -3157,9 +3251,11 @@
+ * @sadb: pointer to the speaker block
+ *
+ * Looks for CEA EDID block and extracts the Speaker Allocation Data Block from it.
+- * Note: returned pointer needs to be kfreed
+ *
+- * Return number of found Speaker Allocation Blocks or negative number on error.
++ * Note: The returned pointer needs to be freed using kfree().
++ *
++ * Return: The number of found Speaker Allocation Blocks or negative number on
++ * error.
+ */
+ int drm_edid_to_speaker_allocation(struct edid *edid, u8 **sadb)
+ {
+@@ -3191,10 +3287,9 @@
+
+ /* Speaker Allocation Data Block */
+ if (dbl == 3) {
+- *sadb = kmalloc(dbl, GFP_KERNEL);
++ *sadb = kmemdup(&db[1], dbl, GFP_KERNEL);
+ if (!*sadb)
+ return -ENOMEM;
+- memcpy(*sadb, &db[1], dbl);
+ count = dbl;
+ break;
+ }
+@@ -3206,9 +3301,12 @@
+ EXPORT_SYMBOL(drm_edid_to_speaker_allocation);
+
+ /**
+- * drm_av_sync_delay - HDMI/DP sink audio-video sync delay in millisecond
++ * drm_av_sync_delay - compute the HDMI/DP sink audio-video sync delay
+ * @connector: connector associated with the HDMI/DP sink
+ * @mode: the display mode
++ *
++ * Return: The HDMI/DP sink's audio-video sync delay in milliseconds or 0 if
++ * the sink doesn't support audio or video.
+ */
+ int drm_av_sync_delay(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+@@ -3250,6 +3348,9 @@
+ *
+ * It's possible for one encoder to be associated with multiple HDMI/DP sinks.
+ * The policy is now hard coded to simply use the first HDMI/DP sink's ELD.
++ *
++ * Return: The connector associated with the first HDMI/DP sink that has ELD
++ * attached to it.
+ */
+ struct drm_connector *drm_select_eld(struct drm_encoder *encoder,
+ struct drm_display_mode *mode)
+@@ -3257,6 +3358,9 @@
+ struct drm_connector *connector;
+ struct drm_device *dev = encoder->dev;
+
++ WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
++ WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
++
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+ if (connector->encoder == encoder && connector->eld[0])
+ return connector;
+@@ -3266,11 +3370,12 @@
+ EXPORT_SYMBOL(drm_select_eld);
+
+ /**
+- * drm_detect_hdmi_monitor - detect whether monitor is hdmi.
++ * drm_detect_hdmi_monitor - detect whether monitor is HDMI
+ * @edid: monitor EDID information
+ *
+ * Parse the CEA extension according to CEA-861-B.
+- * Return true if HDMI, false if not or unknown.
++ *
++ * Return: True if the monitor is HDMI, false if not or unknown.
+ */
+ bool drm_detect_hdmi_monitor(struct edid *edid)
+ {
+@@ -3300,6 +3405,7 @@
+
+ /**
+ * drm_detect_monitor_audio - check monitor audio capability
++ * @edid: EDID block to scan
+ *
+ * Monitor should have CEA extension block.
+ * If monitor has 'basic audio', but no CEA audio blocks, it's 'basic
+@@ -3307,6 +3413,7 @@
+ * audio format, assume at least 'basic audio' support, even if 'basic
+ * audio' is not defined in EDID.
+ *
++ * Return: True if the monitor supports audio, false otherwise.
+ */
+ bool drm_detect_monitor_audio(struct edid *edid)
+ {
+@@ -3345,10 +3452,13 @@
+
+ /**
+ * drm_rgb_quant_range_selectable - is RGB quantization range selectable?
++ * @edid: EDID block to scan
+ *
+ * Check whether the monitor reports the RGB quantization range selection
+ * as supported. The AVI infoframe can then be used to inform the monitor
+ * which quantization range (full or limited) is used.
++ *
++ * Return: True if the RGB quantization range is selectable, false otherwise.
+ */
+ bool drm_rgb_quant_range_selectable(struct edid *edid)
+ {
+@@ -3375,16 +3485,119 @@
+ EXPORT_SYMBOL(drm_rgb_quant_range_selectable);
+
+ /**
++ * drm_assign_hdmi_deep_color_info - detect whether monitor supports
++ * HDMI deep color modes and update drm_display_info if so.
++ * @edid: monitor EDID information
++ * @info: Updated with maximum supported deep color bpc and color format
++ * if deep color supported.
++ * @connector: DRM connector, used only for debug output
++ *
++ * Parse the CEA extension according to CEA-861-B.
++ * Return: True if HDMI deep color is supported, false if not or unknown.
++ */
++static bool drm_assign_hdmi_deep_color_info(struct edid *edid,
++ struct drm_display_info *info,
++ struct drm_connector *connector)
++{
++ u8 *edid_ext, *hdmi;
++ int i;
++ int start_offset, end_offset;
++ unsigned int dc_bpc = 0;
++
++ edid_ext = drm_find_cea_extension(edid);
++ if (!edid_ext)
++ return false;
++
++ if (cea_db_offsets(edid_ext, &start_offset, &end_offset))
++ return false;
++
++ /*
++ * Because HDMI identifier is in Vendor Specific Block,
++ * search it from all data blocks of CEA extension.
++ */
++ for_each_cea_db(edid_ext, i, start_offset, end_offset) {
++ if (cea_db_is_hdmi_vsdb(&edid_ext[i])) {
++ /* HDMI supports at least 8 bpc */
++ info->bpc = 8;
++
++ hdmi = &edid_ext[i];
++ if (cea_db_payload_len(hdmi) < 6)
++ return false;
++
++ if (hdmi[6] & DRM_EDID_HDMI_DC_30) {
++ dc_bpc = 10;
++ info->edid_hdmi_dc_modes |= DRM_EDID_HDMI_DC_30;
++ DRM_DEBUG("%s: HDMI sink does deep color 30.\n",
++ connector->name);
++ }
++
++ if (hdmi[6] & DRM_EDID_HDMI_DC_36) {
++ dc_bpc = 12;
++ info->edid_hdmi_dc_modes |= DRM_EDID_HDMI_DC_36;
++ DRM_DEBUG("%s: HDMI sink does deep color 36.\n",
++ connector->name);
++ }
++
++ if (hdmi[6] & DRM_EDID_HDMI_DC_48) {
++ dc_bpc = 16;
++ info->edid_hdmi_dc_modes |= DRM_EDID_HDMI_DC_48;
++ DRM_DEBUG("%s: HDMI sink does deep color 48.\n",
++ connector->name);
++ }
++
++ if (dc_bpc > 0) {
++ DRM_DEBUG("%s: Assigning HDMI sink color depth as %d bpc.\n",
++ connector->name, dc_bpc);
++ info->bpc = dc_bpc;
++
++ /*
++ * Deep color support mandates RGB444 support for all video
++ * modes and forbids YCRCB422 support for all video modes per
++ * HDMI 1.3 spec.
++ */
++ info->color_formats = DRM_COLOR_FORMAT_RGB444;
++
++ /* YCRCB444 is optional according to spec. */
++ if (hdmi[6] & DRM_EDID_HDMI_DC_Y444) {
++ info->color_formats |= DRM_COLOR_FORMAT_YCRCB444;
++ DRM_DEBUG("%s: HDMI sink does YCRCB444 in deep color.\n",
++ connector->name);
++ }
++
++ /*
++ * Spec says that if any deep color mode is supported at all,
++ * then deep color 36 bit must be supported.
++ */
++ if (!(hdmi[6] & DRM_EDID_HDMI_DC_36)) {
++ DRM_DEBUG("%s: HDMI sink should do DC_36, but does not!\n",
++ connector->name);
++ }
++
++ return true;
++ }
++ else {
++ DRM_DEBUG("%s: No deep color support on this HDMI sink.\n",
++ connector->name);
++ }
++ }
++ }
++
++ return false;
++}
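
For context, the deep-color capability parsed above lives in byte 6 of the HDMI vendor-specific data block. A minimal standalone sketch of the same decode, assuming the CEA-861 bit positions behind the DRM_EDID_HDMI_DC_30/36/48 macros (0x10/0x20/0x40) rather than pulling in the kernel headers:

/* Hedged sketch: map the HDMI VSDB deep-color byte to a maximum bpc.
 * The flag values are assumptions mirroring CEA-861, not the kernel
 * macros themselves.
 */
#include <stdio.h>

static unsigned int max_deep_color_bpc(unsigned char dc_byte)
{
	unsigned int bpc = 8;		/* HDMI mandates at least 8 bpc */

	if (dc_byte & 0x10)		/* DC_30: 10 bits per component */
		bpc = 10;
	if (dc_byte & 0x20)		/* DC_36: 12 bits per component */
		bpc = 12;
	if (dc_byte & 0x40)		/* DC_48: 16 bits per component */
		bpc = 16;
	return bpc;
}

int main(void)
{
	printf("%u\n", max_deep_color_bpc(0x30));	/* DC_30|DC_36 -> 12 */
	return 0;
}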
++
++/**
+ * drm_add_display_info - pull display info out if present
+ * @edid: EDID data
+ * @info: display info (attached to connector)
++ * @connector: connector whose edid is used to build display info
+ *
+ * Grab any available display info and stuff it into the drm_display_info
+ * structure that's part of the connector. Useful for tracking bpp and
+ * color spaces.
+ */
+ static void drm_add_display_info(struct edid *edid,
+- struct drm_display_info *info)
++ struct drm_display_info *info,
++ struct drm_connector *connector)
+ {
+ u8 *edid_ext;
+
+@@ -3414,6 +3627,9 @@
+ info->color_formats |= DRM_COLOR_FORMAT_YCRCB422;
+ }
+
++ /* HDMI deep color modes supported? Assign to info, if so */
++ drm_assign_hdmi_deep_color_info(edid, info, connector);
++
+ /* Only defined for 1.4 with digital displays */
+ if (edid->revision < 4)
+ return;
+@@ -3443,6 +3659,9 @@
+ break;
+ }
+
++ DRM_DEBUG("%s: Assigning EDID-1.4 digital sink color depth as %d bpc.\n",
++ connector->name, info->bpc);
++
+ info->color_formats |= DRM_COLOR_FORMAT_RGB444;
+ if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB444)
+ info->color_formats |= DRM_COLOR_FORMAT_YCRCB444;
+@@ -3453,11 +3672,11 @@
+ /**
+ * drm_add_edid_modes - add modes from EDID data, if available
+ * @connector: connector we're probing
+- * @edid: edid data
++ * @edid: EDID data
+ *
+ * Add the specified modes to the connector's mode list.
+ *
+- * Return number of modes added or 0 if we couldn't find any.
++ * Return: The number of modes added or 0 if we couldn't find any.
+ */
+ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
+ {
+@@ -3469,7 +3688,7 @@
+ }
+ if (!drm_edid_is_valid(edid)) {
+ dev_warn(connector->dev->dev, "%s: EDID invalid.\n",
+- drm_get_connector_name(connector));
++ connector->name);
+ return 0;
+ }
+
+@@ -3501,11 +3720,14 @@
+ if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
+ edid_fixup_preferred(connector, quirks);
+
+- drm_add_display_info(edid, &connector->display_info);
++ drm_add_display_info(edid, &connector->display_info, connector);
+
+ if (quirks & EDID_QUIRK_FORCE_8BPC)
+ connector->display_info.bpc = 8;
+
++ if (quirks & EDID_QUIRK_FORCE_12BPC)
++ connector->display_info.bpc = 12;
++
+ return num_modes;
+ }
+ EXPORT_SYMBOL(drm_add_edid_modes);
+@@ -3519,7 +3741,7 @@
+ * Add the specified modes to the connector's mode list. Only when the
+ * hdisplay/vdisplay is not beyond the given limit, it will be added.
+ *
+- * Return number of modes added or 0 if we couldn't find any.
++ * Return: The number of modes added or 0 if we couldn't find any.
+ */
+ int drm_add_modes_noedid(struct drm_connector *connector,
+ int hdisplay, int vdisplay)
+@@ -3558,14 +3780,23 @@
+ }
+ EXPORT_SYMBOL(drm_add_modes_noedid);
+
++/**
++ * drm_set_preferred_mode - Sets the preferred mode of a connector
++ * @connector: connector whose mode list should be processed
++ * @hpref: horizontal resolution of preferred mode
++ * @vpref: vertical resolution of preferred mode
++ *
++ * Marks a mode as preferred if it matches the resolution specified by @hpref
++ * and @vpref.
++ */
+ void drm_set_preferred_mode(struct drm_connector *connector,
+ int hpref, int vpref)
+ {
+ struct drm_display_mode *mode;
+
+ list_for_each_entry(mode, &connector->probed_modes, head) {
+- if (drm_mode_width(mode) == hpref &&
+- drm_mode_height(mode) == vpref)
++ if (mode->hdisplay == hpref &&
++ mode->vdisplay == vpref)
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+ }
+ }
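
A hedged usage sketch of the helper above, inside a hypothetical connector ->get_modes hook; the panel_get_modes name and the 1920x1080 native resolution are illustrative assumptions:

/* Hedged sketch: add standard modes up to the panel size, then mark
 * the native resolution preferred so fbdev and userspace pick it.
 */
static int panel_get_modes(struct drm_connector *connector)
{
	int count = drm_add_modes_noedid(connector, 1920, 1080);

	drm_set_preferred_mode(connector, 1920, 1080);
	return count;
}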
+@@ -3577,7 +3808,7 @@
+ * @frame: HDMI AVI infoframe
+ * @mode: DRM display mode
+ *
+- * Returns 0 on success or a negative error code on failure.
++ * Return: 0 on success or a negative error code on failure.
+ */
+ int
+ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
+@@ -3598,7 +3829,20 @@
+ frame->video_code = drm_match_cea_mode(mode);
+
+ frame->picture_aspect = HDMI_PICTURE_ASPECT_NONE;
++
++ /*
++ * Populate picture aspect ratio from either
++ * user input (if specified) or from the CEA mode list.
++ */
++ if (mode->picture_aspect_ratio == HDMI_PICTURE_ASPECT_4_3 ||
++ mode->picture_aspect_ratio == HDMI_PICTURE_ASPECT_16_9)
++ frame->picture_aspect = mode->picture_aspect_ratio;
++ else if (frame->video_code > 0)
++ frame->picture_aspect = drm_get_cea_aspect_ratio(
++ frame->video_code);
++
+ frame->active_aspect = HDMI_ACTIVE_ASPECT_PICTURE;
++ frame->scan_mode = HDMI_SCAN_MODE_UNDERSCAN;
+
+ return 0;
+ }
+@@ -3641,7 +3885,7 @@
+ * 4k or stereoscopic 3D mode. So when giving any other mode as input this
+ * function will return -EINVAL, error that can be safely ignored.
+ *
+- * Returns 0 on success or a negative error code on failure.
++ * Return: 0 on success or a negative error code on failure.
+ */
+ int
+ drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame,
+@@ -3675,3 +3919,123 @@
+ return 0;
+ }
+ EXPORT_SYMBOL(drm_hdmi_vendor_infoframe_from_display_mode);
++
++static int drm_parse_display_id(struct drm_connector *connector,
++ u8 *displayid, int length,
++ bool is_edid_extension)
++{
++ /* if this is an EDID extension the first byte will be 0x70 */
++ int idx = 0;
++ struct displayid_hdr *base;
++ struct displayid_block *block;
++ u8 csum = 0;
++ int i;
++
++ if (is_edid_extension)
++ idx = 1;
++
++ base = (struct displayid_hdr *)&displayid[idx];
++
++ DRM_DEBUG_KMS("base revision 0x%x, length %d, %d %d\n",
++ base->rev, base->bytes, base->prod_id, base->ext_count);
++
++ if (base->bytes + 5 > length - idx)
++ return -EINVAL;
++
++ for (i = idx; i <= base->bytes + 5; i++) {
++ csum += displayid[i];
++ }
++ if (csum) {
++ DRM_ERROR("DisplayID checksum invalid, remainder is %d\n", csum);
++ return -EINVAL;
++ }
++
++ block = (struct displayid_block *)&displayid[idx + 4];
++ DRM_DEBUG_KMS("block id %d, rev %d, len %d\n",
++ block->tag, block->rev, block->num_bytes);
++
++ switch (block->tag) {
++ case DATA_BLOCK_TILED_DISPLAY: {
++ struct displayid_tiled_block *tile = (struct displayid_tiled_block *)block;
++
++ u16 w, h;
++ u8 tile_v_loc, tile_h_loc;
++ u8 num_v_tile, num_h_tile;
++ struct drm_tile_group *tg;
++
++ w = tile->tile_size[0] | tile->tile_size[1] << 8;
++ h = tile->tile_size[2] | tile->tile_size[3] << 8;
++
++ num_v_tile = (tile->topo[0] & 0xf) | (tile->topo[2] & 0x30);
++ num_h_tile = (tile->topo[0] >> 4) | ((tile->topo[2] >> 2) & 0x30);
++ tile_v_loc = (tile->topo[1] & 0xf) | ((tile->topo[2] & 0x3) << 4);
++ tile_h_loc = (tile->topo[1] >> 4) | (((tile->topo[2] >> 2) & 0x3) << 4);
++
++ connector->has_tile = true;
++ if (tile->tile_cap & 0x80)
++ connector->tile_is_single_monitor = true;
++
++ connector->num_h_tile = num_h_tile + 1;
++ connector->num_v_tile = num_v_tile + 1;
++ connector->tile_h_loc = tile_h_loc;
++ connector->tile_v_loc = tile_v_loc;
++ connector->tile_h_size = w + 1;
++ connector->tile_v_size = h + 1;
++
++ DRM_DEBUG_KMS("tile cap 0x%x\n", tile->tile_cap);
++ DRM_DEBUG_KMS("tile_size %d x %d\n", w + 1, h + 1);
++ DRM_DEBUG_KMS("topo num tiles %dx%d, location %dx%d\n",
++ num_h_tile + 1, num_v_tile + 1, tile_h_loc, tile_v_loc);
++ DRM_DEBUG_KMS("vend %c%c%c\n", tile->topology_id[0], tile->topology_id[1], tile->topology_id[2]);
++
++ tg = drm_mode_get_tile_group(connector->dev, tile->topology_id);
++ if (!tg) {
++ tg = drm_mode_create_tile_group(connector->dev, tile->topology_id);
++ }
++ if (!tg)
++ return -ENOMEM;
++
++ if (connector->tile_group != tg) {
++ /* if we haven't got a pointer,
++ take the reference, drop ref to old tile group */
++ if (connector->tile_group) {
++ drm_mode_put_tile_group(connector->dev, connector->tile_group);
++ }
++ connector->tile_group = tg;
++ } else
++ /* if same tile group, then release the ref we just took. */
++ drm_mode_put_tile_group(connector->dev, tg);
++ }
++ break;
++ default:
++ printk("unknown displayid tag %d\n", block->tag);
++ break;
++ }
++ return 0;
++}
++
++static void drm_get_displayid(struct drm_connector *connector,
++ struct edid *edid)
++{
++ void *displayid = NULL;
++ int ret;
++ connector->has_tile = false;
++ displayid = drm_find_displayid_extension(edid);
++ if (!displayid) {
++ /* drop reference to any tile group we had */
++ goto out_drop_ref;
++ }
++
++ ret = drm_parse_display_id(connector, displayid, EDID_LENGTH, true);
++ if (ret < 0)
++ goto out_drop_ref;
++ if (!connector->has_tile)
++ goto out_drop_ref;
++ return;
++out_drop_ref:
++ if (connector->tile_group) {
++ drm_mode_put_tile_group(connector->dev, connector->tile_group);
++ connector->tile_group = NULL;
++ }
++ return;
++}
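
The topology bit-twiddling in drm_parse_display_id() is dense, so here is a hedged standalone sketch of the same unpacking, with the field layout taken directly from the parser (6-bit tile counts and locations split across nibbles, sizes stored minus one):

/* Hedged sketch: decode DisplayID tiled-display topology/size bytes.
 * Field packing mirrors the kernel parser above; the sample values
 * are illustrative, not from a real monitor.
 */
#include <stdio.h>

struct tile_info { unsigned num_h, num_v, h_loc, v_loc, w, h; };

static struct tile_info decode_tile(const unsigned char topo[3],
				    const unsigned char size[4])
{
	struct tile_info t;

	t.num_v = ((topo[0] & 0xf) | (topo[2] & 0x30)) + 1;
	t.num_h = ((topo[0] >> 4) | ((topo[2] >> 2) & 0x30)) + 1;
	t.v_loc = (topo[1] & 0xf) | ((topo[2] & 0x3) << 4);
	t.h_loc = (topo[1] >> 4) | (((topo[2] >> 2) & 0x3) << 4);
	t.w = (size[0] | (size[1] << 8)) + 1;	/* stored minus one */
	t.h = (size[2] | (size[3] << 8)) + 1;
	return t;
}

int main(void)
{
	unsigned char topo[3] = { 0x10, 0x00, 0x00 };	    /* 2x1 tiling */
	unsigned char size[4] = { 0x7f, 0x07, 0x37, 0x04 }; /* 1920x1080 */
	struct tile_info t = decode_tile(topo, size);

	printf("%ux%u grid, tile %ux%u at (%u,%u)\n",
	       t.num_h, t.num_v, t.w, t.h, t.h_loc, t.v_loc);
	return 0;
}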
+diff -Naur a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c
+--- a/drivers/gpu/drm/drm_edid_load.c 2015-03-26 14:43:30.398436435 +0530
++++ b/drivers/gpu/drm/drm_edid_load.c 2015-03-26 14:42:38.722435422 +0530
+@@ -31,8 +31,9 @@
+ MODULE_PARM_DESC(edid_firmware, "Do not probe monitor, use specified EDID blob "
+ "from built-in data or /lib/firmware instead. ");
+
+-#define GENERIC_EDIDS 5
++#define GENERIC_EDIDS 6
+ static const char *generic_edid_name[GENERIC_EDIDS] = {
++ "edid/800x600.bin",
+ "edid/1024x768.bin",
+ "edid/1280x1024.bin",
+ "edid/1600x1200.bin",
+@@ -44,6 +45,24 @@
+ {
+ 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
+ 0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++ 0x05, 0x16, 0x01, 0x03, 0x6d, 0x1b, 0x14, 0x78,
++ 0xea, 0x5e, 0xc0, 0xa4, 0x59, 0x4a, 0x98, 0x25,
++ 0x20, 0x50, 0x54, 0x01, 0x00, 0x00, 0x45, 0x40,
++ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
++ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0xa0, 0x0f,
++ 0x20, 0x00, 0x31, 0x58, 0x1c, 0x20, 0x28, 0x80,
++ 0x14, 0x00, 0x15, 0xd0, 0x10, 0x00, 0x00, 0x1e,
++ 0x00, 0x00, 0x00, 0xff, 0x00, 0x4c, 0x69, 0x6e,
++ 0x75, 0x78, 0x20, 0x23, 0x30, 0x0a, 0x20, 0x20,
++ 0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x3b,
++ 0x3d, 0x24, 0x26, 0x05, 0x00, 0x0a, 0x20, 0x20,
++ 0x20, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfc,
++ 0x00, 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x20, 0x53,
++ 0x56, 0x47, 0x41, 0x0a, 0x20, 0x20, 0x00, 0xc2,
++ },
++ {
++ 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
++ 0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x05, 0x16, 0x01, 0x03, 0x6d, 0x23, 0x1a, 0x78,
+ 0xea, 0x5e, 0xc0, 0xa4, 0x59, 0x4a, 0x98, 0x25,
+ 0x20, 0x50, 0x54, 0x00, 0x08, 0x00, 0x61, 0x40,
+@@ -235,14 +254,13 @@
+ name, connector_name);
+
+ out:
+- if (fw)
+- release_firmware(fw);
++ release_firmware(fw);
+ return edid;
+ }
+
+ int drm_load_edid_firmware(struct drm_connector *connector)
+ {
+- const char *connector_name = drm_get_connector_name(connector);
++ const char *connector_name = connector->name;
+ char *edidname = edid_firmware, *last, *colon;
+ int ret;
+ struct edid *edid;
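
The built-in blobs above each end in a checksum byte (0xc2 for the new 800x600 entry) chosen so the 128-byte base block sums to zero mod 256. A hedged sketch of that validity check, independent of the kernel's drm_edid_is_valid():

/* Hedged sketch: verify a 128-byte EDID base block. All bytes,
 * including the trailing checksum byte, must sum to 0 mod 256.
 */
#include <stddef.h>

static int edid_block_valid(const unsigned char block[128])
{
	unsigned char sum = 0;
	size_t i;

	for (i = 0; i < 128; i++)
		sum += block[i];
	return sum == 0;	/* non-zero remainder means corruption */
}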
+diff -Naur a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
+--- a/drivers/gpu/drm/drm_fb_cma_helper.c 2015-03-26 14:43:30.398436435 +0530
++++ b/drivers/gpu/drm/drm_fb_cma_helper.c 2015-03-26 14:42:38.722435422 +0530
+@@ -327,7 +327,7 @@
+ return ret;
+ }
+
+-static struct drm_fb_helper_funcs drm_fb_cma_helper_funcs = {
++static const struct drm_fb_helper_funcs drm_fb_cma_helper_funcs = {
+ .fb_probe = drm_fbdev_cma_create,
+ };
+
+@@ -354,9 +354,10 @@
+ return ERR_PTR(-ENOMEM);
+ }
+
+- fbdev_cma->fb_helper.funcs = &drm_fb_cma_helper_funcs;
+ helper = &fbdev_cma->fb_helper;
+
++ drm_fb_helper_prepare(dev, helper, &drm_fb_cma_helper_funcs);
++
+ ret = drm_fb_helper_init(dev, helper, num_crtc, max_conn_count);
+ if (ret < 0) {
+ dev_err(dev->dev, "Failed to initialize drm fb helper.\n");
+@@ -429,13 +430,8 @@
+ */
+ void drm_fbdev_cma_restore_mode(struct drm_fbdev_cma *fbdev_cma)
+ {
+- if (fbdev_cma) {
+- struct drm_device *dev = fbdev_cma->fb_helper.dev;
+-
+- drm_modeset_lock_all(dev);
+- drm_fb_helper_restore_fbdev_mode(&fbdev_cma->fb_helper);
+- drm_modeset_unlock_all(dev);
+- }
++ if (fbdev_cma)
++ drm_fb_helper_restore_fbdev_mode_unlocked(&fbdev_cma->fb_helper);
+ }
+ EXPORT_SYMBOL_GPL(drm_fbdev_cma_restore_mode);
+
+diff -Naur a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
+--- a/drivers/gpu/drm/drm_fb_helper.c 2015-03-26 14:43:30.398436435 +0530
++++ b/drivers/gpu/drm/drm_fb_helper.c 2015-03-26 14:42:38.722435422 +0530
+@@ -45,24 +45,38 @@
+ * DOC: fbdev helpers
+ *
+ * The fb helper functions are useful to provide an fbdev on top of a drm kernel
+- * mode setting driver. They can be used mostly independantely from the crtc
++ * mode setting driver. They can be used mostly independently from the crtc
+ * helper functions used by many drivers to implement the kernel mode setting
+ * interfaces.
+ *
+- * Initialization is done as a three-step process with drm_fb_helper_init(),
+- * drm_fb_helper_single_add_all_connectors() and drm_fb_helper_initial_config().
+- * Drivers with fancier requirements than the default beheviour can override the
+- * second step with their own code. Teardown is done with drm_fb_helper_fini().
++ * Initialization is done as a four-step process with drm_fb_helper_prepare(),
++ * drm_fb_helper_init(), drm_fb_helper_single_add_all_connectors() and
++ * drm_fb_helper_initial_config(). Drivers with fancier requirements than the
++ * default behaviour can override the third step with their own code.
++ * Teardown is done with drm_fb_helper_fini().
+ *
+ * At runtime drivers should restore the fbdev console by calling
+ * drm_fb_helper_restore_fbdev_mode() from their ->lastclose callback. They
+ * should also notify the fb helper code from updates to the output
+ * configuration by calling drm_fb_helper_hotplug_event(). For easier
+ * integration with the output polling code in drm_crtc_helper.c the modeset
+- * code proves a ->output_poll_changed callback.
++ * code provides a ->output_poll_changed callback.
+ *
+ * All other functions exported by the fb helper library can be used to
+ * implement the fbdev driver interface by the driver.
++ *
++ * It is possible, though perhaps somewhat tricky, to implement race-free
++ * hotplug detection using the fbdev helpers. The drm_fb_helper_prepare()
++ * helper must be called first to initialize the minimum required to make
++ * hotplug detection work. Drivers also need to make sure to properly set up
++ * the dev->mode_config.funcs member. After calling drm_kms_helper_poll_init()
++ * it is safe to enable interrupts and start processing hotplug events. At the
++ * same time, drivers should initialize all modeset objects such as CRTCs,
++ * encoders and connectors. To finish up the fbdev helper initialization, the
++ * drm_fb_helper_init() function is called. To probe for all attached displays
++ * and set up an initial configuration using the detected hardware, drivers
++ * should call drm_fb_helper_single_add_all_connectors() followed by
++ * drm_fb_helper_initial_config().
+ */
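
A hedged sketch of the four-step sequence just described, as a driver setup path; struct my_priv, the my_fbdev_probe callback and the single-CRTC counts are illustrative assumptions, and error unwinding is elided:

/* Hedged sketch of the prepare/init/add-connectors/initial-config
 * sequence from the DOC comment above.
 */
#include <drm/drmP.h>
#include <drm/drm_fb_helper.h>

struct my_priv {
	struct drm_fb_helper fb_helper;
};

static int my_fbdev_probe(struct drm_fb_helper *helper,
			  struct drm_fb_helper_surface_size *sizes);

static const struct drm_fb_helper_funcs my_fb_funcs = {
	.fb_probe = my_fbdev_probe,	/* assumed driver callback */
};

static int my_fbdev_init(struct drm_device *dev, struct my_priv *priv)
{
	struct drm_fb_helper *helper = &priv->fb_helper;
	int ret;

	drm_fb_helper_prepare(dev, helper, &my_fb_funcs);	/* step 1 */

	ret = drm_fb_helper_init(dev, helper, 1, 1);		/* step 2 */
	if (ret)
		return ret;

	ret = drm_fb_helper_single_add_all_connectors(helper);	/* step 3 */
	if (ret)
		return ret;

	drm_fb_helper_initial_config(helper, 32);		/* step 4 */
	return 0;
}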
+
+ /**
+@@ -105,59 +119,57 @@
+ }
+ EXPORT_SYMBOL(drm_fb_helper_single_add_all_connectors);
+
+-static int drm_fb_helper_parse_command_line(struct drm_fb_helper *fb_helper)
++int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, struct drm_connector *connector)
+ {
+- struct drm_fb_helper_connector *fb_helper_conn;
+- int i;
++ struct drm_fb_helper_connector **temp;
++ struct drm_fb_helper_connector *fb_helper_connector;
+
+- for (i = 0; i < fb_helper->connector_count; i++) {
+- struct drm_cmdline_mode *mode;
+- struct drm_connector *connector;
+- char *option = NULL;
++ WARN_ON(!mutex_is_locked(&fb_helper->dev->mode_config.mutex));
++ if (fb_helper->connector_count + 1 > fb_helper->connector_info_alloc_count) {
++ temp = krealloc(fb_helper->connector_info, sizeof(struct drm_fb_helper_connector *) * (fb_helper->connector_count + 1), GFP_KERNEL);
++ if (!temp)
++ return -ENOMEM;
+
+- fb_helper_conn = fb_helper->connector_info[i];
+- connector = fb_helper_conn->connector;
+- mode = &fb_helper_conn->cmdline_mode;
++ fb_helper->connector_info_alloc_count = fb_helper->connector_count + 1;
++ fb_helper->connector_info = temp;
++ }
+
+- /* do something on return - turn off connector maybe */
+- if (fb_get_options(drm_get_connector_name(connector), &option))
+- continue;
+
+- if (drm_mode_parse_command_line_for_connector(option,
+- connector,
+- mode)) {
+- if (mode->force) {
+- const char *s;
+- switch (mode->force) {
+- case DRM_FORCE_OFF:
+- s = "OFF";
+- break;
+- case DRM_FORCE_ON_DIGITAL:
+- s = "ON - dig";
+- break;
+- default:
+- case DRM_FORCE_ON:
+- s = "ON";
+- break;
+- }
+-
+- DRM_INFO("forcing %s connector %s\n",
+- drm_get_connector_name(connector), s);
+- connector->force = mode->force;
+- }
++ fb_helper_connector = kzalloc(sizeof(struct drm_fb_helper_connector), GFP_KERNEL);
++ if (!fb_helper_connector)
++ return -ENOMEM;
+
+- DRM_DEBUG_KMS("cmdline mode for connector %s %dx%d@%dHz%s%s%s\n",
+- drm_get_connector_name(connector),
+- mode->xres, mode->yres,
+- mode->refresh_specified ? mode->refresh : 60,
+- mode->rb ? " reduced blanking" : "",
+- mode->margins ? " with margins" : "",
+- mode->interlace ? " interlaced" : "");
+- }
++ fb_helper_connector->connector = connector;
++ fb_helper->connector_info[fb_helper->connector_count++] = fb_helper_connector;
++ return 0;
++}
++EXPORT_SYMBOL(drm_fb_helper_add_one_connector);
++
++int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
++ struct drm_connector *connector)
++{
++ struct drm_fb_helper_connector *fb_helper_connector;
++ int i, j;
++
++ WARN_ON(!mutex_is_locked(&fb_helper->dev->mode_config.mutex));
+
++ for (i = 0; i < fb_helper->connector_count; i++) {
++ if (fb_helper->connector_info[i]->connector == connector)
++ break;
+ }
++
++ if (i == fb_helper->connector_count)
++ return -EINVAL;
++ fb_helper_connector = fb_helper->connector_info[i];
++
++ for (j = i + 1; j < fb_helper->connector_count; j++) {
++ fb_helper->connector_info[j - 1] = fb_helper->connector_info[j];
++ }
++ fb_helper->connector_count--;
++ kfree(fb_helper_connector);
+ return 0;
+ }
++EXPORT_SYMBOL(drm_fb_helper_remove_one_connector);
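
A hedged sketch of how a hotplug path (DP MST being the motivating case) might use the two helpers just added; the WARN_ONs in both require the caller to hold mode_config.mutex, as done here, and the function name is illustrative:

/* Hedged sketch: grow/shrink the fbdev helper's connector list on
 * hotplug, then let the helper repick modes and CRTCs.
 */
static void my_connector_hotplug(struct drm_fb_helper *helper,
				 struct drm_connector *connector,
				 bool connected)
{
	struct drm_device *dev = helper->dev;

	mutex_lock(&dev->mode_config.mutex);
	if (connected)
		drm_fb_helper_add_one_connector(helper, connector);
	else
		drm_fb_helper_remove_one_connector(helper, connector);
	mutex_unlock(&dev->mode_config.mutex);

	drm_fb_helper_hotplug_event(helper);
}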
+
+ static void drm_fb_helper_save_lut_atomic(struct drm_crtc *crtc, struct drm_fb_helper *helper)
+ {
+@@ -199,9 +211,6 @@
+ struct drm_crtc_helper_funcs *funcs;
+ int i;
+
+- if (list_empty(&kernel_fb_helper_list))
+- return false;
+-
+ list_for_each_entry(helper, &kernel_fb_helper_list, kernel_fb_list) {
+ for (i = 0; i < helper->crtc_count; i++) {
+ struct drm_mode_set *mode_set =
+@@ -232,7 +241,7 @@
+
+ list_for_each_entry(c, &dev->mode_config.crtc_list, head) {
+ if (crtc->base.id == c->base.id)
+- return c->fb;
++ return c->primary->fb;
+ }
+
+ return NULL;
+@@ -273,15 +282,7 @@
+ }
+ EXPORT_SYMBOL(drm_fb_helper_debug_leave);
+
+-/**
+- * drm_fb_helper_restore_fbdev_mode - restore fbdev configuration
+- * @fb_helper: fbcon to restore
+- *
+- * This should be called from driver's drm ->lastclose callback
+- * when implementing an fbcon on top of kms using this helper. This ensures that
+- * the user isn't greeted with a black screen when e.g. X dies.
+- */
+-bool drm_fb_helper_restore_fbdev_mode(struct drm_fb_helper *fb_helper)
++static bool restore_fbdev_mode(struct drm_fb_helper *fb_helper)
+ {
+ struct drm_device *dev = fb_helper->dev;
+ struct drm_plane *plane;
+@@ -290,8 +291,16 @@
+
+ drm_warn_on_modeset_not_all_locked(dev);
+
+- list_for_each_entry(plane, &dev->mode_config.plane_list, head)
+- drm_plane_force_disable(plane);
++ list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
++ if (plane->type != DRM_PLANE_TYPE_PRIMARY)
++ drm_plane_force_disable(plane);
++
++ if (dev->mode_config.rotation_property) {
++ drm_mode_plane_set_obj_prop(plane,
++ dev->mode_config.rotation_property,
++ BIT(DRM_ROTATE_0));
++ }
++ }
+
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ struct drm_mode_set *mode_set = &fb_helper->crtc_info[i].mode_set;
+@@ -310,7 +319,49 @@
+ }
+ return error;
+ }
+-EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode);
++/**
++ * drm_fb_helper_restore_fbdev_mode - restore fbdev configuration
++ * @fb_helper: fbcon to restore
++ *
++ * This should be called from driver's drm ->lastclose callback
++ * when implementing an fbcon on top of kms using this helper. This ensures that
++ * the user isn't greeted with a black screen when e.g. X dies.
++ *
++ * Use this variant if you need to bypass locking (panic), or already
++ * hold all modeset locks. Otherwise use drm_fb_helper_restore_fbdev_mode_unlocked()
++ */
++static bool drm_fb_helper_restore_fbdev_mode(struct drm_fb_helper *fb_helper)
++{
++ return restore_fbdev_mode(fb_helper);
++}
++
++/**
++ * drm_fb_helper_restore_fbdev_mode_unlocked - restore fbdev configuration
++ * @fb_helper: fbcon to restore
++ *
++ * This should be called from driver's drm ->lastclose callback
++ * when implementing an fbcon on top of kms using this helper. This ensures that
++ * the user isn't greeted with a black screen when e.g. X dies.
++ */
++bool drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
++{
++ struct drm_device *dev = fb_helper->dev;
++ bool ret;
++ bool do_delayed = false;
++
++ drm_modeset_lock_all(dev);
++ ret = restore_fbdev_mode(fb_helper);
++
++ do_delayed = fb_helper->delayed_hotplug;
++ if (do_delayed)
++ fb_helper->delayed_hotplug = false;
++ drm_modeset_unlock_all(dev);
++
++ if (do_delayed)
++ drm_fb_helper_hotplug_event(fb_helper);
++ return ret;
++}
++EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode_unlocked);
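
A hedged sketch of the intended ->lastclose use of the unlocked variant, so fbcon returns when the last DRM client (e.g. X) exits; the my_priv layout is an assumption:

/* Hedged sketch: restore the fbdev configuration on last close. */
static void my_driver_lastclose(struct drm_device *dev)
{
	struct my_priv *priv = dev->dev_private;	/* assumed layout */

	drm_fb_helper_restore_fbdev_mode_unlocked(&priv->fb_helper);
}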
+
+ /*
+ * restore fbcon display for all kms driver's using this helper, used for sysrq
+@@ -325,12 +376,25 @@
+ return false;
+
+ list_for_each_entry(helper, &kernel_fb_helper_list, kernel_fb_list) {
+- if (helper->dev->switch_power_state == DRM_SWITCH_POWER_OFF)
++ struct drm_device *dev = helper->dev;
++
++ if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ continue;
+
++ /*
++ * NOTE: Use trylock mode to avoid deadlocks and sleeping in
++ * panic context.
++ */
++ if (__drm_modeset_lock_all(dev, true) != 0) {
++ error = true;
++ continue;
++ }
++
+ ret = drm_fb_helper_restore_fbdev_mode(helper);
+ if (ret)
+ error = true;
++
++ drm_modeset_unlock_all(dev);
+ }
+ return error;
+ }
+@@ -365,9 +429,9 @@
+ return false;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+- if (crtc->fb)
++ if (crtc->primary->fb)
+ crtcs_bound++;
+- if (crtc->fb == fb_helper->fb)
++ if (crtc->primary->fb == fb_helper->fb)
+ bound++;
+ }
+
+@@ -492,6 +556,24 @@
+ }
+
+ /**
++ * drm_fb_helper_prepare - setup a drm_fb_helper structure
++ * @dev: DRM device
++ * @helper: driver-allocated fbdev helper structure to set up
++ * @funcs: pointer to structure of functions associated with this helper
++ *
++ * Sets up the bare minimum to make the framebuffer helper usable. This is
++ * useful to implement race-free initialization of the polling helpers.
++ */
++void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper,
++ const struct drm_fb_helper_funcs *funcs)
++{
++ INIT_LIST_HEAD(&helper->kernel_fb_list);
++ helper->funcs = funcs;
++ helper->dev = dev;
++}
++EXPORT_SYMBOL(drm_fb_helper_prepare);
++
++/**
+ * drm_fb_helper_init - initialize a drm_fb_helper structure
+ * @dev: drm device
+ * @fb_helper: driver-allocated fbdev helper structure to initialize
+@@ -503,8 +585,7 @@
+ * nor register the fbdev. This is only done in drm_fb_helper_initial_config()
+ * to allow driver writes more control over the exact init sequence.
+ *
+- * Drivers must set fb_helper->funcs before calling
+- * drm_fb_helper_initial_config().
++ * Drivers must call drm_fb_helper_prepare() before calling this function.
+ *
+ * RETURNS:
+ * Zero if everything went ok, nonzero otherwise.
+@@ -516,9 +597,8 @@
+ struct drm_crtc *crtc;
+ int i;
+
+- fb_helper->dev = dev;
+-
+- INIT_LIST_HEAD(&fb_helper->kernel_fb_list);
++ if (!max_conn_count)
++ return -EINVAL;
+
+ fb_helper->crtc_info = kcalloc(crtc_count, sizeof(struct drm_fb_helper_crtc), GFP_KERNEL);
+ if (!fb_helper->crtc_info)
+@@ -530,6 +610,7 @@
+ kfree(fb_helper->crtc_info);
+ return -ENOMEM;
+ }
++ fb_helper->connector_info_alloc_count = dev->mode_config.num_connector;
+ fb_helper->connector_count = 0;
+
+ for (i = 0; i < crtc_count; i++) {
+@@ -807,30 +888,15 @@
+ int drm_fb_helper_set_par(struct fb_info *info)
+ {
+ struct drm_fb_helper *fb_helper = info->par;
+- struct drm_device *dev = fb_helper->dev;
+ struct fb_var_screeninfo *var = &info->var;
+- int ret;
+- int i;
+
+ if (var->pixclock != 0) {
+ DRM_ERROR("PIXEL CLOCK SET\n");
+ return -EINVAL;
+ }
+
+- drm_modeset_lock_all(dev);
+- for (i = 0; i < fb_helper->crtc_count; i++) {
+- ret = drm_mode_set_config_internal(&fb_helper->crtc_info[i].mode_set);
+- if (ret) {
+- drm_modeset_unlock_all(dev);
+- return ret;
+- }
+- }
+- drm_modeset_unlock_all(dev);
++ drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper);
+
+- if (fb_helper->delayed_hotplug) {
+- fb_helper->delayed_hotplug = false;
+- drm_fb_helper_hotplug_event(fb_helper);
+- }
+ return 0;
+ }
+ EXPORT_SYMBOL(drm_fb_helper_set_par);
+@@ -905,7 +971,7 @@
+ struct drm_fb_helper_connector *fb_helper_conn = fb_helper->connector_info[i];
+ struct drm_cmdline_mode *cmdline_mode;
+
+- cmdline_mode = &fb_helper_conn->cmdline_mode;
++ cmdline_mode = &fb_helper_conn->connector->cmdline_mode;
+
+ if (cmdline_mode->bpp_specified) {
+ switch (cmdline_mode->bpp) {
+@@ -934,19 +1000,21 @@
+ crtc_count = 0;
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ struct drm_display_mode *desired_mode;
++ int x, y;
+ desired_mode = fb_helper->crtc_info[i].desired_mode;
+-
++ x = fb_helper->crtc_info[i].x;
++ y = fb_helper->crtc_info[i].y;
+ if (desired_mode) {
+ if (gamma_size == 0)
+ gamma_size = fb_helper->crtc_info[i].mode_set.crtc->gamma_size;
+- if (desired_mode->hdisplay < sizes.fb_width)
+- sizes.fb_width = desired_mode->hdisplay;
+- if (desired_mode->vdisplay < sizes.fb_height)
+- sizes.fb_height = desired_mode->vdisplay;
+- if (desired_mode->hdisplay > sizes.surface_width)
+- sizes.surface_width = desired_mode->hdisplay;
+- if (desired_mode->vdisplay > sizes.surface_height)
+- sizes.surface_height = desired_mode->vdisplay;
++ if (desired_mode->hdisplay + x < sizes.fb_width)
++ sizes.fb_width = desired_mode->hdisplay + x;
++ if (desired_mode->vdisplay + y < sizes.fb_height)
++ sizes.fb_height = desired_mode->vdisplay + y;
++ if (desired_mode->hdisplay + x > sizes.surface_width)
++ sizes.surface_width = desired_mode->hdisplay + x;
++ if (desired_mode->vdisplay + y > sizes.surface_height)
++ sizes.surface_height = desired_mode->vdisplay + y;
+ crtc_count++;
+ }
+ }
+@@ -1025,7 +1093,6 @@
+ info->fix.ypanstep = 1; /* doing it in hw */
+ info->fix.ywrapstep = 0;
+ info->fix.accel = FB_ACCEL_NONE;
+- info->fix.type_aux = 0;
+
+ info->fix.line_length = pitch;
+ return;
+@@ -1136,34 +1203,34 @@
+ return count;
+ }
+
+-static struct drm_display_mode *drm_has_preferred_mode(struct drm_fb_helper_connector *fb_connector, int width, int height)
++struct drm_display_mode *drm_has_preferred_mode(struct drm_fb_helper_connector *fb_connector, int width, int height)
+ {
+ struct drm_display_mode *mode;
+
+ list_for_each_entry(mode, &fb_connector->connector->modes, head) {
+- if (drm_mode_width(mode) > width ||
+- drm_mode_height(mode) > height)
++ if (mode->hdisplay > width ||
++ mode->vdisplay > height)
+ continue;
+ if (mode->type & DRM_MODE_TYPE_PREFERRED)
+ return mode;
+ }
+ return NULL;
+ }
++EXPORT_SYMBOL(drm_has_preferred_mode);
+
+ static bool drm_has_cmdline_mode(struct drm_fb_helper_connector *fb_connector)
+ {
+- struct drm_cmdline_mode *cmdline_mode;
+- cmdline_mode = &fb_connector->cmdline_mode;
+- return cmdline_mode->specified;
++ return fb_connector->connector->cmdline_mode.specified;
+ }
+
+-static struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn,
++struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn,
+ int width, int height)
+ {
+ struct drm_cmdline_mode *cmdline_mode;
+ struct drm_display_mode *mode = NULL;
++ bool prefer_non_interlace;
+
+- cmdline_mode = &fb_helper_conn->cmdline_mode;
++ cmdline_mode = &fb_helper_conn->connector->cmdline_mode;
+ if (cmdline_mode->specified == false)
+ return mode;
+
+@@ -1173,6 +1240,8 @@
+ if (cmdline_mode->rb || cmdline_mode->margins)
+ goto create_mode;
+
++ prefer_non_interlace = !cmdline_mode->interlace;
++ again:
+ list_for_each_entry(mode, &fb_helper_conn->connector->modes, head) {
+ /* check width/height */
+ if (mode->hdisplay != cmdline_mode->xres ||
+@@ -1187,16 +1256,25 @@
+ if (cmdline_mode->interlace) {
+ if (!(mode->flags & DRM_MODE_FLAG_INTERLACE))
+ continue;
++ } else if (prefer_non_interlace) {
++ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
++ continue;
+ }
+ return mode;
+ }
+
++ if (prefer_non_interlace) {
++ prefer_non_interlace = false;
++ goto again;
++ }
++
+ create_mode:
+ mode = drm_mode_create_from_cmdline_mode(fb_helper_conn->connector->dev,
+ cmdline_mode);
+ list_add(&mode->head, &fb_helper_conn->connector->modes);
+ return mode;
+ }
++EXPORT_SYMBOL(drm_pick_cmdline_mode);
+
+ static bool drm_connector_enabled(struct drm_connector *connector, bool strict)
+ {
+@@ -1236,6 +1314,7 @@
+
+ static bool drm_target_cloned(struct drm_fb_helper *fb_helper,
+ struct drm_display_mode **modes,
++ struct drm_fb_offset *offsets,
+ bool *enabled, int width, int height)
+ {
+ int count, i, j;
+@@ -1307,27 +1386,88 @@
+ return false;
+ }
+
++static int drm_get_tile_offsets(struct drm_fb_helper *fb_helper,
++ struct drm_display_mode **modes,
++ struct drm_fb_offset *offsets,
++ int idx,
++ int h_idx, int v_idx)
++{
++ struct drm_fb_helper_connector *fb_helper_conn;
++ int i;
++ int hoffset = 0, voffset = 0;
++
++ for (i = 0; i < fb_helper->connector_count; i++) {
++ fb_helper_conn = fb_helper->connector_info[i];
++ if (!fb_helper_conn->connector->has_tile)
++ continue;
++
++ if (!modes[i] && (h_idx || v_idx)) {
++ DRM_DEBUG_KMS("no modes for connector tiled %d %d\n", i,
++ fb_helper_conn->connector->base.id);
++ continue;
++ }
++ if (fb_helper_conn->connector->tile_h_loc < h_idx)
++ hoffset += modes[i]->hdisplay;
++
++ if (fb_helper_conn->connector->tile_v_loc < v_idx)
++ voffset += modes[i]->vdisplay;
++ }
++ offsets[idx].x = hoffset;
++ offsets[idx].y = voffset;
++ DRM_DEBUG_KMS("returned %d %d for %d %d\n", hoffset, voffset, h_idx, v_idx);
++ return 0;
++}
++
+ static bool drm_target_preferred(struct drm_fb_helper *fb_helper,
+ struct drm_display_mode **modes,
++ struct drm_fb_offset *offsets,
+ bool *enabled, int width, int height)
+ {
+ struct drm_fb_helper_connector *fb_helper_conn;
+ int i;
+-
++ uint64_t conn_configured = 0, mask;
++ int tile_pass = 0;
++ mask = (1 << fb_helper->connector_count) - 1;
++retry:
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ fb_helper_conn = fb_helper->connector_info[i];
+
+- if (enabled[i] == false)
++ if (conn_configured & (1 << i))
+ continue;
+
++ if (enabled[i] == false) {
++ conn_configured |= (1 << i);
++ continue;
++ }
++
++ /* first pass over all the untiled connectors */
++ if (tile_pass == 0 && fb_helper_conn->connector->has_tile)
++ continue;
++
++ if (tile_pass == 1) {
++ if (fb_helper_conn->connector->tile_h_loc != 0 ||
++ fb_helper_conn->connector->tile_v_loc != 0)
++ continue;
++
++ } else {
++ if (fb_helper_conn->connector->tile_h_loc != tile_pass -1 &&
++ fb_helper_conn->connector->tile_v_loc != tile_pass - 1)
++ /* if this tile_pass doesn't cover any of the tiles - keep going */
++ continue;
++
++ /* find the tile offsets for this pass - need
++ to find all tiles left and above */
++ drm_get_tile_offsets(fb_helper, modes, offsets,
++ i, fb_helper_conn->connector->tile_h_loc, fb_helper_conn->connector->tile_v_loc);
++ }
+ DRM_DEBUG_KMS("looking for cmdline mode on connector %d\n",
+ fb_helper_conn->connector->base.id);
+
+ /* got for command line mode first */
+ modes[i] = drm_pick_cmdline_mode(fb_helper_conn, width, height);
+ if (!modes[i]) {
+- DRM_DEBUG_KMS("looking for preferred mode on connector %d\n",
+- fb_helper_conn->connector->base.id);
++ DRM_DEBUG_KMS("looking for preferred mode on connector %d %d\n",
++ fb_helper_conn->connector->base.id, fb_helper_conn->connector->tile_group ? fb_helper_conn->connector->tile_group->id : 0);
+ modes[i] = drm_has_preferred_mode(fb_helper_conn, width, height);
+ }
+ /* No preferred modes, pick one off the list */
+@@ -1337,6 +1477,12 @@
+ }
+ DRM_DEBUG_KMS("found mode %s\n", modes[i] ? modes[i]->name :
+ "none");
++ conn_configured |= (1 << i);
++ }
++
++ if ((conn_configured & mask) != mask) {
++ tile_pass++;
++ goto retry;
+ }
+ return true;
+ }
+@@ -1426,6 +1572,7 @@
+ struct drm_device *dev = fb_helper->dev;
+ struct drm_fb_helper_crtc **crtcs;
+ struct drm_display_mode **modes;
++ struct drm_fb_offset *offsets;
+ struct drm_mode_set *modeset;
+ bool *enabled;
+ int width, height;
+@@ -1440,9 +1587,11 @@
+ sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL);
+ modes = kcalloc(dev->mode_config.num_connector,
+ sizeof(struct drm_display_mode *), GFP_KERNEL);
++ offsets = kcalloc(dev->mode_config.num_connector,
++ sizeof(struct drm_fb_offset), GFP_KERNEL);
+ enabled = kcalloc(dev->mode_config.num_connector,
+ sizeof(bool), GFP_KERNEL);
+- if (!crtcs || !modes || !enabled) {
++ if (!crtcs || !modes || !enabled || !offsets) {
+ DRM_ERROR("Memory allocation failed\n");
+ goto out;
+ }
+@@ -1452,14 +1601,16 @@
+
+ if (!(fb_helper->funcs->initial_config &&
+ fb_helper->funcs->initial_config(fb_helper, crtcs, modes,
++ offsets,
+ enabled, width, height))) {
+ memset(modes, 0, dev->mode_config.num_connector*sizeof(modes[0]));
+ memset(crtcs, 0, dev->mode_config.num_connector*sizeof(crtcs[0]));
++ memset(offsets, 0, dev->mode_config.num_connector*sizeof(offsets[0]));
+
+- if (!drm_target_cloned(fb_helper,
+- modes, enabled, width, height) &&
+- !drm_target_preferred(fb_helper,
+- modes, enabled, width, height))
++ if (!drm_target_cloned(fb_helper, modes, offsets,
++ enabled, width, height) &&
++ !drm_target_preferred(fb_helper, modes, offsets,
++ enabled, width, height))
+ DRM_ERROR("Unable to find initial modes\n");
+
+ DRM_DEBUG_KMS("picking CRTCs for %dx%d config\n",
+@@ -1479,18 +1630,23 @@
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ struct drm_display_mode *mode = modes[i];
+ struct drm_fb_helper_crtc *fb_crtc = crtcs[i];
++ struct drm_fb_offset *offset = &offsets[i];
+ modeset = &fb_crtc->mode_set;
+
+ if (mode && fb_crtc) {
+- DRM_DEBUG_KMS("desired mode %s set on crtc %d\n",
+- mode->name, fb_crtc->mode_set.crtc->base.id);
++ DRM_DEBUG_KMS("desired mode %s set on crtc %d (%d,%d)\n",
++ mode->name, fb_crtc->mode_set.crtc->base.id, offset->x, offset->y);
+ fb_crtc->desired_mode = mode;
++ fb_crtc->x = offset->x;
++ fb_crtc->y = offset->y;
+ if (modeset->mode)
+ drm_mode_destroy(dev, modeset->mode);
+ modeset->mode = drm_mode_duplicate(dev,
+ fb_crtc->desired_mode);
+ modeset->connectors[modeset->num_connectors++] = fb_helper->connector_info[i]->connector;
+ modeset->fb = fb_helper->fb;
++ modeset->x = offset->x;
++ modeset->y = offset->y;
+ }
+ }
+
+@@ -1499,7 +1655,6 @@
+ modeset = &fb_helper->crtc_info[i].mode_set;
+ if (modeset->num_connectors == 0) {
+ BUG_ON(modeset->fb);
+- BUG_ON(modeset->num_connectors);
+ if (modeset->mode)
+ drm_mode_destroy(dev, modeset->mode);
+ modeset->mode = NULL;
+@@ -1508,6 +1663,7 @@
+ out:
+ kfree(crtcs);
+ kfree(modes);
++ kfree(offsets);
+ kfree(enabled);
+ }
+
+@@ -1537,11 +1693,11 @@
+ struct drm_device *dev = fb_helper->dev;
+ int count = 0;
+
+- drm_fb_helper_parse_command_line(fb_helper);
+-
++ mutex_lock(&dev->mode_config.mutex);
+ count = drm_fb_helper_probe_connector_modes(fb_helper,
+ dev->mode_config.max_width,
+ dev->mode_config.max_height);
++ mutex_unlock(&dev->mode_config.mutex);
+ /*
+ * we shouldn't end up with no modes here.
+ */
+@@ -1567,8 +1723,10 @@
+ * either the output polling work or a work item launched from the driver's
+ * hotplug interrupt).
+ *
+- * Note that the driver must ensure that this is only called _after_ the fb has
+- * been fully set up, i.e. after the call to drm_fb_helper_initial_config.
++ * Note that drivers may call this even before calling
++ * drm_fb_helper_initial_config but only after drm_fb_helper_init. This allows
++ * for a race-free fbcon setup and will make sure that the fbdev emulation will
++ * not miss any hotplug events.
+ *
+ * RETURNS:
+ * 0 on success and a non-zero error code otherwise.
+@@ -1578,11 +1736,8 @@
+ struct drm_device *dev = fb_helper->dev;
+ u32 max_width, max_height;
+
+- if (!fb_helper->fb)
+- return 0;
+-
+ mutex_lock(&fb_helper->dev->mode_config.mutex);
+- if (!drm_fb_helper_is_bound(fb_helper)) {
++ if (!fb_helper->fb || !drm_fb_helper_is_bound(fb_helper)) {
+ fb_helper->delayed_hotplug = true;
+ mutex_unlock(&fb_helper->dev->mode_config.mutex);
+ return 0;
+diff -Naur a/drivers/gpu/drm/drm_flip_work.c b/drivers/gpu/drm/drm_flip_work.c
+--- a/drivers/gpu/drm/drm_flip_work.c 2015-03-26 14:43:30.430436436 +0530
++++ b/drivers/gpu/drm/drm_flip_work.c 2015-03-26 14:42:38.722435422 +0530
+@@ -25,6 +25,44 @@
+ #include "drm_flip_work.h"
+
+ /**
++ * drm_flip_work_allocate_task - allocate a flip-work task
++ * @data: data associated to the task
++ * @flags: allocator flags
++ *
++ * Allocate a drm_flip_task object and attach private data to it.
++ */
++struct drm_flip_task *drm_flip_work_allocate_task(void *data, gfp_t flags)
++{
++ struct drm_flip_task *task;
++
++ task = kzalloc(sizeof(*task), flags);
++ if (task)
++ task->data = data;
++
++ return task;
++}
++EXPORT_SYMBOL(drm_flip_work_allocate_task);
++
++/**
++ * drm_flip_work_queue_task - queue a specific task
++ * @work: the flip-work
++ * @task: the task to handle
++ *
++ * Queues a task that will later be run (passed back to drm_flip_func_t
++ * func) on a work queue after drm_flip_work_commit() is called.
++ */
++void drm_flip_work_queue_task(struct drm_flip_work *work,
++ struct drm_flip_task *task)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&work->lock, flags);
++ list_add_tail(&task->node, &work->queued);
++ spin_unlock_irqrestore(&work->lock, flags);
++}
++EXPORT_SYMBOL(drm_flip_work_queue_task);
++
++/**
+ * drm_flip_work_queue - queue work
+ * @work: the flip-work
+ * @val: the value to queue
+@@ -34,10 +72,14 @@
+ */
+ void drm_flip_work_queue(struct drm_flip_work *work, void *val)
+ {
+- if (kfifo_put(&work->fifo, val)) {
+- atomic_inc(&work->pending);
++ struct drm_flip_task *task;
++
++ task = drm_flip_work_allocate_task(val,
++ drm_can_sleep() ? GFP_KERNEL : GFP_ATOMIC);
++ if (task) {
++ drm_flip_work_queue_task(work, task);
+ } else {
+- DRM_ERROR("%s fifo full!\n", work->name);
++ DRM_ERROR("%s could not allocate task!\n", work->name);
+ work->func(work, val);
+ }
+ }
+@@ -56,9 +98,12 @@
+ void drm_flip_work_commit(struct drm_flip_work *work,
+ struct workqueue_struct *wq)
+ {
+- uint32_t pending = atomic_read(&work->pending);
+- atomic_add(pending, &work->count);
+- atomic_sub(pending, &work->pending);
++ unsigned long flags;
++
++ spin_lock_irqsave(&work->lock, flags);
++ list_splice_tail(&work->queued, &work->commited);
++ INIT_LIST_HEAD(&work->queued);
++ spin_unlock_irqrestore(&work->lock, flags);
+ queue_work(wq, &work->worker);
+ }
+ EXPORT_SYMBOL(drm_flip_work_commit);
+@@ -66,47 +111,46 @@
+ static void flip_worker(struct work_struct *w)
+ {
+ struct drm_flip_work *work = container_of(w, struct drm_flip_work, worker);
+- uint32_t count = atomic_read(&work->count);
+- void *val = NULL;
++ struct list_head tasks;
++ unsigned long flags;
+
+- atomic_sub(count, &work->count);
++ while (1) {
++ struct drm_flip_task *task, *tmp;
+
+- while(count--)
+- if (!WARN_ON(!kfifo_get(&work->fifo, &val)))
+- work->func(work, val);
++ INIT_LIST_HEAD(&tasks);
++ spin_lock_irqsave(&work->lock, flags);
++ list_splice_tail(&work->commited, &tasks);
++ INIT_LIST_HEAD(&work->commited);
++ spin_unlock_irqrestore(&work->lock, flags);
++
++ if (list_empty(&tasks))
++ break;
++
++ list_for_each_entry_safe(task, tmp, &tasks, node) {
++ work->func(work, task->data);
++ kfree(task);
++ }
++ }
+ }
+
+ /**
+ * drm_flip_work_init - initialize flip-work
+ * @work: the flip-work to initialize
+- * @size: the max queue depth
+ * @name: debug name
+ * @func: the callback work function
+ *
+ * Initializes/allocates resources for the flip-work
+- *
+- * RETURNS:
+- * Zero on success, error code on failure.
+ */
+-int drm_flip_work_init(struct drm_flip_work *work, int size,
++void drm_flip_work_init(struct drm_flip_work *work,
+ const char *name, drm_flip_func_t func)
+ {
+- int ret;
+-
+ work->name = name;
+- atomic_set(&work->count, 0);
+- atomic_set(&work->pending, 0);
++ INIT_LIST_HEAD(&work->queued);
++ INIT_LIST_HEAD(&work->commited);
++ spin_lock_init(&work->lock);
+ work->func = func;
+
+- ret = kfifo_alloc(&work->fifo, size, GFP_KERNEL);
+- if (ret) {
+- DRM_ERROR("could not allocate %s fifo\n", name);
+- return ret;
+- }
+-
+ INIT_WORK(&work->worker, flip_worker);
+-
+- return 0;
+ }
+ EXPORT_SYMBOL(drm_flip_work_init);
+
+@@ -118,7 +162,6 @@
+ */
+ void drm_flip_work_cleanup(struct drm_flip_work *work)
+ {
+- WARN_ON(!kfifo_is_empty(&work->fifo));
+- kfifo_free(&work->fifo);
++ WARN_ON(!list_empty(&work->queued) || !list_empty(&work->commited));
+ }
+ EXPORT_SYMBOL(drm_flip_work_cleanup);
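
A hedged usage sketch of the reworked flip-work API: drm_flip_work_init() no longer takes a queue depth, drm_flip_work_queue() may be called from atomic context (it falls back to GFP_ATOMIC), and the commit step batches tasks onto a workqueue. struct my_crtc and its callbacks are illustrative, not a real driver:

/* Hedged sketch: deferred framebuffer unref via flip-work. */
#include <drm/drmP.h>
#include <drm/drm_flip_work.h>

struct my_crtc {
	struct drm_crtc base;
	struct drm_flip_work unref_work;
};

static void unref_worker(struct drm_flip_work *work, void *val)
{
	drm_framebuffer_unreference(val);	/* runs in process context */
}

static void my_crtc_init_unref_work(struct my_crtc *c)
{
	drm_flip_work_init(&c->unref_work, "fb unref", unref_worker);
}

static void my_crtc_flip(struct my_crtc *c, struct drm_framebuffer *old_fb)
{
	/* safe under a spinlock thanks to the GFP_ATOMIC fallback */
	drm_flip_work_queue(&c->unref_work, old_fb);
}

static void my_crtc_vblank_irq(struct my_crtc *c)
{
	/* flip finished: hand the queued unrefs to a worker */
	drm_flip_work_commit(&c->unref_work, system_unbound_wq);
}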
+diff -Naur a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
+--- a/drivers/gpu/drm/drm_fops.c 2015-03-26 14:43:30.402436435 +0530
++++ b/drivers/gpu/drm/drm_fops.c 2015-03-26 14:42:38.722435422 +0530
+@@ -38,13 +38,13 @@
+ #include <linux/poll.h>
+ #include <linux/slab.h>
+ #include <linux/module.h>
++#include "drm_legacy.h"
++#include "drm_internal.h"
+
+-/* from BKL pushdown: note that nothing else serializes idr_find() */
++/* from BKL pushdown */
+ DEFINE_MUTEX(drm_global_mutex);
+-EXPORT_SYMBOL(drm_global_mutex);
+
+-static int drm_open_helper(struct inode *inode, struct file *filp,
+- struct drm_device * dev);
++static int drm_open_helper(struct file *filp, struct drm_minor *minor);
+
+ static int drm_setup(struct drm_device * dev)
+ {
+@@ -79,38 +79,23 @@
+ */
+ int drm_open(struct inode *inode, struct file *filp)
+ {
+- struct drm_device *dev = NULL;
+- int minor_id = iminor(inode);
++ struct drm_device *dev;
+ struct drm_minor *minor;
+- int retcode = 0;
++ int retcode;
+ int need_setup = 0;
+- struct address_space *old_mapping;
+- struct address_space *old_imapping;
+-
+- minor = idr_find(&drm_minors_idr, minor_id);
+- if (!minor)
+- return -ENODEV;
+-
+- if (!(dev = minor->dev))
+- return -ENODEV;
+
+- if (drm_device_is_unplugged(dev))
+- return -ENODEV;
++ minor = drm_minor_acquire(iminor(inode));
++ if (IS_ERR(minor))
++ return PTR_ERR(minor);
+
++ dev = minor->dev;
+ if (!dev->open_count++)
+ need_setup = 1;
+- mutex_lock(&dev->struct_mutex);
+- old_imapping = inode->i_mapping;
+- old_mapping = dev->dev_mapping;
+- if (old_mapping == NULL)
+- dev->dev_mapping = &inode->i_data;
+- /* ihold ensures nobody can remove inode with our i_data */
+- ihold(container_of(dev->dev_mapping, struct inode, i_data));
+- inode->i_mapping = dev->dev_mapping;
+- filp->f_mapping = dev->dev_mapping;
+- mutex_unlock(&dev->struct_mutex);
+
+- retcode = drm_open_helper(inode, filp, dev);
++ /* share address_space across all char-devs of a single device */
++ filp->f_mapping = dev->anon_inode->i_mapping;
++
++ retcode = drm_open_helper(filp, minor);
+ if (retcode)
+ goto err_undo;
+ if (need_setup) {
+@@ -121,70 +106,19 @@
+ return 0;
+
+ err_undo:
+- mutex_lock(&dev->struct_mutex);
+- filp->f_mapping = old_imapping;
+- inode->i_mapping = old_imapping;
+- iput(container_of(dev->dev_mapping, struct inode, i_data));
+- dev->dev_mapping = old_mapping;
+- mutex_unlock(&dev->struct_mutex);
+ dev->open_count--;
++ drm_minor_release(minor);
+ return retcode;
+ }
+ EXPORT_SYMBOL(drm_open);
+
+ /**
+- * File \c open operation.
+- *
+- * \param inode device inode.
+- * \param filp file pointer.
+- *
+- * Puts the dev->fops corresponding to the device minor number into
+- * \p filp, call the \c open method, and restore the file operations.
+- */
+-int drm_stub_open(struct inode *inode, struct file *filp)
+-{
+- struct drm_device *dev = NULL;
+- struct drm_minor *minor;
+- int minor_id = iminor(inode);
+- int err = -ENODEV;
+- const struct file_operations *new_fops;
+-
+- DRM_DEBUG("\n");
+-
+- mutex_lock(&drm_global_mutex);
+- minor = idr_find(&drm_minors_idr, minor_id);
+- if (!minor)
+- goto out;
+-
+- if (!(dev = minor->dev))
+- goto out;
+-
+- if (drm_device_is_unplugged(dev))
+- goto out;
+-
+- new_fops = fops_get(dev->driver->fops);
+- if (!new_fops)
+- goto out;
+-
+- replace_fops(filp, new_fops);
+- if (filp->f_op->open)
+- err = filp->f_op->open(inode, filp);
+-out:
+- mutex_unlock(&drm_global_mutex);
+- return err;
+-}
+-
+-/**
+ * Check whether DRI will run on this CPU.
+ *
+ * \return non-zero if the DRI will run on this CPU, or zero otherwise.
+ */
+ static int drm_cpu_valid(void)
+ {
+-#if defined(__i386__)
+- if (boot_cpu_data.x86 == 3)
+- return 0; /* No cmpxchg on a 386 */
+-#endif
+ #if defined(__sparc__) && !defined(__sparc_v9__)
+ return 0; /* No cmpxchg before v9 sparc. */
+ #endif
+@@ -194,18 +128,16 @@
+ /**
+ * Called whenever a process opens /dev/drm.
+ *
+- * \param inode device inode.
+ * \param filp file pointer.
+- * \param dev device.
++ * \param minor acquired minor-object.
+ * \return zero on success or a negative number on failure.
+ *
+ * Creates and initializes a drm_file structure for the file private data in \p
+ * filp and add it into the double linked list in \p dev.
+ */
+-static int drm_open_helper(struct inode *inode, struct file *filp,
+- struct drm_device * dev)
++static int drm_open_helper(struct file *filp, struct drm_minor *minor)
+ {
+- int minor_id = iminor(inode);
++ struct drm_device *dev = minor->dev;
+ struct drm_file *priv;
+ int ret;
+
+@@ -216,7 +148,7 @@
+ if (dev->switch_power_state != DRM_SWITCH_POWER_ON && dev->switch_power_state != DRM_SWITCH_POWER_DYNAMIC_OFF)
+ return -EINVAL;
+
+- DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor_id);
++ DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor->index);
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+@@ -226,15 +158,10 @@
+ priv->filp = filp;
+ priv->uid = current_euid();
+ priv->pid = get_pid(task_pid(current));
+- priv->minor = idr_find(&drm_minors_idr, minor_id);
+- if (!priv->minor) {
+- ret = -ENODEV;
+- goto out_put_pid;
+- }
++ priv->minor = minor;
+
+ /* for compatibility root is always authenticated */
+- priv->always_authenticated = capable(CAP_SYS_ADMIN);
+- priv->authenticated = priv->always_authenticated;
++ priv->authenticated = capable(CAP_SYS_ADMIN);
+ priv->lock_count = 0;
+
+ INIT_LIST_HEAD(&priv->lhead);
+@@ -244,7 +171,7 @@
+ init_waitqueue_head(&priv->event_wait);
+ priv->event_space = 4096; /* set aside 4k for event buffer */
+
+- if (dev->driver->driver_features & DRIVER_GEM)
++ if (drm_core_check_feature(dev, DRIVER_GEM))
+ drm_gem_open(dev, priv);
+
+ if (drm_core_check_feature(dev, DRIVER_PRIME))
+@@ -258,12 +185,11 @@
+
+ /* if there is no current master make this fd it, but do not create
+ * any master object for render clients */
+- mutex_lock(&dev->struct_mutex);
+- if (!priv->minor->master && !drm_is_render_client(priv)) {
++ mutex_lock(&dev->master_mutex);
++ if (drm_is_primary_client(priv) && !priv->minor->master) {
+ /* create a new master */
+ priv->minor->master = drm_master_create(priv->minor);
+ if (!priv->minor->master) {
+- mutex_unlock(&dev->struct_mutex);
+ ret = -ENOMEM;
+ goto out_close;
+ }
+@@ -271,37 +197,31 @@
+ priv->is_master = 1;
+ /* take another reference for the copy in the local file priv */
+ priv->master = drm_master_get(priv->minor->master);
+-
+ priv->authenticated = 1;
+
+- mutex_unlock(&dev->struct_mutex);
+ if (dev->driver->master_create) {
+ ret = dev->driver->master_create(dev, priv->master);
+ if (ret) {
+- mutex_lock(&dev->struct_mutex);
+ /* drop both references if this fails */
+ drm_master_put(&priv->minor->master);
+ drm_master_put(&priv->master);
+- mutex_unlock(&dev->struct_mutex);
+ goto out_close;
+ }
+ }
+- mutex_lock(&dev->struct_mutex);
+ if (dev->driver->master_set) {
+ ret = dev->driver->master_set(dev, priv, true);
+ if (ret) {
+ /* drop both references if this fails */
+ drm_master_put(&priv->minor->master);
+ drm_master_put(&priv->master);
+- mutex_unlock(&dev->struct_mutex);
+ goto out_close;
+ }
+ }
+- } else if (!drm_is_render_client(priv)) {
++ } else if (drm_is_primary_client(priv)) {
+ /* get a reference to the master */
+ priv->master = drm_master_get(priv->minor->master);
+ }
+- mutex_unlock(&dev->struct_mutex);
++ mutex_unlock(&dev->master_mutex);
+
+ mutex_lock(&dev->struct_mutex);
+ list_add(&priv->lhead, &dev->filelist);
+@@ -319,7 +239,8 @@
+ pci_dev_put(pci_dev);
+ }
+ if (!dev->hose) {
+- struct pci_bus *b = pci_bus_b(pci_root_buses.next);
++ struct pci_bus *b = list_entry(pci_root_buses.next,
++ struct pci_bus, node);
+ if (b)
+ dev->hose = b->sysdata;
+ }
+@@ -329,14 +250,14 @@
+ return 0;
+
+ out_close:
++ mutex_unlock(&dev->master_mutex);
+ if (dev->driver->postclose)
+ dev->driver->postclose(dev, priv);
+ out_prime_destroy:
+ if (drm_core_check_feature(dev, DRIVER_PRIME))
+ drm_prime_destroy_file_private(&priv->prime);
+- if (dev->driver->driver_features & DRIVER_GEM)
++ if (drm_core_check_feature(dev, DRIVER_GEM))
+ drm_gem_release(dev, priv);
+-out_put_pid:
+ put_pid(priv->pid);
+ kfree(priv);
+ filp->private_data = NULL;
+@@ -347,11 +268,11 @@
+ {
+ struct drm_file *file_priv = filp->private_data;
+
+- if (drm_i_have_hw_lock(dev, file_priv)) {
++ if (drm_legacy_i_have_hw_lock(dev, file_priv)) {
+ DRM_DEBUG("File %p released, freeing lock for context %d\n",
+ filp, _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
+- drm_lock_free(&file_priv->master->lock,
+- _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
++ drm_legacy_lock_free(&file_priv->master->lock,
++ _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
+ }
+ }
+
+@@ -409,8 +330,6 @@
+ */
+ int drm_lastclose(struct drm_device * dev)
+ {
+- struct drm_vma_entry *vma, *vma_temp;
+-
+ DRM_DEBUG("\n");
+
+ if (dev->driver->lastclose)
+@@ -425,16 +344,9 @@
+ drm_agp_clear(dev);
+
+ drm_legacy_sg_cleanup(dev);
+-
+- /* Clear vma list (only built for debugging) */
+- list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
+- list_del(&vma->head);
+- kfree(vma);
+- }
+-
++ drm_legacy_vma_flush(dev);
+ drm_legacy_dma_takedown(dev);
+
+- dev->dev_mapping = NULL;
+ mutex_unlock(&dev->struct_mutex);
+
+ drm_legacy_dev_reinit(dev);
+@@ -458,13 +370,18 @@
+ int drm_release(struct inode *inode, struct file *filp)
+ {
+ struct drm_file *file_priv = filp->private_data;
+- struct drm_device *dev = file_priv->minor->dev;
++ struct drm_minor *minor = file_priv->minor;
++ struct drm_device *dev = minor->dev;
+ int retcode = 0;
+
+ mutex_lock(&drm_global_mutex);
+
+ DRM_DEBUG("open_count = %d\n", dev->open_count);
+
++ mutex_lock(&dev->struct_mutex);
++ list_del(&file_priv->lhead);
++ mutex_unlock(&dev->struct_mutex);
++
+ if (dev->driver->preclose)
+ dev->driver->preclose(dev, file_priv);
+
+@@ -474,7 +391,7 @@
+
+ DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
+ task_pid_nr(current),
+- (long)old_encode_dev(file_priv->minor->device),
++ (long)old_encode_dev(file_priv->minor->kdev->devt),
+ dev->open_count);
+
+ /* Release any auth tokens that might point to this file_priv,
+@@ -487,52 +404,28 @@
+ drm_master_release(dev, filp);
+
+ if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
+- drm_core_reclaim_buffers(dev, file_priv);
++ drm_legacy_reclaim_buffers(dev, file_priv);
+
+ drm_events_release(file_priv);
+
+- if (dev->driver->driver_features & DRIVER_MODESET)
++ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ drm_fb_release(file_priv);
+
+- if (dev->driver->driver_features & DRIVER_GEM)
++ if (drm_core_check_feature(dev, DRIVER_GEM))
+ drm_gem_release(dev, file_priv);
+
+- mutex_lock(&dev->ctxlist_mutex);
+- if (!list_empty(&dev->ctxlist)) {
+- struct drm_ctx_list *pos, *n;
+-
+- list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
+- if (pos->tag == file_priv &&
+- pos->handle != DRM_KERNEL_CONTEXT) {
+- if (dev->driver->context_dtor)
+- dev->driver->context_dtor(dev,
+- pos->handle);
+-
+- drm_ctxbitmap_free(dev, pos->handle);
+-
+- list_del(&pos->head);
+- kfree(pos);
+- }
+- }
+- }
+- mutex_unlock(&dev->ctxlist_mutex);
++ drm_legacy_ctxbitmap_flush(dev, file_priv);
+
+- mutex_lock(&dev->struct_mutex);
++ mutex_lock(&dev->master_mutex);
+
+ if (file_priv->is_master) {
+ struct drm_master *master = file_priv->master;
+- struct drm_file *temp;
+- list_for_each_entry(temp, &dev->filelist, lhead) {
+- if ((temp->master == file_priv->master) &&
+- (temp != file_priv))
+- temp->authenticated = temp->always_authenticated;
+- }
+
+ /**
+ * Since the master is disappearing, so is the
+ * possibility to lock.
+ */
+-
++ mutex_lock(&dev->struct_mutex);
+ if (master->lock.hw_lock) {
+ if (dev->sigdata.lock == master->lock.hw_lock)
+ dev->sigdata.lock = NULL;
+@@ -540,6 +433,7 @@
+ master->lock.file_priv = NULL;
+ wake_up_interruptible_all(&master->lock.lock_queue);
+ }
++ mutex_unlock(&dev->struct_mutex);
+
+ if (file_priv->minor->master == file_priv->master) {
+ /* drop the reference held my the minor */
+@@ -549,15 +443,11 @@
+ }
+ }
+
+- BUG_ON(dev->dev_mapping == NULL);
+- iput(container_of(dev->dev_mapping, struct inode, i_data));
+-
+- /* drop the reference held my the file priv */
++ /* drop the master reference held by the file priv */
+ if (file_priv->master)
+ drm_master_put(&file_priv->master);
+ file_priv->is_master = 0;
+- list_del(&file_priv->lhead);
+- mutex_unlock(&dev->struct_mutex);
++ mutex_unlock(&dev->master_mutex);
+
+ if (dev->driver->postclose)
+ dev->driver->postclose(dev, file_priv);
+@@ -566,6 +456,8 @@
+ if (drm_core_check_feature(dev, DRIVER_PRIME))
+ drm_prime_destroy_file_private(&file_priv->prime);
+
++ WARN_ON(!list_empty(&file_priv->event_list));
++
+ put_pid(file_priv->pid);
+ kfree(file_priv);
+
+@@ -580,6 +472,8 @@
+ }
+ mutex_unlock(&drm_global_mutex);
+
++ drm_minor_release(minor);
++
+ return retcode;
+ }
+ EXPORT_SYMBOL(drm_release);
+@@ -621,16 +515,19 @@
+ size_t total;
+ ssize_t ret;
+
+- ret = wait_event_interruptible(file_priv->event_wait,
+- !list_empty(&file_priv->event_list));
+- if (ret < 0)
+- return ret;
++ if ((filp->f_flags & O_NONBLOCK) == 0) {
++ ret = wait_event_interruptible(file_priv->event_wait,
++ !list_empty(&file_priv->event_list));
++ if (ret < 0)
++ return ret;
++ }
+
+ total = 0;
+ while (drm_dequeue_event(file_priv, total, count, &e)) {
+ if (copy_to_user(buffer + total,
+ e->event, e->event->length)) {
+ total = -EFAULT;
++ e->destroy(e);
+ break;
+ }
+
+@@ -638,7 +535,7 @@
+ e->destroy(e);
+ }
+
+- return total;
++ return total ?: -EAGAIN;
+ }
+ EXPORT_SYMBOL(drm_read);
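
From userspace, the new O_NONBLOCK handling means an empty event queue yields EAGAIN instead of blocking, so a poll()-driven reader works. A hedged standalone sketch (the buffer size and timeout are arbitrary):

/* Hedged sketch: drain DRM events from a non-blocking fd. */
#include <errno.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

static void drain_drm_events(int drm_fd)
{
	char buf[1024];
	struct pollfd pfd = { .fd = drm_fd, .events = POLLIN };

	while (poll(&pfd, 1, 1000) > 0) {
		ssize_t n = read(drm_fd, buf, sizeof(buf));

		if (n < 0 && errno == EAGAIN)
			continue;	/* raced: queue already drained */
		if (n <= 0)
			break;
		printf("got %zd bytes of drm_event records\n", n);
	}
}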
+
+diff -Naur a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
+--- a/drivers/gpu/drm/drm_gem.c 2015-03-26 14:43:30.462436436 +0530
++++ b/drivers/gpu/drm/drm_gem.c 2015-03-26 14:42:38.726435422 +0530
+@@ -38,6 +38,8 @@
+ #include <linux/dma-buf.h>
+ #include <drm/drmP.h>
+ #include <drm/drm_vma_manager.h>
++#include <drm/drm_gem.h>
++#include "drm_internal.h"
+
+ /** @file drm_gem.c
+ *
+@@ -85,9 +87,9 @@
+ #endif
+
+ /**
+- * Initialize the GEM device fields
++ * drm_gem_init - Initialize the GEM device fields
++ * @dev: drm_device structure to initialize
+ */
+-
+ int
+ drm_gem_init(struct drm_device *dev)
+ {
+@@ -120,6 +122,11 @@
+ }
+
+ /**
++ * drm_gem_object_init - initialize an allocated shmem-backed GEM object
++ * @dev: drm_device the object should be initialized for
++ * @obj: drm_gem_object to initialize
++ * @size: object size
++ *
+ * Initialize an already allocated GEM object of the specified size with
+ * shmfs backing store.
+ */
+@@ -141,6 +148,11 @@
+ EXPORT_SYMBOL(drm_gem_object_init);
+
+ /**
++ * drm_gem_private_object_init - initialize an allocated private GEM object
++ * @dev: drm_device the object should be initialized for
++ * @obj: drm_gem_object to initialize
++ * @size: object size
++ *
+ * Initialize an already allocated GEM object of the specified size with
+ * no GEM provided backing store. Instead the caller is responsible for
+ * backing the object and handling it.
+@@ -176,6 +188,9 @@
+ }
+
+ /**
++ * drm_gem_object_handle_free - release resources bound to userspace handles
++ * @obj: GEM object to clean up.
++ *
+ * Called after the last handle to the object has been closed
+ *
+ * Removes any name for the object. Note that this must be
+@@ -225,7 +240,12 @@
+ }
+
+ /**
+- * Removes the mapping from handle to filp for this object.
++ * drm_gem_handle_delete - deletes the given file-private handle
++ * @filp: drm file-private structure to use for the handle look up
++ * @handle: userspace handle to delete
++ *
++ * Removes the GEM handle from the @filp lookup table and if this is the last
++ * handle also cleans up linked resources like GEM names.
+ */
+ int
+ drm_gem_handle_delete(struct drm_file *filp, u32 handle)
+@@ -270,6 +290,9 @@
+
+ /**
+ * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
++ * @file: drm file-private structure to remove the dumb handle from
++ * @dev: corresponding drm_device
++ * @handle: the dumb handle to remove
+ *
+ * This implements the ->dumb_destroy kms driver callback for drivers which use
+ * gem to manage their backing storage.
+@@ -284,6 +307,9 @@
+
+ /**
+ * drm_gem_handle_create_tail - internal functions to create a handle
++ * @file_priv: drm file-private structure to register the handle for
++ * @obj: object to register
++ * @handlep: pointer to return the created handle to the caller
+ *
+ * This expects the dev->object_name_lock to be held already and will drop it
+ * before returning. Used to avoid races in establishing new handles when
+@@ -336,14 +362,18 @@
+ }
+
+ /**
++ * drm_gem_handle_create - create a gem handle for an object
++ * @file_priv: drm file-private structure to register the handle for
++ * @obj: object to register
++ * @handlep: pointer to return the created handle to the caller
++ *
+ * Create a handle for this object. This adds a handle reference
+ * to the object, which includes a regular reference count. Callers
+ * will likely want to dereference the object afterwards.
+ */
+-int
+-drm_gem_handle_create(struct drm_file *file_priv,
+- struct drm_gem_object *obj,
+- u32 *handlep)
++int drm_gem_handle_create(struct drm_file *file_priv,
++ struct drm_gem_object *obj,
++ u32 *handlep)
+ {
+ mutex_lock(&obj->dev->object_name_lock);
+
+@@ -412,18 +442,31 @@
+ * drm_gem_get_pages - helper to allocate backing pages for a GEM object
+ * from shmem
+ * @obj: obj in question
+- * @gfpmask: gfp mask of requested pages
++ *
++ * This reads the page-array of the shmem-backing storage of the given gem
++ * object. An array of pages is returned. If a page is not allocated or
++ * swapped-out, this will allocate/swap-in the required pages. Note that the
++ * whole object is covered by the page-array and pinned in memory.
++ *
++ * Use drm_gem_put_pages() to release the array and unpin all pages.
++ *
++ * This uses the GFP-mask set on the shmem-mapping (see mapping_set_gfp_mask()).
++ * If you require other GFP-masks, you have to do those allocations yourself.
++ *
++ * Note that you are not allowed to change gfp-zones during runtime. That is,
++ * shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as
++ * set during initialization. If you have special zone constraints, set them
++ * after drm_gem_object_init() via mapping_set_gfp_mask(). shmem-core takes care
++ * to keep pages in the required zone during swap-in.
+ */
+-struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
++struct page **drm_gem_get_pages(struct drm_gem_object *obj)
+ {
+- struct inode *inode;
+ struct address_space *mapping;
+ struct page *p, **pages;
+ int i, npages;
+
+ /* This is the shared memory object that backs the GEM resource */
+- inode = file_inode(obj->filp);
+- mapping = inode->i_mapping;
++ mapping = file_inode(obj->filp)->i_mapping;
+
+ /* We already BUG_ON() for non-page-aligned sizes in
+ * drm_gem_object_init(), so we should never hit this unless
+@@ -437,31 +480,18 @@
+ if (pages == NULL)
+ return ERR_PTR(-ENOMEM);
+
+- gfpmask |= mapping_gfp_mask(mapping);
+-
+ for (i = 0; i < npages; i++) {
+- p = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
++ p = shmem_read_mapping_page(mapping, i);
+ if (IS_ERR(p))
+ goto fail;
+ pages[i] = p;
+
+- /* There is a hypothetical issue w/ drivers that require
+- * buffer memory in the low 4GB.. if the pages are un-
+- * pinned, and swapped out, they can end up swapped back
+- * in above 4GB. If pages are already in memory, then
+- * shmem_read_mapping_page_gfp will ignore the gfpmask,
+- * even if the already in-memory page disobeys the mask.
+- *
+- * It is only a theoretical issue today, because none of
+- * the devices with this limitation can be populated with
+- * enough memory to trigger the issue. But this BUG_ON()
+- * is here as a reminder in case the problem with
+- * shmem_read_mapping_page_gfp() isn't solved by the time
+- * it does become a real issue.
+- *
+- * See this thread: http://lkml.org/lkml/2011/7/11/238
++ /* Make sure shmem keeps __GFP_DMA32 allocated pages in the
++ * correct region during swapin. Note that this requires
++ * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
++ * so shmem can relocate pages during swapin if required.
+ */
+- BUG_ON((gfpmask & __GFP_DMA32) &&
++ BUG_ON((mapping_gfp_mask(mapping) & __GFP_DMA32) &&
+ (page_to_pfn(p) >= 0x00100000UL));
+ }
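++
++	/*
++	 * An illustrative caller-side sketch (hypothetical, not part of this
++	 * file): with the gfpmask argument gone, a driver that needs its
++	 * pages below 4GB sets the mask once on the shmem mapping and then
++	 * calls the helper:
++	 *
++	 *	mapping_set_gfp_mask(file_inode(obj->filp)->i_mapping,
++	 *			     GFP_USER | __GFP_DMA32);
++	 *	pages = drm_gem_get_pages(obj);
++	 *	if (IS_ERR(pages))
++	 *		return PTR_ERR(pages);
++	 */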
+
+@@ -536,6 +566,11 @@
+ EXPORT_SYMBOL(drm_gem_object_lookup);
+
+ /**
++ * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
++ * @dev: drm_device
++ * @data: ioctl data
++ * @file_priv: drm file-private structure
++ *
+ * Releases the handle to an mm object.
+ */
+ int
+@@ -545,7 +580,7 @@
+ struct drm_gem_close *args = data;
+ int ret;
+
+- if (!(dev->driver->driver_features & DRIVER_GEM))
++ if (!drm_core_check_feature(dev, DRIVER_GEM))
+ return -ENODEV;
+
+ ret = drm_gem_handle_delete(file_priv, args->handle);
+@@ -554,6 +589,11 @@
+ }
+
+ /**
++ * drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl
++ * @dev: drm_device
++ * @data: ioctl data
++ * @file_priv: drm file-private structure
++ *
+ * Create a global name for an object, returning the name.
+ *
+ * Note that the name does not hold a reference; when the object
+@@ -567,7 +607,7 @@
+ struct drm_gem_object *obj;
+ int ret;
+
+- if (!(dev->driver->driver_features & DRIVER_GEM))
++ if (!drm_core_check_feature(dev, DRIVER_GEM))
+ return -ENODEV;
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+@@ -601,6 +641,11 @@
+ }
+
+ /**
++ * drm_gem_open - implementation of the GEM_OPEN ioctl
++ * @dev: drm_device
++ * @data: ioctl data
++ * @file_priv: drm file-private structure
++ *
+ * Open an object using the global name, returning a handle and the size.
+ *
+ * This handle (of course) holds a reference to the object, so the object
+@@ -615,7 +660,7 @@
+ int ret;
+ u32 handle;
+
+- if (!(dev->driver->driver_features & DRIVER_GEM))
++ if (!drm_core_check_feature(dev, DRIVER_GEM))
+ return -ENODEV;
+
+ mutex_lock(&dev->object_name_lock);
+@@ -640,6 +685,10 @@
+ }
+
+ /**
++ * drm_gem_open - initializes GEM file-private structures at devnode open time
++ * @dev: drm_device which is being opened by userspace
++ * @file_private: drm file-private structure to set up
++ *
+ * Called at device open time, sets up the structure for handling refcounting
+ * of mm objects.
+ */
+@@ -650,7 +699,7 @@
+ spin_lock_init(&file_private->table_lock);
+ }
+
+-/**
++/*
+ * Called at device close to release the file's
+ * handle references on objects.
+ */
+@@ -674,6 +723,10 @@
+ }
+
+ /**
++ * drm_gem_release - release file-private GEM resources
++ * @dev: drm_device which is being closed by userspace
++ * @file_private: drm file-private structure to clean up
++ *
+ * Called at close time when the filp is going away.
+ *
+ * Releases any remaining references on objects by this filp.
+@@ -692,11 +745,16 @@
+ WARN_ON(obj->dma_buf);
+
+ if (obj->filp)
+- fput(obj->filp);
++ fput(obj->filp);
++
++ drm_gem_free_mmap_offset(obj);
+ }
+ EXPORT_SYMBOL(drm_gem_object_release);
+
+ /**
++ * drm_gem_object_free - free a GEM object
++ * @kref: kref of the object to free
++ *
+ * Called after the last reference to the object has been lost.
+ * Must be called holding struct_mutex
+ *
+@@ -782,7 +840,7 @@
+ vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+ vma->vm_ops = dev->driver->gem_vm_ops;
+ vma->vm_private_data = obj;
+- vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
++ vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+
+ /* Take a ref for this mapping of the object, so that the fault
+ * handler can dereference the mmap offset's pointer to the object.
+@@ -818,7 +876,7 @@
+ struct drm_device *dev = priv->minor->dev;
+ struct drm_gem_object *obj;
+ struct drm_vma_offset_node *node;
+- int ret = 0;
++ int ret;
+
+ if (drm_device_is_unplugged(dev))
+ return -ENODEV;
+@@ -830,7 +888,7 @@
+ vma_pages(vma));
+ if (!node) {
+ mutex_unlock(&dev->struct_mutex);
+- return drm_mmap(filp, vma);
++ return -EINVAL;
+ } else if (!drm_vma_node_is_allowed(node, filp)) {
+ mutex_unlock(&dev->struct_mutex);
+ return -EACCES;
+diff -Naur a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
+--- a/drivers/gpu/drm/drm_gem_cma_helper.c 2015-03-26 14:43:30.418436435 +0530
++++ b/drivers/gpu/drm/drm_gem_cma_helper.c 2015-03-26 14:42:38.726435422 +0530
+@@ -29,18 +29,31 @@
+ #include <drm/drm_gem_cma_helper.h>
+ #include <drm/drm_vma_manager.h>
+
+-/*
++/**
++ * DOC: cma helpers
++ *
++ * The Contiguous Memory Allocator reserves a pool of memory at early boot
++ * that is used to service requests for large blocks of contiguous memory.
++ *
++ * The DRM GEM/CMA helpers use this allocator as a means to provide buffer
++ * objects that are physically contiguous in memory. This is useful for
++ * display drivers that are unable to map scattered buffers via an IOMMU.
++ */
++
++/**
+ * __drm_gem_cma_create - Create a GEM CMA object without allocating memory
+- * @drm: The drm device
+- * @size: The GEM object size
++ * @drm: DRM device
++ * @size: size of the object to allocate
+ *
+- * This function creates and initializes a GEM CMA object of the given size, but
+- * doesn't allocate any memory to back the object.
++ * This function creates and initializes a GEM CMA object of the given size,
++ * but doesn't allocate any memory to back the object.
+ *
+- * Return a struct drm_gem_cma_object* on success or ERR_PTR values on failure.
++ * Returns:
++ * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
++ * error code on failure.
+ */
+ static struct drm_gem_cma_object *
+-__drm_gem_cma_create(struct drm_device *drm, unsigned int size)
++__drm_gem_cma_create(struct drm_device *drm, size_t size)
+ {
+ struct drm_gem_cma_object *cma_obj;
+ struct drm_gem_object *gem_obj;
+@@ -69,17 +82,23 @@
+ return ERR_PTR(ret);
+ }
+
+-/*
++/**
+ * drm_gem_cma_create - allocate an object with the given size
++ * @drm: DRM device
++ * @size: size of the object to allocate
+ *
+- * returns a struct drm_gem_cma_object* on success or ERR_PTR values
+- * on failure.
++ * This function creates a CMA GEM object and allocates a contiguous chunk of
++ * memory as backing store. The backing memory has the writecombine attribute
++ * set.
++ *
++ * Returns:
++ * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
++ * error code on failure.
+ */
+ struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
+- unsigned int size)
++ size_t size)
+ {
+ struct drm_gem_cma_object *cma_obj;
+- struct sg_table *sgt = NULL;
+ int ret;
+
+ size = round_up(size, PAGE_SIZE);
+@@ -97,39 +116,34 @@
+ goto error;
+ }
+
+- sgt = kzalloc(sizeof(*cma_obj->sgt), GFP_KERNEL);
+- if (sgt == NULL) {
+- ret = -ENOMEM;
+- goto error;
+- }
+-
+- ret = dma_get_sgtable(drm->dev, sgt, cma_obj->vaddr,
+- cma_obj->paddr, size);
+- if (ret < 0)
+- goto error;
+-
+- cma_obj->sgt = sgt;
+-
+ return cma_obj;
+
+ error:
+- kfree(sgt);
+ drm_gem_cma_free_object(&cma_obj->base);
+ return ERR_PTR(ret);
+ }
+ EXPORT_SYMBOL_GPL(drm_gem_cma_create);
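++/*
++ * An illustrative caller-side sketch (hypothetical, not part of this
++ * file): allocating a contiguous scanout buffer with the helper above:
++ *
++ *	struct drm_gem_cma_object *cma_obj;
++ *
++ *	cma_obj = drm_gem_cma_create(drm, size);
++ *	if (IS_ERR(cma_obj))
++ *		return PTR_ERR(cma_obj);
++ *	// cma_obj->vaddr: kernel mapping, cma_obj->paddr: bus address
++ */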
+
+-/*
+- * drm_gem_cma_create_with_handle - allocate an object with the given
+- * size and create a gem handle on it
+- *
+- * returns a struct drm_gem_cma_object* on success or ERR_PTR values
+- * on failure.
+- */
+-static struct drm_gem_cma_object *drm_gem_cma_create_with_handle(
+- struct drm_file *file_priv,
+- struct drm_device *drm, unsigned int size,
+- unsigned int *handle)
++/**
++ * drm_gem_cma_create_with_handle - allocate an object with the given size and
++ * return a GEM handle to it
++ * @file_priv: DRM file-private structure to register the handle for
++ * @drm: DRM device
++ * @size: size of the object to allocate
++ * @handle: return location for the GEM handle
++ *
++ * This function creates a CMA GEM object, allocating a physically contiguous
++ * chunk of memory as backing store. The GEM object is then added to the list
++ * of objects associated with the given file and a handle to it is returned.
++ *
++ * Returns:
++ * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
++ * error code on failure.
++ */
++static struct drm_gem_cma_object *
++drm_gem_cma_create_with_handle(struct drm_file *file_priv,
++ struct drm_device *drm, size_t size,
++ uint32_t *handle)
+ {
+ struct drm_gem_cma_object *cma_obj;
+ struct drm_gem_object *gem_obj;
+@@ -160,25 +174,24 @@
+ return ERR_PTR(ret);
+ }
+
+-/*
+- * drm_gem_cma_free_object - (struct drm_driver)->gem_free_object callback
+- * function
++/**
++ * drm_gem_cma_free_object - free resources associated with a CMA GEM object
++ * @gem_obj: GEM object to free
++ *
++ * This function frees the backing memory of the CMA GEM object, cleans up the
++ * GEM object state and frees the memory used to store the object itself.
++ * Drivers using the CMA helpers should set this as their DRM driver's
++ * ->gem_free_object() callback.
+ */
+ void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
+ {
+ struct drm_gem_cma_object *cma_obj;
+
+- drm_gem_free_mmap_offset(gem_obj);
+-
+ cma_obj = to_drm_gem_cma_obj(gem_obj);
+
+ if (cma_obj->vaddr) {
+ dma_free_writecombine(gem_obj->dev->dev, cma_obj->base.size,
+ cma_obj->vaddr, cma_obj->paddr);
+- if (cma_obj->sgt) {
+- sg_free_table(cma_obj->sgt);
+- kfree(cma_obj->sgt);
+- }
+ } else if (gem_obj->import_attach) {
+ drm_prime_gem_destroy(gem_obj, cma_obj->sgt);
+ }
+@@ -189,18 +202,26 @@
+ }
+ EXPORT_SYMBOL_GPL(drm_gem_cma_free_object);
+
+-/*
+- * drm_gem_cma_dumb_create - (struct drm_driver)->dumb_create callback
+- * function
++/**
++ * drm_gem_cma_dumb_create_internal - create a dumb buffer object
++ * @file_priv: DRM file-private structure to create the dumb buffer for
++ * @drm: DRM device
++ * @args: IOCTL data
++ *
++ * This aligns the pitch and size arguments to the minimum required. This is
++ * an internal helper that can be wrapped by a driver to account for hardware
++ * with more specific alignment requirements. It should not be used directly
++ * as the ->dumb_create() callback in a DRM driver.
+ *
+- * This aligns the pitch and size arguments to the minimum required. wrap
+- * this into your own function if you need bigger alignment.
++ * Returns:
++ * 0 on success or a negative error code on failure.
+ */
+-int drm_gem_cma_dumb_create(struct drm_file *file_priv,
+- struct drm_device *dev, struct drm_mode_create_dumb *args)
++int drm_gem_cma_dumb_create_internal(struct drm_file *file_priv,
++ struct drm_device *drm,
++ struct drm_mode_create_dumb *args)
+ {
++ unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
+ struct drm_gem_cma_object *cma_obj;
+- int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
+
+ if (args->pitch < min_pitch)
+ args->pitch = min_pitch;
+@@ -208,18 +229,63 @@
+ if (args->size < args->pitch * args->height)
+ args->size = args->pitch * args->height;
+
+- cma_obj = drm_gem_cma_create_with_handle(file_priv, dev,
+- args->size, &args->handle);
++ cma_obj = drm_gem_cma_create_with_handle(file_priv, drm, args->size,
++ &args->handle);
++ return PTR_ERR_OR_ZERO(cma_obj);
++}
++EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create_internal);
++
++/**
++ * drm_gem_cma_dumb_create - create a dumb buffer object
++ * @file_priv: DRM file-private structure to create the dumb buffer for
++ * @drm: DRM device
++ * @args: IOCTL data
++ *
++ * This function computes the pitch of the dumb buffer and rounds it up to an
++ * integer number of bytes per pixel. Drivers for hardware that doesn't have
++ * any additional restrictions on the pitch can directly use this function as
++ * their ->dumb_create() callback.
++ *
++ * For hardware with additional restrictions, drivers can adjust the fields
++ * set up by userspace and pass the IOCTL data along to the
++ * drm_gem_cma_dumb_create_internal() function.
++ *
++ * Returns:
++ * 0 on success or a negative error code on failure.
++ */
++int drm_gem_cma_dumb_create(struct drm_file *file_priv,
++ struct drm_device *drm,
++ struct drm_mode_create_dumb *args)
++{
++ struct drm_gem_cma_object *cma_obj;
++
++ args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
++ args->size = args->pitch * args->height;
++
++ cma_obj = drm_gem_cma_create_with_handle(file_priv, drm, args->size,
++ &args->handle);
+ return PTR_ERR_OR_ZERO(cma_obj);
+ }
+ EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create);
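++/*
++ * An illustrative wrapper sketch (hypothetical "foo" driver): hardware
++ * that wants, say, a 64-byte aligned pitch adjusts the arguments and then
++ * defers to the internal helper above:
++ *
++ *	static int foo_dumb_create(struct drm_file *file_priv,
++ *				   struct drm_device *drm,
++ *				   struct drm_mode_create_dumb *args)
++ *	{
++ *		args->pitch = ALIGN(DIV_ROUND_UP(args->width * args->bpp, 8), 64);
++ *		return drm_gem_cma_dumb_create_internal(file_priv, drm, args);
++ *	}
++ */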
+
+-/*
+- * drm_gem_cma_dumb_map_offset - (struct drm_driver)->dumb_map_offset callback
+- * function
++/**
++ * drm_gem_cma_dumb_map_offset - return the fake mmap offset for a CMA GEM
++ * object
++ * @file_priv: DRM file-private structure containing the GEM object
++ * @drm: DRM device
++ * @handle: GEM object handle
++ * @offset: return location for the fake mmap offset
++ *
++ * This function looks up an object by its handle and returns the fake mmap
++ * offset associated with it. Drivers using the CMA helpers should set this
++ * as their DRM driver's ->dumb_map_offset() callback.
++ *
++ * Returns:
++ * 0 on success or a negative error code on failure.
+ */
+ int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv,
+- struct drm_device *drm, uint32_t handle, uint64_t *offset)
++ struct drm_device *drm, u32 handle,
++ u64 *offset)
+ {
+ struct drm_gem_object *gem_obj;
+
+@@ -227,7 +293,7 @@
+
+ gem_obj = drm_gem_object_lookup(drm, file_priv, handle);
+ if (!gem_obj) {
+- dev_err(drm->dev, "failed to lookup gem object\n");
++ dev_err(drm->dev, "failed to lookup GEM object\n");
+ mutex_unlock(&drm->struct_mutex);
+ return -EINVAL;
+ }
+@@ -253,16 +319,37 @@
+ {
+ int ret;
+
+- ret = remap_pfn_range(vma, vma->vm_start, cma_obj->paddr >> PAGE_SHIFT,
+- vma->vm_end - vma->vm_start, vma->vm_page_prot);
++ /*
++ * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
++ * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
++ * the whole buffer.
++ */
++ vma->vm_flags &= ~VM_PFNMAP;
++ vma->vm_pgoff = 0;
++
++ ret = dma_mmap_writecombine(cma_obj->base.dev->dev, vma,
++ cma_obj->vaddr, cma_obj->paddr,
++ vma->vm_end - vma->vm_start);
+ if (ret)
+ drm_gem_vm_close(vma);
+
+ return ret;
+ }
+
+-/*
+- * drm_gem_cma_mmap - (struct file_operation)->mmap callback function
++/**
++ * drm_gem_cma_mmap - memory-map a CMA GEM object
++ * @filp: file object
++ * @vma: VMA for the area to be mapped
++ *
++ * This function implements an augmented version of the GEM DRM file mmap
++ * operation for CMA objects: In addition to the usual GEM VMA setup it
++ * immediately faults in the entire object instead of using on-demand
++ * faulting. Drivers which employ the CMA helpers should use this function
++ * as their ->mmap() handler in the DRM device file's file_operations
++ * structure.
++ *
++ * Returns:
++ * 0 on success or a negative error code on failure.
+ */
+ int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma)
+ {
+@@ -282,7 +369,16 @@
+ EXPORT_SYMBOL_GPL(drm_gem_cma_mmap);
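++/*
++ * An illustrative sketch (hypothetical "foo" driver) of wiring the
++ * handler above into the device's file_operations in place of the
++ * generic drm_gem_mmap():
++ *
++ *	static const struct file_operations foo_fops = {
++ *		.owner		= THIS_MODULE,
++ *		.open		= drm_open,
++ *		.release	= drm_release,
++ *		.unlocked_ioctl	= drm_ioctl,
++ *		.mmap		= drm_gem_cma_mmap,
++ *	};
++ */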
+
+ #ifdef CONFIG_DEBUG_FS
+-void drm_gem_cma_describe(struct drm_gem_cma_object *cma_obj, struct seq_file *m)
++/**
++ * drm_gem_cma_describe - describe a CMA GEM object for debugfs
++ * @cma_obj: CMA GEM object
++ * @m: debugfs file handle
++ *
++ * This function can be used to dump a human-readable representation of the
++ * CMA GEM object into a synthetic file.
++ */
++void drm_gem_cma_describe(struct drm_gem_cma_object *cma_obj,
++ struct seq_file *m)
+ {
+ struct drm_gem_object *obj = &cma_obj->base;
+ struct drm_device *dev = obj->dev;
+@@ -292,16 +388,27 @@
+
+ off = drm_vma_node_start(&obj->vma_node);
+
+- seq_printf(m, "%2d (%2d) %08llx %08Zx %p %d",
++ seq_printf(m, "%2d (%2d) %08llx %pad %p %d",
+ obj->name, obj->refcount.refcount.counter,
+- off, cma_obj->paddr, cma_obj->vaddr, obj->size);
++ off, &cma_obj->paddr, cma_obj->vaddr, obj->size);
+
+ seq_printf(m, "\n");
+ }
+ EXPORT_SYMBOL_GPL(drm_gem_cma_describe);
+ #endif
+
+-/* low-level interface prime helpers */
++/**
++ * drm_gem_cma_prime_get_sg_table - provide a scatter/gather table of pinned
++ * pages for a CMA GEM object
++ * @obj: GEM object
++ *
++ * This function exports a scatter/gather table suitable for PRIME usage by
++ * calling the standard DMA mapping API. Drivers using the CMA helpers should
++ * set this as their DRM driver's ->gem_prime_get_sg_table() callback.
++ *
++ * Returns:
++ * A pointer to the scatter/gather table of pinned pages or NULL on failure.
++ */
+ struct sg_table *drm_gem_cma_prime_get_sg_table(struct drm_gem_object *obj)
+ {
+ struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
+@@ -325,8 +432,26 @@
+ }
+ EXPORT_SYMBOL_GPL(drm_gem_cma_prime_get_sg_table);
+
++/**
++ * drm_gem_cma_prime_import_sg_table - produce a CMA GEM object from another
++ * driver's scatter/gather table of pinned pages
++ * @dev: device to import into
++ * @attach: DMA-BUF attachment
++ * @sgt: scatter/gather table of pinned pages
++ *
++ * This function imports a scatter/gather table exported via DMA-BUF by
++ * another driver. Imported buffers must be physically contiguous in memory
++ * (i.e. the scatter/gather table must contain a single entry). Drivers that
++ * use the CMA helpers should set this as their DRM driver's
++ * ->gem_prime_import_sg_table() callback.
++ *
++ * Returns:
++ * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
++ * error code on failure.
++ */
+ struct drm_gem_object *
+-drm_gem_cma_prime_import_sg_table(struct drm_device *dev, size_t size,
++drm_gem_cma_prime_import_sg_table(struct drm_device *dev,
++ struct dma_buf_attachment *attach,
+ struct sg_table *sgt)
+ {
+ struct drm_gem_cma_object *cma_obj;
+@@ -335,19 +460,31 @@
+ return ERR_PTR(-EINVAL);
+
+ /* Create a CMA GEM buffer. */
+- cma_obj = __drm_gem_cma_create(dev, size);
++ cma_obj = __drm_gem_cma_create(dev, attach->dmabuf->size);
+ if (IS_ERR(cma_obj))
+- return ERR_PTR(PTR_ERR(cma_obj));
++ return ERR_CAST(cma_obj);
+
+ cma_obj->paddr = sg_dma_address(sgt->sgl);
+ cma_obj->sgt = sgt;
+
+- DRM_DEBUG_PRIME("dma_addr = 0x%x, size = %zu\n", cma_obj->paddr, size);
++ DRM_DEBUG_PRIME("dma_addr = %pad, size = %zu\n", &cma_obj->paddr, attach->dmabuf->size);
+
+ return &cma_obj->base;
+ }
+ EXPORT_SYMBOL_GPL(drm_gem_cma_prime_import_sg_table);
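++/*
++ * An illustrative sketch (hypothetical "foo" driver): the PRIME helpers
++ * in this file are normally wired up together in struct drm_driver:
++ *
++ *	.gem_prime_get_sg_table    = drm_gem_cma_prime_get_sg_table,
++ *	.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
++ *	.gem_prime_vmap            = drm_gem_cma_prime_vmap,
++ *	.gem_prime_vunmap          = drm_gem_cma_prime_vunmap,
++ *	.gem_prime_mmap            = drm_gem_cma_prime_mmap,
++ */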
+
++/**
++ * drm_gem_cma_prime_mmap - memory-map an exported CMA GEM object
++ * @obj: GEM object
++ * @vma: VMA for the area to be mapped
++ *
++ * This function maps a buffer imported via DRM PRIME into a userspace
++ * process's address space. Drivers that use the CMA helpers should set this
++ * as their DRM driver's ->gem_prime_mmap() callback.
++ *
++ * Returns:
++ * 0 on success or a negative error code on failure.
++ */
+ int drm_gem_cma_prime_mmap(struct drm_gem_object *obj,
+ struct vm_area_struct *vma)
+ {
+@@ -366,6 +503,20 @@
+ }
+ EXPORT_SYMBOL_GPL(drm_gem_cma_prime_mmap);
+
++/**
++ * drm_gem_cma_prime_vmap - map a CMA GEM object into the kernel's virtual
++ * address space
++ * @obj: GEM object
++ *
++ * This function maps a buffer exported via DRM PRIME into the kernel's
++ * virtual address space. Since the CMA buffers are already mapped into the
++ * kernel virtual address space this simply returns the cached virtual
++ * address. Drivers using the CMA helpers should set this as their DRM
++ * driver's ->gem_prime_vmap() callback.
++ *
++ * Returns:
++ * The kernel virtual address of the CMA GEM object's backing store.
++ */
+ void *drm_gem_cma_prime_vmap(struct drm_gem_object *obj)
+ {
+ struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
+@@ -374,6 +525,17 @@
+ }
+ EXPORT_SYMBOL_GPL(drm_gem_cma_prime_vmap);
+
++/**
++ * drm_gem_cma_prime_vunmap - unmap a CMA GEM object from the kernel's virtual
++ * address space
++ * @obj: GEM object
++ * @vaddr: kernel virtual address where the CMA GEM object was mapped
++ *
++ * This function removes a buffer exported via DRM PRIME from the kernel's
++ * virtual address space. This is a no-op because CMA buffers cannot be
++ * unmapped from kernel space. Drivers using the CMA helpers should set this
++ * as their DRM driver's ->gem_prime_vunmap() callback.
++ */
+ void drm_gem_cma_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
+ {
+ /* Nothing to do */
+diff -Naur a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
+--- a/drivers/gpu/drm/drm_info.c 2015-03-26 14:43:30.490436437 +0530
++++ b/drivers/gpu/drm/drm_info.c 2015-03-26 14:42:38.726435422 +0530
+@@ -35,6 +35,9 @@
+
+ #include <linux/seq_file.h>
+ #include <drm/drmP.h>
++#include <drm/drm_gem.h>
++
++#include "drm_legacy.h"
+
+ /**
+ * Called when "/proc/dri/.../name" is read.
+@@ -47,18 +50,16 @@
+ struct drm_minor *minor = node->minor;
+ struct drm_device *dev = minor->dev;
+ struct drm_master *master = minor->master;
+- const char *bus_name;
+ if (!master)
+ return 0;
+
+- bus_name = dev->driver->bus->get_name(dev);
+ if (master->unique) {
+ seq_printf(m, "%s %s %s\n",
+- bus_name,
++ dev->driver->name,
+ dev_name(dev->dev), master->unique);
+ } else {
+ seq_printf(m, "%s %s\n",
+- bus_name, dev_name(dev->dev));
++ dev->driver->name, dev_name(dev->dev));
+ }
+ return 0;
+ }
+@@ -134,7 +135,7 @@
+ i,
+ dma->bufs[i].buf_size,
+ dma->bufs[i].buf_count,
+- atomic_read(&dma->bufs[i].freelist.count),
++ 0,
+ dma->bufs[i].seg_count,
+ seg_pages,
+ seg_pages * PAGE_SIZE / 1024);
+@@ -185,15 +186,32 @@
+ struct drm_device *dev = node->minor->dev;
+ struct drm_file *priv;
+
++ seq_printf(m,
++ "%20s %5s %3s master a %5s %10s\n",
++ "command",
++ "pid",
++ "dev",
++ "uid",
++ "magic");
++
++ /* dev->filelist is sorted youngest first, but we want to present
++ * oldest first (i.e. kernel, servers, clients), so walk backwards.
++ */
+ mutex_lock(&dev->struct_mutex);
+- seq_printf(m, "a dev pid uid magic\n\n");
+- list_for_each_entry(priv, &dev->filelist, lhead) {
+- seq_printf(m, "%c %3d %5d %5d %10u\n",
+- priv->authenticated ? 'y' : 'n',
+- priv->minor->index,
++ list_for_each_entry_reverse(priv, &dev->filelist, lhead) {
++ struct task_struct *task;
++
++ rcu_read_lock(); /* locks pid_task()->comm */
++ task = pid_task(priv->pid, PIDTYPE_PID);
++ seq_printf(m, "%20s %5d %3d %c %c %5d %10u\n",
++ task ? task->comm : "<unknown>",
+ pid_vnr(priv->pid),
++ priv->minor->index,
++ priv->is_master ? 'y' : 'n',
++ priv->authenticated ? 'y' : 'n',
+ from_kuid_munged(seq_user_ns(m), priv->uid),
+ priv->magic);
++ rcu_read_unlock();
+ }
+ mutex_unlock(&dev->struct_mutex);
+ return 0;
+@@ -225,62 +243,3 @@
+
+ return 0;
+ }
+-
+-#if DRM_DEBUG_CODE
+-
+-int drm_vma_info(struct seq_file *m, void *data)
+-{
+- struct drm_info_node *node = (struct drm_info_node *) m->private;
+- struct drm_device *dev = node->minor->dev;
+- struct drm_vma_entry *pt;
+- struct vm_area_struct *vma;
+- unsigned long vma_count = 0;
+-#if defined(__i386__)
+- unsigned int pgprot;
+-#endif
+-
+- mutex_lock(&dev->struct_mutex);
+- list_for_each_entry(pt, &dev->vmalist, head)
+- vma_count++;
+-
+- seq_printf(m, "vma use count: %lu, high_memory = %pK, 0x%pK\n",
+- vma_count, high_memory,
+- (void *)(unsigned long)virt_to_phys(high_memory));
+-
+- list_for_each_entry(pt, &dev->vmalist, head) {
+- vma = pt->vma;
+- if (!vma)
+- continue;
+- seq_printf(m,
+- "\n%5d 0x%pK-0x%pK %c%c%c%c%c%c 0x%08lx000",
+- pt->pid,
+- (void *)vma->vm_start, (void *)vma->vm_end,
+- vma->vm_flags & VM_READ ? 'r' : '-',
+- vma->vm_flags & VM_WRITE ? 'w' : '-',
+- vma->vm_flags & VM_EXEC ? 'x' : '-',
+- vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
+- vma->vm_flags & VM_LOCKED ? 'l' : '-',
+- vma->vm_flags & VM_IO ? 'i' : '-',
+- vma->vm_pgoff);
+-
+-#if defined(__i386__)
+- pgprot = pgprot_val(vma->vm_page_prot);
+- seq_printf(m, " %c%c%c%c%c%c%c%c%c",
+- pgprot & _PAGE_PRESENT ? 'p' : '-',
+- pgprot & _PAGE_RW ? 'w' : 'r',
+- pgprot & _PAGE_USER ? 'u' : 's',
+- pgprot & _PAGE_PWT ? 't' : 'b',
+- pgprot & _PAGE_PCD ? 'u' : 'c',
+- pgprot & _PAGE_ACCESSED ? 'a' : '-',
+- pgprot & _PAGE_DIRTY ? 'd' : '-',
+- pgprot & _PAGE_PSE ? 'm' : 'k',
+- pgprot & _PAGE_GLOBAL ? 'g' : 'l');
+-#endif
+- seq_printf(m, "\n");
+- }
+- mutex_unlock(&dev->struct_mutex);
+- return 0;
+-}
+-
+-#endif
+-
+diff -Naur a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
+--- a/drivers/gpu/drm/drm_internal.h 1970-01-01 05:30:00.000000000 +0530
++++ b/drivers/gpu/drm/drm_internal.h 2015-03-26 14:42:38.726435422 +0530
+@@ -0,0 +1,132 @@
++/*
++ * Copyright © 2014 Intel Corporation
++ * Daniel Vetter <daniel.vetter@ffwll.ch>
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++/* drm_irq.c */
++extern unsigned int drm_timestamp_monotonic;
++
++/* drm_fops.c */
++extern struct mutex drm_global_mutex;
++int drm_lastclose(struct drm_device *dev);
++
++/* drm_pci.c */
++int drm_pci_set_unique(struct drm_device *dev,
++ struct drm_master *master,
++ struct drm_unique *u);
++int drm_irq_by_busid(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++
++/* drm_vm.c */
++int drm_vma_info(struct seq_file *m, void *data);
++void drm_vm_open_locked(struct drm_device *dev, struct vm_area_struct *vma);
++void drm_vm_close_locked(struct drm_device *dev, struct vm_area_struct *vma);
++
++/* drm_prime.c */
++int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++
++void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv);
++void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv);
++void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
++ struct dma_buf *dma_buf);
++
++/* drm_info.c */
++int drm_name_info(struct seq_file *m, void *data);
++int drm_vm_info(struct seq_file *m, void *data);
++int drm_bufs_info(struct seq_file *m, void *data);
++int drm_vblank_info(struct seq_file *m, void *data);
++int drm_clients_info(struct seq_file *m, void* data);
++int drm_gem_name_info(struct seq_file *m, void *data);
++
++/* drm_irq.c */
++int drm_control(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++int drm_modeset_ctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++
++/* drm_auth.c */
++int drm_getmagic(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++int drm_authmagic(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++int drm_remove_magic(struct drm_master *master, drm_magic_t magic);
++
++/* drm_sysfs.c */
++extern struct class *drm_class;
++
++struct class *drm_sysfs_create(struct module *owner, char *name);
++void drm_sysfs_destroy(void);
++struct device *drm_sysfs_minor_alloc(struct drm_minor *minor);
++int drm_sysfs_connector_add(struct drm_connector *connector);
++void drm_sysfs_connector_remove(struct drm_connector *connector);
++
++/* drm_gem.c */
++int drm_gem_init(struct drm_device *dev);
++void drm_gem_destroy(struct drm_device *dev);
++int drm_gem_handle_create_tail(struct drm_file *file_priv,
++ struct drm_gem_object *obj,
++ u32 *handlep);
++int drm_gem_close_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++int drm_gem_flink_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++int drm_gem_open_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++void drm_gem_open(struct drm_device *dev, struct drm_file *file_private);
++void drm_gem_release(struct drm_device *dev, struct drm_file *file_private);
++
++/* drm_drv.c */
++int drm_setmaster_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++struct drm_master *drm_master_create(struct drm_minor *minor);
++
++/* drm_debugfs.c */
++#if defined(CONFIG_DEBUG_FS)
++int drm_debugfs_init(struct drm_minor *minor, int minor_id,
++ struct dentry *root);
++int drm_debugfs_cleanup(struct drm_minor *minor);
++int drm_debugfs_connector_add(struct drm_connector *connector);
++void drm_debugfs_connector_remove(struct drm_connector *connector);
++#else
++static inline int drm_debugfs_init(struct drm_minor *minor, int minor_id,
++ struct dentry *root)
++{
++ return 0;
++}
++
++static inline int drm_debugfs_cleanup(struct drm_minor *minor)
++{
++ return 0;
++}
++
++static inline int drm_debugfs_connector_add(struct drm_connector *connector)
++{
++ return 0;
++}
++static inline void drm_debugfs_connector_remove(struct drm_connector *connector)
++{
++}
++#endif
+diff -Naur a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
+--- a/drivers/gpu/drm/drm_ioctl.c 2015-03-26 14:43:30.402436435 +0530
++++ b/drivers/gpu/drm/drm_ioctl.c 2015-03-26 14:42:38.726435422 +0530
+@@ -1,11 +1,3 @@
+-/**
+- * \file drm_ioctl.c
+- * IOCTL processing for DRM
+- *
+- * \author Rickard E. (Rik) Faith <faith@valinux.com>
+- * \author Gareth Hughes <gareth@valinux.com>
+- */
+-
+ /*
+ * Created: Fri Jan 8 09:01:26 1999 by faith@valinux.com
+ *
+@@ -13,6 +5,9 @@
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
++ * Author Rickard E. (Rik) Faith <faith@valinux.com>
++ * Author Gareth Hughes <gareth@valinux.com>
++ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+@@ -35,6 +30,8 @@
+
+ #include <drm/drmP.h>
+ #include <drm/drm_core.h>
++#include "drm_legacy.h"
++#include "drm_internal.h"
+
+ #include <linux/pci.h>
+ #include <linux/export.h>
+@@ -42,6 +39,9 @@
+ #include <asm/mtrr.h>
+ #endif
+
++static int drm_version(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++
+ /**
+ * Get the bus id.
+ *
+@@ -53,7 +53,7 @@
+ *
+ * Copies the bus id from drm_device::unique into user space.
+ */
+-int drm_getunique(struct drm_device *dev, void *data,
++static int drm_getunique(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+ {
+ struct drm_unique *u = data;
+@@ -72,13 +72,9 @@
+ drm_unset_busid(struct drm_device *dev,
+ struct drm_master *master)
+ {
+- kfree(dev->devname);
+- dev->devname = NULL;
+-
+ kfree(master->unique);
+ master->unique = NULL;
+ master->unique_len = 0;
+- master->unique_size = 0;
+ }
+
+ /**
+@@ -93,9 +89,10 @@
+ * Copies the bus id from userspace into drm_device::unique, and verifies that
+ * it matches the device this DRM is attached to (EINVAL otherwise). Deprecated
+ * in interface version 1.1 and will return EBUSY when setversion has requested
+- * version 1.1 or greater.
++ * version 1.1 or greater. Also note that KMS is all version 1.1 and later and
++ * UMS was only ever supported on pci devices.
+ */
+-int drm_setunique(struct drm_device *dev, void *data,
++static int drm_setunique(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+ {
+ struct drm_unique *u = data;
+@@ -108,10 +105,13 @@
+ if (!u->unique_len || u->unique_len > 1024)
+ return -EINVAL;
+
+- if (!dev->driver->bus->set_unique)
++ if (drm_core_check_feature(dev, DRIVER_MODESET))
++ return 0;
++
++ if (WARN_ON(!dev->pdev))
+ return -EINVAL;
+
+- ret = dev->driver->bus->set_unique(dev, master, u);
++ ret = drm_pci_set_unique(dev, master, u);
+ if (ret)
+ goto err;
+
+@@ -130,13 +130,25 @@
+ if (master->unique != NULL)
+ drm_unset_busid(dev, master);
+
+- ret = dev->driver->bus->set_busid(dev, master);
+- if (ret)
+- goto err;
++ if (dev->driver->set_busid) {
++ ret = dev->driver->set_busid(dev, master);
++ if (ret) {
++ drm_unset_busid(dev, master);
++ return ret;
++ }
++ } else {
++ if (WARN(dev->unique == NULL,
++ "No drm_driver.set_busid() implementation provided by "
++ "%ps. Use drm_dev_set_unique() to set the unique "
++ "name explicitly.", dev->driver))
++ return -EINVAL;
++
++ master->unique = kstrdup(dev->unique, GFP_KERNEL);
++ if (master->unique)
++ master->unique_len = strlen(dev->unique);
++ }
++
+ return 0;
+-err:
+- drm_unset_busid(dev, master);
+- return ret;
+ }
+
+ /**
+@@ -152,7 +164,7 @@
+ * Searches for the mapping with the specified offset and copies its information
+ * into userspace
+ */
+-int drm_getmap(struct drm_device *dev, void *data,
++static int drm_getmap(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+ {
+ struct drm_map *map = data;
+@@ -213,7 +225,7 @@
+ * Searches for the client with the specified index and copies its information
+ * into userspace
+ */
+-int drm_getclient(struct drm_device *dev, void *data,
++static int drm_getclient(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+ {
+ struct drm_client *client = data;
+@@ -253,7 +265,7 @@
+ *
+ * \return zero on success or a negative number on failure.
+ */
+-int drm_getstats(struct drm_device *dev, void *data,
++static int drm_getstats(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+ {
+ struct drm_stats *stats = data;
+@@ -267,7 +279,7 @@
+ /**
+ * Get device/driver capabilities
+ */
+-int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
++static int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
+ {
+ struct drm_get_cap *req = data;
+
+@@ -317,7 +329,7 @@
+ /**
+ * Set device/driver capabilities
+ */
+-int
++static int
+ drm_setclientcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
+ {
+ struct drm_set_client_cap *req = data;
+@@ -328,6 +340,11 @@
+ return -EINVAL;
+ file_priv->stereo_allowed = req->value;
+ break;
++ case DRM_CLIENT_CAP_UNIVERSAL_PLANES:
++ if (req->value > 1)
++ return -EINVAL;
++ file_priv->universal_planes = req->value;
++ break;
+ default:
+ return -EINVAL;
+ }
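++
++	/*
++	 * An illustrative userspace sketch (not part of the kernel sources):
++	 * a KMS client opts in to seeing all planes via the new capability:
++	 *
++	 *	struct drm_set_client_cap cap = {
++	 *		.capability = DRM_CLIENT_CAP_UNIVERSAL_PLANES,
++	 *		.value = 1,
++	 *	};
++	 *	ioctl(fd, DRM_IOCTL_SET_CLIENT_CAP, &cap);
++	 */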
+@@ -346,7 +363,7 @@
+ *
+ * Sets the requested interface version
+ */
+-int drm_setversion(struct drm_device *dev, void *data, struct drm_file *file_priv)
++static int drm_setversion(struct drm_device *dev, void *data, struct drm_file *file_priv)
+ {
+ struct drm_set_version *sv = data;
+ int if_version, retcode = 0;
+@@ -397,3 +414,358 @@
+ return 0;
+ }
+ EXPORT_SYMBOL(drm_noop);
++
++/**
++ * Copy an IOCTL return string to user space
++ */
++static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
++{
++ int len;
++
++ /* don't overflow userbuf */
++ len = strlen(value);
++ if (len > *buf_len)
++ len = *buf_len;
++
++ /* let userspace know exact length of driver value (which could be
++ * larger than the userspace-supplied buffer) */
++ *buf_len = strlen(value);
++
++ /* finally, try filling in the userbuf */
++ if (len && buf)
++ if (copy_to_user(buf, value, len))
++ return -EFAULT;
++ return 0;
++}
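++
++/*
++ * An illustrative userspace sketch (not part of the kernel sources) of
++ * the length-then-copy contract above, using DRM_IOCTL_VERSION: the first
++ * call passes NULL buffers to learn the lengths, the second fills them:
++ *
++ *	struct drm_version v = {0};
++ *
++ *	ioctl(fd, DRM_IOCTL_VERSION, &v);	// sizes only
++ *	v.name = malloc(v.name_len + 1);
++ *	v.date = malloc(v.date_len + 1);
++ *	v.desc = malloc(v.desc_len + 1);
++ *	ioctl(fd, DRM_IOCTL_VERSION, &v);	// strings copied
++ */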
++
++/**
++ * Get version information
++ *
++ * \param inode device inode.
++ * \param filp file pointer.
++ * \param cmd command.
++ * \param arg user argument, pointing to a drm_version structure.
++ * \return zero on success or negative number on failure.
++ *
++ * Fills in the version information in \p arg.
++ */
++static int drm_version(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_version *version = data;
++ int err;
++
++ version->version_major = dev->driver->major;
++ version->version_minor = dev->driver->minor;
++ version->version_patchlevel = dev->driver->patchlevel;
++ err = drm_copy_field(version->name, &version->name_len,
++ dev->driver->name);
++ if (!err)
++ err = drm_copy_field(version->date, &version->date_len,
++ dev->driver->date);
++ if (!err)
++ err = drm_copy_field(version->desc, &version->desc_len,
++ dev->driver->desc);
++
++ return err;
++}
++
++/**
++ * drm_ioctl_permit - Check ioctl permissions against caller
++ *
++ * @flags: ioctl permission flags.
++ * @file_priv: Pointer to struct drm_file identifying the caller.
++ *
++ * Checks whether the caller is allowed to run an ioctl with the
++ * indicated permissions. If so, returns zero. Otherwise returns an
++ * error code suitable for ioctl return.
++ */
++static int drm_ioctl_permit(u32 flags, struct drm_file *file_priv)
++{
++ /* ROOT_ONLY is only for CAP_SYS_ADMIN */
++ if (unlikely((flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)))
++ return -EACCES;
++
++ /* AUTH is only for authenticated or render client */
++ if (unlikely((flags & DRM_AUTH) && !drm_is_render_client(file_priv) &&
++ !file_priv->authenticated))
++ return -EACCES;
++
++ /* MASTER is only for master or control clients */
++ if (unlikely((flags & DRM_MASTER) && !file_priv->is_master &&
++ !drm_is_control_client(file_priv)))
++ return -EACCES;
++
++ /* Control clients must be explicitly allowed */
++ if (unlikely(!(flags & DRM_CONTROL_ALLOW) &&
++ drm_is_control_client(file_priv)))
++ return -EACCES;
++
++ /* Render clients must be explicitly allowed */
++ if (unlikely(!(flags & DRM_RENDER_ALLOW) &&
++ drm_is_render_client(file_priv)))
++ return -EACCES;
++
++ return 0;
++}
++
++#define DRM_IOCTL_DEF(ioctl, _func, _flags) \
++ [DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0, .name = #ioctl}
++
++/** Ioctl table */
++static const struct drm_ioctl_desc drm_ioctls[] = {
++ DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_UNLOCKED|DRM_RENDER_ALLOW),
++ DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
++ DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
++ DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
++ DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, DRM_UNLOCKED),
++ DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED),
++ DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, DRM_UNLOCKED),
++ DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED|DRM_RENDER_ALLOW),
++ DRM_IOCTL_DEF(DRM_IOCTL_SET_CLIENT_CAP, drm_setclientcap, 0),
++ DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER),
++
++ DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++ DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++ DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++ DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER),
++
++ DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_legacy_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++ DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_legacy_rmmap_ioctl, DRM_AUTH),
++
++ DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_legacy_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++ DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_legacy_getsareactx, DRM_AUTH),
++
++ DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_ROOT_ONLY),
++ DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_ROOT_ONLY),
++
++ DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_legacy_addctx, DRM_AUTH|DRM_ROOT_ONLY),
++ DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_legacy_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++ DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++ DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_legacy_getctx, DRM_AUTH),
++ DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_legacy_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++ DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_legacy_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++ DRM_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_legacy_resctx, DRM_AUTH),
++
++ DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++ DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++
++ DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_legacy_lock, DRM_AUTH),
++ DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_legacy_unlock, DRM_AUTH),
++
++ DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH),
++
++ DRM_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_legacy_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++ DRM_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_legacy_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++ DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_legacy_infobufs, DRM_AUTH),
++ DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_legacy_mapbufs, DRM_AUTH),
++ DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_legacy_freebufs, DRM_AUTH),
++ DRM_IOCTL_DEF(DRM_IOCTL_DMA, drm_legacy_dma_ioctl, DRM_AUTH),
++
++ DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++
++#if __OS_HAS_AGP
++ DRM_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++ DRM_IOCTL_DEF(DRM_IOCTL_AGP_RELEASE, drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++ DRM_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++ DRM_IOCTL_DEF(DRM_IOCTL_AGP_INFO, drm_agp_info_ioctl, DRM_AUTH),
++ DRM_IOCTL_DEF(DRM_IOCTL_AGP_ALLOC, drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++ DRM_IOCTL_DEF(DRM_IOCTL_AGP_FREE, drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++ DRM_IOCTL_DEF(DRM_IOCTL_AGP_BIND, drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++ DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++#endif
++
++ DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_legacy_sg_alloc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++ DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_legacy_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++
++ DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, DRM_UNLOCKED),
++
++ DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),
++
++ DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++
++ DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
++ DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH|DRM_UNLOCKED),
++ DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED),
++
++ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
++
++ DRM_IOCTL_DEF(DRM_IOCTL_PRIME_HANDLE_TO_FD, drm_prime_handle_to_fd_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
++ DRM_IOCTL_DEF(DRM_IOCTL_PRIME_FD_TO_HANDLE, drm_prime_fd_to_handle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
++
++ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
++ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
++ DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
++ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
++ DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPLANE, drm_mode_setplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
++ DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
++ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_UNLOCKED),
++ DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER|DRM_UNLOCKED),
++ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
++ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
++ DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
++ DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
++ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
++ DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
++ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
++ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
++ DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
++ DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
++ DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
++ DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
++ DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
++ DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
++ DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
++ DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
++ DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
++ DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_SETPROPERTY, drm_mode_obj_set_property_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
++ DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR2, drm_mode_cursor2_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
++};
++
++#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
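++
++/*
++ * An illustrative sketch (hypothetical "foo" driver): driver-private
++ * ioctls in the DRM_COMMAND_BASE..DRM_COMMAND_END range are declared the
++ * same way with the DRM_IOCTL_DEF_DRV() helper and the flags checked by
++ * drm_ioctl_permit():
++ *
++ *	static const struct drm_ioctl_desc foo_ioctls[] = {
++ *		DRM_IOCTL_DEF_DRV(FOO_GET_PARAM, foo_get_param,
++ *				  DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
++ *	};
++ */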
++
++/**
++ * Called whenever a process performs an ioctl on /dev/drm.
++ *
++ * \param inode device inode.
++ * \param file_priv DRM file private.
++ * \param cmd command.
++ * \param arg user argument.
++ * \return zero on success or negative number on failure.
++ *
++ * Looks up the ioctl function in the ::ioctls table, checking for root
++ * privileges if so required, and dispatches to the respective function.
++ */
++long drm_ioctl(struct file *filp,
++ unsigned int cmd, unsigned long arg)
++{
++ struct drm_file *file_priv = filp->private_data;
++ struct drm_device *dev;
++ const struct drm_ioctl_desc *ioctl = NULL;
++ drm_ioctl_t *func;
++ unsigned int nr = DRM_IOCTL_NR(cmd);
++ int retcode = -EINVAL;
++ char stack_kdata[128];
++ char *kdata = NULL;
++ unsigned int usize, asize;
++
++ dev = file_priv->minor->dev;
++
++ if (drm_device_is_unplugged(dev))
++ return -ENODEV;
++
++ if ((nr >= DRM_CORE_IOCTL_COUNT) &&
++ ((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END)))
++ goto err_i1;
++ if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) &&
++ (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
++ u32 drv_size;
++ ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE];
++ drv_size = _IOC_SIZE(ioctl->cmd_drv);
++ usize = asize = _IOC_SIZE(cmd);
++ if (drv_size > asize)
++ asize = drv_size;
++ cmd = ioctl->cmd_drv;
++ }
++ else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) {
++ u32 drv_size;
++
++ ioctl = &drm_ioctls[nr];
++
++ drv_size = _IOC_SIZE(ioctl->cmd);
++ usize = asize = _IOC_SIZE(cmd);
++ if (drv_size > asize)
++ asize = drv_size;
++
++ cmd = ioctl->cmd;
++ } else
++ goto err_i1;
++
++ DRM_DEBUG("pid=%d, dev=0x%lx, auth=%d, %s\n",
++ task_pid_nr(current),
++ (long)old_encode_dev(file_priv->minor->kdev->devt),
++ file_priv->authenticated, ioctl->name);
++
++ /* Do not trust userspace, use our own definition */
++ func = ioctl->func;
++
++ if (unlikely(!func)) {
++ DRM_DEBUG("no function\n");
++ retcode = -EINVAL;
++ goto err_i1;
++ }
++
++ retcode = drm_ioctl_permit(ioctl->flags, file_priv);
++ if (unlikely(retcode))
++ goto err_i1;
++
++ if (cmd & (IOC_IN | IOC_OUT)) {
++ if (asize <= sizeof(stack_kdata)) {
++ kdata = stack_kdata;
++ } else {
++ kdata = kmalloc(asize, GFP_KERNEL);
++ if (!kdata) {
++ retcode = -ENOMEM;
++ goto err_i1;
++ }
++ }
++ if (asize > usize)
++ memset(kdata + usize, 0, asize - usize);
++ }
++
++ if (cmd & IOC_IN) {
++ if (copy_from_user(kdata, (void __user *)arg,
++ usize) != 0) {
++ retcode = -EFAULT;
++ goto err_i1;
++ }
++ } else if (cmd & IOC_OUT) {
++ memset(kdata, 0, usize);
++ }
++
++ if (ioctl->flags & DRM_UNLOCKED)
++ retcode = func(dev, kdata, file_priv);
++ else {
++ mutex_lock(&drm_global_mutex);
++ retcode = func(dev, kdata, file_priv);
++ mutex_unlock(&drm_global_mutex);
++ }
++
++ if (cmd & IOC_OUT) {
++ if (copy_to_user((void __user *)arg, kdata,
++ usize) != 0)
++ retcode = -EFAULT;
++ }
++
++ err_i1:
++ if (!ioctl)
++ DRM_DEBUG("invalid ioctl: pid=%d, dev=0x%lx, auth=%d, cmd=0x%02x, nr=0x%02x\n",
++ task_pid_nr(current),
++ (long)old_encode_dev(file_priv->minor->kdev->devt),
++ file_priv->authenticated, cmd, nr);
++
++ if (kdata != stack_kdata)
++ kfree(kdata);
++ if (retcode)
++ DRM_DEBUG("ret = %d\n", retcode);
++ return retcode;
++}
++EXPORT_SYMBOL(drm_ioctl);
++
++/**
++ * drm_ioctl_flags - Check for core ioctl and return ioctl permission flags
++ *
++ * @nr: Ioctl number.
++ * @flags: Where to return the ioctl permission flags
++ */
++bool drm_ioctl_flags(unsigned int nr, unsigned int *flags)
++{
++ if ((nr >= DRM_COMMAND_END && nr < DRM_CORE_IOCTL_COUNT) ||
++ (nr < DRM_COMMAND_BASE)) {
++ *flags = drm_ioctls[nr].flags;
++ return true;
++ }
++
++ return false;
++}
++EXPORT_SYMBOL(drm_ioctl_flags);
+diff -Naur a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
+--- a/drivers/gpu/drm/drm_irq.c 2015-03-26 14:43:30.426436436 +0530
++++ b/drivers/gpu/drm/drm_irq.c 2015-03-26 14:42:38.730435422 +0530
+@@ -1,6 +1,5 @@
+-/**
+- * \file drm_irq.c
+- * IRQ support
++/*
++ * drm_irq.c IRQ and vblank support
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+@@ -35,6 +34,7 @@
+
+ #include <drm/drmP.h>
+ #include "drm_trace.h"
++#include "drm_internal.h"
+
+ #include <linux/interrupt.h> /* For task queue support */
+ #include <linux/slab.h>
+@@ -56,39 +56,91 @@
+ */
+ #define DRM_REDUNDANT_VBLIRQ_THRESH_NS 1000000
+
++static bool
++drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
++ struct timeval *tvblank, unsigned flags);
++
++static unsigned int drm_timestamp_precision = 20; /* Default to 20 usecs. */
++
++/*
++ * Default to use monotonic timestamps for wait-for-vblank and page-flip
++ * complete events.
++ */
++unsigned int drm_timestamp_monotonic = 1;
++
++static int drm_vblank_offdelay = 5000; /* Default to 5000 msecs. */
++
++module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600);
++module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600);
++module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600);
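++
++/*
++ * These become parameters of drm.ko; e.g. booting with
++ * "drm.timestamp_monotonic=0" reverts vblank/flip timestamps to
++ * CLOCK_REALTIME, and "drm.vblankoffdelay=0" keeps vblank irqs enabled.
++ */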
++
+ /**
+- * Get interrupt from bus id.
++ * drm_update_vblank_count - update the master vblank counter
++ * @dev: DRM device
++ * @crtc: counter to update
+ *
+- * \param inode device inode.
+- * \param file_priv DRM file private.
+- * \param cmd command.
+- * \param arg user argument, pointing to a drm_irq_busid structure.
+- * \return zero on success or a negative number on failure.
++ * Call back into the driver to update the appropriate vblank counter
++ * (specified by @crtc). Deal with wraparound, if it occurred, and
++ * update the last read value so we can deal with wraparound on the next
++ * call if necessary.
++ *
++ * Only necessary when going from off->on, to account for frames we
++ * didn't get an interrupt for.
+ *
+- * Finds the PCI device with the specified bus id and gets its IRQ number.
+- * This IOCTL is deprecated, and will now return EINVAL for any busid not equal
+- * to that of the device that this DRM instance attached to.
++ * Note: caller must hold dev->vbl_lock since this reads & writes
++ * device vblank fields.
+ */
+-int drm_irq_by_busid(struct drm_device *dev, void *data,
+- struct drm_file *file_priv)
++static void drm_update_vblank_count(struct drm_device *dev, int crtc)
+ {
+- struct drm_irq_busid *p = data;
++ struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
++ u32 cur_vblank, diff, tslot;
++ bool rc;
++ struct timeval t_vblank;
+
+- if (!dev->driver->bus->irq_by_busid)
+- return -EINVAL;
++ /*
++ * Interrupts were disabled prior to this call, so deal with counter
++ * wrap if needed.
++ * NOTE! It's possible we lost a full dev->max_vblank_count events
++ * here if the register is small or we had vblank interrupts off for
++ * a long time.
++ *
++	 * We repeat the hardware vblank counter & timestamp query until
++	 * we get consistent results. This prevents a race where the gpu
++	 * updates its hardware counter while we are retrieving the
++	 * corresponding vblank timestamp.
++ */
++ do {
++ cur_vblank = dev->driver->get_vblank_counter(dev, crtc);
++ rc = drm_get_last_vbltimestamp(dev, crtc, &t_vblank, 0);
++ } while (cur_vblank != dev->driver->get_vblank_counter(dev, crtc));
+
+- if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
+- return -EINVAL;
++ /* Deal with counter wrap */
++ diff = cur_vblank - vblank->last;
++ if (cur_vblank < vblank->last) {
++ diff += dev->max_vblank_count;
+
+- return dev->driver->bus->irq_by_busid(dev, p);
+-}
++ DRM_DEBUG("last_vblank[%d]=0x%x, cur_vblank=0x%x => diff=0x%x\n",
++ crtc, vblank->last, cur_vblank, diff);
++ }
+
+-/*
+- * Clear vblank timestamp buffer for a crtc.
+- */
+-static void clear_vblank_timestamps(struct drm_device *dev, int crtc)
+-{
+- memset(dev->vblank[crtc].time, 0, sizeof(dev->vblank[crtc].time));
++ DRM_DEBUG("updating vblank count on crtc %d, missed %d\n",
++ crtc, diff);
++
++ if (diff == 0)
++ return;
++
++ /* Reinitialize corresponding vblank timestamp if high-precision query
++ * available. Skip this step if query unsupported or failed. Will
++ * reinitialize delayed at next vblank interrupt in that case.
++ */
++ if (rc) {
++ tslot = atomic_read(&vblank->count) + diff;
++ vblanktimestamp(dev, crtc, tslot) = t_vblank;
++ }
++
++ smp_mb__before_atomic_inc();
++ atomic_add(diff, &vblank->count);
++ smp_mb__after_atomic_inc();
+ }
+
+ /*
+@@ -99,10 +151,11 @@
+ */
+ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
+ {
++ struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
+ unsigned long irqflags;
+ u32 vblcount;
+ s64 diff_ns;
+- int vblrc;
++ bool vblrc;
+ struct timeval tvblank;
+ int count = DRM_TIMESTAMP_MAXRETRIES;
+
+@@ -112,8 +165,28 @@
+ */
+ spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
+
++ /*
++ * If the vblank interrupt was already disabled update the count
++ * and timestamp to maintain the appearance that the counter
++ * has been ticking all along until this time. This makes the
++ * count account for the entire time between drm_vblank_on() and
++ * drm_vblank_off().
++ *
++ * But only do this if precise vblank timestamps are available.
++ * Otherwise we might read a totally bogus timestamp since drivers
++ * lacking precise timestamp support rely upon sampling the system clock
++ * at vblank interrupt time. Which obviously won't work out well if the
++ * vblank interrupt is disabled.
++ */
++ if (!vblank->enabled &&
++ drm_get_last_vbltimestamp(dev, crtc, &tvblank, 0)) {
++ drm_update_vblank_count(dev, crtc);
++ spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
++ return;
++ }
++
+ dev->driver->disable_vblank(dev, crtc);
+- dev->vblank[crtc].enabled = false;
++ vblank->enabled = false;
+
+ /* No further vblank irq's will be processed after
+ * this point. Get current hardware vblank count and
+@@ -128,9 +201,9 @@
+ * delayed gpu counter increment.
+ */
+ do {
+- dev->vblank[crtc].last = dev->driver->get_vblank_counter(dev, crtc);
++ vblank->last = dev->driver->get_vblank_counter(dev, crtc);
+ vblrc = drm_get_last_vbltimestamp(dev, crtc, &tvblank, 0);
+- } while (dev->vblank[crtc].last != dev->driver->get_vblank_counter(dev, crtc) && (--count) && vblrc);
++ } while (vblank->last != dev->driver->get_vblank_counter(dev, crtc) && (--count) && vblrc);
+
+ if (!count)
+ vblrc = 0;
+@@ -138,7 +211,7 @@
+ /* Compute time difference to stored timestamp of last vblank
+ * as updated by last invocation of drm_handle_vblank() in vblank irq.
+ */
+- vblcount = atomic_read(&dev->vblank[crtc].count);
++ vblcount = atomic_read(&vblank->count);
+ diff_ns = timeval_to_ns(&tvblank) -
+ timeval_to_ns(&vblanktimestamp(dev, crtc, vblcount));
+
+@@ -154,46 +227,63 @@
+ * available. In that case we can't account for this and just
+ * hope for the best.
+ */
+- if ((vblrc > 0) && (abs64(diff_ns) > 1000000)) {
+- atomic_inc(&dev->vblank[crtc].count);
++ if (vblrc && (abs64(diff_ns) > 1000000)) {
++ /* Store new timestamp in ringbuffer. */
++ vblanktimestamp(dev, crtc, vblcount + 1) = tvblank;
++
++ /* Increment cooked vblank count. This also atomically commits
++ * the timestamp computed above.
++ */
++ smp_mb__before_atomic_inc();
++ atomic_inc(&vblank->count);
+ smp_mb__after_atomic_inc();
+ }
+
+- /* Invalidate all timestamps while vblank irq's are off. */
+- clear_vblank_timestamps(dev, crtc);
+-
+ spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
+ }
+
+ static void vblank_disable_fn(unsigned long arg)
+ {
+- struct drm_device *dev = (struct drm_device *)arg;
++ struct drm_vblank_crtc *vblank = (void *)arg;
++ struct drm_device *dev = vblank->dev;
+ unsigned long irqflags;
+- int i;
++ int crtc = vblank->crtc;
+
+ if (!dev->vblank_disable_allowed)
+ return;
+
+- for (i = 0; i < dev->num_crtcs; i++) {
+- spin_lock_irqsave(&dev->vbl_lock, irqflags);
+- if (atomic_read(&dev->vblank[i].refcount) == 0 &&
+- dev->vblank[i].enabled) {
+- DRM_DEBUG("disabling vblank on crtc %d\n", i);
+- vblank_disable_and_save(dev, i);
+- }
+- spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
++ spin_lock_irqsave(&dev->vbl_lock, irqflags);
++ if (atomic_read(&vblank->refcount) == 0 && vblank->enabled) {
++ DRM_DEBUG("disabling vblank on crtc %d\n", crtc);
++ vblank_disable_and_save(dev, crtc);
+ }
++ spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+ }
+
++/**
++ * drm_vblank_cleanup - cleanup vblank support
++ * @dev: DRM device
++ *
++ * This function cleans up any resources allocated in drm_vblank_init.
++ */
+ void drm_vblank_cleanup(struct drm_device *dev)
+ {
++ int crtc;
++ unsigned long irqflags;
++
+ /* Bail if the driver didn't call drm_vblank_init() */
+ if (dev->num_crtcs == 0)
+ return;
+
+- del_timer_sync(&dev->vblank_disable_timer);
++ for (crtc = 0; crtc < dev->num_crtcs; crtc++) {
++ struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
+
+- vblank_disable_fn((unsigned long)dev);
++ del_timer_sync(&vblank->disable_timer);
++
++ spin_lock_irqsave(&dev->vbl_lock, irqflags);
++ vblank_disable_and_save(dev, crtc);
++ spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
++ }
+
+ kfree(dev->vblank);
+
+@@ -201,12 +291,20 @@
+ }
+ EXPORT_SYMBOL(drm_vblank_cleanup);
+
++/**
++ * drm_vblank_init - initialize vblank support
++ * @dev: drm_device
++ * @num_crtcs: number of crtcs supported by @dev
++ *
++ * This function initializes vblank support for @num_crtcs display pipelines.
++ *
++ * Returns:
++ * Zero on success or a negative error code on failure.
++ */
+ int drm_vblank_init(struct drm_device *dev, int num_crtcs)
+ {
+ int i, ret = -ENOMEM;
+
+- setup_timer(&dev->vblank_disable_timer, vblank_disable_fn,
+- (unsigned long)dev);
+ spin_lock_init(&dev->vbl_lock);
+ spin_lock_init(&dev->vblank_time_lock);
+
+@@ -216,8 +314,15 @@
+ if (!dev->vblank)
+ goto err;
+
+- for (i = 0; i < num_crtcs; i++)
+- init_waitqueue_head(&dev->vblank[i].queue);
++ for (i = 0; i < num_crtcs; i++) {
++ struct drm_vblank_crtc *vblank = &dev->vblank[i];
++
++ vblank->dev = dev;
++ vblank->crtc = i;
++ init_waitqueue_head(&vblank->queue);
++ setup_timer(&vblank->disable_timer, vblank_disable_fn,
++ (unsigned long)vblank);
++ }
+
+ DRM_INFO("Supports vblank timestamp caching Rev 2 (21.10.2013).\n");
+
+@@ -232,7 +337,7 @@
+ return 0;
+
+ err:
+- drm_vblank_cleanup(dev);
++ dev->num_crtcs = 0;
+ return ret;
+ }
+ EXPORT_SYMBOL(drm_vblank_init);
+@@ -261,42 +366,42 @@
+ }
+
+ /**
+- * Install IRQ handler.
+- *
+- * \param dev DRM device.
++ * drm_irq_install - install IRQ handler
++ * @dev: DRM device
++ * @irq: IRQ number to install the handler for
+ *
+ * Initializes the IRQ related data. Installs the handler, calling the driver
+- * \c irq_preinstall() and \c irq_postinstall() functions
+- * before and after the installation.
++ * irq_preinstall() and irq_postinstall() functions before and after the
++ * installation.
++ *
++ * This is the simplified helper interface provided for drivers with no special
++ * needs. Drivers which need to install interrupt handlers for multiple
++ * interrupts must instead set drm_device->irq_enabled to signal the DRM core
++ * that vblank interrupts are available.
++ *
++ * Returns:
++ * Zero on success or a negative error code on failure.
+ */
+-int drm_irq_install(struct drm_device *dev)
++int drm_irq_install(struct drm_device *dev, int irq)
+ {
+ int ret;
+ unsigned long sh_flags = 0;
+- char *irqname;
+
+ if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
+ return -EINVAL;
+
+- if (drm_dev_to_irq(dev) == 0)
++ if (irq == 0)
+ return -EINVAL;
+
+- mutex_lock(&dev->struct_mutex);
+-
+ /* Driver must have been initialized */
+- if (!dev->dev_private) {
+- mutex_unlock(&dev->struct_mutex);
++ if (!dev->dev_private)
+ return -EINVAL;
+- }
+
+- if (dev->irq_enabled) {
+- mutex_unlock(&dev->struct_mutex);
++ if (dev->irq_enabled)
+ return -EBUSY;
+- }
+ dev->irq_enabled = true;
+- mutex_unlock(&dev->struct_mutex);
+
+- DRM_DEBUG("irq=%d\n", drm_dev_to_irq(dev));
++ DRM_DEBUG("irq=%d\n", irq);
+
+ /* Before installing handler */
+ if (dev->driver->irq_preinstall)
+@@ -306,18 +411,11 @@
+ if (drm_core_check_feature(dev, DRIVER_IRQ_SHARED))
+ sh_flags = IRQF_SHARED;
+
+- if (dev->devname)
+- irqname = dev->devname;
+- else
+- irqname = dev->driver->name;
+-
+- ret = request_irq(drm_dev_to_irq(dev), dev->driver->irq_handler,
+- sh_flags, irqname, dev);
++ ret = request_irq(irq, dev->driver->irq_handler,
++ sh_flags, dev->driver->name, dev);
+
+ if (ret < 0) {
+- mutex_lock(&dev->struct_mutex);
+ dev->irq_enabled = false;
+- mutex_unlock(&dev->struct_mutex);
+ return ret;
+ }
+
+@@ -329,12 +427,12 @@
+ ret = dev->driver->irq_postinstall(dev);
+
+ if (ret < 0) {
+- mutex_lock(&dev->struct_mutex);
+ dev->irq_enabled = false;
+- mutex_unlock(&dev->struct_mutex);
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ vga_client_register(dev->pdev, NULL, NULL, NULL);
+- free_irq(drm_dev_to_irq(dev), dev);
++ free_irq(irq, dev);
++ } else {
++ dev->irq = irq;
+ }
+
+ return ret;
+@@ -342,11 +440,20 @@
+ EXPORT_SYMBOL(drm_irq_install);
+
+ /**
+- * Uninstall the IRQ handler.
++ * drm_irq_uninstall - uninstall the IRQ handler
++ * @dev: DRM device
+ *
+- * \param dev DRM device.
++ * Calls the driver's irq_uninstall() function and unregisters the IRQ handler.
++ * This should only be called by drivers which used drm_irq_install() to set up
++ * their interrupt handler. Other drivers must only reset
++ * drm_device->irq_enabled to false.
++ *
++ * Note that for kernel modesetting drivers it is a bug if this function fails.
++ * The sanity checks are only to catch buggy user modesetting drivers which call
++ * the same function through an ioctl.
+ *
+- * Calls the driver's \c irq_uninstall() function, and stops the irq.
++ * Returns:
++ * Zero on success or a negative error code on failure.
+ */
+ int drm_irq_uninstall(struct drm_device *dev)
+ {
+@@ -357,10 +464,8 @@
+ if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
+ return -EINVAL;
+
+- mutex_lock(&dev->struct_mutex);
+ irq_enabled = dev->irq_enabled;
+ dev->irq_enabled = false;
+- mutex_unlock(&dev->struct_mutex);
+
+ /*
+ * Wake up any waiters so they don't hang.
+@@ -368,9 +473,11 @@
+ if (dev->num_crtcs) {
+ spin_lock_irqsave(&dev->vbl_lock, irqflags);
+ for (i = 0; i < dev->num_crtcs; i++) {
+- wake_up(&dev->vblank[i].queue);
+- dev->vblank[i].enabled = false;
+- dev->vblank[i].last =
++ struct drm_vblank_crtc *vblank = &dev->vblank[i];
++
++ wake_up(&vblank->queue);
++ vblank->enabled = false;
++ vblank->last =
+ dev->driver->get_vblank_counter(dev, i);
+ }
+ spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+@@ -379,7 +486,7 @@
+ if (!irq_enabled)
+ return -EINVAL;
+
+- DRM_DEBUG("irq=%d\n", drm_dev_to_irq(dev));
++ DRM_DEBUG("irq=%d\n", dev->irq);
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ vga_client_register(dev->pdev, NULL, NULL, NULL);
+@@ -387,13 +494,13 @@
+ if (dev->driver->irq_uninstall)
+ dev->driver->irq_uninstall(dev);
+
+- free_irq(drm_dev_to_irq(dev), dev);
++ free_irq(dev->irq, dev);
+
+ return 0;
+ }
+ EXPORT_SYMBOL(drm_irq_uninstall);
+
+-/**
++/*
+ * IRQ control ioctl.
+ *
+ * \param inode device inode.
+@@ -408,43 +515,52 @@
+ struct drm_file *file_priv)
+ {
+ struct drm_control *ctl = data;
++ int ret = 0, irq;
+
+ /* if we haven't irq we fallback for compatibility reasons -
+ * this used to be a separate function in drm_dma.h
+ */
+
++ if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
++ return 0;
++ if (drm_core_check_feature(dev, DRIVER_MODESET))
++ return 0;
++	/* UMS was only ever supported on pci devices. */
++ if (WARN_ON(!dev->pdev))
++ return -EINVAL;
+
+ switch (ctl->func) {
+ case DRM_INST_HANDLER:
+- if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
+- return 0;
+- if (drm_core_check_feature(dev, DRIVER_MODESET))
+- return 0;
++ irq = dev->pdev->irq;
++
+ if (dev->if_version < DRM_IF_VERSION(1, 2) &&
+- ctl->irq != drm_dev_to_irq(dev))
++ ctl->irq != irq)
+ return -EINVAL;
+- return drm_irq_install(dev);
++ mutex_lock(&dev->struct_mutex);
++ ret = drm_irq_install(dev, irq);
++ mutex_unlock(&dev->struct_mutex);
++
++ return ret;
+ case DRM_UNINST_HANDLER:
+- if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
+- return 0;
+- if (drm_core_check_feature(dev, DRIVER_MODESET))
+- return 0;
+- return drm_irq_uninstall(dev);
++ mutex_lock(&dev->struct_mutex);
++ ret = drm_irq_uninstall(dev);
++ mutex_unlock(&dev->struct_mutex);
++
++ return ret;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ /**
+- * drm_calc_timestamping_constants - Calculate vblank timestamp constants
+- *
+- * @crtc drm_crtc whose timestamp constants should be updated.
+- * @mode display mode containing the scanout timings
++ * drm_calc_timestamping_constants - calculate vblank timestamp constants
++ * @crtc: drm_crtc whose timestamp constants should be updated.
++ * @mode: display mode containing the scanout timings
+ *
+ * Calculate and store various constants which are later
+ * needed by vblank and swap-completion timestamping, e.g,
+ * by drm_calc_vbltimestamp_from_scanoutpos(). They are
+- * derived from crtc's true scanout timing, so they take
++ * derived from CRTC's true scanout timing, so they take
+ * things like panel scaling or other adjustments into account.
+ */
+ void drm_calc_timestamping_constants(struct drm_crtc *crtc,
+@@ -489,11 +605,22 @@
+ EXPORT_SYMBOL(drm_calc_timestamping_constants);
+
+ /**
+- * drm_calc_vbltimestamp_from_scanoutpos - helper routine for kms
+- * drivers. Implements calculation of exact vblank timestamps from
+- * given drm_display_mode timings and current video scanout position
+- * of a crtc. This can be called from within get_vblank_timestamp()
+- * implementation of a kms driver to implement the actual timestamping.
++ * drm_calc_vbltimestamp_from_scanoutpos - precise vblank timestamp helper
++ * @dev: DRM device
++ * @crtc: Which CRTC's vblank timestamp to retrieve
++ * @max_error: Desired maximum allowable error in timestamps (nanosecs)
++ * On return contains true maximum error of timestamp
++ * @vblank_time: Pointer to struct timeval which should receive the timestamp
++ * @flags: Flags to pass to driver:
++ * 0 = Default,
++ * DRM_CALLED_FROM_VBLIRQ = If function is called from vbl IRQ handler
++ * @refcrtc: CRTC which defines scanout timing
++ * @mode: mode which defines the scanout timings
++ *
++ * Implements calculation of exact vblank timestamps from given drm_display_mode
++ * timings and current video scanout position of a CRTC. This can be called from
++ * within get_vblank_timestamp() implementation of a kms driver to implement the
++ * actual timestamping.
+ *
+ * Should return timestamps conforming to the OML_sync_control OpenML
+ * extension specification. The timestamp corresponds to the end of
+@@ -508,21 +635,11 @@
+ * returns as no operation if a doublescan or interlaced video mode is
+ * active. Higher level code is expected to handle this.
+ *
+- * @dev: DRM device.
+- * @crtc: Which crtc's vblank timestamp to retrieve.
+- * @max_error: Desired maximum allowable error in timestamps (nanosecs).
+- * On return contains true maximum error of timestamp.
+- * @vblank_time: Pointer to struct timeval which should receive the timestamp.
+- * @flags: Flags to pass to driver:
+- * 0 = Default.
+- * DRM_CALLED_FROM_VBLIRQ = If function is called from vbl irq handler.
+- * @refcrtc: drm_crtc* of crtc which defines scanout timing.
+- * @mode: mode which defines the scanout timings
+- *
+- * Returns negative value on error, failure or if not supported in current
++ * Returns:
++ * Negative value on error, failure or if not supported in current
+ * video mode:
+ *
+- * -EINVAL - Invalid crtc.
++ * -EINVAL - Invalid CRTC.
+ * -EAGAIN - Temporary unavailable, e.g., called before initial modeset.
+ * -ENOTSUPP - Function not supported in current display mode.
+ * -EIO - Failed, e.g., due to failed scanout position query.
+@@ -540,8 +657,8 @@
+ const struct drm_crtc *refcrtc,
+ const struct drm_display_mode *mode)
+ {
+- ktime_t stime, etime, mono_time_offset;
+ struct timeval tv_etime;
++ ktime_t stime, etime;
+ int vbl_status;
+ int vpos, hpos, i;
+ int framedur_ns, linedur_ns, pixeldur_ns, delta_ns, duration_ns;
+@@ -586,13 +703,6 @@
+ vbl_status = dev->driver->get_scanout_position(dev, crtc, flags, &vpos,
+ &hpos, &stime, &etime);
+
+- /*
+- * Get correction for CLOCK_MONOTONIC -> CLOCK_REALTIME if
+- * CLOCK_REALTIME is requested.
+- */
+- if (!drm_timestamp_monotonic)
+- mono_time_offset = ktime_get_monotonic_offset();
+-
+ /* Return as no-op if scanout query unsupported or failed. */
+ if (!(vbl_status & DRM_SCANOUTPOS_VALID)) {
+ DRM_DEBUG("crtc %d : scanoutpos query failed [%d].\n",
+@@ -622,7 +732,7 @@
+ * within vblank area, counting down the number of lines until
+ * start of scanout.
+ */
+- invbl = vbl_status & DRM_SCANOUTPOS_INVBL;
++ invbl = vbl_status & DRM_SCANOUTPOS_IN_VBLANK;
+
+ /* Convert scanout position into elapsed time at raw_time query
+ * since start of scanout at first display scanline. delta_ns
+@@ -631,7 +741,7 @@
+ delta_ns = vpos * linedur_ns + hpos * pixeldur_ns;
+
+ if (!drm_timestamp_monotonic)
+- etime = ktime_sub(etime, mono_time_offset);
++ etime = ktime_mono_to_real(etime);
+
+ /* save this only for debugging purposes */
+ tv_etime = ktime_to_timeval(etime);
+@@ -652,7 +762,7 @@
+
+ vbl_status = DRM_VBLANKTIME_SCANOUTPOS_METHOD;
+ if (invbl)
+- vbl_status |= DRM_VBLANKTIME_INVBL;
++ vbl_status |= DRM_VBLANKTIME_IN_VBLANK;
+
+ return vbl_status;
+ }
+@@ -662,35 +772,33 @@
+ {
+ ktime_t now;
+
+- now = ktime_get();
+- if (!drm_timestamp_monotonic)
+- now = ktime_sub(now, ktime_get_monotonic_offset());
+-
++ now = drm_timestamp_monotonic ? ktime_get() : ktime_get_real();
+ return ktime_to_timeval(now);
+ }
+
+ /**
+ * drm_get_last_vbltimestamp - retrieve raw timestamp for the most recent
+- * vblank interval.
+- *
++ * vblank interval
+ * @dev: DRM device
+- * @crtc: which crtc's vblank timestamp to retrieve
++ * @crtc: which CRTC's vblank timestamp to retrieve
+ * @tvblank: Pointer to target struct timeval which should receive the timestamp
+ * @flags: Flags to pass to driver:
+- * 0 = Default.
+- * DRM_CALLED_FROM_VBLIRQ = If function is called from vbl irq handler.
++ * 0 = Default,
++ * DRM_CALLED_FROM_VBLIRQ = If function is called from vbl IRQ handler
+ *
+ * Fetches the system timestamp corresponding to the time of the most recent
+- * vblank interval on specified crtc. May call into kms-driver to
++ * vblank interval on specified CRTC. May call into kms-driver to
+ * compute the timestamp with a high-precision GPU specific method.
+ *
+ * Returns zero if timestamp originates from uncorrected do_gettimeofday()
+ * call, i.e., it isn't very precisely locked to the true vblank.
+ *
+- * Returns non-zero if timestamp is considered to be very precise.
++ * Returns:
++ * True if timestamp is considered to be very precise, false otherwise.
+ */
+-u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
+- struct timeval *tvblank, unsigned flags)
++static bool
++drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
++ struct timeval *tvblank, unsigned flags)
+ {
+ int ret;
+
+@@ -702,7 +810,7 @@
+ ret = dev->driver->get_vblank_timestamp(dev, crtc, &max_error,
+ tvblank, flags);
+ if (ret > 0)
+- return (u32) ret;
++ return true;
+ }
+
+ /* GPU high precision timestamp query unsupported or failed.
+@@ -710,9 +818,8 @@
+ */
+ *tvblank = get_drm_timestamp();
+
+- return 0;
++ return false;
+ }
+-EXPORT_SYMBOL(drm_get_last_vbltimestamp);
+
+ /**
+ * drm_vblank_count - retrieve "cooked" vblank counter value
+@@ -722,10 +829,17 @@
+ * Fetches the "cooked" vblank count value that represents the number of
+ * vblank events since the system was booted, including lost events due to
+ * modesetting activity.
++ *
++ * Returns:
++ * The software vblank counter.
+ */
+ u32 drm_vblank_count(struct drm_device *dev, int crtc)
+ {
+- return atomic_read(&dev->vblank[crtc].count);
++ struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
++
++ if (WARN_ON(crtc >= dev->num_crtcs))
++ return 0;
++ return atomic_read(&vblank->count);
+ }
+ EXPORT_SYMBOL(drm_vblank_count);
+
+@@ -740,24 +854,27 @@
+ * Fetches the "cooked" vblank count value that represents the number of
+ * vblank events since the system was booted, including lost events due to
+ * modesetting activity. Returns corresponding system timestamp of the time
+- * of the vblank interval that corresponds to the current value vblank counter
+- * value.
++ * of the vblank interval that corresponds to the current vblank counter value.
+ */
+ u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
+ struct timeval *vblanktime)
+ {
++ struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
+ u32 cur_vblank;
+
++ if (WARN_ON(crtc >= dev->num_crtcs))
++ return 0;
++
+ /* Read timestamp from slot of _vblank_time ringbuffer
+ * that corresponds to current vblank count. Retry if
+ * count has incremented during readout. This works like
+ * a seqlock.
+ */
+ do {
+- cur_vblank = atomic_read(&dev->vblank[crtc].count);
++ cur_vblank = atomic_read(&vblank->count);
+ *vblanktime = vblanktimestamp(dev, crtc, cur_vblank);
+ smp_rmb();
+- } while (cur_vblank != atomic_read(&dev->vblank[crtc].count));
++ } while (cur_vblank != atomic_read(&vblank->count));
+
+ return cur_vblank;
+ }
+@@ -806,67 +923,40 @@
+ EXPORT_SYMBOL(drm_send_vblank_event);
+
+ /**
+- * drm_update_vblank_count - update the master vblank counter
++ * drm_vblank_enable - enable the vblank interrupt on a CRTC
+ * @dev: DRM device
+- * @crtc: counter to update
+- *
+- * Call back into the driver to update the appropriate vblank counter
+- * (specified by @crtc). Deal with wraparound, if it occurred, and
+- * update the last read value so we can deal with wraparound on the next
+- * call if necessary.
+- *
+- * Only necessary when going from off->on, to account for frames we
+- * didn't get an interrupt for.
+- *
+- * Note: caller must hold dev->vbl_lock since this reads & writes
+- * device vblank fields.
++ * @crtc: CRTC in question
+ */
+-static void drm_update_vblank_count(struct drm_device *dev, int crtc)
++static int drm_vblank_enable(struct drm_device *dev, int crtc)
+ {
+- u32 cur_vblank, diff, tslot, rc;
+- struct timeval t_vblank;
++ struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
++ int ret = 0;
+
+- /*
+- * Interrupts were disabled prior to this call, so deal with counter
+- * wrap if needed.
+- * NOTE! It's possible we lost a full dev->max_vblank_count events
+- * here if the register is small or we had vblank interrupts off for
+- * a long time.
+- *
+- * We repeat the hardware vblank counter & timestamp query until
+- * we get consistent results. This to prevent races between gpu
+- * updating its hardware counter while we are retrieving the
+- * corresponding vblank timestamp.
+- */
+- do {
+- cur_vblank = dev->driver->get_vblank_counter(dev, crtc);
+- rc = drm_get_last_vbltimestamp(dev, crtc, &t_vblank, 0);
+- } while (cur_vblank != dev->driver->get_vblank_counter(dev, crtc));
++ assert_spin_locked(&dev->vbl_lock);
+
+- /* Deal with counter wrap */
+- diff = cur_vblank - dev->vblank[crtc].last;
+- if (cur_vblank < dev->vblank[crtc].last) {
+- diff += dev->max_vblank_count;
++ spin_lock(&dev->vblank_time_lock);
+
+- DRM_DEBUG("last_vblank[%d]=0x%x, cur_vblank=0x%x => diff=0x%x\n",
+- crtc, dev->vblank[crtc].last, cur_vblank, diff);
++ if (!vblank->enabled) {
++ /*
++ * Enable vblank irqs under vblank_time_lock protection.
++ * All vblank count & timestamp updates are held off
++ * until we are done reinitializing master counter and
++		 * timestamps. Filter code in drm_handle_vblank() will
++ * prevent double-accounting of same vblank interval.
++ */
++ ret = dev->driver->enable_vblank(dev, crtc);
++ DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", crtc, ret);
++ if (ret)
++ atomic_dec(&vblank->refcount);
++ else {
++ vblank->enabled = true;
++ drm_update_vblank_count(dev, crtc);
++ }
+ }
+
+- DRM_DEBUG("enabling vblank interrupts on crtc %d, missed %d\n",
+- crtc, diff);
++ spin_unlock(&dev->vblank_time_lock);
+
+- /* Reinitialize corresponding vblank timestamp if high-precision query
+- * available. Skip this step if query unsupported or failed. Will
+- * reinitialize delayed at next vblank interrupt in that case.
+- */
+- if (rc) {
+- tslot = atomic_read(&dev->vblank[crtc].count) + diff;
+- vblanktimestamp(dev, crtc, tslot) = t_vblank;
+- }
+-
+- smp_mb__before_atomic_inc();
+- atomic_add(diff, &dev->vblank[crtc].count);
+- smp_mb__after_atomic_inc();
++ return ret;
+ }
+
+ /**
+@@ -877,39 +967,27 @@
+ * Acquire a reference count on vblank events to avoid having them disabled
+ * while in use.
+ *
+- * RETURNS
++ * This is the legacy version of drm_crtc_vblank_get().
++ *
++ * Returns:
+ * Zero on success, nonzero on failure.
+ */
+ int drm_vblank_get(struct drm_device *dev, int crtc)
+ {
+- unsigned long irqflags, irqflags2;
++ struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
++ unsigned long irqflags;
+ int ret = 0;
+
++ if (WARN_ON(crtc >= dev->num_crtcs))
++ return -EINVAL;
++
+ spin_lock_irqsave(&dev->vbl_lock, irqflags);
+ /* Going from 0->1 means we have to enable interrupts again */
+- if (atomic_add_return(1, &dev->vblank[crtc].refcount) == 1) {
+- spin_lock_irqsave(&dev->vblank_time_lock, irqflags2);
+- if (!dev->vblank[crtc].enabled) {
+- /* Enable vblank irqs under vblank_time_lock protection.
+- * All vblank count & timestamp updates are held off
+- * until we are done reinitializing master counter and
+- * timestamps. Filtercode in drm_handle_vblank() will
+- * prevent double-accounting of same vblank interval.
+- */
+- ret = dev->driver->enable_vblank(dev, crtc);
+- DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n",
+- crtc, ret);
+- if (ret)
+- atomic_dec(&dev->vblank[crtc].refcount);
+- else {
+- dev->vblank[crtc].enabled = true;
+- drm_update_vblank_count(dev, crtc);
+- }
+- }
+- spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags2);
++ if (atomic_add_return(1, &vblank->refcount) == 1) {
++ ret = drm_vblank_enable(dev, crtc);
+ } else {
+- if (!dev->vblank[crtc].enabled) {
+- atomic_dec(&dev->vblank[crtc].refcount);
++ if (!vblank->enabled) {
++ atomic_dec(&vblank->refcount);
+ ret = -EINVAL;
+ }
+ }
+@@ -920,47 +998,159 @@
+ EXPORT_SYMBOL(drm_vblank_get);
+
+ /**
++ * drm_crtc_vblank_get - get a reference count on vblank events
++ * @crtc: which CRTC to own
++ *
++ * Acquire a reference count on vblank events to avoid having them disabled
++ * while in use.
++ *
++ * This is the native kms version of drm_vblank_get().
++ *
++ * Returns:
++ * Zero on success, nonzero on failure.
++ */
++int drm_crtc_vblank_get(struct drm_crtc *crtc)
++{
++ return drm_vblank_get(crtc->dev, drm_crtc_index(crtc));
++}
++EXPORT_SYMBOL(drm_crtc_vblank_get);
++
++/**
+ * drm_vblank_put - give up ownership of vblank events
+ * @dev: DRM device
+ * @crtc: which counter to give up
+ *
+ * Release ownership of a given vblank counter, turning off interrupts
+ * if possible. Disable interrupts after drm_vblank_offdelay milliseconds.
++ *
++ * This is the legacy version of drm_crtc_vblank_put().
+ */
+ void drm_vblank_put(struct drm_device *dev, int crtc)
+ {
+- BUG_ON(atomic_read(&dev->vblank[crtc].refcount) == 0);
++ struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
++
++ if (WARN_ON(atomic_read(&vblank->refcount) == 0))
++ return;
++
++ if (WARN_ON(crtc >= dev->num_crtcs))
++ return;
+
+ /* Last user schedules interrupt disable */
+- if (atomic_dec_and_test(&dev->vblank[crtc].refcount) &&
+- (drm_vblank_offdelay > 0))
+- mod_timer(&dev->vblank_disable_timer,
+- jiffies + ((drm_vblank_offdelay * HZ)/1000));
++ if (atomic_dec_and_test(&vblank->refcount)) {
++ if (drm_vblank_offdelay == 0)
++ return;
++ else if (dev->vblank_disable_immediate || drm_vblank_offdelay < 0)
++ vblank_disable_fn((unsigned long)vblank);
++ else
++ mod_timer(&vblank->disable_timer,
++ jiffies + ((drm_vblank_offdelay * HZ)/1000));
++ }
+ }
+ EXPORT_SYMBOL(drm_vblank_put);
+
+ /**
++ * drm_crtc_vblank_put - give up ownership of vblank events
++ * @crtc: which counter to give up
++ *
++ * Release ownership of a given vblank counter, turning off interrupts
++ * if possible. Disable interrupts after drm_vblank_offdelay milliseconds.
++ *
++ * This is the native kms version of drm_vblank_put().
++ */
++void drm_crtc_vblank_put(struct drm_crtc *crtc)
++{
++ drm_vblank_put(crtc->dev, drm_crtc_index(crtc));
++}
++EXPORT_SYMBOL(drm_crtc_vblank_put);
++
++/**
++ * drm_wait_one_vblank - wait for one vblank
++ * @dev: DRM device
++ * @crtc: crtc index
++ *
++ * This waits for one vblank to pass on @crtc, using the irq driver interfaces.
++ * It is a failure to call this when the vblank irq for @crtc is disabled, e.g.
++ * due to lack of driver support or because the crtc is off.
++ */
++void drm_wait_one_vblank(struct drm_device *dev, int crtc)
++{
++ int ret;
++ u32 last;
++
++ ret = drm_vblank_get(dev, crtc);
++ if (WARN(ret, "vblank not available on crtc %i, ret=%i\n", crtc, ret))
++ return;
++
++ last = drm_vblank_count(dev, crtc);
++
++ ret = wait_event_timeout(dev->vblank[crtc].queue,
++ last != drm_vblank_count(dev, crtc),
++ msecs_to_jiffies(100));
++
++ WARN(ret == 0, "vblank wait timed out on crtc %i\n", crtc);
++
++ drm_vblank_put(dev, crtc);
++}
++EXPORT_SYMBOL(drm_wait_one_vblank);
++
++/**
++ * drm_crtc_wait_one_vblank - wait for one vblank
++ * @crtc: DRM crtc
++ *
++ * This waits for one vblank to pass on @crtc, using the irq driver interfaces.
++ * It is a failure to call this when the vblank irq for @crtc is disabled, e.g.
++ * due to lack of driver support or because the crtc is off.
++ */
++void drm_crtc_wait_one_vblank(struct drm_crtc *crtc)
++{
++ drm_wait_one_vblank(crtc->dev, drm_crtc_index(crtc));
++}
++EXPORT_SYMBOL(drm_crtc_wait_one_vblank);
++
++/**
+ * drm_vblank_off - disable vblank events on a CRTC
+ * @dev: DRM device
+ * @crtc: CRTC in question
+ *
+- * Caller must hold event lock.
++ * Drivers can use this function to shut down the vblank interrupt handling when
++ * disabling a crtc. This function ensures that the latest vblank frame count is
++ * stored so that drm_vblank_on() can restore it again.
++ *
++ * Drivers must use this function when the hardware vblank counter can get
++ * reset, e.g. when suspending.
++ *
++ * This is the legacy version of drm_crtc_vblank_off().
+ */
+ void drm_vblank_off(struct drm_device *dev, int crtc)
+ {
++ struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
+ struct drm_pending_vblank_event *e, *t;
+ struct timeval now;
+ unsigned long irqflags;
+ unsigned int seq;
+
+- spin_lock_irqsave(&dev->vbl_lock, irqflags);
++ if (WARN_ON(crtc >= dev->num_crtcs))
++ return;
++
++ spin_lock_irqsave(&dev->event_lock, irqflags);
++
++ spin_lock(&dev->vbl_lock);
+ vblank_disable_and_save(dev, crtc);
+- wake_up(&dev->vblank[crtc].queue);
++ wake_up(&vblank->queue);
++
++ /*
++ * Prevent subsequent drm_vblank_get() from re-enabling
++ * the vblank interrupt by bumping the refcount.
++ */
++ if (!vblank->inmodeset) {
++ atomic_inc(&vblank->refcount);
++ vblank->inmodeset = 1;
++ }
++ spin_unlock(&dev->vbl_lock);
+
+ /* Send any queued vblank events, lest the natives grow disquiet */
+ seq = drm_vblank_count_and_time(dev, crtc, &now);
+
+- spin_lock(&dev->event_lock);
+ list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
+ if (e->pipe != crtc)
+ continue;
+@@ -971,11 +1161,93 @@
+ drm_vblank_put(dev, e->pipe);
+ send_vblank_event(dev, e, seq, &now);
+ }
+- spin_unlock(&dev->event_lock);
++ spin_unlock_irqrestore(&dev->event_lock, irqflags);
++}
++EXPORT_SYMBOL(drm_vblank_off);
++
++/**
++ * drm_crtc_vblank_off - disable vblank events on a CRTC
++ * @crtc: CRTC in question
++ *
++ * Drivers can use this function to shut down the vblank interrupt handling when
++ * disabling a crtc. This function ensures that the latest vblank frame count is
++ * stored so that drm_vblank_on() can restore it again.
++ *
++ * Drivers must use this function when the hardware vblank counter can get
++ * reset, e.g. when suspending.
++ *
++ * This is the native kms version of drm_vblank_off().
++ */
++void drm_crtc_vblank_off(struct drm_crtc *crtc)
++{
++ drm_vblank_off(crtc->dev, drm_crtc_index(crtc));
++}
++EXPORT_SYMBOL(drm_crtc_vblank_off);
++
++/**
++ * drm_vblank_on - enable vblank events on a CRTC
++ * @dev: DRM device
++ * @crtc: CRTC in question
++ *
++ * This function restores the vblank interrupt state captured with
++ * drm_vblank_off() again. Note that calls to drm_vblank_on() and
++ * drm_vblank_off() can be unbalanced and so can also be unconditionally called
++ * in driver load code to reflect the current hardware state of the crtc.
++ *
++ * This is the legacy version of drm_crtc_vblank_on().
++ */
++void drm_vblank_on(struct drm_device *dev, int crtc)
++{
++ struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
++ unsigned long irqflags;
++
++ if (WARN_ON(crtc >= dev->num_crtcs))
++ return;
++
++ spin_lock_irqsave(&dev->vbl_lock, irqflags);
++ /* Drop our private "prevent drm_vblank_get" refcount */
++ if (vblank->inmodeset) {
++ atomic_dec(&vblank->refcount);
++ vblank->inmodeset = 0;
++ }
+
++ /*
++ * sample the current counter to avoid random jumps
++ * when drm_vblank_enable() applies the diff
++ *
++	 * -1 to make sure the user will never see the same
++ * vblank counter value before and after a modeset
++ */
++ vblank->last =
++ (dev->driver->get_vblank_counter(dev, crtc) - 1) &
++ dev->max_vblank_count;
++ /*
++ * re-enable interrupts if there are users left, or the
++ * user wishes vblank interrupts to be enabled all the time.
++ */
++ if (atomic_read(&vblank->refcount) != 0 ||
++ (!dev->vblank_disable_immediate && drm_vblank_offdelay == 0))
++ WARN_ON(drm_vblank_enable(dev, crtc));
+ spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+ }
+-EXPORT_SYMBOL(drm_vblank_off);
++EXPORT_SYMBOL(drm_vblank_on);
++
++/**
++ * drm_crtc_vblank_on - enable vblank events on a CRTC
++ * @crtc: CRTC in question
++ *
++ * This function restores the vblank interrupt state captured with
++ * drm_vblank_off() again. Note that calls to drm_vblank_on() and
++ * drm_vblank_off() can be unbalanced and so can also be unconditionally called
++ * in driver load code to reflect the current hardware state of the crtc.
++ *
++ * This is the native kms version of drm_vblank_on().
++ */
++void drm_crtc_vblank_on(struct drm_crtc *crtc)
++{
++ drm_vblank_on(crtc->dev, drm_crtc_index(crtc));
++}
++EXPORT_SYMBOL(drm_crtc_vblank_on);
+
+ /**
+ * drm_vblank_pre_modeset - account for vblanks across mode sets
+@@ -984,12 +1256,33 @@
+ *
+ * Account for vblank events across mode setting events, which will likely
+ * reset the hardware frame counter.
++ *
++ * This is done by grabbing a temporary vblank reference to ensure that the
++ * vblank interrupt keeps running across the modeset sequence. With this the
++ * software-side vblank frame counting will ensure that there are no jumps or
++ * discontinuities.
++ *
++ * Unfortunately this approach is racy and also doesn't work when the vblank
++ * interrupt stops running, e.g. across system suspend/resume. It is therefore
++ * highly recommended that drivers use the newer drm_vblank_off() and
++ * drm_vblank_on() instead. drm_vblank_pre_modeset() only works correctly when
++ * using "cooked" software vblank frame counters and not relying on any hardware
++ * counters.
++ *
++ * Drivers must call drm_vblank_post_modeset() when re-enabling the same crtc
++ * again.
+ */
+ void drm_vblank_pre_modeset(struct drm_device *dev, int crtc)
+ {
++ struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
++
+ /* vblank is not initialized (IRQ not installed ?), or has been freed */
+ if (!dev->num_crtcs)
+ return;
++
++ if (WARN_ON(crtc >= dev->num_crtcs))
++ return;
++
+ /*
+ * To avoid all the problems that might happen if interrupts
+ * were enabled/disabled around or between these calls, we just
+@@ -997,36 +1290,45 @@
+ * to avoid corrupting the count if multiple, mismatch calls occur),
+ * so that interrupts remain enabled in the interim.
+ */
+- if (!dev->vblank[crtc].inmodeset) {
+- dev->vblank[crtc].inmodeset = 0x1;
++ if (!vblank->inmodeset) {
++ vblank->inmodeset = 0x1;
+ if (drm_vblank_get(dev, crtc) == 0)
+- dev->vblank[crtc].inmodeset |= 0x2;
++ vblank->inmodeset |= 0x2;
+ }
+ }
+ EXPORT_SYMBOL(drm_vblank_pre_modeset);
+
++/**
++ * drm_vblank_post_modeset - undo drm_vblank_pre_modeset changes
++ * @dev: DRM device
++ * @crtc: CRTC in question
++ *
++ * This function again drops the temporary vblank reference acquired in
++ * drm_vblank_pre_modeset.
++ */
+ void drm_vblank_post_modeset(struct drm_device *dev, int crtc)
+ {
++ struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
+ unsigned long irqflags;
+
+ /* vblank is not initialized (IRQ not installed ?), or has been freed */
+ if (!dev->num_crtcs)
+ return;
+
+- if (dev->vblank[crtc].inmodeset) {
++ if (vblank->inmodeset) {
+ spin_lock_irqsave(&dev->vbl_lock, irqflags);
+ dev->vblank_disable_allowed = true;
+ spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+
+- if (dev->vblank[crtc].inmodeset & 0x2)
++ if (vblank->inmodeset & 0x2)
+ drm_vblank_put(dev, crtc);
+
+- dev->vblank[crtc].inmodeset = 0;
++ vblank->inmodeset = 0;
+ }
+ }
+ EXPORT_SYMBOL(drm_vblank_post_modeset);
+
+-/**
++/*
+ * drm_modeset_ctl - handle vblank event counter changes across mode switch
+ * @DRM_IOCTL_ARGS: standard ioctl arguments
+ *
+@@ -1073,6 +1375,7 @@
+ union drm_wait_vblank *vblwait,
+ struct drm_file *file_priv)
+ {
++ struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+ struct drm_pending_vblank_event *e;
+ struct timeval now;
+ unsigned long flags;
+@@ -1096,6 +1399,18 @@
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+
++ /*
++ * drm_vblank_off() might have been called after we called
++ * drm_vblank_get(). drm_vblank_off() holds event_lock
++ * around the vblank disable, so no need for further locking.
++ * The reference from drm_vblank_get() protects against
++ * vblank disable from another source.
++ */
++ if (!vblank->enabled) {
++ ret = -EINVAL;
++ goto err_unlock;
++ }
++
+ if (file_priv->event_space < sizeof e->event) {
+ ret = -EBUSY;
+ goto err_unlock;
+@@ -1139,7 +1454,7 @@
+ return ret;
+ }
+
+-/**
++/*
+ * Wait for VBLANK.
+ *
+ * \param inode device inode.
+@@ -1150,19 +1465,19 @@
+ *
+ * This function enables the vblank interrupt on the pipe requested, then
+ * sleeps waiting for the requested sequence number to occur, and drops
+- * the vblank interrupt refcount afterwards. (vblank irq disable follows that
++ * the vblank interrupt refcount afterwards. (vblank IRQ disable follows that
+ * after a timeout with no further vblank waits scheduled).
+ */
+ int drm_wait_vblank(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+ {
++ struct drm_vblank_crtc *vblank;
+ union drm_wait_vblank *vblwait = data;
+ int ret;
+ unsigned int flags, seq, crtc, high_crtc;
+
+- if (drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
+- if ((!drm_dev_to_irq(dev)) || (!dev->irq_enabled))
+- return -EINVAL;
++ if (!dev->irq_enabled)
++ return -EINVAL;
+
+ if (vblwait->request.type & _DRM_VBLANK_SIGNAL)
+ return -EINVAL;
+@@ -1186,6 +1501,8 @@
+ if (crtc >= dev->num_crtcs)
+ return -EINVAL;
+
++ vblank = &dev->vblank[crtc];
++
+ ret = drm_vblank_get(dev, crtc);
+ if (ret) {
+ DRM_DEBUG("failed to acquire vblank counter, %d\n", ret);
+@@ -1218,10 +1535,11 @@
+
+ DRM_DEBUG("waiting on vblank count %d, crtc %d\n",
+ vblwait->request.sequence, crtc);
+- dev->vblank[crtc].last_wait = vblwait->request.sequence;
+- DRM_WAIT_ON(ret, dev->vblank[crtc].queue, 3 * HZ,
++ vblank->last_wait = vblwait->request.sequence;
++ DRM_WAIT_ON(ret, vblank->queue, 3 * HZ,
+ (((drm_vblank_count(dev, crtc) -
+ vblwait->request.sequence) <= (1 << 23)) ||
++ !vblank->enabled ||
+ !dev->irq_enabled));
+
+ if (ret != -EINTR) {
+@@ -1246,12 +1564,11 @@
+ {
+ struct drm_pending_vblank_event *e, *t;
+ struct timeval now;
+- unsigned long flags;
+ unsigned int seq;
+
+- seq = drm_vblank_count_and_time(dev, crtc, &now);
++ assert_spin_locked(&dev->event_lock);
+
+- spin_lock_irqsave(&dev->event_lock, flags);
++ seq = drm_vblank_count_and_time(dev, crtc, &now);
+
+ list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
+ if (e->pipe != crtc)
+@@ -1267,8 +1584,6 @@
+ send_vblank_event(dev, e, seq, &now);
+ }
+
+- spin_unlock_irqrestore(&dev->event_lock, flags);
+-
+ trace_drm_vblank_event(crtc, seq);
+ }
+
+@@ -1282,6 +1597,7 @@
+ */
+ bool drm_handle_vblank(struct drm_device *dev, int crtc)
+ {
++ struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
+ u32 vblcount;
+ s64 diff_ns;
+ struct timeval tvblank;
+@@ -1290,15 +1606,21 @@
+ if (!dev->num_crtcs)
+ return false;
+
++ if (WARN_ON(crtc >= dev->num_crtcs))
++ return false;
++
++ spin_lock_irqsave(&dev->event_lock, irqflags);
++
+ /* Need timestamp lock to prevent concurrent execution with
+ * vblank enable/disable, as this would cause inconsistent
+ * or corrupted timestamps and vblank counts.
+ */
+- spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
++ spin_lock(&dev->vblank_time_lock);
+
+ /* Vblank irq handling disabled. Nothing to do. */
+- if (!dev->vblank[crtc].enabled) {
+- spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
++ if (!vblank->enabled) {
++ spin_unlock(&dev->vblank_time_lock);
++ spin_unlock_irqrestore(&dev->event_lock, irqflags);
+ return false;
+ }
+
+@@ -1307,7 +1629,7 @@
+ */
+
+ /* Get current timestamp and count. */
+- vblcount = atomic_read(&dev->vblank[crtc].count);
++ vblcount = atomic_read(&vblank->count);
+ drm_get_last_vbltimestamp(dev, crtc, &tvblank, DRM_CALLED_FROM_VBLIRQ);
+
+ /* Compute time difference to timestamp of last vblank */
+@@ -1331,17 +1653,20 @@
+ * the timestamp computed above.
+ */
+ smp_mb__before_atomic_inc();
+- atomic_inc(&dev->vblank[crtc].count);
++ atomic_inc(&vblank->count);
+ smp_mb__after_atomic_inc();
+ } else {
+ DRM_DEBUG("crtc %d: Redundant vblirq ignored. diff_ns = %d\n",
+ crtc, (int) diff_ns);
+ }
+
+- wake_up(&dev->vblank[crtc].queue);
++ spin_unlock(&dev->vblank_time_lock);
++
++ wake_up(&vblank->queue);
+ drm_handle_vblank_events(dev, crtc);
+
+- spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
++ spin_unlock_irqrestore(&dev->event_lock, irqflags);
++
+ return true;
+ }
+ EXPORT_SYMBOL(drm_handle_vblank);
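Taken together, the drm_irq.c changes switch drivers over to an explicit IRQ number and per-CRTC vblank state. A minimal load/unload sketch for a single-CRTC KMS driver, with driver private setup elided; the foo_ names are hypothetical, while the drm_* calls match the signatures introduced above:

    static int foo_load(struct drm_device *dev, unsigned long flags)
    {
            int ret;

            /* allocate the per-CRTC struct drm_vblank_crtc state */
            ret = drm_vblank_init(dev, 1);
            if (ret)
                    return ret;

            /* the IRQ is now passed explicitly instead of being
             * derived from the device via drm_dev_to_irq() */
            return drm_irq_install(dev, dev->pdev->irq);
    }

    static int foo_unload(struct drm_device *dev)
    {
            drm_irq_uninstall(dev);
            drm_vblank_cleanup(dev);
            return 0;
    }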
+diff -Naur a/drivers/gpu/drm/drm_legacy.h b/drivers/gpu/drm/drm_legacy.h
+--- a/drivers/gpu/drm/drm_legacy.h 1970-01-01 05:30:00.000000000 +0530
++++ b/drivers/gpu/drm/drm_legacy.h 2015-03-26 14:42:38.730435422 +0530
+@@ -0,0 +1,113 @@
++#ifndef __DRM_LEGACY_H__
++#define __DRM_LEGACY_H__
++
++/*
++ * Copyright (c) 2014 David Herrmann <dh.herrmann@gmail.com>
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++/*
++ * This file contains legacy interfaces that modern drm drivers
++ * should no longer be using. They cannot be removed as legacy
++ * drivers use them, and removing them would be an API break.
++ */
++#include <linux/list.h>
++#include <drm/drm_legacy.h>
++
++struct agp_memory;
++struct drm_device;
++struct drm_file;
++
++/*
++ * Generic DRM Contexts
++ */
++
++#define DRM_KERNEL_CONTEXT 0
++#define DRM_RESERVED_CONTEXTS 1
++
++int drm_legacy_ctxbitmap_init(struct drm_device *dev);
++void drm_legacy_ctxbitmap_cleanup(struct drm_device *dev);
++void drm_legacy_ctxbitmap_free(struct drm_device *dev, int ctx_handle);
++void drm_legacy_ctxbitmap_flush(struct drm_device *dev, struct drm_file *file);
++
++int drm_legacy_resctx(struct drm_device *d, void *v, struct drm_file *f);
++int drm_legacy_addctx(struct drm_device *d, void *v, struct drm_file *f);
++int drm_legacy_getctx(struct drm_device *d, void *v, struct drm_file *f);
++int drm_legacy_switchctx(struct drm_device *d, void *v, struct drm_file *f);
++int drm_legacy_newctx(struct drm_device *d, void *v, struct drm_file *f);
++int drm_legacy_rmctx(struct drm_device *d, void *v, struct drm_file *f);
++
++int drm_legacy_setsareactx(struct drm_device *d, void *v, struct drm_file *f);
++int drm_legacy_getsareactx(struct drm_device *d, void *v, struct drm_file *f);
++
++/*
++ * Generic Buffer Management
++ */
++
++#define DRM_MAP_HASH_OFFSET 0x10000000
++
++int drm_legacy_addmap_ioctl(struct drm_device *d, void *v, struct drm_file *f);
++int drm_legacy_rmmap_ioctl(struct drm_device *d, void *v, struct drm_file *f);
++int drm_legacy_addbufs(struct drm_device *d, void *v, struct drm_file *f);
++int drm_legacy_infobufs(struct drm_device *d, void *v, struct drm_file *f);
++int drm_legacy_markbufs(struct drm_device *d, void *v, struct drm_file *f);
++int drm_legacy_freebufs(struct drm_device *d, void *v, struct drm_file *f);
++int drm_legacy_mapbufs(struct drm_device *d, void *v, struct drm_file *f);
++int drm_legacy_dma_ioctl(struct drm_device *d, void *v, struct drm_file *f);
++
++void drm_legacy_vma_flush(struct drm_device *d);
++
++/*
++ * AGP Support
++ */
++
++struct drm_agp_mem {
++ unsigned long handle;
++ struct agp_memory *memory;
++ unsigned long bound;
++ int pages;
++ struct list_head head;
++};
++
++/*
++ * Generic Userspace Locking-API
++ */
++
++int drm_legacy_i_have_hw_lock(struct drm_device *d, struct drm_file *f);
++int drm_legacy_lock(struct drm_device *d, void *v, struct drm_file *f);
++int drm_legacy_unlock(struct drm_device *d, void *v, struct drm_file *f);
++int drm_legacy_lock_free(struct drm_lock_data *lock, unsigned int ctx);
++
++/* DMA support */
++int drm_legacy_dma_setup(struct drm_device *dev);
++void drm_legacy_dma_takedown(struct drm_device *dev);
++void drm_legacy_free_buffer(struct drm_device *dev,
++ struct drm_buf * buf);
++void drm_legacy_reclaim_buffers(struct drm_device *dev,
++ struct drm_file *filp);
++
++/* Scatter Gather Support */
++void drm_legacy_sg_cleanup(struct drm_device *dev);
++int drm_legacy_sg_alloc(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++int drm_legacy_sg_free(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++
++#endif /* __DRM_LEGACY_H__ */
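drm_legacy.h only declares the renamed legacy entry points; the definitions keep their old bodies, as the drm_lock.c and drm_memory.c hunks below show. Out-of-tree legacy code therefore needs the matching s/drm_/drm_legacy_/ rename at its call sites. A sketch of a driver quiescing the hardware lock around buffer reclaim, where foo_reclaim_buffers() is hypothetical:

    struct drm_master *master = file_priv->master;

    drm_legacy_idlelock_take(&master->lock);    /* was drm_idlelock_take() */
    foo_reclaim_buffers(dev, file_priv);
    drm_legacy_idlelock_release(&master->lock); /* was drm_idlelock_release() */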
+diff -Naur a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
+--- a/drivers/gpu/drm/drm_lock.c 2015-03-26 14:43:30.490436437 +0530
++++ b/drivers/gpu/drm/drm_lock.c 2015-03-26 14:42:38.730435422 +0530
+@@ -35,6 +35,8 @@
+
+ #include <linux/export.h>
+ #include <drm/drmP.h>
++#include "drm_legacy.h"
++#include "drm_internal.h"
+
+ static int drm_notifier(void *priv);
+
+@@ -51,7 +53,8 @@
+ *
+ * Add the current task to the lock wait queue, and attempt to take to lock.
+ */
+-int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
++int drm_legacy_lock(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
+ {
+ DECLARE_WAITQUEUE(entry, current);
+ struct drm_lock *lock = data;
+@@ -119,7 +122,7 @@
+ sigaddset(&dev->sigmask, SIGTTOU);
+ dev->sigdata.context = lock->context;
+ dev->sigdata.lock = master->lock.hw_lock;
+- block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask);
++ block_all_signals(drm_notifier, dev, &dev->sigmask);
+ }
+
+ if (dev->driver->dma_quiescent && (lock->flags & _DRM_LOCK_QUIESCENT))
+@@ -145,7 +148,7 @@
+ *
+ * Transfer and free the lock.
+ */
+-int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
++int drm_legacy_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
+ {
+ struct drm_lock *lock = data;
+ struct drm_master *master = file_priv->master;
+@@ -156,7 +159,7 @@
+ return -EINVAL;
+ }
+
+- if (drm_lock_free(&master->lock, lock->context)) {
++ if (drm_legacy_lock_free(&master->lock, lock->context)) {
+ /* FIXME: Should really bail out here. */
+ }
+
+@@ -249,7 +252,7 @@
+ * Marks the lock as not held, via the \p cmpxchg instruction. Wakes any task
+ * waiting on the lock queue.
+ */
+-int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context)
++int drm_legacy_lock_free(struct drm_lock_data *lock_data, unsigned int context)
+ {
+ unsigned int old, new, prev;
+ volatile unsigned int *lock = &lock_data->hw_lock->lock;
+@@ -285,26 +288,27 @@
+ * If the lock is not held, then let the signal proceed as usual. If the lock
+ * is held, then set the contended flag and keep the signal blocked.
+ *
+- * \param priv pointer to a drm_sigdata structure.
++ * \param priv pointer to a drm_device structure.
+ * \return one if the signal should be delivered normally, or zero if the
+ * signal should be blocked.
+ */
+ static int drm_notifier(void *priv)
+ {
+- struct drm_sigdata *s = (struct drm_sigdata *) priv;
++ struct drm_device *dev = priv;
++ struct drm_hw_lock *lock = dev->sigdata.lock;
+ unsigned int old, new, prev;
+
+ /* Allow signal delivery if lock isn't held */
+- if (!s->lock || !_DRM_LOCK_IS_HELD(s->lock->lock)
+- || _DRM_LOCKING_CONTEXT(s->lock->lock) != s->context)
++ if (!lock || !_DRM_LOCK_IS_HELD(lock->lock)
++ || _DRM_LOCKING_CONTEXT(lock->lock) != dev->sigdata.context)
+ return 1;
+
+ /* Otherwise, set flag to force call to
+ drmUnlock */
+ do {
+- old = s->lock->lock;
++ old = lock->lock;
+ new = old | _DRM_LOCK_CONT;
+- prev = cmpxchg(&s->lock->lock, old, new);
++ prev = cmpxchg(&lock->lock, old, new);
+ } while (prev != old);
+ return 0;
+ }
+@@ -322,7 +326,7 @@
+ * having to worry about starvation.
+ */
+
+-void drm_idlelock_take(struct drm_lock_data *lock_data)
++void drm_legacy_idlelock_take(struct drm_lock_data *lock_data)
+ {
+ int ret;
+
+@@ -339,9 +343,9 @@
+ }
+ spin_unlock_bh(&lock_data->spinlock);
+ }
+-EXPORT_SYMBOL(drm_idlelock_take);
++EXPORT_SYMBOL(drm_legacy_idlelock_take);
+
+-void drm_idlelock_release(struct drm_lock_data *lock_data)
++void drm_legacy_idlelock_release(struct drm_lock_data *lock_data)
+ {
+ unsigned int old, prev;
+ volatile unsigned int *lock = &lock_data->hw_lock->lock;
+@@ -359,9 +363,10 @@
+ }
+ spin_unlock_bh(&lock_data->spinlock);
+ }
+-EXPORT_SYMBOL(drm_idlelock_release);
++EXPORT_SYMBOL(drm_legacy_idlelock_release);
+
+-int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv)
++int drm_legacy_i_have_hw_lock(struct drm_device *dev,
++ struct drm_file *file_priv)
+ {
+ struct drm_master *master = file_priv->master;
+ return (file_priv->lock_count && master->lock.hw_lock &&
+diff -Naur a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c
+--- a/drivers/gpu/drm/drm_memory.c 2015-03-26 14:43:30.402436435 +0530
++++ b/drivers/gpu/drm/drm_memory.c 2015-03-26 14:42:38.730435422 +0530
+@@ -36,8 +36,20 @@
+ #include <linux/highmem.h>
+ #include <linux/export.h>
+ #include <drm/drmP.h>
++#include "drm_legacy.h"
+
+ #if __OS_HAS_AGP
++
++#ifdef HAVE_PAGE_AGP
++# include <asm/agp.h>
++#else
++# ifdef __powerpc__
++# define PAGE_AGP __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
++# else
++# define PAGE_AGP PAGE_KERNEL
++# endif
++#endif
++
+ static void *agp_remap(unsigned long offset, unsigned long size,
+ struct drm_device * dev)
+ {
+@@ -108,25 +120,25 @@
+
+ #endif /* agp */
+
+-void drm_core_ioremap(struct drm_local_map *map, struct drm_device *dev)
++void drm_legacy_ioremap(struct drm_local_map *map, struct drm_device *dev)
+ {
+ if (dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
+ map->handle = agp_remap(map->offset, map->size, dev);
+ else
+ map->handle = ioremap(map->offset, map->size);
+ }
+-EXPORT_SYMBOL(drm_core_ioremap);
++EXPORT_SYMBOL(drm_legacy_ioremap);
+
+-void drm_core_ioremap_wc(struct drm_local_map *map, struct drm_device *dev)
++void drm_legacy_ioremap_wc(struct drm_local_map *map, struct drm_device *dev)
+ {
+ if (dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
+ map->handle = agp_remap(map->offset, map->size, dev);
+ else
+ map->handle = ioremap_wc(map->offset, map->size);
+ }
+-EXPORT_SYMBOL(drm_core_ioremap_wc);
++EXPORT_SYMBOL(drm_legacy_ioremap_wc);
+
+-void drm_core_ioremapfree(struct drm_local_map *map, struct drm_device *dev)
++void drm_legacy_ioremapfree(struct drm_local_map *map, struct drm_device *dev)
+ {
+ if (!map->handle || !map->size)
+ return;
+@@ -136,4 +148,4 @@
+ else
+ iounmap(map->handle);
+ }
+-EXPORT_SYMBOL(drm_core_ioremapfree);
++EXPORT_SYMBOL(drm_legacy_ioremapfree);
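The drm_memory.c hunk is a pure rename of the legacy map helpers, so callers only need one-line substitutions. A sketch of the before/after for a driver mapping a register BAR through a drm_local_map, with the map setup elided:

    drm_legacy_ioremap_wc(map, dev);    /* was drm_core_ioremap_wc(map, dev) */
    if (!map->handle)
            return -ENOMEM;

    /* ... access registers through map->handle ... */

    drm_legacy_ioremapfree(map, dev);   /* was drm_core_ioremapfree(map, dev) */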
+diff -Naur a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
+--- a/drivers/gpu/drm/drm_mipi_dsi.c 2015-03-26 14:43:30.410436435 +0530
++++ b/drivers/gpu/drm/drm_mipi_dsi.c 2015-03-26 14:42:38.730435422 +0530
+@@ -35,6 +35,16 @@
+
+ #include <video/mipi_display.h>
+
++/**
++ * DOC: dsi helpers
++ *
++ * These functions contain some common logic and helpers to deal with MIPI DSI
++ * peripherals.
++ *
++ * Helpers are provided for a number of standard MIPI DSI commands as well as a
++ * subset of the MIPI DCS command set.
++ */
++
+ static int mipi_dsi_device_match(struct device *dev, struct device_driver *drv)
+ {
+ return of_driver_match_device(dev, drv);
+@@ -57,6 +67,29 @@
+ .pm = &mipi_dsi_device_pm_ops,
+ };
+
++static int of_device_match(struct device *dev, void *data)
++{
++ return dev->of_node == data;
++}
++
++/**
++ * of_find_mipi_dsi_device_by_node() - find the MIPI DSI device matching a
++ * device tree node
++ * @np: device tree node
++ *
++ * Return: A pointer to the MIPI DSI device corresponding to @np or NULL if no
++ * such device exists (or has not been registered yet).
++ */
++struct mipi_dsi_device *of_find_mipi_dsi_device_by_node(struct device_node *np)
++{
++ struct device *dev;
++
++ dev = bus_find_device(&mipi_dsi_bus_type, NULL, np, of_device_match);
++
++ return dev ? to_mipi_dsi_device(dev) : NULL;
++}
++EXPORT_SYMBOL(of_find_mipi_dsi_device_by_node);
++
+ static void mipi_dsi_dev_release(struct device *dev)
+ {
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev);
+@@ -142,8 +175,12 @@
+ {
+ struct device_node *node;
+
+- for_each_available_child_of_node(host->dev->of_node, node)
++ for_each_available_child_of_node(host->dev->of_node, node) {
++ /* skip nodes without reg property */
++ if (!of_find_property(node, "reg", NULL))
++ continue;
+ of_mipi_dsi_device_add(host, node);
++ }
+
+ return 0;
+ }
+@@ -194,60 +231,353 @@
+ }
+ EXPORT_SYMBOL(mipi_dsi_detach);
+
++static ssize_t mipi_dsi_device_transfer(struct mipi_dsi_device *dsi,
++ struct mipi_dsi_msg *msg)
++{
++ const struct mipi_dsi_host_ops *ops = dsi->host->ops;
++
++ if (!ops || !ops->transfer)
++ return -ENOSYS;
++
++ if (dsi->mode_flags & MIPI_DSI_MODE_LPM)
++ msg->flags |= MIPI_DSI_MSG_USE_LPM;
++
++ return ops->transfer(dsi->host, msg);
++}
++
+ /**
+- * mipi_dsi_dcs_write - send DCS write command
+- * @dsi: DSI device
+- * @channel: virtual channel
+- * @data: pointer to the command followed by parameters
+- * @len: length of @data
++ * mipi_dsi_packet_format_is_short - check if a packet is of the short format
++ * @type: MIPI DSI data type of the packet
++ *
++ * Return: true if the packet for the given data type is a short packet, false
++ * otherwise.
+ */
+-int mipi_dsi_dcs_write(struct mipi_dsi_device *dsi, unsigned int channel,
+- const void *data, size_t len)
++bool mipi_dsi_packet_format_is_short(u8 type)
+ {
+- const struct mipi_dsi_host_ops *ops = dsi->host->ops;
++ switch (type) {
++ case MIPI_DSI_V_SYNC_START:
++ case MIPI_DSI_V_SYNC_END:
++ case MIPI_DSI_H_SYNC_START:
++ case MIPI_DSI_H_SYNC_END:
++ case MIPI_DSI_END_OF_TRANSMISSION:
++ case MIPI_DSI_COLOR_MODE_OFF:
++ case MIPI_DSI_COLOR_MODE_ON:
++ case MIPI_DSI_SHUTDOWN_PERIPHERAL:
++ case MIPI_DSI_TURN_ON_PERIPHERAL:
++ case MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM:
++ case MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM:
++ case MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM:
++ case MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM:
++ case MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM:
++ case MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM:
++ case MIPI_DSI_DCS_SHORT_WRITE:
++ case MIPI_DSI_DCS_SHORT_WRITE_PARAM:
++ case MIPI_DSI_DCS_READ:
++ case MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE:
++ return true;
++ }
++
++ return false;
++}
++EXPORT_SYMBOL(mipi_dsi_packet_format_is_short);
++
++/**
++ * mipi_dsi_packet_format_is_long - check if a packet is of the long format
++ * @type: MIPI DSI data type of the packet
++ *
++ * Return: true if the packet for the given data type is a long packet, false
++ * otherwise.
++ */
++bool mipi_dsi_packet_format_is_long(u8 type)
++{
++ switch (type) {
++ case MIPI_DSI_NULL_PACKET:
++ case MIPI_DSI_BLANKING_PACKET:
++ case MIPI_DSI_GENERIC_LONG_WRITE:
++ case MIPI_DSI_DCS_LONG_WRITE:
++ case MIPI_DSI_LOOSELY_PACKED_PIXEL_STREAM_YCBCR20:
++ case MIPI_DSI_PACKED_PIXEL_STREAM_YCBCR24:
++ case MIPI_DSI_PACKED_PIXEL_STREAM_YCBCR16:
++ case MIPI_DSI_PACKED_PIXEL_STREAM_30:
++ case MIPI_DSI_PACKED_PIXEL_STREAM_36:
++ case MIPI_DSI_PACKED_PIXEL_STREAM_YCBCR12:
++ case MIPI_DSI_PACKED_PIXEL_STREAM_16:
++ case MIPI_DSI_PACKED_PIXEL_STREAM_18:
++ case MIPI_DSI_PIXEL_STREAM_3BYTE_18:
++ case MIPI_DSI_PACKED_PIXEL_STREAM_24:
++ return true;
++ }
++
++ return false;
++}
++EXPORT_SYMBOL(mipi_dsi_packet_format_is_long);
++
++/**
++ * mipi_dsi_create_packet - create a packet from a message according to the
++ * DSI protocol
++ * @packet: pointer to a DSI packet structure
++ * @msg: message to translate into a packet
++ *
++ * Return: 0 on success or a negative error code on failure.
++ */
++int mipi_dsi_create_packet(struct mipi_dsi_packet *packet,
++ const struct mipi_dsi_msg *msg)
++{
++ const u8 *tx = msg->tx_buf;
++
++ if (!packet || !msg)
++ return -EINVAL;
++
++ /* do some minimum sanity checking */
++ if (!mipi_dsi_packet_format_is_short(msg->type) &&
++ !mipi_dsi_packet_format_is_long(msg->type))
++ return -EINVAL;
++
++ if (msg->channel > 3)
++ return -EINVAL;
++
++ memset(packet, 0, sizeof(*packet));
++ packet->header[0] = ((msg->channel & 0x3) << 6) | (msg->type & 0x3f);
++
++ /* TODO: compute ECC if hardware support is not available */
++
++ /*
++ * Long write packets contain the word count in header bytes 1 and 2.
++ * The payload follows the header and is word count bytes long.
++ *
++ * Short write packets encode up to two parameters in header bytes 1
++ * and 2.
++ */
++ if (mipi_dsi_packet_format_is_long(msg->type)) {
++ packet->header[1] = (msg->tx_len >> 0) & 0xff;
++ packet->header[2] = (msg->tx_len >> 8) & 0xff;
++
++ packet->payload_length = msg->tx_len;
++ packet->payload = tx;
++ } else {
++ packet->header[1] = (msg->tx_len > 0) ? tx[0] : 0;
++ packet->header[2] = (msg->tx_len > 1) ? tx[1] : 0;
++ }
++
++ packet->size = sizeof(packet->header) + packet->payload_length;
++
++ return 0;
++}
++EXPORT_SYMBOL(mipi_dsi_create_packet);
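++
++/*
++ * Editorial sketch, not part of this patch: a host driver's ->transfer()
++ * hook might use mipi_dsi_create_packet() to split a message into the
++ * 4-byte header and optional payload. example_hw_send() is a hypothetical
++ * stand-in for the FIFO accessor of a real host controller.
++ */
++static void example_hw_send(const u8 *buf, size_t len);
++
++static ssize_t example_host_transfer(struct mipi_dsi_host *host,
++				     const struct mipi_dsi_msg *msg)
++{
++	struct mipi_dsi_packet packet;
++	int ret;
++
++	ret = mipi_dsi_create_packet(&packet, msg);
++	if (ret < 0)
++		return ret;
++
++	example_hw_send(packet.header, sizeof(packet.header));
++	if (packet.payload_length)
++		example_hw_send(packet.payload, packet.payload_length);
++
++	return msg->tx_len;
++}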
++
++/**
++ * mipi_dsi_set_maximum_return_packet_size() - specify the maximum size of
++ * the payload in a long packet transmitted from the peripheral back to the
++ * host processor
++ * @dsi: DSI peripheral device
++ * @value: the maximum size of the payload
++ *
++ * Return: 0 on success or a negative error code on failure.
++ */
++int mipi_dsi_set_maximum_return_packet_size(struct mipi_dsi_device *dsi,
++ u16 value)
++{
++ u8 tx[2] = { value & 0xff, value >> 8 };
+ struct mipi_dsi_msg msg = {
+- .channel = channel,
++ .channel = dsi->channel,
++ .type = MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE,
++ .tx_len = sizeof(tx),
++ .tx_buf = tx,
++ };
++
++ return mipi_dsi_device_transfer(dsi, &msg);
++}
++EXPORT_SYMBOL(mipi_dsi_set_maximum_return_packet_size);
++
++/**
++ * mipi_dsi_generic_write() - transmit data using a generic write packet
++ * @dsi: DSI peripheral device
++ * @payload: buffer containing the payload
++ * @size: size of payload buffer
++ *
++ * This function will automatically choose the right data type depending on
++ * the payload length.
++ *
++ * Return: The number of bytes transmitted on success or a negative error code
++ * on failure.
++ */
++ssize_t mipi_dsi_generic_write(struct mipi_dsi_device *dsi, const void *payload,
++ size_t size)
++{
++ struct mipi_dsi_msg msg = {
++ .channel = dsi->channel,
++ .tx_buf = payload,
++ .tx_len = size
++ };
++
++ switch (size) {
++ case 0:
++ msg.type = MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM;
++ break;
++
++ case 1:
++ msg.type = MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM;
++ break;
++
++ case 2:
++ msg.type = MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM;
++ break;
++
++ default:
++ msg.type = MIPI_DSI_GENERIC_LONG_WRITE;
++ break;
++ }
++
++ return mipi_dsi_device_transfer(dsi, &msg);
++}
++EXPORT_SYMBOL(mipi_dsi_generic_write);
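++
++/*
++ * Editorial sketch, not part of this patch: with a three-byte payload the
++ * helper above automatically selects MIPI_DSI_GENERIC_LONG_WRITE. The
++ * payload bytes are arbitrary example values, not a real panel command.
++ */
++static int example_generic_write(struct mipi_dsi_device *dsi)
++{
++	static const u8 payload[3] = { 0xf0, 0x5a, 0x5a };
++	ssize_t err;
++
++	err = mipi_dsi_generic_write(dsi, payload, sizeof(payload));
++
++	return err < 0 ? err : 0;
++}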
++
++/**
++ * mipi_dsi_generic_read() - receive data using a generic read packet
++ * @dsi: DSI peripheral device
++ * @params: buffer containing the request parameters
++ * @num_params: number of request parameters
++ * @data: buffer in which to return the received data
++ * @size: size of receive buffer
++ *
++ * This function will automatically choose the right data type depending on
++ * the number of parameters passed in.
++ *
++ * Return: The number of bytes successfully read or a negative error code on
++ * failure.
++ */
++ssize_t mipi_dsi_generic_read(struct mipi_dsi_device *dsi, const void *params,
++ size_t num_params, void *data, size_t size)
++{
++ struct mipi_dsi_msg msg = {
++ .channel = dsi->channel,
++ .tx_len = num_params,
++ .tx_buf = params,
++ .rx_len = size,
++ .rx_buf = data
++ };
++
++ switch (num_params) {
++ case 0:
++ msg.type = MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM;
++ break;
++
++ case 1:
++ msg.type = MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM;
++ break;
++
++ case 2:
++ msg.type = MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM;
++ break;
++
++ default:
++ return -EINVAL;
++ }
++
++ return mipi_dsi_device_transfer(dsi, &msg);
++}
++EXPORT_SYMBOL(mipi_dsi_generic_read);
++
++/**
++ * mipi_dsi_dcs_write_buffer() - transmit a DCS command with payload
++ * @dsi: DSI peripheral device
++ * @data: buffer containing data to be transmitted
++ * @len: size of transmission buffer
++ *
++ * This function will automatically choose the right data type depending on
++ * the command payload length.
++ *
++ * Return: The number of bytes successfully transmitted or a negative error
++ * code on failure.
++ */
++ssize_t mipi_dsi_dcs_write_buffer(struct mipi_dsi_device *dsi,
++ const void *data, size_t len)
++{
++ struct mipi_dsi_msg msg = {
++ .channel = dsi->channel,
+ .tx_buf = data,
+ .tx_len = len
+ };
+
+- if (!ops || !ops->transfer)
+- return -ENOSYS;
+-
+ switch (len) {
+ case 0:
+ return -EINVAL;
++
+ case 1:
+ msg.type = MIPI_DSI_DCS_SHORT_WRITE;
+ break;
++
+ case 2:
+ msg.type = MIPI_DSI_DCS_SHORT_WRITE_PARAM;
+ break;
++
+ default:
+ msg.type = MIPI_DSI_DCS_LONG_WRITE;
+ break;
+ }
+
+- return ops->transfer(dsi->host, &msg);
++ return mipi_dsi_device_transfer(dsi, &msg);
++}
++EXPORT_SYMBOL(mipi_dsi_dcs_write_buffer);
++
++/**
++ * mipi_dsi_dcs_write() - send DCS write command
++ * @dsi: DSI peripheral device
++ * @cmd: DCS command
++ * @data: buffer containing the command payload
++ * @len: command payload length
++ *
++ * This function will automatically choose the right data type depending on
++ * the command payload length.
++ *
++ * Return: The number of bytes successfully transmitted or a negative error
++ * code on failure.
++ */
++ssize_t mipi_dsi_dcs_write(struct mipi_dsi_device *dsi, u8 cmd,
++ const void *data, size_t len)
++{
++ ssize_t err;
++ size_t size;
++ u8 *tx;
++
++ if (len > 0) {
++ size = 1 + len;
++
++ tx = kmalloc(size, GFP_KERNEL);
++ if (!tx)
++ return -ENOMEM;
++
++ /* concatenate the DCS command byte and the payload */
++ tx[0] = cmd;
++ memcpy(&tx[1], data, len);
++ } else {
++ tx = &cmd;
++ size = 1;
++ }
++
++ err = mipi_dsi_dcs_write_buffer(dsi, tx, size);
++
++ if (len > 0)
++ kfree(tx);
++
++ return err;
+ }
+ EXPORT_SYMBOL(mipi_dsi_dcs_write);
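++
++/*
++ * Editorial sketch, not part of this patch: a one-parameter DCS write.
++ * 0x51 is the DCS "set display brightness" opcode from the MIPI DCS
++ * specification; the raw value is used here because the symbolic constant
++ * may not exist in this kernel's <video/mipi_display.h>.
++ */
++static int example_set_brightness(struct mipi_dsi_device *dsi, u8 level)
++{
++	ssize_t err;
++
++	err = mipi_dsi_dcs_write(dsi, 0x51, &level, sizeof(level));
++
++	return err < 0 ? err : 0;
++}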
+
+ /**
+- * mipi_dsi_dcs_read - send DCS read request command
+- * @dsi: DSI device
+- * @channel: virtual channel
+- * @cmd: DCS read command
+- * @data: pointer to read buffer
+- * @len: length of @data
++ * mipi_dsi_dcs_read() - send DCS read request command
++ * @dsi: DSI peripheral device
++ * @cmd: DCS command
++ * @data: buffer in which to receive data
++ * @len: size of receive buffer
+ *
+- * Function returns number of read bytes or error code.
++ * Return: The number of bytes read or a negative error code on failure.
+ */
+-ssize_t mipi_dsi_dcs_read(struct mipi_dsi_device *dsi, unsigned int channel,
+- u8 cmd, void *data, size_t len)
++ssize_t mipi_dsi_dcs_read(struct mipi_dsi_device *dsi, u8 cmd, void *data,
++ size_t len)
+ {
+- const struct mipi_dsi_host_ops *ops = dsi->host->ops;
+ struct mipi_dsi_msg msg = {
+- .channel = channel,
++ .channel = dsi->channel,
+ .type = MIPI_DSI_DCS_READ,
+ .tx_buf = &cmd,
+ .tx_len = 1,
+@@ -255,13 +585,283 @@
+ .rx_len = len
+ };
+
+- if (!ops || !ops->transfer)
+- return -ENOSYS;
+-
+- return ops->transfer(dsi->host, &msg);
++ return mipi_dsi_device_transfer(dsi, &msg);
+ }
+ EXPORT_SYMBOL(mipi_dsi_dcs_read);
+
++/**
++ * mipi_dsi_dcs_nop() - send DCS nop packet
++ * @dsi: DSI peripheral device
++ *
++ * Return: 0 on success or a negative error code on failure.
++ */
++int mipi_dsi_dcs_nop(struct mipi_dsi_device *dsi)
++{
++ ssize_t err;
++
++ err = mipi_dsi_dcs_write(dsi, MIPI_DCS_NOP, NULL, 0);
++ if (err < 0)
++ return err;
++
++ return 0;
++}
++EXPORT_SYMBOL(mipi_dsi_dcs_nop);
++
++/**
++ * mipi_dsi_dcs_soft_reset() - perform a software reset of the display module
++ * @dsi: DSI peripheral device
++ *
++ * Return: 0 on success or a negative error code on failure.
++ */
++int mipi_dsi_dcs_soft_reset(struct mipi_dsi_device *dsi)
++{
++ ssize_t err;
++
++ err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SOFT_RESET, NULL, 0);
++ if (err < 0)
++ return err;
++
++ return 0;
++}
++EXPORT_SYMBOL(mipi_dsi_dcs_soft_reset);
++
++/**
++ * mipi_dsi_dcs_get_power_mode() - query the display module's current power
++ * mode
++ * @dsi: DSI peripheral device
++ * @mode: return location for the current power mode
++ *
++ * Return: 0 on success or a negative error code on failure.
++ */
++int mipi_dsi_dcs_get_power_mode(struct mipi_dsi_device *dsi, u8 *mode)
++{
++ ssize_t err;
++
++ err = mipi_dsi_dcs_read(dsi, MIPI_DCS_GET_POWER_MODE, mode,
++ sizeof(*mode));
++ if (err <= 0) {
++ if (err == 0)
++ err = -ENODATA;
++
++ return err;
++ }
++
++ return 0;
++}
++EXPORT_SYMBOL(mipi_dsi_dcs_get_power_mode);
++
++/**
++ * mipi_dsi_dcs_get_pixel_format() - gets the pixel format for the RGB image
++ * data used by the interface
++ * @dsi: DSI peripheral device
++ * @format: return location for the pixel format
++ *
++ * Return: 0 on success or a negative error code on failure.
++ */
++int mipi_dsi_dcs_get_pixel_format(struct mipi_dsi_device *dsi, u8 *format)
++{
++ ssize_t err;
++
++ err = mipi_dsi_dcs_read(dsi, MIPI_DCS_GET_PIXEL_FORMAT, format,
++ sizeof(*format));
++ if (err <= 0) {
++ if (err == 0)
++ err = -ENODATA;
++
++ return err;
++ }
++
++ return 0;
++}
++EXPORT_SYMBOL(mipi_dsi_dcs_get_pixel_format);
++
++/**
++ * mipi_dsi_dcs_enter_sleep_mode() - disable all unnecessary blocks inside the
++ * display module except interface communication
++ * @dsi: DSI peripheral device
++ *
++ * Return: 0 on success or a negative error code on failure.
++ */
++int mipi_dsi_dcs_enter_sleep_mode(struct mipi_dsi_device *dsi)
++{
++ ssize_t err;
++
++ err = mipi_dsi_dcs_write(dsi, MIPI_DCS_ENTER_SLEEP_MODE, NULL, 0);
++ if (err < 0)
++ return err;
++
++ return 0;
++}
++EXPORT_SYMBOL(mipi_dsi_dcs_enter_sleep_mode);
++
++/**
++ * mipi_dsi_dcs_exit_sleep_mode() - enable all blocks inside the display
++ * module
++ * @dsi: DSI peripheral device
++ *
++ * Return: 0 on success or a negative error code on failure.
++ */
++int mipi_dsi_dcs_exit_sleep_mode(struct mipi_dsi_device *dsi)
++{
++ ssize_t err;
++
++ err = mipi_dsi_dcs_write(dsi, MIPI_DCS_EXIT_SLEEP_MODE, NULL, 0);
++ if (err < 0)
++ return err;
++
++ return 0;
++}
++EXPORT_SYMBOL(mipi_dsi_dcs_exit_sleep_mode);
++
++/**
++ * mipi_dsi_dcs_set_display_off() - stop displaying the image data on the
++ * display device
++ * @dsi: DSI peripheral device
++ *
++ * Return: 0 on success or a negative error code on failure.
++ */
++int mipi_dsi_dcs_set_display_off(struct mipi_dsi_device *dsi)
++{
++ ssize_t err;
++
++ err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_DISPLAY_OFF, NULL, 0);
++ if (err < 0)
++ return err;
++
++ return 0;
++}
++EXPORT_SYMBOL(mipi_dsi_dcs_set_display_off);
++
++/**
++ * mipi_dsi_dcs_set_display_on() - start displaying the image data on the
++ * display device
++ * @dsi: DSI peripheral device
++ *
++ * Return: 0 on success or a negative error code on failure
++ */
++int mipi_dsi_dcs_set_display_on(struct mipi_dsi_device *dsi)
++{
++ ssize_t err;
++
++ err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_DISPLAY_ON, NULL, 0);
++ if (err < 0)
++ return err;
++
++ return 0;
++}
++EXPORT_SYMBOL(mipi_dsi_dcs_set_display_on);
++
++/**
++ * mipi_dsi_dcs_set_column_address() - define the column extent of the frame
++ * memory accessed by the host processor
++ * @dsi: DSI peripheral device
++ * @start: first column of frame memory
++ * @end: last column of frame memory
++ *
++ * Return: 0 on success or a negative error code on failure.
++ */
++int mipi_dsi_dcs_set_column_address(struct mipi_dsi_device *dsi, u16 start,
++ u16 end)
++{
++ u8 payload[4] = { start >> 8, start & 0xff, end >> 8, end & 0xff };
++ ssize_t err;
++
++ err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_COLUMN_ADDRESS, payload,
++ sizeof(payload));
++ if (err < 0)
++ return err;
++
++ return 0;
++}
++EXPORT_SYMBOL(mipi_dsi_dcs_set_column_address);
++
++/**
++ * mipi_dsi_dcs_set_page_address() - define the page extent of the frame
++ * memory accessed by the host processor
++ * @dsi: DSI peripheral device
++ * @start: first page of frame memory
++ * @end: last page of frame memory
++ *
++ * Return: 0 on success or a negative error code on failure.
++ */
++int mipi_dsi_dcs_set_page_address(struct mipi_dsi_device *dsi, u16 start,
++ u16 end)
++{
++ u8 payload[4] = { start >> 8, start & 0xff, end >> 8, end & 0xff };
++ ssize_t err;
++
++ err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_PAGE_ADDRESS, payload,
++ sizeof(payload));
++ if (err < 0)
++ return err;
++
++ return 0;
++}
++EXPORT_SYMBOL(mipi_dsi_dcs_set_page_address);
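++
++/*
++ * Editorial sketch, not part of this patch: programming a full-panel
++ * update window with the two helpers above before transferring frame
++ * data. width/height are hypothetical panel dimensions in pixels.
++ */
++static int example_set_window(struct mipi_dsi_device *dsi, u16 width,
++			      u16 height)
++{
++	int ret;
++
++	ret = mipi_dsi_dcs_set_column_address(dsi, 0, width - 1);
++	if (ret < 0)
++		return ret;
++
++	return mipi_dsi_dcs_set_page_address(dsi, 0, height - 1);
++}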
++
++/**
++ * mipi_dsi_dcs_set_tear_off() - turn off the display module's Tearing Effect
++ * output signal on the TE signal line
++ * @dsi: DSI peripheral device
++ *
++ * Return: 0 on success or a negative error code on failure
++ */
++int mipi_dsi_dcs_set_tear_off(struct mipi_dsi_device *dsi)
++{
++ ssize_t err;
++
++ err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_TEAR_OFF, NULL, 0);
++ if (err < 0)
++ return err;
++
++ return 0;
++}
++EXPORT_SYMBOL(mipi_dsi_dcs_set_tear_off);
++
++/**
++ * mipi_dsi_dcs_set_tear_on() - turn on the display module's Tearing Effect
++ * output signal on the TE signal line.
++ * @dsi: DSI peripheral device
++ * @mode: the Tearing Effect Output Line mode
++ *
++ * Return: 0 on success or a negative error code on failure
++ */
++int mipi_dsi_dcs_set_tear_on(struct mipi_dsi_device *dsi,
++ enum mipi_dsi_dcs_tear_mode mode)
++{
++ u8 value = mode;
++ ssize_t err;
++
++ err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_TEAR_ON, &value,
++ sizeof(value));
++ if (err < 0)
++ return err;
++
++ return 0;
++}
++EXPORT_SYMBOL(mipi_dsi_dcs_set_tear_on);
++
++/**
++ * mipi_dsi_dcs_set_pixel_format() - sets the pixel format for the RGB image
++ * data used by the interface
++ * @dsi: DSI peripheral device
++ * @format: pixel format
++ *
++ * Return: 0 on success or a negative error code on failure.
++ */
++int mipi_dsi_dcs_set_pixel_format(struct mipi_dsi_device *dsi, u8 format)
++{
++ ssize_t err;
++
++ err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_PIXEL_FORMAT, &format,
++ sizeof(format));
++ if (err < 0)
++ return err;
++
++ return 0;
++}
++EXPORT_SYMBOL(mipi_dsi_dcs_set_pixel_format);
++
+ static int mipi_dsi_drv_probe(struct device *dev)
+ {
+ struct mipi_dsi_driver *drv = to_mipi_dsi_driver(dev->driver);
+@@ -278,25 +878,43 @@
+ return drv->remove(dsi);
+ }
+
++static void mipi_dsi_drv_shutdown(struct device *dev)
++{
++ struct mipi_dsi_driver *drv = to_mipi_dsi_driver(dev->driver);
++ struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev);
++
++ drv->shutdown(dsi);
++}
++
+ /**
+- * mipi_dsi_driver_register - register a driver for DSI devices
++ * mipi_dsi_driver_register_full() - register a driver for DSI devices
+ * @drv: DSI driver structure
++ * @owner: owner module
++ *
++ * Return: 0 on success or a negative error code on failure.
+ */
+-int mipi_dsi_driver_register(struct mipi_dsi_driver *drv)
++int mipi_dsi_driver_register_full(struct mipi_dsi_driver *drv,
++ struct module *owner)
+ {
+ drv->driver.bus = &mipi_dsi_bus_type;
++ drv->driver.owner = owner;
++
+ if (drv->probe)
+ drv->driver.probe = mipi_dsi_drv_probe;
+ if (drv->remove)
+ drv->driver.remove = mipi_dsi_drv_remove;
++ if (drv->shutdown)
++ drv->driver.shutdown = mipi_dsi_drv_shutdown;
+
+ return driver_register(&drv->driver);
+ }
+-EXPORT_SYMBOL(mipi_dsi_driver_register);
++EXPORT_SYMBOL(mipi_dsi_driver_register_full);
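++
++/*
++ * Editorial note, not part of this patch: callers are not expected to
++ * invoke the _full variant directly. The matching drm_mipi_dsi.h header
++ * is assumed to wrap it so the owner module is filled in automatically,
++ * along the lines of:
++ *
++ *	#define mipi_dsi_driver_register(driver) \
++ *		mipi_dsi_driver_register_full(driver, THIS_MODULE)
++ */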
+
+ /**
+- * mipi_dsi_driver_unregister - unregister a driver for DSI devices
++ * mipi_dsi_driver_unregister() - unregister a driver for DSI devices
+ * @drv: DSI driver structure
+ */
+ void mipi_dsi_driver_unregister(struct mipi_dsi_driver *drv)
+ {
+diff -Naur a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
+--- a/drivers/gpu/drm/drm_mm.c 2015-03-26 14:43:30.402436435 +0530
++++ b/drivers/gpu/drm/drm_mm.c 2015-03-26 14:42:38.730435422 +0530
+@@ -47,7 +47,48 @@
+ #include <linux/seq_file.h>
+ #include <linux/export.h>
+
+-#define MM_UNUSED_TARGET 4
++/**
++ * DOC: Overview
++ *
++ * drm_mm provides a simple range allocator. The drivers are free to use the
++ * resource allocator from the linux core if it suits them, the upside of drm_mm
++ * is that it's in the DRM core. Which means that it's easier to extend for
++ * some of the crazier special purpose needs of gpus.
++ *
++ * The main data struct is &drm_mm, allocations are tracked in &drm_mm_node.
++ * Drivers are free to embed either of them into their own suitable
++ * datastructures. drm_mm itself will not do any allocations of its own, so if
++ * drivers choose not to embed nodes they need to still allocate them
++ * themselves.
++ *
++ * The range allocator also supports reservation of preallocated blocks. This is
++ * useful for taking over initial mode setting configurations from the firmware,
++ * where an object needs to be created which exactly matches the firmware's
++ * scanout target. As long as the range is still free it can be inserted anytime
++ * after the allocator is initialized, which helps with avoiding looped
++ * dependencies in the driver load sequence.
++ *
++ * drm_mm maintains a stack of most recently freed holes, which of all
++ * simplistic datastructures seems to be a fairly decent approach to clustering
++ * allocations and avoiding too much fragmentation. This means free space
++ * searches are O(num_holes). Given that all the fancy features drm_mm supports
++ * something better would be fairly complex and since gfx thrashing is a fairly
++ * steep cliff not a real concern. Removing a node again is O(1).
++ *
++ * drm_mm supports a few features: Alignment and range restrictions can be
++ * supplied. Furthermore, every &drm_mm_node has a color value (which is just an
++ * opaque unsigned long) which in conjunction with a driver callback can be used
++ * to implement sophisticated placement restrictions. The i915 DRM driver uses
++ * this to implement guard pages between incompatible caching domains in the
++ * graphics TT.
++ *
++ * Two behaviors are supported for searching and allocating: bottom-up and top-down.
++ * The default is bottom-up. Top-down allocation can be used if the memory area
++ * has different restrictions, or just to reduce fragmentation.
++ *
++ * Finally iteration helpers to walk all nodes and all holes are provided as are
++ * some basic allocator dumpers for debugging.
++ */
+
+ static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
+ unsigned long size,
+@@ -65,7 +106,8 @@
+ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
+ struct drm_mm_node *node,
+ unsigned long size, unsigned alignment,
+- unsigned long color)
++ unsigned long color,
++ enum drm_mm_allocator_flags flags)
+ {
+ struct drm_mm *mm = hole_node->mm;
+ unsigned long hole_start = drm_mm_hole_node_start(hole_node);
+@@ -78,12 +120,22 @@
+ if (mm->color_adjust)
+ mm->color_adjust(hole_node, color, &adj_start, &adj_end);
+
++ if (flags & DRM_MM_CREATE_TOP)
++ adj_start = adj_end - size;
++
+ if (alignment) {
+ unsigned tmp = adj_start % alignment;
+- if (tmp)
+- adj_start += alignment - tmp;
++ if (tmp) {
++ if (flags & DRM_MM_CREATE_TOP)
++ adj_start -= tmp;
++ else
++ adj_start += alignment - tmp;
++ }
+ }
+
++ BUG_ON(adj_start < hole_start);
++ BUG_ON(adj_end > hole_end);
++
+ if (adj_start == hole_start) {
+ hole_node->hole_follows = 0;
+ list_del(&hole_node->hole_stack);
+@@ -107,6 +159,20 @@
+ }
+ }
+
++/**
++ * drm_mm_reserve_node - insert a pre-initialized node
++ * @mm: drm_mm allocator to insert @node into
++ * @node: drm_mm_node to insert
++ *
++ * This function inserts an already set-up drm_mm_node into the allocator,
++ * meaning that start, size and color must be set by the caller. This is useful
++ * to initialize the allocator with preallocated objects which must be set-up
++ * before the range allocator can be set-up, e.g. when taking over a firmware
++ * framebuffer.
++ *
++ * Returns:
++ * 0 on success, -ENOSPC if there's no hole where @node is.
++ */
+ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
+ {
+ struct drm_mm_node *hole;
+@@ -141,30 +207,39 @@
+ return 0;
+ }
+
+- WARN(1, "no hole found for node 0x%lx + 0x%lx\n",
+- node->start, node->size);
+ return -ENOSPC;
+ }
+ EXPORT_SYMBOL(drm_mm_reserve_node);
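++
++/*
++ * Editorial sketch, not part of this patch: taking over a firmware
++ * framebuffer as described above. base and size would come from the
++ * firmware scanout configuration; they are hypothetical here.
++ */
++static int example_reserve_fw_fb(struct drm_mm *mm, struct drm_mm_node *node,
++				 unsigned long base, unsigned long size)
++{
++	memset(node, 0, sizeof(*node));
++	node->start = base;
++	node->size = size;
++
++	return drm_mm_reserve_node(mm, node); /* -ENOSPC if range in use */
++}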
+
+ /**
+- * Search for free space and insert a preallocated memory node. Returns
+- * -ENOSPC if no suitable free area is available. The preallocated memory node
+- * must be cleared.
++ * drm_mm_insert_node_generic - search for space and insert @node
++ * @mm: drm_mm to allocate from
++ * @node: preallocate node to insert
++ * @size: size of the allocation
++ * @alignment: alignment of the allocation
++ * @color: opaque tag value to use for this node
++ * @sflags: flags to fine-tune the allocation search
++ * @aflags: flags to fine-tune the allocation behavior
++ *
++ * The preallocated node must be cleared to 0.
++ *
++ * Returns:
++ * 0 on success, -ENOSPC if there's no suitable hole.
+ */
+ int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
+ unsigned long size, unsigned alignment,
+ unsigned long color,
+- enum drm_mm_search_flags flags)
++ enum drm_mm_search_flags sflags,
++ enum drm_mm_allocator_flags aflags)
+ {
+ struct drm_mm_node *hole_node;
+
+ hole_node = drm_mm_search_free_generic(mm, size, alignment,
+- color, flags);
++ color, sflags);
+ if (!hole_node)
+ return -ENOSPC;
+
+- drm_mm_insert_helper(hole_node, node, size, alignment, color);
++ drm_mm_insert_helper(hole_node, node, size, alignment, color, aflags);
+ return 0;
+ }
+ EXPORT_SYMBOL(drm_mm_insert_node_generic);
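++
++/*
++ * Editorial sketch, not part of this patch: the typical embed-and-insert
++ * usage pattern. struct example_buffer is a hypothetical driver object
++ * embedding its drm_mm_node, so no separate node allocation is needed.
++ */
++struct example_buffer {
++	struct drm_mm_node node;
++};
++
++static int example_alloc(struct drm_mm *mm, struct example_buffer *buf,
++			 unsigned long size)
++{
++	/* the node must be zeroed before insertion */
++	memset(&buf->node, 0, sizeof(buf->node));
++
++	return drm_mm_insert_node_generic(mm, &buf->node, size, 0, 0,
++					  DRM_MM_SEARCH_DEFAULT,
++					  DRM_MM_CREATE_DEFAULT);
++}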
+@@ -173,7 +248,8 @@
+ struct drm_mm_node *node,
+ unsigned long size, unsigned alignment,
+ unsigned long color,
+- unsigned long start, unsigned long end)
++ unsigned long start, unsigned long end,
++ enum drm_mm_allocator_flags flags)
+ {
+ struct drm_mm *mm = hole_node->mm;
+ unsigned long hole_start = drm_mm_hole_node_start(hole_node);
+@@ -188,13 +264,20 @@
+ if (adj_end > end)
+ adj_end = end;
+
++ if (flags & DRM_MM_CREATE_TOP)
++ adj_start = adj_end - size;
++
+ if (mm->color_adjust)
+ mm->color_adjust(hole_node, color, &adj_start, &adj_end);
+
+ if (alignment) {
+ unsigned tmp = adj_start % alignment;
+- if (tmp)
+- adj_start += alignment - tmp;
++ if (tmp) {
++ if (flags & DRM_MM_CREATE_TOP)
++ adj_start -= tmp;
++ else
++ adj_start += alignment - tmp;
++ }
+ }
+
+ if (adj_start == hole_start) {
+@@ -211,6 +294,8 @@
+ INIT_LIST_HEAD(&node->hole_stack);
+ list_add(&node->node_list, &hole_node->node_list);
+
++ BUG_ON(node->start < start);
++ BUG_ON(node->start < adj_start);
+ BUG_ON(node->start + node->size > adj_end);
+ BUG_ON(node->start + node->size > end);
+
+@@ -222,32 +307,51 @@
+ }
+
+ /**
+- * Search for free space and insert a preallocated memory node. Returns
+- * -ENOSPC if no suitable free area is available. This is for range
+- * restricted allocations. The preallocated memory node must be cleared.
++ * drm_mm_insert_node_in_range_generic - ranged search for space and insert @node
++ * @mm: drm_mm to allocate from
++ * @node: preallocate node to insert
++ * @size: size of the allocation
++ * @alignment: alignment of the allocation
++ * @color: opaque tag value to use for this node
++ * @start: start of the allowed range for this node
++ * @end: end of the allowed range for this node
++ * @sflags: flags to fine-tune the allocation search
++ * @aflags: flags to fine-tune the allocation behavior
++ *
++ * The preallocated node must be cleared to 0.
++ *
++ * Returns:
++ * 0 on success, -ENOSPC if there's no suitable hole.
+ */
+ int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
+- unsigned long size, unsigned alignment, unsigned long color,
++ unsigned long size, unsigned alignment,
++ unsigned long color,
+ unsigned long start, unsigned long end,
+- enum drm_mm_search_flags flags)
++ enum drm_mm_search_flags sflags,
++ enum drm_mm_allocator_flags aflags)
+ {
+ struct drm_mm_node *hole_node;
+
+ hole_node = drm_mm_search_free_in_range_generic(mm,
+ size, alignment, color,
+- start, end, flags);
++ start, end, sflags);
+ if (!hole_node)
+ return -ENOSPC;
+
+ drm_mm_insert_helper_range(hole_node, node,
+ size, alignment, color,
+- start, end);
++ start, end, aflags);
+ return 0;
+ }
+ EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);
+
+ /**
+- * Remove a memory node from the allocator.
++ * drm_mm_remove_node - Remove a memory node from the allocator.
++ * @node: drm_mm_node to remove
++ *
++ * This just removes a node from its drm_mm allocator. The node does not need to
++ * be cleared again before it can be re-inserted into this or any other drm_mm
++ * allocator. It is a bug to call this function on an unallocated node.
+ */
+ void drm_mm_remove_node(struct drm_mm_node *node)
+ {
+@@ -315,7 +419,10 @@
+ best = NULL;
+ best_size = ~0UL;
+
+- drm_mm_for_each_hole(entry, mm, adj_start, adj_end) {
++ __drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
++ flags & DRM_MM_SEARCH_BELOW) {
++ unsigned long hole_size = adj_end - adj_start;
++
+ if (mm->color_adjust) {
+ mm->color_adjust(entry, color, &adj_start, &adj_end);
+ if (adj_end <= adj_start)
+@@ -328,9 +435,9 @@
+ if (!(flags & DRM_MM_SEARCH_BEST))
+ return entry;
+
+- if (entry->size < best_size) {
++ if (hole_size < best_size) {
+ best = entry;
+- best_size = entry->size;
++ best_size = hole_size;
+ }
+ }
+
+@@ -356,7 +463,10 @@
+ best = NULL;
+ best_size = ~0UL;
+
+- drm_mm_for_each_hole(entry, mm, adj_start, adj_end) {
++ __drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
++ flags & DRM_MM_SEARCH_BELOW) {
++ unsigned long hole_size = adj_end - adj_start;
++
+ if (adj_start < start)
+ adj_start = start;
+ if (adj_end > end)
+@@ -374,9 +484,9 @@
+ if (!(flags & DRM_MM_SEARCH_BEST))
+ return entry;
+
+- if (entry->size < best_size) {
++ if (hole_size < best_size) {
+ best = entry;
+- best_size = entry->size;
++ best_size = hole_size;
+ }
+ }
+
+@@ -384,7 +494,13 @@
+ }
+
+ /**
+- * Moves an allocation. To be used with embedded struct drm_mm_node.
++ * drm_mm_replace_node - move an allocation from @old to @new
++ * @old: drm_mm_node to remove from the allocator
++ * @new: drm_mm_node which should inherit @old's allocation
++ *
++ * This is useful for when drivers embed the drm_mm_node structure and hence
++ * can't move allocations by reassigning pointers. It's a combination of remove
++ * and insert with the guarantee that the allocation start will match.
+ */
+ void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
+ {
+@@ -402,12 +518,46 @@
+ EXPORT_SYMBOL(drm_mm_replace_node);
+
+ /**
+- * Initializa lru scanning.
++ * DOC: lru scan roster
++ *
++ * Very often GPUs need to have contiguous allocations for a given object. When
++ * evicting objects to make space for a new one it is therefore not the most
++ * efficient approach to simply select all objects from the tail of an LRU
++ * until there's a suitable hole: especially for big objects or nodes that
++ * otherwise have special allocation constraints there's a good chance we evict
++ * lots of (smaller) objects unnecessarily.
++ *
++ * The DRM range allocator supports this use-case through the scanning
++ * interfaces. First a scan operation needs to be initialized with
++ * drm_mm_init_scan() or drm_mm_init_scan_with_range(). Then the driver adds
++ * objects to the roster (probably by walking an LRU list, but this can be
++ * freely implemented) until a suitable hole is found or there's no further
++ * evictable object.
++ *
++ * Then the driver must walk through all objects again in exactly the reverse
++ * order to restore the allocator state. Note that while the allocator is used
++ * in the scan mode no other operation is allowed.
++ *
++ * Finally the driver evicts all objects selected in the scan. Adding and
++ * removing an object is O(1), and since freeing a node is also O(1) the overall
++ * complexity is O(scanned_objects). So like the free stack which needs to be
++ * walked before a scan operation even begins this is linear in the number of
++ * objects. It doesn't seem to hurt badly.
++ */
++
++/**
++ * drm_mm_init_scan - initialize lru scanning
++ * @mm: drm_mm to scan
++ * @size: size of the allocation
++ * @alignment: alignment of the allocation
++ * @color: opaque tag value to use for the allocation
+ *
+ * This simply sets up the scanning routines with the parameters for the desired
+- * hole.
++ * hole. Note that there's no need to specify allocation flags, since they only
++ * change the place a node is allocated from within a suitable hole.
+ *
+- * Warning: As long as the scan list is non-empty, no other operations than
++ * Warning:
++ * As long as the scan list is non-empty, no other operations than
+ * adding/removing nodes to/from the scan list are allowed.
+ */
+ void drm_mm_init_scan(struct drm_mm *mm,
+@@ -427,12 +577,20 @@
+ EXPORT_SYMBOL(drm_mm_init_scan);
+
+ /**
+- * Initializa lru scanning.
++ * drm_mm_init_scan_with_range - initialize range-restricted lru scanning
++ * @mm: drm_mm to scan
++ * @size: size of the allocation
++ * @alignment: alignment of the allocation
++ * @color: opaque tag value to use for the allocation
++ * @start: start of the allowed range for the allocation
++ * @end: end of the allowed range for the allocation
+ *
+ * This simply sets up the scanning routines with the parameters for the desired
+- * hole. This version is for range-restricted scans.
++ * hole. Note that there's no need to specify allocation flags, since they only
++ * change the place a node is allocated from within a suitable hole.
+ *
+- * Warning: As long as the scan list is non-empty, no other operations than
++ * Warning:
++ * As long as the scan list is non-empty, no other operations than
+ * adding/removing nodes to/from the scan list are allowed.
+ */
+ void drm_mm_init_scan_with_range(struct drm_mm *mm,
+@@ -456,12 +614,16 @@
+ EXPORT_SYMBOL(drm_mm_init_scan_with_range);
+
+ /**
++ * drm_mm_scan_add_block - add a node to the scan list
++ * @node: drm_mm_node to add
++ *
+ * Add a node to the scan list that might be freed to make space for the desired
+ * hole.
+ *
+- * Returns non-zero, if a hole has been found, zero otherwise.
++ * Returns:
++ * True if a hole has been found, false otherwise.
+ */
+-int drm_mm_scan_add_block(struct drm_mm_node *node)
++bool drm_mm_scan_add_block(struct drm_mm_node *node)
+ {
+ struct drm_mm *mm = node->mm;
+ struct drm_mm_node *prev_node;
+@@ -501,15 +663,16 @@
+ mm->scan_size, mm->scan_alignment)) {
+ mm->scan_hit_start = hole_start;
+ mm->scan_hit_end = hole_end;
+- return 1;
++ return true;
+ }
+
+- return 0;
++ return false;
+ }
+ EXPORT_SYMBOL(drm_mm_scan_add_block);
+
+ /**
+- * Remove a node from the scan list.
++ * drm_mm_scan_remove_block - remove a node from the scan list
++ * @node: drm_mm_node to remove
+ *
+ * Nodes _must_ be removed in the exact same order from the scan list as they
+ * have been added, otherwise the internal state of the memory manager will be
+@@ -519,10 +682,11 @@
+ * immediately following drm_mm_search_free with !DRM_MM_SEARCH_BEST will then
+ * return the just freed block (because its at the top of the free_stack list).
+ *
+- * Returns one if this block should be evicted, zero otherwise. Will always
+- * return zero when no hole has been found.
++ * Returns:
++ * True if this block should be evicted, false otherwise. Will always
++ * return false when no hole has been found.
+ */
+-int drm_mm_scan_remove_block(struct drm_mm_node *node)
++bool drm_mm_scan_remove_block(struct drm_mm_node *node)
+ {
+ struct drm_mm *mm = node->mm;
+ struct drm_mm_node *prev_node;
+@@ -543,7 +707,15 @@
+ }
+ EXPORT_SYMBOL(drm_mm_scan_remove_block);
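++
++/*
++ * Editorial sketch, not part of this patch, of the eviction scan flow
++ * described in the "lru scan roster" DOC comment above. struct example_obj
++ * and its LRU list are hypothetical driver structures; only the drm_mm
++ * calls are real.
++ */
++struct example_obj {
++	struct drm_mm_node node;
++	struct list_head lru;
++};
++
++static bool example_scan_for_hole(struct drm_mm *mm, struct list_head *lru,
++				  unsigned long size, unsigned alignment)
++{
++	struct example_obj *obj, *tmp;
++	LIST_HEAD(scanned);
++	bool found = false;
++
++	drm_mm_init_scan(mm, size, alignment, 0);
++
++	/* add objects from the LRU until a suitable hole is found */
++	list_for_each_entry_safe(obj, tmp, lru, lru) {
++		list_move(&obj->lru, &scanned);
++		if (drm_mm_scan_add_block(&obj->node)) {
++			found = true;
++			break;
++		}
++	}
++
++	/* unwind: the most recently added block must be removed first */
++	list_for_each_entry_safe(obj, tmp, &scanned, lru) {
++		bool evict = drm_mm_scan_remove_block(&obj->node);
++
++		list_move(&obj->lru, lru); /* LRU ordering elided here */
++		if (evict && found)
++			pr_debug("would evict obj %p\n", obj);
++	}
++
++	return found;
++}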
+
+-int drm_mm_clean(struct drm_mm * mm)
++/**
++ * drm_mm_clean - checks whether an allocator is clean
++ * @mm: drm_mm allocator to check
++ *
++ * Returns:
++ * True if the allocator is completely free, false if there's still a node
++ * allocated in it.
++ */
++bool drm_mm_clean(struct drm_mm * mm)
+ {
+ struct list_head *head = &mm->head_node.node_list;
+
+@@ -551,6 +723,14 @@
+ }
+ EXPORT_SYMBOL(drm_mm_clean);
+
++/**
++ * drm_mm_init - initialize a drm-mm allocator
++ * @mm: the drm_mm structure to initialize
++ * @start: start of the range managed by @mm
++ * @size: size of the range managed by @mm
++ *
++ * Note that @mm must be cleared to 0 before calling this function.
++ */
+ void drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
+ {
+ INIT_LIST_HEAD(&mm->hole_stack);
+@@ -572,6 +752,13 @@
+ }
+ EXPORT_SYMBOL(drm_mm_init);
+
++/**
++ * drm_mm_takedown - clean up a drm_mm allocator
++ * @mm: drm_mm allocator to clean up
++ *
++ * Note that it is a bug to call this function on an allocator which is not
++ * clean.
++ */
+ void drm_mm_takedown(struct drm_mm * mm)
+ {
+ WARN(!list_empty(&mm->head_node.node_list),
+@@ -597,6 +784,11 @@
+ return 0;
+ }
+
++/**
++ * drm_mm_debug_table - dump allocator state to dmesg
++ * @mm: drm_mm allocator to dump
++ * @prefix: prefix to use for dumping to dmesg
++ */
+ void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
+ {
+ struct drm_mm_node *entry;
+@@ -635,6 +827,11 @@
+ return 0;
+ }
+
++/**
++ * drm_mm_dump_table - dump allocator state to a seq_file
++ * @m: seq_file to dump to
++ * @mm: drm_mm allocator to dump
++ */
+ int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
+ {
+ struct drm_mm_node *entry;
+diff -Naur a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
+--- a/drivers/gpu/drm/drm_modes.c 2015-03-26 14:43:30.410436435 +0530
++++ b/drivers/gpu/drm/drm_modes.c 2015-03-26 14:42:38.734435422 +0530
+@@ -37,15 +37,14 @@
+ #include <drm/drm_crtc.h>
+ #include <video/of_videomode.h>
+ #include <video/videomode.h>
++#include <drm/drm_modes.h>
++
++#include "drm_crtc_internal.h"
+
+ /**
+- * drm_mode_debug_printmodeline - debug print a mode
+- * @dev: DRM device
++ * drm_mode_debug_printmodeline - print a mode to dmesg
+ * @mode: mode to print
+ *
+- * LOCKING:
+- * None.
+- *
+ * Describe @mode using DRM_DEBUG.
+ */
+ void drm_mode_debug_printmodeline(const struct drm_display_mode *mode)
+@@ -61,18 +60,77 @@
+ EXPORT_SYMBOL(drm_mode_debug_printmodeline);
+
+ /**
+- * drm_cvt_mode -create a modeline based on CVT algorithm
++ * drm_mode_create - create a new display mode
+ * @dev: DRM device
+- * @hdisplay: hdisplay size
+- * @vdisplay: vdisplay size
+- * @vrefresh : vrefresh rate
+- * @reduced : Whether the GTF calculation is simplified
+- * @interlaced:Whether the interlace is supported
+ *
+- * LOCKING:
+- * none.
++ * Create a new, cleared drm_display_mode with kzalloc, allocate an ID for it
++ * and return it.
++ *
++ * Returns:
++ * Pointer to new mode on success, NULL on error.
++ */
++struct drm_display_mode *drm_mode_create(struct drm_device *dev)
++{
++ struct drm_display_mode *nmode;
++
++ nmode = kzalloc(sizeof(struct drm_display_mode), GFP_KERNEL);
++ if (!nmode)
++ return NULL;
++
++ if (drm_mode_object_get(dev, &nmode->base, DRM_MODE_OBJECT_MODE)) {
++ kfree(nmode);
++ return NULL;
++ }
++
++ return nmode;
++}
++EXPORT_SYMBOL(drm_mode_create);
++
++/**
++ * drm_mode_destroy - remove a mode
++ * @dev: DRM device
++ * @mode: mode to remove
++ *
++ * Release @mode's unique ID, then free the @mode structure itself using kfree.
++ */
++void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode)
++{
++ if (!mode)
++ return;
++
++ drm_mode_object_put(dev, &mode->base);
++
++ kfree(mode);
++}
++EXPORT_SYMBOL(drm_mode_destroy);
++
++/**
++ * drm_mode_probed_add - add a mode to a connector's probed_mode list
++ * @connector: connector the new mode is added to
++ * @mode: mode data
+ *
+- * return the modeline based on CVT algorithm
++ * Add @mode to @connector's probed_mode list for later use. This list should
++ * then in a second step get filtered and all the modes actually supported by
++ * the hardware moved to the @connector's modes list.
++ */
++void drm_mode_probed_add(struct drm_connector *connector,
++ struct drm_display_mode *mode)
++{
++ WARN_ON(!mutex_is_locked(&connector->dev->mode_config.mutex));
++
++ list_add_tail(&mode->head, &connector->probed_modes);
++}
++EXPORT_SYMBOL(drm_mode_probed_add);
++
++/**
++ * drm_cvt_mode - create a modeline based on the CVT algorithm
++ * @dev: drm device
++ * @hdisplay: hdisplay size
++ * @vdisplay: vdisplay size
++ * @vrefresh: vrefresh rate
++ * @reduced: whether to use reduced blanking
++ * @interlaced: whether to compute an interlaced mode
++ * @margins: whether to add margins (borders)
+ *
+ * This function is called to generate the modeline based on CVT algorithm
+ * according to the hdisplay, vdisplay, vrefresh.
+@@ -82,12 +140,17 @@
+ *
+ * And it is copied from xf86CVTmode in xserver/hw/xfree86/modes/xf86cvt.c.
+ * What I have done is to translate it by using integer calculation.
++ *
++ * Returns:
++ * The modeline based on the CVT algorithm stored in a drm_display_mode object.
++ * The display mode object is allocated with drm_mode_create(). Returns NULL
++ * when no mode could be allocated.
+ */
+-#define HV_FACTOR 1000
+ struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay,
+ int vdisplay, int vrefresh,
+ bool reduced, bool interlaced, bool margins)
+ {
++#define HV_FACTOR 1000
+ /* 1) top/bottom margin size (% of height) - default: 1.8, */
+ #define CVT_MARGIN_PERCENTAGE 18
+ /* 2) character cell horizontal granularity (pixels) - default 8 */
+@@ -281,23 +344,25 @@
+ EXPORT_SYMBOL(drm_cvt_mode);
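++
++/*
++ * Editorial sketch, not part of this patch: generating a 1920x1080@60 CVT
++ * mode (normal blanking, progressive, no margins) and adding it to a
++ * connector's probed list. Assumes it runs from a ->get_modes() hook,
++ * where mode_config.mutex is already held as drm_mode_probed_add()
++ * expects.
++ */
++static int example_add_cvt_mode(struct drm_connector *connector)
++{
++	struct drm_display_mode *mode;
++
++	mode = drm_cvt_mode(connector->dev, 1920, 1080, 60,
++			    false, false, false);
++	if (!mode)
++		return 0;
++
++	drm_mode_probed_add(connector, mode);
++
++	return 1; /* number of modes added, as get_modes hooks return */
++}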
+
+ /**
+- * drm_gtf_mode_complex - create the modeline based on full GTF algorithm
+- *
+- * @dev :drm device
+- * @hdisplay :hdisplay size
+- * @vdisplay :vdisplay size
+- * @vrefresh :vrefresh rate.
+- * @interlaced :whether the interlace is supported
+- * @margins :desired margin size
+- * @GTF_[MCKJ] :extended GTF formula parameters
+- *
+- * LOCKING.
+- * none.
+- *
+- * return the modeline based on full GTF algorithm.
++ * drm_gtf_mode_complex - create the modeline based on the full GTF algorithm
++ * @dev: drm device
++ * @hdisplay: hdisplay size
++ * @vdisplay: vdisplay size
++ * @vrefresh: vrefresh rate.
++ * @interlaced: whether to compute an interlaced mode
++ * @margins: desired margin (borders) size
++ * @GTF_M: extended GTF formula parameters
++ * @GTF_2C: extended GTF formula parameters
++ * @GTF_K: extended GTF formula parameters
++ * @GTF_2J: extended GTF formula parameters
+ *
+ * GTF feature blocks specify C and J in multiples of 0.5, so we pass them
+ * in here multiplied by two. For a C of 40, pass in 80.
++ *
++ * Returns:
++ * The modeline based on the full GTF algorithm stored in a drm_display_mode object.
++ * The display mode object is allocated with drm_mode_create(). Returns NULL
++ * when no mode could be allocated.
+ */
+ struct drm_display_mode *
+ drm_gtf_mode_complex(struct drm_device *dev, int hdisplay, int vdisplay,
+@@ -467,17 +532,13 @@
+ EXPORT_SYMBOL(drm_gtf_mode_complex);
+
+ /**
+- * drm_gtf_mode - create the modeline based on GTF algorithm
+- *
+- * @dev :drm device
+- * @hdisplay :hdisplay size
+- * @vdisplay :vdisplay size
+- * @vrefresh :vrefresh rate.
+- * @interlaced :whether the interlace is supported
+- * @margins :whether the margin is supported
+- *
+- * LOCKING.
+- * none.
++ * drm_gtf_mode - create the modeline based on the GTF algorithm
++ * @dev: drm device
++ * @hdisplay: hdisplay size
++ * @vdisplay: vdisplay size
++ * @vrefresh: vrefresh rate.
++ * @interlaced: whether to compute an interlaced mode
++ * @margins: desired margin (borders) size
+ *
+ * return the modeline based on GTF algorithm
+ *
+@@ -496,19 +557,32 @@
+ * C = 40
+ * K = 128
+ * J = 20
++ *
++ * Returns:
++ * The modeline based on the GTF algorithm stored in a drm_display_mode object.
++ * The display mode object is allocated with drm_mode_create(). Returns NULL
++ * when no mode could be allocated.
+ */
+ struct drm_display_mode *
+ drm_gtf_mode(struct drm_device *dev, int hdisplay, int vdisplay, int vrefresh,
+- bool lace, int margins)
++ bool interlaced, int margins)
+ {
+- return drm_gtf_mode_complex(dev, hdisplay, vdisplay, vrefresh, lace,
+- margins, 600, 40 * 2, 128, 20 * 2);
++ return drm_gtf_mode_complex(dev, hdisplay, vdisplay, vrefresh,
++ interlaced, margins,
++ 600, 40 * 2, 128, 20 * 2);
+ }
+ EXPORT_SYMBOL(drm_gtf_mode);
+
+ #ifdef CONFIG_VIDEOMODE_HELPERS
+-int drm_display_mode_from_videomode(const struct videomode *vm,
+- struct drm_display_mode *dmode)
++/**
++ * drm_display_mode_from_videomode - fill in @dmode using @vm
++ * @vm: videomode structure to use as source
++ * @dmode: drm_display_mode structure to use as destination
++ *
++ * Fills out @dmode using the display mode specified in @vm.
++ */
++void drm_display_mode_from_videomode(const struct videomode *vm,
++ struct drm_display_mode *dmode)
+ {
+ dmode->hdisplay = vm->hactive;
+ dmode->hsync_start = dmode->hdisplay + vm->hfront_porch;
+@@ -538,8 +612,6 @@
+ if (vm->flags & DISPLAY_FLAGS_DOUBLECLK)
+ dmode->flags |= DRM_MODE_FLAG_DBLCLK;
+ drm_mode_set_name(dmode);
+-
+- return 0;
+ }
+ EXPORT_SYMBOL_GPL(drm_display_mode_from_videomode);
+
+@@ -553,6 +625,9 @@
+ * This function is expensive and should only be used, if only one mode is to be
+ * read from DT. To get multiple modes start with of_get_display_timings and
+ * work with that instead.
++ *
++ * Returns:
++ * 0 on success, a negative errno code when no OF videomode node was found.
+ */
+ int of_get_drm_display_mode(struct device_node *np,
+ struct drm_display_mode *dmode, int index)
+@@ -580,10 +655,8 @@
+ * drm_mode_set_name - set the name on a mode
+ * @mode: name will be set in this mode
+ *
+- * LOCKING:
+- * None.
+- *
+- * Set the name of @mode to a standard format.
++ * Set the name of @mode to a standard format which is <hdisplay>x<vdisplay>
++ * with an optional 'i' suffix for interlaced modes.
+ */
+ void drm_mode_set_name(struct drm_display_mode *mode)
+ {
+@@ -595,54 +668,12 @@
+ }
+ EXPORT_SYMBOL(drm_mode_set_name);
+
+-/**
+- * drm_mode_width - get the width of a mode
+- * @mode: mode
+- *
+- * LOCKING:
+- * None.
+- *
+- * Return @mode's width (hdisplay) value.
+- *
+- * FIXME: is this needed?
+- *
+- * RETURNS:
+- * @mode->hdisplay
+- */
+-int drm_mode_width(const struct drm_display_mode *mode)
+-{
+- return mode->hdisplay;
+-
+-}
+-EXPORT_SYMBOL(drm_mode_width);
+-
+-/**
+- * drm_mode_height - get the height of a mode
+- * @mode: mode
+- *
+- * LOCKING:
+- * None.
+- *
+- * Return @mode's height (vdisplay) value.
+- *
+- * FIXME: is this needed?
+- *
+- * RETURNS:
+- * @mode->vdisplay
+- */
+-int drm_mode_height(const struct drm_display_mode *mode)
+-{
+- return mode->vdisplay;
+-}
+-EXPORT_SYMBOL(drm_mode_height);
+-
+ /** drm_mode_hsync - get the hsync of a mode
+ * @mode: mode
+ *
+- * LOCKING:
+- * None.
+- *
+- * Return @modes's hsync rate in kHz, rounded to the nearest int.
++ * Returns:
++ * @mode's hsync rate in kHz, rounded to the nearest integer. Calculates the
++ * value first if it is not yet set.
+ */
+ int drm_mode_hsync(const struct drm_display_mode *mode)
+ {
+@@ -666,17 +697,9 @@
+ * drm_mode_vrefresh - get the vrefresh of a mode
+ * @mode: mode
+ *
+- * LOCKING:
+- * None.
+- *
+- * Return @mode's vrefresh rate in Hz or calculate it if necessary.
+- *
+- * FIXME: why is this needed? shouldn't vrefresh be set already?
+- *
+- * RETURNS:
+- * Vertical refresh rate. It will be the result of actual value plus 0.5.
+- * If it is 70.288, it will return 70Hz.
+- * If it is 59.6, it will return 60Hz.
++ * Returns:
++ * @mode's vrefresh rate in Hz, rounded to the nearest integer. Calculates the
++ * value first if it is not yet set.
+ */
+ int drm_mode_vrefresh(const struct drm_display_mode *mode)
+ {
+@@ -705,14 +728,11 @@
+ EXPORT_SYMBOL(drm_mode_vrefresh);
+
+ /**
+- * drm_mode_set_crtcinfo - set CRTC modesetting parameters
++ * drm_mode_set_crtcinfo - set CRTC modesetting timing parameters
+ * @p: mode
+ * @adjust_flags: a combination of adjustment flags
+ *
+- * LOCKING:
+- * None.
+- *
+- * Setup the CRTC modesetting parameters for @p, adjusting if necessary.
++ * Setup the CRTC modesetting timing parameters for @p, adjusting if necessary.
+ *
+ * - The CRTC_INTERLACE_HALVE_V flag can be used to halve vertical timings of
+ * interlaced modes.
+@@ -780,15 +800,11 @@
+ }
+ EXPORT_SYMBOL(drm_mode_set_crtcinfo);
+
+-
+ /**
+ * drm_mode_copy - copy the mode
+ * @dst: mode to overwrite
+ * @src: mode to copy
+ *
+- * LOCKING:
+- * None.
+- *
+ * Copy an existing mode into another mode, preserving the object id and
+ * list head of the destination mode.
+ */
+@@ -805,13 +821,14 @@
+
+ /**
+ * drm_mode_duplicate - allocate and duplicate an existing mode
+- * @m: mode to duplicate
+- *
+- * LOCKING:
+- * None.
++ * @dev: drm_device to allocate the duplicated mode for
++ * @mode: mode to duplicate
+ *
+ * Just allocate a new mode, copy the existing mode into it, and return
+ * a pointer to it. Used to create new instances of established modes.
++ *
++ * Returns:
++ * Pointer to duplicated mode on success, NULL on error.
+ */
+ struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
+ const struct drm_display_mode *mode)
+@@ -833,12 +850,9 @@
+ * @mode1: first mode
+ * @mode2: second mode
+ *
+- * LOCKING:
+- * None.
+- *
+ * Check to see if @mode1 and @mode2 are equivalent.
+ *
+- * RETURNS:
++ * Returns:
+ * True if the modes are equal, false otherwise.
+ */
+ bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2)
+@@ -864,13 +878,10 @@
+ * @mode1: first mode
+ * @mode2: second mode
+ *
+- * LOCKING:
+- * None.
+- *
+ * Check to see if @mode1 and @mode2 are equivalent, but
+ * don't check the pixel clocks nor the stereo layout.
+ *
+- * RETURNS:
++ * Returns:
+ * True if the modes are equal, false otherwise.
+ */
+ bool drm_mode_equal_no_clocks_no_stereo(const struct drm_display_mode *mode1,
+@@ -900,25 +911,19 @@
+ * @mode_list: list of modes to check
+ * @maxX: maximum width
+ * @maxY: maximum height
+- * @maxPitch: max pitch
+- *
+- * LOCKING:
+- * Caller must hold a lock protecting @mode_list.
+ *
+- * The DRM device (@dev) has size and pitch limits. Here we validate the
+- * modes we probed for @dev against those limits and set their status as
+- * necessary.
++ * This function is a helper which can be used to validate modes against size
++ * limitations of the DRM device/connector. If a mode is too big its status
++ * member is updated with the appropriate validation failure code. The list
++ * itself is not changed.
+ */
+ void drm_mode_validate_size(struct drm_device *dev,
+ struct list_head *mode_list,
+- int maxX, int maxY, int maxPitch)
++ int maxX, int maxY)
+ {
+ struct drm_display_mode *mode;
+
+ list_for_each_entry(mode, mode_list, head) {
+- if (maxPitch > 0 && mode->hdisplay > maxPitch)
+- mode->status = MODE_BAD_WIDTH;
+-
+ if (maxX > 0 && mode->hdisplay > maxX)
+ mode->status = MODE_VIRTUAL_X;
+
+@@ -934,12 +939,10 @@
+ * @mode_list: list of modes to check
+ * @verbose: be verbose about it
+ *
+- * LOCKING:
+- * Caller must hold a lock protecting @mode_list.
+- *
+- * Once mode list generation is complete, a caller can use this routine to
+- * remove invalid modes from a mode list. If any of the modes have a
+- * status other than %MODE_OK, they are removed from @mode_list and freed.
++ * This helper function can be used to prune a display mode list after
++ * validation has been completed. All modes whose status is not MODE_OK will be
++ * removed from the list, and if @verbose the status code and mode name are also
++ * printed to dmesg.
+ */
+ void drm_mode_prune_invalid(struct drm_device *dev,
+ struct list_head *mode_list, bool verbose)
+@@ -966,13 +969,10 @@
+ * @lh_a: list_head for first mode
+ * @lh_b: list_head for second mode
+ *
+- * LOCKING:
+- * None.
+- *
+ * Compare two modes, given by @lh_a and @lh_b, returning a value indicating
+ * which is better.
+ *
+- * RETURNS:
++ * Returns:
+ * Negative if @lh_a is better than @lh_b, zero if they're equivalent, or
+ * positive if @lh_b is better than @lh_a.
+ */
+@@ -1000,12 +1000,9 @@
+
+ /**
+ * drm_mode_sort - sort mode list
+- * @mode_list: list to sort
+- *
+- * LOCKING:
+- * Caller must hold a lock protecting @mode_list.
++ * @mode_list: list of drm_display_mode structures to sort
+ *
+- * Sort @mode_list by favorability, putting good modes first.
++ * Sort @mode_list by favorability, moving good modes to the head of the list.
+ */
+ void drm_mode_sort(struct list_head *mode_list)
+ {
+@@ -1016,21 +1013,24 @@
+ /**
+ * drm_mode_connector_list_update - update the mode list for the connector
+ * @connector: the connector to update
+- *
+- * LOCKING:
+- * Caller must hold a lock protecting @mode_list.
++ * @merge_type_bits: whether to merge or overwrite type bits.
+ *
+ * This moves the modes from the @connector probed_modes list
+ * to the actual mode list. It compares the probed mode against the current
+- * list and only adds different modes. All modes unverified after this point
+- * will be removed by the prune invalid modes.
++ * list and only adds different/new modes.
++ *
++ * This is just a helper function; it doesn't validate any modes itself and
++ * also doesn't prune any invalid modes. Callers need to do that themselves.
+ */
+-void drm_mode_connector_list_update(struct drm_connector *connector)
++void drm_mode_connector_list_update(struct drm_connector *connector,
++ bool merge_type_bits)
+ {
+ struct drm_display_mode *mode;
+ struct drm_display_mode *pmode, *pt;
+ int found_it;
+
++ WARN_ON(!mutex_is_locked(&connector->dev->mode_config.mutex));
++
+ list_for_each_entry_safe(pmode, pt, &connector->probed_modes,
+ head) {
+ found_it = 0;
+@@ -1041,7 +1041,10 @@
+ /* if equal delete the probed mode */
+ mode->status = pmode->status;
+ /* Merge type bits together */
+- mode->type |= pmode->type;
++ if (merge_type_bits)
++ mode->type |= pmode->type;
++ else
++ mode->type = pmode->type;
+ list_del(&pmode->head);
+ drm_mode_destroy(connector->dev, pmode);
+ break;
+@@ -1056,17 +1059,25 @@
+ EXPORT_SYMBOL(drm_mode_connector_list_update);
+
+ /**
+- * drm_mode_parse_command_line_for_connector - parse command line for connector
+- * @mode_option - per connector mode option
+- * @connector - connector to parse line for
++ * drm_mode_parse_command_line_for_connector - parse command line modeline for connector
++ * @mode_option: optional per connector mode option
++ * @connector: connector to parse modeline for
++ * @mode: preallocated drm_cmdline_mode structure to fill out
++ *
++ * This parses the @mode_option command line modeline for modes and options to
++ * configure the connector. If @mode_option is NULL the default command line
++ * modeline in fb_mode_option will be parsed instead.
+ *
+- * This parses the connector specific then generic command lines for
+- * modes and options to configure the connector.
++ * This uses the same parameters as the fb modedb.c, except for an extra
++ * force-enable, force-enable-digital and force-disable bit at the end:
+ *
+- * This uses the same parameters as the fb modedb.c, except for extra
+ * <xres>x<yres>[M][R][-<bpp>][@<refresh>][i][m][eDd]
+ *
+- * enable/enable Digital/disable bit at the end
++ * The intermediate drm_cmdline_mode structure is required to store additional
++ * options from the command line modeline like the force-enable/disable flag.
++ *
++ * Returns:
++ * True if a valid modeline has been parsed, false otherwise.
+ */
+ bool drm_mode_parse_command_line_for_connector(const char *mode_option,
+ struct drm_connector *connector,
+@@ -1219,6 +1230,14 @@
+ }
+ EXPORT_SYMBOL(drm_mode_parse_command_line_for_connector);
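++
++/*
++ * Editorial sketch, not part of this patch: parsing a modeline such as
++ * "1920x1080@60e" (the trailing 'e' force-enables the connector) and
++ * converting it with the helper that follows below.
++ */
++static struct drm_display_mode *
++example_parse_modeline(struct drm_connector *connector, const char *option)
++{
++	struct drm_cmdline_mode cmdline = { };
++
++	if (!drm_mode_parse_command_line_for_connector(option, connector,
++						       &cmdline))
++		return NULL;
++
++	return drm_mode_create_from_cmdline_mode(connector->dev, &cmdline);
++}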
+
++/**
++ * drm_mode_create_from_cmdline_mode - convert a command line modeline into a DRM display mode
++ * @dev: DRM device to create the new mode for
++ * @cmd: input command line modeline
++ *
++ * Returns:
++ * Pointer to converted mode on success, NULL on error.
++ */
+ struct drm_display_mode *
+ drm_mode_create_from_cmdline_mode(struct drm_device *dev,
+ struct drm_cmdline_mode *cmd)
+@@ -1240,6 +1259,7 @@
+ if (!mode)
+ return NULL;
+
++ mode->type |= DRM_MODE_TYPE_USERDEF;
+ drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
+ return mode;
+ }
+diff -Naur a/drivers/gpu/drm/drm_modeset_lock.c b/drivers/gpu/drm/drm_modeset_lock.c
+--- a/drivers/gpu/drm/drm_modeset_lock.c 1970-01-01 05:30:00.000000000 +0530
++++ b/drivers/gpu/drm/drm_modeset_lock.c 2015-03-26 14:42:38.734435422 +0530
+@@ -0,0 +1,482 @@
++/*
++ * Copyright (C) 2014 Red Hat
++ * Author: Rob Clark <robdclark@gmail.com>
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#include <drm/drmP.h>
++#include <drm/drm_crtc.h>
++#include <drm/drm_modeset_lock.h>
++
++/**
++ * DOC: kms locking
++ *
++ * As KMS moves toward more fine grained locking, and atomic ioctl where
++ * userspace can indirectly control locking order, it becomes necessary
++ * to use ww_mutex and acquire-contexts to avoid deadlocks. But because
++ * the locking is more distributed around the driver code, we want a bit
++ * of extra utility/tracking out of our acquire-ctx. This is provided
++ * by drm_modeset_lock / drm_modeset_acquire_ctx.
++ *
++ * For basic principles of ww_mutex, see: Documentation/locking/ww-mutex-design.txt
++ *
++ * The basic usage pattern is to:
++ *
++ * drm_modeset_acquire_init(&ctx)
++ * retry:
++ * foreach (lock in random_ordered_set_of_locks) {
++ * ret = drm_modeset_lock(lock, &ctx)
++ * if (ret == -EDEADLK) {
++ * drm_modeset_backoff(&ctx);
++ * goto retry;
++ * }
++ * }
++ *
++ * ... do stuff ...
++ *
++ * drm_modeset_drop_locks(&ctx);
++ * drm_modeset_acquire_fini(&ctx);
++ */
++
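
A minimal sketch of this retry pattern as a driver-side function; the
function and its two lock arguments are hypothetical, not taken from the
patch:

static int example_lock_two(struct drm_modeset_lock *a,
			    struct drm_modeset_lock *b)
{
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);
retry:
	ret = drm_modeset_lock(a, &ctx);
	if (ret == -EDEADLK)
		goto backoff;
	if (ret)
		goto done;
	ret = drm_modeset_lock(b, &ctx);
	if (ret == -EDEADLK)
		goto backoff;

	/* ... do stuff under both locks (ret is 0 here) ... */

done:
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	return ret;

backoff:
	/* drops everything held and slow-locks the contended lock,
	 * so the retry always makes forward progress */
	drm_modeset_backoff(&ctx);
	goto retry;
}
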
++
++/**
++ * __drm_modeset_lock_all - internal helper to grab all modeset locks
++ * @dev: DRM device
++ * @trylock: trylock mode for atomic contexts
++ *
++ * This is a special version of drm_modeset_lock_all() which can also be used in
++ * atomic contexts. Then @trylock must be set to true.
++ *
++ * Returns:
++ * 0 on success or negative error code on failure.
++ */
++int __drm_modeset_lock_all(struct drm_device *dev,
++ bool trylock)
++{
++ struct drm_mode_config *config = &dev->mode_config;
++ struct drm_modeset_acquire_ctx *ctx;
++ int ret;
++
++ ctx = kzalloc(sizeof(*ctx),
++ trylock ? GFP_ATOMIC : GFP_KERNEL);
++ if (!ctx)
++ return -ENOMEM;
++
++	if (trylock) {
++		if (!mutex_trylock(&config->mutex)) {
++			/* don't leak the ctx allocated above */
++			kfree(ctx);
++			return -EBUSY;
++		}
++	} else {
++ mutex_lock(&config->mutex);
++ }
++
++ drm_modeset_acquire_init(ctx, 0);
++ ctx->trylock_only = trylock;
++
++retry:
++ ret = drm_modeset_lock(&config->connection_mutex, ctx);
++ if (ret)
++ goto fail;
++ ret = drm_modeset_lock_all_crtcs(dev, ctx);
++ if (ret)
++ goto fail;
++
++ WARN_ON(config->acquire_ctx);
++
++	/* now that we hold the locks, it is safe to stash the
++ * ctx for drm_modeset_unlock_all():
++ */
++ config->acquire_ctx = ctx;
++
++ drm_warn_on_modeset_not_all_locked(dev);
++
++ return 0;
++
++fail:
++ if (ret == -EDEADLK) {
++ drm_modeset_backoff(ctx);
++ goto retry;
++ }
++
++ return ret;
++}
++EXPORT_SYMBOL(__drm_modeset_lock_all);
++
++/**
++ * drm_modeset_lock_all - take all modeset locks
++ * @dev: drm device
++ *
++ * This function takes all modeset locks, suitable where a more fine-grained
++ * scheme isn't (yet) implemented. Locks must be dropped with
++ * drm_modeset_unlock_all().
++ */
++void drm_modeset_lock_all(struct drm_device *dev)
++{
++ WARN_ON(__drm_modeset_lock_all(dev, false) != 0);
++}
++EXPORT_SYMBOL(drm_modeset_lock_all);
++
++/**
++ * drm_modeset_unlock_all - drop all modeset locks
++ * @dev: device
++ *
++ * This function drops all modeset locks taken by drm_modeset_lock_all().
++ */
++void drm_modeset_unlock_all(struct drm_device *dev)
++{
++ struct drm_mode_config *config = &dev->mode_config;
++ struct drm_modeset_acquire_ctx *ctx = config->acquire_ctx;
++
++ if (WARN_ON(!ctx))
++ return;
++
++ config->acquire_ctx = NULL;
++ drm_modeset_drop_locks(ctx);
++ drm_modeset_acquire_fini(ctx);
++
++ kfree(ctx);
++
++ mutex_unlock(&dev->mode_config.mutex);
++}
++EXPORT_SYMBOL(drm_modeset_unlock_all);
++
++/**
++ * drm_modeset_lock_crtc - lock crtc with hidden acquire ctx for a plane update
++ * @crtc: DRM CRTC
++ * @plane: DRM plane to be updated on @crtc
++ *
++ * This function locks the given crtc and plane (which should be either the
++ * primary or cursor plane) using a hidden acquire context. This is necessary so
++ * that drivers internally using the atomic interfaces can grab further locks
++ * with the lock acquire context.
++ *
++ * Note that @plane can be NULL, e.g. when cursor support hasn't yet been
++ * converted to universal planes.
++ */
++void drm_modeset_lock_crtc(struct drm_crtc *crtc,
++ struct drm_plane *plane)
++{
++ struct drm_modeset_acquire_ctx *ctx;
++ int ret;
++
++ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
++ if (WARN_ON(!ctx))
++ return;
++
++ drm_modeset_acquire_init(ctx, 0);
++
++retry:
++ ret = drm_modeset_lock(&crtc->mutex, ctx);
++ if (ret)
++ goto fail;
++
++ if (plane) {
++ ret = drm_modeset_lock(&plane->mutex, ctx);
++ if (ret)
++ goto fail;
++
++ if (plane->crtc) {
++ ret = drm_modeset_lock(&plane->crtc->mutex, ctx);
++ if (ret)
++ goto fail;
++ }
++ }
++
++ WARN_ON(crtc->acquire_ctx);
++
++	/* now that we hold the locks, it is safe to stash the
++ * ctx for drm_modeset_unlock_crtc():
++ */
++ crtc->acquire_ctx = ctx;
++
++ return;
++
++fail:
++ if (ret == -EDEADLK) {
++ drm_modeset_backoff(ctx);
++ goto retry;
++ }
++}
++EXPORT_SYMBOL(drm_modeset_lock_crtc);
++
++/**
++ * drm_modeset_legacy_acquire_ctx - find acquire ctx for legacy ioctls
++ * @crtc: drm crtc
++ *
++ * Legacy ioctl operations like cursor updates or page flips only have per-crtc
++ * locking, and store the acquire ctx in the corresponding crtc. All other
++ * legacy operations take all locks and use a global acquire context. This
++ * function grabs the right one.
++ */
++struct drm_modeset_acquire_ctx *
++drm_modeset_legacy_acquire_ctx(struct drm_crtc *crtc)
++{
++ if (crtc->acquire_ctx)
++ return crtc->acquire_ctx;
++
++ WARN_ON(!crtc->dev->mode_config.acquire_ctx);
++
++ return crtc->dev->mode_config.acquire_ctx;
++}
++EXPORT_SYMBOL(drm_modeset_legacy_acquire_ctx);
++
++/**
++ * drm_modeset_unlock_crtc - drop crtc lock
++ * @crtc: drm crtc
++ *
++ * This drops the crtc lock acquired with drm_modeset_lock_crtc() and all other
++ * locks acquired through the hidden context.
++ */
++void drm_modeset_unlock_crtc(struct drm_crtc *crtc)
++{
++ struct drm_modeset_acquire_ctx *ctx = crtc->acquire_ctx;
++
++ if (WARN_ON(!ctx))
++ return;
++
++ crtc->acquire_ctx = NULL;
++ drm_modeset_drop_locks(ctx);
++ drm_modeset_acquire_fini(ctx);
++
++ kfree(ctx);
++}
++EXPORT_SYMBOL(drm_modeset_unlock_crtc);
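
A sketch of a legacy per-crtc path bracketing its work with the hidden
acquire context; example_program_cursor() is a hypothetical hardware hook,
and the NULL plane matches the not-yet-universal-planes case noted above:

static int example_cursor_ioctl(struct drm_crtc *crtc)
{
	int ret;

	drm_modeset_lock_crtc(crtc, NULL);
	ret = example_program_cursor(crtc);
	drm_modeset_unlock_crtc(crtc);

	return ret;
}
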
++
++/**
++ * drm_warn_on_modeset_not_all_locked - check that all modeset locks are locked
++ * @dev: device
++ *
++ * Useful as a debug assert.
++ */
++void drm_warn_on_modeset_not_all_locked(struct drm_device *dev)
++{
++ struct drm_crtc *crtc;
++
++ /* Locking is currently fubar in the panic handler. */
++ if (oops_in_progress)
++ return;
++
++ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
++ WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
++
++ WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
++ WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
++}
++EXPORT_SYMBOL(drm_warn_on_modeset_not_all_locked);
++
++/**
++ * drm_modeset_acquire_init - initialize acquire context
++ * @ctx: the acquire context
++ * @flags: reserved for future use
++ */
++void drm_modeset_acquire_init(struct drm_modeset_acquire_ctx *ctx,
++ uint32_t flags)
++{
++ memset(ctx, 0, sizeof(*ctx));
++ ww_acquire_init(&ctx->ww_ctx, &crtc_ww_class);
++ INIT_LIST_HEAD(&ctx->locked);
++}
++EXPORT_SYMBOL(drm_modeset_acquire_init);
++
++/**
++ * drm_modeset_acquire_fini - cleanup acquire context
++ * @ctx: the acquire context
++ */
++void drm_modeset_acquire_fini(struct drm_modeset_acquire_ctx *ctx)
++{
++ ww_acquire_fini(&ctx->ww_ctx);
++}
++EXPORT_SYMBOL(drm_modeset_acquire_fini);
++
++/**
++ * drm_modeset_drop_locks - drop all locks
++ * @ctx: the acquire context
++ *
++ * Drop all locks currently held against this acquire context.
++ */
++void drm_modeset_drop_locks(struct drm_modeset_acquire_ctx *ctx)
++{
++ WARN_ON(ctx->contended);
++ while (!list_empty(&ctx->locked)) {
++ struct drm_modeset_lock *lock;
++
++ lock = list_first_entry(&ctx->locked,
++ struct drm_modeset_lock, head);
++
++ drm_modeset_unlock(lock);
++ }
++}
++EXPORT_SYMBOL(drm_modeset_drop_locks);
++
++static inline int modeset_lock(struct drm_modeset_lock *lock,
++ struct drm_modeset_acquire_ctx *ctx,
++ bool interruptible, bool slow)
++{
++ int ret;
++
++ WARN_ON(ctx->contended);
++
++ if (ctx->trylock_only) {
++ if (!ww_mutex_trylock(&lock->mutex))
++ return -EBUSY;
++ else
++ return 0;
++ } else if (interruptible && slow) {
++ ret = ww_mutex_lock_slow_interruptible(&lock->mutex, &ctx->ww_ctx);
++ } else if (interruptible) {
++ ret = ww_mutex_lock_interruptible(&lock->mutex, &ctx->ww_ctx);
++ } else if (slow) {
++ ww_mutex_lock_slow(&lock->mutex, &ctx->ww_ctx);
++ ret = 0;
++ } else {
++ ret = ww_mutex_lock(&lock->mutex, &ctx->ww_ctx);
++ }
++ if (!ret) {
++ WARN_ON(!list_empty(&lock->head));
++ list_add(&lock->head, &ctx->locked);
++ } else if (ret == -EALREADY) {
++ /* we already hold the lock.. this is fine. For atomic
++ * we will need to be able to drm_modeset_lock() things
++ * without having to keep track of what is already locked
++ * or not.
++ */
++ ret = 0;
++ } else if (ret == -EDEADLK) {
++ ctx->contended = lock;
++ }
++
++ return ret;
++}
++
++static int modeset_backoff(struct drm_modeset_acquire_ctx *ctx,
++ bool interruptible)
++{
++ struct drm_modeset_lock *contended = ctx->contended;
++
++ ctx->contended = NULL;
++
++ if (WARN_ON(!contended))
++ return 0;
++
++ drm_modeset_drop_locks(ctx);
++
++ return modeset_lock(contended, ctx, interruptible, true);
++}
++
++/**
++ * drm_modeset_backoff - deadlock avoidance backoff
++ * @ctx: the acquire context
++ *
++ * If deadlock is detected (i.e. drm_modeset_lock() returns -EDEADLK),
++ * you must call this function to drop all currently held locks and
++ * block until the contended lock becomes available.
++ */
++void drm_modeset_backoff(struct drm_modeset_acquire_ctx *ctx)
++{
++ modeset_backoff(ctx, false);
++}
++EXPORT_SYMBOL(drm_modeset_backoff);
++
++/**
++ * drm_modeset_backoff_interruptible - deadlock avoidance backoff
++ * @ctx: the acquire context
++ *
++ * Interruptible version of drm_modeset_backoff()
++ */
++int drm_modeset_backoff_interruptible(struct drm_modeset_acquire_ctx *ctx)
++{
++ return modeset_backoff(ctx, true);
++}
++EXPORT_SYMBOL(drm_modeset_backoff_interruptible);
++
++/**
++ * drm_modeset_lock - take modeset lock
++ * @lock: lock to take
++ * @ctx: acquire ctx
++ *
++ * If ctx is not NULL, then its ww acquire context is used and the
++ * lock will be tracked by the context and can be released by calling
++ * drm_modeset_drop_locks(). If -EDEADLK is returned, this means a
++ * deadlock scenario has been detected and it is an error to attempt
++ * to take any more locks without first calling drm_modeset_backoff().
++ */
++int drm_modeset_lock(struct drm_modeset_lock *lock,
++ struct drm_modeset_acquire_ctx *ctx)
++{
++ if (ctx)
++ return modeset_lock(lock, ctx, false, false);
++
++ ww_mutex_lock(&lock->mutex, NULL);
++ return 0;
++}
++EXPORT_SYMBOL(drm_modeset_lock);
++
++/**
++ * drm_modeset_lock_interruptible - take modeset lock
++ * @lock: lock to take
++ * @ctx: acquire ctx
++ *
++ * Interruptible version of drm_modeset_lock()
++ */
++int drm_modeset_lock_interruptible(struct drm_modeset_lock *lock,
++ struct drm_modeset_acquire_ctx *ctx)
++{
++ if (ctx)
++ return modeset_lock(lock, ctx, true, false);
++
++ return ww_mutex_lock_interruptible(&lock->mutex, NULL);
++}
++EXPORT_SYMBOL(drm_modeset_lock_interruptible);
++
++/**
++ * drm_modeset_unlock - drop modeset lock
++ * @lock: lock to release
++ */
++void drm_modeset_unlock(struct drm_modeset_lock *lock)
++{
++ list_del_init(&lock->head);
++ ww_mutex_unlock(&lock->mutex);
++}
++EXPORT_SYMBOL(drm_modeset_unlock);
++
++/* In some legacy codepaths it's convenient to just grab all the crtc and plane
++ * related locks. */
++int drm_modeset_lock_all_crtcs(struct drm_device *dev,
++ struct drm_modeset_acquire_ctx *ctx)
++{
++ struct drm_mode_config *config = &dev->mode_config;
++ struct drm_crtc *crtc;
++ struct drm_plane *plane;
++ int ret = 0;
++
++ list_for_each_entry(crtc, &config->crtc_list, head) {
++ ret = drm_modeset_lock(&crtc->mutex, ctx);
++ if (ret)
++ return ret;
++ }
++
++ list_for_each_entry(plane, &config->plane_list, head) {
++ ret = drm_modeset_lock(&plane->mutex, ctx);
++ if (ret)
++ return ret;
++ }
++
++ return 0;
++}
++EXPORT_SYMBOL(drm_modeset_lock_all_crtcs);
+diff -Naur a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c
+--- a/drivers/gpu/drm/drm_of.c 1970-01-01 05:30:00.000000000 +0530
++++ b/drivers/gpu/drm/drm_of.c 2015-03-26 14:42:38.734435422 +0530
+@@ -0,0 +1,67 @@
++#include <linux/export.h>
++#include <linux/list.h>
++#include <linux/of_graph.h>
++#include <drm/drmP.h>
++#include <drm/drm_crtc.h>
++#include <drm/drm_of.h>
++
++/**
++ * drm_crtc_port_mask - find the mask of a registered CRTC by port OF node
++ * @dev: DRM device
++ * @port: port OF node
++ *
++ * Given a port OF node, return the possible mask of the corresponding
++ * CRTC within a device's list of CRTCs. Returns zero if not found.
++ */
++static uint32_t drm_crtc_port_mask(struct drm_device *dev,
++ struct device_node *port)
++{
++ unsigned int index = 0;
++ struct drm_crtc *tmp;
++
++ list_for_each_entry(tmp, &dev->mode_config.crtc_list, head) {
++ if (tmp->port == port)
++ return 1 << index;
++
++ index++;
++ }
++
++ return 0;
++}
++
++/**
++ * drm_of_find_possible_crtcs - find the possible CRTCs for an encoder port
++ * @dev: DRM device
++ * @port: encoder port to scan for endpoints
++ *
++ * Scan all endpoints attached to a port, locate their attached CRTCs,
++ * and generate the DRM mask of CRTCs which may be attached to this
++ * encoder.
++ *
++ * See Documentation/devicetree/bindings/graph.txt for the bindings.
++ */
++uint32_t drm_of_find_possible_crtcs(struct drm_device *dev,
++ struct device_node *port)
++{
++ struct device_node *remote_port, *ep = NULL;
++ uint32_t possible_crtcs = 0;
++
++ do {
++ ep = of_graph_get_next_endpoint(port, ep);
++ if (!ep)
++ break;
++
++ remote_port = of_graph_get_remote_port(ep);
++ if (!remote_port) {
++ of_node_put(ep);
++ return 0;
++ }
++
++ possible_crtcs |= drm_crtc_port_mask(dev, remote_port);
++
++ of_node_put(remote_port);
++ } while (1);
++
++ return possible_crtcs;
++}
++EXPORT_SYMBOL(drm_of_find_possible_crtcs);
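
A sketch of an encoder setup path built on this helper; the "port" child
lookup and the -EPROBE_DEFER policy are assumptions about the binding, not
something this patch mandates:

static int example_encoder_init(struct drm_device *drm,
				struct device_node *np,
				struct drm_encoder *encoder)
{
	struct device_node *port = of_get_child_by_name(np, "port");

	if (!port)
		return -ENODEV;

	encoder->possible_crtcs = drm_of_find_possible_crtcs(drm, port);
	of_node_put(port);

	/* no CRTC found yet: the CRTC driver may simply not be bound */
	return encoder->possible_crtcs ? 0 : -EPROBE_DEFER;
}
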
+diff -Naur a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
+--- a/drivers/gpu/drm/drm_pci.c 2015-03-26 14:43:30.426436436 +0530
++++ b/drivers/gpu/drm/drm_pci.c 2015-03-26 14:42:38.734435422 +0530
+@@ -1,17 +1,3 @@
+-/* drm_pci.h -- PCI DMA memory management wrappers for DRM -*- linux-c -*- */
+-/**
+- * \file drm_pci.c
+- * \brief Functions and ioctls to manage PCI memory
+- *
+- * \warning These interfaces aren't stable yet.
+- *
+- * \todo Implement the remaining ioctl's for the PCI pools.
+- * \todo The wrappers here are so thin that they would be better off inlined..
+- *
+- * \author José Fonseca <jrfonseca@tungstengraphics.com>
+- * \author Leif Delgass <ldelgass@retinalburn.net>
+- */
+-
+ /*
+ * Copyright 2003 José Fonseca.
+ * Copyright 2003 Leif Delgass.
+@@ -41,13 +27,16 @@
+ #include <linux/dma-mapping.h>
+ #include <linux/export.h>
+ #include <drm/drmP.h>
+-
+-/**********************************************************************/
+-/** \name PCI memory */
+-/*@{*/
++#include "drm_legacy.h"
+
+ /**
+- * \brief Allocate a PCI consistent memory block, for DMA.
++ * drm_pci_alloc - Allocate a PCI consistent memory block, for DMA.
++ * @dev: DRM device
++ * @size: size of block to allocate
++ * @align: alignment of block
++ *
++ * Return: A handle to the allocated memory block on success or NULL on
++ * failure.
+ */
+ drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align)
+ {
+@@ -88,12 +77,12 @@
+
+ EXPORT_SYMBOL(drm_pci_alloc);
+
+-/**
+- * \brief Free a PCI consistent memory block without freeing its descriptor.
++/*
++ * Free a PCI consistent memory block without freeing its descriptor.
+ *
+ * This function is for internal use in the Linux-specific DRM core code.
+ */
+-void __drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
++void __drm_legacy_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
+ {
+ unsigned long addr;
+ size_t sz;
+@@ -111,11 +100,13 @@
+ }
+
+ /**
+- * \brief Free a PCI consistent memory block
++ * drm_pci_free - Free a PCI consistent memory block
++ * @dev: DRM device
++ * @dmah: handle to memory block
+ */
+ void drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
+ {
+- __drm_pci_free(dev, dmah);
++ __drm_legacy_pci_free(dev, dmah);
+ kfree(dmah);
+ }
+
+@@ -137,69 +128,29 @@
+ return pci_domain_nr(dev->pdev->bus);
+ }
+
+-static int drm_pci_get_irq(struct drm_device *dev)
+-{
+- return dev->pdev->irq;
+-}
+-
+-static const char *drm_pci_get_name(struct drm_device *dev)
+-{
+- struct pci_driver *pdriver = dev->driver->kdriver.pci;
+- return pdriver->name;
+-}
+-
+-static int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master)
++int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master)
+ {
+- int len, ret;
+- struct pci_driver *pdriver = dev->driver->kdriver.pci;
+- master->unique_len = 40;
+- master->unique_size = master->unique_len;
+- master->unique = kmalloc(master->unique_size, GFP_KERNEL);
+- if (master->unique == NULL)
++ master->unique = kasprintf(GFP_KERNEL, "pci:%04x:%02x:%02x.%d",
++ drm_get_pci_domain(dev),
++ dev->pdev->bus->number,
++ PCI_SLOT(dev->pdev->devfn),
++ PCI_FUNC(dev->pdev->devfn));
++ if (!master->unique)
+ return -ENOMEM;
+
+-
+- len = snprintf(master->unique, master->unique_len,
+- "pci:%04x:%02x:%02x.%d",
+- drm_get_pci_domain(dev),
+- dev->pdev->bus->number,
+- PCI_SLOT(dev->pdev->devfn),
+- PCI_FUNC(dev->pdev->devfn));
+-
+- if (len >= master->unique_len) {
+- DRM_ERROR("buffer overflow");
+- ret = -EINVAL;
+- goto err;
+- } else
+- master->unique_len = len;
+-
+- dev->devname =
+- kmalloc(strlen(pdriver->name) +
+- master->unique_len + 2, GFP_KERNEL);
+-
+- if (dev->devname == NULL) {
+- ret = -ENOMEM;
+- goto err;
+- }
+-
+- sprintf(dev->devname, "%s@%s", pdriver->name,
+- master->unique);
+-
++ master->unique_len = strlen(master->unique);
+ return 0;
+-err:
+- return ret;
+ }
++EXPORT_SYMBOL(drm_pci_set_busid);
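
With the drm_bus vtable below being removed, drivers reference this helper
directly. A sketch of the new wiring, assuming the backport also carries
the upstream drm_driver.set_busid hook (everything else elided):

static struct drm_driver example_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET,
	.set_busid       = drm_pci_set_busid,
	/* fops, ioctls and the remaining hooks elided */
};
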
+
+-static int drm_pci_set_unique(struct drm_device *dev,
+- struct drm_master *master,
+- struct drm_unique *u)
++int drm_pci_set_unique(struct drm_device *dev,
++ struct drm_master *master,
++ struct drm_unique *u)
+ {
+ int domain, bus, slot, func, ret;
+- const char *bus_name;
+
+ master->unique_len = u->unique_len;
+- master->unique_size = u->unique_len + 1;
+- master->unique = kmalloc(master->unique_size, GFP_KERNEL);
++ master->unique = kmalloc(master->unique_len + 1, GFP_KERNEL);
+ if (!master->unique) {
+ ret = -ENOMEM;
+ goto err;
+@@ -212,17 +163,6 @@
+
+ master->unique[master->unique_len] = '\0';
+
+- bus_name = dev->driver->bus->get_name(dev);
+- dev->devname = kmalloc(strlen(bus_name) +
+- strlen(master->unique) + 2, GFP_KERNEL);
+- if (!dev->devname) {
+- ret = -ENOMEM;
+- goto err;
+- }
+-
+- sprintf(dev->devname, "%s@%s", bus_name,
+- master->unique);
+-
+ /* Return error if the busid submitted doesn't match the device's actual
+ * busid.
+ */
+@@ -247,7 +187,6 @@
+ return ret;
+ }
+
+-
+ static int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p)
+ {
+ if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
+@@ -262,6 +201,36 @@
+ return 0;
+ }
+
++/**
++ * drm_irq_by_busid - Get interrupt from bus ID
++ * @dev: DRM device
++ * @data: IOCTL parameter pointing to a drm_irq_busid structure
++ * @file_priv: DRM file private.
++ *
++ * Finds the PCI device with the specified bus id and gets its IRQ number.
++ * This IOCTL is deprecated, and will now return EINVAL for any busid not equal
++ * to that of the device that this DRM instance is attached to.
++ *
++ * Return: 0 on success or a negative error code on failure.
++ */
++int drm_irq_by_busid(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_irq_busid *p = data;
++
++ if (drm_core_check_feature(dev, DRIVER_MODESET))
++ return -EINVAL;
++
++ /* UMS was only ever support on PCI devices. */
++ if (WARN_ON(!dev->pdev))
++ return -EINVAL;
++
++ if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
++ return -EINVAL;
++
++ return drm_pci_irq_by_busid(dev, p);
++}
++
+ static void drm_pci_agp_init(struct drm_device *dev)
+ {
+ if (drm_core_check_feature(dev, DRIVER_USE_AGP)) {
+@@ -286,25 +255,17 @@
+ }
+ }
+
+-static struct drm_bus drm_pci_bus = {
+- .bus_type = DRIVER_BUS_PCI,
+- .get_irq = drm_pci_get_irq,
+- .get_name = drm_pci_get_name,
+- .set_busid = drm_pci_set_busid,
+- .set_unique = drm_pci_set_unique,
+- .irq_by_busid = drm_pci_irq_by_busid,
+-};
+-
+ /**
+- * Register.
+- *
+- * \param pdev - PCI device structure
+- * \param ent entry from the PCI ID table with device type flags
+- * \return zero on success or a negative number on failure.
++ * drm_get_pci_dev - Register a PCI device with the DRM subsystem
++ * @pdev: PCI device
++ * @ent: entry from the PCI ID table that matches @pdev
++ * @driver: DRM device driver
+ *
+ * Attempt to gets inter module "drm" information. If we are first
+ * then register the character device and inter module information.
+ * Try and register, if we fail to register, backout previous work.
++ *
++ * Return: 0 on success or a negative error code on failure.
+ */
+ int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
+ struct drm_driver *driver)
+@@ -351,21 +312,20 @@
+ drm_pci_agp_destroy(dev);
+ pci_disable_device(pdev);
+ err_free:
+- drm_dev_free(dev);
++ drm_dev_unref(dev);
+ return ret;
+ }
+ EXPORT_SYMBOL(drm_get_pci_dev);
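
A sketch of the usual pci_driver probe hook feeding drm_get_pci_dev();
example_driver stands in for the driver's drm_driver instance and
example_pci_ids for its ID table:

static int example_pci_probe(struct pci_dev *pdev,
			     const struct pci_device_id *ent)
{
	return drm_get_pci_dev(pdev, ent, &example_driver);
}

static struct pci_driver example_pci_driver = {
	.name     = "example",
	.id_table = example_pci_ids,
	.probe    = example_pci_probe,
};
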
+
+ /**
+- * PCI device initialization. Called direct from modules at load time.
++ * drm_pci_init - Register matching PCI devices with the DRM subsystem
++ * @driver: DRM device driver
++ * @pdriver: PCI device driver
+ *
+- * \return zero on success or a negative number on failure.
++ * Initializes a drm_device structure, registering the stubs and initializing
++ * the AGP device.
+ *
+- * Initializes a drm_device structures,registering the
+- * stubs and initializing the AGP device.
+- *
+- * Expands the \c DRIVER_PREINIT and \c DRIVER_POST_INIT macros before and
+- * after the initialization for driver customization.
++ * Return: 0 on success or a negative error code on failure.
+ */
+ int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
+ {
+@@ -375,9 +335,6 @@
+
+ DRM_DEBUG("\n");
+
+- driver->kdriver.pci = pdriver;
+- driver->bus = &drm_pci_bus;
+-
+ if (driver->driver_features & DRIVER_MODESET)
+ return pci_register_driver(pdriver);
+
+@@ -453,11 +410,31 @@
+ }
+
+ void drm_pci_agp_destroy(struct drm_device *dev) {}
++
++int drm_irq_by_busid(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ return -EINVAL;
++}
++
++int drm_pci_set_unique(struct drm_device *dev,
++ struct drm_master *master,
++ struct drm_unique *u)
++{
++ return -EINVAL;
++}
+ #endif
+
+ EXPORT_SYMBOL(drm_pci_init);
+
+-/*@}*/
++/**
++ * drm_pci_exit - Unregister matching PCI devices from the DRM subsystem
++ * @driver: DRM device driver
++ * @pdriver: PCI device driver
++ *
++ * Unregisters one or more devices matched by a PCI driver from the DRM
++ * subsystem.
++ */
+ void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver)
+ {
+ struct drm_device *dev, *tmp;
+diff -Naur a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c
+--- a/drivers/gpu/drm/drm_plane_helper.c 1970-01-01 05:30:00.000000000 +0530
++++ b/drivers/gpu/drm/drm_plane_helper.c 2015-03-26 14:42:38.734435422 +0530
+@@ -0,0 +1,572 @@
++/*
++ * Copyright (C) 2014 Intel Corporation
++ *
++ * DRM universal plane helper functions
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
++ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
++ * SOFTWARE.
++ */
++
++#include <linux/list.h>
++#include <drm/drmP.h>
++#include <drm/drm_plane_helper.h>
++#include <drm/drm_rect.h>
++#include <drm/drm_atomic.h>
++#include <drm/drm_crtc_helper.h>
++#include <drm/drm_atomic_helper.h>
++
++#define SUBPIXEL_MASK 0xffff
++
++/**
++ * DOC: overview
++ *
++ * This helper library has two parts. The first part has support to implement
++ * primary plane support on top of the normal CRTC configuration interface.
++ * Since the legacy ->set_config interface ties the primary plane together with
++ * the CRTC state this does not allow userspace to disable the primary plane
++ * itself. To avoid too much duplicated code, use
++ * drm_plane_helper_check_update(), which can be used to enforce the same
++ * restrictions that primary planes have had thus far. The default primary
++ * plane only exposes XRGB8888 and ARGB8888 as valid pixel formats for the
++ * attached framebuffer.
++ *
++ * Drivers are highly recommended to implement proper support for primary
++ * planes, and newly merged drivers must not rely upon these transitional
++ * helpers.
++ *
++ * The second part also implements transitional helpers which allow drivers to
++ * gradually switch to the atomic helper infrastructure for plane updates. Once
++ * that switch is complete drivers shouldn't use these any longer, instead using
++ * the proper legacy implementations for update and disable plane hooks provided
++ * by the atomic helpers.
++ *
++ * Again drivers are strongly urged to switch to the new interfaces.
++ */
++
++/*
++ * This is the minimal list of formats that seem to be safe for modeset use
++ * with all current DRM drivers. Most hardware can actually support more
++ * formats than this and drivers may specify a more accurate list when
++ * creating the primary plane. However drivers that still call
++ * drm_plane_init() will use this minimal format list as the default.
++ */
++static const uint32_t safe_modeset_formats[] = {
++ DRM_FORMAT_XRGB8888,
++ DRM_FORMAT_ARGB8888,
++};
++
++/*
++ * Returns the connectors currently associated with a CRTC. This function
++ * should be called twice: once with a NULL connector list to retrieve
++ * the list size, and once with the properly allocated list to be filled in.
++ */
++static int get_connectors_for_crtc(struct drm_crtc *crtc,
++ struct drm_connector **connector_list,
++ int num_connectors)
++{
++ struct drm_device *dev = crtc->dev;
++ struct drm_connector *connector;
++ int count = 0;
++
++ /*
++ * Note: Once we change the plane hooks to more fine-grained locking we
++ * need to grab the connection_mutex here to be able to make these
++ * checks.
++ */
++ WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
++
++ list_for_each_entry(connector, &dev->mode_config.connector_list, head)
++ if (connector->encoder && connector->encoder->crtc == crtc) {
++ if (connector_list != NULL && count < num_connectors)
++ *(connector_list++) = connector;
++
++ count++;
++ }
++
++ return count;
++}
++
++/**
++ * drm_plane_helper_check_update() - Check plane update for validity
++ * @plane: plane object to update
++ * @crtc: owning CRTC of owning plane
++ * @fb: framebuffer to flip onto plane
++ * @src: source coordinates in 16.16 fixed point
++ * @dest: integer destination coordinates
++ * @clip: integer clipping coordinates
++ * @min_scale: minimum @src:@dest scaling factor in 16.16 fixed point
++ * @max_scale: maximum @src:@dest scaling factor in 16.16 fixed point
++ * @can_position: is it legal to position the plane such that it
++ * doesn't cover the entire crtc? This will generally
++ * only be false for primary planes.
++ * @can_update_disabled: can the plane be updated while the crtc
++ * is disabled?
++ * @visible: output parameter indicating whether plane is still visible after
++ * clipping
++ *
++ * Checks that a desired plane update is valid. Drivers that provide
++ * their own plane handling rather than helper-provided implementations may
++ * still wish to call this function to avoid duplication of error checking
++ * code.
++ *
++ * RETURNS:
++ * Zero if update appears valid, error code on failure
++ */
++int drm_plane_helper_check_update(struct drm_plane *plane,
++ struct drm_crtc *crtc,
++ struct drm_framebuffer *fb,
++ struct drm_rect *src,
++ struct drm_rect *dest,
++ const struct drm_rect *clip,
++ int min_scale,
++ int max_scale,
++ bool can_position,
++ bool can_update_disabled,
++ bool *visible)
++{
++ int hscale, vscale;
++
++ if (!crtc->enabled && !can_update_disabled) {
++ DRM_DEBUG_KMS("Cannot update plane of a disabled CRTC.\n");
++ return -EINVAL;
++ }
++
++ /* Check scaling */
++ hscale = drm_rect_calc_hscale(src, dest, min_scale, max_scale);
++ vscale = drm_rect_calc_vscale(src, dest, min_scale, max_scale);
++ if (hscale < 0 || vscale < 0) {
++ DRM_DEBUG_KMS("Invalid scaling of plane\n");
++ return -ERANGE;
++ }
++
++ if (!fb) {
++ *visible = false;
++ return 0;
++ }
++
++ *visible = drm_rect_clip_scaled(src, dest, clip, hscale, vscale);
++ if (!*visible)
++ /*
++ * Plane isn't visible; some drivers can handle this
++ * so we just return success here. Drivers that can't
++ * (including those that use the primary plane helper's
++ * update function) will return an error from their
++ * update_plane handler.
++ */
++ return 0;
++
++ if (!can_position && !drm_rect_equals(dest, clip)) {
++ DRM_DEBUG_KMS("Plane must cover entire CRTC\n");
++ return -EINVAL;
++ }
++
++ return 0;
++}
++EXPORT_SYMBOL(drm_plane_helper_check_update);
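
A sketch of a driver-provided update_plane hook reusing this check before
touching hardware; the clip rectangle mirrors the one
drm_primary_helper_update() builds below, and can_position is set to true
as would suit an overlay plane:

static int example_update_plane(struct drm_plane *plane,
				struct drm_crtc *crtc,
				struct drm_framebuffer *fb,
				int crtc_x, int crtc_y,
				unsigned int crtc_w, unsigned int crtc_h,
				uint32_t src_x, uint32_t src_y,
				uint32_t src_w, uint32_t src_h)
{
	struct drm_rect src = {
		.x1 = src_x, .y1 = src_y,
		.x2 = src_x + src_w, .y2 = src_y + src_h,
	};
	struct drm_rect dest = {
		.x1 = crtc_x, .y1 = crtc_y,
		.x2 = crtc_x + crtc_w, .y2 = crtc_y + crtc_h,
	};
	const struct drm_rect clip = {
		.x2 = crtc->mode.hdisplay,
		.y2 = crtc->mode.vdisplay,
	};
	bool visible;
	int ret;

	ret = drm_plane_helper_check_update(plane, crtc, fb, &src, &dest,
					    &clip,
					    DRM_PLANE_HELPER_NO_SCALING,
					    DRM_PLANE_HELPER_NO_SCALING,
					    true, false, &visible);
	if (ret)
		return ret;
	if (!visible)
		return 0;

	/* ... program the hardware from the clipped src/dest rects ... */
	return 0;
}
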
++
++/**
++ * drm_primary_helper_update() - Helper for primary plane update
++ * @plane: plane object to update
++ * @crtc: owning CRTC of owning plane
++ * @fb: framebuffer to flip onto plane
++ * @crtc_x: x offset of primary plane on crtc
++ * @crtc_y: y offset of primary plane on crtc
++ * @crtc_w: width of primary plane rectangle on crtc
++ * @crtc_h: height of primary plane rectangle on crtc
++ * @src_x: x offset of @fb for panning
++ * @src_y: y offset of @fb for panning
++ * @src_w: width of source rectangle in @fb
++ * @src_h: height of source rectangle in @fb
++ *
++ * Provides a default plane update handler for primary planes. This handler
++ * is called in response to a userspace SetPlane operation on the plane with a
++ * non-NULL framebuffer. We call the driver's modeset handler to update the
++ * framebuffer.
++ *
++ * SetPlane() on a primary plane of a disabled CRTC is not supported, and will
++ * return an error.
++ *
++ * Note that we make some assumptions about hardware limitations that may not be
++ * true for all hardware --
++ * 1) Primary plane cannot be repositioned.
++ * 2) Primary plane cannot be scaled.
++ * 3) Primary plane must cover the entire CRTC.
++ * 4) Subpixel positioning is not supported.
++ * Drivers for hardware that don't have these restrictions can provide their
++ * own implementation rather than using this helper.
++ *
++ * RETURNS:
++ * Zero on success, error code on failure
++ */
++int drm_primary_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
++ struct drm_framebuffer *fb,
++ int crtc_x, int crtc_y,
++ unsigned int crtc_w, unsigned int crtc_h,
++ uint32_t src_x, uint32_t src_y,
++ uint32_t src_w, uint32_t src_h)
++{
++ struct drm_mode_set set = {
++ .crtc = crtc,
++ .fb = fb,
++ .mode = &crtc->mode,
++ .x = src_x >> 16,
++ .y = src_y >> 16,
++ };
++ struct drm_rect src = {
++ .x1 = src_x,
++ .y1 = src_y,
++ .x2 = src_x + src_w,
++ .y2 = src_y + src_h,
++ };
++ struct drm_rect dest = {
++ .x1 = crtc_x,
++ .y1 = crtc_y,
++ .x2 = crtc_x + crtc_w,
++ .y2 = crtc_y + crtc_h,
++ };
++ const struct drm_rect clip = {
++ .x2 = crtc->mode.hdisplay,
++ .y2 = crtc->mode.vdisplay,
++ };
++ struct drm_connector **connector_list;
++ int num_connectors, ret;
++ bool visible;
++
++ ret = drm_plane_helper_check_update(plane, crtc, fb,
++ &src, &dest, &clip,
++ DRM_PLANE_HELPER_NO_SCALING,
++ DRM_PLANE_HELPER_NO_SCALING,
++ false, false, &visible);
++ if (ret)
++ return ret;
++
++ if (!visible)
++ /*
++ * Primary plane isn't visible. Note that unless a driver
++ * provides its own disable function, this will just
++ * wind up returning -EINVAL to userspace.
++ */
++ return plane->funcs->disable_plane(plane);
++
++ /* Find current connectors for CRTC */
++ num_connectors = get_connectors_for_crtc(crtc, NULL, 0);
++ BUG_ON(num_connectors == 0);
++ connector_list = kzalloc(num_connectors * sizeof(*connector_list),
++ GFP_KERNEL);
++ if (!connector_list)
++ return -ENOMEM;
++ get_connectors_for_crtc(crtc, connector_list, num_connectors);
++
++ set.connectors = connector_list;
++ set.num_connectors = num_connectors;
++
++ /*
++ * We call set_config() directly here rather than using
++ * drm_mode_set_config_internal. We're reprogramming the same
++ * connectors that were already in use, so we shouldn't need the extra
++ * cross-CRTC fb refcounting to accommodate stealing connectors.
++ * drm_mode_setplane() already handles the basic refcounting for the
++ * framebuffers involved in this operation.
++ */
++ ret = crtc->funcs->set_config(&set);
++
++ kfree(connector_list);
++ return ret;
++}
++EXPORT_SYMBOL(drm_primary_helper_update);
++
++/**
++ * drm_primary_helper_disable() - Helper for primary plane disable
++ * @plane: plane to disable
++ *
++ * Provides a default plane disable handler for primary planes. This handler
++ * is called in response to a userspace SetPlane operation on the plane with a
++ * NULL framebuffer parameter. It unconditionally fails the disable call with
++ * -EINVAL, since the only way to disable the primary plane without driver
++ * support is to disable the entire CRTC, which does not match the plane's
++ * ->disable hook.
++ *
++ * Note that some hardware may be able to disable the primary plane without
++ * disabling the whole CRTC. Drivers for such hardware should provide their
++ * own disable handler that disables just the primary plane (and they'll likely
++ * need to provide their own update handler as well to properly re-enable a
++ * disabled primary plane).
++ *
++ * RETURNS:
++ * Unconditionally returns -EINVAL.
++ */
++int drm_primary_helper_disable(struct drm_plane *plane)
++{
++ return -EINVAL;
++}
++EXPORT_SYMBOL(drm_primary_helper_disable);
++
++/**
++ * drm_primary_helper_destroy() - Helper for primary plane destruction
++ * @plane: plane to destroy
++ *
++ * Provides a default plane destroy handler for primary planes. This handler
++ * is called during CRTC destruction. We disable the primary plane, remove
++ * it from the DRM plane list, and deallocate the plane structure.
++ */
++void drm_primary_helper_destroy(struct drm_plane *plane)
++{
++ drm_plane_cleanup(plane);
++ kfree(plane);
++}
++EXPORT_SYMBOL(drm_primary_helper_destroy);
++
++const struct drm_plane_funcs drm_primary_helper_funcs = {
++ .update_plane = drm_primary_helper_update,
++ .disable_plane = drm_primary_helper_disable,
++ .destroy = drm_primary_helper_destroy,
++};
++EXPORT_SYMBOL(drm_primary_helper_funcs);
++
++/**
++ * drm_primary_helper_create_plane() - Create a generic primary plane
++ * @dev: drm device
++ * @formats: pixel formats supported, or NULL for a default safe list
++ * @num_formats: size of @formats; ignored if @formats is NULL
++ *
++ * Allocates and initializes a primary plane that can be used with the primary
++ * plane helpers. Drivers that wish to use driver-specific plane structures or
++ * provide custom handler functions may perform their own allocation and
++ * initialization rather than calling this function.
++ */
++struct drm_plane *drm_primary_helper_create_plane(struct drm_device *dev,
++ const uint32_t *formats,
++ int num_formats)
++{
++ struct drm_plane *primary;
++ int ret;
++
++ primary = kzalloc(sizeof(*primary), GFP_KERNEL);
++ if (primary == NULL) {
++ DRM_DEBUG_KMS("Failed to allocate primary plane\n");
++ return NULL;
++ }
++
++ if (formats == NULL) {
++ formats = safe_modeset_formats;
++ num_formats = ARRAY_SIZE(safe_modeset_formats);
++ }
++
++	/* possible_crtcs will be filled in later by crtc_init */
++ ret = drm_universal_plane_init(dev, primary, 0,
++ &drm_primary_helper_funcs,
++ formats, num_formats,
++ DRM_PLANE_TYPE_PRIMARY);
++ if (ret) {
++ kfree(primary);
++ primary = NULL;
++ }
++
++ return primary;
++}
++EXPORT_SYMBOL(drm_primary_helper_create_plane);
++
++/**
++ * drm_crtc_init - Legacy CRTC initialization function
++ * @dev: DRM device
++ * @crtc: CRTC object to init
++ * @funcs: callbacks for the new CRTC
++ *
++ * Initialize a CRTC object with a default helper-provided primary plane and no
++ * cursor plane.
++ *
++ * Returns:
++ * Zero on success, error code on failure.
++ */
++int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
++ const struct drm_crtc_funcs *funcs)
++{
++ struct drm_plane *primary;
++
++ primary = drm_primary_helper_create_plane(dev, NULL, 0);
++ return drm_crtc_init_with_planes(dev, crtc, primary, NULL, funcs);
++}
++EXPORT_SYMBOL(drm_crtc_init);
++
++int drm_plane_helper_commit(struct drm_plane *plane,
++ struct drm_plane_state *plane_state,
++ struct drm_framebuffer *old_fb)
++{
++ struct drm_plane_helper_funcs *plane_funcs;
++ struct drm_crtc *crtc[2];
++ struct drm_crtc_helper_funcs *crtc_funcs[2];
++ int i, ret = 0;
++
++ plane_funcs = plane->helper_private;
++
++ /* Since this is a transitional helper we can't assume that plane->state
++ * is always valid. Hence we need to use plane->crtc instead of
++ * plane->state->crtc as the old crtc. */
++ crtc[0] = plane->crtc;
++ crtc[1] = crtc[0] != plane_state->crtc ? plane_state->crtc : NULL;
++
++ for (i = 0; i < 2; i++)
++ crtc_funcs[i] = crtc[i] ? crtc[i]->helper_private : NULL;
++
++ if (plane_funcs->atomic_check) {
++ ret = plane_funcs->atomic_check(plane, plane_state);
++ if (ret)
++ goto out;
++ }
++
++ if (plane_funcs->prepare_fb && plane_state->fb) {
++ ret = plane_funcs->prepare_fb(plane, plane_state->fb);
++ if (ret)
++ goto out;
++ }
++
++ /* Point of no return, commit sw state. */
++ swap(plane->state, plane_state);
++
++ for (i = 0; i < 2; i++) {
++ if (crtc_funcs[i] && crtc_funcs[i]->atomic_begin)
++ crtc_funcs[i]->atomic_begin(crtc[i]);
++ }
++
++ plane_funcs->atomic_update(plane, plane_state);
++
++ for (i = 0; i < 2; i++) {
++ if (crtc_funcs[i] && crtc_funcs[i]->atomic_flush)
++ crtc_funcs[i]->atomic_flush(crtc[i]);
++ }
++
++ for (i = 0; i < 2; i++) {
++ if (!crtc[i])
++ continue;
++
++ /* There's no other way to figure out whether the crtc is running. */
++ ret = drm_crtc_vblank_get(crtc[i]);
++ if (ret == 0) {
++ drm_crtc_wait_one_vblank(crtc[i]);
++ drm_crtc_vblank_put(crtc[i]);
++ }
++
++ ret = 0;
++ }
++
++ if (plane_funcs->cleanup_fb && old_fb)
++ plane_funcs->cleanup_fb(plane, old_fb);
++out:
++ if (plane_state) {
++ if (plane->funcs->atomic_destroy_state)
++ plane->funcs->atomic_destroy_state(plane, plane_state);
++ else
++ drm_atomic_helper_plane_destroy_state(plane, plane_state);
++ }
++
++ return ret;
++}
++
++/**
++ * drm_plane_helper_update() - Helper for primary plane update
++ * @plane: plane object to update
++ * @crtc: owning CRTC of owning plane
++ * @fb: framebuffer to flip onto plane
++ * @crtc_x: x offset of primary plane on crtc
++ * @crtc_y: y offset of primary plane on crtc
++ * @crtc_w: width of primary plane rectangle on crtc
++ * @crtc_h: height of primary plane rectangle on crtc
++ * @src_x: x offset of @fb for panning
++ * @src_y: y offset of @fb for panning
++ * @src_w: width of source rectangle in @fb
++ * @src_h: height of source rectangle in @fb
++ *
++ * Provides a default plane update handler using the atomic plane update
++ * functions. It is fully left to the driver to check plane constraints and
++ * handle corner-cases like a fully occluded or otherwise invisible plane.
++ *
++ * This is useful for piecewise transitioning of a driver to the atomic helpers.
++ *
++ * RETURNS:
++ * Zero on success, error code on failure
++ */
++int drm_plane_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
++ struct drm_framebuffer *fb,
++ int crtc_x, int crtc_y,
++ unsigned int crtc_w, unsigned int crtc_h,
++ uint32_t src_x, uint32_t src_y,
++ uint32_t src_w, uint32_t src_h)
++{
++ struct drm_plane_state *plane_state;
++
++ if (plane->funcs->atomic_duplicate_state)
++ plane_state = plane->funcs->atomic_duplicate_state(plane);
++ else if (plane->state)
++ plane_state = drm_atomic_helper_plane_duplicate_state(plane);
++ else
++ plane_state = kzalloc(sizeof(*plane_state), GFP_KERNEL);
++ if (!plane_state)
++ return -ENOMEM;
++
++ plane_state->crtc = crtc;
++ drm_atomic_set_fb_for_plane(plane_state, fb);
++ plane_state->crtc_x = crtc_x;
++ plane_state->crtc_y = crtc_y;
++ plane_state->crtc_h = crtc_h;
++ plane_state->crtc_w = crtc_w;
++ plane_state->src_x = src_x;
++ plane_state->src_y = src_y;
++ plane_state->src_h = src_h;
++ plane_state->src_w = src_w;
++
++ return drm_plane_helper_commit(plane, plane_state, plane->fb);
++}
++EXPORT_SYMBOL(drm_plane_helper_update);
++
++/**
++ * drm_plane_helper_disable() - Helper for primary plane disable
++ * @plane: plane to disable
++ *
++ * Provides a default plane disable handler using the atomic plane update
++ * functions. It is fully left to the driver to check plane constraints and
++ * handle corner-cases like a fully occluded or otherwise invisible plane.
++ *
++ * This is useful for piecewise transitioning of a driver to the atomic helpers.
++ *
++ * RETURNS:
++ * Zero on success, error code on failure
++ */
++int drm_plane_helper_disable(struct drm_plane *plane)
++{
++ struct drm_plane_state *plane_state;
++
++	/* crtc helpers love to call disable functions for hardware that is
++	 * already disabled. So cope with that. */
++ if (!plane->crtc)
++ return 0;
++
++ if (plane->funcs->atomic_duplicate_state)
++ plane_state = plane->funcs->atomic_duplicate_state(plane);
++ else if (plane->state)
++ plane_state = drm_atomic_helper_plane_duplicate_state(plane);
++ else
++ plane_state = kzalloc(sizeof(*plane_state), GFP_KERNEL);
++ if (!plane_state)
++ return -ENOMEM;
++
++ plane_state->crtc = NULL;
++ drm_atomic_set_fb_for_plane(plane_state, NULL);
++
++ return drm_plane_helper_commit(plane, plane_state, plane->fb);
++}
++EXPORT_SYMBOL(drm_plane_helper_disable);
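
A sketch of how a driver midway through its atomic conversion would point
the legacy plane hooks at these transitional helpers (the structure name
is hypothetical):

static const struct drm_plane_funcs example_plane_funcs = {
	.update_plane  = drm_plane_helper_update,
	.disable_plane = drm_plane_helper_disable,
	.destroy       = drm_plane_cleanup,
};
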
+diff -Naur a/drivers/gpu/drm/drm_platform.c b/drivers/gpu/drm/drm_platform.c
+--- a/drivers/gpu/drm/drm_platform.c 2015-03-26 14:43:30.402436435 +0530
++++ b/drivers/gpu/drm/drm_platform.c 2015-03-26 14:42:38.734435422 +0530
+@@ -64,89 +64,43 @@
+ return 0;
+
+ err_free:
+- drm_dev_free(dev);
++ drm_dev_unref(dev);
+ return ret;
+ }
+
+-static int drm_platform_get_irq(struct drm_device *dev)
++int drm_platform_set_busid(struct drm_device *dev, struct drm_master *master)
+ {
+- return platform_get_irq(dev->platformdev, 0);
+-}
+-
+-static const char *drm_platform_get_name(struct drm_device *dev)
+-{
+- return dev->platformdev->name;
+-}
+-
+-static int drm_platform_set_busid(struct drm_device *dev, struct drm_master *master)
+-{
+- int len, ret, id;
+-
+- master->unique_len = 13 + strlen(dev->platformdev->name);
+- master->unique_size = master->unique_len;
+- master->unique = kmalloc(master->unique_len + 1, GFP_KERNEL);
+-
+- if (master->unique == NULL)
+- return -ENOMEM;
++ int id;
+
+ id = dev->platformdev->id;
+-
+- /* if only a single instance of the platform device, id will be
+- * set to -1.. use 0 instead to avoid a funny looking bus-id:
+- */
+- if (id == -1)
++ if (id < 0)
+ id = 0;
+
+- len = snprintf(master->unique, master->unique_len,
+- "platform:%s:%02d", dev->platformdev->name, id);
+-
+- if (len > master->unique_len) {
+- DRM_ERROR("Unique buffer overflowed\n");
+- ret = -EINVAL;
+- goto err;
+- }
+-
+- dev->devname =
+- kmalloc(strlen(dev->platformdev->name) +
+- master->unique_len + 2, GFP_KERNEL);
+-
+- if (dev->devname == NULL) {
+- ret = -ENOMEM;
+- goto err;
+- }
++ master->unique = kasprintf(GFP_KERNEL, "platform:%s:%02d",
++ dev->platformdev->name, id);
++ if (!master->unique)
++ return -ENOMEM;
+
+- sprintf(dev->devname, "%s@%s", dev->platformdev->name,
+- master->unique);
++ master->unique_len = strlen(master->unique);
+ return 0;
+-err:
+- return ret;
+ }
+-
+-static struct drm_bus drm_platform_bus = {
+- .bus_type = DRIVER_BUS_PLATFORM,
+- .get_irq = drm_platform_get_irq,
+- .get_name = drm_platform_get_name,
+- .set_busid = drm_platform_set_busid,
+-};
++EXPORT_SYMBOL(drm_platform_set_busid);
+
+ /**
+- * Platform device initialization. Called direct from modules.
++ * drm_platform_init - Register a platform device with the DRM subsystem
++ * @driver: DRM device driver
++ * @platform_device: platform device to register
+ *
+- * \return zero on success or a negative number on failure.
++ * Registers the specified DRM device driver and platform device with the DRM
++ * subsystem, initializing a drm_device structure and calling the driver's
++ * .load() function.
+ *
+- * Initializes a drm_device structures,registering the
+- * stubs
+- *
+- * Expands the \c DRIVER_PREINIT and \c DRIVER_POST_INIT macros before and
+- * after the initialization for driver customization.
++ * Return: 0 on success or a negative error code on failure.
+ */
+-
+ int drm_platform_init(struct drm_driver *driver, struct platform_device *platform_device)
+ {
+ DRM_DEBUG("\n");
+
+- driver->kdriver.platform_device = platform_device;
+- driver->bus = &drm_platform_bus;
+ return drm_get_platform_dev(platform_device, driver);
+ }
+ EXPORT_SYMBOL(drm_platform_init);
+diff -Naur a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
+--- a/drivers/gpu/drm/drm_prime.c 2015-03-26 14:43:30.402436435 +0530
++++ b/drivers/gpu/drm/drm_prime.c 2015-03-26 14:42:38.734435422 +0530
+@@ -29,6 +29,9 @@
+ #include <linux/export.h>
+ #include <linux/dma-buf.h>
+ #include <drm/drmP.h>
++#include <drm/drm_gem.h>
++
++#include "drm_internal.h"
+
+ /*
+ * DMA-BUF/GEM Object references and lifetime overview:
+@@ -68,7 +71,8 @@
+ enum dma_data_direction dir;
+ };
+
+-static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle)
++static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
++ struct dma_buf *dma_buf, uint32_t handle)
+ {
+ struct drm_prime_member *member;
+
+@@ -174,7 +178,7 @@
+ }
+
+ static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
+- enum dma_data_direction dir)
++ enum dma_data_direction dir)
+ {
+ struct drm_prime_attachment *prime_attach = attach->priv;
+ struct drm_gem_object *obj = attach->dmabuf->priv;
+@@ -211,11 +215,19 @@
+ }
+
+ static void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
+- struct sg_table *sgt, enum dma_data_direction dir)
++ struct sg_table *sgt,
++ enum dma_data_direction dir)
+ {
+ /* nothing to be done here */
+ }
+
++/**
++ * drm_gem_dmabuf_release - dma_buf release implementation for GEM
++ * @dma_buf: buffer to be released
++ *
++ * Generic release function for dma_bufs exported as PRIME buffers. GEM drivers
++ * must use this in their dma_buf ops structure as the release callback.
++ */
+ void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
+ {
+ struct drm_gem_object *obj = dma_buf->priv;
+@@ -242,30 +254,30 @@
+ }
+
+ static void *drm_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
+- unsigned long page_num)
++ unsigned long page_num)
+ {
+ return NULL;
+ }
+
+ static void drm_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
+- unsigned long page_num, void *addr)
++ unsigned long page_num, void *addr)
+ {
+
+ }
+ static void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf,
+- unsigned long page_num)
++ unsigned long page_num)
+ {
+ return NULL;
+ }
+
+ static void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
+- unsigned long page_num, void *addr)
++ unsigned long page_num, void *addr)
+ {
+
+ }
+
+ static int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf,
+- struct vm_area_struct *vma)
++ struct vm_area_struct *vma)
+ {
+ struct drm_gem_object *obj = dma_buf->priv;
+ struct drm_device *dev = obj->dev;
+@@ -315,10 +327,25 @@
+ * driver's scatter/gather table
+ */
+
++/**
++ * drm_gem_prime_export - helper library implementation of the export callback
++ * @dev: drm_device to export from
++ * @obj: GEM object to export
++ * @flags: flags like DRM_CLOEXEC
++ *
++ * This is the implementation of the gem_prime_export functions for GEM drivers
++ * using the PRIME helpers.
++ */
+ struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
+ struct drm_gem_object *obj, int flags)
+ {
+- return dma_buf_export(obj, &drm_gem_prime_dmabuf_ops, obj->size, flags);
++ struct reservation_object *robj = NULL;
++
++ if (dev->driver->gem_prime_res_obj)
++ robj = dev->driver->gem_prime_res_obj(obj);
++
++ return dma_buf_export(obj, &drm_gem_prime_dmabuf_ops, obj->size,
++ flags);
+ }
+ EXPORT_SYMBOL(drm_gem_prime_export);
+
+@@ -355,9 +382,23 @@
+ return dmabuf;
+ }
+
++/**
++ * drm_gem_prime_handle_to_fd - PRIME export function for GEM drivers
++ * @dev: dev to export the buffer from
++ * @file_priv: drm file-private structure
++ * @handle: buffer handle to export
++ * @flags: flags like DRM_CLOEXEC
++ * @prime_fd: pointer to storage for the fd id of the created dma-buf
++ *
++ * This is the PRIME export function which GEM drivers must use to ensure
++ * correct lifetime management of the underlying GEM object.
++ * The actual exporting from GEM object to a dma-buf is done through the
++ * gem_prime_export driver callback.
++ */
+ int drm_gem_prime_handle_to_fd(struct drm_device *dev,
+- struct drm_file *file_priv, uint32_t handle, uint32_t flags,
+- int *prime_fd)
++ struct drm_file *file_priv, uint32_t handle,
++ uint32_t flags,
++ int *prime_fd)
+ {
+ struct drm_gem_object *obj;
+ int ret = 0;
+@@ -441,6 +482,14 @@
+ }
+ EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
+
++/**
++ * drm_gem_prime_import - helper library implementation of the import callback
++ * @dev: drm_device to import into
++ * @dma_buf: dma-buf object to import
++ *
++ * This is the implementation of the gem_prime_import functions for GEM drivers
++ * using the PRIME helpers.
++ */
+ struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *dma_buf)
+ {
+@@ -471,12 +520,12 @@
+ get_dma_buf(dma_buf);
+
+ sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+- if (IS_ERR_OR_NULL(sgt)) {
++ if (IS_ERR(sgt)) {
+ ret = PTR_ERR(sgt);
+ goto fail_detach;
+ }
+
+- obj = dev->driver->gem_prime_import_sg_table(dev, dma_buf->size, sgt);
++ obj = dev->driver->gem_prime_import_sg_table(dev, attach, sgt);
+ if (IS_ERR(obj)) {
+ ret = PTR_ERR(obj);
+ goto fail_unmap;
+@@ -496,8 +545,21 @@
+ }
+ EXPORT_SYMBOL(drm_gem_prime_import);
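
A sketch of the usual PRIME wiring in a GEM driver's drm_driver, pairing
the fd/handle helpers with the export/import implementations documented
here (the structure name is hypothetical):

static struct drm_driver example_prime_driver = {
	.driver_features    = DRIVER_GEM | DRIVER_PRIME,
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export   = drm_gem_prime_export,
	.gem_prime_import   = drm_gem_prime_import,
};
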
+
++/**
++ * drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers
++ * @dev: dev to export the buffer from
++ * @file_priv: drm file-private structure
++ * @prime_fd: fd id of the dma-buf which should be imported
++ * @handle: pointer to storage for the handle of the imported buffer object
++ *
++ * This is the PRIME import function which GEM drivers must use to ensure
++ * correct lifetime management of the underlying GEM object.
++ * The actual importing of the GEM object from the dma-buf is done through
++ * the gem_prime_import driver callback.
++ */
+ int drm_gem_prime_fd_to_handle(struct drm_device *dev,
+- struct drm_file *file_priv, int prime_fd, uint32_t *handle)
++ struct drm_file *file_priv, int prime_fd,
++ uint32_t *handle)
+ {
+ struct dma_buf *dma_buf;
+ struct drm_gem_object *obj;
+@@ -598,14 +660,16 @@
+ args->fd, &args->handle);
+ }
+
+-/*
+- * drm_prime_pages_to_sg
++/**
++ * drm_prime_pages_to_sg - converts a page array into an sg list
++ * @pages: pointer to the array of page pointers to convert
++ * @nr_pages: length of the page vector
+ *
+- * this helper creates an sg table object from a set of pages
++ * This helper creates an sg table object from a set of pages
+ * the driver is responsible for mapping the pages into the
+- * importers address space
++ * importer's address space for use with dma_buf itself.
+ */
+-struct sg_table *drm_prime_pages_to_sg(struct page **pages, int nr_pages)
++struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages)
+ {
+ struct sg_table *sg = NULL;
+ int ret;
+@@ -628,9 +692,16 @@
+ }
+ EXPORT_SYMBOL(drm_prime_pages_to_sg);
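
A sketch of a gem_prime_get_sg_table callback built on this helper;
struct example_bo and to_example_bo() are hypothetical wrappers around a
page-backed buffer object:

static struct sg_table *example_get_sg_table(struct drm_gem_object *obj)
{
	struct example_bo *bo = to_example_bo(obj);

	return drm_prime_pages_to_sg(bo->pages, obj->size >> PAGE_SHIFT);
}
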
+
+-/* export an sg table into an array of pages and addresses
+- this is currently required by the TTM driver in order to do correct fault
+- handling */
++/**
++ * drm_prime_sg_to_page_addr_arrays - convert an sg table into a page array
++ * @sgt: scatter-gather table to convert
++ * @pages: array of page pointers to store the page array in
++ * @addrs: optional array to store the dma bus address of each page
++ * @max_pages: size of both the passed-in arrays
++ *
++ * Exports an sg table into an array of pages and addresses. This is currently
++ * required by the TTM driver in order to do correct fault handling.
++ */
+ int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
+ dma_addr_t *addrs, int max_pages)
+ {
+@@ -663,7 +734,15 @@
+ return 0;
+ }
+ EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays);
+-/* helper function to cleanup a GEM/prime object */
++
++/**
++ * drm_prime_gem_destroy - helper to clean up a PRIME-imported GEM object
++ * @obj: GEM object which was created from a dma-buf
++ * @sg: the sg-table which was pinned at import time
++ *
++ * This is the cleanup function which GEM drivers need to call when they use
++ * drm_gem_prime_import() to import dma-bufs.
++ */
+ void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
+ {
+ struct dma_buf_attachment *attach;
+@@ -683,11 +762,9 @@
+ INIT_LIST_HEAD(&prime_fpriv->head);
+ mutex_init(&prime_fpriv->lock);
+ }
+-EXPORT_SYMBOL(drm_prime_init_file_private);
+
+ void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
+ {
+ /* by now drm_gem_release should've made sure the list is empty */
+ WARN_ON(!list_empty(&prime_fpriv->head));
+ }
+-EXPORT_SYMBOL(drm_prime_destroy_file_private);
+diff -Naur a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
+--- a/drivers/gpu/drm/drm_probe_helper.c 1970-01-01 05:30:00.000000000 +0530
++++ b/drivers/gpu/drm/drm_probe_helper.c 2015-03-26 14:42:38.738435422 +0530
+@@ -0,0 +1,473 @@
++/*
++ * Copyright (c) 2006-2008 Intel Corporation
++ * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
++ *
++ * DRM core CRTC related functions
++ *
++ * Permission to use, copy, modify, distribute, and sell this software and its
++ * documentation for any purpose is hereby granted without fee, provided that
++ * the above copyright notice appear in all copies and that both that copyright
++ * notice and this permission notice appear in supporting documentation, and
++ * that the name of the copyright holders not be used in advertising or
++ * publicity pertaining to distribution of the software without specific,
++ * written prior permission. The copyright holders make no representations
++ * about the suitability of this software for any purpose. It is provided "as
++ * is" without express or implied warranty.
++ *
++ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
++ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
++ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
++ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
++ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
++ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
++ * OF THIS SOFTWARE.
++ *
++ * Authors:
++ * Keith Packard
++ * Eric Anholt <eric@anholt.net>
++ * Dave Airlie <airlied@linux.ie>
++ * Jesse Barnes <jesse.barnes@intel.com>
++ */
++
++#include <linux/export.h>
++#include <linux/moduleparam.h>
++
++#include <drm/drmP.h>
++#include <drm/drm_crtc.h>
++#include <drm/drm_fourcc.h>
++#include <drm/drm_crtc_helper.h>
++#include <drm/drm_fb_helper.h>
++#include <drm/drm_edid.h>
++
++/**
++ * DOC: output probing helper overview
++ *
++ * This library provides some helper code for output probing. It provides an
++ * implementation of the core connector->fill_modes interface with
++ * drm_helper_probe_single_connector_modes.
++ *
++ * It also provides support for polling connectors with a work item and for
++ * generic hotplug interrupt handling where the driver doesn't or cannot keep
++ * track of a per-connector hpd interrupt.
++ *
++ * This helper library can be used independently of the modeset helper library.
++ * Drivers can also override different parts, e.g. use their own hotplug
++ * handling code to avoid probing unrelated outputs.
++ */
++
++static bool drm_kms_helper_poll = true;
++module_param_named(poll, drm_kms_helper_poll, bool, 0600);
++
++static void drm_mode_validate_flag(struct drm_connector *connector,
++ int flags)
++{
++ struct drm_display_mode *mode;
++
++ if (flags == (DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_INTERLACE |
++ DRM_MODE_FLAG_3D_MASK))
++ return;
++
++ list_for_each_entry(mode, &connector->modes, head) {
++ if ((mode->flags & DRM_MODE_FLAG_INTERLACE) &&
++ !(flags & DRM_MODE_FLAG_INTERLACE))
++ mode->status = MODE_NO_INTERLACE;
++ if ((mode->flags & DRM_MODE_FLAG_DBLSCAN) &&
++ !(flags & DRM_MODE_FLAG_DBLSCAN))
++ mode->status = MODE_NO_DBLESCAN;
++ if ((mode->flags & DRM_MODE_FLAG_3D_MASK) &&
++ !(flags & DRM_MODE_FLAG_3D_MASK))
++ mode->status = MODE_NO_STEREO;
++ }
++
++ return;
++}
++
++static int drm_helper_probe_add_cmdline_mode(struct drm_connector *connector)
++{
++ struct drm_display_mode *mode;
++
++ if (!connector->cmdline_mode.specified)
++ return 0;
++
++ mode = drm_mode_create_from_cmdline_mode(connector->dev,
++ &connector->cmdline_mode);
++ if (mode == NULL)
++ return 0;
++
++ drm_mode_probed_add(connector, mode);
++ return 1;
++}
++
++static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connector *connector,
++ uint32_t maxX, uint32_t maxY, bool merge_type_bits)
++{
++ struct drm_device *dev = connector->dev;
++ struct drm_display_mode *mode;
++ struct drm_connector_helper_funcs *connector_funcs =
++ connector->helper_private;
++ int count = 0;
++ int mode_flags = 0;
++ bool verbose_prune = true;
++
++ WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
++
++ DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
++ connector->name);
++ /* set all modes to the unverified state */
++ list_for_each_entry(mode, &connector->modes, head)
++ mode->status = MODE_UNVERIFIED;
++
++ if (connector->force) {
++ if (connector->force == DRM_FORCE_ON ||
++ connector->force == DRM_FORCE_ON_DIGITAL)
++ connector->status = connector_status_connected;
++ else
++ connector->status = connector_status_disconnected;
++ if (connector->funcs->force)
++ connector->funcs->force(connector);
++ } else {
++ connector->status = connector->funcs->detect(connector, true);
++ }
++
++ /* Re-enable polling in case the global poll config changed. */
++ if (drm_kms_helper_poll != dev->mode_config.poll_running)
++ drm_kms_helper_poll_enable(dev);
++
++ dev->mode_config.poll_running = drm_kms_helper_poll;
++
++ if (connector->status == connector_status_disconnected) {
++ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n",
++ connector->base.id, connector->name);
++ drm_mode_connector_update_edid_property(connector, NULL);
++ verbose_prune = false;
++ goto prune;
++ }
++
++#ifdef CONFIG_DRM_LOAD_EDID_FIRMWARE
++ count = drm_load_edid_firmware(connector);
++ if (count == 0)
++#endif
++ {
++ if (connector->override_edid) {
++ struct edid *edid = (struct edid *) connector->edid_blob_ptr->data;
++
++ count = drm_add_edid_modes(connector, edid);
++ } else
++ count = (*connector_funcs->get_modes)(connector);
++ }
++
++ if (count == 0 && connector->status == connector_status_connected)
++ count = drm_add_modes_noedid(connector, 1024, 768);
++ count += drm_helper_probe_add_cmdline_mode(connector);
++ if (count == 0)
++ goto prune;
++
++ drm_mode_connector_list_update(connector, merge_type_bits);
++
++ if (maxX && maxY)
++ drm_mode_validate_size(dev, &connector->modes, maxX, maxY);
++
++ if (connector->interlace_allowed)
++ mode_flags |= DRM_MODE_FLAG_INTERLACE;
++ if (connector->doublescan_allowed)
++ mode_flags |= DRM_MODE_FLAG_DBLSCAN;
++ if (connector->stereo_allowed)
++ mode_flags |= DRM_MODE_FLAG_3D_MASK;
++ drm_mode_validate_flag(connector, mode_flags);
++
++ list_for_each_entry(mode, &connector->modes, head) {
++ if (mode->status == MODE_OK && connector_funcs->mode_valid)
++ mode->status = connector_funcs->mode_valid(connector,
++ mode);
++ }
++
++prune:
++ drm_mode_prune_invalid(dev, &connector->modes, verbose_prune);
++
++ if (list_empty(&connector->modes))
++ return 0;
++
++ list_for_each_entry(mode, &connector->modes, head)
++ mode->vrefresh = drm_mode_vrefresh(mode);
++
++ drm_mode_sort(&connector->modes);
++
++ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] probed modes :\n", connector->base.id,
++ connector->name);
++ list_for_each_entry(mode, &connector->modes, head) {
++ drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
++ drm_mode_debug_printmodeline(mode);
++ }
++
++ return count;
++}
++
++/**
++ * drm_helper_probe_single_connector_modes - get complete set of display modes
++ * @connector: connector to probe
++ * @maxX: max width for modes
++ * @maxY: max height for modes
++ *
++ * Based on the helper callbacks implemented by @connector try to detect all
++ * valid modes. Modes will first be added to the connector's probed_modes list,
++ * then culled (based on validity and the @maxX, @maxY parameters) and put into
++ * the normal modes list.
++ *
++ * Intended to be used as a generic implementation of the ->fill_modes()
++ * @connector vfunc for drivers that use the crtc helpers for output mode
++ * filtering and detection.
++ *
++ * Returns:
++ * The number of modes found on @connector.
++ */
++int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
++ uint32_t maxX, uint32_t maxY)
++{
++ return drm_helper_probe_single_connector_modes_merge_bits(connector, maxX, maxY, true);
++}
++EXPORT_SYMBOL(drm_helper_probe_single_connector_modes);
++
++/**
++ * drm_helper_probe_single_connector_modes_nomerge - get complete set of display modes
++ * @connector: connector to probe
++ * @maxX: max width for modes
++ * @maxY: max height for modes
++ *
++ * This operates like drm_helper_probe_single_connector_modes() except it
++ * replaces the mode bits instead of merging them for preferred modes.
++ */
++int drm_helper_probe_single_connector_modes_nomerge(struct drm_connector *connector,
++ uint32_t maxX, uint32_t maxY)
++{
++ return drm_helper_probe_single_connector_modes_merge_bits(connector, maxX, maxY, false);
++}
++EXPORT_SYMBOL(drm_helper_probe_single_connector_modes_nomerge);
++
++/**
++ * drm_kms_helper_hotplug_event - fire off KMS hotplug events
++ * @dev: drm_device whose connector state changed
++ *
++ * This function fires off the uevent for userspace and also calls the
++ * output_poll_changed function, which is most commonly used to inform the fbdev
++ * emulation code and allow it to update the fbcon output configuration.
++ *
++ * Drivers should call this from their hotplug handling code when a change is
++ * detected. Note that this function does not do any output detection of its
++ * own, like drm_helper_hpd_irq_event() does - this is assumed to be done by the
++ * driver already.
++ *
++ * This function must be called from process context with no mode
++ * setting locks held.
++ */
++void drm_kms_helper_hotplug_event(struct drm_device *dev)
++{
++ /* send a uevent + call fbdev */
++ drm_sysfs_hotplug_event(dev);
++ if (dev->mode_config.funcs->output_poll_changed)
++ dev->mode_config.funcs->output_poll_changed(dev);
++}
++EXPORT_SYMBOL(drm_kms_helper_hotplug_event);
++
++#define DRM_OUTPUT_POLL_PERIOD (10*HZ)
++static void output_poll_execute(struct work_struct *work)
++{
++ struct delayed_work *delayed_work = to_delayed_work(work);
++ struct drm_device *dev = container_of(delayed_work, struct drm_device, mode_config.output_poll_work);
++ struct drm_connector *connector;
++ enum drm_connector_status old_status;
++ bool repoll = false, changed = false;
++
++ if (!drm_kms_helper_poll)
++ return;
++
++ mutex_lock(&dev->mode_config.mutex);
++ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
++
++ /* Ignore forced connectors. */
++ if (connector->force)
++ continue;
++
++ /* Ignore HPD capable connectors and connectors where we don't
++ * want any hotplug detection at all for polling. */
++ if (!connector->polled || connector->polled == DRM_CONNECTOR_POLL_HPD)
++ continue;
++
++ repoll = true;
++
++ old_status = connector->status;
++ /* if we are connected and don't want to poll for disconnect
++ skip it */
++ if (old_status == connector_status_connected &&
++ !(connector->polled & DRM_CONNECTOR_POLL_DISCONNECT))
++ continue;
++
++ connector->status = connector->funcs->detect(connector, false);
++ if (old_status != connector->status) {
++ const char *old, *new;
++
++ old = drm_get_connector_status_name(old_status);
++ new = drm_get_connector_status_name(connector->status);
++
++ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] "
++ "status updated from %s to %s\n",
++ connector->base.id,
++ connector->name,
++ old, new);
++
++ changed = true;
++ }
++ }
++
++ mutex_unlock(&dev->mode_config.mutex);
++
++ if (changed)
++ drm_kms_helper_hotplug_event(dev);
++
++ if (repoll)
++ schedule_delayed_work(delayed_work, DRM_OUTPUT_POLL_PERIOD);
++}
++
++/**
++ * drm_kms_helper_poll_disable - disable output polling
++ * @dev: drm_device
++ *
++ * This function disables the output polling work.
++ *
++ * Drivers can call this helper from their device suspend implementation. It is
++ * not an error to call this even when output polling isn't enabled or already
++ * disabled.
++ */
++void drm_kms_helper_poll_disable(struct drm_device *dev)
++{
++ if (!dev->mode_config.poll_enabled)
++ return;
++ cancel_delayed_work_sync(&dev->mode_config.output_poll_work);
++}
++EXPORT_SYMBOL(drm_kms_helper_poll_disable);
++
++/**
++ * drm_kms_helper_poll_enable - re-enable output polling.
++ * @dev: drm_device
++ *
++ * This function re-enables the output polling work.
++ *
++ * Drivers can call this helper from their device resume implementation. It is
++ * an error to call this when the output polling support has not yet been set
++ * up.
++ */
++void drm_kms_helper_poll_enable(struct drm_device *dev)
++{
++ bool poll = false;
++ struct drm_connector *connector;
++
++ if (!dev->mode_config.poll_enabled || !drm_kms_helper_poll)
++ return;
++
++ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
++ if (connector->polled & (DRM_CONNECTOR_POLL_CONNECT |
++ DRM_CONNECTOR_POLL_DISCONNECT))
++ poll = true;
++ }
++
++ if (poll)
++ schedule_delayed_work(&dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD);
++}
++EXPORT_SYMBOL(drm_kms_helper_poll_enable);
++
++/**
++ * drm_kms_helper_poll_init - initialize and enable output polling
++ * @dev: drm_device
++ *
++ * This function initializes and then also enables output polling support for
++ * @dev. Drivers which do not have reliable hotplug support in hardware can use
++ * this helper infrastructure to regularly poll such connectors for changes in
++ * their connection state.
++ *
++ * Drivers can control which connectors are polled by setting the
++ * DRM_CONNECTOR_POLL_CONNECT and DRM_CONNECTOR_POLL_DISCONNECT flags. On
++ * connectors where probing live outputs can result in visual distortion,
++ * drivers should not set the DRM_CONNECTOR_POLL_DISCONNECT flag.
++ * Connectors which have no flag or only DRM_CONNECTOR_POLL_HPD set are
++ * completely ignored by the polling logic.
++ *
++ * Note that a connector can be both polled and probed from the hotplug handler,
++ * in case the hotplug interrupt is known to be unreliable.
++ */
++void drm_kms_helper_poll_init(struct drm_device *dev)
++{
++ INIT_DELAYED_WORK(&dev->mode_config.output_poll_work, output_poll_execute);
++ dev->mode_config.poll_enabled = true;
++
++ drm_kms_helper_poll_enable(dev);
++}
++EXPORT_SYMBOL(drm_kms_helper_poll_init);
++
++/**
++ * drm_kms_helper_poll_fini - disable output polling and clean it up
++ * @dev: drm_device
++ */
++void drm_kms_helper_poll_fini(struct drm_device *dev)
++{
++ drm_kms_helper_poll_disable(dev);
++}
++EXPORT_SYMBOL(drm_kms_helper_poll_fini);
++
++/**
++ * drm_helper_hpd_irq_event - hotplug processing
++ * @dev: drm_device
++ *
++ * Drivers can use this helper function to run a detect cycle on all connectors
++ * which have the DRM_CONNECTOR_POLL_HPD flag set in their &polled member. All
++ * other connectors are ignored, which is useful to avoid reprobing fixed
++ * panels.
++ *
++ * This helper function is useful for drivers which can't or don't track hotplug
++ * interrupts for each connector.
++ *
++ * Drivers which support hotplug interrupts for each connector individually and
++ * which have a more fine-grained detect logic should bypass this code and
++ * directly call drm_kms_helper_hotplug_event() in case the connector state
++ * changed.
++ *
++ * This function must be called from process context with no mode
++ * setting locks held.
++ *
++ * Note that a connector can be both polled and probed from the hotplug handler,
++ * in case the hotplug interrupt is known to be unreliable.
++ */
++bool drm_helper_hpd_irq_event(struct drm_device *dev)
++{
++ struct drm_connector *connector;
++ enum drm_connector_status old_status;
++ bool changed = false;
++
++ if (!dev->mode_config.poll_enabled)
++ return false;
++
++ mutex_lock(&dev->mode_config.mutex);
++ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
++
++ /* Only handle HPD capable connectors. */
++ if (!(connector->polled & DRM_CONNECTOR_POLL_HPD))
++ continue;
++
++ old_status = connector->status;
++
++ connector->status = connector->funcs->detect(connector, false);
++ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
++ connector->base.id,
++ connector->name,
++ drm_get_connector_status_name(old_status),
++ drm_get_connector_status_name(connector->status));
++ if (old_status != connector->status)
++ changed = true;
++ }
++
++ mutex_unlock(&dev->mode_config.mutex);
++
++ if (changed)
++ drm_kms_helper_hotplug_event(dev);
++
++ return changed;
++}
++EXPORT_SYMBOL(drm_helper_hpd_irq_event);
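
A sketch of how a driver might wire up the polling and hotplug helpers added
above; struct foo_device and the foo_* functions are hypothetical, while the
helper calls and connector->polled flags are from this file:

#include <linux/workqueue.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>

struct foo_device {
	struct drm_device *drm;
	struct work_struct hotplug_work;
};

static void foo_hotplug_work(struct work_struct *work)
{
	struct foo_device *foo =
		container_of(work, struct foo_device, hotplug_work);

	/* Process context, no modeset locks held, as required above. */
	drm_helper_hpd_irq_event(foo->drm);
}

static void foo_modeset_init(struct foo_device *foo,
			     struct drm_connector *connector)
{
	/* No reliable HPD line: poll for both connect and disconnect. */
	connector->polled = DRM_CONNECTOR_POLL_CONNECT |
			    DRM_CONNECTOR_POLL_DISCONNECT;

	INIT_WORK(&foo->hotplug_work, foo_hotplug_work);

	/* Starts the DRM_OUTPUT_POLL_PERIOD work item for pollable connectors. */
	drm_kms_helper_poll_init(foo->drm);
}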
+diff -Naur a/drivers/gpu/drm/drm_rect.c b/drivers/gpu/drm/drm_rect.c
+--- a/drivers/gpu/drm/drm_rect.c 2015-03-26 14:43:30.402436435 +0530
++++ b/drivers/gpu/drm/drm_rect.c 2015-03-26 14:42:38.738435422 +0530
+@@ -293,3 +293,143 @@
+ DRM_DEBUG_KMS("%dx%d%+d%+d\n", w, h, r->x1, r->y1);
+ }
+ EXPORT_SYMBOL(drm_rect_debug_print);
++
++/**
++ * drm_rect_rotate - Rotate the rectangle
++ * @r: rectangle to be rotated
++ * @width: Width of the coordinate space
++ * @height: Height of the coordinate space
++ * @rotation: Transformation to be applied
++ *
++ * Apply @rotation to the coordinates of rectangle @r.
++ *
++ * @width and @height combined with @rotation define
++ * the location of the new origin.
++ *
++ * @width corresponds to the horizontal and @height
++ * to the vertical axis of the untransformed coordinate
++ * space.
++ */
++void drm_rect_rotate(struct drm_rect *r,
++ int width, int height,
++ unsigned int rotation)
++{
++ struct drm_rect tmp;
++
++ if (rotation & (BIT(DRM_REFLECT_X) | BIT(DRM_REFLECT_Y))) {
++ tmp = *r;
++
++ if (rotation & BIT(DRM_REFLECT_X)) {
++ r->x1 = width - tmp.x2;
++ r->x2 = width - tmp.x1;
++ }
++
++ if (rotation & BIT(DRM_REFLECT_Y)) {
++ r->y1 = height - tmp.y2;
++ r->y2 = height - tmp.y1;
++ }
++ }
++
++ switch (rotation & 0xf) {
++ case BIT(DRM_ROTATE_0):
++ break;
++ case BIT(DRM_ROTATE_90):
++ tmp = *r;
++ r->x1 = tmp.y1;
++ r->x2 = tmp.y2;
++ r->y1 = width - tmp.x2;
++ r->y2 = width - tmp.x1;
++ break;
++ case BIT(DRM_ROTATE_180):
++ tmp = *r;
++ r->x1 = width - tmp.x2;
++ r->x2 = width - tmp.x1;
++ r->y1 = height - tmp.y2;
++ r->y2 = height - tmp.y1;
++ break;
++ case BIT(DRM_ROTATE_270):
++ tmp = *r;
++ r->x1 = height - tmp.y2;
++ r->x2 = height - tmp.y1;
++ r->y1 = tmp.x1;
++ r->y2 = tmp.x2;
++ break;
++ default:
++ break;
++ }
++}
++EXPORT_SYMBOL(drm_rect_rotate);
++
++/**
++ * drm_rect_rotate_inv - Inverse rotate the rectangle
++ * @r: rectangle to be rotated
++ * @width: Width of the coordinate space
++ * @height: Height of the coordinate space
++ * @rotation: Transformation whose inverse is to be applied
++ *
++ * Apply the inverse of @rotation to the coordinates
++ * of rectangle @r.
++ *
++ * @width and @height combined with @rotation define
++ * the location of the new origin.
++ *
++ * @width corresponds to the horizontal and @height
++ * to the vertical axis of the original untransformed
++ * coordinate space, so that you never have to flip
++ * them when doing a rotation and its inverse.
++ * That is, if you do:
++ *
++ * drm_rect_rotate(&r, width, height, rotation);
++ * drm_rect_rotate_inv(&r, width, height, rotation);
++ *
++ * you will always get back the original rectangle.
++ */
++void drm_rect_rotate_inv(struct drm_rect *r,
++ int width, int height,
++ unsigned int rotation)
++{
++ struct drm_rect tmp;
++
++ switch (rotation & 0xf) {
++ case BIT(DRM_ROTATE_0):
++ break;
++ case BIT(DRM_ROTATE_90):
++ tmp = *r;
++ r->x1 = width - tmp.y2;
++ r->x2 = width - tmp.y1;
++ r->y1 = tmp.x1;
++ r->y2 = tmp.x2;
++ break;
++ case BIT(DRM_ROTATE_180):
++ tmp = *r;
++ r->x1 = width - tmp.x2;
++ r->x2 = width - tmp.x1;
++ r->y1 = height - tmp.y2;
++ r->y2 = height - tmp.y1;
++ break;
++ case BIT(DRM_ROTATE_270):
++ tmp = *r;
++ r->x1 = tmp.y1;
++ r->x2 = tmp.y2;
++ r->y1 = height - tmp.x2;
++ r->y2 = height - tmp.x1;
++ break;
++ default:
++ break;
++ }
++
++ if (rotation & (BIT(DRM_REFLECT_X) | BIT(DRM_REFLECT_Y))) {
++ tmp = *r;
++
++ if (rotation & BIT(DRM_REFLECT_X)) {
++ r->x1 = width - tmp.x2;
++ r->x2 = width - tmp.x1;
++ }
++
++ if (rotation & BIT(DRM_REFLECT_Y)) {
++ r->y1 = height - tmp.y2;
++ r->y2 = height - tmp.y1;
++ }
++ }
++}
++EXPORT_SYMBOL(drm_rect_rotate_inv);
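
A short sketch of the round-trip property documented above; the coordinates
are arbitrary and DRM_ROTATE_90 comes from drm_crtc.h:

#include <drm/drm_crtc.h>
#include <drm/drm_rect.h>

static void foo_rect_roundtrip(void)
{
	/* 100x50 rectangle at (10,20) in a 1920x1080 source space. */
	struct drm_rect r = { .x1 = 10, .y1 = 20, .x2 = 110, .y2 = 70 };

	drm_rect_rotate(&r, 1920, 1080, BIT(DRM_ROTATE_90));
	/* r is now { 20, 1810, 70, 1910 } in the rotated space. */

	drm_rect_rotate_inv(&r, 1920, 1080, BIT(DRM_ROTATE_90));
	/* r is { 10, 20, 110, 70 } again; note that width/height are
	 * passed untransformed to both calls. */
}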
+diff -Naur a/drivers/gpu/drm/drm_scatter.c b/drivers/gpu/drm/drm_scatter.c
+--- a/drivers/gpu/drm/drm_scatter.c 2015-03-26 14:43:30.402436435 +0530
++++ b/drivers/gpu/drm/drm_scatter.c 2015-03-26 14:42:38.738435422 +0530
+@@ -34,6 +34,7 @@
+ #include <linux/vmalloc.h>
+ #include <linux/slab.h>
+ #include <drm/drmP.h>
++#include "drm_legacy.h"
+
+ #define DEBUG_SCATTER 0
+
+@@ -78,8 +79,8 @@
+ # define ScatterHandle(x) (unsigned int)(x)
+ #endif
+
+-int drm_sg_alloc(struct drm_device *dev, void *data,
+- struct drm_file *file_priv)
++int drm_legacy_sg_alloc(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
+ {
+ struct drm_scatter_gather *request = data;
+ struct drm_sg_mem *entry;
+@@ -194,8 +195,8 @@
+ return -ENOMEM;
+ }
+
+-int drm_sg_free(struct drm_device *dev, void *data,
+- struct drm_file *file_priv)
++int drm_legacy_sg_free(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
+ {
+ struct drm_scatter_gather *request = data;
+ struct drm_sg_mem *entry;
+diff -Naur a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
+--- a/drivers/gpu/drm/drm_stub.c 2015-03-26 14:43:30.398436435 +0530
++++ b/drivers/gpu/drm/drm_stub.c 1970-01-01 05:30:00.000000000 +0530
+@@ -1,607 +0,0 @@
+-/**
+- * \file drm_stub.h
+- * Stub support
+- *
+- * \author Rickard E. (Rik) Faith <faith@valinux.com>
+- */
+-
+-/*
+- * Created: Fri Jan 19 10:48:35 2001 by faith@acm.org
+- *
+- * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California.
+- * All Rights Reserved.
+- *
+- * Permission is hereby granted, free of charge, to any person obtaining a
+- * copy of this software and associated documentation files (the "Software"),
+- * to deal in the Software without restriction, including without limitation
+- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+- * and/or sell copies of the Software, and to permit persons to whom the
+- * Software is furnished to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice (including the next
+- * paragraph) shall be included in all copies or substantial portions of the
+- * Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+- * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+- * DEALINGS IN THE SOFTWARE.
+- */
+-
+-#include <linux/module.h>
+-#include <linux/moduleparam.h>
+-#include <linux/slab.h>
+-#include <drm/drmP.h>
+-#include <drm/drm_core.h>
+-
+-unsigned int drm_debug = 0; /* 1 to enable debug output */
+-EXPORT_SYMBOL(drm_debug);
+-
+-unsigned int drm_rnodes = 0; /* 1 to enable experimental render nodes API */
+-EXPORT_SYMBOL(drm_rnodes);
+-
+-unsigned int drm_vblank_offdelay = 5000; /* Default to 5000 msecs. */
+-EXPORT_SYMBOL(drm_vblank_offdelay);
+-
+-unsigned int drm_timestamp_precision = 20; /* Default to 20 usecs. */
+-EXPORT_SYMBOL(drm_timestamp_precision);
+-
+-/*
+- * Default to use monotonic timestamps for wait-for-vblank and page-flip
+- * complete events.
+- */
+-unsigned int drm_timestamp_monotonic = 1;
+-
+-MODULE_AUTHOR(CORE_AUTHOR);
+-MODULE_DESCRIPTION(CORE_DESC);
+-MODULE_LICENSE("GPL and additional rights");
+-MODULE_PARM_DESC(debug, "Enable debug output");
+-MODULE_PARM_DESC(rnodes, "Enable experimental render nodes API");
+-MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs]");
+-MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
+-MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps");
+-
+-module_param_named(debug, drm_debug, int, 0600);
+-module_param_named(rnodes, drm_rnodes, int, 0600);
+-module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600);
+-module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600);
+-module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600);
+-
+-struct idr drm_minors_idr;
+-
+-struct class *drm_class;
+-struct dentry *drm_debugfs_root;
+-
+-int drm_err(const char *func, const char *format, ...)
+-{
+- struct va_format vaf;
+- va_list args;
+- int r;
+-
+- va_start(args, format);
+-
+- vaf.fmt = format;
+- vaf.va = &args;
+-
+- r = printk(KERN_ERR "[" DRM_NAME ":%s] *ERROR* %pV", func, &vaf);
+-
+- va_end(args);
+-
+- return r;
+-}
+-EXPORT_SYMBOL(drm_err);
+-
+-void drm_ut_debug_printk(unsigned int request_level,
+- const char *prefix,
+- const char *function_name,
+- const char *format, ...)
+-{
+- struct va_format vaf;
+- va_list args;
+-
+- if (drm_debug & request_level) {
+- va_start(args, format);
+- vaf.fmt = format;
+- vaf.va = &args;
+-
+- if (function_name)
+- printk(KERN_DEBUG "[%s:%s], %pV", prefix,
+- function_name, &vaf);
+- else
+- printk(KERN_DEBUG "%pV", &vaf);
+- va_end(args);
+- }
+-}
+-EXPORT_SYMBOL(drm_ut_debug_printk);
+-
+-static int drm_minor_get_id(struct drm_device *dev, int type)
+-{
+- int ret;
+- int base = 0, limit = 63;
+-
+- if (type == DRM_MINOR_CONTROL) {
+- base += 64;
+- limit = base + 63;
+- } else if (type == DRM_MINOR_RENDER) {
+- base += 128;
+- limit = base + 63;
+- }
+-
+- mutex_lock(&dev->struct_mutex);
+- ret = idr_alloc(&drm_minors_idr, NULL, base, limit, GFP_KERNEL);
+- mutex_unlock(&dev->struct_mutex);
+-
+- return ret == -ENOSPC ? -EINVAL : ret;
+-}
+-
+-struct drm_master *drm_master_create(struct drm_minor *minor)
+-{
+- struct drm_master *master;
+-
+- master = kzalloc(sizeof(*master), GFP_KERNEL);
+- if (!master)
+- return NULL;
+-
+- kref_init(&master->refcount);
+- spin_lock_init(&master->lock.spinlock);
+- init_waitqueue_head(&master->lock.lock_queue);
+- drm_ht_create(&master->magiclist, DRM_MAGIC_HASH_ORDER);
+- INIT_LIST_HEAD(&master->magicfree);
+- master->minor = minor;
+-
+- list_add_tail(&master->head, &minor->master_list);
+-
+- return master;
+-}
+-
+-struct drm_master *drm_master_get(struct drm_master *master)
+-{
+- kref_get(&master->refcount);
+- return master;
+-}
+-EXPORT_SYMBOL(drm_master_get);
+-
+-static void drm_master_destroy(struct kref *kref)
+-{
+- struct drm_master *master = container_of(kref, struct drm_master, refcount);
+- struct drm_magic_entry *pt, *next;
+- struct drm_device *dev = master->minor->dev;
+- struct drm_map_list *r_list, *list_temp;
+-
+- list_del(&master->head);
+-
+- if (dev->driver->master_destroy)
+- dev->driver->master_destroy(dev, master);
+-
+- list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
+- if (r_list->master == master) {
+- drm_rmmap_locked(dev, r_list->map);
+- r_list = NULL;
+- }
+- }
+-
+- if (master->unique) {
+- kfree(master->unique);
+- master->unique = NULL;
+- master->unique_len = 0;
+- }
+-
+- kfree(dev->devname);
+- dev->devname = NULL;
+-
+- list_for_each_entry_safe(pt, next, &master->magicfree, head) {
+- list_del(&pt->head);
+- drm_ht_remove_item(&master->magiclist, &pt->hash_item);
+- kfree(pt);
+- }
+-
+- drm_ht_remove(&master->magiclist);
+-
+- kfree(master);
+-}
+-
+-void drm_master_put(struct drm_master **master)
+-{
+- kref_put(&(*master)->refcount, drm_master_destroy);
+- *master = NULL;
+-}
+-EXPORT_SYMBOL(drm_master_put);
+-
+-int drm_setmaster_ioctl(struct drm_device *dev, void *data,
+- struct drm_file *file_priv)
+-{
+- int ret = 0;
+-
+- if (file_priv->is_master)
+- return 0;
+-
+- if (file_priv->minor->master && file_priv->minor->master != file_priv->master)
+- return -EINVAL;
+-
+- if (!file_priv->master)
+- return -EINVAL;
+-
+- if (file_priv->minor->master)
+- return -EINVAL;
+-
+- mutex_lock(&dev->struct_mutex);
+- file_priv->minor->master = drm_master_get(file_priv->master);
+- file_priv->is_master = 1;
+- if (dev->driver->master_set) {
+- ret = dev->driver->master_set(dev, file_priv, false);
+- if (unlikely(ret != 0)) {
+- file_priv->is_master = 0;
+- drm_master_put(&file_priv->minor->master);
+- }
+- }
+- mutex_unlock(&dev->struct_mutex);
+-
+- return ret;
+-}
+-
+-int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
+- struct drm_file *file_priv)
+-{
+- if (!file_priv->is_master)
+- return -EINVAL;
+-
+- if (!file_priv->minor->master)
+- return -EINVAL;
+-
+- mutex_lock(&dev->struct_mutex);
+- if (dev->driver->master_drop)
+- dev->driver->master_drop(dev, file_priv, false);
+- drm_master_put(&file_priv->minor->master);
+- file_priv->is_master = 0;
+- mutex_unlock(&dev->struct_mutex);
+- return 0;
+-}
+-
+-/**
+- * drm_get_minor - Allocate and register new DRM minor
+- * @dev: DRM device
+- * @minor: Pointer to where new minor is stored
+- * @type: Type of minor
+- *
+- * Allocate a new minor of the given type and register it. A pointer to the new
+- * minor is returned in @minor.
+- * Caller must hold the global DRM mutex.
+- *
+- * RETURNS:
+- * 0 on success, negative error code on failure.
+- */
+-static int drm_get_minor(struct drm_device *dev, struct drm_minor **minor,
+- int type)
+-{
+- struct drm_minor *new_minor;
+- int ret;
+- int minor_id;
+-
+- DRM_DEBUG("\n");
+-
+- minor_id = drm_minor_get_id(dev, type);
+- if (minor_id < 0)
+- return minor_id;
+-
+- new_minor = kzalloc(sizeof(struct drm_minor), GFP_KERNEL);
+- if (!new_minor) {
+- ret = -ENOMEM;
+- goto err_idr;
+- }
+-
+- new_minor->type = type;
+- new_minor->device = MKDEV(DRM_MAJOR, minor_id);
+- new_minor->dev = dev;
+- new_minor->index = minor_id;
+- INIT_LIST_HEAD(&new_minor->master_list);
+-
+- idr_replace(&drm_minors_idr, new_minor, minor_id);
+-
+-#if defined(CONFIG_DEBUG_FS)
+- ret = drm_debugfs_init(new_minor, minor_id, drm_debugfs_root);
+- if (ret) {
+- DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
+- goto err_mem;
+- }
+-#endif
+-
+- ret = drm_sysfs_device_add(new_minor);
+- if (ret) {
+- printk(KERN_ERR
+- "DRM: Error sysfs_device_add.\n");
+- goto err_debugfs;
+- }
+- *minor = new_minor;
+-
+- DRM_DEBUG("new minor assigned %d\n", minor_id);
+- return 0;
+-
+-
+-err_debugfs:
+-#if defined(CONFIG_DEBUG_FS)
+- drm_debugfs_cleanup(new_minor);
+-err_mem:
+-#endif
+- kfree(new_minor);
+-err_idr:
+- idr_remove(&drm_minors_idr, minor_id);
+- *minor = NULL;
+- return ret;
+-}
+-
+-/**
+- * drm_unplug_minor - Unplug DRM minor
+- * @minor: Minor to unplug
+- *
+- * Unplugs the given DRM minor but keeps the object. So after this returns,
+- * minor->dev is still valid so existing open-files can still access it to get
+- * device information from their drm_file ojects.
+- * If the minor is already unplugged or if @minor is NULL, nothing is done.
+- * The global DRM mutex must be held by the caller.
+- */
+-static void drm_unplug_minor(struct drm_minor *minor)
+-{
+- if (!minor || !minor->kdev)
+- return;
+-
+-#if defined(CONFIG_DEBUG_FS)
+- drm_debugfs_cleanup(minor);
+-#endif
+-
+- drm_sysfs_device_remove(minor);
+- idr_remove(&drm_minors_idr, minor->index);
+-}
+-
+-/**
+- * drm_put_minor - Destroy DRM minor
+- * @minor: Minor to destroy
+- *
+- * This calls drm_unplug_minor() on the given minor and then frees it. Nothing
+- * is done if @minor is NULL. It is fine to call this on already unplugged
+- * minors.
+- * The global DRM mutex must be held by the caller.
+- */
+-static void drm_put_minor(struct drm_minor *minor)
+-{
+- if (!minor)
+- return;
+-
+- DRM_DEBUG("release secondary minor %d\n", minor->index);
+-
+- drm_unplug_minor(minor);
+- kfree(minor);
+-}
+-
+-/**
+- * Called via drm_exit() at module unload time or when pci device is
+- * unplugged.
+- *
+- * Cleans up all DRM device, calling drm_lastclose().
+- *
+- */
+-void drm_put_dev(struct drm_device *dev)
+-{
+- DRM_DEBUG("\n");
+-
+- if (!dev) {
+- DRM_ERROR("cleanup called no dev\n");
+- return;
+- }
+-
+- drm_dev_unregister(dev);
+- drm_dev_free(dev);
+-}
+-EXPORT_SYMBOL(drm_put_dev);
+-
+-void drm_unplug_dev(struct drm_device *dev)
+-{
+- /* for a USB device */
+- if (drm_core_check_feature(dev, DRIVER_MODESET))
+- drm_unplug_minor(dev->control);
+- if (dev->render)
+- drm_unplug_minor(dev->render);
+- drm_unplug_minor(dev->primary);
+-
+- mutex_lock(&drm_global_mutex);
+-
+- drm_device_set_unplugged(dev);
+-
+- if (dev->open_count == 0) {
+- drm_put_dev(dev);
+- }
+- mutex_unlock(&drm_global_mutex);
+-}
+-EXPORT_SYMBOL(drm_unplug_dev);
+-
+-/**
+- * drm_dev_alloc - Allocate new drm device
+- * @driver: DRM driver to allocate device for
+- * @parent: Parent device object
+- *
+- * Allocate and initialize a new DRM device. No device registration is done.
+- * Call drm_dev_register() to advertice the device to user space and register it
+- * with other core subsystems.
+- *
+- * RETURNS:
+- * Pointer to new DRM device, or NULL if out of memory.
+- */
+-struct drm_device *drm_dev_alloc(struct drm_driver *driver,
+- struct device *parent)
+-{
+- struct drm_device *dev;
+- int ret;
+-
+- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+- if (!dev)
+- return NULL;
+-
+- dev->dev = parent;
+- dev->driver = driver;
+-
+- INIT_LIST_HEAD(&dev->filelist);
+- INIT_LIST_HEAD(&dev->ctxlist);
+- INIT_LIST_HEAD(&dev->vmalist);
+- INIT_LIST_HEAD(&dev->maplist);
+- INIT_LIST_HEAD(&dev->vblank_event_list);
+-
+- spin_lock_init(&dev->count_lock);
+- spin_lock_init(&dev->event_lock);
+- mutex_init(&dev->struct_mutex);
+- mutex_init(&dev->ctxlist_mutex);
+-
+- if (drm_ht_create(&dev->map_hash, 12))
+- goto err_free;
+-
+- ret = drm_ctxbitmap_init(dev);
+- if (ret) {
+- DRM_ERROR("Cannot allocate memory for context bitmap.\n");
+- goto err_ht;
+- }
+-
+- if (driver->driver_features & DRIVER_GEM) {
+- ret = drm_gem_init(dev);
+- if (ret) {
+- DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
+- goto err_ctxbitmap;
+- }
+- }
+-
+- return dev;
+-
+-err_ctxbitmap:
+- drm_ctxbitmap_cleanup(dev);
+-err_ht:
+- drm_ht_remove(&dev->map_hash);
+-err_free:
+- kfree(dev);
+- return NULL;
+-}
+-EXPORT_SYMBOL(drm_dev_alloc);
+-
+-/**
+- * drm_dev_free - Free DRM device
+- * @dev: DRM device to free
+- *
+- * Free a DRM device that has previously been allocated via drm_dev_alloc().
+- * You must not use kfree() instead or you will leak memory.
+- *
+- * This must not be called once the device got registered. Use drm_put_dev()
+- * instead, which then calls drm_dev_free().
+- */
+-void drm_dev_free(struct drm_device *dev)
+-{
+- drm_put_minor(dev->control);
+- drm_put_minor(dev->render);
+- drm_put_minor(dev->primary);
+-
+- if (dev->driver->driver_features & DRIVER_GEM)
+- drm_gem_destroy(dev);
+-
+- drm_ctxbitmap_cleanup(dev);
+- drm_ht_remove(&dev->map_hash);
+-
+- kfree(dev->devname);
+- kfree(dev);
+-}
+-EXPORT_SYMBOL(drm_dev_free);
+-
+-/**
+- * drm_dev_register - Register DRM device
+- * @dev: Device to register
+- *
+- * Register the DRM device @dev with the system, advertise device to user-space
+- * and start normal device operation. @dev must be allocated via drm_dev_alloc()
+- * previously.
+- *
+- * Never call this twice on any device!
+- *
+- * RETURNS:
+- * 0 on success, negative error code on failure.
+- */
+-int drm_dev_register(struct drm_device *dev, unsigned long flags)
+-{
+- int ret;
+-
+- mutex_lock(&drm_global_mutex);
+-
+- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+- ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
+- if (ret)
+- goto out_unlock;
+- }
+-
+- if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) {
+- ret = drm_get_minor(dev, &dev->render, DRM_MINOR_RENDER);
+- if (ret)
+- goto err_control_node;
+- }
+-
+- ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY);
+- if (ret)
+- goto err_render_node;
+-
+- if (dev->driver->load) {
+- ret = dev->driver->load(dev, flags);
+- if (ret)
+- goto err_primary_node;
+- }
+-
+- /* setup grouping for legacy outputs */
+- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+- ret = drm_mode_group_init_legacy_group(dev,
+- &dev->primary->mode_group);
+- if (ret)
+- goto err_unload;
+- }
+-
+- ret = 0;
+- goto out_unlock;
+-
+-err_unload:
+- if (dev->driver->unload)
+- dev->driver->unload(dev);
+-err_primary_node:
+- drm_unplug_minor(dev->primary);
+-err_render_node:
+- drm_unplug_minor(dev->render);
+-err_control_node:
+- drm_unplug_minor(dev->control);
+-out_unlock:
+- mutex_unlock(&drm_global_mutex);
+- return ret;
+-}
+-EXPORT_SYMBOL(drm_dev_register);
+-
+-/**
+- * drm_dev_unregister - Unregister DRM device
+- * @dev: Device to unregister
+- *
+- * Unregister the DRM device from the system. This does the reverse of
+- * drm_dev_register() but does not deallocate the device. The caller must call
+- * drm_dev_free() to free all resources.
+- */
+-void drm_dev_unregister(struct drm_device *dev)
+-{
+- struct drm_map_list *r_list, *list_temp;
+-
+- drm_lastclose(dev);
+-
+- if (dev->driver->unload)
+- dev->driver->unload(dev);
+-
+- if (dev->agp)
+- drm_pci_agp_destroy(dev);
+-
+- drm_vblank_cleanup(dev);
+-
+- list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
+- drm_rmmap(dev, r_list->map);
+-
+- drm_unplug_minor(dev->control);
+- drm_unplug_minor(dev->render);
+- drm_unplug_minor(dev->primary);
+-}
+-EXPORT_SYMBOL(drm_dev_unregister);
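
drm_stub.c is deleted because its contents move into drm_drv.c (see the
Makefile hunk below); the device lifecycle its kernel-doc describes is
unchanged. A sketch under that assumption, with a hypothetical platform
driver "foo":

#include <linux/platform_device.h>
#include <drm/drmP.h>

static struct drm_driver foo_driver;	/* hypothetical; fops etc. omitted */

static int foo_probe(struct platform_device *pdev)
{
	struct drm_device *dev;
	int ret;

	dev = drm_dev_alloc(&foo_driver, &pdev->dev);	/* not yet registered */
	if (!dev)
		return -ENOMEM;

	ret = drm_dev_register(dev, 0);	/* minors appear, ->load() is called */
	if (ret) {
		drm_dev_free(dev);	/* never kfree() a drm_device directly */
		return ret;
	}

	platform_set_drvdata(pdev, dev);
	return 0;
}

static int foo_remove(struct platform_device *pdev)
{
	struct drm_device *dev = platform_get_drvdata(pdev);

	drm_put_dev(dev);	/* drm_dev_unregister() + drm_dev_free() */
	return 0;
}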
+diff -Naur a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
+--- a/drivers/gpu/drm/drm_sysfs.c 2015-03-26 14:43:30.462436436 +0530
++++ b/drivers/gpu/drm/drm_sysfs.c 2015-03-26 14:42:38.738435422 +0530
+@@ -21,6 +21,7 @@
+ #include <drm/drm_sysfs.h>
+ #include <drm/drm_core.h>
+ #include <drm/drmP.h>
++#include "drm_internal.h"
+
+ #define to_drm_minor(d) dev_get_drvdata(d)
+ #define to_drm_connector(d) dev_get_drvdata(d)
+@@ -380,9 +381,9 @@
+
+ connector->kdev = device_create(drm_class, dev->primary->kdev,
+ 0, connector, "card%d-%s",
+- dev->primary->index, drm_get_connector_name(connector));
++ dev->primary->index, connector->name);
+ DRM_DEBUG("adding \"%s\" to sysfs\n",
+- drm_get_connector_name(connector));
++ connector->name);
+
+ if (IS_ERR(connector->kdev)) {
+ DRM_ERROR("failed to register connector device: %ld\n", PTR_ERR(connector->kdev));
+@@ -438,7 +439,6 @@
+ out:
+ return ret;
+ }
+-EXPORT_SYMBOL(drm_sysfs_connector_add);
+
+ /**
+ * drm_sysfs_connector_remove - remove an connector device from sysfs
+@@ -460,7 +460,7 @@
+ if (!connector->kdev)
+ return;
+ DRM_DEBUG("removing \"%s\" from sysfs\n",
+- drm_get_connector_name(connector));
++ connector->name);
+
+ for (i = 0; i < ARRAY_SIZE(connector_attrs); i++)
+ device_remove_file(connector->kdev, &connector_attrs[i]);
+@@ -468,7 +468,6 @@
+ device_unregister(connector->kdev);
+ connector->kdev = NULL;
+ }
+-EXPORT_SYMBOL(drm_sysfs_connector_remove);
+
+ /**
+ * drm_sysfs_hotplug_event - generate a DRM uevent
+@@ -495,71 +494,55 @@
+ }
+
+ /**
+- * drm_sysfs_device_add - adds a class device to sysfs for a character driver
+- * @dev: DRM device to be added
+- * @head: DRM head in question
++ * drm_sysfs_minor_alloc() - Allocate sysfs device for given minor
++ * @minor: minor to allocate sysfs device for
+ *
+- * Add a DRM device to the DRM's device model class. We use @dev's PCI device
+- * as the parent for the Linux device, and make sure it has a file containing
+- * the driver we're using (for userspace compatibility).
++ * This allocates a new sysfs device for @minor and returns it. The device is
++ * not registered nor linked. The caller has to use device_add() and
++ * device_del() to register and unregister it.
++ *
++ * Note that dev_get_drvdata() on the new device will return the minor.
++ * However, the device does not hold a ref-count to the minor nor to the
++ * underlying drm_device. This is unproblematic as long as you access the
++ * private data only in sysfs callbacks. device_del() disables those
++ * synchronously, so they cannot be called after you cleanup a minor.
+ */
+-int drm_sysfs_device_add(struct drm_minor *minor)
++struct device *drm_sysfs_minor_alloc(struct drm_minor *minor)
+ {
+- char *minor_str;
++ const char *minor_str;
++ struct device *kdev;
+ int r;
+
+ if (minor->type == DRM_MINOR_CONTROL)
+ minor_str = "controlD%d";
+- else if (minor->type == DRM_MINOR_RENDER)
+- minor_str = "renderD%d";
+- else
+- minor_str = "card%d";
+-
+- minor->kdev = kzalloc(sizeof(*minor->kdev), GFP_KERNEL);
+- if (!minor->kdev) {
+- r = -ENOMEM;
+- goto error;
+- }
+-
+- device_initialize(minor->kdev);
+- minor->kdev->devt = MKDEV(DRM_MAJOR, minor->index);
+- minor->kdev->class = drm_class;
+- minor->kdev->type = &drm_sysfs_device_minor;
+- minor->kdev->parent = minor->dev->dev;
+- minor->kdev->release = drm_sysfs_release;
+- dev_set_drvdata(minor->kdev, minor);
++ else if (minor->type == DRM_MINOR_RENDER)
++ minor_str = "renderD%d";
++ else
++ minor_str = "card%d";
++
++ kdev = kzalloc(sizeof(*kdev), GFP_KERNEL);
++ if (!kdev)
++ return ERR_PTR(-ENOMEM);
++
++ device_initialize(kdev);
++ kdev->devt = MKDEV(DRM_MAJOR, minor->index);
++ kdev->class = drm_class;
++ kdev->type = &drm_sysfs_device_minor;
++ kdev->parent = minor->dev->dev;
++ kdev->release = drm_sysfs_release;
++ dev_set_drvdata(kdev, minor);
+
+- r = dev_set_name(minor->kdev, minor_str, minor->index);
++ r = dev_set_name(kdev, minor_str, minor->index);
+ if (r < 0)
+- goto error;
++ goto err_free;
+
+- r = device_add(minor->kdev);
+- if (r < 0)
+- goto error;
+-
+- return 0;
+-
+-error:
+- DRM_ERROR("device create failed %d\n", r);
+- put_device(minor->kdev);
+- return r;
+-}
++ return kdev;
+
+-/**
+- * drm_sysfs_device_remove - remove DRM device
+- * @dev: DRM device to remove
+- *
+- * This call unregisters and cleans up a class device that was created with a
+- * call to drm_sysfs_device_add()
+- */
+-void drm_sysfs_device_remove(struct drm_minor *minor)
+-{
+- if (minor->kdev)
+- device_unregister(minor->kdev);
+- minor->kdev = NULL;
++err_free:
++ put_device(kdev);
++ return ERR_PTR(r);
+ }
+
+-
+ /**
+ * drm_class_device_register - Register a struct device in the drm class.
+ *
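
A sketch of the allocate/add/del pattern prescribed by the
drm_sysfs_minor_alloc() kernel-doc above; foo_minor_register() is
hypothetical:

#include <linux/device.h>
#include <linux/err.h>
#include <drm/drmP.h>

static int foo_minor_register(struct drm_minor *minor)
{
	struct device *kdev;
	int ret;

	kdev = drm_sysfs_minor_alloc(minor);	/* initialized, not yet added */
	if (IS_ERR(kdev))
		return PTR_ERR(kdev);

	ret = device_add(kdev);			/* make it visible in sysfs */
	if (ret) {
		put_device(kdev);		/* frees via the release hook */
		return ret;
	}

	minor->kdev = kdev;
	return 0;
}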
+diff -Naur a/drivers/gpu/drm/drm_usb.c b/drivers/gpu/drm/drm_usb.c
+--- a/drivers/gpu/drm/drm_usb.c 2015-03-26 14:43:30.402436435 +0530
++++ b/drivers/gpu/drm/drm_usb.c 1970-01-01 05:30:00.000000000 +0530
+@@ -1,84 +0,0 @@
+-#include <drm/drmP.h>
+-#include <drm/drm_usb.h>
+-#include <linux/usb.h>
+-#include <linux/module.h>
+-
+-int drm_get_usb_dev(struct usb_interface *interface,
+- const struct usb_device_id *id,
+- struct drm_driver *driver)
+-{
+- struct drm_device *dev;
+- int ret;
+-
+- DRM_DEBUG("\n");
+-
+- dev = drm_dev_alloc(driver, &interface->dev);
+- if (!dev)
+- return -ENOMEM;
+-
+- dev->usbdev = interface_to_usbdev(interface);
+- usb_set_intfdata(interface, dev);
+-
+- ret = drm_dev_register(dev, 0);
+- if (ret)
+- goto err_free;
+-
+- DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
+- driver->name, driver->major, driver->minor, driver->patchlevel,
+- driver->date, dev->primary->index);
+-
+- return 0;
+-
+-err_free:
+- drm_dev_free(dev);
+- return ret;
+-
+-}
+-EXPORT_SYMBOL(drm_get_usb_dev);
+-
+-static int drm_usb_get_irq(struct drm_device *dev)
+-{
+- return 0;
+-}
+-
+-static const char *drm_usb_get_name(struct drm_device *dev)
+-{
+- return "USB";
+-}
+-
+-static int drm_usb_set_busid(struct drm_device *dev,
+- struct drm_master *master)
+-{
+- return 0;
+-}
+-
+-static struct drm_bus drm_usb_bus = {
+- .bus_type = DRIVER_BUS_USB,
+- .get_irq = drm_usb_get_irq,
+- .get_name = drm_usb_get_name,
+- .set_busid = drm_usb_set_busid,
+-};
+-
+-int drm_usb_init(struct drm_driver *driver, struct usb_driver *udriver)
+-{
+- int res;
+- DRM_DEBUG("\n");
+-
+- driver->kdriver.usb = udriver;
+- driver->bus = &drm_usb_bus;
+-
+- res = usb_register(udriver);
+- return res;
+-}
+-EXPORT_SYMBOL(drm_usb_init);
+-
+-void drm_usb_exit(struct drm_driver *driver,
+- struct usb_driver *udriver)
+-{
+- usb_deregister(udriver);
+-}
+-EXPORT_SYMBOL(drm_usb_exit);
+-
+-MODULE_AUTHOR("David Airlie");
+-MODULE_DESCRIPTION("USB DRM support");
+-MODULE_LICENSE("GPL and additional rights");
+diff -Naur a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
+--- a/drivers/gpu/drm/drm_vm.c 2015-03-26 14:43:30.422436435 +0530
++++ b/drivers/gpu/drm/drm_vm.c 2015-03-26 14:42:38.738435422 +0530
+@@ -35,10 +35,19 @@
+
+ #include <drm/drmP.h>
+ #include <linux/export.h>
++#include <linux/seq_file.h>
+ #if defined(__ia64__)
+ #include <linux/efi.h>
+ #include <linux/slab.h>
+ #endif
++#include <asm/pgtable.h>
++#include "drm_legacy.h"
++
++struct drm_vma_entry {
++ struct list_head head;
++ struct vm_area_struct *vma;
++ pid_t pid;
++};
+
+ static void drm_vm_open(struct vm_area_struct *vma);
+ static void drm_vm_close(struct vm_area_struct *vma);
+@@ -48,15 +57,11 @@
+ {
+ pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
+
+-#if defined(__i386__) || defined(__x86_64__)
++#if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__)
+ if (map->type == _DRM_REGISTERS && !(map->flags & _DRM_WRITE_COMBINING))
+ tmp = pgprot_noncached(tmp);
+ else
+ tmp = pgprot_writecombine(tmp);
+-#elif defined(__powerpc__)
+- pgprot_val(tmp) |= _PAGE_NO_CACHE;
+- if (map->type == _DRM_REGISTERS)
+- pgprot_val(tmp) |= _PAGE_GUARDED;
+ #elif defined(__ia64__)
+ if (efi_range_is_wc(vma->vm_start, vma->vm_end -
+ vma->vm_start))
+@@ -263,7 +268,7 @@
+ dmah.vaddr = map->handle;
+ dmah.busaddr = map->offset;
+ dmah.size = map->size;
+- __drm_pci_free(dev, &dmah);
++ __drm_legacy_pci_free(dev, &dmah);
+ break;
+ }
+ kfree(map);
+@@ -412,7 +417,6 @@
+ list_add(&vma_entry->head, &dev->vmalist);
+ }
+ }
+-EXPORT_SYMBOL_GPL(drm_vm_open_locked);
+
+ static void drm_vm_open(struct vm_area_struct *vma)
+ {
+@@ -532,7 +536,7 @@
+ * according to the mapping type and remaps the pages. Finally sets the file
+ * pointer and calls vm_open().
+ */
+-int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
++static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
+ {
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->minor->dev;
+@@ -646,7 +650,7 @@
+ return 0;
+ }
+
+-int drm_mmap(struct file *filp, struct vm_area_struct *vma)
++int drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma)
+ {
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->minor->dev;
+@@ -661,4 +665,69 @@
+
+ return ret;
+ }
+-EXPORT_SYMBOL(drm_mmap);
++EXPORT_SYMBOL(drm_legacy_mmap);
++
++void drm_legacy_vma_flush(struct drm_device *dev)
++{
++ struct drm_vma_entry *vma, *vma_temp;
++
++ /* Clear vma list (only needed for legacy drivers) */
++ list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
++ list_del(&vma->head);
++ kfree(vma);
++ }
++}
++
++int drm_vma_info(struct seq_file *m, void *data)
++{
++ struct drm_info_node *node = (struct drm_info_node *) m->private;
++ struct drm_device *dev = node->minor->dev;
++ struct drm_vma_entry *pt;
++ struct vm_area_struct *vma;
++ unsigned long vma_count = 0;
++#if defined(__i386__)
++ unsigned int pgprot;
++#endif
++
++ mutex_lock(&dev->struct_mutex);
++ list_for_each_entry(pt, &dev->vmalist, head)
++ vma_count++;
++
++ seq_printf(m, "vma use count: %lu, high_memory = %pK, 0x%pK\n",
++ vma_count, high_memory,
++ (void *)(unsigned long)virt_to_phys(high_memory));
++
++ list_for_each_entry(pt, &dev->vmalist, head) {
++ vma = pt->vma;
++ if (!vma)
++ continue;
++ seq_printf(m,
++ "\n%5d 0x%pK-0x%pK %c%c%c%c%c%c 0x%08lx000",
++ pt->pid,
++ (void *)vma->vm_start, (void *)vma->vm_end,
++ vma->vm_flags & VM_READ ? 'r' : '-',
++ vma->vm_flags & VM_WRITE ? 'w' : '-',
++ vma->vm_flags & VM_EXEC ? 'x' : '-',
++ vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
++ vma->vm_flags & VM_LOCKED ? 'l' : '-',
++ vma->vm_flags & VM_IO ? 'i' : '-',
++ vma->vm_pgoff);
++
++#if defined(__i386__)
++ pgprot = pgprot_val(vma->vm_page_prot);
++ seq_printf(m, " %c%c%c%c%c%c%c%c%c",
++ pgprot & _PAGE_PRESENT ? 'p' : '-',
++ pgprot & _PAGE_RW ? 'w' : 'r',
++ pgprot & _PAGE_USER ? 'u' : 's',
++ pgprot & _PAGE_PWT ? 't' : 'b',
++ pgprot & _PAGE_PCD ? 'u' : 'c',
++ pgprot & _PAGE_ACCESSED ? 'a' : '-',
++ pgprot & _PAGE_DIRTY ? 'd' : '-',
++ pgprot & _PAGE_PSE ? 'm' : 'k',
++ pgprot & _PAGE_GLOBAL ? 'g' : 'l');
++#endif
++ seq_printf(m, "\n");
++ }
++ mutex_unlock(&dev->struct_mutex);
++ return 0;
++}
+diff -Naur a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
+--- a/drivers/gpu/drm/Makefile 2015-03-26 14:43:51.314436845 +0530
++++ b/drivers/gpu/drm/Makefile 2015-03-26 14:42:38.738435422 +0530
+@@ -6,23 +6,24 @@
+
+ drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
+ drm_context.o drm_dma.o \
+- drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \
+- drm_lock.o drm_memory.o drm_stub.o drm_vm.o \
++ drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \
++ drm_lock.o drm_memory.o drm_drv.o drm_vm.o \
+ drm_agpsupport.o drm_scatter.o drm_pci.o \
+ drm_platform.o drm_sysfs.o drm_hashtab.o drm_mm.o \
+ drm_crtc.o drm_modes.o drm_edid.o \
+ drm_info.o drm_debugfs.o drm_encoder_slave.o \
+ drm_trace_points.o drm_global.o drm_prime.o \
+- drm_rect.o drm_vma_manager.o drm_flip_work.o
++ drm_rect.o drm_vma_manager.o drm_flip_work.o \
++ drm_modeset_lock.o drm_atomic.o
+
+ drm-$(CONFIG_COMPAT) += drm_ioc32.o
+ drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
+ drm-$(CONFIG_PCI) += ati_pcigart.o
+ drm-$(CONFIG_DRM_PANEL) += drm_panel.o
++drm-$(CONFIG_OF) += drm_of.o
+
+-drm-usb-y := drm_usb.o
+-
+-drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o
++drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \
++ drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o
+ drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
+ drm_kms_helper-$(CONFIG_DRM_KMS_FB_HELPER) += drm_fb_helper.o
+ drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o
+@@ -33,7 +34,6 @@
+
+ obj-$(CONFIG_DRM) += drm.o
+ obj-$(CONFIG_DRM_MIPI_DSI) += drm_mipi_dsi.o
+-obj-$(CONFIG_DRM_USB) += drm_usb.o
+ obj-$(CONFIG_DRM_TTM) += ttm/
+ obj-$(CONFIG_DRM_TDFX) += tdfx/
+ obj-$(CONFIG_DRM_R128) += r128/
+diff -Naur a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
+--- a/drivers/gpu/drm/ttm/ttm_bo.c 2015-03-26 14:43:30.490436437 +0530
++++ b/drivers/gpu/drm/ttm/ttm_bo.c 2015-03-26 14:42:38.742435422 +0530
+@@ -40,6 +40,7 @@
+ #include <linux/file.h>
+ #include <linux/module.h>
+ #include <linux/atomic.h>
++#include <linux/reservation.h>
+
+ #define TTM_ASSERT_LOCKED(param)
+ #define TTM_DEBUG(fmt, arg...)
+@@ -53,12 +54,13 @@
+ .mode = S_IRUGO
+ };
+
+-static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
++static inline int ttm_mem_type_from_place(const struct ttm_place *place,
++ uint32_t *mem_type)
+ {
+ int i;
+
+ for (i = 0; i <= TTM_PL_PRIV5; i++)
+- if (flags & (1 << i)) {
++ if (place->flags & (1 << i)) {
+ *mem_type = i;
+ return 0;
+ }
+@@ -89,12 +91,12 @@
+ bo, bo->mem.num_pages, bo->mem.size >> 10,
+ bo->mem.size >> 20);
+ for (i = 0; i < placement->num_placement; i++) {
+- ret = ttm_mem_type_from_flags(placement->placement[i],
++ ret = ttm_mem_type_from_place(&placement->placement[i],
+ &mem_type);
+ if (ret)
+ return;
+ pr_err(" placement[%d]=0x%08X (%d)\n",
+- i, placement->placement[i], mem_type);
++ i, placement->placement[i].flags, mem_type);
+ ttm_mem_type_debug(bo->bdev, mem_type);
+ }
+ }
+@@ -141,7 +143,6 @@
+ BUG_ON(atomic_read(&bo->list_kref.refcount));
+ BUG_ON(atomic_read(&bo->kref.refcount));
+ BUG_ON(atomic_read(&bo->cpu_writers));
+- BUG_ON(bo->sync_obj != NULL);
+ BUG_ON(bo->mem.mm_node != NULL);
+ BUG_ON(!list_empty(&bo->lru));
+ BUG_ON(!list_empty(&bo->ddestroy));
+@@ -402,36 +403,48 @@
+ ww_mutex_unlock (&bo->resv->lock);
+ }
+
++static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
++{
++ struct reservation_object_list *fobj;
++ struct fence *fence;
++ int i;
++
++ fobj = reservation_object_get_list(bo->resv);
++ fence = reservation_object_get_excl(bo->resv);
++ if (fence && !fence->ops->signaled)
++ fence_enable_sw_signaling(fence);
++
++ for (i = 0; fobj && i < fobj->shared_count; ++i) {
++ fence = rcu_dereference_protected(fobj->shared[i],
++ reservation_object_held(bo->resv));
++
++ if (!fence->ops->signaled)
++ fence_enable_sw_signaling(fence);
++ }
++}
++
+ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
+ {
+ struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_bo_global *glob = bo->glob;
+- struct ttm_bo_driver *driver = bdev->driver;
+- void *sync_obj = NULL;
+ int put_count;
+ int ret;
+
+ spin_lock(&glob->lru_lock);
+- ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
+-
+- spin_lock(&bdev->fence_lock);
+- (void) ttm_bo_wait(bo, false, false, true);
+- if (!ret && !bo->sync_obj) {
+- spin_unlock(&bdev->fence_lock);
+- put_count = ttm_bo_del_from_lru(bo);
++ ret = __ttm_bo_reserve(bo, false, true, false, NULL);
+
+- spin_unlock(&glob->lru_lock);
+- ttm_bo_cleanup_memtype_use(bo);
++ if (!ret) {
++ if (!ttm_bo_wait(bo, false, false, true)) {
++ put_count = ttm_bo_del_from_lru(bo);
+
+- ttm_bo_list_ref_sub(bo, put_count, true);
++ spin_unlock(&glob->lru_lock);
++ ttm_bo_cleanup_memtype_use(bo);
+
+- return;
+- }
+- if (bo->sync_obj)
+- sync_obj = driver->sync_obj_ref(bo->sync_obj);
+- spin_unlock(&bdev->fence_lock);
++ ttm_bo_list_ref_sub(bo, put_count, true);
+
+- if (!ret) {
++ return;
++ } else
++ ttm_bo_flush_all_fences(bo);
+
+ /*
+ * Make NO_EVICT bos immediately available to
+@@ -443,17 +456,13 @@
+ ttm_bo_add_to_lru(bo);
+ }
+
+- ww_mutex_unlock(&bo->resv->lock);
++ __ttm_bo_unreserve(bo);
+ }
+
+ kref_get(&bo->list_kref);
+ list_add_tail(&bo->ddestroy, &bdev->ddestroy);
+ spin_unlock(&glob->lru_lock);
+
+- if (sync_obj) {
+- driver->sync_obj_flush(sync_obj);
+- driver->sync_obj_unref(&sync_obj);
+- }
+ schedule_delayed_work(&bdev->wq,
+ ((HZ / 100) < 1) ? 1 : HZ / 100);
+ }
+@@ -474,47 +483,29 @@
+ bool interruptible,
+ bool no_wait_gpu)
+ {
+- struct ttm_bo_device *bdev = bo->bdev;
+- struct ttm_bo_driver *driver = bdev->driver;
+ struct ttm_bo_global *glob = bo->glob;
+ int put_count;
+ int ret;
+
+- spin_lock(&bdev->fence_lock);
+ ret = ttm_bo_wait(bo, false, false, true);
+
+ if (ret && !no_wait_gpu) {
+- void *sync_obj;
+-
+- /*
+- * Take a reference to the fence and unreserve,
+- * at this point the buffer should be dead, so
+- * no new sync objects can be attached.
+- */
+- sync_obj = driver->sync_obj_ref(bo->sync_obj);
+- spin_unlock(&bdev->fence_lock);
+-
++ long lret;
+ ww_mutex_unlock(&bo->resv->lock);
+ spin_unlock(&glob->lru_lock);
+
+- ret = driver->sync_obj_wait(sync_obj, false, interruptible);
+- driver->sync_obj_unref(&sync_obj);
+- if (ret)
+- return ret;
+-
+- /*
+- * remove sync_obj with ttm_bo_wait, the wait should be
+- * finished, and no new wait object should have been added.
+- */
+- spin_lock(&bdev->fence_lock);
+- ret = ttm_bo_wait(bo, false, false, true);
+- WARN_ON(ret);
+- spin_unlock(&bdev->fence_lock);
+- if (ret)
+- return ret;
++ lret = reservation_object_wait_timeout_rcu(bo->resv,
++ true,
++ interruptible,
++ 30 * HZ);
++
++ if (lret < 0)
++ return lret;
++ else if (lret == 0)
++ return -EBUSY;
+
+ spin_lock(&glob->lru_lock);
+- ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
++ ret = __ttm_bo_reserve(bo, false, true, false, NULL);
+
+ /*
+ * We raced, and lost, someone else holds the reservation now,
+@@ -528,11 +519,17 @@
+ spin_unlock(&glob->lru_lock);
+ return 0;
+ }
+- } else
+- spin_unlock(&bdev->fence_lock);
++
++ /*
++ * remove sync_obj with ttm_bo_wait, the wait should be
++ * finished, and no new wait object should have been added.
++ */
++ ret = ttm_bo_wait(bo, false, false, true);
++ WARN_ON(ret);
++ }
+
+ if (ret || unlikely(list_empty(&bo->ddestroy))) {
+- ww_mutex_unlock(&bo->resv->lock);
++ __ttm_bo_unreserve(bo);
+ spin_unlock(&glob->lru_lock);
+ return ret;
+ }
+@@ -577,11 +574,11 @@
+ kref_get(&nentry->list_kref);
+ }
+
+- ret = ttm_bo_reserve_nolru(entry, false, true, false, 0);
++ ret = __ttm_bo_reserve(entry, false, true, false, NULL);
+ if (remove_all && ret) {
+ spin_unlock(&glob->lru_lock);
+- ret = ttm_bo_reserve_nolru(entry, false, false,
+- false, 0);
++ ret = __ttm_bo_reserve(entry, false, false,
++ false, NULL);
+ spin_lock(&glob->lru_lock);
+ }
+
+@@ -667,9 +664,7 @@
+ struct ttm_placement placement;
+ int ret = 0;
+
+- spin_lock(&bdev->fence_lock);
+ ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
+- spin_unlock(&bdev->fence_lock);
+
+ if (unlikely(ret != 0)) {
+ if (ret != -ERESTARTSYS) {
+@@ -685,8 +680,6 @@
+ evict_mem.bus.io_reserved_vm = false;
+ evict_mem.bus.io_reserved_count = 0;
+
+- placement.fpfn = 0;
+- placement.lpfn = 0;
+ placement.num_placement = 0;
+ placement.num_busy_placement = 0;
+ bdev->driver->evict_flags(bo, &placement);
+@@ -716,6 +709,7 @@
+
+ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
+ uint32_t mem_type,
++ const struct ttm_place *place,
+ bool interruptible,
+ bool no_wait_gpu)
+ {
+@@ -726,9 +720,22 @@
+
+ spin_lock(&glob->lru_lock);
+ list_for_each_entry(bo, &man->lru, lru) {
+- ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
+- if (!ret)
++ ret = __ttm_bo_reserve(bo, false, true, false, NULL);
++ if (!ret) {
++ if (place && (place->fpfn || place->lpfn)) {
++ /* Don't evict this BO if it's outside of the
++ * requested placement range
++ */
++ if (place->fpfn >= (bo->mem.start + bo->mem.size) ||
++ (place->lpfn && place->lpfn <= bo->mem.start)) {
++ __ttm_bo_unreserve(bo);
++ ret = -EBUSY;
++ continue;
++ }
++ }
++
+ break;
++ }
+ }
+
+ if (ret) {
+@@ -774,7 +781,7 @@
+ */
+ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
+ uint32_t mem_type,
+- struct ttm_placement *placement,
++ const struct ttm_place *place,
+ struct ttm_mem_reg *mem,
+ bool interruptible,
+ bool no_wait_gpu)
+@@ -784,12 +791,12 @@
+ int ret;
+
+ do {
+- ret = (*man->func->get_node)(man, bo, placement, mem);
++ ret = (*man->func->get_node)(man, bo, place, mem);
+ if (unlikely(ret != 0))
+ return ret;
+ if (mem->mm_node)
+ break;
+- ret = ttm_mem_evict_first(bdev, mem_type,
++ ret = ttm_mem_evict_first(bdev, mem_type, place,
+ interruptible, no_wait_gpu);
+ if (unlikely(ret != 0))
+ return ret;
+@@ -827,18 +834,18 @@
+
+ static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
+ uint32_t mem_type,
+- uint32_t proposed_placement,
++ const struct ttm_place *place,
+ uint32_t *masked_placement)
+ {
+ uint32_t cur_flags = ttm_bo_type_flags(mem_type);
+
+- if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0)
++ if ((cur_flags & place->flags & TTM_PL_MASK_MEM) == 0)
+ return false;
+
+- if ((proposed_placement & man->available_caching) == 0)
++ if ((place->flags & man->available_caching) == 0)
+ return false;
+
+- cur_flags |= (proposed_placement & man->available_caching);
++ cur_flags |= (place->flags & man->available_caching);
+
+ *masked_placement = cur_flags;
+ return true;
+@@ -869,15 +876,14 @@
+
+ mem->mm_node = NULL;
+ for (i = 0; i < placement->num_placement; ++i) {
+- ret = ttm_mem_type_from_flags(placement->placement[i],
+- &mem_type);
++ const struct ttm_place *place = &placement->placement[i];
++
++ ret = ttm_mem_type_from_place(place, &mem_type);
+ if (ret)
+ return ret;
+ man = &bdev->man[mem_type];
+
+- type_ok = ttm_bo_mt_compatible(man,
+- mem_type,
+- placement->placement[i],
++ type_ok = ttm_bo_mt_compatible(man, mem_type, place,
+ &cur_flags);
+
+ if (!type_ok)
+@@ -889,7 +895,7 @@
+ * Use the access and other non-mapping-related flag bits from
+ * the memory placement flags to the current flags
+ */
+- ttm_flag_masked(&cur_flags, placement->placement[i],
++ ttm_flag_masked(&cur_flags, place->flags,
+ ~TTM_PL_MASK_MEMTYPE);
+
+ if (mem_type == TTM_PL_SYSTEM)
+@@ -897,7 +903,7 @@
+
+ if (man->has_type && man->use_type) {
+ type_found = true;
+- ret = (*man->func->get_node)(man, bo, placement, mem);
++ ret = (*man->func->get_node)(man, bo, place, mem);
+ if (unlikely(ret))
+ return ret;
+ }
+@@ -915,17 +921,15 @@
+ return -EINVAL;
+
+ for (i = 0; i < placement->num_busy_placement; ++i) {
+- ret = ttm_mem_type_from_flags(placement->busy_placement[i],
+- &mem_type);
++ const struct ttm_place *place = &placement->busy_placement[i];
++
++ ret = ttm_mem_type_from_place(place, &mem_type);
+ if (ret)
+ return ret;
+ man = &bdev->man[mem_type];
+ if (!man->has_type)
+ continue;
+- if (!ttm_bo_mt_compatible(man,
+- mem_type,
+- placement->busy_placement[i],
+- &cur_flags))
++ if (!ttm_bo_mt_compatible(man, mem_type, place, &cur_flags))
+ continue;
+
+ cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
+@@ -934,10 +938,9 @@
+ * Use the access and other non-mapping-related flag bits from
+ * the memory placement flags to the current flags
+ */
+- ttm_flag_masked(&cur_flags, placement->busy_placement[i],
++ ttm_flag_masked(&cur_flags, place->flags,
+ ~TTM_PL_MASK_MEMTYPE);
+
+-
+ if (mem_type == TTM_PL_SYSTEM) {
+ mem->mem_type = mem_type;
+ mem->placement = cur_flags;
+@@ -945,7 +948,7 @@
+ return 0;
+ }
+
+- ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
++ ret = ttm_bo_mem_force_space(bo, mem_type, place, mem,
+ interruptible, no_wait_gpu);
+ if (ret == 0 && mem->mm_node) {
+ mem->placement = cur_flags;
+@@ -966,7 +969,6 @@
+ {
+ int ret = 0;
+ struct ttm_mem_reg mem;
+- struct ttm_bo_device *bdev = bo->bdev;
+
+ lockdep_assert_held(&bo->resv->lock.base);
+
+@@ -975,9 +977,7 @@
+ * Have the driver move function wait for idle when necessary,
+ * instead of doing it here.
+ */
+- spin_lock(&bdev->fence_lock);
+ ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
+- spin_unlock(&bdev->fence_lock);
+ if (ret)
+ return ret;
+ mem.num_pages = bo->num_pages;
+@@ -1006,20 +1006,27 @@
+ {
+ int i;
+
+- if (mem->mm_node && placement->lpfn != 0 &&
+- (mem->start < placement->fpfn ||
+- mem->start + mem->num_pages > placement->lpfn))
+- return false;
+-
+ for (i = 0; i < placement->num_placement; i++) {
+- *new_flags = placement->placement[i];
++ const struct ttm_place *heap = &placement->placement[i];
++ if (mem->mm_node &&
++ (mem->start < heap->fpfn ||
++ (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
++ continue;
++
++ *new_flags = heap->flags;
+ if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
+ (*new_flags & mem->placement & TTM_PL_MASK_MEM))
+ return true;
+ }
+
+ for (i = 0; i < placement->num_busy_placement; i++) {
+- *new_flags = placement->busy_placement[i];
++ const struct ttm_place *heap = &placement->busy_placement[i];
++ if (mem->mm_node &&
++ (mem->start < heap->fpfn ||
++ (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
++ continue;
++
++ *new_flags = heap->flags;
+ if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
+ (*new_flags & mem->placement & TTM_PL_MASK_MEM))
+ return true;
+@@ -1037,11 +1044,6 @@
+ uint32_t new_flags;
+
+ lockdep_assert_held(&bo->resv->lock.base);
+- /* Check that range is valid */
+- if (placement->lpfn || placement->fpfn)
+- if (placement->fpfn > placement->lpfn ||
+- (placement->lpfn - placement->fpfn) < bo->num_pages)
+- return -EINVAL;
+ /*
+ * Check whether we need to move buffer.
+ */
+@@ -1070,15 +1072,6 @@
+ }
+ EXPORT_SYMBOL(ttm_bo_validate);
+
+-int ttm_bo_check_placement(struct ttm_buffer_object *bo,
+- struct ttm_placement *placement)
+-{
+- BUG_ON((placement->fpfn || placement->lpfn) &&
+- (bo->mem.num_pages > (placement->lpfn - placement->fpfn)));
+-
+- return 0;
+-}
+-
+ int ttm_bo_init(struct ttm_bo_device *bdev,
+ struct ttm_buffer_object *bo,
+ unsigned long size,
+@@ -1089,6 +1082,7 @@
+ struct file *persistent_swap_storage,
+ size_t acc_size,
+ struct sg_table *sg,
++ struct reservation_object *resv,
+ void (*destroy) (struct ttm_buffer_object *))
+ {
+ int ret = 0;
+@@ -1142,30 +1136,38 @@
+ bo->persistent_swap_storage = persistent_swap_storage;
+ bo->acc_size = acc_size;
+ bo->sg = sg;
+- bo->resv = &bo->ttm_resv;
+- reservation_object_init(bo->resv);
++ if (resv) {
++ bo->resv = resv;
++ lockdep_assert_held(&bo->resv->lock.base);
++ } else {
++ bo->resv = &bo->ttm_resv;
++ reservation_object_init(&bo->ttm_resv);
++ }
+ atomic_inc(&bo->glob->bo_count);
+ drm_vma_node_reset(&bo->vma_node);
+
+- ret = ttm_bo_check_placement(bo, placement);
+-
+ /*
+ * For ttm_bo_type_device buffers, allocate
+ * address space from the device.
+ */
+- if (likely(!ret) &&
+- (bo->type == ttm_bo_type_device ||
+- bo->type == ttm_bo_type_sg))
++ if (bo->type == ttm_bo_type_device ||
++ bo->type == ttm_bo_type_sg)
+ ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node,
+ bo->mem.num_pages);
+
+- locked = ww_mutex_trylock(&bo->resv->lock);
+- WARN_ON(!locked);
++ /* passed reservation objects should already be locked,
++ * since otherwise lockdep will be angered in radeon.
++ */
++ if (!resv) {
++ locked = ww_mutex_trylock(&bo->resv->lock);
++ WARN_ON(!locked);
++ }
+
+ if (likely(!ret))
+ ret = ttm_bo_validate(bo, placement, interruptible, false);
+
+- ttm_bo_unreserve(bo);
++ if (!resv)
++ ttm_bo_unreserve(bo);
+
+ if (unlikely(ret))
+ ttm_bo_unref(&bo);
+@@ -1223,7 +1225,7 @@
+ acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
+ ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
+ interruptible, persistent_swap_storage, acc_size,
+- NULL, NULL);
++ NULL, NULL, NULL);
+ if (likely(ret == 0))
+ *p_bo = bo;
+
+@@ -1245,7 +1247,7 @@
+ spin_lock(&glob->lru_lock);
+ while (!list_empty(&man->lru)) {
+ spin_unlock(&glob->lru_lock);
+- ret = ttm_mem_evict_first(bdev, mem_type, false, false);
++ ret = ttm_mem_evict_first(bdev, mem_type, NULL, false, false);
+ if (ret) {
+ if (allow_errors) {
+ return ret;
+@@ -1451,6 +1453,7 @@
+ int ttm_bo_device_init(struct ttm_bo_device *bdev,
+ struct ttm_bo_global *glob,
+ struct ttm_bo_driver *driver,
++ struct address_space *mapping,
+ uint64_t file_page_offset,
+ bool need_dma32)
+ {
+@@ -1472,11 +1475,10 @@
+ 0x10000000);
+ INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
+ INIT_LIST_HEAD(&bdev->ddestroy);
+- bdev->dev_mapping = NULL;
++ bdev->dev_mapping = mapping;
+ bdev->glob = glob;
+ bdev->need_dma32 = need_dma32;
+ bdev->val_seq = 0;
+- spin_lock_init(&bdev->fence_lock);
+ mutex_lock(&glob->device_list_mutex);
+ list_add_tail(&bdev->device_list, &glob->device_list);
+ mutex_unlock(&glob->device_list_mutex);
+@@ -1529,77 +1531,66 @@
+
+ EXPORT_SYMBOL(ttm_bo_unmap_virtual);
+
+-
+ int ttm_bo_wait(struct ttm_buffer_object *bo,
+ bool lazy, bool interruptible, bool no_wait)
+ {
+- struct ttm_bo_driver *driver = bo->bdev->driver;
+- struct ttm_bo_device *bdev = bo->bdev;
+- void *sync_obj;
+- int ret = 0;
+-
+- if (likely(bo->sync_obj == NULL))
+- return 0;
++ struct reservation_object_list *fobj;
++ struct reservation_object *resv;
++ struct fence *excl;
++ long timeout = 15 * HZ;
++ int i;
+
+- while (bo->sync_obj) {
++ resv = bo->resv;
++ fobj = reservation_object_get_list(resv);
++ excl = reservation_object_get_excl(resv);
++ if (excl) {
++ if (!fence_is_signaled(excl)) {
++ if (no_wait)
++ return -EBUSY;
+
+- if (driver->sync_obj_signaled(bo->sync_obj)) {
+- void *tmp_obj = bo->sync_obj;
+- bo->sync_obj = NULL;
+- clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
+- spin_unlock(&bdev->fence_lock);
+- driver->sync_obj_unref(&tmp_obj);
+- spin_lock(&bdev->fence_lock);
+- continue;
++ timeout = fence_wait_timeout(excl,
++ interruptible, timeout);
+ }
++ }
+
+- if (no_wait)
+- return -EBUSY;
++ for (i = 0; fobj && timeout > 0 && i < fobj->shared_count; ++i) {
++ struct fence *fence;
++ fence = rcu_dereference_protected(fobj->shared[i],
++ reservation_object_held(resv));
+
+- sync_obj = driver->sync_obj_ref(bo->sync_obj);
+- spin_unlock(&bdev->fence_lock);
+- ret = driver->sync_obj_wait(sync_obj,
+- lazy, interruptible);
+- if (unlikely(ret != 0)) {
+- driver->sync_obj_unref(&sync_obj);
+- spin_lock(&bdev->fence_lock);
+- return ret;
+- }
+- spin_lock(&bdev->fence_lock);
+- if (likely(bo->sync_obj == sync_obj)) {
+- void *tmp_obj = bo->sync_obj;
+- bo->sync_obj = NULL;
+- clear_bit(TTM_BO_PRIV_FLAG_MOVING,
+- &bo->priv_flags);
+- spin_unlock(&bdev->fence_lock);
+- driver->sync_obj_unref(&sync_obj);
+- driver->sync_obj_unref(&tmp_obj);
+- spin_lock(&bdev->fence_lock);
+- } else {
+- spin_unlock(&bdev->fence_lock);
+- driver->sync_obj_unref(&sync_obj);
+- spin_lock(&bdev->fence_lock);
++ if (!fence_is_signaled(fence)) {
++ if (no_wait)
++ return -EBUSY;
++
++ timeout = fence_wait_timeout(fence,
++ interruptible, timeout);
+ }
+ }
++
++ if (timeout < 0)
++ return timeout;
++
++ if (timeout == 0)
++ return -EBUSY;
++
++ reservation_object_add_excl_fence(resv, NULL);
++ clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
+ return 0;
+ }
+ EXPORT_SYMBOL(ttm_bo_wait);
+
+ int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
+ {
+- struct ttm_bo_device *bdev = bo->bdev;
+ int ret = 0;
+
+ /*
+ * Using ttm_bo_reserve makes sure the lru lists are updated.
+ */
+
+- ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
++ ret = ttm_bo_reserve(bo, true, no_wait, false, NULL);
+ if (unlikely(ret != 0))
+ return ret;
+- spin_lock(&bdev->fence_lock);
+ ret = ttm_bo_wait(bo, false, true, no_wait);
+- spin_unlock(&bdev->fence_lock);
+ if (likely(ret == 0))
+ atomic_inc(&bo->cpu_writers);
+ ttm_bo_unreserve(bo);
+@@ -1629,7 +1620,7 @@
+
+ spin_lock(&glob->lru_lock);
+ list_for_each_entry(bo, &glob->swap_lru, swap) {
+- ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
++ ret = __ttm_bo_reserve(bo, false, true, false, NULL);
+ if (!ret)
+ break;
+ }
+@@ -1656,9 +1647,7 @@
+ * Wait for GPU, then move to system cached.
+ */
+
+- spin_lock(&bo->bdev->fence_lock);
+ ret = ttm_bo_wait(bo, false, false, false);
+- spin_unlock(&bo->bdev->fence_lock);
+
+ if (unlikely(ret != 0))
+ goto out;
+@@ -1696,7 +1685,7 @@
+ * already swapped buffer.
+ */
+
+- ww_mutex_unlock(&bo->resv->lock);
++ __ttm_bo_unreserve(bo);
+ kref_put(&bo->list_kref, ttm_bo_release_list);
+ return ret;
+ }
+@@ -1730,10 +1719,10 @@
+ return -ERESTARTSYS;
+ if (!ww_mutex_is_locked(&bo->resv->lock))
+ goto out_unlock;
+- ret = ttm_bo_reserve_nolru(bo, true, false, false, NULL);
++ ret = __ttm_bo_reserve(bo, true, false, false, NULL);
+ if (unlikely(ret != 0))
+ goto out_unlock;
+- ww_mutex_unlock(&bo->resv->lock);
++ __ttm_bo_unreserve(bo);
+
+ out_unlock:
+ mutex_unlock(&bo->wu_mutex);
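With bdev->fence_lock gone, an idle wait needs nothing but the buffer reservation: ttm_bo_wait() now walks the exclusive and shared fences attached to bo->resv directly. A minimal driver-side sketch under that assumption (driver_wait_bo_idle is a hypothetical name, not part of this patch):

/* Hypothetical helper: reserve, wait on all fences in bo->resv, release. */
static int driver_wait_bo_idle(struct ttm_buffer_object *bo,
			       bool interruptible)
{
	int ret;

	ret = ttm_bo_reserve(bo, interruptible, false, false, NULL);
	if (unlikely(ret != 0))
		return ret;

	/* lazy = false, no_wait = false: block (up to TTM's 15s timeout)
	 * until the exclusive and all shared fences have signaled.
	 */
	ret = ttm_bo_wait(bo, false, interruptible, false);
	ttm_bo_unreserve(bo);
	return ret;
}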
+diff -Naur a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
+--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c 2015-03-26 14:43:30.490436437 +0530
++++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c 2015-03-26 14:42:38.742435422 +0530
+@@ -49,16 +49,18 @@
+
+ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
+ struct ttm_buffer_object *bo,
+- struct ttm_placement *placement,
++ const struct ttm_place *place,
+ struct ttm_mem_reg *mem)
+ {
+ struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
+ struct drm_mm *mm = &rman->mm;
+ struct drm_mm_node *node = NULL;
++ enum drm_mm_search_flags sflags = DRM_MM_SEARCH_BEST;
++ enum drm_mm_allocator_flags aflags = DRM_MM_CREATE_DEFAULT;
+ unsigned long lpfn;
+ int ret;
+
+- lpfn = placement->lpfn;
++ lpfn = place->lpfn;
+ if (!lpfn)
+ lpfn = man->size;
+
+@@ -66,11 +68,16 @@
+ if (!node)
+ return -ENOMEM;
+
++ if (place->flags & TTM_PL_FLAG_TOPDOWN) {
++ sflags = DRM_MM_SEARCH_BELOW;
++ aflags = DRM_MM_CREATE_TOP;
++ }
++
+ spin_lock(&rman->lock);
+- ret = drm_mm_insert_node_in_range(mm, node, mem->num_pages,
+- mem->page_alignment,
+- placement->fpfn, lpfn,
+- DRM_MM_SEARCH_BEST);
++ ret = drm_mm_insert_node_in_range_generic(mm, node, mem->num_pages,
++ mem->page_alignment, 0,
++ place->fpfn, lpfn,
++ sflags, aflags);
+ spin_unlock(&rman->lock);
+
+ if (unlikely(ret)) {
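TTM_PL_FLAG_TOPDOWN only flips which end of the pfn range drm_mm searches and allocates from (DRM_MM_SEARCH_BELOW / DRM_MM_CREATE_TOP). A sketch of how a driver might request it through the new struct ttm_place; the placement values are illustrative:

static const struct ttm_place vram_topdown_place = {
	.fpfn  = 0,	/* no lower pfn bound */
	.lpfn  = 0,	/* 0 = up to the end of the managed range */
	.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC | TTM_PL_FLAG_TOPDOWN,
};

static const struct ttm_placement vram_topdown = {
	.num_placement      = 1,
	.placement          = &vram_topdown_place,
	.num_busy_placement = 1,
	.busy_placement     = &vram_topdown_place,
};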
+diff -Naur a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
+--- a/drivers/gpu/drm/ttm/ttm_bo_util.c 2015-03-26 14:43:30.490436437 +0530
++++ b/drivers/gpu/drm/ttm/ttm_bo_util.c 2015-03-26 14:42:38.742435422 +0530
+@@ -37,6 +37,7 @@
+ #include <linux/slab.h>
+ #include <linux/vmalloc.h>
+ #include <linux/module.h>
++#include <linux/reservation.h>
+
+ void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
+ {
+@@ -444,8 +445,6 @@
+ struct ttm_buffer_object **new_obj)
+ {
+ struct ttm_buffer_object *fbo;
+- struct ttm_bo_device *bdev = bo->bdev;
+- struct ttm_bo_driver *driver = bdev->driver;
+ int ret;
+
+ fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
+@@ -466,12 +465,6 @@
+ drm_vma_node_reset(&fbo->vma_node);
+ atomic_set(&fbo->cpu_writers, 0);
+
+- spin_lock(&bdev->fence_lock);
+- if (bo->sync_obj)
+- fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
+- else
+- fbo->sync_obj = NULL;
+- spin_unlock(&bdev->fence_lock);
+ kref_init(&fbo->list_kref);
+ kref_init(&fbo->kref);
+ fbo->destroy = &ttm_transfered_destroy;
+@@ -487,28 +480,24 @@
+
+ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
+ {
++ /* Cached mappings need no adjustment */
++ if (caching_flags & TTM_PL_FLAG_CACHED)
++ return tmp;
++
+ #if defined(__i386__) || defined(__x86_64__)
+ if (caching_flags & TTM_PL_FLAG_WC)
+ tmp = pgprot_writecombine(tmp);
+ else if (boot_cpu_data.x86 > 3)
+ tmp = pgprot_noncached(tmp);
+-
+-#elif defined(__powerpc__)
+- if (!(caching_flags & TTM_PL_FLAG_CACHED)) {
+- pgprot_val(tmp) |= _PAGE_NO_CACHE;
+- if (caching_flags & TTM_PL_FLAG_UNCACHED)
+- pgprot_val(tmp) |= _PAGE_GUARDED;
+- }
+ #endif
+-#if defined(__ia64__)
++#if defined(__ia64__) || defined(__arm__) || defined(__powerpc__)
+ if (caching_flags & TTM_PL_FLAG_WC)
+ tmp = pgprot_writecombine(tmp);
+ else
+ tmp = pgprot_noncached(tmp);
+ #endif
+ #if defined(__sparc__) || defined(__mips__)
+- if (!(caching_flags & TTM_PL_FLAG_CACHED))
+- tmp = pgprot_noncached(tmp);
++ tmp = pgprot_noncached(tmp);
+ #endif
+ return tmp;
+ }
+@@ -567,9 +556,7 @@
+ * We need to use vmap to get the desired page protection
+ * or to make the buffer object look contiguous.
+ */
+- prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
+- PAGE_KERNEL :
+- ttm_io_prot(mem->placement, PAGE_KERNEL);
++ prot = ttm_io_prot(mem->placement, PAGE_KERNEL);
+ map->bo_kmap_type = ttm_bo_map_vmap;
+ map->virtual = vmap(ttm->pages + start_page, num_pages,
+ 0, prot);
+@@ -644,30 +631,20 @@
+ EXPORT_SYMBOL(ttm_bo_kunmap);
+
+ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
+- void *sync_obj,
++ struct fence *fence,
+ bool evict,
+ bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem)
+ {
+ struct ttm_bo_device *bdev = bo->bdev;
+- struct ttm_bo_driver *driver = bdev->driver;
+ struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
+ struct ttm_mem_reg *old_mem = &bo->mem;
+ int ret;
+ struct ttm_buffer_object *ghost_obj;
+- void *tmp_obj = NULL;
+
+- spin_lock(&bdev->fence_lock);
+- if (bo->sync_obj) {
+- tmp_obj = bo->sync_obj;
+- bo->sync_obj = NULL;
+- }
+- bo->sync_obj = driver->sync_obj_ref(sync_obj);
++ reservation_object_add_excl_fence(bo->resv, fence);
+ if (evict) {
+ ret = ttm_bo_wait(bo, false, false, false);
+- spin_unlock(&bdev->fence_lock);
+- if (tmp_obj)
+- driver->sync_obj_unref(&tmp_obj);
+ if (ret)
+ return ret;
+
+@@ -688,14 +665,13 @@
+ */
+
+ set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
+- spin_unlock(&bdev->fence_lock);
+- if (tmp_obj)
+- driver->sync_obj_unref(&tmp_obj);
+
+ ret = ttm_buffer_object_transfer(bo, &ghost_obj);
+ if (ret)
+ return ret;
+
++ reservation_object_add_excl_fence(ghost_obj->resv, fence);
++
+ /**
+ * If we're not moving to fixed memory, the TTM object
+ * needs to stay alive. Otherwhise hang it on the ghost
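On the ttm_bo_move_accel_cleanup() side, the driver now hands over a struct fence and TTM attaches it to bo->resv as the exclusive fence, so the sync_obj ref/unref bookkeeping disappears. A sketch of the call from a driver move path; driver_copy_buffer() is hypothetical:

static int driver_move_blit(struct ttm_buffer_object *bo,
			    bool evict, bool no_wait_gpu,
			    struct ttm_mem_reg *new_mem)
{
	struct fence *fence;

	/* hypothetical: start the GPU copy, return its completion fence */
	fence = driver_copy_buffer(bo, &bo->mem, new_mem);
	if (IS_ERR(fence))
		return PTR_ERR(fence);

	/* TTM takes it from here: the fence goes onto bo->resv */
	return ttm_bo_move_accel_cleanup(bo, fence, evict,
					 no_wait_gpu, new_mem);
}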
+diff -Naur a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
+--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c 2015-03-26 14:43:30.490436437 +0530
++++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c 2015-03-26 14:42:38.742435422 +0530
+@@ -45,10 +45,8 @@
+ struct vm_area_struct *vma,
+ struct vm_fault *vmf)
+ {
+- struct ttm_bo_device *bdev = bo->bdev;
+ int ret = 0;
+
+- spin_lock(&bdev->fence_lock);
+ if (likely(!test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)))
+ goto out_unlock;
+
+@@ -82,7 +80,6 @@
+ VM_FAULT_NOPAGE;
+
+ out_unlock:
+- spin_unlock(&bdev->fence_lock);
+ return ret;
+ }
+
+@@ -200,9 +197,8 @@
+ cvma.vm_page_prot);
+ } else {
+ ttm = bo->ttm;
+- if (!(bo->mem.placement & TTM_PL_FLAG_CACHED))
+- cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
+- cvma.vm_page_prot);
++ cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
++ cvma.vm_page_prot);
+
+ /* Allocate all page at once, most common usage */
+ if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
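Because ttm_io_prot() now returns the protection bits unchanged for cached placements, its callers lose the TTM_PL_FLAG_CACHED special case; the fault path above and the kmap path in ttm_bo_util.c collapse to the same single call. Schematically:

static pgprot_t driver_bo_prot(struct ttm_buffer_object *bo)
{
	/* before: callers skipped ttm_io_prot() for cached placements;
	 * now the cached case is handled inside ttm_io_prot() itself.
	 */
	return ttm_io_prot(bo->mem.placement, PAGE_KERNEL);
}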
+diff -Naur a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c 2015-03-26 14:43:30.490436437 +0530
++++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c 2015-03-26 14:42:38.742435422 +0530
+@@ -32,21 +32,13 @@
+ #include <linux/sched.h>
+ #include <linux/module.h>
+
+-static void ttm_eu_backoff_reservation_locked(struct list_head *list)
++static void ttm_eu_backoff_reservation_reverse(struct list_head *list,
++ struct ttm_validate_buffer *entry)
+ {
+- struct ttm_validate_buffer *entry;
+-
+- list_for_each_entry(entry, list, head) {
++ list_for_each_entry_continue_reverse(entry, list, head) {
+ struct ttm_buffer_object *bo = entry->bo;
+- if (!entry->reserved)
+- continue;
+
+- entry->reserved = false;
+- if (entry->removed) {
+- ttm_bo_add_to_lru(bo);
+- entry->removed = false;
+- }
+- ww_mutex_unlock(&bo->resv->lock);
++ __ttm_bo_unreserve(bo);
+ }
+ }
+
+@@ -56,27 +48,9 @@
+
+ list_for_each_entry(entry, list, head) {
+ struct ttm_buffer_object *bo = entry->bo;
+- if (!entry->reserved)
+- continue;
+-
+- if (!entry->removed) {
+- entry->put_count = ttm_bo_del_from_lru(bo);
+- entry->removed = true;
+- }
+- }
+-}
+-
+-static void ttm_eu_list_ref_sub(struct list_head *list)
+-{
+- struct ttm_validate_buffer *entry;
+-
+- list_for_each_entry(entry, list, head) {
+- struct ttm_buffer_object *bo = entry->bo;
++ unsigned put_count = ttm_bo_del_from_lru(bo);
+
+- if (entry->put_count) {
+- ttm_bo_list_ref_sub(bo, entry->put_count, true);
+- entry->put_count = 0;
+- }
++ ttm_bo_list_ref_sub(bo, put_count, true);
+ }
+ }
+
+@@ -91,11 +65,18 @@
+
+ entry = list_first_entry(list, struct ttm_validate_buffer, head);
+ glob = entry->bo->glob;
++
+ spin_lock(&glob->lru_lock);
+- ttm_eu_backoff_reservation_locked(list);
++ list_for_each_entry(entry, list, head) {
++ struct ttm_buffer_object *bo = entry->bo;
++
++ ttm_bo_add_to_lru(bo);
++ __ttm_bo_unreserve(bo);
++ }
++ spin_unlock(&glob->lru_lock);
++
+ if (ticket)
+ ww_acquire_fini(ticket);
+- spin_unlock(&glob->lru_lock);
+ }
+ EXPORT_SYMBOL(ttm_eu_backoff_reservation);
+
+@@ -112,7 +93,8 @@
+ */
+
+ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
+- struct list_head *list)
++ struct list_head *list, bool intr,
++ struct list_head *dups)
+ {
+ struct ttm_bo_global *glob;
+ struct ttm_validate_buffer *entry;
+@@ -121,60 +103,71 @@
+ if (list_empty(list))
+ return 0;
+
+- list_for_each_entry(entry, list, head) {
+- entry->reserved = false;
+- entry->put_count = 0;
+- entry->removed = false;
+- }
+-
+ entry = list_first_entry(list, struct ttm_validate_buffer, head);
+ glob = entry->bo->glob;
+
+ if (ticket)
+ ww_acquire_init(ticket, &reservation_ww_class);
+-retry:
++
+ list_for_each_entry(entry, list, head) {
+ struct ttm_buffer_object *bo = entry->bo;
+
+- /* already slowpath reserved? */
+- if (entry->reserved)
++ ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), true,
++ ticket);
++ if (!ret && unlikely(atomic_read(&bo->cpu_writers) > 0)) {
++ __ttm_bo_unreserve(bo);
++
++ ret = -EBUSY;
++
++ } else if (ret == -EALREADY && dups) {
++ struct ttm_validate_buffer *safe = entry;
++ entry = list_prev_entry(entry, head);
++ list_del(&safe->head);
++ list_add(&safe->head, dups);
+ continue;
++ }
++
++ if (!ret) {
++ if (!entry->shared)
++ continue;
++
++ ret = reservation_object_reserve_shared(bo->resv);
++ if (!ret)
++ continue;
++ }
+
+- ret = ttm_bo_reserve_nolru(bo, true, (ticket == NULL), true,
+- ticket);
++ /* uh oh, we lost out, drop every reservation and try
++ * to only reserve this buffer, then start over if
++ * this succeeds.
++ */
++ ttm_eu_backoff_reservation_reverse(list, entry);
+
+- if (ret == -EDEADLK) {
+- /* uh oh, we lost out, drop every reservation and try
+- * to only reserve this buffer, then start over if
+- * this succeeds.
+- */
+- BUG_ON(ticket == NULL);
+- spin_lock(&glob->lru_lock);
+- ttm_eu_backoff_reservation_locked(list);
+- spin_unlock(&glob->lru_lock);
+- ttm_eu_list_ref_sub(list);
++ if (ret == -EDEADLK && intr) {
+ ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
+ ticket);
+- if (unlikely(ret != 0)) {
+- if (ret == -EINTR)
+- ret = -ERESTARTSYS;
+- goto err_fini;
+- }
++ } else if (ret == -EDEADLK) {
++ ww_mutex_lock_slow(&bo->resv->lock, ticket);
++ ret = 0;
++ }
+
+- entry->reserved = true;
+- if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
+- ret = -EBUSY;
+- goto err;
+- }
+- goto retry;
+- } else if (ret)
+- goto err;
++ if (!ret && entry->shared)
++ ret = reservation_object_reserve_shared(bo->resv);
+
+- entry->reserved = true;
+- if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
+- ret = -EBUSY;
+- goto err;
++ if (unlikely(ret != 0)) {
++ if (ret == -EINTR)
++ ret = -ERESTARTSYS;
++ if (ticket) {
++ ww_acquire_done(ticket);
++ ww_acquire_fini(ticket);
++ }
++ return ret;
+ }
++
++ /* move this item to the front of the list,
++ * forces correct iteration of the loop without keeping track
++ */
++ list_del(&entry->head);
++ list_add(&entry->head, list);
+ }
+
+ if (ticket)
+@@ -182,25 +175,12 @@
+ spin_lock(&glob->lru_lock);
+ ttm_eu_del_from_lru_locked(list);
+ spin_unlock(&glob->lru_lock);
+- ttm_eu_list_ref_sub(list);
+ return 0;
+-
+-err:
+- spin_lock(&glob->lru_lock);
+- ttm_eu_backoff_reservation_locked(list);
+- spin_unlock(&glob->lru_lock);
+- ttm_eu_list_ref_sub(list);
+-err_fini:
+- if (ticket) {
+- ww_acquire_done(ticket);
+- ww_acquire_fini(ticket);
+- }
+- return ret;
+ }
+ EXPORT_SYMBOL(ttm_eu_reserve_buffers);
+
+ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
+- struct list_head *list, void *sync_obj)
++ struct list_head *list, struct fence *fence)
+ {
+ struct ttm_validate_buffer *entry;
+ struct ttm_buffer_object *bo;
+@@ -217,24 +197,18 @@
+ glob = bo->glob;
+
+ spin_lock(&glob->lru_lock);
+- spin_lock(&bdev->fence_lock);
+
+ list_for_each_entry(entry, list, head) {
+ bo = entry->bo;
+- entry->old_sync_obj = bo->sync_obj;
+- bo->sync_obj = driver->sync_obj_ref(sync_obj);
++ if (entry->shared)
++ reservation_object_add_shared_fence(bo->resv, fence);
++ else
++ reservation_object_add_excl_fence(bo->resv, fence);
+ ttm_bo_add_to_lru(bo);
+- ww_mutex_unlock(&bo->resv->lock);
+- entry->reserved = false;
++ __ttm_bo_unreserve(bo);
+ }
+- spin_unlock(&bdev->fence_lock);
+ spin_unlock(&glob->lru_lock);
+ if (ticket)
+ ww_acquire_fini(ticket);
+-
+- list_for_each_entry(entry, list, head) {
+- if (entry->old_sync_obj)
+- driver->sync_obj_unref(&entry->old_sync_obj);
+- }
+ }
+ EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);
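The reworked execbuf flow in sketch form: the caller now chooses interruptibility explicitly, may pass a list that collects -EALREADY duplicates instead of failing the whole submission, and fences with a struct fence that lands on each reservation object as shared or exclusive according to entry->shared. Names below are illustrative:

static int driver_submit(struct ww_acquire_ctx *ticket,
			 struct list_head *validated,
			 struct fence *fence)
{
	struct list_head duplicates;
	int ret;

	INIT_LIST_HEAD(&duplicates);

	ret = ttm_eu_reserve_buffers(ticket, validated,
				     true /* intr */, &duplicates);
	if (unlikely(ret != 0))
		return ret;	/* all reservations already backed off */

	/* ... validate placements, build and submit the command stream ... */

	/* adds the fence per entry->shared, puts the BOs back on the
	 * LRU and drops every reservation
	 */
	ttm_eu_fence_buffer_objects(ticket, validated, fence);
	return 0;
}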
+diff -Naur a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
+--- a/drivers/gpu/drm/ttm/ttm_memory.c 2015-03-26 14:43:30.490436437 +0530
++++ b/drivers/gpu/drm/ttm/ttm_memory.c 2015-03-26 14:42:38.742435422 +0530
+@@ -300,7 +300,8 @@
+ zone->glob = glob;
+ glob->zone_highmem = zone;
+ ret = kobject_init_and_add(
+- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
++ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s",
++ zone->name);
+ if (unlikely(ret != 0)) {
+ kobject_put(&zone->kobj);
+ return ret;
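The added "%s" is format-string hygiene: zone->name was previously passed where kobject_init_and_add() expects a printf-style format, so a '%' in the name would be parsed as a conversion specifier (and -Wformat-security flags it). The general shape of the fix, schematically:

/* unsafe: 'name' is consumed as a format string */
ret = kobject_init_and_add(&kobj, &ktype, parent, name);

/* safe: 'name' is only ever data */
ret = kobject_init_and_add(&kobj, &ktype, parent, "%s", name);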
+diff -Naur a/drivers/gpu/drm/ttm/ttm_module.c b/drivers/gpu/drm/ttm/ttm_module.c
+--- a/drivers/gpu/drm/ttm/ttm_module.c 2015-03-26 14:43:30.490436437 +0530
++++ b/drivers/gpu/drm/ttm/ttm_module.c 2015-03-26 14:42:38.742435422 +0530
+@@ -35,7 +35,7 @@
+ #include <drm/drm_sysfs.h>
+
+ static DECLARE_WAIT_QUEUE_HEAD(exit_q);
+-atomic_t device_released;
++static atomic_t device_released;
+
+ static struct device_type ttm_drm_class_type = {
+ .name = "ttm",
+diff -Naur a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
+--- a/drivers/gpu/drm/ttm/ttm_object.c 2015-03-26 14:43:30.490436437 +0530
++++ b/drivers/gpu/drm/ttm/ttm_object.c 2015-03-26 14:42:38.742435422 +0530
+@@ -270,6 +270,52 @@
+ }
+ EXPORT_SYMBOL(ttm_base_object_lookup_for_ref);
+
++/**
++ * ttm_ref_object_exists - Check whether a caller has a valid ref object
++ * (has opened) a base object.
++ *
++ * @tfile: Pointer to a struct ttm_object_file identifying the caller.
++ * @base: Pointer to a struct base object.
++ *
++ * Checks whether the caller identified by @tfile has put a valid USAGE
++ * reference object on the base object identified by @base.
++ */
++bool ttm_ref_object_exists(struct ttm_object_file *tfile,
++ struct ttm_base_object *base)
++{
++ struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE];
++ struct drm_hash_item *hash;
++ struct ttm_ref_object *ref;
++
++ rcu_read_lock();
++ if (unlikely(drm_ht_find_item_rcu(ht, base->hash.key, &hash) != 0))
++ goto out_false;
++
++ /*
++ * Verify that the ref object is really pointing to our base object.
++ * Our base object could actually be dead, and the ref object pointing
++ * to another base object with the same handle.
++ */
++ ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
++ if (unlikely(base != ref->obj))
++ goto out_false;
++
++ /*
++ * Verify that the ref->obj pointer was actually valid!
++ */
++ rmb();
++ if (unlikely(atomic_read(&ref->kref.refcount) == 0))
++ goto out_false;
++
++ rcu_read_unlock();
++ return true;
++
++ out_false:
++ rcu_read_unlock();
++ return false;
++}
++EXPORT_SYMBOL(ttm_ref_object_exists);
++
+ int ttm_ref_object_add(struct ttm_object_file *tfile,
+ struct ttm_base_object *base,
+ enum ttm_ref_type ref_type, bool *existed)
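A sketch of the new helper's intended use on a driver ioctl path, once @base has been looked up; driver_handle_op() is a hypothetical name:

static int driver_handle_op(struct ttm_object_file *tfile,
			    struct ttm_base_object *base)
{
	/* reject handles this client never actually opened */
	if (!ttm_ref_object_exists(tfile, base))
		return -EPERM;

	/* ... operate on the object ... */
	return 0;
}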
+diff -Naur a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
+--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c 2015-03-26 14:43:30.490436437 +0530
++++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c 2015-03-26 14:42:38.746435422 +0530
+@@ -297,11 +297,12 @@
+ *
+ * @pool: to free the pages from
+ * @free_all: If set to true will free all pages in pool
+- * @gfp: GFP flags.
++ * @use_static: Safe to use static buffer
+ **/
+ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
+- gfp_t gfp)
++ bool use_static)
+ {
++ static struct page *static_buf[NUM_PAGES_TO_ALLOC];
+ unsigned long irq_flags;
+ struct page *p;
+ struct page **pages_to_free;
+@@ -311,7 +312,11 @@
+ if (NUM_PAGES_TO_ALLOC < nr_free)
+ npages_to_free = NUM_PAGES_TO_ALLOC;
+
+- pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), gfp);
++ if (use_static)
++ pages_to_free = static_buf;
++ else
++ pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
++ GFP_KERNEL);
+ if (!pages_to_free) {
+ pr_err("Failed to allocate memory for pool free operation\n");
+ return 0;
+@@ -374,7 +379,8 @@
+ if (freed_pages)
+ ttm_pages_put(pages_to_free, freed_pages);
+ out:
+- kfree(pages_to_free);
++ if (pages_to_free != static_buf)
++ kfree(pages_to_free);
+ return nr_free;
+ }
+
+@@ -383,8 +389,6 @@
+ *
+ * XXX: (dchinner) Deadlock warning!
+ *
+- * We need to pass sc->gfp_mask to ttm_page_pool_free().
+- *
+ * This code is crying out for a shrinker per pool....
+ */
+ static unsigned long
+@@ -407,8 +411,8 @@
+ if (shrink_pages == 0)
+ break;
+ pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
+- shrink_pages = ttm_page_pool_free(pool, nr_free,
+- sc->gfp_mask);
++ /* OK to use static buffer since global mutex is held. */
++ shrink_pages = ttm_page_pool_free(pool, nr_free, true);
+ freed += nr_free - shrink_pages;
+ }
+ mutex_unlock(&lock);
+@@ -710,7 +714,7 @@
+ }
+ spin_unlock_irqrestore(&pool->lock, irq_flags);
+ if (npages)
+- ttm_page_pool_free(pool, npages, GFP_KERNEL);
++ ttm_page_pool_free(pool, npages, false);
+ }
+
+ /*
+@@ -794,7 +798,7 @@
+ return 0;
+ }
+
+-static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
++static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, gfp_t flags,
+ char *name)
+ {
+ spin_lock_init(&pool->lock);
+@@ -849,9 +853,9 @@
+ pr_info("Finalizing pool allocator\n");
+ ttm_pool_mm_shrink_fini(_manager);
+
++ /* OK to use static buffer since global mutex is no longer used. */
+ for (i = 0; i < NUM_POOLS; ++i)
+- ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES,
+- GFP_KERNEL);
++ ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES, true);
+
+ kobject_put(&_manager->kobj);
+ _manager = NULL;
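The use_static flag replaces threading the shrinker's gfp_mask into an allocation made while reclaiming. The pattern, boiled down (names illustrative): one static scratch array, legal exactly on the paths that already serialize on the shrinker's mutex, with kmalloc(GFP_KERNEL) kept for the ordinary free path:

static unsigned free_some_pages(unsigned nr_free, bool use_static)
{
	static struct page *static_buf[NUM_PAGES_TO_ALLOC];
	struct page **pages;

	if (use_static)		/* caller holds the shrinker mutex */
		pages = static_buf;
	else			/* ordinary path may sleep and allocate */
		pages = kmalloc(NUM_PAGES_TO_ALLOC * sizeof(*pages),
				GFP_KERNEL);
	if (!pages)
		return 0;

	/* ... collect up to NUM_PAGES_TO_ALLOC pages and free them ... */

	if (pages != static_buf)
		kfree(pages);
	return nr_free;
}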
+diff -Naur a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 2015-03-26 14:43:30.490436437 +0530
++++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 2015-03-26 14:42:38.746435422 +0530
+@@ -411,11 +411,12 @@
+ *
+ * @pool: to free the pages from
+ * @nr_free: If set to true will free all pages in pool
+- * @gfp: GFP flags.
++ * @use_static: Safe to use static buffer
+ **/
+ static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
+- gfp_t gfp)
++ bool use_static)
+ {
++ static struct page *static_buf[NUM_PAGES_TO_ALLOC];
+ unsigned long irq_flags;
+ struct dma_page *dma_p, *tmp;
+ struct page **pages_to_free;
+@@ -432,7 +433,11 @@
+ npages_to_free, nr_free);
+ }
+ #endif
+- pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), gfp);
++ if (use_static)
++ pages_to_free = static_buf;
++ else
++ pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
++ GFP_KERNEL);
+
+ if (!pages_to_free) {
+ pr_err("%s: Failed to allocate memory for pool free operation\n",
+@@ -502,7 +507,8 @@
+ if (freed_pages)
+ ttm_dma_pages_put(pool, &d_pages, pages_to_free, freed_pages);
+ out:
+- kfree(pages_to_free);
++ if (pages_to_free != static_buf)
++ kfree(pages_to_free);
+ return nr_free;
+ }
+
+@@ -531,7 +537,8 @@
+ if (pool->type != type)
+ continue;
+ /* Takes a spinlock.. */
+- ttm_dma_page_pool_free(pool, FREE_ALL_PAGES, GFP_KERNEL);
++ /* OK to use static buffer since global mutex is held. */
++ ttm_dma_page_pool_free(pool, FREE_ALL_PAGES, true);
+ WARN_ON(((pool->npages_in_use + pool->npages_free) != 0));
+ /* This code path is called after _all_ references to the
+ * struct device has been dropped - so nobody should be
+@@ -848,6 +855,7 @@
+ if (count) {
+ d_page = list_first_entry(&pool->free_list, struct dma_page, page_list);
+ ttm->pages[index] = d_page->p;
++ ttm_dma->cpu_address[index] = d_page->vaddr;
+ ttm_dma->dma_address[index] = d_page->dma;
+ list_move_tail(&d_page->page_list, &ttm_dma->pages_list);
+ r = 0;
+@@ -979,12 +987,13 @@
+ INIT_LIST_HEAD(&ttm_dma->pages_list);
+ for (i = 0; i < ttm->num_pages; i++) {
+ ttm->pages[i] = NULL;
++ ttm_dma->cpu_address[i] = 0;
+ ttm_dma->dma_address[i] = 0;
+ }
+
+ /* shrink pool if necessary (only on !is_cached pools)*/
+ if (npages)
+- ttm_dma_page_pool_free(pool, npages, GFP_KERNEL);
++ ttm_dma_page_pool_free(pool, npages, false);
+ ttm->state = tt_unpopulated;
+ }
+ EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
+@@ -994,8 +1003,6 @@
+ *
+ * XXX: (dchinner) Deadlock warning!
+ *
+- * We need to pass sc->gfp_mask to ttm_dma_page_pool_free().
+- *
+ * I'm getting sadder as I hear more pathetical whimpers about needing per-pool
+ * shrinkers
+ */
+@@ -1028,8 +1035,8 @@
+ if (++idx < pool_offset)
+ continue;
+ nr_free = shrink_pages;
+- shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free,
+- sc->gfp_mask);
++ /* OK to use static buffer since global mutex is held. */
++ shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free, true);
+ freed += nr_free - shrink_pages;
+
+ pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
+diff -Naur a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
+--- a/drivers/gpu/drm/ttm/ttm_tt.c 2015-03-26 14:43:30.490436437 +0530
++++ b/drivers/gpu/drm/ttm/ttm_tt.c 2015-03-26 14:42:38.746435422 +0530
+@@ -55,9 +55,12 @@
+
+ static void ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
+ {
+- ttm->ttm.pages = drm_calloc_large(ttm->ttm.num_pages, sizeof(void*));
+- ttm->dma_address = drm_calloc_large(ttm->ttm.num_pages,
+- sizeof(*ttm->dma_address));
++ ttm->ttm.pages = drm_calloc_large(ttm->ttm.num_pages,
++ sizeof(*ttm->ttm.pages) +
++ sizeof(*ttm->dma_address) +
++ sizeof(*ttm->cpu_address));
++ ttm->cpu_address = (void *) (ttm->ttm.pages + ttm->ttm.num_pages);
++ ttm->dma_address = (void *) (ttm->cpu_address + ttm->ttm.num_pages);
+ }
+
+ #ifdef CONFIG_X86
+@@ -228,7 +231,7 @@
+
+ INIT_LIST_HEAD(&ttm_dma->pages_list);
+ ttm_dma_tt_alloc_page_directory(ttm_dma);
+- if (!ttm->pages || !ttm_dma->dma_address) {
++ if (!ttm->pages) {
+ ttm_tt_destroy(ttm);
+ pr_err("Failed allocating page table\n");
+ return -ENOMEM;
+@@ -243,7 +246,7 @@
+
+ drm_free_large(ttm->pages);
+ ttm->pages = NULL;
+- drm_free_large(ttm_dma->dma_address);
++ ttm_dma->cpu_address = NULL;
+ ttm_dma->dma_address = NULL;
+ }
+ EXPORT_SYMBOL(ttm_dma_tt_fini);
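The DMA page directory now comes from a single drm_calloc_large() call, with cpu_address and dma_address carved out of the tail of the same block. A sketch of the resulting layout for num_pages == n:

/*
 *  base = drm_calloc_large(n, sizeof(struct page *) +
 *                             sizeof(void *) + sizeof(dma_addr_t));
 *
 *  +------------------+-----------------------+-----------------------+
 *  | pages[0 .. n-1]  | cpu_address[0 .. n-1] | dma_address[0 .. n-1] |
 *  +------------------+-----------------------+-----------------------+
 *  ^ base             ^ base + n entries      ^ base + 2n entries
 *
 * One NULL check on ttm->pages covers all three arrays, and
 * ttm_dma_tt_fini() frees only ttm->pages while merely clearing the
 * two derived pointers.
 */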
+diff -Naur a/fs/dcache.c b/fs/dcache.c
+--- a/fs/dcache.c 2015-03-26 14:43:29.274436413 +0530
++++ b/fs/dcache.c 2015-03-26 14:42:38.746435422 +0530
+@@ -3115,7 +3115,7 @@
+ end = ERR_PTR(-ENAMETOOLONG);
+ return end;
+ }
+-
++EXPORT_SYMBOL(simple_dname);
+ /*
+ * Write full pathname from the root of the filesystem into the buffer.
+ */
+diff -Naur a/include/acpi/video.h b/include/acpi/video.h
+--- a/include/acpi/video.h 2015-03-26 14:43:28.078436390 +0530
++++ b/include/acpi/video.h 2015-03-26 14:42:38.746435422 +0530
+@@ -19,16 +19,20 @@
+ #if (defined CONFIG_ACPI_VIDEO || defined CONFIG_ACPI_VIDEO_MODULE)
+ extern int acpi_video_register(void);
+ extern void acpi_video_unregister(void);
++extern void acpi_video_unregister_backlight(void);
+ extern int acpi_video_get_edid(struct acpi_device *device, int type,
+ int device_id, void **edid);
++extern bool acpi_video_verify_backlight_support(void);
+ #else
+ static inline int acpi_video_register(void) { return 0; }
+ static inline void acpi_video_unregister(void) { return; }
++static inline void acpi_video_unregister_backlight(void) { return; }
+ static inline int acpi_video_get_edid(struct acpi_device *device, int type,
+ int device_id, void **edid)
+ {
+ return -ENODEV;
+ }
++static inline bool acpi_video_verify_backlight_support(void) { return false; }
+ #endif
+
+ #endif
+diff -Naur a/include/asm-generic/pci-dma-compat.h b/include/asm-generic/pci-dma-compat.h
+--- a/include/asm-generic/pci-dma-compat.h 2015-03-26 14:43:27.886436386 +0530
++++ b/include/asm-generic/pci-dma-compat.h 2015-03-26 14:42:38.746435422 +0530
+@@ -19,6 +19,14 @@
+ return dma_alloc_coherent(hwdev == NULL ? NULL : &hwdev->dev, size, dma_handle, GFP_ATOMIC);
+ }
+
++static inline void *
++pci_zalloc_consistent(struct pci_dev *hwdev, size_t size,
++ dma_addr_t *dma_handle)
++{
++ return dma_zalloc_coherent(hwdev == NULL ? NULL : &hwdev->dev,
++ size, dma_handle, GFP_ATOMIC);
++}
++
+ static inline void
+ pci_free_consistent(struct pci_dev *hwdev, size_t size,
+ void *vaddr, dma_addr_t dma_handle)
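pci_zalloc_consistent() mirrors pci_alloc_consistent() but returns zeroed memory, so callers can drop their memset. A usage sketch; the ring names are illustrative:

static int ring_alloc(struct pci_dev *pdev, size_t size,
		      void **vaddr, dma_addr_t *dma)
{
	*vaddr = pci_zalloc_consistent(pdev, size, dma);
	if (!*vaddr)
		return -ENOMEM;

	/* no memset(*vaddr, 0, size): the buffer is already zeroed */
	return 0;
}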
+diff -Naur a/include/drm/ati_pcigart.h b/include/drm/ati_pcigart.h
+--- a/include/drm/ati_pcigart.h 1970-01-01 05:30:00.000000000 +0530
++++ b/include/drm/ati_pcigart.h 2015-03-26 14:42:38.746435422 +0530
+@@ -0,0 +1,30 @@
++#ifndef DRM_ATI_PCIGART_H
++#define DRM_ATI_PCIGART_H
++
++#include <drm/drm_legacy.h>
++
++/* location of GART table */
++#define DRM_ATI_GART_MAIN 1
++#define DRM_ATI_GART_FB 2
++
++#define DRM_ATI_GART_PCI 1
++#define DRM_ATI_GART_PCIE 2
++#define DRM_ATI_GART_IGP 3
++
++struct drm_ati_pcigart_info {
++ int gart_table_location;
++ int gart_reg_if;
++ void *addr;
++ dma_addr_t bus_addr;
++ dma_addr_t table_mask;
++ struct drm_dma_handle *table_handle;
++ struct drm_local_map mapping;
++ int table_size;
++};
++
++extern int drm_ati_pcigart_init(struct drm_device *dev,
++ struct drm_ati_pcigart_info * gart_info);
++extern int drm_ati_pcigart_cleanup(struct drm_device *dev,
++ struct drm_ati_pcigart_info * gart_info);
++
++#endif
+diff -Naur a/include/drm/drm_agpsupport.h b/include/drm/drm_agpsupport.h
+--- a/include/drm/drm_agpsupport.h 2015-03-26 14:43:27.890436386 +0530
++++ b/include/drm/drm_agpsupport.h 2015-03-26 14:42:38.746435422 +0530
+@@ -1,12 +1,32 @@
+ #ifndef _DRM_AGPSUPPORT_H_
+ #define _DRM_AGPSUPPORT_H_
+
++#include <linux/agp_backend.h>
+ #include <linux/kernel.h>
++#include <linux/list.h>
+ #include <linux/mm.h>
+ #include <linux/mutex.h>
+ #include <linux/types.h>
+-#include <linux/agp_backend.h>
+-#include <drm/drmP.h>
++#include <uapi/drm/drm.h>
++
++struct drm_device;
++struct drm_file;
++
++#define __OS_HAS_AGP (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && \
++ defined(MODULE)))
++
++struct drm_agp_head {
++ struct agp_kern_info agp_info;
++ struct list_head memory;
++ unsigned long mode;
++ struct agp_bridge_data *bridge;
++ int enabled;
++ int acquired;
++ unsigned long base;
++ int agp_mtrr;
++ int cant_use_aperture;
++ unsigned long page_mask;
++};
+
+ #if __OS_HAS_AGP
+
+@@ -45,6 +65,7 @@
+ int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request);
+ int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
++
+ #else /* __OS_HAS_AGP */
+
+ static inline void drm_free_agp(struct agp_memory * handle, int pages)
+@@ -172,6 +193,7 @@
+ {
+ return -ENODEV;
+ }
++
+ #endif /* __OS_HAS_AGP */
+
+ #endif /* _DRM_AGPSUPPORT_H_ */
+diff -Naur a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
+--- a/include/drm/drm_atomic.h 1970-01-01 05:30:00.000000000 +0530
++++ b/include/drm/drm_atomic.h 2015-03-26 14:42:38.750435422 +0530
+@@ -0,0 +1,69 @@
++/*
++ * Copyright (C) 2014 Red Hat
++ * Copyright (C) 2014 Intel Corp.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ * Rob Clark <robdclark@gmail.com>
++ * Daniel Vetter <daniel.vetter@ffwll.ch>
++ */
++
++#ifndef DRM_ATOMIC_H_
++#define DRM_ATOMIC_H_
++
++#include <drm/drm_crtc.h>
++
++struct drm_atomic_state * __must_check
++drm_atomic_state_alloc(struct drm_device *dev);
++void drm_atomic_state_clear(struct drm_atomic_state *state);
++void drm_atomic_state_free(struct drm_atomic_state *state);
++
++struct drm_crtc_state * __must_check
++drm_atomic_get_crtc_state(struct drm_atomic_state *state,
++ struct drm_crtc *crtc);
++struct drm_plane_state * __must_check
++drm_atomic_get_plane_state(struct drm_atomic_state *state,
++ struct drm_plane *plane);
++struct drm_connector_state * __must_check
++drm_atomic_get_connector_state(struct drm_atomic_state *state,
++ struct drm_connector *connector);
++
++int __must_check
++drm_atomic_set_crtc_for_plane(struct drm_atomic_state *state,
++ struct drm_plane *plane, struct drm_crtc *crtc);
++void drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state,
++ struct drm_framebuffer *fb);
++int __must_check
++drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
++ struct drm_crtc *crtc);
++int __must_check
++drm_atomic_add_affected_connectors(struct drm_atomic_state *state,
++ struct drm_crtc *crtc);
++int
++drm_atomic_connectors_for_crtc(struct drm_atomic_state *state,
++ struct drm_crtc *crtc);
++
++void drm_atomic_legacy_backoff(struct drm_atomic_state *state);
++
++int __must_check drm_atomic_check_only(struct drm_atomic_state *state);
++int __must_check drm_atomic_commit(struct drm_atomic_state *state);
++int __must_check drm_atomic_async_commit(struct drm_atomic_state *state);
++
++#endif /* DRM_ATOMIC_H_ */
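Together these entry points define the build/check/commit cycle. A minimal sketch of a caller, with modeset-lock setup (state->acquire_ctx) and the -EDEADLK backoff via drm_atomic_legacy_backoff() elided, and assuming this era's ownership rule that a successful commit consumes the state:

static int move_plane_to_crtc(struct drm_device *dev,
			      struct drm_plane *plane,
			      struct drm_crtc *crtc,
			      struct drm_framebuffer *fb)
{
	struct drm_atomic_state *state;
	struct drm_plane_state *plane_state;
	int ret;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;
	/* state->acquire_ctx must point at a held acquire context */

	plane_state = drm_atomic_get_plane_state(state, plane);
	if (IS_ERR(plane_state)) {
		ret = PTR_ERR(plane_state);
		goto err;
	}

	ret = drm_atomic_set_crtc_for_plane(state, plane, crtc);
	if (ret)
		goto err;
	drm_atomic_set_fb_for_plane(plane_state, fb);

	ret = drm_atomic_commit(state);
	if (ret)
		goto err;
	return 0;	/* on success the driver owns and frees the state */
err:
	drm_atomic_state_free(state);
	return ret;
}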
+diff -Naur a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h
+--- a/include/drm/drm_atomic_helper.h 1970-01-01 05:30:00.000000000 +0530
++++ b/include/drm/drm_atomic_helper.h 2015-03-26 14:42:38.750435422 +0530
+@@ -0,0 +1,126 @@
++/*
++ * Copyright (C) 2014 Red Hat
++ * Copyright (C) 2014 Intel Corp.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ * Rob Clark <robdclark@gmail.com>
++ * Daniel Vetter <daniel.vetter@ffwll.ch>
++ */
++
++#ifndef DRM_ATOMIC_HELPER_H_
++#define DRM_ATOMIC_HELPER_H_
++
++#include <drm/drm_crtc.h>
++
++int drm_atomic_helper_check(struct drm_device *dev,
++ struct drm_atomic_state *state);
++int drm_atomic_helper_commit(struct drm_device *dev,
++ struct drm_atomic_state *state,
++ bool async);
++
++void drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
++ struct drm_atomic_state *old_state);
++
++void drm_atomic_helper_commit_pre_planes(struct drm_device *dev,
++ struct drm_atomic_state *state);
++void drm_atomic_helper_commit_post_planes(struct drm_device *dev,
++ struct drm_atomic_state *old_state);
++
++int drm_atomic_helper_prepare_planes(struct drm_device *dev,
++ struct drm_atomic_state *state);
++void drm_atomic_helper_commit_planes(struct drm_device *dev,
++ struct drm_atomic_state *state);
++void drm_atomic_helper_cleanup_planes(struct drm_device *dev,
++ struct drm_atomic_state *old_state);
++
++void drm_atomic_helper_swap_state(struct drm_device *dev,
++ struct drm_atomic_state *state);
++
++/* implementations for legacy interfaces */
++int drm_atomic_helper_update_plane(struct drm_plane *plane,
++ struct drm_crtc *crtc,
++ struct drm_framebuffer *fb,
++ int crtc_x, int crtc_y,
++ unsigned int crtc_w, unsigned int crtc_h,
++ uint32_t src_x, uint32_t src_y,
++ uint32_t src_w, uint32_t src_h);
++int drm_atomic_helper_disable_plane(struct drm_plane *plane);
++int drm_atomic_helper_set_config(struct drm_mode_set *set);
++
++int drm_atomic_helper_crtc_set_property(struct drm_crtc *crtc,
++ struct drm_property *property,
++ uint64_t val);
++int drm_atomic_helper_plane_set_property(struct drm_plane *plane,
++ struct drm_property *property,
++ uint64_t val);
++int drm_atomic_helper_connector_set_property(struct drm_connector *connector,
++ struct drm_property *property,
++ uint64_t val);
++int drm_atomic_helper_page_flip(struct drm_crtc *crtc,
++ struct drm_framebuffer *fb,
++ struct drm_pending_vblank_event *event,
++ uint32_t flags);
++
++/* default implementations for state handling */
++void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc);
++struct drm_crtc_state *
++drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc);
++void drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
++ struct drm_crtc_state *state);
++
++void drm_atomic_helper_plane_reset(struct drm_plane *plane);
++struct drm_plane_state *
++drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane);
++void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
++ struct drm_plane_state *state);
++
++void drm_atomic_helper_connector_reset(struct drm_connector *connector);
++struct drm_connector_state *
++drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector);
++void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
++ struct drm_connector_state *state);
++
++/**
++ * drm_atomic_crtc_for_each_plane - iterate over planes currently attached to CRTC
++ * @plane: the loop cursor
++ * @crtc: the crtc whose planes are iterated
++ *
++ * This iterates over the current state, useful (for example) when applying
++ * atomic state after it has been checked and swapped. To iterate over the
++ * planes which *will* be attached (for ->atomic_check()) see
++ * drm_atomic_crtc_state_for_each_plane()
++ */
++#define drm_atomic_crtc_for_each_plane(plane, crtc) \
++ drm_for_each_plane_mask(plane, (crtc)->dev, (crtc)->state->plane_mask)
++
++/**
++ * drm_atomic_crtc_state_for_each_plane - iterate over attached planes in new state
++ * @plane: the loop cursor
++ * @crtc_state: the incoming crtc-state
++ *
++ * Similar to drm_atomic_crtc_for_each_plane(), but iterates the planes that will be
++ * attached if the specified state is applied. Useful during (for example)
++ * ->atomic_check() operations, to validate the incoming state
++ */
++#define drm_atomic_crtc_state_for_each_plane(plane, crtc_state) \
++ drm_for_each_plane_mask(plane, (crtc_state)->state->dev, (crtc_state)->plane_mask)
++
++#endif /* DRM_ATOMIC_HELPER_H_ */
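The two iterators differ only in which state they walk: the current one (crtc->state->plane_mask) versus the state being checked. A sketch using the new-state variant inside an atomic_check callback:

static int count_pending_planes(struct drm_crtc_state *crtc_state)
{
	struct drm_plane *plane;
	int n = 0;

	/* walks the planes that WILL be attached if crtc_state commits */
	drm_atomic_crtc_state_for_each_plane(plane, crtc_state)
		n++;

	return n;
}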
+diff -Naur a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
+--- a/include/drm/drm_crtc.h 2015-03-26 14:43:27.886436386 +0530
++++ b/include/drm/drm_crtc.h 2015-03-26 14:42:38.750435422 +0530
+@@ -31,9 +31,9 @@
+ #include <linux/idr.h>
+ #include <linux/fb.h>
+ #include <linux/hdmi.h>
+-#include <drm/drm_mode.h>
+-
+-#include <drm/drm_fourcc.h>
++#include <uapi/drm/drm_mode.h>
++#include <uapi/drm/drm_fourcc.h>
++#include <drm/drm_modeset_lock.h>
+
+ struct drm_device;
+ struct drm_mode_set;
+@@ -41,6 +41,8 @@
+ struct drm_object_properties;
+ struct drm_file;
+ struct drm_clip_rect;
++struct device_node;
++struct fence;
+
+ #define DRM_MODE_OBJECT_CRTC 0xcccccccc
+ #define DRM_MODE_OBJECT_CONNECTOR 0xc0c0c0c0
+@@ -51,6 +53,7 @@
+ #define DRM_MODE_OBJECT_BLOB 0xbbbbbbbb
+ #define DRM_MODE_OBJECT_PLANE 0xeeeeeeee
+ #define DRM_MODE_OBJECT_BRIDGE 0xbdbdbdbd
++#define DRM_MODE_OBJECT_ANY 0
+
+ struct drm_mode_object {
+ uint32_t id;
+@@ -65,130 +68,31 @@
+ uint64_t values[DRM_OBJECT_MAX_PROPERTY];
+ };
+
+-/*
+- * Note on terminology: here, for brevity and convenience, we refer to connector
+- * control chips as 'CRTCs'. They can control any type of connector, VGA, LVDS,
+- * DVI, etc. And 'screen' refers to the whole of the visible display, which
+- * may span multiple monitors (and therefore multiple CRTC and connector
+- * structures).
+- */
+-
+-enum drm_mode_status {
+- MODE_OK = 0, /* Mode OK */
+- MODE_HSYNC, /* hsync out of range */
+- MODE_VSYNC, /* vsync out of range */
+- MODE_H_ILLEGAL, /* mode has illegal horizontal timings */
+- MODE_V_ILLEGAL, /* mode has illegal horizontal timings */
+- MODE_BAD_WIDTH, /* requires an unsupported linepitch */
+- MODE_NOMODE, /* no mode with a matching name */
+- MODE_NO_INTERLACE, /* interlaced mode not supported */
+- MODE_NO_DBLESCAN, /* doublescan mode not supported */
+- MODE_NO_VSCAN, /* multiscan mode not supported */
+- MODE_MEM, /* insufficient video memory */
+- MODE_VIRTUAL_X, /* mode width too large for specified virtual size */
+- MODE_VIRTUAL_Y, /* mode height too large for specified virtual size */
+- MODE_MEM_VIRT, /* insufficient video memory given virtual size */
+- MODE_NOCLOCK, /* no fixed clock available */
+- MODE_CLOCK_HIGH, /* clock required is too high */
+- MODE_CLOCK_LOW, /* clock required is too low */
+- MODE_CLOCK_RANGE, /* clock/mode isn't in a ClockRange */
+- MODE_BAD_HVALUE, /* horizontal timing was out of range */
+- MODE_BAD_VVALUE, /* vertical timing was out of range */
+- MODE_BAD_VSCAN, /* VScan value out of range */
+- MODE_HSYNC_NARROW, /* horizontal sync too narrow */
+- MODE_HSYNC_WIDE, /* horizontal sync too wide */
+- MODE_HBLANK_NARROW, /* horizontal blanking too narrow */
+- MODE_HBLANK_WIDE, /* horizontal blanking too wide */
+- MODE_VSYNC_NARROW, /* vertical sync too narrow */
+- MODE_VSYNC_WIDE, /* vertical sync too wide */
+- MODE_VBLANK_NARROW, /* vertical blanking too narrow */
+- MODE_VBLANK_WIDE, /* vertical blanking too wide */
+- MODE_PANEL, /* exceeds panel dimensions */
+- MODE_INTERLACE_WIDTH, /* width too large for interlaced mode */
+- MODE_ONE_WIDTH, /* only one width is supported */
+- MODE_ONE_HEIGHT, /* only one height is supported */
+- MODE_ONE_SIZE, /* only one resolution is supported */
+- MODE_NO_REDUCED, /* monitor doesn't accept reduced blanking */
+- MODE_NO_STEREO, /* stereo modes not supported */
+- MODE_UNVERIFIED = -3, /* mode needs to reverified */
+- MODE_BAD = -2, /* unspecified reason */
+- MODE_ERROR = -1 /* error condition */
+-};
+-
+-#define DRM_MODE_TYPE_CLOCK_CRTC_C (DRM_MODE_TYPE_CLOCK_C | \
+- DRM_MODE_TYPE_CRTC_C)
+-
+-#define DRM_MODE(nm, t, c, hd, hss, hse, ht, hsk, vd, vss, vse, vt, vs, f) \
+- .name = nm, .status = 0, .type = (t), .clock = (c), \
+- .hdisplay = (hd), .hsync_start = (hss), .hsync_end = (hse), \
+- .htotal = (ht), .hskew = (hsk), .vdisplay = (vd), \
+- .vsync_start = (vss), .vsync_end = (vse), .vtotal = (vt), \
+- .vscan = (vs), .flags = (f), \
+- .base.type = DRM_MODE_OBJECT_MODE
+-
+-#define CRTC_INTERLACE_HALVE_V (1 << 0) /* halve V values for interlacing */
+-#define CRTC_STEREO_DOUBLE (1 << 1) /* adjust timings for stereo modes */
+-
+-#define DRM_MODE_FLAG_3D_MAX DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF
+-
+-struct drm_display_mode {
+- /* Header */
+- struct list_head head;
+- struct drm_mode_object base;
+-
+- char name[DRM_DISPLAY_MODE_LEN];
++static inline int64_t U642I64(uint64_t val)
++{
++ return (int64_t)*((int64_t *)&val);
++}
++static inline uint64_t I642U64(int64_t val)
++{
++ return (uint64_t)*((uint64_t *)&val);
++}
+
+- enum drm_mode_status status;
+- unsigned int type;
++/* rotation property bits */
++#define DRM_ROTATE_0 0
++#define DRM_ROTATE_90 1
++#define DRM_ROTATE_180 2
++#define DRM_ROTATE_270 3
++#define DRM_REFLECT_X 4
++#define DRM_REFLECT_Y 5
+
+- /* Proposed mode values */
+- int clock; /* in kHz */
+- int hdisplay;
+- int hsync_start;
+- int hsync_end;
+- int htotal;
+- int hskew;
+- int vdisplay;
+- int vsync_start;
+- int vsync_end;
+- int vtotal;
+- int vscan;
+- unsigned int flags;
+-
+- /* Addressable image size (may be 0 for projectors, etc.) */
+- int width_mm;
+- int height_mm;
+-
+- /* Actual mode we give to hw */
+- int crtc_clock; /* in KHz */
+- int crtc_hdisplay;
+- int crtc_hblank_start;
+- int crtc_hblank_end;
+- int crtc_hsync_start;
+- int crtc_hsync_end;
+- int crtc_htotal;
+- int crtc_hskew;
+- int crtc_vdisplay;
+- int crtc_vblank_start;
+- int crtc_vblank_end;
+- int crtc_vsync_start;
+- int crtc_vsync_end;
+- int crtc_vtotal;
+-
+- /* Driver private mode info */
+- int private_size;
+- int *private;
+- int private_flags;
+-
+- int vrefresh; /* in Hz */
+- int hsync; /* in kHz */
+- enum hdmi_picture_aspect picture_aspect_ratio;
++enum drm_connector_force {
++ DRM_FORCE_UNSPECIFIED,
++ DRM_FORCE_OFF,
++ DRM_FORCE_ON, /* force on analog part normally */
++ DRM_FORCE_ON_DIGITAL, /* for DVI-I use digital connector */
+ };
+
+-static inline bool drm_mode_is_stereo(const struct drm_display_mode *mode)
+-{
+- return mode->flags & DRM_MODE_FLAG_3D_MASK;
+-}
++#include <drm/drm_modes.h>
+
+ enum drm_connector_status {
+ connector_status_connected = 1,
+@@ -227,17 +131,28 @@
+ enum subpixel_order subpixel_order;
+ u32 color_formats;
+
++ /* Mask of supported hdmi deep color modes */
++ u8 edid_hdmi_dc_modes;
++
+ u8 cea_rev;
+ };
+
++/* data corresponds to displayid vend/prod/serial */
++struct drm_tile_group {
++ struct kref refcount;
++ struct drm_device *dev;
++ int id;
++ u8 group_data[8];
++};
++
+ struct drm_framebuffer_funcs {
+ /* note: use drm_framebuffer_remove() */
+ void (*destroy)(struct drm_framebuffer *framebuffer);
+ int (*create_handle)(struct drm_framebuffer *fb,
+ struct drm_file *file_priv,
+ unsigned int *handle);
+- /**
+- * Optinal callback for the dirty fb ioctl.
++ /*
++ * Optional callback for the dirty fb ioctl.
+ *
+ * Userspace can notify the driver via this callback
+ * that a area of the framebuffer has changed and should
+@@ -290,7 +205,7 @@
+ struct drm_property_blob {
+ struct drm_mode_object base;
+ struct list_head head;
+- unsigned int length;
++ size_t length;
+ unsigned char data[];
+ };
+
+@@ -307,8 +222,9 @@
+ char name[DRM_PROP_NAME_LEN];
+ uint32_t num_values;
+ uint64_t *values;
++ struct drm_device *dev;
+
+- struct list_head enum_blob_list;
++ struct list_head enum_list;
+ };
+
+ struct drm_crtc;
+@@ -317,19 +233,65 @@
+ struct drm_pending_vblank_event;
+ struct drm_plane;
+ struct drm_bridge;
++struct drm_atomic_state;
++
++/**
++ * struct drm_crtc_state - mutable CRTC state
++ * @enable: whether the CRTC should be enabled, gates all other state
++ * @mode_changed: for use by helpers and drivers when computing state updates
++ * @plane_mask: bitmask of (1 << drm_plane_index(plane)) of attached planes
++ * @last_vblank_count: for helpers and drivers to capture the vblank of the
++ * update to ensure framebuffer cleanup isn't done too early
++ * @planes_changed: for use by helpers and drivers when computing state updates
++ * @adjusted_mode: for use by helpers and drivers to compute adjusted mode timings
++ * @mode: current mode timings
++ * @event: optional pointer to a DRM event to signal upon completion of the
++ * state update
++ * @state: backpointer to global drm_atomic_state
++ */
++struct drm_crtc_state {
++ bool enable;
++
++ /* computed state bits used by helpers and drivers */
++ bool planes_changed : 1;
++ bool mode_changed : 1;
++
++ /* attached planes bitmask:
++ * WARNING: transitional helpers do not maintain plane_mask so
++ * drivers not converted over to atomic helpers should not rely
++ * on plane_mask being accurate!
++ */
++ u32 plane_mask;
++
++ /* last_vblank_count: for vblank waits before cleanup */
++ u32 last_vblank_count;
++
++ /* adjusted_mode: for use by helpers and drivers */
++ struct drm_display_mode adjusted_mode;
++
++ struct drm_display_mode mode;
++
++ struct drm_pending_vblank_event *event;
++
++ struct drm_atomic_state *state;
++};
+
+ /**
+- * drm_crtc_funcs - control CRTCs for a given device
++ * struct drm_crtc_funcs - control CRTCs for a given device
+ * @save: save CRTC state
+ * @restore: restore CRTC state
+ * @reset: reset CRTC after state has been invalidated (e.g. resume)
+ * @cursor_set: setup the cursor
++ * @cursor_set2: setup the cursor with hotspot, supersedes @cursor_set if set
+ * @cursor_move: move the cursor
+ * @gamma_set: specify color ramp for CRTC
+ * @destroy: deinit and free object
+ * @set_property: called when a property is changed
+ * @set_config: apply a new CRTC configuration
+ * @page_flip: initiate a page flip
++ * @atomic_duplicate_state: duplicate the atomic state for this CRTC
++ * @atomic_destroy_state: destroy an atomic state for this CRTC
++ * @atomic_set_property: set a property on an atomic state for this CRTC
+ *
+ * The drm_crtc_funcs structure is the central CRTC management structure
+ * in the DRM. Each CRTC controls one or more connectors (note that the name
+@@ -380,13 +342,28 @@
+
+ int (*set_property)(struct drm_crtc *crtc,
+ struct drm_property *property, uint64_t val);
++
++ /* atomic update handling */
++ struct drm_crtc_state *(*atomic_duplicate_state)(struct drm_crtc *crtc);
++ void (*atomic_destroy_state)(struct drm_crtc *crtc,
++ struct drm_crtc_state *state);
++ int (*atomic_set_property)(struct drm_crtc *crtc,
++ struct drm_crtc_state *state,
++ struct drm_property *property,
++ uint64_t val);
+ };
+
+ /**
+- * drm_crtc - central CRTC control structure
++ * struct drm_crtc - central CRTC control structure
+ * @dev: parent DRM device
++ * @port: OF node used by drm_of_find_possible_crtcs()
+ * @head: list management
++ * @mutex: per-CRTC locking
+ * @base: base KMS object for ID tracking etc.
++ * @primary: primary plane for this CRTC
++ * @cursor: cursor plane for this CRTC
++ * @cursor_x: current x position of the cursor, used for universal cursor planes
++ * @cursor_y: current y position of the cursor, used for universal cursor planes
+ * @enabled: is this CRTC enabled?
+ * @mode: current mode timings
+ * @hwmode: mode timings as programmed to hw regs
+@@ -399,35 +376,40 @@
+ * @gamma_size: size of gamma ramp
+ * @gamma_store: gamma ramp values
+ * @framedur_ns: precise frame timing
+- * @framedur_ns: precise line timing
++ * @linedur_ns: precise line timing
+ * @pixeldur_ns: precise pixel timing
+ * @helper_private: mid-layer private data
+ * @properties: property tracking for this CRTC
++ * @state: current atomic state for this CRTC
++ * @acquire_ctx: per-CRTC implicit acquire context used by atomic drivers for
++ * legacy ioctls
+ *
+ * Each CRTC may have one or more connectors associated with it. This structure
+ * allows the CRTC to be controlled.
+ */
+ struct drm_crtc {
+ struct drm_device *dev;
++ struct device_node *port;
+ struct list_head head;
+
+- /**
++ /*
+ * crtc mutex
+ *
+ * This provides a read lock for the overall crtc state (mode, dpms
+ * state, ...) and a write lock for everything which can be updated
+ * without a full modeset (fb, cursor data, ...)
+ */
+- struct mutex mutex;
++ struct drm_modeset_lock mutex;
+
+ struct drm_mode_object base;
+
+- /* framebuffer the connector is currently bound to */
+- struct drm_framebuffer *fb;
+-
+- /* Temporary tracking of the old fb while a modeset is ongoing. Used
+- * by drm_mode_set_config_internal to implement correct refcounting. */
+- struct drm_framebuffer *old_fb;
++ /* primary and cursor planes for CRTC */
++ struct drm_plane *primary;
++ struct drm_plane *cursor;
++
++ /* position of cursor plane on crtc */
++ int cursor_x;
++ int cursor_y;
+
+ bool enabled;
+
+@@ -455,11 +437,32 @@
+ void *helper_private;
+
+ struct drm_object_properties properties;
++
++ struct drm_crtc_state *state;
++
++ /*
++ * For legacy crtc ioctls so that atomic drivers can get at the locking
++ * acquire context.
++ */
++ struct drm_modeset_acquire_ctx *acquire_ctx;
+ };
+
++/**
++ * struct drm_connector_state - mutable connector state
++ * @crtc: CRTC to connect connector to, NULL if disabled
++ * @best_encoder: can be used by helpers and drivers to select the encoder
++ * @state: backpointer to global drm_atomic_state
++ */
++struct drm_connector_state {
++ struct drm_crtc *crtc; /* do not write directly, use drm_atomic_set_crtc_for_connector() */
++
++ struct drm_encoder *best_encoder;
++
++ struct drm_atomic_state *state;
++};
+
+ /**
+- * drm_connector_funcs - control connectors on a given device
++ * struct drm_connector_funcs - control connectors on a given device
+ * @dpms: set power state (see drm_crtc_funcs above)
+ * @save: save connector state
+ * @restore: restore connector state
+@@ -469,6 +472,9 @@
+ * @set_property: property for this connector may need an update
+ * @destroy: make object go away
+ * @force: notify the driver that the connector is forced on
++ * @atomic_duplicate_state: duplicate the atomic state for this connector
++ * @atomic_destroy_state: destroy an atomic state for this connector
++ * @atomic_set_property: set a property on an atomic state for this connector
+ *
+ * Each CRTC may have one or more connectors attached to it. The functions
+ * below allow the core DRM code to control connectors, enumerate available modes,
+@@ -493,10 +499,19 @@
+ uint64_t val);
+ void (*destroy)(struct drm_connector *connector);
+ void (*force)(struct drm_connector *connector);
++
++ /* atomic update handling */
++ struct drm_connector_state *(*atomic_duplicate_state)(struct drm_connector *connector);
++ void (*atomic_destroy_state)(struct drm_connector *connector,
++ struct drm_connector_state *state);
++ int (*atomic_set_property)(struct drm_connector *connector,
++ struct drm_connector_state *state,
++ struct drm_property *property,
++ uint64_t val);
+ };
+
+ /**
+- * drm_encoder_funcs - encoder controls
++ * struct drm_encoder_funcs - encoder controls
+ * @reset: reset state (e.g. at init or resume time)
+ * @destroy: cleanup and free associated data
+ *
+@@ -510,10 +525,11 @@
+ #define DRM_CONNECTOR_MAX_ENCODER 3
+
+ /**
+- * drm_encoder - central DRM encoder structure
++ * struct drm_encoder - central DRM encoder structure
+ * @dev: parent DRM device
+ * @head: list management
+ * @base: base KMS object
++ * @name: encoder name
+ * @encoder_type: one of the %DRM_MODE_ENCODER_<foo> types in drm_mode.h
+ * @possible_crtcs: bitmask of potential CRTC bindings
+ * @possible_clones: bitmask of potential sibling encoders for cloning
+@@ -530,6 +546,7 @@
+ struct list_head head;
+
+ struct drm_mode_object base;
++ char *name;
+ int encoder_type;
+ uint32_t possible_crtcs;
+ uint32_t possible_clones;
+@@ -540,13 +557,6 @@
+ void *helper_private;
+ };
+
+-enum drm_connector_force {
+- DRM_FORCE_UNSPECIFIED,
+- DRM_FORCE_OFF,
+- DRM_FORCE_ON, /* force on analog part normally */
+- DRM_FORCE_ON_DIGITAL, /* for DVI-I use digital connector */
+-};
+-
+ /* should we poll this connector for connects and disconnects */
+ /* hot plug detectable */
+ #define DRM_CONNECTOR_POLL_HPD (1 << 0)
+@@ -559,16 +569,18 @@
+ #define MAX_ELD_BYTES 128
+
+ /**
+- * drm_connector - central DRM connector control structure
++ * struct drm_connector - central DRM connector control structure
+ * @dev: parent DRM device
+ * @kdev: kernel device for sysfs attributes
+ * @attr: sysfs attributes
+ * @head: list management
+ * @base: base KMS object
++ * @name: connector name
+ * @connector_type: one of the %DRM_MODE_CONNECTOR_<foo> types from drm_mode.h
+ * @connector_type_id: index into connector type enum
+ * @interlace_allowed: can this connector handle interlaced modes?
+ * @doublescan_allowed: can this connector handle doublescan?
++ * @stereo_allowed: can this connector handle stereo modes?
+ * @modes: modes available on this connector (from fill_modes() + user)
+ * @status: one of the drm_connector_status enums (connected, not, or unknown)
+ * @probed_modes: list of modes derived directly from the display
+@@ -576,10 +588,13 @@
+ * @funcs: connector control functions
+ * @edid_blob_ptr: DRM property containing EDID if present
+ * @properties: property tracking for this connector
++ * @path_blob_ptr: DRM blob property data for the DP MST path property
+ * @polled: a %DRM_CONNECTOR_POLL_<foo> value for core driven polling
+ * @dpms: current dpms state
+ * @helper_private: mid-layer private data
++ * @cmdline_mode: mode line parsed from the kernel cmdline for this connector
+ * @force: a %DRM_FORCE_<foo> state for forced mode sets
++ * @override_edid: has the EDID been overwritten through debugfs for testing?
+ * @encoder_ids: valid encoders for this connector
+ * @encoder: encoder driving this connector, if any
+ * @eld: EDID-like data, if present
+@@ -589,6 +604,18 @@
+ * @video_latency: video latency info from ELD, if found
+ * @audio_latency: audio latency info from ELD, if found
+ * @null_edid_counter: track sinks that give us all zeros for the EDID
++ * @bad_edid_counter: track sinks that give us an EDID with invalid checksum
++ * @debugfs_entry: debugfs directory for this connector
++ * @state: current atomic state for this connector
++ * @has_tile: is this connector connected to a tiled monitor
++ * @tile_group: tile group for the connected monitor
++ * @tile_is_single_monitor: whether the tile is within one monitor housing
++ * @num_h_tile: number of horizontal tiles in the tile group
++ * @num_v_tile: number of vertical tiles in the tile group
++ * @tile_h_loc: horizontal location of this tile
++ * @tile_v_loc: vertical location of this tile
++ * @tile_h_size: horizontal size of this tile.
++ * @tile_v_size: vertical size of this tile.
+ *
+ * Each connector may be connected to one or more CRTCs, or may be clonable by
+ * another connector if they can share a CRTC. Each connector also has a specific
+@@ -603,6 +630,7 @@
+
+ struct drm_mode_object base;
+
++ char *name;
+ int connector_type;
+ int connector_type_id;
+ bool interlace_allowed;
+@@ -621,6 +649,10 @@
+ struct drm_property_blob *edid_blob_ptr;
+ struct drm_object_properties properties;
+
++ struct drm_property_blob *path_blob_ptr;
++
++ struct drm_property_blob *tile_blob_ptr;
++
+ uint8_t polled; /* DRM_CONNECTOR_POLL_* */
+
+ /* requested DPMS state */
+@@ -629,7 +661,9 @@
+ void *helper_private;
+
+ /* forced on connector */
++ struct drm_cmdline_mode cmdline_mode;
+ enum drm_connector_force force;
++ bool override_edid;
+ uint32_t encoder_ids[DRM_CONNECTOR_MAX_ENCODER];
+ struct drm_encoder *encoder; /* currently active encoder */
+
+@@ -642,14 +676,65 @@
+ int audio_latency[2];
+ int null_edid_counter; /* needed to workaround some HW bugs where we get all 0s */
+ unsigned bad_edid_counter;
++
++ struct dentry *debugfs_entry;
++
++ struct drm_connector_state *state;
++
++ /* DisplayID bits */
++ bool has_tile;
++ struct drm_tile_group *tile_group;
++ bool tile_is_single_monitor;
++
++ uint8_t num_h_tile, num_v_tile;
++ uint8_t tile_h_loc, tile_v_loc;
++ uint16_t tile_h_size, tile_v_size;
++};
++
++/**
++ * struct drm_plane_state - mutable plane state
++ * @crtc: currently bound CRTC, NULL if disabled
++ * @fb: currently bound framebuffer
++ * @fence: optional fence to wait for before scanning out @fb
++ * @crtc_x: left position of visible portion of plane on crtc
++ * @crtc_y: upper position of visible portion of plane on crtc
++ * @crtc_w: width of visible portion of plane on crtc
++ * @crtc_h: height of visible portion of plane on crtc
++ * @src_x: left position of visible portion of plane within
++ * plane (in 16.16)
++ * @src_y: upper position of visible portion of plane within
++ * plane (in 16.16)
++ * @src_w: width of visible portion of plane (in 16.16)
++ * @src_h: height of visible portion of plane (in 16.16)
++ * @state: backpointer to global drm_atomic_state
++ */
++struct drm_plane_state {
++ struct drm_crtc *crtc; /* do not write directly, use drm_atomic_set_crtc_for_plane() */
++ struct drm_framebuffer *fb; /* do not write directly, use drm_atomic_set_fb_for_plane() */
++ struct fence *fence;
++
++ /* Signed dest location allows it to be partially off screen */
++ int32_t crtc_x, crtc_y;
++ uint32_t crtc_w, crtc_h;
++
++ /* Source values are 16.16 fixed point */
++ uint32_t src_x, src_y;
++ uint32_t src_h, src_w;
++
++ struct drm_atomic_state *state;
+ };
+
++
+ /**
+- * drm_plane_funcs - driver plane control functions
++ * struct drm_plane_funcs - driver plane control functions
+ * @update_plane: update the plane configuration
+ * @disable_plane: shut down the plane
+ * @destroy: clean up plane resources
++ * @reset: reset plane after state has been invalidated (e.g. resume)
+ * @set_property: called when a property is changed
++ * @atomic_duplicate_state: duplicate the atomic state for this plane
++ * @atomic_destroy_state: destroy an atomic state for this plane
++ * @atomic_set_property: set a property on an atomic state for this plane
+ */
+ struct drm_plane_funcs {
+ int (*update_plane)(struct drm_plane *plane,
+@@ -660,13 +745,29 @@
+ uint32_t src_w, uint32_t src_h);
+ int (*disable_plane)(struct drm_plane *plane);
+ void (*destroy)(struct drm_plane *plane);
++ void (*reset)(struct drm_plane *plane);
+
+ int (*set_property)(struct drm_plane *plane,
+ struct drm_property *property, uint64_t val);
++
++ /* atomic update handling */
++ struct drm_plane_state *(*atomic_duplicate_state)(struct drm_plane *plane);
++ void (*atomic_destroy_state)(struct drm_plane *plane,
++ struct drm_plane_state *state);
++ int (*atomic_set_property)(struct drm_plane *plane,
++ struct drm_plane_state *state,
++ struct drm_property *property,
++ uint64_t val);
++};
++
++enum drm_plane_type {
++ DRM_PLANE_TYPE_OVERLAY,
++ DRM_PLANE_TYPE_PRIMARY,
++ DRM_PLANE_TYPE_CURSOR,
+ };
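The plane type decides how the object is exposed: overlay planes are the only kind legacy userspace sees, while primary and cursor planes back the CRTC itself and only appear once userspace opts into universal planes. A sketch of registering a primary plane through drm_universal_plane_init(), which is declared further down in this header (the "foo" names are illustrative; the format codes come from drm_fourcc.h):

	static const uint32_t foo_primary_formats[] = {
		DRM_FORMAT_XRGB8888,
		DRM_FORMAT_ARGB8888,
	};

	static int foo_primary_plane_init(struct drm_device *dev,
					  struct drm_plane *plane,
					  unsigned long possible_crtcs,
					  const struct drm_plane_funcs *funcs)
	{
		/* The resulting plane can then be passed as @primary to
		 * drm_crtc_init_with_planes(). */
		return drm_universal_plane_init(dev, plane, possible_crtcs,
						funcs, foo_primary_formats,
						ARRAY_SIZE(foo_primary_formats),
						DRM_PLANE_TYPE_PRIMARY);
	}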
+
+ /**
+- * drm_plane - central DRM plane control structure
++ * struct drm_plane - central DRM plane control structure
+ * @dev: DRM device this plane belongs to
+ * @head: for list management
+ * @base: base mode object
+@@ -675,13 +776,19 @@
+ * @format_count: number of formats supported
+ * @crtc: currently bound CRTC
+ * @fb: currently bound fb
++ * @old_fb: Temporary tracking of the old fb while a modeset is ongoing. Used by
++ * drm_mode_set_config_internal() to implement correct refcounting.
+ * @funcs: helper functions
+ * @properties: property tracking for this plane
++ * @type: type of plane (overlay, primary, cursor)
++ * @state: current atomic state for this plane
+ */
+ struct drm_plane {
+ struct drm_device *dev;
+ struct list_head head;
+
++ struct drm_modeset_lock mutex;
++
+ struct drm_mode_object base;
+
+ uint32_t possible_crtcs;
+@@ -691,13 +798,21 @@
+ struct drm_crtc *crtc;
+ struct drm_framebuffer *fb;
+
++ struct drm_framebuffer *old_fb;
++
+ const struct drm_plane_funcs *funcs;
+
+ struct drm_object_properties properties;
++
++ enum drm_plane_type type;
++
++ void *helper_private;
++
++ struct drm_plane_state *state;
+ };
+
+ /**
+- * drm_bridge_funcs - drm_bridge control functions
++ * struct drm_bridge_funcs - drm_bridge control functions
+ * @mode_fixup: Try to fixup (or reject entirely) proposed mode for this bridge
+ * @disable: Called right before encoder prepare, disables the bridge
+ * @post_disable: Called right after encoder prepare, for lockstepped disable
+@@ -721,7 +836,7 @@
+ };
+
+ /**
+- * drm_bridge - central DRM bridge control structure
++ * struct drm_bridge - central DRM bridge control structure
+ * @dev: DRM device this bridge belongs to
+ * @head: list management
+ * @base: base mode object
+@@ -739,8 +854,35 @@
+ };
+
+ /**
+- * drm_mode_set - new values for a CRTC config change
+- * @head: list management
++ * struct drm_atomic_state - the global state object for atomic updates
++ * @dev: parent DRM device
++ * @flags: state flags like async update
++ * @planes: pointer to array of plane pointers
++ * @plane_states: pointer to array of plane states pointers
++ * @crtcs: pointer to array of CRTC pointers
++ * @crtc_states: pointer to array of CRTC states pointers
++ * @num_connector: size of the @connectors and @connector_states arrays
++ * @connectors: pointer to array of connector pointers
++ * @connector_states: pointer to array of connector states pointers
++ * @acquire_ctx: acquire context for this atomic modeset state update
++ */
++struct drm_atomic_state {
++ struct drm_device *dev;
++ uint32_t flags;
++ struct drm_plane **planes;
++ struct drm_plane_state **plane_states;
++ struct drm_crtc **crtcs;
++ struct drm_crtc_state **crtc_states;
++ int num_connector;
++ struct drm_connector **connectors;
++ struct drm_connector_state **connector_states;
++
++ struct drm_modeset_acquire_ctx *acquire_ctx;
++};
++
++
++/**
++ * struct drm_mode_set - new values for a CRTC config change
+ * @fb: framebuffer to use for new config
+ * @crtc: CRTC whose configuration we're about to change
+ * @mode: mode timings to use
+@@ -770,6 +912,9 @@
+ * struct drm_mode_config_funcs - basic driver provided mode setting functions
+ * @fb_create: create a new framebuffer object
+ * @output_poll_changed: function to handle output configuration changes
++ * @atomic_check: check whether a given atomic state update is possible
++ * @atomic_commit: commit an atomic state update previously verified with
++ * atomic_check()
+ *
+ * Some global (i.e. not per-CRTC, connector, etc) mode setting functions that
+ * involve drivers.
+@@ -779,13 +924,20 @@
+ struct drm_file *file_priv,
+ struct drm_mode_fb_cmd2 *mode_cmd);
+ void (*output_poll_changed)(struct drm_device *dev);
++
++ int (*atomic_check)(struct drm_device *dev,
++ struct drm_atomic_state *a);
++ int (*atomic_commit)(struct drm_device *dev,
++ struct drm_atomic_state *a,
++ bool async);
+ };
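Split this way, @atomic_check may reject a candidate state with no side effects, while @atomic_commit must only ever see a state that already passed checking. A skeletal sketch of the wiring (all names hypothetical; the actual validation and hardware programming are driver specific and elided):

	static int foo_atomic_check(struct drm_device *dev,
				    struct drm_atomic_state *state)
	{
		/* Validate the candidate state; must not touch hardware. */
		return 0;
	}

	static int foo_atomic_commit(struct drm_device *dev,
				     struct drm_atomic_state *state,
				     bool async)
	{
		if (async)
			return -EBUSY;	/* async updates unsupported here */

		/* Program the hardware from the checked state. */
		return 0;
	}

	static const struct drm_mode_config_funcs foo_mode_config_funcs = {
		/* .fb_create and .output_poll_changed elided */
		.atomic_check = foo_atomic_check,
		.atomic_commit = foo_atomic_commit,
	};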
+
+ /**
+- * drm_mode_group - group of mode setting resources for potential sub-grouping
++ * struct drm_mode_group - group of mode setting resources for potential sub-grouping
+ * @num_crtcs: CRTC count
+ * @num_encoders: encoder count
+ * @num_connectors: connector count
++ * @num_bridges: bridge count
+ * @id_list: list of KMS object IDs in this group
+ *
+ * Currently this simply tracks the global mode setting state. But in the
+@@ -805,10 +957,14 @@
+ };
+
+ /**
+- * drm_mode_config - Mode configuration control structure
++ * struct drm_mode_config - Mode configuration control structure
+ * @mutex: mutex protecting KMS related lists and structures
++ * @connection_mutex: ww mutex protecting connector state and routing
++ * @acquire_ctx: global implicit acquire context used by atomic drivers for
++ * legacy ioctls
+ * @idr_mutex: mutex for KMS ID allocation and management
+ * @crtc_idr: main KMS ID tracking object
++ * @fb_lock: mutex to protect fb state and lists
+ * @num_fb: number of fbs available
+ * @fb_list: list of framebuffers available
+ * @num_connector: number of connectors on this device
+@@ -817,17 +973,28 @@
+ * @bridge_list: list of bridge objects
+ * @num_encoder: number of encoders on this device
+ * @encoder_list: list of encoder objects
++ * @num_overlay_plane: number of overlay planes on this device
++ * @num_total_plane: number of universal (i.e. with primary/cursor) planes on this device
++ * @plane_list: list of plane objects
+ * @num_crtc: number of CRTCs on this device
+ * @crtc_list: list of CRTC objects
++ * @property_list: list of property objects
+ * @min_width: minimum pixel width on this device
+ * @min_height: minimum pixel height on this device
+ * @max_width: maximum pixel width on this device
+ * @max_height: maximum pixel height on this device
+ * @funcs: core driver provided mode setting functions
+ * @fb_base: base address of the framebuffer
+- * @poll_enabled: track polling status for this device
++ * @poll_enabled: track polling support for this device
++ * @poll_running: track polling status for this device
+ * @output_poll_work: delayed work for polling in process context
++ * @property_blob_list: list of all the blob property objects
+ * @*_property: core property tracking
++ * @preferred_depth: preferred RGB pixel depth, used by fb helpers
++ * @prefer_shadow: hint to userspace to prefer shadow-fb rendering
++ * @async_page_flip: does this device support async flips on the primary plane?
++ * @cursor_width: hint to userspace for max cursor width
++ * @cursor_height: hint to userspace for max cursor height
+ *
+ * Core mode resource tracking structure. All CRTC, encoders, and connectors
+ * enumerated by the driver are added here, as are global properties. Some
+@@ -835,18 +1002,14 @@
+ */
+ struct drm_mode_config {
+ struct mutex mutex; /* protects configuration (mode lists etc.) */
++ struct drm_modeset_lock connection_mutex; /* protects connector->encoder and encoder->crtc links */
++ struct drm_modeset_acquire_ctx *acquire_ctx; /* for legacy _lock_all() / _unlock_all() */
+ struct mutex idr_mutex; /* for IDR management */
+ struct idr crtc_idr; /* use this idr for all IDs, fb, crtc, connector, modes - just makes life easier */
++ struct idr tile_idr; /* use this idr for all IDs, fb, crtc, connector, modes - just makes life easier */
+ /* this is limited to one for now */
+
+-
+- /**
+- * fb_lock - mutex to protect fb state
+- *
+- * Besides the global fb list his also protects the fbs list in the
+- * file_priv
+- */
+- struct mutex fb_lock;
++	struct mutex fb_lock; /* protects global and per-file fb lists */
+ int num_fb;
+ struct list_head fb_list;
+
+@@ -856,7 +1019,15 @@
+ struct list_head bridge_list;
+ int num_encoder;
+ struct list_head encoder_list;
+- int num_plane;
++
++ /*
++ * Track # of overlay planes separately from # of total planes. By
++ * default we only advertise overlay planes to userspace; if userspace
++ * sets the "universal plane" capability bit, we'll go ahead and
++ * expose all planes.
++ */
++ int num_overlay_plane;
++ int num_total_plane;
+ struct list_head plane_list;
+
+ int num_crtc;
+@@ -878,6 +1049,10 @@
+ struct list_head property_blob_list;
+ struct drm_property *edid_property;
+ struct drm_property *dpms_property;
++ struct drm_property *path_property;
++ struct drm_property *tile_property;
++ struct drm_property *plane_type_property;
++ struct drm_property *rotation_property;
+
+ /* DVI-I properties */
+ struct drm_property *dvi_i_subconnector_property;
+@@ -900,8 +1075,13 @@
+
+ /* Optional properties */
+ struct drm_property *scaling_mode_property;
++ struct drm_property *aspect_ratio_property;
+ struct drm_property *dirty_info_property;
+
++ /* properties for virtual machine layout */
++ struct drm_property *suggested_x_property;
++ struct drm_property *suggested_y_property;
++
+ /* dumb ioctl parameters */
+ uint32_t preferred_depth, prefer_shadow;
+
+@@ -912,6 +1092,19 @@
+ uint32_t cursor_width, cursor_height;
+ };
+
++/**
++ * drm_for_each_plane_mask - iterate over planes specified by bitmask
++ * @plane: the loop cursor
++ * @dev: the DRM device
++ * @plane_mask: bitmask of plane indices
++ *
++ * Iterate over all planes specified by bitmask.
++ */
++#define drm_for_each_plane_mask(plane, dev, plane_mask) \
++ list_for_each_entry((plane), &(dev)->mode_config.plane_list, head) \
++ if ((plane_mask) & (1 << drm_plane_index(plane)))
++
++
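Since the macro expands to a guarded list_for_each_entry() walk, it is used like any other list iterator. For instance, to visit every plane attached to a CRTC state (a sketch; per the warning in drm_crtc_state, plane_mask is only trustworthy for fully converted atomic drivers):

	static void foo_log_attached_planes(struct drm_device *dev,
					    const struct drm_crtc_state *crtc_state)
	{
		struct drm_plane *plane;

		/* Only planes whose bit is set in plane_mask are visited. */
		drm_for_each_plane_mask(plane, dev, crtc_state->plane_mask)
			DRM_DEBUG_KMS("plane %u is part of this update\n",
				      drm_plane_index(plane));
	}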
+ #define obj_to_crtc(x) container_of(x, struct drm_crtc, base)
+ #define obj_to_connector(x) container_of(x, struct drm_connector, base)
+ #define obj_to_encoder(x) container_of(x, struct drm_encoder, base)
+@@ -926,13 +1119,11 @@
+ char *name;
+ };
+
+-extern void drm_modeset_lock_all(struct drm_device *dev);
+-extern void drm_modeset_unlock_all(struct drm_device *dev);
+-extern void drm_warn_on_modeset_not_all_locked(struct drm_device *dev);
+-
+-extern int drm_crtc_init(struct drm_device *dev,
+- struct drm_crtc *crtc,
+- const struct drm_crtc_funcs *funcs);
++extern int drm_crtc_init_with_planes(struct drm_device *dev,
++ struct drm_crtc *crtc,
++ struct drm_plane *primary,
++ struct drm_plane *cursor,
++ const struct drm_crtc_funcs *funcs);
+ extern void drm_crtc_cleanup(struct drm_crtc *crtc);
+ extern unsigned int drm_crtc_index(struct drm_crtc *crtc);
+
+@@ -954,8 +1145,11 @@
+ struct drm_connector *connector,
+ const struct drm_connector_funcs *funcs,
+ int connector_type);
++int drm_connector_register(struct drm_connector *connector);
++void drm_connector_unregister(struct drm_connector *connector);
+
+ extern void drm_connector_cleanup(struct drm_connector *connector);
++extern unsigned int drm_connector_index(struct drm_connector *connector);
+ /* helper to unplug all connectors from sysfs for device */
+ extern void drm_connector_unplug_all(struct drm_device *dev);
+
+@@ -981,19 +1175,31 @@
+ return !!(encoder->possible_crtcs & drm_crtc_mask(crtc));
+ }
+
++extern int drm_universal_plane_init(struct drm_device *dev,
++ struct drm_plane *plane,
++ unsigned long possible_crtcs,
++ const struct drm_plane_funcs *funcs,
++ const uint32_t *formats,
++ uint32_t format_count,
++ enum drm_plane_type type);
+ extern int drm_plane_init(struct drm_device *dev,
+ struct drm_plane *plane,
+ unsigned long possible_crtcs,
+ const struct drm_plane_funcs *funcs,
+ const uint32_t *formats, uint32_t format_count,
+- bool priv);
++ bool is_primary);
+ extern void drm_plane_cleanup(struct drm_plane *plane);
++extern unsigned int drm_plane_index(struct drm_plane *plane);
+ extern void drm_plane_force_disable(struct drm_plane *plane);
++extern int drm_crtc_check_viewport(const struct drm_crtc *crtc,
++ int x, int y,
++ const struct drm_display_mode *mode,
++ const struct drm_framebuffer *fb);
+
+ extern void drm_encoder_cleanup(struct drm_encoder *encoder);
+
+-extern const char *drm_get_connector_name(const struct drm_connector *connector);
+ extern const char *drm_get_connector_status_name(enum drm_connector_status status);
++extern const char *drm_get_subpixel_order_name(enum subpixel_order order);
+ extern const char *drm_get_dpms_name(int val);
+ extern const char *drm_get_dvi_i_subconnector_name(int val);
+ extern const char *drm_get_dvi_i_select_name(int val);
+@@ -1001,41 +1207,39 @@
+ extern const char *drm_get_tv_select_name(int val);
+ extern void drm_fb_release(struct drm_file *file_priv);
+ extern int drm_mode_group_init_legacy_group(struct drm_device *dev, struct drm_mode_group *group);
++extern void drm_mode_group_destroy(struct drm_mode_group *group);
++extern void drm_reinit_primary_mode_group(struct drm_device *dev);
+ extern bool drm_probe_ddc(struct i2c_adapter *adapter);
+ extern struct edid *drm_get_edid(struct drm_connector *connector,
+ struct i2c_adapter *adapter);
+ extern struct edid *drm_edid_duplicate(const struct edid *edid);
+ extern int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid);
+-extern void drm_mode_probed_add(struct drm_connector *connector, struct drm_display_mode *mode);
+-extern void drm_mode_copy(struct drm_display_mode *dst, const struct drm_display_mode *src);
+-extern struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
+- const struct drm_display_mode *mode);
+-extern void drm_mode_debug_printmodeline(const struct drm_display_mode *mode);
+ extern void drm_mode_config_init(struct drm_device *dev);
+ extern void drm_mode_config_reset(struct drm_device *dev);
+ extern void drm_mode_config_cleanup(struct drm_device *dev);
+-extern void drm_mode_set_name(struct drm_display_mode *mode);
+-extern bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2);
+-extern bool drm_mode_equal_no_clocks_no_stereo(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2);
+-extern int drm_mode_width(const struct drm_display_mode *mode);
+-extern int drm_mode_height(const struct drm_display_mode *mode);
+-
+-/* for us by fb module */
+-extern struct drm_display_mode *drm_mode_create(struct drm_device *dev);
+-extern void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode);
+-extern void drm_mode_validate_size(struct drm_device *dev,
+- struct list_head *mode_list,
+- int maxX, int maxY, int maxPitch);
+-extern void drm_mode_prune_invalid(struct drm_device *dev,
+- struct list_head *mode_list, bool verbose);
+-extern void drm_mode_sort(struct list_head *mode_list);
+-extern int drm_mode_hsync(const struct drm_display_mode *mode);
+-extern int drm_mode_vrefresh(const struct drm_display_mode *mode);
+-extern void drm_mode_set_crtcinfo(struct drm_display_mode *p,
+- int adjust_flags);
+-extern void drm_mode_connector_list_update(struct drm_connector *connector);
++
++extern int drm_mode_connector_set_path_property(struct drm_connector *connector,
++ const char *path);
++int drm_mode_connector_set_tile_property(struct drm_connector *connector);
+ extern int drm_mode_connector_update_edid_property(struct drm_connector *connector,
+- struct edid *edid);
++ const struct edid *edid);
++
++static inline bool drm_property_type_is(struct drm_property *property,
++ uint32_t type)
++{
++ /* instanceof for props.. handles extended type vs original types: */
++ if (property->flags & DRM_MODE_PROP_EXTENDED_TYPE)
++ return (property->flags & DRM_MODE_PROP_EXTENDED_TYPE) == type;
++ return property->flags & type;
++}
++
++static inline bool drm_property_type_valid(struct drm_property *property)
++{
++ if (property->flags & DRM_MODE_PROP_EXTENDED_TYPE)
++ return !(property->flags & DRM_MODE_PROP_LEGACY_TYPE);
++ return !!(property->flags & DRM_MODE_PROP_LEGACY_TYPE);
++}
++
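Because the extended types live behind DRM_MODE_PROP_EXTENDED_TYPE rather than in the legacy flag bits, code should test a property's type through these helpers instead of poking at property->flags directly. One sketch of a consumer, relying on range properties keeping their min/max in values[0] and values[1]:

	static uint64_t foo_clamp_to_range(struct drm_property *prop,
					   uint64_t val)
	{
		/* values[0]/values[1] hold min/max for range properties */
		if (drm_property_type_is(prop, DRM_MODE_PROP_RANGE)) {
			if (val < prop->values[0])
				return prop->values[0];
			if (val > prop->values[1])
				return prop->values[1];
		}
		return val;
	}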
+ extern int drm_object_property_set_value(struct drm_mode_object *obj,
+ struct drm_property *property,
+ uint64_t val);
+@@ -1065,28 +1269,35 @@
+ struct drm_property *drm_property_create_bitmask(struct drm_device *dev,
+ int flags, const char *name,
+ const struct drm_prop_enum_list *props,
+- int num_values);
++ int num_props,
++ uint64_t supported_bits);
+ struct drm_property *drm_property_create_range(struct drm_device *dev, int flags,
+ const char *name,
+ uint64_t min, uint64_t max);
++struct drm_property *drm_property_create_signed_range(struct drm_device *dev,
++ int flags, const char *name,
++ int64_t min, int64_t max);
++struct drm_property *drm_property_create_object(struct drm_device *dev,
++ int flags, const char *name, uint32_t type);
+ extern void drm_property_destroy(struct drm_device *dev, struct drm_property *property);
+ extern int drm_property_add_enum(struct drm_property *property, int index,
+ uint64_t value, const char *name);
+ extern int drm_mode_create_dvi_i_properties(struct drm_device *dev);
+-extern int drm_mode_create_tv_properties(struct drm_device *dev, int num_formats,
+- char *formats[]);
++extern int drm_mode_create_tv_properties(struct drm_device *dev,
++ unsigned int num_modes,
++ char *modes[]);
+ extern int drm_mode_create_scaling_mode_property(struct drm_device *dev);
++extern int drm_mode_create_aspect_ratio_property(struct drm_device *dev);
+ extern int drm_mode_create_dirty_info_property(struct drm_device *dev);
+-extern const char *drm_get_encoder_name(const struct drm_encoder *encoder);
++extern int drm_mode_create_suggested_offset_properties(struct drm_device *dev);
+
+ extern int drm_mode_connector_attach_encoder(struct drm_connector *connector,
+ struct drm_encoder *encoder);
+-extern void drm_mode_connector_detach_encoder(struct drm_connector *connector,
+- struct drm_encoder *encoder);
+ extern int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
+ int gamma_size);
+ extern struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
+ uint32_t id, uint32_t type);
++
+ /* IOCTLs */
+ extern int drm_mode_getresources(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+@@ -1132,21 +1343,12 @@
+ extern int drm_mode_gamma_set_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+ extern u8 drm_match_cea_mode(const struct drm_display_mode *to_match);
++extern enum hdmi_picture_aspect drm_get_cea_aspect_ratio(const u8 video_code);
+ extern bool drm_detect_hdmi_monitor(struct edid *edid);
+ extern bool drm_detect_monitor_audio(struct edid *edid);
+ extern bool drm_rgb_quant_range_selectable(struct edid *edid);
+ extern int drm_mode_page_flip_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+-extern struct drm_display_mode *drm_cvt_mode(struct drm_device *dev,
+- int hdisplay, int vdisplay, int vrefresh,
+- bool reduced, bool interlaced, bool margins);
+-extern struct drm_display_mode *drm_gtf_mode(struct drm_device *dev,
+- int hdisplay, int vdisplay, int vrefresh,
+- bool interlaced, int margins);
+-extern struct drm_display_mode *drm_gtf_mode_complex(struct drm_device *dev,
+- int hdisplay, int vdisplay, int vrefresh,
+- bool interlaced, int margins, int GTF_M,
+- int GTF_2C, int GTF_K, int GTF_2J);
+ extern int drm_add_modes_noedid(struct drm_connector *connector,
+ int hdisplay, int vdisplay);
+ extern void drm_set_preferred_mode(struct drm_connector *connector,
+@@ -1155,6 +1357,13 @@
+ extern int drm_edid_header_is_valid(const u8 *raw_edid);
+ extern bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid);
+ extern bool drm_edid_is_valid(struct edid *edid);
++
++extern struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev,
++ char topology[8]);
++extern struct drm_tile_group *drm_mode_get_tile_group(struct drm_device *dev,
++ char topology[8]);
++extern void drm_mode_put_tile_group(struct drm_device *dev,
++ struct drm_tile_group *tg);
+ struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
+ int hsize, int vsize, int fresh,
+ bool rb);
+@@ -1169,6 +1378,9 @@
+ struct drm_file *file_priv);
+ extern int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
++extern int drm_mode_plane_set_obj_prop(struct drm_plane *plane,
++ struct drm_property *property,
++ uint64_t value);
+
+ extern void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
+ int *bpp);
+@@ -1177,8 +1389,21 @@
+ extern int drm_format_horz_chroma_subsampling(uint32_t format);
+ extern int drm_format_vert_chroma_subsampling(uint32_t format);
+ extern const char *drm_get_format_name(uint32_t format);
++extern struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev,
++ unsigned int supported_rotations);
++extern unsigned int drm_rotation_simplify(unsigned int rotation,
++ unsigned int supported_rotations);
+
+ /* Helpers */
++
++static inline struct drm_plane *drm_plane_find(struct drm_device *dev,
++ uint32_t id)
++{
++ struct drm_mode_object *mo;
++ mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_PLANE);
++ return mo ? obj_to_plane(mo) : NULL;
++}
++
+ static inline struct drm_crtc *drm_crtc_find(struct drm_device *dev,
+ uint32_t id)
+ {
+@@ -1195,4 +1420,33 @@
+ return mo ? obj_to_encoder(mo) : NULL;
+ }
+
++static inline struct drm_connector *drm_connector_find(struct drm_device *dev,
++ uint32_t id)
++{
++ struct drm_mode_object *mo;
++ mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_CONNECTOR);
++ return mo ? obj_to_connector(mo) : NULL;
++}
++
++static inline struct drm_property *drm_property_find(struct drm_device *dev,
++ uint32_t id)
++{
++ struct drm_mode_object *mo;
++ mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_PROPERTY);
++ return mo ? obj_to_property(mo) : NULL;
++}
++
++static inline struct drm_property_blob *
++drm_property_blob_find(struct drm_device *dev, uint32_t id)
++{
++ struct drm_mode_object *mo;
++ mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_BLOB);
++ return mo ? obj_to_blob(mo) : NULL;
++}
++
++/* Plane list iterator for legacy (overlay only) planes. */
++#define drm_for_each_legacy_plane(plane, planelist) \
++ list_for_each_entry(plane, planelist, head) \
++ if (plane->type == DRM_PLANE_TYPE_OVERLAY)
++
+ #endif /* __DRM_CRTC_H__ */
+diff -Naur a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
+--- a/include/drm/drm_crtc_helper.h 2015-03-26 14:43:27.886436386 +0530
++++ b/include/drm/drm_crtc_helper.h 2015-03-26 14:42:38.750435422 +0530
+@@ -68,6 +68,7 @@
+ int (*mode_set)(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode, int x, int y,
+ struct drm_framebuffer *old_fb);
++ void (*mode_set_nofb)(struct drm_crtc *crtc);
+
+ /* Move the crtc on the current fb to the given position *optional* */
+ int (*mode_set_base)(struct drm_crtc *crtc, int x, int y,
+@@ -81,6 +82,12 @@
+
+ /* disable crtc when not in use - more explicit than dpms off */
+ void (*disable)(struct drm_crtc *crtc);
++
++ /* atomic helpers */
++ int (*atomic_check)(struct drm_crtc *crtc,
++ struct drm_crtc_state *state);
++ void (*atomic_begin)(struct drm_crtc *crtc);
++ void (*atomic_flush)(struct drm_crtc *crtc);
+ };
+
+ /**
+@@ -114,7 +121,7 @@
+ /**
+ * drm_connector_helper_funcs - helper operations for connectors
+ * @get_modes: get mode list for this connector
+- * @mode_valid: is this mode valid on the given connector?
++ * @mode_valid (optional): is this mode valid on the given connector?
+ *
+ * The helper operations are called by the mid-layer CRTC helper.
+ */
+@@ -125,7 +132,6 @@
+ struct drm_encoder *(*best_encoder)(struct drm_connector *connector);
+ };
+
+-extern int drm_helper_probe_single_connector_modes(struct drm_connector *connector, uint32_t maxX, uint32_t maxY);
+ extern void drm_helper_disable_unused_functions(struct drm_device *dev);
+ extern int drm_crtc_helper_set_config(struct drm_mode_set *set);
+ extern bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
+@@ -139,8 +145,8 @@
+
+ extern void drm_helper_move_panel_connectors_to_head(struct drm_device *);
+
+-extern int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
+- struct drm_mode_fb_cmd2 *mode_cmd);
++extern void drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
++ struct drm_mode_fb_cmd2 *mode_cmd);
+
+ static inline void drm_crtc_helper_add(struct drm_crtc *crtc,
+ const struct drm_crtc_helper_funcs *funcs)
+@@ -160,7 +166,22 @@
+ connector->helper_private = (void *)funcs;
+ }
+
+-extern int drm_helper_resume_force_mode(struct drm_device *dev);
++extern void drm_helper_resume_force_mode(struct drm_device *dev);
++
++int drm_helper_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
++ struct drm_display_mode *adjusted_mode, int x, int y,
++ struct drm_framebuffer *old_fb);
++int drm_helper_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
++ struct drm_framebuffer *old_fb);
++
++/* drm_probe_helper.c */
++extern int drm_helper_probe_single_connector_modes(struct drm_connector
++ *connector, uint32_t maxX,
++ uint32_t maxY);
++extern int drm_helper_probe_single_connector_modes_nomerge(struct drm_connector
++ *connector,
++ uint32_t maxX,
++ uint32_t maxY);
+ extern void drm_kms_helper_poll_init(struct drm_device *dev);
+ extern void drm_kms_helper_poll_fini(struct drm_device *dev);
+ extern bool drm_helper_hpd_irq_event(struct drm_device *dev);
+diff -Naur a/include/drm/drm_displayid.h b/include/drm/drm_displayid.h
+--- a/include/drm/drm_displayid.h 1970-01-01 05:30:00.000000000 +0530
++++ b/include/drm/drm_displayid.h 2015-03-26 14:42:38.750435422 +0530
+@@ -0,0 +1,76 @@
++/*
++ * Copyright © 2014 Red Hat Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++#ifndef DRM_DISPLAYID_H
++#define DRM_DISPLAYID_H
++
++#define DATA_BLOCK_PRODUCT_ID 0x00
++#define DATA_BLOCK_DISPLAY_PARAMETERS 0x01
++#define DATA_BLOCK_COLOR_CHARACTERISTICS 0x02
++#define DATA_BLOCK_TYPE_1_DETAILED_TIMING 0x03
++#define DATA_BLOCK_TYPE_2_DETAILED_TIMING 0x04
++#define DATA_BLOCK_TYPE_3_SHORT_TIMING 0x05
++#define DATA_BLOCK_TYPE_4_DMT_TIMING 0x06
++#define DATA_BLOCK_VESA_TIMING 0x07
++#define DATA_BLOCK_CEA_TIMING 0x08
++#define DATA_BLOCK_VIDEO_TIMING_RANGE 0x09
++#define DATA_BLOCK_PRODUCT_SERIAL_NUMBER 0x0a
++#define DATA_BLOCK_GP_ASCII_STRING 0x0b
++#define DATA_BLOCK_DISPLAY_DEVICE_DATA 0x0c
++#define DATA_BLOCK_INTERFACE_POWER_SEQUENCING 0x0d
++#define DATA_BLOCK_TRANSFER_CHARACTERISTICS 0x0e
++#define DATA_BLOCK_DISPLAY_INTERFACE 0x0f
++#define DATA_BLOCK_STEREO_DISPLAY_INTERFACE 0x10
++#define DATA_BLOCK_TILED_DISPLAY 0x12
++
++#define DATA_BLOCK_VENDOR_SPECIFIC 0x7f
++
++#define PRODUCT_TYPE_EXTENSION 0
++#define PRODUCT_TYPE_TEST 1
++#define PRODUCT_TYPE_PANEL 2
++#define PRODUCT_TYPE_MONITOR 3
++#define PRODUCT_TYPE_TV 4
++#define PRODUCT_TYPE_REPEATER 5
++#define PRODUCT_TYPE_DIRECT_DRIVE 6
++
++struct displayid_hdr {
++ u8 rev;
++ u8 bytes;
++ u8 prod_id;
++ u8 ext_count;
++} __packed;
++
++struct displayid_block {
++ u8 tag;
++ u8 rev;
++ u8 num_bytes;
++} __packed;
++
++struct displayid_tiled_block {
++ struct displayid_block base;
++ u8 tile_cap;
++ u8 topo[3];
++ u8 tile_size[4];
++ u8 tile_pixel_bezel[5];
++ u8 topology_id[8];
++} __packed;
++
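Data blocks are self-describing: each starts with the 3-byte displayid_block header followed by @num_bytes of payload, and the section header's @bytes field bounds the whole run. A hedged sketch of scanning a section buffer for the tiled-display block (checksum and edge-case handling simplified):

	static const struct displayid_block *
	foo_find_tiled_block(const u8 *section, int length)
	{
		const struct displayid_hdr *hdr = (const void *)section;
		int offset = sizeof(*hdr);
		int end = sizeof(*hdr) + hdr->bytes;

		while (offset + (int)sizeof(struct displayid_block) <= length &&
		       offset < end) {
			const struct displayid_block *block =
				(const void *)(section + offset);

			if (block->tag == DATA_BLOCK_TILED_DISPLAY)
				return block;

			offset += sizeof(*block) + block->num_bytes;
		}

		return NULL;
	}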
++#endif
+diff -Naur a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
+--- a/include/drm/drm_dp_helper.h 2015-03-26 14:43:27.886436386 +0530
++++ b/include/drm/drm_dp_helper.h 2015-03-26 14:42:38.750435422 +0530
+@@ -37,6 +37,7 @@
+ * eDP: Embedded DisplayPort version 1
+ * DPI: DisplayPort Interoperability Guideline v1.1a
+ * 1.2: DisplayPort 1.2
++ * MST: Multistream Transport - part of DP 1.2a
+ *
+ * 1.2 formally includes both eDP and DPI definitions.
+ */
+@@ -103,9 +104,14 @@
+ #define DP_TRAINING_AUX_RD_INTERVAL 0x00e /* XXX 1.2? */
+
+ /* Multiple stream transport */
++#define DP_FAUX_CAP 0x020 /* 1.2 */
++# define DP_FAUX_CAP_1 (1 << 0)
++
+ #define DP_MSTM_CAP 0x021 /* 1.2 */
+ # define DP_MST_CAP (1 << 0)
+
++#define DP_GUID 0x030 /* 1.2 */
++
+ #define DP_PSR_SUPPORT 0x070 /* XXX 1.2? */
+ # define DP_PSR_IS_SUPPORTED 1
+ #define DP_PSR_CAPS 0x071 /* XXX 1.2? */
+@@ -184,16 +190,16 @@
+ # define DP_TRAIN_VOLTAGE_SWING_MASK 0x3
+ # define DP_TRAIN_VOLTAGE_SWING_SHIFT 0
+ # define DP_TRAIN_MAX_SWING_REACHED (1 << 2)
+-# define DP_TRAIN_VOLTAGE_SWING_400 (0 << 0)
+-# define DP_TRAIN_VOLTAGE_SWING_600 (1 << 0)
+-# define DP_TRAIN_VOLTAGE_SWING_800 (2 << 0)
+-# define DP_TRAIN_VOLTAGE_SWING_1200 (3 << 0)
++# define DP_TRAIN_VOLTAGE_SWING_LEVEL_0 (0 << 0)
++# define DP_TRAIN_VOLTAGE_SWING_LEVEL_1 (1 << 0)
++# define DP_TRAIN_VOLTAGE_SWING_LEVEL_2 (2 << 0)
++# define DP_TRAIN_VOLTAGE_SWING_LEVEL_3 (3 << 0)
+
+ # define DP_TRAIN_PRE_EMPHASIS_MASK (3 << 3)
+-# define DP_TRAIN_PRE_EMPHASIS_0 (0 << 3)
+-# define DP_TRAIN_PRE_EMPHASIS_3_5 (1 << 3)
+-# define DP_TRAIN_PRE_EMPHASIS_6 (2 << 3)
+-# define DP_TRAIN_PRE_EMPHASIS_9_5 (3 << 3)
++# define DP_TRAIN_PRE_EMPH_LEVEL_0 (0 << 3)
++# define DP_TRAIN_PRE_EMPH_LEVEL_1 (1 << 3)
++# define DP_TRAIN_PRE_EMPH_LEVEL_2 (2 << 3)
++# define DP_TRAIN_PRE_EMPH_LEVEL_3 (3 << 3)
+
+ # define DP_TRAIN_PRE_EMPHASIS_SHIFT 3
+ # define DP_TRAIN_MAX_PRE_EMPHASIS_REACHED (1 << 5)
+@@ -221,6 +227,16 @@
+ # define DP_PSR_CRC_VERIFICATION (1 << 2)
+ # define DP_PSR_FRAME_CAPTURE (1 << 3)
+
++#define DP_ADAPTER_CTRL 0x1a0
++# define DP_ADAPTER_CTRL_FORCE_LOAD_SENSE (1 << 0)
++
++#define DP_BRANCH_DEVICE_CTRL 0x1a1
++# define DP_BRANCH_DEVICE_IRQ_HPD (1 << 0)
++
++#define DP_PAYLOAD_ALLOCATE_SET 0x1c0
++#define DP_PAYLOAD_ALLOCATE_START_TIME_SLOT 0x1c1
++#define DP_PAYLOAD_ALLOCATE_TIME_SLOT_COUNT 0x1c2
++
+ #define DP_SINK_COUNT 0x200
+ /* prior to 1.2 bit 7 was reserved mbz */
+ # define DP_GET_SINK_COUNT(x) ((((x) & 0x80) >> 1) | ((x) & 0x3f))
+@@ -230,6 +246,9 @@
+ # define DP_REMOTE_CONTROL_COMMAND_PENDING (1 << 0)
+ # define DP_AUTOMATED_TEST_REQUEST (1 << 1)
+ # define DP_CP_IRQ (1 << 2)
++# define DP_MCCS_IRQ (1 << 3)
++# define DP_DOWN_REP_MSG_RDY (1 << 4) /* 1.2 MST */
++# define DP_UP_REQ_MSG_RDY (1 << 5) /* 1.2 MST */
+ # define DP_SINK_SPECIFIC_IRQ (1 << 6)
+
+ #define DP_LANE0_1_STATUS 0x202
+@@ -279,11 +298,31 @@
+
+ #define DP_TEST_PATTERN 0x221
+
++#define DP_TEST_CRC_R_CR 0x240
++#define DP_TEST_CRC_G_Y 0x242
++#define DP_TEST_CRC_B_CB 0x244
++
++#define DP_TEST_SINK_MISC 0x246
++# define DP_TEST_CRC_SUPPORTED (1 << 5)
++# define DP_TEST_COUNT_MASK 0x7
++
+ #define DP_TEST_RESPONSE 0x260
+ # define DP_TEST_ACK (1 << 0)
+ # define DP_TEST_NAK (1 << 1)
+ # define DP_TEST_EDID_CHECKSUM_WRITE (1 << 2)
+
++#define DP_TEST_EDID_CHECKSUM 0x261
++
++#define DP_TEST_SINK 0x270
++# define DP_TEST_SINK_START (1 << 0)
++
++#define DP_PAYLOAD_TABLE_UPDATE_STATUS 0x2c0 /* 1.2 MST */
++# define DP_PAYLOAD_TABLE_UPDATED (1 << 0)
++# define DP_PAYLOAD_ACT_HANDLED (1 << 1)
++
++#define DP_VC_PAYLOAD_ID_SLOT_1 0x2c1 /* 1.2 MST */
++/* up to ID_SLOT_63 at 0x2ff */
++
+ #define DP_SOURCE_OUI 0x300
+ #define DP_SINK_OUI 0x400
+ #define DP_BRANCH_OUI 0x500
+@@ -291,6 +330,22 @@
+ #define DP_SET_POWER 0x600
+ # define DP_SET_POWER_D0 0x1
+ # define DP_SET_POWER_D3 0x2
++# define DP_SET_POWER_MASK 0x3
++
++#define DP_SIDEBAND_MSG_DOWN_REQ_BASE 0x1000 /* 1.2 MST */
++#define DP_SIDEBAND_MSG_UP_REP_BASE 0x1200 /* 1.2 MST */
++#define DP_SIDEBAND_MSG_DOWN_REP_BASE 0x1400 /* 1.2 MST */
++#define DP_SIDEBAND_MSG_UP_REQ_BASE 0x1600 /* 1.2 MST */
++
++#define DP_SINK_COUNT_ESI 0x2002 /* 1.2 */
++/* 0-5 sink count */
++# define DP_SINK_COUNT_CP_READY (1 << 6)
++
++#define DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0 0x2003 /* 1.2 */
++
++#define DP_DEVICE_SERVICE_IRQ_VECTOR_ESI1 0x2004 /* 1.2 */
++
++#define DP_LINK_SERVICE_IRQ_VECTOR_ESI0 0x2005 /* 1.2 */
+
+ #define DP_PSR_ERROR_STATUS 0x2006 /* XXX 1.2? */
+ # define DP_PSR_LINK_CRC_ERROR (1 << 0)
+@@ -308,31 +363,48 @@
+ # define DP_PSR_SINK_INTERNAL_ERROR 7
+ # define DP_PSR_SINK_STATE_MASK 0x07
+
++/* DP 1.2 Sideband message defines */
++/* peer device type - DP 1.2a Table 2-92 */
++#define DP_PEER_DEVICE_NONE 0x0
++#define DP_PEER_DEVICE_SOURCE_OR_SST 0x1
++#define DP_PEER_DEVICE_MST_BRANCHING 0x2
++#define DP_PEER_DEVICE_SST_SINK 0x3
++#define DP_PEER_DEVICE_DP_LEGACY_CONV 0x4
++
++/* DP 1.2 MST sideband request names DP 1.2a Table 2-80 */
++#define DP_LINK_ADDRESS 0x01
++#define DP_CONNECTION_STATUS_NOTIFY 0x02
++#define DP_ENUM_PATH_RESOURCES 0x10
++#define DP_ALLOCATE_PAYLOAD 0x11
++#define DP_QUERY_PAYLOAD 0x12
++#define DP_RESOURCE_STATUS_NOTIFY 0x13
++#define DP_CLEAR_PAYLOAD_ID_TABLE 0x14
++#define DP_REMOTE_DPCD_READ 0x20
++#define DP_REMOTE_DPCD_WRITE 0x21
++#define DP_REMOTE_I2C_READ 0x22
++#define DP_REMOTE_I2C_WRITE 0x23
++#define DP_POWER_UP_PHY 0x24
++#define DP_POWER_DOWN_PHY 0x25
++#define DP_SINK_EVENT_NOTIFY 0x30
++#define DP_QUERY_STREAM_ENC_STATUS 0x38
++
++/* DP 1.2 MST sideband nak reasons - table 2.84 */
++#define DP_NAK_WRITE_FAILURE 0x01
++#define DP_NAK_INVALID_READ 0x02
++#define DP_NAK_CRC_FAILURE 0x03
++#define DP_NAK_BAD_PARAM 0x04
++#define DP_NAK_DEFER 0x05
++#define DP_NAK_LINK_FAILURE 0x06
++#define DP_NAK_NO_RESOURCES 0x07
++#define DP_NAK_DPCD_FAIL 0x08
++#define DP_NAK_I2C_NAK 0x09
++#define DP_NAK_ALLOCATE_FAIL 0x0a
++
+ #define MODE_I2C_START 1
+ #define MODE_I2C_WRITE 2
+ #define MODE_I2C_READ 4
+ #define MODE_I2C_STOP 8
+
+-/**
+- * struct i2c_algo_dp_aux_data - driver interface structure for i2c over dp
+- * aux algorithm
+- * @running: set by the algo indicating whether an i2c is ongoing or whether
+- * the i2c bus is quiescent
+- * @address: i2c target address for the currently ongoing transfer
+- * @aux_ch: driver callback to transfer a single byte of the i2c payload
+- */
+-struct i2c_algo_dp_aux_data {
+- bool running;
+- u16 address;
+- int (*aux_ch) (struct i2c_adapter *adapter,
+- int mode, uint8_t write_byte,
+- uint8_t *read_byte);
+-};
+-
+-int
+-i2c_dp_aux_add_bus(struct i2c_adapter *adapter);
+-
+-
+ #define DP_LINK_STATUS_SIZE 6
+ bool drm_dp_channel_eq_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane_count);
+@@ -398,4 +470,125 @@
+ (dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP);
+ }
+
++/*
++ * DisplayPort AUX channel
++ */
++
++/**
++ * struct drm_dp_aux_msg - DisplayPort AUX channel transaction
++ * @address: address of the (first) register to access
++ * @request: contains the type of transaction (see DP_AUX_* macros)
++ * @reply: upon completion, contains the reply type of the transaction
++ * @buffer: pointer to a transmission or reception buffer
++ * @size: size of @buffer
++ */
++struct drm_dp_aux_msg {
++ unsigned int address;
++ u8 request;
++ u8 reply;
++ void *buffer;
++ size_t size;
++};
++
++/**
++ * struct drm_dp_aux - DisplayPort AUX channel
++ * @name: user-visible name of this AUX channel and the I2C-over-AUX adapter
++ * @ddc: I2C adapter that can be used for I2C-over-AUX communication
++ * @dev: pointer to struct device that is the parent for this AUX channel
++ * @hw_mutex: internal mutex used for locking transfers
++ * @transfer: transfers a message representing a single AUX transaction
++ *
++ * The .dev field should be set to a pointer to the device that implements
++ * the AUX channel.
++ *
++ * The .name field may be used to specify the name of the I2C adapter. If set to
++ * NULL, dev_name() of .dev will be used.
++ *
++ * Drivers provide a hardware-specific implementation of how transactions
++ * are executed via the .transfer() function. A pointer to a drm_dp_aux_msg
++ * structure describing the transaction is passed into this function. Upon
++ * success, the implementation should return the number of payload bytes
++ * that were transferred, or a negative error-code on failure. Helpers
++ * propagate errors from the .transfer() function, with the exception of
++ * the -EBUSY error, which causes a transaction to be retried. On a short
++ * transfer, helpers will return -EPROTO to make it simpler to check for failure.
++ *
++ * An AUX channel can also be used to transport I2C messages to a sink. A
++ * typical application of that is to access an EDID that's present in the
++ * sink device. The .transfer() function can also be used to execute such
++ * transactions. The drm_dp_aux_register_i2c_bus() function registers an
++ * I2C adapter that can be passed to drm_probe_ddc(). Upon removal, drivers
++ * should call drm_dp_aux_unregister_i2c_bus() to remove the I2C adapter.
++ *
++ * Note that the aux helper code assumes that the .transfer() function
++ * only modifies the reply field of the drm_dp_aux_msg structure. The
++ * retry logic and i2c helpers assume this is the case.
++ */
++struct drm_dp_aux {
++ const char *name;
++ struct i2c_adapter ddc;
++ struct device *dev;
++ struct mutex hw_mutex;
++ ssize_t (*transfer)(struct drm_dp_aux *aux,
++ struct drm_dp_aux_msg *msg);
++ unsigned i2c_nack_count, i2c_defer_count;
++};
++
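In practice a driver embeds struct drm_dp_aux in its output state, fills in .dev and .transfer, and registers the channel. A minimal sketch of that wiring (the "foo" names are illustrative; the transfer body, which would push msg->buffer through the hardware AUX FIFO, is elided):

	static ssize_t foo_aux_transfer(struct drm_dp_aux *aux,
					struct drm_dp_aux_msg *msg)
	{
		/* Issue msg->request at msg->address, move up to msg->size
		 * bytes through msg->buffer, set msg->reply, and return the
		 * number of payload bytes actually transferred. */
		return -EIO;	/* hardware access elided in this sketch */
	}

	static int foo_aux_init(struct device *dev, struct drm_dp_aux *aux)
	{
		aux->name = "foo DP AUX";	/* NULL falls back to dev_name() */
		aux->dev = dev;
		aux->transfer = foo_aux_transfer;

		/* Also registers the I2C-over-AUX adapter in aux->ddc. */
		return drm_dp_aux_register(aux);
	}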
++ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset,
++ void *buffer, size_t size);
++ssize_t drm_dp_dpcd_write(struct drm_dp_aux *aux, unsigned int offset,
++ void *buffer, size_t size);
++
++/**
++ * drm_dp_dpcd_readb() - read a single byte from the DPCD
++ * @aux: DisplayPort AUX channel
++ * @offset: address of the register to read
++ * @valuep: location where the value of the register will be stored
++ *
++ * Returns the number of bytes transferred (1) on success, or a negative
++ * error code on failure.
++ */
++static inline ssize_t drm_dp_dpcd_readb(struct drm_dp_aux *aux,
++ unsigned int offset, u8 *valuep)
++{
++ return drm_dp_dpcd_read(aux, offset, valuep, 1);
++}
++
++/**
++ * drm_dp_dpcd_writeb() - write a single byte to the DPCD
++ * @aux: DisplayPort AUX channel
++ * @offset: address of the register to write
++ * @value: value to write to the register
++ *
++ * Returns the number of bytes transferred (1) on success, or a negative
++ * error code on failure.
++ */
++static inline ssize_t drm_dp_dpcd_writeb(struct drm_dp_aux *aux,
++ unsigned int offset, u8 value)
++{
++ return drm_dp_dpcd_write(aux, offset, &value, 1);
++}
++
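Combined with the register map above, the byte helpers make common DPCD queries one-liners. For example, reading the sink count (a sketch; DP_GET_SINK_COUNT() reassembles the 7-bit count around the DP 1.2 CP_READY bit in the raw register):

	static int foo_read_sink_count(struct drm_dp_aux *aux)
	{
		u8 count;
		ssize_t err;

		err = drm_dp_dpcd_readb(aux, DP_SINK_COUNT, &count);
		if (err < 0)
			return err;

		return DP_GET_SINK_COUNT(count);
	}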
++int drm_dp_dpcd_read_link_status(struct drm_dp_aux *aux,
++ u8 status[DP_LINK_STATUS_SIZE]);
++
++/*
++ * DisplayPort link
++ */
++#define DP_LINK_CAP_ENHANCED_FRAMING (1 << 0)
++
++struct drm_dp_link {
++ unsigned char revision;
++ unsigned int rate;
++ unsigned int num_lanes;
++ unsigned long capabilities;
++};
++
++int drm_dp_link_probe(struct drm_dp_aux *aux, struct drm_dp_link *link);
++int drm_dp_link_power_up(struct drm_dp_aux *aux, struct drm_dp_link *link);
++int drm_dp_link_configure(struct drm_dp_aux *aux, struct drm_dp_link *link);
++
++int drm_dp_aux_register(struct drm_dp_aux *aux);
++void drm_dp_aux_unregister(struct drm_dp_aux *aux);
++
+ #endif /* _DRM_DP_HELPER_H_ */
+diff -Naur a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
+--- a/include/drm/drm_dp_mst_helper.h 1970-01-01 05:30:00.000000000 +0530
++++ b/include/drm/drm_dp_mst_helper.h 2015-03-26 14:42:38.750435422 +0530
+@@ -0,0 +1,513 @@
++/*
++ * Copyright © 2014 Red Hat.
++ *
++ * Permission to use, copy, modify, distribute, and sell this software and its
++ * documentation for any purpose is hereby granted without fee, provided that
++ * the above copyright notice appear in all copies and that both that copyright
++ * notice and this permission notice appear in supporting documentation, and
++ * that the name of the copyright holders not be used in advertising or
++ * publicity pertaining to distribution of the software without specific,
++ * written prior permission. The copyright holders make no representations
++ * about the suitability of this software for any purpose. It is provided "as
++ * is" without express or implied warranty.
++ *
++ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
++ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
++ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
++ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
++ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
++ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
++ * OF THIS SOFTWARE.
++ */
++#ifndef _DRM_DP_MST_HELPER_H_
++#define _DRM_DP_MST_HELPER_H_
++
++#include <linux/types.h>
++#include <drm/drm_dp_helper.h>
++
++struct drm_dp_mst_branch;
++
++/**
++ * struct drm_dp_vcpi - Virtual Channel Payload Identifier
++ * @vcpi: Virtual channel ID.
++ * @pbn: Payload Bandwidth Number for this channel
++ * @aligned_pbn: PBN aligned with slot size
++ * @num_slots: number of slots for this PBN
++ */
++struct drm_dp_vcpi {
++ int vcpi;
++ int pbn;
++ int aligned_pbn;
++ int num_slots;
++};
++
++/**
++ * struct drm_dp_mst_port - MST port
++ * @kref: reference count for this port.
++ * @guid_valid: for DP 1.2 devices if we have validated the GUID.
++ * @guid: guid for DP 1.2 device on this port.
++ * @port_num: port number
++ * @input: if this port is an input port.
++ * @mcs: message capability status - DP 1.2 spec.
++ * @ddps: DisplayPort Device Plug Status - DP 1.2
++ * @pdt: Peer Device Type
++ * @ldps: Legacy Device Plug Status
++ * @dpcd_rev: DPCD revision of device on this port
++ * @num_sdp_streams: Number of simultaneous streams
++ * @num_sdp_stream_sinks: Number of stream sinks
++ * @available_pbn: Available bandwidth for this port.
++ * @next: link to next port on this branch device
++ * @mstb: branch device attach below this port
++ * @aux: i2c aux transport to talk to device connected to this port.
++ * @parent: branch device parent of this port
++ * @vcpi: Virtual Channel Payload info for this port.
++ * @connector: DRM connector this port is connected to.
++ * @mgr: topology manager this port lives under.
++ *
++ * This structure represents an MST port endpoint on a device somewhere
++ * in the MST topology.
++ */
++struct drm_dp_mst_port {
++ struct kref kref;
++
++ /* if dpcd 1.2 device is on this port - its GUID info */
++ bool guid_valid;
++ u8 guid[16];
++
++ u8 port_num;
++ bool input;
++ bool mcs;
++ bool ddps;
++ u8 pdt;
++ bool ldps;
++ u8 dpcd_rev;
++ u8 num_sdp_streams;
++ u8 num_sdp_stream_sinks;
++ uint16_t available_pbn;
++ struct list_head next;
++ struct drm_dp_mst_branch *mstb; /* pointer to an mstb if this port has one */
++ struct drm_dp_aux aux; /* i2c bus for this port? */
++ struct drm_dp_mst_branch *parent;
++
++ struct drm_dp_vcpi vcpi;
++ struct drm_connector *connector;
++ struct drm_dp_mst_topology_mgr *mgr;
++
++ struct edid *cached_edid; /* for DP logical ports - make tiling work */
++};
++
++/**
++ * struct drm_dp_mst_branch - MST branch device.
++ * @kref: reference count for this port.
++ * @rad: Relative Address to talk to this branch device.
++ * @lct: Link count total to talk to this branch device.
++ * @num_ports: number of ports on the branch.
++ * @msg_slots: one bit per transmitted msg slot.
++ * @ports: linked list of ports on this branch.
++ * @port_parent: pointer to the port parent, NULL if toplevel.
++ * @mgr: topology manager for this branch device.
++ * @tx_slots: transmission slots for this device.
++ * @last_seqno: last sequence number used to talk to this.
++ * @link_address_sent: if a link address message has been sent to this device yet.
++ *
++ * This structure represents an MST branch device, there is one
++ * primary branch device at the root, along with any others connected
++ * to downstream ports
++ */
++struct drm_dp_mst_branch {
++ struct kref kref;
++ u8 rad[8];
++ u8 lct;
++ int num_ports;
++
++ int msg_slots;
++ struct list_head ports;
++
++ /* list of tx ops queue for this port */
++ struct drm_dp_mst_port *port_parent;
++ struct drm_dp_mst_topology_mgr *mgr;
++
++ /* slots are protected by mstb->mgr->qlock */
++ struct drm_dp_sideband_msg_tx *tx_slots[2];
++ int last_seqno;
++ bool link_address_sent;
++};
++
++
++/* sideband msg header - not bit struct */
++struct drm_dp_sideband_msg_hdr {
++ u8 lct;
++ u8 lcr;
++ u8 rad[8];
++ bool broadcast;
++ bool path_msg;
++ u8 msg_len;
++ bool somt;
++ bool eomt;
++ bool seqno;
++};
++
++struct drm_dp_nak_reply {
++ u8 guid[16];
++ u8 reason;
++ u8 nak_data;
++};
++
++struct drm_dp_link_address_ack_reply {
++ u8 guid[16];
++ u8 nports;
++ struct drm_dp_link_addr_reply_port {
++ bool input_port;
++ u8 peer_device_type;
++ u8 port_number;
++ bool mcs;
++ bool ddps;
++ bool legacy_device_plug_status;
++ u8 dpcd_revision;
++ u8 peer_guid[16];
++ u8 num_sdp_streams;
++ u8 num_sdp_stream_sinks;
++ } ports[16];
++};
++
++struct drm_dp_remote_dpcd_read_ack_reply {
++ u8 port_number;
++ u8 num_bytes;
++ u8 bytes[255];
++};
++
++struct drm_dp_remote_dpcd_write_ack_reply {
++ u8 port_number;
++};
++
++struct drm_dp_remote_dpcd_write_nak_reply {
++ u8 port_number;
++ u8 reason;
++ u8 bytes_written_before_failure;
++};
++
++struct drm_dp_remote_i2c_read_ack_reply {
++ u8 port_number;
++ u8 num_bytes;
++ u8 bytes[255];
++};
++
++struct drm_dp_remote_i2c_read_nak_reply {
++ u8 port_number;
++ u8 nak_reason;
++ u8 i2c_nak_transaction;
++};
++
++struct drm_dp_remote_i2c_write_ack_reply {
++ u8 port_number;
++};
++
++
++struct drm_dp_sideband_msg_rx {
++ u8 chunk[48];
++ u8 msg[256];
++ u8 curchunk_len;
++ u8 curchunk_idx; /* chunk we are parsing now */
++ u8 curchunk_hdrlen;
++ u8 curlen; /* total length of the msg */
++ bool have_somt;
++ bool have_eomt;
++ struct drm_dp_sideband_msg_hdr initial_hdr;
++};
++
++
++struct drm_dp_allocate_payload {
++ u8 port_number;
++ u8 number_sdp_streams;
++ u8 vcpi;
++ u16 pbn;
++ u8 sdp_stream_sink[8];
++};
++
++struct drm_dp_allocate_payload_ack_reply {
++ u8 port_number;
++ u8 vcpi;
++ u16 allocated_pbn;
++};
++
++struct drm_dp_connection_status_notify {
++ u8 guid[16];
++ u8 port_number;
++ bool legacy_device_plug_status;
++ bool displayport_device_plug_status;
++ bool message_capability_status;
++ bool input_port;
++ u8 peer_device_type;
++};
++
++struct drm_dp_remote_dpcd_read {
++ u8 port_number;
++ u32 dpcd_address;
++ u8 num_bytes;
++};
++
++struct drm_dp_remote_dpcd_write {
++ u8 port_number;
++ u32 dpcd_address;
++ u8 num_bytes;
++ u8 *bytes;
++};
++
++struct drm_dp_remote_i2c_read {
++ u8 num_transactions;
++ u8 port_number;
++ struct {
++ u8 i2c_dev_id;
++ u8 num_bytes;
++ u8 *bytes;
++ u8 no_stop_bit;
++ u8 i2c_transaction_delay;
++ } transactions[4];
++ u8 read_i2c_device_id;
++ u8 num_bytes_read;
++};
++
++struct drm_dp_remote_i2c_write {
++ u8 port_number;
++ u8 write_i2c_device_id;
++ u8 num_bytes;
++ u8 *bytes;
++};
++
++/* this covers ENUM_RESOURCES, POWER_DOWN_PHY, POWER_UP_PHY */
++struct drm_dp_port_number_req {
++ u8 port_number;
++};
++
++struct drm_dp_enum_path_resources_ack_reply {
++ u8 port_number;
++ u16 full_payload_bw_number;
++ u16 avail_payload_bw_number;
++};
++
++/* covers POWER_DOWN_PHY, POWER_UP_PHY */
++struct drm_dp_port_number_rep {
++ u8 port_number;
++};
++
++struct drm_dp_query_payload {
++ u8 port_number;
++ u8 vcpi;
++};
++
++struct drm_dp_resource_status_notify {
++ u8 port_number;
++ u8 guid[16];
++ u16 available_pbn;
++};
++
++struct drm_dp_query_payload_ack_reply {
++ u8 port_number;
++ u8 allocated_pbn;
++};
++
++struct drm_dp_sideband_msg_req_body {
++ u8 req_type;
++ union ack_req {
++ struct drm_dp_connection_status_notify conn_stat;
++ struct drm_dp_port_number_req port_num;
++ struct drm_dp_resource_status_notify resource_stat;
++
++ struct drm_dp_query_payload query_payload;
++ struct drm_dp_allocate_payload allocate_payload;
++
++ struct drm_dp_remote_dpcd_read dpcd_read;
++ struct drm_dp_remote_dpcd_write dpcd_write;
++
++ struct drm_dp_remote_i2c_read i2c_read;
++ struct drm_dp_remote_i2c_write i2c_write;
++ } u;
++};
++
++struct drm_dp_sideband_msg_reply_body {
++ u8 reply_type;
++ u8 req_type;
++ union ack_replies {
++ struct drm_dp_nak_reply nak;
++ struct drm_dp_link_address_ack_reply link_addr;
++ struct drm_dp_port_number_rep port_number;
++
++ struct drm_dp_enum_path_resources_ack_reply path_resources;
++ struct drm_dp_allocate_payload_ack_reply allocate_payload;
++ struct drm_dp_query_payload_ack_reply query_payload;
++
++ struct drm_dp_remote_dpcd_read_ack_reply remote_dpcd_read_ack;
++ struct drm_dp_remote_dpcd_write_ack_reply remote_dpcd_write_ack;
++ struct drm_dp_remote_dpcd_write_nak_reply remote_dpcd_write_nack;
++
++ struct drm_dp_remote_i2c_read_ack_reply remote_i2c_read_ack;
++ struct drm_dp_remote_i2c_read_nak_reply remote_i2c_read_nack;
++ struct drm_dp_remote_i2c_write_ack_reply remote_i2c_write_ack;
++ } u;
++};
++
++/* msg is queued to be put into a slot */
++#define DRM_DP_SIDEBAND_TX_QUEUED 0
++/* msg has started transmitting on a slot - still on msgq */
++#define DRM_DP_SIDEBAND_TX_START_SEND 1
++/* msg has finished transmitting on a slot - removed from msgq only in slot */
++#define DRM_DP_SIDEBAND_TX_SENT 2
++/* msg has received a response - removed from slot */
++#define DRM_DP_SIDEBAND_TX_RX 3
++#define DRM_DP_SIDEBAND_TX_TIMEOUT 4
++
++struct drm_dp_sideband_msg_tx {
++ u8 msg[256];
++ u8 chunk[48];
++ u8 cur_offset;
++ u8 cur_len;
++ struct drm_dp_mst_branch *dst;
++ struct list_head next;
++ int seqno;
++ int state;
++ bool path_msg;
++ struct drm_dp_sideband_msg_reply_body reply;
++};
++
++/* sideband msg handler */
++struct drm_dp_mst_topology_mgr;
++struct drm_dp_mst_topology_cbs {
++ /* create a connector for a port */
++ struct drm_connector *(*add_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *path);
++ void (*destroy_connector)(struct drm_dp_mst_topology_mgr *mgr,
++ struct drm_connector *connector);
++ void (*hotplug)(struct drm_dp_mst_topology_mgr *mgr);
++
++};
++
++#define DP_MAX_PAYLOAD (sizeof(unsigned long) * 8)
++
++#define DP_PAYLOAD_LOCAL 1
++#define DP_PAYLOAD_REMOTE 2
++#define DP_PAYLOAD_DELETE_LOCAL 3
++
++struct drm_dp_payload {
++ int payload_state;
++ int start_slot;
++ int num_slots;
++ int vcpi;
++};
++
++/**
++ * struct drm_dp_mst_topology_mgr - DisplayPort MST manager
++ * @dev: device pointer for adding i2c devices etc.
++ * @cbs: callbacks for connector addition and destruction.
++ * @max_dpcd_transaction_bytes: maximum number of bytes to read/write in one go.
++ * @aux: aux channel for the DP connector.
++ * @max_payloads: maximum number of payloads the GPU can generate.
++ * @conn_base_id: DRM connector ID this mgr is connected to.
++ * @down_rep_recv: msg receiver state for down replies.
++ * @up_req_recv: msg receiver state for up requests.
++ * @lock: protects mst state, primary, guid, dpcd.
++ * @mst_state: if this manager is enabled for an MST capable port.
++ * @mst_primary: pointer to the primary branch device.
++ * @guid_valid: GUID valid for the primary branch device.
++ * @guid: GUID for primary port.
++ * @dpcd: cache of DPCD for primary port.
++ * @pbn_div: PBN to slots divisor.
++ *
++ * This struct represents the toplevel DisplayPort MST topology manager.
++ * There should be one instance of this for every MST capable DP connector
++ * on the GPU.
++ */
++struct drm_dp_mst_topology_mgr {
++
++ struct device *dev;
++ struct drm_dp_mst_topology_cbs *cbs;
++ int max_dpcd_transaction_bytes;
++ struct drm_dp_aux *aux; /* auxch for this topology mgr to use */
++ int max_payloads;
++ int conn_base_id;
++
++ /* only ever accessed from the workqueue - which should be serialised */
++ struct drm_dp_sideband_msg_rx down_rep_recv;
++ struct drm_dp_sideband_msg_rx up_req_recv;
++
++ /* pointer to info about the initial MST device */
++ struct mutex lock; /* protects mst_state + primary + guid + dpcd */
++
++ bool mst_state;
++ struct drm_dp_mst_branch *mst_primary;
++ /* primary MST device GUID */
++ bool guid_valid;
++ u8 guid[16];
++ u8 dpcd[DP_RECEIVER_CAP_SIZE];
++ u8 sink_count;
++ int pbn_div;
++ int total_slots;
++ int avail_slots;
++ int total_pbn;
++
++ /* messages to be transmitted */
++ /* qlock protects the upq/downq and in_progress,
++ the mstb tx_slots and txmsg->state once they are queued */
++ struct mutex qlock;
++ struct list_head tx_msg_downq;
++ struct list_head tx_msg_upq;
++ bool tx_down_in_progress;
++ bool tx_up_in_progress;
++
++ /* payload info + lock for it */
++ struct mutex payload_lock;
++ struct drm_dp_vcpi **proposed_vcpis;
++ struct drm_dp_payload *payloads;
++ unsigned long payload_mask;
++ unsigned long vcpi_mask;
++
++ wait_queue_head_t tx_waitq;
++ struct work_struct work;
++
++ struct work_struct tx_work;
++};
++
++int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr, struct device *dev, struct drm_dp_aux *aux, int max_dpcd_transaction_bytes, int max_payloads, int conn_base_id);
++
++void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr);
++
++
++int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state);
++
++
++int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled);
++
++
++enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
++
++struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
++
++
++int drm_dp_calc_pbn_mode(int clock, int bpp);
++
++
++bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, int pbn, int *slots);
++
++
++void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
++
++
++void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
++ struct drm_dp_mst_port *port);
++
++
++int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
++ int pbn);
++
++
++int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr);
++
++
++int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr);
++
++int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr);
++
++void drm_dp_mst_dump_topology(struct seq_file *m,
++ struct drm_dp_mst_topology_mgr *mgr);
++
++void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr);
++int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr);
++#endif
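
Taken together, the manager struct and the prototypes above are the whole driver-facing surface of the MST helper. A minimal sketch of how a driver might wire them up (the mydrv_* names, the embedded mst_mgr/aux/port fields, and the numeric limits are hypothetical, not part of this patch):

/* Hypothetical driver state; real drivers embed these in their connector. */
struct mydrv_connector {
	struct drm_connector base;
	struct drm_device *dev;
	struct drm_dp_aux aux;
	struct drm_dp_mst_topology_mgr mst_mgr;
	struct drm_dp_mst_port *port;
};

static struct drm_connector *
mydrv_mst_add_connector(struct drm_dp_mst_topology_mgr *mgr,
			struct drm_dp_mst_port *port, const char *path)
{
	/* allocate and register a drm_connector for the new MST port */
	return NULL;
}

static void mydrv_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
{
	/* tell userspace to reprobe, e.g. via drm_kms_helper_hotplug_event() */
}

static struct drm_dp_mst_topology_cbs mydrv_mst_cbs = {
	.add_connector = mydrv_mst_add_connector,
	.hotplug = mydrv_mst_hotplug,
};

static int mydrv_mst_init(struct mydrv_connector *c)
{
	c->mst_mgr.cbs = &mydrv_mst_cbs;
	/* 16-byte DPCD transactions, up to 8 payloads */
	return drm_dp_mst_topology_mgr_init(&c->mst_mgr, c->dev->dev, &c->aux,
					    16, 8, c->base.base.id);
}

/* Modeset path: convert the mode to PBN, take a VCPI, program payloads. */
static int mydrv_mst_enable(struct mydrv_connector *c,
			    struct drm_display_mode *mode, int bpp)
{
	int pbn = drm_dp_calc_pbn_mode(mode->clock, bpp);
	int slots;	/* number of timeslots granted for this stream */

	if (!drm_dp_mst_allocate_vcpi(&c->mst_mgr, c->port, pbn, &slots))
		return -EINVAL;
	drm_dp_update_payload_part1(&c->mst_mgr);
	/* ... enable the stream in hardware, wait for ACT handling ... */
	drm_dp_check_act_status(&c->mst_mgr);
	return drm_dp_update_payload_part2(&c->mst_mgr);
}

On a short HPD pulse the driver reads the ESI bytes from DPCD and feeds them to drm_dp_mst_hpd_irq(), which drives the sideband receive state machines declared above.
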
+diff -Naur a/include/drm/drm_edid.h b/include/drm/drm_edid.h
+--- a/include/drm/drm_edid.h 2015-03-26 14:43:27.886436386 +0530
++++ b/include/drm/drm_edid.h 2015-03-26 14:42:38.754435422 +0530
+@@ -27,12 +27,14 @@
+
+ #define EDID_LENGTH 128
+ #define DDC_ADDR 0x50
++#define DDC_ADDR2 0x52 /* E-DDC 1.2 - where DisplayID can hide */
+
+ #define CEA_EXT 0x02
+ #define VTB_EXT 0x10
+ #define DI_EXT 0x40
+ #define LS_EXT 0x50
+ #define MI_EXT 0x60
++#define DISPLAYID_EXT 0x70
+
+ struct est_timings {
+ u8 t1;
+@@ -202,6 +204,66 @@
+ #define DRM_EDID_FEATURE_PM_SUSPEND (1 << 6)
+ #define DRM_EDID_FEATURE_PM_STANDBY (1 << 7)
+
++#define DRM_EDID_HDMI_DC_48 (1 << 6)
++#define DRM_EDID_HDMI_DC_36 (1 << 5)
++#define DRM_EDID_HDMI_DC_30 (1 << 4)
++#define DRM_EDID_HDMI_DC_Y444 (1 << 3)
++
++/* ELD Header Block */
++#define DRM_ELD_HEADER_BLOCK_SIZE 4
++
++#define DRM_ELD_VER 0
++# define DRM_ELD_VER_SHIFT 3
++# define DRM_ELD_VER_MASK (0x1f << 3)
++
++#define DRM_ELD_BASELINE_ELD_LEN 2 /* in dwords! */
++
++/* ELD Baseline Block for ELD_Ver == 2 */
++#define DRM_ELD_CEA_EDID_VER_MNL 4
++# define DRM_ELD_CEA_EDID_VER_SHIFT 5
++# define DRM_ELD_CEA_EDID_VER_MASK (7 << 5)
++# define DRM_ELD_CEA_EDID_VER_NONE (0 << 5)
++# define DRM_ELD_CEA_EDID_VER_CEA861 (1 << 5)
++# define DRM_ELD_CEA_EDID_VER_CEA861A (2 << 5)
++# define DRM_ELD_CEA_EDID_VER_CEA861BCD (3 << 5)
++# define DRM_ELD_MNL_SHIFT 0
++# define DRM_ELD_MNL_MASK (0x1f << 0)
++
++#define DRM_ELD_SAD_COUNT_CONN_TYPE 5
++# define DRM_ELD_SAD_COUNT_SHIFT 4
++# define DRM_ELD_SAD_COUNT_MASK (0xf << 4)
++# define DRM_ELD_CONN_TYPE_SHIFT 2
++# define DRM_ELD_CONN_TYPE_MASK (3 << 2)
++# define DRM_ELD_CONN_TYPE_HDMI (0 << 2)
++# define DRM_ELD_CONN_TYPE_DP (1 << 2)
++# define DRM_ELD_SUPPORTS_AI (1 << 1)
++# define DRM_ELD_SUPPORTS_HDCP (1 << 0)
++
++#define DRM_ELD_AUD_SYNCH_DELAY 6 /* in units of 2 ms */
++# define DRM_ELD_AUD_SYNCH_DELAY_MAX 0xfa /* 500 ms */
++
++#define DRM_ELD_SPEAKER 7
++# define DRM_ELD_SPEAKER_RLRC (1 << 6)
++# define DRM_ELD_SPEAKER_FLRC (1 << 5)
++# define DRM_ELD_SPEAKER_RC (1 << 4)
++# define DRM_ELD_SPEAKER_RLR (1 << 3)
++# define DRM_ELD_SPEAKER_FC (1 << 2)
++# define DRM_ELD_SPEAKER_LFE (1 << 1)
++# define DRM_ELD_SPEAKER_FLR (1 << 0)
++
++#define DRM_ELD_PORT_ID 8 /* offsets 8..15 inclusive */
++# define DRM_ELD_PORT_ID_LEN 8
++
++#define DRM_ELD_MANUFACTURER_NAME0 16
++#define DRM_ELD_MANUFACTURER_NAME1 17
++
++#define DRM_ELD_PRODUCT_CODE0 18
++#define DRM_ELD_PRODUCT_CODE1 19
++
++#define DRM_ELD_MONITOR_NAME_STRING 20 /* offsets 20..(20+mnl-1) inclusive */
++
++#define DRM_ELD_CEA_SAD(mnl, sad) (20 + (mnl) + 3 * (sad))
++
+ struct edid {
+ u8 header[8];
+ /* Vendor & product info */
+@@ -274,4 +336,56 @@
+ drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame,
+ const struct drm_display_mode *mode);
+
++/**
++ * drm_eld_mnl - Get ELD monitor name length in bytes.
++ * @eld: pointer to an eld memory structure with mnl set
++ */
++static inline int drm_eld_mnl(const uint8_t *eld)
++{
++ return (eld[DRM_ELD_CEA_EDID_VER_MNL] & DRM_ELD_MNL_MASK) >> DRM_ELD_MNL_SHIFT;
++}
++
++/**
++ * drm_eld_sad_count - Get ELD SAD count.
++ * @eld: pointer to an eld memory structure with sad_count set
++ */
++static inline int drm_eld_sad_count(const uint8_t *eld)
++{
++ return (eld[DRM_ELD_SAD_COUNT_CONN_TYPE] & DRM_ELD_SAD_COUNT_MASK) >>
++ DRM_ELD_SAD_COUNT_SHIFT;
++}
++
++/**
++ * drm_eld_calc_baseline_block_size - Calculate baseline block size in bytes
++ * @eld: pointer to an eld memory structure with mnl and sad_count set
++ *
++ * This is a helper for determining the payload size of the baseline block, in
++ * bytes, for e.g. setting the Baseline_ELD_Len field in the ELD header block.
++ */
++static inline int drm_eld_calc_baseline_block_size(const uint8_t *eld)
++{
++ return DRM_ELD_MONITOR_NAME_STRING - DRM_ELD_HEADER_BLOCK_SIZE +
++ drm_eld_mnl(eld) + drm_eld_sad_count(eld) * 3;
++}
++
++/**
++ * drm_eld_size - Get ELD size in bytes
++ * @eld: pointer to a complete eld memory structure
++ *
++ * The returned value does not include the vendor block. It is vendor
++ * specific and consists of the remaining bytes in the ELD memory buffer after
++ * drm_eld_size() bytes of header and baseline block.
++ *
++ * The returned value is guaranteed to be a multiple of 4.
++ */
++static inline int drm_eld_size(const uint8_t *eld)
++{
++ return DRM_ELD_HEADER_BLOCK_SIZE + eld[DRM_ELD_BASELINE_ELD_LEN] * 4;
++}
++
++struct edid *drm_do_get_edid(struct drm_connector *connector,
++ int (*get_edid_block)(void *data, u8 *buf, unsigned int block,
++ size_t len),
++ void *data);
++
+ #endif /* __DRM_EDID_H__ */
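
The ELD offsets above reduce parsing to simple arithmetic: a 4-byte header, a baseline block whose dword length sits at offset 2, a monitor name of mnl bytes starting at offset 20, and 3-byte SADs packed right after the name, which is exactly what DRM_ELD_CEA_SAD(mnl, sad) computes. A hedged sketch of a dumper built on these helpers (dump_eld is illustrative, not part of this patch):

static void dump_eld(const uint8_t *eld)
{
	int mnl = drm_eld_mnl(eld);		/* monitor name length */
	int count = drm_eld_sad_count(eld);	/* number of 3-byte SADs */
	int i;

	pr_info("ELD: %d bytes, monitor \"%.*s\"\n", drm_eld_size(eld),
		mnl, (const char *)&eld[DRM_ELD_MONITOR_NAME_STRING]);

	for (i = 0; i < count; i++) {
		const uint8_t *sad = &eld[DRM_ELD_CEA_SAD(mnl, i)];

		pr_info("  SAD %d: %02x %02x %02x\n", i,
			sad[0], sad[1], sad[2]);
	}
}
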
+diff -Naur a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
+--- a/include/drm/drm_fb_helper.h 2015-03-26 14:43:27.890436386 +0530
++++ b/include/drm/drm_fb_helper.h 2015-03-26 14:42:38.754435422 +0530
+@@ -34,9 +34,14 @@
+
+ #include <linux/kgdb.h>
+
++struct drm_fb_offset {
++ int x, y;
++};
++
+ struct drm_fb_helper_crtc {
+ struct drm_mode_set mode_set;
+ struct drm_display_mode *desired_mode;
++ int x, y;
+ };
+
+ struct drm_fb_helper_surface_size {
+@@ -55,7 +60,7 @@
+ * save the current lut when force-restoring the fbdev for e.g.
+ * kdbg.
+ * @fb_probe: Driver callback to allocate and initialize the fbdev info
+- * structure. Futhermore it also needs to allocate the drm
++ * structure. Furthermore it also needs to allocate the drm
+ * framebuffer used to back the fbdev.
+ * @initial_config: Setup an initial fbdev display configuration
+ *
+@@ -72,12 +77,12 @@
+ bool (*initial_config)(struct drm_fb_helper *fb_helper,
+ struct drm_fb_helper_crtc **crtcs,
+ struct drm_display_mode **modes,
++ struct drm_fb_offset *offsets,
+ bool *enabled, int width, int height);
+ };
+
+ struct drm_fb_helper_connector {
+ struct drm_connector *connector;
+- struct drm_cmdline_mode cmdline_mode;
+ };
+
+ struct drm_fb_helper {
+@@ -86,8 +91,9 @@
+ int crtc_count;
+ struct drm_fb_helper_crtc *crtc_info;
+ int connector_count;
++ int connector_info_alloc_count;
+ struct drm_fb_helper_connector **connector_info;
+- struct drm_fb_helper_funcs *funcs;
++ const struct drm_fb_helper_funcs *funcs;
+ struct fb_info *fbdev;
+ u32 pseudo_palette[17];
+ struct list_head kernel_fb_list;
+@@ -97,6 +103,8 @@
+ bool delayed_hotplug;
+ };
+
++void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper,
++ const struct drm_fb_helper_funcs *funcs);
+ int drm_fb_helper_init(struct drm_device *dev,
+ struct drm_fb_helper *helper, int crtc_count,
+ int max_conn);
+@@ -108,7 +116,7 @@
+ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
+ struct fb_info *info);
+
+-bool drm_fb_helper_restore_fbdev_mode(struct drm_fb_helper *fb_helper);
++bool drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper);
+ void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper,
+ uint32_t fb_width, uint32_t fb_height);
+ void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
+@@ -121,5 +129,14 @@
+ int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper);
+ int drm_fb_helper_debug_enter(struct fb_info *info);
+ int drm_fb_helper_debug_leave(struct fb_info *info);
+-
++struct drm_display_mode *
++drm_has_preferred_mode(struct drm_fb_helper_connector *fb_connector,
++ int width, int height);
++struct drm_display_mode *
++drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn,
++ int width, int height);
++
++int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, struct drm_connector *connector);
++int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
++ struct drm_connector *connector);
+ #endif
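
With the reworked helper the fbdev bring-up sequence is: prepare, init, add connectors, then request an initial configuration. A sketch under stated assumptions: mydrv_fb_probe is a placeholder callback, and drm_fb_helper_initial_config()/drm_fb_helper_fini() are existing helpers declared elsewhere in this header.

static int mydrv_fb_probe(struct drm_fb_helper *helper,
			  struct drm_fb_helper_surface_size *sizes)
{
	/* allocate a backing drm_framebuffer and fill in helper->fbdev */
	return -ENOSYS;	/* driver-specific */
}

static const struct drm_fb_helper_funcs mydrv_fb_funcs = {
	.fb_probe = mydrv_fb_probe,
};

static int mydrv_fbdev_init(struct drm_device *dev,
			    struct drm_fb_helper *helper)
{
	int ret;

	drm_fb_helper_prepare(dev, helper, &mydrv_fb_funcs);

	ret = drm_fb_helper_init(dev, helper,
				 dev->mode_config.num_crtc, 4 /* max_conn */);
	if (ret)
		return ret;

	ret = drm_fb_helper_single_add_all_connectors(helper);
	if (ret) {
		drm_fb_helper_fini(helper);
		return ret;
	}

	drm_fb_helper_initial_config(helper, 32 /* preferred bpp */);
	return 0;
}
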
+diff -Naur a/include/drm/drm_flip_work.h b/include/drm/drm_flip_work.h
+--- a/include/drm/drm_flip_work.h 2015-03-26 14:43:27.890436386 +0530
++++ b/include/drm/drm_flip_work.h 2015-03-26 14:42:38.754435422 +0530
+@@ -25,6 +25,7 @@
+ #define DRM_FLIP_WORK_H
+
+ #include <linux/kfifo.h>
++#include <linux/spinlock.h>
+ #include <linux/workqueue.h>
+
+ /**
+@@ -32,9 +33,9 @@
+ *
+ * Util to queue up work to run from work-queue context after flip/vblank.
+ * Typically this can be used to defer unref of framebuffers, cursor
+- * bo's, etc until after vblank. The APIs are all safe (and lockless)
+- * for up to one producer and once consumer at a time. The single-consumer
+- * aspect is ensured by committing the queued work to a single work-queue.
++ * bos, etc. until after vblank. The APIs are all thread-safe.
++ * Moreover, drm_flip_work_queue_task and drm_flip_work_queue can be called
++ * in atomic context.
+ */
+
+ struct drm_flip_work;
+@@ -51,25 +52,40 @@
+ typedef void (*drm_flip_func_t)(struct drm_flip_work *work, void *val);
+
+ /**
++ * struct drm_flip_task - flip work task
++ * @node: list entry element
++ * @data: data to pass to work->func
++ */
++struct drm_flip_task {
++ struct list_head node;
++ void *data;
++};
++
++/**
+ * struct drm_flip_work - flip work queue
+ * @name: debug name
+- * @pending: number of queued but not committed items
+- * @count: number of committed items
+ * @func: callback fxn called for each committed item
+ * @worker: worker which calls @func
++ * @queued: queued tasks
++ * @commited: committed tasks
++ * @lock: lock to access the queued and commited lists
+ */
+ struct drm_flip_work {
+ const char *name;
+- atomic_t pending, count;
+ drm_flip_func_t func;
+ struct work_struct worker;
+- DECLARE_KFIFO_PTR(fifo, void *);
++ struct list_head queued;
++ struct list_head commited;
++ spinlock_t lock;
+ };
+
++struct drm_flip_task *drm_flip_work_allocate_task(void *data, gfp_t flags);
++void drm_flip_work_queue_task(struct drm_flip_work *work,
++ struct drm_flip_task *task);
+ void drm_flip_work_queue(struct drm_flip_work *work, void *val);
+ void drm_flip_work_commit(struct drm_flip_work *work,
+ struct workqueue_struct *wq);
+-int drm_flip_work_init(struct drm_flip_work *work, int size,
++void drm_flip_work_init(struct drm_flip_work *work,
+ const char *name, drm_flip_func_t func);
+ void drm_flip_work_cleanup(struct drm_flip_work *work);
+
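
The new list-based implementation keeps the old two-step usage model: queue from the flip path (now safe in atomic context), then commit from the vblank handler to push the work onto a workqueue. A minimal sketch, assuming a driver crtc wrapper (mydrv_crtc and the mydrv_* functions are hypothetical):

struct mydrv_crtc {
	struct drm_crtc base;
	struct drm_flip_work unref_work;
};

/* runs from workqueue context once the flip has completed */
static void mydrv_unref_fb(struct drm_flip_work *work, void *val)
{
	drm_framebuffer_unreference(val);
}

static void mydrv_crtc_init(struct mydrv_crtc *crtc)
{
	drm_flip_work_init(&crtc->unref_work, "fb_unref", mydrv_unref_fb);
}

/* flip path, possibly atomic: just queue the old framebuffer */
static void mydrv_queue_fb_unref(struct mydrv_crtc *crtc,
				 struct drm_framebuffer *fb)
{
	drm_flip_work_queue(&crtc->unref_work, fb);
}

/* vblank handler: commit everything queued so far to a workqueue */
static void mydrv_flip_done(struct mydrv_crtc *crtc)
{
	drm_flip_work_commit(&crtc->unref_work, system_unbound_wq);
}

Note that drm_flip_work_init() can no longer fail, since the kfifo preallocation is gone.
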
+diff -Naur a/include/drm/drm_gem.h b/include/drm/drm_gem.h
+--- a/include/drm/drm_gem.h 1970-01-01 05:30:00.000000000 +0530
++++ b/include/drm/drm_gem.h 2015-03-26 14:42:38.754435422 +0530
+@@ -0,0 +1,190 @@
++#ifndef __DRM_GEM_H__
++#define __DRM_GEM_H__
++
++/*
++ * GEM Graphics Execution Manager Driver Interfaces
++ *
++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * Copyright (c) 2009-2010, Code Aurora Forum.
++ * All rights reserved.
++ * Copyright © 2014 Intel Corporation
++ * Daniel Vetter <daniel.vetter@ffwll.ch>
++ *
++ * Author: Rickard E. (Rik) Faith <faith@valinux.com>
++ * Author: Gareth Hughes <gareth@valinux.com>
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++/**
++ * This structure defines the GEM buffer object, which will be used by the
++ * DRM for its buffer objects.
++ */
++struct drm_gem_object {
++ /** Reference count of this object */
++ struct kref refcount;
++
++ /**
++ * handle_count - gem file_priv handle count of this object
++ *
++ * Each handle also holds a reference. Note that when the handle_count
++ * drops to 0 any global names (e.g. the id in the flink namespace) will
++ * be cleared.
++ *
++ * Protected by dev->object_name_lock.
++ */
++ unsigned handle_count;
++
++ /** Related drm device */
++ struct drm_device *dev;
++
++ /** File representing the shmem storage */
++ struct file *filp;
++
++ /* Mapping info for this object */
++ struct drm_vma_offset_node vma_node;
++
++ /**
++ * Size of the object, in bytes. Immutable over the object's
++ * lifetime.
++ */
++ size_t size;
++
++ /**
++ * Global name for this object, starts at 1. 0 means unnamed.
++ * Access is covered by the object_name_lock in the related drm_device
++ */
++ int name;
++
++ /**
++ * Memory domains. These monitor which caches contain read/write data
++ * related to the object. When transitioning from one set of domains
++ * to another, the driver is called to ensure that caches are suitably
++ * flushed and invalidated
++ */
++ uint32_t read_domains;
++ uint32_t write_domain;
++
++ /**
++ * While validating an exec operation, the
++ * new read/write domain values are computed here.
++ * They will be transferred to the above values
++ * at the point that any cache flushing occurs
++ */
++ uint32_t pending_read_domains;
++ uint32_t pending_write_domain;
++
++ /**
++ * dma_buf - dma buf associated with this GEM object
++ *
++ * Pointer to the dma-buf associated with this gem object (either
++ * through importing or exporting). We break the resulting reference
++ * loop when the last gem handle for this object is released.
++ *
++ * Protected by dev->object_name_lock.
++ */
++ struct dma_buf *dma_buf;
++
++ /**
++ * import_attach - dma buf attachment backing this object
++ *
++ * Any foreign dma_buf imported as a gem object has this set to the
++ * attachment point for the device. This is invariant over the lifetime
++ * of a gem object.
++ *
++ * The driver's ->gem_free_object callback is responsible for cleaning
++ * up the dma_buf attachment and references acquired at import time.
++ *
++ * Note that the drm gem/prime core does not depend upon drivers setting
++ * this field any more. So for drivers where this doesn't make sense
++ * (e.g. virtual devices or a displaylink behind a USB bus) they can
++ * simply leave it as NULL.
++ */
++ struct dma_buf_attachment *import_attach;
++
++ /**
++ * dumb - created as dumb buffer
++ * Whether the gem object was created using the dumb buffer interface;
++ * as such it may not be used for GPU rendering.
++ */
++ bool dumb;
++};
++
++void drm_gem_object_release(struct drm_gem_object *obj);
++void drm_gem_object_free(struct kref *kref);
++int drm_gem_object_init(struct drm_device *dev,
++ struct drm_gem_object *obj, size_t size);
++void drm_gem_private_object_init(struct drm_device *dev,
++ struct drm_gem_object *obj, size_t size);
++void drm_gem_vm_open(struct vm_area_struct *vma);
++void drm_gem_vm_close(struct vm_area_struct *vma);
++int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
++ struct vm_area_struct *vma);
++int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
++
++static inline void
++drm_gem_object_reference(struct drm_gem_object *obj)
++{
++ kref_get(&obj->refcount);
++}
++
++static inline void
++drm_gem_object_unreference(struct drm_gem_object *obj)
++{
++ if (obj != NULL)
++ kref_put(&obj->refcount, drm_gem_object_free);
++}
++
++static inline void
++drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
++{
++ if (obj && !atomic_add_unless(&obj->refcount.refcount, -1, 1)) {
++ struct drm_device *dev = obj->dev;
++
++ mutex_lock(&dev->struct_mutex);
++ if (likely(atomic_dec_and_test(&obj->refcount.refcount)))
++ drm_gem_object_free(&obj->refcount);
++ mutex_unlock(&dev->struct_mutex);
++ }
++}
++
++int drm_gem_handle_create(struct drm_file *file_priv,
++ struct drm_gem_object *obj,
++ u32 *handlep);
++int drm_gem_handle_delete(struct drm_file *filp, u32 handle);
++
++
++void drm_gem_free_mmap_offset(struct drm_gem_object *obj);
++int drm_gem_create_mmap_offset(struct drm_gem_object *obj);
++int drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size);
++
++struct page **drm_gem_get_pages(struct drm_gem_object *obj);
++void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
++ bool dirty, bool accessed);
++
++struct drm_gem_object *drm_gem_object_lookup(struct drm_device *dev,
++ struct drm_file *filp,
++ u32 handle);
++int drm_gem_dumb_destroy(struct drm_file *file,
++ struct drm_device *dev,
++ uint32_t handle);
++
++#endif /* __DRM_GEM_H__ */
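
The reference helpers above follow the usual kref pattern; the _unlocked variant avoids taking struct_mutex unless the count actually drops to zero. A short hedged example of the lookup/unreference pairing (mydrv_get_size is illustrative):

static int mydrv_get_size(struct drm_device *dev, struct drm_file *file,
			  u32 handle, u64 *size)
{
	struct drm_gem_object *obj;

	/* lookup takes a reference on success */
	obj = drm_gem_object_lookup(dev, file, handle);
	if (!obj)
		return -ENOENT;

	*size = obj->size;

	/* drop it without requiring dev->struct_mutex to be held */
	drm_gem_object_unreference_unlocked(obj);
	return 0;
}
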
+diff -Naur a/include/drm/drm_legacy.h b/include/drm/drm_legacy.h
+--- a/include/drm/drm_legacy.h 1970-01-01 05:30:00.000000000 +0530
++++ b/include/drm/drm_legacy.h 2015-03-26 14:42:38.754435422 +0530
+@@ -0,0 +1,203 @@
++#ifndef __DRM_DRM_LEGACY_H__
++#define __DRM_DRM_LEGACY_H__
++
++/*
++ * Legacy driver interfaces for the Direct Rendering Manager
++ *
++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * Copyright (c) 2009-2010, Code Aurora Forum.
++ * All rights reserved.
++ * Copyright © 2014 Intel Corporation
++ * Daniel Vetter <daniel.vetter@ffwll.ch>
++ *
++ * Author: Rickard E. (Rik) Faith <faith@valinux.com>
++ * Author: Gareth Hughes <gareth@valinux.com>
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++
++/*
++ * Legacy Support for paleontologic DRM drivers
++ *
++ * If you add a new driver and it uses any of these functions or structures,
++ * you're doing it terribly wrong.
++ */
++
++/**
++ * DMA buffer.
++ */
++struct drm_buf {
++ int idx; /**< Index into master buflist */
++ int total; /**< Buffer size */
++ int order; /**< log-base-2(total) */
++ int used; /**< Amount of buffer in use (for DMA) */
++ unsigned long offset; /**< Byte offset (used internally) */
++ void *address; /**< Address of buffer */
++ unsigned long bus_address; /**< Bus address of buffer */
++ struct drm_buf *next; /**< Kernel-only: used for free list */
++ __volatile__ int waiting; /**< On kernel DMA queue */
++ __volatile__ int pending; /**< On hardware DMA queue */
++ struct drm_file *file_priv; /**< Private of holding file descr */
++ int context; /**< Kernel queue for this buffer */
++ int while_locked; /**< Dispatch this buffer while locked */
++ enum {
++ DRM_LIST_NONE = 0,
++ DRM_LIST_FREE = 1,
++ DRM_LIST_WAIT = 2,
++ DRM_LIST_PEND = 3,
++ DRM_LIST_PRIO = 4,
++ DRM_LIST_RECLAIM = 5
++ } list; /**< Which list we're on */
++
++ int dev_priv_size; /**< Size of buffer private storage */
++ void *dev_private; /**< Per-buffer private storage */
++};
++
++typedef struct drm_dma_handle {
++ dma_addr_t busaddr;
++ void *vaddr;
++ size_t size;
++} drm_dma_handle_t;
++
++/**
++ * Buffer entry. There is one of this for each buffer size order.
++ */
++struct drm_buf_entry {
++ int buf_size; /**< size */
++ int buf_count; /**< number of buffers */
++ struct drm_buf *buflist; /**< buffer list */
++ int seg_count;
++ int page_order;
++ struct drm_dma_handle **seglist;
++
++ int low_mark; /**< Low water mark */
++ int high_mark; /**< High water mark */
++};
++
++/**
++ * DMA data.
++ */
++struct drm_device_dma {
++
++ struct drm_buf_entry bufs[DRM_MAX_ORDER + 1]; /**< buffers, grouped by their size order */
++ int buf_count; /**< total number of buffers */
++ struct drm_buf **buflist; /**< Vector of pointers into drm_device_dma::bufs */
++ int seg_count;
++ int page_count; /**< number of pages */
++ unsigned long *pagelist; /**< page list */
++ unsigned long byte_count;
++ enum {
++ _DRM_DMA_USE_AGP = 0x01,
++ _DRM_DMA_USE_SG = 0x02,
++ _DRM_DMA_USE_FB = 0x04,
++ _DRM_DMA_USE_PCI_RO = 0x08
++ } flags;
++
++};
++
++/**
++ * Scatter-gather memory.
++ */
++struct drm_sg_mem {
++ unsigned long handle;
++ void *virtual;
++ int pages;
++ struct page **pagelist;
++ dma_addr_t *busaddr;
++};
++
++/**
++ * Kernel side of a mapping
++ */
++struct drm_local_map {
++ resource_size_t offset; /**< Requested physical address (0 for SAREA)*/
++ unsigned long size; /**< Requested physical size (bytes) */
++ enum drm_map_type type; /**< Type of memory to map */
++ enum drm_map_flags flags; /**< Flags */
++ void *handle; /**< User-space: "Handle" to pass to mmap() */
++ /**< Kernel-space: kernel-virtual address */
++ int mtrr; /**< MTRR slot used */
++};
++
++typedef struct drm_local_map drm_local_map_t;
++
++/**
++ * Mappings list
++ */
++struct drm_map_list {
++ struct list_head head; /**< list head */
++ struct drm_hash_item hash;
++ struct drm_local_map *map; /**< mapping */
++ uint64_t user_token;
++ struct drm_master *master;
++};
++
++int drm_legacy_addmap(struct drm_device *d, resource_size_t offset,
++ unsigned int size, enum drm_map_type type,
++ enum drm_map_flags flags, struct drm_local_map **map_p);
++int drm_legacy_rmmap(struct drm_device *d, struct drm_local_map *map);
++int drm_legacy_rmmap_locked(struct drm_device *d, struct drm_local_map *map);
++struct drm_local_map *drm_legacy_getsarea(struct drm_device *dev);
++int drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma);
++
++int drm_legacy_addbufs_agp(struct drm_device *d, struct drm_buf_desc *req);
++int drm_legacy_addbufs_pci(struct drm_device *d, struct drm_buf_desc *req);
++
++/**
++ * Test that the hardware lock is held by the caller, returning -EINVAL otherwise.
++ *
++ * \param dev DRM device.
++ * \param _file_priv DRM file private of the caller.
++ */
++#define LOCK_TEST_WITH_RETURN( dev, _file_priv ) \
++do { \
++ if (!_DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock) || \
++ _file_priv->master->lock.file_priv != _file_priv) { \
++ DRM_ERROR( "%s called without lock held, held %d owner %p %p\n",\
++ __func__, _DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock),\
++ _file_priv->master->lock.file_priv, _file_priv); \
++ return -EINVAL; \
++ } \
++} while (0)
++
++void drm_legacy_idlelock_take(struct drm_lock_data *lock);
++void drm_legacy_idlelock_release(struct drm_lock_data *lock);
++
++/* drm_pci.c dma alloc wrappers */
++void __drm_legacy_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah);
++
++/* drm_memory.c */
++void drm_legacy_ioremap(struct drm_local_map *map, struct drm_device *dev);
++void drm_legacy_ioremap_wc(struct drm_local_map *map, struct drm_device *dev);
++void drm_legacy_ioremapfree(struct drm_local_map *map, struct drm_device *dev);
++
++static __inline__ struct drm_local_map *drm_legacy_findmap(struct drm_device *dev,
++ unsigned int token)
++{
++ struct drm_map_list *_entry;
++ list_for_each_entry(_entry, &dev->maplist, head)
++ if (_entry->user_token == token)
++ return _entry->map;
++ return NULL;
++}
++
++#endif /* __DRM_DRM_LEGACY_H__ */
+diff -Naur a/include/drm/drm_mm.h b/include/drm/drm_mm.h
+--- a/include/drm/drm_mm.h 2015-03-26 14:43:27.886436386 +0530
++++ b/include/drm/drm_mm.h 2015-03-26 14:42:38.754435422 +0530
+@@ -47,8 +47,17 @@
+ enum drm_mm_search_flags {
+ DRM_MM_SEARCH_DEFAULT = 0,
+ DRM_MM_SEARCH_BEST = 1 << 0,
++ DRM_MM_SEARCH_BELOW = 1 << 1,
+ };
+
++enum drm_mm_allocator_flags {
++ DRM_MM_CREATE_DEFAULT = 0,
++ DRM_MM_CREATE_TOP = 1 << 0,
++};
++
++#define DRM_MM_BOTTOMUP DRM_MM_SEARCH_DEFAULT, DRM_MM_CREATE_DEFAULT
++#define DRM_MM_TOPDOWN DRM_MM_SEARCH_BELOW, DRM_MM_CREATE_TOP
++
+ struct drm_mm_node {
+ struct list_head node_list;
+ struct list_head hole_stack;
+@@ -85,11 +94,31 @@
+ unsigned long *start, unsigned long *end);
+ };
+
++/**
++ * drm_mm_node_allocated - checks whether a node is allocated
++ * @node: drm_mm_node to check
++ *
++ * Drivers should use this helper for proper encapsulation of drm_mm
++ * internals.
++ *
++ * Returns:
++ * True if the @node is allocated.
++ */
+ static inline bool drm_mm_node_allocated(struct drm_mm_node *node)
+ {
+ return node->allocated;
+ }
+
++/**
++ * drm_mm_initialized - checks whether an allocator is initialized
++ * @mm: drm_mm to check
++ *
++ * Drivers should use this helper for proper encapsulation of drm_mm
++ * internals.
++ *
++ * Returns:
++ * True if the @mm is initialized.
++ */
+ static inline bool drm_mm_initialized(struct drm_mm *mm)
+ {
+ return mm->hole_stack.next;
+@@ -100,6 +129,17 @@
+ return hole_node->start + hole_node->size;
+ }
+
++/**
++ * drm_mm_hole_node_start - computes the start of the hole following @node
++ * @hole_node: drm_mm_node which implicitly tracks the following hole
++ *
++ * This is useful for driver-specific debug dumpers. Otherwise drivers should not
++ * inspect holes themselves. Drivers must check first whether a hole indeed
++ * follows by looking at node->hole_follows.
++ *
++ * Returns:
++ * Start of the subsequent hole.
++ */
+ static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node)
+ {
+ BUG_ON(!hole_node->hole_follows);
+@@ -112,18 +152,52 @@
+ struct drm_mm_node, node_list)->start;
+ }
+
++/**
++ * drm_mm_hole_node_end - computes the end of the hole following @node
++ * @hole_node: drm_mm_node which implicitly tracks the following hole
++ *
++ * This is useful for driver-specific debug dumpers. Otherwise drivers should not
++ * inspect holes themselves. Drivers must check first whether a hole indeed
++ * follows by looking at node->hole_follows.
++ *
++ * Returns:
++ * End of the subsequent hole.
++ */
+ static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
+ {
+ return __drm_mm_hole_node_end(hole_node);
+ }
+
++/**
++ * drm_mm_for_each_node - iterator to walk over all allocated nodes
++ * @entry: drm_mm_node structure to assign to in each iteration step
++ * @mm: drm_mm allocator to walk
++ *
++ * This iterator walks over all nodes in the range allocator. It is implemented
++ * with list_for_each, so it is not safe against removal of elements.
++ */
+ #define drm_mm_for_each_node(entry, mm) list_for_each_entry(entry, \
+ &(mm)->head_node.node_list, \
+ node_list)
+
+-/* Note that we need to unroll list_for_each_entry in order to inline
+- * setting hole_start and hole_end on each iteration and keep the
+- * macro sane.
++/**
++ * drm_mm_for_each_hole - iterator to walk over all holes
++ * @entry: drm_mm_node used internally to track progress
++ * @mm: drm_mm allocator to walk
++ * @hole_start: ulong variable to assign the hole start to on each iteration
++ * @hole_end: ulong variable to assign the hole end to on each iteration
++ *
++ * This iterator walks over all holes in the range allocator. It is implemented
++ * with list_for_each, so it is not safe against removal of elements. @entry is used
++ * internally and will not reflect a real drm_mm_node for the very first hole.
++ * Hence users of this iterator may not access it.
++ *
++ * Implementation Note:
++ * We need to inline list_for_each_entry in order to be able to set hole_start
++ * and hole_end on each iteration while keeping the macro sane.
++ *
++ * The __drm_mm_for_each_hole version is similar, but with added support for
++ * going backwards.
+ */
+ #define drm_mm_for_each_hole(entry, mm, hole_start, hole_end) \
+ for (entry = list_entry((mm)->hole_stack.next, struct drm_mm_node, hole_stack); \
+@@ -133,34 +207,79 @@
+ 1 : 0; \
+ entry = list_entry(entry->hole_stack.next, struct drm_mm_node, hole_stack))
+
++#define __drm_mm_for_each_hole(entry, mm, hole_start, hole_end, backwards) \
++ for (entry = list_entry((backwards) ? (mm)->hole_stack.prev : (mm)->hole_stack.next, struct drm_mm_node, hole_stack); \
++ &entry->hole_stack != &(mm)->hole_stack ? \
++ hole_start = drm_mm_hole_node_start(entry), \
++ hole_end = drm_mm_hole_node_end(entry), \
++ 1 : 0; \
++ entry = list_entry((backwards) ? entry->hole_stack.prev : entry->hole_stack.next, struct drm_mm_node, hole_stack))
++
+ /*
+ * Basic range manager support (drm_mm.c)
+ */
+-extern int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node);
++int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node);
+
+-extern int drm_mm_insert_node_generic(struct drm_mm *mm,
+- struct drm_mm_node *node,
+- unsigned long size,
+- unsigned alignment,
+- unsigned long color,
+- enum drm_mm_search_flags flags);
++int drm_mm_insert_node_generic(struct drm_mm *mm,
++ struct drm_mm_node *node,
++ unsigned long size,
++ unsigned alignment,
++ unsigned long color,
++ enum drm_mm_search_flags sflags,
++ enum drm_mm_allocator_flags aflags);
++/**
++ * drm_mm_insert_node - search for space and insert @node
++ * @mm: drm_mm to allocate from
++ * @node: preallocate node to insert
++ * @size: size of the allocation
++ * @alignment: alignment of the allocation
++ * @flags: flags to fine-tune the allocation
++ *
++ * This is a simplified version of drm_mm_insert_node_generic() with @color set
++ * to 0.
++ *
++ * The preallocated node must be cleared to 0.
++ *
++ * Returns:
++ * 0 on success, -ENOSPC if there's no suitable hole.
++ */
+ static inline int drm_mm_insert_node(struct drm_mm *mm,
+ struct drm_mm_node *node,
+ unsigned long size,
+ unsigned alignment,
+ enum drm_mm_search_flags flags)
+ {
+- return drm_mm_insert_node_generic(mm, node, size, alignment, 0, flags);
++ return drm_mm_insert_node_generic(mm, node, size, alignment, 0, flags,
++ DRM_MM_CREATE_DEFAULT);
+ }
+
+-extern int drm_mm_insert_node_in_range_generic(struct drm_mm *mm,
+- struct drm_mm_node *node,
+- unsigned long size,
+- unsigned alignment,
+- unsigned long color,
+- unsigned long start,
+- unsigned long end,
+- enum drm_mm_search_flags flags);
++int drm_mm_insert_node_in_range_generic(struct drm_mm *mm,
++ struct drm_mm_node *node,
++ unsigned long size,
++ unsigned alignment,
++ unsigned long color,
++ unsigned long start,
++ unsigned long end,
++ enum drm_mm_search_flags sflags,
++ enum drm_mm_allocator_flags aflags);
++/**
++ * drm_mm_insert_node_in_range - ranged search for space and insert @node
++ * @mm: drm_mm to allocate from
++ * @node: preallocate node to insert
++ * @size: size of the allocation
++ * @alignment: alignment of the allocation
++ * @start: start of the allowed range for this node
++ * @end: end of the allowed range for this node
++ * @flags: flags to fine-tune the allocation
++ *
++ * This is a simplified version of drm_mm_insert_node_in_range_generic() with
++ * @color set to 0.
++ *
++ * The preallocated node must be cleared to 0.
++ *
++ * Returns:
++ * 0 on success, -ENOSPC if there's no suitable hole.
++ */
+ static inline int drm_mm_insert_node_in_range(struct drm_mm *mm,
+ struct drm_mm_node *node,
+ unsigned long size,
+@@ -170,16 +289,17 @@
+ enum drm_mm_search_flags flags)
+ {
+ return drm_mm_insert_node_in_range_generic(mm, node, size, alignment,
+- 0, start, end, flags);
++ 0, start, end, flags,
++ DRM_MM_CREATE_DEFAULT);
+ }
+
+-extern void drm_mm_remove_node(struct drm_mm_node *node);
+-extern void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new);
+-extern void drm_mm_init(struct drm_mm *mm,
+- unsigned long start,
+- unsigned long size);
+-extern void drm_mm_takedown(struct drm_mm *mm);
+-extern int drm_mm_clean(struct drm_mm *mm);
++void drm_mm_remove_node(struct drm_mm_node *node);
++void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new);
++void drm_mm_init(struct drm_mm *mm,
++ unsigned long start,
++ unsigned long size);
++void drm_mm_takedown(struct drm_mm *mm);
++bool drm_mm_clean(struct drm_mm *mm);
+
+ void drm_mm_init_scan(struct drm_mm *mm,
+ unsigned long size,
+@@ -191,10 +311,10 @@
+ unsigned long color,
+ unsigned long start,
+ unsigned long end);
+-int drm_mm_scan_add_block(struct drm_mm_node *node);
+-int drm_mm_scan_remove_block(struct drm_mm_node *node);
++bool drm_mm_scan_add_block(struct drm_mm_node *node);
++bool drm_mm_scan_remove_block(struct drm_mm_node *node);
+
+-extern void drm_mm_debug_table(struct drm_mm *mm, const char *prefix);
++void drm_mm_debug_table(struct drm_mm *mm, const char *prefix);
+ #ifdef CONFIG_DEBUG_FS
+ int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm);
+ #endif
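
The new allocator flags split searching (where to look for a hole) from placement (which end of the hole to use); DRM_MM_TOPDOWN expands to both halves of that pair. A hedged sketch of carving a buffer out of the top of a managed range (sizes and names are illustrative):

static int mydrv_alloc_at_top(struct drm_mm *mm, struct drm_mm_node *node)
{
	/* 1 MiB, 4 KiB aligned, no color, searched and placed top-down */
	return drm_mm_insert_node_generic(mm, node, 1024 * 1024, 4096, 0,
					  DRM_MM_TOPDOWN);
}

static void mydrv_vram_mm_init(struct drm_mm *mm)
{
	/* manage a 256 MiB range starting at offset 0 */
	drm_mm_init(mm, 0, 256 * 1024 * 1024);
}

/* on free: drm_mm_remove_node(node); on teardown: drm_mm_takedown(mm); */
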
+diff -Naur a/include/drm/drm_modeset_lock.h b/include/drm/drm_modeset_lock.h
+--- a/include/drm/drm_modeset_lock.h 1970-01-01 05:30:00.000000000 +0530
++++ b/include/drm/drm_modeset_lock.h 2015-03-26 14:42:38.754435422 +0530
+@@ -0,0 +1,145 @@
++/*
++ * Copyright (C) 2014 Red Hat
++ * Author: Rob Clark <robdclark@gmail.com>
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#ifndef DRM_MODESET_LOCK_H_
++#define DRM_MODESET_LOCK_H_
++
++#include <linux/ww_mutex.h>
++
++struct drm_modeset_lock;
++
++/**
++ * struct drm_modeset_acquire_ctx - locking context (see ww_acquire_ctx)
++ * @ww_ctx: base acquire ctx
++ * @contended: used internally for -EDEADLK handling
++ * @locked: list of held locks
++ * @trylock_only: trylock mode used in atomic contexts/panic notifiers
++ *
++ * Each thread competing for a set of locks must use one acquire
++ * ctx. And if any lock fxn returns -EDEADLK, it must back off and
++ * retry.
++ */
++struct drm_modeset_acquire_ctx {
++
++ struct ww_acquire_ctx ww_ctx;
++
++ /**
++ * Contended lock: if a lock is contended you should only call
++ * drm_modeset_backoff() which drops locks and slow-locks the
++ * contended lock.
++ */
++ struct drm_modeset_lock *contended;
++
++ /**
++ * list of held locks (drm_modeset_lock)
++ */
++ struct list_head locked;
++
++ /**
++ * Trylock mode, use only for panic handlers!
++ */
++ bool trylock_only;
++};
++
++/**
++ * struct drm_modeset_lock - used for locking modeset resources.
++ * @mutex: resource locking
++ * @head: used to hold its place on the state->locked list when
++ * part of an atomic update
++ *
++ * Used for locking CRTCs and other modeset resources.
++ */
++struct drm_modeset_lock {
++ /**
++ * modeset lock
++ */
++ struct ww_mutex mutex;
++
++ /**
++ * Resources that are locked as part of an atomic update are added
++ * to a list (so we know what to unlock at the end).
++ */
++ struct list_head head;
++};
++
++extern struct ww_class crtc_ww_class;
++
++void drm_modeset_acquire_init(struct drm_modeset_acquire_ctx *ctx,
++ uint32_t flags);
++void drm_modeset_acquire_fini(struct drm_modeset_acquire_ctx *ctx);
++void drm_modeset_drop_locks(struct drm_modeset_acquire_ctx *ctx);
++void drm_modeset_backoff(struct drm_modeset_acquire_ctx *ctx);
++int drm_modeset_backoff_interruptible(struct drm_modeset_acquire_ctx *ctx);
++
++/**
++ * drm_modeset_lock_init - initialize lock
++ * @lock: lock to init
++ */
++static inline void drm_modeset_lock_init(struct drm_modeset_lock *lock)
++{
++ ww_mutex_init(&lock->mutex, &crtc_ww_class);
++ INIT_LIST_HEAD(&lock->head);
++}
++
++/**
++ * drm_modeset_lock_fini - cleanup lock
++ * @lock: lock to cleanup
++ */
++static inline void drm_modeset_lock_fini(struct drm_modeset_lock *lock)
++{
++ WARN_ON(!list_empty(&lock->head));
++}
++
++/**
++ * drm_modeset_is_locked - equivalent to mutex_is_locked()
++ * @lock: lock to check
++ */
++static inline bool drm_modeset_is_locked(struct drm_modeset_lock *lock)
++{
++ return ww_mutex_is_locked(&lock->mutex);
++}
++
++int drm_modeset_lock(struct drm_modeset_lock *lock,
++ struct drm_modeset_acquire_ctx *ctx);
++int drm_modeset_lock_interruptible(struct drm_modeset_lock *lock,
++ struct drm_modeset_acquire_ctx *ctx);
++void drm_modeset_unlock(struct drm_modeset_lock *lock);
++
++struct drm_device;
++struct drm_crtc;
++struct drm_plane;
++
++void drm_modeset_lock_all(struct drm_device *dev);
++int __drm_modeset_lock_all(struct drm_device *dev, bool trylock);
++void drm_modeset_unlock_all(struct drm_device *dev);
++void drm_modeset_lock_crtc(struct drm_crtc *crtc,
++ struct drm_plane *plane);
++void drm_modeset_unlock_crtc(struct drm_crtc *crtc);
++void drm_warn_on_modeset_not_all_locked(struct drm_device *dev);
++struct drm_modeset_acquire_ctx *
++drm_modeset_legacy_acquire_ctx(struct drm_crtc *crtc);
++
++int drm_modeset_lock_all_crtcs(struct drm_device *dev,
++ struct drm_modeset_acquire_ctx *ctx);
++
++#endif /* DRM_MODESET_LOCK_H_ */
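
The acquire context exists to make the wait/wound backoff explicit: on -EDEADLK the caller must drop everything it holds, slow-lock the contended lock, and start over. A minimal sketch of that loop (mydrv_lock_crtcs is illustrative, not part of this patch):

static int mydrv_lock_crtcs(struct drm_device *dev,
			    struct drm_modeset_acquire_ctx *ctx)
{
	int ret;

	drm_modeset_acquire_init(ctx, 0);
retry:
	ret = drm_modeset_lock_all_crtcs(dev, ctx);
	if (ret == -EDEADLK) {
		/* drop held locks, block on the contended one, retry */
		drm_modeset_backoff(ctx);
		goto retry;
	}
	if (ret) {
		drm_modeset_drop_locks(ctx);
		drm_modeset_acquire_fini(ctx);
	}
	return ret;
}

/* on success the caller performs its update, then:
 * drm_modeset_drop_locks(ctx); drm_modeset_acquire_fini(ctx); */
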
+diff -Naur a/include/drm/drm_modes.h b/include/drm/drm_modes.h
+--- a/include/drm/drm_modes.h 1970-01-01 05:30:00.000000000 +0530
++++ b/include/drm/drm_modes.h 2015-03-26 14:42:38.754435422 +0530
+@@ -0,0 +1,237 @@
++/*
++ * Copyright © 2006 Keith Packard
++ * Copyright © 2007-2008 Dave Airlie
++ * Copyright © 2007-2008 Intel Corporation
++ * Jesse Barnes <jesse.barnes@intel.com>
++ * Copyright © 2014 Intel Corporation
++ * Daniel Vetter <daniel.vetter@ffwll.ch>
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++#ifndef __DRM_MODES_H__
++#define __DRM_MODES_H__
++
++/*
++ * Note on terminology: here, for brevity and convenience, we refer to connector
++ * control chips as 'CRTCs'. They can control any type of connector, VGA, LVDS,
++ * DVI, etc. And 'screen' refers to the whole of the visible display, which
++ * may span multiple monitors (and therefore multiple CRTC and connector
++ * structures).
++ */
++
++enum drm_mode_status {
++ MODE_OK = 0, /* Mode OK */
++ MODE_HSYNC, /* hsync out of range */
++ MODE_VSYNC, /* vsync out of range */
++ MODE_H_ILLEGAL, /* mode has illegal horizontal timings */
++ MODE_V_ILLEGAL, /* mode has illegal vertical timings */
++ MODE_BAD_WIDTH, /* requires an unsupported linepitch */
++ MODE_NOMODE, /* no mode with a matching name */
++ MODE_NO_INTERLACE, /* interlaced mode not supported */
++ MODE_NO_DBLESCAN, /* doublescan mode not supported */
++ MODE_NO_VSCAN, /* multiscan mode not supported */
++ MODE_MEM, /* insufficient video memory */
++ MODE_VIRTUAL_X, /* mode width too large for specified virtual size */
++ MODE_VIRTUAL_Y, /* mode height too large for specified virtual size */
++ MODE_MEM_VIRT, /* insufficient video memory given virtual size */
++ MODE_NOCLOCK, /* no fixed clock available */
++ MODE_CLOCK_HIGH, /* clock required is too high */
++ MODE_CLOCK_LOW, /* clock required is too low */
++ MODE_CLOCK_RANGE, /* clock/mode isn't in a ClockRange */
++ MODE_BAD_HVALUE, /* horizontal timing was out of range */
++ MODE_BAD_VVALUE, /* vertical timing was out of range */
++ MODE_BAD_VSCAN, /* VScan value out of range */
++ MODE_HSYNC_NARROW, /* horizontal sync too narrow */
++ MODE_HSYNC_WIDE, /* horizontal sync too wide */
++ MODE_HBLANK_NARROW, /* horizontal blanking too narrow */
++ MODE_HBLANK_WIDE, /* horizontal blanking too wide */
++ MODE_VSYNC_NARROW, /* vertical sync too narrow */
++ MODE_VSYNC_WIDE, /* vertical sync too wide */
++ MODE_VBLANK_NARROW, /* vertical blanking too narrow */
++ MODE_VBLANK_WIDE, /* vertical blanking too wide */
++ MODE_PANEL, /* exceeds panel dimensions */
++ MODE_INTERLACE_WIDTH, /* width too large for interlaced mode */
++ MODE_ONE_WIDTH, /* only one width is supported */
++ MODE_ONE_HEIGHT, /* only one height is supported */
++ MODE_ONE_SIZE, /* only one resolution is supported */
++ MODE_NO_REDUCED, /* monitor doesn't accept reduced blanking */
++ MODE_NO_STEREO, /* stereo modes not supported */
++ MODE_UNVERIFIED = -3, /* mode needs to be reverified */
++ MODE_BAD = -2, /* unspecified reason */
++ MODE_ERROR = -1 /* error condition */
++};
++
++#define DRM_MODE_TYPE_CLOCK_CRTC_C (DRM_MODE_TYPE_CLOCK_C | \
++ DRM_MODE_TYPE_CRTC_C)
++
++#define DRM_MODE(nm, t, c, hd, hss, hse, ht, hsk, vd, vss, vse, vt, vs, f) \
++ .name = nm, .status = 0, .type = (t), .clock = (c), \
++ .hdisplay = (hd), .hsync_start = (hss), .hsync_end = (hse), \
++ .htotal = (ht), .hskew = (hsk), .vdisplay = (vd), \
++ .vsync_start = (vss), .vsync_end = (vse), .vtotal = (vt), \
++ .vscan = (vs), .flags = (f), \
++ .base.type = DRM_MODE_OBJECT_MODE
++
++#define CRTC_INTERLACE_HALVE_V (1 << 0) /* halve V values for interlacing */
++#define CRTC_STEREO_DOUBLE (1 << 1) /* adjust timings for stereo modes */
++
++#define DRM_MODE_FLAG_3D_MAX DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF
++
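
The DRM_MODE() initializer above is meant for static mode tables. For instance, the standard 640x480@60 timing (the usual DMT values) would be spelled as follows; mydrv_std_mode is just an illustrative name:

static const struct drm_display_mode mydrv_std_mode =
	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
		   752, 800, 0, 480, 490, 492, 525, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) };
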
++struct drm_display_mode {
++ /* Header */
++ struct list_head head;
++ struct drm_mode_object base;
++
++ char name[DRM_DISPLAY_MODE_LEN];
++
++ enum drm_mode_status status;
++ unsigned int type;
++
++ /* Proposed mode values */
++ int clock; /* in kHz */
++ int hdisplay;
++ int hsync_start;
++ int hsync_end;
++ int htotal;
++ int hskew;
++ int vdisplay;
++ int vsync_start;
++ int vsync_end;
++ int vtotal;
++ int vscan;
++ unsigned int flags;
++
++ /* Addressable image size (may be 0 for projectors, etc.) */
++ int width_mm;
++ int height_mm;
++
++ /* Actual mode we give to hw */
++ int crtc_clock; /* in kHz */
++ int crtc_hdisplay;
++ int crtc_hblank_start;
++ int crtc_hblank_end;
++ int crtc_hsync_start;
++ int crtc_hsync_end;
++ int crtc_htotal;
++ int crtc_hskew;
++ int crtc_vdisplay;
++ int crtc_vblank_start;
++ int crtc_vblank_end;
++ int crtc_vsync_start;
++ int crtc_vsync_end;
++ int crtc_vtotal;
++
++ /* Driver private mode info */
++ int *private;
++ int private_flags;
++
++ int vrefresh; /* in Hz */
++ int hsync; /* in kHz */
++ enum hdmi_picture_aspect picture_aspect_ratio;
++};
++
++/* mode specified on the command line */
++struct drm_cmdline_mode {
++ bool specified;
++ bool refresh_specified;
++ bool bpp_specified;
++ int xres, yres;
++ int bpp;
++ int refresh;
++ bool rb;
++ bool interlace;
++ bool cvt;
++ bool margins;
++ enum drm_connector_force force;
++};
++
++/**
++ * drm_mode_is_stereo - check for stereo mode flags
++ * @mode: drm_display_mode to check
++ *
++ * Returns:
++ * True if the mode is one of the stereo modes (like side-by-side), false if
++ * not.
++ */
++static inline bool drm_mode_is_stereo(const struct drm_display_mode *mode)
++{
++ return mode->flags & DRM_MODE_FLAG_3D_MASK;
++}
++
++struct drm_connector;
++struct drm_cmdline_mode;
++
++struct drm_display_mode *drm_mode_create(struct drm_device *dev);
++void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode);
++void drm_mode_probed_add(struct drm_connector *connector, struct drm_display_mode *mode);
++void drm_mode_debug_printmodeline(const struct drm_display_mode *mode);
++
++struct drm_display_mode *drm_cvt_mode(struct drm_device *dev,
++ int hdisplay, int vdisplay, int vrefresh,
++ bool reduced, bool interlaced,
++ bool margins);
++struct drm_display_mode *drm_gtf_mode(struct drm_device *dev,
++ int hdisplay, int vdisplay, int vrefresh,
++ bool interlaced, int margins);
++struct drm_display_mode *drm_gtf_mode_complex(struct drm_device *dev,
++ int hdisplay, int vdisplay,
++ int vrefresh, bool interlaced,
++ int margins,
++ int GTF_M, int GTF_2C,
++ int GTF_K, int GTF_2J);
++void drm_display_mode_from_videomode(const struct videomode *vm,
++ struct drm_display_mode *dmode);
++int of_get_drm_display_mode(struct device_node *np,
++ struct drm_display_mode *dmode,
++ int index);
++
++void drm_mode_set_name(struct drm_display_mode *mode);
++int drm_mode_hsync(const struct drm_display_mode *mode);
++int drm_mode_vrefresh(const struct drm_display_mode *mode);
++
++void drm_mode_set_crtcinfo(struct drm_display_mode *p,
++ int adjust_flags);
++void drm_mode_copy(struct drm_display_mode *dst,
++ const struct drm_display_mode *src);
++struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
++ const struct drm_display_mode *mode);
++bool drm_mode_equal(const struct drm_display_mode *mode1,
++ const struct drm_display_mode *mode2);
++bool drm_mode_equal_no_clocks_no_stereo(const struct drm_display_mode *mode1,
++ const struct drm_display_mode *mode2);
++
++/* for use by the crtc helper probe functions */
++void drm_mode_validate_size(struct drm_device *dev,
++ struct list_head *mode_list,
++ int maxX, int maxY);
++void drm_mode_prune_invalid(struct drm_device *dev,
++ struct list_head *mode_list, bool verbose);
++void drm_mode_sort(struct list_head *mode_list);
++void drm_mode_connector_list_update(struct drm_connector *connector, bool merge_type_bits);
++
++/* parsing cmdline modes */
++bool
++drm_mode_parse_command_line_for_connector(const char *mode_option,
++ struct drm_connector *connector,
++ struct drm_cmdline_mode *mode);
++struct drm_display_mode *
++drm_mode_create_from_cmdline_mode(struct drm_device *dev,
++ struct drm_cmdline_mode *cmd);
++
++#endif /* __DRM_MODES_H__ */
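+
+To illustrate how the stereo check above is meant to be used (a hypothetical
+driver helper, not part of this backport): probed modes sit on the connector's
+mode list and can be filtered before being reported to clients that have not
+set stereo_allowed.
+
+	/* Sketch: drop stereo 3D modes from a connector's probed list. */
+	static void example_filter_stereo_modes(struct drm_connector *connector)
+	{
+		struct drm_display_mode *mode, *tmp;
+
+		list_for_each_entry_safe(mode, tmp, &connector->modes, head) {
+			if (drm_mode_is_stereo(mode)) {
+				list_del(&mode->head);
+				drm_mode_destroy(connector->dev, mode);
+			}
+		}
+	}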
+diff -Naur a/include/drm/drmP.h b/include/drm/drmP.h
+--- a/include/drm/drmP.h 2015-03-26 14:43:27.890436386 +0530
++++ b/include/drm/drmP.h 2015-03-26 14:42:38.758435422 +0530
+@@ -1,17 +1,14 @@
+-/**
+- * \file drmP.h
+- * Private header for Direct Rendering Manager
+- *
+- * \author Rickard E. (Rik) Faith <faith@valinux.com>
+- * \author Gareth Hughes <gareth@valinux.com>
+- */
+-
+ /*
++ * Internal Header for the Direct Rendering Manager
++ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * Copyright (c) 2009-2010, Code Aurora Forum.
+ * All rights reserved.
+ *
++ * Author: Rickard E. (Rik) Faith <faith@valinux.com>
++ * Author: Gareth Hughes <gareth@valinux.com>
++ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+@@ -35,101 +32,101 @@
+ #ifndef _DRM_P_H_
+ #define _DRM_P_H_
+
+-#ifdef __KERNEL__
+-#ifdef __alpha__
+-/* add include of current.h so that "current" is defined
+- * before static inline funcs in wait.h. Doing this so we
+- * can build the DRM (part of PI DRI). 4/21/2000 S + B */
+-#include <asm/current.h>
+-#endif /* __alpha__ */
+-#include <linux/kernel.h>
+-#include <linux/miscdevice.h>
++#include <linux/agp_backend.h>
++#include <linux/cdev.h>
++#include <linux/dma-mapping.h>
++#include <linux/file.h>
+ #include <linux/fs.h>
++#include <linux/highmem.h>
++#include <linux/idr.h>
+ #include <linux/init.h>
+-#include <linux/file.h>
+-#include <linux/platform_device.h>
+-#include <linux/pci.h>
++#include <linux/io.h>
+ #include <linux/jiffies.h>
+-#include <linux/dma-mapping.h>
++#include <linux/kernel.h>
++#include <linux/kref.h>
++#include <linux/miscdevice.h>
+ #include <linux/mm.h>
+-#include <linux/cdev.h>
+ #include <linux/mutex.h>
+-#include <linux/io.h>
+-#include <linux/slab.h>
++#include <linux/pci.h>
++#include <linux/platform_device.h>
++#include <linux/poll.h>
+ #include <linux/ratelimit.h>
+-#if defined(__alpha__) || defined(__powerpc__)
+-#include <asm/pgtable.h> /* For pte_wrprotect */
+-#endif
+-#include <asm/mman.h>
+-#include <asm/uaccess.h>
++#include <linux/sched.h>
++#include <linux/slab.h>
+ #include <linux/types.h>
+-#include <linux/agp_backend.h>
++#include <linux/vmalloc.h>
+ #include <linux/workqueue.h>
+-#include <linux/poll.h>
++
++#include <asm/mman.h>
+ #include <asm/pgalloc.h>
+-#include <drm/drm.h>
+-#include <drm/drm_sarea.h>
+-#include <drm/drm_vma_manager.h>
++#include <asm/uaccess.h>
+
+-#include <linux/idr.h>
++#include <uapi/drm/drm.h>
++#include <uapi/drm/drm_mode.h>
+
+-#define __OS_HAS_AGP (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE)))
++#include <drm/drm_agpsupport.h>
++#include <drm/drm_crtc.h>
++#include <drm/drm_global.h>
++#include <drm/drm_hashtab.h>
++#include <drm/drm_mem_util.h>
++#include <drm/drm_mm.h>
++#include <drm/drm_os_linux.h>
++#include <drm/drm_sarea.h>
++#include <drm/drm_vma_manager.h>
+
+ struct module;
+
+ struct drm_file;
+ struct drm_device;
++struct drm_agp_head;
++struct drm_local_map;
++struct drm_device_dma;
++struct drm_dma_handle;
++struct drm_gem_object;
+
+ struct device_node;
+ struct videomode;
++struct reservation_object;
++struct dma_buf_attachment;
+
+-#include <drm/drm_os_linux.h>
+-#include <drm/drm_hashtab.h>
+-#include <drm/drm_mm.h>
+-
+-#define DRM_UT_CORE 0x01
+-#define DRM_UT_DRIVER 0x02
+-#define DRM_UT_KMS 0x04
+-#define DRM_UT_PRIME 0x08
+ /*
+- * Three debug levels are defined.
+- * drm_core, drm_driver, drm_kms
+- * drm_core level can be used in the generic drm code. For example:
+- * drm_ioctl, drm_mm, drm_memory
+- * The macro definition of DRM_DEBUG is used.
+- * DRM_DEBUG(fmt, args...)
+- * The debug info by using the DRM_DEBUG can be obtained by adding
+- * the boot option of "drm.debug=1".
++ * 4 debug categories are defined:
++ *
++ * CORE: Used in the generic drm code: drm_ioctl.c, drm_mm.c, drm_memory.c, ...
++ * This is the category used by the DRM_DEBUG() macro.
+ *
+- * drm_driver level can be used in the specific drm driver. It is used
+- * to add the debug info related with the drm driver. For example:
+- * i915_drv, i915_dma, i915_gem, radeon_drv,
+- * The macro definition of DRM_DEBUG_DRIVER can be used.
+- * DRM_DEBUG_DRIVER(fmt, args...)
+- * The debug info by using the DRM_DEBUG_DRIVER can be obtained by
+- * adding the boot option of "drm.debug=0x02"
++ * DRIVER: Used in the vendor specific part of the driver: i915, radeon, ...
++ * This is the category used by the DRM_DEBUG_DRIVER() macro.
+ *
+- * drm_kms level can be used in the KMS code related with specific drm driver.
+- * It is used to add the debug info related with KMS mode. For example:
+- * the connector/crtc ,
+- * The macro definition of DRM_DEBUG_KMS can be used.
+- * DRM_DEBUG_KMS(fmt, args...)
+- * The debug info by using the DRM_DEBUG_KMS can be obtained by
+- * adding the boot option of "drm.debug=0x04"
++ * KMS: used in the modesetting code.
++ * This is the category used by the DRM_DEBUG_KMS() macro.
+ *
+- * If we add the boot option of "drm.debug=0x06", we can get the debug info by
+- * using the DRM_DEBUG_KMS and DRM_DEBUG_DRIVER.
+- * If we add the boot option of "drm.debug=0x05", we can get the debug info by
+- * using the DRM_DEBUG_KMS and DRM_DEBUG.
++ * PRIME: used in the prime code.
++ * This is the category used by the DRM_DEBUG_PRIME() macro.
++ *
++ * Enabling verbose debug messages is done through the drm.debug parameter,
++ * each category being enabled by a bit.
++ *
++ * drm.debug=0x1 will enable CORE messages
++ * drm.debug=0x2 will enable DRIVER messages
++ * drm.debug=0x3 will enable CORE and DRIVER messages
++ * ...
++ * drm.debug=0xf will enable all messages
++ *
++ * An interesting feature is that it's possible to enable verbose logging at
++ * run-time by echoing the debug value in its sysfs node:
++ * # echo 0xf > /sys/module/drm/parameters/debug
+ */
++#define DRM_UT_CORE 0x01
++#define DRM_UT_DRIVER 0x02
++#define DRM_UT_KMS 0x04
++#define DRM_UT_PRIME 0x08
+
+-extern __printf(4, 5)
+-void drm_ut_debug_printk(unsigned int request_level,
+- const char *prefix,
+- const char *function_name,
+- const char *format, ...);
+ extern __printf(2, 3)
+-int drm_err(const char *func, const char *format, ...);
++void drm_ut_debug_printk(const char *function_name,
++ const char *format, ...);
++extern __printf(1, 2)
++void drm_err(const char *format, ...);
+
+ /***********************************************************************/
+ /** \name DRM template customization defaults */
+@@ -147,26 +144,6 @@
+ #define DRIVER_PRIME 0x4000
+ #define DRIVER_RENDER 0x8000
+
+-#define DRIVER_BUS_PCI 0x1
+-#define DRIVER_BUS_PLATFORM 0x2
+-#define DRIVER_BUS_USB 0x3
+-#define DRIVER_BUS_HOST1X 0x4
+-
+-/***********************************************************************/
+-/** \name Begin the DRM... */
+-/*@{*/
+-
+-#define DRM_DEBUG_CODE 2 /**< Include debugging code if > 1, then
+- also include looping detection. */
+-
+-#define DRM_MAGIC_HASH_ORDER 4 /**< Size of key hash table. Must be power of 2. */
+-#define DRM_KERNEL_CONTEXT 0 /**< Change drm_resctx if changed */
+-#define DRM_RESERVED_CONTEXTS 1 /**< Change drm_resctx if changed */
+-
+-#define DRM_MAP_HASH_OFFSET 0x10000000
+-
+-/*@}*/
+-
+ /***********************************************************************/
+ /** \name Macros to make printk easier */
+ /*@{*/
+@@ -178,7 +155,7 @@
+ * \param arg arguments
+ */
+ #define DRM_ERROR(fmt, ...) \
+- drm_err(__func__, fmt, ##__VA_ARGS__)
++ drm_err(fmt, ##__VA_ARGS__)
+
+ /**
+ * Rate limited error output. Like DRM_ERROR() but won't flood the log.
+@@ -193,7 +170,7 @@
+ DEFAULT_RATELIMIT_BURST); \
+ \
+ if (__ratelimit(&_rs)) \
+- drm_err(__func__, fmt, ##__VA_ARGS__); \
++ drm_err(fmt, ##__VA_ARGS__); \
+ })
+
+ #define DRM_INFO(fmt, ...) \
+@@ -208,59 +185,27 @@
+ * \param fmt printf() like format string.
+ * \param arg arguments
+ */
+-#if DRM_DEBUG_CODE
+ #define DRM_DEBUG(fmt, args...) \
+ do { \
+- drm_ut_debug_printk(DRM_UT_CORE, DRM_NAME, \
+- __func__, fmt, ##args); \
++ if (unlikely(drm_debug & DRM_UT_CORE)) \
++ drm_ut_debug_printk(__func__, fmt, ##args); \
+ } while (0)
+
+ #define DRM_DEBUG_DRIVER(fmt, args...) \
+ do { \
+- drm_ut_debug_printk(DRM_UT_DRIVER, DRM_NAME, \
+- __func__, fmt, ##args); \
++ if (unlikely(drm_debug & DRM_UT_DRIVER)) \
++ drm_ut_debug_printk(__func__, fmt, ##args); \
+ } while (0)
+-#define DRM_DEBUG_KMS(fmt, args...) \
++#define DRM_DEBUG_KMS(fmt, args...) \
+ do { \
+- drm_ut_debug_printk(DRM_UT_KMS, DRM_NAME, \
+- __func__, fmt, ##args); \
++ if (unlikely(drm_debug & DRM_UT_KMS)) \
++ drm_ut_debug_printk(__func__, fmt, ##args); \
+ } while (0)
+ #define DRM_DEBUG_PRIME(fmt, args...) \
+ do { \
+- drm_ut_debug_printk(DRM_UT_PRIME, DRM_NAME, \
+- __func__, fmt, ##args); \
++ if (unlikely(drm_debug & DRM_UT_PRIME)) \
++ drm_ut_debug_printk(__func__, fmt, ##args); \
+ } while (0)
+-#define DRM_LOG(fmt, args...) \
+- do { \
+- drm_ut_debug_printk(DRM_UT_CORE, NULL, \
+- NULL, fmt, ##args); \
+- } while (0)
+-#define DRM_LOG_KMS(fmt, args...) \
+- do { \
+- drm_ut_debug_printk(DRM_UT_KMS, NULL, \
+- NULL, fmt, ##args); \
+- } while (0)
+-#define DRM_LOG_MODE(fmt, args...) \
+- do { \
+- drm_ut_debug_printk(DRM_UT_MODE, NULL, \
+- NULL, fmt, ##args); \
+- } while (0)
+-#define DRM_LOG_DRIVER(fmt, args...) \
+- do { \
+- drm_ut_debug_printk(DRM_UT_DRIVER, NULL, \
+- NULL, fmt, ##args); \
+- } while (0)
+-#else
+-#define DRM_DEBUG_DRIVER(fmt, args...) do { } while (0)
+-#define DRM_DEBUG_KMS(fmt, args...) do { } while (0)
+-#define DRM_DEBUG_PRIME(fmt, args...) do { } while (0)
+-#define DRM_DEBUG(fmt, arg...) do { } while (0)
+-#define DRM_LOG(fmt, arg...) do { } while (0)
+-#define DRM_LOG_KMS(fmt, args...) do { } while (0)
+-#define DRM_LOG_MODE(fmt, arg...) do { } while (0)
+-#define DRM_LOG_DRIVER(fmt, arg...) do { } while (0)
+-
+-#endif
+
+ /*@}*/
+
+@@ -268,28 +213,9 @@
+ /** \name Internal types and structures */
+ /*@{*/
+
+-#define DRM_ARRAY_SIZE(x) ARRAY_SIZE(x)
+-
+ #define DRM_IF_VERSION(maj, min) (maj << 16 | min)
+
+ /**
+- * Test that the hardware lock is held by the caller, returning otherwise.
+- *
+- * \param dev DRM device.
+- * \param filp file pointer of the caller.
+- */
+-#define LOCK_TEST_WITH_RETURN( dev, _file_priv ) \
+-do { \
+- if (!_DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock) || \
+- _file_priv->master->lock.file_priv != _file_priv) { \
+- DRM_ERROR( "%s called without lock held, held %d owner %p %p\n",\
+- __func__, _DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock),\
+- _file_priv->master->lock.file_priv, _file_priv); \
+- return -EINVAL; \
+- } \
+-} while (0)
+-
+-/**
+ * Ioctl function type.
+ *
+ * \param inode device inode.
+@@ -329,91 +255,6 @@
+ #define DRM_IOCTL_DEF_DRV(ioctl, _func, _flags) \
+ [DRM_IOCTL_NR(DRM_##ioctl)] = {.cmd = DRM_##ioctl, .func = _func, .flags = _flags, .cmd_drv = DRM_IOCTL_##ioctl, .name = #ioctl}
+
+-struct drm_magic_entry {
+- struct list_head head;
+- struct drm_hash_item hash_item;
+- struct drm_file *priv;
+-};
+-
+-struct drm_vma_entry {
+- struct list_head head;
+- struct vm_area_struct *vma;
+- pid_t pid;
+-};
+-
+-/**
+- * DMA buffer.
+- */
+-struct drm_buf {
+- int idx; /**< Index into master buflist */
+- int total; /**< Buffer size */
+- int order; /**< log-base-2(total) */
+- int used; /**< Amount of buffer in use (for DMA) */
+- unsigned long offset; /**< Byte offset (used internally) */
+- void *address; /**< Address of buffer */
+- unsigned long bus_address; /**< Bus address of buffer */
+- struct drm_buf *next; /**< Kernel-only: used for free list */
+- __volatile__ int waiting; /**< On kernel DMA queue */
+- __volatile__ int pending; /**< On hardware DMA queue */
+- struct drm_file *file_priv; /**< Private of holding file descr */
+- int context; /**< Kernel queue for this buffer */
+- int while_locked; /**< Dispatch this buffer while locked */
+- enum {
+- DRM_LIST_NONE = 0,
+- DRM_LIST_FREE = 1,
+- DRM_LIST_WAIT = 2,
+- DRM_LIST_PEND = 3,
+- DRM_LIST_PRIO = 4,
+- DRM_LIST_RECLAIM = 5
+- } list; /**< Which list we're on */
+-
+- int dev_priv_size; /**< Size of buffer private storage */
+- void *dev_private; /**< Per-buffer private storage */
+-};
+-
+-/** bufs is one longer than it has to be */
+-struct drm_waitlist {
+- int count; /**< Number of possible buffers */
+- struct drm_buf **bufs; /**< List of pointers to buffers */
+- struct drm_buf **rp; /**< Read pointer */
+- struct drm_buf **wp; /**< Write pointer */
+- struct drm_buf **end; /**< End pointer */
+- spinlock_t read_lock;
+- spinlock_t write_lock;
+-};
+-
+-struct drm_freelist {
+- int initialized; /**< Freelist in use */
+- atomic_t count; /**< Number of free buffers */
+- struct drm_buf *next; /**< End pointer */
+-
+- wait_queue_head_t waiting; /**< Processes waiting on free bufs */
+- int low_mark; /**< Low water mark */
+- int high_mark; /**< High water mark */
+- atomic_t wfh; /**< If waiting for high mark */
+- spinlock_t lock;
+-};
+-
+-typedef struct drm_dma_handle {
+- dma_addr_t busaddr;
+- void *vaddr;
+- size_t size;
+-} drm_dma_handle_t;
+-
+-/**
+- * Buffer entry. There is one of this for each buffer size order.
+- */
+-struct drm_buf_entry {
+- int buf_size; /**< size */
+- int buf_count; /**< number of buffers */
+- struct drm_buf *buflist; /**< buffer list */
+- int seg_count;
+- int page_order;
+- struct drm_dma_handle **seglist;
+-
+- struct drm_freelist freelist;
+-};
+-
+ /* Event queued up for userspace to read */
+ struct drm_pending_event {
+ struct drm_event *event;
+@@ -432,11 +273,16 @@
+
+ /** File private data */
+ struct drm_file {
+- unsigned always_authenticated :1;
+ unsigned authenticated :1;
+- unsigned is_master :1; /* this file private is a master for a minor */
++ /* Whether we're master for a minor. Protected by master_mutex */
++ unsigned is_master :1;
+ /* true when the client has asked us to expose stereo 3D mode flags */
+ unsigned stereo_allowed :1;
++ /*
++ * true if client understands CRTC primary planes and cursor planes
++ * in the plane list
++ */
++ unsigned universal_planes:1;
+
+ struct pid *pid;
+ kuid_t uid;
+@@ -471,23 +317,6 @@
+ struct drm_prime_file_private prime;
+ };
+
+-/** Wait queue */
+-struct drm_queue {
+- atomic_t use_count; /**< Outstanding uses (+1) */
+- atomic_t finalization; /**< Finalization in progress */
+- atomic_t block_count; /**< Count of processes waiting */
+- atomic_t block_read; /**< Queue blocked for reads */
+- wait_queue_head_t read_queue; /**< Processes waiting on block_read */
+- atomic_t block_write; /**< Queue blocked for writes */
+- wait_queue_head_t write_queue; /**< Processes waiting on block_write */
+- atomic_t total_queued; /**< Total queued statistic */
+- atomic_t total_flushed; /**< Total flushes statistic */
+- atomic_t total_locks; /**< Total locks statistics */
+- enum drm_ctx_flags flags; /**< Context preserving and 2D-only */
+- struct drm_waitlist waitlist; /**< Pending buffers */
+- wait_queue_head_t flush_queue; /**< Processes waiting until flush */
+-};
+-
+ /**
+ * Lock data.
+ */
+@@ -504,238 +333,26 @@
+ };
+
+ /**
+- * DMA data.
+- */
+-struct drm_device_dma {
+-
+- struct drm_buf_entry bufs[DRM_MAX_ORDER + 1]; /**< buffers, grouped by their size order */
+- int buf_count; /**< total number of buffers */
+- struct drm_buf **buflist; /**< Vector of pointers into drm_device_dma::bufs */
+- int seg_count;
+- int page_count; /**< number of pages */
+- unsigned long *pagelist; /**< page list */
+- unsigned long byte_count;
+- enum {
+- _DRM_DMA_USE_AGP = 0x01,
+- _DRM_DMA_USE_SG = 0x02,
+- _DRM_DMA_USE_FB = 0x04,
+- _DRM_DMA_USE_PCI_RO = 0x08
+- } flags;
+-
+-};
+-
+-/**
+- * AGP memory entry. Stored as a doubly linked list.
+- */
+-struct drm_agp_mem {
+- unsigned long handle; /**< handle */
+- struct agp_memory *memory;
+- unsigned long bound; /**< address */
+- int pages;
+- struct list_head head;
+-};
+-
+-/**
+- * AGP data.
++ * struct drm_master - drm master structure
+ *
+- * \sa drm_agp_init() and drm_device::agp.
++ * @refcount: Refcount for this master object.
++ * @minor: Link back to minor char device we are master for. Immutable.
++ * @unique: Unique identifier: e.g. busid. Protected by drm_global_mutex.
++ * @unique_len: Length of unique field. Protected by drm_global_mutex.
++ * @magiclist: Hash of used authentication tokens. Protected by struct_mutex.
++ * @magicfree: List of used authentication tokens. Protected by struct_mutex.
++ * @lock: DRI lock information.
++ * @driver_priv: Pointer to driver-private information.
+ */
+-struct drm_agp_head {
+- struct agp_kern_info agp_info; /**< AGP device information */
+- struct list_head memory;
+- unsigned long mode; /**< AGP mode */
+- struct agp_bridge_data *bridge;
+- int enabled; /**< whether the AGP bus as been enabled */
+- int acquired; /**< whether the AGP device has been acquired */
+- unsigned long base;
+- int agp_mtrr;
+- int cant_use_aperture;
+- unsigned long page_mask;
+-};
+-
+-/**
+- * Scatter-gather memory.
+- */
+-struct drm_sg_mem {
+- unsigned long handle;
+- void *virtual;
+- int pages;
+- struct page **pagelist;
+- dma_addr_t *busaddr;
+-};
+-
+-struct drm_sigdata {
+- int context;
+- struct drm_hw_lock *lock;
+-};
+-
+-
+-/**
+- * Kernel side of a mapping
+- */
+-struct drm_local_map {
+- resource_size_t offset; /**< Requested physical address (0 for SAREA)*/
+- unsigned long size; /**< Requested physical size (bytes) */
+- enum drm_map_type type; /**< Type of memory to map */
+- enum drm_map_flags flags; /**< Flags */
+- void *handle; /**< User-space: "Handle" to pass to mmap() */
+- /**< Kernel-space: kernel-virtual address */
+- int mtrr; /**< MTRR slot used */
+-};
+-
+-typedef struct drm_local_map drm_local_map_t;
+-
+-/**
+- * Mappings list
+- */
+-struct drm_map_list {
+- struct list_head head; /**< list head */
+- struct drm_hash_item hash;
+- struct drm_local_map *map; /**< mapping */
+- uint64_t user_token;
+- struct drm_master *master;
+-};
+-
+-/**
+- * Context handle list
+- */
+-struct drm_ctx_list {
+- struct list_head head; /**< list head */
+- drm_context_t handle; /**< context handle */
+- struct drm_file *tag; /**< associated fd private data */
+-};
+-
+-/* location of GART table */
+-#define DRM_ATI_GART_MAIN 1
+-#define DRM_ATI_GART_FB 2
+-
+-#define DRM_ATI_GART_PCI 1
+-#define DRM_ATI_GART_PCIE 2
+-#define DRM_ATI_GART_IGP 3
+-
+-struct drm_ati_pcigart_info {
+- int gart_table_location;
+- int gart_reg_if;
+- void *addr;
+- dma_addr_t bus_addr;
+- dma_addr_t table_mask;
+- struct drm_dma_handle *table_handle;
+- struct drm_local_map mapping;
+- int table_size;
+-};
+-
+-/**
+- * This structure defines the drm_mm memory object, which will be used by the
+- * DRM for its buffer objects.
+- */
+-struct drm_gem_object {
+- /** Reference count of this object */
+- struct kref refcount;
+-
+- /**
+- * handle_count - gem file_priv handle count of this object
+- *
+- * Each handle also holds a reference. Note that when the handle_count
+- * drops to 0 any global names (e.g. the id in the flink namespace) will
+- * be cleared.
+- *
+- * Protected by dev->object_name_lock.
+- * */
+- unsigned handle_count;
+-
+- /** Related drm device */
+- struct drm_device *dev;
+-
+- /** File representing the shmem storage */
+- struct file *filp;
+-
+- /* Mapping info for this object */
+- struct drm_vma_offset_node vma_node;
+-
+- /**
+- * Size of the object, in bytes. Immutable over the object's
+- * lifetime.
+- */
+- size_t size;
+-
+- /**
+- * Global name for this object, starts at 1. 0 means unnamed.
+- * Access is covered by the object_name_lock in the related drm_device
+- */
+- int name;
+-
+- /**
+- * Memory domains. These monitor which caches contain read/write data
+- * related to the object. When transitioning from one set of domains
+- * to another, the driver is called to ensure that caches are suitably
+- * flushed and invalidated
+- */
+- uint32_t read_domains;
+- uint32_t write_domain;
+-
+- /**
+- * While validating an exec operation, the
+- * new read/write domain values are computed here.
+- * They will be transferred to the above values
+- * at the point that any cache flushing occurs
+- */
+- uint32_t pending_read_domains;
+- uint32_t pending_write_domain;
+-
+- /**
+- * dma_buf - dma buf associated with this GEM object
+- *
+- * Pointer to the dma-buf associated with this gem object (either
+- * through importing or exporting). We break the resulting reference
+- * loop when the last gem handle for this object is released.
+- *
+- * Protected by obj->object_name_lock
+- */
+- struct dma_buf *dma_buf;
+-
+- /**
+- * import_attach - dma buf attachment backing this object
+- *
+- * Any foreign dma_buf imported as a gem object has this set to the
+- * attachment point for the device. This is invariant over the lifetime
+- * of a gem object.
+- *
+- * The driver's ->gem_free_object callback is responsible for cleaning
+- * up the dma_buf attachment and references acquired at import time.
+- *
+- * Note that the drm gem/prime core does not depend upon drivers setting
+- * this field any more. So for drivers where this doesn't make sense
+- * (e.g. virtual devices or a displaylink behind an usb bus) they can
+- * simply leave it as NULL.
+- */
+- struct dma_buf_attachment *import_attach;
+-};
+-
+-#include <drm/drm_crtc.h>
+-
+-/* per-master structure */
+ struct drm_master {
+-
+- struct kref refcount; /* refcount for this master */
+-
+- struct list_head head; /**< each minor contains a list of masters */
+- struct drm_minor *minor; /**< link back to minor we are a master for */
+-
+- char *unique; /**< Unique identifier: e.g., busid */
+- int unique_len; /**< Length of unique field */
+- int unique_size; /**< amount allocated */
+-
+- int blocked; /**< Blocked due to VC switch? */
+-
+- /** \name Authentication */
+- /*@{ */
++ struct kref refcount;
++ struct drm_minor *minor;
++ char *unique;
++ int unique_len;
+ struct drm_open_hash magiclist;
+ struct list_head magicfree;
+- /*@} */
+-
+- struct drm_lock_data lock; /**< Information on hardware lock */
+-
+- void *driver_priv; /**< Private structure for driver to use */
++ struct drm_lock_data lock;
++ void *driver_priv;
+ };
+
+ /* Size of ringbuffer for vblank timestamps. Just double-buffer
+@@ -746,23 +363,13 @@
+ /* Flags and return codes for get_vblank_timestamp() driver function. */
+ #define DRM_CALLED_FROM_VBLIRQ 1
+ #define DRM_VBLANKTIME_SCANOUTPOS_METHOD (1 << 0)
+-#define DRM_VBLANKTIME_INVBL (1 << 1)
++#define DRM_VBLANKTIME_IN_VBLANK (1 << 1)
+
+ /* get_scanout_position() return flags */
+ #define DRM_SCANOUTPOS_VALID (1 << 0)
+-#define DRM_SCANOUTPOS_INVBL (1 << 1)
++#define DRM_SCANOUTPOS_IN_VBLANK (1 << 1)
+ #define DRM_SCANOUTPOS_ACCURATE (1 << 2)
+
+-struct drm_bus {
+- int bus_type;
+- int (*get_irq)(struct drm_device *dev);
+- const char *(*get_name)(struct drm_device *dev);
+- int (*set_busid)(struct drm_device *dev, struct drm_master *master);
+- int (*set_unique)(struct drm_device *dev, struct drm_master *master,
+- struct drm_unique *unique);
+- int (*irq_by_busid)(struct drm_device *dev, struct drm_irq_busid *p);
+-};
+-
+ /**
+ * DRM driver structure. This structure represent the common code for
+ * a family of cards. There will one drm_device for each card present
+@@ -781,6 +388,7 @@
+ int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv);
+ int (*dma_quiescent) (struct drm_device *);
+ int (*context_dtor) (struct drm_device *dev, int context);
++ int (*set_busid)(struct drm_device *dev, struct drm_master *master);
+
+ /**
+ * get_vblank_counter - get raw hardware vblank counter
+@@ -959,9 +567,12 @@
+ /* low-level interface used by drm_gem_prime_{import,export} */
+ int (*gem_prime_pin)(struct drm_gem_object *obj);
+ void (*gem_prime_unpin)(struct drm_gem_object *obj);
++ struct reservation_object * (*gem_prime_res_obj)(
++ struct drm_gem_object *obj);
+ struct sg_table *(*gem_prime_get_sg_table)(struct drm_gem_object *obj);
+ struct drm_gem_object *(*gem_prime_import_sg_table)(
+- struct drm_device *dev, size_t size,
++ struct drm_device *dev,
++ struct dma_buf_attachment *attach,
+ struct sg_table *sgt);
+ void *(*gem_prime_vmap)(struct drm_gem_object *obj);
+ void (*gem_prime_vunmap)(struct drm_gem_object *obj, void *vaddr);
+@@ -997,21 +608,17 @@
+ const struct drm_ioctl_desc *ioctls;
+ int num_ioctls;
+ const struct file_operations *fops;
+- union {
+- struct pci_driver *pci;
+- struct platform_device *platform_device;
+- struct usb_driver *usb;
+- } kdriver;
+- struct drm_bus *bus;
+
+ /* List of devices hanging off this driver with stealth attach. */
+ struct list_head legacy_dev_list;
+ };
+
+-#define DRM_MINOR_UNASSIGNED 0
+-#define DRM_MINOR_LEGACY 1
+-#define DRM_MINOR_CONTROL 2
+-#define DRM_MINOR_RENDER 3
++enum drm_minor_type {
++ DRM_MINOR_LEGACY,
++ DRM_MINOR_CONTROL,
++ DRM_MINOR_RENDER,
++ DRM_MINOR_CNT,
++};
+
+ /**
+ * Info file list entry. This structure represents a debugfs or proc file to
+@@ -1040,7 +647,6 @@
+ struct drm_minor {
+ int index; /**< Minor device number */
+ int type; /**< Control or render */
+- dev_t device; /**< Device number for mknod */
+ struct device *kdev; /**< Linux device */
+ struct drm_device *dev;
+
+@@ -1049,26 +655,11 @@
+ struct list_head debugfs_list;
+ struct mutex debugfs_lock; /* Protects debugfs_list. */
+
+- struct drm_master *master; /* currently active master for this node */
+- struct list_head master_list;
++ /* currently active master for this node. Protected by master_mutex */
++ struct drm_master *master;
+ struct drm_mode_group mode_group;
+ };
+
+-/* mode specified on the command line */
+-struct drm_cmdline_mode {
+- bool specified;
+- bool refresh_specified;
+- bool bpp_specified;
+- int xres, yres;
+- int bpp;
+- int refresh;
+- bool rb;
+- bool interlace;
+- bool cvt;
+- bool margins;
+- enum drm_connector_force force;
+-};
+-
+
+ struct drm_pending_vblank_event {
+ struct drm_pending_event base;
+@@ -1077,14 +668,17 @@
+ };
+
+ struct drm_vblank_crtc {
++ struct drm_device *dev; /* pointer to the drm_device */
+ wait_queue_head_t queue; /**< VBLANK wait queue */
+ struct timeval time[DRM_VBLANKTIME_RBSIZE]; /**< timestamp of current count */
++ struct timer_list disable_timer; /* delayed disable timer */
+ atomic_t count; /**< number of VBLANK interrupts */
+ atomic_t refcount; /* number of users of vblank interrupts per crtc */
+ u32 last; /* protected by dev->vbl_lock, used */
+ /* for wraparound handling */
+ u32 last_wait; /* Last vblank seqno waited per CRTC */
+ unsigned int inmodeset; /* Display driver is setting mode */
++ int crtc; /* crtc index */
+ bool enabled; /* so we don't call enable more than
+ once per disable */
+ };
+@@ -1095,18 +689,32 @@
+ */
+ struct drm_device {
+ struct list_head legacy_dev_list;/**< list of devices per driver for stealth attach cleanup */
+- char *devname; /**< For /proc/interrupts */
+ int if_version; /**< Highest interface version set */
+
++ /** \name Lifetime Management */
++ /*@{ */
++ struct kref ref; /**< Object ref-count */
++ struct device *dev; /**< Device structure of bus-device */
++ struct drm_driver *driver; /**< DRM driver managing the device */
++ void *dev_private; /**< DRM driver private data */
++ struct drm_minor *control; /**< Control node */
++ struct drm_minor *primary; /**< Primary node */
++ struct drm_minor *render; /**< Render node */
++ atomic_t unplugged; /**< Flag whether dev is dead */
++ struct inode *anon_inode; /**< inode for private address-space */
++ char *unique; /**< unique name of the device */
++ /*@} */
++
+ /** \name Locks */
+ /*@{ */
+- spinlock_t count_lock; /**< For inuse, drm_device::open_count, drm_device::buf_use */
+ struct mutex struct_mutex; /**< For others */
++ struct mutex master_mutex; /**< For drm_minor::master and drm_file::is_master */
+ /*@} */
+
+ /** \name Usage Counters */
+ /*@{ */
+- int open_count; /**< Outstanding files open */
++ int open_count; /**< Outstanding files open, protected by drm_global_mutex. */
++ spinlock_t buf_lock; /**< For drm_device::buf_use and a few other things. */
+ int buf_use; /**< Buffers in use -- cannot alloc */
+ atomic_t buf_alloc; /**< Buffer allocation in progress */
+ /*@} */
+@@ -1137,6 +745,8 @@
+ /** \name Context support */
+ /*@{ */
+ bool irq_enabled; /**< True if irq handler is enabled */
++ int irq;
++
+ __volatile__ long context_flag; /**< Context swapping flag */
+ int last_context; /**< Last current context */
+ /*@} */
+@@ -1152,12 +762,21 @@
+ */
+ bool vblank_disable_allowed;
+
++ /*
++ * If true, vblank interrupt will be disabled immediately when the
++ * refcount drops to zero, as opposed to via the vblank disable
++ * timer.
++ * This can be set to true if the hardware has a working vblank
++ * counter and the driver uses drm_vblank_on() and drm_vblank_off()
++ * appropriately.
++ */
++ bool vblank_disable_immediate;
++
+ /* array of size num_crtcs */
+ struct drm_vblank_crtc *vblank;
+
+ spinlock_t vblank_time_lock; /**< Protects vblank count and time updates during vblank enable/disable */
+ spinlock_t vbl_lock;
+- struct timer_list vblank_disable_timer;
+
+ u32 max_vblank_count; /**< size of vblank counter register */
+
+@@ -1171,30 +790,26 @@
+
+ struct drm_agp_head *agp; /**< AGP data */
+
+- struct device *dev; /**< Device structure */
+ struct pci_dev *pdev; /**< PCI device structure */
+ #ifdef __alpha__
+ struct pci_controller *hose;
+ #endif
+
+ struct platform_device *platformdev; /**< Platform device structure */
+- struct usb_device *usbdev;
+
+ struct drm_sg_mem *sg; /**< Scatter gather memory */
+ unsigned int num_crtcs; /**< Number of CRTCs on this device */
+- void *dev_private; /**< device private data */
+- struct address_space *dev_mapping;
+- struct drm_sigdata sigdata; /**< For block_all_signals */
+ sigset_t sigmask;
+
+- struct drm_driver *driver;
++ struct {
++ int context;
++ struct drm_hw_lock *lock;
++ } sigdata;
++
+ struct drm_local_map *agp_buffer_map;
+ unsigned int agp_buffer_token;
+- struct drm_minor *control; /**< Control node for card */
+- struct drm_minor *primary; /**< render type primary screen head */
+- struct drm_minor *render; /**< render node for card */
+
+- struct drm_mode_config mode_config; /**< Current mode config */
++ struct drm_mode_config mode_config; /**< Current mode config */
+
+ /** \name GEM information */
+ /*@{ */
+@@ -1203,8 +818,6 @@
+ struct drm_vma_offset_manager *vma_offset_manager;
+ /*@} */
+ int switch_power_state;
+-
+- atomic_t unplugged; /* device has been unplugged or gone away */
+ };
+
+ #define DRM_SWITCH_POWER_ON 0
+@@ -1218,11 +831,6 @@
+ return ((dev->driver->driver_features & feature) ? 1 : 0);
+ }
+
+-static inline int drm_dev_to_irq(struct drm_device *dev)
+-{
+- return dev->driver->bus->get_irq(dev);
+-}
+-
+ static inline void drm_device_set_unplugged(struct drm_device *dev)
+ {
+ smp_wmb();
+@@ -1236,14 +844,19 @@
+ return ret;
+ }
+
+-static inline bool drm_modeset_is_locked(struct drm_device *dev)
++static inline bool drm_is_render_client(const struct drm_file *file_priv)
+ {
+- return mutex_is_locked(&dev->mode_config.mutex);
++ return file_priv->minor->type == DRM_MINOR_RENDER;
+ }
+
+-static inline bool drm_is_render_client(struct drm_file *file_priv)
++static inline bool drm_is_control_client(const struct drm_file *file_priv)
+ {
+- return file_priv->minor->type == DRM_MINOR_RENDER;
++ return file_priv->minor->type == DRM_MINOR_CONTROL;
++}
++
++static inline bool drm_is_primary_client(const struct drm_file *file_priv)
++{
++ return file_priv->minor->type == DRM_MINOR_LEGACY;
+ }
+
+ /******************************************************************/
+@@ -1255,136 +868,33 @@
+ unsigned int cmd, unsigned long arg);
+ extern long drm_compat_ioctl(struct file *filp,
+ unsigned int cmd, unsigned long arg);
+-extern int drm_lastclose(struct drm_device *dev);
++extern bool drm_ioctl_flags(unsigned int nr, unsigned int *flags);
+
+ /* Device support (drm_fops.h) */
+-extern struct mutex drm_global_mutex;
+ extern int drm_open(struct inode *inode, struct file *filp);
+-extern int drm_stub_open(struct inode *inode, struct file *filp);
+ extern ssize_t drm_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *offset);
+ extern int drm_release(struct inode *inode, struct file *filp);
+
+ /* Mapping support (drm_vm.h) */
+-extern int drm_mmap(struct file *filp, struct vm_area_struct *vma);
+-extern int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma);
+-extern void drm_vm_open_locked(struct drm_device *dev, struct vm_area_struct *vma);
+-extern void drm_vm_close_locked(struct drm_device *dev, struct vm_area_struct *vma);
+ extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);
+
+- /* Memory management support (drm_memory.h) */
+-#include <drm/drm_memory.h>
+-
+-
+- /* Misc. IOCTL support (drm_ioctl.h) */
+-extern int drm_irq_by_busid(struct drm_device *dev, void *data,
+- struct drm_file *file_priv);
+-extern int drm_getunique(struct drm_device *dev, void *data,
+- struct drm_file *file_priv);
+-extern int drm_setunique(struct drm_device *dev, void *data,
+- struct drm_file *file_priv);
+-extern int drm_getmap(struct drm_device *dev, void *data,
+- struct drm_file *file_priv);
+-extern int drm_getclient(struct drm_device *dev, void *data,
+- struct drm_file *file_priv);
+-extern int drm_getstats(struct drm_device *dev, void *data,
+- struct drm_file *file_priv);
+-extern int drm_getcap(struct drm_device *dev, void *data,
+- struct drm_file *file_priv);
+-extern int drm_setclientcap(struct drm_device *dev, void *data,
+- struct drm_file *file_priv);
+-extern int drm_setversion(struct drm_device *dev, void *data,
+- struct drm_file *file_priv);
+-extern int drm_noop(struct drm_device *dev, void *data,
+- struct drm_file *file_priv);
+-
+- /* Context IOCTL support (drm_context.h) */
+-extern int drm_resctx(struct drm_device *dev, void *data,
+- struct drm_file *file_priv);
+-extern int drm_addctx(struct drm_device *dev, void *data,
+- struct drm_file *file_priv);
+-extern int drm_getctx(struct drm_device *dev, void *data,
+- struct drm_file *file_priv);
+-extern int drm_switchctx(struct drm_device *dev, void *data,
+- struct drm_file *file_priv);
+-extern int drm_newctx(struct drm_device *dev, void *data,
+- struct drm_file *file_priv);
+-extern int drm_rmctx(struct drm_device *dev, void *data,
+- struct drm_file *file_priv);
+-
+-extern int drm_ctxbitmap_init(struct drm_device *dev);
+-extern void drm_ctxbitmap_cleanup(struct drm_device *dev);
+-extern void drm_ctxbitmap_free(struct drm_device *dev, int ctx_handle);
+-
+-extern int drm_setsareactx(struct drm_device *dev, void *data,
+- struct drm_file *file_priv);
+-extern int drm_getsareactx(struct drm_device *dev, void *data,
+- struct drm_file *file_priv);
+-
+- /* Authentication IOCTL support (drm_auth.h) */
+-extern int drm_getmagic(struct drm_device *dev, void *data,
+- struct drm_file *file_priv);
+-extern int drm_authmagic(struct drm_device *dev, void *data,
+- struct drm_file *file_priv);
+-extern int drm_remove_magic(struct drm_master *master, drm_magic_t magic);
++/* Misc. IOCTL support (drm_ioctl.c) */
++int drm_noop(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
+
+ /* Cache management (drm_cache.c) */
+ void drm_clflush_pages(struct page *pages[], unsigned long num_pages);
+ void drm_clflush_sg(struct sg_table *st);
+-void drm_clflush_virt_range(char *addr, unsigned long length);
+-
+- /* Locking IOCTL support (drm_lock.h) */
+-extern int drm_lock(struct drm_device *dev, void *data,
+- struct drm_file *file_priv);
+-extern int drm_unlock(struct drm_device *dev, void *data,
+- struct drm_file *file_priv);
+-extern int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context);
+-extern void drm_idlelock_take(struct drm_lock_data *lock_data);
+-extern void drm_idlelock_release(struct drm_lock_data *lock_data);
++void drm_clflush_virt_range(void *addr, unsigned long length);
+
+ /*
+ * These are exported to drivers so that they can implement fencing using
+ * DMA quiescent + idle. DMA quiescent usually requires the hardware lock.
+ */
+
+-extern int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv);
+-
+- /* Buffer management support (drm_bufs.h) */
+-extern int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc * request);
+-extern int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc * request);
+-extern int drm_addmap(struct drm_device *dev, resource_size_t offset,
+- unsigned int size, enum drm_map_type type,
+- enum drm_map_flags flags, struct drm_local_map **map_ptr);
+-extern int drm_addmap_ioctl(struct drm_device *dev, void *data,
+- struct drm_file *file_priv);
+-extern int drm_rmmap(struct drm_device *dev, struct drm_local_map *map);
+-extern int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map);
+-extern int drm_rmmap_ioctl(struct drm_device *dev, void *data,
+- struct drm_file *file_priv);
+-extern int drm_addbufs(struct drm_device *dev, void *data,
+- struct drm_file *file_priv);
+-extern int drm_infobufs(struct drm_device *dev, void *data,
+- struct drm_file *file_priv);
+-extern int drm_markbufs(struct drm_device *dev, void *data,
+- struct drm_file *file_priv);
+-extern int drm_freebufs(struct drm_device *dev, void *data,
+- struct drm_file *file_priv);
+-extern int drm_mapbufs(struct drm_device *dev, void *data,
+- struct drm_file *file_priv);
+-extern int drm_dma_ioctl(struct drm_device *dev, void *data,
+- struct drm_file *file_priv);
+-
+- /* DMA support (drm_dma.h) */
+-extern int drm_legacy_dma_setup(struct drm_device *dev);
+-extern void drm_legacy_dma_takedown(struct drm_device *dev);
+-extern void drm_free_buffer(struct drm_device *dev, struct drm_buf * buf);
+-extern void drm_core_reclaim_buffers(struct drm_device *dev,
+- struct drm_file *filp);
+-
+ /* IRQ support (drm_irq.h) */
+-extern int drm_control(struct drm_device *dev, void *data,
+- struct drm_file *file_priv);
+-extern int drm_irq_install(struct drm_device *dev);
++extern int drm_irq_install(struct drm_device *dev, int irq);
+ extern int drm_irq_uninstall(struct drm_device *dev);
+
+ extern int drm_vblank_init(struct drm_device *dev, int num_crtcs);
+@@ -1398,10 +908,16 @@
+ extern bool drm_handle_vblank(struct drm_device *dev, int crtc);
+ extern int drm_vblank_get(struct drm_device *dev, int crtc);
+ extern void drm_vblank_put(struct drm_device *dev, int crtc);
++extern int drm_crtc_vblank_get(struct drm_crtc *crtc);
++extern void drm_crtc_vblank_put(struct drm_crtc *crtc);
++extern void drm_wait_one_vblank(struct drm_device *dev, int crtc);
++extern void drm_crtc_wait_one_vblank(struct drm_crtc *crtc);
+ extern void drm_vblank_off(struct drm_device *dev, int crtc);
++extern void drm_vblank_on(struct drm_device *dev, int crtc);
++extern void drm_crtc_vblank_off(struct drm_crtc *crtc);
++extern void drm_crtc_vblank_on(struct drm_crtc *crtc);
+ extern void drm_vblank_cleanup(struct drm_device *dev);
+-extern u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
+- struct timeval *tvblank, unsigned flags);
++
+ extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
+ int crtc, int *max_error,
+ struct timeval *vblank_time,
+@@ -1411,73 +927,38 @@
+ extern void drm_calc_timestamping_constants(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode);
+
+-extern bool
+-drm_mode_parse_command_line_for_connector(const char *mode_option,
+- struct drm_connector *connector,
+- struct drm_cmdline_mode *mode);
+-
+-extern struct drm_display_mode *
+-drm_mode_create_from_cmdline_mode(struct drm_device *dev,
+- struct drm_cmdline_mode *cmd);
+-
+-extern int drm_display_mode_from_videomode(const struct videomode *vm,
+- struct drm_display_mode *dmode);
+-extern int of_get_drm_display_mode(struct device_node *np,
+- struct drm_display_mode *dmode,
+- int index);
++/**
++ * drm_crtc_vblank_waitqueue - get vblank waitqueue for the CRTC
++ * @crtc: which CRTC's vblank waitqueue to retrieve
++ *
++ * This function returns a pointer to the vblank waitqueue for the CRTC.
++ * Drivers can use this to implement vblank waits using wait_event() & co.
++ */
++static inline wait_queue_head_t *drm_crtc_vblank_waitqueue(struct drm_crtc *crtc)
++{
++ return &crtc->dev->vblank[drm_crtc_index(crtc)].queue;
++}
+
+ /* Modesetting support */
+ extern void drm_vblank_pre_modeset(struct drm_device *dev, int crtc);
+ extern void drm_vblank_post_modeset(struct drm_device *dev, int crtc);
+-extern int drm_modeset_ctl(struct drm_device *dev, void *data,
+- struct drm_file *file_priv);
+-
+- /* AGP/GART support (drm_agpsupport.h) */
+-
+-#include <drm/drm_agpsupport.h>
+
+ /* Stub support (drm_stub.h) */
+-extern int drm_setmaster_ioctl(struct drm_device *dev, void *data,
+- struct drm_file *file_priv);
+-extern int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
+- struct drm_file *file_priv);
+-struct drm_master *drm_master_create(struct drm_minor *minor);
+ extern struct drm_master *drm_master_get(struct drm_master *master);
+ extern void drm_master_put(struct drm_master **master);
+
+ extern void drm_put_dev(struct drm_device *dev);
+ extern void drm_unplug_dev(struct drm_device *dev);
+ extern unsigned int drm_debug;
+-extern unsigned int drm_rnodes;
+-
+-extern unsigned int drm_vblank_offdelay;
+-extern unsigned int drm_timestamp_precision;
+-extern unsigned int drm_timestamp_monotonic;
+-
+-extern struct class *drm_class;
+-extern struct dentry *drm_debugfs_root;
+-
+-extern struct idr drm_minors_idr;
+-
+-extern struct drm_local_map *drm_getsarea(struct drm_device *dev);
+
+ /* Debugfs support */
+ #if defined(CONFIG_DEBUG_FS)
+-extern int drm_debugfs_init(struct drm_minor *minor, int minor_id,
+- struct dentry *root);
+ extern int drm_debugfs_create_files(const struct drm_info_list *files,
+ int count, struct dentry *root,
+ struct drm_minor *minor);
+ extern int drm_debugfs_remove_files(const struct drm_info_list *files,
+ int count, struct drm_minor *minor);
+-extern int drm_debugfs_cleanup(struct drm_minor *minor);
+ #else
+-static inline int drm_debugfs_init(struct drm_minor *minor, int minor_id,
+- struct dentry *root)
+-{
+- return 0;
+-}
+-
+ static inline int drm_debugfs_create_files(const struct drm_info_list *files,
+ int count, struct dentry *root,
+ struct drm_minor *minor)
+@@ -1490,22 +971,8 @@
+ {
+ return 0;
+ }
+-
+-static inline int drm_debugfs_cleanup(struct drm_minor *minor)
+-{
+- return 0;
+-}
+ #endif
+
+- /* Info file support */
+-extern int drm_name_info(struct seq_file *m, void *data);
+-extern int drm_vm_info(struct seq_file *m, void *data);
+-extern int drm_bufs_info(struct seq_file *m, void *data);
+-extern int drm_vblank_info(struct seq_file *m, void *data);
+-extern int drm_clients_info(struct seq_file *m, void* data);
+-extern int drm_gem_name_info(struct seq_file *m, void *data);
+-
+-
+ extern struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
+ struct drm_gem_object *obj, int flags);
+ extern int drm_gem_prime_handle_to_fd(struct drm_device *dev,
+@@ -1517,153 +984,31 @@
+ struct drm_file *file_priv, int prime_fd, uint32_t *handle);
+ extern void drm_gem_dmabuf_release(struct dma_buf *dma_buf);
+
+-extern int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
+- struct drm_file *file_priv);
+-extern int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
+- struct drm_file *file_priv);
+-
+ extern int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
+ dma_addr_t *addrs, int max_pages);
+-extern struct sg_table *drm_prime_pages_to_sg(struct page **pages, int nr_pages);
++extern struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages);
+ extern void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg);
+
+-int drm_gem_dumb_destroy(struct drm_file *file,
+- struct drm_device *dev,
+- uint32_t handle);
+-
+-void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv);
+-void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv);
+-void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf);
+-
+-#if DRM_DEBUG_CODE
+-extern int drm_vma_info(struct seq_file *m, void *data);
+-#endif
+-
+- /* Scatter Gather Support (drm_scatter.h) */
+-extern void drm_legacy_sg_cleanup(struct drm_device *dev);
+-extern int drm_sg_alloc(struct drm_device *dev, void *data,
+- struct drm_file *file_priv);
+-extern int drm_sg_free(struct drm_device *dev, void *data,
+- struct drm_file *file_priv);
+
+- /* ATI PCIGART support (ati_pcigart.h) */
+-extern int drm_ati_pcigart_init(struct drm_device *dev,
+- struct drm_ati_pcigart_info * gart_info);
+-extern int drm_ati_pcigart_cleanup(struct drm_device *dev,
+- struct drm_ati_pcigart_info * gart_info);
+-
+-extern drm_dma_handle_t *drm_pci_alloc(struct drm_device *dev, size_t size,
+- size_t align);
+-extern void __drm_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah);
+-extern void drm_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah);
++extern struct drm_dma_handle *drm_pci_alloc(struct drm_device *dev, size_t size,
++ size_t align);
++extern void drm_pci_free(struct drm_device *dev, struct drm_dma_handle * dmah);
+
+ /* sysfs support (drm_sysfs.c) */
+-struct drm_sysfs_class;
+-extern struct class *drm_sysfs_create(struct module *owner, char *name);
+-extern void drm_sysfs_destroy(void);
+-extern int drm_sysfs_device_add(struct drm_minor *minor);
+ extern void drm_sysfs_hotplug_event(struct drm_device *dev);
+-extern void drm_sysfs_device_remove(struct drm_minor *minor);
+-extern int drm_sysfs_connector_add(struct drm_connector *connector);
+-extern void drm_sysfs_connector_remove(struct drm_connector *connector);
+-
+-/* Graphics Execution Manager library functions (drm_gem.c) */
+-int drm_gem_init(struct drm_device *dev);
+-void drm_gem_destroy(struct drm_device *dev);
+-void drm_gem_object_release(struct drm_gem_object *obj);
+-void drm_gem_object_free(struct kref *kref);
+-int drm_gem_object_init(struct drm_device *dev,
+- struct drm_gem_object *obj, size_t size);
+-void drm_gem_private_object_init(struct drm_device *dev,
+- struct drm_gem_object *obj, size_t size);
+-void drm_gem_vm_open(struct vm_area_struct *vma);
+-void drm_gem_vm_close(struct vm_area_struct *vma);
+-int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
+- struct vm_area_struct *vma);
+-int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
+
+-#include <drm/drm_global.h>
+-
+-static inline void
+-drm_gem_object_reference(struct drm_gem_object *obj)
+-{
+- kref_get(&obj->refcount);
+-}
+-
+-static inline void
+-drm_gem_object_unreference(struct drm_gem_object *obj)
+-{
+- if (obj != NULL)
+- kref_put(&obj->refcount, drm_gem_object_free);
+-}
+-
+-static inline void
+-drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
+-{
+- if (obj && !atomic_add_unless(&obj->refcount.refcount, -1, 1)) {
+- struct drm_device *dev = obj->dev;
+-
+- mutex_lock(&dev->struct_mutex);
+- if (likely(atomic_dec_and_test(&obj->refcount.refcount)))
+- drm_gem_object_free(&obj->refcount);
+- mutex_unlock(&dev->struct_mutex);
+- }
+-}
+-
+-int drm_gem_handle_create_tail(struct drm_file *file_priv,
+- struct drm_gem_object *obj,
+- u32 *handlep);
+-int drm_gem_handle_create(struct drm_file *file_priv,
+- struct drm_gem_object *obj,
+- u32 *handlep);
+-int drm_gem_handle_delete(struct drm_file *filp, u32 handle);
+-
+-
+-void drm_gem_free_mmap_offset(struct drm_gem_object *obj);
+-int drm_gem_create_mmap_offset(struct drm_gem_object *obj);
+-int drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size);
+-
+-struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
+-void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
+- bool dirty, bool accessed);
+-
+-struct drm_gem_object *drm_gem_object_lookup(struct drm_device *dev,
+- struct drm_file *filp,
+- u32 handle);
+-int drm_gem_close_ioctl(struct drm_device *dev, void *data,
+- struct drm_file *file_priv);
+-int drm_gem_flink_ioctl(struct drm_device *dev, void *data,
+- struct drm_file *file_priv);
+-int drm_gem_open_ioctl(struct drm_device *dev, void *data,
+- struct drm_file *file_priv);
+-void drm_gem_open(struct drm_device *dev, struct drm_file *file_private);
+-void drm_gem_release(struct drm_device *dev, struct drm_file *file_private);
+-
+-extern void drm_core_ioremap(struct drm_local_map *map, struct drm_device *dev);
+-extern void drm_core_ioremap_wc(struct drm_local_map *map, struct drm_device *dev);
+-extern void drm_core_ioremapfree(struct drm_local_map *map, struct drm_device *dev);
+-
+-static __inline__ struct drm_local_map *drm_core_findmap(struct drm_device *dev,
+- unsigned int token)
+-{
+- struct drm_map_list *_entry;
+- list_for_each_entry(_entry, &dev->maplist, head)
+- if (_entry->user_token == token)
+- return _entry->map;
+- return NULL;
+-}
+-
+-static __inline__ void drm_core_dropmap(struct drm_local_map *map)
+-{
+-}
+-
+-#include <drm/drm_mem_util.h>
+
+ struct drm_device *drm_dev_alloc(struct drm_driver *driver,
+ struct device *parent);
+-void drm_dev_free(struct drm_device *dev);
++void drm_dev_ref(struct drm_device *dev);
++void drm_dev_unref(struct drm_device *dev);
+ int drm_dev_register(struct drm_device *dev, unsigned long flags);
+ void drm_dev_unregister(struct drm_device *dev);
++int drm_dev_set_unique(struct drm_device *dev, const char *fmt, ...);
++
++struct drm_minor *drm_minor_acquire(unsigned int minor_id);
++void drm_minor_release(struct drm_minor *minor);
++
+ /*@}*/
+
+ /* PCI section */
+@@ -1683,9 +1028,25 @@
+
+ extern int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver);
+ extern void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver);
++#ifdef CONFIG_PCI
+ extern int drm_get_pci_dev(struct pci_dev *pdev,
+ const struct pci_device_id *ent,
+ struct drm_driver *driver);
++extern int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master);
++#else
++static inline int drm_get_pci_dev(struct pci_dev *pdev,
++ const struct pci_device_id *ent,
++ struct drm_driver *driver)
++{
++ return -ENOSYS;
++}
++
++static inline int drm_pci_set_busid(struct drm_device *dev,
++ struct drm_master *master)
++{
++ return -ENOSYS;
++}
++#endif
+
+ #define DRM_PCIE_SPEED_25 1
+ #define DRM_PCIE_SPEED_50 2
+@@ -1695,6 +1056,7 @@
+
+ /* platform section */
+ extern int drm_platform_init(struct drm_driver *driver, struct platform_device *platform_device);
++extern int drm_platform_set_busid(struct drm_device *d, struct drm_master *m);
+
+ /* returns true if currently okay to sleep */
+ static __inline__ bool drm_can_sleep(void)
+@@ -1704,5 +1066,4 @@
+ return true;
+ }
+
+-#endif /* __KERNEL__ */
+ #endif
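+
+The drm_crtc_vblank_waitqueue() helper above is meant for open-coded waits; a
+minimal sketch of that pattern follows (the helper name and target handling
+are illustrative, and drm_vblank_count() is assumed from elsewhere in this
+header):
+
+	/* Sketch: block until the CRTC's vblank count reaches a target. */
+	static void example_wait_for_vblank_count(struct drm_crtc *crtc, u32 target)
+	{
+		struct drm_device *dev = crtc->dev;
+		int pipe = drm_crtc_index(crtc);
+
+		if (WARN_ON(drm_crtc_vblank_get(crtc)))
+			return;
+
+		wait_event(*drm_crtc_vblank_waitqueue(crtc),
+			   drm_vblank_count(dev, pipe) >= target);
+
+		drm_crtc_vblank_put(crtc);
+	}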
+diff -Naur a/include/drm/drm_plane_helper.h b/include/drm/drm_plane_helper.h
+--- a/include/drm/drm_plane_helper.h 1970-01-01 05:30:00.000000000 +0530
++++ b/include/drm/drm_plane_helper.h 2015-03-26 14:42:38.758435422 +0530
+@@ -0,0 +1,115 @@
++/*
++ * Copyright (C) 2011-2013 Intel Corporation
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
++ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
++ * SOFTWARE.
++ */
++
++#ifndef DRM_PLANE_HELPER_H
++#define DRM_PLANE_HELPER_H
++
++#include <drm/drm_rect.h>
++#include <drm/drm_crtc.h>
++
++/*
++ * Drivers that don't allow primary plane scaling may pass this macro in place
++ * of the min/max scale parameters of the update checker function.
++ *
++ * Due to src being in 16.16 fixed point and dest being in integer pixels,
++ * 1<<16 represents no scaling.
++ */
++#define DRM_PLANE_HELPER_NO_SCALING (1<<16)
++
++/**
++ * DOC: plane helpers
++ *
++ * Helper functions to assist with creation and handling of CRTC primary
++ * planes.
++ */
++
++extern int drm_crtc_init(struct drm_device *dev,
++ struct drm_crtc *crtc,
++ const struct drm_crtc_funcs *funcs);
++
++/**
++ * drm_plane_helper_funcs - helper operations for planes
++ * @prepare_fb: prepare a framebuffer for use by the plane
++ * @cleanup_fb: cleanup a framebuffer when it's no longer used by the plane
++ * @atomic_check: check that a given atomic state is valid and can be applied
++ * @atomic_update: apply an atomic state to the plane
++ *
++ * The helper operations are called by the mid-layer CRTC helper.
++ */
++struct drm_plane_helper_funcs {
++ int (*prepare_fb)(struct drm_plane *plane,
++ struct drm_framebuffer *fb);
++ void (*cleanup_fb)(struct drm_plane *plane,
++ struct drm_framebuffer *fb);
++
++ int (*atomic_check)(struct drm_plane *plane,
++ struct drm_plane_state *state);
++ void (*atomic_update)(struct drm_plane *plane,
++ struct drm_plane_state *old_state);
++};
++
++static inline void drm_plane_helper_add(struct drm_plane *plane,
++ const struct drm_plane_helper_funcs *funcs)
++{
++ plane->helper_private = (void *)funcs;
++}
++
++extern int drm_plane_helper_check_update(struct drm_plane *plane,
++ struct drm_crtc *crtc,
++ struct drm_framebuffer *fb,
++ struct drm_rect *src,
++ struct drm_rect *dest,
++ const struct drm_rect *clip,
++ int min_scale,
++ int max_scale,
++ bool can_position,
++ bool can_update_disabled,
++ bool *visible);
++extern int drm_primary_helper_update(struct drm_plane *plane,
++ struct drm_crtc *crtc,
++ struct drm_framebuffer *fb,
++ int crtc_x, int crtc_y,
++ unsigned int crtc_w, unsigned int crtc_h,
++ uint32_t src_x, uint32_t src_y,
++ uint32_t src_w, uint32_t src_h);
++extern int drm_primary_helper_disable(struct drm_plane *plane);
++extern void drm_primary_helper_destroy(struct drm_plane *plane);
++extern const struct drm_plane_funcs drm_primary_helper_funcs;
++extern struct drm_plane *drm_primary_helper_create_plane(struct drm_device *dev,
++ const uint32_t *formats,
++ int num_formats);
++
++
++int drm_plane_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
++ struct drm_framebuffer *fb,
++ int crtc_x, int crtc_y,
++ unsigned int crtc_w, unsigned int crtc_h,
++ uint32_t src_x, uint32_t src_y,
++ uint32_t src_w, uint32_t src_h);
++int drm_plane_helper_disable(struct drm_plane *plane);
++
++/* For use by drm_crtc_helper.c */
++int drm_plane_helper_commit(struct drm_plane *plane,
++ struct drm_plane_state *plane_state,
++ struct drm_framebuffer *old_fb);
++#endif
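+
+A sketch of how the no-scaling macro above combines with the update checker
+(function and variable names here are illustrative only):
+
+	/* Sketch: validate a primary plane update that must not scale. */
+	static int example_check_primary_update(struct drm_plane *plane,
+						struct drm_crtc *crtc,
+						struct drm_framebuffer *fb,
+						struct drm_rect *src,
+						struct drm_rect *dest)
+	{
+		struct drm_rect clip = { 0, 0, crtc->mode.hdisplay,
+					 crtc->mode.vdisplay };
+		bool visible;
+
+		/* src is 16.16 fixed point, dest is in integer pixels. */
+		return drm_plane_helper_check_update(plane, crtc, fb,
+						     src, dest, &clip,
+						     DRM_PLANE_HELPER_NO_SCALING,
+						     DRM_PLANE_HELPER_NO_SCALING,
+						     false, false, &visible);
+	}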
+diff -Naur a/include/drm/drm_rect.h b/include/drm/drm_rect.h
+--- a/include/drm/drm_rect.h 2015-03-26 14:43:27.890436386 +0530
++++ b/include/drm/drm_rect.h 2015-03-26 14:42:38.758435422 +0530
+@@ -163,5 +163,11 @@
+ struct drm_rect *dst,
+ int min_vscale, int max_vscale);
+ void drm_rect_debug_print(const struct drm_rect *r, bool fixed_point);
++void drm_rect_rotate(struct drm_rect *r,
++ int width, int height,
++ unsigned int rotation);
++void drm_rect_rotate_inv(struct drm_rect *r,
++ int width, int height,
++ unsigned int rotation);
+
+ #endif
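+
+Assuming the new helpers above are exact inverses of each other (as their
+names suggest), a rectangle can be mapped into a rotated coordinate space,
+manipulated there, and mapped back. A sketch using the 180-degree rotation
+flag from drm_crtc.h, with width/height standing for the bounds of the
+coordinate space:
+
+	struct drm_rect r = { .x1 = 16, .y1 = 16, .x2 = 64, .y2 = 48 };
+
+	drm_rect_rotate(&r, width, height, BIT(DRM_ROTATE_180));
+	/* ... work with r in the rotated coordinate space ... */
+	drm_rect_rotate_inv(&r, width, height, BIT(DRM_ROTATE_180));
+	/* r now holds its original coordinates again. */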
+diff -Naur a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
+--- a/include/drm/i915_pciids.h 2015-03-26 14:43:27.890436386 +0530
++++ b/include/drm/i915_pciids.h 2015-03-26 14:42:38.758435422 +0530
+@@ -191,8 +191,8 @@
+ INTEL_VGA_DEVICE(0x0A06, info), /* ULT GT1 mobile */ \
+ INTEL_VGA_DEVICE(0x0A16, info), /* ULT GT2 mobile */ \
+ INTEL_VGA_DEVICE(0x0A26, info), /* ULT GT3 mobile */ \
+- INTEL_VGA_DEVICE(0x0A0E, info), /* ULT GT1 reserved */ \
+- INTEL_VGA_DEVICE(0x0A1E, info), /* ULT GT2 reserved */ \
++ INTEL_VGA_DEVICE(0x0A0E, info), /* ULX GT1 mobile */ \
++ INTEL_VGA_DEVICE(0x0A1E, info), /* ULX GT2 mobile */ \
+ INTEL_VGA_DEVICE(0x0A2E, info), /* ULT GT3 reserved */ \
+ INTEL_VGA_DEVICE(0x0D06, info), /* CRW GT1 mobile */ \
+ INTEL_VGA_DEVICE(0x0D16, info), /* CRW GT2 mobile */ \
+@@ -223,14 +223,57 @@
+ _INTEL_BDW_D(gt, 0x160A, info), /* Server */ \
+ _INTEL_BDW_D(gt, 0x160D, info) /* Workstation */
+
+-#define INTEL_BDW_M_IDS(info) \
++#define INTEL_BDW_GT12M_IDS(info) \
+ _INTEL_BDW_M_IDS(1, info), \
+- _INTEL_BDW_M_IDS(2, info), \
+- _INTEL_BDW_M_IDS(3, info)
++ _INTEL_BDW_M_IDS(2, info)
+
+-#define INTEL_BDW_D_IDS(info) \
++#define INTEL_BDW_GT12D_IDS(info) \
+ _INTEL_BDW_D_IDS(1, info), \
+- _INTEL_BDW_D_IDS(2, info), \
++ _INTEL_BDW_D_IDS(2, info)
++
++#define INTEL_BDW_GT3M_IDS(info) \
++ _INTEL_BDW_M_IDS(3, info)
++
++#define INTEL_BDW_GT3D_IDS(info) \
+ _INTEL_BDW_D_IDS(3, info)
+
++#define INTEL_BDW_RSVDM_IDS(info) \
++ _INTEL_BDW_M_IDS(4, info)
++
++#define INTEL_BDW_RSVDD_IDS(info) \
++ _INTEL_BDW_D_IDS(4, info)
++
++#define INTEL_BDW_M_IDS(info) \
++ INTEL_BDW_GT12M_IDS(info), \
++ INTEL_BDW_GT3M_IDS(info), \
++ INTEL_BDW_RSVDM_IDS(info)
++
++#define INTEL_BDW_D_IDS(info) \
++ INTEL_BDW_GT12D_IDS(info), \
++ INTEL_BDW_GT3D_IDS(info), \
++ INTEL_BDW_RSVDD_IDS(info)
++
++#define INTEL_CHV_IDS(info) \
++ INTEL_VGA_DEVICE(0x22b0, info), \
++ INTEL_VGA_DEVICE(0x22b1, info), \
++ INTEL_VGA_DEVICE(0x22b2, info), \
++ INTEL_VGA_DEVICE(0x22b3, info)
++
++#define INTEL_SKL_IDS(info) \
++ INTEL_VGA_DEVICE(0x1916, info), /* ULT GT2 */ \
++ INTEL_VGA_DEVICE(0x1906, info), /* ULT GT1 */ \
++ INTEL_VGA_DEVICE(0x1926, info), /* ULT GT3 */ \
++ INTEL_VGA_DEVICE(0x1921, info), /* ULT GT2F */ \
++ INTEL_VGA_DEVICE(0x190E, info), /* ULX GT1 */ \
++ INTEL_VGA_DEVICE(0x191E, info), /* ULX GT2 */ \
++ INTEL_VGA_DEVICE(0x1912, info), /* DT GT2 */ \
++ INTEL_VGA_DEVICE(0x1902, info), /* DT GT1 */ \
++ INTEL_VGA_DEVICE(0x191B, info), /* Halo GT2 */ \
++ INTEL_VGA_DEVICE(0x192B, info), /* Halo GT3 */ \
++ INTEL_VGA_DEVICE(0x190B, info), /* Halo GT1 */ \
++ INTEL_VGA_DEVICE(0x191A, info), /* SRV GT2 */ \
++ INTEL_VGA_DEVICE(0x192A, info), /* SRV GT3 */ \
++ INTEL_VGA_DEVICE(0x190A, info), /* SRV GT1 */ \
++ INTEL_VGA_DEVICE(0x191D, info) /* WKS GT2 */
++
+ #endif /* _I915_PCIIDS_H */
+diff -Naur a/include/drm/i915_powerwell.h b/include/drm/i915_powerwell.h
+--- a/include/drm/i915_powerwell.h 2015-03-26 14:43:27.890436386 +0530
++++ b/include/drm/i915_powerwell.h 2015-03-26 14:42:38.758435422 +0530
+@@ -30,7 +30,8 @@
+ #define _I915_POWERWELL_H_
+
+ /* For use by hda_i915 driver */
+-extern void i915_request_power_well(void);
+-extern void i915_release_power_well(void);
++extern int i915_request_power_well(void);
++extern int i915_release_power_well(void);
++extern int i915_get_cdclk_freq(void);
+
+ #endif /* _I915_POWERWELL_H_ */
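
Since the request/release calls above now return int, the HDA controller
driver can detect the case where i915 is not bound yet. A plausible
caller-side sketch:

    int err = i915_request_power_well();
    if (err < 0)
            return err;     /* i915 missing, or the request failed */

    /* ... access the display audio registers ... */

    WARN_ON(i915_release_power_well() < 0);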
+diff -Naur a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
+--- a/include/drm/ttm/ttm_bo_api.h 2015-03-26 14:43:27.890436386 +0530
++++ b/include/drm/ttm/ttm_bo_api.h 2015-03-26 14:42:38.758435422 +0530
+@@ -45,12 +45,24 @@
+
+ struct drm_mm_node;
+
++/**
++ * struct ttm_place
++ *
++ * @fpfn: first valid page frame number to put the object
++ * @lpfn: last valid page frame number to put the object
++ * @flags: memory domain and caching flags for the object
++ *
++ * Structure indicating a possible place to put an object.
++ */
++struct ttm_place {
++ unsigned fpfn;
++ unsigned lpfn;
++ uint32_t flags;
++};
+
+ /**
+ * struct ttm_placement
+ *
+- * @fpfn: first valid page frame number to put the object
+- * @lpfn: last valid page frame number to put the object
+ * @num_placement: number of preferred placements
+ * @placement: preferred placements
+ * @num_busy_placement: number of preferred placements when need to evict buffer
+@@ -59,12 +71,10 @@
+ * Structure indicating the placement you request for an object.
+ */
+ struct ttm_placement {
+- unsigned fpfn;
+- unsigned lpfn;
+- unsigned num_placement;
+- const uint32_t *placement;
+- unsigned num_busy_placement;
+- const uint32_t *busy_placement;
++ unsigned num_placement;
++ const struct ttm_place *placement;
++ unsigned num_busy_placement;
++ const struct ttm_place *busy_placement;
+ };
+
+ /**
+@@ -163,7 +173,6 @@
+ * @lru: List head for the lru list.
+ * @ddestroy: List head for the delayed destroy list.
+ * @swap: List head for swap LRU list.
+- * @sync_obj: Pointer to a synchronization object.
+ * @priv_flags: Flags describing buffer object internal state.
+ * @vma_node: Address space manager node.
+ * @offset: The current GPU offset, which can have different meanings
+@@ -227,13 +236,9 @@
+ struct list_head io_reserve_lru;
+
+ /**
+- * Members protected by struct buffer_object_device::fence_lock
+- * In addition, setting sync_obj to anything else
+- * than NULL requires bo::reserved to be held. This allows for
+- * checking NULL while reserved but not holding the mentioned lock.
++ * Members protected by a bo reservation.
+ */
+
+- void *sync_obj;
+ unsigned long priv_flags;
+
+ struct drm_vma_offset_node vma_node;
+@@ -455,6 +460,7 @@
+ * point to the shmem object backing a GEM object if TTM is used to back a
+ * GEM user interface.
+ * @acc_size: Accounted size for this object.
++ * @resv: Pointer to a reservation_object, or NULL to let ttm allocate one.
+ * @destroy: Destroy function. Use NULL for kfree().
+ *
+ * This function initializes a pre-allocated struct ttm_buffer_object.
+@@ -482,16 +488,16 @@
+ struct file *persistent_swap_storage,
+ size_t acc_size,
+ struct sg_table *sg,
++ struct reservation_object *resv,
+ void (*destroy) (struct ttm_buffer_object *));
+
+ /**
+- * ttm_bo_synccpu_object_init
++ * ttm_bo_create
+ *
+ * @bdev: Pointer to a ttm_bo_device struct.
+- * @bo: Pointer to a ttm_buffer_object to be initialized.
+ * @size: Requested size of buffer object.
+ * @type: Requested type of buffer object.
+- * @flags: Initial placement flags.
++ * @placement: Initial placement.
+ * @page_alignment: Data alignment in pages.
+ * @interruptible: If needing to sleep while waiting for GPU resources,
+ * sleep interruptible.
+@@ -520,20 +526,6 @@
+ struct ttm_buffer_object **p_bo);
+
+ /**
+- * ttm_bo_check_placement
+- *
+- * @bo: the buffer object.
+- * @placement: placements
+- *
+- * Performs minimal validity checking on an intended change of
+- * placement flags.
+- * Returns
+- * -EINVAL: Intended change is invalid or not allowed.
+- */
+-extern int ttm_bo_check_placement(struct ttm_buffer_object *bo,
+- struct ttm_placement *placement);
+-
+-/**
+ * ttm_bo_init_mm
+ *
+ * @bdev: Pointer to a ttm_bo_device struct.
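
The new struct ttm_place moves the fpfn/lpfn range from the placement as a
whole into each individual candidate, so every memory domain can carry its own
range. A hedged sketch of the resulting driver pattern (foo_* is hypothetical;
TTM_PL_FLAG_VRAM/TTM_PL_FLAG_TT are the standard domain flags, and
TTM_PL_FLAG_TOPDOWN is introduced later in this patch):

    static const struct ttm_place foo_places[] = {
            {       /* prefer write-combined VRAM, allocated top-down */
                    .fpfn = 0,
                    .lpfn = 0,      /* 0 means no upper limit */
                    .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC |
                             TTM_PL_FLAG_TOPDOWN,
            },
            {       /* fall back to cached system memory via GTT */
                    .fpfn = 0,
                    .lpfn = 0,
                    .flags = TTM_PL_FLAG_TT | TTM_PL_FLAG_CACHED,
            },
    };

    static const struct ttm_placement foo_placement = {
            .num_placement = ARRAY_SIZE(foo_places),
            .placement = foo_places,
            .num_busy_placement = ARRAY_SIZE(foo_places),
            .busy_placement = foo_places,
    };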
+diff -Naur a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
+--- a/include/drm/ttm/ttm_bo_driver.h 2015-03-26 14:43:27.890436386 +0530
++++ b/include/drm/ttm/ttm_bo_driver.h 2015-03-26 14:42:38.758435422 +0530
+@@ -133,6 +133,7 @@
+ * struct ttm_dma_tt
+ *
+ * @ttm: Base ttm_tt struct.
++ * @cpu_address: The CPU address of the pages
+ * @dma_address: The DMA (bus) addresses of the pages
+ * @pages_list: used by some page allocation backend
+ *
+@@ -142,6 +143,7 @@
+ */
+ struct ttm_dma_tt {
+ struct ttm_tt ttm;
++ void **cpu_address;
+ dma_addr_t *dma_address;
+ struct list_head pages_list;
+ };
+@@ -182,6 +184,7 @@
+ * @man: Pointer to a memory type manager.
+ * @bo: Pointer to the buffer object we're allocating space for.
+ * @placement: Placement details.
++ * @flags: Additional placement flags.
+ * @mem: Pointer to a struct ttm_mem_reg to be filled in.
+ *
+ * This function should allocate space in the memory type managed
+@@ -205,7 +208,7 @@
+ */
+ int (*get_node)(struct ttm_mem_type_manager *man,
+ struct ttm_buffer_object *bo,
+- struct ttm_placement *placement,
++ const struct ttm_place *place,
+ struct ttm_mem_reg *mem);
+
+ /**
+@@ -309,11 +312,6 @@
+ * @move: Callback for a driver to hook in accelerated functions to
+ * move a buffer.
+ * If set to NULL, a potentially slow memcpy() move is used.
+- * @sync_obj_signaled: See ttm_fence_api.h
+- * @sync_obj_wait: See ttm_fence_api.h
+- * @sync_obj_flush: See ttm_fence_api.h
+- * @sync_obj_unref: See ttm_fence_api.h
+- * @sync_obj_ref: See ttm_fence_api.h
+ */
+
+ struct ttm_bo_driver {
+@@ -415,23 +413,6 @@
+ int (*verify_access) (struct ttm_buffer_object *bo,
+ struct file *filp);
+
+- /**
+- * In case a driver writer dislikes the TTM fence objects,
+- * the driver writer can replace those with sync objects of
+- * his / her own. If it turns out that no driver writer is
+- * using these. I suggest we remove these hooks and plug in
+- * fences directly. The bo driver needs the following functionality:
+- * See the corresponding functions in the fence object API
+- * documentation.
+- */
+-
+- bool (*sync_obj_signaled) (void *sync_obj);
+- int (*sync_obj_wait) (void *sync_obj,
+- bool lazy, bool interruptible);
+- int (*sync_obj_flush) (void *sync_obj);
+- void (*sync_obj_unref) (void **sync_obj);
+- void *(*sync_obj_ref) (void *sync_obj);
+-
+ /* hook to notify driver about a driver move so it
+ * can do tiling things */
+ void (*move_notify)(struct ttm_buffer_object *bo,
+@@ -518,8 +499,6 @@
+ *
+ * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver.
+ * @man: An array of mem_type_managers.
+- * @fence_lock: Protects the synchronizing members on *all* bos belonging
+- * to this device.
+ * @vma_manager: Address space manager
+ * lru_lock: Spinlock that protects the buffer+device lru lists and
+ * ddestroy lists.
+@@ -539,7 +518,6 @@
+ struct ttm_bo_global *glob;
+ struct ttm_bo_driver *driver;
+ struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
+- spinlock_t fence_lock;
+
+ /*
+ * Protected by internal locks.
+@@ -653,18 +631,6 @@
+ extern int ttm_tt_swapin(struct ttm_tt *ttm);
+
+ /**
+- * ttm_tt_cache_flush:
+- *
+- * @pages: An array of pointers to struct page:s to flush.
+- * @num_pages: Number of pages to flush.
+- *
+- * Flush the data of the indicated pages from the cpu caches.
+- * This is used when changing caching attributes of the pages from
+- * cache-coherent.
+- */
+-extern void ttm_tt_cache_flush(struct page *pages[], unsigned long num_pages);
+-
+-/**
+ * ttm_tt_set_placement_caching:
+ *
+ * @ttm A struct ttm_tt the backing pages of which will change caching policy.
+@@ -747,6 +713,7 @@
+ * @bdev: A pointer to a struct ttm_bo_device to initialize.
+ * @glob: A pointer to an initialized struct ttm_bo_global.
+ * @driver: A pointer to a struct ttm_bo_driver set up by the caller.
++ * @mapping: The address space to use for this bo.
+ * @file_page_offset: Offset into the device address space that is available
+ * for buffer data. This ensures compatibility with other users of the
+ * address space.
+@@ -758,6 +725,7 @@
+ extern int ttm_bo_device_init(struct ttm_bo_device *bdev,
+ struct ttm_bo_global *glob,
+ struct ttm_bo_driver *driver,
++ struct address_space *mapping,
+ uint64_t file_page_offset, bool need_dma32);
+
+ /**
+@@ -786,7 +754,7 @@
+ extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo);
+
+ /**
+- * ttm_bo_reserve_nolru:
++ * __ttm_bo_reserve:
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @interruptible: Sleep interruptible if waiting.
+@@ -807,10 +775,10 @@
+ * -EALREADY: Bo already reserved using @ticket. This error code will only
+ * be returned if @use_ticket is set to true.
+ */
+-static inline int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
+- bool interruptible,
+- bool no_wait, bool use_ticket,
+- struct ww_acquire_ctx *ticket)
++static inline int __ttm_bo_reserve(struct ttm_buffer_object *bo,
++ bool interruptible,
++ bool no_wait, bool use_ticket,
++ struct ww_acquire_ctx *ticket)
+ {
+ int ret = 0;
+
+@@ -886,8 +854,7 @@
+
+ WARN_ON(!atomic_read(&bo->kref.refcount));
+
+- ret = ttm_bo_reserve_nolru(bo, interruptible, no_wait, use_ticket,
+- ticket);
++ ret = __ttm_bo_reserve(bo, interruptible, no_wait, use_ticket, ticket);
+ if (likely(ret == 0))
+ ttm_bo_del_sub_from_lru(bo);
+
+@@ -927,20 +894,14 @@
+ }
+
+ /**
+- * ttm_bo_unreserve_ticket
++ * __ttm_bo_unreserve
+ * @bo: A pointer to a struct ttm_buffer_object.
+- * @ticket: ww_acquire_ctx used for reserving
+ *
+- * Unreserve a previous reservation of @bo made with @ticket.
++ * Unreserve a previous reservation of @bo where the buffer object is
++ * already on lru lists.
+ */
+-static inline void ttm_bo_unreserve_ticket(struct ttm_buffer_object *bo,
+- struct ww_acquire_ctx *t)
++static inline void __ttm_bo_unreserve(struct ttm_buffer_object *bo)
+ {
+- if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
+- spin_lock(&bo->glob->lru_lock);
+- ttm_bo_add_to_lru(bo);
+- spin_unlock(&bo->glob->lru_lock);
+- }
+ ww_mutex_unlock(&bo->resv->lock);
+ }
+
+@@ -953,7 +914,25 @@
+ */
+ static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
+ {
+- ttm_bo_unreserve_ticket(bo, NULL);
++ if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
++ spin_lock(&bo->glob->lru_lock);
++ ttm_bo_add_to_lru(bo);
++ spin_unlock(&bo->glob->lru_lock);
++ }
++ __ttm_bo_unreserve(bo);
++}
++
++/**
++ * ttm_bo_unreserve_ticket
++ * @bo: A pointer to a struct ttm_buffer_object.
++ * @ticket: ww_acquire_ctx used for reserving
++ *
++ * Unreserve a previous reservation of @bo made with @ticket.
++ */
++static inline void ttm_bo_unreserve_ticket(struct ttm_buffer_object *bo,
++ struct ww_acquire_ctx *t)
++{
++ ttm_bo_unreserve(bo);
+ }
+
+ /*
+@@ -1021,7 +1000,7 @@
+ * ttm_bo_move_accel_cleanup.
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+- * @sync_obj: A sync object that signals when moving is complete.
++ * @fence: A fence object that signals when moving is complete.
+ * @evict: This is an evict move. Don't return until the buffer is idle.
+ * @no_wait_gpu: Return immediately if the GPU is busy.
+ * @new_mem: struct ttm_mem_reg indicating where to move.
+@@ -1035,7 +1014,7 @@
+ */
+
+ extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
+- void *sync_obj,
++ struct fence *fence,
+ bool evict, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem);
+ /**
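
After the renames above, __ttm_bo_reserve()/__ttm_bo_unreserve() are the bare
lock/unlock primitives and the LRU bookkeeping lives only in the wrappers
without the underscores. A hedged sketch of the common pattern:

    /* interruptible, blocking, no ww ticket */
    ret = __ttm_bo_reserve(bo, true, false, false, NULL);
    if (unlikely(ret != 0))
            return ret;

    /* ... validate, populate or map the buffer ... */

    ttm_bo_unreserve(bo);   /* re-adds bo to the LRU unless NO_EVICT */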
+diff -Naur a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h
+--- a/include/drm/ttm/ttm_execbuf_util.h 2015-03-26 14:43:27.890436386 +0530
++++ b/include/drm/ttm/ttm_execbuf_util.h 2015-03-26 14:42:38.758435422 +0530
+@@ -39,19 +39,13 @@
+ *
+ * @head: list head for thread-private list.
+ * @bo: refcounted buffer object pointer.
+- * @reserved: Indicates whether @bo has been reserved for validation.
+- * @removed: Indicates whether @bo has been removed from lru lists.
+- * @put_count: Number of outstanding references on bo::list_kref.
+- * @old_sync_obj: Pointer to a sync object about to be unreferenced
++ * @shared: should the fence be added shared?
+ */
+
+ struct ttm_validate_buffer {
+ struct list_head head;
+ struct ttm_buffer_object *bo;
+- bool reserved;
+- bool removed;
+- int put_count;
+- void *old_sync_obj;
++ bool shared;
+ };
+
+ /**
+@@ -73,6 +67,8 @@
+ * @ticket: [out] ww_acquire_ctx filled in by call, or NULL if only
+ * non-blocking reserves should be tried.
+ * @list: thread private list of ttm_validate_buffer structs.
++ * @intr: should the wait be interruptible
++ * @dups: [out] optional list of duplicates.
+ *
+ * Tries to reserve bos pointed to by the list entries for validation.
+ * If the function returns 0, all buffers are marked as "unfenced",
+@@ -84,9 +80,14 @@
+ * CPU write reservations to be cleared, and for other threads to
+ * unreserve their buffers.
+ *
+- * This function may return -ERESTART or -EAGAIN if the calling process
+- * receives a signal while waiting. In that case, no buffers on the list
+- * will be reserved upon return.
++ * If intr is set to true, this function may return -ERESTARTSYS if the
++ * calling process receives a signal while waiting. In that case, no
++ * buffers on the list will be reserved upon return.
++ *
++ * If dups is non NULL all buffers already reserved by the current thread
++ * (e.g. duplicates) are added to this list, otherwise -EALREADY is returned
++ * on the first already reserved buffer and all buffers from the list are
++ * unreserved again.
+ *
+ * Buffers reserved by this function should be unreserved by
+ * a call to either ttm_eu_backoff_reservation() or
+@@ -95,14 +96,15 @@
+ */
+
+ extern int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
+- struct list_head *list);
++ struct list_head *list, bool intr,
++ struct list_head *dups);
+
+ /**
+ * function ttm_eu_fence_buffer_objects.
+ *
+ * @ticket: ww_acquire_ctx from reserve call
+ * @list: thread private list of ttm_validate_buffer structs.
+- * @sync_obj: The new sync object for the buffers.
++ * @fence: The new exclusive fence for the buffers.
+ *
+ * This function should be called when command submission is complete, and
+ * it will add a new sync object to bos pointed to by entries on @list.
+@@ -111,6 +113,7 @@
+ */
+
+ extern void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
+- struct list_head *list, void *sync_obj);
++ struct list_head *list,
++ struct fence *fence);
+
+ #endif
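
Putting the reworked interface together: callers now pass the interruptible
flag and an optional duplicates list to the reserve call, and a struct fence
rather than an opaque sync object to the fencing call. A hedged sketch,
assuming val_list holds ttm_validate_buffer entries and fence comes from
command submission:

    struct ww_acquire_ctx ticket;
    LIST_HEAD(dups);
    int ret;

    ret = ttm_eu_reserve_buffers(&ticket, &val_list, true, &dups);
    if (ret)
            return ret;

    /* ... build and submit the command stream, producing fence ... */

    ttm_eu_fence_buffer_objects(&ticket, &val_list, fence);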
+diff -Naur a/include/drm/ttm/ttm_placement.h b/include/drm/ttm/ttm_placement.h
+--- a/include/drm/ttm/ttm_placement.h 2015-03-26 14:43:27.890436386 +0530
++++ b/include/drm/ttm/ttm_placement.h 2015-03-26 14:42:38.758435422 +0530
+@@ -65,6 +65,8 @@
+ * reference the buffer.
+ * TTM_PL_FLAG_NO_EVICT means that the buffer may never
+ * be evicted to make room for other buffers.
++ * TTM_PL_FLAG_TOPDOWN requests to be placed from the
++ * top of the memory area, instead of the bottom.
+ */
+
+ #define TTM_PL_FLAG_CACHED (1 << 16)
+@@ -72,6 +74,7 @@
+ #define TTM_PL_FLAG_WC (1 << 18)
+ #define TTM_PL_FLAG_SHARED (1 << 20)
+ #define TTM_PL_FLAG_NO_EVICT (1 << 21)
++#define TTM_PL_FLAG_TOPDOWN (1 << 22)
+
+ #define TTM_PL_MASK_CACHING (TTM_PL_FLAG_CACHED | \
+ TTM_PL_FLAG_UNCACHED | \
+diff -Naur a/include/linux/fence.h b/include/linux/fence.h
+--- a/include/linux/fence.h 1970-01-01 05:30:00.000000000 +0530
++++ b/include/linux/fence.h 2015-03-26 14:42:38.762435422 +0530
+@@ -0,0 +1,360 @@
++/*
++ * Fence mechanism for dma-buf to allow for asynchronous dma access
++ *
++ * Copyright (C) 2012 Canonical Ltd
++ * Copyright (C) 2012 Texas Instruments
++ *
++ * Authors:
++ * Rob Clark <robdclark@gmail.com>
++ * Maarten Lankhorst <maarten.lankhorst@canonical.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published by
++ * the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ */
++
++#ifndef __LINUX_FENCE_H
++#define __LINUX_FENCE_H
++
++#include <linux/err.h>
++#include <linux/wait.h>
++#include <linux/list.h>
++#include <linux/bitops.h>
++#include <linux/kref.h>
++#include <linux/sched.h>
++#include <linux/printk.h>
++#include <linux/rcupdate.h>
++
++struct fence;
++struct fence_ops;
++struct fence_cb;
++
++/**
++ * struct fence - software synchronization primitive
++ * @refcount: refcount for this fence
++ * @ops: fence_ops associated with this fence
++ * @rcu: used for releasing fence with kfree_rcu
++ * @cb_list: list of all callbacks to call
++ * @lock: spin_lock_irqsave used for locking
++ * @context: execution context this fence belongs to, returned by
++ * fence_context_alloc()
++ * @seqno: the sequence number of this fence inside the execution context,
++ * can be compared to decide which fence would be signaled later.
++ * @flags: A mask of FENCE_FLAG_* defined below
++ * @timestamp: Timestamp when the fence was signaled.
++ * @status: Optional, only valid if < 0, must be set before calling
++ * fence_signal, indicates that the fence has completed with an error.
++ *
++ * the flags member must be manipulated and read using the appropriate
++ * atomic ops (bit_*), so taking the spinlock will not be needed most
++ * of the time.
++ *
++ * FENCE_FLAG_SIGNALED_BIT - fence is already signaled
++ * FENCE_FLAG_ENABLE_SIGNAL_BIT - enable_signaling might have been called*
++ * FENCE_FLAG_USER_BITS - start of the unused bits, can be used by the
++ * implementer of the fence for its own purposes. Can be used in different
++ * ways by different fence implementers, so do not rely on this.
++ *
++ * *) Since atomic bitops are used, this is not guaranteed to be the case.
++ * Particularly, if the bit was set, but fence_signal was called right
++ * before this bit was set, it would have been able to set the
++ * FENCE_FLAG_SIGNALED_BIT, before enable_signaling was called.
++ * Adding a check for FENCE_FLAG_SIGNALED_BIT after setting
++ * FENCE_FLAG_ENABLE_SIGNAL_BIT closes this race, and makes sure that
++ * after fence_signal was called, any enable_signaling call will have either
++ * been completed, or never called at all.
++ */
++struct fence {
++ struct kref refcount;
++ const struct fence_ops *ops;
++ struct rcu_head rcu;
++ struct list_head cb_list;
++ spinlock_t *lock;
++ unsigned context, seqno;
++ unsigned long flags;
++ ktime_t timestamp;
++ int status;
++};
++
++enum fence_flag_bits {
++ FENCE_FLAG_SIGNALED_BIT,
++ FENCE_FLAG_ENABLE_SIGNAL_BIT,
++ FENCE_FLAG_USER_BITS, /* must always be last member */
++};
++
++typedef void (*fence_func_t)(struct fence *fence, struct fence_cb *cb);
++
++/**
++ * struct fence_cb - callback for fence_add_callback
++ * @node: used by fence_add_callback to append this struct to fence::cb_list
++ * @func: fence_func_t to call
++ *
++ * This struct will be initialized by fence_add_callback, additional
++ * data can be passed along by embedding fence_cb in another struct.
++ */
++struct fence_cb {
++ struct list_head node;
++ fence_func_t func;
++};
++
++/**
++ * struct fence_ops - operations implemented for fence
++ * @get_driver_name: returns the driver name.
++ * @get_timeline_name: return the name of the context this fence belongs to.
++ * @enable_signaling: enable software signaling of fence.
++ * @signaled: [optional] peek whether the fence is signaled, can be null.
++ * @wait: custom wait implementation, or fence_default_wait.
++ * @release: [optional] called on destruction of fence, can be null
++ * @fill_driver_data: [optional] callback to fill in free-form debug info
++ *	Returns the number of bytes filled, or -errno.
++ * @fence_value_str: [optional] fills in the value of the fence as a string
++ * @timeline_value_str: [optional] fills in the current value of the timeline
++ * as a string
++ *
++ * Notes on enable_signaling:
++ * For fence implementations that have the capability for hw->hw
++ * signaling, they can implement this op to enable the necessary
++ * irqs, or insert commands into cmdstream, etc. This is called
++ * in the first wait() or add_callback() path to let the fence
++ * implementation know that there is another driver waiting on
++ * the signal (ie. hw->sw case).
++ *
++ * This function can be called from atomic context, but not
++ * from irq context, so normal spinlocks can be used.
++ *
++ * A return value of false indicates the fence already passed,
++ * or some failure occurred that made it impossible to enable
++ * signaling. True indicates successful enabling.
++ *
++ * fence->status may be set in enable_signaling, but only when false is
++ * returned.
++ *
++ * Calling fence_signal before enable_signaling is called allows
++ * for a tiny race window in which enable_signaling is called during,
++ * before, or after fence_signal. To fight this, it is recommended
++ * that before enable_signaling returns true an extra reference is
++ * taken on the fence, to be released when the fence is signaled.
++ * This will mean fence_signal will still be called twice, but
++ * the second time will be a noop since it was already signaled.
++ *
++ * Notes on signaled:
++ * May set fence->status if returning true.
++ *
++ * Notes on wait:
++ * Must not be NULL, set to fence_default_wait for default implementation.
++ * The fence_default_wait implementation should work for any fence, as long
++ * as enable_signaling works correctly.
++ *
++ * Must return -ERESTARTSYS if the wait is intr = true and the wait was
++ * interrupted, and remaining jiffies if fence has signaled, or 0 if wait
++ * timed out. Can also return other error values on custom implementations,
++ * which should be treated as if the fence is signaled. For example a hardware
++ * lockup could be reported like that.
++ *
++ * Notes on release:
++ * Can be NULL, this function allows additional commands to run on
++ * destruction of the fence. Can be called from irq context.
++ * If pointer is set to NULL, kfree will get called instead.
++ */
++
++struct fence_ops {
++ const char * (*get_driver_name)(struct fence *fence);
++ const char * (*get_timeline_name)(struct fence *fence);
++ bool (*enable_signaling)(struct fence *fence);
++ bool (*signaled)(struct fence *fence);
++ signed long (*wait)(struct fence *fence, bool intr, signed long timeout);
++ void (*release)(struct fence *fence);
++
++ int (*fill_driver_data)(struct fence *fence, void *data, int size);
++ void (*fence_value_str)(struct fence *fence, char *str, int size);
++ void (*timeline_value_str)(struct fence *fence, char *str, int size);
++};
++
++void fence_init(struct fence *fence, const struct fence_ops *ops,
++ spinlock_t *lock, unsigned context, unsigned seqno);
++
++void fence_release(struct kref *kref);
++void fence_free(struct fence *fence);
++
++/**
++ * fence_get - increases refcount of the fence
++ * @fence: [in] fence to increase refcount of
++ *
++ * Returns the same fence, with refcount increased by 1.
++ */
++static inline struct fence *fence_get(struct fence *fence)
++{
++ if (fence)
++ kref_get(&fence->refcount);
++ return fence;
++}
++
++/**
++ * fence_get_rcu - get a fence from a reservation_object_list with rcu read lock
++ * @fence: [in] fence to increase refcount of
++ *
++ * Function returns NULL if no refcount could be obtained, or the fence.
++ */
++static inline struct fence *fence_get_rcu(struct fence *fence)
++{
++ if (kref_get_unless_zero(&fence->refcount))
++ return fence;
++ else
++ return NULL;
++}
++
++/**
++ * fence_put - decreases refcount of the fence
++ * @fence: [in] fence to reduce refcount of
++ */
++static inline void fence_put(struct fence *fence)
++{
++ if (fence)
++ kref_put(&fence->refcount, fence_release);
++}
++
++int fence_signal(struct fence *fence);
++int fence_signal_locked(struct fence *fence);
++signed long fence_default_wait(struct fence *fence, bool intr, signed long timeout);
++int fence_add_callback(struct fence *fence, struct fence_cb *cb,
++ fence_func_t func);
++bool fence_remove_callback(struct fence *fence, struct fence_cb *cb);
++void fence_enable_sw_signaling(struct fence *fence);
++
++/**
++ * fence_is_signaled_locked - Return an indication if the fence is signaled yet.
++ * @fence: [in] the fence to check
++ *
++ * Returns true if the fence was already signaled, false if not. Since this
++ * function doesn't enable signaling, it is not guaranteed to ever return
++ * true if fence_add_callback, fence_wait or fence_enable_sw_signaling
++ * haven't been called before.
++ *
++ * This function requires fence->lock to be held.
++ */
++static inline bool
++fence_is_signaled_locked(struct fence *fence)
++{
++ if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
++ return true;
++
++ if (fence->ops->signaled && fence->ops->signaled(fence)) {
++ fence_signal_locked(fence);
++ return true;
++ }
++
++ return false;
++}
++
++/**
++ * fence_is_signaled - Return an indication if the fence is signaled yet.
++ * @fence: [in] the fence to check
++ *
++ * Returns true if the fence was already signaled, false if not. Since this
++ * function doesn't enable signaling, it is not guaranteed to ever return
++ * true if fence_add_callback, fence_wait or fence_enable_sw_signaling
++ * haven't been called before.
++ *
++ * It's recommended for seqno fences to call fence_signal when the
++ * operation is complete, it makes it possible to prevent issues from
++ * wraparound between time of issue and time of use by checking the return
++ * value of this function before calling hardware-specific wait instructions.
++ */
++static inline bool
++fence_is_signaled(struct fence *fence)
++{
++ if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
++ return true;
++
++ if (fence->ops->signaled && fence->ops->signaled(fence)) {
++ fence_signal(fence);
++ return true;
++ }
++
++ return false;
++}
++
++/**
++ * fence_later - return the chronologically later fence
++ * @f1: [in] the first fence from the same context
++ * @f2: [in] the second fence from the same context
++ *
++ * Returns NULL if both fences are signaled, otherwise the fence that would be
++ * signaled last. Both fences must be from the same context, since a seqno is
++ * not re-used across contexts.
++ */
++static inline struct fence *fence_later(struct fence *f1, struct fence *f2)
++{
++ if (WARN_ON(f1->context != f2->context))
++ return NULL;
++
++ /*
++ * can't check just FENCE_FLAG_SIGNALED_BIT here, it may never have been
++ * set if enable_signaling wasn't called, and enabling that here is
++ * overkill.
++ */
++ if (f2->seqno - f1->seqno <= INT_MAX)
++ return fence_is_signaled(f2) ? NULL : f2;
++ else
++ return fence_is_signaled(f1) ? NULL : f1;
++}
++
++signed long fence_wait_timeout(struct fence *, bool intr, signed long timeout);
++
++/**
++ * fence_wait - sleep until the fence gets signaled
++ * @fence: [in] the fence to wait on
++ * @intr: [in] if true, do an interruptible wait
++ *
++ * This function will return -ERESTARTSYS if interrupted by a signal,
++ * or 0 if the fence was signaled. Other error values may be
++ * returned on custom implementations.
++ *
++ * Performs a synchronous wait on this fence. It is assumed the caller
++ * directly or indirectly holds a reference to the fence, otherwise the
++ * fence might be freed before return, resulting in undefined behavior.
++ */
++static inline signed long fence_wait(struct fence *fence, bool intr)
++{
++ signed long ret;
++
++ /* Since fence_wait_timeout cannot timeout with
++ * MAX_SCHEDULE_TIMEOUT, only valid return values are
++ * -ERESTARTSYS and MAX_SCHEDULE_TIMEOUT.
++ */
++ ret = fence_wait_timeout(fence, intr, MAX_SCHEDULE_TIMEOUT);
++
++ return ret < 0 ? ret : 0;
++}
++
++unsigned fence_context_alloc(unsigned num);
++
++#define FENCE_TRACE(f, fmt, args...) \
++ do { \
++ struct fence *__ff = (f); \
++ if (config_enabled(CONFIG_FENCE_TRACE)) \
++ pr_info("f %u#%u: " fmt, \
++ __ff->context, __ff->seqno, ##args); \
++ } while (0)
++
++#define FENCE_WARN(f, fmt, args...) \
++ do { \
++ struct fence *__ff = (f); \
++ pr_warn("f %u#%u: " fmt, __ff->context, __ff->seqno, \
++ ##args); \
++ } while (0)
++
++#define FENCE_ERR(f, fmt, args...) \
++ do { \
++ struct fence *__ff = (f); \
++ pr_err("f %u#%u: " fmt, __ff->context, __ff->seqno, \
++ ##args); \
++ } while (0)
++
++#endif /* __LINUX_FENCE_H */
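
A minimal software-signaled fence built on the interface above, as a hedged
sketch; the foo_* names are invented, only the mandatory fence_ops hooks are
filled in, and fence_default_wait does the waiting:

    static DEFINE_SPINLOCK(foo_fence_lock);
    static unsigned foo_fence_context;      /* from fence_context_alloc(1) */
    static unsigned foo_fence_seqno;

    static const char *foo_get_driver_name(struct fence *f)
    {
            return "foo";
    }

    static const char *foo_get_timeline_name(struct fence *f)
    {
            return "foo-ring0";
    }

    static bool foo_enable_signaling(struct fence *f)
    {
            return true;    /* the completion irq is always enabled */
    }

    static const struct fence_ops foo_fence_ops = {
            .get_driver_name   = foo_get_driver_name,
            .get_timeline_name = foo_get_timeline_name,
            .enable_signaling  = foo_enable_signaling,
            .wait              = fence_default_wait,
    };

    struct fence *foo_fence_create(void)
    {
            struct fence *f = kzalloc(sizeof(*f), GFP_KERNEL);

            if (!f)
                    return NULL;
            fence_init(f, &foo_fence_ops, &foo_fence_lock,
                       foo_fence_context, ++foo_fence_seqno);
            return f;
    }

The completion side (an interrupt handler, say) then calls fence_signal()
followed by fence_put(), while waiters use fence_wait() or
fence_add_callback().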
+diff -Naur a/include/linux/ktime.h b/include/linux/ktime.h
+--- a/include/linux/ktime.h 2015-03-26 14:43:27.914436386 +0530
++++ b/include/linux/ktime.h 2015-03-26 14:42:38.762435422 +0530
+@@ -386,4 +386,6 @@
+ return ktime_add_ms(ktime_zero, ms);
+ }
+
++# include <linux/timekeeping.h>
++
+ #endif
+diff -Naur a/include/linux/moduleparam.h b/include/linux/moduleparam.h
+--- a/include/linux/moduleparam.h 2015-03-26 14:43:27.942436387 +0530
++++ b/include/linux/moduleparam.h 2015-03-26 14:42:38.762435422 +0530
+@@ -111,6 +111,11 @@
+ */
+ #define module_param(name, type, perm) \
+ module_param_named(name, name, type, perm)
++/**
++ * module_param_unsafe - same as module_param but taints kernel
++ */
++#define module_param_unsafe(name, type, perm) \
++ module_param_named_unsafe(name, name, type, perm)
+
+ /**
+ * module_param_named - typesafe helper for a renamed module/cmdline parameter
+@@ -129,6 +134,14 @@
+ __MODULE_PARM_TYPE(name, #type)
+
+ /**
++ * module_param_named_unsafe - same as module_param_named but taints kernel
++ */
++#define module_param_named_unsafe(name, value, type, perm) \
++ param_check_##type(name, &(value)); \
++ module_param_cb_unsafe(name, &param_ops_##type, &value, perm); \
++ __MODULE_PARM_TYPE(name, #type)
++
++/**
+ * module_param_cb - general callback for a module/cmdline parameter
+ * @name: a valid C identifier which is the parameter name.
+ * @ops: the set & get operations for this parameter.
+@@ -139,6 +152,8 @@
+ #define module_param_cb(name, ops, arg, perm) \
+ __module_param_call(MODULE_PARAM_PREFIX, name, ops, arg, perm, -1)
+
++#define module_param_cb_unsafe(name, ops, arg, perm) \
++ __module_param_call(MODULE_PARAM_PREFIX, name, ops, arg, perm, -1)
+ /**
+ * <level>_param_cb - general callback for a module/cmdline parameter
+ * to be evaluated before certain initcall level
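
Note that in this backport module_param_cb_unsafe expands to exactly the same
thing as module_param_cb, so the _unsafe variants do not actually taint the
kernel here; they exist so that backported driver code using them compiles
unchanged. A hedged usage sketch with a hypothetical knob:

    static bool foo_allow_unsupported;      /* hypothetical debug knob */
    module_param_unsafe(foo_allow_unsupported, bool, 0600);
    MODULE_PARM_DESC(foo_allow_unsupported,
                     "Enable unsupported features (taints the kernel upstream)");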
+diff -Naur a/include/linux/pci.h b/include/linux/pci.h
+--- a/include/linux/pci.h 2015-03-26 14:43:27.938436387 +0530
++++ b/include/linux/pci.h 2015-03-26 14:42:38.762435422 +0530
+@@ -297,6 +297,7 @@
+ D3cold, not set for devices
+ powered on/off by the
+ corresponding bridge */
++ unsigned int ignore_hotplug:1; /* Ignore hotplug events */
+ unsigned int d3_delay; /* D3->D0 transition time in ms */
+ unsigned int d3cold_delay; /* D3cold->D0 transition time in ms */
+
+@@ -997,6 +998,11 @@
+ bool pci_check_pme_status(struct pci_dev *dev);
+ void pci_pme_wakeup_bus(struct pci_bus *bus);
+
++static inline void pci_ignore_hotplug(struct pci_dev *dev)
++{
++ dev->ignore_hotplug = 1;
++}
++
+ static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state,
+ bool enable)
+ {
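
pci_ignore_hotplug() exists for GPU runtime power management, where cutting
power to the device can look like a surprise removal to its bridge. A hedged
sketch of a runtime-suspend path using it:

    static int foo_runtime_suspend(struct device *dev)
    {
            struct pci_dev *pdev = to_pci_dev(dev);

            pci_ignore_hotplug(pdev);       /* D3cold may fake an unplug */
            pci_save_state(pdev);
            pci_disable_device(pdev);
            pci_set_power_state(pdev, PCI_D3cold);
            return 0;
    }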
+diff -Naur a/include/linux/reservation.h b/include/linux/reservation.h
+--- a/include/linux/reservation.h 2015-03-26 14:43:27.946436387 +0530
++++ b/include/linux/reservation.h 2015-03-26 14:42:38.762435422 +0530
+@@ -6,7 +6,7 @@
+ * Copyright (C) 2012 Texas Instruments
+ *
+ * Authors:
+- * Rob Clark <rob.clark@linaro.org>
++ * Rob Clark <robdclark@gmail.com>
+ * Maarten Lankhorst <maarten.lankhorst@canonical.com>
+ * Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ *
+@@ -40,23 +40,103 @@
+ #define _LINUX_RESERVATION_H
+
+ #include <linux/ww_mutex.h>
++#include <linux/fence.h>
++#include <linux/slab.h>
++#include <linux/seqlock.h>
++#include <linux/rcupdate.h>
+
+ extern struct ww_class reservation_ww_class;
++extern struct lock_class_key reservation_seqcount_class;
++extern const char reservation_seqcount_string[];
++
++struct reservation_object_list {
++ struct rcu_head rcu;
++ u32 shared_count, shared_max;
++ struct fence __rcu *shared[];
++};
+
+ struct reservation_object {
+ struct ww_mutex lock;
++ seqcount_t seq;
++
++ struct fence __rcu *fence_excl;
++ struct reservation_object_list __rcu *fence;
++ struct reservation_object_list *staged;
+ };
+
++#define reservation_object_held(obj) lockdep_is_held(&(obj)->lock.base)
++#define reservation_object_assert_held(obj) \
++ lockdep_assert_held(&(obj)->lock.base)
++
+ static inline void
+ reservation_object_init(struct reservation_object *obj)
+ {
+ ww_mutex_init(&obj->lock, &reservation_ww_class);
++
++ __seqcount_init(&obj->seq, reservation_seqcount_string, &reservation_seqcount_class);
++ RCU_INIT_POINTER(obj->fence, NULL);
++ RCU_INIT_POINTER(obj->fence_excl, NULL);
++ obj->staged = NULL;
+ }
+
+ static inline void
+ reservation_object_fini(struct reservation_object *obj)
+ {
++ int i;
++ struct reservation_object_list *fobj;
++ struct fence *excl;
++
++ /*
++ * This object should be dead and all references must have
++ * been released to it, so no need to be protected with rcu.
++ */
++ excl = rcu_dereference_protected(obj->fence_excl, 1);
++ if (excl)
++ fence_put(excl);
++
++ fobj = rcu_dereference_protected(obj->fence, 1);
++ if (fobj) {
++ for (i = 0; i < fobj->shared_count; ++i)
++ fence_put(rcu_dereference_protected(fobj->shared[i], 1));
++
++ kfree(fobj);
++ }
++ kfree(obj->staged);
++
+ ww_mutex_destroy(&obj->lock);
+ }
+
++static inline struct reservation_object_list *
++reservation_object_get_list(struct reservation_object *obj)
++{
++ return rcu_dereference_protected(obj->fence,
++ reservation_object_held(obj));
++}
++
++static inline struct fence *
++reservation_object_get_excl(struct reservation_object *obj)
++{
++ return rcu_dereference_protected(obj->fence_excl,
++ reservation_object_held(obj));
++}
++
++int reservation_object_reserve_shared(struct reservation_object *obj);
++void reservation_object_add_shared_fence(struct reservation_object *obj,
++ struct fence *fence);
++
++void reservation_object_add_excl_fence(struct reservation_object *obj,
++ struct fence *fence);
++
++int reservation_object_get_fences_rcu(struct reservation_object *obj,
++ struct fence **pfence_excl,
++ unsigned *pshared_count,
++ struct fence ***pshared);
++
++long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
++ bool wait_all, bool intr,
++ unsigned long timeout);
++
++bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
++ bool test_all);
++
+ #endif /* _LINUX_RESERVATION_H */
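
The expanded reservation object carries one exclusive (write) fence plus a
growable array of shared (read) fences, readable locklessly under RCU via the
seqcount. A hedged sketch of both sides, assuming fence comes from the
driver's submission path:

    struct reservation_object resv;
    long lret;

    reservation_object_init(&resv);

    /* writer side: the object must be locked to publish a fence */
    ww_mutex_lock(&resv.lock, NULL);
    reservation_object_add_excl_fence(&resv, fence);
    ww_mutex_unlock(&resv.lock);

    /* reader side: wait for all fences, interruptibly, without the lock */
    lret = reservation_object_wait_timeout_rcu(&resv, true, true,
                                               msecs_to_jiffies(100));

    reservation_object_fini(&resv);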
+diff -Naur a/include/linux/seqno-fence.h b/include/linux/seqno-fence.h
+--- a/include/linux/seqno-fence.h 1970-01-01 05:30:00.000000000 +0530
++++ b/include/linux/seqno-fence.h 2015-03-26 14:42:38.762435422 +0530
+@@ -0,0 +1,117 @@
++/*
++ * seqno-fence, using a dma-buf to synchronize fencing
++ *
++ * Copyright (C) 2012 Texas Instruments
++ * Copyright (C) 2012 Canonical Ltd
++ * Authors:
++ * Rob Clark <robdclark@gmail.com>
++ * Maarten Lankhorst <maarten.lankhorst@canonical.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published by
++ * the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ */
++
++#ifndef __LINUX_SEQNO_FENCE_H
++#define __LINUX_SEQNO_FENCE_H
++
++#include <linux/fence.h>
++#include <linux/dma-buf.h>
++
++enum seqno_fence_condition {
++ SEQNO_FENCE_WAIT_GEQUAL,
++ SEQNO_FENCE_WAIT_NONZERO
++};
++
++struct seqno_fence {
++ struct fence base;
++
++ const struct fence_ops *ops;
++ struct dma_buf *sync_buf;
++ uint32_t seqno_ofs;
++ enum seqno_fence_condition condition;
++};
++
++extern const struct fence_ops seqno_fence_ops;
++
++/**
++ * to_seqno_fence - cast a fence to a seqno_fence
++ * @fence: fence to cast to a seqno_fence
++ *
++ * Returns NULL if the fence is not a seqno_fence,
++ * or the seqno_fence otherwise.
++ */
++static inline struct seqno_fence *
++to_seqno_fence(struct fence *fence)
++{
++ if (fence->ops != &seqno_fence_ops)
++ return NULL;
++ return container_of(fence, struct seqno_fence, base);
++}
++
++/**
++ * seqno_fence_init - initialize a seqno fence
++ * @fence: seqno_fence to initialize
++ * @lock: pointer to spinlock to use for fence
++ * @sync_buf: buffer containing the memory location to signal on
++ * @context: the execution context this fence is a part of
++ * @seqno_ofs: the offset within @sync_buf
++ * @seqno: the sequence # to signal on
++ * @cond: fence wait condition
++ * @ops: the fence_ops for operations on this seqno fence
++ *
++ * This function initializes a struct seqno_fence with passed parameters,
++ * and takes a reference on sync_buf which is released on fence destruction.
++ *
++ * A seqno_fence is a dma_fence which can complete in software when
++ * enable_signaling is called, but it also completes when
++ * (s32)((sync_buf)[seqno_ofs] - seqno) >= 0 is true
++ *
++ * The seqno_fence will take a refcount on the sync_buf until it's
++ * destroyed, but actual lifetime of sync_buf may be longer if one of the
++ * callers takes a reference to it.
++ *
++ * Certain hardware have instructions to insert this type of wait condition
++ * in the command stream, so no intervention from software would be needed.
++ * This type of fence can be destroyed before completed, however a reference
++ * on the sync_buf dma-buf can be taken. It is encouraged to re-use the same
++ * dma-buf for sync_buf, since mapping or unmapping the sync_buf to the
++ * device's vm can be expensive.
++ *
++ * It is recommended for creators of seqno_fence to call fence_signal
++ * before destruction. This will prevent possible issues from wraparound at
++ * time of issue vs time of check, since users can check fence_is_signaled
++ * before submitting instructions for the hardware to wait on the fence.
++ * However, when ops.enable_signaling is not called, it doesn't have to be
++ * done as soon as possible, just before there's any real danger of seqno
++ * wraparound.
++ */
++static inline void
++seqno_fence_init(struct seqno_fence *fence, spinlock_t *lock,
++ struct dma_buf *sync_buf, uint32_t context,
++ uint32_t seqno_ofs, uint32_t seqno,
++ enum seqno_fence_condition cond,
++ const struct fence_ops *ops)
++{
++ BUG_ON(!fence || !sync_buf || !ops);
++ BUG_ON(!ops->wait || !ops->enable_signaling ||
++ !ops->get_driver_name || !ops->get_timeline_name);
++
++ /*
++ * ops is used in fence_init for get_driver_name, so needs to be
++ * initialized first
++ */
++ fence->ops = ops;
++ fence_init(&fence->base, &seqno_fence_ops, lock, context, seqno);
++ get_dma_buf(sync_buf);
++ fence->sync_buf = sync_buf;
++ fence->seqno_ofs = seqno_ofs;
++ fence->condition = cond;
++}
++
++#endif /* __LINUX_SEQNO_FENCE_H */
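
A hedged sketch of constructing one of these fences; sync_buf, foo_lock,
foo_ctx, SEQNO_OFS and foo_seqno_fence_ops are assumed to come from the
driver, with foo_seqno_fence_ops supplying at least the wait,
enable_signaling and name hooks that seqno_fence_init() insists on:

    struct seqno_fence *sf = kzalloc(sizeof(*sf), GFP_KERNEL);

    if (!sf)
            return -ENOMEM;
    seqno_fence_init(sf, &foo_lock, sync_buf, foo_ctx,
                     SEQNO_OFS, ++foo_seqno,
                     SEQNO_FENCE_WAIT_GEQUAL, &foo_seqno_fence_ops);
    /* hardware completes by writing foo_seqno to sync_buf + SEQNO_OFS */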
+diff -Naur a/include/linux/time64.h b/include/linux/time64.h
+--- a/include/linux/time64.h 1970-01-01 05:30:00.000000000 +0530
++++ b/include/linux/time64.h 2015-03-26 14:42:38.762435422 +0530
+@@ -0,0 +1,190 @@
++#ifndef _LINUX_TIME64_H
++#define _LINUX_TIME64_H
++
++#include <uapi/linux/time.h>
++
++typedef __s64 time64_t;
++
++/*
++ * This wants to go into uapi/linux/time.h once we have agreed on the
++ * userspace interfaces.
++ */
++#if __BITS_PER_LONG == 64
++# define timespec64 timespec
++#else
++struct timespec64 {
++ time64_t tv_sec; /* seconds */
++ long tv_nsec; /* nanoseconds */
++};
++#endif
++
++/* Parameters used to convert the timespec values: */
++#define MSEC_PER_SEC 1000L
++#define USEC_PER_MSEC 1000L
++#define NSEC_PER_USEC 1000L
++#define NSEC_PER_MSEC 1000000L
++#define USEC_PER_SEC 1000000L
++#define NSEC_PER_SEC 1000000000L
++#define FSEC_PER_SEC 1000000000000000LL
++
++/* Located here for timespec[64]_valid_strict */
++#define KTIME_MAX ((s64)~((u64)1 << 63))
++#define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC)
++
++#if __BITS_PER_LONG == 64
++
++static inline struct timespec timespec64_to_timespec(const struct timespec64 ts64)
++{
++ return ts64;
++}
++
++static inline struct timespec64 timespec_to_timespec64(const struct timespec ts)
++{
++ return ts;
++}
++
++# define timespec64_equal timespec_equal
++# define timespec64_compare timespec_compare
++# define set_normalized_timespec64 set_normalized_timespec
++# define timespec64_add_safe timespec_add_safe
++# define timespec64_add timespec_add
++# define timespec64_sub timespec_sub
++# define timespec64_valid timespec_valid
++# define timespec64_valid_strict timespec_valid_strict
++# define timespec64_to_ns timespec_to_ns
++# define ns_to_timespec64 ns_to_timespec
++# define timespec64_add_ns timespec_add_ns
++
++#else
++
++static inline struct timespec timespec64_to_timespec(const struct timespec64 ts64)
++{
++ struct timespec ret;
++
++ ret.tv_sec = (time_t)ts64.tv_sec;
++ ret.tv_nsec = ts64.tv_nsec;
++ return ret;
++}
++
++static inline struct timespec64 timespec_to_timespec64(const struct timespec ts)
++{
++ struct timespec64 ret;
++
++ ret.tv_sec = ts.tv_sec;
++ ret.tv_nsec = ts.tv_nsec;
++ return ret;
++}
++
++static inline int timespec64_equal(const struct timespec64 *a,
++ const struct timespec64 *b)
++{
++ return (a->tv_sec == b->tv_sec) && (a->tv_nsec == b->tv_nsec);
++}
++
++/*
++ * lhs < rhs: return <0
++ * lhs == rhs: return 0
++ * lhs > rhs: return >0
++ */
++static inline int timespec64_compare(const struct timespec64 *lhs, const struct timespec64 *rhs)
++{
++ if (lhs->tv_sec < rhs->tv_sec)
++ return -1;
++ if (lhs->tv_sec > rhs->tv_sec)
++ return 1;
++ return lhs->tv_nsec - rhs->tv_nsec;
++}
++
++extern void set_normalized_timespec64(struct timespec64 *ts, time64_t sec, s64 nsec);
++
++/*
++ * timespec64_add_safe assumes both values are positive and checks for
++ * overflow. It will return TIME_T_MAX if the returned value would be
++ * smaller than either of the arguments.
++ */
++extern struct timespec64 timespec64_add_safe(const struct timespec64 lhs,
++ const struct timespec64 rhs);
++
++
++static inline struct timespec64 timespec64_add(struct timespec64 lhs,
++ struct timespec64 rhs)
++{
++ struct timespec64 ts_delta;
++ set_normalized_timespec64(&ts_delta, lhs.tv_sec + rhs.tv_sec,
++ lhs.tv_nsec + rhs.tv_nsec);
++ return ts_delta;
++}
++
++/*
++ * sub = lhs - rhs, in normalized form
++ */
++static inline struct timespec64 timespec64_sub(struct timespec64 lhs,
++ struct timespec64 rhs)
++{
++ struct timespec64 ts_delta;
++ set_normalized_timespec64(&ts_delta, lhs.tv_sec - rhs.tv_sec,
++ lhs.tv_nsec - rhs.tv_nsec);
++ return ts_delta;
++}
++
++/*
++ * Returns true if the timespec64 is normalized, false if denormalized:
++ */
++static inline bool timespec64_valid(const struct timespec64 *ts)
++{
++ /* Dates before 1970 are bogus */
++ if (ts->tv_sec < 0)
++ return false;
++	/* Can't have more nanoseconds than a second */
++ if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
++ return false;
++ return true;
++}
++
++static inline bool timespec64_valid_strict(const struct timespec64 *ts)
++{
++ if (!timespec64_valid(ts))
++ return false;
++ /* Disallow values that could overflow ktime_t */
++ if ((unsigned long long)ts->tv_sec >= KTIME_SEC_MAX)
++ return false;
++ return true;
++}
++
++/**
++ * timespec64_to_ns - Convert timespec64 to nanoseconds
++ * @ts: pointer to the timespec64 variable to be converted
++ *
++ * Returns the scalar nanosecond representation of the timespec64
++ * parameter.
++ */
++static inline s64 timespec64_to_ns(const struct timespec64 *ts)
++{
++ return ((s64) ts->tv_sec * NSEC_PER_SEC) + ts->tv_nsec;
++}
++
++/**
++ * ns_to_timespec64 - Convert nanoseconds to timespec64
++ * @nsec: the nanoseconds value to be converted
++ *
++ * Returns the timespec64 representation of the nsec parameter.
++ */
++extern struct timespec64 ns_to_timespec64(const s64 nsec);
++
++/**
++ * timespec64_add_ns - Adds nanoseconds to a timespec64
++ * @a: pointer to timespec64 to be incremented
++ * @ns: unsigned nanoseconds value to be added
++ *
++ * This must always be inlined because it's used from the x86-64 vdso,
++ * which cannot call other kernel functions.
++ */
++static __always_inline void timespec64_add_ns(struct timespec64 *a, u64 ns)
++{
++ a->tv_sec += __iter_div_u64_rem(a->tv_nsec + ns, NSEC_PER_SEC, &ns);
++ a->tv_nsec = ns;
++}
++
++#endif
++
++#endif /* _LINUX_TIME64_H */
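
The point of the timespec64 helpers is that the same arithmetic remains
year-2038-safe on 32-bit, where a plain time_t would overflow. A small worked
sketch:

    struct timespec64 a = { .tv_sec = 1, .tv_nsec = 900000000L };
    struct timespec64 b = { .tv_sec = 0, .tv_nsec = 200000000L };
    struct timespec64 sum = timespec64_add(a, b);   /* 2s, 100000000ns */

    s64 ns = timespec64_to_ns(&sum);                /* 2100000000 */
    timespec64_add_ns(&sum, NSEC_PER_SEC / 2);      /* 2s, 600000000ns */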
+diff -Naur a/include/linux/timekeeping_bkp.h b/include/linux/timekeeping_bkp.h
+--- a/include/linux/timekeeping_bkp.h 1970-01-01 05:30:00.000000000 +0530
++++ b/include/linux/timekeeping_bkp.h 2015-03-26 14:42:38.762435422 +0530
+@@ -0,0 +1,252 @@
++#ifndef _LINUX_TIMEKEEPING_H
++#define _LINUX_TIMEKEEPING_H
++
++/* Included from linux/ktime.h */
++
++void timekeeping_init(void);
++extern int timekeeping_suspended;
++
++/*
++ * Get and set timeofday
++ */
++extern void do_gettimeofday(struct timeval *tv);
++extern int do_settimeofday64(const struct timespec64 *ts);
++extern int do_sys_settimeofday(const struct timespec *tv,
++ const struct timezone *tz);
++
++/*
++ * Kernel time accessors
++ */
++unsigned long get_seconds(void);
++struct timespec current_kernel_time(void);
++/* does not take xtime_lock */
++struct timespec __current_kernel_time(void);
++
++/*
++ * timespec based interfaces
++ */
++struct timespec64 get_monotonic_coarse64(void);
++extern void getrawmonotonic64(struct timespec64 *ts);
++extern void ktime_get_ts64(struct timespec64 *ts);
++extern time64_t ktime_get_seconds(void);
++extern time64_t ktime_get_real_seconds(void);
++
++extern int __getnstimeofday64(struct timespec64 *tv);
++extern void getnstimeofday64(struct timespec64 *tv);
++
++#if BITS_PER_LONG == 64
++/**
++ * Deprecated. Use do_settimeofday64().
++ */
++static inline int do_settimeofday(const struct timespec *ts)
++{
++ return do_settimeofday64(ts);
++}
++
++static inline int __getnstimeofday(struct timespec *ts)
++{
++ return __getnstimeofday64(ts);
++}
++
++static inline void getnstimeofday(struct timespec *ts)
++{
++ getnstimeofday64(ts);
++}
++
++static inline void ktime_get_ts(struct timespec *ts)
++{
++ ktime_get_ts64(ts);
++}
++
++static inline void ktime_get_real_ts(struct timespec *ts)
++{
++ getnstimeofday64(ts);
++}
++
++static inline void getrawmonotonic(struct timespec *ts)
++{
++ getrawmonotonic64(ts);
++}
++
++static inline struct timespec get_monotonic_coarse(void)
++{
++ return get_monotonic_coarse64();
++}
++#else
++/**
++ * Deprecated. Use do_settimeofday64().
++ */
++static inline int do_settimeofday(const struct timespec *ts)
++{
++ struct timespec64 ts64;
++
++ ts64 = timespec_to_timespec64(*ts);
++ return do_settimeofday64(&ts64);
++}
++
++static inline int __getnstimeofday(struct timespec *ts)
++{
++ struct timespec64 ts64;
++ int ret = __getnstimeofday64(&ts64);
++
++ *ts = timespec64_to_timespec(ts64);
++ return ret;
++}
++
++static inline void getnstimeofday(struct timespec *ts)
++{
++ struct timespec64 ts64;
++
++ getnstimeofday64(&ts64);
++ *ts = timespec64_to_timespec(ts64);
++}
++
++static inline void ktime_get_ts(struct timespec *ts)
++{
++ struct timespec64 ts64;
++
++ ktime_get_ts64(&ts64);
++ *ts = timespec64_to_timespec(ts64);
++}
++
++static inline void ktime_get_real_ts(struct timespec *ts)
++{
++ struct timespec64 ts64;
++
++ getnstimeofday64(&ts64);
++ *ts = timespec64_to_timespec(ts64);
++}
++
++static inline void getrawmonotonic(struct timespec *ts)
++{
++ struct timespec64 ts64;
++
++ getrawmonotonic64(&ts64);
++ *ts = timespec64_to_timespec(ts64);
++}
++
++static inline struct timespec get_monotonic_coarse(void)
++{
++ return timespec64_to_timespec(get_monotonic_coarse64());
++}
++#endif
++
++extern void getboottime(struct timespec *ts);
++
++#define do_posix_clock_monotonic_gettime(ts) ktime_get_ts(ts)
++#define ktime_get_real_ts64(ts) getnstimeofday64(ts)
++
++/*
++ * ktime_t based interfaces
++ */
++
++enum tk_offsets {
++ TK_OFFS_REAL,
++ TK_OFFS_BOOT,
++ TK_OFFS_TAI,
++ TK_OFFS_MAX,
++};
++
++extern ktime_t ktime_get(void);
++extern ktime_t ktime_get_with_offset(enum tk_offsets offs);
++extern ktime_t ktime_mono_to_any(ktime_t tmono, enum tk_offsets offs);
++extern ktime_t ktime_get_raw(void);
++
++/**
++ * ktime_get_real - get the real (wall-) time in ktime_t format
++ */
++static inline ktime_t ktime_get_real(void)
++{
++ return ktime_get_with_offset(TK_OFFS_REAL);
++}
++
++/**
++ * ktime_get_boottime - Returns monotonic time since boot in ktime_t format
++ *
++ * This is similar to CLOCK_MONOTONIC/ktime_get, but also includes the
++ * time spent in suspend.
++ */
++static inline ktime_t ktime_get_boottime(void)
++{
++ return ktime_get_with_offset(TK_OFFS_BOOT);
++}
++
++/**
++ * ktime_get_clocktai - Returns the TAI time of day in ktime_t format
++ */
++static inline ktime_t ktime_get_clocktai(void)
++{
++ return ktime_get_with_offset(TK_OFFS_TAI);
++}
++
++/**
++ * ktime_mono_to_real - Convert monotonic time to clock realtime
++ */
++static inline ktime_t ktime_mono_to_real(ktime_t mono)
++{
++ return ktime_mono_to_any(mono, TK_OFFS_REAL);
++}
++
++static inline u64 ktime_get_ns(void)
++{
++ return ktime_to_ns(ktime_get());
++}
++
++static inline u64 ktime_get_real_ns(void)
++{
++ return ktime_to_ns(ktime_get_real());
++}
++
++static inline u64 ktime_get_boot_ns(void)
++{
++ return ktime_to_ns(ktime_get_boottime());
++}
++
++static inline u64 ktime_get_raw_ns(void)
++{
++ return ktime_to_ns(ktime_get_raw());
++}
++
++extern u64 ktime_get_mono_fast_ns(void);
++
++/*
++ * Timespec interfaces utilizing the ktime based ones
++ */
++static inline void get_monotonic_boottime(struct timespec *ts)
++{
++ *ts = ktime_to_timespec(ktime_get_boottime());
++}
++
++static inline void timekeeping_clocktai(struct timespec *ts)
++{
++ *ts = ktime_to_timespec(ktime_get_clocktai());
++}
++
++/*
++ * RTC specific
++ */
++extern void timekeeping_inject_sleeptime64(struct timespec64 *delta);
++
++/*
++ * PPS accessor
++ */
++extern void getnstime_raw_and_real(struct timespec *ts_raw,
++ struct timespec *ts_real);
++
++/*
++ * Persistent clock related interfaces
++ */
++extern bool persistent_clock_exist;
++extern int persistent_clock_is_local;
++
++static inline bool has_persistent_clock(void)
++{
++ return persistent_clock_exist;
++}
++
++extern void read_persistent_clock(struct timespec *ts);
++extern void read_boot_clock(struct timespec *ts);
++extern int update_persistent_clock(struct timespec now);
++
++#endif
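
The accessors above reduce to a few common patterns; a hedged sketch of the
ktime_t side as backported here:

    ktime_t mono = ktime_get();             /* CLOCK_MONOTONIC */
    ktime_t boot = ktime_get_boottime();    /* monotonic plus suspend time */
    u64 raw_ns = ktime_get_raw_ns();        /* raw monotonic, in ns */

    /* translate a monotonic timestamp into wall-clock time */
    ktime_t wall = ktime_mono_to_real(mono);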
+diff -Naur a/include/linux/timekeeping.h b/include/linux/timekeeping.h
+--- a/include/linux/timekeeping.h 1970-01-01 05:30:00.000000000 +0530
++++ b/include/linux/timekeeping.h 2015-03-26 14:42:38.762435422 +0530
+@@ -0,0 +1,34 @@
++#ifndef _LINUX_TIMEKEEPING_H
++#define _LINUX_TIMEKEEPING_H
++
++/*
++ * ktime_t based interfaces
++ */
++
++enum tk_offsets {
++ TK_OFFS_REAL,
++ TK_OFFS_BOOT,
++ TK_OFFS_TAI,
++ TK_OFFS_MAX,
++};
++
++extern ktime_t ktime_get_raw(void);
++extern ktime_t ktime_mono_to_any(ktime_t tmono, enum tk_offsets offs);
++
++static inline ktime_t ktime_mono_to_real(ktime_t mono)
++{
++ return ktime_mono_to_any(mono, TK_OFFS_REAL);
++}
++
++static inline u64 ktime_get_raw_ns(void)
++{
++ return ktime_to_ns(ktime_get_raw());
++}
++
++#endif
+diff -Naur a/include/trace/events/fence.h b/include/trace/events/fence.h
+--- a/include/trace/events/fence.h 1970-01-01 05:30:00.000000000 +0530
++++ b/include/trace/events/fence.h 2015-03-26 14:42:38.762435422 +0530
+@@ -0,0 +1,128 @@
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM fence
++
++#if !defined(_TRACE_FENCE_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_FENCE_H
++
++#include <linux/tracepoint.h>
++
++struct fence;
++
++TRACE_EVENT(fence_annotate_wait_on,
++
++ /* fence: the fence waiting on f1, f1: the fence to be waited on. */
++ TP_PROTO(struct fence *fence, struct fence *f1),
++
++ TP_ARGS(fence, f1),
++
++ TP_STRUCT__entry(
++ __string(driver, fence->ops->get_driver_name(fence))
++		__string(timeline, fence->ops->get_timeline_name(fence))
++ __field(unsigned int, context)
++ __field(unsigned int, seqno)
++
++ __string(waiting_driver, f1->ops->get_driver_name(f1))
++ __string(waiting_timeline, f1->ops->get_timeline_name(f1))
++ __field(unsigned int, waiting_context)
++ __field(unsigned int, waiting_seqno)
++ ),
++
++ TP_fast_assign(
++ __assign_str(driver, fence->ops->get_driver_name(fence))
++ __assign_str(timeline, fence->ops->get_timeline_name(fence))
++ __entry->context = fence->context;
++ __entry->seqno = fence->seqno;
++
++ __assign_str(waiting_driver, f1->ops->get_driver_name(f1))
++ __assign_str(waiting_timeline, f1->ops->get_timeline_name(f1))
++ __entry->waiting_context = f1->context;
++ __entry->waiting_seqno = f1->seqno;
++
++ ),
++
++ TP_printk("driver=%s timeline=%s context=%u seqno=%u " \
++ "waits on driver=%s timeline=%s context=%u seqno=%u",
++ __get_str(driver), __get_str(timeline), __entry->context,
++ __entry->seqno,
++ __get_str(waiting_driver), __get_str(waiting_timeline),
++ __entry->waiting_context, __entry->waiting_seqno)
++);
++
++DECLARE_EVENT_CLASS(fence,
++
++ TP_PROTO(struct fence *fence),
++
++ TP_ARGS(fence),
++
++ TP_STRUCT__entry(
++ __string(driver, fence->ops->get_driver_name(fence))
++ __string(timeline, fence->ops->get_timeline_name(fence))
++ __field(unsigned int, context)
++ __field(unsigned int, seqno)
++ ),
++
++ TP_fast_assign(
++ __assign_str(driver, fence->ops->get_driver_name(fence))
++ __assign_str(timeline, fence->ops->get_timeline_name(fence))
++ __entry->context = fence->context;
++ __entry->seqno = fence->seqno;
++ ),
++
++ TP_printk("driver=%s timeline=%s context=%u seqno=%u",
++ __get_str(driver), __get_str(timeline), __entry->context,
++ __entry->seqno)
++);
++
++DEFINE_EVENT(fence, fence_emit,
++
++ TP_PROTO(struct fence *fence),
++
++ TP_ARGS(fence)
++);
++
++DEFINE_EVENT(fence, fence_init,
++
++ TP_PROTO(struct fence *fence),
++
++ TP_ARGS(fence)
++);
++
++DEFINE_EVENT(fence, fence_destroy,
++
++ TP_PROTO(struct fence *fence),
++
++ TP_ARGS(fence)
++);
++
++DEFINE_EVENT(fence, fence_enable_signal,
++
++ TP_PROTO(struct fence *fence),
++
++ TP_ARGS(fence)
++);
++
++DEFINE_EVENT(fence, fence_signaled,
++
++ TP_PROTO(struct fence *fence),
++
++ TP_ARGS(fence)
++);
++
++DEFINE_EVENT(fence, fence_wait_start,
++
++ TP_PROTO(struct fence *fence),
++
++ TP_ARGS(fence)
++);
++
++DEFINE_EVENT(fence, fence_wait_end,
++
++ TP_PROTO(struct fence *fence),
++
++ TP_ARGS(fence)
++);
++
++#endif /* _TRACE_FENCE_H */
++
++/* This part must be outside protection */
++#include <trace/define_trace.h>
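+
+A sketch of how a fence implementation would fire these tracepoints.
+Illustrative only; it assumes the backported <linux/fence.h> from this
+series, and CREATE_TRACE_POINTS may be defined in only one compilation
+unit (compare drivers/dma-buf/fence.c):
+
+	#include <linux/fence.h>
+
+	#define CREATE_TRACE_POINTS
+	#include <trace/events/fence.h>
+
+	static void emit_fence_events(struct fence *f)
+	{
+		trace_fence_init(f);          /* fence created */
+		trace_fence_enable_signal(f); /* signaling enabled */
+		trace_fence_signaled(f);      /* fence signaled */
+	}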
+diff -Naur a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
+--- a/include/uapi/drm/drm.h 2015-03-26 14:43:28.038436389 +0530
++++ b/include/uapi/drm/drm.h 2015-03-26 14:42:38.766435423 +0530
+@@ -619,6 +619,15 @@
+ #define DRM_PRIME_CAP_EXPORT 0x2
+ #define DRM_CAP_TIMESTAMP_MONOTONIC 0x6
+ #define DRM_CAP_ASYNC_PAGE_FLIP 0x7
++/*
++ * The CURSOR_WIDTH and CURSOR_HEIGHT capabilities return a valid width x height
++ * combination for the hardware cursor. The intention is that a
++ * hardware-agnostic userspace can query a cursor plane size to use.
++ *
++ * Note that the cross-driver contract is to merely return a valid size;
++ * drivers are free to attach another meaning on top, e.g. i915 returns the
++ * maximum plane size.
++ */
+ #define DRM_CAP_CURSOR_WIDTH 0x8
+ #define DRM_CAP_CURSOR_HEIGHT 0x9
+
+@@ -637,6 +646,14 @@
+ */
+ #define DRM_CLIENT_CAP_STEREO_3D 1
+
++/**
++ * DRM_CLIENT_CAP_UNIVERSAL_PLANES
++ *
++ * If set to 1, the DRM core will expose all planes (overlay, primary, and
++ * cursor) to userspace.
++ */
++#define DRM_CLIENT_CAP_UNIVERSAL_PLANES 2
++
+ /** DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
+ struct drm_set_client_cap {
+ __u64 capability;
+@@ -763,7 +780,7 @@
+
+ /**
+ * Device specific ioctls should only be in their respective headers
+- * The device specific ioctl range is from 0x40 to 0x99.
++ * The device specific ioctl range is from 0x40 to 0x9f.
+ * Generic IOCTLS restart at 0xA0.
+ *
+ * \sa drmCommandNone(), drmCommandRead(), drmCommandWrite(), and
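+
+A userspace sketch of opting in to the client capability added above.
+Illustrative only; it assumes an open DRM file descriptor and the
+libdrm kernel headers:
+
+	#include <sys/ioctl.h>
+	#include <drm/drm.h>
+
+	static int enable_universal_planes(int fd)
+	{
+		struct drm_set_client_cap cap = {
+			.capability = DRM_CLIENT_CAP_UNIVERSAL_PLANES,
+			.value = 1,
+		};
+		return ioctl(fd, DRM_IOCTL_SET_CLIENT_CAP, &cap);
+	}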
+diff -Naur a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
+--- a/include/uapi/drm/drm_mode.h 2015-03-26 14:43:28.038436389 +0530
++++ b/include/uapi/drm/drm_mode.h 2015-03-26 14:42:38.766435423 +0530
+@@ -88,6 +88,11 @@
+ #define DRM_MODE_SCALE_CENTER 2 /* Centered, no scaling */
+ #define DRM_MODE_SCALE_ASPECT 3 /* Full screen, preserve aspect */
+
++/* Picture aspect ratio options */
++#define DRM_MODE_PICTURE_ASPECT_NONE 0
++#define DRM_MODE_PICTURE_ASPECT_4_3 1
++#define DRM_MODE_PICTURE_ASPECT_16_9 2
++
+ /* Dithering mode options */
+ #define DRM_MODE_DITHERING_OFF 0
+ #define DRM_MODE_DITHERING_ON 1
+@@ -181,6 +186,7 @@
+ #define DRM_MODE_ENCODER_TVDAC 4
+ #define DRM_MODE_ENCODER_VIRTUAL 5
+ #define DRM_MODE_ENCODER_DSI 6
++#define DRM_MODE_ENCODER_DPMST 7
+
+ struct drm_mode_get_encoder {
+ __u32 encoder_id;
+@@ -251,6 +257,21 @@
+ #define DRM_MODE_PROP_BLOB (1<<4)
+ #define DRM_MODE_PROP_BITMASK (1<<5) /* bitmask of enumerated types */
+
++/* non-extended types: legacy bitmask, one bit per type: */
++#define DRM_MODE_PROP_LEGACY_TYPE ( \
++ DRM_MODE_PROP_RANGE | \
++ DRM_MODE_PROP_ENUM | \
++ DRM_MODE_PROP_BLOB | \
++ DRM_MODE_PROP_BITMASK)
++
++/* extended-types: rather than continue to consume a bit per type,
++ * grab a chunk of the bits to use as integer type id.
++ */
++#define DRM_MODE_PROP_EXTENDED_TYPE 0x0000ffc0
++#define DRM_MODE_PROP_TYPE(n) ((n) << 6)
++#define DRM_MODE_PROP_OBJECT DRM_MODE_PROP_TYPE(1)
++#define DRM_MODE_PROP_SIGNED_RANGE DRM_MODE_PROP_TYPE(2)
++
+ struct drm_mode_property_enum {
+ __u64 value;
+ char name[DRM_PROP_NAME_LEN];
+@@ -265,6 +286,8 @@
+ char name[DRM_PROP_NAME_LEN];
+
+ __u32 count_values;
++ /* This is only used to count enum values, not blobs. The name keeps
++ * its _blobs suffix purely for backwards compatibility. */
+ __u32 count_enum_blobs;
+ };
+
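+
+A sketch of classifying a property under the extended-type scheme added
+above: legacy types keep one bit each, while extended types share an
+integer id field. Illustrative only:
+
+	#include <drm/drm.h>
+	#include <drm/drm_mode.h>
+
+	static int is_object_prop(__u32 flags)
+	{
+		if (flags & DRM_MODE_PROP_LEGACY_TYPE)
+			return 0; /* RANGE, ENUM, BLOB or BITMASK */
+		return (flags & DRM_MODE_PROP_EXTENDED_TYPE) ==
+			DRM_MODE_PROP_OBJECT;
+	}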
+diff -Naur a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
+--- a/include/uapi/drm/i915_drm.h 2015-03-26 14:43:28.038436389 +0530
++++ b/include/uapi/drm/i915_drm.h 2015-03-26 14:42:38.766435423 +0530
+@@ -223,6 +223,7 @@
+ #define DRM_I915_GEM_GET_CACHING 0x30
+ #define DRM_I915_REG_READ 0x31
+ #define DRM_I915_GET_RESET_STATS 0x32
++#define DRM_I915_GEM_USERPTR 0x33
+
+ #define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
+ #define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
+@@ -273,6 +274,7 @@
+ #define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy)
+ #define DRM_IOCTL_I915_REG_READ DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read)
+ #define DRM_IOCTL_I915_GET_RESET_STATS DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GET_RESET_STATS, struct drm_i915_reset_stats)
++#define DRM_IOCTL_I915_GEM_USERPTR DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_USERPTR, struct drm_i915_gem_userptr)
+
+ /* Allow drivers to submit batchbuffers directly to hardware, relying
+ * on the security mechanisms provided by hardware.
+@@ -337,6 +339,8 @@
+ #define I915_PARAM_HAS_EXEC_NO_RELOC 25
+ #define I915_PARAM_HAS_EXEC_HANDLE_LUT 26
+ #define I915_PARAM_HAS_WT 27
++#define I915_PARAM_CMD_PARSER_VERSION 28
++#define I915_PARAM_HAS_COHERENT_PHYS_GTT 29
+
+ typedef struct drm_i915_getparam {
+ int param;
+@@ -873,6 +877,12 @@
+ * mmap mapping.
+ */
+ __u32 swizzle_mode;
++
++ /**
++ * Returned address bit 6 swizzling required for CPU access through
++ * mmap mapping whilst bound.
++ */
++ __u32 phys_swizzle_mode;
+ };
+
+ struct drm_i915_gem_get_aperture {
+@@ -1049,4 +1059,18 @@
+ __u32 pad;
+ };
+
++struct drm_i915_gem_userptr {
++ __u64 user_ptr;
++ __u64 user_size;
++ __u32 flags;
++#define I915_USERPTR_READ_ONLY 0x1
++#define I915_USERPTR_UNSYNCHRONIZED 0x80000000
++ /**
++ * Returned handle for the object.
++ *
++ * Object handles are nonzero.
++ */
++ __u32 handle;
++};
++
+ #endif /* _UAPI_I915_DRM_H_ */
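+
+A userspace sketch of the new userptr ioctl. The wrapper name
+gem_userptr() is illustrative; both the pointer and the size must be
+page aligned, and an open i915 DRM file descriptor is assumed:
+
+	#include <string.h>
+	#include <sys/ioctl.h>
+	#include <drm/i915_drm.h>
+
+	static int gem_userptr(int fd, void *ptr, __u64 size, __u32 *handle)
+	{
+		struct drm_i915_gem_userptr arg;
+
+		memset(&arg, 0, sizeof(arg));
+		arg.user_ptr = (__u64)(unsigned long)ptr;
+		arg.user_size = size;
+		arg.flags = 0; /* or I915_USERPTR_READ_ONLY */
+
+		if (ioctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &arg))
+			return -1;
+		*handle = arg.handle;
+		return 0;
+	}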
+diff -Naur a/include/uapi/drm/nouveau_drm.h b/include/uapi/drm/nouveau_drm.h
+--- a/include/uapi/drm/nouveau_drm.h 2015-03-26 14:43:28.038436389 +0530
++++ b/include/uapi/drm/nouveau_drm.h 2015-03-26 14:42:38.766435423 +0530
+@@ -25,6 +25,16 @@
+ #ifndef __NOUVEAU_DRM_H__
+ #define __NOUVEAU_DRM_H__
+
++#define DRM_NOUVEAU_EVENT_NVIF 0x80000000
++
++/* reserved object handles when using deprecated object APIs - these
++ * are here so that libdrm can allow interoperability with the new
++ * object APIs
++ */
++#define NOUVEAU_ABI16_CLIENT 0xffffffff
++#define NOUVEAU_ABI16_DEVICE 0xdddddddd
++#define NOUVEAU_ABI16_CHAN(n) (0xcccc0000 | (n))
++
+ #define NOUVEAU_GEM_DOMAIN_CPU (1 << 0)
+ #define NOUVEAU_GEM_DOMAIN_VRAM (1 << 1)
+ #define NOUVEAU_GEM_DOMAIN_GART (1 << 2)
+@@ -123,6 +133,7 @@
+ #define DRM_NOUVEAU_GROBJ_ALLOC 0x04 /* deprecated */
+ #define DRM_NOUVEAU_NOTIFIEROBJ_ALLOC 0x05 /* deprecated */
+ #define DRM_NOUVEAU_GPUOBJ_FREE 0x06 /* deprecated */
++#define DRM_NOUVEAU_NVIF 0x07
+ #define DRM_NOUVEAU_GEM_NEW 0x40
+ #define DRM_NOUVEAU_GEM_PUSHBUF 0x41
+ #define DRM_NOUVEAU_GEM_CPU_PREP 0x42
+diff -Naur a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
+--- a/kernel/time/timekeeping.c 2015-03-26 14:43:29.038436408 +0530
++++ b/kernel/time/timekeeping.c 2015-03-26 14:42:38.766435423 +0530
+@@ -388,6 +388,33 @@
+ }
+ EXPORT_SYMBOL_GPL(ktime_get_ts);
+
++static ktime_t *offsets[TK_OFFS_MAX] = {
++ [TK_OFFS_REAL] = &timekeeper.offs_real,
++ [TK_OFFS_BOOT] = &timekeeper.offs_boot,
++ [TK_OFFS_TAI] = &timekeeper.offs_tai,
++};
++
++
++
++/**
++ * ktime_mono_to_any() - convert monotonic time to any other time
++ * @tmono: time to convert.
++ * @offs: which offset to use
++ */
++ktime_t ktime_mono_to_any(ktime_t tmono, enum tk_offsets offs)
++{
++ ktime_t *offset = offsets[offs];
++ unsigned long seq;
++ ktime_t tconv;
++
++ do {
++ seq = read_seqcount_begin(&timekeeper_seq);
++ tconv = ktime_add(tmono, *offset);
++ } while (read_seqcount_retry(&timekeeper_seq, seq));
++
++ return tconv;
++}
++EXPORT_SYMBOL_GPL(ktime_mono_to_any);
+
+ /**
+ * timekeeping_clocktai - Returns the TAI time of day in a timespec
+diff -Naur a/kernel/time.c b/kernel/time.c
+--- a/kernel/time.c 2015-03-26 14:43:29.058436409 +0530
++++ b/kernel/time.c 2015-03-26 14:42:38.766435423 +0530
+@@ -700,6 +700,7 @@
+ {
+ return (unsigned long)nsecs_to_jiffies64(n);
+ }
++EXPORT_SYMBOL_GPL(nsecs_to_jiffies);
+
+ /*
+ * Add two timespec values and do a safety check for overflow.
+diff -Naur a/lib/interval_tree.c b/lib/interval_tree.c
+--- a/lib/interval_tree.c 2015-03-26 14:43:29.510436418 +0530
++++ b/lib/interval_tree.c 2015-03-26 14:42:38.766435423 +0530
+@@ -1,6 +1,7 @@
+ #include <linux/init.h>
+ #include <linux/interval_tree.h>
+ #include <linux/interval_tree_generic.h>
++#include <linux/module.h>
+
+ #define START(node) ((node)->start)
+ #define LAST(node) ((node)->last)
+@@ -8,3 +9,8 @@
+ INTERVAL_TREE_DEFINE(struct interval_tree_node, rb,
+ unsigned long, __subtree_last,
+ START, LAST,, interval_tree)
++
++EXPORT_SYMBOL_GPL(interval_tree_insert);
++EXPORT_SYMBOL_GPL(interval_tree_remove);
++EXPORT_SYMBOL_GPL(interval_tree_iter_first);
++EXPORT_SYMBOL_GPL(interval_tree_iter_next);
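+
+A sketch of what the new exports make possible: a loadable module can
+now maintain its own interval tree, for example to track in-use address
+ranges. Illustrative only:
+
+	#include <linux/interval_tree.h>
+
+	static struct rb_root itree = RB_ROOT;
+
+	/* True if [start, last] overlaps any tracked range. */
+	static bool range_busy(unsigned long start, unsigned long last)
+	{
+		return interval_tree_iter_first(&itree, start, last) != NULL;
+	}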
+diff -Naur a/lib/interval_tree_test.c b/lib/interval_tree_test.c
+--- a/lib/interval_tree_test.c 1970-01-01 05:30:00.000000000 +0530
++++ b/lib/interval_tree_test.c 2015-03-26 14:42:38.770435423 +0530
+@@ -0,0 +1,106 @@
++#include <linux/module.h>
++#include <linux/interval_tree.h>
++#include <linux/random.h>
++#include <asm/timex.h>
++
++#define NODES 100
++#define PERF_LOOPS 100000
++#define SEARCHES 100
++#define SEARCH_LOOPS 10000
++
++static struct rb_root root = RB_ROOT;
++static struct interval_tree_node nodes[NODES];
++static u32 queries[SEARCHES];
++
++static struct rnd_state rnd;
++
++static inline unsigned long
++search(unsigned long query, struct rb_root *root)
++{
++ struct interval_tree_node *node;
++ unsigned long results = 0;
++
++ for (node = interval_tree_iter_first(root, query, query); node;
++ node = interval_tree_iter_next(node, query, query))
++ results++;
++ return results;
++}
++
++static void init(void)
++{
++ int i;
++ for (i = 0; i < NODES; i++) {
++ u32 a = prandom_u32_state(&rnd);
++ u32 b = prandom_u32_state(&rnd);
++ if (a <= b) {
++ nodes[i].start = a;
++ nodes[i].last = b;
++ } else {
++ nodes[i].start = b;
++ nodes[i].last = a;
++ }
++ }
++ for (i = 0; i < SEARCHES; i++)
++ queries[i] = prandom_u32_state(&rnd);
++}
++
++static int interval_tree_test_init(void)
++{
++ int i, j;
++ unsigned long results;
++ cycles_t time1, time2, time;
++
++ printk(KERN_ALERT "interval tree insert/remove");
++
++ prandom_seed_state(&rnd, 3141592653589793238ULL);
++ init();
++
++ time1 = get_cycles();
++
++ for (i = 0; i < PERF_LOOPS; i++) {
++ for (j = 0; j < NODES; j++)
++ interval_tree_insert(nodes + j, &root);
++ for (j = 0; j < NODES; j++)
++ interval_tree_remove(nodes + j, &root);
++ }
++
++ time2 = get_cycles();
++ time = time2 - time1;
++
++ time = div_u64(time, PERF_LOOPS);
++ printk(" -> %llu cycles\n", (unsigned long long)time);
++
++ printk(KERN_ALERT "interval tree search");
++
++ for (j = 0; j < NODES; j++)
++ interval_tree_insert(nodes + j, &root);
++
++ time1 = get_cycles();
++
++ results = 0;
++ for (i = 0; i < SEARCH_LOOPS; i++)
++ for (j = 0; j < SEARCHES; j++)
++ results += search(queries[j], &root);
++
++ time2 = get_cycles();
++ time = time2 - time1;
++
++ time = div_u64(time, SEARCH_LOOPS);
++ results = div_u64(results, SEARCH_LOOPS);
++ printk(" -> %llu cycles (%lu results)\n",
++ (unsigned long long)time, results);
++
++ return -EAGAIN; /* Fail will directly unload the module */
++}
++
++static void interval_tree_test_exit(void)
++{
++ printk(KERN_ALERT "test exit\n");
++}
++
++module_init(interval_tree_test_init)
++module_exit(interval_tree_test_exit)
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Michel Lespinasse");
++MODULE_DESCRIPTION("Interval Tree test");
+diff -Naur a/lib/interval_tree_test_main.c b/lib/interval_tree_test_main.c
+--- a/lib/interval_tree_test_main.c 2015-03-26 14:43:29.518436418 +0530
++++ b/lib/interval_tree_test_main.c 1970-01-01 05:30:00.000000000 +0530
+@@ -1,106 +0,0 @@
+-#include <linux/module.h>
+-#include <linux/interval_tree.h>
+-#include <linux/random.h>
+-#include <asm/timex.h>
+-
+-#define NODES 100
+-#define PERF_LOOPS 100000
+-#define SEARCHES 100
+-#define SEARCH_LOOPS 10000
+-
+-static struct rb_root root = RB_ROOT;
+-static struct interval_tree_node nodes[NODES];
+-static u32 queries[SEARCHES];
+-
+-static struct rnd_state rnd;
+-
+-static inline unsigned long
+-search(unsigned long query, struct rb_root *root)
+-{
+- struct interval_tree_node *node;
+- unsigned long results = 0;
+-
+- for (node = interval_tree_iter_first(root, query, query); node;
+- node = interval_tree_iter_next(node, query, query))
+- results++;
+- return results;
+-}
+-
+-static void init(void)
+-{
+- int i;
+- for (i = 0; i < NODES; i++) {
+- u32 a = prandom_u32_state(&rnd);
+- u32 b = prandom_u32_state(&rnd);
+- if (a <= b) {
+- nodes[i].start = a;
+- nodes[i].last = b;
+- } else {
+- nodes[i].start = b;
+- nodes[i].last = a;
+- }
+- }
+- for (i = 0; i < SEARCHES; i++)
+- queries[i] = prandom_u32_state(&rnd);
+-}
+-
+-static int interval_tree_test_init(void)
+-{
+- int i, j;
+- unsigned long results;
+- cycles_t time1, time2, time;
+-
+- printk(KERN_ALERT "interval tree insert/remove");
+-
+- prandom_seed_state(&rnd, 3141592653589793238ULL);
+- init();
+-
+- time1 = get_cycles();
+-
+- for (i = 0; i < PERF_LOOPS; i++) {
+- for (j = 0; j < NODES; j++)
+- interval_tree_insert(nodes + j, &root);
+- for (j = 0; j < NODES; j++)
+- interval_tree_remove(nodes + j, &root);
+- }
+-
+- time2 = get_cycles();
+- time = time2 - time1;
+-
+- time = div_u64(time, PERF_LOOPS);
+- printk(" -> %llu cycles\n", (unsigned long long)time);
+-
+- printk(KERN_ALERT "interval tree search");
+-
+- for (j = 0; j < NODES; j++)
+- interval_tree_insert(nodes + j, &root);
+-
+- time1 = get_cycles();
+-
+- results = 0;
+- for (i = 0; i < SEARCH_LOOPS; i++)
+- for (j = 0; j < SEARCHES; j++)
+- results += search(queries[j], &root);
+-
+- time2 = get_cycles();
+- time = time2 - time1;
+-
+- time = div_u64(time, SEARCH_LOOPS);
+- results = div_u64(results, SEARCH_LOOPS);
+- printk(" -> %llu cycles (%lu results)\n",
+- (unsigned long long)time, results);
+-
+- return -EAGAIN; /* Fail will directly unload the module */
+-}
+-
+-static void interval_tree_test_exit(void)
+-{
+- printk(KERN_ALERT "test exit\n");
+-}
+-
+-module_init(interval_tree_test_init)
+-module_exit(interval_tree_test_exit)
+-
+-MODULE_LICENSE("GPL");
+-MODULE_AUTHOR("Michel Lespinasse");
+-MODULE_DESCRIPTION("Interval Tree test");
+diff -Naur a/lib/Kconfig b/lib/Kconfig
+--- a/lib/Kconfig 2015-03-26 14:43:29.510436418 +0530
++++ b/lib/Kconfig 2015-03-26 14:42:38.770435423 +0530
+@@ -322,6 +322,20 @@
+ config BTREE
+ boolean
+
++config INTERVAL_TREE
++ boolean
++ help
++ Simple, embeddable interval tree. Can find the start of an
++ overlapping range in log(n) time and then iterate over all
++ overlapping nodes. The algorithm is implemented as an
++ augmented rbtree.
++
++ See:
++
++ Documentation/rbtree.txt
++
++ for more information.
++
+ config ASSOCIATIVE_ARRAY
+ bool
+ help
+diff -Naur a/lib/Kconfig.debug b/lib/Kconfig.debug
+--- a/lib/Kconfig.debug 2015-03-26 14:43:29.526436418 +0530
++++ b/lib/Kconfig.debug 2015-03-26 14:42:38.770435423 +0530
+@@ -1487,6 +1487,7 @@
+ config INTERVAL_TREE_TEST
+ tristate "Interval tree test"
+ depends on m && DEBUG_KERNEL
++ select INTERVAL_TREE
+ help
+ A benchmark measuring the performance of the interval tree library
+
+diff -Naur a/lib/Makefile b/lib/Makefile
+--- a/lib/Makefile 2015-03-26 14:43:29.514436418 +0530
++++ b/lib/Makefile 2015-03-26 14:42:38.770435423 +0530
+@@ -50,6 +50,7 @@
+ obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
+
+ obj-$(CONFIG_BTREE) += btree.o
++obj-$(CONFIG_INTERVAL_TREE) += interval_tree.o
+ obj-$(CONFIG_ASSOCIATIVE_ARRAY) += assoc_array.o
+ obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
+ obj-$(CONFIG_DEBUG_LIST) += list_debug.o
+@@ -155,8 +156,6 @@
+ obj-$(CONFIG_RBTREE_TEST) += rbtree_test.o
+ obj-$(CONFIG_INTERVAL_TREE_TEST) += interval_tree_test.o
+
+-interval_tree_test-objs := interval_tree_test_main.o interval_tree.o
+-
+ obj-$(CONFIG_PERCPU_TEST) += percpu_test.o
+
+ obj-$(CONFIG_ASN1) += asn1_decoder.o